| code (string, 75-104k chars) | code_sememe (string, 47-309k chars) | token_type (string, 215-214k chars) | code_dependency (string, 75-155k chars) |
|---|---|---|---|
def _invalid_triple_quote(self, quote, row, col=None):
"""Add a message for an invalid triple quote.
Args:
quote: The quote characters that were found.
row: The row number the quote characters were found on.
col: The column the quote characters were found on.
"""
self.add_message(
'invalid-triple-quote',
line=row,
args=(quote, TRIPLE_QUOTE_OPTS.get(self.config.triple_quote)),
**self.get_offset(col)
) | def function[_invalid_triple_quote, parameter[self, quote, row, col]]:
constant[Add a message for an invalid triple quote.
Args:
quote: The quote characters that were found.
row: The row number the quote characters were found on.
col: The column the quote characters were found on.
]
call[name[self].add_message, parameter[constant[invalid-triple-quote]]] | keyword[def] identifier[_invalid_triple_quote] ( identifier[self] , identifier[quote] , identifier[row] , identifier[col] = keyword[None] ):
literal[string]
identifier[self] . identifier[add_message] (
literal[string] ,
identifier[line] = identifier[row] ,
identifier[args] =( identifier[quote] , identifier[TRIPLE_QUOTE_OPTS] . identifier[get] ( identifier[self] . identifier[config] . identifier[triple_quote] )),
** identifier[self] . identifier[get_offset] ( identifier[col] )
) | def _invalid_triple_quote(self, quote, row, col=None):
"""Add a message for an invalid triple quote.
Args:
quote: The quote characters that were found.
row: The row number the quote characters were found on.
col: The column the quote characters were found on.
"""
self.add_message('invalid-triple-quote', line=row, args=(quote, TRIPLE_QUOTE_OPTS.get(self.config.triple_quote)), **self.get_offset(col)) |
def air_range(self) -> Union[int, float]:
""" Does not include upgrades """
if self._weapons:
weapon = next(
(weapon for weapon in self._weapons if weapon.type in {TargetType.Air.value, TargetType.Any.value}),
None,
)
if weapon:
return weapon.range
return 0 | def function[air_range, parameter[self]]:
constant[ Does not include upgrades ]
if name[self]._weapons begin[:]
variable[weapon] assign[=] call[name[next], parameter[<ast.GeneratorExp object at 0x7da20c7c8a30>, constant[None]]]
if name[weapon] begin[:]
return[name[weapon].range]
return[constant[0]] | keyword[def] identifier[air_range] ( identifier[self] )-> identifier[Union] [ identifier[int] , identifier[float] ]:
literal[string]
keyword[if] identifier[self] . identifier[_weapons] :
identifier[weapon] = identifier[next] (
( identifier[weapon] keyword[for] identifier[weapon] keyword[in] identifier[self] . identifier[_weapons] keyword[if] identifier[weapon] . identifier[type] keyword[in] { identifier[TargetType] . identifier[Air] . identifier[value] , identifier[TargetType] . identifier[Any] . identifier[value] }),
keyword[None] ,
)
keyword[if] identifier[weapon] :
keyword[return] identifier[weapon] . identifier[range]
keyword[return] literal[int] | def air_range(self) -> Union[int, float]:
""" Does not include upgrades """
if self._weapons:
weapon = next((weapon for weapon in self._weapons if weapon.type in {TargetType.Air.value, TargetType.Any.value}), None)
if weapon:
return weapon.range # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return 0 |
def read_string(self, advance=True):
"""
Read a string terminated by null byte '\0'. The returned string
object is ASCII decoded, and will not include the terminating null byte.
"""
target = self._blob.find(b'\0', self.pos)
assert target >= self._pos
data = self._blob[self._pos:target]
if advance:
self._pos = target + 1
return data.decode('ascii') | def function[read_string, parameter[self, advance]]:
constant[
Read a string terminated by null byte ' '. The returned string
object is ASCII decoded, and will not include the terminating null byte.
]
variable[target] assign[=] call[name[self]._blob.find, parameter[constant[b'\x00'], name[self].pos]]
assert[compare[name[target] greater_or_equal[>=] name[self]._pos]]
variable[data] assign[=] call[name[self]._blob][<ast.Slice object at 0x7da18fe90760>]
if name[advance] begin[:]
name[self]._pos assign[=] binary_operation[name[target] + constant[1]]
return[call[name[data].decode, parameter[constant[ascii]]]] | keyword[def] identifier[read_string] ( identifier[self] , identifier[advance] = keyword[True] ):
literal[string]
identifier[target] = identifier[self] . identifier[_blob] . identifier[find] ( literal[string] , identifier[self] . identifier[pos] )
keyword[assert] identifier[target] >= identifier[self] . identifier[_pos]
identifier[data] = identifier[self] . identifier[_blob] [ identifier[self] . identifier[_pos] : identifier[target] ]
keyword[if] identifier[advance] :
identifier[self] . identifier[_pos] = identifier[target] + literal[int]
keyword[return] identifier[data] . identifier[decode] ( literal[string] ) | def read_string(self, advance=True):
"""
Read a string terminated by null byte '\x00'. The returned string
object is ASCII decoded, and will not include the terminating null byte.
"""
target = self._blob.find(b'\x00', self.pos)
assert target >= self._pos
data = self._blob[self._pos:target]
if advance:
self._pos = target + 1 # depends on [control=['if'], data=[]]
return data.decode('ascii') |
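
The same null-terminated read pattern in isolation: a minimal, self-contained sketch (the surrounding reader class is not shown in this row, so a free function stands in for it):

```python
def read_cstring(blob: bytes, pos: int):
    """Decode ASCII up to the next null byte; return (text, new_pos)."""
    end = blob.find(b'\x00', pos)
    assert end >= pos, 'no terminating null byte found'
    return blob[pos:end].decode('ascii'), end + 1

blob = b'hello\x00world\x00'
text, pos = read_cstring(blob, 0)    # -> ('hello', 6)
text, pos = read_cstring(blob, pos)  # -> ('world', 12)
```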
def punctuate(current_text, new_text, add_punctuation):
""" Add punctuation as needed """
if add_punctuation and current_text and not current_text[-1] in string.punctuation:
current_text += '. '
spacer = ' ' if not current_text or (not current_text[-1].isspace() and not new_text[0].isspace()) else ''
return current_text + spacer + new_text | def function[punctuate, parameter[current_text, new_text, add_punctuation]]:
constant[ Add punctuation as needed ]
if <ast.BoolOp object at 0x7da1b15f3eb0> begin[:]
<ast.AugAssign object at 0x7da2047e89a0>
variable[spacer] assign[=] <ast.IfExp object at 0x7da2047e98a0>
return[binary_operation[binary_operation[name[current_text] + name[spacer]] + name[new_text]]] | keyword[def] identifier[punctuate] ( identifier[current_text] , identifier[new_text] , identifier[add_punctuation] ):
literal[string]
keyword[if] identifier[add_punctuation] keyword[and] identifier[current_text] keyword[and] keyword[not] identifier[current_text] [- literal[int] ] keyword[in] identifier[string] . identifier[punctuation] :
identifier[current_text] += literal[string]
identifier[spacer] = literal[string] keyword[if] keyword[not] identifier[current_text] keyword[or] ( keyword[not] identifier[current_text] [- literal[int] ]. identifier[isspace] () keyword[and] keyword[not] identifier[new_text] [ literal[int] ]. identifier[isspace] ()) keyword[else] literal[string]
keyword[return] identifier[current_text] + identifier[spacer] + identifier[new_text] | def punctuate(current_text, new_text, add_punctuation):
""" Add punctuation as needed """
if add_punctuation and current_text and (not current_text[-1] in string.punctuation):
current_text += '. ' # depends on [control=['if'], data=[]]
spacer = ' ' if not current_text or (not current_text[-1].isspace() and (not new_text[0].isspace())) else ''
return current_text + spacer + new_text |
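
A quick behavioral sketch, assuming `punctuate` itself is in scope (only the `string` module it consults is imported here):

```python
import string  # punctuate() checks string.punctuation on the last character

print(punctuate('Results look good', 'See the appendix', add_punctuation=True))
# -> 'Results look good. See the appendix'  ('. ' appended before joining)
print(punctuate('Done.', 'Next step', add_punctuation=True))
# -> 'Done. Next step'  (already punctuated, so only a space is inserted)
```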
def for_each(self, action, filter=None):
"""! @brief Apply an action to every component defined in the ROM table and child tables.
This method iterates over every entry in the ROM table. For each entry it calls the
filter function if provided. If the filter passes (returns True or was not provided) then
the action function is called.
The ROM table must have been initialized by calling init() prior to using this method.
@param self This object.
@param action Callable that accepts a single parameter, a CoreSightComponentID instance.
@param filter Optional filter callable. Must accept a CoreSightComponentID instance and
return a boolean indicating whether to perform the action (True applies action).
"""
for component in self.components:
# Recurse into child ROM tables.
if isinstance(component, ROMTable):
component.for_each(action, filter)
continue
# Skip component if the filter returns False.
if filter is not None and not filter(component):
continue
# Perform the action.
action(component) | def function[for_each, parameter[self, action, filter]]:
constant[! @brief Apply an action to every component defined in the ROM table and child tables.
This method iterates over every entry in the ROM table. For each entry it calls the
filter function if provided. If the filter passes (returns True or was not provided) then
the action function is called.
The ROM table must have been initialized by calling init() prior to using this method.
@param self This object.
@param action Callable that accepts a single parameter, a CoreSightComponentID instance.
@param filter Optional filter callable. Must accept a CoreSightComponentID instance and
return a boolean indicating whether to perform the action (True applies action).
]
for taget[name[component]] in starred[name[self].components] begin[:]
if call[name[isinstance], parameter[name[component], name[ROMTable]]] begin[:]
call[name[component].for_each, parameter[name[action], name[filter]]]
continue
if <ast.BoolOp object at 0x7da1b18addb0> begin[:]
continue
call[name[action], parameter[name[component]]] | keyword[def] identifier[for_each] ( identifier[self] , identifier[action] , identifier[filter] = keyword[None] ):
literal[string]
keyword[for] identifier[component] keyword[in] identifier[self] . identifier[components] :
keyword[if] identifier[isinstance] ( identifier[component] , identifier[ROMTable] ):
identifier[component] . identifier[for_each] ( identifier[action] , identifier[filter] )
keyword[continue]
keyword[if] identifier[filter] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[filter] ( identifier[component] ):
keyword[continue]
identifier[action] ( identifier[component] ) | def for_each(self, action, filter=None):
"""! @brief Apply an action to every component defined in the ROM table and child tables.
This method iterates over every entry in the ROM table. For each entry it calls the
filter function if provided. If the filter passes (returns True or was not provided) then
the action function is called.
The ROM table must have been initialized by calling init() prior to using this method.
@param self This object.
@param action Callable that accepts a single parameter, a CoreSightComponentID instance.
@param filter Optional filter callable. Must accept a CoreSightComponentID instance and
return a boolean indicating whether to perform the action (True applies action).
"""
for component in self.components:
# Recurse into child ROM tables.
if isinstance(component, ROMTable):
component.for_each(action, filter)
continue # depends on [control=['if'], data=[]]
# Skip component if the filter returns False.
if filter is not None and (not filter(component)):
continue # depends on [control=['if'], data=[]]
# Perform the action.
action(component) # depends on [control=['for'], data=['component']] |
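
A self-contained sketch of the action/filter contract described in the docstring; `DummyComponent` stands in for `CoreSightComponentID`, and the loop mirrors what `for_each` does for a table with no child tables:

```python
class DummyComponent:
    def __init__(self, name):
        self.name = name

components = [DummyComponent('CPU0'), DummyComponent('ETM'), DummyComponent('CPU1')]

def action(c):
    print('visiting', c.name)

def only_cpus(c):
    return c.name.startswith('CPU')

# Equivalent to rom_table.for_each(action, only_cpus) on a flat table:
# filter first, then act.
for c in components:
    if only_cpus(c):
        action(c)  # visits CPU0 and CPU1, skips ETM
```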
def photo_infos(self):
"""
:returns: list of :class:`~okcupyd.photo.Info` instances for each photo
displayed on okcupid.
"""
from . import photo
pics_request = self._session.okc_get(
u'profile/{0}/album/0'.format(self.username),
)
pics_tree = html.fromstring(u'{0}{1}{2}'.format(
u'<div>', pics_request.json()['fulls'], u'</div>'
))
return [photo.Info.from_cdn_uri(uri)
for uri in self._photo_info_xpb.apply_(pics_tree)] | def function[photo_infos, parameter[self]]:
constant[
:returns: list of :class:`~okcupyd.photo.Info` instances for each photo
displayed on okcupid.
]
from relative_module[None] import module[photo]
variable[pics_request] assign[=] call[name[self]._session.okc_get, parameter[call[constant[profile/{0}/album/0].format, parameter[name[self].username]]]]
variable[pics_tree] assign[=] call[name[html].fromstring, parameter[call[constant[{0}{1}{2}].format, parameter[constant[<div>], call[call[name[pics_request].json, parameter[]]][constant[fulls]], constant[</div>]]]]]
return[<ast.ListComp object at 0x7da1b283bc10>] | keyword[def] identifier[photo_infos] ( identifier[self] ):
literal[string]
keyword[from] . keyword[import] identifier[photo]
identifier[pics_request] = identifier[self] . identifier[_session] . identifier[okc_get] (
literal[string] . identifier[format] ( identifier[self] . identifier[username] ),
)
identifier[pics_tree] = identifier[html] . identifier[fromstring] ( literal[string] . identifier[format] (
literal[string] , identifier[pics_request] . identifier[json] ()[ literal[string] ], literal[string]
))
keyword[return] [ identifier[photo] . identifier[Info] . identifier[from_cdn_uri] ( identifier[uri] )
keyword[for] identifier[uri] keyword[in] identifier[self] . identifier[_photo_info_xpb] . identifier[apply_] ( identifier[pics_tree] )] | def photo_infos(self):
"""
:returns: list of :class:`~okcupyd.photo.Info` instances for each photo
displayed on okcupid.
"""
from . import photo
pics_request = self._session.okc_get(u'profile/{0}/album/0'.format(self.username))
pics_tree = html.fromstring(u'{0}{1}{2}'.format(u'<div>', pics_request.json()['fulls'], u'</div>'))
return [photo.Info.from_cdn_uri(uri) for uri in self._photo_info_xpb.apply_(pics_tree)] |
def get_system_config_dirs(app_name, app_author, force_xdg=True):
r"""Returns a list of system-wide config folders for the application.
For an example application called ``"My App"`` by ``"Acme"``,
something like the following folders could be returned:
macOS (non-XDG):
``['/Library/Application Support/My App']``
macOS (XDG):
``['/etc/xdg/my-app']``
Unix:
``['/etc/xdg/my-app']``
Windows 7:
``['C:\ProgramData\Acme\My App']``
:param app_name: the application name. This should be properly capitalized
and can contain whitespace.
:param app_author: The app author's name (or company). This should be
properly capitalized and can contain whitespace.
:param force_xdg: if this is set to `True`, then on macOS the XDG Base
Directory Specification will be followed. Has no effect
on non-macOS systems.
"""
if WIN:
folder = os.environ.get('PROGRAMDATA')
return [os.path.join(folder, app_author, app_name)]
if MAC and not force_xdg:
return [os.path.join('/Library/Application Support', app_name)]
dirs = os.environ.get('XDG_CONFIG_DIRS', '/etc/xdg')
paths = [os.path.expanduser(x) for x in dirs.split(os.pathsep)]
return [os.path.join(d, _pathify(app_name)) for d in paths] | def function[get_system_config_dirs, parameter[app_name, app_author, force_xdg]]:
constant[Returns a list of system-wide config folders for the application.
For an example application called ``"My App"`` by ``"Acme"``,
something like the following folders could be returned:
macOS (non-XDG):
``['/Library/Application Support/My App']``
macOS (XDG):
``['/etc/xdg/my-app']``
Unix:
``['/etc/xdg/my-app']``
Windows 7:
``['C:\ProgramData\Acme\My App']``
:param app_name: the application name. This should be properly capitalized
and can contain whitespace.
:param app_author: The app author's name (or company). This should be
properly capitalized and can contain whitespace.
:param force_xdg: if this is set to `True`, then on macOS the XDG Base
Directory Specification will be followed. Has no effect
on non-macOS systems.
]
if name[WIN] begin[:]
variable[folder] assign[=] call[name[os].environ.get, parameter[constant[PROGRAMDATA]]]
return[list[[<ast.Call object at 0x7da20c6e6a10>]]]
if <ast.BoolOp object at 0x7da20c6e74f0> begin[:]
return[list[[<ast.Call object at 0x7da20c6e6020>]]]
variable[dirs] assign[=] call[name[os].environ.get, parameter[constant[XDG_CONFIG_DIRS], constant[/etc/xdg]]]
variable[paths] assign[=] <ast.ListComp object at 0x7da20c6e60e0>
return[<ast.ListComp object at 0x7da20c6e6620>] | keyword[def] identifier[get_system_config_dirs] ( identifier[app_name] , identifier[app_author] , identifier[force_xdg] = keyword[True] ):
literal[string]
keyword[if] identifier[WIN] :
identifier[folder] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] )
keyword[return] [ identifier[os] . identifier[path] . identifier[join] ( identifier[folder] , identifier[app_author] , identifier[app_name] )]
keyword[if] identifier[MAC] keyword[and] keyword[not] identifier[force_xdg] :
keyword[return] [ identifier[os] . identifier[path] . identifier[join] ( literal[string] , identifier[app_name] )]
identifier[dirs] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] )
identifier[paths] =[ identifier[os] . identifier[path] . identifier[expanduser] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[dirs] . identifier[split] ( identifier[os] . identifier[pathsep] )]
keyword[return] [ identifier[os] . identifier[path] . identifier[join] ( identifier[d] , identifier[_pathify] ( identifier[app_name] )) keyword[for] identifier[d] keyword[in] identifier[paths] ] | def get_system_config_dirs(app_name, app_author, force_xdg=True):
"""Returns a list of system-wide config folders for the application.
For an example application called ``"My App"`` by ``"Acme"``,
something like the following folders could be returned:
macOS (non-XDG):
``['/Library/Application Support/My App']``
macOS (XDG):
``['/etc/xdg/my-app']``
Unix:
``['/etc/xdg/my-app']``
Windows 7:
``['C:\\ProgramData\\Acme\\My App']``
:param app_name: the application name. This should be properly capitalized
and can contain whitespace.
:param app_author: The app author's name (or company). This should be
properly capitalized and can contain whitespace.
:param force_xdg: if this is set to `True`, then on macOS the XDG Base
Directory Specification will be followed. Has no effect
on non-macOS systems.
"""
if WIN:
folder = os.environ.get('PROGRAMDATA')
return [os.path.join(folder, app_author, app_name)] # depends on [control=['if'], data=[]]
if MAC and (not force_xdg):
return [os.path.join('/Library/Application Support', app_name)] # depends on [control=['if'], data=[]]
dirs = os.environ.get('XDG_CONFIG_DIRS', '/etc/xdg')
paths = [os.path.expanduser(x) for x in dirs.split(os.pathsep)]
return [os.path.join(d, _pathify(app_name)) for d in paths] |
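
Illustrative call only, assuming the function is importable; the output shown is what a Linux host with `XDG_CONFIG_DIRS` unset would typically produce:

```python
for d in get_system_config_dirs('My App', 'Acme'):
    print(d)  # -> /etc/xdg/my-app
```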
def cli(ctx, url=None, api_key=None, admin=False, **kwds):
"""Help initialize global configuration (in home directory)
"""
# TODO: prompt for values someday.
click.echo("""Welcome to Apollo's Arrow!""")
if os.path.exists(config.global_config_path()):
info("Your arrow configuration already exists. Please edit it instead: %s" % config.global_config_path())
return 0
while True:
apollo_url = click.prompt("Please enter your Apollo's URL")
apollo_username = click.prompt("Please enter your Apollo Username")
apollo_password = click.prompt("Please enter your Apollo Password", hide_input=True)
info("Testing connection...")
try:
ai = ApolloInstance(apollo_url, apollo_username, apollo_password)
try:
ai.metrics.get_metrics()
# Ok, success
info("Ok! Everything looks good.")
break
except Exception as e:
print(e)
warn("Error, we could not access the configuration data for your instance.")
should_break = click.prompt("Continue despite inability to contact this Apollo Instance? [y/n]")
if should_break in ('Y', 'y'):
break
except Exception as e:
warn("Error, we could not access the configuration data for your instance.")
should_break = click.prompt("Continue despite inability to contact this Apollo Instance? [y/n]")
if should_break in ('Y', 'y'):
break
config_path = config.global_config_path()
if os.path.exists(config_path):
warn("File %s already exists, refusing to overwrite." % config_path)
return -1
with open(config_path, "w") as f:
f.write(
CONFIG_TEMPLATE % {
'url': apollo_url,
'username': apollo_username,
'password': apollo_password,
})
info(SUCCESS_MESSAGE) | def function[cli, parameter[ctx, url, api_key, admin]]:
constant[Help initialize global configuration (in home directory)
]
call[name[click].echo, parameter[constant[Welcome to Apollo's Arrow!]]]
if call[name[os].path.exists, parameter[call[name[config].global_config_path, parameter[]]]] begin[:]
call[name[info], parameter[binary_operation[constant[Your arrow configuration already exists. Please edit it instead: %s] <ast.Mod object at 0x7da2590d6920> call[name[config].global_config_path, parameter[]]]]]
return[constant[0]]
while constant[True] begin[:]
variable[apollo_url] assign[=] call[name[click].prompt, parameter[constant[Please enter your Apollo's URL]]]
variable[apollo_username] assign[=] call[name[click].prompt, parameter[constant[Please enter your Apollo Username]]]
variable[apollo_password] assign[=] call[name[click].prompt, parameter[constant[Please enter your Apollo Password]]]
call[name[info], parameter[constant[Testing connection...]]]
<ast.Try object at 0x7da1b255c970>
variable[config_path] assign[=] call[name[config].global_config_path, parameter[]]
if call[name[os].path.exists, parameter[name[config_path]]] begin[:]
call[name[warn], parameter[binary_operation[constant[File %s already exists, refusing to overwrite.] <ast.Mod object at 0x7da2590d6920> name[config_path]]]]
return[<ast.UnaryOp object at 0x7da1b255b880>]
with call[name[open], parameter[name[config_path], constant[w]]] begin[:]
call[name[f].write, parameter[binary_operation[name[CONFIG_TEMPLATE] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da1b255bfd0>, <ast.Constant object at 0x7da1b255bd90>, <ast.Constant object at 0x7da1b2559930>], [<ast.Name object at 0x7da1b2559660>, <ast.Name object at 0x7da1b255a860>, <ast.Name object at 0x7da1b255a1a0>]]]]]
call[name[info], parameter[name[SUCCESS_MESSAGE]]] | keyword[def] identifier[cli] ( identifier[ctx] , identifier[url] = keyword[None] , identifier[api_key] = keyword[None] , identifier[admin] = keyword[False] ,** identifier[kwds] ):
literal[string]
identifier[click] . identifier[echo] ( literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[config] . identifier[global_config_path] ()):
identifier[info] ( literal[string] % identifier[config] . identifier[global_config_path] ())
keyword[return] literal[int]
keyword[while] keyword[True] :
identifier[apollo_url] = identifier[click] . identifier[prompt] ( literal[string] )
identifier[apollo_username] = identifier[click] . identifier[prompt] ( literal[string] )
identifier[apollo_password] = identifier[click] . identifier[prompt] ( literal[string] , identifier[hide_input] = keyword[True] )
identifier[info] ( literal[string] )
keyword[try] :
identifier[ai] = identifier[ApolloInstance] ( identifier[apollo_url] , identifier[apollo_username] , identifier[apollo_password] )
keyword[try] :
identifier[ai] . identifier[metrics] . identifier[get_metrics] ()
identifier[info] ( literal[string] )
keyword[break]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[print] ( identifier[e] )
identifier[warn] ( literal[string] )
identifier[should_break] = identifier[click] . identifier[prompt] ( literal[string] )
keyword[if] identifier[should_break] keyword[in] ( literal[string] , literal[string] ):
keyword[break]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[warn] ( literal[string] )
identifier[should_break] = identifier[click] . identifier[prompt] ( literal[string] )
keyword[if] identifier[should_break] keyword[in] ( literal[string] , literal[string] ):
keyword[break]
identifier[config_path] = identifier[config] . identifier[global_config_path] ()
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[config_path] ):
identifier[warn] ( literal[string] % identifier[config_path] )
keyword[return] - literal[int]
keyword[with] identifier[open] ( identifier[config_path] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] (
identifier[CONFIG_TEMPLATE] %{
literal[string] : identifier[apollo_url] ,
literal[string] : identifier[apollo_username] ,
literal[string] : identifier[apollo_password] ,
})
identifier[info] ( identifier[SUCCESS_MESSAGE] ) | def cli(ctx, url=None, api_key=None, admin=False, **kwds):
"""Help initialize global configuration (in home directory)
"""
# TODO: prompt for values someday.
click.echo("Welcome to Apollo's Arrow!")
if os.path.exists(config.global_config_path()):
info('Your arrow configuration already exists. Please edit it instead: %s' % config.global_config_path())
return 0 # depends on [control=['if'], data=[]]
while True:
apollo_url = click.prompt("Please enter your Apollo's URL")
apollo_username = click.prompt('Please enter your Apollo Username')
apollo_password = click.prompt('Please enter your Apollo Password', hide_input=True)
info('Testing connection...')
try:
ai = ApolloInstance(apollo_url, apollo_username, apollo_password)
try:
ai.metrics.get_metrics()
# Ok, success
info('Ok! Everything looks good.')
break # depends on [control=['try'], data=[]]
except Exception as e:
print(e)
warn('Error, we could not access the configuration data for your instance.')
should_break = click.prompt('Continue despite inability to contact this Apollo Instance? [y/n]')
if should_break in ('Y', 'y'):
break # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']] # depends on [control=['try'], data=[]]
except Exception as e:
warn('Error, we could not access the configuration data for your instance.')
should_break = click.prompt('Continue despite inability to contact this Apollo Instance? [y/n]')
if should_break in ('Y', 'y'):
break # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]]
config_path = config.global_config_path()
if os.path.exists(config_path):
warn('File %s already exists, refusing to overwrite.' % config_path)
return -1 # depends on [control=['if'], data=[]]
with open(config_path, 'w') as f:
f.write(CONFIG_TEMPLATE % {'url': apollo_url, 'username': apollo_username, 'password': apollo_password})
info(SUCCESS_MESSAGE) # depends on [control=['with'], data=['f']] |
def follow_user(self, user, delegate):
"""Follow the given user.
Returns the user info back to the given delegate
"""
parser = txml.Users(delegate)
return self.__postPage('/friendships/create/%s.xml' % (user), parser) | def function[follow_user, parameter[self, user, delegate]]:
constant[Follow the given user.
Returns the user info back to the given delegate
]
variable[parser] assign[=] call[name[txml].Users, parameter[name[delegate]]]
return[call[name[self].__postPage, parameter[binary_operation[constant[/friendships/create/%s.xml] <ast.Mod object at 0x7da2590d6920> name[user]], name[parser]]]] | keyword[def] identifier[follow_user] ( identifier[self] , identifier[user] , identifier[delegate] ):
literal[string]
identifier[parser] = identifier[txml] . identifier[Users] ( identifier[delegate] )
keyword[return] identifier[self] . identifier[__postPage] ( literal[string] %( identifier[user] ), identifier[parser] ) | def follow_user(self, user, delegate):
"""Follow the given user.
Returns the user info back to the given delegate
"""
parser = txml.Users(delegate)
return self.__postPage('/friendships/create/%s.xml' % user, parser) |
def get_all_specs(self):
"""Returns a dict mapping kernel names and resource directories.
"""
# This is new in 4.1 -> https://github.com/jupyter/jupyter_client/pull/93
specs = self.get_all_kernel_specs_for_envs()
specs.update(super(EnvironmentKernelSpecManager, self).get_all_specs())
return specs | def function[get_all_specs, parameter[self]]:
constant[Returns a dict mapping kernel names and resource directories.
]
variable[specs] assign[=] call[name[self].get_all_kernel_specs_for_envs, parameter[]]
call[name[specs].update, parameter[call[call[name[super], parameter[name[EnvironmentKernelSpecManager], name[self]]].get_all_specs, parameter[]]]]
return[name[specs]] | keyword[def] identifier[get_all_specs] ( identifier[self] ):
literal[string]
identifier[specs] = identifier[self] . identifier[get_all_kernel_specs_for_envs] ()
identifier[specs] . identifier[update] ( identifier[super] ( identifier[EnvironmentKernelSpecManager] , identifier[self] ). identifier[get_all_specs] ())
keyword[return] identifier[specs] | def get_all_specs(self):
"""Returns a dict mapping kernel names and resource directories.
"""
# This is new in 4.1 -> https://github.com/jupyter/jupyter_client/pull/93
specs = self.get_all_kernel_specs_for_envs()
specs.update(super(EnvironmentKernelSpecManager, self).get_all_specs())
return specs |
def ewsformat(self):
"""
ISO 8601 format to satisfy xs:datetime as interpreted by EWS. Examples:
2009-01-15T13:45:56Z
2009-01-15T13:45:56+01:00
"""
if not self.tzinfo:
raise ValueError('EWSDateTime must be timezone-aware')
if self.tzinfo.zone == 'UTC':
return self.strftime('%Y-%m-%dT%H:%M:%SZ')
return self.replace(microsecond=0).isoformat() | def function[ewsformat, parameter[self]]:
constant[
ISO 8601 format to satisfy xs:datetime as interpreted by EWS. Examples:
2009-01-15T13:45:56Z
2009-01-15T13:45:56+01:00
]
if <ast.UnaryOp object at 0x7da20c6aa0b0> begin[:]
<ast.Raise object at 0x7da20c6aa2c0>
if compare[name[self].tzinfo.zone equal[==] constant[UTC]] begin[:]
return[call[name[self].strftime, parameter[constant[%Y-%m-%dT%H:%M:%SZ]]]]
return[call[call[name[self].replace, parameter[]].isoformat, parameter[]]] | keyword[def] identifier[ewsformat] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[tzinfo] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[tzinfo] . identifier[zone] == literal[string] :
keyword[return] identifier[self] . identifier[strftime] ( literal[string] )
keyword[return] identifier[self] . identifier[replace] ( identifier[microsecond] = literal[int] ). identifier[isoformat] () | def ewsformat(self):
"""
ISO 8601 format to satisfy xs:datetime as interpreted by EWS. Examples:
2009-01-15T13:45:56Z
2009-01-15T13:45:56+01:00
"""
if not self.tzinfo:
raise ValueError('EWSDateTime must be timezone-aware') # depends on [control=['if'], data=[]]
if self.tzinfo.zone == 'UTC':
return self.strftime('%Y-%m-%dT%H:%M:%SZ') # depends on [control=['if'], data=[]]
return self.replace(microsecond=0).isoformat() |
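
A hedged usage sketch: it assumes `EWSDateTime` subclasses `datetime` and accepts a `tzinfo` keyword, and uses `pytz.utc` because the method checks a pytz-style `.zone` attribute:

```python
import pytz

dt = EWSDateTime(2009, 1, 15, 13, 45, 56, tzinfo=pytz.utc)  # assumed constructor
print(dt.ewsformat())  # -> '2009-01-15T13:45:56Z'

naive = EWSDateTime(2009, 1, 15, 13, 45, 56)
naive.ewsformat()      # raises ValueError: EWSDateTime must be timezone-aware
```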
def alerts(self):
"""Query for alerts attached to this incident."""
endpoint = '/'.join((self.endpoint, self.id, 'alerts'))
return self.alertFactory.find(
endpoint=endpoint,
api_key=self.api_key,
) | def function[alerts, parameter[self]]:
constant[Query for alerts attached to this incident.]
variable[endpoint] assign[=] call[constant[/].join, parameter[tuple[[<ast.Attribute object at 0x7da1b06fd660>, <ast.Attribute object at 0x7da1b06fcac0>, <ast.Constant object at 0x7da1b06fc2b0>]]]]
return[call[name[self].alertFactory.find, parameter[]]] | keyword[def] identifier[alerts] ( identifier[self] ):
literal[string]
identifier[endpoint] = literal[string] . identifier[join] (( identifier[self] . identifier[endpoint] , identifier[self] . identifier[id] , literal[string] ))
keyword[return] identifier[self] . identifier[alertFactory] . identifier[find] (
identifier[endpoint] = identifier[endpoint] ,
identifier[api_key] = identifier[self] . identifier[api_key] ,
) | def alerts(self):
"""Query for alerts attached to this incident."""
endpoint = '/'.join((self.endpoint, self.id, 'alerts'))
return self.alertFactory.find(endpoint=endpoint, api_key=self.api_key) |
def get_defaults_file(*a, **kw):
"""Get a file object with YAML data of configuration defaults.
Arguments are passed through to :func:`get_defaults_str`.
"""
fd = StringIO()
fd.write(get_defaults_str(*a, **kw))
fd.seek(0)
return fd | def function[get_defaults_file, parameter[]]:
constant[Get a file object with YAML data of configuration defaults.
Arguments are passed through to :func:`get_defaults_str`.
]
variable[fd] assign[=] call[name[StringIO], parameter[]]
call[name[fd].write, parameter[call[name[get_defaults_str], parameter[<ast.Starred object at 0x7da18fe90df0>]]]]
call[name[fd].seek, parameter[constant[0]]]
return[name[fd]] | keyword[def] identifier[get_defaults_file] (* identifier[a] ,** identifier[kw] ):
literal[string]
identifier[fd] = identifier[StringIO] ()
identifier[fd] . identifier[write] ( identifier[get_defaults_str] (* identifier[a] ,** identifier[kw] ))
identifier[fd] . identifier[seek] ( literal[int] )
keyword[return] identifier[fd] | def get_defaults_file(*a, **kw):
"""Get a file object with YAML data of configuration defaults.
Arguments are passed through to :func:`get_defaults_str`.
"""
fd = StringIO()
fd.write(get_defaults_str(*a, **kw))
fd.seek(0)
return fd |
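
Typical use of the returned rewound buffer, on the assumption (per the docstring) that it holds YAML:

```python
import yaml  # assumption: callers parse the defaults as YAML

fd = get_defaults_file()       # any args pass straight through to get_defaults_str
defaults = yaml.safe_load(fd)  # fd is already rewound via seek(0)
```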
def get_all(self, start=0, count=-1, sort=''):
"""
Gets a list of logical interconnects based on optional sorting and filtering and is constrained by start
and count parameters.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
Returns:
list: A list of logical interconnects.
"""
return self._helper.get_all(start, count, sort=sort) | def function[get_all, parameter[self, start, count, sort]]:
constant[
Gets a list of logical interconnects based on optional sorting and filtering and is constrained by start
and count parameters.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
Returns:
list: A list of logical interconnects.
]
return[call[name[self]._helper.get_all, parameter[name[start], name[count]]]] | keyword[def] identifier[get_all] ( identifier[self] , identifier[start] = literal[int] , identifier[count] =- literal[int] , identifier[sort] = literal[string] ):
literal[string]
keyword[return] identifier[self] . identifier[_helper] . identifier[get_all] ( identifier[start] , identifier[count] , identifier[sort] = identifier[sort] ) | def get_all(self, start=0, count=-1, sort=''):
"""
Gets a list of logical interconnects based on optional sorting and filtering and is constrained by start
and count parameters.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
Returns:
list: A list of logical interconnects.
"""
return self._helper.get_all(start, count, sort=sort) |
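
An illustrative call pattern; `logical_interconnects` stands in for a configured client exposing this method, and the sort expression is an assumed REST-style value:

```python
# First 50 items, sorted by name.
page = logical_interconnects.get_all(start=0, count=50, sort='name:ascending')

# count=-1 (the default) requests every item in one call.
everything = logical_interconnects.get_all()
```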
def using_config(_func=None):
"""
This allows a function to use Summernote configuration
as a global variable, temporarily.
"""
def decorator(func):
@wraps(func)
def inner_dec(*args, **kwargs):
g = func.__globals__
var_name = 'config'
sentinel = object()
oldvalue = g.get(var_name, sentinel)
g[var_name] = apps.get_app_config('django_summernote').config
try:
res = func(*args, **kwargs)
finally:
if oldvalue is sentinel:
del g[var_name]
else:
g[var_name] = oldvalue
return res
return inner_dec
if _func is None:
return decorator
else:
return decorator(_func) | def function[using_config, parameter[_func]]:
constant[
This allows a function to use Summernote configuration
as a global variable, temporarily.
]
def function[decorator, parameter[func]]:
def function[inner_dec, parameter[]]:
variable[g] assign[=] name[func].__globals__
variable[var_name] assign[=] constant[config]
variable[sentinel] assign[=] call[name[object], parameter[]]
variable[oldvalue] assign[=] call[name[g].get, parameter[name[var_name], name[sentinel]]]
call[name[g]][name[var_name]] assign[=] call[name[apps].get_app_config, parameter[constant[django_summernote]]].config
<ast.Try object at 0x7da18ede5ff0>
return[name[res]]
return[name[inner_dec]]
if compare[name[_func] is constant[None]] begin[:]
return[name[decorator]] | keyword[def] identifier[using_config] ( identifier[_func] = keyword[None] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[func] ):
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[inner_dec] (* identifier[args] ,** identifier[kwargs] ):
identifier[g] = identifier[func] . identifier[__globals__]
identifier[var_name] = literal[string]
identifier[sentinel] = identifier[object] ()
identifier[oldvalue] = identifier[g] . identifier[get] ( identifier[var_name] , identifier[sentinel] )
identifier[g] [ identifier[var_name] ]= identifier[apps] . identifier[get_app_config] ( literal[string] ). identifier[config]
keyword[try] :
identifier[res] = identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[finally] :
keyword[if] identifier[oldvalue] keyword[is] identifier[sentinel] :
keyword[del] identifier[g] [ identifier[var_name] ]
keyword[else] :
identifier[g] [ identifier[var_name] ]= identifier[oldvalue]
keyword[return] identifier[res]
keyword[return] identifier[inner_dec]
keyword[if] identifier[_func] keyword[is] keyword[None] :
keyword[return] identifier[decorator]
keyword[else] :
keyword[return] identifier[decorator] ( identifier[_func] ) | def using_config(_func=None):
"""
This allows a function to use Summernote configuration
as a global variable, temporarily.
"""
def decorator(func):
@wraps(func)
def inner_dec(*args, **kwargs):
g = func.__globals__
var_name = 'config'
sentinel = object()
oldvalue = g.get(var_name, sentinel)
g[var_name] = apps.get_app_config('django_summernote').config
try:
res = func(*args, **kwargs) # depends on [control=['try'], data=[]]
finally:
if oldvalue is sentinel:
del g[var_name] # depends on [control=['if'], data=[]]
else:
g[var_name] = oldvalue
return res
return inner_dec
if _func is None:
return decorator # depends on [control=['if'], data=[]]
else:
return decorator(_func) |
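
A minimal sketch of the decorator in use inside a configured Django project; the `width` key is illustrative, not guaranteed:

```python
@using_config
def summernote_width():
    # `config` exists as a global only while this call runs; the decorator
    # injects it from the django_summernote app config and then cleans up.
    return config['width']  # assumed key, for illustration
```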
def calibrate_cameras(self):
"""Calibrate cameras based on found chessboard corners."""
criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS,
100, 1e-5)
flags = (cv2.CALIB_FIX_ASPECT_RATIO + cv2.CALIB_ZERO_TANGENT_DIST +
cv2.CALIB_SAME_FOCAL_LENGTH)
calib = StereoCalibration()
(calib.cam_mats["left"], calib.dist_coefs["left"],
calib.cam_mats["right"], calib.dist_coefs["right"],
calib.rot_mat, calib.trans_vec, calib.e_mat,
calib.f_mat) = cv2.stereoCalibrate(self.object_points,
self.image_points["left"],
self.image_points["right"],
self.image_size,
calib.cam_mats["left"],
calib.dist_coefs["left"],
calib.cam_mats["right"],
calib.dist_coefs["right"],
calib.rot_mat,
calib.trans_vec,
calib.e_mat,
calib.f_mat,
criteria=criteria,
flags=flags)[1:]
(calib.rect_trans["left"], calib.rect_trans["right"],
calib.proj_mats["left"], calib.proj_mats["right"],
calib.disp_to_depth_mat, calib.valid_boxes["left"],
calib.valid_boxes["right"]) = cv2.stereoRectify(calib.cam_mats["left"],
calib.dist_coefs["left"],
calib.cam_mats["right"],
calib.dist_coefs["right"],
self.image_size,
calib.rot_mat,
calib.trans_vec,
flags=0)
for side in ("left", "right"):
(calib.undistortion_map[side],
calib.rectification_map[side]) = cv2.initUndistortRectifyMap(
calib.cam_mats[side],
calib.dist_coefs[side],
calib.rect_trans[side],
calib.proj_mats[side],
self.image_size,
cv2.CV_32FC1)
# This is replaced because my results were always bad. Estimates are
# taken from the OpenCV samples.
width, height = self.image_size
focal_length = 0.8 * width
calib.disp_to_depth_mat = np.float32([[1, 0, 0, -0.5 * width],
[0, -1, 0, 0.5 * height],
[0, 0, 0, -focal_length],
[0, 0, 1, 0]])
return calib | def function[calibrate_cameras, parameter[self]]:
constant[Calibrate cameras based on found chessboard corners.]
variable[criteria] assign[=] tuple[[<ast.BinOp object at 0x7da2043443d0>, <ast.Constant object at 0x7da204347fa0>, <ast.Constant object at 0x7da204344bb0>]]
variable[flags] assign[=] binary_operation[binary_operation[name[cv2].CALIB_FIX_ASPECT_RATIO + name[cv2].CALIB_ZERO_TANGENT_DIST] + name[cv2].CALIB_SAME_FOCAL_LENGTH]
variable[calib] assign[=] call[name[StereoCalibration], parameter[]]
<ast.Tuple object at 0x7da1b26ac4f0> assign[=] call[call[name[cv2].stereoCalibrate, parameter[name[self].object_points, call[name[self].image_points][constant[left]], call[name[self].image_points][constant[right]], name[self].image_size, call[name[calib].cam_mats][constant[left]], call[name[calib].dist_coefs][constant[left]], call[name[calib].cam_mats][constant[right]], call[name[calib].dist_coefs][constant[right]], name[calib].rot_mat, name[calib].trans_vec, name[calib].e_mat, name[calib].f_mat]]][<ast.Slice object at 0x7da204963fd0>]
<ast.Tuple object at 0x7da204960ee0> assign[=] call[name[cv2].stereoRectify, parameter[call[name[calib].cam_mats][constant[left]], call[name[calib].dist_coefs][constant[left]], call[name[calib].cam_mats][constant[right]], call[name[calib].dist_coefs][constant[right]], name[self].image_size, name[calib].rot_mat, name[calib].trans_vec]]
for taget[name[side]] in starred[tuple[[<ast.Constant object at 0x7da204961930>, <ast.Constant object at 0x7da204960e50>]]] begin[:]
<ast.Tuple object at 0x7da204963160> assign[=] call[name[cv2].initUndistortRectifyMap, parameter[call[name[calib].cam_mats][name[side]], call[name[calib].dist_coefs][name[side]], call[name[calib].rect_trans][name[side]], call[name[calib].proj_mats][name[side]], name[self].image_size, name[cv2].CV_32FC1]]
<ast.Tuple object at 0x7da204963730> assign[=] name[self].image_size
variable[focal_length] assign[=] binary_operation[constant[0.8] * name[width]]
name[calib].disp_to_depth_mat assign[=] call[name[np].float32, parameter[list[[<ast.List object at 0x7da204962ef0>, <ast.List object at 0x7da204961a80>, <ast.List object at 0x7da204960850>, <ast.List object at 0x7da204963f70>]]]]
return[name[calib]] | keyword[def] identifier[calibrate_cameras] ( identifier[self] ):
literal[string]
identifier[criteria] =( identifier[cv2] . identifier[TERM_CRITERIA_MAX_ITER] + identifier[cv2] . identifier[TERM_CRITERIA_EPS] ,
literal[int] , literal[int] )
identifier[flags] =( identifier[cv2] . identifier[CALIB_FIX_ASPECT_RATIO] + identifier[cv2] . identifier[CALIB_ZERO_TANGENT_DIST] +
identifier[cv2] . identifier[CALIB_SAME_FOCAL_LENGTH] )
identifier[calib] = identifier[StereoCalibration] ()
( identifier[calib] . identifier[cam_mats] [ literal[string] ], identifier[calib] . identifier[dist_coefs] [ literal[string] ],
identifier[calib] . identifier[cam_mats] [ literal[string] ], identifier[calib] . identifier[dist_coefs] [ literal[string] ],
identifier[calib] . identifier[rot_mat] , identifier[calib] . identifier[trans_vec] , identifier[calib] . identifier[e_mat] ,
identifier[calib] . identifier[f_mat] )= identifier[cv2] . identifier[stereoCalibrate] ( identifier[self] . identifier[object_points] ,
identifier[self] . identifier[image_points] [ literal[string] ],
identifier[self] . identifier[image_points] [ literal[string] ],
identifier[self] . identifier[image_size] ,
identifier[calib] . identifier[cam_mats] [ literal[string] ],
identifier[calib] . identifier[dist_coefs] [ literal[string] ],
identifier[calib] . identifier[cam_mats] [ literal[string] ],
identifier[calib] . identifier[dist_coefs] [ literal[string] ],
identifier[calib] . identifier[rot_mat] ,
identifier[calib] . identifier[trans_vec] ,
identifier[calib] . identifier[e_mat] ,
identifier[calib] . identifier[f_mat] ,
identifier[criteria] = identifier[criteria] ,
identifier[flags] = identifier[flags] )[ literal[int] :]
( identifier[calib] . identifier[rect_trans] [ literal[string] ], identifier[calib] . identifier[rect_trans] [ literal[string] ],
identifier[calib] . identifier[proj_mats] [ literal[string] ], identifier[calib] . identifier[proj_mats] [ literal[string] ],
identifier[calib] . identifier[disp_to_depth_mat] , identifier[calib] . identifier[valid_boxes] [ literal[string] ],
identifier[calib] . identifier[valid_boxes] [ literal[string] ])= identifier[cv2] . identifier[stereoRectify] ( identifier[calib] . identifier[cam_mats] [ literal[string] ],
identifier[calib] . identifier[dist_coefs] [ literal[string] ],
identifier[calib] . identifier[cam_mats] [ literal[string] ],
identifier[calib] . identifier[dist_coefs] [ literal[string] ],
identifier[self] . identifier[image_size] ,
identifier[calib] . identifier[rot_mat] ,
identifier[calib] . identifier[trans_vec] ,
identifier[flags] = literal[int] )
keyword[for] identifier[side] keyword[in] ( literal[string] , literal[string] ):
( identifier[calib] . identifier[undistortion_map] [ identifier[side] ],
identifier[calib] . identifier[rectification_map] [ identifier[side] ])= identifier[cv2] . identifier[initUndistortRectifyMap] (
identifier[calib] . identifier[cam_mats] [ identifier[side] ],
identifier[calib] . identifier[dist_coefs] [ identifier[side] ],
identifier[calib] . identifier[rect_trans] [ identifier[side] ],
identifier[calib] . identifier[proj_mats] [ identifier[side] ],
identifier[self] . identifier[image_size] ,
identifier[cv2] . identifier[CV_32FC1] )
identifier[width] , identifier[height] = identifier[self] . identifier[image_size]
identifier[focal_length] = literal[int] * identifier[width]
identifier[calib] . identifier[disp_to_depth_mat] = identifier[np] . identifier[float32] ([[ literal[int] , literal[int] , literal[int] ,- literal[int] * identifier[width] ],
[ literal[int] ,- literal[int] , literal[int] , literal[int] * identifier[height] ],
[ literal[int] , literal[int] , literal[int] ,- identifier[focal_length] ],
[ literal[int] , literal[int] , literal[int] , literal[int] ]])
keyword[return] identifier[calib] | def calibrate_cameras(self):
"""Calibrate cameras based on found chessboard corners."""
criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 1e-05)
flags = cv2.CALIB_FIX_ASPECT_RATIO + cv2.CALIB_ZERO_TANGENT_DIST + cv2.CALIB_SAME_FOCAL_LENGTH
calib = StereoCalibration()
(calib.cam_mats['left'], calib.dist_coefs['left'], calib.cam_mats['right'], calib.dist_coefs['right'], calib.rot_mat, calib.trans_vec, calib.e_mat, calib.f_mat) = cv2.stereoCalibrate(self.object_points, self.image_points['left'], self.image_points['right'], self.image_size, calib.cam_mats['left'], calib.dist_coefs['left'], calib.cam_mats['right'], calib.dist_coefs['right'], calib.rot_mat, calib.trans_vec, calib.e_mat, calib.f_mat, criteria=criteria, flags=flags)[1:]
(calib.rect_trans['left'], calib.rect_trans['right'], calib.proj_mats['left'], calib.proj_mats['right'], calib.disp_to_depth_mat, calib.valid_boxes['left'], calib.valid_boxes['right']) = cv2.stereoRectify(calib.cam_mats['left'], calib.dist_coefs['left'], calib.cam_mats['right'], calib.dist_coefs['right'], self.image_size, calib.rot_mat, calib.trans_vec, flags=0)
for side in ('left', 'right'):
(calib.undistortion_map[side], calib.rectification_map[side]) = cv2.initUndistortRectifyMap(calib.cam_mats[side], calib.dist_coefs[side], calib.rect_trans[side], calib.proj_mats[side], self.image_size, cv2.CV_32FC1) # depends on [control=['for'], data=['side']]
# This is replaced because my results were always bad. Estimates are
# taken from the OpenCV samples.
(width, height) = self.image_size
focal_length = 0.8 * width
calib.disp_to_depth_mat = np.float32([[1, 0, 0, -0.5 * width], [0, -1, 0, 0.5 * height], [0, 0, 0, -focal_length], [0, 0, 1, 0]])
return calib |
def release(self):
"""Release the lock.
This method should only be called in the locked state;
it changes the state to unlocked and returns immediately.
If an attempt is made to release an unlocked lock,
a RuntimeError will be raised.
"""
if self._local.locked:
while self._queue:
future = self._queue.popleft()
if not future.done():
return future.set_result(None)
self._local.locked = None
else:
raise RuntimeError('release unlocked lock') | def function[release, parameter[self]]:
constant[Release the lock.
This method should only be called in the locked state;
it changes the state to unlocked and returns immediately.
If an attempt is made to release an unlocked lock,
a RuntimeError will be raised.
]
if name[self]._local.locked begin[:]
while name[self]._queue begin[:]
variable[future] assign[=] call[name[self]._queue.popleft, parameter[]]
if <ast.UnaryOp object at 0x7da18bc711e0> begin[:]
return[call[name[future].set_result, parameter[constant[None]]]]
name[self]._local.locked assign[=] constant[None] | keyword[def] identifier[release] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_local] . identifier[locked] :
keyword[while] identifier[self] . identifier[_queue] :
identifier[future] = identifier[self] . identifier[_queue] . identifier[popleft] ()
keyword[if] keyword[not] identifier[future] . identifier[done] ():
keyword[return] identifier[future] . identifier[set_result] ( keyword[None] )
identifier[self] . identifier[_local] . identifier[locked] = keyword[None]
keyword[else] :
keyword[raise] identifier[RuntimeError] ( literal[string] ) | def release(self):
"""Release the lock.
This method should only be called in the locked state;
it changes the state to unlocked and returns immediately.
If an attempt is made to release an unlocked lock,
a RuntimeError will be raised.
"""
if self._local.locked:
while self._queue:
future = self._queue.popleft()
if not future.done():
return future.set_result(None) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
self._local.locked = None # depends on [control=['if'], data=[]]
else:
raise RuntimeError('release unlocked lock') |
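
The intended acquire/release discipline, sketched as a Tornado coroutine; `lock.acquire()` is assumed to return a future that resolves once the lock is free:

```python
from tornado import gen

@gen.coroutine
def critical_section(lock):
    yield lock.acquire()  # assumed counterpart to release()
    try:
        pass              # work while holding the lock
    finally:
        lock.release()    # wakes the first still-pending waiter, or unlocks
```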
def parallel_evaluation_pp(candidates, args):
"""Evaluate the candidates in parallel using Parallel Python.
This function allows parallel evaluation of candidate solutions.
It uses the `Parallel Python <http://www.parallelpython.com>`_ (pp)
library to accomplish the parallelization. This library must already
be installed in order to use this function. The function assigns the
evaluation of each candidate to its own job, all of which are then
distributed to the available processing units.
.. note::
All arguments to the evaluation function must be pickleable.
Those that are not will not be sent through the ``args`` variable
and will be unavailable to your function.
.. Arguments:
candidates -- the candidate solutions
args -- a dictionary of keyword arguments
Required keyword arguments in args:
- *pp_evaluator* -- actual evaluation function to be used (This function
should have the same signature as any other inspyred evaluation function.)
Optional keyword arguments in args:
- *pp_dependencies* -- tuple of functional dependencies of the serial
evaluator (default ())
- *pp_modules* -- tuple of modules that must be imported for the
functional dependencies (default ())
- *pp_servers* -- tuple of servers (on a cluster) that will be used
for parallel processing (default ("*",))
- *pp_secret* -- string representing the secret key needed to authenticate
on a worker node (default "inspyred")
- *pp_nprocs* -- integer representing the number of worker processes to
start on the local machine (default "autodetect", which sets it to the
number of processors in the system)
For more information about these arguments, please consult the
documentation for `Parallel Python <http://www.parallelpython.com>`_.
"""
import pp
logger = args['_ec'].logger
try:
evaluator = args['pp_evaluator']
except KeyError:
logger.error('parallel_evaluation_pp requires \'pp_evaluator\' be defined in the keyword arguments list')
raise
secret_key = args.setdefault('pp_secret', 'inspyred')
try:
job_server = args['_pp_job_server']
except KeyError:
pp_servers = args.get('pp_servers', ("*",))
pp_nprocs = args.get('pp_nprocs', 'autodetect')
job_server = pp.Server(ncpus=pp_nprocs, ppservers=pp_servers, secret=secret_key)
args['_pp_job_server'] = job_server
pp_depends = args.setdefault('pp_dependencies', ())
pp_modules = args.setdefault('pp_modules', ())
pickled_args = {}
for key in args:
try:
pickle.dumps(args[key])
pickled_args[key] = args[key]
except (TypeError, pickle.PickleError, pickle.PicklingError):
logger.debug('unable to pickle args parameter {0} in parallel_evaluation_pp'.format(key))
pass
func_template = pp.Template(job_server, evaluator, pp_depends, pp_modules)
jobs = [func_template.submit([c], pickled_args) for c in candidates]
fitness = []
for i, job in enumerate(jobs):
r = job()
try:
fitness.append(r[0])
except TypeError:
logger.warning('parallel_evaluation_pp generated an invalid fitness for candidate {0}'.format(candidates[i]))
fitness.append(None)
return fitness | def function[parallel_evaluation_pp, parameter[candidates, args]]:
constant[Evaluate the candidates in parallel using Parallel Python.
This function allows parallel evaluation of candidate solutions.
It uses the `Parallel Python <http://www.parallelpython.com>`_ (pp)
library to accomplish the parallelization. This library must already
be installed in order to use this function. The function assigns the
evaluation of each candidate to its own job, all of which are then
distributed to the available processing units.
.. note::
All arguments to the evaluation function must be pickleable.
Those that are not will not be sent through the ``args`` variable
and will be unavailable to your function.
.. Arguments:
candidates -- the candidate solutions
args -- a dictionary of keyword arguments
Required keyword arguments in args:
- *pp_evaluator* -- actual evaluation function to be used (This function
should have the same signature as any other inspyred evaluation function.)
Optional keyword arguments in args:
- *pp_dependencies* -- tuple of functional dependencies of the serial
evaluator (default ())
- *pp_modules* -- tuple of modules that must be imported for the
functional dependencies (default ())
- *pp_servers* -- tuple of servers (on a cluster) that will be used
for parallel processing (default ("*",))
- *pp_secret* -- string representing the secret key needed to authenticate
on a worker node (default "inspyred")
- *pp_nprocs* -- integer representing the number of worker processes to
start on the local machine (default "autodetect", which sets it to the
number of processors in the system)
For more information about these arguments, please consult the
documentation for `Parallel Python <http://www.parallelpython.com>`_.
]
import module[pp]
variable[logger] assign[=] call[name[args]][constant[_ec]].logger
<ast.Try object at 0x7da1b13581c0>
variable[secret_key] assign[=] call[name[args].setdefault, parameter[constant[pp_secret], constant[inspyred]]]
<ast.Try object at 0x7da1b1358310>
variable[pp_depends] assign[=] call[name[args].setdefault, parameter[constant[pp_dependencies], tuple[[]]]]
variable[pp_modules] assign[=] call[name[args].setdefault, parameter[constant[pp_modules], tuple[[]]]]
variable[pickled_args] assign[=] dictionary[[], []]
for taget[name[key]] in starred[name[args]] begin[:]
<ast.Try object at 0x7da1b1359c90>
variable[func_template] assign[=] call[name[pp].Template, parameter[name[job_server], name[evaluator], name[pp_depends], name[pp_modules]]]
variable[jobs] assign[=] <ast.ListComp object at 0x7da1b135a0b0>
variable[fitness] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b135a890>, <ast.Name object at 0x7da1b135a7a0>]]] in starred[call[name[enumerate], parameter[name[jobs]]]] begin[:]
variable[r] assign[=] call[name[job], parameter[]]
<ast.Try object at 0x7da1b135a650>
return[name[fitness]] | keyword[def] identifier[parallel_evaluation_pp] ( identifier[candidates] , identifier[args] ):
literal[string]
keyword[import] identifier[pp]
identifier[logger] = identifier[args] [ literal[string] ]. identifier[logger]
keyword[try] :
identifier[evaluator] = identifier[args] [ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[raise]
identifier[secret_key] = identifier[args] . identifier[setdefault] ( literal[string] , literal[string] )
keyword[try] :
identifier[job_server] = identifier[args] [ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[pp_servers] = identifier[args] . identifier[get] ( literal[string] ,( literal[string] ,))
identifier[pp_nprocs] = identifier[args] . identifier[get] ( literal[string] , literal[string] )
identifier[job_server] = identifier[pp] . identifier[Server] ( identifier[ncpus] = identifier[pp_nprocs] , identifier[ppservers] = identifier[pp_servers] , identifier[secret] = identifier[secret_key] )
identifier[args] [ literal[string] ]= identifier[job_server]
identifier[pp_depends] = identifier[args] . identifier[setdefault] ( literal[string] ,())
identifier[pp_modules] = identifier[args] . identifier[setdefault] ( literal[string] ,())
identifier[pickled_args] ={}
keyword[for] identifier[key] keyword[in] identifier[args] :
keyword[try] :
identifier[pickle] . identifier[dumps] ( identifier[args] [ identifier[key] ])
identifier[pickled_args] [ identifier[key] ]= identifier[args] [ identifier[key] ]
keyword[except] ( identifier[TypeError] , identifier[pickle] . identifier[PickleError] , identifier[pickle] . identifier[PicklingError] ):
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[key] ))
keyword[pass]
identifier[func_template] = identifier[pp] . identifier[Template] ( identifier[job_server] , identifier[evaluator] , identifier[pp_depends] , identifier[pp_modules] )
identifier[jobs] =[ identifier[func_template] . identifier[submit] ([ identifier[c] ], identifier[pickled_args] ) keyword[for] identifier[c] keyword[in] identifier[candidates] ]
identifier[fitness] =[]
keyword[for] identifier[i] , identifier[job] keyword[in] identifier[enumerate] ( identifier[jobs] ):
identifier[r] = identifier[job] ()
keyword[try] :
identifier[fitness] . identifier[append] ( identifier[r] [ literal[int] ])
keyword[except] identifier[TypeError] :
identifier[logger] . identifier[warning] ( literal[string] . identifier[format] ( identifier[candidates] [ identifier[i] ]))
identifier[fitness] . identifier[append] ( keyword[None] )
keyword[return] identifier[fitness] | def parallel_evaluation_pp(candidates, args):
"""Evaluate the candidates in parallel using Parallel Python.
This function allows parallel evaluation of candidate solutions.
It uses the `Parallel Python <http://www.parallelpython.com>`_ (pp)
library to accomplish the parallelization. This library must already
be installed in order to use this function. The function assigns the
evaluation of each candidate to its own job, all of which are then
distributed to the available processing units.
.. note::
All arguments to the evaluation function must be pickleable.
Those that are not will not be sent through the ``args`` variable
and will be unavailable to your function.
.. Arguments:
candidates -- the candidate solutions
args -- a dictionary of keyword arguments
Required keyword arguments in args:
- *pp_evaluator* -- actual evaluation function to be used (This function
should have the same signature as any other inspyred evaluation function.)
Optional keyword arguments in args:
- *pp_dependencies* -- tuple of functional dependencies of the serial
evaluator (default ())
- *pp_modules* -- tuple of modules that must be imported for the
functional dependencies (default ())
- *pp_servers* -- tuple of servers (on a cluster) that will be used
for parallel processing (default ("*",))
- *pp_secret* -- string representing the secret key needed to authenticate
on a worker node (default "inspyred")
- *pp_nprocs* -- integer representing the number of worker processes to
start on the local machine (default "autodetect", which sets it to the
number of processors in the system)
For more information about these arguments, please consult the
documentation for `Parallel Python <http://www.parallelpython.com>`_.
"""
    import pp
    import pickle  # used below to probe picklability of args; typically a module-level import
logger = args['_ec'].logger
try:
evaluator = args['pp_evaluator'] # depends on [control=['try'], data=[]]
except KeyError:
logger.error("parallel_evaluation_pp requires 'pp_evaluator' be defined in the keyword arguments list")
raise # depends on [control=['except'], data=[]]
secret_key = args.setdefault('pp_secret', 'inspyred')
try:
job_server = args['_pp_job_server'] # depends on [control=['try'], data=[]]
except KeyError:
pp_servers = args.get('pp_servers', ('*',))
pp_nprocs = args.get('pp_nprocs', 'autodetect')
job_server = pp.Server(ncpus=pp_nprocs, ppservers=pp_servers, secret=secret_key)
args['_pp_job_server'] = job_server # depends on [control=['except'], data=[]]
pp_depends = args.setdefault('pp_dependencies', ())
pp_modules = args.setdefault('pp_modules', ())
pickled_args = {}
for key in args:
try:
pickle.dumps(args[key])
pickled_args[key] = args[key] # depends on [control=['try'], data=[]]
except (TypeError, pickle.PickleError, pickle.PicklingError):
logger.debug('unable to pickle args parameter {0} in parallel_evaluation_pp'.format(key))
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['key']]
func_template = pp.Template(job_server, evaluator, pp_depends, pp_modules)
jobs = [func_template.submit([c], pickled_args) for c in candidates]
fitness = []
for (i, job) in enumerate(jobs):
r = job()
try:
fitness.append(r[0]) # depends on [control=['try'], data=[]]
except TypeError:
logger.warning('parallel_evaluation_pp generated an invalid fitness for candidate {0}'.format(candidates[i]))
fitness.append(None) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
return fitness |
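For orientation, here is a minimal usage sketch for the evaluator above; the fitness function, generator, and parameter values are hypothetical stand-ins, and it assumes the inspyred and pp packages are installed.

import random
import inspyred

def my_fitness(candidates, args):
    # Hypothetical serial evaluator with the standard inspyred signature.
    return [sum(c) for c in candidates]

def my_generator(random, args):
    # Hypothetical generator producing 5-dimensional candidates in [0, 1).
    return [random.random() for _ in range(5)]

prng = random.Random()
ea = inspyred.ec.GA(prng)
ea.terminator = inspyred.ec.terminators.evaluation_termination
final_pop = ea.evolve(generator=my_generator,
                      evaluator=inspyred.ec.evaluators.parallel_evaluation_pp,
                      pp_evaluator=my_fitness,
                      pp_nprocs=2,
                      pop_size=10,
                      max_evaluations=100)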
def dump(self, msg, fn_):
'''
Serialize the correct data into the named file object
'''
if six.PY2:
fn_.write(self.dumps(msg))
else:
# When using Python 3, write files in such a way
# that the 'bytes' and 'str' types are distinguishable
# by using "use_bin_type=True".
fn_.write(self.dumps(msg, use_bin_type=True))
fn_.close() | def function[dump, parameter[self, msg, fn_]]:
constant[
Serialize the correct data into the named file object
]
if name[six].PY2 begin[:]
call[name[fn_].write, parameter[call[name[self].dumps, parameter[name[msg]]]]]
call[name[fn_].close, parameter[]] | keyword[def] identifier[dump] ( identifier[self] , identifier[msg] , identifier[fn_] ):
literal[string]
keyword[if] identifier[six] . identifier[PY2] :
identifier[fn_] . identifier[write] ( identifier[self] . identifier[dumps] ( identifier[msg] ))
keyword[else] :
identifier[fn_] . identifier[write] ( identifier[self] . identifier[dumps] ( identifier[msg] , identifier[use_bin_type] = keyword[True] ))
identifier[fn_] . identifier[close] () | def dump(self, msg, fn_):
"""
Serialize the correct data into the named file object
"""
if six.PY2:
fn_.write(self.dumps(msg)) # depends on [control=['if'], data=[]]
else:
# When using Python 3, write files in such a way
# that the 'bytes' and 'str' types are distinguishable
# by using "use_bin_type=True".
fn_.write(self.dumps(msg, use_bin_type=True))
fn_.close() |
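A small sketch of the Python 3 distinction that use_bin_type=True preserves, assuming the dumps method above delegates to msgpack (as the comment suggests):

import msgpack

# With use_bin_type=True, str values are packed as UTF-8 strings and bytes as bin.
packed = msgpack.packb({'text': 'hi', 'raw': b'\x00\x01'}, use_bin_type=True)
unpacked = msgpack.unpackb(packed, raw=False)
assert unpacked == {'text': 'hi', 'raw': b'\x00\x01'}  # both types survive the round trip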
def exec(self, operand1, operand2):
"""
Uses two operands and performs a function on their content.::
        operand2 = function(operand1, operand2)
"""
in1 = self.register_interface.read(operand1)
in2 = self.register_interface.read(operand2)
out = self.function(in1, in2)
self.register_interface.write(operand2, out) | def function[exec, parameter[self, operand1, operand2]]:
constant[
Uses two operands and performs a function on their content.::
        operand2 = function(operand1, operand2)
]
variable[in1] assign[=] call[name[self].register_interface.read, parameter[name[operand1]]]
variable[in2] assign[=] call[name[self].register_interface.read, parameter[name[operand2]]]
variable[out] assign[=] call[name[self].function, parameter[name[in1], name[in2]]]
call[name[self].register_interface.write, parameter[name[operand2], name[out]]] | keyword[def] identifier[exec] ( identifier[self] , identifier[operand1] , identifier[operand2] ):
literal[string]
identifier[in1] = identifier[self] . identifier[register_interface] . identifier[read] ( identifier[operand1] )
identifier[in2] = identifier[self] . identifier[register_interface] . identifier[read] ( identifier[operand2] )
identifier[out] = identifier[self] . identifier[function] ( identifier[in1] , identifier[in2] )
identifier[self] . identifier[register_interface] . identifier[write] ( identifier[operand2] , identifier[out] ) | def exec(self, operand1, operand2):
"""
Uses two operands and performs a function on their content.::
        operand2 = function(operand1, operand2)
"""
in1 = self.register_interface.read(operand1)
in2 = self.register_interface.read(operand2)
out = self.function(in1, in2)
self.register_interface.write(operand2, out) |
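A minimal sketch of the machinery this method assumes; RegisterInterface and AddOp are hypothetical stand-ins for the real register file and operation classes:

import operator

class RegisterInterface:
    # Hypothetical backing store for named registers.
    def __init__(self):
        self._regs = {}
    def read(self, name):
        return self._regs.get(name, 0)
    def write(self, name, value):
        self._regs[name] = value

class AddOp:
    # One concrete operation: self.function is the binary function exec applies.
    def __init__(self, register_interface):
        self.register_interface = register_interface
        self.function = operator.add
    def exec(self, operand1, operand2):
        in1 = self.register_interface.read(operand1)
        in2 = self.register_interface.read(operand2)
        self.register_interface.write(operand2, self.function(in1, in2))

regs = RegisterInterface()
regs.write('r1', 2)
regs.write('r2', 3)
AddOp(regs).exec('r1', 'r2')
assert regs.read('r2') == 5  # the result lands in the second operand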
def history(self, storage_version):
"""Get reminder changes.
"""
params = {
"storageVersion": storage_version,
"includeSnoozePresetUpdates": True,
}
params.update(self.static_params)
return self.send(
url=self._base_url + 'history',
method='POST',
json=params
) | def function[history, parameter[self, storage_version]]:
constant[Get reminder changes.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18f00fdc0>, <ast.Constant object at 0x7da18f00cb20>], [<ast.Name object at 0x7da18f00ffd0>, <ast.Constant object at 0x7da18f00d7e0>]]
call[name[params].update, parameter[name[self].static_params]]
return[call[name[self].send, parameter[]]] | keyword[def] identifier[history] ( identifier[self] , identifier[storage_version] ):
literal[string]
identifier[params] ={
literal[string] : identifier[storage_version] ,
literal[string] : keyword[True] ,
}
identifier[params] . identifier[update] ( identifier[self] . identifier[static_params] )
keyword[return] identifier[self] . identifier[send] (
identifier[url] = identifier[self] . identifier[_base_url] + literal[string] ,
identifier[method] = literal[string] ,
identifier[json] = identifier[params]
) | def history(self, storage_version):
"""Get reminder changes.
"""
params = {'storageVersion': storage_version, 'includeSnoozePresetUpdates': True}
params.update(self.static_params)
return self.send(url=self._base_url + 'history', method='POST', json=params) |
def gen_ordered_statistics(transaction_manager, record):
"""
Returns a generator of ordered statistics as OrderedStatistic instances.
Arguments:
transaction_manager -- Transactions as a TransactionManager instance.
record -- A support record as a SupportRecord instance.
"""
items = record.items
for combination_set in combinations(sorted(items), len(items) - 1):
items_base = frozenset(combination_set)
items_add = frozenset(items.difference(items_base))
confidence = (
record.support / transaction_manager.calc_support(items_base))
lift = confidence / transaction_manager.calc_support(items_add)
yield OrderedStatistic(
frozenset(items_base), frozenset(items_add), confidence, lift) | def function[gen_ordered_statistics, parameter[transaction_manager, record]]:
constant[
Returns a generator of ordered statistics as OrderedStatistic instances.
Arguments:
transaction_manager -- Transactions as a TransactionManager instance.
record -- A support record as a SupportRecord instance.
]
variable[items] assign[=] name[record].items
for taget[name[combination_set]] in starred[call[name[combinations], parameter[call[name[sorted], parameter[name[items]]], binary_operation[call[name[len], parameter[name[items]]] - constant[1]]]]] begin[:]
variable[items_base] assign[=] call[name[frozenset], parameter[name[combination_set]]]
variable[items_add] assign[=] call[name[frozenset], parameter[call[name[items].difference, parameter[name[items_base]]]]]
variable[confidence] assign[=] binary_operation[name[record].support / call[name[transaction_manager].calc_support, parameter[name[items_base]]]]
variable[lift] assign[=] binary_operation[name[confidence] / call[name[transaction_manager].calc_support, parameter[name[items_add]]]]
<ast.Yield object at 0x7da2047e81c0> | keyword[def] identifier[gen_ordered_statistics] ( identifier[transaction_manager] , identifier[record] ):
literal[string]
identifier[items] = identifier[record] . identifier[items]
keyword[for] identifier[combination_set] keyword[in] identifier[combinations] ( identifier[sorted] ( identifier[items] ), identifier[len] ( identifier[items] )- literal[int] ):
identifier[items_base] = identifier[frozenset] ( identifier[combination_set] )
identifier[items_add] = identifier[frozenset] ( identifier[items] . identifier[difference] ( identifier[items_base] ))
identifier[confidence] =(
identifier[record] . identifier[support] / identifier[transaction_manager] . identifier[calc_support] ( identifier[items_base] ))
identifier[lift] = identifier[confidence] / identifier[transaction_manager] . identifier[calc_support] ( identifier[items_add] )
keyword[yield] identifier[OrderedStatistic] (
identifier[frozenset] ( identifier[items_base] ), identifier[frozenset] ( identifier[items_add] ), identifier[confidence] , identifier[lift] ) | def gen_ordered_statistics(transaction_manager, record):
"""
Returns a generator of ordered statistics as OrderedStatistic instances.
Arguments:
transaction_manager -- Transactions as a TransactionManager instance.
record -- A support record as a SupportRecord instance.
"""
items = record.items
for combination_set in combinations(sorted(items), len(items) - 1):
items_base = frozenset(combination_set)
items_add = frozenset(items.difference(items_base))
confidence = record.support / transaction_manager.calc_support(items_base)
lift = confidence / transaction_manager.calc_support(items_add)
yield OrderedStatistic(frozenset(items_base), frozenset(items_add), confidence, lift) # depends on [control=['for'], data=['combination_set']] |
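A worked example of the statistics above; TransactionManager, SupportRecord, and OrderedStatistic are hypothetical stand-ins exposing only the attributes the generator uses:

from collections import namedtuple
from itertools import combinations

OrderedStatistic = namedtuple('OrderedStatistic',
                              ['items_base', 'items_add', 'confidence', 'lift'])
SupportRecord = namedtuple('SupportRecord', ['items', 'support'])

class TransactionManager:
    def __init__(self, transactions):
        self._transactions = [frozenset(t) for t in transactions]
    def calc_support(self, items):
        items = frozenset(items)
        hits = sum(1 for t in self._transactions if items <= t)
        return hits / len(self._transactions)

tm = TransactionManager([('beer', 'nuts'), ('beer', 'cheese'),
                         ('beer', 'nuts'), ('nuts',)])
record = SupportRecord(frozenset(['beer', 'nuts']),
                       tm.calc_support(['beer', 'nuts']))  # support = 0.5
for stat in gen_ordered_statistics(tm, record):
    # {'beer'} -> {'nuts'}: confidence = 0.5/0.75 ~ 0.667, lift ~ 0.889 (and vice versa)
    print(stat)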
def _message(status, content):
"""Send message interface.
Parameters
----------
status : str
The type of message
content : str
"""
event = f'message.{status}'
if flask.has_request_context():
emit(event, dict(data=pack(content)))
else:
sio = flask.current_app.extensions['socketio']
sio.emit(event, dict(data=pack(content)))
eventlet.sleep() | def function[_message, parameter[status, content]]:
constant[Send message interface.
Parameters
----------
status : str
The type of message
content : str
]
variable[event] assign[=] <ast.JoinedStr object at 0x7da18eb562c0>
if call[name[flask].has_request_context, parameter[]] begin[:]
call[name[emit], parameter[name[event], call[name[dict], parameter[]]]]
call[name[eventlet].sleep, parameter[]] | keyword[def] identifier[_message] ( identifier[status] , identifier[content] ):
literal[string]
identifier[event] = literal[string]
keyword[if] identifier[flask] . identifier[has_request_context] ():
identifier[emit] ( identifier[event] , identifier[dict] ( identifier[data] = identifier[pack] ( identifier[content] )))
keyword[else] :
identifier[sio] = identifier[flask] . identifier[current_app] . identifier[extensions] [ literal[string] ]
identifier[sio] . identifier[emit] ( identifier[event] , identifier[dict] ( identifier[data] = identifier[pack] ( identifier[content] )))
identifier[eventlet] . identifier[sleep] () | def _message(status, content):
"""Send message interface.
Parameters
----------
status : str
The type of message
content : str
"""
event = f'message.{status}'
if flask.has_request_context():
emit(event, dict(data=pack(content))) # depends on [control=['if'], data=[]]
else:
sio = flask.current_app.extensions['socketio']
sio.emit(event, dict(data=pack(content)))
eventlet.sleep() |
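A brief usage note: the emitted event name is derived from the status argument, so clients subscribe per status; the statuses below are illustrative.

_message('info', 'training started')       # emits event 'message.info'
_message('error', 'diverged at step 10')   # emits event 'message.error'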
def get_attachment(
self,
attachment,
headers=None,
write_to=None,
attachment_type=None):
"""
Retrieves a document's attachment and optionally writes it to a file.
If the content_type of the attachment is 'application/json' then the
        data returned will be in JSON format; otherwise the response content will
be returned as text or binary.
:param str attachment: Attachment file name used to identify the
attachment.
:param dict headers: Optional, additional headers to be sent
with request.
:param file write_to: Optional file handler to write the attachment to.
The write_to file must be opened for writing prior to including it
as an argument for this method.
:param str attachment_type: Optional setting to define how to handle the
attachment when returning its contents from this method. Valid
values are ``'text'``, ``'json'``, and ``'binary'`` If
omitted then the returned content will be based on the
response Content-Type.
:returns: The attachment content
"""
# need latest rev
self.fetch()
attachment_url = '/'.join((self.document_url, attachment))
if headers is None:
headers = {'If-Match': self['_rev']}
else:
headers['If-Match'] = self['_rev']
resp = self.r_session.get(attachment_url, headers=headers)
resp.raise_for_status()
if attachment_type is None:
if resp.headers['Content-Type'].startswith('text/'):
attachment_type = 'text'
elif resp.headers['Content-Type'] == 'application/json':
attachment_type = 'json'
else:
attachment_type = 'binary'
if write_to is not None:
if attachment_type in ('text', 'json'):
write_to.write(resp.text)
else:
write_to.write(resp.content)
if attachment_type == 'text':
return resp.text
if attachment_type == 'json':
return response_to_json_dict(resp)
return resp.content | def function[get_attachment, parameter[self, attachment, headers, write_to, attachment_type]]:
constant[
Retrieves a document's attachment and optionally writes it to a file.
If the content_type of the attachment is 'application/json' then the
        data returned will be in JSON format; otherwise the response content will
be returned as text or binary.
:param str attachment: Attachment file name used to identify the
attachment.
:param dict headers: Optional, additional headers to be sent
with request.
:param file write_to: Optional file handler to write the attachment to.
The write_to file must be opened for writing prior to including it
as an argument for this method.
:param str attachment_type: Optional setting to define how to handle the
attachment when returning its contents from this method. Valid
values are ``'text'``, ``'json'``, and ``'binary'`` If
omitted then the returned content will be based on the
response Content-Type.
:returns: The attachment content
]
call[name[self].fetch, parameter[]]
variable[attachment_url] assign[=] call[constant[/].join, parameter[tuple[[<ast.Attribute object at 0x7da20c76ea40>, <ast.Name object at 0x7da20c76ce80>]]]]
if compare[name[headers] is constant[None]] begin[:]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b23452a0>], [<ast.Subscript object at 0x7da20c796080>]]
variable[resp] assign[=] call[name[self].r_session.get, parameter[name[attachment_url]]]
call[name[resp].raise_for_status, parameter[]]
if compare[name[attachment_type] is constant[None]] begin[:]
if call[call[name[resp].headers][constant[Content-Type]].startswith, parameter[constant[text/]]] begin[:]
variable[attachment_type] assign[=] constant[text]
if compare[name[write_to] is_not constant[None]] begin[:]
if compare[name[attachment_type] in tuple[[<ast.Constant object at 0x7da20c7c9de0>, <ast.Constant object at 0x7da20c7c8f10>]]] begin[:]
call[name[write_to].write, parameter[name[resp].text]]
if compare[name[attachment_type] equal[==] constant[text]] begin[:]
return[name[resp].text]
if compare[name[attachment_type] equal[==] constant[json]] begin[:]
return[call[name[response_to_json_dict], parameter[name[resp]]]]
return[name[resp].content] | keyword[def] identifier[get_attachment] (
identifier[self] ,
identifier[attachment] ,
identifier[headers] = keyword[None] ,
identifier[write_to] = keyword[None] ,
identifier[attachment_type] = keyword[None] ):
literal[string]
identifier[self] . identifier[fetch] ()
identifier[attachment_url] = literal[string] . identifier[join] (( identifier[self] . identifier[document_url] , identifier[attachment] ))
keyword[if] identifier[headers] keyword[is] keyword[None] :
identifier[headers] ={ literal[string] : identifier[self] [ literal[string] ]}
keyword[else] :
identifier[headers] [ literal[string] ]= identifier[self] [ literal[string] ]
identifier[resp] = identifier[self] . identifier[r_session] . identifier[get] ( identifier[attachment_url] , identifier[headers] = identifier[headers] )
identifier[resp] . identifier[raise_for_status] ()
keyword[if] identifier[attachment_type] keyword[is] keyword[None] :
keyword[if] identifier[resp] . identifier[headers] [ literal[string] ]. identifier[startswith] ( literal[string] ):
identifier[attachment_type] = literal[string]
keyword[elif] identifier[resp] . identifier[headers] [ literal[string] ]== literal[string] :
identifier[attachment_type] = literal[string]
keyword[else] :
identifier[attachment_type] = literal[string]
keyword[if] identifier[write_to] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[attachment_type] keyword[in] ( literal[string] , literal[string] ):
identifier[write_to] . identifier[write] ( identifier[resp] . identifier[text] )
keyword[else] :
identifier[write_to] . identifier[write] ( identifier[resp] . identifier[content] )
keyword[if] identifier[attachment_type] == literal[string] :
keyword[return] identifier[resp] . identifier[text]
keyword[if] identifier[attachment_type] == literal[string] :
keyword[return] identifier[response_to_json_dict] ( identifier[resp] )
keyword[return] identifier[resp] . identifier[content] | def get_attachment(self, attachment, headers=None, write_to=None, attachment_type=None):
"""
Retrieves a document's attachment and optionally writes it to a file.
If the content_type of the attachment is 'application/json' then the
data returned will be in JSON format otherwise the response content will
be returned as text or binary.
:param str attachment: Attachment file name used to identify the
attachment.
:param dict headers: Optional, additional headers to be sent
with request.
:param file write_to: Optional file handler to write the attachment to.
The write_to file must be opened for writing prior to including it
as an argument for this method.
:param str attachment_type: Optional setting to define how to handle the
attachment when returning its contents from this method. Valid
values are ``'text'``, ``'json'``, and ``'binary'`` If
omitted then the returned content will be based on the
response Content-Type.
:returns: The attachment content
"""
# need latest rev
self.fetch()
attachment_url = '/'.join((self.document_url, attachment))
if headers is None:
headers = {'If-Match': self['_rev']} # depends on [control=['if'], data=['headers']]
else:
headers['If-Match'] = self['_rev']
resp = self.r_session.get(attachment_url, headers=headers)
resp.raise_for_status()
if attachment_type is None:
if resp.headers['Content-Type'].startswith('text/'):
attachment_type = 'text' # depends on [control=['if'], data=[]]
elif resp.headers['Content-Type'] == 'application/json':
attachment_type = 'json' # depends on [control=['if'], data=[]]
else:
attachment_type = 'binary' # depends on [control=['if'], data=['attachment_type']]
if write_to is not None:
if attachment_type in ('text', 'json'):
write_to.write(resp.text) # depends on [control=['if'], data=[]]
else:
write_to.write(resp.content) # depends on [control=['if'], data=['write_to']]
if attachment_type == 'text':
return resp.text # depends on [control=['if'], data=[]]
if attachment_type == 'json':
return response_to_json_dict(resp) # depends on [control=['if'], data=[]]
return resp.content |
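A hedged usage sketch; 'doc' stands for a remote document object of this class, and the attachment names are hypothetical:

# Stream a binary attachment straight to disk.
with open('report.pdf', 'wb') as fh:
    doc.get_attachment('report.pdf', write_to=fh, attachment_type='binary')

# Small JSON attachments can be returned directly as a dict.
meta = doc.get_attachment('metadata.json', attachment_type='json')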
def audio_send_stream(self, httptype=None,
channel=None, path_file=None, encode=None):
"""
Params:
path_file - path to audio file
channel: - integer
httptype - type string (singlepart or multipart)
                singlepart: HTTP content is a continuous flow of audio packets
                multipart: HTTP content type is multipart/x-mixed-replace, and
                each audio packet ends with a boundary string
        Supported audio encode types according to the documentation:
PCM
ADPCM
G.711A
G.711.Mu
G.726
G.729
MPEG2
AMR
AAC
"""
if httptype is None or channel is None:
raise RuntimeError("Requires htttype and channel")
file_audio = {
'file': open(path_file, 'rb'),
}
header = {
'content-type': 'Audio/' + encode,
'content-length': '9999999'
}
self.command_audio(
'audio.cgi?action=postAudio&httptype={0}&channel={1}'.format(
httptype, channel),
file_content=file_audio,
http_header=header
) | def function[audio_send_stream, parameter[self, httptype, channel, path_file, encode]]:
constant[
Params:
path_file - path to audio file
channel: - integer
httptype - type string (singlepart or multipart)
singlepart: HTTP content is a continuos flow of audio packets
multipart: HTTP content type is multipart/x-mixed-replace, and
each audio packet ends with a boundary string
Supported audio encode type according with documentation:
PCM
ADPCM
G.711A
G.711.Mu
G.726
G.729
MPEG2
AMR
AAC
]
if <ast.BoolOp object at 0x7da1b1116e60> begin[:]
<ast.Raise object at 0x7da1b1115ae0>
variable[file_audio] assign[=] dictionary[[<ast.Constant object at 0x7da1b1116c50>], [<ast.Call object at 0x7da1b1117940>]]
variable[header] assign[=] dictionary[[<ast.Constant object at 0x7da1b11159f0>, <ast.Constant object at 0x7da1b11164d0>], [<ast.BinOp object at 0x7da1b11179a0>, <ast.Constant object at 0x7da1b1116950>]]
call[name[self].command_audio, parameter[call[constant[audio.cgi?action=postAudio&httptype={0}&channel={1}].format, parameter[name[httptype], name[channel]]]]] | keyword[def] identifier[audio_send_stream] ( identifier[self] , identifier[httptype] = keyword[None] ,
identifier[channel] = keyword[None] , identifier[path_file] = keyword[None] , identifier[encode] = keyword[None] ):
literal[string]
keyword[if] identifier[httptype] keyword[is] keyword[None] keyword[or] identifier[channel] keyword[is] keyword[None] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[file_audio] ={
literal[string] : identifier[open] ( identifier[path_file] , literal[string] ),
}
identifier[header] ={
literal[string] : literal[string] + identifier[encode] ,
literal[string] : literal[string]
}
identifier[self] . identifier[command_audio] (
literal[string] . identifier[format] (
identifier[httptype] , identifier[channel] ),
identifier[file_content] = identifier[file_audio] ,
identifier[http_header] = identifier[header]
) | def audio_send_stream(self, httptype=None, channel=None, path_file=None, encode=None):
"""
Params:
path_file - path to audio file
channel: - integer
httptype - type string (singlepart or multipart)
                singlepart: HTTP content is a continuous flow of audio packets
                multipart: HTTP content type is multipart/x-mixed-replace, and
                each audio packet ends with a boundary string
        Supported audio encode types according to the documentation:
PCM
ADPCM
G.711A
G.711.Mu
G.726
G.729
MPEG2
AMR
AAC
"""
if httptype is None or channel is None:
            raise RuntimeError('Requires httptype and channel') # depends on [control=['if'], data=[]]
file_audio = {'file': open(path_file, 'rb')}
header = {'content-type': 'Audio/' + encode, 'content-length': '9999999'}
self.command_audio('audio.cgi?action=postAudio&httptype={0}&channel={1}'.format(httptype, channel), file_content=file_audio, http_header=header) |
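A hedged usage sketch; 'camera' stands for an instance of the class above, and the file path is hypothetical:

camera.audio_send_stream(httptype='singlepart', channel=1,
                         path_file='/tmp/alert.g711a', encode='G.711A')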
def plot_shapes(df_shapes, shape_i_columns, axis=None, autoxlim=True,
autoylim=True, **kwargs):
'''
Plot shapes from table/data-frame where each row corresponds to a vertex of
a shape. Shape vertices are grouped by `shape_i_columns`.
For example, consider the following dataframe:
shape_i vertex_i x y
0 0 0 81.679949 264.69306
1 0 1 81.679949 286.51788
2 0 2 102.87004 286.51788
3 0 3 102.87004 264.69306
4 1 0 103.11417 264.40011
5 1 1 103.11417 242.72177
6 1 2 81.435824 242.72177
7 1 3 81.435824 264.40011
8 2 0 124.84134 264.69306
9 2 1 103.65125 264.69306
10 2 2 103.65125 286.37141
11 2 3 124.84134 286.37141
This dataframe corresponds to three shapes, with (ordered) shape vertices
grouped by `shape_i`. Note that the column `vertex_i` is not required.
'''
if axis is None:
fig, axis = plt.subplots()
props = itertools.cycle(mpl.rcParams['axes.prop_cycle'])
color = kwargs.pop('fc', None)
# Cycle through default colors to set face color, unless face color was set
# explicitly.
    patches = [Polygon(df_shape_i[['x', 'y']].values, fc=next(props)['color']
if color is None else color, **kwargs)
for shape_i, df_shape_i in df_shapes.groupby(shape_i_columns)]
collection = PatchCollection(patches)
axis.add_collection(collection)
xy_stats = df_shapes[['x', 'y']].describe()
if autoxlim:
axis.set_xlim(*xy_stats.x.loc[['min', 'max']])
if autoylim:
axis.set_ylim(*xy_stats.y.loc[['min', 'max']])
return axis | def function[plot_shapes, parameter[df_shapes, shape_i_columns, axis, autoxlim, autoylim]]:
constant[
Plot shapes from table/data-frame where each row corresponds to a vertex of
a shape. Shape vertices are grouped by `shape_i_columns`.
For example, consider the following dataframe:
shape_i vertex_i x y
0 0 0 81.679949 264.69306
1 0 1 81.679949 286.51788
2 0 2 102.87004 286.51788
3 0 3 102.87004 264.69306
4 1 0 103.11417 264.40011
5 1 1 103.11417 242.72177
6 1 2 81.435824 242.72177
7 1 3 81.435824 264.40011
8 2 0 124.84134 264.69306
9 2 1 103.65125 264.69306
10 2 2 103.65125 286.37141
11 2 3 124.84134 286.37141
This dataframe corresponds to three shapes, with (ordered) shape vertices
grouped by `shape_i`. Note that the column `vertex_i` is not required.
]
if compare[name[axis] is constant[None]] begin[:]
<ast.Tuple object at 0x7da20c76c2b0> assign[=] call[name[plt].subplots, parameter[]]
variable[props] assign[=] call[name[itertools].cycle, parameter[call[name[mpl].rcParams][constant[axes.prop_cycle]]]]
variable[color] assign[=] call[name[kwargs].pop, parameter[constant[fc], constant[None]]]
variable[patches] assign[=] <ast.ListComp object at 0x7da20c76ca00>
variable[collection] assign[=] call[name[PatchCollection], parameter[name[patches]]]
call[name[axis].add_collection, parameter[name[collection]]]
variable[xy_stats] assign[=] call[call[name[df_shapes]][list[[<ast.Constant object at 0x7da20c76ca90>, <ast.Constant object at 0x7da20c76ca60>]]].describe, parameter[]]
if name[autoxlim] begin[:]
call[name[axis].set_xlim, parameter[<ast.Starred object at 0x7da20c76c040>]]
if name[autoylim] begin[:]
call[name[axis].set_ylim, parameter[<ast.Starred object at 0x7da20c76dff0>]]
return[name[axis]] | keyword[def] identifier[plot_shapes] ( identifier[df_shapes] , identifier[shape_i_columns] , identifier[axis] = keyword[None] , identifier[autoxlim] = keyword[True] ,
identifier[autoylim] = keyword[True] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[axis] keyword[is] keyword[None] :
identifier[fig] , identifier[axis] = identifier[plt] . identifier[subplots] ()
identifier[props] = identifier[itertools] . identifier[cycle] ( identifier[mpl] . identifier[rcParams] [ literal[string] ])
identifier[color] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
identifier[patches] =[ identifier[Polygon] ( identifier[df_shape_i] [[ literal[string] , literal[string] ]]. identifier[values] , identifier[fc] = identifier[props] . identifier[next] ()[ literal[string] ]
keyword[if] identifier[color] keyword[is] keyword[None] keyword[else] identifier[color] ,** identifier[kwargs] )
keyword[for] identifier[shape_i] , identifier[df_shape_i] keyword[in] identifier[df_shapes] . identifier[groupby] ( identifier[shape_i_columns] )]
identifier[collection] = identifier[PatchCollection] ( identifier[patches] )
identifier[axis] . identifier[add_collection] ( identifier[collection] )
identifier[xy_stats] = identifier[df_shapes] [[ literal[string] , literal[string] ]]. identifier[describe] ()
keyword[if] identifier[autoxlim] :
identifier[axis] . identifier[set_xlim] (* identifier[xy_stats] . identifier[x] . identifier[loc] [[ literal[string] , literal[string] ]])
keyword[if] identifier[autoylim] :
identifier[axis] . identifier[set_ylim] (* identifier[xy_stats] . identifier[y] . identifier[loc] [[ literal[string] , literal[string] ]])
keyword[return] identifier[axis] | def plot_shapes(df_shapes, shape_i_columns, axis=None, autoxlim=True, autoylim=True, **kwargs):
"""
Plot shapes from table/data-frame where each row corresponds to a vertex of
a shape. Shape vertices are grouped by `shape_i_columns`.
For example, consider the following dataframe:
shape_i vertex_i x y
0 0 0 81.679949 264.69306
1 0 1 81.679949 286.51788
2 0 2 102.87004 286.51788
3 0 3 102.87004 264.69306
4 1 0 103.11417 264.40011
5 1 1 103.11417 242.72177
6 1 2 81.435824 242.72177
7 1 3 81.435824 264.40011
8 2 0 124.84134 264.69306
9 2 1 103.65125 264.69306
10 2 2 103.65125 286.37141
11 2 3 124.84134 286.37141
This dataframe corresponds to three shapes, with (ordered) shape vertices
grouped by `shape_i`. Note that the column `vertex_i` is not required.
"""
if axis is None:
(fig, axis) = plt.subplots() # depends on [control=['if'], data=['axis']]
props = itertools.cycle(mpl.rcParams['axes.prop_cycle'])
color = kwargs.pop('fc', None)
# Cycle through default colors to set face color, unless face color was set
# explicitly.
    patches = [Polygon(df_shape_i[['x', 'y']].values, fc=next(props)['color'] if color is None else color, **kwargs) for (shape_i, df_shape_i) in df_shapes.groupby(shape_i_columns)]
collection = PatchCollection(patches)
axis.add_collection(collection)
xy_stats = df_shapes[['x', 'y']].describe()
if autoxlim:
axis.set_xlim(*xy_stats.x.loc[['min', 'max']]) # depends on [control=['if'], data=[]]
if autoylim:
axis.set_ylim(*xy_stats.y.loc[['min', 'max']]) # depends on [control=['if'], data=[]]
return axis |
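A minimal usage sketch reproducing two shapes from the docstring's table; it assumes pandas plus the module-level imports the function relies on (plt, mpl, itertools, Polygon, PatchCollection):

import pandas as pd
import matplotlib.pyplot as plt

df = pd.DataFrame({
    'shape_i': [0, 0, 0, 0, 1, 1, 1, 1],
    'x': [81.68, 81.68, 102.87, 102.87, 103.11, 103.11, 81.44, 81.44],
    'y': [264.69, 286.52, 286.52, 264.69, 264.40, 242.72, 242.72, 264.40]})
ax = plot_shapes(df, 'shape_i')  # one Polygon per shape_i group
plt.show()

Note that matplotlib's PatchCollection overrides per-patch face colors by default; passing match_original=True to it preserves the colors assigned above.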
def sign_digest(sock, keygrip, digest, sp=subprocess, environ=None):
"""Sign a digest using specified key using GPG agent."""
hash_algo = 8 # SHA256
assert len(digest) == 32
assert communicate(sock, 'RESET').startswith(b'OK')
ttyname = check_output(args=['tty'], sp=sp).strip()
options = ['ttyname={}'.format(ttyname)] # set TTY for passphrase entry
display = (environ or os.environ).get('DISPLAY')
if display is not None:
options.append('display={}'.format(display))
for opt in options:
assert communicate(sock, 'OPTION {}'.format(opt)) == b'OK'
assert communicate(sock, 'SIGKEY {}'.format(keygrip)) == b'OK'
hex_digest = binascii.hexlify(digest).upper().decode('ascii')
assert communicate(sock, 'SETHASH {} {}'.format(hash_algo,
hex_digest)) == b'OK'
assert communicate(sock, 'SETKEYDESC '
'Sign+a+new+TREZOR-based+subkey') == b'OK'
assert communicate(sock, 'PKSIGN') == b'OK'
while True:
line = recvline(sock).strip()
if line.startswith(b'S PROGRESS'):
continue
else:
break
line = unescape(line)
log.debug('unescaped: %r', line)
prefix, sig = line.split(b' ', 1)
if prefix != b'D':
raise ValueError(prefix)
sig, leftover = parse(sig)
assert not leftover, leftover
return parse_sig(sig) | def function[sign_digest, parameter[sock, keygrip, digest, sp, environ]]:
    constant[Sign a digest with the specified key using the GPG agent.]
variable[hash_algo] assign[=] constant[8]
assert[compare[call[name[len], parameter[name[digest]]] equal[==] constant[32]]]
assert[call[call[name[communicate], parameter[name[sock], constant[RESET]]].startswith, parameter[constant[b'OK']]]]
variable[ttyname] assign[=] call[call[name[check_output], parameter[]].strip, parameter[]]
variable[options] assign[=] list[[<ast.Call object at 0x7da1b12d8c70>]]
variable[display] assign[=] call[<ast.BoolOp object at 0x7da1b12d8e80>.get, parameter[constant[DISPLAY]]]
if compare[name[display] is_not constant[None]] begin[:]
call[name[options].append, parameter[call[constant[display={}].format, parameter[name[display]]]]]
for taget[name[opt]] in starred[name[options]] begin[:]
assert[compare[call[name[communicate], parameter[name[sock], call[constant[OPTION {}].format, parameter[name[opt]]]]] equal[==] constant[b'OK']]]
assert[compare[call[name[communicate], parameter[name[sock], call[constant[SIGKEY {}].format, parameter[name[keygrip]]]]] equal[==] constant[b'OK']]]
variable[hex_digest] assign[=] call[call[call[name[binascii].hexlify, parameter[name[digest]]].upper, parameter[]].decode, parameter[constant[ascii]]]
assert[compare[call[name[communicate], parameter[name[sock], call[constant[SETHASH {} {}].format, parameter[name[hash_algo], name[hex_digest]]]]] equal[==] constant[b'OK']]]
assert[compare[call[name[communicate], parameter[name[sock], constant[SETKEYDESC Sign+a+new+TREZOR-based+subkey]]] equal[==] constant[b'OK']]]
assert[compare[call[name[communicate], parameter[name[sock], constant[PKSIGN]]] equal[==] constant[b'OK']]]
while constant[True] begin[:]
variable[line] assign[=] call[call[name[recvline], parameter[name[sock]]].strip, parameter[]]
if call[name[line].startswith, parameter[constant[b'S PROGRESS']]] begin[:]
continue
variable[line] assign[=] call[name[unescape], parameter[name[line]]]
call[name[log].debug, parameter[constant[unescaped: %r], name[line]]]
<ast.Tuple object at 0x7da1b12d9390> assign[=] call[name[line].split, parameter[constant[b' '], constant[1]]]
if compare[name[prefix] not_equal[!=] constant[b'D']] begin[:]
<ast.Raise object at 0x7da1b12d9e10>
<ast.Tuple object at 0x7da1b12c2980> assign[=] call[name[parse], parameter[name[sig]]]
assert[<ast.UnaryOp object at 0x7da1b12c0850>]
return[call[name[parse_sig], parameter[name[sig]]]] | keyword[def] identifier[sign_digest] ( identifier[sock] , identifier[keygrip] , identifier[digest] , identifier[sp] = identifier[subprocess] , identifier[environ] = keyword[None] ):
literal[string]
identifier[hash_algo] = literal[int]
keyword[assert] identifier[len] ( identifier[digest] )== literal[int]
keyword[assert] identifier[communicate] ( identifier[sock] , literal[string] ). identifier[startswith] ( literal[string] )
identifier[ttyname] = identifier[check_output] ( identifier[args] =[ literal[string] ], identifier[sp] = identifier[sp] ). identifier[strip] ()
identifier[options] =[ literal[string] . identifier[format] ( identifier[ttyname] )]
identifier[display] =( identifier[environ] keyword[or] identifier[os] . identifier[environ] ). identifier[get] ( literal[string] )
keyword[if] identifier[display] keyword[is] keyword[not] keyword[None] :
identifier[options] . identifier[append] ( literal[string] . identifier[format] ( identifier[display] ))
keyword[for] identifier[opt] keyword[in] identifier[options] :
keyword[assert] identifier[communicate] ( identifier[sock] , literal[string] . identifier[format] ( identifier[opt] ))== literal[string]
keyword[assert] identifier[communicate] ( identifier[sock] , literal[string] . identifier[format] ( identifier[keygrip] ))== literal[string]
identifier[hex_digest] = identifier[binascii] . identifier[hexlify] ( identifier[digest] ). identifier[upper] (). identifier[decode] ( literal[string] )
keyword[assert] identifier[communicate] ( identifier[sock] , literal[string] . identifier[format] ( identifier[hash_algo] ,
identifier[hex_digest] ))== literal[string]
keyword[assert] identifier[communicate] ( identifier[sock] , literal[string]
literal[string] )== literal[string]
keyword[assert] identifier[communicate] ( identifier[sock] , literal[string] )== literal[string]
keyword[while] keyword[True] :
identifier[line] = identifier[recvline] ( identifier[sock] ). identifier[strip] ()
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[continue]
keyword[else] :
keyword[break]
identifier[line] = identifier[unescape] ( identifier[line] )
identifier[log] . identifier[debug] ( literal[string] , identifier[line] )
identifier[prefix] , identifier[sig] = identifier[line] . identifier[split] ( literal[string] , literal[int] )
keyword[if] identifier[prefix] != literal[string] :
keyword[raise] identifier[ValueError] ( identifier[prefix] )
identifier[sig] , identifier[leftover] = identifier[parse] ( identifier[sig] )
keyword[assert] keyword[not] identifier[leftover] , identifier[leftover]
keyword[return] identifier[parse_sig] ( identifier[sig] ) | def sign_digest(sock, keygrip, digest, sp=subprocess, environ=None):
"""Sign a digest using specified key using GPG agent."""
hash_algo = 8 # SHA256
assert len(digest) == 32
assert communicate(sock, 'RESET').startswith(b'OK')
ttyname = check_output(args=['tty'], sp=sp).strip()
options = ['ttyname={}'.format(ttyname)] # set TTY for passphrase entry
display = (environ or os.environ).get('DISPLAY')
if display is not None:
options.append('display={}'.format(display)) # depends on [control=['if'], data=['display']]
for opt in options:
assert communicate(sock, 'OPTION {}'.format(opt)) == b'OK' # depends on [control=['for'], data=['opt']]
assert communicate(sock, 'SIGKEY {}'.format(keygrip)) == b'OK'
hex_digest = binascii.hexlify(digest).upper().decode('ascii')
assert communicate(sock, 'SETHASH {} {}'.format(hash_algo, hex_digest)) == b'OK'
assert communicate(sock, 'SETKEYDESC Sign+a+new+TREZOR-based+subkey') == b'OK'
assert communicate(sock, 'PKSIGN') == b'OK'
while True:
line = recvline(sock).strip()
if line.startswith(b'S PROGRESS'):
continue # depends on [control=['if'], data=[]]
else:
break # depends on [control=['while'], data=[]]
line = unescape(line)
log.debug('unescaped: %r', line)
(prefix, sig) = line.split(b' ', 1)
if prefix != b'D':
raise ValueError(prefix) # depends on [control=['if'], data=['prefix']]
(sig, leftover) = parse(sig)
assert not leftover, leftover
return parse_sig(sig) |
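For reference, the Assuan exchange this function drives looks roughly like the following (values abbreviated; exact responses depend on the agent):

# RESET                       -> OK
# OPTION ttyname=/dev/pts/0   -> OK   (plus display=... when DISPLAY is set)
# SIGKEY <40-hex keygrip>     -> OK
# SETHASH 8 <64-hex digest>   -> OK   (8 selects SHA256)
# SETKEYDESC ...              -> OK
# PKSIGN                      -> OK, optional 'S PROGRESS ...' lines, then 'D <sig s-expression>'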
def get_product_order_book(self, product_id, level=1):
"""Get a list of open orders for a product.
The amount of detail shown can be customized with the `level`
parameter:
* 1: Only the best bid and ask
* 2: Top 50 bids and asks (aggregated)
* 3: Full order book (non aggregated)
Level 1 and Level 2 are recommended for polling. For the most
up-to-date data, consider using the websocket stream.
**Caution**: Level 3 is only recommended for users wishing to
maintain a full real-time order book using the websocket
stream. Abuse of Level 3 via polling will cause your access to
be limited or blocked.
Args:
product_id (str): Product
level (Optional[int]): Order book level (1, 2, or 3).
Default is 1.
Returns:
dict: Order book. Example for level 1::
{
"sequence": "3",
"bids": [
[ price, size, num-orders ],
],
"asks": [
[ price, size, num-orders ],
]
}
"""
params = {'level': level}
return self._send_message('get',
'/products/{}/book'.format(product_id),
params=params) | def function[get_product_order_book, parameter[self, product_id, level]]:
constant[Get a list of open orders for a product.
The amount of detail shown can be customized with the `level`
parameter:
* 1: Only the best bid and ask
* 2: Top 50 bids and asks (aggregated)
* 3: Full order book (non aggregated)
Level 1 and Level 2 are recommended for polling. For the most
up-to-date data, consider using the websocket stream.
**Caution**: Level 3 is only recommended for users wishing to
maintain a full real-time order book using the websocket
stream. Abuse of Level 3 via polling will cause your access to
be limited or blocked.
Args:
product_id (str): Product
level (Optional[int]): Order book level (1, 2, or 3).
Default is 1.
Returns:
dict: Order book. Example for level 1::
{
"sequence": "3",
"bids": [
[ price, size, num-orders ],
],
"asks": [
[ price, size, num-orders ],
]
}
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b18bcc70>], [<ast.Name object at 0x7da1b18bee00>]]
return[call[name[self]._send_message, parameter[constant[get], call[constant[/products/{}/book].format, parameter[name[product_id]]]]]] | keyword[def] identifier[get_product_order_book] ( identifier[self] , identifier[product_id] , identifier[level] = literal[int] ):
literal[string]
identifier[params] ={ literal[string] : identifier[level] }
keyword[return] identifier[self] . identifier[_send_message] ( literal[string] ,
literal[string] . identifier[format] ( identifier[product_id] ),
identifier[params] = identifier[params] ) | def get_product_order_book(self, product_id, level=1):
"""Get a list of open orders for a product.
The amount of detail shown can be customized with the `level`
parameter:
* 1: Only the best bid and ask
* 2: Top 50 bids and asks (aggregated)
* 3: Full order book (non aggregated)
Level 1 and Level 2 are recommended for polling. For the most
up-to-date data, consider using the websocket stream.
**Caution**: Level 3 is only recommended for users wishing to
maintain a full real-time order book using the websocket
stream. Abuse of Level 3 via polling will cause your access to
be limited or blocked.
Args:
product_id (str): Product
level (Optional[int]): Order book level (1, 2, or 3).
Default is 1.
Returns:
dict: Order book. Example for level 1::
{
"sequence": "3",
"bids": [
[ price, size, num-orders ],
],
"asks": [
[ price, size, num-orders ],
]
}
"""
params = {'level': level}
return self._send_message('get', '/products/{}/book'.format(product_id), params=params) |
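A hedged usage sketch, assuming a public API client class that exposes this method (the class name and wiring are illustrative):

client = PublicClient()  # hypothetical client wrapping _send_message
book = client.get_product_order_book('BTC-USD', level=1)
best_bid_price, best_bid_size, num_orders = book['bids'][0]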
def show(self, brief=False):
""" Show metadata for each path given """
output = []
for path in self.options.paths or ["."]:
if self.options.verbose:
utils.info("Checking {0} for metadata.".format(path))
tree = fmf.Tree(path)
for node in tree.prune(
self.options.whole, self.options.keys, self.options.names,
self.options.filters):
if brief:
show = node.show(brief=True)
else:
show = node.show(
brief=False,
formatting=self.options.formatting,
values=self.options.values)
# List source files when in debug mode
if self.options.debug:
for source in node.sources:
show += utils.color("{0}\n".format(source), "blue")
if show is not None:
output.append(show)
# Print output and summary
if brief or self.options.formatting:
joined = "".join(output)
else:
joined = "\n".join(output)
try: # pragma: no cover
print(joined, end="")
except UnicodeEncodeError: # pragma: no cover
print(joined.encode('utf-8'), end="")
if self.options.verbose:
utils.info("Found {0}.".format(
utils.listed(len(output), "object")))
self.output = joined | def function[show, parameter[self, brief]]:
constant[ Show metadata for each path given ]
variable[output] assign[=] list[[]]
for taget[name[path]] in starred[<ast.BoolOp object at 0x7da18f58d4b0>] begin[:]
if name[self].options.verbose begin[:]
call[name[utils].info, parameter[call[constant[Checking {0} for metadata.].format, parameter[name[path]]]]]
variable[tree] assign[=] call[name[fmf].Tree, parameter[name[path]]]
for taget[name[node]] in starred[call[name[tree].prune, parameter[name[self].options.whole, name[self].options.keys, name[self].options.names, name[self].options.filters]]] begin[:]
if name[brief] begin[:]
variable[show] assign[=] call[name[node].show, parameter[]]
if name[self].options.debug begin[:]
for taget[name[source]] in starred[name[node].sources] begin[:]
<ast.AugAssign object at 0x7da18dc04400>
if compare[name[show] is_not constant[None]] begin[:]
call[name[output].append, parameter[name[show]]]
if <ast.BoolOp object at 0x7da18dc06b90> begin[:]
variable[joined] assign[=] call[constant[].join, parameter[name[output]]]
<ast.Try object at 0x7da18dc06680>
if name[self].options.verbose begin[:]
call[name[utils].info, parameter[call[constant[Found {0}.].format, parameter[call[name[utils].listed, parameter[call[name[len], parameter[name[output]]], constant[object]]]]]]]
name[self].output assign[=] name[joined] | keyword[def] identifier[show] ( identifier[self] , identifier[brief] = keyword[False] ):
literal[string]
identifier[output] =[]
keyword[for] identifier[path] keyword[in] identifier[self] . identifier[options] . identifier[paths] keyword[or] [ literal[string] ]:
keyword[if] identifier[self] . identifier[options] . identifier[verbose] :
identifier[utils] . identifier[info] ( literal[string] . identifier[format] ( identifier[path] ))
identifier[tree] = identifier[fmf] . identifier[Tree] ( identifier[path] )
keyword[for] identifier[node] keyword[in] identifier[tree] . identifier[prune] (
identifier[self] . identifier[options] . identifier[whole] , identifier[self] . identifier[options] . identifier[keys] , identifier[self] . identifier[options] . identifier[names] ,
identifier[self] . identifier[options] . identifier[filters] ):
keyword[if] identifier[brief] :
identifier[show] = identifier[node] . identifier[show] ( identifier[brief] = keyword[True] )
keyword[else] :
identifier[show] = identifier[node] . identifier[show] (
identifier[brief] = keyword[False] ,
identifier[formatting] = identifier[self] . identifier[options] . identifier[formatting] ,
identifier[values] = identifier[self] . identifier[options] . identifier[values] )
keyword[if] identifier[self] . identifier[options] . identifier[debug] :
keyword[for] identifier[source] keyword[in] identifier[node] . identifier[sources] :
identifier[show] += identifier[utils] . identifier[color] ( literal[string] . identifier[format] ( identifier[source] ), literal[string] )
keyword[if] identifier[show] keyword[is] keyword[not] keyword[None] :
identifier[output] . identifier[append] ( identifier[show] )
keyword[if] identifier[brief] keyword[or] identifier[self] . identifier[options] . identifier[formatting] :
identifier[joined] = literal[string] . identifier[join] ( identifier[output] )
keyword[else] :
identifier[joined] = literal[string] . identifier[join] ( identifier[output] )
keyword[try] :
identifier[print] ( identifier[joined] , identifier[end] = literal[string] )
keyword[except] identifier[UnicodeEncodeError] :
identifier[print] ( identifier[joined] . identifier[encode] ( literal[string] ), identifier[end] = literal[string] )
keyword[if] identifier[self] . identifier[options] . identifier[verbose] :
identifier[utils] . identifier[info] ( literal[string] . identifier[format] (
identifier[utils] . identifier[listed] ( identifier[len] ( identifier[output] ), literal[string] )))
identifier[self] . identifier[output] = identifier[joined] | def show(self, brief=False):
""" Show metadata for each path given """
output = []
for path in self.options.paths or ['.']:
if self.options.verbose:
utils.info('Checking {0} for metadata.'.format(path)) # depends on [control=['if'], data=[]]
tree = fmf.Tree(path)
for node in tree.prune(self.options.whole, self.options.keys, self.options.names, self.options.filters):
if brief:
show = node.show(brief=True) # depends on [control=['if'], data=[]]
else:
show = node.show(brief=False, formatting=self.options.formatting, values=self.options.values)
# List source files when in debug mode
if self.options.debug:
for source in node.sources:
show += utils.color('{0}\n'.format(source), 'blue') # depends on [control=['for'], data=['source']] # depends on [control=['if'], data=[]]
if show is not None:
output.append(show) # depends on [control=['if'], data=['show']] # depends on [control=['for'], data=['node']] # depends on [control=['for'], data=['path']]
# Print output and summary
if brief or self.options.formatting:
joined = ''.join(output) # depends on [control=['if'], data=[]]
else:
joined = '\n'.join(output)
try: # pragma: no cover
print(joined, end='') # depends on [control=['try'], data=[]]
except UnicodeEncodeError: # pragma: no cover
print(joined.encode('utf-8'), end='') # depends on [control=['except'], data=[]]
if self.options.verbose:
utils.info('Found {0}.'.format(utils.listed(len(output), 'object'))) # depends on [control=['if'], data=[]]
self.output = joined |
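For context, a hedged sketch of the underlying fmf calls this method wraps (assumes the fmf package and a metadata tree in the current directory):

import fmf

tree = fmf.Tree('.')
for node in tree.prune(False, [], [], []):  # whole, keys, names, filters
    print(node.show(brief=True))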
def to_property(obj, prop=None,
dtype=Ellipsis,
outliers=None, data_range=None, clipped=np.inf,
weights=None, weight_min=0, weight_transform=Ellipsis,
mask=None, valid_range=None, null=np.nan,
transform=None, yield_weight=False):
'''
to_property(obj, prop) yields the given property from obj after performing a set of filters on
the property, as specified by the options. In the property array that is returned, the values
that are considered outliers (data out of some range) are indicated by numpy.inf, and values
that are not in the optionally-specified mask are given the value numpy.nan; these may be
changed with the clipped and null options, respectively.
to_property((obj, prop)) is equivalent to to_property(obj, prop).
The property argument prop may be either specified as a string (a property name in the object)
or as a property vector. The weights option may also be specified this way. Additionally, the
prop arg may be a list such as ['polar_angle', 'eccentricity'] where each element is either a
string or a vector, in which case the result is a matrix of properties. Finally, prop may be
a set of property names, in which case the return value is an itable whose keys are the property
names.
The obj argument may be either a VertexSet object (such as a Mesh or Tesselation) or a mapping
object such as a pimms ITable. If no strings are used to specify properties, it may additionally
be omitted or set to None.
The following options are accepted:
* outliers (default:None) specifies the vertices that should be considered outliers; this
may be either None (no outliers explicitly specified), a list of indices, or a boolean
mask.
* data_range (default:None) specifies the acceptable data range for values in the
property; if None then this paramter is ignored. If specified as a pair of numbers
(min, max), then data that is less than the min or greater than the max is marked as an
outlier (in addition to other explicitly specified outliers). The values np.inf or
-np.inf can be specified to indicate a one-sided range.
* clipped (default:np.inf) specifies the value to be used to mark an out-of-range value in
the returned array.
* mask (default:None) specifies the vertices that should be included in the property
array; values are specified in the mask similarly to the outliers option, except that
mask values are included rather than excluded. The mask takes precedence over the
outliers, in that a null (out-of-mask) value is always marked as null rather than
clipped.
* valid_range (default: None) specifies the range of values that are considered valid;
i.e., values outside of the range are marked as null. Specified the same way as
data_range.
* null (default: np.nan) specifies the value marked in the array as out-of-mask.
* transform (default:None) may optionally provide a function to be passed the array prior
to being returned (after null and clipped values are marked).
      * dtype (default:Ellipsis) specifies the type of the array that should be returned.
Ellipsis indicates that the type of the given property should be used. If None, then a
normal Python array is returned. Otherwise, should be a numpy type such as numpy.real64
or numpy.complex128.
      * weights (default:None) specifies the property or property array that should be
        examined as the weights; if None, no weight-based clipping is performed (and, when
        yield_weight is True, a weight of 1 is yielded for each vertex).
* weight_min (default:0) specifies the value at-or-below which the weight is considered
insignificant and the value is marked as clipped.
      * weight_transform (default:Ellipsis) specifies a function that should be applied to
        the weight array before being used in the function; the default, Ellipsis, simply
        clips weights below 0 up to 0, while None applies no transformation.
* yield_weight (default:False) specifies, if True, that instead of yielding prop, yield
the tuple (prop, weights).
'''
# was an arg given, or is obj a tuple?
if pimms.is_vector(obj) and len(obj) < 4 and prop is None:
kw0 = dict(dtype=dtype, null=null,
outliers=outliers, data_range=data_range,
clipped=clipped, weights=weights,
weight_min=weight_min, weight_transform=weight_transform,
mask=mask, valid_range=valid_range,
transform=transform, yield_weight=yield_weight)
if len(obj) == 2: return to_property(obj[0], obj[1], **kw0)
elif len(obj) == 3: return to_property(obj[0], obj[1], **pimms.merge(kw0, obj[2]))
else: raise ValueError('Bad input vector given to to_property()')
# we could have been given a property alone or a map/vertex-set and a property
if prop is None: raise ValueError('No property given to to_property()')
# if it's a vertex-set, we want to note that and get the map
if isinstance(obj, VertexSet): (vset, obj) = (obj, obj.properties)
elif pimms.is_map(obj): (vset, obj) = (None, obj)
elif obj is None: (vset, obj) = (None, None)
    else: raise ValueError('Data object given to to_property() is neither a vertex-set nor a mapping')
# Now, get the property array, as an array
if pimms.is_str(prop):
if obj is None: raise ValueError('a property name but no data object given to to_property')
else: prop = obj[prop]
if is_set(prop):
def _lazy_prop(kk):
return lambda:to_property(obj, kk,
dtype=dtype, null=null,
outliers=outliers, data_range=data_range,
clipped=clipped, weights=weights,
weight_min=weight_min, weight_transform=weight_transform,
mask=mask, valid_range=valid_range,
transform=transform, yield_weight=yield_weight)
return pimms.itable({k:_lazy_prop(k) for k in prop})
elif (pimms.is_matrix(prop) or
(pimms.is_vector(prop) and all(pimms.is_str(p) or pimms.is_vector(p) for p in prop))):
return np.asarray([to_property(obj, k,
dtype=dtype, null=null,
outliers=outliers, data_range=data_range,
clipped=clipped, weights=weights,
weight_min=weight_min, weight_transform=weight_transform,
mask=mask, valid_range=valid_range,
transform=transform, yield_weight=yield_weight)
for k in prop])
elif not pimms.is_vector(prop):
raise ValueError('prop must be a property name or a vector or a combination of these')
else: prop = np.asarray(prop)
if dtype is Ellipsis: dtype = prop.dtype
# Go ahead and process the weights
if pimms.is_str(weights):
if obj is None: raise ValueError('a weight name but no data object given to to_property')
else: weights = obj[weights]
weights_orig = weights
if weights is None or weight_min is None: low_weight = np.asarray([], dtype=np.int)
else:
if weight_transform is Ellipsis:
weights = np.array(weights, dtype=np.float)
weights[weights < 0] = 0
elif weight_transform is not None:
            weights = weight_transform(np.asarray(weights))
if not pimms.is_vector(weights, 'real'):
raise ValueError('weights must be a real-valued vector or property name for such')
low_weight = (np.asarray([], dtype=np.int) if weight_min is None else
np.where(weights < weight_min)[0])
# we can also process the outliers
outliers = np.asarray([], dtype=np.int) if outliers is None else np.arange(len(prop))[outliers]
outliers = np.union1d(outliers, low_weight) # low-weight vertices are treated as outliers
# make sure we interpret mask correctly...
mask = to_mask(obj, mask, indices=True)
# Now process the property depending on whether the type is numeric or not
if pimms.is_array(prop, 'number'):
if pimms.is_array(prop, 'int'): prop = np.array(prop, dtype=np.float)
else: prop = np.array(prop) # complex or reals can support nan
if not np.isnan(null): prop[prop == null] = np.nan
mask_nan = np.isnan(prop)
mask_inf = np.isinf(prop)
where_nan = np.where(mask_nan)[0]
where_inf = np.where(mask_inf)[0]
where_ok = np.where(np.logical_not(mask_nan | mask_inf))[0]
# look at the valid_range...
if valid_range is None: where_inv = np.asarray([], dtype=np.int)
else: where_inv = where_ok[(prop[where_ok] < valid_range[0]) |
(prop[where_ok] > valid_range[1])]
where_nan = np.union1d(where_nan, where_inv)
mask = np.setdiff1d(mask, where_nan)
# Find the outliers: values specified as outliers or inf values; will build this as we go
outliers = np.intersect1d(outliers, mask) # outliers not in the mask don't matter anyway
# If there's a data range argument, deal with how it affects outliers
if data_range is not None:
if not pimms.is_vector(data_range): data_range = (0, data_range)
mii = mask[(prop[mask] < data_range[0]) | (prop[mask] > data_range[1])]
outliers = np.union1d(outliers, mii)
# no matter what, trim out the infinite values (even if inf was in the data range)
outliers = np.union1d(outliers, mask[np.isinf(prop[mask])])
# Okay, mark everything in the prop:
unmask = np.setdiff1d(np.arange(len(prop), dtype=np.int), mask)
if len(outliers) > 0: prop[outliers] = clipped
if len(unmask) > 0: prop[unmask] = null
prop = prop.astype(dtype)
elif len(mask) < len(prop) or len(outliers) > 0:
# not a number array; we cannot do fancy trimming of values
tmp = np.full(len(prop), null, dtype=dtype)
tmp[mask] = prop[mask]
        if len(outliers) > 0: tmp[outliers] = clipped
        prop = tmp
if yield_weight:
if weights is None or not pimms.is_vector(weights): weights = np.ones(len(prop))
else: weights = np.array(weights, dtype=np.float)
weights[where_nan] = 0
weights[outliers] = 0
# transform?
if transform: prop = transform(prop)
# That's it, just return
return (prop, weights) if yield_weight else prop | def function[to_property, parameter[obj, prop, dtype, outliers, data_range, clipped, weights, weight_min, weight_transform, mask, valid_range, null, transform, yield_weight]]:
constant[
to_property(obj, prop) yields the given property from obj after performing a set of filters on
the property, as specified by the options. In the property array that is returned, the values
that are considered outliers (data out of some range) are indicated by numpy.inf, and values
that are not in the optionally-specified mask are given the value numpy.nan; these may be
changed with the clipped and null options, respectively.
to_property((obj, prop)) is equivalent to to_property(obj, prop).
The property argument prop may be either specified as a string (a property name in the object)
or as a property vector. The weights option may also be specified this way. Additionally, the
prop arg may be a list such as ['polar_angle', 'eccentricity'] where each element is either a
string or a vector, in which case the result is a matrix of properties. Finally, prop may be
a set of property names, in which case the return value is an itable whose keys are the property
names.
The obj argument may be either a VertexSet object (such as a Mesh or Tesselation) or a mapping
object such as a pimms ITable. If no strings are used to specify properties, it may additionally
be omitted or set to None.
The following options are accepted:
* outliers (default:None) specifies the vertices that should be considered outliers; this
may be either None (no outliers explicitly specified), a list of indices, or a boolean
mask.
* data_range (default:None) specifies the acceptable data range for values in the
property; if None then this paramter is ignored. If specified as a pair of numbers
(min, max), then data that is less than the min or greater than the max is marked as an
outlier (in addition to other explicitly specified outliers). The values np.inf or
-np.inf can be specified to indicate a one-sided range.
* clipped (default:np.inf) specifies the value to be used to mark an out-of-range value in
the returned array.
* mask (default:None) specifies the vertices that should be included in the property
array; values are specified in the mask similarly to the outliers option, except that
mask values are included rather than excluded. The mask takes precedence over the
outliers, in that a null (out-of-mask) value is always marked as null rather than
clipped.
* valid_range (default: None) specifies the range of values that are considered valid;
i.e., values outside of the range are marked as null. Specified the same way as
data_range.
* null (default: np.nan) specifies the value marked in the array as out-of-mask.
* transform (default:None) may optionally provide a function to be passed the array prior
to being returned (after null and clipped values are marked).
      * dtype (default:Ellipsis) specifies the type of the array that should be returned.
        Ellipsis indicates that the type of the given property should be used. If None, then a
        normal Python array is returned. Otherwise, should be a numpy type such as numpy.float64
        or numpy.complex128.
      * weights (default:None) specifies the property or property array that should be
        examined as the weights.
      * weight_min (default:0) specifies the value at-or-below which the weight is considered
        insignificant and the value is marked as clipped.
      * weight_transform (default:Ellipsis) specifies a function that should be applied to the
        weight array before being used in the function. The default, Ellipsis, simply clips
        weights that are less than 0 such that they are equal to 0. None specifies that no
        transformation should be applied.
* yield_weight (default:False) specifies, if True, that instead of yielding prop, yield
the tuple (prop, weights).
]
if <ast.BoolOp object at 0x7da18dc07e80> begin[:]
variable[kw0] assign[=] call[name[dict], parameter[]]
if compare[call[name[len], parameter[name[obj]]] equal[==] constant[2]] begin[:]
return[call[name[to_property], parameter[call[name[obj]][constant[0]], call[name[obj]][constant[1]]]]]
if compare[name[prop] is constant[None]] begin[:]
<ast.Raise object at 0x7da18dc055a0>
if call[name[isinstance], parameter[name[obj], name[VertexSet]]] begin[:]
<ast.Tuple object at 0x7da18dc05db0> assign[=] tuple[[<ast.Name object at 0x7da18dc05720>, <ast.Attribute object at 0x7da18dc05450>]]
if call[name[pimms].is_str, parameter[name[prop]]] begin[:]
if compare[name[obj] is constant[None]] begin[:]
<ast.Raise object at 0x7da18dc06d40>
if call[name[is_set], parameter[name[prop]]] begin[:]
def function[_lazy_prop, parameter[kk]]:
return[<ast.Lambda object at 0x7da18dc05780>]
return[call[name[pimms].itable, parameter[<ast.DictComp object at 0x7da18dc05ae0>]]]
if compare[name[dtype] is name[Ellipsis]] begin[:]
variable[dtype] assign[=] name[prop].dtype
if call[name[pimms].is_str, parameter[name[weights]]] begin[:]
if compare[name[obj] is constant[None]] begin[:]
<ast.Raise object at 0x7da18ede46d0>
variable[weights_orig] assign[=] name[weights]
if <ast.BoolOp object at 0x7da18ede7100> begin[:]
variable[low_weight] assign[=] call[name[np].asarray, parameter[list[[]]]]
variable[outliers] assign[=] <ast.IfExp object at 0x7da18ede5e40>
variable[outliers] assign[=] call[name[np].union1d, parameter[name[outliers], name[low_weight]]]
variable[mask] assign[=] call[name[to_mask], parameter[name[obj], name[mask]]]
if call[name[pimms].is_array, parameter[name[prop], constant[number]]] begin[:]
if call[name[pimms].is_array, parameter[name[prop], constant[int]]] begin[:]
variable[prop] assign[=] call[name[np].array, parameter[name[prop]]]
if <ast.UnaryOp object at 0x7da18ede6710> begin[:]
call[name[prop]][compare[name[prop] equal[==] name[null]]] assign[=] name[np].nan
variable[mask_nan] assign[=] call[name[np].isnan, parameter[name[prop]]]
variable[mask_inf] assign[=] call[name[np].isinf, parameter[name[prop]]]
variable[where_nan] assign[=] call[call[name[np].where, parameter[name[mask_nan]]]][constant[0]]
variable[where_inf] assign[=] call[call[name[np].where, parameter[name[mask_inf]]]][constant[0]]
variable[where_ok] assign[=] call[call[name[np].where, parameter[call[name[np].logical_not, parameter[binary_operation[name[mask_nan] <ast.BitOr object at 0x7da2590d6aa0> name[mask_inf]]]]]]][constant[0]]
if compare[name[valid_range] is constant[None]] begin[:]
variable[where_inv] assign[=] call[name[np].asarray, parameter[list[[]]]]
variable[where_nan] assign[=] call[name[np].union1d, parameter[name[where_nan], name[where_inv]]]
variable[mask] assign[=] call[name[np].setdiff1d, parameter[name[mask], name[where_nan]]]
variable[outliers] assign[=] call[name[np].intersect1d, parameter[name[outliers], name[mask]]]
if compare[name[data_range] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da207f98ac0> begin[:]
variable[data_range] assign[=] tuple[[<ast.Constant object at 0x7da207f99a20>, <ast.Name object at 0x7da207f9a0b0>]]
variable[mii] assign[=] call[name[mask]][binary_operation[compare[call[name[prop]][name[mask]] less[<] call[name[data_range]][constant[0]]] <ast.BitOr object at 0x7da2590d6aa0> compare[call[name[prop]][name[mask]] greater[>] call[name[data_range]][constant[1]]]]]
variable[outliers] assign[=] call[name[np].union1d, parameter[name[outliers], name[mii]]]
variable[outliers] assign[=] call[name[np].union1d, parameter[name[outliers], call[name[mask]][call[name[np].isinf, parameter[call[name[prop]][name[mask]]]]]]]
variable[unmask] assign[=] call[name[np].setdiff1d, parameter[call[name[np].arange, parameter[call[name[len], parameter[name[prop]]]]], name[mask]]]
if compare[call[name[len], parameter[name[outliers]]] greater[>] constant[0]] begin[:]
call[name[prop]][name[outliers]] assign[=] name[clipped]
if compare[call[name[len], parameter[name[unmask]]] greater[>] constant[0]] begin[:]
call[name[prop]][name[unmask]] assign[=] name[null]
variable[prop] assign[=] call[name[prop].astype, parameter[name[dtype]]]
if name[yield_weight] begin[:]
if <ast.BoolOp object at 0x7da207f99c90> begin[:]
variable[weights] assign[=] call[name[np].ones, parameter[call[name[len], parameter[name[prop]]]]]
call[name[weights]][name[where_nan]] assign[=] constant[0]
call[name[weights]][name[outliers]] assign[=] constant[0]
if name[transform] begin[:]
variable[prop] assign[=] call[name[transform], parameter[name[prop]]]
return[<ast.IfExp object at 0x7da207f9a0e0>] | keyword[def] identifier[to_property] ( identifier[obj] , identifier[prop] = keyword[None] ,
identifier[dtype] = identifier[Ellipsis] ,
identifier[outliers] = keyword[None] , identifier[data_range] = keyword[None] , identifier[clipped] = identifier[np] . identifier[inf] ,
identifier[weights] = keyword[None] , identifier[weight_min] = literal[int] , identifier[weight_transform] = identifier[Ellipsis] ,
identifier[mask] = keyword[None] , identifier[valid_range] = keyword[None] , identifier[null] = identifier[np] . identifier[nan] ,
identifier[transform] = keyword[None] , identifier[yield_weight] = keyword[False] ):
literal[string]
keyword[if] identifier[pimms] . identifier[is_vector] ( identifier[obj] ) keyword[and] identifier[len] ( identifier[obj] )< literal[int] keyword[and] identifier[prop] keyword[is] keyword[None] :
identifier[kw0] = identifier[dict] ( identifier[dtype] = identifier[dtype] , identifier[null] = identifier[null] ,
identifier[outliers] = identifier[outliers] , identifier[data_range] = identifier[data_range] ,
identifier[clipped] = identifier[clipped] , identifier[weights] = identifier[weights] ,
identifier[weight_min] = identifier[weight_min] , identifier[weight_transform] = identifier[weight_transform] ,
identifier[mask] = identifier[mask] , identifier[valid_range] = identifier[valid_range] ,
identifier[transform] = identifier[transform] , identifier[yield_weight] = identifier[yield_weight] )
keyword[if] identifier[len] ( identifier[obj] )== literal[int] : keyword[return] identifier[to_property] ( identifier[obj] [ literal[int] ], identifier[obj] [ literal[int] ],** identifier[kw0] )
keyword[elif] identifier[len] ( identifier[obj] )== literal[int] : keyword[return] identifier[to_property] ( identifier[obj] [ literal[int] ], identifier[obj] [ literal[int] ],** identifier[pimms] . identifier[merge] ( identifier[kw0] , identifier[obj] [ literal[int] ]))
keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[prop] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[VertexSet] ):( identifier[vset] , identifier[obj] )=( identifier[obj] , identifier[obj] . identifier[properties] )
keyword[elif] identifier[pimms] . identifier[is_map] ( identifier[obj] ):( identifier[vset] , identifier[obj] )=( keyword[None] , identifier[obj] )
keyword[elif] identifier[obj] keyword[is] keyword[None] :( identifier[vset] , identifier[obj] )=( keyword[None] , keyword[None] )
keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[pimms] . identifier[is_str] ( identifier[prop] ):
keyword[if] identifier[obj] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] : identifier[prop] = identifier[obj] [ identifier[prop] ]
keyword[if] identifier[is_set] ( identifier[prop] ):
keyword[def] identifier[_lazy_prop] ( identifier[kk] ):
keyword[return] keyword[lambda] : identifier[to_property] ( identifier[obj] , identifier[kk] ,
identifier[dtype] = identifier[dtype] , identifier[null] = identifier[null] ,
identifier[outliers] = identifier[outliers] , identifier[data_range] = identifier[data_range] ,
identifier[clipped] = identifier[clipped] , identifier[weights] = identifier[weights] ,
identifier[weight_min] = identifier[weight_min] , identifier[weight_transform] = identifier[weight_transform] ,
identifier[mask] = identifier[mask] , identifier[valid_range] = identifier[valid_range] ,
identifier[transform] = identifier[transform] , identifier[yield_weight] = identifier[yield_weight] )
keyword[return] identifier[pimms] . identifier[itable] ({ identifier[k] : identifier[_lazy_prop] ( identifier[k] ) keyword[for] identifier[k] keyword[in] identifier[prop] })
keyword[elif] ( identifier[pimms] . identifier[is_matrix] ( identifier[prop] ) keyword[or]
( identifier[pimms] . identifier[is_vector] ( identifier[prop] ) keyword[and] identifier[all] ( identifier[pimms] . identifier[is_str] ( identifier[p] ) keyword[or] identifier[pimms] . identifier[is_vector] ( identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[prop] ))):
keyword[return] identifier[np] . identifier[asarray] ([ identifier[to_property] ( identifier[obj] , identifier[k] ,
identifier[dtype] = identifier[dtype] , identifier[null] = identifier[null] ,
identifier[outliers] = identifier[outliers] , identifier[data_range] = identifier[data_range] ,
identifier[clipped] = identifier[clipped] , identifier[weights] = identifier[weights] ,
identifier[weight_min] = identifier[weight_min] , identifier[weight_transform] = identifier[weight_transform] ,
identifier[mask] = identifier[mask] , identifier[valid_range] = identifier[valid_range] ,
identifier[transform] = identifier[transform] , identifier[yield_weight] = identifier[yield_weight] )
keyword[for] identifier[k] keyword[in] identifier[prop] ])
keyword[elif] keyword[not] identifier[pimms] . identifier[is_vector] ( identifier[prop] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] : identifier[prop] = identifier[np] . identifier[asarray] ( identifier[prop] )
keyword[if] identifier[dtype] keyword[is] identifier[Ellipsis] : identifier[dtype] = identifier[prop] . identifier[dtype]
keyword[if] identifier[pimms] . identifier[is_str] ( identifier[weights] ):
keyword[if] identifier[obj] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] : identifier[weights] = identifier[obj] [ identifier[weights] ]
identifier[weights_orig] = identifier[weights]
keyword[if] identifier[weights] keyword[is] keyword[None] keyword[or] identifier[weight_min] keyword[is] keyword[None] : identifier[low_weight] = identifier[np] . identifier[asarray] ([], identifier[dtype] = identifier[np] . identifier[int] )
keyword[else] :
keyword[if] identifier[weight_transform] keyword[is] identifier[Ellipsis] :
identifier[weights] = identifier[np] . identifier[array] ( identifier[weights] , identifier[dtype] = identifier[np] . identifier[float] )
identifier[weights] [ identifier[weights] < literal[int] ]= literal[int]
keyword[elif] identifier[weight_transform] keyword[is] keyword[not] keyword[None] :
identifier[weights] = identifier[weight_transform] ( identifier[np] . identifier[asarray] ( identifier[weights] ))
keyword[if] keyword[not] identifier[pimms] . identifier[is_vector] ( identifier[weights] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[low_weight] =( identifier[np] . identifier[asarray] ([], identifier[dtype] = identifier[np] . identifier[int] ) keyword[if] identifier[weight_min] keyword[is] keyword[None] keyword[else]
identifier[np] . identifier[where] ( identifier[weights] < identifier[weight_min] )[ literal[int] ])
identifier[outliers] = identifier[np] . identifier[asarray] ([], identifier[dtype] = identifier[np] . identifier[int] ) keyword[if] identifier[outliers] keyword[is] keyword[None] keyword[else] identifier[np] . identifier[arange] ( identifier[len] ( identifier[prop] ))[ identifier[outliers] ]
identifier[outliers] = identifier[np] . identifier[union1d] ( identifier[outliers] , identifier[low_weight] )
identifier[mask] = identifier[to_mask] ( identifier[obj] , identifier[mask] , identifier[indices] = keyword[True] )
keyword[if] identifier[pimms] . identifier[is_array] ( identifier[prop] , literal[string] ):
keyword[if] identifier[pimms] . identifier[is_array] ( identifier[prop] , literal[string] ): identifier[prop] = identifier[np] . identifier[array] ( identifier[prop] , identifier[dtype] = identifier[np] . identifier[float] )
keyword[else] : identifier[prop] = identifier[np] . identifier[array] ( identifier[prop] )
keyword[if] keyword[not] identifier[np] . identifier[isnan] ( identifier[null] ): identifier[prop] [ identifier[prop] == identifier[null] ]= identifier[np] . identifier[nan]
identifier[mask_nan] = identifier[np] . identifier[isnan] ( identifier[prop] )
identifier[mask_inf] = identifier[np] . identifier[isinf] ( identifier[prop] )
identifier[where_nan] = identifier[np] . identifier[where] ( identifier[mask_nan] )[ literal[int] ]
identifier[where_inf] = identifier[np] . identifier[where] ( identifier[mask_inf] )[ literal[int] ]
identifier[where_ok] = identifier[np] . identifier[where] ( identifier[np] . identifier[logical_not] ( identifier[mask_nan] | identifier[mask_inf] ))[ literal[int] ]
keyword[if] identifier[valid_range] keyword[is] keyword[None] : identifier[where_inv] = identifier[np] . identifier[asarray] ([], identifier[dtype] = identifier[np] . identifier[int] )
keyword[else] : identifier[where_inv] = identifier[where_ok] [( identifier[prop] [ identifier[where_ok] ]< identifier[valid_range] [ literal[int] ])|
( identifier[prop] [ identifier[where_ok] ]> identifier[valid_range] [ literal[int] ])]
identifier[where_nan] = identifier[np] . identifier[union1d] ( identifier[where_nan] , identifier[where_inv] )
identifier[mask] = identifier[np] . identifier[setdiff1d] ( identifier[mask] , identifier[where_nan] )
identifier[outliers] = identifier[np] . identifier[intersect1d] ( identifier[outliers] , identifier[mask] )
keyword[if] identifier[data_range] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[pimms] . identifier[is_vector] ( identifier[data_range] ): identifier[data_range] =( literal[int] , identifier[data_range] )
identifier[mii] = identifier[mask] [( identifier[prop] [ identifier[mask] ]< identifier[data_range] [ literal[int] ])|( identifier[prop] [ identifier[mask] ]> identifier[data_range] [ literal[int] ])]
identifier[outliers] = identifier[np] . identifier[union1d] ( identifier[outliers] , identifier[mii] )
identifier[outliers] = identifier[np] . identifier[union1d] ( identifier[outliers] , identifier[mask] [ identifier[np] . identifier[isinf] ( identifier[prop] [ identifier[mask] ])])
identifier[unmask] = identifier[np] . identifier[setdiff1d] ( identifier[np] . identifier[arange] ( identifier[len] ( identifier[prop] ), identifier[dtype] = identifier[np] . identifier[int] ), identifier[mask] )
keyword[if] identifier[len] ( identifier[outliers] )> literal[int] : identifier[prop] [ identifier[outliers] ]= identifier[clipped]
keyword[if] identifier[len] ( identifier[unmask] )> literal[int] : identifier[prop] [ identifier[unmask] ]= identifier[null]
identifier[prop] = identifier[prop] . identifier[astype] ( identifier[dtype] )
keyword[elif] identifier[len] ( identifier[mask] )< identifier[len] ( identifier[prop] ) keyword[or] identifier[len] ( identifier[outliers] )> literal[int] :
identifier[tmp] = identifier[np] . identifier[full] ( identifier[len] ( identifier[prop] ), identifier[null] , identifier[dtype] = identifier[dtype] )
identifier[tmp] [ identifier[mask] ]= identifier[prop] [ identifier[mask] ]
keyword[if] identifier[len] ( identifier[outliers] )> literal[int] : identifier[tmp] [ identifier[outliers] ]= identifier[clipped]
keyword[if] identifier[yield_weight] :
keyword[if] identifier[weights] keyword[is] keyword[None] keyword[or] keyword[not] identifier[pimms] . identifier[is_vector] ( identifier[weights] ): identifier[weights] = identifier[np] . identifier[ones] ( identifier[len] ( identifier[prop] ))
keyword[else] : identifier[weights] = identifier[np] . identifier[array] ( identifier[weights] , identifier[dtype] = identifier[np] . identifier[float] )
identifier[weights] [ identifier[where_nan] ]= literal[int]
identifier[weights] [ identifier[outliers] ]= literal[int]
keyword[if] identifier[transform] : identifier[prop] = identifier[transform] ( identifier[prop] )
keyword[return] ( identifier[prop] , identifier[weights] ) keyword[if] identifier[yield_weight] keyword[else] identifier[prop] | def to_property(obj, prop=None, dtype=Ellipsis, outliers=None, data_range=None, clipped=np.inf, weights=None, weight_min=0, weight_transform=Ellipsis, mask=None, valid_range=None, null=np.nan, transform=None, yield_weight=False):
"""
to_property(obj, prop) yields the given property from obj after performing a set of filters on
the property, as specified by the options. In the property array that is returned, the values
that are considered outliers (data out of some range) are indicated by numpy.inf, and values
that are not in the optionally-specified mask are given the value numpy.nan; these may be
changed with the clipped and null options, respectively.
to_property((obj, prop)) is equivalent to to_property(obj, prop).
The property argument prop may be either specified as a string (a property name in the object)
or as a property vector. The weights option may also be specified this way. Additionally, the
prop arg may be a list such as ['polar_angle', 'eccentricity'] where each element is either a
string or a vector, in which case the result is a matrix of properties. Finally, prop may be
a set of property names, in which case the return value is an itable whose keys are the property
names.
The obj argument may be either a VertexSet object (such as a Mesh or Tesselation) or a mapping
object such as a pimms ITable. If no strings are used to specify properties, it may additionally
be omitted or set to None.
The following options are accepted:
* outliers (default:None) specifies the vertices that should be considered outliers; this
may be either None (no outliers explicitly specified), a list of indices, or a boolean
mask.
* data_range (default:None) specifies the acceptable data range for values in the
property; if None then this paramter is ignored. If specified as a pair of numbers
(min, max), then data that is less than the min or greater than the max is marked as an
outlier (in addition to other explicitly specified outliers). The values np.inf or
-np.inf can be specified to indicate a one-sided range.
* clipped (default:np.inf) specifies the value to be used to mark an out-of-range value in
the returned array.
* mask (default:None) specifies the vertices that should be included in the property
array; values are specified in the mask similarly to the outliers option, except that
mask values are included rather than excluded. The mask takes precedence over the
outliers, in that a null (out-of-mask) value is always marked as null rather than
clipped.
* valid_range (default: None) specifies the range of values that are considered valid;
i.e., values outside of the range are marked as null. Specified the same way as
data_range.
* null (default: np.nan) specifies the value marked in the array as out-of-mask.
* transform (default:None) may optionally provide a function to be passed the array prior
to being returned (after null and clipped values are marked).
      * dtype (default:Ellipsis) specifies the type of the array that should be returned.
        Ellipsis indicates that the type of the given property should be used. If None, then a
        normal Python array is returned. Otherwise, should be a numpy type such as numpy.float64
        or numpy.complex128.
      * weights (default:None) specifies the property or property array that should be
        examined as the weights.
      * weight_min (default:0) specifies the value at-or-below which the weight is considered
        insignificant and the value is marked as clipped.
      * weight_transform (default:Ellipsis) specifies a function that should be applied to the
        weight array before being used in the function. The default, Ellipsis, simply clips
        weights that are less than 0 such that they are equal to 0. None specifies that no
        transformation should be applied.
* yield_weight (default:False) specifies, if True, that instead of yielding prop, yield
the tuple (prop, weights).
"""
# was an arg given, or is obj a tuple?
if pimms.is_vector(obj) and len(obj) < 4 and (prop is None):
kw0 = dict(dtype=dtype, null=null, outliers=outliers, data_range=data_range, clipped=clipped, weights=weights, weight_min=weight_min, weight_transform=weight_transform, mask=mask, valid_range=valid_range, transform=transform, yield_weight=yield_weight)
if len(obj) == 2:
return to_property(obj[0], obj[1], **kw0) # depends on [control=['if'], data=[]]
elif len(obj) == 3:
return to_property(obj[0], obj[1], **pimms.merge(kw0, obj[2])) # depends on [control=['if'], data=[]]
else:
raise ValueError('Bad input vector given to to_property()') # depends on [control=['if'], data=[]]
# we could have been given a property alone or a map/vertex-set and a property
if prop is None:
raise ValueError('No property given to to_property()') # depends on [control=['if'], data=[]]
# if it's a vertex-set, we want to note that and get the map
if isinstance(obj, VertexSet):
(vset, obj) = (obj, obj.properties) # depends on [control=['if'], data=[]]
elif pimms.is_map(obj):
(vset, obj) = (None, obj) # depends on [control=['if'], data=[]]
elif obj is None:
(vset, obj) = (None, None) # depends on [control=['if'], data=['obj']]
else:
        raise ValueError('Data object given to to_property() is neither a vertex-set nor a mapping')
# Now, get the property array, as an array
if pimms.is_str(prop):
if obj is None:
raise ValueError('a property name but no data object given to to_property') # depends on [control=['if'], data=[]]
else:
prop = obj[prop] # depends on [control=['if'], data=[]]
if is_set(prop):
def _lazy_prop(kk):
return lambda : to_property(obj, kk, dtype=dtype, null=null, outliers=outliers, data_range=data_range, clipped=clipped, weights=weights, weight_min=weight_min, weight_transform=weight_transform, mask=mask, valid_range=valid_range, transform=transform, yield_weight=yield_weight)
return pimms.itable({k: _lazy_prop(k) for k in prop}) # depends on [control=['if'], data=[]]
elif pimms.is_matrix(prop) or (pimms.is_vector(prop) and all((pimms.is_str(p) or pimms.is_vector(p) for p in prop))):
return np.asarray([to_property(obj, k, dtype=dtype, null=null, outliers=outliers, data_range=data_range, clipped=clipped, weights=weights, weight_min=weight_min, weight_transform=weight_transform, mask=mask, valid_range=valid_range, transform=transform, yield_weight=yield_weight) for k in prop]) # depends on [control=['if'], data=[]]
elif not pimms.is_vector(prop):
raise ValueError('prop must be a property name or a vector or a combination of these') # depends on [control=['if'], data=[]]
else:
prop = np.asarray(prop)
if dtype is Ellipsis:
dtype = prop.dtype # depends on [control=['if'], data=['dtype']]
# Go ahead and process the weights
if pimms.is_str(weights):
if obj is None:
raise ValueError('a weight name but no data object given to to_property') # depends on [control=['if'], data=[]]
else:
weights = obj[weights] # depends on [control=['if'], data=[]]
weights_orig = weights
if weights is None or weight_min is None:
low_weight = np.asarray([], dtype=np.int) # depends on [control=['if'], data=[]]
else:
if weight_transform is Ellipsis:
weights = np.array(weights, dtype=np.float)
weights[weights < 0] = 0 # depends on [control=['if'], data=[]]
elif weight_transform is not None:
            weights = weight_transform(np.asarray(weights)) # depends on [control=['if'], data=['weight_transform']]
if not pimms.is_vector(weights, 'real'):
raise ValueError('weights must be a real-valued vector or property name for such') # depends on [control=['if'], data=[]]
low_weight = np.asarray([], dtype=np.int) if weight_min is None else np.where(weights < weight_min)[0]
# we can also process the outliers
outliers = np.asarray([], dtype=np.int) if outliers is None else np.arange(len(prop))[outliers]
outliers = np.union1d(outliers, low_weight) # low-weight vertices are treated as outliers
# make sure we interpret mask correctly...
mask = to_mask(obj, mask, indices=True)
# Now process the property depending on whether the type is numeric or not
if pimms.is_array(prop, 'number'):
if pimms.is_array(prop, 'int'):
prop = np.array(prop, dtype=np.float) # depends on [control=['if'], data=[]]
else:
prop = np.array(prop) # complex or reals can support nan
if not np.isnan(null):
prop[prop == null] = np.nan # depends on [control=['if'], data=[]]
mask_nan = np.isnan(prop)
mask_inf = np.isinf(prop)
where_nan = np.where(mask_nan)[0]
where_inf = np.where(mask_inf)[0]
where_ok = np.where(np.logical_not(mask_nan | mask_inf))[0]
# look at the valid_range...
if valid_range is None:
where_inv = np.asarray([], dtype=np.int) # depends on [control=['if'], data=[]]
else:
where_inv = where_ok[(prop[where_ok] < valid_range[0]) | (prop[where_ok] > valid_range[1])]
where_nan = np.union1d(where_nan, where_inv)
mask = np.setdiff1d(mask, where_nan)
# Find the outliers: values specified as outliers or inf values; will build this as we go
outliers = np.intersect1d(outliers, mask) # outliers not in the mask don't matter anyway
# If there's a data range argument, deal with how it affects outliers
if data_range is not None:
if not pimms.is_vector(data_range):
data_range = (0, data_range) # depends on [control=['if'], data=[]]
mii = mask[(prop[mask] < data_range[0]) | (prop[mask] > data_range[1])]
outliers = np.union1d(outliers, mii) # depends on [control=['if'], data=['data_range']]
# no matter what, trim out the infinite values (even if inf was in the data range)
outliers = np.union1d(outliers, mask[np.isinf(prop[mask])])
# Okay, mark everything in the prop:
unmask = np.setdiff1d(np.arange(len(prop), dtype=np.int), mask)
if len(outliers) > 0:
prop[outliers] = clipped # depends on [control=['if'], data=[]]
if len(unmask) > 0:
prop[unmask] = null # depends on [control=['if'], data=[]]
prop = prop.astype(dtype) # depends on [control=['if'], data=[]]
elif len(mask) < len(prop) or len(outliers) > 0:
# not a number array; we cannot do fancy trimming of values
tmp = np.full(len(prop), null, dtype=dtype)
tmp[mask] = prop[mask]
if len(outliers) > 0:
            tmp[outliers] = clipped # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
        prop = tmp
if yield_weight:
if weights is None or not pimms.is_vector(weights):
weights = np.ones(len(prop)) # depends on [control=['if'], data=[]]
else:
weights = np.array(weights, dtype=np.float)
weights[where_nan] = 0
weights[outliers] = 0 # depends on [control=['if'], data=[]]
# transform?
if transform:
prop = transform(prop) # depends on [control=['if'], data=[]]
# That's it, just return
return (prop, weights) if yield_weight else prop |
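A minimal sketch of the outlier-clipping and mask-nulling behavior that to_property documents above, assuming only numpy; mark_property and its arguments are hypothetical stand-ins, not part of the pimms/VertexSet API:

import numpy as np

def mark_property(prop, mask, data_range, clipped=np.inf, null=np.nan):
    # out-of-range entries become `clipped`; out-of-mask entries become `null`
    prop = np.array(prop, dtype=np.float64)
    lo, hi = data_range
    out = np.where((prop < lo) | (prop > hi))[0]
    unmask = np.setdiff1d(np.arange(len(prop)), mask)
    prop[out] = clipped
    prop[unmask] = null   # written last, so null takes precedence over clipped
    return prop

print(mark_property([1.0, 50.0, 3.0, 7.0], mask=[0, 1, 2], data_range=(0, 10)))
# [  1.  inf   3.  nan]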
def _bson_to_dict(data, opts):
"""Decode a BSON string to document_class."""
try:
if _raw_document_class(opts.document_class):
return opts.document_class(data, opts)
_, end = _get_object_size(data, 0, len(data))
return _elements_to_dict(data, 4, end, opts)
except InvalidBSON:
raise
except Exception:
# Change exception type to InvalidBSON but preserve traceback.
_, exc_value, exc_tb = sys.exc_info()
reraise(InvalidBSON, exc_value, exc_tb) | def function[_bson_to_dict, parameter[data, opts]]:
constant[Decode a BSON string to document_class.]
<ast.Try object at 0x7da18bc71f60> | keyword[def] identifier[_bson_to_dict] ( identifier[data] , identifier[opts] ):
literal[string]
keyword[try] :
keyword[if] identifier[_raw_document_class] ( identifier[opts] . identifier[document_class] ):
keyword[return] identifier[opts] . identifier[document_class] ( identifier[data] , identifier[opts] )
identifier[_] , identifier[end] = identifier[_get_object_size] ( identifier[data] , literal[int] , identifier[len] ( identifier[data] ))
keyword[return] identifier[_elements_to_dict] ( identifier[data] , literal[int] , identifier[end] , identifier[opts] )
keyword[except] identifier[InvalidBSON] :
keyword[raise]
keyword[except] identifier[Exception] :
identifier[_] , identifier[exc_value] , identifier[exc_tb] = identifier[sys] . identifier[exc_info] ()
identifier[reraise] ( identifier[InvalidBSON] , identifier[exc_value] , identifier[exc_tb] ) | def _bson_to_dict(data, opts):
"""Decode a BSON string to document_class."""
try:
if _raw_document_class(opts.document_class):
return opts.document_class(data, opts) # depends on [control=['if'], data=[]]
(_, end) = _get_object_size(data, 0, len(data))
return _elements_to_dict(data, 4, end, opts) # depends on [control=['try'], data=[]]
except InvalidBSON:
raise # depends on [control=['except'], data=[]]
except Exception:
# Change exception type to InvalidBSON but preserve traceback.
(_, exc_value, exc_tb) = sys.exc_info()
reraise(InvalidBSON, exc_value, exc_tb) # depends on [control=['except'], data=[]] |
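For context, _bson_to_dict is an internal helper; a plausible round-trip through the public bson API (assuming a reasonably recent pymongo is installed) looks like:

import bson

raw = bson.encode({"name": "test", "n": 3})  # one BSON document as bytes
doc = bson.decode(raw)                       # decoding goes through _bson_to_dict
print(doc)                                   # {'name': 'test', 'n': 3}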
def get(self, property):
"""
Gets the value of the given property. First checks client config properties, then environment variables
and lastly fall backs to the default value of the property.
:param property: (:class:`~hazelcast.config.ClientProperty`), Property to get value from
:return: Value of the given property
"""
return self._properties.get(property.name) or os.getenv(property.name) or property.default_value | def function[get, parameter[self, property]]:
constant[
Gets the value of the given property. First checks client config properties, then environment variables
and lastly fall backs to the default value of the property.
:param property: (:class:`~hazelcast.config.ClientProperty`), Property to get value from
:return: Value of the given property
]
return[<ast.BoolOp object at 0x7da1b16a5e10>] | keyword[def] identifier[get] ( identifier[self] , identifier[property] ):
literal[string]
keyword[return] identifier[self] . identifier[_properties] . identifier[get] ( identifier[property] . identifier[name] ) keyword[or] identifier[os] . identifier[getenv] ( identifier[property] . identifier[name] ) keyword[or] identifier[property] . identifier[default_value] | def get(self, property):
"""
Gets the value of the given property. First checks client config properties, then environment variables
and lastly fall backs to the default value of the property.
:param property: (:class:`~hazelcast.config.ClientProperty`), Property to get value from
:return: Value of the given property
"""
return self._properties.get(property.name) or os.getenv(property.name) or property.default_value |
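A self-contained mimic of the lookup order above (client config, then environment, then default); ClientProperty here is a hypothetical stand-in for the hazelcast class:

import os
from collections import namedtuple

ClientProperty = namedtuple("ClientProperty", ["name", "default_value"])
prop = ClientProperty("hazelcast.invocation.timeout.seconds", 120)
properties = {}                           # nothing set in the client config
os.environ[prop.name] = "30"              # but the environment defines it
value = properties.get(prop.name) or os.getenv(prop.name) or prop.default_value
print(value)                              # '30' -- note env values come back as strings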
def slave_open(tty_name):
"""slave_open(tty_name) -> slave_fd
Open the pty slave and acquire the controlling terminal, returning
opened filedescriptor.
Deprecated, use openpty() instead."""
result = os.open(tty_name, os.O_RDWR)
try:
from fcntl import ioctl, I_PUSH
except ImportError:
return result
try:
ioctl(result, I_PUSH, "ptem")
ioctl(result, I_PUSH, "ldterm")
except OSError:
pass
return result | def function[slave_open, parameter[tty_name]]:
constant[slave_open(tty_name) -> slave_fd
Open the pty slave and acquire the controlling terminal, returning
opened filedescriptor.
Deprecated, use openpty() instead.]
variable[result] assign[=] call[name[os].open, parameter[name[tty_name], name[os].O_RDWR]]
<ast.Try object at 0x7da20c76fa30>
<ast.Try object at 0x7da20c76fbe0>
return[name[result]] | keyword[def] identifier[slave_open] ( identifier[tty_name] ):
literal[string]
identifier[result] = identifier[os] . identifier[open] ( identifier[tty_name] , identifier[os] . identifier[O_RDWR] )
keyword[try] :
keyword[from] identifier[fcntl] keyword[import] identifier[ioctl] , identifier[I_PUSH]
keyword[except] identifier[ImportError] :
keyword[return] identifier[result]
keyword[try] :
identifier[ioctl] ( identifier[result] , identifier[I_PUSH] , literal[string] )
identifier[ioctl] ( identifier[result] , identifier[I_PUSH] , literal[string] )
keyword[except] identifier[OSError] :
keyword[pass]
keyword[return] identifier[result] | def slave_open(tty_name):
"""slave_open(tty_name) -> slave_fd
Open the pty slave and acquire the controlling terminal, returning
opened filedescriptor.
Deprecated, use openpty() instead."""
result = os.open(tty_name, os.O_RDWR)
try:
from fcntl import ioctl, I_PUSH # depends on [control=['try'], data=[]]
except ImportError:
return result # depends on [control=['except'], data=[]]
try:
ioctl(result, I_PUSH, 'ptem')
ioctl(result, I_PUSH, 'ldterm') # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]]
return result |
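As the docstring says, slave_open is deprecated in favor of openpty; a quick check of the replacement (POSIX only) might look like:

import os
import pty

master_fd, slave_fd = pty.openpty()   # returns both ends, no ioctl dance needed
os.write(slave_fd, b"hi")             # slave output appears on the master side
print(os.read(master_fd, 2))          # b'hi'
os.close(slave_fd)
os.close(master_fd)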
def scale_columns(A, v, copy=True):
"""Scale the sparse columns of a matrix.
Parameters
----------
A : sparse matrix
        Sparse matrix with N columns
v : array_like
Array of N scales
copy : {True,False}
- If copy=True, then the matrix is copied to a new and different return
matrix (e.g. B=scale_columns(A,v))
- If copy=False, then the matrix is overwritten deeply (e.g.
scale_columns(A,v,copy=False) overwrites A)
Returns
-------
A : sparse matrix
Scaled sparse matrix in original format
See Also
--------
scipy.sparse._sparsetools.csr_scale_columns, scale_rows
Notes
-----
    - if A is a csc_matrix, the columns are scaled directly via csc_scale_columns
    - if A is not csr, csc, or bsr, it is converted to csr and sent back to
      scale_columns
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import scale_columns
>>> n=5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n-1).tocsr()
    >>> print(scale_columns(A,5*np.ones((A.shape[1],1))).todense())
[[ 10. -5. 0. 0.]
[ -5. 10. -5. 0.]
[ 0. -5. 10. -5.]
[ 0. 0. -5. 10.]
[ 0. 0. 0. -5.]]
"""
v = np.ravel(v)
M, N = A.shape
if not isspmatrix(A):
raise ValueError('scale columns needs a sparse matrix')
if N != len(v):
raise ValueError('scale vector has incompatible shape')
if copy:
A = A.copy()
A.data = np.asarray(A.data, dtype=upcast(A.dtype, v.dtype))
else:
v = np.asarray(v, dtype=A.dtype)
if isspmatrix_csr(A):
csr_scale_columns(M, N, A.indptr, A.indices, A.data, v)
elif isspmatrix_bsr(A):
R, C = A.blocksize
bsr_scale_columns(int(M/R), int(N/C), R, C, A.indptr, A.indices,
np.ravel(A.data), v)
elif isspmatrix_csc(A):
pyamg.amg_core.csc_scale_columns(M, N, A.indptr, A.indices, A.data, v)
else:
fmt = A.format
A = scale_columns(csr_matrix(A), v).asformat(fmt)
return A | def function[scale_columns, parameter[A, v, copy]]:
constant[Scale the sparse columns of a matrix.
Parameters
----------
A : sparse matrix
Sparse matrix with N rows
v : array_like
Array of N scales
copy : {True,False}
- If copy=True, then the matrix is copied to a new and different return
matrix (e.g. B=scale_columns(A,v))
- If copy=False, then the matrix is overwritten deeply (e.g.
scale_columns(A,v,copy=False) overwrites A)
Returns
-------
A : sparse matrix
Scaled sparse matrix in original format
See Also
--------
scipy.sparse._sparsetools.csr_scale_columns, scale_rows
Notes
-----
- if A is a csc_matrix, the transpose A.T is passed to scale_rows
- if A is not csr, csc, or bsr, it is converted to csr and sent to
scale_rows
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import scale_columns
>>> n=5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n-1).tocsr()
>>> print scale_columns(A,5*np.ones((A.shape[1],1))).todense()
[[ 10. -5. 0. 0.]
[ -5. 10. -5. 0.]
[ 0. -5. 10. -5.]
[ 0. 0. -5. 10.]
[ 0. 0. 0. -5.]]
]
variable[v] assign[=] call[name[np].ravel, parameter[name[v]]]
<ast.Tuple object at 0x7da1b08da2f0> assign[=] name[A].shape
if <ast.UnaryOp object at 0x7da1b08da620> begin[:]
<ast.Raise object at 0x7da1b08d9ea0>
if compare[name[N] not_equal[!=] call[name[len], parameter[name[v]]]] begin[:]
<ast.Raise object at 0x7da1b08d8550>
if name[copy] begin[:]
variable[A] assign[=] call[name[A].copy, parameter[]]
name[A].data assign[=] call[name[np].asarray, parameter[name[A].data]]
if call[name[isspmatrix_csr], parameter[name[A]]] begin[:]
call[name[csr_scale_columns], parameter[name[M], name[N], name[A].indptr, name[A].indices, name[A].data, name[v]]]
return[name[A]] | keyword[def] identifier[scale_columns] ( identifier[A] , identifier[v] , identifier[copy] = keyword[True] ):
literal[string]
identifier[v] = identifier[np] . identifier[ravel] ( identifier[v] )
identifier[M] , identifier[N] = identifier[A] . identifier[shape]
keyword[if] keyword[not] identifier[isspmatrix] ( identifier[A] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[N] != identifier[len] ( identifier[v] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[copy] :
identifier[A] = identifier[A] . identifier[copy] ()
identifier[A] . identifier[data] = identifier[np] . identifier[asarray] ( identifier[A] . identifier[data] , identifier[dtype] = identifier[upcast] ( identifier[A] . identifier[dtype] , identifier[v] . identifier[dtype] ))
keyword[else] :
identifier[v] = identifier[np] . identifier[asarray] ( identifier[v] , identifier[dtype] = identifier[A] . identifier[dtype] )
keyword[if] identifier[isspmatrix_csr] ( identifier[A] ):
identifier[csr_scale_columns] ( identifier[M] , identifier[N] , identifier[A] . identifier[indptr] , identifier[A] . identifier[indices] , identifier[A] . identifier[data] , identifier[v] )
keyword[elif] identifier[isspmatrix_bsr] ( identifier[A] ):
identifier[R] , identifier[C] = identifier[A] . identifier[blocksize]
identifier[bsr_scale_columns] ( identifier[int] ( identifier[M] / identifier[R] ), identifier[int] ( identifier[N] / identifier[C] ), identifier[R] , identifier[C] , identifier[A] . identifier[indptr] , identifier[A] . identifier[indices] ,
identifier[np] . identifier[ravel] ( identifier[A] . identifier[data] ), identifier[v] )
keyword[elif] identifier[isspmatrix_csc] ( identifier[A] ):
identifier[pyamg] . identifier[amg_core] . identifier[csc_scale_columns] ( identifier[M] , identifier[N] , identifier[A] . identifier[indptr] , identifier[A] . identifier[indices] , identifier[A] . identifier[data] , identifier[v] )
keyword[else] :
identifier[fmt] = identifier[A] . identifier[format]
identifier[A] = identifier[scale_columns] ( identifier[csr_matrix] ( identifier[A] ), identifier[v] ). identifier[asformat] ( identifier[fmt] )
keyword[return] identifier[A] | def scale_columns(A, v, copy=True):
"""Scale the sparse columns of a matrix.
Parameters
----------
A : sparse matrix
Sparse matrix with N rows
v : array_like
Array of N scales
copy : {True,False}
- If copy=True, then the matrix is copied to a new and different return
matrix (e.g. B=scale_columns(A,v))
- If copy=False, then the matrix is overwritten deeply (e.g.
scale_columns(A,v,copy=False) overwrites A)
Returns
-------
A : sparse matrix
Scaled sparse matrix in original format
See Also
--------
scipy.sparse._sparsetools.csr_scale_columns, scale_rows
Notes
-----
- if A is a csc_matrix, the transpose A.T is passed to scale_rows
- if A is not csr, csc, or bsr, it is converted to csr and sent to
scale_rows
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import scale_columns
>>> n=5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n-1).tocsr()
>>> print scale_columns(A,5*np.ones((A.shape[1],1))).todense()
[[ 10. -5. 0. 0.]
[ -5. 10. -5. 0.]
[ 0. -5. 10. -5.]
[ 0. 0. -5. 10.]
[ 0. 0. 0. -5.]]
"""
v = np.ravel(v)
(M, N) = A.shape
if not isspmatrix(A):
raise ValueError('scale columns needs a sparse matrix') # depends on [control=['if'], data=[]]
if N != len(v):
raise ValueError('scale vector has incompatible shape') # depends on [control=['if'], data=[]]
if copy:
A = A.copy()
A.data = np.asarray(A.data, dtype=upcast(A.dtype, v.dtype)) # depends on [control=['if'], data=[]]
else:
v = np.asarray(v, dtype=A.dtype)
if isspmatrix_csr(A):
csr_scale_columns(M, N, A.indptr, A.indices, A.data, v) # depends on [control=['if'], data=[]]
elif isspmatrix_bsr(A):
(R, C) = A.blocksize
bsr_scale_columns(int(M / R), int(N / C), R, C, A.indptr, A.indices, np.ravel(A.data), v) # depends on [control=['if'], data=[]]
elif isspmatrix_csc(A):
pyamg.amg_core.csc_scale_columns(M, N, A.indptr, A.indices, A.data, v) # depends on [control=['if'], data=[]]
else:
fmt = A.format
A = scale_columns(csr_matrix(A), v).asformat(fmt)
return A |
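Another way to state what scale_columns computes is A @ diag(v); a quick dense cross-check (assuming scipy is available):

import numpy as np
from scipy.sparse import csr_matrix, diags

A = csr_matrix(np.array([[1., 2.], [3., 4.]]))
v = np.array([10., 100.])
print((A @ diags(v)).toarray())   # [[ 10. 200.]
                                  #  [ 30. 400.]]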
def score_meaning(text):
"""
    Returns a heuristic score of how English-like the text is; higher means more
    plausible, and 0 is returned if the text contains no letters or spaces.
"""
    #all_characters = re.findall('[ -~]', text)  # would match ASCII 32-126
    all_characters = re.findall('[a-zA-Z ]', text)  # match letters and spaces
if len(all_characters) == 0:
return 0
repetition_count = Counter(all_characters)
score = (len(all_characters)) ** 2 / (len(repetition_count) + len(text) / 26)
return score | def function[score_meaning, parameter[text]]:
constant[
Returns a score in [0,1] range if the text makes any sense in English.
]
variable[all_characters] assign[=] call[name[re].findall, parameter[constant[[a-zA-Z ]], name[text]]]
if compare[call[name[len], parameter[name[all_characters]]] equal[==] constant[0]] begin[:]
return[constant[0]]
variable[repetition_count] assign[=] call[name[Counter], parameter[name[all_characters]]]
variable[score] assign[=] binary_operation[binary_operation[call[name[len], parameter[name[all_characters]]] ** constant[2]] / binary_operation[call[name[len], parameter[name[repetition_count]]] + binary_operation[call[name[len], parameter[name[text]]] / constant[26]]]]
return[name[score]] | keyword[def] identifier[score_meaning] ( identifier[text] ):
literal[string]
identifier[all_characters] = identifier[re] . identifier[findall] ( literal[string] , identifier[text] )
keyword[if] identifier[len] ( identifier[all_characters] )== literal[int] :
keyword[return] literal[int]
identifier[repetition_count] = identifier[Counter] ( identifier[all_characters] )
identifier[score] =( identifier[len] ( identifier[all_characters] ))** literal[int] /( identifier[len] ( identifier[repetition_count] )+ identifier[len] ( identifier[text] )/ literal[int] )
keyword[return] identifier[score] | def score_meaning(text):
"""
Returns a score in [0,1] range if the text makes any sense in English.
"""
#all_characters = re.findall('[ -~]', text) # match 32-126 in ASCII table
all_characters = re.findall('[a-zA-Z ]', text) # match 32-126 in ASCII table
if len(all_characters) == 0:
return 0 # depends on [control=['if'], data=[]]
repetition_count = Counter(all_characters)
score = len(all_characters) ** 2 / (len(repetition_count) + len(text) / 26)
return score |
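A quick sanity check of the heuristic, with score_meaning as defined above in scope; English-looking text should outscore line noise by a couple orders of magnitude:

import re
from collections import Counter   # the imports score_meaning itself relies on

print(score_meaning("the quick brown fox jumps over the lazy dog"))  # ~64.5
print(score_meaning("x9$#@@!!"))                                     # ~0.76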
def start_proc_mask_signal(proc):
"""
Start process(es) with SIGINT ignored.
Args:
proc: (mp.Process or list)
Note:
The signal mask is only applied when called from main thread.
"""
if not isinstance(proc, list):
proc = [proc]
with mask_sigint():
for p in proc:
if isinstance(p, mp.Process):
if sys.version_info < (3, 4) or mp.get_start_method() == 'fork':
log_once(
"Starting a process with 'fork' method is not safe and may consume unnecessary extra memory."
" Use 'forkserver' method (available after Py3.4) instead if you run into any issues. "
"See https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods",
'warn') # noqa
p.start() | def function[start_proc_mask_signal, parameter[proc]]:
constant[
Start process(es) with SIGINT ignored.
Args:
proc: (mp.Process or list)
Note:
The signal mask is only applied when called from main thread.
]
if <ast.UnaryOp object at 0x7da2046203a0> begin[:]
variable[proc] assign[=] list[[<ast.Name object at 0x7da18f00e0b0>]]
with call[name[mask_sigint], parameter[]] begin[:]
for taget[name[p]] in starred[name[proc]] begin[:]
if call[name[isinstance], parameter[name[p], name[mp].Process]] begin[:]
if <ast.BoolOp object at 0x7da18f00c0a0> begin[:]
call[name[log_once], parameter[constant[Starting a process with 'fork' method is not safe and may consume unnecessary extra memory. Use 'forkserver' method (available after Py3.4) instead if you run into any issues. See https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods], constant[warn]]]
call[name[p].start, parameter[]] | keyword[def] identifier[start_proc_mask_signal] ( identifier[proc] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[proc] , identifier[list] ):
identifier[proc] =[ identifier[proc] ]
keyword[with] identifier[mask_sigint] ():
keyword[for] identifier[p] keyword[in] identifier[proc] :
keyword[if] identifier[isinstance] ( identifier[p] , identifier[mp] . identifier[Process] ):
keyword[if] identifier[sys] . identifier[version_info] <( literal[int] , literal[int] ) keyword[or] identifier[mp] . identifier[get_start_method] ()== literal[string] :
identifier[log_once] (
literal[string]
literal[string]
literal[string] ,
literal[string] )
identifier[p] . identifier[start] () | def start_proc_mask_signal(proc):
"""
Start process(es) with SIGINT ignored.
Args:
proc: (mp.Process or list)
Note:
The signal mask is only applied when called from main thread.
"""
if not isinstance(proc, list):
proc = [proc] # depends on [control=['if'], data=[]]
with mask_sigint():
for p in proc:
if isinstance(p, mp.Process):
if sys.version_info < (3, 4) or mp.get_start_method() == 'fork':
log_once("Starting a process with 'fork' method is not safe and may consume unnecessary extra memory. Use 'forkserver' method (available after Py3.4) instead if you run into any issues. See https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods", 'warn') # noqa # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
p.start() # depends on [control=['for'], data=['p']] # depends on [control=['with'], data=[]] |
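Typical usage, with start_proc_mask_signal and mask_sigint as defined above in scope: the parent ignores SIGINT while forking, so a stray Ctrl-C does not kill half-started children (worker here is a hypothetical target):

import multiprocessing as mp

def worker():
    print("running")

if __name__ == "__main__":
    procs = [mp.Process(target=worker) for _ in range(2)]
    start_proc_mask_signal(procs)   # SIGINT masked only during start()
    for p in procs:
        p.join()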
def download_file(self, url, download_dir, sceneName):
""" Downloads large files in pieces """
try:
# Log
logger.info('\nStarting download..')
print('\n Starting download..\n')
# Request
req = urllib.request.urlopen(url)
try:
if req.info().get_content_type() == 'text/html':
logger.error("error : the file format is html")
lines = req.read()
if lines.find('Download Not Found') > 0:
raise TypeError('Download USGS not found for scene: %s' % self.sceneInfo.name)
else:
print(lines)
                        sys.exit(-1)
except Exception as e:
logger.error('Erro in USGS download for scene %s error: %s' % (self.sceneInfo.name, e))
raise CredentialsUsgsError('User or Password invalid ! ')
total_size = int(req.getheader('Content-Length').strip())
if total_size < 50000:
logger.error("Error: The file is too small to be a Landsat Image for scene %s" % self.sceneInfo.name)
raise SmallLandsatImageError("Error: The file is too small to be a Landsat Image")
total_size_fmt = sizeof_fmt(total_size)
downloaded = 0
CHUNK = 1024 * 1024 * 8
with open(download_dir + '/' + sceneName, 'wb') as fp:
start = time.clock()
logger.debug('Downloading {0} ({1}):'.format(self.sceneInfo.name, total_size_fmt))
print('Downloading {0} ({1}):'.format(self.sceneInfo.name, total_size_fmt))
while True:
chunk = req.read(CHUNK)
downloaded += len(chunk)
done = int(50 * downloaded / total_size)
print('\r[{1}{2}]{0:3.0f}% {3}ps'.format(floor((float(downloaded) / total_size) * 100), '-' * done,
' ' * (50 - done), sizeof_fmt((downloaded //
(time.clock() - start)) / 8)))
if not chunk:
logger.debug('Download {0} completed({1}):'.format(self.sceneInfo.name, total_size_fmt))
break
fp.write(chunk)
except urllib.error.HTTPError as e:
if e.code == 500:
logger.error("File doesn't exist")
print("\n File doesn't exist: %s " % e)
raise RemoteFileDoesntExist("File doesn't exist")
elif e.code == 403:
# Log celery
logger.error("HTTP Error:", e.code, url)
logger.debug('\n trying to download it again scene: %s' % self.sceneInfo.name)
# Log shell
print("\n HTTP Error:", e.code, url)
print('\n trying to download it again scene: %s' % self.sceneInfo.name)
self.connect_earthexplorer()
self.download_file(url, download_dir, sceneName)
else:
logger.error("HTTP Error:", e)
print("HTTP Error:", e.code, url)
raise e
except urllib.error.URLError as e:
print("URL Error:", e.reason, url)
logger.error("URL Error: %s in %s" % (e, url))
raise e
except ConnectionResetError as e:
print('Error ConnectionResetError: %s' % e)
logger.error('Error ConnectionResetError: %s' % e)
print('\n trying to download it again scene: %s' % self.sceneInfo.name)
logger.debug('trying to download it again scene: %s' % self.sceneInfo.name)
self.download_file(url, download_dir, sceneName)
except Exception as error:
logger.error('Error unknown %s in download %s at scene: %s' % (error, url, self.sceneInfo.name))
            print('Error unknown %s in download %s at scene: %s' % (error, url, self.sceneInfo.name))
logger.debug('trying to download it again scene: %s' % self.sceneInfo.name)
self.download_file(url, download_dir, sceneName)
percent = floor((float(downloaded) / total_size) * 100) or 0
if percent != 100:
logger.debug('trying to download it again scene: %s' % self.sceneInfo.name)
logger.error('Download interrupted in %s%%, trying to download it again scene: %s' % (
percent, self.sceneInfo.name))
print('\n Download interrupted in %s%%, trying to download it again scene: %s' % (
percent, self.sceneInfo.name))
self.download_file(url, download_dir, sceneName)
path_item = download_dir + '/' + sceneName
info = {'total_size': total_size_fmt, 'scene': self.sceneInfo.name,
'sucess': verify_sucess(total_size, path_item), 'file_path': path_item}
return info | def function[download_file, parameter[self, url, download_dir, sceneName]]:
constant[ Downloads large files in pieces ]
<ast.Try object at 0x7da1b0210f70>
variable[percent] assign[=] <ast.BoolOp object at 0x7da1b01fd060>
if compare[name[percent] not_equal[!=] constant[100]] begin[:]
call[name[logger].debug, parameter[binary_operation[constant[trying to download it again scene: %s] <ast.Mod object at 0x7da2590d6920> name[self].sceneInfo.name]]]
call[name[logger].error, parameter[binary_operation[constant[Download interrupted in %s%%, trying to download it again scene: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b01fdfc0>, <ast.Attribute object at 0x7da1b01fe560>]]]]]
call[name[print], parameter[binary_operation[constant[
Download interrupted in %s%%, trying to download it again scene: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b01fdb40>, <ast.Attribute object at 0x7da1b01fdff0>]]]]]
call[name[self].download_file, parameter[name[url], name[download_dir], name[sceneName]]]
variable[path_item] assign[=] binary_operation[binary_operation[name[download_dir] + constant[/]] + name[sceneName]]
variable[info] assign[=] dictionary[[<ast.Constant object at 0x7da1b01fcfa0>, <ast.Constant object at 0x7da1b01ffc40>, <ast.Constant object at 0x7da1b01fe4d0>, <ast.Constant object at 0x7da1b01fc1f0>], [<ast.Name object at 0x7da1b01fdab0>, <ast.Attribute object at 0x7da1b01fd6f0>, <ast.Call object at 0x7da1b01fca30>, <ast.Name object at 0x7da1b01fdcf0>]]
return[name[info]] | keyword[def] identifier[download_file] ( identifier[self] , identifier[url] , identifier[download_dir] , identifier[sceneName] ):
literal[string]
keyword[try] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[print] ( literal[string] )
identifier[req] = identifier[urllib] . identifier[request] . identifier[urlopen] ( identifier[url] )
keyword[try] :
keyword[if] identifier[req] . identifier[info] (). identifier[get_content_type] ()== literal[string] :
identifier[logger] . identifier[error] ( literal[string] )
identifier[lines] = identifier[req] . identifier[read] ()
keyword[if] identifier[lines] . identifier[find] ( literal[string] )> literal[int] :
keyword[raise] identifier[TypeError] ( literal[string] % identifier[self] . identifier[sceneInfo] . identifier[name] )
keyword[else] :
identifier[print] ( identifier[lines] )
identifier[print] ( identifier[sys] . identifier[exit] (- literal[int] ))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( literal[string] %( identifier[self] . identifier[sceneInfo] . identifier[name] , identifier[e] ))
keyword[raise] identifier[CredentialsUsgsError] ( literal[string] )
identifier[total_size] = identifier[int] ( identifier[req] . identifier[getheader] ( literal[string] ). identifier[strip] ())
keyword[if] identifier[total_size] < literal[int] :
identifier[logger] . identifier[error] ( literal[string] % identifier[self] . identifier[sceneInfo] . identifier[name] )
keyword[raise] identifier[SmallLandsatImageError] ( literal[string] )
identifier[total_size_fmt] = identifier[sizeof_fmt] ( identifier[total_size] )
identifier[downloaded] = literal[int]
identifier[CHUNK] = literal[int] * literal[int] * literal[int]
keyword[with] identifier[open] ( identifier[download_dir] + literal[string] + identifier[sceneName] , literal[string] ) keyword[as] identifier[fp] :
identifier[start] = identifier[time] . identifier[clock] ()
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[sceneInfo] . identifier[name] , identifier[total_size_fmt] ))
identifier[print] ( literal[string] . identifier[format] ( identifier[self] . identifier[sceneInfo] . identifier[name] , identifier[total_size_fmt] ))
keyword[while] keyword[True] :
identifier[chunk] = identifier[req] . identifier[read] ( identifier[CHUNK] )
identifier[downloaded] += identifier[len] ( identifier[chunk] )
identifier[done] = identifier[int] ( literal[int] * identifier[downloaded] / identifier[total_size] )
identifier[print] ( literal[string] . identifier[format] ( identifier[floor] (( identifier[float] ( identifier[downloaded] )/ identifier[total_size] )* literal[int] ), literal[string] * identifier[done] ,
literal[string] *( literal[int] - identifier[done] ), identifier[sizeof_fmt] (( identifier[downloaded] //
( identifier[time] . identifier[clock] ()- identifier[start] ))/ literal[int] )))
keyword[if] keyword[not] identifier[chunk] :
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[sceneInfo] . identifier[name] , identifier[total_size_fmt] ))
keyword[break]
identifier[fp] . identifier[write] ( identifier[chunk] )
keyword[except] identifier[urllib] . identifier[error] . identifier[HTTPError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[code] == literal[int] :
identifier[logger] . identifier[error] ( literal[string] )
identifier[print] ( literal[string] % identifier[e] )
keyword[raise] identifier[RemoteFileDoesntExist] ( literal[string] )
keyword[elif] identifier[e] . identifier[code] == literal[int] :
identifier[logger] . identifier[error] ( literal[string] , identifier[e] . identifier[code] , identifier[url] )
identifier[logger] . identifier[debug] ( literal[string] % identifier[self] . identifier[sceneInfo] . identifier[name] )
identifier[print] ( literal[string] , identifier[e] . identifier[code] , identifier[url] )
identifier[print] ( literal[string] % identifier[self] . identifier[sceneInfo] . identifier[name] )
identifier[self] . identifier[connect_earthexplorer] ()
identifier[self] . identifier[download_file] ( identifier[url] , identifier[download_dir] , identifier[sceneName] )
keyword[else] :
identifier[logger] . identifier[error] ( literal[string] , identifier[e] )
identifier[print] ( literal[string] , identifier[e] . identifier[code] , identifier[url] )
keyword[raise] identifier[e]
keyword[except] identifier[urllib] . identifier[error] . identifier[URLError] keyword[as] identifier[e] :
identifier[print] ( literal[string] , identifier[e] . identifier[reason] , identifier[url] )
identifier[logger] . identifier[error] ( literal[string] %( identifier[e] , identifier[url] ))
keyword[raise] identifier[e]
keyword[except] identifier[ConnectionResetError] keyword[as] identifier[e] :
identifier[print] ( literal[string] % identifier[e] )
identifier[logger] . identifier[error] ( literal[string] % identifier[e] )
identifier[print] ( literal[string] % identifier[self] . identifier[sceneInfo] . identifier[name] )
identifier[logger] . identifier[debug] ( literal[string] % identifier[self] . identifier[sceneInfo] . identifier[name] )
identifier[self] . identifier[download_file] ( identifier[url] , identifier[download_dir] , identifier[sceneName] )
keyword[except] identifier[urllib] . identifier[error] . identifier[HTTPError] keyword[as] identifier[e] :
identifier[print] ( literal[string] % identifier[e] )
identifier[print] ( literal[string] % identifier[self] . identifier[sceneInfo] . identifier[name] )
identifier[logger] . identifier[error] ( literal[string] % identifier[e] )
identifier[logger] . identifier[debug] ( literal[string] % identifier[self] . identifier[sceneInfo] . identifier[name] )
identifier[self] . identifier[download_file] ( identifier[url] , identifier[download_dir] , identifier[sceneName] )
keyword[except] identifier[Exception] keyword[as] identifier[error] :
identifier[logger] . identifier[error] ( literal[string] %( identifier[error] , identifier[url] , identifier[self] . identifier[sceneInfo] . identifier[name] ))
identifier[print] ( literal[string] %( identifier[error] , identifier[url] , identifier[self] . identifier[sceneInfo] . identifier[name] ))
identifier[logger] . identifier[debug] ( literal[string] % identifier[self] . identifier[sceneInfo] . identifier[name] )
identifier[self] . identifier[download_file] ( identifier[url] , identifier[download_dir] , identifier[sceneName] )
identifier[percent] = identifier[floor] (( identifier[float] ( identifier[downloaded] )/ identifier[total_size] )* literal[int] ) keyword[or] literal[int]
keyword[if] identifier[percent] != literal[int] :
identifier[logger] . identifier[debug] ( literal[string] % identifier[self] . identifier[sceneInfo] . identifier[name] )
identifier[logger] . identifier[error] ( literal[string] %(
identifier[percent] , identifier[self] . identifier[sceneInfo] . identifier[name] ))
identifier[print] ( literal[string] %(
identifier[percent] , identifier[self] . identifier[sceneInfo] . identifier[name] ))
identifier[self] . identifier[download_file] ( identifier[url] , identifier[download_dir] , identifier[sceneName] )
identifier[path_item] = identifier[download_dir] + literal[string] + identifier[sceneName]
identifier[info] ={ literal[string] : identifier[total_size_fmt] , literal[string] : identifier[self] . identifier[sceneInfo] . identifier[name] ,
literal[string] : identifier[verify_sucess] ( identifier[total_size] , identifier[path_item] ), literal[string] : identifier[path_item] }
keyword[return] identifier[info] | def download_file(self, url, download_dir, sceneName):
""" Downloads large files in pieces """
try:
# Log
logger.info('\nStarting download..')
print('\n Starting download..\n')
# Request
req = urllib.request.urlopen(url)
try:
if req.info().get_content_type() == 'text/html':
logger.error('error : the file format is html')
lines = req.read()
if lines.find('Download Not Found') > 0:
raise TypeError('Download USGS not found for scene: %s' % self.sceneInfo.name) # depends on [control=['if'], data=[]]
else:
print(lines)
print(sys.exit(-1)) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception as e:
logger.error('Erro in USGS download for scene %s error: %s' % (self.sceneInfo.name, e))
raise CredentialsUsgsError('User or Password invalid ! ') # depends on [control=['except'], data=['e']]
total_size = int(req.getheader('Content-Length').strip())
if total_size < 50000:
logger.error('Error: The file is too small to be a Landsat Image for scene %s' % self.sceneInfo.name)
raise SmallLandsatImageError('Error: The file is too small to be a Landsat Image') # depends on [control=['if'], data=[]]
total_size_fmt = sizeof_fmt(total_size)
downloaded = 0
CHUNK = 1024 * 1024 * 8
with open(download_dir + '/' + sceneName, 'wb') as fp:
start = time.clock()
logger.debug('Downloading {0} ({1}):'.format(self.sceneInfo.name, total_size_fmt))
print('Downloading {0} ({1}):'.format(self.sceneInfo.name, total_size_fmt))
while True:
chunk = req.read(CHUNK)
downloaded += len(chunk)
done = int(50 * downloaded / total_size)
print('\r[{1}{2}]{0:3.0f}% {3}ps'.format(floor(float(downloaded) / total_size * 100), '-' * done, ' ' * (50 - done), sizeof_fmt(downloaded // (time.clock() - start) / 8)))
if not chunk:
logger.debug('Download {0} completed({1}):'.format(self.sceneInfo.name, total_size_fmt))
break # depends on [control=['if'], data=[]]
fp.write(chunk) # depends on [control=['while'], data=[]] # depends on [control=['with'], data=['fp']] # depends on [control=['try'], data=[]]
except urllib.error.HTTPError as e:
if e.code == 500:
logger.error("File doesn't exist")
print("\n File doesn't exist: %s " % e)
raise RemoteFileDoesntExist("File doesn't exist") # depends on [control=['if'], data=[]]
elif e.code == 403:
# Log celery
logger.error('HTTP Error:', e.code, url)
logger.debug('\n trying to download it again scene: %s' % self.sceneInfo.name)
# Log shell
print('\n HTTP Error:', e.code, url)
print('\n trying to download it again scene: %s' % self.sceneInfo.name)
self.connect_earthexplorer()
self.download_file(url, download_dir, sceneName) # depends on [control=['if'], data=[]]
else:
logger.error('HTTP Error:', e)
print('HTTP Error:', e.code, url)
raise e # depends on [control=['except'], data=['e']]
except urllib.error.URLError as e:
print('URL Error:', e.reason, url)
logger.error('URL Error: %s in %s' % (e, url))
raise e # depends on [control=['except'], data=['e']]
except ConnectionResetError as e:
print('Error ConnectionResetError: %s' % e)
logger.error('Error ConnectionResetError: %s' % e)
print('\n trying to download it again scene: %s' % self.sceneInfo.name)
logger.debug('trying to download it again scene: %s' % self.sceneInfo.name)
self.download_file(url, download_dir, sceneName) # depends on [control=['except'], data=['e']]
except urllib.error.HTTPError as e:
print('\n HttpError: %s' % e)
print('\n trying to download it again scene: %s' % self.sceneInfo.name)
logger.error('HttpError: %s' % e)
logger.debug('trying to download it again scene: %s' % self.sceneInfo.name)
self.download_file(url, download_dir, sceneName) # depends on [control=['except'], data=['e']]
except Exception as error:
logger.error('Error unknown %s in download %s at scene: %s' % (error, url, self.sceneInfo.name))
print('Error unknown %s in download % at scene: %s' % (error, url, self.sceneInfo.name))
logger.debug('trying to download it again scene: %s' % self.sceneInfo.name)
self.download_file(url, download_dir, sceneName) # depends on [control=['except'], data=['error']]
percent = floor(float(downloaded) / total_size * 100) or 0
if percent != 100:
logger.debug('trying to download it again scene: %s' % self.sceneInfo.name)
logger.error('Download interrupted in %s%%, trying to download it again scene: %s' % (percent, self.sceneInfo.name))
print('\n Download interrupted in %s%%, trying to download it again scene: %s' % (percent, self.sceneInfo.name))
self.download_file(url, download_dir, sceneName) # depends on [control=['if'], data=['percent']]
path_item = download_dir + '/' + sceneName
info = {'total_size': total_size_fmt, 'scene': self.sceneInfo.name, 'sucess': verify_sucess(total_size, path_item), 'file_path': path_item}
return info |
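
# Illustrative sketch, not part of the dataset row above: download_file retries by
# calling itself recursively with no retry cap, so a persistently failing URL can
# recurse until Python's limit. A bounded loop is a safer shape; the chunk size
# and RuntimeError wrapper below are hypothetical choices, not the row's API.
import urllib.request

def download_with_retries(url, dest_path, max_retries=3, chunk_size=8 * 1024 * 1024):
    """Fetch url into dest_path in chunks, retrying at most max_retries times."""
    last_error = None
    for _ in range(max_retries):
        try:
            with urllib.request.urlopen(url) as req, open(dest_path, 'wb') as fp:
                while True:
                    chunk = req.read(chunk_size)
                    if not chunk:
                        return dest_path  # finished cleanly
                    fp.write(chunk)
        except OSError as error:  # urllib's URLError/HTTPError subclass OSError
            last_error = error
    raise RuntimeError('download failed after %d attempts' % max_retries) from last_error
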
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return Frame(key)
if key not in Frame._member_map_:
extend_enum(Frame, key, default)
return Frame[key] | def function[get, parameter[key, default]]:
constant[Backport support for original codes.]
if call[name[isinstance], parameter[name[key], name[int]]] begin[:]
return[call[name[Frame], parameter[name[key]]]]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[Frame]._member_map_] begin[:]
call[name[extend_enum], parameter[name[Frame], name[key], name[default]]]
return[call[name[Frame]][name[key]]] | keyword[def] identifier[get] ( identifier[key] , identifier[default] =- literal[int] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[key] , identifier[int] ):
keyword[return] identifier[Frame] ( identifier[key] )
keyword[if] identifier[key] keyword[not] keyword[in] identifier[Frame] . identifier[_member_map_] :
identifier[extend_enum] ( identifier[Frame] , identifier[key] , identifier[default] )
keyword[return] identifier[Frame] [ identifier[key] ] | def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return Frame(key) # depends on [control=['if'], data=[]]
if key not in Frame._member_map_:
extend_enum(Frame, key, default) # depends on [control=['if'], data=['key']]
return Frame[key] |
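
# Minimal sketch of the get() backport pattern above, assuming the aenum
# package; Frame here is a stand-in IntEnum, not the registry from the row.
from aenum import IntEnum, extend_enum

class Frame(IntEnum):
    DATA = 0
    HEADERS = 1

def get(key, default=-1):
    """Return Frame for key, registering unknown names with a default value."""
    if isinstance(key, int):
        return Frame(key)
    if key not in Frame._member_map_:
        extend_enum(Frame, key, default)
    return Frame[key]

assert get(1) is Frame.HEADERS
assert get('PING').value == -1  # unknown name registered on the fly
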
def heterogzygote_counts(paired):
"""Provide tumor/normal counts at population heterozyogte sites with CollectAllelicCounts.
"""
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(paired.tumor_data), "structural", "counts"))
key = "germline_het_pon"
het_bed = tz.get_in(["genome_resources", "variation", key], paired.tumor_data)
vr = bedutils.population_variant_regions([x for x in [paired.tumor_data, paired.normal_data] if x])
cur_het_bed = bedutils.intersect_two(het_bed, vr, work_dir, paired.tumor_data)
tumor_counts = _run_collect_allelic_counts(cur_het_bed, key, work_dir, paired.tumor_data)
normal_counts = (_run_collect_allelic_counts(cur_het_bed, key, work_dir, paired.normal_data)
if paired.normal_data else None)
if normal_counts:
tumor_counts, normal_counts = _filter_by_normal(tumor_counts, normal_counts, paired.tumor_data)
return tumor_counts, normal_counts | def function[heterogzygote_counts, parameter[paired]]:
constant[Provide tumor/normal counts at population heterozyogte sites with CollectAllelicCounts.
]
variable[work_dir] assign[=] call[name[utils].safe_makedir, parameter[call[name[os].path.join, parameter[call[name[dd].get_work_dir, parameter[name[paired].tumor_data]], constant[structural], constant[counts]]]]]
variable[key] assign[=] constant[germline_het_pon]
variable[het_bed] assign[=] call[name[tz].get_in, parameter[list[[<ast.Constant object at 0x7da1b17d5750>, <ast.Constant object at 0x7da1b17d6e90>, <ast.Name object at 0x7da1b17d6b90>]], name[paired].tumor_data]]
variable[vr] assign[=] call[name[bedutils].population_variant_regions, parameter[<ast.ListComp object at 0x7da1b17d5600>]]
variable[cur_het_bed] assign[=] call[name[bedutils].intersect_two, parameter[name[het_bed], name[vr], name[work_dir], name[paired].tumor_data]]
variable[tumor_counts] assign[=] call[name[_run_collect_allelic_counts], parameter[name[cur_het_bed], name[key], name[work_dir], name[paired].tumor_data]]
variable[normal_counts] assign[=] <ast.IfExp object at 0x7da1b17d64a0>
if name[normal_counts] begin[:]
<ast.Tuple object at 0x7da1b17d6950> assign[=] call[name[_filter_by_normal], parameter[name[tumor_counts], name[normal_counts], name[paired].tumor_data]]
return[tuple[[<ast.Name object at 0x7da1b17d7040>, <ast.Name object at 0x7da1b17d71f0>]]] | keyword[def] identifier[heterogzygote_counts] ( identifier[paired] ):
literal[string]
identifier[work_dir] = identifier[utils] . identifier[safe_makedir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dd] . identifier[get_work_dir] ( identifier[paired] . identifier[tumor_data] ), literal[string] , literal[string] ))
identifier[key] = literal[string]
identifier[het_bed] = identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] , identifier[key] ], identifier[paired] . identifier[tumor_data] )
identifier[vr] = identifier[bedutils] . identifier[population_variant_regions] ([ identifier[x] keyword[for] identifier[x] keyword[in] [ identifier[paired] . identifier[tumor_data] , identifier[paired] . identifier[normal_data] ] keyword[if] identifier[x] ])
identifier[cur_het_bed] = identifier[bedutils] . identifier[intersect_two] ( identifier[het_bed] , identifier[vr] , identifier[work_dir] , identifier[paired] . identifier[tumor_data] )
identifier[tumor_counts] = identifier[_run_collect_allelic_counts] ( identifier[cur_het_bed] , identifier[key] , identifier[work_dir] , identifier[paired] . identifier[tumor_data] )
identifier[normal_counts] =( identifier[_run_collect_allelic_counts] ( identifier[cur_het_bed] , identifier[key] , identifier[work_dir] , identifier[paired] . identifier[normal_data] )
keyword[if] identifier[paired] . identifier[normal_data] keyword[else] keyword[None] )
keyword[if] identifier[normal_counts] :
identifier[tumor_counts] , identifier[normal_counts] = identifier[_filter_by_normal] ( identifier[tumor_counts] , identifier[normal_counts] , identifier[paired] . identifier[tumor_data] )
keyword[return] identifier[tumor_counts] , identifier[normal_counts] | def heterogzygote_counts(paired):
"""Provide tumor/normal counts at population heterozyogte sites with CollectAllelicCounts.
"""
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(paired.tumor_data), 'structural', 'counts'))
key = 'germline_het_pon'
het_bed = tz.get_in(['genome_resources', 'variation', key], paired.tumor_data)
vr = bedutils.population_variant_regions([x for x in [paired.tumor_data, paired.normal_data] if x])
cur_het_bed = bedutils.intersect_two(het_bed, vr, work_dir, paired.tumor_data)
tumor_counts = _run_collect_allelic_counts(cur_het_bed, key, work_dir, paired.tumor_data)
normal_counts = _run_collect_allelic_counts(cur_het_bed, key, work_dir, paired.normal_data) if paired.normal_data else None
if normal_counts:
(tumor_counts, normal_counts) = _filter_by_normal(tumor_counts, normal_counts, paired.tumor_data) # depends on [control=['if'], data=[]]
return (tumor_counts, normal_counts) |
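
# Usage note for the tz.get_in lookup above: toolz.get_in walks nested mappings
# and returns None (or a supplied default) when any key along the path is
# missing, so an absent population-het BED simply yields het_bed = None.
# The dict below is a hypothetical stand-in for paired.tumor_data.
import toolz as tz

data = {'genome_resources': {'variation': {'germline_het_pon': '/ref/het.bed'}}}
assert tz.get_in(['genome_resources', 'variation', 'germline_het_pon'], data) == '/ref/het.bed'
assert tz.get_in(['genome_resources', 'variation', 'missing_key'], data) is None
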
def encode(self, lname, max_length=4, german=False):
"""Calculate the PSHP Soundex/Viewex Coding of a last name.
Parameters
----------
lname : str
The last name to encode
max_length : int
The length of the code returned (defaults to 4)
german : bool
Set to True if the name is German (different rules apply)
Returns
-------
str
The PSHP Soundex/Viewex Coding
Examples
--------
>>> pe = PSHPSoundexLast()
>>> pe.encode('Smith')
'S530'
>>> pe.encode('Waters')
'W350'
>>> pe.encode('James')
'J500'
>>> pe.encode('Schmidt')
'S530'
>>> pe.encode('Ashcroft')
'A225'
"""
lname = unicode_normalize('NFKD', text_type(lname.upper()))
lname = lname.replace('ß', 'SS')
lname = ''.join(c for c in lname if c in self._uc_set)
# A. Prefix treatment
if lname[:3] == 'VON' or lname[:3] == 'VAN':
lname = lname[3:].strip()
# The rule implemented below says "MC, MAC become 1". I believe it
# meant to say they become M except in German data (where superscripted
# 1 indicates "except in German data"). It doesn't make sense for them
# to become 1 (BPFV -> 1) or to apply outside German. Unfortunately,
# both articles have this error(?).
if not german:
if lname[:3] == 'MAC':
lname = 'M' + lname[3:]
elif lname[:2] == 'MC':
lname = 'M' + lname[2:]
# The non-German-only rule to strip ' is unnecessary due to filtering
if lname[:1] in {'E', 'I', 'O', 'U'}:
lname = 'A' + lname[1:]
elif lname[:2] in {'GE', 'GI', 'GY'}:
lname = 'J' + lname[1:]
elif lname[:2] in {'CE', 'CI', 'CY'}:
lname = 'S' + lname[1:]
elif lname[:3] == 'CHR':
lname = 'K' + lname[1:]
elif lname[:1] == 'C' and lname[:2] != 'CH':
lname = 'K' + lname[1:]
if lname[:2] == 'KN':
lname = 'N' + lname[1:]
elif lname[:2] == 'PH':
lname = 'F' + lname[1:]
elif lname[:3] in {'WIE', 'WEI'}:
lname = 'V' + lname[1:]
if german and lname[:1] in {'W', 'M', 'Y', 'Z'}:
            lname = {'W': 'V', 'M': 'N', 'Y': 'J', 'Z': 'S'}[lname[0]] + lname[1:]
code = lname[:1]
# B. Postfix treatment
if german: # moved from end of postfix treatment due to blocking
if lname[-3:] == 'TES':
lname = lname[:-3]
elif lname[-2:] == 'TS':
lname = lname[:-2]
if lname[-3:] == 'TZE':
lname = lname[:-3]
elif lname[-2:] == 'ZE':
lname = lname[:-2]
if lname[-1:] == 'Z':
lname = lname[:-1]
elif lname[-2:] == 'TE':
lname = lname[:-2]
if lname[-1:] == 'R':
lname = lname[:-1] + 'N'
elif lname[-2:] in {'SE', 'CE'}:
lname = lname[:-2]
if lname[-2:] == 'SS':
lname = lname[:-2]
elif lname[-1:] == 'S':
lname = lname[:-1]
if not german:
l5_repl = {'STOWN': 'SAWON', 'MPSON': 'MASON'}
l4_repl = {
'NSEN': 'ASEN',
'MSON': 'ASON',
'STEN': 'SAEN',
'STON': 'SAON',
}
if lname[-5:] in l5_repl:
lname = lname[:-5] + l5_repl[lname[-5:]]
elif lname[-4:] in l4_repl:
lname = lname[:-4] + l4_repl[lname[-4:]]
if lname[-2:] in {'NG', 'ND'}:
lname = lname[:-1]
if not german and lname[-3:] in {'GAN', 'GEN'}:
lname = lname[:-3] + 'A' + lname[-2:]
# C. Infix Treatment
lname = lname.replace('CK', 'C')
lname = lname.replace('SCH', 'S')
lname = lname.replace('DT', 'T')
lname = lname.replace('ND', 'N')
lname = lname.replace('NG', 'N')
lname = lname.replace('LM', 'M')
lname = lname.replace('MN', 'M')
lname = lname.replace('WIE', 'VIE')
lname = lname.replace('WEI', 'VEI')
# D. Soundexing
# code for X & Y are unspecified, but presumably are 2 & 0
lname = lname.translate(self._trans)
lname = self._delete_consecutive_repeats(lname)
code += lname[1:]
code = code.replace('0', '') # rule 1
if max_length != -1:
if len(code) < max_length:
code += '0' * (max_length - len(code))
else:
code = code[:max_length]
return code | def function[encode, parameter[self, lname, max_length, german]]:
constant[Calculate the PSHP Soundex/Viewex Coding of a last name.
Parameters
----------
lname : str
The last name to encode
max_length : int
The length of the code returned (defaults to 4)
german : bool
Set to True if the name is German (different rules apply)
Returns
-------
str
The PSHP Soundex/Viewex Coding
Examples
--------
>>> pe = PSHPSoundexLast()
>>> pe.encode('Smith')
'S530'
>>> pe.encode('Waters')
'W350'
>>> pe.encode('James')
'J500'
>>> pe.encode('Schmidt')
'S530'
>>> pe.encode('Ashcroft')
'A225'
]
variable[lname] assign[=] call[name[unicode_normalize], parameter[constant[NFKD], call[name[text_type], parameter[call[name[lname].upper, parameter[]]]]]]
variable[lname] assign[=] call[name[lname].replace, parameter[constant[ß], constant[SS]]]
variable[lname] assign[=] call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da18bcc8bb0>]]
if <ast.BoolOp object at 0x7da18bccbc40> begin[:]
variable[lname] assign[=] call[call[name[lname]][<ast.Slice object at 0x7da18bcc82e0>].strip, parameter[]]
if <ast.UnaryOp object at 0x7da18bcc8550> begin[:]
if compare[call[name[lname]][<ast.Slice object at 0x7da18bcc8e80>] equal[==] constant[MAC]] begin[:]
variable[lname] assign[=] binary_operation[constant[M] + call[name[lname]][<ast.Slice object at 0x7da18bcc88b0>]]
if compare[call[name[lname]][<ast.Slice object at 0x7da18bcc9ff0>] in <ast.Set object at 0x7da18bcc8640>] begin[:]
variable[lname] assign[=] binary_operation[constant[A] + call[name[lname]][<ast.Slice object at 0x7da18bcc8df0>]]
if compare[call[name[lname]][<ast.Slice object at 0x7da18bcc8880>] equal[==] constant[KN]] begin[:]
variable[lname] assign[=] binary_operation[constant[N] + call[name[lname]][<ast.Slice object at 0x7da18bcc9660>]]
if <ast.BoolOp object at 0x7da1b011be50> begin[:]
variable[lname] assign[=] binary_operation[call[dictionary[[<ast.Constant object at 0x7da1b011abf0>, <ast.Constant object at 0x7da1b011ab90>, <ast.Constant object at 0x7da1b011ac80>, <ast.Constant object at 0x7da1b011ab60>], [<ast.Constant object at 0x7da1b011aef0>, <ast.Constant object at 0x7da1b011af50>, <ast.Constant object at 0x7da1b011af20>, <ast.Constant object at 0x7da1b011aec0>]]][call[name[lname]][constant[0]]] + call[name[lname]][<ast.Slice object at 0x7da1b011ae00>]]
variable[code] assign[=] call[name[lname]][<ast.Slice object at 0x7da1b011afe0>]
if name[german] begin[:]
if compare[call[name[lname]][<ast.Slice object at 0x7da1b011b190>] equal[==] constant[TES]] begin[:]
variable[lname] assign[=] call[name[lname]][<ast.Slice object at 0x7da1b011b250>]
if compare[call[name[lname]][<ast.Slice object at 0x7da1b011b7c0>] equal[==] constant[TZE]] begin[:]
variable[lname] assign[=] call[name[lname]][<ast.Slice object at 0x7da1b011b5e0>]
if compare[call[name[lname]][<ast.Slice object at 0x7da1b011a050>] equal[==] constant[Z]] begin[:]
variable[lname] assign[=] call[name[lname]][<ast.Slice object at 0x7da1b011a230>]
if compare[call[name[lname]][<ast.Slice object at 0x7da1b011a8f0>] equal[==] constant[R]] begin[:]
variable[lname] assign[=] binary_operation[call[name[lname]][<ast.Slice object at 0x7da1b011a7d0>] + constant[N]]
if compare[call[name[lname]][<ast.Slice object at 0x7da1b0119cf0>] equal[==] constant[SS]] begin[:]
variable[lname] assign[=] call[name[lname]][<ast.Slice object at 0x7da1b0119b40>]
if <ast.UnaryOp object at 0x7da1b0119ea0> begin[:]
variable[l5_repl] assign[=] dictionary[[<ast.Constant object at 0x7da1b011bfa0>, <ast.Constant object at 0x7da1b011bc40>], [<ast.Constant object at 0x7da1b011bd60>, <ast.Constant object at 0x7da1b011bf70>]]
variable[l4_repl] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b1690>, <ast.Constant object at 0x7da20e9b30d0>, <ast.Constant object at 0x7da20e9b1e70>, <ast.Constant object at 0x7da20e9b0790>], [<ast.Constant object at 0x7da20e9b20b0>, <ast.Constant object at 0x7da20e9b2560>, <ast.Constant object at 0x7da20e9b1450>, <ast.Constant object at 0x7da20e9b3a00>]]
if compare[call[name[lname]][<ast.Slice object at 0x7da20e9b20e0>] in name[l5_repl]] begin[:]
variable[lname] assign[=] binary_operation[call[name[lname]][<ast.Slice object at 0x7da20e9b0cd0>] + call[name[l5_repl]][call[name[lname]][<ast.Slice object at 0x7da20e9b3130>]]]
if compare[call[name[lname]][<ast.Slice object at 0x7da20e9b3c10>] in <ast.Set object at 0x7da20e9b3820>] begin[:]
variable[lname] assign[=] call[name[lname]][<ast.Slice object at 0x7da20e9b1930>]
if <ast.BoolOp object at 0x7da20e9b2890> begin[:]
variable[lname] assign[=] binary_operation[binary_operation[call[name[lname]][<ast.Slice object at 0x7da20e9b2e90>] + constant[A]] + call[name[lname]][<ast.Slice object at 0x7da20e9b0f70>]]
variable[lname] assign[=] call[name[lname].replace, parameter[constant[CK], constant[C]]]
variable[lname] assign[=] call[name[lname].replace, parameter[constant[SCH], constant[S]]]
variable[lname] assign[=] call[name[lname].replace, parameter[constant[DT], constant[T]]]
variable[lname] assign[=] call[name[lname].replace, parameter[constant[ND], constant[N]]]
variable[lname] assign[=] call[name[lname].replace, parameter[constant[NG], constant[N]]]
variable[lname] assign[=] call[name[lname].replace, parameter[constant[LM], constant[M]]]
variable[lname] assign[=] call[name[lname].replace, parameter[constant[MN], constant[M]]]
variable[lname] assign[=] call[name[lname].replace, parameter[constant[WIE], constant[VIE]]]
variable[lname] assign[=] call[name[lname].replace, parameter[constant[WEI], constant[VEI]]]
variable[lname] assign[=] call[name[lname].translate, parameter[name[self]._trans]]
variable[lname] assign[=] call[name[self]._delete_consecutive_repeats, parameter[name[lname]]]
<ast.AugAssign object at 0x7da1b01d6bc0>
variable[code] assign[=] call[name[code].replace, parameter[constant[0], constant[]]]
if compare[name[max_length] not_equal[!=] <ast.UnaryOp object at 0x7da1b01d55a0>] begin[:]
if compare[call[name[len], parameter[name[code]]] less[<] name[max_length]] begin[:]
<ast.AugAssign object at 0x7da1b01d5420>
return[name[code]] | keyword[def] identifier[encode] ( identifier[self] , identifier[lname] , identifier[max_length] = literal[int] , identifier[german] = keyword[False] ):
literal[string]
identifier[lname] = identifier[unicode_normalize] ( literal[string] , identifier[text_type] ( identifier[lname] . identifier[upper] ()))
identifier[lname] = identifier[lname] . identifier[replace] ( literal[string] , literal[string] )
identifier[lname] = literal[string] . identifier[join] ( identifier[c] keyword[for] identifier[c] keyword[in] identifier[lname] keyword[if] identifier[c] keyword[in] identifier[self] . identifier[_uc_set] )
keyword[if] identifier[lname] [: literal[int] ]== literal[string] keyword[or] identifier[lname] [: literal[int] ]== literal[string] :
identifier[lname] = identifier[lname] [ literal[int] :]. identifier[strip] ()
keyword[if] keyword[not] identifier[german] :
keyword[if] identifier[lname] [: literal[int] ]== literal[string] :
identifier[lname] = literal[string] + identifier[lname] [ literal[int] :]
keyword[elif] identifier[lname] [: literal[int] ]== literal[string] :
identifier[lname] = literal[string] + identifier[lname] [ literal[int] :]
keyword[if] identifier[lname] [: literal[int] ] keyword[in] { literal[string] , literal[string] , literal[string] , literal[string] }:
identifier[lname] = literal[string] + identifier[lname] [ literal[int] :]
keyword[elif] identifier[lname] [: literal[int] ] keyword[in] { literal[string] , literal[string] , literal[string] }:
identifier[lname] = literal[string] + identifier[lname] [ literal[int] :]
keyword[elif] identifier[lname] [: literal[int] ] keyword[in] { literal[string] , literal[string] , literal[string] }:
identifier[lname] = literal[string] + identifier[lname] [ literal[int] :]
keyword[elif] identifier[lname] [: literal[int] ]== literal[string] :
identifier[lname] = literal[string] + identifier[lname] [ literal[int] :]
keyword[elif] identifier[lname] [: literal[int] ]== literal[string] keyword[and] identifier[lname] [: literal[int] ]!= literal[string] :
identifier[lname] = literal[string] + identifier[lname] [ literal[int] :]
keyword[if] identifier[lname] [: literal[int] ]== literal[string] :
identifier[lname] = literal[string] + identifier[lname] [ literal[int] :]
keyword[elif] identifier[lname] [: literal[int] ]== literal[string] :
identifier[lname] = literal[string] + identifier[lname] [ literal[int] :]
keyword[elif] identifier[lname] [: literal[int] ] keyword[in] { literal[string] , literal[string] }:
identifier[lname] = literal[string] + identifier[lname] [ literal[int] :]
keyword[if] identifier[german] keyword[and] identifier[lname] [: literal[int] ] keyword[in] { literal[string] , literal[string] , literal[string] , literal[string] }:
identifier[lname] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] }[ identifier[lname] [ literal[int] ]]+ identifier[lname] [
literal[int] :
]
identifier[code] = identifier[lname] [: literal[int] ]
keyword[if] identifier[german] :
keyword[if] identifier[lname] [- literal[int] :]== literal[string] :
identifier[lname] = identifier[lname] [:- literal[int] ]
keyword[elif] identifier[lname] [- literal[int] :]== literal[string] :
identifier[lname] = identifier[lname] [:- literal[int] ]
keyword[if] identifier[lname] [- literal[int] :]== literal[string] :
identifier[lname] = identifier[lname] [:- literal[int] ]
keyword[elif] identifier[lname] [- literal[int] :]== literal[string] :
identifier[lname] = identifier[lname] [:- literal[int] ]
keyword[if] identifier[lname] [- literal[int] :]== literal[string] :
identifier[lname] = identifier[lname] [:- literal[int] ]
keyword[elif] identifier[lname] [- literal[int] :]== literal[string] :
identifier[lname] = identifier[lname] [:- literal[int] ]
keyword[if] identifier[lname] [- literal[int] :]== literal[string] :
identifier[lname] = identifier[lname] [:- literal[int] ]+ literal[string]
keyword[elif] identifier[lname] [- literal[int] :] keyword[in] { literal[string] , literal[string] }:
identifier[lname] = identifier[lname] [:- literal[int] ]
keyword[if] identifier[lname] [- literal[int] :]== literal[string] :
identifier[lname] = identifier[lname] [:- literal[int] ]
keyword[elif] identifier[lname] [- literal[int] :]== literal[string] :
identifier[lname] = identifier[lname] [:- literal[int] ]
keyword[if] keyword[not] identifier[german] :
identifier[l5_repl] ={ literal[string] : literal[string] , literal[string] : literal[string] }
identifier[l4_repl] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
keyword[if] identifier[lname] [- literal[int] :] keyword[in] identifier[l5_repl] :
identifier[lname] = identifier[lname] [:- literal[int] ]+ identifier[l5_repl] [ identifier[lname] [- literal[int] :]]
keyword[elif] identifier[lname] [- literal[int] :] keyword[in] identifier[l4_repl] :
identifier[lname] = identifier[lname] [:- literal[int] ]+ identifier[l4_repl] [ identifier[lname] [- literal[int] :]]
keyword[if] identifier[lname] [- literal[int] :] keyword[in] { literal[string] , literal[string] }:
identifier[lname] = identifier[lname] [:- literal[int] ]
keyword[if] keyword[not] identifier[german] keyword[and] identifier[lname] [- literal[int] :] keyword[in] { literal[string] , literal[string] }:
identifier[lname] = identifier[lname] [:- literal[int] ]+ literal[string] + identifier[lname] [- literal[int] :]
identifier[lname] = identifier[lname] . identifier[replace] ( literal[string] , literal[string] )
identifier[lname] = identifier[lname] . identifier[replace] ( literal[string] , literal[string] )
identifier[lname] = identifier[lname] . identifier[replace] ( literal[string] , literal[string] )
identifier[lname] = identifier[lname] . identifier[replace] ( literal[string] , literal[string] )
identifier[lname] = identifier[lname] . identifier[replace] ( literal[string] , literal[string] )
identifier[lname] = identifier[lname] . identifier[replace] ( literal[string] , literal[string] )
identifier[lname] = identifier[lname] . identifier[replace] ( literal[string] , literal[string] )
identifier[lname] = identifier[lname] . identifier[replace] ( literal[string] , literal[string] )
identifier[lname] = identifier[lname] . identifier[replace] ( literal[string] , literal[string] )
identifier[lname] = identifier[lname] . identifier[translate] ( identifier[self] . identifier[_trans] )
identifier[lname] = identifier[self] . identifier[_delete_consecutive_repeats] ( identifier[lname] )
identifier[code] += identifier[lname] [ literal[int] :]
identifier[code] = identifier[code] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[max_length] !=- literal[int] :
keyword[if] identifier[len] ( identifier[code] )< identifier[max_length] :
identifier[code] += literal[string] *( identifier[max_length] - identifier[len] ( identifier[code] ))
keyword[else] :
identifier[code] = identifier[code] [: identifier[max_length] ]
keyword[return] identifier[code] | def encode(self, lname, max_length=4, german=False):
"""Calculate the PSHP Soundex/Viewex Coding of a last name.
Parameters
----------
lname : str
The last name to encode
max_length : int
The length of the code returned (defaults to 4)
german : bool
Set to True if the name is German (different rules apply)
Returns
-------
str
The PSHP Soundex/Viewex Coding
Examples
--------
>>> pe = PSHPSoundexLast()
>>> pe.encode('Smith')
'S530'
>>> pe.encode('Waters')
'W350'
>>> pe.encode('James')
'J500'
>>> pe.encode('Schmidt')
'S530'
>>> pe.encode('Ashcroft')
'A225'
"""
lname = unicode_normalize('NFKD', text_type(lname.upper()))
lname = lname.replace('ß', 'SS')
lname = ''.join((c for c in lname if c in self._uc_set))
# A. Prefix treatment
if lname[:3] == 'VON' or lname[:3] == 'VAN':
lname = lname[3:].strip() # depends on [control=['if'], data=[]]
# The rule implemented below says "MC, MAC become 1". I believe it
# meant to say they become M except in German data (where superscripted
# 1 indicates "except in German data"). It doesn't make sense for them
# to become 1 (BPFV -> 1) or to apply outside German. Unfortunately,
# both articles have this error(?).
if not german:
if lname[:3] == 'MAC':
lname = 'M' + lname[3:] # depends on [control=['if'], data=[]]
elif lname[:2] == 'MC':
lname = 'M' + lname[2:] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# The non-German-only rule to strip ' is unnecessary due to filtering
if lname[:1] in {'E', 'I', 'O', 'U'}:
lname = 'A' + lname[1:] # depends on [control=['if'], data=[]]
elif lname[:2] in {'GE', 'GI', 'GY'}:
lname = 'J' + lname[1:] # depends on [control=['if'], data=[]]
elif lname[:2] in {'CE', 'CI', 'CY'}:
lname = 'S' + lname[1:] # depends on [control=['if'], data=[]]
elif lname[:3] == 'CHR':
lname = 'K' + lname[1:] # depends on [control=['if'], data=[]]
elif lname[:1] == 'C' and lname[:2] != 'CH':
lname = 'K' + lname[1:] # depends on [control=['if'], data=[]]
if lname[:2] == 'KN':
lname = 'N' + lname[1:] # depends on [control=['if'], data=[]]
elif lname[:2] == 'PH':
lname = 'F' + lname[1:] # depends on [control=['if'], data=[]]
elif lname[:3] in {'WIE', 'WEI'}:
lname = 'V' + lname[1:] # depends on [control=['if'], data=[]]
if german and lname[:1] in {'W', 'M', 'Y', 'Z'}:
lname = {'W': 'V', 'M': 'N', 'Y': 'J', 'Z': 'S'}[lname[0]] + lname[1:] # depends on [control=['if'], data=[]]
code = lname[:1]
# B. Postfix treatment
if german: # moved from end of postfix treatment due to blocking
if lname[-3:] == 'TES':
lname = lname[:-3] # depends on [control=['if'], data=[]]
elif lname[-2:] == 'TS':
lname = lname[:-2] # depends on [control=['if'], data=[]]
if lname[-3:] == 'TZE':
lname = lname[:-3] # depends on [control=['if'], data=[]]
elif lname[-2:] == 'ZE':
lname = lname[:-2] # depends on [control=['if'], data=[]]
if lname[-1:] == 'Z':
lname = lname[:-1] # depends on [control=['if'], data=[]]
elif lname[-2:] == 'TE':
lname = lname[:-2] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if lname[-1:] == 'R':
lname = lname[:-1] + 'N' # depends on [control=['if'], data=[]]
elif lname[-2:] in {'SE', 'CE'}:
lname = lname[:-2] # depends on [control=['if'], data=[]]
if lname[-2:] == 'SS':
lname = lname[:-2] # depends on [control=['if'], data=[]]
elif lname[-1:] == 'S':
lname = lname[:-1] # depends on [control=['if'], data=[]]
if not german:
l5_repl = {'STOWN': 'SAWON', 'MPSON': 'MASON'}
l4_repl = {'NSEN': 'ASEN', 'MSON': 'ASON', 'STEN': 'SAEN', 'STON': 'SAON'}
if lname[-5:] in l5_repl:
lname = lname[:-5] + l5_repl[lname[-5:]] # depends on [control=['if'], data=['l5_repl']]
elif lname[-4:] in l4_repl:
lname = lname[:-4] + l4_repl[lname[-4:]] # depends on [control=['if'], data=['l4_repl']] # depends on [control=['if'], data=[]]
if lname[-2:] in {'NG', 'ND'}:
lname = lname[:-1] # depends on [control=['if'], data=[]]
if not german and lname[-3:] in {'GAN', 'GEN'}:
lname = lname[:-3] + 'A' + lname[-2:] # depends on [control=['if'], data=[]]
# C. Infix Treatment
lname = lname.replace('CK', 'C')
lname = lname.replace('SCH', 'S')
lname = lname.replace('DT', 'T')
lname = lname.replace('ND', 'N')
lname = lname.replace('NG', 'N')
lname = lname.replace('LM', 'M')
lname = lname.replace('MN', 'M')
lname = lname.replace('WIE', 'VIE')
lname = lname.replace('WEI', 'VEI')
# D. Soundexing
# code for X & Y are unspecified, but presumably are 2 & 0
lname = lname.translate(self._trans)
lname = self._delete_consecutive_repeats(lname)
code += lname[1:]
code = code.replace('0', '') # rule 1
if max_length != -1:
if len(code) < max_length:
code += '0' * (max_length - len(code)) # depends on [control=['if'], data=['max_length']]
else:
code = code[:max_length] # depends on [control=['if'], data=['max_length']]
return code |
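
# Usage sketch for the coder above, assuming it is the PSHPSoundexLast class
# shipped with the abydos library (the docstring examples suggest as much);
# german=True switches on the German prefix/postfix rule set.
from abydos.phonetic import PSHPSoundexLast

pe = PSHPSoundexLast()
assert pe.encode('Schmidt') == 'S530'
assert pe.encode('Ashcroft') == 'A225'
print(pe.encode('Schmidt', german=True))  # same name under the German rules
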
def concat(a, b):
"Same as a + b, for a and b sequences."
if not hasattr(a, '__getitem__'):
msg = "'%s' object can't be concatenated" % type(a).__name__
raise TypeError(msg)
return a + b | def function[concat, parameter[a, b]]:
constant[Same as a + b, for a and b sequences.]
if <ast.UnaryOp object at 0x7da18dc98250> begin[:]
variable[msg] assign[=] binary_operation[constant['%s' object can't be concatenated] <ast.Mod object at 0x7da2590d6920> call[name[type], parameter[name[a]]].__name__]
<ast.Raise object at 0x7da18dc9afb0>
return[binary_operation[name[a] + name[b]]] | keyword[def] identifier[concat] ( identifier[a] , identifier[b] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[a] , literal[string] ):
identifier[msg] = literal[string] % identifier[type] ( identifier[a] ). identifier[__name__]
keyword[raise] identifier[TypeError] ( identifier[msg] )
keyword[return] identifier[a] + identifier[b] | def concat(a, b):
"""Same as a + b, for a and b sequences."""
if not hasattr(a, '__getitem__'):
msg = "'%s' object can't be concatenated" % type(a).__name__
raise TypeError(msg) # depends on [control=['if'], data=[]]
return a + b |
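
# The __getitem__ guard above mirrors operator.concat from the standard
# library: bare '+' would happily add numbers, so the check restricts the
# helper to sequences.
assert concat([1, 2], [3]) == [1, 2, 3]
assert concat('ab', 'cd') == 'abcd'
try:
    concat(1, 2)  # ints have no __getitem__
except TypeError as exc:
    print(exc)    # "'int' object can't be concatenated"
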
def get_samples_live_last(self, sensor_id):
"""Get the last sample recorded by the sensor.
Args:
sensor_id (string): hexadecimal id of the sensor to query, e.g.
``0x0013A20040B65FAD``
Returns:
list: dictionary objects containing sample data
"""
url = "https://api.neur.io/v1/samples/live/last"
headers = self.__gen_headers()
headers["Content-Type"] = "application/json"
params = { "sensorId": sensor_id }
url = self.__append_url_params(url, params)
r = requests.get(url, headers=headers)
return r.json() | def function[get_samples_live_last, parameter[self, sensor_id]]:
constant[Get the last sample recorded by the sensor.
Args:
sensor_id (string): hexadecimal id of the sensor to query, e.g.
``0x0013A20040B65FAD``
Returns:
list: dictionary objects containing sample data
]
variable[url] assign[=] constant[https://api.neur.io/v1/samples/live/last]
variable[headers] assign[=] call[name[self].__gen_headers, parameter[]]
call[name[headers]][constant[Content-Type]] assign[=] constant[application/json]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b0c65060>], [<ast.Name object at 0x7da1b0c64100>]]
variable[url] assign[=] call[name[self].__append_url_params, parameter[name[url], name[params]]]
variable[r] assign[=] call[name[requests].get, parameter[name[url]]]
return[call[name[r].json, parameter[]]] | keyword[def] identifier[get_samples_live_last] ( identifier[self] , identifier[sensor_id] ):
literal[string]
identifier[url] = literal[string]
identifier[headers] = identifier[self] . identifier[__gen_headers] ()
identifier[headers] [ literal[string] ]= literal[string]
identifier[params] ={ literal[string] : identifier[sensor_id] }
identifier[url] = identifier[self] . identifier[__append_url_params] ( identifier[url] , identifier[params] )
identifier[r] = identifier[requests] . identifier[get] ( identifier[url] , identifier[headers] = identifier[headers] )
keyword[return] identifier[r] . identifier[json] () | def get_samples_live_last(self, sensor_id):
"""Get the last sample recorded by the sensor.
Args:
sensor_id (string): hexadecimal id of the sensor to query, e.g.
``0x0013A20040B65FAD``
Returns:
list: dictionary objects containing sample data
"""
url = 'https://api.neur.io/v1/samples/live/last'
headers = self.__gen_headers()
headers['Content-Type'] = 'application/json'
params = {'sensorId': sensor_id}
url = self.__append_url_params(url, params)
r = requests.get(url, headers=headers)
return r.json() |
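
# Design note on the query handling above: requests can encode query parameters
# itself via params=, which removes the manual __append_url_params step. A
# minimal equivalent sketch; the token handling here is a hypothetical stand-in
# for whatever __gen_headers produces.
import requests

def get_last_sample(sensor_id, token):
    r = requests.get(
        'https://api.neur.io/v1/samples/live/last',
        headers={'Authorization': 'Bearer %s' % token,
                 'Content-Type': 'application/json'},
        params={'sensorId': sensor_id},  # encoded into the URL by requests
    )
    r.raise_for_status()
    return r.json()
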
def _validate_iss(claims, issuer=None):
"""Validates that the 'iss' claim is valid.
The "iss" (issuer) claim identifies the principal that issued the
JWT. The processing of this claim is generally application specific.
The "iss" value is a case-sensitive string containing a StringOrURI
value. Use of this claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
issuer (str or iterable): Acceptable value(s) for the issuer that
signed the token.
"""
if issuer is not None:
if isinstance(issuer, string_types):
issuer = (issuer,)
if claims.get('iss') not in issuer:
raise JWTClaimsError('Invalid issuer') | def function[_validate_iss, parameter[claims, issuer]]:
constant[Validates that the 'iss' claim is valid.
The "iss" (issuer) claim identifies the principal that issued the
JWT. The processing of this claim is generally application specific.
The "iss" value is a case-sensitive string containing a StringOrURI
value. Use of this claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
issuer (str or iterable): Acceptable value(s) for the issuer that
signed the token.
]
if compare[name[issuer] is_not constant[None]] begin[:]
if call[name[isinstance], parameter[name[issuer], name[string_types]]] begin[:]
variable[issuer] assign[=] tuple[[<ast.Name object at 0x7da18eb55c00>]]
if compare[call[name[claims].get, parameter[constant[iss]]] <ast.NotIn object at 0x7da2590d7190> name[issuer]] begin[:]
<ast.Raise object at 0x7da18eb578e0> | keyword[def] identifier[_validate_iss] ( identifier[claims] , identifier[issuer] = keyword[None] ):
literal[string]
keyword[if] identifier[issuer] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[issuer] , identifier[string_types] ):
identifier[issuer] =( identifier[issuer] ,)
keyword[if] identifier[claims] . identifier[get] ( literal[string] ) keyword[not] keyword[in] identifier[issuer] :
keyword[raise] identifier[JWTClaimsError] ( literal[string] ) | def _validate_iss(claims, issuer=None):
"""Validates that the 'iss' claim is valid.
The "iss" (issuer) claim identifies the principal that issued the
JWT. The processing of this claim is generally application specific.
The "iss" value is a case-sensitive string containing a StringOrURI
value. Use of this claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
issuer (str or iterable): Acceptable value(s) for the issuer that
signed the token.
"""
if issuer is not None:
if isinstance(issuer, string_types):
issuer = (issuer,) # depends on [control=['if'], data=[]]
if claims.get('iss') not in issuer:
raise JWTClaimsError('Invalid issuer') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['issuer']] |
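
# Usage sketch for the validator above, assuming the surrounding module's
# imports are in scope (python-jose's JWTClaimsError, six's string_types).
claims = {'iss': 'https://issuer.example'}
_validate_iss(claims, issuer='https://issuer.example')  # single issuer: passes
_validate_iss(claims, issuer=('https://a.example', 'https://issuer.example'))  # iterable: passes
try:
    _validate_iss(claims, issuer='https://other.example')
except JWTClaimsError:
    print('rejected: iss not in the accepted issuer set')
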
def flatten_and_batch_shift_indices(indices: torch.Tensor,
sequence_length: int) -> torch.Tensor:
"""
This is a subroutine for :func:`~batched_index_select`. The given ``indices`` of size
``(batch_size, d_1, ..., d_n)`` indexes into dimension 2 of a target tensor, which has size
``(batch_size, sequence_length, embedding_size)``. This function returns a vector that
correctly indexes into the flattened target. The sequence length of the target must be
provided to compute the appropriate offsets.
.. code-block:: python
indices = torch.ones([2,3], dtype=torch.long)
# Sequence length of the target tensor.
sequence_length = 10
shifted_indices = flatten_and_batch_shift_indices(indices, sequence_length)
# Indices into the second element in the batch are correctly shifted
# to take into account that the target tensor will be flattened before
# the indices are applied.
assert shifted_indices == [1, 1, 1, 11, 11, 11]
Parameters
----------
indices : ``torch.LongTensor``, required.
sequence_length : ``int``, required.
The length of the sequence the indices index into.
This must be the second dimension of the tensor.
Returns
-------
offset_indices : ``torch.LongTensor``
"""
# Shape: (batch_size)
offsets = get_range_vector(indices.size(0), get_device_of(indices)) * sequence_length
for _ in range(len(indices.size()) - 1):
offsets = offsets.unsqueeze(1)
# Shape: (batch_size, d_1, ..., d_n)
offset_indices = indices + offsets
# Shape: (batch_size * d_1 * ... * d_n)
offset_indices = offset_indices.view(-1)
return offset_indices | def function[flatten_and_batch_shift_indices, parameter[indices, sequence_length]]:
constant[
This is a subroutine for :func:`~batched_index_select`. The given ``indices`` of size
``(batch_size, d_1, ..., d_n)`` indexes into dimension 2 of a target tensor, which has size
``(batch_size, sequence_length, embedding_size)``. This function returns a vector that
correctly indexes into the flattened target. The sequence length of the target must be
provided to compute the appropriate offsets.
.. code-block:: python
indices = torch.ones([2,3], dtype=torch.long)
# Sequence length of the target tensor.
sequence_length = 10
shifted_indices = flatten_and_batch_shift_indices(indices, sequence_length)
# Indices into the second element in the batch are correctly shifted
# to take into account that the target tensor will be flattened before
# the indices are applied.
assert shifted_indices == [1, 1, 1, 11, 11, 11]
Parameters
----------
indices : ``torch.LongTensor``, required.
sequence_length : ``int``, required.
The length of the sequence the indices index into.
This must be the second dimension of the tensor.
Returns
-------
offset_indices : ``torch.LongTensor``
]
variable[offsets] assign[=] binary_operation[call[name[get_range_vector], parameter[call[name[indices].size, parameter[constant[0]]], call[name[get_device_of], parameter[name[indices]]]]] * name[sequence_length]]
for taget[name[_]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[call[name[indices].size, parameter[]]]] - constant[1]]]]] begin[:]
variable[offsets] assign[=] call[name[offsets].unsqueeze, parameter[constant[1]]]
variable[offset_indices] assign[=] binary_operation[name[indices] + name[offsets]]
variable[offset_indices] assign[=] call[name[offset_indices].view, parameter[<ast.UnaryOp object at 0x7da20c990b50>]]
return[name[offset_indices]] | keyword[def] identifier[flatten_and_batch_shift_indices] ( identifier[indices] : identifier[torch] . identifier[Tensor] ,
identifier[sequence_length] : identifier[int] )-> identifier[torch] . identifier[Tensor] :
literal[string]
identifier[offsets] = identifier[get_range_vector] ( identifier[indices] . identifier[size] ( literal[int] ), identifier[get_device_of] ( identifier[indices] ))* identifier[sequence_length]
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[len] ( identifier[indices] . identifier[size] ())- literal[int] ):
identifier[offsets] = identifier[offsets] . identifier[unsqueeze] ( literal[int] )
identifier[offset_indices] = identifier[indices] + identifier[offsets]
identifier[offset_indices] = identifier[offset_indices] . identifier[view] (- literal[int] )
keyword[return] identifier[offset_indices] | def flatten_and_batch_shift_indices(indices: torch.Tensor, sequence_length: int) -> torch.Tensor:
"""
This is a subroutine for :func:`~batched_index_select`. The given ``indices`` of size
``(batch_size, d_1, ..., d_n)`` indexes into dimension 2 of a target tensor, which has size
``(batch_size, sequence_length, embedding_size)``. This function returns a vector that
correctly indexes into the flattened target. The sequence length of the target must be
provided to compute the appropriate offsets.
.. code-block:: python
indices = torch.ones([2,3], dtype=torch.long)
# Sequence length of the target tensor.
sequence_length = 10
shifted_indices = flatten_and_batch_shift_indices(indices, sequence_length)
# Indices into the second element in the batch are correctly shifted
# to take into account that the target tensor will be flattened before
# the indices are applied.
assert shifted_indices == [1, 1, 1, 11, 11, 11]
Parameters
----------
indices : ``torch.LongTensor``, required.
sequence_length : ``int``, required.
The length of the sequence the indices index into.
This must be the second dimension of the tensor.
Returns
-------
offset_indices : ``torch.LongTensor``
"""
# Shape: (batch_size)
offsets = get_range_vector(indices.size(0), get_device_of(indices)) * sequence_length
for _ in range(len(indices.size()) - 1):
offsets = offsets.unsqueeze(1) # depends on [control=['for'], data=[]]
# Shape: (batch_size, d_1, ..., d_n)
offset_indices = indices + offsets
# Shape: (batch_size * d_1 * ... * d_n)
offset_indices = offset_indices.view(-1)
return offset_indices |
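
# Worked example for the helper above, reproducing the docstring numbers and
# showing the follow-up gather; it assumes the module context that defines
# get_range_vector/get_device_of (AllenNLP-style utilities).
import torch

indices = torch.ones([2, 3], dtype=torch.long)    # batch of 2, three picks each
shifted = flatten_and_batch_shift_indices(indices, sequence_length=10)
assert shifted.tolist() == [1, 1, 1, 11, 11, 11]

target = torch.arange(2 * 10 * 4).view(2, 10, 4)  # (batch, sequence, embedding)
flat_target = target.view(-1, 4)                  # (batch * sequence, embedding)
selected = flat_target.index_select(0, shifted).view(2, 3, 4)
assert torch.equal(selected[0, 0], target[0, 1])  # row 1 of batch 0
assert torch.equal(selected[1, 0], target[1, 1])  # row 1 of batch 1
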
def removedIssuesEstimateSum(self, board_id, sprint_id):
"""Return the total incompleted points this sprint."""
return self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)['contents']['puntedIssuesEstimateSum']['value'] | def function[removedIssuesEstimateSum, parameter[self, board_id, sprint_id]]:
constant[Return the total incompleted points this sprint.]
return[call[call[call[call[name[self]._get_json, parameter[binary_operation[constant[rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1f977c0>, <ast.Name object at 0x7da1b1f95240>]]]]]][constant[contents]]][constant[puntedIssuesEstimateSum]]][constant[value]]] | keyword[def] identifier[removedIssuesEstimateSum] ( identifier[self] , identifier[board_id] , identifier[sprint_id] ):
literal[string]
keyword[return] identifier[self] . identifier[_get_json] ( literal[string] %( identifier[board_id] , identifier[sprint_id] ),
identifier[base] = identifier[self] . identifier[AGILE_BASE_URL] )[ literal[string] ][ literal[string] ][ literal[string] ] | def removedIssuesEstimateSum(self, board_id, sprint_id):
"""Return the total incompleted points this sprint."""
return self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id), base=self.AGILE_BASE_URL)['contents']['puntedIssuesEstimateSum']['value'] |
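
# Shape sketch of the GreenHopper sprintreport payload that the method above
# navigates; the numbers are hypothetical, only the key path matters.
payload = {
    'contents': {
        'puntedIssuesEstimateSum': {'value': 13.0, 'text': '13.0'},
    }
}
assert payload['contents']['puntedIssuesEstimateSum']['value'] == 13.0
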
def min_var_portfolio(cov_mat, allow_short=False):
"""
Computes the minimum variance portfolio.
Note: As the variance is not invariant with respect
to leverage, it is not possible to construct non-trivial
market neutral minimum variance portfolios. This is because
the variance approaches zero with decreasing leverage,
i.e. the market neutral portfolio with minimum variance
is not invested at all.
Parameters
----------
cov_mat: pandas.DataFrame
Covariance matrix of asset returns.
allow_short: bool, optional
If 'False' construct a long-only portfolio.
If 'True' allow shorting, i.e. negative weights.
Returns
-------
weights: pandas.Series
Optimal asset weights.
"""
if not isinstance(cov_mat, pd.DataFrame):
raise ValueError("Covariance matrix is not a DataFrame")
n = len(cov_mat)
P = opt.matrix(cov_mat.values)
q = opt.matrix(0.0, (n, 1))
# Constraints Gx <= h
if not allow_short:
# x >= 0
G = opt.matrix(-np.identity(n))
h = opt.matrix(0.0, (n, 1))
else:
G = None
h = None
# Constraints Ax = b
# sum(x) = 1
A = opt.matrix(1.0, (1, n))
b = opt.matrix(1.0)
# Solve
optsolvers.options['show_progress'] = False
sol = optsolvers.qp(P, q, G, h, A, b)
if sol['status'] != 'optimal':
warnings.warn("Convergence problem")
# Put weights into a labeled series
weights = pd.Series(sol['x'], index=cov_mat.index)
return weights | def function[min_var_portfolio, parameter[cov_mat, allow_short]]:
constant[
Computes the minimum variance portfolio.
Note: As the variance is not invariant with respect
to leverage, it is not possible to construct non-trivial
market neutral minimum variance portfolios. This is because
the variance approaches zero with decreasing leverage,
i.e. the market neutral portfolio with minimum variance
is not invested at all.
Parameters
----------
cov_mat: pandas.DataFrame
Covariance matrix of asset returns.
allow_short: bool, optional
If 'False' construct a long-only portfolio.
If 'True' allow shorting, i.e. negative weights.
Returns
-------
weights: pandas.Series
Optimal asset weights.
]
if <ast.UnaryOp object at 0x7da18bc70910> begin[:]
<ast.Raise object at 0x7da18bc73f10>
variable[n] assign[=] call[name[len], parameter[name[cov_mat]]]
variable[P] assign[=] call[name[opt].matrix, parameter[name[cov_mat].values]]
variable[q] assign[=] call[name[opt].matrix, parameter[constant[0.0], tuple[[<ast.Name object at 0x7da18bc71d80>, <ast.Constant object at 0x7da18bc70730>]]]]
if <ast.UnaryOp object at 0x7da18bc71900> begin[:]
variable[G] assign[=] call[name[opt].matrix, parameter[<ast.UnaryOp object at 0x7da18bc73940>]]
variable[h] assign[=] call[name[opt].matrix, parameter[constant[0.0], tuple[[<ast.Name object at 0x7da18bc72c80>, <ast.Constant object at 0x7da18bc70f40>]]]]
variable[A] assign[=] call[name[opt].matrix, parameter[constant[1.0], tuple[[<ast.Constant object at 0x7da18bc73d00>, <ast.Name object at 0x7da18bc72710>]]]]
variable[b] assign[=] call[name[opt].matrix, parameter[constant[1.0]]]
call[name[optsolvers].options][constant[show_progress]] assign[=] constant[False]
variable[sol] assign[=] call[name[optsolvers].qp, parameter[name[P], name[q], name[G], name[h], name[A], name[b]]]
if compare[call[name[sol]][constant[status]] not_equal[!=] constant[optimal]] begin[:]
call[name[warnings].warn, parameter[constant[Convergence problem]]]
variable[weights] assign[=] call[name[pd].Series, parameter[call[name[sol]][constant[x]]]]
return[name[weights]] | keyword[def] identifier[min_var_portfolio] ( identifier[cov_mat] , identifier[allow_short] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[cov_mat] , identifier[pd] . identifier[DataFrame] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[n] = identifier[len] ( identifier[cov_mat] )
identifier[P] = identifier[opt] . identifier[matrix] ( identifier[cov_mat] . identifier[values] )
identifier[q] = identifier[opt] . identifier[matrix] ( literal[int] ,( identifier[n] , literal[int] ))
keyword[if] keyword[not] identifier[allow_short] :
identifier[G] = identifier[opt] . identifier[matrix] (- identifier[np] . identifier[identity] ( identifier[n] ))
identifier[h] = identifier[opt] . identifier[matrix] ( literal[int] ,( identifier[n] , literal[int] ))
keyword[else] :
identifier[G] = keyword[None]
identifier[h] = keyword[None]
identifier[A] = identifier[opt] . identifier[matrix] ( literal[int] ,( literal[int] , identifier[n] ))
identifier[b] = identifier[opt] . identifier[matrix] ( literal[int] )
identifier[optsolvers] . identifier[options] [ literal[string] ]= keyword[False]
identifier[sol] = identifier[optsolvers] . identifier[qp] ( identifier[P] , identifier[q] , identifier[G] , identifier[h] , identifier[A] , identifier[b] )
keyword[if] identifier[sol] [ literal[string] ]!= literal[string] :
identifier[warnings] . identifier[warn] ( literal[string] )
identifier[weights] = identifier[pd] . identifier[Series] ( identifier[sol] [ literal[string] ], identifier[index] = identifier[cov_mat] . identifier[index] )
keyword[return] identifier[weights] | def min_var_portfolio(cov_mat, allow_short=False):
"""
Computes the minimum variance portfolio.
Note: As the variance is not invariant with respect
to leverage, it is not possible to construct non-trivial
market neutral minimum variance portfolios. This is because
the variance approaches zero with decreasing leverage,
i.e. the market neutral portfolio with minimum variance
is not invested at all.
Parameters
----------
cov_mat: pandas.DataFrame
Covariance matrix of asset returns.
allow_short: bool, optional
If 'False' construct a long-only portfolio.
If 'True' allow shorting, i.e. negative weights.
Returns
-------
weights: pandas.Series
Optimal asset weights.
"""
if not isinstance(cov_mat, pd.DataFrame):
raise ValueError('Covariance matrix is not a DataFrame') # depends on [control=['if'], data=[]]
n = len(cov_mat)
P = opt.matrix(cov_mat.values)
q = opt.matrix(0.0, (n, 1))
# Constraints Gx <= h
if not allow_short:
# x >= 0
G = opt.matrix(-np.identity(n))
h = opt.matrix(0.0, (n, 1)) # depends on [control=['if'], data=[]]
else:
G = None
h = None
# Constraints Ax = b
# sum(x) = 1
A = opt.matrix(1.0, (1, n))
b = opt.matrix(1.0)
# Solve
optsolvers.options['show_progress'] = False
sol = optsolvers.qp(P, q, G, h, A, b)
if sol['status'] != 'optimal':
warnings.warn('Convergence problem') # depends on [control=['if'], data=[]]
# Put weights into a labeled series
weights = pd.Series(sol['x'], index=cov_mat.index)
return weights |
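A quick sanity check of min_var_portfolio with a toy two-asset covariance matrix; this assumes the module-level imports used in the body (cvxopt as opt/optsolvers, numpy as np, pandas as pd) are in place.

import numpy as np
import pandas as pd

cov = pd.DataFrame([[0.04, 0.006], [0.006, 0.09]], index=["A", "B"], columns=["A", "B"])
w_long = min_var_portfolio(cov)                     # long-only, weights sum to 1
w_short = min_var_portfolio(cov, allow_short=True)  # weights may go negative
print(w_long.round(3))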
def update_unnamed_class(decls):
"""
Adds name to class_t declarations.
If CastXML is being used, the type definitions with an unnamed
class/struct are split across two nodes in the XML tree. For example,
typedef struct {} cls;
produces
<Struct id="_7" name="" context="_1" .../>
<Typedef id="_8" name="cls" type="_7" context="_1" .../>
For each typedef, we look at which class it refers to, and update the name
accordingly. This helps the matcher classes find these declarations.
This was the behaviour with gccxml too, so this is important for
backward compatibility.
If the castxml epic version 1 is used, there is even an elaborated type
declaration between the typedef and the struct/class, that also needs to be
taken care of.
Args:
decls (list[declaration_t]): a list of declarations to be patched.
Returns:
None
"""
for decl in decls:
if isinstance(decl, declarations.typedef_t):
referent = decl.decl_type
if isinstance(referent, declarations.elaborated_t):
referent = referent.base
if not isinstance(referent, declarations.declarated_t):
continue
referent = referent.declaration
if referent.name or not isinstance(referent, declarations.class_t):
continue
referent.name = decl.name | def function[update_unnamed_class, parameter[decls]]:
constant[
Adds name to class_t declarations.
If CastXML is being used, the type definitions with an unnamed
class/struct are split across two nodes in the XML tree. For example,
typedef struct {} cls;
produces
<Struct id="_7" name="" context="_1" .../>
<Typedef id="_8" name="cls" type="_7" context="_1" .../>
For each typedef, we look at which class it refers to, and update the name
accordingly. This helps the matcher classes find these declarations.
This was the behaviour with gccxml too, so this is important for
backward compatibility.
If the castxml epic version 1 is used, there is even an elaborated type
declaration between the typedef and the struct/class, that also needs to be
taken care of.
Args:
decls (list[declaration_t]): a list of declarations to be patched.
Returns:
None
]
for taget[name[decl]] in starred[name[decls]] begin[:]
if call[name[isinstance], parameter[name[decl], name[declarations].typedef_t]] begin[:]
variable[referent] assign[=] name[decl].decl_type
if call[name[isinstance], parameter[name[referent], name[declarations].elaborated_t]] begin[:]
variable[referent] assign[=] name[referent].base
if <ast.UnaryOp object at 0x7da1b13a6ec0> begin[:]
continue
variable[referent] assign[=] name[referent].declaration
if <ast.BoolOp object at 0x7da1b13a7be0> begin[:]
continue
name[referent].name assign[=] name[decl].name | keyword[def] identifier[update_unnamed_class] ( identifier[decls] ):
literal[string]
keyword[for] identifier[decl] keyword[in] identifier[decls] :
keyword[if] identifier[isinstance] ( identifier[decl] , identifier[declarations] . identifier[typedef_t] ):
identifier[referent] = identifier[decl] . identifier[decl_type]
keyword[if] identifier[isinstance] ( identifier[referent] , identifier[declarations] . identifier[elaborated_t] ):
identifier[referent] = identifier[referent] . identifier[base]
keyword[if] keyword[not] identifier[isinstance] ( identifier[referent] , identifier[declarations] . identifier[declarated_t] ):
keyword[continue]
identifier[referent] = identifier[referent] . identifier[declaration]
keyword[if] identifier[referent] . identifier[name] keyword[or] keyword[not] identifier[isinstance] ( identifier[referent] , identifier[declarations] . identifier[class_t] ):
keyword[continue]
identifier[referent] . identifier[name] = identifier[decl] . identifier[name] | def update_unnamed_class(decls):
"""
Adds name to class_t declarations.
If CastXML is being used, the type definitions with an unnamed
class/struct are split across two nodes in the XML tree. For example,
typedef struct {} cls;
produces
<Struct id="_7" name="" context="_1" .../>
<Typedef id="_8" name="cls" type="_7" context="_1" .../>
For each typedef, we look at which class it refers to, and update the name
accordingly. This helps the matcher classes find these declarations.
This was the behaviour with gccxml too, so this is important for
backward compatibility.
If the castxml epic version 1 is used, there is even an elaborated type
declaration between the typedef and the struct/class, that also needs to be
taken care of.
Args:
decls (list[declaration_t]): a list of declarations to be patched.
Returns:
None
"""
for decl in decls:
if isinstance(decl, declarations.typedef_t):
referent = decl.decl_type
if isinstance(referent, declarations.elaborated_t):
referent = referent.base # depends on [control=['if'], data=[]]
if not isinstance(referent, declarations.declarated_t):
continue # depends on [control=['if'], data=[]]
referent = referent.declaration
if referent.name or not isinstance(referent, declarations.class_t):
continue # depends on [control=['if'], data=[]]
referent.name = decl.name # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['decl']] |
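A conceptual sketch of the patch with stand-in objects; the real function type-checks against pygccxml's declarations classes, so the fakes below only mirror the final assignment, not the isinstance guards.

class FakeClass:                 # plays declarations.class_t
    name = ""

class FakeTypedef:               # plays declarations.typedef_t
    def __init__(self, name, referent):
        self.name, self.referent = name, referent

anon_struct = FakeClass()                 # <Struct name="" .../>
td = FakeTypedef("cls", anon_struct)      # <Typedef name="cls" .../>
anon_struct.name = td.name                # mirrors `referent.name = decl.name`
assert anon_struct.name == "cls"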
def get_token_details(self, show_listing_details=False, show_inactive=False):
"""
Function to fetch the tokens available to trade on the Switcheo exchange.
Execution of this function is as follows::
get_token_details()
get_token_details(show_listing_details=True)
get_token_details(show_inactive=True)
get_token_details(show_listing_details=True, show_inactive=True)
The expected return result for this function is as follows::
{
'NEO': {
'hash': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'decimals': 8
},
'GAS': {
'hash': '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7',
'decimals': 8
},
'SWTH': {
'hash': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'decimals': 8
},
...
}
:param show_listing_details: Parameter flag to indicate whether or not to show the token listing details.
:type show_listing_details: bool
:param show_inactive: Flag to return the tokens that are no longer traded on the Switcheo Exchange.
:type show_inactive: bool
:return: Dictionary in the form of a JSON message with the available tokens for trade on the Switcheo exchange.
"""
api_params = {
"show_listing_details": show_listing_details,
"show_inactive": show_inactive
}
return self.request.get(path='/exchange/tokens', params=api_params) | def function[get_token_details, parameter[self, show_listing_details, show_inactive]]:
constant[
Function to fetch the tokens available to trade on the Switcheo exchange.
Execution of this function is as follows::
get_token_details()
get_token_details(show_listing_details=True)
get_token_details(show_inactive=True)
get_token_details(show_listing_details=True, show_inactive=True)
The expected return result for this function is as follows::
{
'NEO': {
'hash': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'decimals': 8
},
'GAS': {
'hash': '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7',
'decimals': 8
},
'SWTH': {
'hash': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'decimals': 8
},
...
}
:param show_listing_details: Parameter flag to indicate whether or not to show the token listing details.
:type show_listing_details: bool
:param show_inactive: Flag to return the tokens that are no longer traded on the Switcheo Exchange.
:type show_inactive: bool
:return: Dictionary in the form of a JSON message with the available tokens for trade on the Switcheo exchange.
]
variable[api_params] assign[=] dictionary[[<ast.Constant object at 0x7da2054a4d00>, <ast.Constant object at 0x7da2054a42b0>], [<ast.Name object at 0x7da2054a7460>, <ast.Name object at 0x7da2054a5c00>]]
return[call[name[self].request.get, parameter[]]] | keyword[def] identifier[get_token_details] ( identifier[self] , identifier[show_listing_details] = keyword[False] , identifier[show_inactive] = keyword[False] ):
literal[string]
identifier[api_params] ={
literal[string] : identifier[show_listing_details] ,
literal[string] : identifier[show_inactive]
}
keyword[return] identifier[self] . identifier[request] . identifier[get] ( identifier[path] = literal[string] , identifier[params] = identifier[api_params] ) | def get_token_details(self, show_listing_details=False, show_inactive=False):
"""
Function to fetch the tokens available to trade on the Switcheo exchange.
Execution of this function is as follows::
get_token_details()
get_token_details(show_listing_details=True)
get_token_details(show_inactive=True)
get_token_details(show_listing_details=True, show_inactive=True)
The expected return result for this function is as follows::
{
'NEO': {
'hash': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
'decimals': 8
},
'GAS': {
'hash': '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7',
'decimals': 8
},
'SWTH': {
'hash': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'decimals': 8
},
...
}
:param show_listing_details: Parameter flag to indicate whether or not to show the token listing details.
:type show_listing_details: bool
:param show_inactive: Flag to return the tokens that are no longer traded on the Switcheo Exchange.
:type show_inactive: bool
:return: Dictionary in the form of a JSON message with the available tokens for trade on the Switcheo exchange.
"""
api_params = {'show_listing_details': show_listing_details, 'show_inactive': show_inactive}
return self.request.get(path='/exchange/tokens', params=api_params) |
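A hypothetical call pattern for the tokens endpoint; the client class name and its no-argument construction are assumptions about the surrounding Switcheo client library.

client = PublicClient()   # assumed wrapper providing self.request
active = client.get_token_details()
everything = client.get_token_details(show_listing_details=True, show_inactive=True)
print(everything["SWTH"]["hash"], everything["SWTH"]["decimals"])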
def as_xyz_string(self):
"""
Returns a string of the form 'x, y, z', '-x, -y, z',
'-y+1/2, x+1/2, z+1/2', etc. Only works for integer rotation matrices
"""
xyz = ['x', 'y', 'z']
strings = []
# test for invalid rotation matrix
if not np.all(np.isclose(self.rotation_matrix,
np.round(self.rotation_matrix))):
warnings.warn('Rotation matrix should be integer')
return transformation_to_string(self.rotation_matrix, translation_vec=self.translation_vector, delim=", ") | def function[as_xyz_string, parameter[self]]:
constant[
Returns a string of the form 'x, y, z', '-x, -y, z',
'-y+1/2, x+1/2, z+1/2', etc. Only works for integer rotation matrices
]
variable[xyz] assign[=] list[[<ast.Constant object at 0x7da207f00d00>, <ast.Constant object at 0x7da207f035b0>, <ast.Constant object at 0x7da207f02470>]]
variable[strings] assign[=] list[[]]
if <ast.UnaryOp object at 0x7da207f007f0> begin[:]
call[name[warnings].warn, parameter[constant[Rotation matrix should be integer]]]
return[call[name[transformation_to_string], parameter[name[self].rotation_matrix]]] | keyword[def] identifier[as_xyz_string] ( identifier[self] ):
literal[string]
identifier[xyz] =[ literal[string] , literal[string] , literal[string] ]
identifier[strings] =[]
keyword[if] keyword[not] identifier[np] . identifier[all] ( identifier[np] . identifier[isclose] ( identifier[self] . identifier[rotation_matrix] ,
identifier[np] . identifier[round] ( identifier[self] . identifier[rotation_matrix] ))):
identifier[warnings] . identifier[warn] ( literal[string] )
keyword[return] identifier[transformation_to_string] ( identifier[self] . identifier[rotation_matrix] , identifier[translation_vec] = identifier[self] . identifier[translation_vector] , identifier[delim] = literal[string] ) | def as_xyz_string(self):
"""
Returns a string of the form 'x, y, z', '-x, -y, z',
'-y+1/2, x+1/2, z+1/2', etc. Only works for integer rotation matrices
"""
xyz = ['x', 'y', 'z']
strings = []
# test for invalid rotation matrix
if not np.all(np.isclose(self.rotation_matrix, np.round(self.rotation_matrix))):
warnings.warn('Rotation matrix should be integer') # depends on [control=['if'], data=[]]
return transformation_to_string(self.rotation_matrix, translation_vec=self.translation_vector, delim=', ') |
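A usage sketch with pymatgen's SymmOp constructor (a standard pymatgen entry point); the expected output is shown as a comment.

import numpy as np
from pymatgen.core.operations import SymmOp

op = SymmOp.from_rotation_and_translation(
    rotation_matrix=np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]]),
    translation_vec=[0.5, 0.5, 0.5])
print(op.as_xyz_string())   # expected: '-y+1/2, x+1/2, z+1/2'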
def _handleSmsStatusReport(self, notificationLine):
""" Handler for SMS status reports """
self.log.debug('SMS status report received')
cdsiMatch = self.CDSI_REGEX.match(notificationLine)
if cdsiMatch:
msgMemory = cdsiMatch.group(1)
msgIndex = cdsiMatch.group(2)
report = self.readStoredSms(msgIndex, msgMemory)
self.deleteStoredSms(msgIndex)
# Update sent SMS status if possible
if report.reference in self.sentSms:
self.sentSms[report.reference].report = report
if self._smsStatusReportEvent:
# A sendSms() call is waiting for this response - notify waiting thread
self._smsStatusReportEvent.set()
else:
# Nothing is waiting for this report directly - use callback
self.smsStatusReportCallback(report) | def function[_handleSmsStatusReport, parameter[self, notificationLine]]:
constant[ Handler for SMS status reports ]
call[name[self].log.debug, parameter[constant[SMS status report received]]]
variable[cdsiMatch] assign[=] call[name[self].CDSI_REGEX.match, parameter[name[notificationLine]]]
if name[cdsiMatch] begin[:]
variable[msgMemory] assign[=] call[name[cdsiMatch].group, parameter[constant[1]]]
variable[msgIndex] assign[=] call[name[cdsiMatch].group, parameter[constant[2]]]
variable[report] assign[=] call[name[self].readStoredSms, parameter[name[msgIndex], name[msgMemory]]]
call[name[self].deleteStoredSms, parameter[name[msgIndex]]]
if compare[name[report].reference in name[self].sentSms] begin[:]
call[name[self].sentSms][name[report].reference].report assign[=] name[report]
if name[self]._smsStatusReportEvent begin[:]
call[name[self]._smsStatusReportEvent.set, parameter[]] | keyword[def] identifier[_handleSmsStatusReport] ( identifier[self] , identifier[notificationLine] ):
literal[string]
identifier[self] . identifier[log] . identifier[debug] ( literal[string] )
identifier[cdsiMatch] = identifier[self] . identifier[CDSI_REGEX] . identifier[match] ( identifier[notificationLine] )
keyword[if] identifier[cdsiMatch] :
identifier[msgMemory] = identifier[cdsiMatch] . identifier[group] ( literal[int] )
identifier[msgIndex] = identifier[cdsiMatch] . identifier[group] ( literal[int] )
identifier[report] = identifier[self] . identifier[readStoredSms] ( identifier[msgIndex] , identifier[msgMemory] )
identifier[self] . identifier[deleteStoredSms] ( identifier[msgIndex] )
keyword[if] identifier[report] . identifier[reference] keyword[in] identifier[self] . identifier[sentSms] :
identifier[self] . identifier[sentSms] [ identifier[report] . identifier[reference] ]. identifier[report] = identifier[report]
keyword[if] identifier[self] . identifier[_smsStatusReportEvent] :
identifier[self] . identifier[_smsStatusReportEvent] . identifier[set] ()
keyword[else] :
identifier[self] . identifier[smsStatusReportCallback] ( identifier[report] ) | def _handleSmsStatusReport(self, notificationLine):
""" Handler for SMS status reports """
self.log.debug('SMS status report received')
cdsiMatch = self.CDSI_REGEX.match(notificationLine)
if cdsiMatch:
msgMemory = cdsiMatch.group(1)
msgIndex = cdsiMatch.group(2)
report = self.readStoredSms(msgIndex, msgMemory)
self.deleteStoredSms(msgIndex) # Update sent SMS status if possible
if report.reference in self.sentSms:
self.sentSms[report.reference].report = report # depends on [control=['if'], data=[]]
if self._smsStatusReportEvent:
# A sendSms() call is waiting for this response - notify waiting thread
self._smsStatusReportEvent.set() # depends on [control=['if'], data=[]]
else:
# Nothing is waiting for this report directly - use callback
self.smsStatusReportCallback(report) # depends on [control=['if'], data=[]] |
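A stand-alone sketch of the +CDSI unsolicited-notification parsing this handler depends on; the regex below is an illustrative assumption, not necessarily the library's own CDSI_REGEX.

import re

CDSI_REGEX = re.compile(r'\+CDSI:\s*"([^"]+)",\s*(\d+)')
match = CDSI_REGEX.match('+CDSI: "SM",3')
if match:
    msg_memory, msg_index = match.group(1), match.group(2)
    print(msg_memory, msg_index)   # SM 3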
def lines_iter(self):
'''
Yields the contents of the Dockerfile line by line, with each line stripped and decoded.
:return: generator of str
'''
# Convert unicode chars to string
byte_to_string = lambda x: x.strip().decode(u'utf-8') if isinstance(x, bytes) else x.strip()
# Read the entire contents of the Dockerfile, decoding each line, and return the result as an array
with open(self.docker_file_path, u'r') as f:
for line in f:
yield byte_to_string(line) | def function[lines_iter, parameter[self]]:
constant[
Yields the contents of the Dockerfile line by line, with each line stripped and decoded.
:return: generator of str
]
variable[byte_to_string] assign[=] <ast.Lambda object at 0x7da1b16ab070>
with call[name[open], parameter[name[self].docker_file_path, constant[r]]] begin[:]
for taget[name[line]] in starred[name[f]] begin[:]
<ast.Yield object at 0x7da18bc715d0> | keyword[def] identifier[lines_iter] ( identifier[self] ):
literal[string]
identifier[byte_to_string] = keyword[lambda] identifier[x] : identifier[x] . identifier[strip] (). identifier[decode] ( literal[string] ) keyword[if] identifier[isinstance] ( identifier[x] , identifier[bytes] ) keyword[else] identifier[x] . identifier[strip] ()
keyword[with] identifier[open] ( identifier[self] . identifier[docker_file_path] , literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[line] keyword[in] identifier[f] :
keyword[yield] identifier[byte_to_string] ( identifier[line] ) | def lines_iter(self):
"""
Yields the contents of the Dockerfile line by line, with each line stripped and decoded.
:return: generator of str
"""
# Convert unicode chars to string
byte_to_string = lambda x: x.strip().decode(u'utf-8') if isinstance(x, bytes) else x.strip()
# Read the entire contents of the Dockerfile, decoding each line, and return the result as an array
with open(self.docker_file_path, u'r') as f:
for line in f:
yield byte_to_string(line) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']] |
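A stand-alone equivalent of the generator above for a plain path, mirroring its strip-and-decode behaviour.

def iter_dockerfile_lines(path):
    # Read bytes so the utf-8 decode branch is always exercised.
    with open(path, "rb") as handle:
        for raw in handle:
            yield raw.strip().decode("utf-8")

# for line in iter_dockerfile_lines("Dockerfile"): print(line)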
def cmp_pkgrevno(package, revno, pkgcache=None):
"""Compare supplied revno with the revno of the installed package.
* 1 => Installed revno is greater than supplied arg
* 0 => Installed revno is the same as supplied arg
* -1 => Installed revno is less than supplied arg
This function imports apt_cache function from charmhelpers.fetch if
the pkgcache argument is None. Be sure to add charmhelpers.fetch if
you call this function, or pass an apt_pkg.Cache() instance.
"""
import apt_pkg
if not pkgcache:
from charmhelpers.fetch import apt_cache
pkgcache = apt_cache()
pkg = pkgcache[package]
return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) | def function[cmp_pkgrevno, parameter[package, revno, pkgcache]]:
constant[Compare supplied revno with the revno of the installed package.
* 1 => Installed revno is greater than supplied arg
* 0 => Installed revno is the same as supplied arg
* -1 => Installed revno is less than supplied arg
This function imports apt_cache function from charmhelpers.fetch if
the pkgcache argument is None. Be sure to add charmhelpers.fetch if
you call this function, or pass an apt_pkg.Cache() instance.
]
import module[apt_pkg]
if <ast.UnaryOp object at 0x7da1b121b580> begin[:]
from relative_module[charmhelpers.fetch] import module[apt_cache]
variable[pkgcache] assign[=] call[name[apt_cache], parameter[]]
variable[pkg] assign[=] call[name[pkgcache]][name[package]]
return[call[name[apt_pkg].version_compare, parameter[name[pkg].current_ver.ver_str, name[revno]]]] | keyword[def] identifier[cmp_pkgrevno] ( identifier[package] , identifier[revno] , identifier[pkgcache] = keyword[None] ):
literal[string]
keyword[import] identifier[apt_pkg]
keyword[if] keyword[not] identifier[pkgcache] :
keyword[from] identifier[charmhelpers] . identifier[fetch] keyword[import] identifier[apt_cache]
identifier[pkgcache] = identifier[apt_cache] ()
identifier[pkg] = identifier[pkgcache] [ identifier[package] ]
keyword[return] identifier[apt_pkg] . identifier[version_compare] ( identifier[pkg] . identifier[current_ver] . identifier[ver_str] , identifier[revno] ) | def cmp_pkgrevno(package, revno, pkgcache=None):
"""Compare supplied revno with the revno of the installed package.
* 1 => Installed revno is greater than supplied arg
* 0 => Installed revno is the same as supplied arg
* -1 => Installed revno is less than supplied arg
This function imports apt_cache function from charmhelpers.fetch if
the pkgcache argument is None. Be sure to add charmhelpers.fetch if
you call this function, or pass an apt_pkg.Cache() instance.
"""
import apt_pkg
if not pkgcache:
from charmhelpers.fetch import apt_cache
pkgcache = apt_cache() # depends on [control=['if'], data=[]]
pkg = pkgcache[package]
return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) |
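A typical charm-side call, assuming a deployed environment where charmhelpers and python-apt are importable and the package is installed.

if cmp_pkgrevno("openssl", "1.0.1") >= 0:
    print("installed openssl is at least 1.0.1")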
def add_disk(self, path, force_disk_indexes=True, **args):
"""Adds a disk specified by the path to the ImageParser.
:param path: The path to the disk volume
:param force_disk_indexes: If true, always uses disk indexes. If False, only uses disk indexes if this is the
second volume you add. If you plan on using this method, always leave this True.
If you add a second disk when the previous disk has no index, an error is raised.
:param args: Arguments to pass to the constructor of the Disk.
"""
if self.disks and self.disks[0].index is None:
raise DiskIndexError("First disk has no index.")
if force_disk_indexes or self.disks:
index = len(self.disks) + 1
else:
index = None
disk = Disk(self, path, index=str(index) if index else None, **args)
self.disks.append(disk)
return disk | def function[add_disk, parameter[self, path, force_disk_indexes]]:
constant[Adds a disk specified by the path to the ImageParser.
:param path: The path to the disk volume
:param force_disk_indexes: If true, always uses disk indexes. If False, only uses disk indexes if this is the
second volume you add. If you plan on using this method, always leave this True.
If you add a second disk when the previous disk has no index, an error is raised.
:param args: Arguments to pass to the constructor of the Disk.
]
if <ast.BoolOp object at 0x7da1b0406500> begin[:]
<ast.Raise object at 0x7da1b0505a80>
if <ast.BoolOp object at 0x7da1b05056f0> begin[:]
variable[index] assign[=] binary_operation[call[name[len], parameter[name[self].disks]] + constant[1]]
variable[disk] assign[=] call[name[Disk], parameter[name[self], name[path]]]
call[name[self].disks.append, parameter[name[disk]]]
return[name[disk]] | keyword[def] identifier[add_disk] ( identifier[self] , identifier[path] , identifier[force_disk_indexes] = keyword[True] ,** identifier[args] ):
literal[string]
keyword[if] identifier[self] . identifier[disks] keyword[and] identifier[self] . identifier[disks] [ literal[int] ]. identifier[index] keyword[is] keyword[None] :
keyword[raise] identifier[DiskIndexError] ( literal[string] )
keyword[if] identifier[force_disk_indexes] keyword[or] identifier[self] . identifier[disks] :
identifier[index] = identifier[len] ( identifier[self] . identifier[disks] )+ literal[int]
keyword[else] :
identifier[index] = keyword[None]
identifier[disk] = identifier[Disk] ( identifier[self] , identifier[path] , identifier[index] = identifier[str] ( identifier[index] ) keyword[if] identifier[index] keyword[else] keyword[None] ,** identifier[args] )
identifier[self] . identifier[disks] . identifier[append] ( identifier[disk] )
keyword[return] identifier[disk] | def add_disk(self, path, force_disk_indexes=True, **args):
"""Adds a disk specified by the path to the ImageParser.
:param path: The path to the disk volume
:param force_disk_indexes: If true, always uses disk indexes. If False, only uses disk indexes if this is the
second volume you add. If you plan on using this method, always leave this True.
If you add a second disk when the previous disk has no index, an error is raised.
:param args: Arguments to pass to the constructor of the Disk.
"""
if self.disks and self.disks[0].index is None:
raise DiskIndexError('First disk has no index.') # depends on [control=['if'], data=[]]
if force_disk_indexes or self.disks:
index = len(self.disks) + 1 # depends on [control=['if'], data=[]]
else:
index = None
disk = Disk(self, path, index=str(index) if index else None, **args)
self.disks.append(disk)
return disk |
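A hedged usage sketch; the no-argument ImageParser construction below is an assumption about the surrounding API, but the index behaviour follows directly from the method body.

parser = ImageParser()                           # assumed construction
first = parser.add_disk("/evidence/disk1.E01")   # index "1"
second = parser.add_disk("/evidence/disk2.E01")  # index "2"
print(first.index, second.index)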
def _render_context(self, template, block, **context):
"""
Render a block to a string with its context
"""
return u''.join(block(template.new_context(context))) | def function[_render_context, parameter[self, template, block]]:
constant[
Render a block to a string with its context
]
return[call[constant[].join, parameter[call[name[block], parameter[call[name[template].new_context, parameter[name[context]]]]]]]] | keyword[def] identifier[_render_context] ( identifier[self] , identifier[template] , identifier[block] ,** identifier[context] ):
literal[string]
keyword[return] literal[string] . identifier[join] ( identifier[block] ( identifier[template] . identifier[new_context] ( identifier[context] ))) | def _render_context(self, template, block, **context):
"""
Render a block to a string with its context
"""
return u''.join(block(template.new_context(context))) |
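A stand-alone illustration of rendering a single Jinja2 block with a fresh context, which is exactly what this helper wraps; template.blocks and Template.new_context are standard Jinja2 API.

from jinja2 import Environment

env = Environment()
tmpl = env.from_string("{% block greeting %}Hello {{ name }}!{% endblock %}")
block = tmpl.blocks["greeting"]   # a generator function over the context
print("".join(block(tmpl.new_context({"name": "world"}))))   # Hello world!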
def fail_to(future):
"""A decorator for function callbacks to catch uncaught non-async
exceptions and forward them to the given future.
The primary use for this is to catch exceptions in async callbacks and
propagate them to futures. For example, consider,
.. code-block:: python
answer = Future()
def on_done(future):
foo = bar()
answer.set_result(foo)
some_async_operation().add_done_callback(on_done)
If ``bar()`` fails, ``answer`` will never get filled with an exception or
a result. Now if we change ``on_done`` to,
.. code-block:: python
@fail_to(answer)
def on_done(future):
foo = bar()
answer.set_result(foo)
Uncaught exceptions in ``on_done`` will be caught and propagated to
``answer``. Note that ``on_done`` will return None if an exception was
caught.
:param future:
Future to which uncaught exceptions will be forwarded.
"""
assert is_future(future), 'you forgot to pass a future'
def decorator(f):
@wraps(f)
def new_f(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception:
future.set_exc_info(sys.exc_info())
return new_f
return decorator | def function[fail_to, parameter[future]]:
constant[A decorator for function callbacks to catch uncaught non-async
exceptions and forward them to the given future.
The primary use for this is to catch exceptions in async callbacks and
propagate them to futures. For example, consider,
.. code-block:: python
answer = Future()
def on_done(future):
foo = bar()
answer.set_result(foo)
some_async_operation().add_done_callback(on_done)
If ``bar()`` fails, ``answer`` will never get filled with an exception or
a result. Now if we change ``on_done`` to,
.. code-block:: python
@fail_to(answer)
def on_done(future):
foo = bar()
answer.set_result(foo)
Uncaught exceptions in ``on_done`` will be caught and propagated to
``answer``. Note that ``on_done`` will return None if an exception was
caught.
:param future:
Future to which uncaught exceptions will be forwarded.
]
assert[call[name[is_future], parameter[name[future]]]]
def function[decorator, parameter[f]]:
def function[new_f, parameter[]]:
<ast.Try object at 0x7da18f09d270>
return[name[new_f]]
return[name[decorator]] | keyword[def] identifier[fail_to] ( identifier[future] ):
literal[string]
keyword[assert] identifier[is_future] ( identifier[future] ), literal[string]
keyword[def] identifier[decorator] ( identifier[f] ):
@ identifier[wraps] ( identifier[f] )
keyword[def] identifier[new_f] (* identifier[args] ,** identifier[kwargs] ):
keyword[try] :
keyword[return] identifier[f] (* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[Exception] :
identifier[future] . identifier[set_exc_info] ( identifier[sys] . identifier[exc_info] ())
keyword[return] identifier[new_f]
keyword[return] identifier[decorator] | def fail_to(future):
"""A decorator for function callbacks to catch uncaught non-async
exceptions and forward them to the given future.
The primary use for this is to catch exceptions in async callbacks and
propagate them to futures. For example, consider,
.. code-block:: python
answer = Future()
def on_done(future):
foo = bar()
answer.set_result(foo)
some_async_operation().add_done_callback(on_done)
If ``bar()`` fails, ``answer`` will never get filled with an exception or
a result. Now if we change ``on_done`` to,
.. code-block:: python
@fail_to(answer)
def on_done(future):
foo = bar()
answer.set_result(foo)
Uncaught exceptions in ``on_done`` will be caught and propagated to
``answer``. Note that ``on_done`` will return None if an exception was
caught.
:param future:
Future to which uncaught exceptions will be forwarded.
"""
assert is_future(future), 'you forgot to pass a future'
def decorator(f):
@wraps(f)
def new_f(*args, **kwargs):
try:
return f(*args, **kwargs) # depends on [control=['try'], data=[]]
except Exception:
future.set_exc_info(sys.exc_info()) # depends on [control=['except'], data=[]]
return new_f
return decorator |
def dict_to_instance(self, content):
"""
transforms the content into a new instance of
object self.schema['title']
:param content: valid response
:returns: a new instance of the current class
"""
klass = self.schema['title']
cls = get_model_class(klass, api=self.__api__)
# jdict = json.loads(content, encoding="utf-8")
### check if we have a response
properties_dict = content[self.schema['title']][self.schema['title']]
#@todo: find a way to handle the data
# validation fails if the none values are not removed
new_dict = helpers.remove_properties_containing_None(properties_dict)
obj = cls(new_dict)
#obj.links = content[self.schema['title']]['links']
return obj | def function[dict_to_instance, parameter[self, content]]:
constant[
transforms the content into a new instance of
object self.schema['title']
:param content: valid response
:returns: a new instance of the current class
]
variable[klass] assign[=] call[name[self].schema][constant[title]]
variable[cls] assign[=] call[name[get_model_class], parameter[name[klass]]]
variable[properties_dict] assign[=] call[call[name[content]][call[name[self].schema][constant[title]]]][call[name[self].schema][constant[title]]]
variable[new_dict] assign[=] call[name[helpers].remove_properties_containing_None, parameter[name[properties_dict]]]
variable[obj] assign[=] call[name[cls], parameter[name[new_dict]]]
return[name[obj]] | keyword[def] identifier[dict_to_instance] ( identifier[self] , identifier[content] ):
literal[string]
identifier[klass] = identifier[self] . identifier[schema] [ literal[string] ]
identifier[cls] = identifier[get_model_class] ( identifier[klass] , identifier[api] = identifier[self] . identifier[__api__] )
identifier[properties_dict] = identifier[content] [ identifier[self] . identifier[schema] [ literal[string] ]][ identifier[self] . identifier[schema] [ literal[string] ]]
identifier[new_dict] = identifier[helpers] . identifier[remove_properties_containing_None] ( identifier[properties_dict] )
identifier[obj] = identifier[cls] ( identifier[new_dict] )
keyword[return] identifier[obj] | def dict_to_instance(self, content):
"""
transforms the content into a new instance of
object self.schema['title']
:param content: valid response
:returns: a new instance of the current class
"""
klass = self.schema['title']
cls = get_model_class(klass, api=self.__api__)
# jdict = json.loads(content, encoding="utf-8")
### check if we have a response
properties_dict = content[self.schema['title']][self.schema['title']]
#@todo: find a way to handle the data
# validation fails if the none values are not removed
new_dict = helpers.remove_properties_containing_None(properties_dict)
obj = cls(new_dict)
#obj.links = content[self.schema['title']]['links']
return obj |
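The shape of the content argument implied by the body above, with a hypothetical schema title "Server"; get_model_class and the helpers module belong to the host library.

content = {
    "Server": {
        "Server": {"id": 1, "name": "web-1", "flavor": None},
        "links": [{"rel": "self", "href": "/servers/1"}],
    }
}
# dict_to_instance would drop the None-valued "flavor" key and build a
# Server model instance from {"id": 1, "name": "web-1"}.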
def _readfloat(self, length, start):
"""Read bits and interpret as a float."""
if not (start + self._offset) % 8:
startbyte = (start + self._offset) // 8
if length == 32:
f, = struct.unpack('>f', bytes(self._datastore.getbyteslice(startbyte, startbyte + 4)))
elif length == 64:
f, = struct.unpack('>d', bytes(self._datastore.getbyteslice(startbyte, startbyte + 8)))
else:
if length == 32:
f, = struct.unpack('>f', self._readbytes(32, start))
elif length == 64:
f, = struct.unpack('>d', self._readbytes(64, start))
try:
return f
except NameError:
raise InterpretError("floats can only be 32 or 64 bits long, not {0} bits", length) | def function[_readfloat, parameter[self, length, start]]:
constant[Read bits and interpret as a float.]
if <ast.UnaryOp object at 0x7da1b101ac20> begin[:]
variable[startbyte] assign[=] binary_operation[binary_operation[name[start] + name[self]._offset] <ast.FloorDiv object at 0x7da2590d6bc0> constant[8]]
if compare[name[length] equal[==] constant[32]] begin[:]
<ast.Tuple object at 0x7da1b101a1d0> assign[=] call[name[struct].unpack, parameter[constant[>f], call[name[bytes], parameter[call[name[self]._datastore.getbyteslice, parameter[name[startbyte], binary_operation[name[startbyte] + constant[4]]]]]]]]
<ast.Try object at 0x7da1b10196c0> | keyword[def] identifier[_readfloat] ( identifier[self] , identifier[length] , identifier[start] ):
literal[string]
keyword[if] keyword[not] ( identifier[start] + identifier[self] . identifier[_offset] )% literal[int] :
identifier[startbyte] =( identifier[start] + identifier[self] . identifier[_offset] )// literal[int]
keyword[if] identifier[length] == literal[int] :
identifier[f] ,= identifier[struct] . identifier[unpack] ( literal[string] , identifier[bytes] ( identifier[self] . identifier[_datastore] . identifier[getbyteslice] ( identifier[startbyte] , identifier[startbyte] + literal[int] )))
keyword[elif] identifier[length] == literal[int] :
identifier[f] ,= identifier[struct] . identifier[unpack] ( literal[string] , identifier[bytes] ( identifier[self] . identifier[_datastore] . identifier[getbyteslice] ( identifier[startbyte] , identifier[startbyte] + literal[int] )))
keyword[else] :
keyword[if] identifier[length] == literal[int] :
identifier[f] ,= identifier[struct] . identifier[unpack] ( literal[string] , identifier[self] . identifier[_readbytes] ( literal[int] , identifier[start] ))
keyword[elif] identifier[length] == literal[int] :
identifier[f] ,= identifier[struct] . identifier[unpack] ( literal[string] , identifier[self] . identifier[_readbytes] ( literal[int] , identifier[start] ))
keyword[try] :
keyword[return] identifier[f]
keyword[except] identifier[NameError] :
keyword[raise] identifier[InterpretError] ( literal[string] , identifier[length] ) | def _readfloat(self, length, start):
"""Read bits and interpret as a float."""
if not (start + self._offset) % 8:
startbyte = (start + self._offset) // 8
if length == 32:
(f,) = struct.unpack('>f', bytes(self._datastore.getbyteslice(startbyte, startbyte + 4))) # depends on [control=['if'], data=[]]
elif length == 64:
(f,) = struct.unpack('>d', bytes(self._datastore.getbyteslice(startbyte, startbyte + 8))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif length == 32:
(f,) = struct.unpack('>f', self._readbytes(32, start)) # depends on [control=['if'], data=[]]
elif length == 64:
(f,) = struct.unpack('>d', self._readbytes(64, start)) # depends on [control=['if'], data=[]]
try:
return f # depends on [control=['try'], data=[]]
except NameError:
raise InterpretError('floats can only be 32 or 64 bits long, not {0} bits', length) # depends on [control=['except'], data=[]] |
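The fast path above reduced to its essentials: big-endian IEEE 754 floats unpacked directly from byte-aligned data with struct.

import struct

data = struct.pack(">f", 3.14) + struct.pack(">d", 2.718281828)
f32, = struct.unpack(">f", data[:4])    # 32-bit float
f64, = struct.unpack(">d", data[4:12])  # 64-bit double
print(round(f32, 2), f64)               # 3.14 2.718281828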
def solve(self, x0, params=(), internal_x0=None, solver=None,
conditional_maxiter=20, initial_conditions=None, **kwargs):
""" Solve the problem (systems of equations)
Parameters
----------
x0 : array
Guess.
params : array
See :meth:`NeqSys.solve`.
internal_x0 : array
See :meth:`NeqSys.solve`.
solver : str or callable or iterable of such.
See :meth:`NeqSys.solve`.
conditional_maxiter : int
Maximum number of switches between conditions.
initial_conditions : iterable of bools
Conditions corresponding to ``x0``
\\*\\*kwargs :
Keyword arguments passed on to :meth:`NeqSys.solve`.
"""
if initial_conditions is not None:
conds = initial_conditions
else:
conds = self.get_conds(x0, params, initial_conditions)
idx, nfev, njev = 0, 0, 0
while idx < conditional_maxiter:
neqsys = self.neqsys_factory(conds)
x0, info = neqsys.solve(x0, params, internal_x0, solver, **kwargs)
if idx == 0:
internal_x0 = None
nfev += info['nfev']
njev += info.get('njev', 0)
new_conds = self.get_conds(x0, params, conds)
if new_conds == conds:
break
else:
conds = new_conds
idx += 1
if idx == conditional_maxiter:
raise Exception("Solving failed, conditional_maxiter reached")
self.internal_x = info['x']
self.internal_params = neqsys.internal_params
result = {
'x': info['x'],
'success': info['success'],
'conditions': conds,
'nfev': nfev,
'njev': njev,
}
if 'fun' in info:
result['fun'] = info['fun']
return x0, result | def function[solve, parameter[self, x0, params, internal_x0, solver, conditional_maxiter, initial_conditions]]:
constant[ Solve the problem (systems of equations)
Parameters
----------
x0 : array
Guess.
params : array
See :meth:`NeqSys.solve`.
internal_x0 : array
See :meth:`NeqSys.solve`.
solver : str or callable or iterable of such.
See :meth:`NeqSys.solve`.
conditional_maxiter : int
Maximum number of switches between conditions.
initial_conditions : iterable of bools
Conditions corresponding to ``x0``
\*\*kwargs :
Keyword arguments passed on to :meth:`NeqSys.solve`.
]
if compare[name[initial_conditions] is_not constant[None]] begin[:]
variable[conds] assign[=] name[initial_conditions]
<ast.Tuple object at 0x7da2047e9150> assign[=] tuple[[<ast.Constant object at 0x7da2047e8eb0>, <ast.Constant object at 0x7da2047e97e0>, <ast.Constant object at 0x7da2047eb040>]]
while compare[name[idx] less[<] name[conditional_maxiter]] begin[:]
variable[neqsys] assign[=] call[name[self].neqsys_factory, parameter[name[conds]]]
<ast.Tuple object at 0x7da204962500> assign[=] call[name[neqsys].solve, parameter[name[x0], name[params], name[internal_x0], name[solver]]]
if compare[name[idx] equal[==] constant[0]] begin[:]
variable[internal_x0] assign[=] constant[None]
<ast.AugAssign object at 0x7da20c795fc0>
<ast.AugAssign object at 0x7da20c794220>
variable[new_conds] assign[=] call[name[self].get_conds, parameter[name[x0], name[params], name[conds]]]
if compare[name[new_conds] equal[==] name[conds]] begin[:]
break
<ast.AugAssign object at 0x7da1b26ad3c0>
if compare[name[idx] equal[==] name[conditional_maxiter]] begin[:]
<ast.Raise object at 0x7da1b26affa0>
name[self].internal_x assign[=] call[name[info]][constant[x]]
name[self].internal_params assign[=] name[neqsys].internal_params
variable[result] assign[=] dictionary[[<ast.Constant object at 0x7da1b26ace80>, <ast.Constant object at 0x7da1b26ada20>, <ast.Constant object at 0x7da1b26acdc0>, <ast.Constant object at 0x7da1b26afa60>, <ast.Constant object at 0x7da1b26aeaa0>], [<ast.Subscript object at 0x7da1b26ac250>, <ast.Subscript object at 0x7da1b26afee0>, <ast.Name object at 0x7da1b26ad990>, <ast.Name object at 0x7da1b26afdf0>, <ast.Name object at 0x7da1b26ad810>]]
if compare[constant[fun] in name[info]] begin[:]
call[name[result]][constant[fun]] assign[=] call[name[info]][constant[fun]]
return[tuple[[<ast.Name object at 0x7da1b26aebc0>, <ast.Name object at 0x7da1b26ad180>]]] | keyword[def] identifier[solve] ( identifier[self] , identifier[x0] , identifier[params] =(), identifier[internal_x0] = keyword[None] , identifier[solver] = keyword[None] ,
identifier[conditional_maxiter] = literal[int] , identifier[initial_conditions] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[initial_conditions] keyword[is] keyword[not] keyword[None] :
identifier[conds] = identifier[initial_conditions]
keyword[else] :
identifier[conds] = identifier[self] . identifier[get_conds] ( identifier[x0] , identifier[params] , identifier[initial_conditions] )
identifier[idx] , identifier[nfev] , identifier[njev] = literal[int] , literal[int] , literal[int]
keyword[while] identifier[idx] < identifier[conditional_maxiter] :
identifier[neqsys] = identifier[self] . identifier[neqsys_factory] ( identifier[conds] )
identifier[x0] , identifier[info] = identifier[neqsys] . identifier[solve] ( identifier[x0] , identifier[params] , identifier[internal_x0] , identifier[solver] ,** identifier[kwargs] )
keyword[if] identifier[idx] == literal[int] :
identifier[internal_x0] = keyword[None]
identifier[nfev] += identifier[info] [ literal[string] ]
identifier[njev] += identifier[info] . identifier[get] ( literal[string] , literal[int] )
identifier[new_conds] = identifier[self] . identifier[get_conds] ( identifier[x0] , identifier[params] , identifier[conds] )
keyword[if] identifier[new_conds] == identifier[conds] :
keyword[break]
keyword[else] :
identifier[conds] = identifier[new_conds]
identifier[idx] += literal[int]
keyword[if] identifier[idx] == identifier[conditional_maxiter] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[self] . identifier[internal_x] = identifier[info] [ literal[string] ]
identifier[self] . identifier[internal_params] = identifier[neqsys] . identifier[internal_params]
identifier[result] ={
literal[string] : identifier[info] [ literal[string] ],
literal[string] : identifier[info] [ literal[string] ],
literal[string] : identifier[conds] ,
literal[string] : identifier[nfev] ,
literal[string] : identifier[njev] ,
}
keyword[if] literal[string] keyword[in] identifier[info] :
identifier[result] [ literal[string] ]= identifier[info] [ literal[string] ]
keyword[return] identifier[x0] , identifier[result] | def solve(self, x0, params=(), internal_x0=None, solver=None, conditional_maxiter=20, initial_conditions=None, **kwargs):
""" Solve the problem (systems of equations)
Parameters
----------
x0 : array
Guess.
params : array
See :meth:`NeqSys.solve`.
internal_x0 : array
See :meth:`NeqSys.solve`.
solver : str or callable or iterable of such.
See :meth:`NeqSys.solve`.
conditional_maxiter : int
Maximum number of switches between conditions.
initial_conditions : iterable of bools
Conditions corresponding to ``x0``
\\*\\*kwargs :
Keyword arguments passed on to :meth:`NeqSys.solve`.
"""
if initial_conditions is not None:
conds = initial_conditions # depends on [control=['if'], data=['initial_conditions']]
else:
conds = self.get_conds(x0, params, initial_conditions)
(idx, nfev, njev) = (0, 0, 0)
while idx < conditional_maxiter:
neqsys = self.neqsys_factory(conds)
(x0, info) = neqsys.solve(x0, params, internal_x0, solver, **kwargs)
if idx == 0:
internal_x0 = None # depends on [control=['if'], data=[]]
nfev += info['nfev']
njev += info.get('njev', 0)
new_conds = self.get_conds(x0, params, conds)
if new_conds == conds:
break # depends on [control=['if'], data=[]]
else:
conds = new_conds
idx += 1 # depends on [control=['while'], data=['idx']]
if idx == conditional_maxiter:
raise Exception('Solving failed, conditional_maxiter reached') # depends on [control=['if'], data=[]]
self.internal_x = info['x']
self.internal_params = neqsys.internal_params
result = {'x': info['x'], 'success': info['success'], 'conditions': conds, 'nfev': nfev, 'njev': njev}
if 'fun' in info:
result['fun'] = info['fun'] # depends on [control=['if'], data=['info']]
return (x0, result) |
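A minimal stand-alone sketch of the fixed-point loop at the heart of this method: solve, re-evaluate the conditions, and repeat until they stop changing; solve_for stands in for neqsys.solve.

def solve_conditional(solve_for, get_conds, x0, max_switches=20):
    conds = get_conds(x0)
    for _ in range(max_switches):
        x0 = solve_for(conds, x0)      # stand-in for neqsys.solve(...)
        new_conds = get_conds(x0)
        if new_conds == conds:
            return x0, conds           # conditions are self-consistent
        conds = new_conds
    raise Exception("conditional_maxiter reached")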
def _delly_count_evidence_filter(in_file, data):
"""Filter delly outputs based on read support (DV) and evidence (split and paired).
We require DV >= 4 and either both paired-end and split-read evidence, or
five or more supporting reads of either type individually.
"""
filtname = "DVSupport"
filtdoc = "FMT/DV < 4 || (SR < 1 && PE < 5) || (SR < 5 && PE < 1)"
out_file = "%s-filter%s" % utils.splitext_plus(in_file)
cur_out_file = out_file.replace(".vcf.gz", ".vcf")
if not utils.file_exists(out_file):
with file_transaction(data, cur_out_file) as tx_out_file:
with utils.open_gzipsafe(in_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
inp = vcf.Reader(in_handle, in_file)
inp.filters["DVSupport"] = vcf.parser._Filter(filtname, filtdoc)
outp = vcf.Writer(out_handle, inp)
for rec in inp:
sr = rec.INFO.get("SR", 0)
pe = rec.INFO.get("PE", 0)
call = rec.samples[0].data
dv = call.DV if hasattr(call, "DV") else 0
if dv < 4 or (sr < 1 and pe < 5) or (sr < 5 and pe < 1):
rec.add_filter(filtname)
outp.write_record(rec)
if out_file.endswith(".vcf.gz"):
out_file = vcfutils.bgzip_and_index(cur_out_file, data["config"])
return out_file | def function[_delly_count_evidence_filter, parameter[in_file, data]]:
constant[Filter delly outputs based on read support (DV) and evidence (split and paired).
We require DV >= 4 and either both paired-end and split-read evidence, or
five or more supporting reads of either type individually.
]
variable[filtname] assign[=] constant[DVSupport]
variable[filtdoc] assign[=] constant[FMT/DV < 4 || (SR < 1 && PE < 5) || (SR < 5 && PE < 1)]
variable[out_file] assign[=] binary_operation[constant[%s-filter%s] <ast.Mod object at 0x7da2590d6920> call[name[utils].splitext_plus, parameter[name[in_file]]]]
variable[cur_out_file] assign[=] call[name[out_file].replace, parameter[constant[.vcf.gz], constant[.vcf]]]
if <ast.UnaryOp object at 0x7da1b17d6aa0> begin[:]
with call[name[file_transaction], parameter[name[data], name[cur_out_file]]] begin[:]
with call[name[utils].open_gzipsafe, parameter[name[in_file]]] begin[:]
with call[name[open], parameter[name[tx_out_file], constant[w]]] begin[:]
variable[inp] assign[=] call[name[vcf].Reader, parameter[name[in_handle], name[in_file]]]
call[name[inp].filters][constant[DVSupport]] assign[=] call[name[vcf].parser._Filter, parameter[name[filtname], name[filtdoc]]]
variable[outp] assign[=] call[name[vcf].Writer, parameter[name[out_handle], name[inp]]]
for taget[name[rec]] in starred[name[inp]] begin[:]
variable[sr] assign[=] call[name[rec].INFO.get, parameter[constant[SR], constant[0]]]
variable[pe] assign[=] call[name[rec].INFO.get, parameter[constant[PE], constant[0]]]
variable[call] assign[=] call[name[rec].samples][constant[0]].data
variable[dv] assign[=] <ast.IfExp object at 0x7da1b178ddb0>
if <ast.BoolOp object at 0x7da1b178d1b0> begin[:]
call[name[rec].add_filter, parameter[name[filtname]]]
call[name[outp].write_record, parameter[name[rec]]]
if call[name[out_file].endswith, parameter[constant[.vcf.gz]]] begin[:]
variable[out_file] assign[=] call[name[vcfutils].bgzip_and_index, parameter[name[cur_out_file], call[name[data]][constant[config]]]]
return[name[out_file]] | keyword[def] identifier[_delly_count_evidence_filter] ( identifier[in_file] , identifier[data] ):
literal[string]
identifier[filtname] = literal[string]
identifier[filtdoc] = literal[string]
identifier[out_file] = literal[string] % identifier[utils] . identifier[splitext_plus] ( identifier[in_file] )
identifier[cur_out_file] = identifier[out_file] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[utils] . identifier[file_exists] ( identifier[out_file] ):
keyword[with] identifier[file_transaction] ( identifier[data] , identifier[cur_out_file] ) keyword[as] identifier[tx_out_file] :
keyword[with] identifier[utils] . identifier[open_gzipsafe] ( identifier[in_file] ) keyword[as] identifier[in_handle] :
keyword[with] identifier[open] ( identifier[tx_out_file] , literal[string] ) keyword[as] identifier[out_handle] :
identifier[inp] = identifier[vcf] . identifier[Reader] ( identifier[in_handle] , identifier[in_file] )
identifier[inp] . identifier[filters] [ literal[string] ]= identifier[vcf] . identifier[parser] . identifier[_Filter] ( identifier[filtname] , identifier[filtdoc] )
identifier[outp] = identifier[vcf] . identifier[Writer] ( identifier[out_handle] , identifier[inp] )
keyword[for] identifier[rec] keyword[in] identifier[inp] :
identifier[sr] = identifier[rec] . identifier[INFO] . identifier[get] ( literal[string] , literal[int] )
identifier[pe] = identifier[rec] . identifier[INFO] . identifier[get] ( literal[string] , literal[int] )
identifier[call] = identifier[rec] . identifier[samples] [ literal[int] ]. identifier[data]
identifier[dv] = identifier[call] . identifier[DV] keyword[if] identifier[hasattr] ( identifier[call] , literal[string] ) keyword[else] literal[int]
keyword[if] identifier[dv] < literal[int] keyword[or] ( identifier[sr] < literal[int] keyword[and] identifier[pe] < literal[int] ) keyword[or] ( identifier[sr] < literal[int] keyword[and] identifier[pe] < literal[int] ):
identifier[rec] . identifier[add_filter] ( identifier[filtname] )
identifier[outp] . identifier[write_record] ( identifier[rec] )
keyword[if] identifier[out_file] . identifier[endswith] ( literal[string] ):
identifier[out_file] = identifier[vcfutils] . identifier[bgzip_and_index] ( identifier[cur_out_file] , identifier[data] [ literal[string] ])
keyword[return] identifier[out_file] | def _delly_count_evidence_filter(in_file, data):
"""Filter delly outputs based on read support (DV) and evidence (split and paired).
We require DV >= 4 and either both paired-end and split-read evidence, or
five or more supporting reads of either type individually.
"""
filtname = 'DVSupport'
filtdoc = 'FMT/DV < 4 || (SR < 1 && PE < 5) || (SR < 5 && PE < 1)'
out_file = '%s-filter%s' % utils.splitext_plus(in_file)
cur_out_file = out_file.replace('.vcf.gz', '.vcf')
if not utils.file_exists(out_file):
with file_transaction(data, cur_out_file) as tx_out_file:
with utils.open_gzipsafe(in_file) as in_handle:
with open(tx_out_file, 'w') as out_handle:
inp = vcf.Reader(in_handle, in_file)
inp.filters['DVSupport'] = vcf.parser._Filter(filtname, filtdoc)
outp = vcf.Writer(out_handle, inp)
for rec in inp:
sr = rec.INFO.get('SR', 0)
pe = rec.INFO.get('PE', 0)
call = rec.samples[0].data
dv = call.DV if hasattr(call, 'DV') else 0
if dv < 4 or (sr < 1 and pe < 5) or (sr < 5 and pe < 1):
rec.add_filter(filtname) # depends on [control=['if'], data=[]]
outp.write_record(rec) # depends on [control=['for'], data=['rec']] # depends on [control=['with'], data=['out_handle']] # depends on [control=['with'], data=['in_handle']] # depends on [control=['with'], data=['tx_out_file']] # depends on [control=['if'], data=[]]
if out_file.endswith('.vcf.gz'):
out_file = vcfutils.bgzip_and_index(cur_out_file, data['config']) # depends on [control=['if'], data=[]]
return out_file |
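The filter predicate isolated for clarity; records that satisfy it receive the DVSupport soft filter rather than being removed from the VCF.

def fails_dv_support(dv, sr, pe):
    return dv < 4 or (sr < 1 and pe < 5) or (sr < 5 and pe < 1)

print(fails_dv_support(dv=10, sr=2, pe=3))  # False: both evidence types present
print(fails_dv_support(dv=10, sr=0, pe=6))  # False: >= 5 paired-end reads alone
print(fails_dv_support(dv=3, sr=5, pe=5))   # True: DV below 4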
def ip_frag(packet):
'''
Not fragments:
ip_frag(packet) == 0
not ip_frag(packet)
First packet of fragments:
ip_frag(packet) == IP_FRAG_ANY
Not first packet of fragments:
ip_frag(packet) & IP_FRAG_LATER
All fragments:
ip_frag(packet) & IP_FRAG_ANY
'''
return ((packet.frag_off & IP_OFFMASK) and IP_FRAG_LATER) | ((packet.frag_off & (IP_OFFMASK | IP_MF)) and IP_FRAG_ANY) | def function[ip_frag, parameter[packet]]:
constant[
Not fragments:
ip_frag(packet) == 0
not ip_frag(packet)
First packet of fragments:
ip_frag(packet) == IP_FRAG_ANY
Not first packet of fragments:
ip_frag(packet) & IP_FRAG_LATER
All fragments:
ip_frag(packet) & IP_FRAG_ANY
]
return[binary_operation[<ast.BoolOp object at 0x7da1b05e0eb0> <ast.BitOr object at 0x7da2590d6aa0> <ast.BoolOp object at 0x7da1b05e2c20>]] | keyword[def] identifier[ip_frag] ( identifier[packet] ):
literal[string]
keyword[return] (( identifier[packet] . identifier[frag_off] & identifier[IP_OFFMASK] ) keyword[and] identifier[IP_FRAG_LATER] )|(( identifier[packet] . identifier[frag_off] &( identifier[IP_OFFMASK] | identifier[IP_MF] )) keyword[and] identifier[IP_FRAG_ANY] ) | def ip_frag(packet):
"""
Not fragments:
ip_frag(packet) == 0
not ip_frag(packet)
First packet of fragments:
ip_frag(packet) == IP_FRAG_ANY
Not first packet of fragments:
ip_frag(packet) & IP_FRAG_LATER
All fragments:
ip_frag(packet) & IP_FRAG_ANY
"""
return (packet.frag_off & IP_OFFMASK and IP_FRAG_LATER) | (packet.frag_off & (IP_OFFMASK | IP_MF) and IP_FRAG_ANY) |
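A worked example using the conventional IPv4 header values (IP_MF = 0x2000, IP_OFFMASK = 0x1fff); the IP_FRAG_* codes are assumptions chosen so the docstring's bit tests hold, and the expression is inlined so the demo is self-contained.

from collections import namedtuple

IP_MF, IP_OFFMASK = 0x2000, 0x1FFF
IP_FRAG_ANY, IP_FRAG_LATER = 1, 2       # assumed flag values
Pkt = namedtuple("Pkt", "frag_off")

def ip_frag_demo(p):   # same expression as ip_frag above
    return ((p.frag_off & IP_OFFMASK) and IP_FRAG_LATER) | \
           ((p.frag_off & (IP_OFFMASK | IP_MF)) and IP_FRAG_ANY)

print(ip_frag_demo(Pkt(0)))            # 0: not a fragment
print(ip_frag_demo(Pkt(IP_MF)))        # 1 == IP_FRAG_ANY: first fragment
print(ip_frag_demo(Pkt(IP_MF | 185)))  # 3 == IP_FRAG_LATER | IP_FRAG_ANY: later fragment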
def send(self, event):
"""Insert the event in to the Mongo collection"""
try:
self.collection.insert(event, manipulate=False)
except (PyMongoError, BSONError):
# The event will be lost in case of a connection error or any error
# that occurs when trying to insert the event into Mongo.
# pymongo will re-connect/re-authenticate automatically
# during the next event.
msg = 'Error inserting to MongoDB event tracker backend'
log.exception(msg) | def function[send, parameter[self, event]]:
constant[Insert the event in to the Mongo collection]
<ast.Try object at 0x7da207f01db0> | keyword[def] identifier[send] ( identifier[self] , identifier[event] ):
literal[string]
keyword[try] :
identifier[self] . identifier[collection] . identifier[insert] ( identifier[event] , identifier[manipulate] = keyword[False] )
keyword[except] ( identifier[PyMongoError] , identifier[BSONError] ):
identifier[msg] = literal[string]
identifier[log] . identifier[exception] ( identifier[msg] ) | def send(self, event):
"""Insert the event in to the Mongo collection"""
try:
self.collection.insert(event, manipulate=False) # depends on [control=['try'], data=[]]
except (PyMongoError, BSONError):
# The event will be lost in case of a connection error or any error
# that occurs when trying to insert the event into Mongo.
# pymongo will re-connect/re-authenticate automatically
# during the next event.
msg = 'Error inserting to MongoDB event tracker backend'
log.exception(msg) # depends on [control=['except'], data=[]] |
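A wiring sketch for the backend above; the collection setup is an assumption for illustration. Note that Collection.insert() is the legacy pymongo API, replaced by insert_one() in pymongo 3+.

from pymongo import MongoClient

collection = MongoClient("mongodb://localhost:27017")["tracking"]["events"]
# backend.collection = collection
# backend.send({"event_type": "play_video", "time": "..."})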
def set_option(self, key, value):
"""Sets a option for this session.
For a detailed list of available options see the librtmp(3) man page.
:param key: str, A valid option key.
:param value: A value, anything that can be converted to str is valid.
Raises :exc:`ValueError` if a invalid option is specified.
"""
akey = AVal(key)
aval = AVal(value)
res = librtmp.RTMP_SetOpt(self.rtmp, akey.aval, aval.aval)
if res < 1:
raise ValueError("Unable to set option {0}".format(key))
self._options[akey] = aval | def function[set_option, parameter[self, key, value]]:
constant[Sets a option for this session.
For a detailed list of available options see the librtmp(3) man page.
:param key: str, A valid option key.
:param value: A value, anything that can be converted to str is valid.
Raises :exc:`ValueError` if an invalid option is specified.
]
variable[akey] assign[=] call[name[AVal], parameter[name[key]]]
variable[aval] assign[=] call[name[AVal], parameter[name[value]]]
variable[res] assign[=] call[name[librtmp].RTMP_SetOpt, parameter[name[self].rtmp, name[akey].aval, name[aval].aval]]
if compare[name[res] less[<] constant[1]] begin[:]
<ast.Raise object at 0x7da18f09e7a0>
call[name[self]._options][name[akey]] assign[=] name[aval] | keyword[def] identifier[set_option] ( identifier[self] , identifier[key] , identifier[value] ):
literal[string]
identifier[akey] = identifier[AVal] ( identifier[key] )
identifier[aval] = identifier[AVal] ( identifier[value] )
identifier[res] = identifier[librtmp] . identifier[RTMP_SetOpt] ( identifier[self] . identifier[rtmp] , identifier[akey] . identifier[aval] , identifier[aval] . identifier[aval] )
keyword[if] identifier[res] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[key] ))
identifier[self] . identifier[_options] [ identifier[akey] ]= identifier[aval] | def set_option(self, key, value):
"""Sets a option for this session.
For a detailed list of available options see the librtmp(3) man page.
:param key: str, A valid option key.
:param value: A value, anything that can be converted to str is valid.
Raises :exc:`ValueError` if an invalid option is specified.
"""
akey = AVal(key)
aval = AVal(value)
res = librtmp.RTMP_SetOpt(self.rtmp, akey.aval, aval.aval)
if res < 1:
raise ValueError('Unable to set option {0}'.format(key)) # depends on [control=['if'], data=[]]
self._options[akey] = aval |
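Hypothetical usage against a python-librtmp session; the constructor call is assumed from that library's documented API, 'timeout' is a connection option named in librtmp(3), and 'no-such-key' illustrates the failure path.

from librtmp import RTMP   # assumed import for python-librtmp

session = RTMP('rtmp://example.com/app/stream', live=True)
session.set_option('timeout', 30)        # any value convertible to str
try:
    session.set_option('no-such-key', 1)
except ValueError as err:
    print(err)                           # Unable to set option no-such-key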
def pause(self):
"""Pause tracing, but be prepared to `resume`."""
for tracer in self.tracers:
tracer.stop()
stats = tracer.get_stats()
if stats:
print("\nCoverage.py tracer stats:")
for k in sorted(stats.keys()):
print("%16s: %s" % (k, stats[k]))
threading.settrace(None) | def function[pause, parameter[self]]:
constant[Pause tracing, but be prepared to `resume`.]
for taget[name[tracer]] in starred[name[self].tracers] begin[:]
call[name[tracer].stop, parameter[]]
variable[stats] assign[=] call[name[tracer].get_stats, parameter[]]
if name[stats] begin[:]
call[name[print], parameter[constant[
Coverage.py tracer stats:]]]
for taget[name[k]] in starred[call[name[sorted], parameter[call[name[stats].keys, parameter[]]]]] begin[:]
call[name[print], parameter[binary_operation[constant[%16s: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f09d540>, <ast.Subscript object at 0x7da18f09c040>]]]]]
call[name[threading].settrace, parameter[constant[None]]] | keyword[def] identifier[pause] ( identifier[self] ):
literal[string]
keyword[for] identifier[tracer] keyword[in] identifier[self] . identifier[tracers] :
identifier[tracer] . identifier[stop] ()
identifier[stats] = identifier[tracer] . identifier[get_stats] ()
keyword[if] identifier[stats] :
identifier[print] ( literal[string] )
keyword[for] identifier[k] keyword[in] identifier[sorted] ( identifier[stats] . identifier[keys] ()):
identifier[print] ( literal[string] %( identifier[k] , identifier[stats] [ identifier[k] ]))
identifier[threading] . identifier[settrace] ( keyword[None] ) | def pause(self):
"""Pause tracing, but be prepared to `resume`."""
for tracer in self.tracers:
tracer.stop()
stats = tracer.get_stats()
if stats:
print('\nCoverage.py tracer stats:')
for k in sorted(stats.keys()):
print('%16s: %s' % (k, stats[k])) # depends on [control=['for'], data=['k']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tracer']]
threading.settrace(None) |
def validate_generations(self):
'''
Make sure that the descendant depth is valid.
'''
nodes = self.arc_root_node.get_descendants()
for node in nodes:
logger.debug("Checking parent for node of type %s" % node.arc_element_type)
parent = ArcElementNode.objects.get(pk=node.pk).get_parent(update=True)
if 'mile' in node.arc_element_type and parent.get_depth() > 1:
logger.debug("Milestone node... with leaf parent")
raise ArcGenerationError(_("Milestones cannot be descendants of anything besides the root!"))
if (parent.get_depth() > 1 and
parent.arc_element_type not in ARC_NODE_ELEMENT_DEFINITIONS[node.arc_element_type]['allowed_parents']):
raise ArcGenerationError(_("Node %s cannot be a descendant of node %s" % (node, parent)))
return None | def function[validate_generations, parameter[self]]:
constant[
Make sure that the descendant depth is valid.
]
variable[nodes] assign[=] call[name[self].arc_root_node.get_descendants, parameter[]]
for taget[name[node]] in starred[name[nodes]] begin[:]
call[name[logger].debug, parameter[binary_operation[constant[Checking parent for node of type %s] <ast.Mod object at 0x7da2590d6920> name[node].arc_element_type]]]
variable[parent] assign[=] call[call[name[ArcElementNode].objects.get, parameter[]].get_parent, parameter[]]
if <ast.BoolOp object at 0x7da18f09c880> begin[:]
call[name[logger].debug, parameter[constant[Milestone node... with leaf parent]]]
<ast.Raise object at 0x7da18f09f640>
if <ast.BoolOp object at 0x7da18f09c6d0> begin[:]
<ast.Raise object at 0x7da18f09c340>
return[constant[None]] | keyword[def] identifier[validate_generations] ( identifier[self] ):
literal[string]
identifier[nodes] = identifier[self] . identifier[arc_root_node] . identifier[get_descendants] ()
keyword[for] identifier[node] keyword[in] identifier[nodes] :
identifier[logger] . identifier[debug] ( literal[string] % identifier[node] . identifier[arc_element_type] )
identifier[parent] = identifier[ArcElementNode] . identifier[objects] . identifier[get] ( identifier[pk] = identifier[node] . identifier[pk] ). identifier[get_parent] ( identifier[update] = keyword[True] )
keyword[if] literal[string] keyword[in] identifier[node] . identifier[arc_element_type] keyword[and] identifier[parent] . identifier[get_depth] ()> literal[int] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[raise] identifier[ArcGenerationError] ( identifier[_] ( literal[string] ))
keyword[if] ( identifier[parent] . identifier[get_depth] ()> literal[int] keyword[and]
identifier[parent] . identifier[arc_element_type] keyword[not] keyword[in] identifier[ARC_NODE_ELEMENT_DEFINITIONS] [ identifier[node] . identifier[arc_element_type] ][ literal[string] ]):
keyword[raise] identifier[ArcGenerationError] ( identifier[_] ( literal[string] %( identifier[node] , identifier[parent] )))
keyword[return] keyword[None] | def validate_generations(self):
"""
Make sure that the descendant depth is valid.
"""
nodes = self.arc_root_node.get_descendants()
for node in nodes:
logger.debug('Checking parent for node of type %s' % node.arc_element_type)
parent = ArcElementNode.objects.get(pk=node.pk).get_parent(update=True)
if 'mile' in node.arc_element_type and parent.get_depth() > 1:
logger.debug('Milestone node... with leaf parent')
raise ArcGenerationError(_('Milestones cannot be descendants of anything besides the root!')) # depends on [control=['if'], data=[]]
if parent.get_depth() > 1 and parent.arc_element_type not in ARC_NODE_ELEMENT_DEFINITIONS[node.arc_element_type]['allowed_parents']:
raise ArcGenerationError(_('Node %s cannot be a descendant of node %s' % (node, parent))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
return None |
def stats_add_duration(self, key, duration):
"""Add a duration to the per-message measurements
.. versionadded:: 3.19.0
.. note:: If this method is called when there is not a message being
processed, a message will be logged at the ``warning`` level to
indicate the value is being dropped. To suppress these warnings,
set the :attr:`~rejected.consumer.Consumer.IGNORE_OOB_STATS`
attribute to :data:`True`.
:param key: The key to add the timing to
:type key: :class:`str`
:param duration: The timing value in seconds
:type duration: :class:`int` or :class:`float`
"""
if not self._measurement:
if not self.IGNORE_OOB_STATS:
self.logger.warning(
'stats_add_duration invoked outside execution')
return
self._measurement.add_duration(key, duration) | def function[stats_add_duration, parameter[self, key, duration]]:
constant[Add a duration to the per-message measurements
.. versionadded:: 3.19.0
.. note:: If this method is called when there is not a message being
processed, a message will be logged at the ``warning`` level to
indicate the value is being dropped. To suppress these warnings,
set the :attr:`~rejected.consumer.Consumer.IGNORE_OOB_STATS`
attribute to :data:`True`.
:param key: The key to add the timing to
:type key: :class:`str`
:param duration: The timing value in seconds
:type duration: :class:`int` or :class:`float`
]
if <ast.UnaryOp object at 0x7da18dc07c70> begin[:]
if <ast.UnaryOp object at 0x7da18dc04430> begin[:]
call[name[self].logger.warning, parameter[constant[stats_add_duration invoked outside execution]]]
return[None]
call[name[self]._measurement.add_duration, parameter[name[key], name[duration]]] | keyword[def] identifier[stats_add_duration] ( identifier[self] , identifier[key] , identifier[duration] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_measurement] :
keyword[if] keyword[not] identifier[self] . identifier[IGNORE_OOB_STATS] :
identifier[self] . identifier[logger] . identifier[warning] (
literal[string] )
keyword[return]
identifier[self] . identifier[_measurement] . identifier[add_duration] ( identifier[key] , identifier[duration] ) | def stats_add_duration(self, key, duration):
"""Add a duration to the per-message measurements
.. versionadded:: 3.19.0
.. note:: If this method is called when there is not a message being
processed, a message will be logged at the ``warning`` level to
indicate the value is being dropped. To suppress these warnings,
set the :attr:`~rejected.consumer.Consumer.IGNORE_OOB_STATS`
attribute to :data:`True`.
:param key: The key to add the timing to
:type key: :class:`str`
:param duration: The timing value in seconds
:type duration: :class:`int` or :class:`float`
"""
if not self._measurement:
if not self.IGNORE_OOB_STATS:
self.logger.warning('stats_add_duration invoked outside execution') # depends on [control=['if'], data=[]]
return # depends on [control=['if'], data=[]]
self._measurement.add_duration(key, duration) |
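A sketch of recording a per-message timing from inside a consumer's process() method, the standard rejected entry point; do_expensive_work is a hypothetical helper.

import time

from rejected import consumer

class TimingConsumer(consumer.Consumer):
    def process(self):
        start = time.time()
        self.do_expensive_work()   # hypothetical helper
        # record how long this message took, keyed as 'work_seconds'
        self.stats_add_duration('work_seconds', time.time() - start)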
def _initSymbols(ptc):
"""
Initialize symbols and single character constants.
"""
# build am and pm lists to contain
# original case, lowercase, first-char and dotted
# versions of the meridian text
ptc.am = ['', '']
ptc.pm = ['', '']
for idx, xm in enumerate(ptc.locale.meridian[:2]):
# 0: am
# 1: pm
target = ['am', 'pm'][idx]
setattr(ptc, target, [xm])
target = getattr(ptc, target)
if xm:
lxm = xm.lower()
target.extend((xm[0], '{0}.{1}.'.format(*xm),
lxm, lxm[0], '{0}.{1}.'.format(*lxm))) | def function[_initSymbols, parameter[ptc]]:
constant[
Initialize symbols and single character constants.
]
name[ptc].am assign[=] list[[<ast.Constant object at 0x7da18dc04ac0>, <ast.Constant object at 0x7da18dc06320>]]
name[ptc].pm assign[=] list[[<ast.Constant object at 0x7da18dc06bf0>, <ast.Constant object at 0x7da18dc07940>]]
for taget[tuple[[<ast.Name object at 0x7da18dc07e50>, <ast.Name object at 0x7da18dc070a0>]]] in starred[call[name[enumerate], parameter[call[name[ptc].locale.meridian][<ast.Slice object at 0x7da18dc06770>]]]] begin[:]
variable[target] assign[=] call[list[[<ast.Constant object at 0x7da18dc07b80>, <ast.Constant object at 0x7da18dc078b0>]]][name[idx]]
call[name[setattr], parameter[name[ptc], name[target], list[[<ast.Name object at 0x7da18dc065f0>]]]]
variable[target] assign[=] call[name[getattr], parameter[name[ptc], name[target]]]
if name[xm] begin[:]
variable[lxm] assign[=] call[name[xm].lower, parameter[]]
call[name[target].extend, parameter[tuple[[<ast.Subscript object at 0x7da18dc06350>, <ast.Call object at 0x7da18dc07460>, <ast.Name object at 0x7da1b07cd6f0>, <ast.Subscript object at 0x7da1b07cf430>, <ast.Call object at 0x7da1b07ce2c0>]]]] | keyword[def] identifier[_initSymbols] ( identifier[ptc] ):
literal[string]
identifier[ptc] . identifier[am] =[ literal[string] , literal[string] ]
identifier[ptc] . identifier[pm] =[ literal[string] , literal[string] ]
keyword[for] identifier[idx] , identifier[xm] keyword[in] identifier[enumerate] ( identifier[ptc] . identifier[locale] . identifier[meridian] [: literal[int] ]):
identifier[target] =[ literal[string] , literal[string] ][ identifier[idx] ]
identifier[setattr] ( identifier[ptc] , identifier[target] ,[ identifier[xm] ])
identifier[target] = identifier[getattr] ( identifier[ptc] , identifier[target] )
keyword[if] identifier[xm] :
identifier[lxm] = identifier[xm] . identifier[lower] ()
identifier[target] . identifier[extend] (( identifier[xm] [ literal[int] ], literal[string] . identifier[format] (* identifier[xm] ),
identifier[lxm] , identifier[lxm] [ literal[int] ], literal[string] . identifier[format] (* identifier[lxm] ))) | def _initSymbols(ptc):
"""
Initialize symbols and single character constants.
"""
# build am and pm lists to contain
# original case, lowercase, first-char and dotted
# versions of the meridian text
ptc.am = ['', '']
ptc.pm = ['', '']
for (idx, xm) in enumerate(ptc.locale.meridian[:2]):
# 0: am
# 1: pm
target = ['am', 'pm'][idx]
setattr(ptc, target, [xm])
target = getattr(ptc, target)
if xm:
lxm = xm.lower()
target.extend((xm[0], '{0}.{1}.'.format(*xm), lxm, lxm[0], '{0}.{1}.'.format(*lxm))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
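A quick demonstration of the variants this produces, using bare stand-in objects for the parser's locale (assuming a meridian of ['AM', 'PM']):

class _Locale(object):
    meridian = ['AM', 'PM']

class _Ptc(object):
    locale = _Locale()

ptc = _Ptc()
_initSymbols(ptc)
print(ptc.am)   # ['AM', 'A', 'A.M.', 'am', 'a', 'a.m.']
print(ptc.pm)   # ['PM', 'P', 'P.M.', 'pm', 'p', 'p.m.']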
def request_token():
"""
Request a token with the account email and password; returns a dict
{
"user_info": {
"ck": "-VQY",
"play_record": {
"fav_chls_count": 4,
"liked": 802,
"banned": 162,
"played": 28368
},
"is_new_user": 0,
"uid": "taizilongxu",
"third_party_info": null,
"url": "http://www.douban.com/people/taizilongxu/",
"is_dj": false,
"id": "2053207",
"is_pro": false,
"name": "刘小备"
},
"r": 0
}
"""
while True:
email, password, captcha_solution, captcha_id = win_login()
options = {
'source': 'radio',
'alias': email,
'form_password': password,
'captcha_solution': captcha_solution,
'captcha_id': captcha_id,
'task': 'sync_channel_list'
}
r = requests.post('https://douban.fm/j/login', data=options, headers=HEADERS)
req_json = json.loads(r.text, object_hook=decode_dict)
# req_json = json.loads(r.text)
if req_json['r'] == 0:
post_data = {
# will not save
'liked': req_json['user_info']['play_record']['liked'],
'banned': req_json['user_info']['play_record']['banned'],
'played': req_json['user_info']['play_record']['played'],
'is_pro': req_json['user_info']['is_pro'],
'user_name': req_json['user_info']['name'],
# to save
'cookies': r.cookies,
'valume': 50,
'channel': 0,
'theme_id': 0
}
return post_data
print(ERROR + req_json['err_msg']) | def function[request_token, parameter[]]:
constant[
Request a token with the account email and password; returns a dict
{
"user_info": {
"ck": "-VQY",
"play_record": {
"fav_chls_count": 4,
"liked": 802,
"banned": 162,
"played": 28368
},
"is_new_user": 0,
"uid": "taizilongxu",
"third_party_info": null,
"url": "http://www.douban.com/people/taizilongxu/",
"is_dj": false,
"id": "2053207",
"is_pro": false,
"name": "刘小备"
},
"r": 0
}
]
while constant[True] begin[:]
<ast.Tuple object at 0x7da18f58e980> assign[=] call[name[win_login], parameter[]]
variable[options] assign[=] dictionary[[<ast.Constant object at 0x7da18f58fa00>, <ast.Constant object at 0x7da18f58dcf0>, <ast.Constant object at 0x7da18f58dae0>, <ast.Constant object at 0x7da18f58c880>, <ast.Constant object at 0x7da18f58e5f0>, <ast.Constant object at 0x7da18f58ceb0>], [<ast.Constant object at 0x7da18f58dab0>, <ast.Name object at 0x7da18f58e620>, <ast.Name object at 0x7da18f58d990>, <ast.Name object at 0x7da18f58e830>, <ast.Name object at 0x7da18f58c9a0>, <ast.Constant object at 0x7da18f58e740>]]
variable[r] assign[=] call[name[requests].post, parameter[constant[https://douban.fm/j/login]]]
variable[req_json] assign[=] call[name[json].loads, parameter[name[r].text]]
if compare[call[name[req_json]][constant[r]] equal[==] constant[0]] begin[:]
variable[post_data] assign[=] dictionary[[<ast.Constant object at 0x7da18f58e530>, <ast.Constant object at 0x7da18f58e800>, <ast.Constant object at 0x7da18f58f8b0>, <ast.Constant object at 0x7da18f58f9d0>, <ast.Constant object at 0x7da18f58ebc0>, <ast.Constant object at 0x7da18f58c700>, <ast.Constant object at 0x7da18f58ce50>, <ast.Constant object at 0x7da18f58cac0>, <ast.Constant object at 0x7da18f58de70>], [<ast.Subscript object at 0x7da18f58d6f0>, <ast.Subscript object at 0x7da18f58ce20>, <ast.Subscript object at 0x7da18f58c850>, <ast.Subscript object at 0x7da18f58f340>, <ast.Subscript object at 0x7da18f58d510>, <ast.Attribute object at 0x7da18f58f370>, <ast.Constant object at 0x7da18f58d8d0>, <ast.Constant object at 0x7da18f58f160>, <ast.Constant object at 0x7da18f58dd20>]]
return[name[post_data]]
call[name[print], parameter[binary_operation[name[ERROR] + call[name[req_json]][constant[err_msg]]]]] | keyword[def] identifier[request_token] ():
literal[string]
keyword[while] keyword[True] :
identifier[email] , identifier[password] , identifier[captcha_solution] , identifier[captcha_id] = identifier[win_login] ()
identifier[options] ={
literal[string] : literal[string] ,
literal[string] : identifier[email] ,
literal[string] : identifier[password] ,
literal[string] : identifier[captcha_solution] ,
literal[string] : identifier[captcha_id] ,
literal[string] : literal[string]
}
identifier[r] = identifier[requests] . identifier[post] ( literal[string] , identifier[data] = identifier[options] , identifier[headers] = identifier[HEADERS] )
identifier[req_json] = identifier[json] . identifier[loads] ( identifier[r] . identifier[text] , identifier[object_hook] = identifier[decode_dict] )
keyword[if] identifier[req_json] [ literal[string] ]== literal[int] :
identifier[post_data] ={
literal[string] : identifier[req_json] [ literal[string] ][ literal[string] ][ literal[string] ],
literal[string] : identifier[req_json] [ literal[string] ][ literal[string] ][ literal[string] ],
literal[string] : identifier[req_json] [ literal[string] ][ literal[string] ][ literal[string] ],
literal[string] : identifier[req_json] [ literal[string] ][ literal[string] ],
literal[string] : identifier[req_json] [ literal[string] ][ literal[string] ],
literal[string] : identifier[r] . identifier[cookies] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int]
}
keyword[return] identifier[post_data]
identifier[print] ( identifier[ERROR] + identifier[req_json] [ literal[string] ]) | def request_token():
"""
Request a token with the account email and password; returns a dict
{
"user_info": {
"ck": "-VQY",
"play_record": {
"fav_chls_count": 4,
"liked": 802,
"banned": 162,
"played": 28368
},
"is_new_user": 0,
"uid": "taizilongxu",
"third_party_info": null,
"url": "http://www.douban.com/people/taizilongxu/",
"is_dj": false,
"id": "2053207",
"is_pro": false,
"name": "刘小备"
},
"r": 0
}
"""
while True:
(email, password, captcha_solution, captcha_id) = win_login()
options = {'source': 'radio', 'alias': email, 'form_password': password, 'captcha_solution': captcha_solution, 'captcha_id': captcha_id, 'task': 'sync_channel_list'}
r = requests.post('https://douban.fm/j/login', data=options, headers=HEADERS)
req_json = json.loads(r.text, object_hook=decode_dict)
# req_json = json.loads(r.text)
if req_json['r'] == 0:
# will not save
# to save
post_data = {'liked': req_json['user_info']['play_record']['liked'], 'banned': req_json['user_info']['play_record']['banned'], 'played': req_json['user_info']['play_record']['played'], 'is_pro': req_json['user_info']['is_pro'], 'user_name': req_json['user_info']['name'], 'cookies': r.cookies, 'valume': 50, 'channel': 0, 'theme_id': 0}
return post_data # depends on [control=['if'], data=[]]
print(ERROR + req_json['err_msg']) # depends on [control=['while'], data=[]] |
def start_tpot(automated_run, session, path):
"""Starts a TPOT automated run that exports directly to base learner setup
Args:
automated_run (xcessiv.models.AutomatedRun): Automated run object
session: Valid SQLAlchemy session
path (str, unicode): Path to project folder
"""
module = functions.import_string_code_as_module(automated_run.source)
extraction = session.query(models.Extraction).first()
X, y = extraction.return_train_dataset()
tpot_learner = module.tpot_learner
tpot_learner.fit(X, y)
temp_filename = os.path.join(path, 'tpot-temp-export-{}'.format(os.getpid()))
tpot_learner.export(temp_filename)
with open(temp_filename) as f:
base_learner_source = f.read()
base_learner_source = constants.tpot_learner_docstring + base_learner_source
try:
os.remove(temp_filename)
except OSError:
pass
blo = models.BaseLearnerOrigin(
source=base_learner_source,
name='TPOT Learner',
meta_feature_generator='predict'
)
session.add(blo)
session.commit() | def function[start_tpot, parameter[automated_run, session, path]]:
constant[Starts a TPOT automated run that exports directly to base learner setup
Args:
automated_run (xcessiv.models.AutomatedRun): Automated run object
session: Valid SQLAlchemy session
path (str, unicode): Path to project folder
]
variable[module] assign[=] call[name[functions].import_string_code_as_module, parameter[name[automated_run].source]]
variable[extraction] assign[=] call[call[name[session].query, parameter[name[models].Extraction]].first, parameter[]]
<ast.Tuple object at 0x7da1b12c6cb0> assign[=] call[name[extraction].return_train_dataset, parameter[]]
variable[tpot_learner] assign[=] name[module].tpot_learner
call[name[tpot_learner].fit, parameter[name[X], name[y]]]
variable[temp_filename] assign[=] call[name[os].path.join, parameter[name[path], call[constant[tpot-temp-export-{}].format, parameter[call[name[os].getpid, parameter[]]]]]]
call[name[tpot_learner].export, parameter[name[temp_filename]]]
with call[name[open], parameter[name[temp_filename]]] begin[:]
variable[base_learner_source] assign[=] call[name[f].read, parameter[]]
variable[base_learner_source] assign[=] binary_operation[name[constants].tpot_learner_docstring + name[base_learner_source]]
<ast.Try object at 0x7da1b12c7490>
variable[blo] assign[=] call[name[models].BaseLearnerOrigin, parameter[]]
call[name[session].add, parameter[name[blo]]]
call[name[session].commit, parameter[]] | keyword[def] identifier[start_tpot] ( identifier[automated_run] , identifier[session] , identifier[path] ):
literal[string]
identifier[module] = identifier[functions] . identifier[import_string_code_as_module] ( identifier[automated_run] . identifier[source] )
identifier[extraction] = identifier[session] . identifier[query] ( identifier[models] . identifier[Extraction] ). identifier[first] ()
identifier[X] , identifier[y] = identifier[extraction] . identifier[return_train_dataset] ()
identifier[tpot_learner] = identifier[module] . identifier[tpot_learner]
identifier[tpot_learner] . identifier[fit] ( identifier[X] , identifier[y] )
identifier[temp_filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] . identifier[format] ( identifier[os] . identifier[getpid] ()))
identifier[tpot_learner] . identifier[export] ( identifier[temp_filename] )
keyword[with] identifier[open] ( identifier[temp_filename] ) keyword[as] identifier[f] :
identifier[base_learner_source] = identifier[f] . identifier[read] ()
identifier[base_learner_source] = identifier[constants] . identifier[tpot_learner_docstring] + identifier[base_learner_source]
keyword[try] :
identifier[os] . identifier[remove] ( identifier[temp_filename] )
keyword[except] identifier[OSError] :
keyword[pass]
identifier[blo] = identifier[models] . identifier[BaseLearnerOrigin] (
identifier[source] = identifier[base_learner_source] ,
identifier[name] = literal[string] ,
identifier[meta_feature_generator] = literal[string]
)
identifier[session] . identifier[add] ( identifier[blo] )
identifier[session] . identifier[commit] () | def start_tpot(automated_run, session, path):
"""Starts a TPOT automated run that exports directly to base learner setup
Args:
automated_run (xcessiv.models.AutomatedRun): Automated run object
session: Valid SQLAlchemy session
path (str, unicode): Path to project folder
"""
module = functions.import_string_code_as_module(automated_run.source)
extraction = session.query(models.Extraction).first()
(X, y) = extraction.return_train_dataset()
tpot_learner = module.tpot_learner
tpot_learner.fit(X, y)
temp_filename = os.path.join(path, 'tpot-temp-export-{}'.format(os.getpid()))
tpot_learner.export(temp_filename)
with open(temp_filename) as f:
base_learner_source = f.read() # depends on [control=['with'], data=['f']]
base_learner_source = constants.tpot_learner_docstring + base_learner_source
try:
os.remove(temp_filename) # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]]
blo = models.BaseLearnerOrigin(source=base_learner_source, name='TPOT Learner', meta_feature_generator='predict')
session.add(blo)
session.commit() |
def tryDynLocal(name):
'''
Dynamically import a module and return a module local or raise an exception.
'''
if name.find('.') == -1:
raise s_exc.NoSuchDyn(name=name)
modname, objname = name.rsplit('.', 1)
mod = tryDynMod(modname)
item = getattr(mod, objname, s_common.novalu)
if item is s_common.novalu:
raise s_exc.NoSuchDyn(name=name)
return item | def function[tryDynLocal, parameter[name]]:
constant[
Dynamically import a module and return a module local or raise an exception.
]
if compare[call[name[name].find, parameter[constant[.]]] equal[==] <ast.UnaryOp object at 0x7da20c76cd90>] begin[:]
<ast.Raise object at 0x7da20c76c190>
<ast.Tuple object at 0x7da20c76cd00> assign[=] call[name[name].rsplit, parameter[constant[.], constant[1]]]
variable[mod] assign[=] call[name[tryDynMod], parameter[name[modname]]]
variable[item] assign[=] call[name[getattr], parameter[name[mod], name[objname], name[s_common].novalu]]
if compare[name[item] is name[s_common].novalu] begin[:]
<ast.Raise object at 0x7da20c76c130>
return[name[item]] | keyword[def] identifier[tryDynLocal] ( identifier[name] ):
literal[string]
keyword[if] identifier[name] . identifier[find] ( literal[string] )==- literal[int] :
keyword[raise] identifier[s_exc] . identifier[NoSuchDyn] ( identifier[name] = identifier[name] )
identifier[modname] , identifier[objname] = identifier[name] . identifier[rsplit] ( literal[string] , literal[int] )
identifier[mod] = identifier[tryDynMod] ( identifier[modname] )
identifier[item] = identifier[getattr] ( identifier[mod] , identifier[objname] , identifier[s_common] . identifier[novalu] )
keyword[if] identifier[item] keyword[is] identifier[s_common] . identifier[novalu] :
keyword[raise] identifier[s_exc] . identifier[NoSuchDyn] ( identifier[name] = identifier[name] )
keyword[return] identifier[item] | def tryDynLocal(name):
"""
Dynamically import a module and return a module local or raise an exception.
"""
if name.find('.') == -1:
raise s_exc.NoSuchDyn(name=name) # depends on [control=['if'], data=[]]
(modname, objname) = name.rsplit('.', 1)
mod = tryDynMod(modname)
item = getattr(mod, objname, s_common.novalu)
if item is s_common.novalu:
raise s_exc.NoSuchDyn(name=name) # depends on [control=['if'], data=[]]
return item |
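Hedged usage: the dotted path names a module attribute, and a path without a dot raises immediately (s_exc is the exceptions module already referenced above).

join = tryDynLocal('os.path.join')   # resolves the attribute 'join' on os.path
print(join('a', 'b'))                # a/b

try:
    tryDynLocal('nodots')            # no '.' in the name -> NoSuchDyn
except s_exc.NoSuchDyn as e:
    print(e)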
def load_module(self, path, squash=True):
"""Load values from a Python module.
Example module ``config.py``::
DEBUG = True
SQLITE = {
"db": ":memory:"
}
>>> c = ConfigDict()
>>> c.load_module('config')
{'DEBUG': True, 'SQLITE.DB': ':memory:'}
>>> c.load_module("config", False)
{'DEBUG': True, 'SQLITE': {'DB': ':memory:'}}
:param squash: If true (default), dictionary values are assumed to
represent namespaces (see :meth:`load_dict`).
"""
config_obj = load(path)
obj = {key: getattr(config_obj, key) for key in dir(config_obj)
if key.isupper()}
if squash:
self.load_dict(obj)
else:
self.update(obj)
return self | def function[load_module, parameter[self, path, squash]]:
constant[Load values from a Python module.
Example module ``config.py``::
DEBUG = True
SQLITE = {
"db": ":memory:"
}
>>> c = ConfigDict()
>>> c.load_module('config')
{'DEBUG': True, 'SQLITE.DB': ':memory:'}
>>> c.load_module("config", False)
{'DEBUG': True, 'SQLITE': {'DB': ':memory:'}}
:param squash: If true (default), dictionary values are assumed to
represent namespaces (see :meth:`load_dict`).
]
variable[config_obj] assign[=] call[name[load], parameter[name[path]]]
variable[obj] assign[=] <ast.DictComp object at 0x7da20c993f40>
if name[squash] begin[:]
call[name[self].load_dict, parameter[name[obj]]]
return[name[self]] | keyword[def] identifier[load_module] ( identifier[self] , identifier[path] , identifier[squash] = keyword[True] ):
literal[string]
identifier[config_obj] = identifier[load] ( identifier[path] )
identifier[obj] ={ identifier[key] : identifier[getattr] ( identifier[config_obj] , identifier[key] ) keyword[for] identifier[key] keyword[in] identifier[dir] ( identifier[config_obj] )
keyword[if] identifier[key] . identifier[isupper] ()}
keyword[if] identifier[squash] :
identifier[self] . identifier[load_dict] ( identifier[obj] )
keyword[else] :
identifier[self] . identifier[update] ( identifier[obj] )
keyword[return] identifier[self] | def load_module(self, path, squash=True):
"""Load values from a Python module.
Example module ``config.py``::
DEBUG = True
SQLITE = {
"db": ":memory:"
}
>>> c = ConfigDict()
>>> c.load_module('config')
{'DEBUG': True, 'SQLITE.DB': ':memory:'}
>>> c.load_module("config", False)
{'DEBUG': True, 'SQLITE': {'DB': ':memory:'}}
:param squash: If true (default), dictionary values are assumed to
represent namespaces (see :meth:`load_dict`).
"""
config_obj = load(path)
obj = {key: getattr(config_obj, key) for key in dir(config_obj) if key.isupper()}
if squash:
self.load_dict(obj) # depends on [control=['if'], data=[]]
else:
self.update(obj)
return self |
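Usage mirroring the docstring, assuming a bottle-style ConfigDict (the class defining load_module) and a module config.py importable on sys.path with DEBUG = True and SQLITE = {"db": ":memory:"}:

c = ConfigDict()
c.load_module('config')              # squash=True flattens nested dicts
print(c['DEBUG'])                    # True

c = ConfigDict()
c.load_module('config', squash=False)
print(c['SQLITE'])                   # {'db': ':memory:'}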
def hash_name(name, script_pubkey, register_addr=None):
"""
Generate the hash over a name and hex-string script pubkey
"""
bin_name = b40_to_bin(name)
name_and_pubkey = bin_name + unhexlify(script_pubkey)
if register_addr is not None:
name_and_pubkey += str(register_addr)
return hex_hash160(name_and_pubkey) | def function[hash_name, parameter[name, script_pubkey, register_addr]]:
constant[
Generate the hash over a name and hex-string script pubkey
]
variable[bin_name] assign[=] call[name[b40_to_bin], parameter[name[name]]]
variable[name_and_pubkey] assign[=] binary_operation[name[bin_name] + call[name[unhexlify], parameter[name[script_pubkey]]]]
if compare[name[register_addr] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b2344e20>
return[call[name[hex_hash160], parameter[name[name_and_pubkey]]]] | keyword[def] identifier[hash_name] ( identifier[name] , identifier[script_pubkey] , identifier[register_addr] = keyword[None] ):
literal[string]
identifier[bin_name] = identifier[b40_to_bin] ( identifier[name] )
identifier[name_and_pubkey] = identifier[bin_name] + identifier[unhexlify] ( identifier[script_pubkey] )
keyword[if] identifier[register_addr] keyword[is] keyword[not] keyword[None] :
identifier[name_and_pubkey] += identifier[str] ( identifier[register_addr] )
keyword[return] identifier[hex_hash160] ( identifier[name_and_pubkey] ) | def hash_name(name, script_pubkey, register_addr=None):
"""
Generate the hash over a name and hex-string script pubkey
"""
bin_name = b40_to_bin(name)
name_and_pubkey = bin_name + unhexlify(script_pubkey)
if register_addr is not None:
name_and_pubkey += str(register_addr) # depends on [control=['if'], data=['register_addr']]
return hex_hash160(name_and_pubkey) |
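The helpers are not shown here; hex_hash160 presumably computes the standard Bitcoin hash160 (RIPEMD-160 over SHA-256), which would look roughly like this sketch:

import hashlib

def _hex_hash160(data):
    # assumed equivalent of hex_hash160: RIPEMD-160 over SHA-256
    sha = hashlib.sha256(data).digest()
    return hashlib.new('ripemd160', sha).hexdigest()

So hash_name effectively commits to (name, pubkey script[, register address]) in a single 20-byte digest.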
def ungroup(grouped_items, groupxs, maxval=None, fill=None):
"""
Ungroups items
Args:
grouped_items (list):
groupxs (list):
maxval (int): (default = None)
Returns:
list: ungrouped_items
SeeAlso:
vt.invert_apply_grouping
CommandLine:
python -m utool.util_alg ungroup_unique
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> grouped_items = [[1.1, 1.2], [2.1, 2.2], [3.1, 3.2]]
>>> groupxs = [[0, 2], [1, 5], [4, 3]]
>>> maxval = None
>>> ungrouped_items = ungroup(grouped_items, groupxs, maxval)
>>> result = ('ungrouped_items = %s' % (ut.repr2(ungrouped_items),))
>>> print(result)
ungrouped_items = [1.1, 2.1, 1.2, 3.2, 3.1, 2.2]
"""
if maxval is None:
# Determine the number of items if unknown
maxpergroup = [max(xs) if len(xs) else 0 for xs in groupxs]
maxval = max(maxpergroup) if len(maxpergroup) else 0
# Allocate an array containing the newly flattened items
ungrouped_items = [fill] * (maxval + 1)
# Populate the array
for itemgroup, xs in zip(grouped_items, groupxs):
for item, x in zip(itemgroup, xs):
ungrouped_items[x] = item
return ungrouped_items | def function[ungroup, parameter[grouped_items, groupxs, maxval, fill]]:
constant[
Ungroups items
Args:
grouped_items (list):
groupxs (list):
maxval (int): (default = None)
Returns:
list: ungrouped_items
SeeAlso:
vt.invert_apply_grouping
CommandLine:
python -m utool.util_alg ungroup_unique
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> grouped_items = [[1.1, 1.2], [2.1, 2.2], [3.1, 3.2]]
>>> groupxs = [[0, 2], [1, 5], [4, 3]]
>>> maxval = None
>>> ungrouped_items = ungroup(grouped_items, groupxs, maxval)
>>> result = ('ungrouped_items = %s' % (ut.repr2(ungrouped_items),))
>>> print(result)
ungrouped_items = [1.1, 2.1, 1.2, 3.2, 3.1, 2.2]
]
if compare[name[maxval] is constant[None]] begin[:]
variable[maxpergroup] assign[=] <ast.ListComp object at 0x7da1b235aad0>
variable[maxval] assign[=] <ast.IfExp object at 0x7da1b2358550>
variable[ungrouped_items] assign[=] binary_operation[list[[<ast.Name object at 0x7da1b23593f0>]] * binary_operation[name[maxval] + constant[1]]]
for taget[tuple[[<ast.Name object at 0x7da1b235b850>, <ast.Name object at 0x7da1b23591b0>]]] in starred[call[name[zip], parameter[name[grouped_items], name[groupxs]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b24b6050>, <ast.Name object at 0x7da1b24b53f0>]]] in starred[call[name[zip], parameter[name[itemgroup], name[xs]]]] begin[:]
call[name[ungrouped_items]][name[x]] assign[=] name[item]
return[name[ungrouped_items]] | keyword[def] identifier[ungroup] ( identifier[grouped_items] , identifier[groupxs] , identifier[maxval] = keyword[None] , identifier[fill] = keyword[None] ):
literal[string]
keyword[if] identifier[maxval] keyword[is] keyword[None] :
identifier[maxpergroup] =[ identifier[max] ( identifier[xs] ) keyword[if] identifier[len] ( identifier[xs] ) keyword[else] literal[int] keyword[for] identifier[xs] keyword[in] identifier[groupxs] ]
identifier[maxval] = identifier[max] ( identifier[maxpergroup] ) keyword[if] identifier[len] ( identifier[maxpergroup] ) keyword[else] literal[int]
identifier[ungrouped_items] =[ identifier[fill] ]*( identifier[maxval] + literal[int] )
keyword[for] identifier[itemgroup] , identifier[xs] keyword[in] identifier[zip] ( identifier[grouped_items] , identifier[groupxs] ):
keyword[for] identifier[item] , identifier[x] keyword[in] identifier[zip] ( identifier[itemgroup] , identifier[xs] ):
identifier[ungrouped_items] [ identifier[x] ]= identifier[item]
keyword[return] identifier[ungrouped_items] | def ungroup(grouped_items, groupxs, maxval=None, fill=None):
"""
Ungroups items
Args:
grouped_items (list):
groupxs (list):
maxval (int): (default = None)
Returns:
list: ungrouped_items
SeeAlso:
vt.invert_apply_grouping
CommandLine:
python -m utool.util_alg ungroup_unique
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> grouped_items = [[1.1, 1.2], [2.1, 2.2], [3.1, 3.2]]
>>> groupxs = [[0, 2], [1, 5], [4, 3]]
>>> maxval = None
>>> ungrouped_items = ungroup(grouped_items, groupxs, maxval)
>>> result = ('ungrouped_items = %s' % (ut.repr2(ungrouped_items),))
>>> print(result)
ungrouped_items = [1.1, 2.1, 1.2, 3.2, 3.1, 2.2]
"""
if maxval is None:
# Determine the number of items if unknown
maxpergroup = [max(xs) if len(xs) else 0 for xs in groupxs]
maxval = max(maxpergroup) if len(maxpergroup) else 0 # depends on [control=['if'], data=['maxval']]
# Allocate an array containing the newly flattened items
ungrouped_items = [fill] * (maxval + 1)
# Populate the array
for (itemgroup, xs) in zip(grouped_items, groupxs):
for (item, x) in zip(itemgroup, xs):
ungrouped_items[x] = item # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return ungrouped_items |
def indexOf(self, action):
"""
Returns the index of the inputted action.
:param action | <QAction> || None
:return <int>
"""
for i, act in enumerate(self.actionGroup().actions()):
if action in (act, act.objectName(), act.text()):
return i
return -1 | def function[indexOf, parameter[self, action]]:
constant[
Returns the index of the inputted action.
:param action | <QAction> || None
:return <int>
]
for taget[tuple[[<ast.Name object at 0x7da18f09dea0>, <ast.Name object at 0x7da18f09c310>]]] in starred[call[name[enumerate], parameter[call[call[name[self].actionGroup, parameter[]].actions, parameter[]]]]] begin[:]
if compare[name[action] in tuple[[<ast.Name object at 0x7da18f09df60>, <ast.Call object at 0x7da18f09f400>, <ast.Call object at 0x7da18f09e950>]]] begin[:]
return[name[i]]
return[<ast.UnaryOp object at 0x7da18eb55810>] | keyword[def] identifier[indexOf] ( identifier[self] , identifier[action] ):
literal[string]
keyword[for] identifier[i] , identifier[act] keyword[in] identifier[enumerate] ( identifier[self] . identifier[actionGroup] (). identifier[actions] ()):
keyword[if] identifier[action] keyword[in] ( identifier[act] , identifier[act] . identifier[objectName] (), identifier[act] . identifier[text] ()):
keyword[return] identifier[i]
keyword[return] - literal[int] | def indexOf(self, action):
"""
Returns the index of the inputted action.
:param action | <QAction> || None
:return <int>
"""
for (i, act) in enumerate(self.actionGroup().actions()):
if action in (act, act.objectName(), act.text()):
return i # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return -1 |
def main():
"""
NAME
change_case_magic.py
DESCRIPTION
picks out keys and converts them to upper or lower case
SYNTAX
change_case_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file
-F FILE: specify output magic format file, default is to overwrite input file
-keys KEY1:KEY2 specify colon-delimited list of keys to convert
-[U,l] : specify [U]PPER or [l]ower case, default is lower
"""
dir_path="./"
change='l'
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
magic_file=dir_path+'/'+sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
out_file=dir_path+'/'+sys.argv[ind+1]
else: out_file=magic_file
if '-keys' in sys.argv:
ind=sys.argv.index('-keys')
grab_keys=sys.argv[ind+1].split(":")
else:
print(main.__doc__)
sys.exit()
if '-U' in sys.argv: change='U'
#
#
# get data read in
Data,file_type=pmag.magic_read(magic_file)
if len(Data)>0:
for grab_key in grab_keys:
for rec in Data:
if change=='l':
rec[grab_key]=rec[grab_key].lower()
else:
rec[grab_key]=rec[grab_key].upper()
else:
print('bad file name')
pmag.magic_write(out_file,Data,file_type) | def function[main, parameter[]]:
constant[
NAME
change_case_magic.py
DESCRIPTION
picks out keys and converts them to upper or lower case
SYNTAX
change_case_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file
-F FILE: specify output magic format file, default is to overwrite input file
-keys KEY1:KEY2 specify colon-delimited list of keys to convert
-[U,l] : specify [U]PPER or [l]ower case, default is lower
]
variable[dir_path] assign[=] constant[./]
variable[change] assign[=] constant[l]
if compare[constant[-WD] in name[sys].argv] begin[:]
variable[ind] assign[=] call[name[sys].argv.index, parameter[constant[-WD]]]
variable[dir_path] assign[=] call[name[sys].argv][binary_operation[name[ind] + constant[1]]]
if compare[constant[-h] in name[sys].argv] begin[:]
call[name[print], parameter[name[main].__doc__]]
call[name[sys].exit, parameter[]]
if compare[constant[-f] in name[sys].argv] begin[:]
variable[ind] assign[=] call[name[sys].argv.index, parameter[constant[-f]]]
variable[magic_file] assign[=] binary_operation[binary_operation[name[dir_path] + constant[/]] + call[name[sys].argv][binary_operation[name[ind] + constant[1]]]]
if compare[constant[-F] in name[sys].argv] begin[:]
variable[ind] assign[=] call[name[sys].argv.index, parameter[constant[-F]]]
variable[out_file] assign[=] binary_operation[binary_operation[name[dir_path] + constant[/]] + call[name[sys].argv][binary_operation[name[ind] + constant[1]]]]
if compare[constant[-keys] in name[sys].argv] begin[:]
variable[ind] assign[=] call[name[sys].argv.index, parameter[constant[-keys]]]
variable[grab_keys] assign[=] call[call[name[sys].argv][binary_operation[name[ind] + constant[1]]].split, parameter[constant[:]]]
if compare[constant[-U] in name[sys].argv] begin[:]
variable[change] assign[=] constant[U]
<ast.Tuple object at 0x7da1b03d1f60> assign[=] call[name[pmag].magic_read, parameter[name[magic_file]]]
if compare[call[name[len], parameter[name[Data]]] greater[>] constant[0]] begin[:]
for taget[name[grab_key]] in starred[name[grab_keys]] begin[:]
for taget[name[rec]] in starred[name[Data]] begin[:]
if compare[name[change] equal[==] constant[l]] begin[:]
call[name[rec]][name[grab_key]] assign[=] call[call[name[rec]][name[grab_key]].lower, parameter[]]
call[name[pmag].magic_write, parameter[name[out_file], name[Data], name[file_type]]] | keyword[def] identifier[main] ():
literal[string]
identifier[dir_path] = literal[string]
identifier[change] = literal[string]
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] :
identifier[ind] = identifier[sys] . identifier[argv] . identifier[index] ( literal[string] )
identifier[dir_path] = identifier[sys] . identifier[argv] [ identifier[ind] + literal[int] ]
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] :
identifier[print] ( identifier[main] . identifier[__doc__] )
identifier[sys] . identifier[exit] ()
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] :
identifier[ind] = identifier[sys] . identifier[argv] . identifier[index] ( literal[string] )
identifier[magic_file] = identifier[dir_path] + literal[string] + identifier[sys] . identifier[argv] [ identifier[ind] + literal[int] ]
keyword[else] :
identifier[print] ( identifier[main] . identifier[__doc__] )
identifier[sys] . identifier[exit] ()
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] :
identifier[ind] = identifier[sys] . identifier[argv] . identifier[index] ( literal[string] )
identifier[out_file] = identifier[dir_path] + literal[string] + identifier[sys] . identifier[argv] [ identifier[ind] + literal[int] ]
keyword[else] : identifier[out_file] = identifier[magic_file]
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] :
identifier[ind] = identifier[sys] . identifier[argv] . identifier[index] ( literal[string] )
identifier[grab_keys] = identifier[sys] . identifier[argv] [ identifier[ind] + literal[int] ]. identifier[split] ( literal[string] )
keyword[else] :
identifier[print] ( identifier[main] . identifier[__doc__] )
identifier[sys] . identifier[exit] ()
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[argv] : identifier[change] = literal[string]
identifier[Data] , identifier[file_type] = identifier[pmag] . identifier[magic_read] ( identifier[magic_file] )
keyword[if] identifier[len] ( identifier[Data] )> literal[int] :
keyword[for] identifier[grab_key] keyword[in] identifier[grab_keys] :
keyword[for] identifier[rec] keyword[in] identifier[Data] :
keyword[if] identifier[change] == literal[string] :
identifier[rec] [ identifier[grab_key] ]= identifier[rec] [ identifier[grab_key] ]. identifier[lower] ()
keyword[else] :
identifier[rec] [ identifier[grab_key] ]= identifier[rec] [ identifier[grab_key] ]. identifier[upper] ()
keyword[else] :
identifier[print] ( literal[string] )
identifier[pmag] . identifier[magic_write] ( identifier[out_file] , identifier[Data] , identifier[file_type] ) | def main():
"""
NAME
change_case_magic.py
DESCRIPTION
picks out keys and converts them to upper or lower case
SYNTAX
change_case_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file
-F FILE: specify output magic format file , default is to overwrite input file
-keys KEY1:KEY2 specify colon delimited list of keys to convert
-[U,l] : specify [U]PPER or [l]ower case, default is lower
"""
dir_path = './'
change = 'l'
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind + 1] # depends on [control=['if'], data=[]]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit() # depends on [control=['if'], data=[]]
if '-f' in sys.argv:
ind = sys.argv.index('-f')
magic_file = dir_path + '/' + sys.argv[ind + 1] # depends on [control=['if'], data=[]]
else:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind = sys.argv.index('-F')
out_file = dir_path + '/' + sys.argv[ind + 1] # depends on [control=['if'], data=[]]
else:
out_file = magic_file
if '-keys' in sys.argv:
ind = sys.argv.index('-keys')
grab_keys = sys.argv[ind + 1].split(':') # depends on [control=['if'], data=[]]
else:
print(main.__doc__)
sys.exit()
if '-U' in sys.argv:
change = 'U' # depends on [control=['if'], data=[]]
#
#
# get data read in
(Data, file_type) = pmag.magic_read(magic_file)
if len(Data) > 0:
for grab_key in grab_keys:
for rec in Data:
if change == 'l':
rec[grab_key] = rec[grab_key].lower() # depends on [control=['if'], data=[]]
else:
rec[grab_key] = rec[grab_key].upper() # depends on [control=['for'], data=['rec']] # depends on [control=['for'], data=['grab_key']] # depends on [control=['if'], data=[]]
else:
print('bad file name')
pmag.magic_write(out_file, Data, file_type) |
def create(self, check, notification_plan, criteria=None,
disabled=False, label=None, name=None, metadata=None):
"""
Creates an alarm that binds the check on the given entity with a
notification plan.
Note that the 'criteria' parameter, if supplied, should be a string
representing the DSL for describing alerting conditions and their
output states. Pyrax does not do any validation of these criteria
statements; it is up to you as the developer to understand the language
and correctly form the statement. This alarm language is documented
online in the Cloud Monitoring section of http://docs.rackspace.com.
"""
uri = "/%s" % self.uri_base
body = {"check_id": utils.get_id(check),
"notification_plan_id": utils.get_id(notification_plan),
}
if criteria:
body["criteria"] = criteria
if disabled is not None:
body["disabled"] = disabled
label_name = label or name
if label_name:
body["label"] = label_name
if metadata:
body["metadata"] = metadata
resp, resp_body = self.api.method_post(uri, body=body)
if resp.status_code == 201:
alarm_id = resp.headers["x-object-id"]
return self.get(alarm_id) | def function[create, parameter[self, check, notification_plan, criteria, disabled, label, name, metadata]]:
constant[
Creates an alarm that binds the check on the given entity with a
notification plan.
Note that the 'criteria' parameter, if supplied, should be a string
representing the DSL for describing alerting conditions and their
output states. Pyrax does not do any validation of these criteria
statements; it is up to you as the developer to understand the language
and correctly form the statement. This alarm language is documented
online in the Cloud Monitoring section of http://docs.rackspace.com.
]
variable[uri] assign[=] binary_operation[constant[/%s] <ast.Mod object at 0x7da2590d6920> name[self].uri_base]
variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da1b055b100>, <ast.Constant object at 0x7da1b0558d60>], [<ast.Call object at 0x7da1b0558670>, <ast.Call object at 0x7da1b055b6d0>]]
if name[criteria] begin[:]
call[name[body]][constant[criteria]] assign[=] name[criteria]
if compare[name[disabled] is_not constant[None]] begin[:]
call[name[body]][constant[disabled]] assign[=] name[disabled]
variable[label_name] assign[=] <ast.BoolOp object at 0x7da1b0559840>
if name[label_name] begin[:]
call[name[body]][constant[label]] assign[=] name[label_name]
if name[metadata] begin[:]
call[name[body]][constant[metadata]] assign[=] name[metadata]
<ast.Tuple object at 0x7da1b056f130> assign[=] call[name[self].api.method_post, parameter[name[uri]]]
if compare[name[resp].status_code equal[==] constant[201]] begin[:]
variable[alarm_id] assign[=] call[name[resp].headers][constant[x-object-id]]
return[call[name[self].get, parameter[name[alarm_id]]]] | keyword[def] identifier[create] ( identifier[self] , identifier[check] , identifier[notification_plan] , identifier[criteria] = keyword[None] ,
identifier[disabled] = keyword[False] , identifier[label] = keyword[None] , identifier[name] = keyword[None] , identifier[metadata] = keyword[None] ):
literal[string]
identifier[uri] = literal[string] % identifier[self] . identifier[uri_base]
identifier[body] ={ literal[string] : identifier[utils] . identifier[get_id] ( identifier[check] ),
literal[string] : identifier[utils] . identifier[get_id] ( identifier[notification_plan] ),
}
keyword[if] identifier[criteria] :
identifier[body] [ literal[string] ]= identifier[criteria]
keyword[if] identifier[disabled] keyword[is] keyword[not] keyword[None] :
identifier[body] [ literal[string] ]= identifier[disabled]
identifier[label_name] = identifier[label] keyword[or] identifier[name]
keyword[if] identifier[label_name] :
identifier[body] [ literal[string] ]= identifier[label_name]
keyword[if] identifier[metadata] :
identifier[body] [ literal[string] ]= identifier[metadata]
identifier[resp] , identifier[resp_body] = identifier[self] . identifier[api] . identifier[method_post] ( identifier[uri] , identifier[body] = identifier[body] )
keyword[if] identifier[resp] . identifier[status_code] == literal[int] :
identifier[alarm_id] = identifier[resp] . identifier[headers] [ literal[string] ]
keyword[return] identifier[self] . identifier[get] ( identifier[alarm_id] ) | def create(self, check, notification_plan, criteria=None, disabled=False, label=None, name=None, metadata=None):
"""
Creates an alarm that binds the check on the given entity with a
notification plan.
Note that the 'criteria' parameter, if supplied, should be a string
representing the DSL for describing alerting conditions and their
output states. Pyrax does not do any validation of these criteria
statements; it is up to you as the developer to understand the language
and correctly form the statement. This alarm language is documented
online in the Cloud Monitoring section of http://docs.rackspace.com.
"""
uri = '/%s' % self.uri_base
body = {'check_id': utils.get_id(check), 'notification_plan_id': utils.get_id(notification_plan)}
if criteria:
body['criteria'] = criteria # depends on [control=['if'], data=[]]
if disabled is not None:
body['disabled'] = disabled # depends on [control=['if'], data=['disabled']]
label_name = label or name
if label_name:
body['label'] = label_name # depends on [control=['if'], data=[]]
if metadata:
body['metadata'] = metadata # depends on [control=['if'], data=[]]
(resp, resp_body) = self.api.method_post(uri, body=body)
if resp.status_code == 201:
alarm_id = resp.headers['x-object-id']
return self.get(alarm_id) # depends on [control=['if'], data=[]] |
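Hypothetical usage; mgr stands for an entity's alarm manager, and the criteria string follows the Cloud Monitoring alarm DSL mentioned in the docstring (this exact statement is illustrative, not validated here).

criteria = ("if (metric['average'] > 100) { return new AlarmStatus(CRITICAL); } "
            "return new AlarmStatus(OK);")
alarm = mgr.create(check, notification_plan,
                   criteria=criteria, label='high-average')
print(alarm.id)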
def get_partition_names(self, db_name, tbl_name, max_parts):
"""
Parameters:
- db_name
- tbl_name
- max_parts
"""
self.send_get_partition_names(db_name, tbl_name, max_parts)
return self.recv_get_partition_names() | def function[get_partition_names, parameter[self, db_name, tbl_name, max_parts]]:
constant[
Parameters:
- db_name
- tbl_name
- max_parts
]
call[name[self].send_get_partition_names, parameter[name[db_name], name[tbl_name], name[max_parts]]]
return[call[name[self].recv_get_partition_names, parameter[]]] | keyword[def] identifier[get_partition_names] ( identifier[self] , identifier[db_name] , identifier[tbl_name] , identifier[max_parts] ):
literal[string]
identifier[self] . identifier[send_get_partition_names] ( identifier[db_name] , identifier[tbl_name] , identifier[max_parts] )
keyword[return] identifier[self] . identifier[recv_get_partition_names] () | def get_partition_names(self, db_name, tbl_name, max_parts):
"""
Parameters:
- db_name
- tbl_name
- max_parts
"""
self.send_get_partition_names(db_name, tbl_name, max_parts)
return self.recv_get_partition_names() |
def confusion_matrix_and_correct_series(self, y_info):
''' Generate confusion matrix from y_info '''
a = deepcopy(y_info['true'])
true_count = dict((i, a.count(i)) for i in set(a))
a = deepcopy(y_info['pred'])
pred_count = dict((i, a.count(i)) for i in set(a))
sorted_cats = sorted(list(set(y_info['true'] + y_info['pred'])))
conf_mat = confusion_matrix(y_info['true'], y_info['pred'], sorted_cats)
df_conf = pd.DataFrame(conf_mat, index=sorted_cats, columns=sorted_cats)
total_correct = np.trace(df_conf)
total_pred = df_conf.sum().sum()
fraction_correct = total_correct/float(total_pred)
# calculate ser_correct
correct_list = []
cat_counts = df_conf.sum(axis=1)
all_cols = df_conf.columns.tolist()
for inst_cat in all_cols:
inst_correct = df_conf[inst_cat].loc[inst_cat] / cat_counts[inst_cat]
correct_list.append(inst_correct)
ser_correct = pd.Series(data=correct_list, index=all_cols)
populations = {}
populations['true'] = true_count
populations['pred'] = pred_count
return df_conf, populations, ser_correct, fraction_correct | def function[confusion_matrix_and_correct_series, parameter[self, y_info]]:
constant[ Generate confusion matrix from y_info ]
variable[a] assign[=] call[name[deepcopy], parameter[call[name[y_info]][constant[true]]]]
variable[true_count] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b050c7f0>]]
variable[a] assign[=] call[name[deepcopy], parameter[call[name[y_info]][constant[pred]]]]
variable[pred_count] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b050e230>]]
variable[sorted_cats] assign[=] call[name[sorted], parameter[call[name[list], parameter[call[name[set], parameter[binary_operation[call[name[y_info]][constant[true]] + call[name[y_info]][constant[pred]]]]]]]]]
variable[conf_mat] assign[=] call[name[confusion_matrix], parameter[call[name[y_info]][constant[true]], call[name[y_info]][constant[pred]], name[sorted_cats]]]
variable[df_conf] assign[=] call[name[pd].DataFrame, parameter[name[conf_mat]]]
variable[total_correct] assign[=] call[name[np].trace, parameter[name[df_conf]]]
variable[total_pred] assign[=] call[call[name[df_conf].sum, parameter[]].sum, parameter[]]
variable[fraction_correct] assign[=] binary_operation[name[total_correct] / call[name[float], parameter[name[total_pred]]]]
variable[correct_list] assign[=] list[[]]
variable[cat_counts] assign[=] call[name[df_conf].sum, parameter[]]
variable[all_cols] assign[=] call[name[df_conf].columns.tolist, parameter[]]
for taget[name[inst_cat]] in starred[name[all_cols]] begin[:]
variable[inst_correct] assign[=] binary_operation[call[call[name[df_conf]][name[inst_cat]].loc][name[inst_cat]] / call[name[cat_counts]][name[inst_cat]]]
call[name[correct_list].append, parameter[name[inst_correct]]]
variable[ser_correct] assign[=] call[name[pd].Series, parameter[]]
variable[populations] assign[=] dictionary[[], []]
call[name[populations]][constant[true]] assign[=] name[true_count]
call[name[populations]][constant[pred]] assign[=] name[pred_count]
return[tuple[[<ast.Name object at 0x7da1b26ac3a0>, <ast.Name object at 0x7da1b26aefe0>, <ast.Name object at 0x7da1b26ada80>, <ast.Name object at 0x7da1b26af670>]]] | keyword[def] identifier[confusion_matrix_and_correct_series] ( identifier[self] , identifier[y_info] ):
literal[string]
identifier[a] = identifier[deepcopy] ( identifier[y_info] [ literal[string] ])
identifier[true_count] = identifier[dict] (( identifier[i] , identifier[a] . identifier[count] ( identifier[i] )) keyword[for] identifier[i] keyword[in] identifier[set] ( identifier[a] ))
identifier[a] = identifier[deepcopy] ( identifier[y_info] [ literal[string] ])
identifier[pred_count] = identifier[dict] (( identifier[i] , identifier[a] . identifier[count] ( identifier[i] )) keyword[for] identifier[i] keyword[in] identifier[set] ( identifier[a] ))
identifier[sorted_cats] = identifier[sorted] ( identifier[list] ( identifier[set] ( identifier[y_info] [ literal[string] ]+ identifier[y_info] [ literal[string] ])))
identifier[conf_mat] = identifier[confusion_matrix] ( identifier[y_info] [ literal[string] ], identifier[y_info] [ literal[string] ], identifier[sorted_cats] )
identifier[df_conf] = identifier[pd] . identifier[DataFrame] ( identifier[conf_mat] , identifier[index] = identifier[sorted_cats] , identifier[columns] = identifier[sorted_cats] )
identifier[total_correct] = identifier[np] . identifier[trace] ( identifier[df_conf] )
identifier[total_pred] = identifier[df_conf] . identifier[sum] (). identifier[sum] ()
identifier[fraction_correct] = identifier[total_correct] / identifier[float] ( identifier[total_pred] )
identifier[correct_list] =[]
identifier[cat_counts] = identifier[df_conf] . identifier[sum] ( identifier[axis] = literal[int] )
identifier[all_cols] = identifier[df_conf] . identifier[columns] . identifier[tolist] ()
keyword[for] identifier[inst_cat] keyword[in] identifier[all_cols] :
identifier[inst_correct] = identifier[df_conf] [ identifier[inst_cat] ]. identifier[loc] [ identifier[inst_cat] ]/ identifier[cat_counts] [ identifier[inst_cat] ]
identifier[correct_list] . identifier[append] ( identifier[inst_correct] )
identifier[ser_correct] = identifier[pd] . identifier[Series] ( identifier[data] = identifier[correct_list] , identifier[index] = identifier[all_cols] )
identifier[populations] ={}
identifier[populations] [ literal[string] ]= identifier[true_count]
identifier[populations] [ literal[string] ]= identifier[pred_count]
keyword[return] identifier[df_conf] , identifier[populations] , identifier[ser_correct] , identifier[fraction_correct] | def confusion_matrix_and_correct_series(self, y_info):
""" Generate confusion matrix from y_info """
a = deepcopy(y_info['true'])
true_count = dict(((i, a.count(i)) for i in set(a)))
a = deepcopy(y_info['pred'])
pred_count = dict(((i, a.count(i)) for i in set(a)))
sorted_cats = sorted(list(set(y_info['true'] + y_info['pred'])))
conf_mat = confusion_matrix(y_info['true'], y_info['pred'], sorted_cats)
df_conf = pd.DataFrame(conf_mat, index=sorted_cats, columns=sorted_cats)
total_correct = np.trace(df_conf)
total_pred = df_conf.sum().sum()
fraction_correct = total_correct / float(total_pred)
# calculate ser_correct
correct_list = []
cat_counts = df_conf.sum(axis=1)
all_cols = df_conf.columns.tolist()
for inst_cat in all_cols:
inst_correct = df_conf[inst_cat].loc[inst_cat] / cat_counts[inst_cat]
correct_list.append(inst_correct) # depends on [control=['for'], data=['inst_cat']]
ser_correct = pd.Series(data=correct_list, index=all_cols)
populations = {}
populations['true'] = true_count
populations['pred'] = pred_count
return (df_conf, populations, ser_correct, fraction_correct) |
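A minimal, self-contained sketch of the same per-category recall logic, assuming pandas, numpy, and scikit-learn are installed; the example labels are hypothetical:
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix

y_true = ['cat', 'dog', 'cat', 'bird']
y_pred = ['cat', 'dog', 'bird', 'bird']
cats = sorted(set(y_true + y_pred))
df_conf = pd.DataFrame(confusion_matrix(y_true, y_pred, labels=cats),
                       index=cats, columns=cats)
fraction_correct = np.trace(df_conf) / float(df_conf.sum().sum())   # 0.75
# diagonal over row sums = fraction of each true category predicted correctly
ser_correct = pd.Series(np.diag(df_conf) / df_conf.sum(axis=1), index=cats)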
def parse_yaml(self, y):
'''Parse a YAML specification of a target execution context into this
object.
'''
super(TargetExecutionContext, self).parse_yaml(y)
if 'id' in y:
self.id = y['id']
else:
self.id = ''
return self | def function[parse_yaml, parameter[self, y]]:
constant[Parse a YAML specification of a target execution context into this
object.
]
call[call[name[super], parameter[name[TargetExecutionContext], name[self]]].parse_yaml, parameter[name[y]]]
if compare[constant[id] in name[y]] begin[:]
name[self].id assign[=] call[name[y]][constant[id]]
return[name[self]] | keyword[def] identifier[parse_yaml] ( identifier[self] , identifier[y] ):
literal[string]
identifier[super] ( identifier[TargetExecutionContext] , identifier[self] ). identifier[parse_yaml] ( identifier[y] )
keyword[if] literal[string] keyword[in] identifier[y] :
identifier[self] . identifier[id] = identifier[y] [ literal[string] ]
keyword[else] :
identifier[self] . identifier[id] = literal[string]
keyword[return] identifier[self] | def parse_yaml(self, y):
"""Parse a YAML specification of a target execution context into this
object.
"""
super(TargetExecutionContext, self).parse_yaml(y)
if 'id' in y:
self.id = y['id'] # depends on [control=['if'], data=['y']]
else:
self.id = ''
return self |
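A hypothetical usage sketch: the method expects an already-parsed YAML mapping, so callers typically pair it with yaml.safe_load; the no-argument constructor below is an assumption:
import yaml

tec = TargetExecutionContext()                  # assumed constructor
tec.parse_yaml(yaml.safe_load('id: ctx-42'))
assert tec.id == 'ctx-42'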
def _list_tenants(self, admin):
"""
Returns either a list of all tenants (admin=True), or the tenant for
the currently-authenticated user (admin=False).
"""
resp, resp_body = self.method_get("tenants", admin=admin)
if 200 <= resp.status_code < 300:
tenants = resp_body.get("tenants", [])
return [Tenant(self, tenant) for tenant in tenants]
elif resp.status_code in (401, 403):
raise exc.AuthorizationFailure("You are not authorized to list "
"tenants.")
else:
raise exc.TenantNotFound("Could not get a list of tenants.") | def function[_list_tenants, parameter[self, admin]]:
constant[
Returns either a list of all tenants (admin=True), or the tenant for
the currently-authenticated user (admin=False).
]
<ast.Tuple object at 0x7da2054a74f0> assign[=] call[name[self].method_get, parameter[constant[tenants]]]
if compare[constant[200] less_or_equal[<=] name[resp].status_code] begin[:]
variable[tenants] assign[=] call[name[resp_body].get, parameter[constant[tenants], list[[]]]]
return[<ast.ListComp object at 0x7da2054a47c0>] | keyword[def] identifier[_list_tenants] ( identifier[self] , identifier[admin] ):
literal[string]
identifier[resp] , identifier[resp_body] = identifier[self] . identifier[method_get] ( literal[string] , identifier[admin] = identifier[admin] )
keyword[if] literal[int] <= identifier[resp] . identifier[status_code] < literal[int] :
identifier[tenants] = identifier[resp_body] . identifier[get] ( literal[string] ,[])
keyword[return] [ identifier[Tenant] ( identifier[self] , identifier[tenant] ) keyword[for] identifier[tenant] keyword[in] identifier[tenants] ]
keyword[elif] identifier[resp] . identifier[status_code] keyword[in] ( literal[int] , literal[int] ):
keyword[raise] identifier[exc] . identifier[AuthorizationFailure] ( literal[string]
literal[string] )
keyword[else] :
keyword[raise] identifier[exc] . identifier[TenantNotFound] ( literal[string] ) | def _list_tenants(self, admin):
"""
Returns either a list of all tenants (admin=True), or the tenant for
the currently-authenticated user (admin=False).
"""
(resp, resp_body) = self.method_get('tenants', admin=admin)
if 200 <= resp.status_code < 300:
tenants = resp_body.get('tenants', [])
return [Tenant(self, tenant) for tenant in tenants] # depends on [control=['if'], data=[]]
elif resp.status_code in (401, 403):
raise exc.AuthorizationFailure('You are not authorized to list tenants.') # depends on [control=['if'], data=[]]
else:
raise exc.TenantNotFound('Could not get a list of tenants.') |
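A hypothetical call sketch; `ident` stands for the identity client instance that owns this method:
try:
    tenants = ident._list_tenants(admin=True)
except exc.AuthorizationFailure:
    tenants = ident._list_tenants(admin=False)  # fall back to own tenant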
def Debugger_setVariableValue(self, scopeNumber, variableName, newValue,
callFrameId):
"""
Function path: Debugger.setVariableValue
Domain: Debugger
Method name: setVariableValue
Parameters:
Required arguments:
'scopeNumber' (type: integer) -> 0-based number of scope as was listed in scope chain. Only 'local', 'closure' and 'catch' scope types are allowed. Other scopes could be manipulated manually.
'variableName' (type: string) -> Variable name.
'newValue' (type: Runtime.CallArgument) -> New variable value.
'callFrameId' (type: CallFrameId) -> Id of callframe that holds variable.
No return value.
Description: Changes value of variable in a callframe. Object-based scopes are not supported and must be mutated manually.
"""
assert isinstance(scopeNumber, (int,)
), "Argument 'scopeNumber' must be of type '['int']'. Received type: '%s'" % type(
scopeNumber)
assert isinstance(variableName, (str,)
), "Argument 'variableName' must be of type '['str']'. Received type: '%s'" % type(
variableName)
subdom_funcs = self.synchronous_command('Debugger.setVariableValue',
scopeNumber=scopeNumber, variableName=variableName, newValue=newValue,
callFrameId=callFrameId)
return subdom_funcs | def function[Debugger_setVariableValue, parameter[self, scopeNumber, variableName, newValue, callFrameId]]:
constant[
Function path: Debugger.setVariableValue
Domain: Debugger
Method name: setVariableValue
Parameters:
Required arguments:
'scopeNumber' (type: integer) -> 0-based number of scope as was listed in scope chain. Only 'local', 'closure' and 'catch' scope types are allowed. Other scopes could be manipulated manually.
'variableName' (type: string) -> Variable name.
'newValue' (type: Runtime.CallArgument) -> New variable value.
'callFrameId' (type: CallFrameId) -> Id of callframe that holds variable.
No return value.
Description: Changes value of variable in a callframe. Object-based scopes are not supported and must be mutated manually.
]
assert[call[name[isinstance], parameter[name[scopeNumber], tuple[[<ast.Name object at 0x7da1b1106740>]]]]]
assert[call[name[isinstance], parameter[name[variableName], tuple[[<ast.Name object at 0x7da1b1105600>]]]]]
variable[subdom_funcs] assign[=] call[name[self].synchronous_command, parameter[constant[Debugger.setVariableValue]]]
return[name[subdom_funcs]] | keyword[def] identifier[Debugger_setVariableValue] ( identifier[self] , identifier[scopeNumber] , identifier[variableName] , identifier[newValue] ,
identifier[callFrameId] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[scopeNumber] ,( identifier[int] ,)
), literal[string] % identifier[type] (
identifier[scopeNumber] )
keyword[assert] identifier[isinstance] ( identifier[variableName] ,( identifier[str] ,)
), literal[string] % identifier[type] (
identifier[variableName] )
identifier[subdom_funcs] = identifier[self] . identifier[synchronous_command] ( literal[string] ,
identifier[scopeNumber] = identifier[scopeNumber] , identifier[variableName] = identifier[variableName] , identifier[newValue] = identifier[newValue] ,
identifier[callFrameId] = identifier[callFrameId] )
keyword[return] identifier[subdom_funcs] | def Debugger_setVariableValue(self, scopeNumber, variableName, newValue, callFrameId):
"""
Function path: Debugger.setVariableValue
Domain: Debugger
Method name: setVariableValue
Parameters:
Required arguments:
'scopeNumber' (type: integer) -> 0-based number of scope as was listed in scope chain. Only 'local', 'closure' and 'catch' scope types are allowed. Other scopes could be manipulated manually.
'variableName' (type: string) -> Variable name.
'newValue' (type: Runtime.CallArgument) -> New variable value.
'callFrameId' (type: CallFrameId) -> Id of callframe that holds variable.
No return value.
Description: Changes value of variable in a callframe. Object-based scopes are not supported and must be mutated manually.
"""
assert isinstance(scopeNumber, (int,)), "Argument 'scopeNumber' must be of type '['int']'. Received type: '%s'" % type(scopeNumber)
assert isinstance(variableName, (str,)), "Argument 'variableName' must be of type '['str']'. Received type: '%s'" % type(variableName)
subdom_funcs = self.synchronous_command('Debugger.setVariableValue', scopeNumber=scopeNumber, variableName=variableName, newValue=newValue, callFrameId=callFrameId)
return subdom_funcs |
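A hypothetical call sketch; `dbg` is the generated wrapper object, and `frame_id` would come from a prior Debugger.paused event:
new_value = {'value': 42}   # Runtime.CallArgument payload
dbg.Debugger_setVariableValue(scopeNumber=0, variableName='counter',
                              newValue=new_value, callFrameId=frame_id)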
def check_base_required_attributes(self, dataset):
'''
Check the global required and highly recommended attributes for 1.1 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:Conventions = "CF-1.6" ; //......................................... REQUIRED - Always try to use latest value. (CF)
:Metadata_Conventions = "Unidata Dataset Discovery v1.0" ; //........ REQUIRED - Do not change. (ACDD)
:featureType = "timeSeries" ; //..................................... REQUIRED - CF attribute for identifying the featureType.
:cdm_data_type = "Station" ; //...................................... REQUIRED (ACDD)
:nodc_template_version = "NODC_NetCDF_TimeSeries_Orthogonal_Template_v1.1" ; //....... REQUIRED (NODC)
:standard_name_vocabulary = "NetCDF Climate and Forecast (CF) Metadata Convention Standard Name Table "X"" ; //........ REQUIRED - If using CF standard name attribute for variables. "X" denotes the table number (ACDD)
'''
test_ctx = TestCtx(BaseCheck.HIGH, 'Required global attributes')
conventions = getattr(dataset, 'Conventions', '')
metadata_conventions = getattr(dataset, 'Metadata_Conventions', '')
feature_type = getattr(dataset, 'featureType', '')
cdm_data_type = getattr(dataset, 'cdm_data_type', '')
standard_name_vocab = getattr(dataset, 'standard_name_vocabulary', '')
accepted_conventions = 'CF-1.6'
test_ctx.assert_true(conventions == accepted_conventions,
'Conventions attribute is missing or is not equal to CF-1.6: {}'.format(conventions))
test_ctx.assert_true(metadata_conventions == 'Unidata Dataset Discovery v1.0',
"Metadata_Conventions attribute is required to be 'Unidata Dataset Discovery v1.0': {}".format(metadata_conventions))
test_ctx.assert_true(feature_type in ['point', 'timeSeries', 'trajectory', 'profile', 'timeSeriesProfile', 'trajectoryProfile'],
'Feature type must be one of point, timeSeries, trajectory, profile, timeSeriesProfile, trajectoryProfile: {}'.format(feature_type))
test_ctx.assert_true(cdm_data_type.lower() in ['grid', 'image', 'point', 'radial', 'station', 'swath', 'trajectory'],
'cdm_data_type must be one of Grid, Image, Point, Radial, Station, Swath, Trajectory: {}'.format(cdm_data_type))
regex = re.compile(r'[sS]tandard [nN]ame [tT]able')
test_ctx.assert_true(regex.search(standard_name_vocab),
"standard_name_vocabulary doesn't contain 'Standard Name Table': {}".format(standard_name_vocab))
return test_ctx.to_result() | def function[check_base_required_attributes, parameter[self, dataset]]:
constant[
Check the global required and highly recommended attributes for 1.1 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:Conventions = "CF-1.6" ; //......................................... REQUIRED - Always try to use latest value. (CF)
:Metadata_Conventions = "Unidata Dataset Discovery v1.0" ; //........ REQUIRED - Do not change. (ACDD)
:featureType = "timeSeries" ; //..................................... REQUIRED - CF attribute for identifying the featureType.
:cdm_data_type = "Station" ; //...................................... REQUIRED (ACDD)
:nodc_template_version = "NODC_NetCDF_TimeSeries_Orthogonal_Template_v1.1" ; //....... REQUIRED (NODC)
:standard_name_vocabulary = "NetCDF Climate and Forecast (CF) Metadata Convention Standard Name Table "X"" ; //........ REQUIRED - If using CF standard name attribute for variables. "X" denotes the table number (ACDD)
]
variable[test_ctx] assign[=] call[name[TestCtx], parameter[name[BaseCheck].HIGH, constant[Required global attributes]]]
variable[conventions] assign[=] call[name[getattr], parameter[name[dataset], constant[Conventions], constant[]]]
variable[metadata_conventions] assign[=] call[name[getattr], parameter[name[dataset], constant[Metadata_Conventions], constant[]]]
variable[feature_type] assign[=] call[name[getattr], parameter[name[dataset], constant[featureType], constant[]]]
variable[cdm_data_type] assign[=] call[name[getattr], parameter[name[dataset], constant[cdm_data_type], constant[]]]
variable[standard_name_vocab] assign[=] call[name[getattr], parameter[name[dataset], constant[standard_name_vocabulary], constant[]]]
variable[accepted_conventions] assign[=] constant[CF-1.6]
call[name[test_ctx].assert_true, parameter[compare[name[conventions] equal[==] name[accepted_conventions]], call[constant[Conventions attribute is missing or is not equal to CF-1.6: {}].format, parameter[name[conventions]]]]]
call[name[test_ctx].assert_true, parameter[compare[name[metadata_conventions] equal[==] constant[Unidata Dataset Discovery v1.0]], call[constant[Metadata_Conventions attribute is required to be 'Unidata Dataset Discovery v1.0': {}].format, parameter[name[metadata_conventions]]]]]
call[name[test_ctx].assert_true, parameter[compare[name[feature_type] in list[[<ast.Constant object at 0x7da18ede5900>, <ast.Constant object at 0x7da18ede4f70>, <ast.Constant object at 0x7da18ede72b0>, <ast.Constant object at 0x7da18ede58a0>, <ast.Constant object at 0x7da18ede62c0>, <ast.Constant object at 0x7da18ede6fb0>]]], call[constant[Feature type must be one of point, timeSeries, trajectory, profile, timeSeriesProfile, trajectoryProfile: {}].format, parameter[name[feature_type]]]]]
call[name[test_ctx].assert_true, parameter[compare[call[name[cdm_data_type].lower, parameter[]] in list[[<ast.Constant object at 0x7da18ede4670>, <ast.Constant object at 0x7da18ede4610>, <ast.Constant object at 0x7da18ede5ff0>, <ast.Constant object at 0x7da18ede4730>, <ast.Constant object at 0x7da18ede57e0>, <ast.Constant object at 0x7da18ede7880>, <ast.Constant object at 0x7da18ede4dc0>]]], call[constant[cdm_data_type must be one of Grid, Image, Point, Radial, Station, Swath, Trajectory: {}].format, parameter[name[cdm_data_type]]]]]
variable[regex] assign[=] call[name[re].compile, parameter[constant[[sS]tandard [nN]ame [tT]able]]]
call[name[test_ctx].assert_true, parameter[call[name[regex].search, parameter[name[standard_name_vocab]]], call[constant[standard_name_vocabulary doesn't contain 'Standard Name Table': {}].format, parameter[name[standard_name_vocab]]]]]
return[call[name[test_ctx].to_result, parameter[]]] | keyword[def] identifier[check_base_required_attributes] ( identifier[self] , identifier[dataset] ):
literal[string]
identifier[test_ctx] = identifier[TestCtx] ( identifier[BaseCheck] . identifier[HIGH] , literal[string] )
identifier[conventions] = identifier[getattr] ( identifier[dataset] , literal[string] , literal[string] )
identifier[metadata_conventions] = identifier[getattr] ( identifier[dataset] , literal[string] , literal[string] )
identifier[feature_type] = identifier[getattr] ( identifier[dataset] , literal[string] , literal[string] )
identifier[cdm_data_type] = identifier[getattr] ( identifier[dataset] , literal[string] , literal[string] )
identifier[standard_name_vocab] = identifier[getattr] ( identifier[dataset] , literal[string] , literal[string] )
identifier[accepted_conventions] = literal[string]
identifier[test_ctx] . identifier[assert_true] ( identifier[conventions] == identifier[accepted_conventions] ,
literal[string] . identifier[format] ( identifier[conventions] ))
identifier[test_ctx] . identifier[assert_true] ( identifier[metadata_conventions] == literal[string] ,
literal[string] . identifier[format] ( identifier[metadata_conventions] ))
identifier[test_ctx] . identifier[assert_true] ( identifier[feature_type] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ],
literal[string] . identifier[format] ( identifier[feature_type] ))
identifier[test_ctx] . identifier[assert_true] ( identifier[cdm_data_type] . identifier[lower] () keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ],
literal[string] . identifier[format] ( identifier[cdm_data_type] ))
identifier[regex] = identifier[re] . identifier[compile] ( literal[string] )
identifier[test_ctx] . identifier[assert_true] ( identifier[regex] . identifier[search] ( identifier[standard_name_vocab] ),
literal[string] . identifier[format] ( identifier[standard_name_vocab] ))
keyword[return] identifier[test_ctx] . identifier[to_result] () | def check_base_required_attributes(self, dataset):
"""
Check the global required and highly recommended attributes for 1.1 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:Conventions = "CF-1.6" ; //......................................... REQUIRED - Always try to use latest value. (CF)
:Metadata_Conventions = "Unidata Dataset Discovery v1.0" ; //........ REQUIRED - Do not change. (ACDD)
:featureType = "timeSeries" ; //..................................... REQUIRED - CF attribute for identifying the featureType.
:cdm_data_type = "Station" ; //...................................... REQUIRED (ACDD)
:nodc_template_version = "NODC_NetCDF_TimeSeries_Orthogonal_Template_v1.1" ; //....... REQUIRED (NODC)
:standard_name_vocabulary = "NetCDF Climate and Forecast (CF) Metadata Convention Standard Name Table "X"" ; //........ REQUIRED - If using CF standard name attribute for variables. "X" denotes the table number (ACDD)
"""
test_ctx = TestCtx(BaseCheck.HIGH, 'Required global attributes')
conventions = getattr(dataset, 'Conventions', '')
metadata_conventions = getattr(dataset, 'Metadata_Conventions', '')
feature_type = getattr(dataset, 'featureType', '')
cdm_data_type = getattr(dataset, 'cdm_data_type', '')
standard_name_vocab = getattr(dataset, 'standard_name_vocabulary', '')
accepted_conventions = 'CF-1.6'
test_ctx.assert_true(conventions == accepted_conventions, 'Conventions attribute is missing or is not equal to CF-1.6: {}'.format(conventions))
test_ctx.assert_true(metadata_conventions == 'Unidata Dataset Discovery v1.0', "Metadata_Conventions attribute is required to be 'Unidata Dataset Discovery v1.0': {}".format(metadata_conventions))
test_ctx.assert_true(feature_type in ['point', 'timeSeries', 'trajectory', 'profile', 'timeSeriesProfile', 'trajectoryProfile'], 'Feature type must be one of point, timeSeries, trajectory, profile, timeSeriesProfile, trajectoryProfile: {}'.format(feature_type))
test_ctx.assert_true(cdm_data_type.lower() in ['grid', 'image', 'point', 'radial', 'station', 'swath', 'trajectory'], 'cdm_data_type must be one of Grid, Image, Point, Radial, Station, Swath, Trajectory: {}'.format(cdm_data_type))
regex = re.compile('[sS]tandard [nN]ame [tT]able')
test_ctx.assert_true(regex.search(standard_name_vocab), "standard_name_vocabulary doesn't contain 'Standard Name Table': {}".format(standard_name_vocab))
return test_ctx.to_result() |
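A quick self-contained check of the vocabulary regex used above:
import re

regex = re.compile(r'[sS]tandard [nN]ame [tT]able')
assert regex.search('CF Standard Name Table v79')
assert not regex.search('CF standard names')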
def canonical_extension(fmt_ext):
""" Canonical extension of file format extension
Converts the format extension fmt_ext into the canonical extension for that format. For example,
``canonical_extension('tif') == 'tiff'``. Here we agree that the canonical extension for format F is F.value
:param fmt_ext: A string representing an extension (e.g. ``'txt'``, ``'png'``, etc.)
:type fmt_ext: str
:return: The canonical form of the extension (e.g. if ``fmt_ext='tif'`` then we return ``'tiff'``)
:rtype: str
"""
if MimeType.has_value(fmt_ext):
return fmt_ext
try:
return {
'tif': MimeType.TIFF.value,
'jpeg': MimeType.JPG.value,
'hdf5': MimeType.HDF.value,
'h5': MimeType.HDF.value
}[fmt_ext]
except KeyError:
raise ValueError('Data format .{} is not supported'.format(fmt_ext)) | def function[canonical_extension, parameter[fmt_ext]]:
constant[ Canonical extension of file format extension
Converts the format extension fmt_ext into the canonical extension for that format. For example,
``canonical_extension('tif') == 'tiff'``. Here we agree that the canonical extension for format F is F.value
:param fmt_ext: A string representing an extension (e.g. ``'txt'``, ``'png'``, etc.)
:type fmt_ext: str
:return: The canonical form of the extension (e.g. if ``fmt_ext='tif'`` then we return ``'tiff'``)
:rtype: str
]
if call[name[MimeType].has_value, parameter[name[fmt_ext]]] begin[:]
return[name[fmt_ext]]
<ast.Try object at 0x7da20c6aae00> | keyword[def] identifier[canonical_extension] ( identifier[fmt_ext] ):
literal[string]
keyword[if] identifier[MimeType] . identifier[has_value] ( identifier[fmt_ext] ):
keyword[return] identifier[fmt_ext]
keyword[try] :
keyword[return] {
literal[string] : identifier[MimeType] . identifier[TIFF] . identifier[value] ,
literal[string] : identifier[MimeType] . identifier[JPG] . identifier[value] ,
literal[string] : identifier[MimeType] . identifier[HDF] . identifier[value] ,
literal[string] : identifier[MimeType] . identifier[HDF] . identifier[value]
}[ identifier[fmt_ext] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[fmt_ext] )) | def canonical_extension(fmt_ext):
""" Canonical extension of file format extension
Converts the format extension fmt_ext into the canonical extension for that format. For example,
``canonical_extension('tif') == 'tiff'``. Here we agree that the canonical extension for format F is F.value
:param fmt_ext: A string representing an extension (e.g. ``'txt'``, ``'png'``, etc.)
:type fmt_ext: str
:return: The canonical form of the extension (e.g. if ``fmt_ext='tif'`` then we return ``'tiff'``)
:rtype: str
"""
if MimeType.has_value(fmt_ext):
return fmt_ext # depends on [control=['if'], data=[]]
try:
return {'tif': MimeType.TIFF.value, 'jpeg': MimeType.JPG.value, 'hdf5': MimeType.HDF.value, 'h5': MimeType.HDF.value}[fmt_ext] # depends on [control=['try'], data=[]]
except KeyError:
raise ValueError('Data format .{} is not supported'.format(fmt_ext)) # depends on [control=['except'], data=[]] |
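A usage sketch (expected results shown as comments), assuming the surrounding module's MimeType enum values:
canonical_extension('tif')    # -> 'tiff'
canonical_extension('jpeg')   # -> 'jpg'
canonical_extension('png')    # -> 'png' (already canonical)
canonical_extension('exe')    # raises ValueError: unsupported format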
def _get_tls_object(self, ssl_params):
"""
Return a TLS object to establish a secure connection to a server
"""
if ssl_params is None:
return None
if not ssl_params["verify"] and ssl_params["ca_certs"]:
self.warning(
"Incorrect configuration: trying to disable server certificate validation, "
"while also specifying a capath. No validation will be performed. Fix your "
"configuration to remove this warning"
)
validate = ssl.CERT_REQUIRED if ssl_params["verify"] else ssl.CERT_NONE
if ssl_params["ca_certs"] is None or os.path.isfile(ssl_params["ca_certs"]):
tls = ldap3.core.tls.Tls(
local_private_key_file=ssl_params["key"],
local_certificate_file=ssl_params["cert"],
ca_certs_file=ssl_params["ca_certs"],
version=ssl.PROTOCOL_SSLv23,
validate=validate,
)
elif os.path.isdir(ssl_params["ca_certs"]):
tls = ldap3.core.tls.Tls(
local_private_key_file=ssl_params["key"],
local_certificate_file=ssl_params["cert"],
ca_certs_path=ssl_params["ca_certs"],
version=ssl.PROTOCOL_SSLv23,
validate=validate,
)
else:
raise ConfigurationError(
'Invalid path {} for ssl_ca_certs: no such file or directory'.format(ssl_params['ca_certs'])
)
return tls | def function[_get_tls_object, parameter[self, ssl_params]]:
constant[
Return a TLS object to establish a secure connection to a server
]
if compare[name[ssl_params] is constant[None]] begin[:]
return[constant[None]]
if <ast.BoolOp object at 0x7da18f00de40> begin[:]
call[name[self].warning, parameter[constant[Incorrect configuration: trying to disable server certificate validation, while also specifying a capath. No validation will be performed. Fix your configuration to remove this warning]]]
variable[validate] assign[=] <ast.IfExp object at 0x7da18f00e3e0>
if <ast.BoolOp object at 0x7da18f00fc10> begin[:]
variable[tls] assign[=] call[name[ldap3].core.tls.Tls, parameter[]]
return[name[tls]] | keyword[def] identifier[_get_tls_object] ( identifier[self] , identifier[ssl_params] ):
literal[string]
keyword[if] identifier[ssl_params] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[if] keyword[not] identifier[ssl_params] [ literal[string] ] keyword[and] identifier[ssl_params] [ literal[string] ]:
identifier[self] . identifier[warning] (
literal[string]
literal[string]
literal[string]
)
identifier[validate] = identifier[ssl] . identifier[CERT_REQUIRED] keyword[if] identifier[ssl_params] [ literal[string] ] keyword[else] identifier[ssl] . identifier[CERT_NONE]
keyword[if] identifier[ssl_params] [ literal[string] ] keyword[is] keyword[None] keyword[or] identifier[os] . identifier[path] . identifier[isfile] ( identifier[ssl_params] [ literal[string] ]):
identifier[tls] = identifier[ldap3] . identifier[core] . identifier[tls] . identifier[Tls] (
identifier[local_private_key_file] = identifier[ssl_params] [ literal[string] ],
identifier[local_certificate_file] = identifier[ssl_params] [ literal[string] ],
identifier[ca_certs_file] = identifier[ssl_params] [ literal[string] ],
identifier[version] = identifier[ssl] . identifier[PROTOCOL_SSLv23] ,
identifier[validate] = identifier[validate] ,
)
keyword[elif] identifier[os] . identifier[path] . identifier[isdir] ( identifier[ssl_params] [ literal[string] ]):
identifier[tls] = identifier[ldap3] . identifier[core] . identifier[tls] . identifier[Tls] (
identifier[local_private_key_file] = identifier[ssl_params] [ literal[string] ],
identifier[local_certificate_file] = identifier[ssl_params] [ literal[string] ],
identifier[ca_certs_path] = identifier[ssl_params] [ literal[string] ],
identifier[version] = identifier[ssl] . identifier[PROTOCOL_SSLv23] ,
identifier[validate] = identifier[validate] ,
)
keyword[else] :
keyword[raise] identifier[ConfigurationError] (
literal[string] . identifier[format] ( identifier[ssl_params] [ literal[string] ])
)
keyword[return] identifier[tls] | def _get_tls_object(self, ssl_params):
"""
Return a TLS object to establish a secure connection to a server
"""
if ssl_params is None:
return None # depends on [control=['if'], data=[]]
if not ssl_params['verify'] and ssl_params['ca_certs']:
self.warning('Incorrect configuration: trying to disable server certificate validation, while also specifying a capath. No validation will be performed. Fix your configuration to remove this warning') # depends on [control=['if'], data=[]]
validate = ssl.CERT_REQUIRED if ssl_params['verify'] else ssl.CERT_NONE
if ssl_params['ca_certs'] is None or os.path.isfile(ssl_params['ca_certs']):
tls = ldap3.core.tls.Tls(local_private_key_file=ssl_params['key'], local_certificate_file=ssl_params['cert'], ca_certs_file=ssl_params['ca_certs'], version=ssl.PROTOCOL_SSLv23, validate=validate) # depends on [control=['if'], data=[]]
elif os.path.isdir(ssl_params['ca_certs']):
tls = ldap3.core.tls.Tls(local_private_key_file=ssl_params['key'], local_certificate_file=ssl_params['cert'], ca_certs_path=ssl_params['ca_certs'], version=ssl.PROTOCOL_SSLv23, validate=validate) # depends on [control=['if'], data=[]]
else:
raise ConfigurationError('Invalid path {} for ssl_ca_certs: no such file or directory'.format(ssl_params['ca_certs']))
return tls |
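A hypothetical ssl_params mapping the method accepts; the paths are placeholders, and ca_certs may point at either a PEM bundle file or a directory of certificates:
ssl_params = {
    'verify': True,
    'key': '/etc/openldap/client.key',
    'cert': '/etc/openldap/client.crt',
    'ca_certs': '/etc/ssl/certs',
}
tls = check._get_tls_object(ssl_params)   # `check` is the AgentCheck instance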
def basename_without_extension(self):
"""
Get the ``os.path.basename`` of the local file, if any, with extension removed.
"""
ret = self.basename.rsplit('.', 1)[0]
if ret.endswith('.tar'):
            ret = ret[:-4]  # drop the trailing '.tar' left by e.g. 'archive.tar.gz'
return ret | def function[basename_without_extension, parameter[self]]:
constant[
Get the ``os.path.basename`` of the local file, if any, with extension removed.
]
variable[ret] assign[=] call[call[name[self].basename.rsplit, parameter[constant[.], constant[1]]]][constant[0]]
if call[name[ret].endswith, parameter[constant[.tar]]] begin[:]
variable[ret] assign[=] call[name[ret]][<ast.Slice object at 0x7da1b0383820>]
return[name[ret]] | keyword[def] identifier[basename_without_extension] ( identifier[self] ):
literal[string]
identifier[ret] = identifier[self] . identifier[basename] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
keyword[if] identifier[ret] . identifier[endswith] ( literal[string] ):
identifier[ret] = identifier[ret] [ literal[int] : identifier[len] ( identifier[ret] )- literal[int] ]
keyword[return] identifier[ret] | def basename_without_extension(self):
"""
Get the ``os.path.basename`` of the local file, if any, with extension removed.
"""
ret = self.basename.rsplit('.', 1)[0]
if ret.endswith('.tar'):
ret = ret[0:len(ret) - 4] # depends on [control=['if'], data=[]]
return ret |
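A standalone sketch of the same stripping rule on a few example names:
for name in ('data.csv', 'archive.tar.gz', 'plain'):
    stem = name.rsplit('.', 1)[0]
    if stem.endswith('.tar'):
        stem = stem[:-4]
    print(name, '->', stem)   # data, archive, plain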
def write_data_to_file(data, filepath):
'''
Write data to file
'''
try:
os.makedirs(os.path.dirname(filepath), 0o700)
except OSError:
pass
write_to_disk(filepath, content=data) | def function[write_data_to_file, parameter[data, filepath]]:
constant[
Write data to file
]
<ast.Try object at 0x7da18dc99b10>
call[name[write_to_disk], parameter[name[filepath]]] | keyword[def] identifier[write_data_to_file] ( identifier[data] , identifier[filepath] ):
literal[string]
keyword[try] :
identifier[os] . identifier[makedirs] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[filepath] ), literal[int] )
keyword[except] identifier[OSError] :
keyword[pass]
identifier[write_to_disk] ( identifier[filepath] , identifier[content] = identifier[data] ) | def write_data_to_file(data, filepath):
"""
Write data to file
"""
try:
os.makedirs(os.path.dirname(filepath), 448) # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]]
write_to_disk(filepath, content=data) |
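On Python 3 the same guard can be written without the try/except; a minimal sketch (write_to_disk is the module's own helper, as above):
import os

def write_data_to_file_py3(data, filepath):
    # exist_ok makes the pre-existing-directory case a no-op
    os.makedirs(os.path.dirname(filepath), mode=0o700, exist_ok=True)
    write_to_disk(filepath, content=data)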
def add_digital_object(
self,
information_object_slug,
identifier=None,
title=None,
uri=None,
location_of_originals=None,
object_type=None,
xlink_show="embed",
xlink_actuate="onLoad",
restricted=False,
use_statement="",
use_conditions=None,
access_conditions=None,
size=None,
format_name=None,
format_version=None,
format_registry_key=None,
format_registry_name=None,
file_uuid=None,
aip_uuid=None,
inherit_dates=False,
usage=None,
):
""" Creates a new digital object. """
new_object = {"information_object_slug": information_object_slug}
if title is not None:
new_object["name"] = title
if uri is not None:
new_object["uri"] = uri
if size is not None:
new_object["byte_size"] = size
if object_type is not None:
new_object["media_type"] = object_type
if usage is not None:
new_object["usage"] = usage
if file_uuid is not None:
new_object["file_uuid"] = file_uuid
if aip_uuid is not None:
new_object["aip_uuid"] = aip_uuid
if format_name is not None:
new_object["format_name"] = format_name
if format_version is not None:
new_object["format_version"] = format_version
if format_registry_key is not None:
new_object["format_registry_key"] = format_registry_key
if format_registry_name is not None:
new_object["format_registry_name"] = format_registry_name
new_object["slug"] = self._post(
urljoin(self.base_url, "digitalobjects"),
data=json.dumps(new_object),
expected_response=201,
).json()["slug"]
return new_object | def function[add_digital_object, parameter[self, information_object_slug, identifier, title, uri, location_of_originals, object_type, xlink_show, xlink_actuate, restricted, use_statement, use_conditions, access_conditions, size, format_name, format_version, format_registry_key, format_registry_name, file_uuid, aip_uuid, inherit_dates, usage]]:
constant[ Creates a new digital object. ]
variable[new_object] assign[=] dictionary[[<ast.Constant object at 0x7da20c76d570>], [<ast.Name object at 0x7da20c76d180>]]
if compare[name[title] is_not constant[None]] begin[:]
call[name[new_object]][constant[name]] assign[=] name[title]
if compare[name[uri] is_not constant[None]] begin[:]
call[name[new_object]][constant[uri]] assign[=] name[uri]
if compare[name[size] is_not constant[None]] begin[:]
call[name[new_object]][constant[byte_size]] assign[=] name[size]
if compare[name[object_type] is_not constant[None]] begin[:]
call[name[new_object]][constant[media_type]] assign[=] name[object_type]
if compare[name[usage] is_not constant[None]] begin[:]
call[name[new_object]][constant[usage]] assign[=] name[usage]
if compare[name[file_uuid] is_not constant[None]] begin[:]
call[name[new_object]][constant[file_uuid]] assign[=] name[file_uuid]
if compare[name[aip_uuid] is_not constant[None]] begin[:]
call[name[new_object]][constant[aip_uuid]] assign[=] name[aip_uuid]
if compare[name[format_name] is_not constant[None]] begin[:]
call[name[new_object]][constant[format_name]] assign[=] name[format_name]
if compare[name[format_version] is_not constant[None]] begin[:]
call[name[new_object]][constant[format_version]] assign[=] name[format_version]
if compare[name[format_registry_key] is_not constant[None]] begin[:]
call[name[new_object]][constant[format_registry_key]] assign[=] name[format_registry_key]
if compare[name[format_registry_name] is_not constant[None]] begin[:]
call[name[new_object]][constant[format_registry_name]] assign[=] name[format_registry_name]
call[name[new_object]][constant[slug]] assign[=] call[call[call[name[self]._post, parameter[call[name[urljoin], parameter[name[self].base_url, constant[digitalobjects]]]]].json, parameter[]]][constant[slug]]
return[name[new_object]] | keyword[def] identifier[add_digital_object] (
identifier[self] ,
identifier[information_object_slug] ,
identifier[identifier] = keyword[None] ,
identifier[title] = keyword[None] ,
identifier[uri] = keyword[None] ,
identifier[location_of_originals] = keyword[None] ,
identifier[object_type] = keyword[None] ,
identifier[xlink_show] = literal[string] ,
identifier[xlink_actuate] = literal[string] ,
identifier[restricted] = keyword[False] ,
identifier[use_statement] = literal[string] ,
identifier[use_conditions] = keyword[None] ,
identifier[access_conditions] = keyword[None] ,
identifier[size] = keyword[None] ,
identifier[format_name] = keyword[None] ,
identifier[format_version] = keyword[None] ,
identifier[format_registry_key] = keyword[None] ,
identifier[format_registry_name] = keyword[None] ,
identifier[file_uuid] = keyword[None] ,
identifier[aip_uuid] = keyword[None] ,
identifier[inherit_dates] = keyword[False] ,
identifier[usage] = keyword[None] ,
):
literal[string]
identifier[new_object] ={ literal[string] : identifier[information_object_slug] }
keyword[if] identifier[title] keyword[is] keyword[not] keyword[None] :
identifier[new_object] [ literal[string] ]= identifier[title]
keyword[if] identifier[uri] keyword[is] keyword[not] keyword[None] :
identifier[new_object] [ literal[string] ]= identifier[uri]
keyword[if] identifier[size] keyword[is] keyword[not] keyword[None] :
identifier[new_object] [ literal[string] ]= identifier[size]
keyword[if] identifier[object_type] keyword[is] keyword[not] keyword[None] :
identifier[new_object] [ literal[string] ]= identifier[object_type]
keyword[if] identifier[usage] keyword[is] keyword[not] keyword[None] :
identifier[new_object] [ literal[string] ]= identifier[usage]
keyword[if] identifier[file_uuid] keyword[is] keyword[not] keyword[None] :
identifier[new_object] [ literal[string] ]= identifier[file_uuid]
keyword[if] identifier[aip_uuid] keyword[is] keyword[not] keyword[None] :
identifier[new_object] [ literal[string] ]= identifier[aip_uuid]
keyword[if] identifier[format_name] keyword[is] keyword[not] keyword[None] :
identifier[new_object] [ literal[string] ]= identifier[format_name]
keyword[if] identifier[format_version] keyword[is] keyword[not] keyword[None] :
identifier[new_object] [ literal[string] ]= identifier[format_version]
keyword[if] identifier[format_registry_key] keyword[is] keyword[not] keyword[None] :
identifier[new_object] [ literal[string] ]= identifier[format_registry_key]
keyword[if] identifier[format_registry_name] keyword[is] keyword[not] keyword[None] :
identifier[new_object] [ literal[string] ]= identifier[format_registry_name]
identifier[new_object] [ literal[string] ]= identifier[self] . identifier[_post] (
identifier[urljoin] ( identifier[self] . identifier[base_url] , literal[string] ),
identifier[data] = identifier[json] . identifier[dumps] ( identifier[new_object] ),
identifier[expected_response] = literal[int] ,
). identifier[json] ()[ literal[string] ]
keyword[return] identifier[new_object] | def add_digital_object(self, information_object_slug, identifier=None, title=None, uri=None, location_of_originals=None, object_type=None, xlink_show='embed', xlink_actuate='onLoad', restricted=False, use_statement='', use_conditions=None, access_conditions=None, size=None, format_name=None, format_version=None, format_registry_key=None, format_registry_name=None, file_uuid=None, aip_uuid=None, inherit_dates=False, usage=None):
""" Creates a new digital object. """
new_object = {'information_object_slug': information_object_slug}
if title is not None:
new_object['name'] = title # depends on [control=['if'], data=['title']]
if uri is not None:
new_object['uri'] = uri # depends on [control=['if'], data=['uri']]
if size is not None:
new_object['byte_size'] = size # depends on [control=['if'], data=['size']]
if object_type is not None:
new_object['media_type'] = object_type # depends on [control=['if'], data=['object_type']]
if usage is not None:
new_object['usage'] = usage # depends on [control=['if'], data=['usage']]
if file_uuid is not None:
new_object['file_uuid'] = file_uuid # depends on [control=['if'], data=['file_uuid']]
if aip_uuid is not None:
new_object['aip_uuid'] = aip_uuid # depends on [control=['if'], data=['aip_uuid']]
if format_name is not None:
new_object['format_name'] = format_name # depends on [control=['if'], data=['format_name']]
if format_version is not None:
new_object['format_version'] = format_version # depends on [control=['if'], data=['format_version']]
if format_registry_key is not None:
new_object['format_registry_key'] = format_registry_key # depends on [control=['if'], data=['format_registry_key']]
if format_registry_name is not None:
new_object['format_registry_name'] = format_registry_name # depends on [control=['if'], data=['format_registry_name']]
new_object['slug'] = self._post(urljoin(self.base_url, 'digitalobjects'), data=json.dumps(new_object), expected_response=201).json()['slug']
return new_object |
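A hypothetical call sketch; the slug, URI, and UUID values are placeholders:
client.add_digital_object(
    'example-fonds-item-1',
    title='Scan of page 1',
    uri='https://example.org/objects/page1.tiff',
    object_type='image',
    size=204800,
    file_uuid='00000000-0000-0000-0000-000000000001',
)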
def get_text_tokenizer(query_string):
"""
    Tokenize the input string and return two lists: the exclude list holds
    words that start with a dash (e.g. -word), the include list holds all
    other words.
    """
# Regex to split on double-quotes, single-quotes, and continuous non-whitespace characters.
    split_pattern = re.compile(r'''("[^"]+"|'[^']+'|\S+)''')
    # Patterns to collapse repeated whitespace and repeated leading "-"
    space_cleanup_pattern = re.compile(r'[\s]{2,}')
dash_cleanup_pattern = re.compile('^[-]{2,}')
# Return the list of keywords.
keywords = [dash_cleanup_pattern.sub('-', space_cleanup_pattern.sub(' ', t.strip(' "\'')))
for t in split_pattern.findall(query_string) if len(t.strip(' "\'')) > 0]
include = [word for word in keywords if not word.startswith('-')]
exclude = [word.lstrip('-') for word in keywords if word.startswith('-')]
return include, exclude | def function[get_text_tokenizer, parameter[query_string]]:
constant[
Tokenize the input string and return two lists, exclude list is for words that
start with a dash (ex: -word) and include list is for all other words
]
variable[split_pattern] assign[=] call[name[re].compile, parameter[constant[("[^"]+"|'[^']+'|\S+)]]]
variable[space_cleanup_pattern] assign[=] call[name[re].compile, parameter[constant[[\s]{2,}]]]
variable[dash_cleanup_pattern] assign[=] call[name[re].compile, parameter[constant[^[-]{2,}]]]
variable[keywords] assign[=] <ast.ListComp object at 0x7da18f722c50>
variable[include] assign[=] <ast.ListComp object at 0x7da18f720be0>
variable[exclude] assign[=] <ast.ListComp object at 0x7da18f09efb0>
return[tuple[[<ast.Name object at 0x7da18f09f250>, <ast.Name object at 0x7da18f09e110>]]] | keyword[def] identifier[get_text_tokenizer] ( identifier[query_string] ):
literal[string]
identifier[split_pattern] = identifier[re] . identifier[compile] ( literal[string] )
identifier[space_cleanup_pattern] = identifier[re] . identifier[compile] ( literal[string] )
identifier[dash_cleanup_pattern] = identifier[re] . identifier[compile] ( literal[string] )
identifier[keywords] =[ identifier[dash_cleanup_pattern] . identifier[sub] ( literal[string] , identifier[space_cleanup_pattern] . identifier[sub] ( literal[string] , identifier[t] . identifier[strip] ( literal[string] )))
keyword[for] identifier[t] keyword[in] identifier[split_pattern] . identifier[findall] ( identifier[query_string] ) keyword[if] identifier[len] ( identifier[t] . identifier[strip] ( literal[string] ))> literal[int] ]
identifier[include] =[ identifier[word] keyword[for] identifier[word] keyword[in] identifier[keywords] keyword[if] keyword[not] identifier[word] . identifier[startswith] ( literal[string] )]
identifier[exclude] =[ identifier[word] . identifier[lstrip] ( literal[string] ) keyword[for] identifier[word] keyword[in] identifier[keywords] keyword[if] identifier[word] . identifier[startswith] ( literal[string] )]
keyword[return] identifier[include] , identifier[exclude] | def get_text_tokenizer(query_string):
"""
Tokenize the input string and return two lists, exclude list is for words that
start with a dash (ex: -word) and include list is for all other words
"""
# Regex to split on double-quotes, single-quotes, and continuous non-whitespace characters.
split_pattern = re.compile('("[^"]+"|\'[^\']+\'|\\S+)')
# Pattern to remove more than one inter white-spaces and more than one "-"
space_cleanup_pattern = re.compile('[\\s]{2,}')
dash_cleanup_pattern = re.compile('^[-]{2,}')
# Return the list of keywords.
keywords = [dash_cleanup_pattern.sub('-', space_cleanup_pattern.sub(' ', t.strip(' "\''))) for t in split_pattern.findall(query_string) if len(t.strip(' "\'')) > 0]
include = [word for word in keywords if not word.startswith('-')]
exclude = [word.lstrip('-') for word in keywords if word.startswith('-')]
return (include, exclude) |
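A worked example of the tokenizer on a mixed query:
include, exclude = get_text_tokenizer('apple  -banana "new york"')
# include == ['apple', 'new york'], exclude == ['banana']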
def animation_card(card: AnimationCard) -> Attachment:
"""
Returns an attachment for an animation card. Will raise a TypeError if the 'card' argument is not an
AnimationCard.
:param card:
:return:
"""
if not isinstance(card, AnimationCard):
raise TypeError('CardFactory.animation_card(): `card` argument is not an instance of an AnimationCard, '
'unable to prepare attachment.')
return Attachment(content_type=CardFactory.content_types.animation_card,
content=card) | def function[animation_card, parameter[card]]:
constant[
Returns an attachment for an animation card. Will raise a TypeError if the 'card' argument is not an
AnimationCard.
:param card:
:return:
]
if <ast.UnaryOp object at 0x7da20c6e7cd0> begin[:]
<ast.Raise object at 0x7da20c6e7ee0>
return[call[name[Attachment], parameter[]]] | keyword[def] identifier[animation_card] ( identifier[card] : identifier[AnimationCard] )-> identifier[Attachment] :
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[card] , identifier[AnimationCard] ):
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] )
keyword[return] identifier[Attachment] ( identifier[content_type] = identifier[CardFactory] . identifier[content_types] . identifier[animation_card] ,
identifier[content] = identifier[card] ) | def animation_card(card: AnimationCard) -> Attachment:
"""
Returns an attachment for an animation card. Will raise a TypeError if the 'card' argument is not an
AnimationCard.
:param card:
:return:
"""
if not isinstance(card, AnimationCard):
raise TypeError('CardFactory.animation_card(): `card` argument is not an instance of an AnimationCard, unable to prepare attachment.') # depends on [control=['if'], data=[]]
return Attachment(content_type=CardFactory.content_types.animation_card, content=card) |
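A hypothetical sketch, assuming the botbuilder schema types are importable alongside CardFactory:
from botbuilder.schema import AnimationCard, MediaUrl

card = AnimationCard(title='demo',
                     media=[MediaUrl(url='https://example.org/clip.gif')])
attachment = CardFactory.animation_card(card)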
def units(self, value):
"""
Set the units for every model in the scene without
converting any units just setting the tag.
Parameters
------------
value : str
Value to set every geometry unit value to
"""
for m in self.geometry.values():
m.units = value | def function[units, parameter[self, value]]:
constant[
Set the units for every model in the scene without
converting any units just setting the tag.
Parameters
------------
value : str
Value to set every geometry unit value to
]
for taget[name[m]] in starred[call[name[self].geometry.values, parameter[]]] begin[:]
name[m].units assign[=] name[value] | keyword[def] identifier[units] ( identifier[self] , identifier[value] ):
literal[string]
keyword[for] identifier[m] keyword[in] identifier[self] . identifier[geometry] . identifier[values] ():
identifier[m] . identifier[units] = identifier[value] | def units(self, value):
"""
Set the units for every model in the scene without
converting any units just setting the tag.
Parameters
------------
value : str
Value to set every geometry unit value to
"""
for m in self.geometry.values():
m.units = value # depends on [control=['for'], data=['m']] |
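A usage sketch, assuming this method is wired up as a property setter on a trimesh-style scene (note it only retags units, it does not rescale geometry):
scene.units = 'meters'   # applies the tag to every mesh in scene.geometry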
def watch_logfile(self, logfile_path):
"""Analyzes queries from the tail of a given log file"""
self._run_stats['logSource'] = logfile_path
log_parser = LogParser()
# For each new line in the logfile ...
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
firstLine = True
for line in self._tail_file(open(logfile_path),
WATCH_INTERVAL_SECONDS):
                if firstLine:
                    self._run_stats['timeRange']['start'] = get_line_time(line)
                    firstLine = False  # record the start time only once
self._process_query(line, log_parser)
self._run_stats['timeRange']['end'] = get_line_time(line)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
return 0 | def function[watch_logfile, parameter[self, logfile_path]]:
constant[Analyzes queries from the tail of a given log file]
call[name[self]._run_stats][constant[logSource]] assign[=] name[logfile_path]
variable[log_parser] assign[=] call[name[LogParser], parameter[]]
variable[output_time] assign[=] binary_operation[call[name[time].time, parameter[]] + name[WATCH_DISPLAY_REFRESH_SECONDS]]
<ast.Try object at 0x7da1b0432140>
return[constant[0]] | keyword[def] identifier[watch_logfile] ( identifier[self] , identifier[logfile_path] ):
literal[string]
identifier[self] . identifier[_run_stats] [ literal[string] ]= identifier[logfile_path]
identifier[log_parser] = identifier[LogParser] ()
identifier[output_time] = identifier[time] . identifier[time] ()+ identifier[WATCH_DISPLAY_REFRESH_SECONDS]
keyword[try] :
identifier[firstLine] = keyword[True]
keyword[for] identifier[line] keyword[in] identifier[self] . identifier[_tail_file] ( identifier[open] ( identifier[logfile_path] ),
identifier[WATCH_INTERVAL_SECONDS] ):
keyword[if] identifier[firstLine] :
identifier[self] . identifier[_run_stats] [ literal[string] ][ literal[string] ]= identifier[get_line_time] ( identifier[line] )
identifier[self] . identifier[_process_query] ( identifier[line] , identifier[log_parser] )
identifier[self] . identifier[_run_stats] [ literal[string] ][ literal[string] ]= identifier[get_line_time] ( identifier[line] )
keyword[if] identifier[time] . identifier[time] ()>= identifier[output_time] :
identifier[self] . identifier[_output_aggregated_report] ( identifier[sys] . identifier[stderr] )
identifier[output_time] = identifier[time] . identifier[time] ()+ identifier[WATCH_DISPLAY_REFRESH_SECONDS]
keyword[except] identifier[KeyboardInterrupt] :
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] )
keyword[finally] :
identifier[self] . identifier[_output_aggregated_report] ( identifier[sys] . identifier[stdout] )
keyword[return] literal[int] | def watch_logfile(self, logfile_path):
"""Analyzes queries from the tail of a given log file"""
self._run_stats['logSource'] = logfile_path
log_parser = LogParser()
# For each new line in the logfile ...
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
firstLine = True
for line in self._tail_file(open(logfile_path), WATCH_INTERVAL_SECONDS):
if firstLine:
self._run_stats['timeRange']['start'] = get_line_time(line) # depends on [control=['if'], data=[]]
self._process_query(line, log_parser)
self._run_stats['timeRange']['end'] = get_line_time(line)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS # depends on [control=['if'], data=['output_time']] # depends on [control=['for'], data=['line']] # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
sys.stderr.write('Interrupt received\n') # depends on [control=['except'], data=[]]
finally:
self._output_aggregated_report(sys.stdout)
return 0 |
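A minimal standalone sketch of the tail-follow generator the method relies on (a hypothetical stand-in for _tail_file):
import time

def tail_file(fh, interval):
    while True:
        line = fh.readline()
        if line:
            yield line
        else:
            time.sleep(interval)   # wait for new lines to be appended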
def IntGreaterThanOne(n):
"""If *n* is an integer > 1, returns it, otherwise an error."""
try:
n = int(n)
        except (TypeError, ValueError):
            raise ValueError("%s is not an integer" % n)
if n <= 1:
raise ValueError("%d is not > 1" % n)
else:
return n | def function[IntGreaterThanOne, parameter[n]]:
constant[If *n* is an integer > 1, returns it, otherwise an error.]
<ast.Try object at 0x7da2054a7a90>
if compare[name[n] less_or_equal[<=] constant[1]] begin[:]
<ast.Raise object at 0x7da1b2347e80> | keyword[def] identifier[IntGreaterThanOne] ( identifier[n] ):
literal[string]
keyword[try] :
identifier[n] = identifier[int] ( identifier[n] )
keyword[except] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[n] )
keyword[if] identifier[n] <= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[n] )
keyword[else] :
keyword[return] identifier[n] | def IntGreaterThanOne(n):
"""If *n* is an integer > 1, returns it, otherwise an error."""
try:
n = int(n) # depends on [control=['try'], data=[]]
except:
raise ValueError('%s is not an integer' % n) # depends on [control=['except'], data=[]]
if n <= 1:
raise ValueError('%d is not > 1' % n) # depends on [control=['if'], data=['n']]
else:
return n |
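A behavior sketch (results shown as comments):
IntGreaterThanOne('5')   # -> 5
IntGreaterThanOne(2.9)   # -> 2  (int() truncates first, then 2 > 1 passes)
IntGreaterThanOne(1)     # raises ValueError: 1 is not > 1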
def retrieveVals(self):
"""Retrieve values for graphs."""
name = 'diskspace'
if self.hasGraph(name):
for fspath in self._fslist:
                if fspath in self._statsSpace:  # dict.has_key() is Python 2 only
self.setGraphVal(name, fspath,
self._statsSpace[fspath]['inuse_pcent'])
name = 'diskinode'
if self.hasGraph(name):
for fspath in self._fslist:
                if fspath in self._statsInode:  # dict.has_key() is Python 2 only
self.setGraphVal(name, fspath,
self._statsInode[fspath]['inuse_pcent']) | def function[retrieveVals, parameter[self]]:
constant[Retrieve values for graphs.]
variable[name] assign[=] constant[diskspace]
if call[name[self].hasGraph, parameter[name[name]]] begin[:]
for taget[name[fspath]] in starred[name[self]._fslist] begin[:]
if call[name[self]._statsSpace.has_key, parameter[name[fspath]]] begin[:]
call[name[self].setGraphVal, parameter[name[name], name[fspath], call[call[name[self]._statsSpace][name[fspath]]][constant[inuse_pcent]]]]
variable[name] assign[=] constant[diskinode]
if call[name[self].hasGraph, parameter[name[name]]] begin[:]
for taget[name[fspath]] in starred[name[self]._fslist] begin[:]
if call[name[self]._statsInode.has_key, parameter[name[fspath]]] begin[:]
call[name[self].setGraphVal, parameter[name[name], name[fspath], call[call[name[self]._statsInode][name[fspath]]][constant[inuse_pcent]]]] | keyword[def] identifier[retrieveVals] ( identifier[self] ):
literal[string]
identifier[name] = literal[string]
keyword[if] identifier[self] . identifier[hasGraph] ( identifier[name] ):
keyword[for] identifier[fspath] keyword[in] identifier[self] . identifier[_fslist] :
keyword[if] identifier[self] . identifier[_statsSpace] . identifier[has_key] ( identifier[fspath] ):
identifier[self] . identifier[setGraphVal] ( identifier[name] , identifier[fspath] ,
identifier[self] . identifier[_statsSpace] [ identifier[fspath] ][ literal[string] ])
identifier[name] = literal[string]
keyword[if] identifier[self] . identifier[hasGraph] ( identifier[name] ):
keyword[for] identifier[fspath] keyword[in] identifier[self] . identifier[_fslist] :
keyword[if] identifier[self] . identifier[_statsInode] . identifier[has_key] ( identifier[fspath] ):
identifier[self] . identifier[setGraphVal] ( identifier[name] , identifier[fspath] ,
identifier[self] . identifier[_statsInode] [ identifier[fspath] ][ literal[string] ]) | def retrieveVals(self):
"""Retrieve values for graphs."""
name = 'diskspace'
if self.hasGraph(name):
for fspath in self._fslist:
if self._statsSpace.has_key(fspath):
self.setGraphVal(name, fspath, self._statsSpace[fspath]['inuse_pcent']) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fspath']] # depends on [control=['if'], data=[]]
name = 'diskinode'
if self.hasGraph(name):
for fspath in self._fslist:
if self._statsInode.has_key(fspath):
self.setGraphVal(name, fspath, self._statsInode[fspath]['inuse_pcent']) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fspath']] # depends on [control=['if'], data=[]] |
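A hedged sketch of the data shape the method expects; the stats dicts here are hypothetical:
plugin._fslist = ['/', '/home']
plugin._statsSpace = {'/': {'inuse_pcent': 42.0}}
plugin._statsInode = {'/': {'inuse_pcent': 3.5}}
plugin.retrieveVals()   # sets graph values for '/' only; '/home' has no stats yet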
def _walk_omniglot_dir(directory):
"""Walk an Omniglot directory and yield examples."""
directory = os.path.join(directory, tf.io.gfile.listdir(directory)[0])
alphabets = sorted(tf.io.gfile.listdir(directory))
for alphabet in alphabets:
alphabet_dir = os.path.join(directory, alphabet)
characters = sorted(tf.io.gfile.listdir(alphabet_dir))
for character in characters:
character_id = int(character[len("character"):]) - 1
character_dir = os.path.join(alphabet_dir, character)
images = tf.io.gfile.listdir(character_dir)
for image in images:
label, _ = image.split("_")
label = int(label) - 1
image_path = os.path.join(character_dir, image)
yield alphabet, character_id, label, image_path | def function[_walk_omniglot_dir, parameter[directory]]:
constant[Walk an Omniglot directory and yield examples.]
variable[directory] assign[=] call[name[os].path.join, parameter[name[directory], call[call[name[tf].io.gfile.listdir, parameter[name[directory]]]][constant[0]]]]
variable[alphabets] assign[=] call[name[sorted], parameter[call[name[tf].io.gfile.listdir, parameter[name[directory]]]]]
for taget[name[alphabet]] in starred[name[alphabets]] begin[:]
variable[alphabet_dir] assign[=] call[name[os].path.join, parameter[name[directory], name[alphabet]]]
variable[characters] assign[=] call[name[sorted], parameter[call[name[tf].io.gfile.listdir, parameter[name[alphabet_dir]]]]]
for taget[name[character]] in starred[name[characters]] begin[:]
variable[character_id] assign[=] binary_operation[call[name[int], parameter[call[name[character]][<ast.Slice object at 0x7da1b20c9330>]]] - constant[1]]
variable[character_dir] assign[=] call[name[os].path.join, parameter[name[alphabet_dir], name[character]]]
variable[images] assign[=] call[name[tf].io.gfile.listdir, parameter[name[character_dir]]]
for taget[name[image]] in starred[name[images]] begin[:]
<ast.Tuple object at 0x7da1b20c9e40> assign[=] call[name[image].split, parameter[constant[_]]]
variable[label] assign[=] binary_operation[call[name[int], parameter[name[label]]] - constant[1]]
variable[image_path] assign[=] call[name[os].path.join, parameter[name[character_dir], name[image]]]
<ast.Yield object at 0x7da1b20c8cd0> | keyword[def] identifier[_walk_omniglot_dir] ( identifier[directory] ):
literal[string]
identifier[directory] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[tf] . identifier[io] . identifier[gfile] . identifier[listdir] ( identifier[directory] )[ literal[int] ])
identifier[alphabets] = identifier[sorted] ( identifier[tf] . identifier[io] . identifier[gfile] . identifier[listdir] ( identifier[directory] ))
keyword[for] identifier[alphabet] keyword[in] identifier[alphabets] :
identifier[alphabet_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[alphabet] )
identifier[characters] = identifier[sorted] ( identifier[tf] . identifier[io] . identifier[gfile] . identifier[listdir] ( identifier[alphabet_dir] ))
keyword[for] identifier[character] keyword[in] identifier[characters] :
identifier[character_id] = identifier[int] ( identifier[character] [ identifier[len] ( literal[string] ):])- literal[int]
identifier[character_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[alphabet_dir] , identifier[character] )
identifier[images] = identifier[tf] . identifier[io] . identifier[gfile] . identifier[listdir] ( identifier[character_dir] )
keyword[for] identifier[image] keyword[in] identifier[images] :
identifier[label] , identifier[_] = identifier[image] . identifier[split] ( literal[string] )
identifier[label] = identifier[int] ( identifier[label] )- literal[int]
identifier[image_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[character_dir] , identifier[image] )
keyword[yield] identifier[alphabet] , identifier[character_id] , identifier[label] , identifier[image_path] | def _walk_omniglot_dir(directory):
"""Walk an Omniglot directory and yield examples."""
directory = os.path.join(directory, tf.io.gfile.listdir(directory)[0])
alphabets = sorted(tf.io.gfile.listdir(directory))
for alphabet in alphabets:
alphabet_dir = os.path.join(directory, alphabet)
characters = sorted(tf.io.gfile.listdir(alphabet_dir))
for character in characters:
character_id = int(character[len('character'):]) - 1
character_dir = os.path.join(alphabet_dir, character)
images = tf.io.gfile.listdir(character_dir)
for image in images:
(label, _) = image.split('_')
label = int(label) - 1
image_path = os.path.join(character_dir, image)
yield (alphabet, character_id, label, image_path) # depends on [control=['for'], data=['image']] # depends on [control=['for'], data=['character']] # depends on [control=['for'], data=['alphabet']] |
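A hedged usage sketch for the generator above. The directory layout (one wrapper directory, then alphabet/characterNN/label_index.png files) is inferred from the walker's code; the path below is a placeholder, and TensorFlow must be installed so that tf.io.gfile resolves.

```python
# `root` is a placeholder; both it and the archive layout are
# assumptions inferred from _walk_omniglot_dir above.
import os
import tensorflow as tf  # provides tf.io.gfile used by the walker

root = "/tmp/omniglot/images_background"
if os.path.isdir(root):
    for alphabet, character_id, label, image_path in _walk_omniglot_dir(root):
        print(alphabet, character_id, label, image_path)
        break  # show only the first yielded example
```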
def feasible_ratio(self, solutions):
"""counts for each coordinate the number of feasible values in
``solutions`` and returns an array of length ``len(solutions[0])``
with the ratios.
`solutions` is a list or array of repaired ``Solution``
instances,
"""
raise NotImplementedError('Solution class disappeared')
count = np.zeros(len(solutions[0]))
for x in solutions:
count += x.unrepaired == x
return count / float(len(solutions)) | def function[feasible_ratio, parameter[self, solutions]]:
constant[counts for each coordinate the number of feasible values in
``solutions`` and returns an array of length ``len(solutions[0])``
with the ratios.
`solutions` is a list or array of repaired ``Solution``
instances,
]
<ast.Raise object at 0x7da1b0cb5870>
variable[count] assign[=] call[name[np].zeros, parameter[call[name[len], parameter[call[name[solutions]][constant[0]]]]]]
for taget[name[x]] in starred[name[solutions]] begin[:]
<ast.AugAssign object at 0x7da1b0cb7ee0>
return[binary_operation[name[count] / call[name[float], parameter[call[name[len], parameter[name[solutions]]]]]]] | keyword[def] identifier[feasible_ratio] ( identifier[self] , identifier[solutions] ):
literal[string]
keyword[raise] identifier[NotImplementedError] ( literal[string] )
identifier[count] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[solutions] [ literal[int] ]))
keyword[for] identifier[x] keyword[in] identifier[solutions] :
identifier[count] += identifier[x] . identifier[unrepaired] == identifier[x]
keyword[return] identifier[count] / identifier[float] ( identifier[len] ( identifier[solutions] )) | def feasible_ratio(self, solutions):
"""counts for each coordinate the number of feasible values in
``solutions`` and returns an array of length ``len(solutions[0])``
with the ratios.
`solutions` is a list or array of repaired ``Solution``
instances,
"""
raise NotImplementedError('Solution class disappeared')
count = np.zeros(len(solutions[0]))
for x in solutions:
count += x.unrepaired == x # depends on [control=['for'], data=['x']]
return count / float(len(solutions)) |
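Note that feasible_ratio raises NotImplementedError on entry, so the counting loop after the raise is dead code. A standalone reconstruction of what that loop would compute might look like the sketch below, with plain NumPy arrays replacing the vanished Solution class and its unrepaired attribute; this is an illustration, not the library's live API.

```python
# Both inputs are (n_solutions, n_coordinates) arrays; `unrepaired`
# mimics the Solution.unrepaired attribute from the dead branch above.
import numpy as np

def feasible_ratio_sketch(unrepaired, repaired):
    count = np.zeros(repaired.shape[1])
    for before, after in zip(unrepaired, repaired):
        count += before == after  # 1 where the coordinate needed no repair
    return count / float(len(repaired))

before = np.array([[0.0, 5.0], [1.0, -3.0]])
after = np.array([[0.0, 1.0], [1.0, 0.0]])  # second coordinate repaired
print(feasible_ratio_sketch(before, after))  # [1. 0.]
```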
def write(text, filename, encoding='utf-8', mode='wb'):
"""
Write 'text' to file ('filename') assuming 'encoding' in an atomic way
Return (eventually new) encoding
"""
text, encoding = encode(text, encoding)
if 'a' in mode:
with open(filename, mode) as textfile:
textfile.write(text)
else:
with atomic_write(filename,
overwrite=True,
mode=mode) as textfile:
textfile.write(text)
return encoding | def function[write, parameter[text, filename, encoding, mode]]:
constant[
Write 'text' to file ('filename') assuming 'encoding' in an atomic way
Return (eventually new) encoding
]
<ast.Tuple object at 0x7da1b21d6230> assign[=] call[name[encode], parameter[name[text], name[encoding]]]
if compare[constant[a] in name[mode]] begin[:]
with call[name[open], parameter[name[filename], name[mode]]] begin[:]
call[name[textfile].write, parameter[name[text]]]
return[name[encoding]] | keyword[def] identifier[write] ( identifier[text] , identifier[filename] , identifier[encoding] = literal[string] , identifier[mode] = literal[string] ):
literal[string]
identifier[text] , identifier[encoding] = identifier[encode] ( identifier[text] , identifier[encoding] )
keyword[if] literal[string] keyword[in] identifier[mode] :
keyword[with] identifier[open] ( identifier[filename] , identifier[mode] ) keyword[as] identifier[textfile] :
identifier[textfile] . identifier[write] ( identifier[text] )
keyword[else] :
keyword[with] identifier[atomic_write] ( identifier[filename] ,
identifier[overwrite] = keyword[True] ,
identifier[mode] = identifier[mode] ) keyword[as] identifier[textfile] :
identifier[textfile] . identifier[write] ( identifier[text] )
keyword[return] identifier[encoding] | def write(text, filename, encoding='utf-8', mode='wb'):
"""
Write 'text' to file ('filename') assuming 'encoding' in an atomic way
Return (eventually new) encoding
"""
(text, encoding) = encode(text, encoding)
if 'a' in mode:
with open(filename, mode) as textfile:
textfile.write(text) # depends on [control=['with'], data=['textfile']] # depends on [control=['if'], data=['mode']]
else:
with atomic_write(filename, overwrite=True, mode=mode) as textfile:
textfile.write(text) # depends on [control=['with'], data=['textfile']]
return encoding |
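A hedged call sketch for write above. The module-level helpers encode and atomic_write are not part of this row, so their behavior here is an assumption: encode returning a (bytes, encoding) pair, and atomic_write acting as an atomic-replace context manager.

```python
# Sketch only: these calls work inside the module that defines
# encode() and atomic_write(), which are not shown in this row.
enc = write("first line\n", "/tmp/demo.txt")                # atomic rewrite
enc = write("appended line\n", "/tmp/demo.txt", mode="ab")  # plain append
print(enc)  # 'utf-8'
```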
def _merge_groups(self, backends_list):
""" merge a list backends_groups"""
ret = {}
for backends in backends_list:
for b in backends:
if b not in ret:
ret[b] = set([])
for group in backends[b]:
ret[b].add(group)
for b in ret:
ret[b] = list(ret[b])
ret[b].sort()
return ret | def function[_merge_groups, parameter[self, backends_list]]:
constant[ merge a list backends_groups]
variable[ret] assign[=] dictionary[[], []]
for taget[name[backends]] in starred[name[backends_list]] begin[:]
for taget[name[b]] in starred[name[backends]] begin[:]
if compare[name[b] <ast.NotIn object at 0x7da2590d7190> name[ret]] begin[:]
call[name[ret]][name[b]] assign[=] call[name[set], parameter[list[[]]]]
for taget[name[group]] in starred[call[name[backends]][name[b]]] begin[:]
call[call[name[ret]][name[b]].add, parameter[name[group]]]
for taget[name[b]] in starred[name[ret]] begin[:]
call[name[ret]][name[b]] assign[=] call[name[list], parameter[call[name[ret]][name[b]]]]
call[call[name[ret]][name[b]].sort, parameter[]]
return[name[ret]] | keyword[def] identifier[_merge_groups] ( identifier[self] , identifier[backends_list] ):
literal[string]
identifier[ret] ={}
keyword[for] identifier[backends] keyword[in] identifier[backends_list] :
keyword[for] identifier[b] keyword[in] identifier[backends] :
keyword[if] identifier[b] keyword[not] keyword[in] identifier[ret] :
identifier[ret] [ identifier[b] ]= identifier[set] ([])
keyword[for] identifier[group] keyword[in] identifier[backends] [ identifier[b] ]:
identifier[ret] [ identifier[b] ]. identifier[add] ( identifier[group] )
keyword[for] identifier[b] keyword[in] identifier[ret] :
identifier[ret] [ identifier[b] ]= identifier[list] ( identifier[ret] [ identifier[b] ])
identifier[ret] [ identifier[b] ]. identifier[sort] ()
keyword[return] identifier[ret] | def _merge_groups(self, backends_list):
""" merge a list backends_groups"""
ret = {}
for backends in backends_list:
for b in backends:
if b not in ret:
ret[b] = set([]) # depends on [control=['if'], data=['b', 'ret']]
for group in backends[b]:
ret[b].add(group) # depends on [control=['for'], data=['group']] # depends on [control=['for'], data=['b']] # depends on [control=['for'], data=['backends']]
for b in ret:
ret[b] = list(ret[b])
ret[b].sort() # depends on [control=['for'], data=['b']]
return ret |
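_merge_groups never touches self, so it can be exercised directly by binding the function to a throwaway class; the backend and group names below are made up for the demonstration.

```python
class _Demo:
    merge = _merge_groups  # reuse the function defined above as a method

merged = _Demo().merge([
    {"ldap": ["admins"], "sql": ["users"]},
    {"ldap": ["users", "admins"]},
])
print(merged)  # {'ldap': ['admins', 'users'], 'sql': ['users']}
```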
def get_port_by_ref(self, port_ref):
'''Get a port of this component by reference to a CORBA PortService
object.
'''
with self._mutex:
for p in self.ports:
if p.object._is_equivalent(port_ref):
return p
return None | def function[get_port_by_ref, parameter[self, port_ref]]:
constant[Get a port of this component by reference to a CORBA PortService
object.
]
with name[self]._mutex begin[:]
for taget[name[p]] in starred[name[self].ports] begin[:]
if call[name[p].object._is_equivalent, parameter[name[port_ref]]] begin[:]
return[name[p]]
return[constant[None]] | keyword[def] identifier[get_port_by_ref] ( identifier[self] , identifier[port_ref] ):
literal[string]
keyword[with] identifier[self] . identifier[_mutex] :
keyword[for] identifier[p] keyword[in] identifier[self] . identifier[ports] :
keyword[if] identifier[p] . identifier[object] . identifier[_is_equivalent] ( identifier[port_ref] ):
keyword[return] identifier[p]
keyword[return] keyword[None] | def get_port_by_ref(self, port_ref):
"""Get a port of this component by reference to a CORBA PortService
object.
"""
with self._mutex:
for p in self.ports:
if p.object._is_equivalent(port_ref):
return p # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']]
return None # depends on [control=['with'], data=[]] |
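get_port_by_ref needs live CORBA references, so the sketch below exercises the same lookup with hand-rolled stand-ins; _FakeRef._is_equivalent is an assumption that mimics omniORB reference equivalence with plain object identity.

```python
import threading

class _FakeRef:
    def _is_equivalent(self, other):  # identity stands in for CORBA equivalence
        return self is other

class _FakePort:
    def __init__(self, ref):
        self.object = ref

class _FakeComp:
    get_port_by_ref = get_port_by_ref  # reuse the method defined above
    def __init__(self, ports):
        self._mutex = threading.RLock()
        self.ports = ports

wanted = _FakeRef()
comp = _FakeComp([_FakePort(_FakeRef()), _FakePort(wanted)])
assert comp.get_port_by_ref(wanted) is comp.ports[1]
print("matched the second port")
```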
def deep_run(self):
"""Derive period-based features."""
# Lomb-Scargle period finding.
self.get_period_LS(self.date, self.mag, self.n_threads, self.min_period)
# Features based on a phase-folded light curve
# such as Eta, slope-percentile, etc.
    # Must be called after get_period_LS() has run.
    # Create a phase-folded light curve.
# We use period * 2 to take eclipsing binaries into account.
phase_folded_date = self.date % (self.period * 2.)
sorted_index = np.argsort(phase_folded_date)
folded_date = phase_folded_date[sorted_index]
folded_mag = self.mag[sorted_index]
# phase Eta
self.phase_eta = self.get_eta(folded_mag, self.weighted_std)
# Slope percentile.
self.slope_per10, self.slope_per90 = \
self.slope_percentile(folded_date, folded_mag)
# phase Cusum
self.phase_cusum = self.get_cusum(folded_mag) | def function[deep_run, parameter[self]]:
constant[Derive period-based features.]
call[name[self].get_period_LS, parameter[name[self].date, name[self].mag, name[self].n_threads, name[self].min_period]]
variable[phase_folded_date] assign[=] binary_operation[name[self].date <ast.Mod object at 0x7da2590d6920> binary_operation[name[self].period * constant[2.0]]]
variable[sorted_index] assign[=] call[name[np].argsort, parameter[name[phase_folded_date]]]
variable[folded_date] assign[=] call[name[phase_folded_date]][name[sorted_index]]
variable[folded_mag] assign[=] call[name[self].mag][name[sorted_index]]
name[self].phase_eta assign[=] call[name[self].get_eta, parameter[name[folded_mag], name[self].weighted_std]]
<ast.Tuple object at 0x7da207f030a0> assign[=] call[name[self].slope_percentile, parameter[name[folded_date], name[folded_mag]]]
name[self].phase_cusum assign[=] call[name[self].get_cusum, parameter[name[folded_mag]]] | keyword[def] identifier[deep_run] ( identifier[self] ):
literal[string]
identifier[self] . identifier[get_period_LS] ( identifier[self] . identifier[date] , identifier[self] . identifier[mag] , identifier[self] . identifier[n_threads] , identifier[self] . identifier[min_period] )
identifier[phase_folded_date] = identifier[self] . identifier[date] %( identifier[self] . identifier[period] * literal[int] )
identifier[sorted_index] = identifier[np] . identifier[argsort] ( identifier[phase_folded_date] )
identifier[folded_date] = identifier[phase_folded_date] [ identifier[sorted_index] ]
identifier[folded_mag] = identifier[self] . identifier[mag] [ identifier[sorted_index] ]
identifier[self] . identifier[phase_eta] = identifier[self] . identifier[get_eta] ( identifier[folded_mag] , identifier[self] . identifier[weighted_std] )
identifier[self] . identifier[slope_per10] , identifier[self] . identifier[slope_per90] = identifier[self] . identifier[slope_percentile] ( identifier[folded_date] , identifier[folded_mag] )
identifier[self] . identifier[phase_cusum] = identifier[self] . identifier[get_cusum] ( identifier[folded_mag] ) | def deep_run(self):
"""Derive period-based features."""
# Lomb-Scargle period finding.
self.get_period_LS(self.date, self.mag, self.n_threads, self.min_period)
# Features based on a phase-folded light curve
# such as Eta, slope-percentile, etc.
# Should be called after the getPeriodLS() is called.
# Created phased a folded light curve.
# We use period * 2 to take eclipsing binaries into account.
phase_folded_date = self.date % (self.period * 2.0)
sorted_index = np.argsort(phase_folded_date)
folded_date = phase_folded_date[sorted_index]
folded_mag = self.mag[sorted_index]
# phase Eta
self.phase_eta = self.get_eta(folded_mag, self.weighted_std)
# Slope percentile.
(self.slope_per10, self.slope_per90) = self.slope_percentile(folded_date, folded_mag)
# phase Cusum
self.phase_cusum = self.get_cusum(folded_mag) |
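deep_run depends on the surrounding class (get_period_LS, get_eta, slope_percentile, get_cusum), none of which appear in this row. The standalone sketch below reproduces just the phase-folding step on synthetic data, with the period assumed known rather than derived by Lomb-Scargle.

```python
import numpy as np

rng = np.random.default_rng(0)
date = np.sort(rng.uniform(0.0, 100.0, 500))
mag = np.sin(2 * np.pi * date / 3.7) + rng.normal(0.0, 0.1, 500)
period = 3.7  # assumed known; deep_run derives it via Lomb-Scargle

phase = date % (period * 2.0)  # fold on twice the period (eclipsing binaries)
order = np.argsort(phase)
folded_date, folded_mag = phase[order], mag[order]
print(folded_date[:3], folded_mag[:3])
```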
def put_task_info(self, task_name, key, value):
"""
Put information into a task.
:param task_name: name of the task
:param key: key of the information item
:param value: value of the information item
"""
params = OrderedDict([('info', ''), ('taskname', task_name)])
headers = {'Content-Type': 'application/xml'}
body = self.TaskInfo(key=key, value=value).serialize()
self._client.put(self.resource(), params=params, headers=headers, data=body) | def function[put_task_info, parameter[self, task_name, key, value]]:
constant[
Put information into a task.
:param task_name: name of the task
:param key: key of the information item
:param value: value of the information item
]
variable[params] assign[=] call[name[OrderedDict], parameter[list[[<ast.Tuple object at 0x7da204346b60>, <ast.Tuple object at 0x7da2043452d0>]]]]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da204347a00>], [<ast.Constant object at 0x7da2043443a0>]]
variable[body] assign[=] call[call[name[self].TaskInfo, parameter[]].serialize, parameter[]]
call[name[self]._client.put, parameter[call[name[self].resource, parameter[]]]] | keyword[def] identifier[put_task_info] ( identifier[self] , identifier[task_name] , identifier[key] , identifier[value] ):
literal[string]
identifier[params] = identifier[OrderedDict] ([( literal[string] , literal[string] ),( literal[string] , identifier[task_name] )])
identifier[headers] ={ literal[string] : literal[string] }
identifier[body] = identifier[self] . identifier[TaskInfo] ( identifier[key] = identifier[key] , identifier[value] = identifier[value] ). identifier[serialize] ()
identifier[self] . identifier[_client] . identifier[put] ( identifier[self] . identifier[resource] (), identifier[params] = identifier[params] , identifier[headers] = identifier[headers] , identifier[data] = identifier[body] ) | def put_task_info(self, task_name, key, value):
"""
Put information into a task.
:param task_name: name of the task
:param key: key of the information item
:param value: value of the information item
"""
params = OrderedDict([('info', ''), ('taskname', task_name)])
headers = {'Content-Type': 'application/xml'}
body = self.TaskInfo(key=key, value=value).serialize()
self._client.put(self.resource(), params=params, headers=headers, data=body) |
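A hedged call sketch for put_task_info; `instance` stands in for whatever object carries the TaskInfo class and the REST client (neither is shown in this row), and the task name and key/value pair are invented for illustration.

```python
# `instance`, the task name, and the setting below are placeholders,
# not real API state; this only works against a live ODPS-style object.
instance.put_task_info(
    "console_query_task_0",
    "settings",
    '{"odps.sql.mapper.split.size": "64"}',
)
```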
def __find_source_files(self):
"""
Searches recursively for all source files in a directory.
"""
for dir_path, _, files in os.walk(self._source_directory):
for name in files:
if name.lower().endswith(self._source_file_extension):
basename = os.path.splitext(os.path.basename(name))[0]
relative_path = os.path.relpath(os.path.join(dir_path, name))
if basename in self._source_file_names:
self._io.error("Files '{0}' and '{1}' have the same basename.".
format(self._source_file_names[basename], relative_path))
self.error_file_names.add(relative_path)
else:
self._source_file_names[basename] = relative_path | def function[__find_source_files, parameter[self]]:
constant[
Searches recursively for all source files in a directory.
]
for taget[tuple[[<ast.Name object at 0x7da18f00efb0>, <ast.Name object at 0x7da18f00e500>, <ast.Name object at 0x7da18f00ca90>]]] in starred[call[name[os].walk, parameter[name[self]._source_directory]]] begin[:]
for taget[name[name]] in starred[name[files]] begin[:]
if call[call[name[name].lower, parameter[]].endswith, parameter[name[self]._source_file_extension]] begin[:]
variable[basename] assign[=] call[call[name[os].path.splitext, parameter[call[name[os].path.basename, parameter[name[name]]]]]][constant[0]]
variable[relative_path] assign[=] call[name[os].path.relpath, parameter[call[name[os].path.join, parameter[name[dir_path], name[name]]]]]
if compare[name[basename] in name[self]._source_file_names] begin[:]
call[name[self]._io.error, parameter[call[constant[Files '{0}' and '{1}' have the same basename.].format, parameter[call[name[self]._source_file_names][name[basename]], name[relative_path]]]]]
call[name[self].error_file_names.add, parameter[name[relative_path]]] | keyword[def] identifier[__find_source_files] ( identifier[self] ):
literal[string]
keyword[for] identifier[dir_path] , identifier[_] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[self] . identifier[_source_directory] ):
keyword[for] identifier[name] keyword[in] identifier[files] :
keyword[if] identifier[name] . identifier[lower] (). identifier[endswith] ( identifier[self] . identifier[_source_file_extension] ):
identifier[basename] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[name] ))[ literal[int] ]
identifier[relative_path] = identifier[os] . identifier[path] . identifier[relpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dir_path] , identifier[name] ))
keyword[if] identifier[basename] keyword[in] identifier[self] . identifier[_source_file_names] :
identifier[self] . identifier[_io] . identifier[error] ( literal[string] .
identifier[format] ( identifier[self] . identifier[_source_file_names] [ identifier[basename] ], identifier[relative_path] ))
identifier[self] . identifier[error_file_names] . identifier[add] ( identifier[relative_path] )
keyword[else] :
identifier[self] . identifier[_source_file_names] [ identifier[basename] ]= identifier[relative_path] | def __find_source_files(self):
"""
Searches recursively for all source files in a directory.
"""
for (dir_path, _, files) in os.walk(self._source_directory):
for name in files:
if name.lower().endswith(self._source_file_extension):
basename = os.path.splitext(os.path.basename(name))[0]
relative_path = os.path.relpath(os.path.join(dir_path, name))
if basename in self._source_file_names:
self._io.error("Files '{0}' and '{1}' have the same basename.".format(self._source_file_names[basename], relative_path))
self.error_file_names.add(relative_path) # depends on [control=['if'], data=['basename']]
else:
self._source_file_names[basename] = relative_path # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']] # depends on [control=['for'], data=[]] |
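__find_source_files only reads attributes off self, so a SimpleNamespace makes a workable harness; the error-printing _io stub and the .sql extension below are assumptions chosen for the demonstration.

```python
import os  # used by __find_source_files above
from types import SimpleNamespace

stub = SimpleNamespace(
    _source_directory=".",            # directory tree to scan
    _source_file_extension=".sql",
    _source_file_names={},
    error_file_names=set(),
    _io=SimpleNamespace(error=lambda msg: print("ERROR:", msg)),
)
__find_source_files(stub)             # no name mangling at module level
print(stub._source_file_names)        # basename -> relative path
```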