code (string, 75 to 104k chars) | code_sememe (string, 47 to 309k chars) | token_type (string, 215 to 214k chars) | code_dependency (string, 75 to 155k chars)
---|---|---|---|
def format_servers(servers):
"""
:param servers: server list; each element can be given in one of two formats.
- dict
servers = [
{'name':'node1','host':'127.0.0.1','port':10000,'db':0},
{'name':'node2','host':'127.0.0.1','port':11000,'db':0},
{'name':'node3','host':'127.0.0.1','port':12000,'db':0},
]
- url_schema
servers = ['redis://127.0.0.1:10000/0?name=node1',
'redis://127.0.0.1:11000/0?name=node2',
'redis://127.0.0.1:12000/0?name=node3'
]
"""
configs = []
if not isinstance(servers, list):
raise ValueError("server's config must be list")
_type = type(servers[0])
if _type == dict:
return servers
if (sys.version_info[0] == 3 and _type in [str, bytes]) \
or (sys.version_info[0] == 2 and _type in [str, unicode]):
for config in servers:
configs.append(parse_url(config))
else:
raise ValueError("invalid server config")
return configs | def function[format_servers, parameter[servers]]:
constant[
:param servers: server list; each element can be given in one of two formats.
- dict
servers = [
{'name':'node1','host':'127.0.0.1','port':10000,'db':0},
{'name':'node2','host':'127.0.0.1','port':11000,'db':0},
{'name':'node3','host':'127.0.0.1','port':12000,'db':0},
]
- url_schema
servers = ['redis://127.0.0.1:10000/0?name=node1',
'redis://127.0.0.1:11000/0?name=node2',
'redis://127.0.0.1:12000/0?name=node3'
]
]
variable[configs] assign[=] list[[]]
if <ast.UnaryOp object at 0x7da20c992bf0> begin[:]
<ast.Raise object at 0x7da20c990190>
variable[_type] assign[=] call[name[type], parameter[call[name[servers]][constant[0]]]]
if compare[name[_type] equal[==] name[dict]] begin[:]
return[name[servers]]
if <ast.BoolOp object at 0x7da20c990c70> begin[:]
for taget[name[config]] in starred[name[servers]] begin[:]
call[name[configs].append, parameter[call[name[parse_url], parameter[name[config]]]]]
return[name[configs]] | keyword[def] identifier[format_servers] ( identifier[servers] ):
literal[string]
identifier[configs] =[]
keyword[if] keyword[not] identifier[isinstance] ( identifier[servers] , identifier[list] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[_type] = identifier[type] ( identifier[servers] [ literal[int] ])
keyword[if] identifier[_type] == identifier[dict] :
keyword[return] identifier[servers]
keyword[if] ( identifier[sys] . identifier[version_info] [ literal[int] ]== literal[int] keyword[and] identifier[_type] keyword[in] [ identifier[str] , identifier[bytes] ]) keyword[or] ( identifier[sys] . identifier[version_info] [ literal[int] ]== literal[int] keyword[and] identifier[_type] keyword[in] [ identifier[str] , identifier[unicode] ]):
keyword[for] identifier[config] keyword[in] identifier[servers] :
identifier[configs] . identifier[append] ( identifier[parse_url] ( identifier[config] ))
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[configs] | def format_servers(servers):
"""
:param servers: server list; each element can be given in one of two formats.
- dict
servers = [
{'name':'node1','host':'127.0.0.1','port':10000,'db':0},
{'name':'node2','host':'127.0.0.1','port':11000,'db':0},
{'name':'node3','host':'127.0.0.1','port':12000,'db':0},
]
- url_schema
servers = ['redis://127.0.0.1:10000/0?name=node1',
'redis://127.0.0.1:11000/0?name=node2',
'redis://127.0.0.1:12000/0?name=node3'
]
"""
configs = []
if not isinstance(servers, list):
raise ValueError("server's config must be list") # depends on [control=['if'], data=[]]
_type = type(servers[0])
if _type == dict:
return servers # depends on [control=['if'], data=[]]
if sys.version_info[0] == 3 and _type in [str, bytes] or (sys.version_info[0] == 2 and _type in [str, unicode]):
for config in servers:
configs.append(parse_url(config)) # depends on [control=['for'], data=['config']] # depends on [control=['if'], data=[]]
else:
raise ValueError('invalid server config')
return configs |
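A minimal usage sketch for the format_servers row above. It assumes parse_url is the module's companion helper that turns a redis:// URL into a config dict, so the commented results are expectations rather than verified output.

# Hypothetical calls; parse_url is assumed to be importable from the same module.
dict_servers = [{'name': 'node1', 'host': '127.0.0.1', 'port': 10000, 'db': 0}]
print(format_servers(dict_servers))   # dict entries are returned unchanged
url_servers = ['redis://127.0.0.1:10000/0?name=node1']
print(format_servers(url_servers))    # each URL is passed through parse_url()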
def get_video_transcript_data(video_id, language_code):
"""
Get video transcript data
Arguments:
video_id(unicode): An id identifying the Video.
language_code(unicode): the language code of the requested transcript.
Returns:
A dict containing transcript file name and its content.
"""
video_transcript = VideoTranscript.get_or_none(video_id, language_code)
if video_transcript:
try:
return dict(file_name=video_transcript.filename, content=video_transcript.transcript.file.read())
except Exception:
logger.exception(
'[edx-val] Error while retrieving transcript for video=%s -- language_code=%s',
video_id,
language_code
)
raise | def function[get_video_transcript_data, parameter[video_id, language_code]]:
constant[
Get video transcript data
Arguments:
video_id(unicode): An id identifying the Video.
language_code(unicode): the language code of the requested transcript.
Returns:
A dict containing transcript file name and its content.
]
variable[video_transcript] assign[=] call[name[VideoTranscript].get_or_none, parameter[name[video_id], name[language_code]]]
if name[video_transcript] begin[:]
<ast.Try object at 0x7da1b02d83d0> | keyword[def] identifier[get_video_transcript_data] ( identifier[video_id] , identifier[language_code] ):
literal[string]
identifier[video_transcript] = identifier[VideoTranscript] . identifier[get_or_none] ( identifier[video_id] , identifier[language_code] )
keyword[if] identifier[video_transcript] :
keyword[try] :
keyword[return] identifier[dict] ( identifier[file_name] = identifier[video_transcript] . identifier[filename] , identifier[content] = identifier[video_transcript] . identifier[transcript] . identifier[file] . identifier[read] ())
keyword[except] identifier[Exception] :
identifier[logger] . identifier[exception] (
literal[string] ,
identifier[video_id] ,
identifier[language_code]
)
keyword[raise] | def get_video_transcript_data(video_id, language_code):
"""
Get video transcript data
Arguments:
video_id(unicode): An id identifying the Video.
language_code(unicode): the language code of the requested transcript.
Returns:
A dict containing transcript file name and its content.
"""
video_transcript = VideoTranscript.get_or_none(video_id, language_code)
if video_transcript:
try:
return dict(file_name=video_transcript.filename, content=video_transcript.transcript.file.read()) # depends on [control=['try'], data=[]]
except Exception:
logger.exception('[edx-val] Error while retrieving transcript for video=%s -- language_code=%s', video_id, language_code)
raise # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] |
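A hedged caller sketch for get_video_transcript_data; VideoTranscript is assumed to be the edx-val Django model referenced above, and the ids shown are placeholders. Note the function returns None implicitly when no transcript exists.

# Hypothetical usage: fetch a transcript and persist it to disk.
data = get_video_transcript_data(video_id=u'example-video', language_code=u'en')
if data:
    with open(data['file_name'], 'wb') as handle:
        handle.write(data['content'])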
def is_valid_ip(self, ip):
"""Return true if the given address in amongst the usable addresses,
or if the given CIDR is contained in this one."""
if not isinstance(ip, (IPv4Address, CIDR)):
if str(ip).find('/') == -1:
ip = IPv4Address(ip)
else:
# Support for CIDR strings/objects, an idea of Nicola Novello.
ip = CIDR(ip)
if isinstance(ip, IPv4Address):
if ip < self._first_ip or ip > self._last_ip:
return False
elif isinstance(ip, CIDR):
# NOTE: manage /31 networks; 127.0.0.1/31 is considered to
# be included in 127.0.0.1/8.
if ip._nm._ip_dec == 0xFFFFFFFE \
and self._nm._ip_dec != 0xFFFFFFFE:
compare_to_first = self._net_ip._ip_dec
compare_to_last = self._bc_ip._ip_dec
else:
compare_to_first = self._first_ip._ip_dec
compare_to_last = self._last_ip._ip_dec
if ip._first_ip._ip_dec < compare_to_first or \
ip._last_ip._ip_dec > compare_to_last:
return False
return True | def function[is_valid_ip, parameter[self, ip]]:
constant[Return true if the given address is amongst the usable addresses,
or if the given CIDR is contained in this one.]
if <ast.UnaryOp object at 0x7da2047e9f60> begin[:]
if compare[call[call[name[str], parameter[name[ip]]].find, parameter[constant[/]]] equal[==] <ast.UnaryOp object at 0x7da2047e85e0>] begin[:]
variable[ip] assign[=] call[name[IPv4Address], parameter[name[ip]]]
if call[name[isinstance], parameter[name[ip], name[IPv4Address]]] begin[:]
if <ast.BoolOp object at 0x7da2047eb2e0> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[is_valid_ip] ( identifier[self] , identifier[ip] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[ip] ,( identifier[IPv4Address] , identifier[CIDR] )):
keyword[if] identifier[str] ( identifier[ip] ). identifier[find] ( literal[string] )==- literal[int] :
identifier[ip] = identifier[IPv4Address] ( identifier[ip] )
keyword[else] :
identifier[ip] = identifier[CIDR] ( identifier[ip] )
keyword[if] identifier[isinstance] ( identifier[ip] , identifier[IPv4Address] ):
keyword[if] identifier[ip] < identifier[self] . identifier[_first_ip] keyword[or] identifier[ip] > identifier[self] . identifier[_last_ip] :
keyword[return] keyword[False]
keyword[elif] identifier[isinstance] ( identifier[ip] , identifier[CIDR] ):
keyword[if] identifier[ip] . identifier[_nm] . identifier[_ip_dec] == literal[int] keyword[and] identifier[self] . identifier[_nm] . identifier[_ip_dec] != literal[int] :
identifier[compare_to_first] = identifier[self] . identifier[_net_ip] . identifier[_ip_dec]
identifier[compare_to_last] = identifier[self] . identifier[_bc_ip] . identifier[_ip_dec]
keyword[else] :
identifier[compare_to_first] = identifier[self] . identifier[_first_ip] . identifier[_ip_dec]
identifier[compare_to_last] = identifier[self] . identifier[_last_ip] . identifier[_ip_dec]
keyword[if] identifier[ip] . identifier[_first_ip] . identifier[_ip_dec] < identifier[compare_to_first] keyword[or] identifier[ip] . identifier[_last_ip] . identifier[_ip_dec] > identifier[compare_to_last] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def is_valid_ip(self, ip):
"""Return true if the given address in amongst the usable addresses,
or if the given CIDR is contained in this one."""
if not isinstance(ip, (IPv4Address, CIDR)):
if str(ip).find('/') == -1:
ip = IPv4Address(ip) # depends on [control=['if'], data=[]]
else:
# Support for CIDR strings/objects, an idea of Nicola Novello.
ip = CIDR(ip) # depends on [control=['if'], data=[]]
if isinstance(ip, IPv4Address):
if ip < self._first_ip or ip > self._last_ip:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(ip, CIDR):
# NOTE: manage /31 networks; 127.0.0.1/31 is considered to
# be included in 127.0.0.1/8.
if ip._nm._ip_dec == 4294967294 and self._nm._ip_dec != 4294967294:
compare_to_first = self._net_ip._ip_dec
compare_to_last = self._bc_ip._ip_dec # depends on [control=['if'], data=[]]
else:
compare_to_first = self._first_ip._ip_dec
compare_to_last = self._last_ip._ip_dec
if ip._first_ip._ip_dec < compare_to_first or ip._last_ip._ip_dec > compare_to_last:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return True |
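A usage sketch for is_valid_ip, assuming CIDR and IPv4Address are the classes from the same IPv4 library the method belongs to; the constructor forms and the commented results follow from the range checks above and are not verified against the library.

net = CIDR('192.168.0.0/24')                           # assumed constructor
print(net.is_valid_ip(IPv4Address('192.168.0.10')))    # True: inside the usable range
print(net.is_valid_ip('192.168.1.1'))                  # False: outside the network
print(net.is_valid_ip('192.168.0.0/26'))               # True: contained sub-CIDR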
def get_avatar(self):
"""Gets the asset.
return: (osid.repository.Asset) - the asset
raise: IllegalState - ``has_avatar()`` is ``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_template
if not bool(self._my_map['avatarId']):
raise errors.IllegalState('this Resource has no avatar')
mgr = self._get_provider_manager('REPOSITORY')
if not mgr.supports_asset_lookup():
raise errors.OperationFailed('Repository does not support Asset lookup')
lookup_session = mgr.get_asset_lookup_session(proxy=getattr(self, "_proxy", None))
lookup_session.use_federated_repository_view()
osid_object = lookup_session.get_asset(self.get_avatar_id())
return osid_object | def function[get_avatar, parameter[self]]:
constant[Gets the asset.
return: (osid.repository.Asset) - the asset
raise: IllegalState - ``has_avatar()`` is ``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
]
if <ast.UnaryOp object at 0x7da1b0a23f40> begin[:]
<ast.Raise object at 0x7da20e954be0>
variable[mgr] assign[=] call[name[self]._get_provider_manager, parameter[constant[REPOSITORY]]]
if <ast.UnaryOp object at 0x7da20c6ab370> begin[:]
<ast.Raise object at 0x7da20c6a8dc0>
variable[lookup_session] assign[=] call[name[mgr].get_asset_lookup_session, parameter[]]
call[name[lookup_session].use_federated_repository_view, parameter[]]
variable[osid_object] assign[=] call[name[lookup_session].get_asset, parameter[call[name[self].get_avatar_id, parameter[]]]]
return[name[osid_object]] | keyword[def] identifier[get_avatar] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[bool] ( identifier[self] . identifier[_my_map] [ literal[string] ]):
keyword[raise] identifier[errors] . identifier[IllegalState] ( literal[string] )
identifier[mgr] = identifier[self] . identifier[_get_provider_manager] ( literal[string] )
keyword[if] keyword[not] identifier[mgr] . identifier[supports_asset_lookup] ():
keyword[raise] identifier[errors] . identifier[OperationFailed] ( literal[string] )
identifier[lookup_session] = identifier[mgr] . identifier[get_asset_lookup_session] ( identifier[proxy] = identifier[getattr] ( identifier[self] , literal[string] , keyword[None] ))
identifier[lookup_session] . identifier[use_federated_repository_view] ()
identifier[osid_object] = identifier[lookup_session] . identifier[get_asset] ( identifier[self] . identifier[get_avatar_id] ())
keyword[return] identifier[osid_object] | def get_avatar(self):
"""Gets the asset.
return: (osid.repository.Asset) - the asset
raise: IllegalState - ``has_avatar()`` is ``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_template
if not bool(self._my_map['avatarId']):
raise errors.IllegalState('this Resource has no avatar') # depends on [control=['if'], data=[]]
mgr = self._get_provider_manager('REPOSITORY')
if not mgr.supports_asset_lookup():
raise errors.OperationFailed('Repository does not support Asset lookup') # depends on [control=['if'], data=[]]
lookup_session = mgr.get_asset_lookup_session(proxy=getattr(self, '_proxy', None))
lookup_session.use_federated_repository_view()
osid_object = lookup_session.get_asset(self.get_avatar_id())
return osid_object |
def range_to_numeric(ranges):
"""Converts a sequence of string ranges to a sequence of floats.
E.g.::
>>> range_to_numeric(['1 uV', '2 mV', '1 V'])
[1E-6, 0.002, 1.0]
"""
values, units = zip(*(r.split() for r in ranges))
# Detect common unit.
unit = os.path.commonprefix([u[::-1] for u in units])
# Strip unit to get just the SI prefix.
prefixes = (u[:-len(unit)] for u in units)
# Convert string value and scale with prefix.
values = [float(v) * SI_PREFIX[p] for v, p in zip(values, prefixes)]
return values | def function[range_to_numeric, parameter[ranges]]:
constant[Converts a sequence of string ranges to a sequence of floats.
E.g.::
>>> range_to_numeric(['1 uV', '2 mV', '1 V'])
[1E-6, 0.002, 1.0]
]
<ast.Tuple object at 0x7da1b0b12d40> assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da1b0a22860>]]
variable[unit] assign[=] call[name[os].path.commonprefix, parameter[<ast.ListComp object at 0x7da1b0a23460>]]
variable[prefixes] assign[=] <ast.GeneratorExp object at 0x7da1b0a21c60>
variable[values] assign[=] <ast.ListComp object at 0x7da1b0a23070>
return[name[values]] | keyword[def] identifier[range_to_numeric] ( identifier[ranges] ):
literal[string]
identifier[values] , identifier[units] = identifier[zip] (*( identifier[r] . identifier[split] () keyword[for] identifier[r] keyword[in] identifier[ranges] ))
identifier[unit] = identifier[os] . identifier[path] . identifier[commonprefix] ([ identifier[u] [::- literal[int] ] keyword[for] identifier[u] keyword[in] identifier[units] ])
identifier[prefixes] =( identifier[u] [:- identifier[len] ( identifier[unit] )] keyword[for] identifier[u] keyword[in] identifier[units] )
identifier[values] =[ identifier[float] ( identifier[v] )* identifier[SI_PREFIX] [ identifier[p] ] keyword[for] identifier[v] , identifier[p] keyword[in] identifier[zip] ( identifier[values] , identifier[prefixes] )]
keyword[return] identifier[values] | def range_to_numeric(ranges):
"""Converts a sequence of string ranges to a sequence of floats.
E.g.::
>>> range_to_numeric(['1 uV', '2 mV', '1 V'])
[1E-6, 0.002, 1.0]
"""
(values, units) = zip(*(r.split() for r in ranges))
# Detect common unit.
unit = os.path.commonprefix([u[::-1] for u in units])
# Strip unit to get just the SI prefix.
prefixes = (u[:-len(unit)] for u in units)
# Convert string value and scale with prefix.
values = [float(v) * SI_PREFIX[p] for (v, p) in zip(values, prefixes)]
return values |
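range_to_numeric indexes a module-level SI_PREFIX table that is not part of this row; the sketch below shows the shape that table presumably has, with an empty-string key covering unprefixed units.

# Assumed prefix table; only the keys a caller actually uses need to exist.
SI_PREFIX = {'': 1.0, 'n': 1e-9, 'u': 1e-6, 'm': 1e-3, 'k': 1e3, 'M': 1e6}
# With it in place: range_to_numeric(['1 uV', '2 mV', '1 V']) -> [1e-06, 0.002, 1.0]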
def make_template_name(self, model_type, sourcekey):
""" Make the name of a template file for particular component
Parameters
----------
model_type : str
Type of model to use for this component
sourcekey : str
Key to identify this component
Returns filename or None if component does not require a template file
"""
format_dict = self.__dict__.copy()
format_dict['sourcekey'] = sourcekey
if model_type == 'IsoSource':
return self._name_factory.spectral_template(**format_dict)
elif model_type in ['MapCubeSource', 'SpatialMap']:
return self._name_factory.diffuse_template(**format_dict)
else:
raise ValueError("Unexpected model_type %s" % model_type) | def function[make_template_name, parameter[self, model_type, sourcekey]]:
constant[ Make the name of a template file for particular component
Parameters
----------
model_type : str
Type of model to use for this component
sourcekey : str
Key to identify this component
Returns filename or None if component does not require a template file
]
variable[format_dict] assign[=] call[name[self].__dict__.copy, parameter[]]
call[name[format_dict]][constant[sourcekey]] assign[=] name[sourcekey]
if compare[name[model_type] equal[==] constant[IsoSource]] begin[:]
return[call[name[self]._name_factory.spectral_template, parameter[]]] | keyword[def] identifier[make_template_name] ( identifier[self] , identifier[model_type] , identifier[sourcekey] ):
literal[string]
identifier[format_dict] = identifier[self] . identifier[__dict__] . identifier[copy] ()
identifier[format_dict] [ literal[string] ]= identifier[sourcekey]
keyword[if] identifier[model_type] == literal[string] :
keyword[return] identifier[self] . identifier[_name_factory] . identifier[spectral_template] (** identifier[format_dict] )
keyword[elif] identifier[model_type] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] identifier[self] . identifier[_name_factory] . identifier[diffuse_template] (** identifier[format_dict] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[model_type] ) | def make_template_name(self, model_type, sourcekey):
""" Make the name of a template file for particular component
Parameters
----------
model_type : str
Type of model to use for this component
sourcekey : str
Key to identify this component
Returns filename or None if component does not require a template file
"""
format_dict = self.__dict__.copy()
format_dict['sourcekey'] = sourcekey
if model_type == 'IsoSource':
return self._name_factory.spectral_template(**format_dict) # depends on [control=['if'], data=[]]
elif model_type in ['MapCubeSource', 'SpatialMap']:
return self._name_factory.diffuse_template(**format_dict) # depends on [control=['if'], data=[]]
else:
raise ValueError('Unexpected model_type %s' % model_type) |
def authority(self, column=None, value=None, **kwargs):
"""Provides codes and associated authorizing statutes."""
return self._resolve_call('GIC_AUTHORITY', column, value, **kwargs) | def function[authority, parameter[self, column, value]]:
constant[Provides codes and associated authorizing statutes.]
return[call[name[self]._resolve_call, parameter[constant[GIC_AUTHORITY], name[column], name[value]]]] | keyword[def] identifier[authority] ( identifier[self] , identifier[column] = keyword[None] , identifier[value] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[_resolve_call] ( literal[string] , identifier[column] , identifier[value] ,** identifier[kwargs] ) | def authority(self, column=None, value=None, **kwargs):
"""Provides codes and associated authorizing statutes."""
return self._resolve_call('GIC_AUTHORITY', column, value, **kwargs) |
def refetch(self):
'''Reload children.'''
# Reset children
for child in self.children[:]:
self.removeChild(child)
# Enable children fetching
self._fetched = False | def function[refetch, parameter[self]]:
constant[Reload children.]
for taget[name[child]] in starred[call[name[self].children][<ast.Slice object at 0x7da1b03ba020>]] begin[:]
call[name[self].removeChild, parameter[name[child]]]
name[self]._fetched assign[=] constant[False] | keyword[def] identifier[refetch] ( identifier[self] ):
literal[string]
keyword[for] identifier[child] keyword[in] identifier[self] . identifier[children] [:]:
identifier[self] . identifier[removeChild] ( identifier[child] )
identifier[self] . identifier[_fetched] = keyword[False] | def refetch(self):
"""Reload children."""
# Reset children
for child in self.children[:]:
self.removeChild(child) # depends on [control=['for'], data=['child']]
# Enable children fetching
self._fetched = False |
def set_wait(self, wait):
"""
set the waiting time.
:Parameters:
#. wait (number): The time delay between each attempt to lock. By default it's
set to 0 to keep the acquiring mechanism trying to acquire the lock without
losing any time waiting. Setting wait to a higher value such as 0.05 seconds
or higher can be very useful in special cases when many processes are trying
to acquire the lock and one of them needs to hold it and release it at a higher
frequency or rate.
"""
try:
wait = float(wait)
assert wait>=0
except:
raise Exception('wait must be a positive number')
self.__wait = wait | def function[set_wait, parameter[self, wait]]:
constant[
set the waiting time.
:Parameters:
#. wait (number): The time delay between each attempt to lock. By default it's
set to 0 to keep the acquiring mechanism trying to acquire the lock without
losing any time waiting. Setting wait to a higher value such as 0.05 seconds
or higher can be very useful in special cases when many processes are trying
to acquire the lock and one of them needs to hold it and release it at a higher
frequency or rate.
]
<ast.Try object at 0x7da1b25861d0>
name[self].__wait assign[=] name[wait] | keyword[def] identifier[set_wait] ( identifier[self] , identifier[wait] ):
literal[string]
keyword[try] :
identifier[wait] = identifier[float] ( identifier[wait] )
keyword[assert] identifier[wait] >= literal[int]
keyword[except] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[self] . identifier[__wait] = identifier[wait] | def set_wait(self, wait):
"""
set the waiting time.
:Parameters:
#. wait (number): The time delay between each attempt to lock. By default it's
set to 0 to keeping the aquiring mechanism trying to acquire the lock without
losing any time waiting. Setting wait to a higher value suchs as 0.05 seconds
or higher can be very useful in special cases when many processes are trying
to acquire the lock and one of them needs to hold it a release it at a higher
frequency or rate.
"""
try:
wait = float(wait)
assert wait >= 0 # depends on [control=['try'], data=[]]
except:
raise Exception('wait must be a positive number') # depends on [control=['except'], data=[]]
self.__wait = wait |
def woven(fun):
'''Decorator that will initialize and eventually start nested fibers.'''
def wrapper(*args, **kwargs):
section = WovenSection()
section.enter()
result = fun(*args, **kwargs)
return section.exit(result)
return wrapper | def function[woven, parameter[fun]]:
constant[Decorator that will initialize and eventually start nested fibers.]
def function[wrapper, parameter[]]:
variable[section] assign[=] call[name[WovenSection], parameter[]]
call[name[section].enter, parameter[]]
variable[result] assign[=] call[name[fun], parameter[<ast.Starred object at 0x7da1b2344310>]]
return[call[name[section].exit, parameter[name[result]]]]
return[name[wrapper]] | keyword[def] identifier[woven] ( identifier[fun] ):
literal[string]
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[section] = identifier[WovenSection] ()
identifier[section] . identifier[enter] ()
identifier[result] = identifier[fun] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[section] . identifier[exit] ( identifier[result] )
keyword[return] identifier[wrapper] | def woven(fun):
"""Decorator that will initialize and eventually start nested fibers."""
def wrapper(*args, **kwargs):
section = WovenSection()
section.enter()
result = fun(*args, **kwargs)
return section.exit(result)
return wrapper |
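A decorator-usage sketch for woven; it assumes WovenSection is importable from the same fiber framework, and the wrapped function is a stand-in.

@woven
def work(x):
    return x * 2          # any nested fibers started here run inside the section
print(work(21))           # section.enter() runs first, section.exit(42) wraps the result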
def as_dictionary(self):
"""Return the key as a python dictionary."""
values = {
'id': self._id,
'type': self._type
}
if self._owner:
values['owner'] = self._owner
return values | def function[as_dictionary, parameter[self]]:
constant[Return the key as a python dictionary.]
variable[values] assign[=] dictionary[[<ast.Constant object at 0x7da18bccbb80>, <ast.Constant object at 0x7da18bcc99f0>], [<ast.Attribute object at 0x7da18bcc9510>, <ast.Attribute object at 0x7da18bcc8190>]]
if name[self]._owner begin[:]
call[name[values]][constant[owner]] assign[=] name[self]._owner
return[name[values]] | keyword[def] identifier[as_dictionary] ( identifier[self] ):
literal[string]
identifier[values] ={
literal[string] : identifier[self] . identifier[_id] ,
literal[string] : identifier[self] . identifier[_type]
}
keyword[if] identifier[self] . identifier[_owner] :
identifier[values] [ literal[string] ]= identifier[self] . identifier[_owner]
keyword[return] identifier[values] | def as_dictionary(self):
"""Return the key as a python dictionary."""
values = {'id': self._id, 'type': self._type}
if self._owner:
values['owner'] = self._owner # depends on [control=['if'], data=[]]
return values |
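The dictionary contract of as_dictionary, demonstrated with a stand-in class since the real constructor is not part of this row.

# Stand-in object carrying the same private attributes the method reads.
class _Key(object):
    _id, _type, _owner = 'key-1', 'ssh-rsa', None
    as_dictionary = as_dictionary          # reuse the function defined above
print(_Key().as_dictionary())              # {'id': 'key-1', 'type': 'ssh-rsa'}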
def table_present(name, db, schema, force=False):
'''
Make sure the specified table exists with the specified schema
name
The name of the table
db
The name of the database file
schema
The dictionary containing the schema information
force
If the table already exists with a different schema and force is
set to False, the state will fail. If force is set to True, the
existing table will be replaced with the new table
'''
changes = {'name': name,
'changes': {},
'result': None,
'comment': ''}
conn = None
try:
conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
tables = _query(conn,
"SELECT sql FROM sqlite_master " +
"WHERE type='table' AND name=?", [name])
if len(tables) == 1:
sql = None
if isinstance(schema, six.string_types):
sql = schema.strip()
else:
sql = _get_sql_from_schema(name, schema)
if sql != tables[0][0]:
if force:
if __opts__['test']:
changes['result'] = True
changes['changes']['old'] = tables[0][0]
changes['changes']['new'] = sql
changes['comment'] = "'" + name + "' will be replaced"
else:
conn.execute("DROP TABLE `" + name + "`")
conn.execute(sql)
conn.commit()
changes['result'] = True
changes['changes']['old'] = tables[0][0]
changes['changes']['new'] = sql
changes['comment'] = "Replaced '" + name + "'"
else:
changes['result'] = False
changes['comment'] = "Expected schema=" + sql + \
"\nactual schema=" + tables[0][0]
else:
changes['result'] = True
changes['comment'] = "'" + name + \
"' exists with matching schema"
elif not tables:
# Create the table
sql = None
if isinstance(schema, six.string_types):
sql = schema
else:
sql = _get_sql_from_schema(name, schema)
if __opts__['test']:
changes['result'] = True
changes['changes']['new'] = sql
changes['comment'] = "'" + name + "' will be created"
else:
conn.execute(sql)
conn.commit()
changes['result'] = True
changes['changes']['new'] = sql
changes['comment'] = "Created table '" + name + "'"
else:
changes['result'] = False
changes['comment'] = 'Multiple tables with the same name=' + name
except Exception as e:
changes['result'] = False
changes['comment'] = str(e)
finally:
if conn:
conn.close()
return changes | def function[table_present, parameter[name, db, schema, force]]:
constant[
Make sure the specified table exists with the specified schema
name
The name of the table
db
The name of the database file
schema
The dictionary containing the schema information
force
If the table already exists with a different schema and force is
set to False, the state will fail. If force is set to True, the
existing table will be replaced with the new table
]
variable[changes] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b39d0>, <ast.Constant object at 0x7da20e9b1900>, <ast.Constant object at 0x7da20e9b2950>, <ast.Constant object at 0x7da20e9b29b0>], [<ast.Name object at 0x7da20e9b2560>, <ast.Dict object at 0x7da20e9b07f0>, <ast.Constant object at 0x7da20e9b2230>, <ast.Constant object at 0x7da20e9b05b0>]]
variable[conn] assign[=] constant[None]
<ast.Try object at 0x7da20e9b3940>
return[name[changes]] | keyword[def] identifier[table_present] ( identifier[name] , identifier[db] , identifier[schema] , identifier[force] = keyword[False] ):
literal[string]
identifier[changes] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[None] ,
literal[string] : literal[string] }
identifier[conn] = keyword[None]
keyword[try] :
identifier[conn] = identifier[sqlite3] . identifier[connect] ( identifier[db] , identifier[detect_types] = identifier[sqlite3] . identifier[PARSE_DECLTYPES] )
identifier[tables] = identifier[_query] ( identifier[conn] ,
literal[string] +
literal[string] ,[ identifier[name] ])
keyword[if] identifier[len] ( identifier[tables] )== literal[int] :
identifier[sql] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[schema] , identifier[six] . identifier[string_types] ):
identifier[sql] = identifier[schema] . identifier[strip] ()
keyword[else] :
identifier[sql] = identifier[_get_sql_from_schema] ( identifier[name] , identifier[schema] )
keyword[if] identifier[sql] != identifier[tables] [ literal[int] ][ literal[int] ]:
keyword[if] identifier[force] :
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[changes] [ literal[string] ]= keyword[True]
identifier[changes] [ literal[string] ][ literal[string] ]= identifier[tables] [ literal[int] ][ literal[int] ]
identifier[changes] [ literal[string] ][ literal[string] ]= identifier[sql]
identifier[changes] [ literal[string] ]= literal[string] + identifier[name] + literal[string]
keyword[else] :
identifier[conn] . identifier[execute] ( literal[string] + identifier[name] + literal[string] )
identifier[conn] . identifier[execute] ( identifier[sql] )
identifier[conn] . identifier[commit] ()
identifier[changes] [ literal[string] ]= keyword[True]
identifier[changes] [ literal[string] ][ literal[string] ]= identifier[tables] [ literal[int] ][ literal[int] ]
identifier[changes] [ literal[string] ][ literal[string] ]= identifier[sql]
identifier[changes] [ literal[string] ]= literal[string] + identifier[name] + literal[string]
keyword[else] :
identifier[changes] [ literal[string] ]= keyword[False]
identifier[changes] [ literal[string] ]= literal[string] + identifier[sql] + literal[string] + identifier[tables] [ literal[int] ][ literal[int] ]
keyword[else] :
identifier[changes] [ literal[string] ]= keyword[True]
identifier[changes] [ literal[string] ]= literal[string] + identifier[name] + literal[string]
keyword[elif] keyword[not] identifier[tables] :
identifier[sql] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[schema] , identifier[six] . identifier[string_types] ):
identifier[sql] = identifier[schema]
keyword[else] :
identifier[sql] = identifier[_get_sql_from_schema] ( identifier[name] , identifier[schema] )
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[changes] [ literal[string] ]= keyword[True]
identifier[changes] [ literal[string] ][ literal[string] ]= identifier[sql]
identifier[changes] [ literal[string] ]= literal[string] + identifier[name] + literal[string]
keyword[else] :
identifier[conn] . identifier[execute] ( identifier[sql] )
identifier[conn] . identifier[commit] ()
identifier[changes] [ literal[string] ]= keyword[True]
identifier[changes] [ literal[string] ][ literal[string] ]= identifier[sql]
identifier[changes] [ literal[string] ]= literal[string] + identifier[name] + literal[string]
keyword[else] :
identifier[changes] [ literal[string] ]= keyword[False]
identifier[changes] [ literal[string] ]= literal[string] + identifier[name]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[changes] [ literal[string] ]= keyword[False]
identifier[changes] [ literal[string] ]= identifier[str] ( identifier[e] )
keyword[finally] :
keyword[if] identifier[conn] :
identifier[conn] . identifier[close] ()
keyword[return] identifier[changes] | def table_present(name, db, schema, force=False):
"""
Make sure the specified table exists with the specified schema
name
The name of the table
db
The name of the database file
schema
The dictionary containing the schema information
force
If the table already exists with a different schema and force is
set to False, the state will fail. If force is set to True, the
existing table will be replaced with the new table
"""
changes = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
conn = None
try:
conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
tables = _query(conn, 'SELECT sql FROM sqlite_master ' + "WHERE type='table' AND name=?", [name])
if len(tables) == 1:
sql = None
if isinstance(schema, six.string_types):
sql = schema.strip() # depends on [control=['if'], data=[]]
else:
sql = _get_sql_from_schema(name, schema)
if sql != tables[0][0]:
if force:
if __opts__['test']:
changes['result'] = True
changes['changes']['old'] = tables[0][0]
changes['changes']['new'] = sql
changes['comment'] = "'" + name + "' will be replaced" # depends on [control=['if'], data=[]]
else:
conn.execute('DROP TABLE `' + name + '`')
conn.execute(sql)
conn.commit()
changes['result'] = True
changes['changes']['old'] = tables[0][0]
changes['changes']['new'] = sql
changes['comment'] = "Replaced '" + name + "'" # depends on [control=['if'], data=[]]
else:
changes['result'] = False
changes['comment'] = 'Expected schema=' + sql + '\nactual schema=' + tables[0][0] # depends on [control=['if'], data=['sql']]
else:
changes['result'] = True
changes['comment'] = "'" + name + "' exists with matching schema" # depends on [control=['if'], data=[]]
elif not tables:
# Create the table
sql = None
if isinstance(schema, six.string_types):
sql = schema # depends on [control=['if'], data=[]]
else:
sql = _get_sql_from_schema(name, schema)
if __opts__['test']:
changes['result'] = True
changes['changes']['new'] = sql
changes['comment'] = "'" + name + "' will be created" # depends on [control=['if'], data=[]]
else:
conn.execute(sql)
conn.commit()
changes['result'] = True
changes['changes']['new'] = sql
changes['comment'] = "Created table '" + name + "'" # depends on [control=['if'], data=[]]
else:
changes['result'] = False
changes['comment'] = 'Multiple tables with the same name=' + name # depends on [control=['try'], data=[]]
except Exception as e:
changes['result'] = False
changes['comment'] = str(e) # depends on [control=['except'], data=['e']]
finally:
if conn:
conn.close() # depends on [control=['if'], data=[]]
return changes |
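A direct-call sketch for table_present. Inside Salt this state would normally be declared from an SLS file and __opts__ is injected by the loader, so running it standalone is illustrative only; the path is a placeholder. A plain string schema is compared verbatim against sqlite_master, so its formatting must match what SQLite stores.

ret = table_present(
    name='users',
    db='/var/cache/app.sqlite',                                        # placeholder path
    schema='CREATE TABLE `users` (id INTEGER PRIMARY KEY, name TEXT)',
    force=False,
)
print(ret['result'], ret['comment'])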
def start_child_span(operation_name, tracer=None, parent=None, tags=None):
"""
Start a new span as a child of parent_span. If parent_span is None,
start a new root span.
:param operation_name: operation name
:param tracer: Tracer or None (defaults to opentracing.tracer)
:param parent: parent Span or None
:param tags: optional tags
:return: new span
"""
tracer = tracer or opentracing.tracer
return tracer.start_span(
operation_name=operation_name,
child_of=parent.context if parent else None,
tags=tags
) | def function[start_child_span, parameter[operation_name, tracer, parent, tags]]:
constant[
Start a new span as a child of parent_span. If parent_span is None,
start a new root span.
:param operation_name: operation name
:param tracer: Tracer or None (defaults to opentracing.tracer)
:param parent: parent Span or None
:param tags: optional tags
:return: new span
]
variable[tracer] assign[=] <ast.BoolOp object at 0x7da1b157f280>
return[call[name[tracer].start_span, parameter[]]] | keyword[def] identifier[start_child_span] ( identifier[operation_name] , identifier[tracer] = keyword[None] , identifier[parent] = keyword[None] , identifier[tags] = keyword[None] ):
literal[string]
identifier[tracer] = identifier[tracer] keyword[or] identifier[opentracing] . identifier[tracer]
keyword[return] identifier[tracer] . identifier[start_span] (
identifier[operation_name] = identifier[operation_name] ,
identifier[child_of] = identifier[parent] . identifier[context] keyword[if] identifier[parent] keyword[else] keyword[None] ,
identifier[tags] = identifier[tags]
) | def start_child_span(operation_name, tracer=None, parent=None, tags=None):
"""
Start a new span as a child of parent_span. If parent_span is None,
start a new root span.
:param operation_name: operation name
:param tracer: Tracer or None (defaults to opentracing.tracer)
:param parent: parent Span or None
:param tags: optional tags
:return: new span
"""
tracer = tracer or opentracing.tracer
return tracer.start_span(operation_name=operation_name, child_of=parent.context if parent else None, tags=tags) |
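A usage sketch following the opentracing-python API that start_child_span wraps; the span names and tag are placeholders.

import opentracing
root = opentracing.tracer.start_span('handle_request')
child = start_child_span('load_user', parent=root, tags={'user.id': 42})
child.finish()
root.finish()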
async def jsk_vc_volume(self, ctx: commands.Context, *, percentage: float):
"""
Adjusts the volume of an audio source if it is supported.
"""
volume = max(0.0, min(1.0, percentage / 100))
source = ctx.guild.voice_client.source
if not isinstance(source, discord.PCMVolumeTransformer):
return await ctx.send("This source doesn't support adjusting volume or "
"the interface to do so is not exposed.")
source.volume = volume
await ctx.send(f"Volume set to {volume * 100:.2f}%") | <ast.AsyncFunctionDef object at 0x7da1b1edafe0> | keyword[async] keyword[def] identifier[jsk_vc_volume] ( identifier[self] , identifier[ctx] : identifier[commands] . identifier[Context] ,*, identifier[percentage] : identifier[float] ):
literal[string]
identifier[volume] = identifier[max] ( literal[int] , identifier[min] ( literal[int] , identifier[percentage] / literal[int] ))
identifier[source] = identifier[ctx] . identifier[guild] . identifier[voice_client] . identifier[source]
keyword[if] keyword[not] identifier[isinstance] ( identifier[source] , identifier[discord] . identifier[PCMVolumeTransformer] ):
keyword[return] keyword[await] identifier[ctx] . identifier[send] ( literal[string]
literal[string] )
identifier[source] . identifier[volume] = identifier[volume]
keyword[await] identifier[ctx] . identifier[send] ( literal[string] ) | async def jsk_vc_volume(self, ctx: commands.Context, *, percentage: float):
"""
Adjusts the volume of an audio source if it is supported.
"""
volume = max(0.0, min(1.0, percentage / 100))
source = ctx.guild.voice_client.source
if not isinstance(source, discord.PCMVolumeTransformer):
return await ctx.send("This source doesn't support adjusting volume or the interface to do so is not exposed.") # depends on [control=['if'], data=[]]
source.volume = volume
await ctx.send(f'Volume set to {volume * 100:.2f}%') |
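The clamp in jsk_vc_volume maps any percentage onto discord.py's 0.0 to 1.0 gain scale; the self-contained arithmetic below shows the boundary behavior.

for pct in (-10, 50, 100, 250):
    print(pct, max(0.0, min(1.0, pct / 100)))   # -10 -> 0.0, 50 -> 0.5, 100 and 250 -> 1.0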
def start(self):
"""
This function determines node and NAT type, saves connectivity details,
and starts any needed servers to be a part of the network. This is
usually the first function called after initialising the Net class.
"""
self.debug_print("Starting networking.")
self.debug_print("Make sure to iterate over replies if you need"
" connection alive management!")
# Register a ctrl + c handler
signal.signal(signal.SIGINT, self.stop)
# Save WAN IP.
self.debug_print("WAN IP = " + str(self.wan_ip))
# Check rendezvous server is up.
try:
rendezvous_con = self.rendezvous.server_connect()
rendezvous_con.close()
except:
raise Exception("Unable to connect to rendezvous server.")
# Started no matter what
# since LAN connections are always possible.
self.start_passive_server()
# Determine NAT type.
if self.nat_type == "unknown":
self.debug_print("Determining NAT type.")
nat_type = self.rendezvous.determine_nat()
if nat_type is not None and nat_type != "unknown":
self.nat_type = nat_type
self.rendezvous.nat_type = nat_type
self.debug_print("NAT type = " + nat_type)
else:
self.debug_print("Unable to determine NAT type.")
# Check the NAT type if the simultaneous node
# type was manually specified.
if self.node_type == "simultaneous":
if self.nat_type not in self.rendezvous.predictable_nats:
self.debug_print("Manual setting of simultanous specified but"
" ignored since NAT does not support it.")
self.node_type = "active"
else:
# Determine node type.
self.debug_print("Determining node type.")
# No checks for manually specifying passive
# (there probably should be.)
if self.node_type == "unknown":
self.node_type = self.determine_node()
# Prevent P2P nodes from running as simultaneous.
if self.net_type == "p2p":
"""
TCP hole punching is reserved specifically for direct networks
(a net object reserved for receiving direct connections
-- p2p is for connecting to the main network. The reason for this
is you can't do multiple TCP hole punches at the same time so
reserved for direct network where it's most needed.
"""
if self.node_type == "simultaneous":
self.debug_print("Simultaneous is not allowed for P2P")
self.node_type = "active"
self.disable_simultaneous()
self.debug_print("Node type = " + self.node_type)
# Close stray cons from determine_node() tests.
self.close_cons()
# Set net started status.
self.is_net_started = 1
# Initialise our UNL details.
self.unl = UNL(
net=self,
dht_node=self.dht_node,
wan_ip=self.wan_ip
)
# Nested calls.
return self | def function[start, parameter[self]]:
constant[
This function determines node and NAT type, saves connectivity details,
and starts any needed servers to be a part of the network. This is
usually the first function called after initialising the Net class.
]
call[name[self].debug_print, parameter[constant[Starting networking.]]]
call[name[self].debug_print, parameter[constant[Make sure to iterate over replies if you need connection alive management!]]]
call[name[signal].signal, parameter[name[signal].SIGINT, name[self].stop]]
call[name[self].debug_print, parameter[binary_operation[constant[WAN IP = ] + call[name[str], parameter[name[self].wan_ip]]]]]
<ast.Try object at 0x7da18dc99060>
call[name[self].start_passive_server, parameter[]]
if compare[name[self].nat_type equal[==] constant[unknown]] begin[:]
call[name[self].debug_print, parameter[constant[Determining NAT type.]]]
variable[nat_type] assign[=] call[name[self].rendezvous.determine_nat, parameter[]]
if <ast.BoolOp object at 0x7da18dc98bb0> begin[:]
name[self].nat_type assign[=] name[nat_type]
name[self].rendezvous.nat_type assign[=] name[nat_type]
call[name[self].debug_print, parameter[binary_operation[constant[NAT type = ] + name[nat_type]]]]
if compare[name[self].node_type equal[==] constant[simultaneous]] begin[:]
if compare[name[self].nat_type <ast.NotIn object at 0x7da2590d7190> name[self].rendezvous.predictable_nats] begin[:]
call[name[self].debug_print, parameter[constant[Manual setting of simultaneous specified but ignored since NAT does not support it.]]]
name[self].node_type assign[=] constant[active]
if compare[name[self].net_type equal[==] constant[p2p]] begin[:]
constant[
TCP hole punching is reserved specifically for direct networks
(a net object reserved for receiving direct connections
-- p2p is for connecting to the main network. The reason for this
is you can't do multiple TCP hole punches at the same time so
reserved for direct network where it's most needed.
]
if compare[name[self].node_type equal[==] constant[simultaneous]] begin[:]
call[name[self].debug_print, parameter[constant[Simultaneous is not allowed for P2P]]]
name[self].node_type assign[=] constant[active]
call[name[self].disable_simultaneous, parameter[]]
call[name[self].debug_print, parameter[binary_operation[constant[Node type = ] + name[self].node_type]]]
call[name[self].close_cons, parameter[]]
name[self].is_net_started assign[=] constant[1]
name[self].unl assign[=] call[name[UNL], parameter[]]
return[name[self]] | keyword[def] identifier[start] ( identifier[self] ):
literal[string]
identifier[self] . identifier[debug_print] ( literal[string] )
identifier[self] . identifier[debug_print] ( literal[string]
literal[string] )
identifier[signal] . identifier[signal] ( identifier[signal] . identifier[SIGINT] , identifier[self] . identifier[stop] )
identifier[self] . identifier[debug_print] ( literal[string] + identifier[str] ( identifier[self] . identifier[wan_ip] ))
keyword[try] :
identifier[rendezvous_con] = identifier[self] . identifier[rendezvous] . identifier[server_connect] ()
identifier[rendezvous_con] . identifier[close] ()
keyword[except] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[self] . identifier[start_passive_server] ()
keyword[if] identifier[self] . identifier[nat_type] == literal[string] :
identifier[self] . identifier[debug_print] ( literal[string] )
identifier[nat_type] = identifier[self] . identifier[rendezvous] . identifier[determine_nat] ()
keyword[if] identifier[nat_type] keyword[is] keyword[not] keyword[None] keyword[and] identifier[nat_type] != literal[string] :
identifier[self] . identifier[nat_type] = identifier[nat_type]
identifier[self] . identifier[rendezvous] . identifier[nat_type] = identifier[nat_type]
identifier[self] . identifier[debug_print] ( literal[string] + identifier[nat_type] )
keyword[else] :
identifier[self] . identifier[debug_print] ( literal[string] )
keyword[if] identifier[self] . identifier[node_type] == literal[string] :
keyword[if] identifier[self] . identifier[nat_type] keyword[not] keyword[in] identifier[self] . identifier[rendezvous] . identifier[predictable_nats] :
identifier[self] . identifier[debug_print] ( literal[string]
literal[string] )
identifier[self] . identifier[node_type] = literal[string]
keyword[else] :
identifier[self] . identifier[debug_print] ( literal[string] )
keyword[if] identifier[self] . identifier[node_type] == literal[string] :
identifier[self] . identifier[node_type] = identifier[self] . identifier[determine_node] ()
keyword[if] identifier[self] . identifier[net_type] == literal[string] :
literal[string]
keyword[if] identifier[self] . identifier[node_type] == literal[string] :
identifier[self] . identifier[debug_print] ( literal[string] )
identifier[self] . identifier[node_type] = literal[string]
identifier[self] . identifier[disable_simultaneous] ()
identifier[self] . identifier[debug_print] ( literal[string] + identifier[self] . identifier[node_type] )
identifier[self] . identifier[close_cons] ()
identifier[self] . identifier[is_net_started] = literal[int]
identifier[self] . identifier[unl] = identifier[UNL] (
identifier[net] = identifier[self] ,
identifier[dht_node] = identifier[self] . identifier[dht_node] ,
identifier[wan_ip] = identifier[self] . identifier[wan_ip]
)
keyword[return] identifier[self] | def start(self):
"""
This function determines node and NAT type, saves connectivity details,
and starts any needed servers to be a part of the network. This is
usually the first function called after initialising the Net class.
"""
self.debug_print('Starting networking.')
self.debug_print('Make sure to iterate over replies if you need connection alive management!') # Register a ctrl + c handler
signal.signal(signal.SIGINT, self.stop) # Save WAN IP.
self.debug_print('WAN IP = ' + str(self.wan_ip)) # Check rendezvous server is up.
try:
rendezvous_con = self.rendezvous.server_connect()
rendezvous_con.close() # depends on [control=['try'], data=[]]
except:
raise Exception('Unable to connect to rendezvous server.') # depends on [control=['except'], data=[]] # Started no matter what
# since LAN connections are always possible.
self.start_passive_server() # Determine NAT type.
if self.nat_type == 'unknown':
self.debug_print('Determining NAT type.')
nat_type = self.rendezvous.determine_nat()
if nat_type is not None and nat_type != 'unknown':
self.nat_type = nat_type
self.rendezvous.nat_type = nat_type
self.debug_print('NAT type = ' + nat_type) # depends on [control=['if'], data=[]]
else:
self.debug_print('Unable to determine NAT type.') # depends on [control=['if'], data=[]] # Check the NAT type if the simultaneous node
# type was manually specified.
if self.node_type == 'simultaneous':
if self.nat_type not in self.rendezvous.predictable_nats:
self.debug_print('Manual setting of simultaneous specified but ignored since NAT does not support it.')
self.node_type = 'active' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else: # Determine node type.
self.debug_print('Determining node type.') # No checks for manually specifying passive
# (there probably should be.)
if self.node_type == 'unknown':
self.node_type = self.determine_node() # depends on [control=['if'], data=[]] # Prevent P2P nodes from running as simultaneous.
if self.net_type == 'p2p':
"\n TCP hole punching is reserved specifically for direct networks\n (a net object reserved for receiving direct connections\n -- p2p is for connecting to the main network. The reason for this\n is you can't do multiple TCP hole punches at the same time so\n reserved for direct network where it's most needed.\n "
if self.node_type == 'simultaneous':
self.debug_print('Simultaneous is not allowed for P2P')
self.node_type = 'active'
self.disable_simultaneous() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self.debug_print('Node type = ' + self.node_type) # Close stray cons from determine_node() tests.
self.close_cons() # Set net started status.
self.is_net_started = 1 # Initialise our UNL details.
self.unl = UNL(net=self, dht_node=self.dht_node, wan_ip=self.wan_ip) # Nested calls.
return self |
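A hedged bootstrap sketch for the start method above. The Net constructor keywords are assumptions inferred from the attributes the method reads (net_type, node_type, nat_type), not a documented signature.

net = Net(net_type='p2p', node_type='unknown', nat_type='unknown')   # assumed signature
net = net.start()                    # start() returns self, so calls can be chained
print(net.node_type, net.nat_type)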
def dhcp_options_present(name, dhcp_options_id=None, vpc_name=None, vpc_id=None,
domain_name=None, domain_name_servers=None, ntp_servers=None,
netbios_name_servers=None, netbios_node_type=None,
tags=None, region=None, key=None, keyid=None, profile=None):
'''
Ensure a set of DHCP options with the given settings exist.
Note that the current implementation only SETS values during option set
creation. It is unable to update option sets in place, and thus merely
verifies the set exists via the given name and/or dhcp_options_id param.
name
(string)
Name of the DHCP options.
vpc_name
(string)
Name of a VPC to which the options should be associated. Either
vpc_name or vpc_id must be provided.
vpc_id
(string)
Id of a VPC to which the options should be associated. Either
vpc_name or vpc_id must be provided.
domain_name
(string)
Domain name to be associated with this option set.
domain_name_servers
(list of strings)
The IP address(es) of up to four domain name servers.
ntp_servers
(list of strings)
The IP address(es) of up to four desired NTP servers.
netbios_name_servers
(list of strings)
The IP address(es) of up to four NetBIOS name servers.
netbios_node_type
(string)
The NetBIOS node type (1, 2, 4, or 8). For more information about
the allowed values, see RFC 2132. The recommended value is 2 at this
time (broadcast and multicast are currently not supported).
tags
(dict of key:value pairs)
A set of tags to be added.
region
(string)
Region to connect to.
key
(string)
Secret key to be used.
keyid
(string)
Access key to be used.
profile
(various)
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
.. versionadded:: 2016.3.0
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}
}
_new = {'domain_name': domain_name,
'domain_name_servers': domain_name_servers,
'ntp_servers': ntp_servers,
'netbios_name_servers': netbios_name_servers,
'netbios_node_type': netbios_node_type
}
# boto provides no "update_dhcp_options()" functionality, and you can't delete it if
# it's attached, and you can't detach it if it's the only one, so just check if it's
# there or not, and make no effort to validate it's actual settings... :(
### TODO - add support for multiple sets of DHCP options, and then for "swapping out"
### sets by creating new, mapping, then deleting the old.
r = __salt__['boto_vpc.dhcp_options_exists'](dhcp_options_id=dhcp_options_id,
dhcp_options_name=name,
region=region, key=key, keyid=keyid,
profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to validate DHCP options: {0}.'.format(r['error']['message'])
return ret
if r.get('exists'):
ret['comment'] = 'DHCP options already present.'
return ret
else:
if __opts__['test']:
ret['comment'] = 'DHCP options {0} are set to be created.'.format(name)
ret['result'] = None
return ret
r = __salt__['boto_vpc.create_dhcp_options'](domain_name=domain_name,
domain_name_servers=domain_name_servers,
ntp_servers=ntp_servers,
netbios_name_servers=netbios_name_servers,
netbios_node_type=netbios_node_type,
dhcp_options_name=name, tags=tags,
vpc_id=vpc_id, vpc_name=vpc_name,
region=region, key=key, keyid=keyid,
profile=profile)
if not r.get('created'):
ret['result'] = False
ret['comment'] = 'Failed to create DHCP options: {0}'.format(r['error']['message'])
return ret
ret['changes']['old'] = {'dhcp_options': None}
ret['changes']['new'] = {'dhcp_options': _new}
ret['comment'] = 'DHCP options {0} created.'.format(name)
return ret | def function[dhcp_options_present, parameter[name, dhcp_options_id, vpc_name, vpc_id, domain_name, domain_name_servers, ntp_servers, netbios_name_servers, netbios_node_type, tags, region, key, keyid, profile]]:
constant[
Ensure a set of DHCP options with the given settings exist.
Note that the current implementation only SETS values during option set
creation. It is unable to update option sets in place, and thus merely
verifies the set exists via the given name and/or dhcp_options_id param.
name
(string)
Name of the DHCP options.
vpc_name
(string)
Name of a VPC to which the options should be associated. Either
vpc_name or vpc_id must be provided.
vpc_id
(string)
Id of a VPC to which the options should be associated. Either
vpc_name or vpc_id must be provided.
domain_name
(string)
Domain name to be associated with this option set.
domain_name_servers
(list of strings)
The IP address(es) of up to four domain name servers.
ntp_servers
(list of strings)
The IP address(es) of up to four desired NTP servers.
netbios_name_servers
(list of strings)
The IP address(es) of up to four NetBIOS name servers.
netbios_node_type
(string)
The NetBIOS node type (1, 2, 4, or 8). For more information about
the allowed values, see RFC 2132. The recommended value is 2 at this
time (broadcast and multicast are currently not supported).
tags
(dict of key:value pairs)
A set of tags to be added.
region
(string)
Region to connect to.
key
(string)
Secret key to be used.
keyid
(string)
Access key to be used.
profile
(various)
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
.. versionadded:: 2016.3.0
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b2169930>, <ast.Constant object at 0x7da1b2168a30>, <ast.Constant object at 0x7da1b21699c0>, <ast.Constant object at 0x7da1b2169810>], [<ast.Name object at 0x7da1b21689a0>, <ast.Constant object at 0x7da1b216a830>, <ast.Constant object at 0x7da1b2168b20>, <ast.Dict object at 0x7da1b21699f0>]]
variable[_new] assign[=] dictionary[[<ast.Constant object at 0x7da1b216a620>, <ast.Constant object at 0x7da1b216b340>, <ast.Constant object at 0x7da1b216b010>, <ast.Constant object at 0x7da1b216a1a0>, <ast.Constant object at 0x7da1b2169ba0>], [<ast.Name object at 0x7da1b2169ab0>, <ast.Name object at 0x7da1b21692a0>, <ast.Name object at 0x7da1b21696c0>, <ast.Name object at 0x7da1b2169bd0>, <ast.Name object at 0x7da1b216a0e0>]]
variable[r] assign[=] call[call[name[__salt__]][constant[boto_vpc.dhcp_options_exists]], parameter[]]
if compare[constant[error] in name[r]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[ret]][constant[comment]] assign[=] call[constant[Failed to validate DHCP options: {0}.].format, parameter[call[call[name[r]][constant[error]]][constant[message]]]]
return[name[ret]]
if call[name[r].get, parameter[constant[exists]]] begin[:]
call[name[ret]][constant[comment]] assign[=] constant[DHCP options already present.]
return[name[ret]] | keyword[def] identifier[dhcp_options_present] ( identifier[name] , identifier[dhcp_options_id] = keyword[None] , identifier[vpc_name] = keyword[None] , identifier[vpc_id] = keyword[None] ,
identifier[domain_name] = keyword[None] , identifier[domain_name_servers] = keyword[None] , identifier[ntp_servers] = keyword[None] ,
identifier[netbios_name_servers] = keyword[None] , identifier[netbios_node_type] = keyword[None] ,
identifier[tags] = keyword[None] , identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] : keyword[True] ,
literal[string] : literal[string] ,
literal[string] :{}
}
identifier[_new] ={ literal[string] : identifier[domain_name] ,
literal[string] : identifier[domain_name_servers] ,
literal[string] : identifier[ntp_servers] ,
literal[string] : identifier[netbios_name_servers] ,
literal[string] : identifier[netbios_node_type]
}
identifier[r] = identifier[__salt__] [ literal[string] ]( identifier[dhcp_options_id] = identifier[dhcp_options_id] ,
identifier[dhcp_options_name] = identifier[name] ,
identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] ,
identifier[profile] = identifier[profile] )
keyword[if] literal[string] keyword[in] identifier[r] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[r] [ literal[string] ][ literal[string] ])
keyword[return] identifier[ret]
keyword[if] identifier[r] . identifier[get] ( literal[string] ):
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
keyword[else] :
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ]= keyword[None]
keyword[return] identifier[ret]
identifier[r] = identifier[__salt__] [ literal[string] ]( identifier[domain_name] = identifier[domain_name] ,
identifier[domain_name_servers] = identifier[domain_name_servers] ,
identifier[ntp_servers] = identifier[ntp_servers] ,
identifier[netbios_name_servers] = identifier[netbios_name_servers] ,
identifier[netbios_node_type] = identifier[netbios_node_type] ,
identifier[dhcp_options_name] = identifier[name] , identifier[tags] = identifier[tags] ,
identifier[vpc_id] = identifier[vpc_id] , identifier[vpc_name] = identifier[vpc_name] ,
identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] ,
identifier[profile] = identifier[profile] )
keyword[if] keyword[not] identifier[r] . identifier[get] ( literal[string] ):
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[r] [ literal[string] ][ literal[string] ])
keyword[return] identifier[ret]
identifier[ret] [ literal[string] ][ literal[string] ]={ literal[string] : keyword[None] }
identifier[ret] [ literal[string] ][ literal[string] ]={ literal[string] : identifier[_new] }
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret] | def dhcp_options_present(name, dhcp_options_id=None, vpc_name=None, vpc_id=None, domain_name=None, domain_name_servers=None, ntp_servers=None, netbios_name_servers=None, netbios_node_type=None, tags=None, region=None, key=None, keyid=None, profile=None):
"""
Ensure a set of DHCP options with the given settings exist.
Note that the current implementation only SETS values during option set
creation. It is unable to update option sets in place, and thus merely
verifies the set exists via the given name and/or dhcp_options_id param.
name
(string)
Name of the DHCP options.
vpc_name
(string)
Name of a VPC to which the options should be associated. Either
vpc_name or vpc_id must be provided.
vpc_id
(string)
Id of a VPC to which the options should be associated. Either
vpc_name or vpc_id must be provided.
domain_name
(string)
    Domain name to be associated with this option set.
domain_name_servers
(list of strings)
The IP address(es) of up to four domain name servers.
ntp_servers
(list of strings)
The IP address(es) of up to four desired NTP servers.
netbios_name_servers
(list of strings)
The IP address(es) of up to four NetBIOS name servers.
netbios_node_type
(string)
The NetBIOS node type (1, 2, 4, or 8). For more information about
    the allowed values, see RFC 2132. The recommended value is 2 at this
time (broadcast and multicast are currently not supported).
tags
(dict of key:value pairs)
A set of tags to be added.
region
(string)
Region to connect to.
key
(string)
Secret key to be used.
keyid
(string)
Access key to be used.
profile
(various)
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
.. versionadded:: 2016.3.0
"""
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
_new = {'domain_name': domain_name, 'domain_name_servers': domain_name_servers, 'ntp_servers': ntp_servers, 'netbios_name_servers': netbios_name_servers, 'netbios_node_type': netbios_node_type}
# boto provides no "update_dhcp_options()" functionality, and you can't delete it if
# it's attached, and you can't detach it if it's the only one, so just check if it's
# there or not, and make no effort to validate it's actual settings... :(
### TODO - add support for multiple sets of DHCP options, and then for "swapping out"
### sets by creating new, mapping, then deleting the old.
r = __salt__['boto_vpc.dhcp_options_exists'](dhcp_options_id=dhcp_options_id, dhcp_options_name=name, region=region, key=key, keyid=keyid, profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to validate DHCP options: {0}.'.format(r['error']['message'])
return ret # depends on [control=['if'], data=['r']]
if r.get('exists'):
ret['comment'] = 'DHCP options already present.'
return ret # depends on [control=['if'], data=[]]
else:
if __opts__['test']:
ret['comment'] = 'DHCP options {0} are set to be created.'.format(name)
ret['result'] = None
return ret # depends on [control=['if'], data=[]]
r = __salt__['boto_vpc.create_dhcp_options'](domain_name=domain_name, domain_name_servers=domain_name_servers, ntp_servers=ntp_servers, netbios_name_servers=netbios_name_servers, netbios_node_type=netbios_node_type, dhcp_options_name=name, tags=tags, vpc_id=vpc_id, vpc_name=vpc_name, region=region, key=key, keyid=keyid, profile=profile)
if not r.get('created'):
ret['result'] = False
ret['comment'] = 'Failed to create DHCP options: {0}'.format(r['error']['message'])
return ret # depends on [control=['if'], data=[]]
ret['changes']['old'] = {'dhcp_options': None}
ret['changes']['new'] = {'dhcp_options': _new}
ret['comment'] = 'DHCP options {0} created.'.format(name)
return ret |
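# Illustrative sketch (not part of the dataset row above): Salt injects
# __salt__/__opts__ into state modules at load time, so one hedged way to
# exercise dhcp_options_present standalone is to stub those dunders on the
# module. The module name 'boto_vpc_state' is hypothetical.
import boto_vpc_state as mod  # hypothetical import of the module under test

mod.__salt__ = {
    # Pretend the option set already exists, so the state short-circuits.
    'boto_vpc.dhcp_options_exists': lambda **kwargs: {'exists': True},
}
mod.__opts__ = {'test': False}
ret = mod.dhcp_options_present('my-dhcp-opts', vpc_name='my-vpc')
assert ret == {'name': 'my-dhcp-opts', 'result': True,
               'comment': 'DHCP options already present.', 'changes': {}}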
def _save_history_to_file(history, path, size=-1):
"""Save a history list to a file for later loading (possibly in another
session).
:param history: the history list to save
:type history: list(str)
    :param path: the path to the file where to save the history
    :type path: str
    :param size: the number of lines to save (0 means no lines, < 0 means
    all lines)
    :type size: int
:returns: None
"""
if size == 0:
return
if size > 0:
history = history[-size:]
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
    # Write linewise to avoid building a large string in memory.
with codecs.open(path, 'w', encoding='utf-8') as histfile:
for line in history:
histfile.write(line)
histfile.write('\n') | def function[_save_history_to_file, parameter[history, path, size]]:
constant[Save a history list to a file for later loading (possibly in another
session).
:param history: the history list to save
:type history: list(str)
    :param path: the path to the file where to save the history
    :type path: str
    :param size: the number of lines to save (0 means no lines, < 0 means
    all lines)
    :type size: int
:returns: None
]
if compare[name[size] equal[==] constant[0]] begin[:]
return[None]
if compare[name[size] greater[>] constant[0]] begin[:]
variable[history] assign[=] call[name[history]][<ast.Slice object at 0x7da1b0723460>]
variable[directory] assign[=] call[name[os].path.dirname, parameter[name[path]]]
if <ast.UnaryOp object at 0x7da1b0720b50> begin[:]
call[name[os].makedirs, parameter[name[directory]]]
with call[name[codecs].open, parameter[name[path], constant[w]]] begin[:]
for taget[name[line]] in starred[name[history]] begin[:]
call[name[histfile].write, parameter[name[line]]]
call[name[histfile].write, parameter[constant[
]]] | keyword[def] identifier[_save_history_to_file] ( identifier[history] , identifier[path] , identifier[size] =- literal[int] ):
literal[string]
keyword[if] identifier[size] == literal[int] :
keyword[return]
keyword[if] identifier[size] > literal[int] :
identifier[history] = identifier[history] [- identifier[size] :]
identifier[directory] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[path] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[directory] ):
identifier[os] . identifier[makedirs] ( identifier[directory] )
keyword[with] identifier[codecs] . identifier[open] ( identifier[path] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[histfile] :
keyword[for] identifier[line] keyword[in] identifier[history] :
identifier[histfile] . identifier[write] ( identifier[line] )
identifier[histfile] . identifier[write] ( literal[string] ) | def _save_history_to_file(history, path, size=-1):
"""Save a history list to a file for later loading (possibly in another
session).
:param history: the history list to save
:type history: list(str)
    :param path: the path to the file where to save the history
    :type path: str
    :param size: the number of lines to save (0 means no lines, < 0 means
    all lines)
    :type size: int
:returns: None
"""
if size == 0:
return # depends on [control=['if'], data=[]]
if size > 0:
history = history[-size:] # depends on [control=['if'], data=['size']]
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory) # depends on [control=['if'], data=[]]
    # Write linewise to avoid building a large string in memory.
with codecs.open(path, 'w', encoding='utf-8') as histfile:
for line in history:
histfile.write(line)
histfile.write('\n') # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['histfile']] |
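# Usage sketch (not part of the dataset row above), assuming
# _save_history_to_file is importable: it trims the history to the last `size`
# entries, creates missing parent directories, and writes one line per entry.
import os
import tempfile

history = ['ls', 'cd /tmp', 'cat notes.txt']
path = os.path.join(tempfile.mkdtemp(), 'shell', 'history.txt')
_save_history_to_file(history, path, size=2)  # keep only the last two entries
with open(path, encoding='utf-8') as f:
    assert f.read() == 'cd /tmp\ncat notes.txt\n'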
def _renumber(a: np.ndarray, keys: np.ndarray, values: np.ndarray) -> np.ndarray:
"""
Renumber 'a' by replacing any occurrence of 'keys' by the corresponding 'values'
"""
ordering = np.argsort(keys)
keys = keys[ordering]
    values = values[ordering]
index = np.digitize(a.ravel(), keys, right=True)
return(values[index].reshape(a.shape)) | def function[_renumber, parameter[a, keys, values]]:
constant[
Renumber 'a' by replacing any occurrence of 'keys' by the corresponding 'values'
]
variable[ordering] assign[=] call[name[np].argsort, parameter[name[keys]]]
variable[keys] assign[=] call[name[keys]][name[ordering]]
variable[values] assign[=] call[name[values]][name[ordering]]
variable[index] assign[=] call[name[np].digitize, parameter[call[name[a].ravel, parameter[]], name[keys]]]
return[call[call[name[values]][name[index]].reshape, parameter[name[a].shape]]] | keyword[def] identifier[_renumber] ( identifier[a] : identifier[np] . identifier[ndarray] , identifier[keys] : identifier[np] . identifier[ndarray] , identifier[values] : identifier[np] . identifier[ndarray] )-> identifier[np] . identifier[ndarray] :
literal[string]
identifier[ordering] = identifier[np] . identifier[argsort] ( identifier[keys] )
identifier[keys] = identifier[keys] [ identifier[ordering] ]
identifier[values] = identifier[values] [ identifier[ordering] ]
identifier[index] = identifier[np] . identifier[digitize] ( identifier[a] . identifier[ravel] (), identifier[keys] , identifier[right] = keyword[True] )
keyword[return] ( identifier[values] [ identifier[index] ]. identifier[reshape] ( identifier[a] . identifier[shape] )) | def _renumber(a: np.ndarray, keys: np.ndarray, values: np.ndarray) -> np.ndarray:
"""
Renumber 'a' by replacing any occurrence of 'keys' by the corresponding 'values'
"""
ordering = np.argsort(keys)
keys = keys[ordering]
    values = values[ordering]
index = np.digitize(a.ravel(), keys, right=True)
return values[index].reshape(a.shape) |
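# Worked example (not part of the dataset row above), using the values-ordering
# fix applied in this row: every occurrence of a key in `a` is replaced by the
# value paired with that key.
import numpy as np

a = np.array([[10, 30], [20, 10]])
keys = np.array([10, 20, 30])
values = np.array([0, 1, 2])
out = _renumber(a, keys, values)
assert (out == np.array([[0, 2], [1, 0]])).all()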
def parse_selection(lexer: Lexer) -> SelectionNode:
"""Selection: Field or FragmentSpread or InlineFragment"""
return (parse_fragment if peek(lexer, TokenKind.SPREAD) else parse_field)(lexer) | def function[parse_selection, parameter[lexer]]:
constant[Selection: Field or FragmentSpread or InlineFragment]
return[call[<ast.IfExp object at 0x7da1b22e84f0>, parameter[name[lexer]]]] | keyword[def] identifier[parse_selection] ( identifier[lexer] : identifier[Lexer] )-> identifier[SelectionNode] :
literal[string]
keyword[return] ( identifier[parse_fragment] keyword[if] identifier[peek] ( identifier[lexer] , identifier[TokenKind] . identifier[SPREAD] ) keyword[else] identifier[parse_field] )( identifier[lexer] ) | def parse_selection(lexer: Lexer) -> SelectionNode:
"""Selection: Field or FragmentSpread or InlineFragment"""
return (parse_fragment if peek(lexer, TokenKind.SPREAD) else parse_field)(lexer) |
def add_input(self, **kwargs):
"""Add workflow input.
Args:
kwargs (dict): A dict with a `name: type` item
and optionally a `default: value` item, where name is the
name (id) of the workflow input (e.g., `dir_in`) and type is
the type of the input (e.g., `'Directory'`).
The type of input parameter can be learned from
`step.inputs(step_name=input_name)`.
Returns:
inputname
Raises:
ValueError: No or multiple parameter(s) have been specified.
"""
self._closed()
def _get_item(args):
"""Get a single item from args."""
if not args:
raise ValueError("No parameter specified.")
item = args.popitem()
if args:
raise ValueError("Too many parameters, not clear what to do "
"with {}".format(kwargs))
return item
symbols = None
input_dict = CommentedMap()
if 'default' in kwargs:
input_dict['default'] = kwargs.pop('default')
if 'label' in kwargs:
input_dict['label'] = kwargs.pop('label')
if 'symbols' in kwargs:
symbols = kwargs.pop('symbols')
name, input_type = _get_item(kwargs)
if input_type == 'enum':
typ = CommentedMap()
typ['type'] = 'enum'
# make sure symbols is set
if symbols is None:
raise ValueError("Please specify the enum's symbols.")
# make sure symbols is not empty
if symbols == []:
raise ValueError("The enum's symbols cannot be empty.")
# make sure the symbols are a list
if type(symbols) != list:
raise ValueError('Symbols should be a list.')
# make sure symbols is a list of strings
symbols = [str(s) for s in symbols]
typ['symbols'] = symbols
input_dict['type'] = typ
else:
# Set the 'type' if we can't use simple notation (because there is
# a default value or a label)
if bool(input_dict):
input_dict['type'] = input_type
msg = '"{}" is already used as a workflow input. Please use a ' +\
'different name.'
if name in self.wf_inputs:
raise ValueError(msg.format(name))
# Add 'type' for complex input types, so the user doesn't have to do it
if isinstance(input_type, dict):
input_dict['type'] = input_type
# Make sure we can use the notation without 'type' if the input allows
# it.
if bool(input_dict):
self.wf_inputs[name] = input_dict
else:
self.wf_inputs[name] = input_type
return Reference(input_name=name) | def function[add_input, parameter[self]]:
constant[Add workflow input.
Args:
kwargs (dict): A dict with a `name: type` item
and optionally a `default: value` item, where name is the
name (id) of the workflow input (e.g., `dir_in`) and type is
the type of the input (e.g., `'Directory'`).
The type of input parameter can be learned from
`step.inputs(step_name=input_name)`.
Returns:
inputname
Raises:
ValueError: No or multiple parameter(s) have been specified.
]
call[name[self]._closed, parameter[]]
def function[_get_item, parameter[args]]:
constant[Get a single item from args.]
if <ast.UnaryOp object at 0x7da20cabe980> begin[:]
<ast.Raise object at 0x7da20cabcc10>
variable[item] assign[=] call[name[args].popitem, parameter[]]
if name[args] begin[:]
<ast.Raise object at 0x7da20cabcb20>
return[name[item]]
variable[symbols] assign[=] constant[None]
variable[input_dict] assign[=] call[name[CommentedMap], parameter[]]
if compare[constant[default] in name[kwargs]] begin[:]
call[name[input_dict]][constant[default]] assign[=] call[name[kwargs].pop, parameter[constant[default]]]
if compare[constant[label] in name[kwargs]] begin[:]
call[name[input_dict]][constant[label]] assign[=] call[name[kwargs].pop, parameter[constant[label]]]
if compare[constant[symbols] in name[kwargs]] begin[:]
variable[symbols] assign[=] call[name[kwargs].pop, parameter[constant[symbols]]]
<ast.Tuple object at 0x7da20cabf400> assign[=] call[name[_get_item], parameter[name[kwargs]]]
if compare[name[input_type] equal[==] constant[enum]] begin[:]
variable[typ] assign[=] call[name[CommentedMap], parameter[]]
call[name[typ]][constant[type]] assign[=] constant[enum]
if compare[name[symbols] is constant[None]] begin[:]
<ast.Raise object at 0x7da20cabe0e0>
if compare[name[symbols] equal[==] list[[]]] begin[:]
<ast.Raise object at 0x7da20cabedd0>
if compare[call[name[type], parameter[name[symbols]]] not_equal[!=] name[list]] begin[:]
<ast.Raise object at 0x7da20cabcdf0>
variable[symbols] assign[=] <ast.ListComp object at 0x7da20c6c6050>
call[name[typ]][constant[symbols]] assign[=] name[symbols]
call[name[input_dict]][constant[type]] assign[=] name[typ]
variable[msg] assign[=] binary_operation[constant["{}" is already used as a workflow input. Please use a ] + constant[different name.]]
if compare[name[name] in name[self].wf_inputs] begin[:]
<ast.Raise object at 0x7da18eb55540>
if call[name[isinstance], parameter[name[input_type], name[dict]]] begin[:]
call[name[input_dict]][constant[type]] assign[=] name[input_type]
if call[name[bool], parameter[name[input_dict]]] begin[:]
call[name[self].wf_inputs][name[name]] assign[=] name[input_dict]
return[call[name[Reference], parameter[]]] | keyword[def] identifier[add_input] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[_closed] ()
keyword[def] identifier[_get_item] ( identifier[args] ):
literal[string]
keyword[if] keyword[not] identifier[args] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[item] = identifier[args] . identifier[popitem] ()
keyword[if] identifier[args] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[kwargs] ))
keyword[return] identifier[item]
identifier[symbols] = keyword[None]
identifier[input_dict] = identifier[CommentedMap] ()
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[input_dict] [ literal[string] ]= identifier[kwargs] . identifier[pop] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[input_dict] [ literal[string] ]= identifier[kwargs] . identifier[pop] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[symbols] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[name] , identifier[input_type] = identifier[_get_item] ( identifier[kwargs] )
keyword[if] identifier[input_type] == literal[string] :
identifier[typ] = identifier[CommentedMap] ()
identifier[typ] [ literal[string] ]= literal[string]
keyword[if] identifier[symbols] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[symbols] ==[]:
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[type] ( identifier[symbols] )!= identifier[list] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[symbols] =[ identifier[str] ( identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[symbols] ]
identifier[typ] [ literal[string] ]= identifier[symbols]
identifier[input_dict] [ literal[string] ]= identifier[typ]
keyword[else] :
keyword[if] identifier[bool] ( identifier[input_dict] ):
identifier[input_dict] [ literal[string] ]= identifier[input_type]
identifier[msg] = literal[string] + literal[string]
keyword[if] identifier[name] keyword[in] identifier[self] . identifier[wf_inputs] :
keyword[raise] identifier[ValueError] ( identifier[msg] . identifier[format] ( identifier[name] ))
keyword[if] identifier[isinstance] ( identifier[input_type] , identifier[dict] ):
identifier[input_dict] [ literal[string] ]= identifier[input_type]
keyword[if] identifier[bool] ( identifier[input_dict] ):
identifier[self] . identifier[wf_inputs] [ identifier[name] ]= identifier[input_dict]
keyword[else] :
identifier[self] . identifier[wf_inputs] [ identifier[name] ]= identifier[input_type]
keyword[return] identifier[Reference] ( identifier[input_name] = identifier[name] ) | def add_input(self, **kwargs):
"""Add workflow input.
Args:
kwargs (dict): A dict with a `name: type` item
and optionally a `default: value` item, where name is the
name (id) of the workflow input (e.g., `dir_in`) and type is
the type of the input (e.g., `'Directory'`).
The type of input parameter can be learned from
`step.inputs(step_name=input_name)`.
Returns:
inputname
Raises:
ValueError: No or multiple parameter(s) have been specified.
"""
self._closed()
def _get_item(args):
"""Get a single item from args."""
if not args:
raise ValueError('No parameter specified.') # depends on [control=['if'], data=[]]
item = args.popitem()
if args:
raise ValueError('Too many parameters, not clear what to do with {}'.format(kwargs)) # depends on [control=['if'], data=[]]
return item
symbols = None
input_dict = CommentedMap()
if 'default' in kwargs:
input_dict['default'] = kwargs.pop('default') # depends on [control=['if'], data=['kwargs']]
if 'label' in kwargs:
input_dict['label'] = kwargs.pop('label') # depends on [control=['if'], data=['kwargs']]
if 'symbols' in kwargs:
symbols = kwargs.pop('symbols') # depends on [control=['if'], data=['kwargs']]
(name, input_type) = _get_item(kwargs)
if input_type == 'enum':
typ = CommentedMap()
typ['type'] = 'enum'
# make sure symbols is set
if symbols is None:
raise ValueError("Please specify the enum's symbols.") # depends on [control=['if'], data=[]]
# make sure symbols is not empty
if symbols == []:
raise ValueError("The enum's symbols cannot be empty.") # depends on [control=['if'], data=[]]
# make sure the symbols are a list
if type(symbols) != list:
raise ValueError('Symbols should be a list.') # depends on [control=['if'], data=[]]
# make sure symbols is a list of strings
symbols = [str(s) for s in symbols]
typ['symbols'] = symbols
input_dict['type'] = typ # depends on [control=['if'], data=[]]
# Set the 'type' if we can't use simple notation (because there is
# a default value or a label)
elif bool(input_dict):
input_dict['type'] = input_type # depends on [control=['if'], data=[]]
msg = '"{}" is already used as a workflow input. Please use a ' + 'different name.'
if name in self.wf_inputs:
raise ValueError(msg.format(name)) # depends on [control=['if'], data=['name']]
# Add 'type' for complex input types, so the user doesn't have to do it
if isinstance(input_type, dict):
input_dict['type'] = input_type # depends on [control=['if'], data=[]]
# Make sure we can use the notation without 'type' if the input allows
# it.
if bool(input_dict):
self.wf_inputs[name] = input_dict # depends on [control=['if'], data=[]]
else:
self.wf_inputs[name] = input_type
return Reference(input_name=name) |
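# Hedged usage sketch (not part of the dataset row above): `wf` stands for an
# instance of the workflow class this method belongs to; the input names are
# illustrative. The comments show what each call stores in wf.wf_inputs.
wf.add_input(dir_in='Directory')        # stored plainly: 'Directory'
wf.add_input(threads='int', default=4)  # stored as {'default': 4, 'type': 'int'}
wf.add_input(mode='enum', symbols=['a', 'b'])
# stored as {'type': {'type': 'enum', 'symbols': ['a', 'b']}}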
def put(index_name, doc_type, identifier, body, force, verbose):
"""Index input data."""
result = current_search_client.index(
index=index_name,
doc_type=doc_type or index_name,
id=identifier,
body=json.load(body),
op_type='index' if force or identifier is None else 'create',
)
if verbose:
click.echo(json.dumps(result)) | def function[put, parameter[index_name, doc_type, identifier, body, force, verbose]]:
constant[Index input data.]
variable[result] assign[=] call[name[current_search_client].index, parameter[]]
if name[verbose] begin[:]
call[name[click].echo, parameter[call[name[json].dumps, parameter[name[result]]]]] | keyword[def] identifier[put] ( identifier[index_name] , identifier[doc_type] , identifier[identifier] , identifier[body] , identifier[force] , identifier[verbose] ):
literal[string]
identifier[result] = identifier[current_search_client] . identifier[index] (
identifier[index] = identifier[index_name] ,
identifier[doc_type] = identifier[doc_type] keyword[or] identifier[index_name] ,
identifier[id] = identifier[identifier] ,
identifier[body] = identifier[json] . identifier[load] ( identifier[body] ),
identifier[op_type] = literal[string] keyword[if] identifier[force] keyword[or] identifier[identifier] keyword[is] keyword[None] keyword[else] literal[string] ,
)
keyword[if] identifier[verbose] :
identifier[click] . identifier[echo] ( identifier[json] . identifier[dumps] ( identifier[result] )) | def put(index_name, doc_type, identifier, body, force, verbose):
"""Index input data."""
result = current_search_client.index(index=index_name, doc_type=doc_type or index_name, id=identifier, body=json.load(body), op_type='index' if force or identifier is None else 'create')
if verbose:
click.echo(json.dumps(result)) # depends on [control=['if'], data=[]] |
def decode_text(s):
"""Decodes a PDFDocEncoding string to Unicode."""
if s.startswith(b'\xfe\xff'):
return unicode(s[2:], 'utf-16be', 'ignore')
else:
return ''.join(PDFDocEncoding[ord(c)] for c in s) | def function[decode_text, parameter[s]]:
constant[Decodes a PDFDocEncoding string to Unicode.]
if call[name[s].startswith, parameter[constant[b'\xfe\xff']]] begin[:]
return[call[name[unicode], parameter[call[name[s]][<ast.Slice object at 0x7da2041dbd90>], constant[utf-16be], constant[ignore]]]] | keyword[def] identifier[decode_text] ( identifier[s] ):
literal[string]
keyword[if] identifier[s] . identifier[startswith] ( literal[string] ):
keyword[return] identifier[unicode] ( identifier[s] [ literal[int] :], literal[string] , literal[string] )
keyword[else] :
keyword[return] literal[string] . identifier[join] ( identifier[PDFDocEncoding] [ identifier[ord] ( identifier[c] )] keyword[for] identifier[c] keyword[in] identifier[s] ) | def decode_text(s):
"""Decodes a PDFDocEncoding string to Unicode."""
if s.startswith(b'\xfe\xff'):
return unicode(s[2:], 'utf-16be', 'ignore') # depends on [control=['if'], data=[]]
else:
return ''.join((PDFDocEncoding[ord(c)] for c in s)) |
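# Quick demonstration (not part of the dataset row above), runnable under
# Python 2 where this function's `unicode` builtin exists: a UTF-16BE BOM
# triggers direct decoding; any other byte string would instead be mapped
# through the PDFDocEncoding table assumed to be defined alongside the function.
assert decode_text(b'\xfe\xff\x00H\x00i') == u'Hi'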
def sequence_names(fasta):
"""
return a list of the sequence IDs in a FASTA file
"""
sequences = SeqIO.parse(fasta, "fasta")
records = [record.id for record in sequences]
return records | def function[sequence_names, parameter[fasta]]:
constant[
return a list of the sequence IDs in a FASTA file
]
variable[sequences] assign[=] call[name[SeqIO].parse, parameter[name[fasta], constant[fasta]]]
variable[records] assign[=] <ast.ListComp object at 0x7da1b18a11e0>
return[name[records]] | keyword[def] identifier[sequence_names] ( identifier[fasta] ):
literal[string]
identifier[sequences] = identifier[SeqIO] . identifier[parse] ( identifier[fasta] , literal[string] )
identifier[records] =[ identifier[record] . identifier[id] keyword[for] identifier[record] keyword[in] identifier[sequences] ]
keyword[return] identifier[records] | def sequence_names(fasta):
"""
return a list of the sequence IDs in a FASTA file
"""
sequences = SeqIO.parse(fasta, 'fasta')
records = [record.id for record in sequences]
return records |
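# Usage sketch (not part of the dataset row above), assuming Biopython is
# installed; 'reads.fasta' is a hypothetical FASTA file on disk.
ids = sequence_names('reads.fasta')
print(ids)  # e.g. ['read_1', 'read_2']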
def data_filler_user_agent(self, number_of_rows, db):
'''creates and fills the table with user agent data
'''
try:
user_agent = db
data_list = list()
for i in range(0, number_of_rows):
post_uo_reg = {
"id": rnd_id_generator(self),
"ip": self.faker.ipv4(),
"countrycode": self.faker.country_code(),
"useragent": self.faker.user_agent()
}
user_agent.save(post_uo_reg)
logger.warning(
'user_agent Commits are successful after write job!',
extra=d)
except Exception as e:
logger.error(e, extra=d) | def function[data_filler_user_agent, parameter[self, number_of_rows, db]]:
constant[creates and fills the table with user agent data
]
<ast.Try object at 0x7da1b0716260> | keyword[def] identifier[data_filler_user_agent] ( identifier[self] , identifier[number_of_rows] , identifier[db] ):
literal[string]
keyword[try] :
identifier[user_agent] = identifier[db]
identifier[data_list] = identifier[list] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[number_of_rows] ):
identifier[post_uo_reg] ={
literal[string] : identifier[rnd_id_generator] ( identifier[self] ),
literal[string] : identifier[self] . identifier[faker] . identifier[ipv4] (),
literal[string] : identifier[self] . identifier[faker] . identifier[country_code] (),
literal[string] : identifier[self] . identifier[faker] . identifier[user_agent] ()
}
identifier[user_agent] . identifier[save] ( identifier[post_uo_reg] )
identifier[logger] . identifier[warning] (
literal[string] ,
identifier[extra] = identifier[d] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( identifier[e] , identifier[extra] = identifier[d] ) | def data_filler_user_agent(self, number_of_rows, db):
"""creates and fills the table with user agent data
"""
try:
user_agent = db
data_list = list()
for i in range(0, number_of_rows):
post_uo_reg = {'id': rnd_id_generator(self), 'ip': self.faker.ipv4(), 'countrycode': self.faker.country_code(), 'useragent': self.faker.user_agent()}
user_agent.save(post_uo_reg) # depends on [control=['for'], data=[]]
logger.warning('user_agent Commits are successful after write job!', extra=d) # depends on [control=['try'], data=[]]
except Exception as e:
logger.error(e, extra=d) # depends on [control=['except'], data=['e']] |
def add_done_callback(self, fn):
"""Attaches the given callback to the `Future`.
It will be invoked with the `Future` as its argument when the Future
has finished running and its result is available. In Tornado
consider using `.IOLoop.add_future` instead of calling
`add_done_callback` directly.
"""
if self._done:
fn(self)
else:
self._callbacks.append(fn) | def function[add_done_callback, parameter[self, fn]]:
constant[Attaches the given callback to the `Future`.
It will be invoked with the `Future` as its argument when the Future
has finished running and its result is available. In Tornado
consider using `.IOLoop.add_future` instead of calling
`add_done_callback` directly.
]
if name[self]._done begin[:]
call[name[fn], parameter[name[self]]] | keyword[def] identifier[add_done_callback] ( identifier[self] , identifier[fn] ):
literal[string]
keyword[if] identifier[self] . identifier[_done] :
identifier[fn] ( identifier[self] )
keyword[else] :
identifier[self] . identifier[_callbacks] . identifier[append] ( identifier[fn] ) | def add_done_callback(self, fn):
"""Attaches the given callback to the `Future`.
It will be invoked with the `Future` as its argument when the Future
has finished running and its result is available. In Tornado
consider using `.IOLoop.add_future` instead of calling
`add_done_callback` directly.
"""
if self._done:
fn(self) # depends on [control=['if'], data=[]]
else:
self._callbacks.append(fn) |
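# Minimal sketch of the callback contract (not part of the dataset row above):
# assuming a Tornado-style Future whose set_result marks the future done and
# flushes the pending callbacks, the callback receives the future itself.
f = Future()
f.add_done_callback(lambda fut: print('done, result =', fut.result()))
f.set_result(42)  # resolves the future, which invokes the callback above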
def format_(blocks):
"""Produce Python module from blocks of tests
Arguments:
blocks (list): Blocks of tests from func:`parse()`
"""
tests = list()
function_count = 0 # For each test to have a unique name
for block in blocks:
# Validate docstring format of body
if not any(line[:3] == ">>>" for line in block["body"]):
# A doctest requires at least one `>>>` directive.
block["body"].insert(0, ">>> assert False, "
"'Body must be in docstring format'\n")
# Validate binding on first line
if not block["binding"] in ("PySide", "PySide2", "PyQt5", "PyQt4"):
block["body"].insert(0, ">>> assert False, "
"'Invalid binding'\n")
        if sys.version_info > (3, 4) and block["binding"] in ("PySide",):
# Skip caveat test if it requires PySide on Python > 3.4
continue
else:
function_count += 1
block["header"] = block["header"]
block["count"] = str(function_count)
block["body"] = " ".join(block["body"])
tests.append("""\
def test_{count}_{header}():
'''Test {header}
>>> import os, sys
>>> PYTHON = sys.version_info[0]
>>> long = int if PYTHON == 3 else long
>>> _ = os.environ.pop("QT_VERBOSE", None) # Disable debug output
>>> os.environ["QT_PREFERRED_BINDING"] = "{binding}"
{body}
'''
""".format(**block))
return tests | def function[format_, parameter[blocks]]:
constant[Produce Python module from blocks of tests
Arguments:
blocks (list): Blocks of tests from func:`parse()`
]
variable[tests] assign[=] call[name[list], parameter[]]
variable[function_count] assign[=] constant[0]
for taget[name[block]] in starred[name[blocks]] begin[:]
if <ast.UnaryOp object at 0x7da1b17f7040> begin[:]
call[call[name[block]][constant[body]].insert, parameter[constant[0], constant[>>> assert False, 'Body must be in docstring format'
]]]
if <ast.UnaryOp object at 0x7da1b17f5390> begin[:]
call[call[name[block]][constant[body]].insert, parameter[constant[0], constant[>>> assert False, 'Invalid binding'
]]]
if <ast.BoolOp object at 0x7da1b17f7760> begin[:]
continue
return[name[tests]] | keyword[def] identifier[format_] ( identifier[blocks] ):
literal[string]
identifier[tests] = identifier[list] ()
identifier[function_count] = literal[int]
keyword[for] identifier[block] keyword[in] identifier[blocks] :
keyword[if] keyword[not] identifier[any] ( identifier[line] [: literal[int] ]== literal[string] keyword[for] identifier[line] keyword[in] identifier[block] [ literal[string] ]):
identifier[block] [ literal[string] ]. identifier[insert] ( literal[int] , literal[string]
literal[string] )
keyword[if] keyword[not] identifier[block] [ literal[string] ] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ):
identifier[block] [ literal[string] ]. identifier[insert] ( literal[int] , literal[string]
literal[string] )
        keyword[if] identifier[sys] . identifier[version_info] >( literal[int] , literal[int] ) keyword[and] identifier[block] [ literal[string] ] keyword[in] ( literal[string] ,):
keyword[continue]
keyword[else] :
identifier[function_count] += literal[int]
identifier[block] [ literal[string] ]= identifier[block] [ literal[string] ]
identifier[block] [ literal[string] ]= identifier[str] ( identifier[function_count] )
identifier[block] [ literal[string] ]= literal[string] . identifier[join] ( identifier[block] [ literal[string] ])
identifier[tests] . identifier[append] ( literal[string] . identifier[format] (** identifier[block] ))
keyword[return] identifier[tests] | def format_(blocks):
"""Produce Python module from blocks of tests
Arguments:
blocks (list): Blocks of tests from func:`parse()`
"""
tests = list()
function_count = 0 # For each test to have a unique name
for block in blocks:
# Validate docstring format of body
if not any((line[:3] == '>>>' for line in block['body'])):
# A doctest requires at least one `>>>` directive.
block['body'].insert(0, ">>> assert False, 'Body must be in docstring format'\n") # depends on [control=['if'], data=[]]
# Validate binding on first line
if not block['binding'] in ('PySide', 'PySide2', 'PyQt5', 'PyQt4'):
block['body'].insert(0, ">>> assert False, 'Invalid binding'\n") # depends on [control=['if'], data=[]]
        if sys.version_info > (3, 4) and block['binding'] in ('PySide',):
# Skip caveat test if it requires PySide on Python > 3.4
continue # depends on [control=['if'], data=[]]
else:
function_count += 1
block['header'] = block['header']
block['count'] = str(function_count)
block['body'] = ' '.join(block['body'])
tests.append('\ndef test_{count}_{header}():\n \'\'\'Test {header}\n\n >>> import os, sys\n >>> PYTHON = sys.version_info[0]\n >>> long = int if PYTHON == 3 else long\n >>> _ = os.environ.pop("QT_VERBOSE", None) # Disable debug output\n >>> os.environ["QT_PREFERRED_BINDING"] = "{binding}"\n {body}\n \'\'\'\n\n '.format(**block)) # depends on [control=['for'], data=['block']]
return tests |
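# Illustration (not part of the dataset row above): format_ turns a parsed
# block into the source of one doctest-wrapped test function. The block dict
# is hypothetical but uses the keys the function reads (header, binding, body).
block = {'header': 'import_works', 'binding': 'PyQt5',
         'body': ['>>> from Qt import QtWidgets\n']}
tests = format_([block])
print(tests[0])  # a "def test_1_import_works():" function with a doctest body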
def setup(self, context):
"""Implements TextFile Generator's setup method"""
myindex = context.get_partition_index()
self._files_to_consume = self._files[myindex::context.get_num_partitions()]
self.logger.info("TextFileSpout files to consume %s" % self._files_to_consume)
self._lines_to_consume = self._get_next_lines()
self._emit_count = 0 | def function[setup, parameter[self, context]]:
constant[Implements TextFile Generator's setup method]
variable[myindex] assign[=] call[name[context].get_partition_index, parameter[]]
name[self]._files_to_consume assign[=] call[name[self]._files][<ast.Slice object at 0x7da20c76e6b0>]
call[name[self].logger.info, parameter[binary_operation[constant[TextFileSpout files to consume %s] <ast.Mod object at 0x7da2590d6920> name[self]._files_to_consume]]]
name[self]._lines_to_consume assign[=] call[name[self]._get_next_lines, parameter[]]
name[self]._emit_count assign[=] constant[0] | keyword[def] identifier[setup] ( identifier[self] , identifier[context] ):
literal[string]
identifier[myindex] = identifier[context] . identifier[get_partition_index] ()
identifier[self] . identifier[_files_to_consume] = identifier[self] . identifier[_files] [ identifier[myindex] :: identifier[context] . identifier[get_num_partitions] ()]
identifier[self] . identifier[logger] . identifier[info] ( literal[string] % identifier[self] . identifier[_files_to_consume] )
identifier[self] . identifier[_lines_to_consume] = identifier[self] . identifier[_get_next_lines] ()
identifier[self] . identifier[_emit_count] = literal[int] | def setup(self, context):
"""Implements TextFile Generator's setup method"""
myindex = context.get_partition_index()
self._files_to_consume = self._files[myindex::context.get_num_partitions()]
self.logger.info('TextFileSpout files to consume %s' % self._files_to_consume)
self._lines_to_consume = self._get_next_lines()
self._emit_count = 0 |
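# The partitioning idiom used in setup() (not part of the dataset row above):
# striding with files[index::n] deals the file list round-robin across the n
# spout instances, so each file is consumed by exactly one instance.
files = ['a.txt', 'b.txt', 'c.txt', 'd.txt', 'e.txt']
n = 2
assert files[0::n] == ['a.txt', 'c.txt', 'e.txt']
assert files[1::n] == ['b.txt', 'd.txt']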
def related_tags(self,release_id=None,tag_names=None,response_type=None,params=None):
"""
Function to request FRED related tags for a particular release.
FRED tags are attributes assigned to series.
Series are assigned tags and releases. Indirectly through series,
it is possible to get the tags for a category. No tags exist for a
release that does not have series.
`<https://research.stlouisfed.org/docs/api/fred/release_related_tags.html>`_
:arg int release_id: The id for a release. Required.
:arg str tag_names: Tag names that series match. Separate with semicolon as in "income;bea". Required
:arg str response_type: File extension of response. Options are 'xml', 'json',
    'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 1000
:arg int offset: Data offset. Options >=0
:arg str order_by: Order results by values of the specified attribute. Options are 'series_count',
'popularity', 'created', 'name', 'group_id'
:arg str sort_order: Sort results for attribute values specified by order_by. Options are 'asc','desc'
:arg str exclude_tag_names: Tag names to exclude. Separate with semicolon as in "income;bea"
:arg str tag_group_id: Tag ID to filter tags by. Options are 'freq', 'gen', 'geo', 'geot', 'rls', 'seas', 'src'
:arg str search_text: The words to find matching tags with. For example 'mortgage rates'
:arg bool ssl_verify: To verify HTTPs.
"""
    path='/release/related_tags?'
    params = params if params is not None else {}  # guard: the default params=None cannot take item assignment
    params['release_id'], params['tag_names'] = release_id, tag_names
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response | def function[related_tags, parameter[self, release_id, tag_names, response_type, params]]:
constant[
Function to request FRED related tags for a particular release.
FRED tags are attributes assigned to series.
Series are assigned tags and releases. Indirectly through series,
it is possible to get the tags for a category. No tags exist for a
release that does not have series.
`<https://research.stlouisfed.org/docs/api/fred/release_related_tags.html>`_
:arg int release_id: The id for a release. Required.
:arg str tag_names: Tag names that series match. Separate with semicolon as in "income;bea". Required
:arg str response_type: File extension of response. Options are 'xml', 'json',
    'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 1000
:arg int offset: Data offset. Options >=0
:arg str order_by: Order results by values of the specified attribute. Options are 'series_count',
'popularity', 'created', 'name', 'group_id'
:arg str sort_order: Sort results for attribute values specified by order_by. Options are 'asc','desc'
:arg str exclude_tag_names: Tag names to exclude. Separate with semicolon as in "income;bea"
:arg str tag_group_id: Tag ID to filter tags by. Options are 'freq', 'gen', 'geo', 'geot', 'rls', 'seas', 'src'
:arg str search_text: The words to find matching tags with. For example 'mortgage rates'
:arg bool ssl_verify: To verify HTTPs.
]
variable[path] assign[=] constant[/release/related_tags?]
<ast.Tuple object at 0x7da1b12cd810> assign[=] tuple[[<ast.Name object at 0x7da1b124fac0>, <ast.Name object at 0x7da1b124eb00>]]
variable[response_type] assign[=] <ast.IfExp object at 0x7da1b124e920>
if compare[name[response_type] not_equal[!=] constant[xml]] begin[:]
call[name[params]][constant[file_type]] assign[=] constant[json]
variable[response] assign[=] call[name[_get_request], parameter[name[self].url_root, name[self].api_key, name[path], name[response_type], name[params], name[self].ssl_verify]]
return[name[response]] | keyword[def] identifier[related_tags] ( identifier[self] , identifier[release_id] = keyword[None] , identifier[tag_names] = keyword[None] , identifier[response_type] = keyword[None] , identifier[params] = keyword[None] ):
literal[string]
identifier[path] = literal[string]
identifier[params] [ literal[string] ], identifier[params] [ literal[string] ]= identifier[release_id] , identifier[tag_names]
identifier[response_type] = identifier[response_type] keyword[if] identifier[response_type] keyword[else] identifier[self] . identifier[response_type]
keyword[if] identifier[response_type] != literal[string] : identifier[params] [ literal[string] ]= literal[string]
identifier[response] = identifier[_get_request] ( identifier[self] . identifier[url_root] , identifier[self] . identifier[api_key] , identifier[path] , identifier[response_type] , identifier[params] , identifier[self] . identifier[ssl_verify] )
keyword[return] identifier[response] | def related_tags(self, release_id=None, tag_names=None, response_type=None, params=None):
"""
Function to request FRED related tags for a particular release.
FRED tags are attributes assigned to series.
Series are assigned tags and releases. Indirectly through series,
it is possible to get the tags for a category. No tags exist for a
release that does not have series.
`<https://research.stlouisfed.org/docs/api/fred/release_related_tags.html>`_
:arg int release_id: The id for a release. Required.
:arg str tag_names: Tag names that series match. Separate with semicolon as in "income;bea". Required
:arg str response_type: File extension of response. Options are 'xml', 'json',
    'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 1000
:arg int offset: Data offset. Options >=0
:arg str order_by: Order results by values of the specified attribute. Options are 'series_count',
'popularity', 'created', 'name', 'group_id'
:arg str sort_order: Sort results for attribute values specified by order_by. Options are 'asc','desc'
:arg str exclude_tag_names: Tag names to exclude. Separate with semicolon as in "income;bea"
:arg str tag_group_id: Tag ID to filter tags by. Options are 'freq', 'gen', 'geo', 'geot', 'rls', 'seas', 'src'
:arg str search_text: The words to find matching tags with. For example 'mortgage rates'
:arg bool ssl_verify: To verify HTTPs.
"""
    path = '/release/related_tags?'
    params = params if params is not None else {}  # guard: the default params=None cannot take item assignment
    (params['release_id'], params['tag_names']) = (release_id, tag_names)
response_type = response_type if response_type else self.response_type
if response_type != 'xml':
params['file_type'] = 'json' # depends on [control=['if'], data=[]]
response = _get_request(self.url_root, self.api_key, path, response_type, params, self.ssl_verify)
return response |
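# Hedged usage sketch (not part of the dataset row above): `fred` stands for an
# instance of the client class this method belongs to, and the constructor name
# is hypothetical; the argument names mirror the docstring above.
fred = Fred(api_key='...')
tags = fred.related_tags(release_id=86, tag_names='sa;foreign',
                         response_type='json', params={'limit': 10})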
def __add_shared(self, original_token):
"""Adds a token, normalizing the SID and import reference to this table."""
sid = self.__new_sid()
token = SymbolToken(original_token.text, sid, self.__import_location(sid))
self.__add(token)
return token | def function[__add_shared, parameter[self, original_token]]:
constant[Adds a token, normalizing the SID and import reference to this table.]
variable[sid] assign[=] call[name[self].__new_sid, parameter[]]
variable[token] assign[=] call[name[SymbolToken], parameter[name[original_token].text, name[sid], call[name[self].__import_location, parameter[name[sid]]]]]
call[name[self].__add, parameter[name[token]]]
return[name[token]] | keyword[def] identifier[__add_shared] ( identifier[self] , identifier[original_token] ):
literal[string]
identifier[sid] = identifier[self] . identifier[__new_sid] ()
identifier[token] = identifier[SymbolToken] ( identifier[original_token] . identifier[text] , identifier[sid] , identifier[self] . identifier[__import_location] ( identifier[sid] ))
identifier[self] . identifier[__add] ( identifier[token] )
keyword[return] identifier[token] | def __add_shared(self, original_token):
"""Adds a token, normalizing the SID and import reference to this table."""
sid = self.__new_sid()
token = SymbolToken(original_token.text, sid, self.__import_location(sid))
self.__add(token)
return token |
def returnValue(self, key, last=False):
'''Return the key's value for the first entry in the current list.
        If 'last=True', then the last entry is referenced.
        Returns None if the list is empty or the key is missing.
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "order": 2},
... {"name": "Larry", "age": 18, "order": 3},
... {"name": "Joe", "age": 20, "income": 15000, "order": 1},
... {"name": "Bill", "age": 19, "income": 29000, "order": 4},
... ]
>>> print PLOD(test).returnValue("name")
Jim
>>> print PLOD(test).sort("name").returnValue("name", last=True)
Larry
>>> print PLOD(test).sort("name").returnValue("income", last=True)
None
:param last:
If True, the last entry is used rather than the first.
:return:
A value, or None if the list is empty or the key is missing.
'''
row = self.returnOneEntry(last=last)
if not row:
return None
dict_row = internal.convert_to_dict(row)
return dict_row.get(key, None) | def function[returnValue, parameter[self, key, last]]:
constant[Return the key's value for the first entry in the current list.
    If 'last=True', then the last entry is referenced.
    Returns None if the list is empty or the key is missing.
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "order": 2},
... {"name": "Larry", "age": 18, "order": 3},
... {"name": "Joe", "age": 20, "income": 15000, "order": 1},
... {"name": "Bill", "age": 19, "income": 29000, "order": 4},
... ]
>>> print PLOD(test).returnValue("name")
Jim
>>> print PLOD(test).sort("name").returnValue("name", last=True)
Larry
>>> print PLOD(test).sort("name").returnValue("income", last=True)
None
:param last:
If True, the last entry is used rather than the first.
:return:
A value, or None if the list is empty or the key is missing.
]
variable[row] assign[=] call[name[self].returnOneEntry, parameter[]]
if <ast.UnaryOp object at 0x7da20c7960e0> begin[:]
return[constant[None]]
variable[dict_row] assign[=] call[name[internal].convert_to_dict, parameter[name[row]]]
return[call[name[dict_row].get, parameter[name[key], constant[None]]]] | keyword[def] identifier[returnValue] ( identifier[self] , identifier[key] , identifier[last] = keyword[False] ):
literal[string]
identifier[row] = identifier[self] . identifier[returnOneEntry] ( identifier[last] = identifier[last] )
keyword[if] keyword[not] identifier[row] :
keyword[return] keyword[None]
identifier[dict_row] = identifier[internal] . identifier[convert_to_dict] ( identifier[row] )
keyword[return] identifier[dict_row] . identifier[get] ( identifier[key] , keyword[None] ) | def returnValue(self, key, last=False):
"""Return the key's value for the first entry in the current list.
    If 'last=True', then the last entry is referenced.
    Returns None if the list is empty or the key is missing.
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "order": 2},
... {"name": "Larry", "age": 18, "order": 3},
... {"name": "Joe", "age": 20, "income": 15000, "order": 1},
... {"name": "Bill", "age": 19, "income": 29000, "order": 4},
... ]
>>> print PLOD(test).returnValue("name")
Jim
>>> print PLOD(test).sort("name").returnValue("name", last=True)
Larry
>>> print PLOD(test).sort("name").returnValue("income", last=True)
None
:param last:
If True, the last entry is used rather than the first.
:return:
A value, or None if the list is empty or the key is missing.
"""
row = self.returnOneEntry(last=last)
if not row:
return None # depends on [control=['if'], data=[]]
dict_row = internal.convert_to_dict(row)
return dict_row.get(key, None) |
def find_version(file_path):
"""
Scrape version information from specified file path.
"""
with open(file_path, 'r') as f:
file_contents = f.read()
version_match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]",
file_contents, re.M)
if version_match:
return version_match.group(1)
else:
raise RuntimeError("unable to find version string") | def function[find_version, parameter[file_path]]:
constant[
Scrape version information from specified file path.
]
with call[name[open], parameter[name[file_path], constant[r]]] begin[:]
variable[file_contents] assign[=] call[name[f].read, parameter[]]
variable[version_match] assign[=] call[name[re].search, parameter[constant[^__version__\s*=\s*['\"]([^'\"]*)['\"]], name[file_contents], name[re].M]]
if name[version_match] begin[:]
return[call[name[version_match].group, parameter[constant[1]]]] | keyword[def] identifier[find_version] ( identifier[file_path] ):
literal[string]
keyword[with] identifier[open] ( identifier[file_path] , literal[string] ) keyword[as] identifier[f] :
identifier[file_contents] = identifier[f] . identifier[read] ()
identifier[version_match] = identifier[re] . identifier[search] ( literal[string] ,
identifier[file_contents] , identifier[re] . identifier[M] )
keyword[if] identifier[version_match] :
keyword[return] identifier[version_match] . identifier[group] ( literal[int] )
keyword[else] :
keyword[raise] identifier[RuntimeError] ( literal[string] ) | def find_version(file_path):
"""
Scrape version information from specified file path.
"""
with open(file_path, 'r') as f:
file_contents = f.read() # depends on [control=['with'], data=['f']]
version_match = re.search('^__version__\\s*=\\s*[\'\\"]([^\'\\"]*)[\'\\"]', file_contents, re.M)
if version_match:
return version_match.group(1) # depends on [control=['if'], data=[]]
else:
raise RuntimeError('unable to find version string') |
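# Usage sketch (not part of the dataset row above): point find_version at a
# module that assigns __version__ at top level; the path is hypothetical.
version = find_version('mypackage/__init__.py')
print(version)  # e.g. '1.2.3'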
def run(
cmd,
env=None,
return_object=False,
block=True,
cwd=None,
verbose=False,
nospin=False,
spinner_name=None,
combine_stderr=True,
display_limit=200,
write_to_stdout=True,
):
"""Use `subprocess.Popen` to get the output of a command and decode it.
:param list cmd: A list representing the command you want to run.
:param dict env: Additional environment settings to pass through to the subprocess.
:param bool return_object: When True, returns the whole subprocess instance
:param bool block: When False, returns a potentially still-running :class:`subprocess.Popen` instance
    :param str cwd: Current working directory context to use for spawning the subprocess.
:param bool verbose: Whether to print stdout in real time when non-blocking.
:param bool nospin: Whether to disable the cli spinner.
:param str spinner_name: The name of the spinner to use if enabled, defaults to bouncingBar
:param bool combine_stderr: Optionally merge stdout and stderr in the subprocess, false if nonblocking.
    :param int display_limit: The max width of output lines to display when using a spinner.
:param bool write_to_stdout: Whether to write to stdout when using a spinner, default True.
:returns: A 2-tuple of (output, error) or a :class:`subprocess.Popen` object.
    .. Warning:: Merging standard out and standard error in a nonblocking subprocess
can cause errors in some cases and may not be ideal. Consider disabling
this functionality.
"""
_env = os.environ.copy()
if env:
_env.update(env)
if six.PY2:
fs_encode = partial(to_bytes, encoding=locale_encoding)
_env = {fs_encode(k): fs_encode(v) for k, v in _env.items()}
else:
_env = {k: fs_str(v) for k, v in _env.items()}
if not spinner_name:
spinner_name = "bouncingBar"
if six.PY2:
if isinstance(cmd, six.string_types):
cmd = cmd.encode("utf-8")
elif isinstance(cmd, (list, tuple)):
cmd = [c.encode("utf-8") for c in cmd]
if not isinstance(cmd, Script):
cmd = Script.parse(cmd)
if block or not return_object:
combine_stderr = False
start_text = ""
with spinner(
spinner_name=spinner_name,
start_text=start_text,
nospin=nospin,
write_to_stdout=write_to_stdout,
) as sp:
return _create_subprocess(
cmd,
env=_env,
return_object=return_object,
block=block,
cwd=cwd,
verbose=verbose,
spinner=sp,
combine_stderr=combine_stderr,
start_text=start_text,
write_to_stdout=True,
) | def function[run, parameter[cmd, env, return_object, block, cwd, verbose, nospin, spinner_name, combine_stderr, display_limit, write_to_stdout]]:
constant[Use `subprocess.Popen` to get the output of a command and decode it.
:param list cmd: A list representing the command you want to run.
:param dict env: Additional environment settings to pass through to the subprocess.
:param bool return_object: When True, returns the whole subprocess instance
:param bool block: When False, returns a potentially still-running :class:`subprocess.Popen` instance
:param str cwd: Current working directory contect to use for spawning the subprocess.
:param bool verbose: Whether to print stdout in real time when non-blocking.
:param bool nospin: Whether to disable the cli spinner.
:param str spinner_name: The name of the spinner to use if enabled, defaults to bouncingBar
:param bool combine_stderr: Optionally merge stdout and stderr in the subprocess, false if nonblocking.
    :param int display_limit: The max width of output lines to display when using a spinner.
:param bool write_to_stdout: Whether to write to stdout when using a spinner, default True.
:returns: A 2-tuple of (output, error) or a :class:`subprocess.Popen` object.
    .. Warning:: Merging standard out and standard error in a nonblocking subprocess
can cause errors in some cases and may not be ideal. Consider disabling
this functionality.
]
variable[_env] assign[=] call[name[os].environ.copy, parameter[]]
if name[env] begin[:]
call[name[_env].update, parameter[name[env]]]
if name[six].PY2 begin[:]
variable[fs_encode] assign[=] call[name[partial], parameter[name[to_bytes]]]
variable[_env] assign[=] <ast.DictComp object at 0x7da18fe916f0>
if <ast.UnaryOp object at 0x7da18fe92bf0> begin[:]
variable[spinner_name] assign[=] constant[bouncingBar]
if name[six].PY2 begin[:]
if call[name[isinstance], parameter[name[cmd], name[six].string_types]] begin[:]
variable[cmd] assign[=] call[name[cmd].encode, parameter[constant[utf-8]]]
if <ast.UnaryOp object at 0x7da18fe93a30> begin[:]
variable[cmd] assign[=] call[name[Script].parse, parameter[name[cmd]]]
if <ast.BoolOp object at 0x7da18fe906d0> begin[:]
variable[combine_stderr] assign[=] constant[False]
variable[start_text] assign[=] constant[]
with call[name[spinner], parameter[]] begin[:]
return[call[name[_create_subprocess], parameter[name[cmd]]]] | keyword[def] identifier[run] (
identifier[cmd] ,
identifier[env] = keyword[None] ,
identifier[return_object] = keyword[False] ,
identifier[block] = keyword[True] ,
identifier[cwd] = keyword[None] ,
identifier[verbose] = keyword[False] ,
identifier[nospin] = keyword[False] ,
identifier[spinner_name] = keyword[None] ,
identifier[combine_stderr] = keyword[True] ,
identifier[display_limit] = literal[int] ,
identifier[write_to_stdout] = keyword[True] ,
):
literal[string]
identifier[_env] = identifier[os] . identifier[environ] . identifier[copy] ()
keyword[if] identifier[env] :
identifier[_env] . identifier[update] ( identifier[env] )
keyword[if] identifier[six] . identifier[PY2] :
identifier[fs_encode] = identifier[partial] ( identifier[to_bytes] , identifier[encoding] = identifier[locale_encoding] )
identifier[_env] ={ identifier[fs_encode] ( identifier[k] ): identifier[fs_encode] ( identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[_env] . identifier[items] ()}
keyword[else] :
identifier[_env] ={ identifier[k] : identifier[fs_str] ( identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[_env] . identifier[items] ()}
keyword[if] keyword[not] identifier[spinner_name] :
identifier[spinner_name] = literal[string]
keyword[if] identifier[six] . identifier[PY2] :
keyword[if] identifier[isinstance] ( identifier[cmd] , identifier[six] . identifier[string_types] ):
identifier[cmd] = identifier[cmd] . identifier[encode] ( literal[string] )
keyword[elif] identifier[isinstance] ( identifier[cmd] ,( identifier[list] , identifier[tuple] )):
identifier[cmd] =[ identifier[c] . identifier[encode] ( literal[string] ) keyword[for] identifier[c] keyword[in] identifier[cmd] ]
keyword[if] keyword[not] identifier[isinstance] ( identifier[cmd] , identifier[Script] ):
identifier[cmd] = identifier[Script] . identifier[parse] ( identifier[cmd] )
keyword[if] identifier[block] keyword[or] keyword[not] identifier[return_object] :
identifier[combine_stderr] = keyword[False]
identifier[start_text] = literal[string]
keyword[with] identifier[spinner] (
identifier[spinner_name] = identifier[spinner_name] ,
identifier[start_text] = identifier[start_text] ,
identifier[nospin] = identifier[nospin] ,
identifier[write_to_stdout] = identifier[write_to_stdout] ,
) keyword[as] identifier[sp] :
keyword[return] identifier[_create_subprocess] (
identifier[cmd] ,
identifier[env] = identifier[_env] ,
identifier[return_object] = identifier[return_object] ,
identifier[block] = identifier[block] ,
identifier[cwd] = identifier[cwd] ,
identifier[verbose] = identifier[verbose] ,
identifier[spinner] = identifier[sp] ,
identifier[combine_stderr] = identifier[combine_stderr] ,
identifier[start_text] = identifier[start_text] ,
identifier[write_to_stdout] = keyword[True] ,
) | def run(cmd, env=None, return_object=False, block=True, cwd=None, verbose=False, nospin=False, spinner_name=None, combine_stderr=True, display_limit=200, write_to_stdout=True):
"""Use `subprocess.Popen` to get the output of a command and decode it.
:param list cmd: A list representing the command you want to run.
:param dict env: Additional environment settings to pass through to the subprocess.
:param bool return_object: When True, returns the whole subprocess instance
:param bool block: When False, returns a potentially still-running :class:`subprocess.Popen` instance
    :param str cwd: Current working directory context to use for spawning the subprocess.
:param bool verbose: Whether to print stdout in real time when non-blocking.
:param bool nospin: Whether to disable the cli spinner.
:param str spinner_name: The name of the spinner to use if enabled, defaults to bouncingBar
:param bool combine_stderr: Optionally merge stdout and stderr in the subprocess, false if nonblocking.
    :param int display_limit: The max width of output lines to display when using a spinner.
:param bool write_to_stdout: Whether to write to stdout when using a spinner, default True.
:returns: A 2-tuple of (output, error) or a :class:`subprocess.Popen` object.
    .. Warning:: Merging standard out and standard error in a nonblocking subprocess
can cause errors in some cases and may not be ideal. Consider disabling
this functionality.
"""
_env = os.environ.copy()
if env:
_env.update(env) # depends on [control=['if'], data=[]]
if six.PY2:
fs_encode = partial(to_bytes, encoding=locale_encoding)
_env = {fs_encode(k): fs_encode(v) for (k, v) in _env.items()} # depends on [control=['if'], data=[]]
else:
_env = {k: fs_str(v) for (k, v) in _env.items()}
if not spinner_name:
spinner_name = 'bouncingBar' # depends on [control=['if'], data=[]]
if six.PY2:
if isinstance(cmd, six.string_types):
cmd = cmd.encode('utf-8') # depends on [control=['if'], data=[]]
elif isinstance(cmd, (list, tuple)):
cmd = [c.encode('utf-8') for c in cmd] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not isinstance(cmd, Script):
cmd = Script.parse(cmd) # depends on [control=['if'], data=[]]
if block or not return_object:
combine_stderr = False # depends on [control=['if'], data=[]]
start_text = ''
with spinner(spinner_name=spinner_name, start_text=start_text, nospin=nospin, write_to_stdout=write_to_stdout) as sp:
return _create_subprocess(cmd, env=_env, return_object=return_object, block=block, cwd=cwd, verbose=verbose, spinner=sp, combine_stderr=combine_stderr, start_text=start_text, write_to_stdout=True) # depends on [control=['with'], data=['sp']] |
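A hedged usage sketch of run() follows; the commands below are illustrative and assume run is importable into the calling scope, which is not confirmed by the row above.

# Blocking call: per the docstring, returns a decoded (stdout, stderr) pair.
out, err = run(["git", "--version"], block=True, nospin=True)
print(out)
# Non-blocking call: returns the subprocess object to manage yourself.
proc = run(["sleep", "2"], block=False, return_object=True, nospin=True)
proc.wait()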
def applet_set_properties(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /applet-xxxx/setProperties API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Properties#API-method%3A-%2Fclass-xxxx%2FsetProperties
"""
return DXHTTPRequest('/%s/setProperties' % object_id, input_params, always_retry=always_retry, **kwargs) | def function[applet_set_properties, parameter[object_id, input_params, always_retry]]:
constant[
Invokes the /applet-xxxx/setProperties API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Properties#API-method%3A-%2Fclass-xxxx%2FsetProperties
]
return[call[name[DXHTTPRequest], parameter[binary_operation[constant[/%s/setProperties] <ast.Mod object at 0x7da2590d6920> name[object_id]], name[input_params]]]] | keyword[def] identifier[applet_set_properties] ( identifier[object_id] , identifier[input_params] ={}, identifier[always_retry] = keyword[True] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[DXHTTPRequest] ( literal[string] % identifier[object_id] , identifier[input_params] , identifier[always_retry] = identifier[always_retry] ,** identifier[kwargs] ) | def applet_set_properties(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /applet-xxxx/setProperties API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Properties#API-method%3A-%2Fclass-xxxx%2FsetProperties
"""
return DXHTTPRequest('/%s/setProperties' % object_id, input_params, always_retry=always_retry, **kwargs) |
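A hedged usage sketch of applet_set_properties(); the applet ID and property values are illustrative, and the {"properties": {...}} input shape follows the linked API spec rather than anything shown above.

result = applet_set_properties(
    "applet-xxxx",  # illustrative applet ID
    input_params={"properties": {"stage": "qa", "owner": "alice"}},
)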
def _admx_policy_parent_walk(path,
policy_namespace,
parent_category,
policy_nsmap,
return_full_policy_names,
adml_language):
'''
helper function to recursively walk up the ADMX namespaces and build the
hierarchy for the policy
'''
admx_policy_definitions = _get_policy_definitions(language=adml_language)
category_xpath_string = '/policyDefinitions/categories/{0}:category[@name="{1}"]'
using_xpath_string = '/policyDefinitions/policyNamespaces/{0}:using'
if parent_category.find(':') >= 0:
# the parent is in another namespace
policy_namespace = parent_category.split(':')[0]
parent_category = parent_category.split(':')[1]
using_xpath_string = using_xpath_string.format(policy_namespace)
policy_nsmap = dictupdate.update(policy_nsmap,
_buildElementNsmap(admx_policy_definitions.xpath(using_xpath_string,
namespaces=policy_nsmap)))
category_xpath_string = category_xpath_string.format(policy_namespace, parent_category)
if admx_policy_definitions.xpath(category_xpath_string, namespaces=policy_nsmap):
tparent_category = admx_policy_definitions.xpath(category_xpath_string,
namespaces=policy_nsmap)[0]
this_parent_name = _getFullPolicyName(
policy_item=tparent_category,
policy_name=tparent_category.attrib['name'],
return_full_policy_names=return_full_policy_names,
adml_language=adml_language)
path.append(this_parent_name)
if tparent_category.xpath('{0}:parentCategory/@ref'.format(policy_namespace), namespaces=policy_nsmap):
# parent has a parent
path = _admx_policy_parent_walk(
path=path,
policy_namespace=policy_namespace,
parent_category=tparent_category.xpath('{0}:parentCategory/@ref'.format(policy_namespace), namespaces=policy_nsmap)[0],
policy_nsmap=policy_nsmap,
return_full_policy_names=return_full_policy_names,
adml_language=adml_language)
return path | def function[_admx_policy_parent_walk, parameter[path, policy_namespace, parent_category, policy_nsmap, return_full_policy_names, adml_language]]:
constant[
helper function to recursively walk up the ADMX namespaces and build the
hierarchy for the policy
]
variable[admx_policy_definitions] assign[=] call[name[_get_policy_definitions], parameter[]]
variable[category_xpath_string] assign[=] constant[/policyDefinitions/categories/{0}:category[@name="{1}"]]
variable[using_xpath_string] assign[=] constant[/policyDefinitions/policyNamespaces/{0}:using]
if compare[call[name[parent_category].find, parameter[constant[:]]] greater_or_equal[>=] constant[0]] begin[:]
variable[policy_namespace] assign[=] call[call[name[parent_category].split, parameter[constant[:]]]][constant[0]]
variable[parent_category] assign[=] call[call[name[parent_category].split, parameter[constant[:]]]][constant[1]]
variable[using_xpath_string] assign[=] call[name[using_xpath_string].format, parameter[name[policy_namespace]]]
variable[policy_nsmap] assign[=] call[name[dictupdate].update, parameter[name[policy_nsmap], call[name[_buildElementNsmap], parameter[call[name[admx_policy_definitions].xpath, parameter[name[using_xpath_string]]]]]]]
variable[category_xpath_string] assign[=] call[name[category_xpath_string].format, parameter[name[policy_namespace], name[parent_category]]]
if call[name[admx_policy_definitions].xpath, parameter[name[category_xpath_string]]] begin[:]
variable[tparent_category] assign[=] call[call[name[admx_policy_definitions].xpath, parameter[name[category_xpath_string]]]][constant[0]]
variable[this_parent_name] assign[=] call[name[_getFullPolicyName], parameter[]]
call[name[path].append, parameter[name[this_parent_name]]]
if call[name[tparent_category].xpath, parameter[call[constant[{0}:parentCategory/@ref].format, parameter[name[policy_namespace]]]]] begin[:]
variable[path] assign[=] call[name[_admx_policy_parent_walk], parameter[]]
return[name[path]] | keyword[def] identifier[_admx_policy_parent_walk] ( identifier[path] ,
identifier[policy_namespace] ,
identifier[parent_category] ,
identifier[policy_nsmap] ,
identifier[return_full_policy_names] ,
identifier[adml_language] ):
literal[string]
identifier[admx_policy_definitions] = identifier[_get_policy_definitions] ( identifier[language] = identifier[adml_language] )
identifier[category_xpath_string] = literal[string]
identifier[using_xpath_string] = literal[string]
keyword[if] identifier[parent_category] . identifier[find] ( literal[string] )>= literal[int] :
identifier[policy_namespace] = identifier[parent_category] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[parent_category] = identifier[parent_category] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[using_xpath_string] = identifier[using_xpath_string] . identifier[format] ( identifier[policy_namespace] )
identifier[policy_nsmap] = identifier[dictupdate] . identifier[update] ( identifier[policy_nsmap] ,
identifier[_buildElementNsmap] ( identifier[admx_policy_definitions] . identifier[xpath] ( identifier[using_xpath_string] ,
identifier[namespaces] = identifier[policy_nsmap] )))
identifier[category_xpath_string] = identifier[category_xpath_string] . identifier[format] ( identifier[policy_namespace] , identifier[parent_category] )
keyword[if] identifier[admx_policy_definitions] . identifier[xpath] ( identifier[category_xpath_string] , identifier[namespaces] = identifier[policy_nsmap] ):
identifier[tparent_category] = identifier[admx_policy_definitions] . identifier[xpath] ( identifier[category_xpath_string] ,
identifier[namespaces] = identifier[policy_nsmap] )[ literal[int] ]
identifier[this_parent_name] = identifier[_getFullPolicyName] (
identifier[policy_item] = identifier[tparent_category] ,
identifier[policy_name] = identifier[tparent_category] . identifier[attrib] [ literal[string] ],
identifier[return_full_policy_names] = identifier[return_full_policy_names] ,
identifier[adml_language] = identifier[adml_language] )
identifier[path] . identifier[append] ( identifier[this_parent_name] )
keyword[if] identifier[tparent_category] . identifier[xpath] ( literal[string] . identifier[format] ( identifier[policy_namespace] ), identifier[namespaces] = identifier[policy_nsmap] ):
identifier[path] = identifier[_admx_policy_parent_walk] (
identifier[path] = identifier[path] ,
identifier[policy_namespace] = identifier[policy_namespace] ,
identifier[parent_category] = identifier[tparent_category] . identifier[xpath] ( literal[string] . identifier[format] ( identifier[policy_namespace] ), identifier[namespaces] = identifier[policy_nsmap] )[ literal[int] ],
identifier[policy_nsmap] = identifier[policy_nsmap] ,
identifier[return_full_policy_names] = identifier[return_full_policy_names] ,
identifier[adml_language] = identifier[adml_language] )
keyword[return] identifier[path] | def _admx_policy_parent_walk(path, policy_namespace, parent_category, policy_nsmap, return_full_policy_names, adml_language):
"""
helper function to recursively walk up the ADMX namespaces and build the
hierarchy for the policy
"""
admx_policy_definitions = _get_policy_definitions(language=adml_language)
category_xpath_string = '/policyDefinitions/categories/{0}:category[@name="{1}"]'
using_xpath_string = '/policyDefinitions/policyNamespaces/{0}:using'
if parent_category.find(':') >= 0:
# the parent is in another namespace
policy_namespace = parent_category.split(':')[0]
parent_category = parent_category.split(':')[1]
using_xpath_string = using_xpath_string.format(policy_namespace)
policy_nsmap = dictupdate.update(policy_nsmap, _buildElementNsmap(admx_policy_definitions.xpath(using_xpath_string, namespaces=policy_nsmap))) # depends on [control=['if'], data=[]]
category_xpath_string = category_xpath_string.format(policy_namespace, parent_category)
if admx_policy_definitions.xpath(category_xpath_string, namespaces=policy_nsmap):
tparent_category = admx_policy_definitions.xpath(category_xpath_string, namespaces=policy_nsmap)[0]
this_parent_name = _getFullPolicyName(policy_item=tparent_category, policy_name=tparent_category.attrib['name'], return_full_policy_names=return_full_policy_names, adml_language=adml_language)
path.append(this_parent_name)
if tparent_category.xpath('{0}:parentCategory/@ref'.format(policy_namespace), namespaces=policy_nsmap):
# parent has a parent
path = _admx_policy_parent_walk(path=path, policy_namespace=policy_namespace, parent_category=tparent_category.xpath('{0}:parentCategory/@ref'.format(policy_namespace), namespaces=policy_nsmap)[0], policy_nsmap=policy_nsmap, return_full_policy_names=return_full_policy_names, adml_language=adml_language) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return path |
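_admx_policy_parent_walk() accumulates category display names from a leaf category up to the root. A minimal, self-contained sketch of the same recursive parent walk over a plain dict, without the XML/namespace machinery (the category data is illustrative):

def parent_walk(path, category, parent_of):
    # Append the current category, then recurse while a parent is known.
    path.append(category)
    if category in parent_of:
        path = parent_walk(path, parent_of[category], parent_of)
    return path

parent_of = {"Printers": "ControlPanel", "ControlPanel": "WindowsComponents"}
print(parent_walk([], "Printers", parent_of))
# -> ['Printers', 'ControlPanel', 'WindowsComponents']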
def _rpc_action_stmt(self, stmt: Statement, sctx: SchemaContext) -> None:
"""Handle rpc or action statement."""
self._handle_child(RpcActionNode(), stmt, sctx) | def function[_rpc_action_stmt, parameter[self, stmt, sctx]]:
constant[Handle rpc or action statement.]
call[name[self]._handle_child, parameter[call[name[RpcActionNode], parameter[]], name[stmt], name[sctx]]] | keyword[def] identifier[_rpc_action_stmt] ( identifier[self] , identifier[stmt] : identifier[Statement] , identifier[sctx] : identifier[SchemaContext] )-> keyword[None] :
literal[string]
identifier[self] . identifier[_handle_child] ( identifier[RpcActionNode] (), identifier[stmt] , identifier[sctx] ) | def _rpc_action_stmt(self, stmt: Statement, sctx: SchemaContext) -> None:
"""Handle rpc or action statement."""
self._handle_child(RpcActionNode(), stmt, sctx) |
def expand(args):
"""
%prog expand bes.fasta reads.fastq
    Expand sequences using short reads. Useful, for example, for getting BAC-end
    sequences. The template to use, in `bes.fasta`, may just contain the junction
sequences, then align the reads to get the 'flanks' for such sequences.
"""
import math
from jcvi.formats.fasta import Fasta, SeqIO
from jcvi.formats.fastq import readlen, first, fasta
from jcvi.formats.blast import Blast
from jcvi.formats.base import FileShredder
from jcvi.apps.bowtie import align, get_samfile
from jcvi.apps.align import blast
p = OptionParser(expand.__doc__)
p.set_depth(depth=200)
p.set_firstN()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bes, reads = args
size = Fasta(bes).totalsize
rl = readlen([reads])
expected_size = size + 2 * rl
nreads = expected_size * opts.depth / rl
nreads = int(math.ceil(nreads / 1000.)) * 1000
# Attract reads
samfile, logfile = align([bes, reads, "--reorder", "--mapped",
"--firstN={0}".format(opts.firstN)])
samfile, mapped, _ = get_samfile(reads, bes, bowtie=True, mapped=True)
logging.debug("Extract first {0} reads from `{1}`.".format(nreads, mapped))
pf = mapped.split(".")[0]
pf = pf.split("-")[0]
bespf = bes.split(".")[0]
reads = pf + ".expand.fastq"
first([str(nreads), mapped, "-o", reads])
# Perform mini-assembly
fastafile = reads.rsplit(".", 1)[0] + ".fasta"
qualfile = ""
if need_update(reads, fastafile):
fastafile, qualfile = fasta([reads])
contigs = op.join(pf, "454LargeContigs.fna")
if need_update(fastafile, contigs):
cmd = "runAssembly -o {0} -cpu 8 {1}".format(pf, fastafile)
sh(cmd)
assert op.exists(contigs)
# Annotate contigs
blastfile = blast([bes, contigs])
mapping = {}
for query, b in Blast(blastfile).iter_best_hit():
mapping[query] = b
f = Fasta(contigs, lazy=True)
annotatedfasta = ".".join((pf, bespf, "fasta"))
fw = open(annotatedfasta, "w")
keys = list(Fasta(bes).iterkeys_ordered()) # keep an ordered list
recs = []
for key, v in f.iteritems_ordered():
vid = v.id
if vid not in mapping:
continue
b = mapping[vid]
subject = b.subject
rec = v.reverse_complement() if b.orientation == '-' else v
rec.id = rid = "_".join((pf, vid, subject))
rec.description = ""
recs.append((keys.index(subject), rid, rec))
recs = [x[-1] for x in sorted(recs)]
SeqIO.write(recs, fw, "fasta")
fw.close()
FileShredder([samfile, logfile, mapped, reads, fastafile, qualfile, blastfile, pf])
logging.debug("Annotated seqs (n={0}) written to `{1}`.".\
format(len(recs), annotatedfasta))
return annotatedfasta | def function[expand, parameter[args]]:
constant[
%prog expand bes.fasta reads.fastq
    Expand sequences using short reads. Useful, for example, for getting BAC-end
    sequences. The template to use, in `bes.fasta`, may just contain the junction
sequences, then align the reads to get the 'flanks' for such sequences.
]
import module[math]
from relative_module[jcvi.formats.fasta] import module[Fasta], module[SeqIO]
from relative_module[jcvi.formats.fastq] import module[readlen], module[first], module[fasta]
from relative_module[jcvi.formats.blast] import module[Blast]
from relative_module[jcvi.formats.base] import module[FileShredder]
from relative_module[jcvi.apps.bowtie] import module[align], module[get_samfile]
from relative_module[jcvi.apps.align] import module[blast]
variable[p] assign[=] call[name[OptionParser], parameter[name[expand].__doc__]]
call[name[p].set_depth, parameter[]]
call[name[p].set_firstN, parameter[]]
<ast.Tuple object at 0x7da1b074f760> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b074f430>]]
<ast.Tuple object at 0x7da1b074f310> assign[=] name[args]
variable[size] assign[=] call[name[Fasta], parameter[name[bes]]].totalsize
variable[rl] assign[=] call[name[readlen], parameter[list[[<ast.Name object at 0x7da1b074f040>]]]]
variable[expected_size] assign[=] binary_operation[name[size] + binary_operation[constant[2] * name[rl]]]
variable[nreads] assign[=] binary_operation[binary_operation[name[expected_size] * name[opts].depth] / name[rl]]
variable[nreads] assign[=] binary_operation[call[name[int], parameter[call[name[math].ceil, parameter[binary_operation[name[nreads] / constant[1000.0]]]]]] * constant[1000]]
<ast.Tuple object at 0x7da1b074ead0> assign[=] call[name[align], parameter[list[[<ast.Name object at 0x7da1b074e980>, <ast.Name object at 0x7da1b074e950>, <ast.Constant object at 0x7da1b074e920>, <ast.Constant object at 0x7da1b074e8f0>, <ast.Call object at 0x7da1b074e8c0>]]]]
<ast.Tuple object at 0x7da1b074e7a0> assign[=] call[name[get_samfile], parameter[name[reads], name[bes]]]
call[name[logging].debug, parameter[call[constant[Extract first {0} reads from `{1}`.].format, parameter[name[nreads], name[mapped]]]]]
variable[pf] assign[=] call[call[name[mapped].split, parameter[constant[.]]]][constant[0]]
variable[pf] assign[=] call[call[name[pf].split, parameter[constant[-]]]][constant[0]]
variable[bespf] assign[=] call[call[name[bes].split, parameter[constant[.]]]][constant[0]]
variable[reads] assign[=] binary_operation[name[pf] + constant[.expand.fastq]]
call[name[first], parameter[list[[<ast.Call object at 0x7da1b074cc70>, <ast.Name object at 0x7da1b074cbe0>, <ast.Constant object at 0x7da1b074cbb0>, <ast.Name object at 0x7da1b074cb80>]]]]
variable[fastafile] assign[=] binary_operation[call[call[name[reads].rsplit, parameter[constant[.], constant[1]]]][constant[0]] + constant[.fasta]]
variable[qualfile] assign[=] constant[]
if call[name[need_update], parameter[name[reads], name[fastafile]]] begin[:]
<ast.Tuple object at 0x7da1b074c760> assign[=] call[name[fasta], parameter[list[[<ast.Name object at 0x7da1b07aca30>]]]]
variable[contigs] assign[=] call[name[op].join, parameter[name[pf], constant[454LargeContigs.fna]]]
if call[name[need_update], parameter[name[fastafile], name[contigs]]] begin[:]
variable[cmd] assign[=] call[constant[runAssembly -o {0} -cpu 8 {1}].format, parameter[name[pf], name[fastafile]]]
call[name[sh], parameter[name[cmd]]]
assert[call[name[op].exists, parameter[name[contigs]]]]
variable[blastfile] assign[=] call[name[blast], parameter[list[[<ast.Name object at 0x7da1b07ae920>, <ast.Name object at 0x7da1b07ae9b0>]]]]
variable[mapping] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b07af490>, <ast.Name object at 0x7da1b07ae950>]]] in starred[call[call[name[Blast], parameter[name[blastfile]]].iter_best_hit, parameter[]]] begin[:]
call[name[mapping]][name[query]] assign[=] name[b]
variable[f] assign[=] call[name[Fasta], parameter[name[contigs]]]
variable[annotatedfasta] assign[=] call[constant[.].join, parameter[tuple[[<ast.Name object at 0x7da1b07ae6b0>, <ast.Name object at 0x7da1b07ae680>, <ast.Constant object at 0x7da1b07afc40>]]]]
variable[fw] assign[=] call[name[open], parameter[name[annotatedfasta], constant[w]]]
variable[keys] assign[=] call[name[list], parameter[call[call[name[Fasta], parameter[name[bes]]].iterkeys_ordered, parameter[]]]]
variable[recs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b07af7c0>, <ast.Name object at 0x7da1b07af820>]]] in starred[call[name[f].iteritems_ordered, parameter[]]] begin[:]
variable[vid] assign[=] name[v].id
if compare[name[vid] <ast.NotIn object at 0x7da2590d7190> name[mapping]] begin[:]
continue
variable[b] assign[=] call[name[mapping]][name[vid]]
variable[subject] assign[=] name[b].subject
variable[rec] assign[=] <ast.IfExp object at 0x7da1b07afa90>
name[rec].id assign[=] call[constant[_].join, parameter[tuple[[<ast.Name object at 0x7da1b07ac4f0>, <ast.Name object at 0x7da1b07ac4c0>, <ast.Name object at 0x7da1b07ae410>]]]]
name[rec].description assign[=] constant[]
call[name[recs].append, parameter[tuple[[<ast.Call object at 0x7da1b07ac250>, <ast.Name object at 0x7da1b07ac400>, <ast.Name object at 0x7da1b07ac190>]]]]
variable[recs] assign[=] <ast.ListComp object at 0x7da1b07ac130>
call[name[SeqIO].write, parameter[name[recs], name[fw], constant[fasta]]]
call[name[fw].close, parameter[]]
call[name[FileShredder], parameter[list[[<ast.Name object at 0x7da1b07ad1b0>, <ast.Name object at 0x7da1b07ad240>, <ast.Name object at 0x7da1b07ad2a0>, <ast.Name object at 0x7da1b07ad1e0>, <ast.Name object at 0x7da1b07ad120>, <ast.Name object at 0x7da1b07ad0c0>, <ast.Name object at 0x7da1b07ace80>, <ast.Name object at 0x7da1b07ad060>]]]]
call[name[logging].debug, parameter[call[constant[Annotated seqs (n={0}) written to `{1}`.].format, parameter[call[name[len], parameter[name[recs]]], name[annotatedfasta]]]]]
return[name[annotatedfasta]] | keyword[def] identifier[expand] ( identifier[args] ):
literal[string]
keyword[import] identifier[math]
keyword[from] identifier[jcvi] . identifier[formats] . identifier[fasta] keyword[import] identifier[Fasta] , identifier[SeqIO]
keyword[from] identifier[jcvi] . identifier[formats] . identifier[fastq] keyword[import] identifier[readlen] , identifier[first] , identifier[fasta]
keyword[from] identifier[jcvi] . identifier[formats] . identifier[blast] keyword[import] identifier[Blast]
keyword[from] identifier[jcvi] . identifier[formats] . identifier[base] keyword[import] identifier[FileShredder]
keyword[from] identifier[jcvi] . identifier[apps] . identifier[bowtie] keyword[import] identifier[align] , identifier[get_samfile]
keyword[from] identifier[jcvi] . identifier[apps] . identifier[align] keyword[import] identifier[blast]
identifier[p] = identifier[OptionParser] ( identifier[expand] . identifier[__doc__] )
identifier[p] . identifier[set_depth] ( identifier[depth] = literal[int] )
identifier[p] . identifier[set_firstN] ()
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[bes] , identifier[reads] = identifier[args]
identifier[size] = identifier[Fasta] ( identifier[bes] ). identifier[totalsize]
identifier[rl] = identifier[readlen] ([ identifier[reads] ])
identifier[expected_size] = identifier[size] + literal[int] * identifier[rl]
identifier[nreads] = identifier[expected_size] * identifier[opts] . identifier[depth] / identifier[rl]
identifier[nreads] = identifier[int] ( identifier[math] . identifier[ceil] ( identifier[nreads] / literal[int] ))* literal[int]
identifier[samfile] , identifier[logfile] = identifier[align] ([ identifier[bes] , identifier[reads] , literal[string] , literal[string] ,
literal[string] . identifier[format] ( identifier[opts] . identifier[firstN] )])
identifier[samfile] , identifier[mapped] , identifier[_] = identifier[get_samfile] ( identifier[reads] , identifier[bes] , identifier[bowtie] = keyword[True] , identifier[mapped] = keyword[True] )
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[nreads] , identifier[mapped] ))
identifier[pf] = identifier[mapped] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[pf] = identifier[pf] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[bespf] = identifier[bes] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[reads] = identifier[pf] + literal[string]
identifier[first] ([ identifier[str] ( identifier[nreads] ), identifier[mapped] , literal[string] , identifier[reads] ])
identifier[fastafile] = identifier[reads] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]+ literal[string]
identifier[qualfile] = literal[string]
keyword[if] identifier[need_update] ( identifier[reads] , identifier[fastafile] ):
identifier[fastafile] , identifier[qualfile] = identifier[fasta] ([ identifier[reads] ])
identifier[contigs] = identifier[op] . identifier[join] ( identifier[pf] , literal[string] )
keyword[if] identifier[need_update] ( identifier[fastafile] , identifier[contigs] ):
identifier[cmd] = literal[string] . identifier[format] ( identifier[pf] , identifier[fastafile] )
identifier[sh] ( identifier[cmd] )
keyword[assert] identifier[op] . identifier[exists] ( identifier[contigs] )
identifier[blastfile] = identifier[blast] ([ identifier[bes] , identifier[contigs] ])
identifier[mapping] ={}
keyword[for] identifier[query] , identifier[b] keyword[in] identifier[Blast] ( identifier[blastfile] ). identifier[iter_best_hit] ():
identifier[mapping] [ identifier[query] ]= identifier[b]
identifier[f] = identifier[Fasta] ( identifier[contigs] , identifier[lazy] = keyword[True] )
identifier[annotatedfasta] = literal[string] . identifier[join] (( identifier[pf] , identifier[bespf] , literal[string] ))
identifier[fw] = identifier[open] ( identifier[annotatedfasta] , literal[string] )
identifier[keys] = identifier[list] ( identifier[Fasta] ( identifier[bes] ). identifier[iterkeys_ordered] ())
identifier[recs] =[]
keyword[for] identifier[key] , identifier[v] keyword[in] identifier[f] . identifier[iteritems_ordered] ():
identifier[vid] = identifier[v] . identifier[id]
keyword[if] identifier[vid] keyword[not] keyword[in] identifier[mapping] :
keyword[continue]
identifier[b] = identifier[mapping] [ identifier[vid] ]
identifier[subject] = identifier[b] . identifier[subject]
identifier[rec] = identifier[v] . identifier[reverse_complement] () keyword[if] identifier[b] . identifier[orientation] == literal[string] keyword[else] identifier[v]
identifier[rec] . identifier[id] = identifier[rid] = literal[string] . identifier[join] (( identifier[pf] , identifier[vid] , identifier[subject] ))
identifier[rec] . identifier[description] = literal[string]
identifier[recs] . identifier[append] (( identifier[keys] . identifier[index] ( identifier[subject] ), identifier[rid] , identifier[rec] ))
identifier[recs] =[ identifier[x] [- literal[int] ] keyword[for] identifier[x] keyword[in] identifier[sorted] ( identifier[recs] )]
identifier[SeqIO] . identifier[write] ( identifier[recs] , identifier[fw] , literal[string] )
identifier[fw] . identifier[close] ()
identifier[FileShredder] ([ identifier[samfile] , identifier[logfile] , identifier[mapped] , identifier[reads] , identifier[fastafile] , identifier[qualfile] , identifier[blastfile] , identifier[pf] ])
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[len] ( identifier[recs] ), identifier[annotatedfasta] ))
keyword[return] identifier[annotatedfasta] | def expand(args):
"""
%prog expand bes.fasta reads.fastq
    Expand sequences using short reads. Useful, for example, for getting BAC-end
    sequences. The template to use, in `bes.fasta`, may just contain the junction
sequences, then align the reads to get the 'flanks' for such sequences.
"""
import math
from jcvi.formats.fasta import Fasta, SeqIO
from jcvi.formats.fastq import readlen, first, fasta
from jcvi.formats.blast import Blast
from jcvi.formats.base import FileShredder
from jcvi.apps.bowtie import align, get_samfile
from jcvi.apps.align import blast
p = OptionParser(expand.__doc__)
p.set_depth(depth=200)
p.set_firstN()
(opts, args) = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(bes, reads) = args
size = Fasta(bes).totalsize
rl = readlen([reads])
expected_size = size + 2 * rl
nreads = expected_size * opts.depth / rl
nreads = int(math.ceil(nreads / 1000.0)) * 1000
# Attract reads
(samfile, logfile) = align([bes, reads, '--reorder', '--mapped', '--firstN={0}'.format(opts.firstN)])
(samfile, mapped, _) = get_samfile(reads, bes, bowtie=True, mapped=True)
logging.debug('Extract first {0} reads from `{1}`.'.format(nreads, mapped))
pf = mapped.split('.')[0]
pf = pf.split('-')[0]
bespf = bes.split('.')[0]
reads = pf + '.expand.fastq'
first([str(nreads), mapped, '-o', reads])
# Perform mini-assembly
fastafile = reads.rsplit('.', 1)[0] + '.fasta'
qualfile = ''
if need_update(reads, fastafile):
(fastafile, qualfile) = fasta([reads]) # depends on [control=['if'], data=[]]
contigs = op.join(pf, '454LargeContigs.fna')
if need_update(fastafile, contigs):
cmd = 'runAssembly -o {0} -cpu 8 {1}'.format(pf, fastafile)
sh(cmd) # depends on [control=['if'], data=[]]
assert op.exists(contigs)
# Annotate contigs
blastfile = blast([bes, contigs])
mapping = {}
for (query, b) in Blast(blastfile).iter_best_hit():
mapping[query] = b # depends on [control=['for'], data=[]]
f = Fasta(contigs, lazy=True)
annotatedfasta = '.'.join((pf, bespf, 'fasta'))
fw = open(annotatedfasta, 'w')
keys = list(Fasta(bes).iterkeys_ordered()) # keep an ordered list
recs = []
for (key, v) in f.iteritems_ordered():
vid = v.id
if vid not in mapping:
continue # depends on [control=['if'], data=[]]
b = mapping[vid]
subject = b.subject
rec = v.reverse_complement() if b.orientation == '-' else v
rec.id = rid = '_'.join((pf, vid, subject))
rec.description = ''
recs.append((keys.index(subject), rid, rec)) # depends on [control=['for'], data=[]]
recs = [x[-1] for x in sorted(recs)]
SeqIO.write(recs, fw, 'fasta')
fw.close()
FileShredder([samfile, logfile, mapped, reads, fastafile, qualfile, blastfile, pf])
logging.debug('Annotated seqs (n={0}) written to `{1}`.'.format(len(recs), annotatedfasta))
return annotatedfasta |
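A hedged usage sketch of expand(); it takes its argument list directly, the file names are illustrative, and bowtie, Newbler (runAssembly), and BLAST are assumed to be on PATH:

annotated = expand(["bes.fasta", "reads.fastq"])
print("annotated sequences written to", annotated)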
def import_xml(self, xml_gzipped_file_path, taxids=None, silent=False):
"""Imports XML
:param str xml_gzipped_file_path: path to XML file
:param Optional[list[int]] taxids: NCBI taxonomy identifier
:param bool silent: no output if True
"""
version = self.session.query(models.Version).filter(models.Version.knowledgebase == 'Swiss-Prot').first()
version.import_start_date = datetime.now()
entry_xml = '<entries>'
number_of_entries = 0
interval = 1000
start = False
if sys.platform in ('linux', 'linux2', 'darwin'):
log.info('Load gzipped XML from {}'.format(xml_gzipped_file_path))
zcat_command = 'gzcat' if sys.platform == 'darwin' else 'zcat'
number_of_lines = int(getoutput("{} {} | wc -l".format(zcat_command, xml_gzipped_file_path)))
tqdm_desc = 'Import {} lines'.format(number_of_lines)
else:
            print('platform is something else')
number_of_lines = None
tqdm_desc = None
with gzip.open(xml_gzipped_file_path) as fd:
for line in tqdm(fd, desc=tqdm_desc, total=number_of_lines, mininterval=1, disable=silent):
end_of_file = line.startswith(b"</uniprot>")
if line.startswith(b"<entry "):
start = True
elif end_of_file:
start = False
if start:
entry_xml += line.decode("utf-8")
if line.startswith(b"</entry>") or end_of_file:
number_of_entries += 1
start = False
if number_of_entries == interval or end_of_file:
entry_xml += "</entries>"
self.insert_entries(entry_xml, taxids)
if end_of_file:
break
else:
entry_xml = "<entries>"
number_of_entries = 0
version.import_completed_date = datetime.now()
self.session.commit() | def function[import_xml, parameter[self, xml_gzipped_file_path, taxids, silent]]:
constant[Imports XML
:param str xml_gzipped_file_path: path to XML file
:param Optional[list[int]] taxids: NCBI taxonomy identifier
:param bool silent: no output if True
]
variable[version] assign[=] call[call[call[name[self].session.query, parameter[name[models].Version]].filter, parameter[compare[name[models].Version.knowledgebase equal[==] constant[Swiss-Prot]]]].first, parameter[]]
name[version].import_start_date assign[=] call[name[datetime].now, parameter[]]
variable[entry_xml] assign[=] constant[<entries>]
variable[number_of_entries] assign[=] constant[0]
variable[interval] assign[=] constant[1000]
variable[start] assign[=] constant[False]
if compare[name[sys].platform in tuple[[<ast.Constant object at 0x7da1b0c64df0>, <ast.Constant object at 0x7da1b0c64160>, <ast.Constant object at 0x7da1b0c642b0>]]] begin[:]
call[name[log].info, parameter[call[constant[Load gzipped XML from {}].format, parameter[name[xml_gzipped_file_path]]]]]
variable[zcat_command] assign[=] <ast.IfExp object at 0x7da1b0c655d0>
variable[number_of_lines] assign[=] call[name[int], parameter[call[name[getoutput], parameter[call[constant[{} {} | wc -l].format, parameter[name[zcat_command], name[xml_gzipped_file_path]]]]]]]
variable[tqdm_desc] assign[=] call[constant[Import {} lines].format, parameter[name[number_of_lines]]]
with call[name[gzip].open, parameter[name[xml_gzipped_file_path]]] begin[:]
for taget[name[line]] in starred[call[name[tqdm], parameter[name[fd]]]] begin[:]
variable[end_of_file] assign[=] call[name[line].startswith, parameter[constant[b'</uniprot>']]]
if call[name[line].startswith, parameter[constant[b'<entry ']]] begin[:]
variable[start] assign[=] constant[True]
if name[start] begin[:]
<ast.AugAssign object at 0x7da1b0c96260>
if <ast.BoolOp object at 0x7da1b0c96b00> begin[:]
<ast.AugAssign object at 0x7da1b0c953c0>
variable[start] assign[=] constant[False]
if <ast.BoolOp object at 0x7da1b0c97730> begin[:]
<ast.AugAssign object at 0x7da1b0c979a0>
call[name[self].insert_entries, parameter[name[entry_xml], name[taxids]]]
if name[end_of_file] begin[:]
break
name[version].import_completed_date assign[=] call[name[datetime].now, parameter[]]
call[name[self].session.commit, parameter[]] | keyword[def] identifier[import_xml] ( identifier[self] , identifier[xml_gzipped_file_path] , identifier[taxids] = keyword[None] , identifier[silent] = keyword[False] ):
literal[string]
identifier[version] = identifier[self] . identifier[session] . identifier[query] ( identifier[models] . identifier[Version] ). identifier[filter] ( identifier[models] . identifier[Version] . identifier[knowledgebase] == literal[string] ). identifier[first] ()
identifier[version] . identifier[import_start_date] = identifier[datetime] . identifier[now] ()
identifier[entry_xml] = literal[string]
identifier[number_of_entries] = literal[int]
identifier[interval] = literal[int]
identifier[start] = keyword[False]
keyword[if] identifier[sys] . identifier[platform] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[xml_gzipped_file_path] ))
identifier[zcat_command] = literal[string] keyword[if] identifier[sys] . identifier[platform] == literal[string] keyword[else] literal[string]
identifier[number_of_lines] = identifier[int] ( identifier[getoutput] ( literal[string] . identifier[format] ( identifier[zcat_command] , identifier[xml_gzipped_file_path] )))
identifier[tqdm_desc] = literal[string] . identifier[format] ( identifier[number_of_lines] )
keyword[else] :
identifier[print] ( literal[string] )
identifier[number_of_lines] = keyword[None]
identifier[tqdm_desc] = keyword[None]
keyword[with] identifier[gzip] . identifier[open] ( identifier[xml_gzipped_file_path] ) keyword[as] identifier[fd] :
keyword[for] identifier[line] keyword[in] identifier[tqdm] ( identifier[fd] , identifier[desc] = identifier[tqdm_desc] , identifier[total] = identifier[number_of_lines] , identifier[mininterval] = literal[int] , identifier[disable] = identifier[silent] ):
identifier[end_of_file] = identifier[line] . identifier[startswith] ( literal[string] )
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[start] = keyword[True]
keyword[elif] identifier[end_of_file] :
identifier[start] = keyword[False]
keyword[if] identifier[start] :
identifier[entry_xml] += identifier[line] . identifier[decode] ( literal[string] )
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ) keyword[or] identifier[end_of_file] :
identifier[number_of_entries] += literal[int]
identifier[start] = keyword[False]
keyword[if] identifier[number_of_entries] == identifier[interval] keyword[or] identifier[end_of_file] :
identifier[entry_xml] += literal[string]
identifier[self] . identifier[insert_entries] ( identifier[entry_xml] , identifier[taxids] )
keyword[if] identifier[end_of_file] :
keyword[break]
keyword[else] :
identifier[entry_xml] = literal[string]
identifier[number_of_entries] = literal[int]
identifier[version] . identifier[import_completed_date] = identifier[datetime] . identifier[now] ()
identifier[self] . identifier[session] . identifier[commit] () | def import_xml(self, xml_gzipped_file_path, taxids=None, silent=False):
"""Imports XML
:param str xml_gzipped_file_path: path to XML file
:param Optional[list[int]] taxids: NCBI taxonomy identifier
:param bool silent: no output if True
"""
version = self.session.query(models.Version).filter(models.Version.knowledgebase == 'Swiss-Prot').first()
version.import_start_date = datetime.now()
entry_xml = '<entries>'
number_of_entries = 0
interval = 1000
start = False
if sys.platform in ('linux', 'linux2', 'darwin'):
log.info('Load gzipped XML from {}'.format(xml_gzipped_file_path))
zcat_command = 'gzcat' if sys.platform == 'darwin' else 'zcat'
number_of_lines = int(getoutput('{} {} | wc -l'.format(zcat_command, xml_gzipped_file_path)))
tqdm_desc = 'Import {} lines'.format(number_of_lines) # depends on [control=['if'], data=[]]
else:
        print('platform is something else')
number_of_lines = None
tqdm_desc = None
with gzip.open(xml_gzipped_file_path) as fd:
for line in tqdm(fd, desc=tqdm_desc, total=number_of_lines, mininterval=1, disable=silent):
end_of_file = line.startswith(b'</uniprot>')
if line.startswith(b'<entry '):
start = True # depends on [control=['if'], data=[]]
elif end_of_file:
start = False # depends on [control=['if'], data=[]]
if start:
entry_xml += line.decode('utf-8') # depends on [control=['if'], data=[]]
if line.startswith(b'</entry>') or end_of_file:
number_of_entries += 1
start = False
if number_of_entries == interval or end_of_file:
entry_xml += '</entries>'
self.insert_entries(entry_xml, taxids)
if end_of_file:
break # depends on [control=['if'], data=[]]
else:
entry_xml = '<entries>'
number_of_entries = 0 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['fd']]
version.import_completed_date = datetime.now()
self.session.commit() |
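import_xml() streams <entry> elements out of a gzipped UniProt dump and flushes them in batches of 1000. A self-contained sketch of the same batching pattern, detached from the surrounding class and database session:

import gzip

def iter_entry_batches(path, batch_size=1000):
    # Yield "<entries>...</entries>" strings holding up to batch_size entries.
    batch, count, inside = "<entries>", 0, False
    with gzip.open(path) as fd:
        for line in fd:
            if line.startswith(b"<entry "):
                inside = True
            if inside:
                batch += line.decode("utf-8")
            if line.startswith(b"</entry>"):
                count += 1
                inside = False
                if count == batch_size:
                    yield batch + "</entries>"
                    batch, count = "<entries>", 0
    if count:
        yield batch + "</entries>"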
def packages(self):
"""
Property for accessing :class:`PackageManager` instance, which is used to manage packages.
:rtype: yagocd.resources.package.PackageManager
"""
if self._package_manager is None:
self._package_manager = PackageManager(session=self._session)
return self._package_manager | def function[packages, parameter[self]]:
constant[
Property for accessing :class:`PackageManager` instance, which is used to manage packages.
:rtype: yagocd.resources.package.PackageManager
]
if compare[name[self]._package_manager is constant[None]] begin[:]
name[self]._package_manager assign[=] call[name[PackageManager], parameter[]]
return[name[self]._package_manager] | keyword[def] identifier[packages] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_package_manager] keyword[is] keyword[None] :
identifier[self] . identifier[_package_manager] = identifier[PackageManager] ( identifier[session] = identifier[self] . identifier[_session] )
keyword[return] identifier[self] . identifier[_package_manager] | def packages(self):
"""
Property for accessing :class:`PackageManager` instance, which is used to manage packages.
:rtype: yagocd.resources.package.PackageManager
"""
if self._package_manager is None:
self._package_manager = PackageManager(session=self._session) # depends on [control=['if'], data=[]]
return self._package_manager |
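The packages property lazily builds its manager on first access and caches it. On Python 3.8+ the same pattern can be written with functools.cached_property; a sketch under that assumption, where PackageManager stands in for yagocd.resources.package.PackageManager (the original presumably keeps the explicit form for older interpreters):

from functools import cached_property

class Client:
    def __init__(self, session):
        self._session = session

    @cached_property
    def packages(self):
        # Built once on first access, then cached on the instance.
        return PackageManager(session=self._session)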
def D_dt(self, H_0, Om0, Ode0=None):
"""
time delay distance
:param H_0: Hubble parameter [km/s/Mpc]
        :param Om0: normalized matter density at present time
        :param Ode0: normalized dark energy density at present time (optional)
:return: float [Mpc]
"""
lensCosmo = self._get_cosom(H_0, Om0, Ode0)
return lensCosmo.D_dt | def function[D_dt, parameter[self, H_0, Om0, Ode0]]:
constant[
time delay distance
:param H_0: Hubble parameter [km/s/Mpc]
    :param Om0: normalized matter density at present time
    :param Ode0: normalized dark energy density at present time (optional)
:return: float [Mpc]
]
variable[lensCosmo] assign[=] call[name[self]._get_cosom, parameter[name[H_0], name[Om0], name[Ode0]]]
return[name[lensCosmo].D_dt] | keyword[def] identifier[D_dt] ( identifier[self] , identifier[H_0] , identifier[Om0] , identifier[Ode0] = keyword[None] ):
literal[string]
identifier[lensCosmo] = identifier[self] . identifier[_get_cosom] ( identifier[H_0] , identifier[Om0] , identifier[Ode0] )
keyword[return] identifier[lensCosmo] . identifier[D_dt] | def D_dt(self, H_0, Om0, Ode0=None):
"""
time delay distance
:param H_0: Hubble parameter [km/s/Mpc]
    :param Om0: normalized matter density at present time
    :param Ode0: normalized dark energy density at present time (optional)
:return: float [Mpc]
"""
lensCosmo = self._get_cosom(H_0, Om0, Ode0)
return lensCosmo.D_dt |
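For reference, the time-delay distance is D_dt = (1 + z_d) * D_d * D_s / D_ds, built from three angular diameter distances. A standalone sketch with astropy (the redshifts and cosmology values are illustrative, and this bypasses the class's _get_cosom helper):

from astropy.cosmology import FlatLambdaCDM

cosmo = FlatLambdaCDM(H0=70.0, Om0=0.3)
z_d, z_s = 0.5, 2.0  # lens and source redshifts
D_d = cosmo.angular_diameter_distance(z_d)
D_s = cosmo.angular_diameter_distance(z_s)
D_ds = cosmo.angular_diameter_distance_z1z2(z_d, z_s)
print((1.0 + z_d) * D_d * D_s / D_ds)  # a Quantity in Mpc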
def del_option_by_number(self, number):
"""
Delete an option from the message by number
:type number: Integer
        :param number: option number
"""
for o in list(self._options):
assert isinstance(o, Option)
if o.number == number:
self._options.remove(o) | def function[del_option_by_number, parameter[self, number]]:
constant[
Delete an option from the message by number
:type number: Integer
        :param number: option number
]
for taget[name[o]] in starred[call[name[list], parameter[name[self]._options]]] begin[:]
assert[call[name[isinstance], parameter[name[o], name[Option]]]]
if compare[name[o].number equal[==] name[number]] begin[:]
call[name[self]._options.remove, parameter[name[o]]] | keyword[def] identifier[del_option_by_number] ( identifier[self] , identifier[number] ):
literal[string]
keyword[for] identifier[o] keyword[in] identifier[list] ( identifier[self] . identifier[_options] ):
keyword[assert] identifier[isinstance] ( identifier[o] , identifier[Option] )
keyword[if] identifier[o] . identifier[number] == identifier[number] :
identifier[self] . identifier[_options] . identifier[remove] ( identifier[o] ) | def del_option_by_number(self, number):
"""
Delete an option from the message by number
:type number: Integer
        :param number: option number
"""
for o in list(self._options):
assert isinstance(o, Option)
if o.number == number:
self._options.remove(o) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['o']] |
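A hedged usage sketch of del_option_by_number(); the Message/Option construction below is illustrative (a CoAP-style message), and add_option() is an assumed companion method rather than one shown above:

msg = Message()
opt = Option()
opt.number = 11  # e.g. Uri-Path in CoAP
opt.value = "temperature"
msg.add_option(opt)
msg.del_option_by_number(11)  # removes every option numbered 11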
def updatePools(self,
pool1,
username1,
password1,
pool2=None,
username2=None,
password2=None,
pool3=None,
username3=None,
password3=None):
"""Change the pools of the miner. This call will restart cgminer."""
return self.__post('/api/updatePools',
data={
'Pool1': pool1,
'UserName1': username1,
'Password1': password1,
'Pool2': pool2,
'UserName2': username2,
'Password2': password2,
'Pool3': pool3,
'UserName3': username3,
'Password3': password3,
}) | def function[updatePools, parameter[self, pool1, username1, password1, pool2, username2, password2, pool3, username3, password3]]:
constant[Change the pools of the miner. This call will restart cgminer.]
return[call[name[self].__post, parameter[constant[/api/updatePools]]]] | keyword[def] identifier[updatePools] ( identifier[self] ,
identifier[pool1] ,
identifier[username1] ,
identifier[password1] ,
identifier[pool2] = keyword[None] ,
identifier[username2] = keyword[None] ,
identifier[password2] = keyword[None] ,
identifier[pool3] = keyword[None] ,
identifier[username3] = keyword[None] ,
identifier[password3] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[__post] ( literal[string] ,
identifier[data] ={
literal[string] : identifier[pool1] ,
literal[string] : identifier[username1] ,
literal[string] : identifier[password1] ,
literal[string] : identifier[pool2] ,
literal[string] : identifier[username2] ,
literal[string] : identifier[password2] ,
literal[string] : identifier[pool3] ,
literal[string] : identifier[username3] ,
literal[string] : identifier[password3] ,
}) | def updatePools(self, pool1, username1, password1, pool2=None, username2=None, password2=None, pool3=None, username3=None, password3=None):
"""Change the pools of the miner. This call will restart cgminer."""
return self.__post('/api/updatePools', data={'Pool1': pool1, 'UserName1': username1, 'Password1': password1, 'Pool2': pool2, 'UserName2': username2, 'Password2': password2, 'Pool3': pool3, 'UserName3': username3, 'Password3': password3}) |
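A hedged usage sketch of updatePools(); the client object and pool endpoints are illustrative. Only the first pool is required (the second and third default to None), and the call restarts cgminer as noted in the docstring:

client.updatePools(
    pool1="stratum+tcp://pool.example.com:3333",
    username1="worker1",
    password1="x",
    pool2="stratum+tcp://backup.example.com:3333",
    username2="worker1",
    password2="x",
)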
def package_remove(name):
'''
Remove a "package" on the REST server
'''
DETAILS = _load_state()
DETAILS['packages'].pop(name)
_save_state(DETAILS)
return DETAILS['packages'] | def function[package_remove, parameter[name]]:
constant[
Remove a "package" on the REST server
]
variable[DETAILS] assign[=] call[name[_load_state], parameter[]]
call[call[name[DETAILS]][constant[packages]].pop, parameter[name[name]]]
call[name[_save_state], parameter[name[DETAILS]]]
return[call[name[DETAILS]][constant[packages]]] | keyword[def] identifier[package_remove] ( identifier[name] ):
literal[string]
identifier[DETAILS] = identifier[_load_state] ()
identifier[DETAILS] [ literal[string] ]. identifier[pop] ( identifier[name] )
identifier[_save_state] ( identifier[DETAILS] )
keyword[return] identifier[DETAILS] [ literal[string] ] | def package_remove(name):
"""
Remove a "package" on the REST server
"""
DETAILS = _load_state()
DETAILS['packages'].pop(name)
_save_state(DETAILS)
return DETAILS['packages'] |
def generate_help(config_cls, **kwargs):
"""
Autogenerate a help string for a config class.
If a callable is provided via the "formatter" kwarg it
will be provided with the help dictionaries as an argument
and any other kwargs provided to this function. That callable
should return the help text string.
"""
try:
formatter = kwargs.pop("formatter")
except KeyError:
formatter = _format_help_dicts
help_dicts = _generate_help_dicts(config_cls)
return formatter(help_dicts, **kwargs) | def function[generate_help, parameter[config_cls]]:
constant[
Autogenerate a help string for a config class.
If a callable is provided via the "formatter" kwarg it
will be provided with the help dictionaries as an argument
and any other kwargs provided to this function. That callable
should return the help text string.
]
<ast.Try object at 0x7da18bc73100>
variable[help_dicts] assign[=] call[name[_generate_help_dicts], parameter[name[config_cls]]]
return[call[name[formatter], parameter[name[help_dicts]]]] | keyword[def] identifier[generate_help] ( identifier[config_cls] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
identifier[formatter] = identifier[kwargs] . identifier[pop] ( literal[string] )
keyword[except] identifier[KeyError] :
identifier[formatter] = identifier[_format_help_dicts]
identifier[help_dicts] = identifier[_generate_help_dicts] ( identifier[config_cls] )
keyword[return] identifier[formatter] ( identifier[help_dicts] ,** identifier[kwargs] ) | def generate_help(config_cls, **kwargs):
"""
Autogenerate a help string for a config class.
If a callable is provided via the "formatter" kwarg it
will be provided with the help dictionaries as an argument
and any other kwargs provided to this function. That callable
should return the help text string.
"""
try:
formatter = kwargs.pop('formatter') # depends on [control=['try'], data=[]]
except KeyError:
formatter = _format_help_dicts # depends on [control=['except'], data=[]]
help_dicts = _generate_help_dicts(config_cls)
return formatter(help_dicts, **kwargs) |
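A hedged sketch of the formatter hook in generate_help(): any callable that accepts the help dicts (plus passthrough kwargs) and returns a string can be supplied. The "var_name" key is an assumption about what _generate_help_dicts emits, and AppConfig is a hypothetical config class:

def names_only(help_dicts, **kwargs):
    # One variable name per line.
    return "\n".join(d.get("var_name", "?") for d in help_dicts)

print(generate_help(AppConfig, formatter=names_only))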
def load_window_opener(self, item):
"""Load window opener from JSON."""
window = Window.from_config(self.pyvlx, item)
self.add(window) | def function[load_window_opener, parameter[self, item]]:
constant[Load window opener from JSON.]
variable[window] assign[=] call[name[Window].from_config, parameter[name[self].pyvlx, name[item]]]
call[name[self].add, parameter[name[window]]] | keyword[def] identifier[load_window_opener] ( identifier[self] , identifier[item] ):
literal[string]
identifier[window] = identifier[Window] . identifier[from_config] ( identifier[self] . identifier[pyvlx] , identifier[item] )
identifier[self] . identifier[add] ( identifier[window] ) | def load_window_opener(self, item):
"""Load window opener from JSON."""
window = Window.from_config(self.pyvlx, item)
self.add(window) |
def spec_compliant_decrypt(jwe, jwk, validate_claims=True,
expiry_seconds=None):
""" Decrypts a deserialized :class:`~jose.JWE`
:param jwe: An instance of :class:`~jose.JWE`
:param jwk: A `dict` representing the JWK required to decrypt the content
of the :class:`~jose.JWE`.
:param validate_claims: A `bool` indicating whether or not the `exp`, `iat`
and `nbf` claims should be validated. Defaults to
`True`.
:param expiry_seconds: An `int` containing the JWT expiry in seconds, used
when evaluating the `iat` claim. Defaults to `None`,
which disables `iat` claim validation.
:rtype: :class:`~jose.JWT`
:raises: :class:`~jose.Expired` if the JWT has expired
:raises: :class:`~jose.NotYetValid` if the JWT is not yet valid
:raises: :class:`~jose.Error` if there is an error decrypting the JWE
"""
protected_header, encrypted_key, iv, ciphertext, authentication_tag = map(
b64decode_url, jwe
)
header = json_decode(protected_header)
if not _verify_header(header):
raise Error('Header is invalid')
alg = header[HEADER_ALG]
enc = header[HEADER_ENC]
# decrypt cek
encryption_key = _decrypt_key(encrypted_key, jwk, alg)
mac_key, enc_key = _parse_encryption_keys(encryption_key, enc)
# verify authentication tag
expected_tag = _generate_authentication_tag(
mac_key, json_encode(header), ciphertext, iv, enc
)
if not const_compare(expected_tag, authentication_tag):
raise Error('Mismatched authentication tags')
# decrypt body
((_, decipher), _), _ = JWA[enc]
# http://tools.ietf.org/html/rfc7516#section-5.1 step 11
M = decipher(ciphertext, enc_key, iv)
if HEADER_ZIP in header:
try:
(_, decompress) = COMPRESSION[header[HEADER_ZIP]]
except KeyError:
raise Error('Unsupported compression algorithm: {}'.format(
header[HEADER_ZIP]))
plaintext = decompress(M)
else:
plaintext = M
claims = json_decode(plaintext)
_validate(claims, validate_claims, expiry_seconds)
return JWT(header, claims) | def function[spec_compliant_decrypt, parameter[jwe, jwk, validate_claims, expiry_seconds]]:
constant[ Decrypts a deserialized :class:`~jose.JWE`
:param jwe: An instance of :class:`~jose.JWE`
:param jwk: A `dict` representing the JWK required to decrypt the content
of the :class:`~jose.JWE`.
:param validate_claims: A `bool` indicating whether or not the `exp`, `iat`
and `nbf` claims should be validated. Defaults to
`True`.
:param expiry_seconds: An `int` containing the JWT expiry in seconds, used
when evaluating the `iat` claim. Defaults to `None`,
which disables `iat` claim validation.
:rtype: :class:`~jose.JWT`
:raises: :class:`~jose.Expired` if the JWT has expired
:raises: :class:`~jose.NotYetValid` if the JWT is not yet valid
:raises: :class:`~jose.Error` if there is an error decrypting the JWE
]
<ast.Tuple object at 0x7da1b040f4f0> assign[=] call[name[map], parameter[name[b64decode_url], name[jwe]]]
variable[header] assign[=] call[name[json_decode], parameter[name[protected_header]]]
if <ast.UnaryOp object at 0x7da1b040de40> begin[:]
<ast.Raise object at 0x7da1b040e440>
variable[alg] assign[=] call[name[header]][name[HEADER_ALG]]
variable[enc] assign[=] call[name[header]][name[HEADER_ENC]]
variable[encryption_key] assign[=] call[name[_decrypt_key], parameter[name[encrypted_key], name[jwk], name[alg]]]
<ast.Tuple object at 0x7da1b0579690> assign[=] call[name[_parse_encryption_keys], parameter[name[encryption_key], name[enc]]]
variable[expected_tag] assign[=] call[name[_generate_authentication_tag], parameter[name[mac_key], call[name[json_encode], parameter[name[header]]], name[ciphertext], name[iv], name[enc]]]
if <ast.UnaryOp object at 0x7da18eb57eb0> begin[:]
<ast.Raise object at 0x7da18eb57280>
<ast.Tuple object at 0x7da18eb57070> assign[=] call[name[JWA]][name[enc]]
variable[M] assign[=] call[name[decipher], parameter[name[ciphertext], name[enc_key], name[iv]]]
if compare[name[HEADER_ZIP] in name[header]] begin[:]
<ast.Try object at 0x7da18eb54a00>
variable[plaintext] assign[=] call[name[decompress], parameter[name[M]]]
variable[claims] assign[=] call[name[json_decode], parameter[name[plaintext]]]
call[name[_validate], parameter[name[claims], name[validate_claims], name[expiry_seconds]]]
return[call[name[JWT], parameter[name[header], name[claims]]]] | keyword[def] identifier[spec_compliant_decrypt] ( identifier[jwe] , identifier[jwk] , identifier[validate_claims] = keyword[True] ,
identifier[expiry_seconds] = keyword[None] ):
literal[string]
identifier[protected_header] , identifier[encrypted_key] , identifier[iv] , identifier[ciphertext] , identifier[authentication_tag] = identifier[map] (
identifier[b64decode_url] , identifier[jwe]
)
identifier[header] = identifier[json_decode] ( identifier[protected_header] )
keyword[if] keyword[not] identifier[_verify_header] ( identifier[header] ):
keyword[raise] identifier[Error] ( literal[string] )
identifier[alg] = identifier[header] [ identifier[HEADER_ALG] ]
identifier[enc] = identifier[header] [ identifier[HEADER_ENC] ]
identifier[encryption_key] = identifier[_decrypt_key] ( identifier[encrypted_key] , identifier[jwk] , identifier[alg] )
identifier[mac_key] , identifier[enc_key] = identifier[_parse_encryption_keys] ( identifier[encryption_key] , identifier[enc] )
identifier[expected_tag] = identifier[_generate_authentication_tag] (
identifier[mac_key] , identifier[json_encode] ( identifier[header] ), identifier[ciphertext] , identifier[iv] , identifier[enc]
)
keyword[if] keyword[not] identifier[const_compare] ( identifier[expected_tag] , identifier[authentication_tag] ):
keyword[raise] identifier[Error] ( literal[string] )
(( identifier[_] , identifier[decipher] ), identifier[_] ), identifier[_] = identifier[JWA] [ identifier[enc] ]
identifier[M] = identifier[decipher] ( identifier[ciphertext] , identifier[enc_key] , identifier[iv] )
keyword[if] identifier[HEADER_ZIP] keyword[in] identifier[header] :
keyword[try] :
( identifier[_] , identifier[decompress] )= identifier[COMPRESSION] [ identifier[header] [ identifier[HEADER_ZIP] ]]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[Error] ( literal[string] . identifier[format] (
identifier[header] [ identifier[HEADER_ZIP] ]))
identifier[plaintext] = identifier[decompress] ( identifier[M] )
keyword[else] :
identifier[plaintext] = identifier[M]
identifier[claims] = identifier[json_decode] ( identifier[plaintext] )
identifier[_validate] ( identifier[claims] , identifier[validate_claims] , identifier[expiry_seconds] )
keyword[return] identifier[JWT] ( identifier[header] , identifier[claims] ) | def spec_compliant_decrypt(jwe, jwk, validate_claims=True, expiry_seconds=None):
""" Decrypts a deserialized :class:`~jose.JWE`
:param jwe: An instance of :class:`~jose.JWE`
:param jwk: A `dict` representing the JWK required to decrypt the content
of the :class:`~jose.JWE`.
:param validate_claims: A `bool` indicating whether or not the `exp`, `iat`
and `nbf` claims should be validated. Defaults to
`True`.
:param expiry_seconds: An `int` containing the JWT expiry in seconds, used
when evaluating the `iat` claim. Defaults to `None`,
which disables `iat` claim validation.
:rtype: :class:`~jose.JWT`
:raises: :class:`~jose.Expired` if the JWT has expired
:raises: :class:`~jose.NotYetValid` if the JWT is not yet valid
:raises: :class:`~jose.Error` if there is an error decrypting the JWE
"""
(protected_header, encrypted_key, iv, ciphertext, authentication_tag) = map(b64decode_url, jwe)
header = json_decode(protected_header)
if not _verify_header(header):
raise Error('Header is invalid') # depends on [control=['if'], data=[]]
alg = header[HEADER_ALG]
enc = header[HEADER_ENC]
# decrypt cek
encryption_key = _decrypt_key(encrypted_key, jwk, alg)
(mac_key, enc_key) = _parse_encryption_keys(encryption_key, enc)
# verify authentication tag
expected_tag = _generate_authentication_tag(mac_key, json_encode(header), ciphertext, iv, enc)
if not const_compare(expected_tag, authentication_tag):
raise Error('Mismatched authentication tags') # depends on [control=['if'], data=[]]
# decrypt body
(((_, decipher), _), _) = JWA[enc]
# http://tools.ietf.org/html/rfc7516#section-5.1 step 11
M = decipher(ciphertext, enc_key, iv)
if HEADER_ZIP in header:
try:
(_, decompress) = COMPRESSION[header[HEADER_ZIP]] # depends on [control=['try'], data=[]]
except KeyError:
raise Error('Unsupported compression algorithm: {}'.format(header[HEADER_ZIP])) # depends on [control=['except'], data=[]]
plaintext = decompress(M) # depends on [control=['if'], data=['HEADER_ZIP', 'header']]
else:
plaintext = M
claims = json_decode(plaintext)
_validate(claims, validate_claims, expiry_seconds)
return JWT(header, claims) |
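# Hedged sketch of the mac/enc key split that _parse_encryption_keys performs for
# AES-CBC + HMAC content encryption: per RFC 7518, the first half of the content
# encryption key is the HMAC key and the second half is the AES key. The enc
# names and byte sizes below are illustrative, not taken from the record above.
def split_cek(cek, enc='A128CBC-HS256'):
    sizes = {'A128CBC-HS256': 32, 'A192CBC-HS384': 48, 'A256CBC-HS512': 64}
    expected = sizes[enc]
    if len(cek) != expected:
        raise ValueError('CEK for %s must be %d bytes' % (enc, expected))
    half = expected // 2
    return cek[:half], cek[half:]  # (mac_key, enc_key)

mac_key, enc_key = split_cek(b'\x00' * 32)
assert len(mac_key) == len(enc_key) == 16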
def get_included_resources(request, serializer=None):
""" Build a list of included resources. """
include_resources_param = request.query_params.get('include') if request else None
if include_resources_param:
return include_resources_param.split(',')
else:
return get_default_included_resources_from_serializer(serializer) | def function[get_included_resources, parameter[request, serializer]]:
constant[ Build a list of included resources. ]
variable[include_resources_param] assign[=] <ast.IfExp object at 0x7da1b180c760>
if name[include_resources_param] begin[:]
return[call[name[include_resources_param].split, parameter[constant[,]]]] | keyword[def] identifier[get_included_resources] ( identifier[request] , identifier[serializer] = keyword[None] ):
literal[string]
identifier[include_resources_param] = identifier[request] . identifier[query_params] . identifier[get] ( literal[string] ) keyword[if] identifier[request] keyword[else] keyword[None]
keyword[if] identifier[include_resources_param] :
keyword[return] identifier[include_resources_param] . identifier[split] ( literal[string] )
keyword[else] :
keyword[return] identifier[get_default_included_resources_from_serializer] ( identifier[serializer] ) | def get_included_resources(request, serializer=None):
""" Build a list of included resources. """
include_resources_param = request.query_params.get('include') if request else None
if include_resources_param:
return include_resources_param.split(',') # depends on [control=['if'], data=[]]
else:
return get_default_included_resources_from_serializer(serializer) |
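# Hedged usage sketch for get_included_resources, assuming the function from the
# row above is in scope. The stub below stands in for a Django REST Framework
# request, which exposes parsed query parameters via request.query_params; the
# include paths are arbitrary examples.
class _StubRequest(object):
    def __init__(self, params):
        self.query_params = params

req = _StubRequest({'include': 'author,comments.author'})
# With an 'include' parameter present, the function returns it split on ','.
assert get_included_resources(req) == ['author', 'comments.author']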
def post(method, hmc, uri, uri_parms, body, logon_required,
wait_for_completion):
"""Operation: Delete Storage Group."""
assert wait_for_completion is True # async not supported yet
# The URI is a POST operation, so we need to construct the SG URI
storage_group_oid = uri_parms[0]
storage_group_uri = '/api/storage-groups/' + storage_group_oid
try:
storage_group = hmc.lookup_by_uri(storage_group_uri)
except KeyError:
raise InvalidResourceError(method, uri)
# TODO: Check that the SG is detached from any partitions
# Reflect the result of deleting the storage_group
storage_group.manager.remove(storage_group.oid) | def function[post, parameter[method, hmc, uri, uri_parms, body, logon_required, wait_for_completion]]:
constant[Operation: Delete Storage Group.]
assert[compare[name[wait_for_completion] is constant[True]]]
variable[storage_group_oid] assign[=] call[name[uri_parms]][constant[0]]
variable[storage_group_uri] assign[=] binary_operation[constant[/api/storage-groups/] + name[storage_group_oid]]
<ast.Try object at 0x7da1b049b6a0>
call[name[storage_group].manager.remove, parameter[name[storage_group].oid]] | keyword[def] identifier[post] ( identifier[method] , identifier[hmc] , identifier[uri] , identifier[uri_parms] , identifier[body] , identifier[logon_required] ,
identifier[wait_for_completion] ):
literal[string]
keyword[assert] identifier[wait_for_completion] keyword[is] keyword[True]
identifier[storage_group_oid] = identifier[uri_parms] [ literal[int] ]
identifier[storage_group_uri] = literal[string] + identifier[storage_group_oid]
keyword[try] :
identifier[storage_group] = identifier[hmc] . identifier[lookup_by_uri] ( identifier[storage_group_uri] )
keyword[except] identifier[KeyError] :
keyword[raise] identifier[InvalidResourceError] ( identifier[method] , identifier[uri] )
identifier[storage_group] . identifier[manager] . identifier[remove] ( identifier[storage_group] . identifier[oid] ) | def post(method, hmc, uri, uri_parms, body, logon_required, wait_for_completion):
"""Operation: Delete Storage Group."""
assert wait_for_completion is True # async not supported yet
# The URI is a POST operation, so we need to construct the SG URI
storage_group_oid = uri_parms[0]
storage_group_uri = '/api/storage-groups/' + storage_group_oid
try:
storage_group = hmc.lookup_by_uri(storage_group_uri) # depends on [control=['try'], data=[]]
except KeyError:
raise InvalidResourceError(method, uri) # depends on [control=['except'], data=[]]
# TODO: Check that the SG is detached from any partitions
# Reflect the result of deleting the storage_group
storage_group.manager.remove(storage_group.oid) |
def _construct_url(self, base_api, params):
"""
Construct geocoding request url. Overridden.
:param str base_api: Geocoding function base address - self.api
or self.reverse_api.
:param dict params: Geocoding params.
:return: string URL.
"""
params['key'] = self.api_key
return super(OpenMapQuest, self)._construct_url(base_api, params) | def function[_construct_url, parameter[self, base_api, params]]:
constant[
Construct geocoding request url. Overridden.
:param str base_api: Geocoding function base address - self.api
or self.reverse_api.
:param dict params: Geocoding params.
:return: string URL.
]
call[name[params]][constant[key]] assign[=] name[self].api_key
return[call[call[name[super], parameter[name[OpenMapQuest], name[self]]]._construct_url, parameter[name[base_api], name[params]]]] | keyword[def] identifier[_construct_url] ( identifier[self] , identifier[base_api] , identifier[params] ):
literal[string]
identifier[params] [ literal[string] ]= identifier[self] . identifier[api_key]
keyword[return] identifier[super] ( identifier[OpenMapQuest] , identifier[self] ). identifier[_construct_url] ( identifier[base_api] , identifier[params] ) | def _construct_url(self, base_api, params):
"""
Construct geocoding request url. Overridden.
:param str base_api: Geocoding function base address - self.api
or self.reverse_api.
:param dict params: Geocoding params.
:return: string URL.
"""
params['key'] = self.api_key
return super(OpenMapQuest, self)._construct_url(base_api, params) |
def _get_ids_from_ip(self, ip): # pylint: disable=inconsistent-return-statements
"""Returns list of matching hardware IDs for a given ip address."""
try:
# Does it look like an ip address?
socket.inet_aton(ip)
except socket.error:
return []
# Find the server via ip address. First try public ip, then private
results = self.list_hardware(public_ip=ip, mask="id")
if results:
return [result['id'] for result in results]
results = self.list_hardware(private_ip=ip, mask="id")
if results:
return [result['id'] for result in results] | def function[_get_ids_from_ip, parameter[self, ip]]:
constant[Returns list of matching hardware IDs for a given ip address.]
<ast.Try object at 0x7da207f9a2c0>
variable[results] assign[=] call[name[self].list_hardware, parameter[]]
if name[results] begin[:]
return[<ast.ListComp object at 0x7da207f98940>]
variable[results] assign[=] call[name[self].list_hardware, parameter[]]
if name[results] begin[:]
return[<ast.ListComp object at 0x7da207f98280>] | keyword[def] identifier[_get_ids_from_ip] ( identifier[self] , identifier[ip] ):
literal[string]
keyword[try] :
identifier[socket] . identifier[inet_aton] ( identifier[ip] )
keyword[except] identifier[socket] . identifier[error] :
keyword[return] []
identifier[results] = identifier[self] . identifier[list_hardware] ( identifier[public_ip] = identifier[ip] , identifier[mask] = literal[string] )
keyword[if] identifier[results] :
keyword[return] [ identifier[result] [ literal[string] ] keyword[for] identifier[result] keyword[in] identifier[results] ]
identifier[results] = identifier[self] . identifier[list_hardware] ( identifier[private_ip] = identifier[ip] , identifier[mask] = literal[string] )
keyword[if] identifier[results] :
keyword[return] [ identifier[result] [ literal[string] ] keyword[for] identifier[result] keyword[in] identifier[results] ] | def _get_ids_from_ip(self, ip): # pylint: disable=inconsistent-return-statements
'Returns list of matching hardware IDs for a given ip address.'
try:
# Does it look like an ip address?
socket.inet_aton(ip) # depends on [control=['try'], data=[]]
except socket.error:
return [] # depends on [control=['except'], data=[]]
# Find the server via ip address. First try public ip, then private
results = self.list_hardware(public_ip=ip, mask='id')
if results:
return [result['id'] for result in results] # depends on [control=['if'], data=[]]
results = self.list_hardware(private_ip=ip, mask='id')
if results:
return [result['id'] for result in results] # depends on [control=['if'], data=[]] |
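# A quick standalone illustration of the socket.inet_aton check used above to
# decide whether the argument even looks like an IPv4 address before querying.
import socket

def looks_like_ipv4(value):
    try:
        socket.inet_aton(value)
        return True
    except socket.error:
        return False

assert looks_like_ipv4('10.0.0.1')
assert not looks_like_ipv4('not-an-ip')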
def get_standings(self, league):
"""Queries the API and gets the standings for a particular league"""
league_id = self.league_ids[league]
try:
req = self._get('competitions/{id}/standings'.format(
id=league_id))
self.writer.standings(req.json(), league)
except APIErrorException:
# Click handles incorrect League codes so this will only come up
        # if that league does not have standings available, e.g. Champions League
        click.secho("No standings available for {league}.".format(league=league),
fg="red", bold=True) | def function[get_standings, parameter[self, league]]:
constant[Queries the API and gets the standings for a particular league]
variable[league_id] assign[=] call[name[self].league_ids][name[league]]
<ast.Try object at 0x7da2054a6b90> | keyword[def] identifier[get_standings] ( identifier[self] , identifier[league] ):
literal[string]
identifier[league_id] = identifier[self] . identifier[league_ids] [ identifier[league] ]
keyword[try] :
identifier[req] = identifier[self] . identifier[_get] ( literal[string] . identifier[format] (
identifier[id] = identifier[league_id] ))
identifier[self] . identifier[writer] . identifier[standings] ( identifier[req] . identifier[json] (), identifier[league] )
keyword[except] identifier[APIErrorException] :
identifier[click] . identifier[secho] ( literal[string] . identifier[format] ( identifier[league] = identifier[league] ),
identifier[fg] = literal[string] , identifier[bold] = keyword[True] ) | def get_standings(self, league):
"""Queries the API and gets the standings for a particular league"""
league_id = self.league_ids[league]
try:
req = self._get('competitions/{id}/standings'.format(id=league_id))
self.writer.standings(req.json(), league) # depends on [control=['try'], data=[]]
except APIErrorException:
# Click handles incorrect League codes so this will only come up
        # if that league does not have standings available, e.g. Champions League
        click.secho('No standings available for {league}.'.format(league=league), fg='red', bold=True) # depends on [control=['except'], data=[]]
def ci(ctx, enable, disable): # pylint:disable=assign-to-new-keyword
"""Enable/Disable CI on this project.
Uses [Caching](/references/polyaxon-cli/#caching)
Example:
\b
```bash
$ polyaxon project ci --enable
```
\b
```bash
$ polyaxon project ci --disable
```
"""
user, project_name = get_project_or_local(ctx.obj.get('project'))
def enable_ci():
try:
PolyaxonClient().project.enable_ci(user, project_name)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not enable CI on project `{}`.'.format(project_name))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success(
'Polyaxon CI was successfully enabled on project: `{}`.'.format(project_name))
def disable_ci():
try:
PolyaxonClient().project.disable_ci(user, project_name)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not disable CI on project `{}`.'.format(project_name))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success(
'Polyaxon CI was successfully disabled on project: `{}`.'.format(project_name))
if enable:
enable_ci()
if disable:
disable_ci() | def function[ci, parameter[ctx, enable, disable]]:
constant[Enable/Disable CI on this project.
Uses [Caching](/references/polyaxon-cli/#caching)
Example:
```bash
$ polyaxon project ci --enable
```
```bash
$ polyaxon project ci --disable
```
]
<ast.Tuple object at 0x7da1afe39420> assign[=] call[name[get_project_or_local], parameter[call[name[ctx].obj.get, parameter[constant[project]]]]]
def function[enable_ci, parameter[]]:
<ast.Try object at 0x7da1afe3a260>
call[name[Printer].print_success, parameter[call[constant[Polyaxon CI was successfully enabled on project: `{}`.].format, parameter[name[project_name]]]]]
def function[disable_ci, parameter[]]:
<ast.Try object at 0x7da1afe39300>
call[name[Printer].print_success, parameter[call[constant[Polyaxon CI was successfully disabled on project: `{}`.].format, parameter[name[project_name]]]]]
if name[enable] begin[:]
call[name[enable_ci], parameter[]]
if name[disable] begin[:]
call[name[disable_ci], parameter[]] | keyword[def] identifier[ci] ( identifier[ctx] , identifier[enable] , identifier[disable] ):
literal[string]
identifier[user] , identifier[project_name] = identifier[get_project_or_local] ( identifier[ctx] . identifier[obj] . identifier[get] ( literal[string] ))
keyword[def] identifier[enable_ci] ():
keyword[try] :
identifier[PolyaxonClient] (). identifier[project] . identifier[enable_ci] ( identifier[user] , identifier[project_name] )
keyword[except] ( identifier[PolyaxonHTTPError] , identifier[PolyaxonShouldExitError] , identifier[PolyaxonClientException] ) keyword[as] identifier[e] :
identifier[Printer] . identifier[print_error] ( literal[string] . identifier[format] ( identifier[project_name] ))
identifier[Printer] . identifier[print_error] ( literal[string] . identifier[format] ( identifier[e] ))
identifier[sys] . identifier[exit] ( literal[int] )
identifier[Printer] . identifier[print_success] (
literal[string] . identifier[format] ( identifier[project_name] ))
keyword[def] identifier[disable_ci] ():
keyword[try] :
identifier[PolyaxonClient] (). identifier[project] . identifier[disable_ci] ( identifier[user] , identifier[project_name] )
keyword[except] ( identifier[PolyaxonHTTPError] , identifier[PolyaxonShouldExitError] , identifier[PolyaxonClientException] ) keyword[as] identifier[e] :
identifier[Printer] . identifier[print_error] ( literal[string] . identifier[format] ( identifier[project_name] ))
identifier[Printer] . identifier[print_error] ( literal[string] . identifier[format] ( identifier[e] ))
identifier[sys] . identifier[exit] ( literal[int] )
identifier[Printer] . identifier[print_success] (
literal[string] . identifier[format] ( identifier[project_name] ))
keyword[if] identifier[enable] :
identifier[enable_ci] ()
keyword[if] identifier[disable] :
identifier[disable_ci] () | def ci(ctx, enable, disable): # pylint:disable=assign-to-new-keyword
'Enable/Disable CI on this project.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Example:\n\n \x08\n ```bash\n $ polyaxon project ci --enable\n ```\n\n \x08\n ```bash\n $ polyaxon project ci --disable\n ```\n '
(user, project_name) = get_project_or_local(ctx.obj.get('project'))
def enable_ci():
try:
PolyaxonClient().project.enable_ci(user, project_name) # depends on [control=['try'], data=[]]
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not enable CI on project `{}`.'.format(project_name))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1) # depends on [control=['except'], data=['e']]
Printer.print_success('Polyaxon CI was successfully enabled on project: `{}`.'.format(project_name))
def disable_ci():
try:
PolyaxonClient().project.disable_ci(user, project_name) # depends on [control=['try'], data=[]]
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not disable CI on project `{}`.'.format(project_name))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1) # depends on [control=['except'], data=['e']]
Printer.print_success('Polyaxon CI was successfully disabled on project: `{}`.'.format(project_name))
if enable:
enable_ci() # depends on [control=['if'], data=[]]
if disable:
disable_ci() # depends on [control=['if'], data=[]] |
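# Minimal sketch of the closure-based enable/disable toggle used above, with the
# Polyaxon client replaced by a stub; the stub class and its method names are
# assumptions made for illustration only.
import sys

class _StubProjectApi(object):
    def enable_ci(self, user, project):
        print('CI enabled for %s/%s' % (user, project))

    def disable_ci(self, user, project):
        print('CI disabled for %s/%s' % (user, project))

def toggle_ci(user, project_name, enable=False, disable=False):
    api = _StubProjectApi()

    def _enable():
        try:
            api.enable_ci(user, project_name)
        except Exception as e:
            print('Could not enable CI on `%s`: %s' % (project_name, e))
            sys.exit(1)

    def _disable():
        try:
            api.disable_ci(user, project_name)
        except Exception as e:
            print('Could not disable CI on `%s`: %s' % (project_name, e))
            sys.exit(1)

    if enable:
        _enable()
    if disable:
        _disable()

toggle_ci('alice', 'demo-project', enable=True)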
async def expire(self, name, time):
"""
Set an expire flag on key ``name`` for ``time`` seconds. ``time``
can be represented by an integer or a Python timedelta object.
"""
if isinstance(time, datetime.timedelta):
time = time.seconds + time.days * 24 * 3600
return await self.execute_command('EXPIRE', name, time) | <ast.AsyncFunctionDef object at 0x7da1b07cc5e0> | keyword[async] keyword[def] identifier[expire] ( identifier[self] , identifier[name] , identifier[time] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[time] , identifier[datetime] . identifier[timedelta] ):
identifier[time] = identifier[time] . identifier[seconds] + identifier[time] . identifier[days] * literal[int] * literal[int]
keyword[return] keyword[await] identifier[self] . identifier[execute_command] ( literal[string] , identifier[name] , identifier[time] ) | async def expire(self, name, time):
"""
Set an expire flag on key ``name`` for ``time`` seconds. ``time``
can be represented by an integer or a Python timedelta object.
"""
if isinstance(time, datetime.timedelta):
time = time.seconds + time.days * 24 * 3600 # depends on [control=['if'], data=[]]
return await self.execute_command('EXPIRE', name, time) |
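# The timedelta-to-seconds conversion used above, shown standalone: only the
# days and seconds components are counted, so sub-second precision is dropped.
import datetime

t = datetime.timedelta(days=1, seconds=30)
assert t.seconds + t.days * 24 * 3600 == 86430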
def generate_py(module_name, code, optimizations=None, module_dir=None):
'''python + pythran spec -> py code
Prints and returns the optimized python code.
'''
pm, ir, _, _ = front_middle_end(module_name, code, optimizations,
module_dir)
return pm.dump(Python, ir) | def function[generate_py, parameter[module_name, code, optimizations, module_dir]]:
constant[python + pythran spec -> py code
Prints and returns the optimized python code.
]
<ast.Tuple object at 0x7da1b15ba290> assign[=] call[name[front_middle_end], parameter[name[module_name], name[code], name[optimizations], name[module_dir]]]
return[call[name[pm].dump, parameter[name[Python], name[ir]]]] | keyword[def] identifier[generate_py] ( identifier[module_name] , identifier[code] , identifier[optimizations] = keyword[None] , identifier[module_dir] = keyword[None] ):
literal[string]
identifier[pm] , identifier[ir] , identifier[_] , identifier[_] = identifier[front_middle_end] ( identifier[module_name] , identifier[code] , identifier[optimizations] ,
identifier[module_dir] )
keyword[return] identifier[pm] . identifier[dump] ( identifier[Python] , identifier[ir] ) | def generate_py(module_name, code, optimizations=None, module_dir=None):
"""python + pythran spec -> py code
Prints and returns the optimized python code.
"""
(pm, ir, _, _) = front_middle_end(module_name, code, optimizations, module_dir)
return pm.dump(Python, ir) |
def profile(model_specification, results_directory, process):
"""Run a simulation based on the provided MODEL_SPECIFICATION and profile
the run.
"""
model_specification = Path(model_specification)
results_directory = Path(results_directory)
out_stats_file = results_directory / f'{model_specification.name}'.replace('yaml', 'stats')
command = f'run_simulation("{model_specification}", "{results_directory}")'
cProfile.runctx(command, globals=globals(), locals=locals(), filename=out_stats_file)
if process:
out_txt_file = results_directory / (out_stats_file.name + '.txt')
with open(out_txt_file, 'w') as f:
p = pstats.Stats(str(out_stats_file), stream=f)
p.sort_stats('cumulative')
p.print_stats() | def function[profile, parameter[model_specification, results_directory, process]]:
constant[Run a simulation based on the provided MODEL_SPECIFICATION and profile
the run.
]
variable[model_specification] assign[=] call[name[Path], parameter[name[model_specification]]]
variable[results_directory] assign[=] call[name[Path], parameter[name[results_directory]]]
variable[out_stats_file] assign[=] binary_operation[name[results_directory] / call[<ast.JoinedStr object at 0x7da18bc71600>.replace, parameter[constant[yaml], constant[stats]]]]
variable[command] assign[=] <ast.JoinedStr object at 0x7da18bc70550>
call[name[cProfile].runctx, parameter[name[command]]]
if name[process] begin[:]
variable[out_txt_file] assign[=] binary_operation[name[results_directory] / binary_operation[name[out_stats_file].name + constant[.txt]]]
with call[name[open], parameter[name[out_txt_file], constant[w]]] begin[:]
variable[p] assign[=] call[name[pstats].Stats, parameter[call[name[str], parameter[name[out_stats_file]]]]]
call[name[p].sort_stats, parameter[constant[cumulative]]]
call[name[p].print_stats, parameter[]] | keyword[def] identifier[profile] ( identifier[model_specification] , identifier[results_directory] , identifier[process] ):
literal[string]
identifier[model_specification] = identifier[Path] ( identifier[model_specification] )
identifier[results_directory] = identifier[Path] ( identifier[results_directory] )
identifier[out_stats_file] = identifier[results_directory] / literal[string] . identifier[replace] ( literal[string] , literal[string] )
identifier[command] = literal[string]
identifier[cProfile] . identifier[runctx] ( identifier[command] , identifier[globals] = identifier[globals] (), identifier[locals] = identifier[locals] (), identifier[filename] = identifier[out_stats_file] )
keyword[if] identifier[process] :
identifier[out_txt_file] = identifier[results_directory] /( identifier[out_stats_file] . identifier[name] + literal[string] )
keyword[with] identifier[open] ( identifier[out_txt_file] , literal[string] ) keyword[as] identifier[f] :
identifier[p] = identifier[pstats] . identifier[Stats] ( identifier[str] ( identifier[out_stats_file] ), identifier[stream] = identifier[f] )
identifier[p] . identifier[sort_stats] ( literal[string] )
identifier[p] . identifier[print_stats] () | def profile(model_specification, results_directory, process):
"""Run a simulation based on the provided MODEL_SPECIFICATION and profile
the run.
"""
model_specification = Path(model_specification)
results_directory = Path(results_directory)
out_stats_file = results_directory / f'{model_specification.name}'.replace('yaml', 'stats')
command = f'run_simulation("{model_specification}", "{results_directory}")'
cProfile.runctx(command, globals=globals(), locals=locals(), filename=out_stats_file)
if process:
out_txt_file = results_directory / (out_stats_file.name + '.txt')
with open(out_txt_file, 'w') as f:
p = pstats.Stats(str(out_stats_file), stream=f)
p.sort_stats('cumulative')
p.print_stats() # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]] |
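# Self-contained sketch of the cProfile.runctx / pstats pipeline used above,
# profiling a trivial function instead of a simulation run; the stats file name
# is an illustrative choice.
import cProfile
import pstats

def busy():
    return sum(i * i for i in range(10000))

cProfile.runctx('busy()', globals=globals(), locals=locals(), filename='busy.stats')
stats = pstats.Stats('busy.stats')
stats.sort_stats('cumulative')
stats.print_stats(5)  # show the five entries with the largest cumulative time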
def get_path_relative_to_module(module_file_path, relative_target_path):
"""
Calculate a path relative to the specified module file.
:param module_file_path: The file path to the module.
"""
module_path = os.path.dirname(module_file_path)
path = os.path.join(module_path, relative_target_path)
path = os.path.abspath(path)
return path | def function[get_path_relative_to_module, parameter[module_file_path, relative_target_path]]:
constant[
Calculate a path relative to the specified module file.
:param module_file_path: The file path to the module.
]
variable[module_path] assign[=] call[name[os].path.dirname, parameter[name[module_file_path]]]
variable[path] assign[=] call[name[os].path.join, parameter[name[module_path], name[relative_target_path]]]
variable[path] assign[=] call[name[os].path.abspath, parameter[name[path]]]
return[name[path]] | keyword[def] identifier[get_path_relative_to_module] ( identifier[module_file_path] , identifier[relative_target_path] ):
literal[string]
identifier[module_path] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[module_file_path] )
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[module_path] , identifier[relative_target_path] )
identifier[path] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[path] )
keyword[return] identifier[path] | def get_path_relative_to_module(module_file_path, relative_target_path):
"""
Calculate a path relative to the specified module file.
:param module_file_path: The file path to the module.
"""
module_path = os.path.dirname(module_file_path)
path = os.path.join(module_path, relative_target_path)
path = os.path.abspath(path)
return path |
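# Usage sketch for get_path_relative_to_module on a POSIX system; the concrete
# paths are illustrative assumptions. os.path.abspath normalizes the '..'
# component, yielding a path next to the module's parent directory.
import os

module_file_path = '/opt/app/pkg/module.py'
target = os.path.join(os.path.dirname(module_file_path), '../data/model.bin')
assert os.path.abspath(target) == '/opt/app/data/model.bin'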
def get_smooth_step_function(min_val, max_val, switch_point, smooth_factor):
"""Returns a function that moves smoothly between a minimal value and a
    maximal one as its argument increases from a given switch point to infinity.
    Arguments
    ---------
    min_val: float
        The value the function will return when x=switch_point.
    max_val: float
        The value the function will converge to when x -> infinity.
    switch_point: float
        The point at which the function's value will become min_val. Smaller
        x values will return values smaller than min_val.
    smooth_factor: float
        The bigger this is, the smoother and less cliff-like the function.
Returns
-------
function
The desired smooth function.
"""
dif = max_val - min_val
def _smooth_step(x):
return min_val + dif * tanh((x - switch_point) / smooth_factor)
return _smooth_step | def function[get_smooth_step_function, parameter[min_val, max_val, switch_point, smooth_factor]]:
constant[Returns a function that moves smoothly between a minimal value and a
    maximal one as its argument increases from a given switch point to infinity.
    Arguments
    ---------
    min_val: float
        The value the function will return when x=switch_point.
    max_val: float
        The value the function will converge to when x -> infinity.
    switch_point: float
        The point at which the function's value will become min_val. Smaller
        x values will return values smaller than min_val.
    smooth_factor: float
        The bigger this is, the smoother and less cliff-like the function.
Returns
-------
function
The desired smooth function.
]
variable[dif] assign[=] binary_operation[name[max_val] - name[min_val]]
def function[_smooth_step, parameter[x]]:
return[binary_operation[name[min_val] + binary_operation[name[dif] * call[name[tanh], parameter[binary_operation[binary_operation[name[x] - name[switch_point]] / name[smooth_factor]]]]]]]
return[name[_smooth_step]] | keyword[def] identifier[get_smooth_step_function] ( identifier[min_val] , identifier[max_val] , identifier[switch_point] , identifier[smooth_factor] ):
literal[string]
identifier[dif] = identifier[max_val] - identifier[min_val]
keyword[def] identifier[_smooth_step] ( identifier[x] ):
keyword[return] identifier[min_val] + identifier[dif] * identifier[tanh] (( identifier[x] - identifier[switch_point] )/ identifier[smooth_factor] )
keyword[return] identifier[_smooth_step] | def get_smooth_step_function(min_val, max_val, switch_point, smooth_factor):
"""Returns a function that moves smoothly between a minimal value and a
    maximal one as its argument increases from a given switch point to infinity.
    Arguments
    ---------
    min_val: float
        The value the function will return when x=switch_point.
    max_val: float
        The value the function will converge to when x -> infinity.
    switch_point: float
        The point at which the function's value will become min_val. Smaller
        x values will return values smaller than min_val.
    smooth_factor: float
        The bigger this is, the smoother and less cliff-like the function.
Returns
-------
function
The desired smooth function.
"""
dif = max_val - min_val
def _smooth_step(x):
return min_val + dif * tanh((x - switch_point) / smooth_factor)
return _smooth_step |
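# Worked numeric check of the smooth step: at x == switch_point the tanh term
# is zero, so the function returns min_val; far to the right it approaches
# max_val. The parameter values are arbitrary examples.
from math import tanh

min_val, max_val, switch_point, smooth_factor = 0.0, 1.0, 5.0, 2.0
f = lambda x: min_val + (max_val - min_val) * tanh((x - switch_point) / smooth_factor)
assert abs(f(5.0) - min_val) < 1e-12    # exactly min_val at the switch point
assert abs(f(1000.0) - max_val) < 1e-9  # converges to max_val as x -> infinity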
def read_metadata(self, f, objects, previous_segment=None):
"""Read segment metadata section and update object information"""
if not self.toc["kTocMetaData"]:
try:
self.ordered_objects = previous_segment.ordered_objects
except AttributeError:
raise ValueError(
"kTocMetaData is not set for segment but "
"there is no previous segment")
self.calculate_chunks()
return
if not self.toc["kTocNewObjList"]:
# In this case, there can be a list of new objects that
# are appended, or previous objects can also be repeated
# if their properties change
self.ordered_objects = [
copy(o) for o in previous_segment.ordered_objects]
log.debug("Reading metadata at %d", f.tell())
# First four bytes have number of objects in metadata
num_objects = types.Int32.read(f, self.endianness)
for obj in range(num_objects):
# Read the object path
object_path = types.String.read(f, self.endianness)
# If this is a new segment for an existing object,
# reuse the existing object, otherwise,
# create a new object and add it to the object dictionary
if object_path in objects:
obj = objects[object_path]
else:
obj = TdmsObject(object_path, self.tdms_file)
objects[object_path] = obj
# Add this segment object to the list of segment objects,
# re-using any properties from previous segments.
updating_existing = False
if not self.toc["kTocNewObjList"]:
# Search for the same object from the previous segment
# object list.
obj_index = [
i for i, o in enumerate(self.ordered_objects)
if o.tdms_object is obj]
if len(obj_index) > 0:
updating_existing = True
log.debug("Updating object in segment list")
obj_index = obj_index[0]
segment_obj = self.ordered_objects[obj_index]
if not updating_existing:
if obj._previous_segment_object is not None:
log.debug("Copying previous segment object")
segment_obj = copy(obj._previous_segment_object)
else:
log.debug("Creating a new segment object")
segment_obj = _TdmsSegmentObject(obj, self.endianness)
self.ordered_objects.append(segment_obj)
# Read the metadata for this object, updating any
# data structure information and properties.
segment_obj._read_metadata(f)
obj._previous_segment_object = segment_obj
self.calculate_chunks() | def function[read_metadata, parameter[self, f, objects, previous_segment]]:
constant[Read segment metadata section and update object information]
if <ast.UnaryOp object at 0x7da204962380> begin[:]
<ast.Try object at 0x7da204962ef0>
call[name[self].calculate_chunks, parameter[]]
return[None]
if <ast.UnaryOp object at 0x7da204960940> begin[:]
name[self].ordered_objects assign[=] <ast.ListComp object at 0x7da204962e60>
call[name[log].debug, parameter[constant[Reading metadata at %d], call[name[f].tell, parameter[]]]]
variable[num_objects] assign[=] call[name[types].Int32.read, parameter[name[f], name[self].endianness]]
for taget[name[obj]] in starred[call[name[range], parameter[name[num_objects]]]] begin[:]
variable[object_path] assign[=] call[name[types].String.read, parameter[name[f], name[self].endianness]]
if compare[name[object_path] in name[objects]] begin[:]
variable[obj] assign[=] call[name[objects]][name[object_path]]
variable[updating_existing] assign[=] constant[False]
if <ast.UnaryOp object at 0x7da204961cf0> begin[:]
variable[obj_index] assign[=] <ast.ListComp object at 0x7da204960e50>
if compare[call[name[len], parameter[name[obj_index]]] greater[>] constant[0]] begin[:]
variable[updating_existing] assign[=] constant[True]
call[name[log].debug, parameter[constant[Updating object in segment list]]]
variable[obj_index] assign[=] call[name[obj_index]][constant[0]]
variable[segment_obj] assign[=] call[name[self].ordered_objects][name[obj_index]]
if <ast.UnaryOp object at 0x7da204960fa0> begin[:]
if compare[name[obj]._previous_segment_object is_not constant[None]] begin[:]
call[name[log].debug, parameter[constant[Copying previous segment object]]]
variable[segment_obj] assign[=] call[name[copy], parameter[name[obj]._previous_segment_object]]
call[name[self].ordered_objects.append, parameter[name[segment_obj]]]
call[name[segment_obj]._read_metadata, parameter[name[f]]]
name[obj]._previous_segment_object assign[=] name[segment_obj]
call[name[self].calculate_chunks, parameter[]] | keyword[def] identifier[read_metadata] ( identifier[self] , identifier[f] , identifier[objects] , identifier[previous_segment] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[toc] [ literal[string] ]:
keyword[try] :
identifier[self] . identifier[ordered_objects] = identifier[previous_segment] . identifier[ordered_objects]
keyword[except] identifier[AttributeError] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] )
identifier[self] . identifier[calculate_chunks] ()
keyword[return]
keyword[if] keyword[not] identifier[self] . identifier[toc] [ literal[string] ]:
identifier[self] . identifier[ordered_objects] =[
identifier[copy] ( identifier[o] ) keyword[for] identifier[o] keyword[in] identifier[previous_segment] . identifier[ordered_objects] ]
identifier[log] . identifier[debug] ( literal[string] , identifier[f] . identifier[tell] ())
identifier[num_objects] = identifier[types] . identifier[Int32] . identifier[read] ( identifier[f] , identifier[self] . identifier[endianness] )
keyword[for] identifier[obj] keyword[in] identifier[range] ( identifier[num_objects] ):
identifier[object_path] = identifier[types] . identifier[String] . identifier[read] ( identifier[f] , identifier[self] . identifier[endianness] )
keyword[if] identifier[object_path] keyword[in] identifier[objects] :
identifier[obj] = identifier[objects] [ identifier[object_path] ]
keyword[else] :
identifier[obj] = identifier[TdmsObject] ( identifier[object_path] , identifier[self] . identifier[tdms_file] )
identifier[objects] [ identifier[object_path] ]= identifier[obj]
identifier[updating_existing] = keyword[False]
keyword[if] keyword[not] identifier[self] . identifier[toc] [ literal[string] ]:
identifier[obj_index] =[
identifier[i] keyword[for] identifier[i] , identifier[o] keyword[in] identifier[enumerate] ( identifier[self] . identifier[ordered_objects] )
keyword[if] identifier[o] . identifier[tdms_object] keyword[is] identifier[obj] ]
keyword[if] identifier[len] ( identifier[obj_index] )> literal[int] :
identifier[updating_existing] = keyword[True]
identifier[log] . identifier[debug] ( literal[string] )
identifier[obj_index] = identifier[obj_index] [ literal[int] ]
identifier[segment_obj] = identifier[self] . identifier[ordered_objects] [ identifier[obj_index] ]
keyword[if] keyword[not] identifier[updating_existing] :
keyword[if] identifier[obj] . identifier[_previous_segment_object] keyword[is] keyword[not] keyword[None] :
identifier[log] . identifier[debug] ( literal[string] )
identifier[segment_obj] = identifier[copy] ( identifier[obj] . identifier[_previous_segment_object] )
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] )
identifier[segment_obj] = identifier[_TdmsSegmentObject] ( identifier[obj] , identifier[self] . identifier[endianness] )
identifier[self] . identifier[ordered_objects] . identifier[append] ( identifier[segment_obj] )
identifier[segment_obj] . identifier[_read_metadata] ( identifier[f] )
identifier[obj] . identifier[_previous_segment_object] = identifier[segment_obj]
identifier[self] . identifier[calculate_chunks] () | def read_metadata(self, f, objects, previous_segment=None):
"""Read segment metadata section and update object information"""
if not self.toc['kTocMetaData']:
try:
self.ordered_objects = previous_segment.ordered_objects # depends on [control=['try'], data=[]]
except AttributeError:
raise ValueError('kTocMetaData is not set for segment but there is no previous segment') # depends on [control=['except'], data=[]]
self.calculate_chunks()
return # depends on [control=['if'], data=[]]
if not self.toc['kTocNewObjList']:
# In this case, there can be a list of new objects that
# are appended, or previous objects can also be repeated
# if their properties change
self.ordered_objects = [copy(o) for o in previous_segment.ordered_objects] # depends on [control=['if'], data=[]]
log.debug('Reading metadata at %d', f.tell())
# First four bytes have number of objects in metadata
num_objects = types.Int32.read(f, self.endianness)
for obj in range(num_objects):
# Read the object path
object_path = types.String.read(f, self.endianness)
# If this is a new segment for an existing object,
# reuse the existing object, otherwise,
# create a new object and add it to the object dictionary
if object_path in objects:
obj = objects[object_path] # depends on [control=['if'], data=['object_path', 'objects']]
else:
obj = TdmsObject(object_path, self.tdms_file)
objects[object_path] = obj
# Add this segment object to the list of segment objects,
# re-using any properties from previous segments.
updating_existing = False
if not self.toc['kTocNewObjList']:
# Search for the same object from the previous segment
# object list.
obj_index = [i for (i, o) in enumerate(self.ordered_objects) if o.tdms_object is obj]
if len(obj_index) > 0:
updating_existing = True
log.debug('Updating object in segment list')
obj_index = obj_index[0]
segment_obj = self.ordered_objects[obj_index] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not updating_existing:
if obj._previous_segment_object is not None:
log.debug('Copying previous segment object')
segment_obj = copy(obj._previous_segment_object) # depends on [control=['if'], data=[]]
else:
log.debug('Creating a new segment object')
segment_obj = _TdmsSegmentObject(obj, self.endianness)
self.ordered_objects.append(segment_obj) # depends on [control=['if'], data=[]]
# Read the metadata for this object, updating any
# data structure information and properties.
segment_obj._read_metadata(f)
obj._previous_segment_object = segment_obj # depends on [control=['for'], data=['obj']]
self.calculate_chunks() |
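# Toy sketch of the segment-object reuse rule implemented above: when a
# segment's TOC does not set kTocNewObjList, the previous segment's object list
# is carried over and matching entries are updated in place. The dict layout
# here is a simplification, not the real TDMS structures.
def merge_segment_objects(toc, new_objects, previous_objects):
    if toc.get('kTocNewObjList'):
        return list(new_objects)
    merged = list(previous_objects)
    for obj in new_objects:
        for i, existing in enumerate(merged):
            if existing['path'] == obj['path']:
                merged[i] = obj  # update the existing entry in place
                break
        else:
            merged.append(obj)  # genuinely new object: append it
    return merged

prev = [{'path': "/'group'/'a'"}, {'path': "/'group'/'b'"}]
new = [{'path': "/'group'/'b'", 'updated': True}]
assert len(merge_segment_objects({}, new, prev)) == 2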
def aligned_base():
"""Set of hyperparameters.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps (10min): log(ppl)_eval = 2.60
12.0 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.00
Returns:
a hparams object
"""
hparams = common_hparams.basic_params1()
hparams.hidden_size = 512
hparams.batch_size = 5000
hparams.max_length = 0
hparams.min_length_bucket = 1024
hparams.dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
hparams.label_smoothing = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 2000
hparams.initializer_gain = 1.0
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.shared_embedding_and_softmax_weights = True
hparams.add_hparam("ffn_hidden_sizes", "2048") # Add new ones like this.
hparams.moe_num_experts = 32
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.add_hparam("layers", "timing," + "conv,att,ffn," * 2)
# attention-related flags
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.add_hparam("attention_dropout", 0.0)
hparams.add_hparam("pos", "timing") # timing, none
# moe params. local attention moe.
hparams.add_hparam("attention_local", False)
hparams.add_hparam("attention_moe_k", 2)
hparams.add_hparam("attention_num_experts", 16)
hparams.add_hparam("attention_split_batch", False)
# Key, query and value dimensions for the attention
hparams.add_hparam("attention_kq_size", 128)
hparams.add_hparam("attention_v_size", 256)
# Loss coef for load balancing
hparams.add_hparam("attention_load_balance", 2e-2)
hparams.add_hparam("diet_experts", False)
hparams.add_hparam("memory_efficient_ffn", False)
hparams.add_hparam("local_attention_window", 128)
hparams.add_hparam("attention_num_groups", 8)
hparams.add_hparam("memory_target_density", 2.0)
hparams.add_hparam("multiplicative_overhead", 1.25)
hparams.add_hparam("multiplicative_overhead_eval", 2.0)
hparams.add_hparam("attention_image_summary", True)
# LSH params
hparams.add_hparam("lsh_truncated", True)
# For testing right-masking.
# This is not implemented in all layers.
hparams.add_hparam("mask_right", False)
return hparams | def function[aligned_base, parameter[]]:
constant[Set of hyperparameters.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps (10min): log(ppl)_eval = 2.60
12.0 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.00
Returns:
a hparams object
]
variable[hparams] assign[=] call[name[common_hparams].basic_params1, parameter[]]
name[hparams].hidden_size assign[=] constant[512]
name[hparams].batch_size assign[=] constant[5000]
name[hparams].max_length assign[=] constant[0]
name[hparams].min_length_bucket assign[=] constant[1024]
name[hparams].dropout assign[=] constant[0.0]
name[hparams].layer_prepostprocess_dropout assign[=] constant[0.0]
name[hparams].label_smoothing assign[=] constant[0.0]
name[hparams].clip_grad_norm assign[=] constant[0.0]
name[hparams].optimizer_adam_epsilon assign[=] constant[1e-09]
name[hparams].learning_rate_decay_scheme assign[=] constant[noam]
name[hparams].learning_rate assign[=] constant[0.1]
name[hparams].learning_rate_warmup_steps assign[=] constant[2000]
name[hparams].initializer_gain assign[=] constant[1.0]
name[hparams].initializer assign[=] constant[uniform_unit_scaling]
name[hparams].weight_decay assign[=] constant[0.0]
name[hparams].optimizer_adam_beta1 assign[=] constant[0.9]
name[hparams].optimizer_adam_beta2 assign[=] constant[0.98]
name[hparams].shared_embedding_and_softmax_weights assign[=] constant[True]
call[name[hparams].add_hparam, parameter[constant[ffn_hidden_sizes], constant[2048]]]
name[hparams].moe_num_experts assign[=] constant[32]
name[hparams].layer_preprocess_sequence assign[=] constant[n]
name[hparams].layer_postprocess_sequence assign[=] constant[da]
call[name[hparams].add_hparam, parameter[constant[layers], binary_operation[constant[timing,] + binary_operation[constant[conv,att,ffn,] * constant[2]]]]]
call[name[hparams].add_hparam, parameter[constant[num_heads], constant[8]]]
call[name[hparams].add_hparam, parameter[constant[attention_key_channels], constant[0]]]
call[name[hparams].add_hparam, parameter[constant[attention_value_channels], constant[0]]]
call[name[hparams].add_hparam, parameter[constant[attention_dropout], constant[0.0]]]
call[name[hparams].add_hparam, parameter[constant[pos], constant[timing]]]
call[name[hparams].add_hparam, parameter[constant[attention_local], constant[False]]]
call[name[hparams].add_hparam, parameter[constant[attention_moe_k], constant[2]]]
call[name[hparams].add_hparam, parameter[constant[attention_num_experts], constant[16]]]
call[name[hparams].add_hparam, parameter[constant[attention_split_batch], constant[False]]]
call[name[hparams].add_hparam, parameter[constant[attention_kq_size], constant[128]]]
call[name[hparams].add_hparam, parameter[constant[attention_v_size], constant[256]]]
call[name[hparams].add_hparam, parameter[constant[attention_load_balance], constant[0.02]]]
call[name[hparams].add_hparam, parameter[constant[diet_experts], constant[False]]]
call[name[hparams].add_hparam, parameter[constant[memory_efficient_ffn], constant[False]]]
call[name[hparams].add_hparam, parameter[constant[local_attention_window], constant[128]]]
call[name[hparams].add_hparam, parameter[constant[attention_num_groups], constant[8]]]
call[name[hparams].add_hparam, parameter[constant[memory_target_density], constant[2.0]]]
call[name[hparams].add_hparam, parameter[constant[multiplicative_overhead], constant[1.25]]]
call[name[hparams].add_hparam, parameter[constant[multiplicative_overhead_eval], constant[2.0]]]
call[name[hparams].add_hparam, parameter[constant[attention_image_summary], constant[True]]]
call[name[hparams].add_hparam, parameter[constant[lsh_truncated], constant[True]]]
call[name[hparams].add_hparam, parameter[constant[mask_right], constant[False]]]
return[name[hparams]] | keyword[def] identifier[aligned_base] ():
literal[string]
identifier[hparams] = identifier[common_hparams] . identifier[basic_params1] ()
identifier[hparams] . identifier[hidden_size] = literal[int]
identifier[hparams] . identifier[batch_size] = literal[int]
identifier[hparams] . identifier[max_length] = literal[int]
identifier[hparams] . identifier[min_length_bucket] = literal[int]
identifier[hparams] . identifier[dropout] = literal[int]
identifier[hparams] . identifier[layer_prepostprocess_dropout] = literal[int]
identifier[hparams] . identifier[label_smoothing] = literal[int]
identifier[hparams] . identifier[clip_grad_norm] = literal[int]
identifier[hparams] . identifier[optimizer_adam_epsilon] = literal[int]
identifier[hparams] . identifier[learning_rate_decay_scheme] = literal[string]
identifier[hparams] . identifier[learning_rate] = literal[int]
identifier[hparams] . identifier[learning_rate_warmup_steps] = literal[int]
identifier[hparams] . identifier[initializer_gain] = literal[int]
identifier[hparams] . identifier[initializer] = literal[string]
identifier[hparams] . identifier[weight_decay] = literal[int]
identifier[hparams] . identifier[optimizer_adam_beta1] = literal[int]
identifier[hparams] . identifier[optimizer_adam_beta2] = literal[int]
identifier[hparams] . identifier[shared_embedding_and_softmax_weights] = keyword[True]
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[string] )
identifier[hparams] . identifier[moe_num_experts] = literal[int]
identifier[hparams] . identifier[layer_preprocess_sequence] = literal[string]
identifier[hparams] . identifier[layer_postprocess_sequence] = literal[string]
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[string] + literal[string] * literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[string] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[False] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[False] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[False] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[False] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[True] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[True] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , keyword[False] )
keyword[return] identifier[hparams] | def aligned_base():
"""Set of hyperparameters.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps (10min): log(ppl)_eval = 2.60
12.0 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.00
Returns:
a hparams object
"""
hparams = common_hparams.basic_params1()
hparams.hidden_size = 512
hparams.batch_size = 5000
hparams.max_length = 0
hparams.min_length_bucket = 1024
hparams.dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
hparams.label_smoothing = 0.0
hparams.clip_grad_norm = 0.0 # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-09
hparams.learning_rate_decay_scheme = 'noam'
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 2000
hparams.initializer_gain = 1.0
hparams.initializer = 'uniform_unit_scaling'
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.shared_embedding_and_softmax_weights = True
hparams.add_hparam('ffn_hidden_sizes', '2048') # Add new ones like this.
hparams.moe_num_experts = 32
hparams.layer_preprocess_sequence = 'n'
hparams.layer_postprocess_sequence = 'da'
hparams.add_hparam('layers', 'timing,' + 'conv,att,ffn,' * 2)
# attention-related flags
hparams.add_hparam('num_heads', 8)
hparams.add_hparam('attention_key_channels', 0)
hparams.add_hparam('attention_value_channels', 0)
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.add_hparam('attention_dropout', 0.0)
hparams.add_hparam('pos', 'timing') # timing, none
# moe params. local attention moe.
hparams.add_hparam('attention_local', False)
hparams.add_hparam('attention_moe_k', 2)
hparams.add_hparam('attention_num_experts', 16)
hparams.add_hparam('attention_split_batch', False)
# Key, query and value dimensions for the attention
hparams.add_hparam('attention_kq_size', 128)
hparams.add_hparam('attention_v_size', 256)
# Loss coef for load balancing
hparams.add_hparam('attention_load_balance', 0.02)
hparams.add_hparam('diet_experts', False)
hparams.add_hparam('memory_efficient_ffn', False)
hparams.add_hparam('local_attention_window', 128)
hparams.add_hparam('attention_num_groups', 8)
hparams.add_hparam('memory_target_density', 2.0)
hparams.add_hparam('multiplicative_overhead', 1.25)
hparams.add_hparam('multiplicative_overhead_eval', 2.0)
hparams.add_hparam('attention_image_summary', True)
# LSH params
hparams.add_hparam('lsh_truncated', True)
# For testing right-masking.
# This is not implemented in all layers.
hparams.add_hparam('mask_right', False)
return hparams |
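# Hedged usage sketch: hparams sets like the one above are normally customised
# by overriding fields after construction (this assumes aligned_base and its
# common_hparams dependency are importable); the override values are arbitrary.
hparams = aligned_base()
hparams.hidden_size = 1024
hparams.num_heads = 16
hparams.layers = 'timing,' + 'att,ffn,' * 4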
def __process_action(self, action, file_type):
""" Extension point to populate extra action information after an
action has been created.
"""
if getattr(action, "inject_url", False):
self.__inject_url(action, file_type)
if getattr(action, "inject_ssh_properties", False):
self.__inject_ssh_properties(action) | def function[__process_action, parameter[self, action, file_type]]:
constant[ Extension point to populate extra action information after an
action has been created.
]
if call[name[getattr], parameter[name[action], constant[inject_url], constant[False]]] begin[:]
call[name[self].__inject_url, parameter[name[action], name[file_type]]]
if call[name[getattr], parameter[name[action], constant[inject_ssh_properties], constant[False]]] begin[:]
call[name[self].__inject_ssh_properties, parameter[name[action]]] | keyword[def] identifier[__process_action] ( identifier[self] , identifier[action] , identifier[file_type] ):
literal[string]
keyword[if] identifier[getattr] ( identifier[action] , literal[string] , keyword[False] ):
identifier[self] . identifier[__inject_url] ( identifier[action] , identifier[file_type] )
keyword[if] identifier[getattr] ( identifier[action] , literal[string] , keyword[False] ):
identifier[self] . identifier[__inject_ssh_properties] ( identifier[action] ) | def __process_action(self, action, file_type):
""" Extension point to populate extra action information after an
action has been created.
"""
if getattr(action, 'inject_url', False):
self.__inject_url(action, file_type) # depends on [control=['if'], data=[]]
if getattr(action, 'inject_ssh_properties', False):
self.__inject_ssh_properties(action) # depends on [control=['if'], data=[]] |
def empty_directory(self):
"""Remove all contents of a directory
Including any sub-directories and their contents"""
for child in self.walkfiles():
child.remove()
for child in reversed([d for d in self.walkdirs()]):
if child == self or not child.isdir():
continue
child.rmdir() | def function[empty_directory, parameter[self]]:
    constant[Remove all contents of a directory,
    including any sub-directories and their contents.]
for taget[name[child]] in starred[call[name[self].walkfiles, parameter[]]] begin[:]
call[name[child].remove, parameter[]]
for taget[name[child]] in starred[call[name[reversed], parameter[<ast.ListComp object at 0x7da20c76f3a0>]]] begin[:]
if <ast.BoolOp object at 0x7da20c76eda0> begin[:]
continue
call[name[child].rmdir, parameter[]] | keyword[def] identifier[empty_directory] ( identifier[self] ):
literal[string]
keyword[for] identifier[child] keyword[in] identifier[self] . identifier[walkfiles] ():
identifier[child] . identifier[remove] ()
keyword[for] identifier[child] keyword[in] identifier[reversed] ([ identifier[d] keyword[for] identifier[d] keyword[in] identifier[self] . identifier[walkdirs] ()]):
keyword[if] identifier[child] == identifier[self] keyword[or] keyword[not] identifier[child] . identifier[isdir] ():
keyword[continue]
identifier[child] . identifier[rmdir] () | def empty_directory(self):
"""Remove all contents of a directory
Including any sub-directories and their contents"""
for child in self.walkfiles():
child.remove() # depends on [control=['for'], data=['child']]
for child in reversed([d for d in self.walkdirs()]):
if child == self or not child.isdir():
continue # depends on [control=['if'], data=[]]
child.rmdir() # depends on [control=['for'], data=['child']] |
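# Standard-library sketch of the same bottom-up removal: os.walk with
# topdown=False yields files before their parent directories, mirroring the
# reversed walkdirs() pass above, and leaves the root directory itself intact.
import os

def empty_tree(root):
    for dirpath, dirnames, filenames in os.walk(root, topdown=False):
        for name in filenames:
            os.remove(os.path.join(dirpath, name))
        for name in dirnames:
            os.rmdir(os.path.join(dirpath, name))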
def circular(args):
"""
%prog circular fastafile startpos
    Make a circular genome; startpos is the place to start the sequence. This can
    be determined by mapping to a reference. Self-overlaps are then resolved.
Startpos is 1-based.
"""
from jcvi.assembly.goldenpath import overlap
p = OptionParser(circular.__doc__)
p.add_option("--flip", default=False, action="store_true",
help="Reverse complement the sequence")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, startpos = args
startpos = int(startpos)
key, seq = next(parse_fasta(fastafile))
aseq = seq[startpos:]
bseq = seq[:startpos]
aseqfile, bseqfile = "a.seq", "b.seq"
for f, s in zip((aseqfile, bseqfile), (aseq, bseq)):
fw = must_open(f, "w")
print(">{0}\n{1}".format(f, s), file=fw)
fw.close()
o = overlap([aseqfile, bseqfile])
seq = aseq[:o.qstop] + bseq[o.sstop:]
seq = Seq(seq)
if opts.flip:
seq = seq.reverse_complement()
for f in (aseqfile, bseqfile):
os.remove(f)
fw = must_open(opts.outfile, "w")
rec = SeqRecord(seq, id=key, description="")
SeqIO.write([rec], fw, "fasta")
fw.close() | def function[circular, parameter[args]]:
constant[
%prog circular fastafile startpos
Make a circular genome; startpos is the place to start the sequence. This can
be determined by mapping to a reference. Self overlaps are then resolved.
Startpos is 1-based.
]
from relative_module[jcvi.assembly.goldenpath] import module[overlap]
variable[p] assign[=] call[name[OptionParser], parameter[name[circular].__doc__]]
call[name[p].add_option, parameter[constant[--flip]]]
call[name[p].set_outfile, parameter[]]
<ast.Tuple object at 0x7da18f00fbe0> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da18f00d6c0>]]
<ast.Tuple object at 0x7da18f00d390> assign[=] name[args]
variable[startpos] assign[=] call[name[int], parameter[name[startpos]]]
<ast.Tuple object at 0x7da18f00de10> assign[=] call[name[next], parameter[call[name[parse_fasta], parameter[name[fastafile]]]]]
variable[aseq] assign[=] call[name[seq]][<ast.Slice object at 0x7da18f00cd00>]
variable[bseq] assign[=] call[name[seq]][<ast.Slice object at 0x7da18f00e020>]
<ast.Tuple object at 0x7da18f00e4a0> assign[=] tuple[[<ast.Constant object at 0x7da18f00e350>, <ast.Constant object at 0x7da18f00d8d0>]]
for taget[tuple[[<ast.Name object at 0x7da18f00eef0>, <ast.Name object at 0x7da18f00c460>]]] in starred[call[name[zip], parameter[tuple[[<ast.Name object at 0x7da18f00f430>, <ast.Name object at 0x7da18f00d270>]], tuple[[<ast.Name object at 0x7da18f00c340>, <ast.Name object at 0x7da18f00c220>]]]]] begin[:]
variable[fw] assign[=] call[name[must_open], parameter[name[f], constant[w]]]
call[name[print], parameter[call[constant[>{0}
{1}].format, parameter[name[f], name[s]]]]]
call[name[fw].close, parameter[]]
variable[o] assign[=] call[name[overlap], parameter[list[[<ast.Name object at 0x7da1b09006d0>, <ast.Name object at 0x7da1b09039d0>]]]]
variable[seq] assign[=] binary_operation[call[name[aseq]][<ast.Slice object at 0x7da1b0902350>] + call[name[bseq]][<ast.Slice object at 0x7da1b0900910>]]
variable[seq] assign[=] call[name[Seq], parameter[name[seq]]]
if name[opts].flip begin[:]
variable[seq] assign[=] call[name[seq].reverse_complement, parameter[]]
for taget[name[f]] in starred[tuple[[<ast.Name object at 0x7da1b0902920>, <ast.Name object at 0x7da18f00dc00>]]] begin[:]
call[name[os].remove, parameter[name[f]]]
variable[fw] assign[=] call[name[must_open], parameter[name[opts].outfile, constant[w]]]
variable[rec] assign[=] call[name[SeqRecord], parameter[name[seq]]]
call[name[SeqIO].write, parameter[list[[<ast.Name object at 0x7da18f00e1d0>]], name[fw], constant[fasta]]]
call[name[fw].close, parameter[]] | keyword[def] identifier[circular] ( identifier[args] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[assembly] . identifier[goldenpath] keyword[import] identifier[overlap]
identifier[p] = identifier[OptionParser] ( identifier[circular] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[set_outfile] ()
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[fastafile] , identifier[startpos] = identifier[args]
identifier[startpos] = identifier[int] ( identifier[startpos] )
identifier[key] , identifier[seq] = identifier[next] ( identifier[parse_fasta] ( identifier[fastafile] ))
identifier[aseq] = identifier[seq] [ identifier[startpos] :]
identifier[bseq] = identifier[seq] [: identifier[startpos] ]
identifier[aseqfile] , identifier[bseqfile] = literal[string] , literal[string]
keyword[for] identifier[f] , identifier[s] keyword[in] identifier[zip] (( identifier[aseqfile] , identifier[bseqfile] ),( identifier[aseq] , identifier[bseq] )):
identifier[fw] = identifier[must_open] ( identifier[f] , literal[string] )
identifier[print] ( literal[string] . identifier[format] ( identifier[f] , identifier[s] ), identifier[file] = identifier[fw] )
identifier[fw] . identifier[close] ()
identifier[o] = identifier[overlap] ([ identifier[aseqfile] , identifier[bseqfile] ])
identifier[seq] = identifier[aseq] [: identifier[o] . identifier[qstop] ]+ identifier[bseq] [ identifier[o] . identifier[sstop] :]
identifier[seq] = identifier[Seq] ( identifier[seq] )
keyword[if] identifier[opts] . identifier[flip] :
identifier[seq] = identifier[seq] . identifier[reverse_complement] ()
keyword[for] identifier[f] keyword[in] ( identifier[aseqfile] , identifier[bseqfile] ):
identifier[os] . identifier[remove] ( identifier[f] )
identifier[fw] = identifier[must_open] ( identifier[opts] . identifier[outfile] , literal[string] )
identifier[rec] = identifier[SeqRecord] ( identifier[seq] , identifier[id] = identifier[key] , identifier[description] = literal[string] )
identifier[SeqIO] . identifier[write] ([ identifier[rec] ], identifier[fw] , literal[string] )
identifier[fw] . identifier[close] () | def circular(args):
"""
%prog circular fastafile startpos
Make a circular genome; startpos is the place to start the sequence. This can
be determined by mapping to a reference. Self overlaps are then resolved.
Startpos is 1-based.
"""
from jcvi.assembly.goldenpath import overlap
p = OptionParser(circular.__doc__)
p.add_option('--flip', default=False, action='store_true', help='Reverse complement the sequence')
p.set_outfile()
(opts, args) = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(fastafile, startpos) = args
startpos = int(startpos)
(key, seq) = next(parse_fasta(fastafile))
aseq = seq[startpos:]
bseq = seq[:startpos]
(aseqfile, bseqfile) = ('a.seq', 'b.seq')
for (f, s) in zip((aseqfile, bseqfile), (aseq, bseq)):
fw = must_open(f, 'w')
print('>{0}\n{1}'.format(f, s), file=fw)
fw.close() # depends on [control=['for'], data=[]]
o = overlap([aseqfile, bseqfile])
seq = aseq[:o.qstop] + bseq[o.sstop:]
seq = Seq(seq)
if opts.flip:
seq = seq.reverse_complement() # depends on [control=['if'], data=[]]
for f in (aseqfile, bseqfile):
os.remove(f) # depends on [control=['for'], data=['f']]
fw = must_open(opts.outfile, 'w')
rec = SeqRecord(seq, id=key, description='')
SeqIO.write([rec], fw, 'fasta')
fw.close() |
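# Minimal illustration of the splice circular() performs: rotate the sequence
# at startpos, then join the halves at the overlap coordinates. The real
# coordinates come from overlap(); FakeOverlap stands in for that result here.
seq = "ACGTACGTAA"
startpos = 4
aseq, bseq = seq[startpos:], seq[:startpos]

class FakeOverlap(object):                   # stand-in for overlap()'s result
    qstop, sstop = len(seq) - startpos, 0    # assume no self-overlap

o = FakeOverlap()
print(aseq[:o.qstop] + bseq[o.sstop:])       # ACGTAAACGT, the rotated genome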
def input(msg="", default="", title="Lackey Input", hidden=False):
""" Creates an input dialog with the specified message and default text.
If `hidden`, creates a password dialog instead. Returns the entered value. """
root = tk.Tk()
input_text = tk.StringVar()
input_text.set(default)
PopupInput(root, msg, title, hidden, input_text)
root.focus_force()
root.mainloop()
return str(input_text.get()) | def function[input, parameter[msg, default, title, hidden]]:
constant[ Creates an input dialog with the specified message and default text.
If `hidden`, creates a password dialog instead. Returns the entered value. ]
variable[root] assign[=] call[name[tk].Tk, parameter[]]
variable[input_text] assign[=] call[name[tk].StringVar, parameter[]]
call[name[input_text].set, parameter[name[default]]]
call[name[PopupInput], parameter[name[root], name[msg], name[title], name[hidden], name[input_text]]]
call[name[root].focus_force, parameter[]]
call[name[root].mainloop, parameter[]]
return[call[name[str], parameter[call[name[input_text].get, parameter[]]]]] | keyword[def] identifier[input] ( identifier[msg] = literal[string] , identifier[default] = literal[string] , identifier[title] = literal[string] , identifier[hidden] = keyword[False] ):
literal[string]
identifier[root] = identifier[tk] . identifier[Tk] ()
identifier[input_text] = identifier[tk] . identifier[StringVar] ()
identifier[input_text] . identifier[set] ( identifier[default] )
identifier[PopupInput] ( identifier[root] , identifier[msg] , identifier[title] , identifier[hidden] , identifier[input_text] )
identifier[root] . identifier[focus_force] ()
identifier[root] . identifier[mainloop] ()
keyword[return] identifier[str] ( identifier[input_text] . identifier[get] ()) | def input(msg='', default='', title='Lackey Input', hidden=False):
""" Creates an input dialog with the specified message and default text.
If `hidden`, creates a password dialog instead. Returns the entered value. """
root = tk.Tk()
input_text = tk.StringVar()
input_text.set(default)
PopupInput(root, msg, title, hidden, input_text)
root.focus_force()
root.mainloop()
return str(input_text.get()) |
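# Hedged usage sketch for the dialog helper above; it needs a display plus the
# PopupInput class defined elsewhere in this module, so the calls stay
# commented. Argument names follow the signature above.
# name = input("Who are you?", default="guest", title="Login")
# secret = input("Password:", hidden=True)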
def infer_call_result(self, caller, context=None):
"""infer what a class instance is returning when called"""
context = contextmod.bind_context_to_node(context, self)
inferred = False
for node in self._proxied.igetattr("__call__", context):
if node is util.Uninferable or not node.callable():
continue
for res in node.infer_call_result(caller, context):
inferred = True
yield res
if not inferred:
raise exceptions.InferenceError(node=self, caller=caller, context=context) | def function[infer_call_result, parameter[self, caller, context]]:
constant[infer what a class instance is returning when called]
variable[context] assign[=] call[name[contextmod].bind_context_to_node, parameter[name[context], name[self]]]
variable[inferred] assign[=] constant[False]
for taget[name[node]] in starred[call[name[self]._proxied.igetattr, parameter[constant[__call__], name[context]]]] begin[:]
if <ast.BoolOp object at 0x7da1b1e7c2b0> begin[:]
continue
for taget[name[res]] in starred[call[name[node].infer_call_result, parameter[name[caller], name[context]]]] begin[:]
variable[inferred] assign[=] constant[True]
<ast.Yield object at 0x7da1b1da3d60>
if <ast.UnaryOp object at 0x7da1b1da1a80> begin[:]
<ast.Raise object at 0x7da1b1da1090> | keyword[def] identifier[infer_call_result] ( identifier[self] , identifier[caller] , identifier[context] = keyword[None] ):
literal[string]
identifier[context] = identifier[contextmod] . identifier[bind_context_to_node] ( identifier[context] , identifier[self] )
identifier[inferred] = keyword[False]
keyword[for] identifier[node] keyword[in] identifier[self] . identifier[_proxied] . identifier[igetattr] ( literal[string] , identifier[context] ):
keyword[if] identifier[node] keyword[is] identifier[util] . identifier[Uninferable] keyword[or] keyword[not] identifier[node] . identifier[callable] ():
keyword[continue]
keyword[for] identifier[res] keyword[in] identifier[node] . identifier[infer_call_result] ( identifier[caller] , identifier[context] ):
identifier[inferred] = keyword[True]
keyword[yield] identifier[res]
keyword[if] keyword[not] identifier[inferred] :
keyword[raise] identifier[exceptions] . identifier[InferenceError] ( identifier[node] = identifier[self] , identifier[caller] = identifier[caller] , identifier[context] = identifier[context] ) | def infer_call_result(self, caller, context=None):
"""infer what a class instance is returning when called"""
context = contextmod.bind_context_to_node(context, self)
inferred = False
for node in self._proxied.igetattr('__call__', context):
if node is util.Uninferable or not node.callable():
continue # depends on [control=['if'], data=[]]
for res in node.infer_call_result(caller, context):
inferred = True
yield res # depends on [control=['for'], data=['res']] # depends on [control=['for'], data=['node']]
if not inferred:
raise exceptions.InferenceError(node=self, caller=caller, context=context) # depends on [control=['if'], data=[]] |
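# The control pattern of infer_call_result in isolation: consume every usable
# candidate and raise only when none of them produced a result. Names here are
# illustrative, not astroid's API.
def yield_from_candidates(candidates, consume):
    produced = False
    for cand in candidates:
        if cand is None:
            continue
        for res in consume(cand):
            produced = True
            yield res
    if not produced:
        raise LookupError("no candidate yielded a result")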
def clear_job_cache(hours=24):
'''
Forcibly removes job cache folders and files on a minion.
.. versionadded:: 2018.3.0
WARNING: The safest way to clear a minion cache is by first stopping
the minion and then deleting the cache files before restarting it.
CLI Example:
.. code-block:: bash
salt '*' saltutil.clear_job_cache hours=12
'''
threshold = time.time() - hours * 60 * 60
for root, dirs, files in salt.utils.files.safe_walk(os.path.join(__opts__['cachedir'], 'minion_jobs'),
followlinks=False):
for name in dirs:
try:
directory = os.path.join(root, name)
mtime = os.path.getmtime(directory)
if mtime < threshold:
shutil.rmtree(directory)
except OSError as exc:
log.error('Attempt to clear cache with saltutil.clear_job_cache FAILED with: %s', exc)
return False
return True | def function[clear_job_cache, parameter[hours]]:
constant[
Forcibly removes job cache folders and files on a minion.
.. versionadded:: 2018.3.0
WARNING: The safest way to clear a minion cache is by first stopping
the minion and then deleting the cache files before restarting it.
CLI Example:
.. code-block:: bash
salt '*' saltutil.clear_job_cache hours=12
]
variable[threshold] assign[=] binary_operation[call[name[time].time, parameter[]] - binary_operation[binary_operation[name[hours] * constant[60]] * constant[60]]]
for taget[tuple[[<ast.Name object at 0x7da204620040>, <ast.Name object at 0x7da204620550>, <ast.Name object at 0x7da2046226e0>]]] in starred[call[name[salt].utils.files.safe_walk, parameter[call[name[os].path.join, parameter[call[name[__opts__]][constant[cachedir]], constant[minion_jobs]]]]]] begin[:]
for taget[name[name]] in starred[name[dirs]] begin[:]
<ast.Try object at 0x7da204621cc0>
return[constant[True]] | keyword[def] identifier[clear_job_cache] ( identifier[hours] = literal[int] ):
literal[string]
identifier[threshold] = identifier[time] . identifier[time] ()- identifier[hours] * literal[int] * literal[int]
keyword[for] identifier[root] , identifier[dirs] , identifier[files] keyword[in] identifier[salt] . identifier[utils] . identifier[files] . identifier[safe_walk] ( identifier[os] . identifier[path] . identifier[join] ( identifier[__opts__] [ literal[string] ], literal[string] ),
identifier[followlinks] = keyword[False] ):
keyword[for] identifier[name] keyword[in] identifier[dirs] :
keyword[try] :
identifier[directory] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[name] )
identifier[mtime] = identifier[os] . identifier[path] . identifier[getmtime] ( identifier[directory] )
keyword[if] identifier[mtime] < identifier[threshold] :
identifier[shutil] . identifier[rmtree] ( identifier[directory] )
keyword[except] identifier[OSError] keyword[as] identifier[exc] :
identifier[log] . identifier[error] ( literal[string] , identifier[exc] )
keyword[return] keyword[False]
keyword[return] keyword[True] | def clear_job_cache(hours=24):
"""
Forcibly removes job cache folders and files on a minion.
.. versionadded:: 2018.3.0
WARNING: The safest way to clear a minion cache is by first stopping
the minion and then deleting the cache files before restarting it.
CLI Example:
.. code-block:: bash
salt '*' saltutil.clear_job_cache hours=12
"""
threshold = time.time() - hours * 60 * 60
for (root, dirs, files) in salt.utils.files.safe_walk(os.path.join(__opts__['cachedir'], 'minion_jobs'), followlinks=False):
for name in dirs:
try:
directory = os.path.join(root, name)
mtime = os.path.getmtime(directory)
if mtime < threshold:
shutil.rmtree(directory) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except OSError as exc:
log.error('Attempt to clear cache with saltutil.clear_job_cache FAILED with: %s', exc)
return False # depends on [control=['except'], data=['exc']] # depends on [control=['for'], data=['name']] # depends on [control=['for'], data=[]]
return True |
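# The age test used above, standalone: a cache directory is removed when its
# mtime falls before now minus `hours`.
import time
hours = 24
threshold = time.time() - hours * 60 * 60
mtime = time.time() - 25 * 3600       # pretend the directory is 25 hours old
print(mtime < threshold)              # True -> shutil.rmtree would run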
def tickerId(self, contract_identifier):
"""
returns the tickerId for the symbol or
sets one if it doesn't exist
"""
# contract passed instead of symbol?
symbol = contract_identifier
if isinstance(symbol, Contract):
symbol = self.contractString(symbol)
for tickerId in self.tickerIds:
if symbol == self.tickerIds[tickerId]:
return tickerId
else:
tickerId = len(self.tickerIds)
self.tickerIds[tickerId] = symbol
return tickerId | def function[tickerId, parameter[self, contract_identifier]]:
constant[
returns the tickerId for the symbol or
sets one if it doesn't exits
]
variable[symbol] assign[=] name[contract_identifier]
if call[name[isinstance], parameter[name[symbol], name[Contract]]] begin[:]
variable[symbol] assign[=] call[name[self].contractString, parameter[name[symbol]]]
for taget[name[tickerId]] in starred[name[self].tickerIds] begin[:]
if compare[name[symbol] equal[==] call[name[self].tickerIds][name[tickerId]]] begin[:]
return[name[tickerId]] | keyword[def] identifier[tickerId] ( identifier[self] , identifier[contract_identifier] ):
literal[string]
identifier[symbol] = identifier[contract_identifier]
keyword[if] identifier[isinstance] ( identifier[symbol] , identifier[Contract] ):
identifier[symbol] = identifier[self] . identifier[contractString] ( identifier[symbol] )
keyword[for] identifier[tickerId] keyword[in] identifier[self] . identifier[tickerIds] :
keyword[if] identifier[symbol] == identifier[self] . identifier[tickerIds] [ identifier[tickerId] ]:
keyword[return] identifier[tickerId]
keyword[else] :
identifier[tickerId] = identifier[len] ( identifier[self] . identifier[tickerIds] )
identifier[self] . identifier[tickerIds] [ identifier[tickerId] ]= identifier[symbol]
keyword[return] identifier[tickerId] | def tickerId(self, contract_identifier):
"""
returns the tickerId for the symbol or
sets one if it doesn't exist
"""
# contract passed instead of symbol?
symbol = contract_identifier
if isinstance(symbol, Contract):
symbol = self.contractString(symbol) # depends on [control=['if'], data=[]]
for tickerId in self.tickerIds:
if symbol == self.tickerIds[tickerId]:
return tickerId # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tickerId']]
else:
tickerId = len(self.tickerIds)
self.tickerIds[tickerId] = symbol
return tickerId |
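# Self-contained sketch of the lookup-or-assign registry that tickerId()
# implements (Contract handling omitted); ids are assigned densely from 0.
def ticker_id(ticker_ids, symbol):
    for tid, sym in ticker_ids.items():
        if sym == symbol:
            return tid
    tid = len(ticker_ids)
    ticker_ids[tid] = symbol
    return tid

ids = {}
assert ticker_id(ids, "AAPL") == 0
assert ticker_id(ids, "MSFT") == 1
assert ticker_id(ids, "AAPL") == 0    # idempotent for known symbols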
def reload(self, reload_timeout, save_config):
"""Reload the device."""
PROCEED = re.compile(re.escape("Proceed with reload? [confirm]"))
CONTINUE = re.compile(re.escape("Do you wish to continue?[confirm(y/n)]"))
DONE = re.compile(re.escape("[Done]"))
CONFIGURATION_COMPLETED = re.compile("SYSTEM CONFIGURATION COMPLETED")
CONFIGURATION_IN_PROCESS = re.compile("SYSTEM CONFIGURATION IN PROCESS")
# CONSOLE = re.compile("ios con[0|1]/RS?P[0-1]/CPU0 is now available")
CONSOLE = re.compile("con[0|1]/(?:RS?P)?[0-1]/CPU0 is now available")
CONSOLE_STBY = re.compile("con[0|1]/(?:RS?P)?[0-1]/CPU0 is in standby")
RECONFIGURE_USERNAME_PROMPT = "[Nn][Oo] root-system username is configured"
ROOT_USERNAME_PROMPT = "Enter root-system username\: "
ROOT_PASSWORD_PROMPT = "Enter secret( again)?\: "
# BOOT=disk0:asr9k-os-mbi-6.1.1/0x100305/mbiasr9k-rsp3.vm,1; \
# disk0:asr9k-os-mbi-5.3.4/0x100305/mbiasr9k-rsp3.vm,2;
# Candidate Boot Image num 0 is disk0:asr9k-os-mbi-6.1.1/0x100305/mbiasr9k-rsp3.vm
# Candidate Boot Image num 1 is disk0:asr9k-os-mbi-5.3.4/0x100305/mbiasr9k-rsp3.vm
CANDIDATE_BOOT_IMAGE = "Candidate Boot Image num 0 is .*vm"
NOT_COMMITTED = re.compile(re.escape("Some active software packages are not yet committed. Proceed?[confirm]"))
RELOAD_NA = re.compile("Reload to the ROM monitor disallowed from a telnet line")
# 0 1 2 3 4 5
events = [RELOAD_NA, DONE, PROCEED, CONFIGURATION_IN_PROCESS, self.rommon_re, self.press_return_re,
# 6 7 8 9
CONSOLE, CONFIGURATION_COMPLETED, RECONFIGURE_USERNAME_PROMPT, ROOT_USERNAME_PROMPT,
# 10 11 12 13 14 15
ROOT_PASSWORD_PROMPT, self.username_re, TIMEOUT, EOF, self.reload_cmd, CANDIDATE_BOOT_IMAGE,
# 16 17 18
NOT_COMMITTED, CONSOLE_STBY, CONTINUE]
transitions = [
(RELOAD_NA, [0], -1, a_reload_na, 0),
(CONTINUE, [0], 0, partial(a_send, "y\r"), 0),
# temp for testing
(NOT_COMMITTED, [0], -1, a_not_committed, 10),
(DONE, [0], 2, None, 120),
(PROCEED, [2], 3, partial(a_send, "\r"), reload_timeout),
# this needs to be verified
(self.rommon_re, [0, 3], 3, partial(a_send_boot, "boot"), 600),
(CANDIDATE_BOOT_IMAGE, [0, 3], 4, a_message_callback, 600),
(CONSOLE, [0, 1, 3, 4], 5, None, 600),
# This is required. Otherwise nothing more is displayed on the console
(self.press_return_re, [5], 6, partial(a_send, "\r"), 300),
# configure root username and password the same as used for device connection.
(RECONFIGURE_USERNAME_PROMPT, [6, 7, 10], 8, None, 10),
(ROOT_USERNAME_PROMPT, [8], 9, partial(a_send_username, self.device.node_info.username), 1),
(ROOT_PASSWORD_PROMPT, [9], 9, partial(a_send_password, self.device.node_info.password), 1),
(CONFIGURATION_IN_PROCESS, [6, 9], 10, None, 1200),
(CONFIGURATION_COMPLETED, [10], -1, a_reconnect, 0),
(CONSOLE_STBY, [4], -1, ConnectionStandbyConsole("Standby Console"), 0),
(self.username_re, [7, 9], -1, a_return_and_reconnect, 0),
(TIMEOUT, [0, 1, 2], -1, ConnectionAuthenticationError("Unable to reload"), 0),
(EOF, [0, 1, 2, 3, 4, 5], -1, ConnectionError("Device disconnected"), 0),
(TIMEOUT, [6], 7, partial(a_send, "\r"), 180),
(TIMEOUT, [7], -1, ConnectionAuthenticationError("Unable to reconnect after reloading"), 0),
(TIMEOUT, [10], -1, a_reconnect, 0),
]
fsm = FSM("RELOAD", self.device, events, transitions, timeout=600)
return fsm.run() | def function[reload, parameter[self, reload_timeout, save_config]]:
constant[Reload the device.]
variable[PROCEED] assign[=] call[name[re].compile, parameter[call[name[re].escape, parameter[constant[Proceed with reload? [confirm]]]]]]
variable[CONTINUE] assign[=] call[name[re].compile, parameter[call[name[re].escape, parameter[constant[Do you wish to continue?[confirm(y/n)]]]]]]
variable[DONE] assign[=] call[name[re].compile, parameter[call[name[re].escape, parameter[constant[[Done]]]]]]
variable[CONFIGURATION_COMPLETED] assign[=] call[name[re].compile, parameter[constant[SYSTEM CONFIGURATION COMPLETED]]]
variable[CONFIGURATION_IN_PROCESS] assign[=] call[name[re].compile, parameter[constant[SYSTEM CONFIGURATION IN PROCESS]]]
variable[CONSOLE] assign[=] call[name[re].compile, parameter[constant[con[0|1]/(?:RS?P)?[0-1]/CPU0 is now available]]]
variable[CONSOLE_STBY] assign[=] call[name[re].compile, parameter[constant[con[0|1]/(?:RS?P)?[0-1]/CPU0 is in standby]]]
variable[RECONFIGURE_USERNAME_PROMPT] assign[=] constant[[Nn][Oo] root-system username is configured]
variable[ROOT_USERNAME_PROMPT] assign[=] constant[Enter root-system username\: ]
variable[ROOT_PASSWORD_PROMPT] assign[=] constant[Enter secret( again)?\: ]
variable[CANDIDATE_BOOT_IMAGE] assign[=] constant[Candidate Boot Image num 0 is .*vm]
variable[NOT_COMMITTED] assign[=] call[name[re].compile, parameter[call[name[re].escape, parameter[constant[Some active software packages are not yet committed. Proceed?[confirm]]]]]]
variable[RELOAD_NA] assign[=] call[name[re].compile, parameter[constant[Reload to the ROM monitor disallowed from a telnet line]]]
variable[events] assign[=] list[[<ast.Name object at 0x7da1b2536650>, <ast.Name object at 0x7da1b25364d0>, <ast.Name object at 0x7da1b2535d80>, <ast.Name object at 0x7da1b2534c10>, <ast.Attribute object at 0x7da1b2534550>, <ast.Attribute object at 0x7da1b2536740>, <ast.Name object at 0x7da1b2536410>, <ast.Name object at 0x7da1b2534670>, <ast.Name object at 0x7da1b25369b0>, <ast.Name object at 0x7da1b2534100>, <ast.Name object at 0x7da1b2534580>, <ast.Attribute object at 0x7da1b2536860>, <ast.Name object at 0x7da1b2534610>, <ast.Name object at 0x7da1b2534c70>, <ast.Attribute object at 0x7da1b2534d60>, <ast.Name object at 0x7da1b25354b0>, <ast.Name object at 0x7da1b2534be0>, <ast.Name object at 0x7da1b2535e10>, <ast.Name object at 0x7da1b2535de0>]]
variable[transitions] assign[=] list[[<ast.Tuple object at 0x7da1b25353c0>, <ast.Tuple object at 0x7da1b2536920>, <ast.Tuple object at 0x7da1b25377f0>, <ast.Tuple object at 0x7da1b2535840>, <ast.Tuple object at 0x7da1b25357b0>, <ast.Tuple object at 0x7da1b2535330>, <ast.Tuple object at 0x7da1b2535a20>, <ast.Tuple object at 0x7da1b2534250>, <ast.Tuple object at 0x7da1b25349d0>, <ast.Tuple object at 0x7da1b2536a70>, <ast.Tuple object at 0x7da1b2535f90>, <ast.Tuple object at 0x7da1b2534ac0>, <ast.Tuple object at 0x7da1b25da650>, <ast.Tuple object at 0x7da1b25da9e0>, <ast.Tuple object at 0x7da1b25d8bb0>, <ast.Tuple object at 0x7da1b25da3e0>, <ast.Tuple object at 0x7da1b25d8d30>, <ast.Tuple object at 0x7da1b25d84c0>, <ast.Tuple object at 0x7da1b25da710>, <ast.Tuple object at 0x7da1b25d9030>, <ast.Tuple object at 0x7da1b25db040>]]
variable[fsm] assign[=] call[name[FSM], parameter[constant[RELOAD], name[self].device, name[events], name[transitions]]]
return[call[name[fsm].run, parameter[]]] | keyword[def] identifier[reload] ( identifier[self] , identifier[reload_timeout] , identifier[save_config] ):
literal[string]
identifier[PROCEED] = identifier[re] . identifier[compile] ( identifier[re] . identifier[escape] ( literal[string] ))
identifier[CONTINUE] = identifier[re] . identifier[compile] ( identifier[re] . identifier[escape] ( literal[string] ))
identifier[DONE] = identifier[re] . identifier[compile] ( identifier[re] . identifier[escape] ( literal[string] ))
identifier[CONFIGURATION_COMPLETED] = identifier[re] . identifier[compile] ( literal[string] )
identifier[CONFIGURATION_IN_PROCESS] = identifier[re] . identifier[compile] ( literal[string] )
identifier[CONSOLE] = identifier[re] . identifier[compile] ( literal[string] )
identifier[CONSOLE_STBY] = identifier[re] . identifier[compile] ( literal[string] )
identifier[RECONFIGURE_USERNAME_PROMPT] = literal[string]
identifier[ROOT_USERNAME_PROMPT] = literal[string]
identifier[ROOT_PASSWORD_PROMPT] = literal[string]
identifier[CANDIDATE_BOOT_IMAGE] = literal[string]
identifier[NOT_COMMITTED] = identifier[re] . identifier[compile] ( identifier[re] . identifier[escape] ( literal[string] ))
identifier[RELOAD_NA] = identifier[re] . identifier[compile] ( literal[string] )
identifier[events] =[ identifier[RELOAD_NA] , identifier[DONE] , identifier[PROCEED] , identifier[CONFIGURATION_IN_PROCESS] , identifier[self] . identifier[rommon_re] , identifier[self] . identifier[press_return_re] ,
identifier[CONSOLE] , identifier[CONFIGURATION_COMPLETED] , identifier[RECONFIGURE_USERNAME_PROMPT] , identifier[ROOT_USERNAME_PROMPT] ,
identifier[ROOT_PASSWORD_PROMPT] , identifier[self] . identifier[username_re] , identifier[TIMEOUT] , identifier[EOF] , identifier[self] . identifier[reload_cmd] , identifier[CANDIDATE_BOOT_IMAGE] ,
identifier[NOT_COMMITTED] , identifier[CONSOLE_STBY] , identifier[CONTINUE] ]
identifier[transitions] =[
( identifier[RELOAD_NA] ,[ literal[int] ],- literal[int] , identifier[a_reload_na] , literal[int] ),
( identifier[CONTINUE] ,[ literal[int] ], literal[int] , identifier[partial] ( identifier[a_send] , literal[string] ), literal[int] ),
( identifier[NOT_COMMITTED] ,[ literal[int] ],- literal[int] , identifier[a_not_committed] , literal[int] ),
( identifier[DONE] ,[ literal[int] ], literal[int] , keyword[None] , literal[int] ),
( identifier[PROCEED] ,[ literal[int] ], literal[int] , identifier[partial] ( identifier[a_send] , literal[string] ), identifier[reload_timeout] ),
( identifier[self] . identifier[rommon_re] ,[ literal[int] , literal[int] ], literal[int] , identifier[partial] ( identifier[a_send_boot] , literal[string] ), literal[int] ),
( identifier[CANDIDATE_BOOT_IMAGE] ,[ literal[int] , literal[int] ], literal[int] , identifier[a_message_callback] , literal[int] ),
( identifier[CONSOLE] ,[ literal[int] , literal[int] , literal[int] , literal[int] ], literal[int] , keyword[None] , literal[int] ),
( identifier[self] . identifier[press_return_re] ,[ literal[int] ], literal[int] , identifier[partial] ( identifier[a_send] , literal[string] ), literal[int] ),
( identifier[RECONFIGURE_USERNAME_PROMPT] ,[ literal[int] , literal[int] , literal[int] ], literal[int] , keyword[None] , literal[int] ),
( identifier[ROOT_USERNAME_PROMPT] ,[ literal[int] ], literal[int] , identifier[partial] ( identifier[a_send_username] , identifier[self] . identifier[device] . identifier[node_info] . identifier[username] ), literal[int] ),
( identifier[ROOT_PASSWORD_PROMPT] ,[ literal[int] ], literal[int] , identifier[partial] ( identifier[a_send_password] , identifier[self] . identifier[device] . identifier[node_info] . identifier[password] ), literal[int] ),
( identifier[CONFIGURATION_IN_PROCESS] ,[ literal[int] , literal[int] ], literal[int] , keyword[None] , literal[int] ),
( identifier[CONFIGURATION_COMPLETED] ,[ literal[int] ],- literal[int] , identifier[a_reconnect] , literal[int] ),
( identifier[CONSOLE_STBY] ,[ literal[int] ],- literal[int] , identifier[ConnectionStandbyConsole] ( literal[string] ), literal[int] ),
( identifier[self] . identifier[username_re] ,[ literal[int] , literal[int] ],- literal[int] , identifier[a_return_and_reconnect] , literal[int] ),
( identifier[TIMEOUT] ,[ literal[int] , literal[int] , literal[int] ],- literal[int] , identifier[ConnectionAuthenticationError] ( literal[string] ), literal[int] ),
( identifier[EOF] ,[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ],- literal[int] , identifier[ConnectionError] ( literal[string] ), literal[int] ),
( identifier[TIMEOUT] ,[ literal[int] ], literal[int] , identifier[partial] ( identifier[a_send] , literal[string] ), literal[int] ),
( identifier[TIMEOUT] ,[ literal[int] ],- literal[int] , identifier[ConnectionAuthenticationError] ( literal[string] ), literal[int] ),
( identifier[TIMEOUT] ,[ literal[int] ],- literal[int] , identifier[a_reconnect] , literal[int] ),
]
identifier[fsm] = identifier[FSM] ( literal[string] , identifier[self] . identifier[device] , identifier[events] , identifier[transitions] , identifier[timeout] = literal[int] )
keyword[return] identifier[fsm] . identifier[run] () | def reload(self, reload_timeout, save_config):
"""Reload the device."""
PROCEED = re.compile(re.escape('Proceed with reload? [confirm]'))
CONTINUE = re.compile(re.escape('Do you wish to continue?[confirm(y/n)]'))
DONE = re.compile(re.escape('[Done]'))
CONFIGURATION_COMPLETED = re.compile('SYSTEM CONFIGURATION COMPLETED')
CONFIGURATION_IN_PROCESS = re.compile('SYSTEM CONFIGURATION IN PROCESS')
# CONSOLE = re.compile("ios con[0|1]/RS?P[0-1]/CPU0 is now available")
CONSOLE = re.compile('con[0|1]/(?:RS?P)?[0-1]/CPU0 is now available')
CONSOLE_STBY = re.compile('con[0|1]/(?:RS?P)?[0-1]/CPU0 is in standby')
RECONFIGURE_USERNAME_PROMPT = '[Nn][Oo] root-system username is configured'
ROOT_USERNAME_PROMPT = 'Enter root-system username\\: '
ROOT_PASSWORD_PROMPT = 'Enter secret( again)?\\: '
# BOOT=disk0:asr9k-os-mbi-6.1.1/0x100305/mbiasr9k-rsp3.vm,1; \
# disk0:asr9k-os-mbi-5.3.4/0x100305/mbiasr9k-rsp3.vm,2;
# Candidate Boot Image num 0 is disk0:asr9k-os-mbi-6.1.1/0x100305/mbiasr9k-rsp3.vm
# Candidate Boot Image num 1 is disk0:asr9k-os-mbi-5.3.4/0x100305/mbiasr9k-rsp3.vm
CANDIDATE_BOOT_IMAGE = 'Candidate Boot Image num 0 is .*vm'
NOT_COMMITTED = re.compile(re.escape('Some active software packages are not yet committed. Proceed?[confirm]'))
RELOAD_NA = re.compile('Reload to the ROM monitor disallowed from a telnet line')
# 0 1 2 3 4 5
# 6 7 8 9
# 10 11 12 13 14 15
# 16 17 18
events = [RELOAD_NA, DONE, PROCEED, CONFIGURATION_IN_PROCESS, self.rommon_re, self.press_return_re, CONSOLE, CONFIGURATION_COMPLETED, RECONFIGURE_USERNAME_PROMPT, ROOT_USERNAME_PROMPT, ROOT_PASSWORD_PROMPT, self.username_re, TIMEOUT, EOF, self.reload_cmd, CANDIDATE_BOOT_IMAGE, NOT_COMMITTED, CONSOLE_STBY, CONTINUE]
# temp for testing
# this needs to be verified
# This is required. Otherwise nothing more is displayed on the console
# configure root username and password the same as used for device connection.
transitions = [(RELOAD_NA, [0], -1, a_reload_na, 0), (CONTINUE, [0], 0, partial(a_send, 'y\r'), 0), (NOT_COMMITTED, [0], -1, a_not_committed, 10), (DONE, [0], 2, None, 120), (PROCEED, [2], 3, partial(a_send, '\r'), reload_timeout), (self.rommon_re, [0, 3], 3, partial(a_send_boot, 'boot'), 600), (CANDIDATE_BOOT_IMAGE, [0, 3], 4, a_message_callback, 600), (CONSOLE, [0, 1, 3, 4], 5, None, 600), (self.press_return_re, [5], 6, partial(a_send, '\r'), 300), (RECONFIGURE_USERNAME_PROMPT, [6, 7, 10], 8, None, 10), (ROOT_USERNAME_PROMPT, [8], 9, partial(a_send_username, self.device.node_info.username), 1), (ROOT_PASSWORD_PROMPT, [9], 9, partial(a_send_password, self.device.node_info.password), 1), (CONFIGURATION_IN_PROCESS, [6, 9], 10, None, 1200), (CONFIGURATION_COMPLETED, [10], -1, a_reconnect, 0), (CONSOLE_STBY, [4], -1, ConnectionStandbyConsole('Standby Console'), 0), (self.username_re, [7, 9], -1, a_return_and_reconnect, 0), (TIMEOUT, [0, 1, 2], -1, ConnectionAuthenticationError('Unable to reload'), 0), (EOF, [0, 1, 2, 3, 4, 5], -1, ConnectionError('Device disconnected'), 0), (TIMEOUT, [6], 7, partial(a_send, '\r'), 180), (TIMEOUT, [7], -1, ConnectionAuthenticationError('Unable to reconnect after reloading'), 0), (TIMEOUT, [10], -1, a_reconnect, 0)]
fsm = FSM('RELOAD', self.device, events, transitions, timeout=600)
return fsm.run() |
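# Toy interpreter for the transition-table shape consumed by FSM() above: each
# row is (pattern, valid_source_states, next_state, action, timeout). Real
# patterns are regexes matched against device output; plain strings stand in.
def run_table(transitions, events):
    state = 0
    for event in events:
        for pattern, sources, nxt, action, _timeout in transitions:
            if pattern == event and state in sources:
                if callable(action):
                    action()
                state = nxt
                break
        if state == -1:               # negative state is terminal
            break
    return state

table = [("DONE", [0], 2, None, 120), ("PROCEED", [2], -1, None, 60)]
assert run_table(table, ["DONE", "PROCEED"]) == -1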
def rdist(x, y):
"""Reduced Euclidean distance.
Parameters
----------
x: array of shape (embedding_dim,)
y: array of shape (embedding_dim,)
Returns
-------
The squared euclidean distance between x and y
"""
result = 0.0
for i in range(x.shape[0]):
result += (x[i] - y[i]) ** 2
return result | def function[rdist, parameter[x, y]]:
constant[Reduced Euclidean distance.
Parameters
----------
x: array of shape (embedding_dim,)
y: array of shape (embedding_dim,)
Returns
-------
The squared euclidean distance between x and y
]
variable[result] assign[=] constant[0.0]
for taget[name[i]] in starred[call[name[range], parameter[call[name[x].shape][constant[0]]]]] begin[:]
<ast.AugAssign object at 0x7da18bc73280>
return[name[result]] | keyword[def] identifier[rdist] ( identifier[x] , identifier[y] ):
literal[string]
identifier[result] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[x] . identifier[shape] [ literal[int] ]):
identifier[result] +=( identifier[x] [ identifier[i] ]- identifier[y] [ identifier[i] ])** literal[int]
keyword[return] identifier[result] | def rdist(x, y):
"""Reduced Euclidean distance.
Parameters
----------
x: array of shape (embedding_dim,)
y: array of shape (embedding_dim,)
Returns
-------
The squared euclidean distance between x and y
"""
result = 0.0
for i in range(x.shape[0]):
result += (x[i] - y[i]) ** 2 # depends on [control=['for'], data=['i']]
return result |
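# Quick numeric check: rdist as defined above (numba-free here) returns the
# *squared* Euclidean distance, so it must agree with numpy's norm squared.
import numpy as np
x = np.array([1.0, 2.0, 3.0])
y = np.array([4.0, 6.0, 3.0])
assert rdist(x, y) == np.linalg.norm(x - y) ** 2 == 25.0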
def resolve(self, other: Type) -> Type:
"""See ``PlaceholderType.resolve``"""
if not isinstance(other, NltkComplexType):
return None
resolved_second = NUMBER_TYPE.resolve(other.second)
if not resolved_second:
return None
return CountType(other.first) | def function[resolve, parameter[self, other]]:
constant[See ``PlaceholderType.resolve``]
if <ast.UnaryOp object at 0x7da20c991fc0> begin[:]
return[constant[None]]
variable[resolved_second] assign[=] call[name[NUMBER_TYPE].resolve, parameter[name[other].second]]
if <ast.UnaryOp object at 0x7da20c991330> begin[:]
return[constant[None]]
return[call[name[CountType], parameter[name[other].first]]] | keyword[def] identifier[resolve] ( identifier[self] , identifier[other] : identifier[Type] )-> identifier[Type] :
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[other] , identifier[NltkComplexType] ):
keyword[return] keyword[None]
identifier[resolved_second] = identifier[NUMBER_TYPE] . identifier[resolve] ( identifier[other] . identifier[second] )
keyword[if] keyword[not] identifier[resolved_second] :
keyword[return] keyword[None]
keyword[return] identifier[CountType] ( identifier[other] . identifier[first] ) | def resolve(self, other: Type) -> Type:
"""See ``PlaceholderType.resolve``"""
if not isinstance(other, NltkComplexType):
return None # depends on [control=['if'], data=[]]
resolved_second = NUMBER_TYPE.resolve(other.second)
if not resolved_second:
return None # depends on [control=['if'], data=[]]
return CountType(other.first) |
def run(self):
'''
Initialise the runner function with the passed args, kwargs
'''
# Retrieve args/kwargs here; and fire up the processing using them
try:
transcript = self.fn(*self.args, **self.kwargs)
except:
traceback.print_exc()
exctype, value = sys.exc_info()[:2]
self.signals.error.emit((exctype, value, traceback.format_exc()))
else:
# Return the result of the processing
self.signals.result.emit(transcript)
finally:
# Done
self.signals.finished.emit() | def function[run, parameter[self]]:
constant[
Initialise the runner function with the passed args, kwargs
]
<ast.Try object at 0x7da1b20f9060> | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[transcript] = identifier[self] . identifier[fn] (* identifier[self] . identifier[args] ,** identifier[self] . identifier[kwargs] )
keyword[except] :
identifier[traceback] . identifier[print_exc] ()
identifier[exctype] , identifier[value] = identifier[sys] . identifier[exc_info] ()[: literal[int] ]
identifier[self] . identifier[signals] . identifier[error] . identifier[emit] (( identifier[exctype] , identifier[value] , identifier[traceback] . identifier[format_exc] ()))
keyword[else] :
identifier[self] . identifier[signals] . identifier[result] . identifier[emit] ( identifier[transcript] )
keyword[finally] :
identifier[self] . identifier[signals] . identifier[finished] . identifier[emit] () | def run(self):
"""
Initialise the runner function with the passed args, kwargs
"""
# Retrieve args/kwargs here; and fire up the processing using them
try:
transcript = self.fn(*self.args, **self.kwargs) # depends on [control=['try'], data=[]]
except:
traceback.print_exc()
(exctype, value) = sys.exc_info()[:2]
self.signals.error.emit((exctype, value, traceback.format_exc())) # depends on [control=['except'], data=[]]
else:
# Return the result of the processing
self.signals.result.emit(transcript)
finally:
# Done
self.signals.finished.emit() |
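# Hedged usage sketch for the worker's run() above; it assumes the
# conventional QRunnable wiring (fn/args/kwargs stored by __init__, a
# WorkerSignals object with result/error/finished signals), so the calls
# stay commented.
# pool = QThreadPool.globalInstance()
# worker = Worker(transcribe_file, "talk.wav", language="en")
# worker.signals.result.connect(display_transcript)
# pool.start(worker)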
def backwards(self, orm):
"Write your backwards methods here."
from django.contrib.auth.models import Group
projects = orm['samples.Project'].objects.all()
names = [PROJECT_GROUP_TEMPLATE.format(p.name) for p in projects]
# Remove groups named after these teams
Group.objects.filter(name__in=names).delete() | def function[backwards, parameter[self, orm]]:
constant[Write your backwards methods here.]
from relative_module[django.contrib.auth.models] import module[Group]
variable[projects] assign[=] call[call[name[orm]][constant[samples.Project]].objects.all, parameter[]]
variable[names] assign[=] <ast.ListComp object at 0x7da20c7945b0>
call[call[name[Group].objects.filter, parameter[]].delete, parameter[]] | keyword[def] identifier[backwards] ( identifier[self] , identifier[orm] ):
literal[string]
keyword[from] identifier[django] . identifier[contrib] . identifier[auth] . identifier[models] keyword[import] identifier[Group]
identifier[projects] = identifier[orm] [ literal[string] ]. identifier[objects] . identifier[all] ()
identifier[names] =[ identifier[PROJECT_GROUP_TEMPLATE] . identifier[format] ( identifier[p] . identifier[name] ) keyword[for] identifier[p] keyword[in] identifier[projects] ]
identifier[Group] . identifier[objects] . identifier[filter] ( identifier[name__in] = identifier[names] ). identifier[delete] () | def backwards(self, orm):
"""Write your backwards methods here."""
from django.contrib.auth.models import Group
projects = orm['samples.Project'].objects.all()
names = [PROJECT_GROUP_TEMPLATE.format(p.name) for p in projects]
# Remove groups named after these teams
Group.objects.filter(name__in=names).delete() |
def tokenize(ngrams, min_tf=2, min_df=2, min_len=3, apply_stoplist=False):
"""
Builds a vocabulary, and replaces words with vocab indices.
Parameters
----------
ngrams : dict
Keys are paper DOIs, values are lists of (Ngram, frequency) tuples.
apply_stoplist : bool
If True, will exclude all N-grams that contain words in the NLTK
stoplist.
Returns
-------
t_ngrams : dict
Tokenized ngrams, as doi:{i:count}.
vocab : dict
Vocabulary as i:term.
token_tf : :class:`.Counter`
Term counts for corpus, as i:count.
"""
vocab = {}
vocab_ = {}
word_tf = Counter()
word_df = Counter()
token_tf = Counter()
token_df = Counter()
t_ngrams = {}
# Get global word counts, first.
for grams in ngrams.values():
for g,c in grams:
word_tf[g] += c
word_df[g] += 1
if apply_stoplist:
stoplist = stopwords.words()
# Now tokenize.
for doi, grams in ngrams.iteritems():
t_ngrams[doi] = []
for g,c in grams:
ignore = False
# Ignore extremely rare words (probably garbage).
if word_tf[g] < min_tf or word_df[g] < min_df or len(g) < min_len:
ignore = True
# Stoplist.
elif apply_stoplist:
for w in g.split():
if w in stoplist:
ignore = True
if not ignore:
# Ensure g is unicode, then transliterate to plain ASCII str.
if type(g) is str:
g = unicode(g)
g = unidecode(g)
if g not in vocab.values():
i = len(vocab)
vocab[i] = g
vocab_[g] = i
else:
i = vocab_[g]
token_tf[i] += c
token_df[i] += 1
t_ngrams[doi].append( (i,c) )
return t_ngrams, vocab, token_tf | def function[tokenize, parameter[ngrams, min_tf, min_df, min_len, apply_stoplist]]:
constant[
Builds a vocabulary, and replaces words with vocab indices.
Parameters
----------
ngrams : dict
Keys are paper DOIs, values are lists of (Ngram, frequency) tuples.
apply_stoplist : bool
If True, will exclude all N-grams that contain words in the NLTK
stoplist.
Returns
-------
t_ngrams : dict
Tokenized ngrams, as doi:{i:count}.
vocab : dict
Vocabulary as i:term.
token_tf : :class:`.Counter`
Term counts for corpus, as i:count.
]
variable[vocab] assign[=] dictionary[[], []]
variable[vocab_] assign[=] dictionary[[], []]
variable[word_tf] assign[=] call[name[Counter], parameter[]]
variable[word_df] assign[=] call[name[Counter], parameter[]]
variable[token_tf] assign[=] call[name[Counter], parameter[]]
variable[token_df] assign[=] call[name[Counter], parameter[]]
variable[t_ngrams] assign[=] dictionary[[], []]
for taget[name[grams]] in starred[call[name[ngrams].values, parameter[]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b12c0b20>, <ast.Name object at 0x7da1b12c3190>]]] in starred[name[grams]] begin[:]
<ast.AugAssign object at 0x7da1b12c0250>
<ast.AugAssign object at 0x7da1b12c17e0>
if name[apply_stoplist] begin[:]
variable[stoplist] assign[=] call[name[stopwords].words, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b12c1540>, <ast.Name object at 0x7da1b12c2860>]]] in starred[call[name[ngrams].iteritems, parameter[]]] begin[:]
call[name[t_ngrams]][name[doi]] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b12c3040>, <ast.Name object at 0x7da1b12c1000>]]] in starred[name[grams]] begin[:]
variable[ignore] assign[=] constant[False]
if <ast.BoolOp object at 0x7da1b1253d90> begin[:]
variable[ignore] assign[=] constant[True]
if <ast.UnaryOp object at 0x7da1b12b56c0> begin[:]
if compare[call[name[type], parameter[name[g]]] is name[str]] begin[:]
variable[g] assign[=] call[name[unicode], parameter[name[g]]]
variable[g] assign[=] call[name[unidecode], parameter[name[g]]]
if compare[name[g] <ast.NotIn object at 0x7da2590d7190> call[name[vocab].values, parameter[]]] begin[:]
variable[i] assign[=] call[name[len], parameter[name[vocab]]]
call[name[vocab]][name[i]] assign[=] name[g]
call[name[vocab_]][name[g]] assign[=] name[i]
<ast.AugAssign object at 0x7da1b1196aa0>
<ast.AugAssign object at 0x7da1b11947f0>
call[call[name[t_ngrams]][name[doi]].append, parameter[tuple[[<ast.Name object at 0x7da1b1196230>, <ast.Name object at 0x7da1b1197b20>]]]]
return[tuple[[<ast.Name object at 0x7da1b1194670>, <ast.Name object at 0x7da1b1196f80>, <ast.Name object at 0x7da1b1196f50>]]] | keyword[def] identifier[tokenize] ( identifier[ngrams] , identifier[min_tf] = literal[int] , identifier[min_df] = literal[int] , identifier[min_len] = literal[int] , identifier[apply_stoplist] = keyword[False] ):
literal[string]
identifier[vocab] ={}
identifier[vocab_] ={}
identifier[word_tf] = identifier[Counter] ()
identifier[word_df] = identifier[Counter] ()
identifier[token_tf] = identifier[Counter] ()
identifier[token_df] = identifier[Counter] ()
identifier[t_ngrams] ={}
keyword[for] identifier[grams] keyword[in] identifier[ngrams] . identifier[values] ():
keyword[for] identifier[g] , identifier[c] keyword[in] identifier[grams] :
identifier[word_tf] [ identifier[g] ]+= identifier[c]
identifier[word_df] [ identifier[g] ]+= literal[int]
keyword[if] identifier[apply_stoplist] :
identifier[stoplist] = identifier[stopwords] . identifier[words] ()
keyword[for] identifier[doi] , identifier[grams] keyword[in] identifier[ngrams] . identifier[iteritems] ():
identifier[t_ngrams] [ identifier[doi] ]=[]
keyword[for] identifier[g] , identifier[c] keyword[in] identifier[grams] :
identifier[ignore] = keyword[False]
keyword[if] identifier[word_tf] [ identifier[g] ]< identifier[min_tf] keyword[or] identifier[word_df] [ identifier[g] ]< identifier[min_df] keyword[or] identifier[len] ( identifier[g] )< identifier[min_len] :
identifier[ignore] = keyword[True]
keyword[elif] identifier[apply_stoplist] :
keyword[for] identifier[w] keyword[in] identifier[g] . identifier[split] ():
keyword[if] identifier[w] keyword[in] identifier[stoplist] :
identifier[ignore] = keyword[True]
keyword[if] keyword[not] identifier[ignore] :
keyword[if] identifier[type] ( identifier[g] ) keyword[is] identifier[str] :
identifier[g] = identifier[unicode] ( identifier[g] )
identifier[g] = identifier[unidecode] ( identifier[g] )
keyword[if] identifier[g] keyword[not] keyword[in] identifier[vocab] . identifier[values] ():
identifier[i] = identifier[len] ( identifier[vocab] )
identifier[vocab] [ identifier[i] ]= identifier[g]
identifier[vocab_] [ identifier[g] ]= identifier[i]
keyword[else] :
identifier[i] = identifier[vocab_] [ identifier[g] ]
identifier[token_tf] [ identifier[i] ]+= identifier[c]
identifier[token_df] [ identifier[i] ]+= literal[int]
identifier[t_ngrams] [ identifier[doi] ]. identifier[append] (( identifier[i] , identifier[c] ))
keyword[return] identifier[t_ngrams] , identifier[vocab] , identifier[token_tf] | def tokenize(ngrams, min_tf=2, min_df=2, min_len=3, apply_stoplist=False):
"""
Builds a vocabulary, and replaces words with vocab indices.
Parameters
----------
ngrams : dict
Keys are paper DOIs, values are lists of (Ngram, frequency) tuples.
apply_stoplist : bool
If True, will exclude all N-grams that contain words in the NLTK
stoplist.
Returns
-------
t_ngrams : dict
Tokenized ngrams, as doi:{i:count}.
vocab : dict
Vocabulary as i:term.
token_tf : :class:`.Counter`
Term counts for corpus, as i:count.
"""
vocab = {}
vocab_ = {}
word_tf = Counter()
word_df = Counter()
token_tf = Counter()
token_df = Counter()
t_ngrams = {}
# Get global word counts, first.
for grams in ngrams.values():
for (g, c) in grams:
word_tf[g] += c
word_df[g] += 1 # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['grams']]
if apply_stoplist:
stoplist = stopwords.words() # depends on [control=['if'], data=[]]
# Now tokenize.
for (doi, grams) in ngrams.iteritems():
t_ngrams[doi] = []
for (g, c) in grams:
ignore = False
# Ignore extremely rare words (probably garbage).
if word_tf[g] < min_tf or word_df[g] < min_df or len(g) < min_len:
ignore = True # depends on [control=['if'], data=[]]
# Stoplist.
elif apply_stoplist:
for w in g.split():
if w in stoplist:
ignore = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['w']] # depends on [control=['if'], data=[]]
if not ignore:
# Ensure g is unicode, then transliterate to plain ASCII str.
if type(g) is str:
g = unicode(g) # depends on [control=['if'], data=[]]
g = unidecode(g)
if g not in vocab.values():
i = len(vocab)
vocab[i] = g
vocab_[g] = i # depends on [control=['if'], data=['g']]
else:
i = vocab_[g]
token_tf[i] += c
token_df[i] += 1
t_ngrams[doi].append((i, c)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return (t_ngrams, vocab, token_tf) |
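# Tiny worked example for tokenize() (Python 2, matching the iteritems/unicode
# usage above, and assuming the module imports Counter and unidecode): the
# frequent bigram survives, the rare one is filtered by min_tf/min_df.
ngrams = {
    "doi:1": [(u"topic model", 3), (u"rare term", 1)],
    "doi:2": [(u"topic model", 2)],
}
t_ngrams, vocab, token_tf = tokenize(ngrams, min_tf=2, min_df=2, min_len=3)
print(vocab)       # {0: 'topic model'}
print(t_ngrams)    # {'doi:1': [(0, 3)], 'doi:2': [(0, 2)]}
print(token_tf)    # Counter({0: 5})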
def cliques(graph, threshold=3):
""" Returns all the cliques in the graph of at least the given size.
"""
cliques = []
for n in graph.nodes:
c = clique(graph, n.id)
if len(c) >= threshold:
c.sort()
if c not in cliques:
cliques.append(c)
return cliques | def function[cliques, parameter[graph, threshold]]:
constant[ Returns all the cliques in the graph of at least the given size.
]
variable[cliques] assign[=] list[[]]
for taget[name[n]] in starred[name[graph].nodes] begin[:]
variable[c] assign[=] call[name[clique], parameter[name[graph], name[n].id]]
if compare[call[name[len], parameter[name[c]]] greater_or_equal[>=] name[threshold]] begin[:]
call[name[c].sort, parameter[]]
if compare[name[c] <ast.NotIn object at 0x7da2590d7190> name[cliques]] begin[:]
call[name[cliques].append, parameter[name[c]]]
return[name[cliques]] | keyword[def] identifier[cliques] ( identifier[graph] , identifier[threshold] = literal[int] ):
literal[string]
identifier[cliques] =[]
keyword[for] identifier[n] keyword[in] identifier[graph] . identifier[nodes] :
identifier[c] = identifier[clique] ( identifier[graph] , identifier[n] . identifier[id] )
keyword[if] identifier[len] ( identifier[c] )>= identifier[threshold] :
identifier[c] . identifier[sort] ()
keyword[if] identifier[c] keyword[not] keyword[in] identifier[cliques] :
identifier[cliques] . identifier[append] ( identifier[c] )
keyword[return] identifier[cliques] | def cliques(graph, threshold=3):
""" Returns all the cliques in the graph of at least the given size.
"""
cliques = []
for n in graph.nodes:
c = clique(graph, n.id)
if len(c) >= threshold:
c.sort()
if c not in cliques:
cliques.append(c) # depends on [control=['if'], data=['c', 'cliques']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['n']]
return cliques |
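# Worked example for cliques(): a triangle plus a pendant node. It assumes the
# companion clique(graph, id) helper grows a maximal clique from that node, so
# with threshold=3 only the triangle survives. Commented, since it needs the
# surrounding graph class:
# g = graph with edges a-b, b-c, a-c, c-d
# cliques(g, threshold=3)    # -> [['a', 'b', 'c']]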
def callback(self, request, **kwargs):
"""
Called from the Service when the user agrees to activate it
:param request: request object
:return: callback url
:rtype: string, path to the template
"""
try:
UserService.objects.filter(
user=request.user,
name=ServicesActivated.objects.get(name='ServiceWallabag')
)
except KeyError:
return '/'
return 'wallabag/callback.html' | def function[callback, parameter[self, request]]:
constant[
Called from the Service when the user agrees to activate it
:param request: request object
:return: callback url
:rtype: string, path to the template
]
<ast.Try object at 0x7da1b26ad3f0>
return[constant[wallabag/callback.html]] | keyword[def] identifier[callback] ( identifier[self] , identifier[request] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
identifier[UserService] . identifier[objects] . identifier[filter] (
identifier[user] = identifier[request] . identifier[user] ,
identifier[name] = identifier[ServicesActivated] . identifier[objects] . identifier[get] ( identifier[name] = literal[string] )
)
keyword[except] identifier[KeyError] :
keyword[return] literal[string]
keyword[return] literal[string] | def callback(self, request, **kwargs):
"""
Called from the Service when the user agrees to activate it
:param request: request object
:return: callback url
:rtype: string, path to the template
"""
try:
UserService.objects.filter(user=request.user, name=ServicesActivated.objects.get(name='ServiceWallabag')) # depends on [control=['try'], data=[]]
except KeyError:
return '/' # depends on [control=['except'], data=[]]
return 'wallabag/callback.html' |
def set(self, option, value):
"""
Sets an option to a value.
"""
if self.config is None:
self.config = {}
self.config[option] = value | def function[set, parameter[self, option, value]]:
constant[
Sets an option to a value.
]
if compare[name[self].config is constant[None]] begin[:]
name[self].config assign[=] dictionary[[], []]
call[name[self].config][name[option]] assign[=] name[value] | keyword[def] identifier[set] ( identifier[self] , identifier[option] , identifier[value] ):
literal[string]
keyword[if] identifier[self] . identifier[config] keyword[is] keyword[None] :
identifier[self] . identifier[config] ={}
identifier[self] . identifier[config] [ identifier[option] ]= identifier[value] | def set(self, option, value):
"""
Sets an option to a value.
"""
if self.config is None:
self.config = {} # depends on [control=['if'], data=[]]
self.config[option] = value |
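# The lazy-initialization pattern used by set() above, standalone:
class Options(object):
    config = None

    def set(self, option, value):
        if self.config is None:
            self.config = {}
        self.config[option] = value

opts = Options()
opts.set("debug", True)
assert opts.config == {"debug": True}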
def copy(self):
"""Provides a partial 'deepcopy' of the Model. All of the Metabolite,
Gene, and Reaction objects are created anew but in a faster fashion
than deepcopy
"""
new = self.__class__()
do_not_copy_by_ref = {"metabolites", "reactions", "genes", "notes",
"annotation", "groups"}
for attr in self.__dict__:
if attr not in do_not_copy_by_ref:
new.__dict__[attr] = self.__dict__[attr]
new.notes = deepcopy(self.notes)
new.annotation = deepcopy(self.annotation)
new.metabolites = DictList()
do_not_copy_by_ref = {"_reaction", "_model"}
for metabolite in self.metabolites:
new_met = metabolite.__class__()
for attr, value in iteritems(metabolite.__dict__):
if attr not in do_not_copy_by_ref:
new_met.__dict__[attr] = copy(
value) if attr == "formula" else value
new_met._model = new
new.metabolites.append(new_met)
new.genes = DictList()
for gene in self.genes:
new_gene = gene.__class__(None)
for attr, value in iteritems(gene.__dict__):
if attr not in do_not_copy_by_ref:
new_gene.__dict__[attr] = copy(
value) if attr == "formula" else value
new_gene._model = new
new.genes.append(new_gene)
new.reactions = DictList()
do_not_copy_by_ref = {"_model", "_metabolites", "_genes"}
for reaction in self.reactions:
new_reaction = reaction.__class__()
for attr, value in iteritems(reaction.__dict__):
if attr not in do_not_copy_by_ref:
new_reaction.__dict__[attr] = copy(value)
new_reaction._model = new
new.reactions.append(new_reaction)
# update awareness
for metabolite, stoic in iteritems(reaction._metabolites):
new_met = new.metabolites.get_by_id(metabolite.id)
new_reaction._metabolites[new_met] = stoic
new_met._reaction.add(new_reaction)
for gene in reaction._genes:
new_gene = new.genes.get_by_id(gene.id)
new_reaction._genes.add(new_gene)
new_gene._reaction.add(new_reaction)
new.groups = DictList()
do_not_copy_by_ref = {"_model", "_members"}
# Groups can be members of other groups. We initialize them first and
# then update their members.
for group in self.groups:
new_group = group.__class__(group.id)
for attr, value in iteritems(group.__dict__):
if attr not in do_not_copy_by_ref:
new_group.__dict__[attr] = copy(value)
new_group._model = new
new.groups.append(new_group)
for group in self.groups:
new_group = new.groups.get_by_id(group.id)
# update awareness, as in the reaction copies
new_objects = []
for member in group.members:
if isinstance(member, Metabolite):
new_object = new.metabolites.get_by_id(member.id)
elif isinstance(member, Reaction):
new_object = new.reactions.get_by_id(member.id)
elif isinstance(member, Gene):
new_object = new.genes.get_by_id(member.id)
elif isinstance(member, Group):
new_object = new.groups.get_by_id(member.id)
else:
raise TypeError(
"The group member {!r} is unexpectedly not a "
"metabolite, reaction, gene, nor another "
"group.".format(member))
new_objects.append(new_object)
new_group.add_members(new_objects)
try:
new._solver = deepcopy(self.solver)
# Cplex has an issue with deep copies
except Exception: # pragma: no cover
new._solver = copy(self.solver) # pragma: no cover
# it doesn't make sense to retain the context of a copied model so
# assign a new empty context
new._contexts = list()
return new | def function[copy, parameter[self]]:
constant[Provides a partial 'deepcopy' of the Model. All of the Metabolite,
Gene, and Reaction objects are created anew but in a faster fashion
than deepcopy
]
variable[new] assign[=] call[name[self].__class__, parameter[]]
variable[do_not_copy_by_ref] assign[=] <ast.Set object at 0x7da1b01fe740>
for taget[name[attr]] in starred[name[self].__dict__] begin[:]
if compare[name[attr] <ast.NotIn object at 0x7da2590d7190> name[do_not_copy_by_ref]] begin[:]
call[name[new].__dict__][name[attr]] assign[=] call[name[self].__dict__][name[attr]]
name[new].notes assign[=] call[name[deepcopy], parameter[name[self].notes]]
name[new].annotation assign[=] call[name[deepcopy], parameter[name[self].annotation]]
name[new].metabolites assign[=] call[name[DictList], parameter[]]
variable[do_not_copy_by_ref] assign[=] <ast.Set object at 0x7da1b0056320>
for taget[name[metabolite]] in starred[name[self].metabolites] begin[:]
variable[new_met] assign[=] call[name[metabolite].__class__, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b0056050>, <ast.Name object at 0x7da1b0056020>]]] in starred[call[name[iteritems], parameter[name[metabolite].__dict__]]] begin[:]
if compare[name[attr] <ast.NotIn object at 0x7da2590d7190> name[do_not_copy_by_ref]] begin[:]
call[name[new_met].__dict__][name[attr]] assign[=] <ast.IfExp object at 0x7da1b0055d80>
name[new_met]._model assign[=] name[new]
call[name[new].metabolites.append, parameter[name[new_met]]]
name[new].genes assign[=] call[name[DictList], parameter[]]
for taget[name[gene]] in starred[name[self].genes] begin[:]
variable[new_gene] assign[=] call[name[gene].__class__, parameter[constant[None]]]
for taget[tuple[[<ast.Name object at 0x7da1b00556c0>, <ast.Name object at 0x7da1b0055690>]]] in starred[call[name[iteritems], parameter[name[gene].__dict__]]] begin[:]
if compare[name[attr] <ast.NotIn object at 0x7da2590d7190> name[do_not_copy_by_ref]] begin[:]
call[name[new_gene].__dict__][name[attr]] assign[=] <ast.IfExp object at 0x7da1b00553f0>
name[new_gene]._model assign[=] name[new]
call[name[new].genes.append, parameter[name[new_gene]]]
name[new].reactions assign[=] call[name[DictList], parameter[]]
variable[do_not_copy_by_ref] assign[=] <ast.Set object at 0x7da1b0054f10>
for taget[name[reaction]] in starred[name[self].reactions] begin[:]
variable[new_reaction] assign[=] call[name[reaction].__class__, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b0054be0>, <ast.Name object at 0x7da1b0054bb0>]]] in starred[call[name[iteritems], parameter[name[reaction].__dict__]]] begin[:]
if compare[name[attr] <ast.NotIn object at 0x7da2590d7190> name[do_not_copy_by_ref]] begin[:]
call[name[new_reaction].__dict__][name[attr]] assign[=] call[name[copy], parameter[name[value]]]
name[new_reaction]._model assign[=] name[new]
call[name[new].reactions.append, parameter[name[new_reaction]]]
for taget[tuple[[<ast.Name object at 0x7da1b0054610>, <ast.Name object at 0x7da1b00545e0>]]] in starred[call[name[iteritems], parameter[name[reaction]._metabolites]]] begin[:]
variable[new_met] assign[=] call[name[new].metabolites.get_by_id, parameter[name[metabolite].id]]
call[name[new_reaction]._metabolites][name[new_met]] assign[=] name[stoic]
call[name[new_met]._reaction.add, parameter[name[new_reaction]]]
for taget[name[gene]] in starred[name[reaction]._genes] begin[:]
variable[new_gene] assign[=] call[name[new].genes.get_by_id, parameter[name[gene].id]]
call[name[new_reaction]._genes.add, parameter[name[new_gene]]]
call[name[new_gene]._reaction.add, parameter[name[new_reaction]]]
name[new].groups assign[=] call[name[DictList], parameter[]]
variable[do_not_copy_by_ref] assign[=] <ast.Set object at 0x7da1b0001210>
for taget[name[group]] in starred[name[self].groups] begin[:]
variable[new_group] assign[=] call[name[group].__class__, parameter[name[group].id]]
for taget[tuple[[<ast.Name object at 0x7da1b0001120>, <ast.Name object at 0x7da1b0002530>]]] in starred[call[name[iteritems], parameter[name[group].__dict__]]] begin[:]
if compare[name[attr] <ast.NotIn object at 0x7da2590d7190> name[do_not_copy_by_ref]] begin[:]
call[name[new_group].__dict__][name[attr]] assign[=] call[name[copy], parameter[name[value]]]
name[new_group]._model assign[=] name[new]
call[name[new].groups.append, parameter[name[new_group]]]
for taget[name[group]] in starred[name[self].groups] begin[:]
variable[new_group] assign[=] call[name[new].groups.get_by_id, parameter[name[group].id]]
variable[new_objects] assign[=] list[[]]
for taget[name[member]] in starred[name[group].members] begin[:]
if call[name[isinstance], parameter[name[member], name[Metabolite]]] begin[:]
variable[new_object] assign[=] call[name[new].metabolites.get_by_id, parameter[name[member].id]]
call[name[new_objects].append, parameter[name[new_object]]]
call[name[new_group].add_members, parameter[name[new_objects]]]
<ast.Try object at 0x7da1b00017b0>
name[new]._contexts assign[=] call[name[list], parameter[]]
return[name[new]] | keyword[def] identifier[copy] ( identifier[self] ):
literal[string]
identifier[new] = identifier[self] . identifier[__class__] ()
identifier[do_not_copy_by_ref] ={ literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] }
keyword[for] identifier[attr] keyword[in] identifier[self] . identifier[__dict__] :
keyword[if] identifier[attr] keyword[not] keyword[in] identifier[do_not_copy_by_ref] :
identifier[new] . identifier[__dict__] [ identifier[attr] ]= identifier[self] . identifier[__dict__] [ identifier[attr] ]
identifier[new] . identifier[notes] = identifier[deepcopy] ( identifier[self] . identifier[notes] )
identifier[new] . identifier[annotation] = identifier[deepcopy] ( identifier[self] . identifier[annotation] )
identifier[new] . identifier[metabolites] = identifier[DictList] ()
identifier[do_not_copy_by_ref] ={ literal[string] , literal[string] }
keyword[for] identifier[metabolite] keyword[in] identifier[self] . identifier[metabolites] :
identifier[new_met] = identifier[metabolite] . identifier[__class__] ()
keyword[for] identifier[attr] , identifier[value] keyword[in] identifier[iteritems] ( identifier[metabolite] . identifier[__dict__] ):
keyword[if] identifier[attr] keyword[not] keyword[in] identifier[do_not_copy_by_ref] :
identifier[new_met] . identifier[__dict__] [ identifier[attr] ]= identifier[copy] (
identifier[value] ) keyword[if] identifier[attr] == literal[string] keyword[else] identifier[value]
identifier[new_met] . identifier[_model] = identifier[new]
identifier[new] . identifier[metabolites] . identifier[append] ( identifier[new_met] )
identifier[new] . identifier[genes] = identifier[DictList] ()
keyword[for] identifier[gene] keyword[in] identifier[self] . identifier[genes] :
identifier[new_gene] = identifier[gene] . identifier[__class__] ( keyword[None] )
keyword[for] identifier[attr] , identifier[value] keyword[in] identifier[iteritems] ( identifier[gene] . identifier[__dict__] ):
keyword[if] identifier[attr] keyword[not] keyword[in] identifier[do_not_copy_by_ref] :
identifier[new_gene] . identifier[__dict__] [ identifier[attr] ]= identifier[copy] (
identifier[value] ) keyword[if] identifier[attr] == literal[string] keyword[else] identifier[value]
identifier[new_gene] . identifier[_model] = identifier[new]
identifier[new] . identifier[genes] . identifier[append] ( identifier[new_gene] )
identifier[new] . identifier[reactions] = identifier[DictList] ()
identifier[do_not_copy_by_ref] ={ literal[string] , literal[string] , literal[string] }
keyword[for] identifier[reaction] keyword[in] identifier[self] . identifier[reactions] :
identifier[new_reaction] = identifier[reaction] . identifier[__class__] ()
keyword[for] identifier[attr] , identifier[value] keyword[in] identifier[iteritems] ( identifier[reaction] . identifier[__dict__] ):
keyword[if] identifier[attr] keyword[not] keyword[in] identifier[do_not_copy_by_ref] :
identifier[new_reaction] . identifier[__dict__] [ identifier[attr] ]= identifier[copy] ( identifier[value] )
identifier[new_reaction] . identifier[_model] = identifier[new]
identifier[new] . identifier[reactions] . identifier[append] ( identifier[new_reaction] )
keyword[for] identifier[metabolite] , identifier[stoic] keyword[in] identifier[iteritems] ( identifier[reaction] . identifier[_metabolites] ):
identifier[new_met] = identifier[new] . identifier[metabolites] . identifier[get_by_id] ( identifier[metabolite] . identifier[id] )
identifier[new_reaction] . identifier[_metabolites] [ identifier[new_met] ]= identifier[stoic]
identifier[new_met] . identifier[_reaction] . identifier[add] ( identifier[new_reaction] )
keyword[for] identifier[gene] keyword[in] identifier[reaction] . identifier[_genes] :
identifier[new_gene] = identifier[new] . identifier[genes] . identifier[get_by_id] ( identifier[gene] . identifier[id] )
identifier[new_reaction] . identifier[_genes] . identifier[add] ( identifier[new_gene] )
identifier[new_gene] . identifier[_reaction] . identifier[add] ( identifier[new_reaction] )
identifier[new] . identifier[groups] = identifier[DictList] ()
identifier[do_not_copy_by_ref] ={ literal[string] , literal[string] }
keyword[for] identifier[group] keyword[in] identifier[self] . identifier[groups] :
identifier[new_group] = identifier[group] . identifier[__class__] ( identifier[group] . identifier[id] )
keyword[for] identifier[attr] , identifier[value] keyword[in] identifier[iteritems] ( identifier[group] . identifier[__dict__] ):
keyword[if] identifier[attr] keyword[not] keyword[in] identifier[do_not_copy_by_ref] :
identifier[new_group] . identifier[__dict__] [ identifier[attr] ]= identifier[copy] ( identifier[value] )
identifier[new_group] . identifier[_model] = identifier[new]
identifier[new] . identifier[groups] . identifier[append] ( identifier[new_group] )
keyword[for] identifier[group] keyword[in] identifier[self] . identifier[groups] :
identifier[new_group] = identifier[new] . identifier[groups] . identifier[get_by_id] ( identifier[group] . identifier[id] )
identifier[new_objects] =[]
keyword[for] identifier[member] keyword[in] identifier[group] . identifier[members] :
keyword[if] identifier[isinstance] ( identifier[member] , identifier[Metabolite] ):
identifier[new_object] = identifier[new] . identifier[metabolites] . identifier[get_by_id] ( identifier[member] . identifier[id] )
keyword[elif] identifier[isinstance] ( identifier[member] , identifier[Reaction] ):
identifier[new_object] = identifier[new] . identifier[reactions] . identifier[get_by_id] ( identifier[member] . identifier[id] )
keyword[elif] identifier[isinstance] ( identifier[member] , identifier[Gene] ):
identifier[new_object] = identifier[new] . identifier[genes] . identifier[get_by_id] ( identifier[member] . identifier[id] )
keyword[elif] identifier[isinstance] ( identifier[member] , identifier[Group] ):
                    identifier[new_object] = identifier[new] . identifier[groups] . identifier[get_by_id] ( identifier[member] . identifier[id] )
keyword[else] :
keyword[raise] identifier[TypeError] (
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[member] ))
identifier[new_objects] . identifier[append] ( identifier[new_object] )
identifier[new_group] . identifier[add_members] ( identifier[new_objects] )
keyword[try] :
identifier[new] . identifier[_solver] = identifier[deepcopy] ( identifier[self] . identifier[solver] )
keyword[except] identifier[Exception] :
identifier[new] . identifier[_solver] = identifier[copy] ( identifier[self] . identifier[solver] )
identifier[new] . identifier[_contexts] = identifier[list] ()
keyword[return] identifier[new] | def copy(self):
"""Provides a partial 'deepcopy' of the Model. All of the Metabolite,
Gene, and Reaction objects are created anew but in a faster fashion
than deepcopy
"""
new = self.__class__()
do_not_copy_by_ref = {'metabolites', 'reactions', 'genes', 'notes', 'annotation', 'groups'}
for attr in self.__dict__:
if attr not in do_not_copy_by_ref:
new.__dict__[attr] = self.__dict__[attr] # depends on [control=['if'], data=['attr']] # depends on [control=['for'], data=['attr']]
new.notes = deepcopy(self.notes)
new.annotation = deepcopy(self.annotation)
new.metabolites = DictList()
do_not_copy_by_ref = {'_reaction', '_model'}
for metabolite in self.metabolites:
new_met = metabolite.__class__()
for (attr, value) in iteritems(metabolite.__dict__):
if attr not in do_not_copy_by_ref:
new_met.__dict__[attr] = copy(value) if attr == 'formula' else value # depends on [control=['if'], data=['attr']] # depends on [control=['for'], data=[]]
new_met._model = new
new.metabolites.append(new_met) # depends on [control=['for'], data=['metabolite']]
new.genes = DictList()
for gene in self.genes:
new_gene = gene.__class__(None)
for (attr, value) in iteritems(gene.__dict__):
if attr not in do_not_copy_by_ref:
new_gene.__dict__[attr] = copy(value) if attr == 'formula' else value # depends on [control=['if'], data=['attr']] # depends on [control=['for'], data=[]]
new_gene._model = new
new.genes.append(new_gene) # depends on [control=['for'], data=['gene']]
new.reactions = DictList()
do_not_copy_by_ref = {'_model', '_metabolites', '_genes'}
for reaction in self.reactions:
new_reaction = reaction.__class__()
for (attr, value) in iteritems(reaction.__dict__):
if attr not in do_not_copy_by_ref:
new_reaction.__dict__[attr] = copy(value) # depends on [control=['if'], data=['attr']] # depends on [control=['for'], data=[]]
new_reaction._model = new
new.reactions.append(new_reaction)
# update awareness
for (metabolite, stoic) in iteritems(reaction._metabolites):
new_met = new.metabolites.get_by_id(metabolite.id)
new_reaction._metabolites[new_met] = stoic
new_met._reaction.add(new_reaction) # depends on [control=['for'], data=[]]
for gene in reaction._genes:
new_gene = new.genes.get_by_id(gene.id)
new_reaction._genes.add(new_gene)
new_gene._reaction.add(new_reaction) # depends on [control=['for'], data=['gene']] # depends on [control=['for'], data=['reaction']]
new.groups = DictList()
do_not_copy_by_ref = {'_model', '_members'}
# Groups can be members of other groups. We initialize them first and
# then update their members.
for group in self.groups:
new_group = group.__class__(group.id)
for (attr, value) in iteritems(group.__dict__):
if attr not in do_not_copy_by_ref:
new_group.__dict__[attr] = copy(value) # depends on [control=['if'], data=['attr']] # depends on [control=['for'], data=[]]
new_group._model = new
new.groups.append(new_group) # depends on [control=['for'], data=['group']]
for group in self.groups:
new_group = new.groups.get_by_id(group.id)
# update awareness, as in the reaction copies
new_objects = []
for member in group.members:
if isinstance(member, Metabolite):
new_object = new.metabolites.get_by_id(member.id) # depends on [control=['if'], data=[]]
elif isinstance(member, Reaction):
new_object = new.reactions.get_by_id(member.id) # depends on [control=['if'], data=[]]
elif isinstance(member, Gene):
new_object = new.genes.get_by_id(member.id) # depends on [control=['if'], data=[]]
elif isinstance(member, Group):
            new_object = new.groups.get_by_id(member.id) # depends on [control=['if'], data=[]]
else:
raise TypeError('The group member {!r} is unexpectedly not a metabolite, reaction, gene, nor another group.'.format(member))
new_objects.append(new_object) # depends on [control=['for'], data=['member']]
new_group.add_members(new_objects) # depends on [control=['for'], data=['group']]
try:
new._solver = deepcopy(self.solver) # depends on [control=['try'], data=[]]
# Cplex has an issue with deep copies
except Exception: # pragma: no cover
new._solver = copy(self.solver) # pragma: no cover # depends on [control=['except'], data=[]]
# it doesn't make sense to retain the context of a copied model so
# assign a new empty context
new._contexts = list()
return new |
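# A minimal, self-contained sketch of the copy() semantics documented above,
# assuming the cobrapy package (an assumption; the row itself names no import):
import cobra

model = cobra.Model("demo")
rxn = cobra.Reaction("R1")
rxn.add_metabolites({cobra.Metabolite("atp_c"): -1.0})
model.add_reactions([rxn])

clone = model.copy()
assert clone.reactions.get_by_id("R1") is not model.reactions.get_by_id("R1")  # fresh objects
assert clone.reactions.get_by_id("R1").id == "R1"                              # same content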
def rename_header(self, old_name, new_name):
"""
This will rename the header. The supplied names need to be strings.
"""
self.hkeys[self.hkeys.index(old_name)] = new_name
self.headers[new_name] = self.headers.pop(old_name)
return self | def function[rename_header, parameter[self, old_name, new_name]]:
constant[
This will rename the header. The supplied names need to be strings.
]
call[name[self].hkeys][call[name[self].hkeys.index, parameter[name[old_name]]]] assign[=] name[new_name]
call[name[self].headers][name[new_name]] assign[=] call[name[self].headers.pop, parameter[name[old_name]]]
return[name[self]] | keyword[def] identifier[rename_header] ( identifier[self] , identifier[old_name] , identifier[new_name] ):
literal[string]
identifier[self] . identifier[hkeys] [ identifier[self] . identifier[hkeys] . identifier[index] ( identifier[old_name] )]= identifier[new_name]
identifier[self] . identifier[headers] [ identifier[new_name] ]= identifier[self] . identifier[headers] . identifier[pop] ( identifier[old_name] )
keyword[return] identifier[self] | def rename_header(self, old_name, new_name):
"""
This will rename the header. The supplied names need to be strings.
"""
self.hkeys[self.hkeys.index(old_name)] = new_name
self.headers[new_name] = self.headers.pop(old_name)
return self |
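# Contract sketch for rename_header() above, using a tiny stand-in object
# (the real class is assumed to keep hkeys ordered and headers as a dict):
class _BoxStandIn:
    rename_header = rename_header  # borrow the function above as a method
    def __init__(self):
        self.hkeys = ["run", "date"]
        self.headers = {"run": 7, "date": "2020-01-01"}

box = _BoxStandIn()
box.rename_header("run", "run_id")
assert box.hkeys == ["run_id", "date"] and box.headers["run_id"] == 7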
def _handle_spyder_msg(self, msg):
"""
Handle internal spyder messages
"""
spyder_msg_type = msg['content'].get('spyder_msg_type')
if spyder_msg_type == 'data':
# Deserialize data
try:
if PY2:
value = cloudpickle.loads(msg['buffers'][0])
else:
value = cloudpickle.loads(bytes(msg['buffers'][0]))
except Exception as msg:
self._kernel_value = None
self._kernel_reply = repr(msg)
else:
self._kernel_value = value
self.sig_got_reply.emit()
return
elif spyder_msg_type == 'pdb_state':
pdb_state = msg['content']['pdb_state']
if pdb_state is not None and isinstance(pdb_state, dict):
self.refresh_from_pdb(pdb_state)
elif spyder_msg_type == 'pdb_continue':
# Run Pdb continue to get to the first breakpoint
# Fixes 2034
self.write_to_stdin('continue')
elif spyder_msg_type == 'set_breakpoints':
self.set_spyder_breakpoints(force=True)
else:
logger.debug("No such spyder message type: %s" % spyder_msg_type) | def function[_handle_spyder_msg, parameter[self, msg]]:
constant[
Handle internal spyder messages
]
variable[spyder_msg_type] assign[=] call[call[name[msg]][constant[content]].get, parameter[constant[spyder_msg_type]]]
if compare[name[spyder_msg_type] equal[==] constant[data]] begin[:]
<ast.Try object at 0x7da1b1f99120>
call[name[self].sig_got_reply.emit, parameter[]]
return[None] | keyword[def] identifier[_handle_spyder_msg] ( identifier[self] , identifier[msg] ):
literal[string]
identifier[spyder_msg_type] = identifier[msg] [ literal[string] ]. identifier[get] ( literal[string] )
keyword[if] identifier[spyder_msg_type] == literal[string] :
keyword[try] :
keyword[if] identifier[PY2] :
identifier[value] = identifier[cloudpickle] . identifier[loads] ( identifier[msg] [ literal[string] ][ literal[int] ])
keyword[else] :
identifier[value] = identifier[cloudpickle] . identifier[loads] ( identifier[bytes] ( identifier[msg] [ literal[string] ][ literal[int] ]))
keyword[except] identifier[Exception] keyword[as] identifier[msg] :
identifier[self] . identifier[_kernel_value] = keyword[None]
identifier[self] . identifier[_kernel_reply] = identifier[repr] ( identifier[msg] )
keyword[else] :
identifier[self] . identifier[_kernel_value] = identifier[value]
identifier[self] . identifier[sig_got_reply] . identifier[emit] ()
keyword[return]
keyword[elif] identifier[spyder_msg_type] == literal[string] :
identifier[pdb_state] = identifier[msg] [ literal[string] ][ literal[string] ]
keyword[if] identifier[pdb_state] keyword[is] keyword[not] keyword[None] keyword[and] identifier[isinstance] ( identifier[pdb_state] , identifier[dict] ):
identifier[self] . identifier[refresh_from_pdb] ( identifier[pdb_state] )
keyword[elif] identifier[spyder_msg_type] == literal[string] :
identifier[self] . identifier[write_to_stdin] ( literal[string] )
keyword[elif] identifier[spyder_msg_type] == literal[string] :
identifier[self] . identifier[set_spyder_breakpoints] ( identifier[force] = keyword[True] )
keyword[else] :
identifier[logger] . identifier[debug] ( literal[string] % identifier[spyder_msg_type] ) | def _handle_spyder_msg(self, msg):
"""
Handle internal spyder messages
"""
spyder_msg_type = msg['content'].get('spyder_msg_type')
if spyder_msg_type == 'data':
# Deserialize data
try:
if PY2:
value = cloudpickle.loads(msg['buffers'][0]) # depends on [control=['if'], data=[]]
else:
value = cloudpickle.loads(bytes(msg['buffers'][0])) # depends on [control=['try'], data=[]]
except Exception as msg:
self._kernel_value = None
self._kernel_reply = repr(msg) # depends on [control=['except'], data=['msg']]
else:
self._kernel_value = value
self.sig_got_reply.emit()
return # depends on [control=['if'], data=[]]
elif spyder_msg_type == 'pdb_state':
pdb_state = msg['content']['pdb_state']
if pdb_state is not None and isinstance(pdb_state, dict):
self.refresh_from_pdb(pdb_state) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif spyder_msg_type == 'pdb_continue':
# Run Pdb continue to get to the first breakpoint
# Fixes 2034
self.write_to_stdin('continue') # depends on [control=['if'], data=[]]
elif spyder_msg_type == 'set_breakpoints':
self.set_spyder_breakpoints(force=True) # depends on [control=['if'], data=[]]
else:
logger.debug('No such spyder message type: %s' % spyder_msg_type) |
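# Illustrative shapes of the messages the handler above dispatches on (payload
# values are made up; only 'spyder_msg_type' drives the branching):
pdb_msg = {"content": {"spyder_msg_type": "pdb_state", "pdb_state": {}}}
data_msg = {"content": {"spyder_msg_type": "data"},
            "buffers": [b"<cloudpickled payload>"]}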
def item_from_topics(key, topics):
"""Get binding from `topics` via `key`
Example:
{0} == hello --> be in hello world
{1} == world --> be in hello world
Returns:
Single topic matching the key
Raises:
IndexError (int): With number of required
arguments for the key
"""
if re.match("{\d+}", key):
pos = int(key.strip("{}"))
try:
binding = topics[pos]
except IndexError:
raise IndexError(pos + 1)
else:
echo("be.yaml template key not recognised")
sys.exit(PROJECT_ERROR)
return binding | def function[item_from_topics, parameter[key, topics]]:
constant[Get binding from `topics` via `key`
Example:
{0} == hello --> be in hello world
{1} == world --> be in hello world
Returns:
Single topic matching the key
Raises:
IndexError (int): With number of required
arguments for the key
]
if call[name[re].match, parameter[constant[{\d+}], name[key]]] begin[:]
variable[pos] assign[=] call[name[int], parameter[call[name[key].strip, parameter[constant[{}]]]]]
<ast.Try object at 0x7da1b0fdfb80>
return[name[binding]] | keyword[def] identifier[item_from_topics] ( identifier[key] , identifier[topics] ):
literal[string]
keyword[if] identifier[re] . identifier[match] ( literal[string] , identifier[key] ):
identifier[pos] = identifier[int] ( identifier[key] . identifier[strip] ( literal[string] ))
keyword[try] :
identifier[binding] = identifier[topics] [ identifier[pos] ]
keyword[except] identifier[IndexError] :
keyword[raise] identifier[IndexError] ( identifier[pos] + literal[int] )
keyword[else] :
identifier[echo] ( literal[string] )
identifier[sys] . identifier[exit] ( identifier[PROJECT_ERROR] )
keyword[return] identifier[binding] | def item_from_topics(key, topics):
"""Get binding from `topics` via `key`
Example:
{0} == hello --> be in hello world
{1} == world --> be in hello world
Returns:
Single topic matching the key
Raises:
IndexError (int): With number of required
arguments for the key
"""
if re.match('{\\d+}', key):
pos = int(key.strip('{}'))
try:
binding = topics[pos] # depends on [control=['try'], data=[]]
except IndexError:
raise IndexError(pos + 1) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
echo('be.yaml template key not recognised')
sys.exit(PROJECT_ERROR)
return binding |
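# Usage sketch for item_from_topics() above: "{N}" keys index into topics.
topics = ["hello", "world"]
assert item_from_topics("{0}", topics) == "hello"
assert item_from_topics("{1}", topics) == "world"
# item_from_topics("{2}", topics) raises IndexError(3): three topics required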
def hard_bounce(self, unique_id, configs=None):
""" Performs a hard bounce (kill and start) for the specified process
:Parameter unique_id: the name of the process
"""
self.kill(unique_id, configs)
self.start(unique_id, configs) | def function[hard_bounce, parameter[self, unique_id, configs]]:
constant[ Performs a hard bounce (kill and start) for the specified process
:Parameter unique_id: the name of the process
]
call[name[self].kill, parameter[name[unique_id], name[configs]]]
call[name[self].start, parameter[name[unique_id], name[configs]]] | keyword[def] identifier[hard_bounce] ( identifier[self] , identifier[unique_id] , identifier[configs] = keyword[None] ):
literal[string]
identifier[self] . identifier[kill] ( identifier[unique_id] , identifier[configs] )
identifier[self] . identifier[start] ( identifier[unique_id] , identifier[configs] ) | def hard_bounce(self, unique_id, configs=None):
""" Performs a hard bounce (kill and start) for the specified process
:Parameter unique_id: the name of the process
"""
self.kill(unique_id, configs)
self.start(unique_id, configs) |
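# hard_bounce() above is sugar for kill-then-start; the equivalent inline calls
# (the deployer instance and process id are illustrative):
deployer.kill("service-1", None)
deployer.start("service-1", None)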
def p_expression_lesseq(self, p):
'expression : expression LE expression'
p[0] = LessEq(p[1], p[3], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | def function[p_expression_lesseq, parameter[self, p]]:
constant[expression : expression LE expression]
call[name[p]][constant[0]] assign[=] call[name[LessEq], parameter[call[name[p]][constant[1]], call[name[p]][constant[3]]]]
call[name[p].set_lineno, parameter[constant[0], call[name[p].lineno, parameter[constant[1]]]]] | keyword[def] identifier[p_expression_lesseq] ( identifier[self] , identifier[p] ):
literal[string]
identifier[p] [ literal[int] ]= identifier[LessEq] ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] ))
identifier[p] . identifier[set_lineno] ( literal[int] , identifier[p] . identifier[lineno] ( literal[int] )) | def p_expression_lesseq(self, p):
"""expression : expression LE expression"""
p[0] = LessEq(p[1], p[3], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) |
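# A sibling PLY rule following the exact same pattern; GreaterEq is assumed to
# exist next to LessEq among this parser's AST node classes:
def p_expression_greatereq(self, p):
    'expression : expression GE expression'
    p[0] = GreaterEq(p[1], p[3], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))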
def set_fig_size(self, width, height=None):
"""Set the figure size in inches.
Sets the figure size with a call to fig.set_size_inches.
Default in code is 8 inches for each.
Args:
width (float): Dimensions for figure width in inches.
height (float, optional): Dimensions for figure height in inches. Default is None.
"""
self.figure.figure_width = width
self.figure.figure_height = height
return | def function[set_fig_size, parameter[self, width, height]]:
constant[Set the figure size in inches.
Sets the figure size with a call to fig.set_size_inches.
Default in code is 8 inches for each.
Args:
width (float): Dimensions for figure width in inches.
height (float, optional): Dimensions for figure height in inches. Default is None.
]
name[self].figure.figure_width assign[=] name[width]
name[self].figure.figure_height assign[=] name[height]
return[None] | keyword[def] identifier[set_fig_size] ( identifier[self] , identifier[width] , identifier[height] = keyword[None] ):
literal[string]
identifier[self] . identifier[figure] . identifier[figure_width] = identifier[width]
identifier[self] . identifier[figure] . identifier[figure_height] = identifier[height]
keyword[return] | def set_fig_size(self, width, height=None):
"""Set the figure size in inches.
Sets the figure size with a call to fig.set_size_inches.
Default in code is 8 inches for each.
Args:
width (float): Dimensions for figure width in inches.
height (float, optional): Dimensions for figure height in inches. Default is None.
"""
self.figure.figure_width = width
self.figure.figure_height = height
return |
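# Usage sketch for set_fig_size() above (the plot object is illustrative):
plot.set_fig_size(8.0)        # set an 8-inch width, leave height unset (None)
plot.set_fig_size(8.0, 6.0)   # classic 4:3 canvas in inches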
def get_glibc_version():
"""
Returns:
Version as a pair of ints (major, minor) or None
"""
# TODO: Look into a nicer way to get the version
try:
out = subprocess.Popen(['ldd', '--version'],
stdout=subprocess.PIPE).communicate()[0]
except OSError:
return
    match = re.search(r'([0-9]+)\.([0-9]+)\.?[0-9]*', out)
try:
return map(int, match.groups())
except AttributeError:
return | def function[get_glibc_version, parameter[]]:
constant[
Returns:
Version as a pair of ints (major, minor) or None
]
<ast.Try object at 0x7da2041dbf70>
variable[match] assign[=] call[name[re].search, parameter[constant[([0-9]+)\.([0-9]+)\.?[0-9]*], name[out]]]
<ast.Try object at 0x7da2041d9db0> | keyword[def] identifier[get_glibc_version] ():
literal[string]
keyword[try] :
identifier[out] = identifier[subprocess] . identifier[Popen] ([ literal[string] , literal[string] ],
identifier[stdout] = identifier[subprocess] . identifier[PIPE] ). identifier[communicate] ()[ literal[int] ]
keyword[except] identifier[OSError] :
keyword[return]
identifier[match] = identifier[re] . identifier[search] ( literal[string] , identifier[out] )
keyword[try] :
keyword[return] identifier[map] ( identifier[int] , identifier[match] . identifier[groups] ())
keyword[except] identifier[AttributeError] :
keyword[return] | def get_glibc_version():
"""
Returns:
Version as a pair of ints (major, minor) or None
"""
# TODO: Look into a nicer way to get the version
try:
out = subprocess.Popen(['ldd', '--version'], stdout=subprocess.PIPE).communicate()[0] # depends on [control=['try'], data=[]]
except OSError:
return # depends on [control=['except'], data=[]]
match = re.search('([0-9]+)\\.([0-9]+)\\.?[0-9]*', out)
try:
return map(int, match.groups()) # depends on [control=['try'], data=[]]
except AttributeError:
return # depends on [control=['except'], data=[]] |
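# A Python 3-safe variant of the function above (a sketch, not the original
# API): decode ldd's output as text and return an explicit tuple instead of
# the py2-style map() result.
import re
import subprocess

def get_glibc_version_py3():
    try:
        out = subprocess.run(["ldd", "--version"], capture_output=True,
                             text=True, check=False).stdout
    except OSError:
        return None
    match = re.search(r"([0-9]+)\.([0-9]+)", out)
    return tuple(int(g) for g in match.groups()) if match else None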
def create_properties(self): # pylint: disable=no-self-use
"""
Format the properties with which to instantiate the connection.
This acts like a user agent over HTTP.
:rtype: dict
"""
properties = {}
properties["product"] = "eventhub.python"
properties["version"] = __version__
properties["framework"] = "Python {}.{}.{}".format(*sys.version_info[0:3])
properties["platform"] = sys.platform
return properties | def function[create_properties, parameter[self]]:
constant[
Format the properties with which to instantiate the connection.
This acts like a user agent over HTTP.
:rtype: dict
]
variable[properties] assign[=] dictionary[[], []]
call[name[properties]][constant[product]] assign[=] constant[eventhub.python]
call[name[properties]][constant[version]] assign[=] name[__version__]
call[name[properties]][constant[framework]] assign[=] call[constant[Python {}.{}.{}].format, parameter[<ast.Starred object at 0x7da204960d90>]]
call[name[properties]][constant[platform]] assign[=] name[sys].platform
return[name[properties]] | keyword[def] identifier[create_properties] ( identifier[self] ):
literal[string]
identifier[properties] ={}
identifier[properties] [ literal[string] ]= literal[string]
identifier[properties] [ literal[string] ]= identifier[__version__]
identifier[properties] [ literal[string] ]= literal[string] . identifier[format] (* identifier[sys] . identifier[version_info] [ literal[int] : literal[int] ])
identifier[properties] [ literal[string] ]= identifier[sys] . identifier[platform]
keyword[return] identifier[properties] | def create_properties(self): # pylint: disable=no-self-use
'\n Format the properties with which to instantiate the connection.\n This acts like a user agent over HTTP.\n\n :rtype: dict\n '
properties = {}
properties['product'] = 'eventhub.python'
properties['version'] = __version__
properties['framework'] = 'Python {}.{}.{}'.format(*sys.version_info[0:3])
properties['platform'] = sys.platform
return properties |
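# Shape of the dict create_properties() above returns (values illustrative,
# e.g. on CPython 3.8 under Linux with package version 1.2.0):
expected = {"product": "eventhub.python",
            "version": "1.2.0",
            "framework": "Python 3.8.10",
            "platform": "linux"}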
def __updatable():
"""
Function used to output packages update information in the console
"""
# Add argument for console
parser = argparse.ArgumentParser()
parser.add_argument('file', nargs='?', type=argparse.FileType(), default=None, help='Requirements file')
args = parser.parse_args()
# Get list of packages
if args.file:
packages = parse_requirements_list(args.file)
else:
packages = get_parsed_environment_package_list()
# Output updates
for package in packages:
__list_package_updates(package['package'], package['version']) | def function[__updatable, parameter[]]:
constant[
Function used to output packages update information in the console
]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[file]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[]]
if name[args].file begin[:]
variable[packages] assign[=] call[name[parse_requirements_list], parameter[name[args].file]]
for taget[name[package]] in starred[name[packages]] begin[:]
call[name[__list_package_updates], parameter[call[name[package]][constant[package]], call[name[package]][constant[version]]]] | keyword[def] identifier[__updatable] ():
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ()
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] , identifier[type] = identifier[argparse] . identifier[FileType] (), identifier[default] = keyword[None] , identifier[help] = literal[string] )
identifier[args] = identifier[parser] . identifier[parse_args] ()
keyword[if] identifier[args] . identifier[file] :
identifier[packages] = identifier[parse_requirements_list] ( identifier[args] . identifier[file] )
keyword[else] :
identifier[packages] = identifier[get_parsed_environment_package_list] ()
keyword[for] identifier[package] keyword[in] identifier[packages] :
identifier[__list_package_updates] ( identifier[package] [ literal[string] ], identifier[package] [ literal[string] ]) | def __updatable():
"""
Function used to output packages update information in the console
"""
# Add argument for console
parser = argparse.ArgumentParser()
parser.add_argument('file', nargs='?', type=argparse.FileType(), default=None, help='Requirements file')
args = parser.parse_args()
# Get list of packages
if args.file:
packages = parse_requirements_list(args.file) # depends on [control=['if'], data=[]]
else:
packages = get_parsed_environment_package_list()
# Output updates
for package in packages:
__list_package_updates(package['package'], package['version']) # depends on [control=['for'], data=['package']] |
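# __updatable() above is wired up as a console entry point; both invocation
# styles it supports, sketched as shell comments (command name illustrative):
#   $ updatable                    # scan the active environment for updates
#   $ updatable requirements.txt   # scan a pinned requirements file instead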
def upload(self, dest_dir, file_handler, filename, callback=None, **kwargs):
"""上传单个文件(<2G).
| 百度PCS服务目前支持最大2G的单个文件上传。
| 如需支持超大文件(>2G)的断点续传,请参考下面的“分片文件上传”方法。
:param dest_dir: 网盘中文件的保存路径(不包含文件名)。
必须以 / 开头。
.. warning::
* 注意本接口的 dest_dir 参数不包含文件名,只包含路径
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param file_handler: 上传文件对象 。(e.g. ``open('foobar', 'rb')`` )
.. warning::
注意不要使用 .read() 方法.
:type file_handler: file
:param callback: 上传进度回调函数
需要包含 size 和 progress 名字的参数
:param filename:
:return: requests.Response 对象
.. note::
返回正确时返回的 Reponse 对象 content 中的数据结构
{"path":"服务器文件路径","size":文件大小,"ctime":创建时间,"mtime":修改时间,"md5":"文件md5值","fs_id":服务器文件识别号,"isdir":是否为目录,"request_id":请求识别号}
"""
params = {
'path':str(dest_dir)+"/"+str(filename)
}
tmp_filename = ''.join(random.sample(string.ascii_letters, 10))
files = {'file': (tmp_filename, file_handler)}
url = 'https://{0}/rest/2.0/pcs/file'.format(BAIDUPCS_SERVER)
return self._request('file', 'upload', url=url, extra_params=params,
files=files, callback=callback, **kwargs) | def function[upload, parameter[self, dest_dir, file_handler, filename, callback]]:
    constant[Upload a single file (<2G).
    | The Baidu PCS service currently supports single-file uploads of at most 2G.
    | For very large files (>2G) with resumable transfer, see the
      "chunked file upload" methods below.
    :param dest_dir: save path of the file on the netdisk, excluding the
                     file name. Must start with /.
                      .. warning::
                          * the dest_dir parameter of this interface
                            holds only the path, never the file name;
                          * the path length is limited to 1000;
                          * the path must not contain the characters
                            ``\\ ? | " > < : *``;
                          * a file or path name must not begin or end
                            with ``.`` or whitespace, where whitespace
                            includes ``\r, \n, \t, space, \0, \x0B``.
    :param file_handler: the file object to upload
                         (e.g. ``open('foobar', 'rb')`` )
                         .. warning::
                             do not call its .read() method.
    :type file_handler: file
    :param callback: upload progress callback; it must accept parameters
                     named size and progress
    :param filename:
    :return: requests.Response object
        .. note::
            on success, the content of the returned Response object has
            the structure
            {"path":"server file path","size":file size,"ctime":create time,"mtime":modify time,"md5":"file md5","fs_id":server file id,"isdir":is directory,"request_id":request id}
    ]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b1952560>], [<ast.BinOp object at 0x7da1b1952680>]]
variable[tmp_filename] assign[=] call[constant[].join, parameter[call[name[random].sample, parameter[name[string].ascii_letters, constant[10]]]]]
variable[files] assign[=] dictionary[[<ast.Constant object at 0x7da1b1953bb0>], [<ast.Tuple object at 0x7da1b1953880>]]
variable[url] assign[=] call[constant[https://{0}/rest/2.0/pcs/file].format, parameter[name[BAIDUPCS_SERVER]]]
return[call[name[self]._request, parameter[constant[file], constant[upload]]]] | keyword[def] identifier[upload] ( identifier[self] , identifier[dest_dir] , identifier[file_handler] , identifier[filename] , identifier[callback] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[params] ={
literal[string] : identifier[str] ( identifier[dest_dir] )+ literal[string] + identifier[str] ( identifier[filename] )
}
identifier[tmp_filename] = literal[string] . identifier[join] ( identifier[random] . identifier[sample] ( identifier[string] . identifier[ascii_letters] , literal[int] ))
identifier[files] ={ literal[string] :( identifier[tmp_filename] , identifier[file_handler] )}
identifier[url] = literal[string] . identifier[format] ( identifier[BAIDUPCS_SERVER] )
keyword[return] identifier[self] . identifier[_request] ( literal[string] , literal[string] , identifier[url] = identifier[url] , identifier[extra_params] = identifier[params] ,
identifier[files] = identifier[files] , identifier[callback] = identifier[callback] ,** identifier[kwargs] ) | def upload(self, dest_dir, file_handler, filename, callback=None, **kwargs):
"""上传单个文件(<2G).
| 百度PCS服务目前支持最大2G的单个文件上传。
| 如需支持超大文件(>2G)的断点续传,请参考下面的“分片文件上传”方法。
:param dest_dir: 网盘中文件的保存路径(不包含文件名)。
必须以 / 开头。
.. warning::
* 注意本接口的 dest_dir 参数不包含文件名,只包含路径
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param file_handler: 上传文件对象 。(e.g. ``open('foobar', 'rb')`` )
.. warning::
注意不要使用 .read() 方法.
:type file_handler: file
:param callback: 上传进度回调函数
需要包含 size 和 progress 名字的参数
:param filename:
:return: requests.Response 对象
.. note::
返回正确时返回的 Reponse 对象 content 中的数据结构
{"path":"服务器文件路径","size":文件大小,"ctime":创建时间,"mtime":修改时间,"md5":"文件md5值","fs_id":服务器文件识别号,"isdir":是否为目录,"request_id":请求识别号}
"""
params = {'path': str(dest_dir) + '/' + str(filename)}
tmp_filename = ''.join(random.sample(string.ascii_letters, 10))
files = {'file': (tmp_filename, file_handler)}
url = 'https://{0}/rest/2.0/pcs/file'.format(BAIDUPCS_SERVER)
return self._request('file', 'upload', url=url, extra_params=params, files=files, callback=callback, **kwargs) |
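# Usage sketch for upload() above (the pcs client object and paths are
# illustrative; the callback receives arguments named size and progress):
def show_progress(size, progress):
    print("{}/{} bytes".format(progress, size))

with open("report.pdf", "rb") as fh:
    response = pcs.upload("/apps/demo", fh, "report.pdf", callback=show_progress)
print(response.json()["path"])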
def isHereDoc(self, line, column):
"""Check if text at given position is a here document.
If language is not known, or text is not parsed yet, ``False`` is returned
"""
return self._highlighter is not None and \
self._highlighter.isHereDoc(self.document().findBlockByNumber(line), column) | def function[isHereDoc, parameter[self, line, column]]:
constant[Check if text at given position is a here document.
If language is not known, or text is not parsed yet, ``False`` is returned
]
return[<ast.BoolOp object at 0x7da18f58d5a0>] | keyword[def] identifier[isHereDoc] ( identifier[self] , identifier[line] , identifier[column] ):
literal[string]
keyword[return] identifier[self] . identifier[_highlighter] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[_highlighter] . identifier[isHereDoc] ( identifier[self] . identifier[document] (). identifier[findBlockByNumber] ( identifier[line] ), identifier[column] ) | def isHereDoc(self, line, column):
"""Check if text at given position is a here document.
If language is not known, or text is not parsed yet, ``False`` is returned
"""
return self._highlighter is not None and self._highlighter.isHereDoc(self.document().findBlockByNumber(line), column) |
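# Usage sketch for isHereDoc() above (the editor object is illustrative):
if editor.isHereDoc(line=12, column=0):
    print("position 12:0 is inside a here document")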
def data_interp(self, i, currenttime):
"""
        Method to streamline requests for data from the cache.
        Uses linear interpolation between timesteps to
        get u, v, w, temp, salt.
"""
if self.active.value is True:
while self.get_data.value is True:
logger.debug("Waiting for DataController to release cache file so I can read from it...")
timer.sleep(2)
pass
if self.need_data(i+1):
# Acquire lock for asking for data
self.data_request_lock.acquire()
self.has_data_request_lock.value = os.getpid()
try:
# Do I still need data?
if self.need_data(i+1):
# Tell the DataController that we are going to be reading from the file
with self.read_lock:
self.read_count.value += 1
self.has_read_lock.append(os.getpid())
# Open netcdf file on disk from commondataset
self.dataset.opennc()
# Get the indices for the current particle location
indices = self.dataset.get_indices('u', timeinds=[np.asarray([i-1])], point=self.part.location )
self.dataset.closenc()
with self.read_lock:
self.read_count.value -= 1
self.has_read_lock.remove(os.getpid())
# Override the time
# get the current time index data
self.point_get.value = [indices[0] + 1, indices[-2], indices[-1]]
# Request that the data controller update the cache
self.get_data.value = True
# Wait until the data controller is done
if self.active.value is True:
while self.get_data.value is True:
logger.debug("Waiting for DataController to update cache with the CURRENT time index")
timer.sleep(2)
pass
# Do we still need to get the next timestep?
if self.need_data(i+1):
# get the next time index data
self.point_get.value = [indices[0] + 2, indices[-2], indices[-1]]
# Request that the data controller update the cache
self.get_data.value = True
# Wait until the data controller is done
if self.active.value is True:
while self.get_data.value is True:
logger.debug("Waiting for DataController to update cache with the NEXT time index")
timer.sleep(2)
pass
except StandardError:
logger.warn("Particle failed to request data correctly")
raise
finally:
# Release lock for asking for data
self.has_data_request_lock.value = -1
self.data_request_lock.release()
if self.caching is True:
# Tell the DataController that we are going to be reading from the file
with self.read_lock:
self.read_count.value += 1
self.has_read_lock.append(os.getpid())
try:
# Open the Cache netCDF file on disk
self.dataset.opennc()
# Grab data at time index closest to particle location
u = [np.mean(np.mean(self.dataset.get_values('u', timeinds=[np.asarray([i])], point=self.part.location ))),
np.mean(np.mean(self.dataset.get_values('u', timeinds=[np.asarray([i+1])], point=self.part.location )))]
v = [np.mean(np.mean(self.dataset.get_values('v', timeinds=[np.asarray([i])], point=self.part.location ))),
np.mean(np.mean(self.dataset.get_values('v', timeinds=[np.asarray([i+1])], point=self.part.location )))]
                    # if there is vertical velocity in the dataset, get it
if 'w' in self.dataset.nc.variables:
w = [np.mean(np.mean(self.dataset.get_values('w', timeinds=[np.asarray([i])], point=self.part.location ))),
np.mean(np.mean(self.dataset.get_values('w', timeinds=[np.asarray([i+1])], point=self.part.location )))]
else:
w = [0.0, 0.0]
# If there is salt and temp in the dataset, get it
if self.temp_name is not None and self.salt_name is not None:
temp = [np.mean(np.mean(self.dataset.get_values('temp', timeinds=[np.asarray([i])], point=self.part.location ))),
np.mean(np.mean(self.dataset.get_values('temp', timeinds=[np.asarray([i+1])], point=self.part.location )))]
salt = [np.mean(np.mean(self.dataset.get_values('salt', timeinds=[np.asarray([i])], point=self.part.location ))),
np.mean(np.mean(self.dataset.get_values('salt', timeinds=[np.asarray([i+1])], point=self.part.location )))]
# Check for nans that occur in the ocean (happens because
# of model and coastline resolution mismatches)
if np.isnan(u).any() or np.isnan(v).any() or np.isnan(w).any():
# Take the mean of the closest 4 points
# If this includes nan which it will, result is nan
uarray1 = self.dataset.get_values('u', timeinds=[np.asarray([i])], point=self.part.location, num=2)
varray1 = self.dataset.get_values('v', timeinds=[np.asarray([i])], point=self.part.location, num=2)
uarray2 = self.dataset.get_values('u', timeinds=[np.asarray([i+1])], point=self.part.location, num=2)
varray2 = self.dataset.get_values('v', timeinds=[np.asarray([i+1])], point=self.part.location, num=2)
if 'w' in self.dataset.nc.variables:
warray1 = self.dataset.get_values('w', timeinds=[np.asarray([i])], point=self.part.location, num=2)
warray2 = self.dataset.get_values('w', timeinds=[np.asarray([i+1])], point=self.part.location, num=2)
w = [warray1.mean(), warray2.mean()]
else:
w = [0.0, 0.0]
if self.temp_name is not None and self.salt_name is not None:
temparray1 = self.dataset.get_values('temp', timeinds=[np.asarray([i])], point=self.part.location, num=2)
saltarray1 = self.dataset.get_values('salt', timeinds=[np.asarray([i])], point=self.part.location, num=2)
temparray2 = self.dataset.get_values('temp', timeinds=[np.asarray([i+1])], point=self.part.location, num=2)
saltarray2 = self.dataset.get_values('salt', timeinds=[np.asarray([i+1])], point=self.part.location, num=2)
temp = [temparray1.mean(), temparray2.mean()]
salt = [saltarray1.mean(), saltarray2.mean()]
u = [uarray1.mean(), uarray2.mean()]
v = [varray1.mean(), varray2.mean()]
# Linear interp of data between timesteps
currenttime = date2num(currenttime)
timevar = self.timevar.datenum
u = self.linterp(timevar[i:i+2], u, currenttime)
v = self.linterp(timevar[i:i+2], v, currenttime)
w = self.linterp(timevar[i:i+2], w, currenttime)
if self.temp_name is not None and self.salt_name is not None:
temp = self.linterp(timevar[i:i+2], temp, currenttime)
salt = self.linterp(timevar[i:i+2], salt, currenttime)
if self.temp_name is None:
temp = np.nan
if self.salt_name is None:
salt = np.nan
except StandardError:
logger.error("Error in data_interp method on ForceParticle")
raise
finally:
# If caching is False, we don't have to close the dataset. We can stay in read-only mode.
if self.caching is True:
self.dataset.closenc()
with self.read_lock:
self.read_count.value -= 1
self.has_read_lock.remove(os.getpid())
return u, v, w, temp, salt | def function[data_interp, parameter[self, i, currenttime]]:
constant[
    Method to streamline requests for data from the cache.
    Uses linear interpolation between timesteps to
    get u, v, w, temp, salt.
]
if compare[name[self].active.value is constant[True]] begin[:]
while compare[name[self].get_data.value is constant[True]] begin[:]
call[name[logger].debug, parameter[constant[Waiting for DataController to release cache file so I can read from it...]]]
call[name[timer].sleep, parameter[constant[2]]]
pass
if call[name[self].need_data, parameter[binary_operation[name[i] + constant[1]]]] begin[:]
call[name[self].data_request_lock.acquire, parameter[]]
name[self].has_data_request_lock.value assign[=] call[name[os].getpid, parameter[]]
<ast.Try object at 0x7da1b28c6aa0>
if compare[name[self].caching is constant[True]] begin[:]
with name[self].read_lock begin[:]
<ast.AugAssign object at 0x7da1b28869e0>
call[name[self].has_read_lock.append, parameter[call[name[os].getpid, parameter[]]]]
<ast.Try object at 0x7da1b2886740>
return[tuple[[<ast.Name object at 0x7da1b27eb2e0>, <ast.Name object at 0x7da1b27e84f0>, <ast.Name object at 0x7da1b27ea470>, <ast.Name object at 0x7da1b27e8cd0>, <ast.Name object at 0x7da1b27e8f70>]]] | keyword[def] identifier[data_interp] ( identifier[self] , identifier[i] , identifier[currenttime] ):
literal[string]
keyword[if] identifier[self] . identifier[active] . identifier[value] keyword[is] keyword[True] :
keyword[while] identifier[self] . identifier[get_data] . identifier[value] keyword[is] keyword[True] :
identifier[logger] . identifier[debug] ( literal[string] )
identifier[timer] . identifier[sleep] ( literal[int] )
keyword[pass]
keyword[if] identifier[self] . identifier[need_data] ( identifier[i] + literal[int] ):
identifier[self] . identifier[data_request_lock] . identifier[acquire] ()
identifier[self] . identifier[has_data_request_lock] . identifier[value] = identifier[os] . identifier[getpid] ()
keyword[try] :
keyword[if] identifier[self] . identifier[need_data] ( identifier[i] + literal[int] ):
keyword[with] identifier[self] . identifier[read_lock] :
identifier[self] . identifier[read_count] . identifier[value] += literal[int]
identifier[self] . identifier[has_read_lock] . identifier[append] ( identifier[os] . identifier[getpid] ())
identifier[self] . identifier[dataset] . identifier[opennc] ()
identifier[indices] = identifier[self] . identifier[dataset] . identifier[get_indices] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] - literal[int] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] )
identifier[self] . identifier[dataset] . identifier[closenc] ()
keyword[with] identifier[self] . identifier[read_lock] :
identifier[self] . identifier[read_count] . identifier[value] -= literal[int]
identifier[self] . identifier[has_read_lock] . identifier[remove] ( identifier[os] . identifier[getpid] ())
identifier[self] . identifier[point_get] . identifier[value] =[ identifier[indices] [ literal[int] ]+ literal[int] , identifier[indices] [- literal[int] ], identifier[indices] [- literal[int] ]]
identifier[self] . identifier[get_data] . identifier[value] = keyword[True]
keyword[if] identifier[self] . identifier[active] . identifier[value] keyword[is] keyword[True] :
keyword[while] identifier[self] . identifier[get_data] . identifier[value] keyword[is] keyword[True] :
identifier[logger] . identifier[debug] ( literal[string] )
identifier[timer] . identifier[sleep] ( literal[int] )
keyword[pass]
keyword[if] identifier[self] . identifier[need_data] ( identifier[i] + literal[int] ):
identifier[self] . identifier[point_get] . identifier[value] =[ identifier[indices] [ literal[int] ]+ literal[int] , identifier[indices] [- literal[int] ], identifier[indices] [- literal[int] ]]
identifier[self] . identifier[get_data] . identifier[value] = keyword[True]
keyword[if] identifier[self] . identifier[active] . identifier[value] keyword[is] keyword[True] :
keyword[while] identifier[self] . identifier[get_data] . identifier[value] keyword[is] keyword[True] :
identifier[logger] . identifier[debug] ( literal[string] )
identifier[timer] . identifier[sleep] ( literal[int] )
keyword[pass]
keyword[except] identifier[StandardError] :
identifier[logger] . identifier[warn] ( literal[string] )
keyword[raise]
keyword[finally] :
identifier[self] . identifier[has_data_request_lock] . identifier[value] =- literal[int]
identifier[self] . identifier[data_request_lock] . identifier[release] ()
keyword[if] identifier[self] . identifier[caching] keyword[is] keyword[True] :
keyword[with] identifier[self] . identifier[read_lock] :
identifier[self] . identifier[read_count] . identifier[value] += literal[int]
identifier[self] . identifier[has_read_lock] . identifier[append] ( identifier[os] . identifier[getpid] ())
keyword[try] :
identifier[self] . identifier[dataset] . identifier[opennc] ()
identifier[u] =[ identifier[np] . identifier[mean] ( identifier[np] . identifier[mean] ( identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] ))),
identifier[np] . identifier[mean] ( identifier[np] . identifier[mean] ( identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] + literal[int] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] )))]
identifier[v] =[ identifier[np] . identifier[mean] ( identifier[np] . identifier[mean] ( identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] ))),
identifier[np] . identifier[mean] ( identifier[np] . identifier[mean] ( identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] + literal[int] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] )))]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[dataset] . identifier[nc] . identifier[variables] :
identifier[w] =[ identifier[np] . identifier[mean] ( identifier[np] . identifier[mean] ( identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] ))),
identifier[np] . identifier[mean] ( identifier[np] . identifier[mean] ( identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] + literal[int] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] )))]
keyword[else] :
identifier[w] =[ literal[int] , literal[int] ]
keyword[if] identifier[self] . identifier[temp_name] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[salt_name] keyword[is] keyword[not] keyword[None] :
identifier[temp] =[ identifier[np] . identifier[mean] ( identifier[np] . identifier[mean] ( identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] ))),
identifier[np] . identifier[mean] ( identifier[np] . identifier[mean] ( identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] + literal[int] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] )))]
identifier[salt] =[ identifier[np] . identifier[mean] ( identifier[np] . identifier[mean] ( identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] ))),
identifier[np] . identifier[mean] ( identifier[np] . identifier[mean] ( identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] + literal[int] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] )))]
keyword[if] identifier[np] . identifier[isnan] ( identifier[u] ). identifier[any] () keyword[or] identifier[np] . identifier[isnan] ( identifier[v] ). identifier[any] () keyword[or] identifier[np] . identifier[isnan] ( identifier[w] ). identifier[any] ():
identifier[uarray1] = identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] , identifier[num] = literal[int] )
identifier[varray1] = identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] , identifier[num] = literal[int] )
identifier[uarray2] = identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] + literal[int] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] , identifier[num] = literal[int] )
identifier[varray2] = identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] + literal[int] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] , identifier[num] = literal[int] )
keyword[if] literal[string] keyword[in] identifier[self] . identifier[dataset] . identifier[nc] . identifier[variables] :
identifier[warray1] = identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] , identifier[num] = literal[int] )
identifier[warray2] = identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] + literal[int] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] , identifier[num] = literal[int] )
identifier[w] =[ identifier[warray1] . identifier[mean] (), identifier[warray2] . identifier[mean] ()]
keyword[else] :
identifier[w] =[ literal[int] , literal[int] ]
keyword[if] identifier[self] . identifier[temp_name] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[salt_name] keyword[is] keyword[not] keyword[None] :
identifier[temparray1] = identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] , identifier[num] = literal[int] )
identifier[saltarray1] = identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] , identifier[num] = literal[int] )
identifier[temparray2] = identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] + literal[int] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] , identifier[num] = literal[int] )
identifier[saltarray2] = identifier[self] . identifier[dataset] . identifier[get_values] ( literal[string] , identifier[timeinds] =[ identifier[np] . identifier[asarray] ([ identifier[i] + literal[int] ])], identifier[point] = identifier[self] . identifier[part] . identifier[location] , identifier[num] = literal[int] )
identifier[temp] =[ identifier[temparray1] . identifier[mean] (), identifier[temparray2] . identifier[mean] ()]
identifier[salt] =[ identifier[saltarray1] . identifier[mean] (), identifier[saltarray2] . identifier[mean] ()]
identifier[u] =[ identifier[uarray1] . identifier[mean] (), identifier[uarray2] . identifier[mean] ()]
identifier[v] =[ identifier[varray1] . identifier[mean] (), identifier[varray2] . identifier[mean] ()]
identifier[currenttime] = identifier[date2num] ( identifier[currenttime] )
identifier[timevar] = identifier[self] . identifier[timevar] . identifier[datenum]
identifier[u] = identifier[self] . identifier[linterp] ( identifier[timevar] [ identifier[i] : identifier[i] + literal[int] ], identifier[u] , identifier[currenttime] )
identifier[v] = identifier[self] . identifier[linterp] ( identifier[timevar] [ identifier[i] : identifier[i] + literal[int] ], identifier[v] , identifier[currenttime] )
identifier[w] = identifier[self] . identifier[linterp] ( identifier[timevar] [ identifier[i] : identifier[i] + literal[int] ], identifier[w] , identifier[currenttime] )
keyword[if] identifier[self] . identifier[temp_name] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[salt_name] keyword[is] keyword[not] keyword[None] :
identifier[temp] = identifier[self] . identifier[linterp] ( identifier[timevar] [ identifier[i] : identifier[i] + literal[int] ], identifier[temp] , identifier[currenttime] )
identifier[salt] = identifier[self] . identifier[linterp] ( identifier[timevar] [ identifier[i] : identifier[i] + literal[int] ], identifier[salt] , identifier[currenttime] )
keyword[if] identifier[self] . identifier[temp_name] keyword[is] keyword[None] :
identifier[temp] = identifier[np] . identifier[nan]
keyword[if] identifier[self] . identifier[salt_name] keyword[is] keyword[None] :
identifier[salt] = identifier[np] . identifier[nan]
keyword[except] identifier[StandardError] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[raise]
keyword[finally] :
keyword[if] identifier[self] . identifier[caching] keyword[is] keyword[True] :
identifier[self] . identifier[dataset] . identifier[closenc] ()
keyword[with] identifier[self] . identifier[read_lock] :
identifier[self] . identifier[read_count] . identifier[value] -= literal[int]
identifier[self] . identifier[has_read_lock] . identifier[remove] ( identifier[os] . identifier[getpid] ())
keyword[return] identifier[u] , identifier[v] , identifier[w] , identifier[temp] , identifier[salt] | def data_interp(self, i, currenttime):
"""
Method to streamline requests for data from the cache.
Uses linear interpolation between timesteps to
get u, v, w, temp, salt.
"""
if self.active.value is True:
while self.get_data.value is True:
logger.debug('Waiting for DataController to release cache file so I can read from it...')
timer.sleep(2)
pass # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
if self.need_data(i + 1):
# Acquire lock for asking for data
self.data_request_lock.acquire()
self.has_data_request_lock.value = os.getpid()
try:
# Do I still need data?
if self.need_data(i + 1):
# Tell the DataController that we are going to be reading from the file
with self.read_lock:
self.read_count.value += 1
self.has_read_lock.append(os.getpid()) # depends on [control=['with'], data=[]]
# Open netcdf file on disk from commondataset
self.dataset.opennc()
# Get the indices for the current particle location
indices = self.dataset.get_indices('u', timeinds=[np.asarray([i - 1])], point=self.part.location)
self.dataset.closenc()
with self.read_lock:
self.read_count.value -= 1
self.has_read_lock.remove(os.getpid()) # depends on [control=['with'], data=[]]
# Override the time
# get the current time index data
self.point_get.value = [indices[0] + 1, indices[-2], indices[-1]]
# Request that the data controller update the cache
self.get_data.value = True
# Wait until the data controller is done
if self.active.value is True:
while self.get_data.value is True:
logger.debug('Waiting for DataController to update cache with the CURRENT time index')
timer.sleep(2)
pass # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
# Do we still need to get the next timestep?
if self.need_data(i + 1):
# get the next time index data
self.point_get.value = [indices[0] + 2, indices[-2], indices[-1]]
# Request that the data controller update the cache
self.get_data.value = True
# Wait until the data controller is done
if self.active.value is True:
while self.get_data.value is True:
logger.debug('Waiting for DataController to update cache with the NEXT time index')
timer.sleep(2)
pass # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except StandardError:
logger.warn('Particle failed to request data correctly')
raise # depends on [control=['except'], data=[]]
finally:
# Release lock for asking for data
self.has_data_request_lock.value = -1
self.data_request_lock.release() # depends on [control=['if'], data=[]]
if self.caching is True:
# Tell the DataController that we are going to be reading from the file
with self.read_lock:
self.read_count.value += 1
self.has_read_lock.append(os.getpid()) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
try:
# Open the Cache netCDF file on disk
self.dataset.opennc()
# Grab data at time index closest to particle location
u = [np.mean(np.mean(self.dataset.get_values('u', timeinds=[np.asarray([i])], point=self.part.location))), np.mean(np.mean(self.dataset.get_values('u', timeinds=[np.asarray([i + 1])], point=self.part.location)))]
v = [np.mean(np.mean(self.dataset.get_values('v', timeinds=[np.asarray([i])], point=self.part.location))), np.mean(np.mean(self.dataset.get_values('v', timeinds=[np.asarray([i + 1])], point=self.part.location)))]
# if there is vertical velocity in the dataset, get it
if 'w' in self.dataset.nc.variables:
w = [np.mean(np.mean(self.dataset.get_values('w', timeinds=[np.asarray([i])], point=self.part.location))), np.mean(np.mean(self.dataset.get_values('w', timeinds=[np.asarray([i + 1])], point=self.part.location)))] # depends on [control=['if'], data=[]]
else:
w = [0.0, 0.0]
# If there is salt and temp in the dataset, get it
if self.temp_name is not None and self.salt_name is not None:
temp = [np.mean(np.mean(self.dataset.get_values('temp', timeinds=[np.asarray([i])], point=self.part.location))), np.mean(np.mean(self.dataset.get_values('temp', timeinds=[np.asarray([i + 1])], point=self.part.location)))]
salt = [np.mean(np.mean(self.dataset.get_values('salt', timeinds=[np.asarray([i])], point=self.part.location))), np.mean(np.mean(self.dataset.get_values('salt', timeinds=[np.asarray([i + 1])], point=self.part.location)))] # depends on [control=['if'], data=[]]
# Check for nans that occur in the ocean (happens because
# of model and coastline resolution mismatches)
if np.isnan(u).any() or np.isnan(v).any() or np.isnan(w).any():
# Take the mean of the closest 4 points
# If this includes NaN (which it will), the result is NaN
uarray1 = self.dataset.get_values('u', timeinds=[np.asarray([i])], point=self.part.location, num=2)
varray1 = self.dataset.get_values('v', timeinds=[np.asarray([i])], point=self.part.location, num=2)
uarray2 = self.dataset.get_values('u', timeinds=[np.asarray([i + 1])], point=self.part.location, num=2)
varray2 = self.dataset.get_values('v', timeinds=[np.asarray([i + 1])], point=self.part.location, num=2)
if 'w' in self.dataset.nc.variables:
warray1 = self.dataset.get_values('w', timeinds=[np.asarray([i])], point=self.part.location, num=2)
warray2 = self.dataset.get_values('w', timeinds=[np.asarray([i + 1])], point=self.part.location, num=2)
w = [warray1.mean(), warray2.mean()] # depends on [control=['if'], data=[]]
else:
w = [0.0, 0.0]
if self.temp_name is not None and self.salt_name is not None:
temparray1 = self.dataset.get_values('temp', timeinds=[np.asarray([i])], point=self.part.location, num=2)
saltarray1 = self.dataset.get_values('salt', timeinds=[np.asarray([i])], point=self.part.location, num=2)
temparray2 = self.dataset.get_values('temp', timeinds=[np.asarray([i + 1])], point=self.part.location, num=2)
saltarray2 = self.dataset.get_values('salt', timeinds=[np.asarray([i + 1])], point=self.part.location, num=2)
temp = [temparray1.mean(), temparray2.mean()]
salt = [saltarray1.mean(), saltarray2.mean()] # depends on [control=['if'], data=[]]
u = [uarray1.mean(), uarray2.mean()]
v = [varray1.mean(), varray2.mean()] # depends on [control=['if'], data=[]]
# Linear interp of data between timesteps
currenttime = date2num(currenttime)
timevar = self.timevar.datenum
u = self.linterp(timevar[i:i + 2], u, currenttime)
v = self.linterp(timevar[i:i + 2], v, currenttime)
w = self.linterp(timevar[i:i + 2], w, currenttime)
if self.temp_name is not None and self.salt_name is not None:
temp = self.linterp(timevar[i:i + 2], temp, currenttime)
salt = self.linterp(timevar[i:i + 2], salt, currenttime) # depends on [control=['if'], data=[]]
if self.temp_name is None:
temp = np.nan # depends on [control=['if'], data=[]]
if self.salt_name is None:
salt = np.nan # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except StandardError:
logger.error('Error in data_interp method on ForceParticle')
raise # depends on [control=['except'], data=[]]
finally:
# If caching is False, we don't have to close the dataset. We can stay in read-only mode.
if self.caching is True:
self.dataset.closenc()
with self.read_lock:
self.read_count.value -= 1
self.has_read_lock.remove(os.getpid()) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
return (u, v, w, temp, salt) |
def get_mask_from_sequence_lengths(sequence_lengths: torch.Tensor, max_length: int) -> torch.Tensor:
"""
Given a variable of shape ``(batch_size,)`` that represents the sequence lengths of each batch
element, this function returns a ``(batch_size, max_length)`` mask variable. For example, if
our input was ``[2, 2, 3]``, with a ``max_length`` of 4, we'd return
``[[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]``.
We require ``max_length`` here instead of just computing it from the input ``sequence_lengths``
because it lets us avoid finding the max, then copying that value from the GPU to the CPU so
that we can use it to construct a new tensor.
"""
# (batch_size, max_length)
ones = sequence_lengths.new_ones(sequence_lengths.size(0), max_length)
range_tensor = ones.cumsum(dim=1)
return (sequence_lengths.unsqueeze(1) >= range_tensor).long() | def function[get_mask_from_sequence_lengths, parameter[sequence_lengths, max_length]]:
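A quick sanity check of the mask construction, as a hedged sketch assuming PyTorch is available; the values mirror the docstring's example:

import torch

lengths = torch.tensor([2, 2, 3])
mask = get_mask_from_sequence_lengths(lengths, max_length=4)
# ones.cumsum(dim=1) gives [[1, 2, 3, 4], ...] per row; comparing each row
# against its sequence length yields:
# tensor([[1, 1, 0, 0],
#         [1, 1, 0, 0],
#         [1, 1, 1, 0]])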
constant[
Given a variable of shape ``(batch_size,)`` that represents the sequence lengths of each batch
element, this function returns a ``(batch_size, max_length)`` mask variable. For example, if
our input was ``[2, 2, 3]``, with a ``max_length`` of 4, we'd return
``[[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]``.
We require ``max_length`` here instead of just computing it from the input ``sequence_lengths``
because it lets us avoid finding the max, then copying that value from the GPU to the CPU so
that we can use it to construct a new tensor.
]
variable[ones] assign[=] call[name[sequence_lengths].new_ones, parameter[call[name[sequence_lengths].size, parameter[constant[0]]], name[max_length]]]
variable[range_tensor] assign[=] call[name[ones].cumsum, parameter[]]
return[call[compare[call[name[sequence_lengths].unsqueeze, parameter[constant[1]]] greater_or_equal[>=] name[range_tensor]].long, parameter[]]] | keyword[def] identifier[get_mask_from_sequence_lengths] ( identifier[sequence_lengths] : identifier[torch] . identifier[Tensor] , identifier[max_length] : identifier[int] )-> identifier[torch] . identifier[Tensor] :
literal[string]
identifier[ones] = identifier[sequence_lengths] . identifier[new_ones] ( identifier[sequence_lengths] . identifier[size] ( literal[int] ), identifier[max_length] )
identifier[range_tensor] = identifier[ones] . identifier[cumsum] ( identifier[dim] = literal[int] )
keyword[return] ( identifier[sequence_lengths] . identifier[unsqueeze] ( literal[int] )>= identifier[range_tensor] ). identifier[long] () | def get_mask_from_sequence_lengths(sequence_lengths: torch.Tensor, max_length: int) -> torch.Tensor:
"""
Given a variable of shape ``(batch_size,)`` that represents the sequence lengths of each batch
element, this function returns a ``(batch_size, max_length)`` mask variable. For example, if
our input was ``[2, 2, 3]``, with a ``max_length`` of 4, we'd return
``[[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]``.
We require ``max_length`` here instead of just computing it from the input ``sequence_lengths``
because it lets us avoid finding the max, then copying that value from the GPU to the CPU so
that we can use it to construct a new tensor.
"""
# (batch_size, max_length)
ones = sequence_lengths.new_ones(sequence_lengths.size(0), max_length)
range_tensor = ones.cumsum(dim=1)
return (sequence_lengths.unsqueeze(1) >= range_tensor).long() |
def OnTextFont(self, event):
"""Text font choice event handler"""
fontchoice_combobox = event.GetEventObject()
idx = event.GetInt()
try:
font_string = fontchoice_combobox.GetString(idx)
except AttributeError:
font_string = event.GetString()
post_command_event(self, self.FontMsg, font=font_string) | def function[OnTextFont, parameter[self, event]]:
constant[Text font choice event handler]
variable[fontchoice_combobox] assign[=] call[name[event].GetEventObject, parameter[]]
variable[idx] assign[=] call[name[event].GetInt, parameter[]]
<ast.Try object at 0x7da1b1723070>
call[name[post_command_event], parameter[name[self], name[self].FontMsg]] | keyword[def] identifier[OnTextFont] ( identifier[self] , identifier[event] ):
literal[string]
identifier[fontchoice_combobox] = identifier[event] . identifier[GetEventObject] ()
identifier[idx] = identifier[event] . identifier[GetInt] ()
keyword[try] :
identifier[font_string] = identifier[fontchoice_combobox] . identifier[GetString] ( identifier[idx] )
keyword[except] identifier[AttributeError] :
identifier[font_string] = identifier[event] . identifier[GetString] ()
identifier[post_command_event] ( identifier[self] , identifier[self] . identifier[FontMsg] , identifier[font] = identifier[font_string] ) | def OnTextFont(self, event):
"""Text font choice event handler"""
fontchoice_combobox = event.GetEventObject()
idx = event.GetInt()
try:
font_string = fontchoice_combobox.GetString(idx) # depends on [control=['try'], data=[]]
except AttributeError:
font_string = event.GetString() # depends on [control=['except'], data=[]]
post_command_event(self, self.FontMsg, font=font_string) |
def _body(self, full_path, environ, file_like):
"""Return an iterator over the body of the response."""
magic = self._match_magic(full_path)
if magic is not None:
return [_encode(s, self.encoding) for s in magic.body(environ,
file_like)]
else:
way_to_send = environ.get('wsgi.file_wrapper', iter_and_close)
return way_to_send(file_like, self.block_size) | def function[_body, parameter[self, full_path, environ, file_like]]:
constant[Return an iterator over the body of the response.]
variable[magic] assign[=] call[name[self]._match_magic, parameter[name[full_path]]]
if compare[name[magic] is_not constant[None]] begin[:]
return[<ast.ListComp object at 0x7da1b24bb4c0>] | keyword[def] identifier[_body] ( identifier[self] , identifier[full_path] , identifier[environ] , identifier[file_like] ):
literal[string]
identifier[magic] = identifier[self] . identifier[_match_magic] ( identifier[full_path] )
keyword[if] identifier[magic] keyword[is] keyword[not] keyword[None] :
keyword[return] [ identifier[_encode] ( identifier[s] , identifier[self] . identifier[encoding] ) keyword[for] identifier[s] keyword[in] identifier[magic] . identifier[body] ( identifier[environ] ,
identifier[file_like] )]
keyword[else] :
identifier[way_to_send] = identifier[environ] . identifier[get] ( literal[string] , identifier[iter_and_close] )
keyword[return] identifier[way_to_send] ( identifier[file_like] , identifier[self] . identifier[block_size] ) | def _body(self, full_path, environ, file_like):
"""Return an iterator over the body of the response."""
magic = self._match_magic(full_path)
if magic is not None:
return [_encode(s, self.encoding) for s in magic.body(environ, file_like)] # depends on [control=['if'], data=['magic']]
else:
way_to_send = environ.get('wsgi.file_wrapper', iter_and_close)
return way_to_send(file_like, self.block_size) |
def logarithmic_profile(wind_speed, wind_speed_height, hub_height,
roughness_length, obstacle_height=0.0):
r"""
Calculates the wind speed at hub height using a logarithmic wind profile.
The logarithmic height equation is used. There is the possibility of
including the height of the surrounding obstacles in the calculation. This
function is carried out when the parameter `wind_speed_model` of an
instance of the :class:`~.modelchain.ModelChain` class is 'logarithmic'.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed time series.
wind_speed_height : float
Height for which the parameter `wind_speed` applies.
hub_height : float
Hub height of wind turbine.
roughness_length : pandas.Series or numpy.array or float
Roughness length.
obstacle_height : float
Height of obstacles in the surrounding area of the wind turbine. Set
`obstacle_height` to zero for widespread obstacles. Default: 0.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used [1]_, [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot
\frac{\ln\left(\frac{h_{hub}-d}{z_{0}}\right)}{\ln\left(
\frac{h_{data}-d}{z_{0}}\right)}
with:
v: wind speed, h: height, :math:`z_{0}`: roughness length,
d: boundary layer offset (estimated by d = 0.7 * `obstacle_height`)
For d = 0 it results in the following equation [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot\frac{\ln\left(\frac{h_{hub}}
{z_{0}}\right)}{\ln\left(\frac{h_{data}}{z_{0}}\right)}
:math:`h_{data}` is the height at which the wind speed
:math:`v_{wind,data}` is measured and :math:`v_{wind,hub}` is the wind
speed at hub height :math:`h_{hub}` of the wind turbine.
Parameters `wind_speed_height`, `roughness_length`, `hub_height` and
`obstacle_height` have to be of the same unit.
References
----------
.. [1] Quaschning V.: "Regenerative Energiesysteme". München, Hanser
Verlag, 2011, p. 278
.. [2] Gasch, R., Twele, J.: "Windkraftanlagen". 6. Auflage, Wiesbaden,
Vieweg + Teubner, 2010, p. 129
.. [3] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz,
Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 515
"""
if 0.7 * obstacle_height > wind_speed_height:
raise ValueError("To take an obstacle height of {0} m ".format(
obstacle_height) + "into consideration, wind " +
"speed data of a greater height is needed.")
# Return np.array if wind_speed is np.array
if (isinstance(wind_speed, np.ndarray) and
isinstance(roughness_length, pd.Series)):
roughness_length = np.array(roughness_length)
return (wind_speed * np.log((hub_height - 0.7 * obstacle_height) /
roughness_length) /
np.log((wind_speed_height - 0.7 * obstacle_height) /
roughness_length)) | def function[logarithmic_profile, parameter[wind_speed, wind_speed_height, hub_height, roughness_length, obstacle_height]]:
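A short worked example under assumed inputs (measurement height 10 m, hub height 100 m, roughness length 0.15 m, no obstacles), showing how the equation above scales the measured series:

import numpy as np

v_10m = np.array([5.0, 6.5, 7.2])
v_hub = logarithmic_profile(v_10m, wind_speed_height=10,
                            hub_height=100, roughness_length=0.15)
# scale factor = ln(100 / 0.15) / ln(10 / 0.15) ~ 6.50 / 4.20 ~ 1.55,
# so v_hub ~ [7.74, 10.06, 11.15]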
constant[
Calculates the wind speed at hub height using a logarithmic wind profile.
The logarithmic height equation is used. There is the possibility of
including the height of the surrounding obstacles in the calculation. This
function is carried out when the parameter `wind_speed_model` of an
instance of the :class:`~.modelchain.ModelChain` class is 'logarithmic'.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed time series.
wind_speed_height : float
Height for which the parameter `wind_speed` applies.
hub_height : float
Hub height of wind turbine.
roughness_length : pandas.Series or numpy.array or float
Roughness length.
obstacle_height : float
Height of obstacles in the surrounding area of the wind turbine. Set
`obstacle_height` to zero for widespread obstacles. Default: 0.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used [1]_, [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot
\frac{\ln\left(\frac{h_{hub}-d}{z_{0}}\right)}{\ln\left(
\frac{h_{data}-d}{z_{0}}\right)}
with:
v: wind speed, h: height, :math:`z_{0}`: roughness length,
d: boundary layer offset (estimated by d = 0.7 * `obstacle_height`)
For d = 0 it results in the following equation [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot\frac{\ln\left(\frac{h_{hub}}
{z_{0}}\right)}{\ln\left(\frac{h_{data}}{z_{0}}\right)}
:math:`h_{data}` is the height at which the wind speed
:math:`v_{wind,data}` is measured and :math:`v_{wind,hub}` is the wind
speed at hub height :math:`h_{hub}` of the wind turbine.
Parameters `wind_speed_height`, `roughness_length`, `hub_height` and
`obstacle_height` have to be of the same unit.
References
----------
.. [1] Quaschning V.: "Regenerative Energiesysteme". München, Hanser
Verlag, 2011, p. 278
.. [2] Gasch, R., Twele, J.: "Windkraftanlagen". 6. Auflage, Wiesbaden,
Vieweg + Teubner, 2010, p. 129
.. [3] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz,
Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 515
]
if compare[binary_operation[constant[0.7] * name[obstacle_height]] greater[>] name[wind_speed_height]] begin[:]
<ast.Raise object at 0x7da1b084ff40>
if <ast.BoolOp object at 0x7da1b084df90> begin[:]
variable[roughness_length] assign[=] call[name[np].array, parameter[name[roughness_length]]]
return[binary_operation[binary_operation[name[wind_speed] * call[name[np].log, parameter[binary_operation[binary_operation[name[hub_height] - binary_operation[constant[0.7] * name[obstacle_height]]] / name[roughness_length]]]]] / call[name[np].log, parameter[binary_operation[binary_operation[name[wind_speed_height] - binary_operation[constant[0.7] * name[obstacle_height]]] / name[roughness_length]]]]]] | keyword[def] identifier[logarithmic_profile] ( identifier[wind_speed] , identifier[wind_speed_height] , identifier[hub_height] ,
identifier[roughness_length] , identifier[obstacle_height] = literal[int] ):
literal[string]
keyword[if] literal[int] * identifier[obstacle_height] > identifier[wind_speed_height] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] (
identifier[obstacle_height] )+ literal[string] +
literal[string] )
keyword[if] ( identifier[isinstance] ( identifier[wind_speed] , identifier[np] . identifier[ndarray] ) keyword[and]
identifier[isinstance] ( identifier[roughness_length] , identifier[pd] . identifier[Series] )):
identifier[roughness_length] = identifier[np] . identifier[array] ( identifier[roughness_length] )
keyword[return] ( identifier[wind_speed] * identifier[np] . identifier[log] (( identifier[hub_height] - literal[int] * identifier[obstacle_height] )/
identifier[roughness_length] )/
identifier[np] . identifier[log] (( identifier[wind_speed_height] - literal[int] * identifier[obstacle_height] )/
identifier[roughness_length] )) | def logarithmic_profile(wind_speed, wind_speed_height, hub_height, roughness_length, obstacle_height=0.0):
"""
Calculates the wind speed at hub height using a logarithmic wind profile.
The logarithmic height equation is used. There is the possibility of
including the height of the surrounding obstacles in the calculation. This
function is carried out when the parameter `wind_speed_model` of an
instance of the :class:`~.modelchain.ModelChain` class is 'logarithmic'.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed time series.
wind_speed_height : float
Height for which the parameter `wind_speed` applies.
hub_height : float
Hub height of wind turbine.
roughness_length : pandas.Series or numpy.array or float
Roughness length.
obstacle_height : float
Height of obstacles in the surrounding area of the wind turbine. Set
`obstacle_height` to zero for widespread obstacles. Default: 0.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used [1]_, [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\\cdot
\\frac{\\ln\\left(\\frac{h_{hub}-d}{z_{0}}\\right)}{\\ln\\left(
\\frac{h_{data}-d}{z_{0}}\\right)}
with:
v: wind speed, h: height, :math:`z_{0}`: roughness length,
d: boundary layer offset (estimated by d = 0.7 * `obstacle_height`)
For d = 0 it results in the following equation [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\\cdot\\frac{\\ln\\left(\\frac{h_{hub}}
{z_{0}}\\right)}{\\ln\\left(\\frac{h_{data}}{z_{0}}\\right)}
:math:`h_{data}` is the height at which the wind speed
:math:`v_{wind,data}` is measured and :math:`v_{wind,hub}` is the wind
speed at hub height :math:`h_{hub}` of the wind turbine.
Parameters `wind_speed_height`, `roughness_length`, `hub_height` and
`obstacle_height` have to be of the same unit.
References
----------
.. [1] Quaschning V.: "Regenerative Energiesysteme". München, Hanser
Verlag, 2011, p. 278
.. [2] Gasch, R., Twele, J.: "Windkraftanlagen". 6. Auflage, Wiesbaden,
Vieweg + Teubner, 2010, p. 129
.. [3] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz,
Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 515
"""
if 0.7 * obstacle_height > wind_speed_height:
raise ValueError('To take an obstacle height of {0} m '.format(obstacle_height) + 'into consideration, wind ' + 'speed data of a greater height is needed.') # depends on [control=['if'], data=[]]
# Return np.array if wind_speed is np.array
if isinstance(wind_speed, np.ndarray) and isinstance(roughness_length, pd.Series):
roughness_length = np.array(roughness_length) # depends on [control=['if'], data=[]]
return wind_speed * np.log((hub_height - 0.7 * obstacle_height) / roughness_length) / np.log((wind_speed_height - 0.7 * obstacle_height) / roughness_length) |
def i2c_master_read(self, addr, length, flags=I2C_NO_FLAGS):
"""Make an I2C read access.
The given I2C device is addressed and clock cycles for `length` bytes
are generated. A short read will occur if the device generates an early
NAK.
The transaction is finished with an I2C stop condition unless the
I2C_NO_STOP flag is set.
"""
data = array.array('B', (0,) * length)
status, rx_len = api.py_aa_i2c_read_ext(self.handle, addr, flags,
length, data)
_raise_i2c_status_code_error_if_failure(status)
del data[rx_len:]
return bytes(data) | def function[i2c_master_read, parameter[self, addr, length, flags]]:
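A hypothetical usage sketch; `aa` stands in for an already-opened adapter object exposing this method, and the device address 0x50 is illustrative:

# Plain 16-byte read, finished with a stop condition:
data = aa.i2c_master_read(0x50, 16)
# Same device, but suppress the stop, e.g. ahead of a repeated start:
head = aa.i2c_master_read(0x50, 4, flags=I2C_NO_STOP)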
constant[Make an I2C read access.
The given I2C device is addressed and clock cycles for `length` bytes
are generated. A short read will occur if the device generates an early
NAK.
The transaction is finished with an I2C stop condition unless the
I2C_NO_STOP flag is set.
]
variable[data] assign[=] call[name[array].array, parameter[constant[B], binary_operation[tuple[[<ast.Constant object at 0x7da18eb56920>]] * name[length]]]]
<ast.Tuple object at 0x7da18eb54070> assign[=] call[name[api].py_aa_i2c_read_ext, parameter[name[self].handle, name[addr], name[flags], name[length], name[data]]]
call[name[_raise_i2c_status_code_error_if_failure], parameter[name[status]]]
<ast.Delete object at 0x7da18eb561a0>
return[call[name[bytes], parameter[name[data]]]] | keyword[def] identifier[i2c_master_read] ( identifier[self] , identifier[addr] , identifier[length] , identifier[flags] = identifier[I2C_NO_FLAGS] ):
literal[string]
identifier[data] = identifier[array] . identifier[array] ( literal[string] ,( literal[int] ,)* identifier[length] )
identifier[status] , identifier[rx_len] = identifier[api] . identifier[py_aa_i2c_read_ext] ( identifier[self] . identifier[handle] , identifier[addr] , identifier[flags] ,
identifier[length] , identifier[data] )
identifier[_raise_i2c_status_code_error_if_failure] ( identifier[status] )
keyword[del] identifier[data] [ identifier[rx_len] :]
keyword[return] identifier[bytes] ( identifier[data] ) | def i2c_master_read(self, addr, length, flags=I2C_NO_FLAGS):
"""Make an I2C read access.
The given I2C device is addressed and clock cycles for `length` bytes
are generated. A short read will occur if the device generates an early
NAK.
The transaction is finished with an I2C stop condition unless the
I2C_NO_STOP flag is set.
"""
data = array.array('B', (0,) * length)
(status, rx_len) = api.py_aa_i2c_read_ext(self.handle, addr, flags, length, data)
_raise_i2c_status_code_error_if_failure(status)
del data[rx_len:]
return bytes(data) |
def walk_json(d, func):
""" Walk over a parsed JSON nested structure `d`, apply `func` to each leaf element and replace it with result
"""
if isinstance(d, Mapping):
return OrderedDict((k, walk_json(v, func)) for k, v in d.items())
elif isinstance(d, list):
return [walk_json(v, func) for v in d]
else:
return func(d) | def function[walk_json, parameter[d, func]]:
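A quick check of the traversal, assuming `Mapping` and `OrderedDict` are imported in this module (e.g. from `collections`/`collections.abc`):

doc = {"a": [1, 2], "b": {"c": 3}}
walk_json(doc, lambda v: v * 10)
# OrderedDict([('a', [10, 20]), ('b', OrderedDict([('c', 30)]))])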
constant[ Walk over a parsed JSON nested structure `d`, apply `func` to each leaf element and replace it with result
]
if call[name[isinstance], parameter[name[d], name[Mapping]]] begin[:]
return[call[name[OrderedDict], parameter[<ast.GeneratorExp object at 0x7da1b18be260>]]] | keyword[def] identifier[walk_json] ( identifier[d] , identifier[func] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[d] , identifier[Mapping] ):
keyword[return] identifier[OrderedDict] (( identifier[k] , identifier[walk_json] ( identifier[v] , identifier[func] )) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[d] . identifier[items] ())
keyword[elif] identifier[isinstance] ( identifier[d] , identifier[list] ):
keyword[return] [ identifier[walk_json] ( identifier[v] , identifier[func] ) keyword[for] identifier[v] keyword[in] identifier[d] ]
keyword[else] :
keyword[return] identifier[func] ( identifier[d] ) | def walk_json(d, func):
""" Walk over a parsed JSON nested structure `d`, apply `func` to each leaf element and replace it with result
"""
if isinstance(d, Mapping):
return OrderedDict(((k, walk_json(v, func)) for (k, v) in d.items())) # depends on [control=['if'], data=[]]
elif isinstance(d, list):
return [walk_json(v, func) for v in d] # depends on [control=['if'], data=[]]
else:
return func(d) |
def __update_cleanup_paths(new_path):
"""
Add the new path to the list of paths to clean up afterwards.
Args:
new_path: Path to the directory that need to be cleaned up.
"""
cleanup_dirs = settings.CFG["cleanup_paths"].value
cleanup_dirs = set(cleanup_dirs)
cleanup_dirs.add(new_path)
cleanup_dirs = list(cleanup_dirs)
settings.CFG["cleanup_paths"] = cleanup_dirs | def function[__update_cleanup_paths, parameter[new_path]]:
constant[
Add the new path to the list of paths to clean up afterwards.
Args:
new_path: Path to the directory that need to be cleaned up.
]
variable[cleanup_dirs] assign[=] call[name[settings].CFG][constant[cleanup_paths]].value
variable[cleanup_dirs] assign[=] call[name[set], parameter[name[cleanup_dirs]]]
call[name[cleanup_dirs].add, parameter[name[new_path]]]
variable[cleanup_dirs] assign[=] call[name[list], parameter[name[cleanup_dirs]]]
call[name[settings].CFG][constant[cleanup_paths]] assign[=] name[cleanup_dirs] | keyword[def] identifier[__update_cleanup_paths] ( identifier[new_path] ):
literal[string]
identifier[cleanup_dirs] = identifier[settings] . identifier[CFG] [ literal[string] ]. identifier[value]
identifier[cleanup_dirs] = identifier[set] ( identifier[cleanup_dirs] )
identifier[cleanup_dirs] . identifier[add] ( identifier[new_path] )
identifier[cleanup_dirs] = identifier[list] ( identifier[cleanup_dirs] )
identifier[settings] . identifier[CFG] [ literal[string] ]= identifier[cleanup_dirs] | def __update_cleanup_paths(new_path):
"""
Add the new path to the list of paths to clean up afterwards.
Args:
new_path: Path to the directory that need to be cleaned up.
"""
cleanup_dirs = settings.CFG['cleanup_paths'].value
cleanup_dirs = set(cleanup_dirs)
cleanup_dirs.add(new_path)
cleanup_dirs = list(cleanup_dirs)
settings.CFG['cleanup_paths'] = cleanup_dirs |
def clone_version(self, service_id, version_number):
"""Clone the current configuration into a new version."""
content = self._fetch("/service/%s/version/%d/clone" % (service_id, version_number), method="PUT")
return FastlyVersion(self, content) | def function[clone_version, parameter[self, service_id, version_number]]:
constant[Clone the current configuration into a new version.]
variable[content] assign[=] call[name[self]._fetch, parameter[binary_operation[constant[/service/%s/version/%d/clone] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0f42260>, <ast.Name object at 0x7da1b0f41cc0>]]]]]
return[call[name[FastlyVersion], parameter[name[self], name[content]]]] | keyword[def] identifier[clone_version] ( identifier[self] , identifier[service_id] , identifier[version_number] ):
literal[string]
identifier[content] = identifier[self] . identifier[_fetch] ( literal[string] %( identifier[service_id] , identifier[version_number] ), identifier[method] = literal[string] )
keyword[return] identifier[FastlyVersion] ( identifier[self] , identifier[content] ) | def clone_version(self, service_id, version_number):
"""Clone the current configuration into a new version."""
content = self._fetch('/service/%s/version/%d/clone' % (service_id, version_number), method='PUT')
return FastlyVersion(self, content) |
def create(name, availability_zones, listeners, subnets=None,
security_groups=None, scheme='internet-facing',
region=None, key=None, keyid=None,
profile=None):
'''
Create an ELB
CLI example to create an ELB:
.. code-block:: bash
salt myminion boto_elb.create myelb '["us-east-1a", "us-east-1e"]' '{"elb_port": 443, "elb_protocol": "HTTPS", ...}' region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if exists(name, region, key, keyid, profile):
return True
if isinstance(availability_zones, six.string_types):
availability_zones = salt.utils.json.loads(availability_zones)
if isinstance(listeners, six.string_types):
listeners = salt.utils.json.loads(listeners)
_complex_listeners = []
for listener in listeners:
_complex_listeners.append(listener_dict_to_tuple(listener))
try:
lb = conn.create_load_balancer(name=name, zones=availability_zones, subnets=subnets,
security_groups=security_groups, scheme=scheme,
complex_listeners=_complex_listeners)
if lb:
log.info('Created ELB %s', name)
return True
else:
log.error('Failed to create ELB %s', name)
return False
except boto.exception.BotoServerError as error:
log.error('Failed to create ELB %s: %s: %s',
name, error.error_code, error.message,
exc_info_on_loglevel=logging.DEBUG)
return False | def function[create, parameter[name, availability_zones, listeners, subnets, security_groups, scheme, region, key, keyid, profile]]:
constant[
Create an ELB
CLI example to create an ELB:
.. code-block:: bash
salt myminion boto_elb.create myelb '["us-east-1a", "us-east-1e"]' '{"elb_port": 443, "elb_protocol": "HTTPS", ...}' region=us-east-1
]
variable[conn] assign[=] call[name[_get_conn], parameter[]]
if call[name[exists], parameter[name[name], name[region], name[key], name[keyid], name[profile]]] begin[:]
return[constant[True]]
if call[name[isinstance], parameter[name[availability_zones], name[six].string_types]] begin[:]
variable[availability_zones] assign[=] call[name[salt].utils.json.loads, parameter[name[availability_zones]]]
if call[name[isinstance], parameter[name[listeners], name[six].string_types]] begin[:]
variable[listeners] assign[=] call[name[salt].utils.json.loads, parameter[name[listeners]]]
variable[_complex_listeners] assign[=] list[[]]
for taget[name[listener]] in starred[name[listeners]] begin[:]
call[name[_complex_listeners].append, parameter[call[name[listener_dict_to_tuple], parameter[name[listener]]]]]
<ast.Try object at 0x7da1b2046b60> | keyword[def] identifier[create] ( identifier[name] , identifier[availability_zones] , identifier[listeners] , identifier[subnets] = keyword[None] ,
identifier[security_groups] = keyword[None] , identifier[scheme] = literal[string] ,
identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] ,
identifier[profile] = keyword[None] ):
literal[string]
identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[if] identifier[exists] ( identifier[name] , identifier[region] , identifier[key] , identifier[keyid] , identifier[profile] ):
keyword[return] keyword[True]
keyword[if] identifier[isinstance] ( identifier[availability_zones] , identifier[six] . identifier[string_types] ):
identifier[availability_zones] = identifier[salt] . identifier[utils] . identifier[json] . identifier[loads] ( identifier[availability_zones] )
keyword[if] identifier[isinstance] ( identifier[listeners] , identifier[six] . identifier[string_types] ):
identifier[listeners] = identifier[salt] . identifier[utils] . identifier[json] . identifier[loads] ( identifier[listeners] )
identifier[_complex_listeners] =[]
keyword[for] identifier[listener] keyword[in] identifier[listeners] :
identifier[_complex_listeners] . identifier[append] ( identifier[listener_dict_to_tuple] ( identifier[listener] ))
keyword[try] :
identifier[lb] = identifier[conn] . identifier[create_load_balancer] ( identifier[name] = identifier[name] , identifier[zones] = identifier[availability_zones] , identifier[subnets] = identifier[subnets] ,
identifier[security_groups] = identifier[security_groups] , identifier[scheme] = identifier[scheme] ,
identifier[complex_listeners] = identifier[_complex_listeners] )
keyword[if] identifier[lb] :
identifier[log] . identifier[info] ( literal[string] , identifier[name] )
keyword[return] keyword[True]
keyword[else] :
identifier[log] . identifier[error] ( literal[string] , identifier[name] )
keyword[return] keyword[False]
keyword[except] identifier[boto] . identifier[exception] . identifier[BotoServerError] keyword[as] identifier[error] :
identifier[log] . identifier[error] ( literal[string] ,
identifier[name] , identifier[error] . identifier[error_code] , identifier[error] . identifier[message] ,
identifier[exc_info_on_loglevel] = identifier[logging] . identifier[DEBUG] )
keyword[return] keyword[False] | def create(name, availability_zones, listeners, subnets=None, security_groups=None, scheme='internet-facing', region=None, key=None, keyid=None, profile=None):
"""
Create an ELB
CLI example to create an ELB:
.. code-block:: bash
salt myminion boto_elb.create myelb '["us-east-1a", "us-east-1e"]' '{"elb_port": 443, "elb_protocol": "HTTPS", ...}' region=us-east-1
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if exists(name, region, key, keyid, profile):
return True # depends on [control=['if'], data=[]]
if isinstance(availability_zones, six.string_types):
availability_zones = salt.utils.json.loads(availability_zones) # depends on [control=['if'], data=[]]
if isinstance(listeners, six.string_types):
listeners = salt.utils.json.loads(listeners) # depends on [control=['if'], data=[]]
_complex_listeners = []
for listener in listeners:
_complex_listeners.append(listener_dict_to_tuple(listener)) # depends on [control=['for'], data=['listener']]
try:
lb = conn.create_load_balancer(name=name, zones=availability_zones, subnets=subnets, security_groups=security_groups, scheme=scheme, complex_listeners=_complex_listeners)
if lb:
log.info('Created ELB %s', name)
return True # depends on [control=['if'], data=[]]
else:
log.error('Failed to create ELB %s', name)
return False # depends on [control=['try'], data=[]]
except boto.exception.BotoServerError as error:
log.error('Failed to create ELB %s: %s: %s', name, error.error_code, error.message, exc_info_on_loglevel=logging.DEBUG)
return False # depends on [control=['except'], data=['error']] |
def get_caller_name(back: int = 0) -> str:
"""
Return details about the CALLER OF THE CALLER (plus n calls further back)
of this function.
So, if your function calls :func:`get_caller_name`, it will return the
name of the function that called your function! (Or ``back`` calls further
back.)
Example:
.. code-block:: python
from cardinal_pythonlib.debugging import get_caller_name
def who_am_i():
return get_caller_name()
class MyClass(object):
def classfunc(self):
print("I am: " + who_am_i())
print("I was called by: " + get_caller_name())
print("That was called by: " + get_caller_name(back=1))
def f2():
x = MyClass()
x.classfunc()
def f1():
f2()
f1()
will produce:
.. code-block:: none
I am: MyClass.classfunc
I was called by: f2
That was called by: f1
"""
# http://stackoverflow.com/questions/5067604/determine-function-name-from-within-that-function-without-using-traceback # noqa
try:
# noinspection PyProtectedMember
frame = sys._getframe(back + 2)
except ValueError:
# Stack isn't deep enough.
return '?'
function_name = frame.f_code.co_name
class_name = get_class_name_from_frame(frame)
if class_name:
return "{}.{}".format(class_name, function_name)
return function_name | def function[get_caller_name, parameter[back]]:
constant[
Return details about the CALLER OF THE CALLER (plus n calls further back)
of this function.
So, if your function calls :func:`get_caller_name`, it will return the
name of the function that called your function! (Or ``back`` calls further
back.)
Example:
.. code-block:: python
from cardinal_pythonlib.debugging import get_caller_name
def who_am_i():
return get_caller_name()
class MyClass(object):
def classfunc(self):
print("I am: " + who_am_i())
print("I was called by: " + get_caller_name())
print("That was called by: " + get_caller_name(back=1))
def f2():
x = MyClass()
x.classfunc()
def f1():
f2()
f1()
will produce:
.. code-block:: none
I am: MyClass.classfunc
I was called by: f2
That was called by: f1
]
<ast.Try object at 0x7da1b18354b0>
variable[function_name] assign[=] name[frame].f_code.co_name
variable[class_name] assign[=] call[name[get_class_name_from_frame], parameter[name[frame]]]
if name[class_name] begin[:]
return[call[constant[{}.{}].format, parameter[name[class_name], name[function_name]]]]
return[name[function_name]] | keyword[def] identifier[get_caller_name] ( identifier[back] : identifier[int] = literal[int] )-> identifier[str] :
literal[string]
keyword[try] :
identifier[frame] = identifier[sys] . identifier[_getframe] ( identifier[back] + literal[int] )
keyword[except] identifier[ValueError] :
keyword[return] literal[string]
identifier[function_name] = identifier[frame] . identifier[f_code] . identifier[co_name]
identifier[class_name] = identifier[get_class_name_from_frame] ( identifier[frame] )
keyword[if] identifier[class_name] :
keyword[return] literal[string] . identifier[format] ( identifier[class_name] , identifier[function_name] )
keyword[return] identifier[function_name] | def get_caller_name(back: int=0) -> str:
"""
Return details about the CALLER OF THE CALLER (plus n calls further back)
of this function.
So, if your function calls :func:`get_caller_name`, it will return the
name of the function that called your function! (Or ``back`` calls further
back.)
Example:
.. code-block:: python
from cardinal_pythonlib.debugging import get_caller_name
def who_am_i():
return get_caller_name()
class MyClass(object):
def classfunc(self):
print("I am: " + who_am_i())
print("I was called by: " + get_caller_name())
print("That was called by: " + get_caller_name(back=1))
def f2():
x = MyClass()
x.classfunc()
def f1():
f2()
f1()
will produce:
.. code-block:: none
I am: MyClass.classfunc
I was called by: f2
That was called by: f1
"""
# http://stackoverflow.com/questions/5067604/determine-function-name-from-within-that-function-without-using-traceback # noqa
try:
# noinspection PyProtectedMember
frame = sys._getframe(back + 2) # depends on [control=['try'], data=[]]
except ValueError:
# Stack isn't deep enough.
return '?' # depends on [control=['except'], data=[]]
function_name = frame.f_code.co_name
class_name = get_class_name_from_frame(frame)
if class_name:
return '{}.{}'.format(class_name, function_name) # depends on [control=['if'], data=[]]
return function_name |
def managed(name, table, data, record=None):
'''
Ensures that the specified columns of the named record have the specified
values.
Args:
name: The name of the record.
table: The name of the table to which the record belongs.
data: Dictionary containing a mapping from column names to the desired
values. Columns that exist, but are not specified in this
dictionary are not touched.
record: The name of the record (optional). Replaces name if specified.
'''
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if record is None:
record = name
current_data = {
column: __salt__['openvswitch.db_get'](table, record, column)
for column in data
}
# Comment and change messages
comment_changes = 'Columns have been updated.'
comment_no_changes = 'All columns are already up to date.'
comment_error = 'Error while updating column {0}: {1}'
# Dry run, test=true mode
if __opts__['test']:
for column in data:
if data[column] != current_data[column]:
ret['changes'][column] = {'old': current_data[column],
'new': data[column]}
if ret['changes']:
ret['result'] = None
ret['comment'] = comment_changes
else:
ret['result'] = True
ret['comment'] = comment_no_changes
return ret
for column in data:
if data[column] != current_data[column]:
result = __salt__['openvswitch.db_set'](table, record, column,
data[column])
if result is not None:
ret['comment'] = comment_error.format(column, result)
ret['result'] = False
return ret
ret['changes'][column] = {'old': current_data[column],
'new': data[column]}
ret['result'] = True
ret['comment'] = comment_no_changes
return ret | def function[managed, parameter[name, table, data, record]]:
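A hypothetical direct invocation, assuming the `__salt__` and `__opts__` dunders have been injected by the Salt loader; the Open vSwitch table and column names are illustrative:

ret = managed('br0', table='Bridge', data={'stp_enable': True})
# -> {'name': 'br0', 'changes': {...}, 'result': True, 'comment': ...}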
constant[
Ensures that the specified columns of the named record have the specified
values.
Args:
name: The name of the record.
table: The name of the table to which the record belongs.
data: Dictionary containing a mapping from column names to the desired
values. Columns that exist, but are not specified in this
dictionary are not touched.
record: The name of the record (optional). Replaces name if specified.
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b20ef3a0>, <ast.Constant object at 0x7da1b20ef610>, <ast.Constant object at 0x7da1b20ef340>, <ast.Constant object at 0x7da1b20eead0>], [<ast.Name object at 0x7da1b20ef7f0>, <ast.Dict object at 0x7da1b20edb40>, <ast.Constant object at 0x7da1b20eefb0>, <ast.Constant object at 0x7da1b20eedd0>]]
if compare[name[record] is constant[None]] begin[:]
variable[record] assign[=] name[name]
variable[current_data] assign[=] <ast.DictComp object at 0x7da1b20eed10>
variable[comment_changes] assign[=] constant[Columns have been updated.]
variable[comment_no_changes] assign[=] constant[All columns are already up to date.]
variable[comment_error] assign[=] constant[Error while updating column {0}: {1}]
if call[name[__opts__]][constant[test]] begin[:]
for taget[name[column]] in starred[name[data]] begin[:]
if compare[call[name[data]][name[column]] not_equal[!=] call[name[current_data]][name[column]]] begin[:]
call[call[name[ret]][constant[changes]]][name[column]] assign[=] dictionary[[<ast.Constant object at 0x7da1b2045a20>, <ast.Constant object at 0x7da1b2045c90>], [<ast.Subscript object at 0x7da1b2044970>, <ast.Subscript object at 0x7da1b2045c00>]]
if call[name[ret]][constant[changes]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[None]
call[name[ret]][constant[comment]] assign[=] name[comment_changes]
return[name[ret]]
for taget[name[column]] in starred[name[data]] begin[:]
if compare[call[name[data]][name[column]] not_equal[!=] call[name[current_data]][name[column]]] begin[:]
variable[result] assign[=] call[call[name[__salt__]][constant[openvswitch.db_set]], parameter[name[table], name[record], name[column], call[name[data]][name[column]]]]
if compare[name[result] is_not constant[None]] begin[:]
call[name[ret]][constant[comment]] assign[=] call[name[comment_error].format, parameter[name[column], name[result]]]
call[name[ret]][constant[result]] assign[=] constant[False]
return[name[ret]]
call[call[name[ret]][constant[changes]]][name[column]] assign[=] dictionary[[<ast.Constant object at 0x7da1b1cb1780>, <ast.Constant object at 0x7da1b1cb2f20>], [<ast.Subscript object at 0x7da1b1cb35e0>, <ast.Subscript object at 0x7da1b1cb3010>]]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] name[comment_no_changes]
return[name[ret]] | keyword[def] identifier[managed] ( identifier[name] , identifier[table] , identifier[data] , identifier[record] = keyword[None] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] , literal[string] :{}, literal[string] : keyword[False] , literal[string] : literal[string] }
keyword[if] identifier[record] keyword[is] keyword[None] :
identifier[record] = identifier[name]
identifier[current_data] ={
identifier[column] : identifier[__salt__] [ literal[string] ]( identifier[table] , identifier[record] , identifier[column] )
keyword[for] identifier[column] keyword[in] identifier[data]
}
identifier[comment_changes] = literal[string]
identifier[comment_no_changes] = literal[string]
identifier[comment_error] = literal[string]
keyword[if] identifier[__opts__] [ literal[string] ]:
keyword[for] identifier[column] keyword[in] identifier[data] :
keyword[if] identifier[data] [ identifier[column] ]!= identifier[current_data] [ identifier[column] ]:
identifier[ret] [ literal[string] ][ identifier[column] ]={ literal[string] : identifier[current_data] [ identifier[column] ],
literal[string] : identifier[data] [ identifier[column] ]}
keyword[if] identifier[ret] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[None]
identifier[ret] [ literal[string] ]= identifier[comment_changes]
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= identifier[comment_no_changes]
keyword[return] identifier[ret]
keyword[for] identifier[column] keyword[in] identifier[data] :
keyword[if] identifier[data] [ identifier[column] ]!= identifier[current_data] [ identifier[column] ]:
identifier[result] = identifier[__salt__] [ literal[string] ]( identifier[table] , identifier[record] , identifier[column] ,
identifier[data] [ identifier[column] ])
keyword[if] identifier[result] keyword[is] keyword[not] keyword[None] :
identifier[ret] [ literal[string] ]= identifier[comment_error] . identifier[format] ( identifier[column] , identifier[result] )
identifier[ret] [ literal[string] ]= keyword[False]
keyword[return] identifier[ret]
identifier[ret] [ literal[string] ][ identifier[column] ]={ literal[string] : identifier[current_data] [ identifier[column] ],
literal[string] : identifier[data] [ identifier[column] ]}
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= identifier[comment_no_changes]
keyword[return] identifier[ret] | def managed(name, table, data, record=None):
"""
Ensures that the specified columns of the named record have the specified
values.
Args:
name: The name of the record.
table: The name of the table to which the record belongs.
data: Dictionary containing a mapping from column names to the desired
values. Columns that exist, but are not specified in this
dictionary are not touched.
record: The name of the record (optional). Replaces name if specified.
"""
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if record is None:
record = name # depends on [control=['if'], data=['record']]
current_data = {column: __salt__['openvswitch.db_get'](table, record, column) for column in data}
# Comment and change messages
comment_changes = 'Columns have been updated.'
comment_no_changes = 'All columns are already up to date.'
comment_error = 'Error while updating column {0}: {1}'
# Dry run, test=true mode
if __opts__['test']:
for column in data:
if data[column] != current_data[column]:
ret['changes'][column] = {'old': current_data[column], 'new': data[column]} # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['column']]
if ret['changes']:
ret['result'] = None
ret['comment'] = comment_changes # depends on [control=['if'], data=[]]
else:
ret['result'] = True
ret['comment'] = comment_no_changes
return ret # depends on [control=['if'], data=[]]
for column in data:
if data[column] != current_data[column]:
result = __salt__['openvswitch.db_set'](table, record, column, data[column])
if result is not None:
ret['comment'] = comment_error.format(column, result)
ret['result'] = False
return ret # depends on [control=['if'], data=['result']]
ret['changes'][column] = {'old': current_data[column], 'new': data[column]} # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['column']]
ret['result'] = True
ret['comment'] = comment_no_changes
return ret |
def QA_data_tick_resample(tick, type_='1min'):
"""tick采样成任意级别分钟线
Arguments:
tick {[type]} -- transaction
Returns:
[type] -- [description]
"""
tick = tick.assign(amount=tick.price * tick.vol)
resx = pd.DataFrame()
_temp = set(tick.index.date)
for item in _temp:
_data = tick.loc[str(item)]
_data1 = _data[time(9,
31):time(11,
30)].resample(
type_,
closed='right',
base=30,
loffset=type_
).apply(
{
'price': 'ohlc',
'vol': 'sum',
'code': 'last',
'amount': 'sum'
}
)
_data2 = _data[time(13,
1):time(15,
0)].resample(
type_,
closed='right',
loffset=type_
).apply(
{
'price': 'ohlc',
'vol': 'sum',
'code': 'last',
'amount': 'sum'
}
)
resx = resx.append(_data1).append(_data2)
resx.columns = resx.columns.droplevel(0)
return resx.reset_index().drop_duplicates().set_index(['datetime', 'code']) | def function[QA_data_tick_resample, parameter[tick, type_]]:
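A minimal sketch with illustrative ticks, assuming `pandas as pd` and `datetime.time` are imported as in this module; only timestamps inside the 09:31-11:30 and 13:01-15:00 trading sessions contribute to the resampled bars:

idx = pd.to_datetime(['2019-01-02 09:31:20', '2019-01-02 09:31:40',
                      '2019-01-02 13:05:10'])
ticks = pd.DataFrame({'price': [10.0, 10.1, 10.2],
                      'vol': [100, 200, 150],
                      'code': '000001'}, index=idx)
bars = QA_data_tick_resample(ticks, type_='1min')
# -> one OHLC/volume/amount bar per traded minute, indexed by (datetime, code)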
constant[Resample tick data into minute bars at any interval
Arguments:
tick {[type]} -- transaction
Returns:
[type] -- [description]
]
variable[tick] assign[=] call[name[tick].assign, parameter[]]
variable[resx] assign[=] call[name[pd].DataFrame, parameter[]]
variable[_temp] assign[=] call[name[set], parameter[name[tick].index.date]]
for taget[name[item]] in starred[name[_temp]] begin[:]
variable[_data] assign[=] call[name[tick].loc][call[name[str], parameter[name[item]]]]
variable[_data1] assign[=] call[call[call[name[_data]][<ast.Slice object at 0x7da1b1fac6d0>].resample, parameter[name[type_]]].apply, parameter[dictionary[[<ast.Constant object at 0x7da1b1faca00>, <ast.Constant object at 0x7da1b1faca30>, <ast.Constant object at 0x7da1b1faca60>, <ast.Constant object at 0x7da1b1faca90>], [<ast.Constant object at 0x7da1b1facac0>, <ast.Constant object at 0x7da1b1facaf0>, <ast.Constant object at 0x7da1b1facb20>, <ast.Constant object at 0x7da1b1facb50>]]]]
variable[_data2] assign[=] call[call[call[name[_data]][<ast.Slice object at 0x7da1b1facd00>].resample, parameter[name[type_]]].apply, parameter[dictionary[[<ast.Constant object at 0x7da1b1fad4b0>, <ast.Constant object at 0x7da1b1fad4e0>, <ast.Constant object at 0x7da1b1fad510>, <ast.Constant object at 0x7da1b1fad540>], [<ast.Constant object at 0x7da1b1fad570>, <ast.Constant object at 0x7da1b1fad5a0>, <ast.Constant object at 0x7da1b1fad5d0>, <ast.Constant object at 0x7da1b1fad600>]]]]
variable[resx] assign[=] call[call[name[resx].append, parameter[name[_data1]]].append, parameter[name[_data2]]]
name[resx].columns assign[=] call[name[resx].columns.droplevel, parameter[constant[0]]]
return[call[call[call[name[resx].reset_index, parameter[]].drop_duplicates, parameter[]].set_index, parameter[list[[<ast.Constant object at 0x7da1b1fadb10>, <ast.Constant object at 0x7da1b1fadb40>]]]]] | keyword[def] identifier[QA_data_tick_resample] ( identifier[tick] , identifier[type_] = literal[string] ):
literal[string]
identifier[tick] = identifier[tick] . identifier[assign] ( identifier[amount] = identifier[tick] . identifier[price] * identifier[tick] . identifier[vol] )
identifier[resx] = identifier[pd] . identifier[DataFrame] ()
identifier[_temp] = identifier[set] ( identifier[tick] . identifier[index] . identifier[date] )
keyword[for] identifier[item] keyword[in] identifier[_temp] :
identifier[_data] = identifier[tick] . identifier[loc] [ identifier[str] ( identifier[item] )]
identifier[_data1] = identifier[_data] [ identifier[time] ( literal[int] ,
literal[int] ): identifier[time] ( literal[int] ,
literal[int] )]. identifier[resample] (
identifier[type_] ,
identifier[closed] = literal[string] ,
identifier[base] = literal[int] ,
identifier[loffset] = identifier[type_]
). identifier[apply] (
{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}
)
identifier[_data2] = identifier[_data] [ identifier[time] ( literal[int] ,
literal[int] ): identifier[time] ( literal[int] ,
literal[int] )]. identifier[resample] (
identifier[type_] ,
identifier[closed] = literal[string] ,
identifier[loffset] = identifier[type_]
). identifier[apply] (
{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}
)
identifier[resx] = identifier[resx] . identifier[append] ( identifier[_data1] ). identifier[append] ( identifier[_data2] )
identifier[resx] . identifier[columns] = identifier[resx] . identifier[columns] . identifier[droplevel] ( literal[int] )
keyword[return] identifier[resx] . identifier[reset_index] (). identifier[drop_duplicates] (). identifier[set_index] ([ literal[string] , literal[string] ]) | def QA_data_tick_resample(tick, type_='1min'):
"""tick采样成任意级别分钟线
Arguments:
tick {[type]} -- transaction
Returns:
[type] -- [description]
"""
tick = tick.assign(amount=tick.price * tick.vol)
resx = pd.DataFrame()
_temp = set(tick.index.date)
for item in _temp:
_data = tick.loc[str(item)]
_data1 = _data[time(9, 31):time(11, 30)].resample(type_, closed='right', base=30, loffset=type_).apply({'price': 'ohlc', 'vol': 'sum', 'code': 'last', 'amount': 'sum'})
_data2 = _data[time(13, 1):time(15, 0)].resample(type_, closed='right', loffset=type_).apply({'price': 'ohlc', 'vol': 'sum', 'code': 'last', 'amount': 'sum'})
resx = resx.append(_data1).append(_data2) # depends on [control=['for'], data=['item']]
resx.columns = resx.columns.droplevel(0)
return resx.reset_index().drop_duplicates().set_index(['datetime', 'code']) |
def _set_toChange(x):
""" set variables in list x toChange """
for key in list(x.keys()):
self.toChange[key] = True | def function[_set_toChange, parameter[x]]:
constant[ set variables in list x toChange ]
for taget[name[key]] in starred[call[name[list], parameter[call[name[x].keys, parameter[]]]]] begin[:]
call[name[self].toChange][name[key]] assign[=] constant[True] | keyword[def] identifier[_set_toChange] ( identifier[x] ):
literal[string]
keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[x] . identifier[keys] ()):
identifier[self] . identifier[toChange] [ identifier[key] ]= keyword[True] | def _set_toChange(x):
""" set variables in list x toChange """
for key in list(x.keys()):
self.toChange[key] = True # depends on [control=['for'], data=['key']] |