code | code_sememe | token_type | code_dependency
---|---|---|---
def apply_clicked(self, button):
"""Triggered when the Apply-Shortcut in the editor is triggered.
"""
if isinstance(self.model.state, LibraryState):
return
self.set_script_text(self.view.get_text()) | def function[apply_clicked, parameter[self, button]]:
constant[Triggered when the Apply shortcut in the editor is used.
]
if call[name[isinstance], parameter[name[self].model.state, name[LibraryState]]] begin[:]
return[None]
call[name[self].set_script_text, parameter[call[name[self].view.get_text, parameter[]]]] | keyword[def] identifier[apply_clicked] ( identifier[self] , identifier[button] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[model] . identifier[state] , identifier[LibraryState] ):
keyword[return]
identifier[self] . identifier[set_script_text] ( identifier[self] . identifier[view] . identifier[get_text] ()) | def apply_clicked(self, button):
"""Triggered when the Apply-Shortcut in the editor is triggered.
"""
if isinstance(self.model.state, LibraryState):
return # depends on [control=['if'], data=[]]
self.set_script_text(self.view.get_text()) |
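Editor's note on the row format: each row pairs raw source code (first and fourth columns) with a normalized AST rendering (code_sememe) and a token-type stream (token_type). The dataset's actual tooling is not stated anywhere in this dump; the sketch below only illustrates how a similar token-type stream could be derived with Python's standard tokenize module, and every name in it is ours, not the dataset authors'.

import io
import keyword
import tokenize

def token_types(source):
    # Yield strings like keyword[def] / identifier[foo] / literal[string],
    # loosely mirroring the token_type column above.
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == tokenize.NAME:
            kind = "keyword" if keyword.iskeyword(tok.string) else "identifier"
            yield "%s[%s]" % (kind, tok.string)
        elif tok.type == tokenize.STRING:
            yield "literal[string]"
        elif tok.type == tokenize.NUMBER:
            yield "literal[int]"

print(" ".join(token_types("def f(x):\n    return x\n")))
# keyword[def] identifier[f] identifier[x] keyword[return] identifier[x]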
def randomize_es(es_queryset):
"""Randomize an elasticsearch queryset."""
return es_queryset.query(
query.FunctionScore(
functions=[function.RandomScore()]
)
).sort("-_score") | def function[randomize_es, parameter[es_queryset]]:
constant[Randomize an elasticsearch queryset.]
return[call[call[name[es_queryset].query, parameter[call[name[query].FunctionScore, parameter[]]]].sort, parameter[constant[-_score]]]] | keyword[def] identifier[randomize_es] ( identifier[es_queryset] ):
literal[string]
keyword[return] identifier[es_queryset] . identifier[query] (
identifier[query] . identifier[FunctionScore] (
identifier[functions] =[ identifier[function] . identifier[RandomScore] ()]
)
). identifier[sort] ( literal[string] ) | def randomize_es(es_queryset):
"""Randomize an elasticsearch queryset."""
return es_queryset.query(query.FunctionScore(functions=[function.RandomScore()])).sort('-_score') |
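A hedged usage sketch for the randomize_es row above, assuming the elasticsearch-dsl package and a reachable cluster; the index name and query field are invented:

from elasticsearch_dsl import Search

s = Search(index="books").query("match", title="python")
s = randomize_es(s)          # wraps the query in a random function_score
for hit in s[:5].execute():  # five hits, ordered by the random _score
    print(hit.meta.score, hit.title)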
def get_user(self, steam_id, fetch_persona_state=True):
"""Get :class:`.SteamUser` instance for ``steam id``
:param steam_id: steam id
:type steam_id: :class:`int`, :class:`.SteamID`
:param fetch_persona_state: whether to request persona state when necessary
:type fetch_persona_state: :class:`bool`
:return: SteamUser instance
:rtype: :class:`.SteamUser`
"""
steam_id = int(steam_id)
suser = self._user_cache.get(steam_id, None)
if suser is None:
suser = SteamUser(steam_id, self)
self._user_cache[steam_id] = suser
if fetch_persona_state:
self.request_persona_state([steam_id])
return suser | def function[get_user, parameter[self, steam_id, fetch_persona_state]]:
constant[Get :class:`.SteamUser` instance for ``steam id``
:param steam_id: steam id
:type steam_id: :class:`int`, :class:`.SteamID`
:param fetch_persona_state: whether to request persona state when necessary
:type fetch_persona_state: :class:`bool`
:return: SteamUser instance
:rtype: :class:`.SteamUser`
]
variable[steam_id] assign[=] call[name[int], parameter[name[steam_id]]]
variable[suser] assign[=] call[name[self]._user_cache.get, parameter[name[steam_id], constant[None]]]
if compare[name[suser] is constant[None]] begin[:]
variable[suser] assign[=] call[name[SteamUser], parameter[name[steam_id], name[self]]]
call[name[self]._user_cache][name[steam_id]] assign[=] name[suser]
if name[fetch_persona_state] begin[:]
call[name[self].request_persona_state, parameter[list[[<ast.Name object at 0x7da1b23153f0>]]]]
return[name[suser]] | keyword[def] identifier[get_user] ( identifier[self] , identifier[steam_id] , identifier[fetch_persona_state] = keyword[True] ):
literal[string]
identifier[steam_id] = identifier[int] ( identifier[steam_id] )
identifier[suser] = identifier[self] . identifier[_user_cache] . identifier[get] ( identifier[steam_id] , keyword[None] )
keyword[if] identifier[suser] keyword[is] keyword[None] :
identifier[suser] = identifier[SteamUser] ( identifier[steam_id] , identifier[self] )
identifier[self] . identifier[_user_cache] [ identifier[steam_id] ]= identifier[suser]
keyword[if] identifier[fetch_persona_state] :
identifier[self] . identifier[request_persona_state] ([ identifier[steam_id] ])
keyword[return] identifier[suser] | def get_user(self, steam_id, fetch_persona_state=True):
"""Get :class:`.SteamUser` instance for ``steam id``
:param steam_id: steam id
:type steam_id: :class:`int`, :class:`.SteamID`
:param fetch_persona_state: whether to request persona state when necessary
:type fetch_persona_state: :class:`bool`
:return: SteamUser instance
:rtype: :class:`.SteamUser`
"""
steam_id = int(steam_id)
suser = self._user_cache.get(steam_id, None)
if suser is None:
suser = SteamUser(steam_id, self)
self._user_cache[steam_id] = suser
if fetch_persona_state:
self.request_persona_state([steam_id]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['suser']]
return suser |
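get_user above is a read-through cache: the first lookup constructs and stores a SteamUser, later lookups return the very same object. A hypothetical use (client and the steam id are illustrative):

user = client.get_user(76561197960287930)
again = client.get_user(76561197960287930, fetch_persona_state=False)
assert user is again  # served from client._user_cache; no second SteamUser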
def _prepare_headers(
self,
headers: Optional[LooseHeaders]) -> 'CIMultiDict[str]':
""" Add default headers and transform it to CIMultiDict
"""
# Convert headers to MultiDict
result = CIMultiDict(self._default_headers)
if headers:
if not isinstance(headers, (MultiDictProxy, MultiDict)):
headers = CIMultiDict(headers)
added_names = set() # type: Set[str]
for key, value in headers.items():
if key in added_names:
result.add(key, value)
else:
result[key] = value
added_names.add(key)
return result | def function[_prepare_headers, parameter[self, headers]]:
constant[ Add default headers and transform them to CIMultiDict
]
variable[result] assign[=] call[name[CIMultiDict], parameter[name[self]._default_headers]]
if name[headers] begin[:]
if <ast.UnaryOp object at 0x7da1b1f42fe0> begin[:]
variable[headers] assign[=] call[name[CIMultiDict], parameter[name[headers]]]
variable[added_names] assign[=] call[name[set], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b1f43010>, <ast.Name object at 0x7da1b1f40af0>]]] in starred[call[name[headers].items, parameter[]]] begin[:]
if compare[name[key] in name[added_names]] begin[:]
call[name[result].add, parameter[name[key], name[value]]]
return[name[result]] | keyword[def] identifier[_prepare_headers] (
identifier[self] ,
identifier[headers] : identifier[Optional] [ identifier[LooseHeaders] ])-> literal[string] :
literal[string]
identifier[result] = identifier[CIMultiDict] ( identifier[self] . identifier[_default_headers] )
keyword[if] identifier[headers] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[headers] ,( identifier[MultiDictProxy] , identifier[MultiDict] )):
identifier[headers] = identifier[CIMultiDict] ( identifier[headers] )
identifier[added_names] = identifier[set] ()
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[headers] . identifier[items] ():
keyword[if] identifier[key] keyword[in] identifier[added_names] :
identifier[result] . identifier[add] ( identifier[key] , identifier[value] )
keyword[else] :
identifier[result] [ identifier[key] ]= identifier[value]
identifier[added_names] . identifier[add] ( identifier[key] )
keyword[return] identifier[result] | def _prepare_headers(self, headers: Optional[LooseHeaders]) -> 'CIMultiDict[str]':
""" Add default headers and transform it to CIMultiDict
"""
# Convert headers to MultiDict
result = CIMultiDict(self._default_headers)
if headers:
if not isinstance(headers, (MultiDictProxy, MultiDict)):
headers = CIMultiDict(headers) # depends on [control=['if'], data=[]]
added_names = set() # type: Set[str]
for (key, value) in headers.items():
if key in added_names:
result.add(key, value) # depends on [control=['if'], data=['key']]
else:
result[key] = value
added_names.add(key) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return result |
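The added_names set above distinguishes two cases: the first occurrence of an incoming header replaces any default, while repeated occurrences are appended so multi-valued headers survive. A minimal self-contained sketch, assuming only the multidict package (the type aiohttp itself uses):

from multidict import CIMultiDict

result = CIMultiDict({"User-Agent": "default-agent"})
headers = CIMultiDict([("User-Agent", "custom"),
                       ("Accept", "text/html"),
                       ("Accept", "application/json")])
added = set()
for key, value in headers.items():
    if key in added:
        result.add(key, value)  # keep repeated incoming values
    else:
        result[key] = value     # first occurrence overrides the default
        added.add(key)
print(list(result.items()))     # one User-Agent, both Accept values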
def check_path(path):
"""Check that a path is legal.
:return: the path if all is OK
:raise ValueError: if the path is illegal
"""
if path is None or path == b'' or path.startswith(b'/'):
raise ValueError("illegal path '%s'" % path)
if (
(sys.version_info[0] >= 3 and not isinstance(path, bytes)) or
(sys.version_info[0] == 2 and not isinstance(path, str))
):
raise TypeError("illegal type for path '%r'" % path)
return path | def function[check_path, parameter[path]]:
constant[Check that a path is legal.
:return: the path if all is OK
:raise ValueError: if the path is illegal
]
if <ast.BoolOp object at 0x7da1b0aeead0> begin[:]
<ast.Raise object at 0x7da1b0911870>
if <ast.BoolOp object at 0x7da1b09133d0> begin[:]
<ast.Raise object at 0x7da1b09102e0>
return[name[path]] | keyword[def] identifier[check_path] ( identifier[path] ):
literal[string]
keyword[if] identifier[path] keyword[is] keyword[None] keyword[or] identifier[path] == literal[string] keyword[or] identifier[path] . identifier[startswith] ( literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] % identifier[path] )
keyword[if] (
( identifier[sys] . identifier[version_info] [ literal[int] ]>= literal[int] keyword[and] keyword[not] identifier[isinstance] ( identifier[path] , identifier[bytes] )) keyword[or]
( identifier[sys] . identifier[version_info] [ literal[int] ]== literal[int] keyword[and] keyword[not] identifier[isinstance] ( identifier[path] , identifier[str] ))
):
keyword[raise] identifier[TypeError] ( literal[string] % identifier[path] )
keyword[return] identifier[path] | def check_path(path):
"""Check that a path is legal.
:return: the path if all is OK
:raise ValueError: if the path is illegal
"""
if path is None or path == b'' or path.startswith(b'/'):
raise ValueError("illegal path '%s'" % path) # depends on [control=['if'], data=[]]
if (sys.version_info[0] >= 3 and (not isinstance(path, bytes))) or (sys.version_info[0] == 2 and (not isinstance(path, str))):
raise TypeError("illegal type for path '%r'" % path) # depends on [control=['if'], data=[]]
return path |
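Quick sanity checks for check_path (bytes paths, as the function expects); the concrete paths are invented:

print(check_path(b"refs/heads/master"))  # returned unchanged
for bad in (None, b"", b"/etc/passwd"):
    try:
        check_path(bad)
    except ValueError as err:
        print("rejected:", err)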
def GetValue(self, ignore_error=True):
"""Extracts and returns a single value from a DataBlob."""
if self.HasField("none"):
return None
field_names = [
"integer", "string", "data", "boolean", "list", "dict", "rdf_value",
"float", "set"
]
values = [getattr(self, x) for x in field_names if self.HasField(x)]
if len(values) != 1:
return None
if self.HasField("boolean"):
return bool(values[0])
# Unpack RDFValues.
if self.HasField("rdf_value"):
try:
rdf_class = rdfvalue.RDFValue.classes[self.rdf_value.name]
return rdf_class.FromSerializedString(
self.rdf_value.data, age=self.rdf_value.age)
except (ValueError, KeyError) as e:
if ignore_error:
return e
raise
elif self.HasField("list"):
return [x.GetValue() for x in self.list.content]
elif self.HasField("set"):
return set([x.GetValue() for x in self.set.content])
else:
return values[0] | def function[GetValue, parameter[self, ignore_error]]:
constant[Extracts and returns a single value from a DataBlob.]
if call[name[self].HasField, parameter[constant[none]]] begin[:]
return[constant[None]]
variable[field_names] assign[=] list[[<ast.Constant object at 0x7da1b1b44d60>, <ast.Constant object at 0x7da1b1b45a80>, <ast.Constant object at 0x7da1b1b44b80>, <ast.Constant object at 0x7da1b1b47a60>, <ast.Constant object at 0x7da1b1b46bf0>, <ast.Constant object at 0x7da1b1b44df0>, <ast.Constant object at 0x7da1b1b46bc0>, <ast.Constant object at 0x7da1b1b47a30>, <ast.Constant object at 0x7da1b1b47610>]]
variable[values] assign[=] <ast.ListComp object at 0x7da1b1b470d0>
if compare[call[name[len], parameter[name[values]]] not_equal[!=] constant[1]] begin[:]
return[constant[None]]
if call[name[self].HasField, parameter[constant[boolean]]] begin[:]
return[call[name[bool], parameter[call[name[values]][constant[0]]]]]
if call[name[self].HasField, parameter[constant[rdf_value]]] begin[:]
<ast.Try object at 0x7da1b1b44340> | keyword[def] identifier[GetValue] ( identifier[self] , identifier[ignore_error] = keyword[True] ):
literal[string]
keyword[if] identifier[self] . identifier[HasField] ( literal[string] ):
keyword[return] keyword[None]
identifier[field_names] =[
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string]
]
identifier[values] =[ identifier[getattr] ( identifier[self] , identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[field_names] keyword[if] identifier[self] . identifier[HasField] ( identifier[x] )]
keyword[if] identifier[len] ( identifier[values] )!= literal[int] :
keyword[return] keyword[None]
keyword[if] identifier[self] . identifier[HasField] ( literal[string] ):
keyword[return] identifier[bool] ( identifier[values] [ literal[int] ])
keyword[if] identifier[self] . identifier[HasField] ( literal[string] ):
keyword[try] :
identifier[rdf_class] = identifier[rdfvalue] . identifier[RDFValue] . identifier[classes] [ identifier[self] . identifier[rdf_value] . identifier[name] ]
keyword[return] identifier[rdf_class] . identifier[FromSerializedString] (
identifier[self] . identifier[rdf_value] . identifier[data] , identifier[age] = identifier[self] . identifier[rdf_value] . identifier[age] )
keyword[except] ( identifier[ValueError] , identifier[KeyError] ) keyword[as] identifier[e] :
keyword[if] identifier[ignore_error] :
keyword[return] identifier[e]
keyword[raise]
keyword[elif] identifier[self] . identifier[HasField] ( literal[string] ):
keyword[return] [ identifier[x] . identifier[GetValue] () keyword[for] identifier[x] keyword[in] identifier[self] . identifier[list] . identifier[content] ]
keyword[elif] identifier[self] . identifier[HasField] ( literal[string] ):
keyword[return] identifier[set] ([ identifier[x] . identifier[GetValue] () keyword[for] identifier[x] keyword[in] identifier[self] . identifier[set] . identifier[content] ])
keyword[else] :
keyword[return] identifier[values] [ literal[int] ] | def GetValue(self, ignore_error=True):
"""Extracts and returns a single value from a DataBlob."""
if self.HasField('none'):
return None # depends on [control=['if'], data=[]]
field_names = ['integer', 'string', 'data', 'boolean', 'list', 'dict', 'rdf_value', 'float', 'set']
values = [getattr(self, x) for x in field_names if self.HasField(x)]
if len(values) != 1:
return None # depends on [control=['if'], data=[]]
if self.HasField('boolean'):
return bool(values[0]) # depends on [control=['if'], data=[]]
# Unpack RDFValues.
if self.HasField('rdf_value'):
try:
rdf_class = rdfvalue.RDFValue.classes[self.rdf_value.name]
return rdf_class.FromSerializedString(self.rdf_value.data, age=self.rdf_value.age) # depends on [control=['try'], data=[]]
except (ValueError, KeyError) as e:
if ignore_error:
return e # depends on [control=['if'], data=[]]
raise # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
elif self.HasField('list'):
return [x.GetValue() for x in self.list.content] # depends on [control=['if'], data=[]]
elif self.HasField('set'):
return set([x.GetValue() for x in self.set.content]) # depends on [control=['if'], data=[]]
else:
return values[0] |
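GetValue enforces "exactly one field set" semantics and special-cases booleans (the protobuf stores them as integers) before generic unpacking. A plain-dict analogue for illustration only; DataBlob itself is a GRR protobuf and is not used here:

def get_value(blob):
    if "none" in blob:
        return None
    values = list(blob.values())
    if len(values) != 1:        # ambiguous blob: refuse to guess
        return None
    if "boolean" in blob:
        return bool(values[0])  # 1 -> True, not the raw integer
    if "list" in blob:
        return [get_value(x) for x in blob["list"]]
    return values[0]

print(get_value({"boolean": 1}))                 # True
print(get_value({"integer": 3, "string": "x"}))  # None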
def sexec(context, command, error_on_nonzero=True):
"""Executes a command within a particular Idiap SETSHELL context"""
import six
if isinstance(context, six.string_types): E = environ(context)
else: E = context
try:
logger.debug("Executing: '%s'", ' '.join(command))
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=E)
(stdout, stderr) = p.communicate() #note: stderr will be 'None'
if p.returncode != 0:
if error_on_nonzero:
raise RuntimeError("Execution of '%s' exited with status != 0 (%d): %s" % (' '.join(command), p.returncode, str_(stdout)))
else:
logger.debug("Execution of '%s' exited with status != 0 (%d): %s" % \
(' '.join(command), p.returncode, str_(stdout)))
return stdout.strip()
except KeyboardInterrupt: # the user CTRL-C'ed
os.kill(p.pid, signal.SIGTERM)
sys.exit(signal.SIGTERM) | def function[sexec, parameter[context, command, error_on_nonzero]]:
constant[Executes a command within a particular Idiap SETSHELL context]
import module[six]
if call[name[isinstance], parameter[name[context], name[six].string_types]] begin[:]
variable[E] assign[=] call[name[environ], parameter[name[context]]]
<ast.Try object at 0x7da18dc980a0> | keyword[def] identifier[sexec] ( identifier[context] , identifier[command] , identifier[error_on_nonzero] = keyword[True] ):
literal[string]
keyword[import] identifier[six]
keyword[if] identifier[isinstance] ( identifier[context] , identifier[six] . identifier[string_types] ): identifier[E] = identifier[environ] ( identifier[context] )
keyword[else] : identifier[E] = identifier[context]
keyword[try] :
identifier[logger] . identifier[debug] ( literal[string] , literal[string] . identifier[join] ( identifier[command] ))
identifier[p] = identifier[subprocess] . identifier[Popen] ( identifier[command] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] ,
identifier[stderr] = identifier[subprocess] . identifier[STDOUT] , identifier[env] = identifier[E] )
( identifier[stdout] , identifier[stderr] )= identifier[p] . identifier[communicate] ()
keyword[if] identifier[p] . identifier[returncode] != literal[int] :
keyword[if] identifier[error_on_nonzero] :
keyword[raise] identifier[RuntimeError] ( literal[string] %( literal[string] . identifier[join] ( identifier[command] ), identifier[p] . identifier[returncode] , identifier[str_] ( identifier[stdout] )))
keyword[else] :
identifier[logger] . identifier[debug] ( literal[string] %( literal[string] . identifier[join] ( identifier[command] ), identifier[p] . identifier[returncode] , identifier[str_] ( identifier[stdout] )))
keyword[return] identifier[stdout] . identifier[strip] ()
keyword[except] identifier[KeyboardInterrupt] :
identifier[os] . identifier[kill] ( identifier[p] . identifier[pid] , identifier[signal] . identifier[SIGTERM] )
identifier[sys] . identifier[exit] ( identifier[signal] . identifier[SIGTERM] ) | def sexec(context, command, error_on_nonzero=True):
"""Executes a command within a particular Idiap SETSHELL context"""
import six
if isinstance(context, six.string_types):
E = environ(context) # depends on [control=['if'], data=[]]
else:
E = context
try:
logger.debug("Executing: '%s'", ' '.join(command))
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=E)
(stdout, stderr) = p.communicate() #note: stderr will be 'None'
if p.returncode != 0:
if error_on_nonzero:
raise RuntimeError("Execution of '%s' exited with status != 0 (%d): %s" % (' '.join(command), p.returncode, str_(stdout))) # depends on [control=['if'], data=[]]
else:
logger.debug("Execution of '%s' exited with status != 0 (%d): %s" % (' '.join(command), p.returncode, str_(stdout))) # depends on [control=['if'], data=[]]
return stdout.strip() # depends on [control=['try'], data=[]]
except KeyboardInterrupt: # the user CTRL-C'ed
os.kill(p.pid, signal.SIGTERM)
sys.exit(signal.SIGTERM) # depends on [control=['except'], data=[]] |
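A hypothetical call of sexec ("grid" is an invented SETSHELL context name). Because Popen is not opened in text mode, the returned stdout is bytes:

out = sexec("grid", ["ls", "-l", "/tmp"])
print(out.decode())

# Tolerate non-zero exit codes instead of raising:
out = sexec("grid", ["false"], error_on_nonzero=False)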
def add_file(self, **args):
'''
Adds a file's information to the set of files to be
published in this dataset.
:param file_name: Mandatory. The file name (string).
This information will simply be included in the
PID record, but not used for anything.
:param file_handle: Mandatory. The handle (PID) of
this file (string). It is included in the file's netcdf
header. It must bear the prefix that this library
(or rather, the servlet that consumes this
library's requests) has write access to.
:param file_size: Mandatory. The file size (as string or
integer. Will be transformed to integer). This
information will be included in the handle record
and used for consistency checks during republications
of files with the same handle.
:param checksum: Mandatory. The file's checksum. This
information will be included in the handle record
and used for consistency checks during republications
of files with the same handle.
:param checksum_type: Mandatory. The checksum type/method
(string), e.g. "MD5" or "SHA256". This information will
be included in the handle record and used for consistency
checks during republications of files with the same handle.
:param publish_path: Mandatory. The THREDDS publish path as
a string. This is part of the URL for accessing the file,
which will be part of the handle record. It will not be
accessed, neither by the library nor by the consumer.
The URL consists of the dataset's "data_node", the dataset's
"thredds_service_path", and this "publish_path". Redundant
slashes are removed. If the URL does not start with "http",
"http://" is added.
:param file_version: Mandatory. Any string. File versions
are not managed in the PID. This information will simply be
included in the PID record, but not used for any reasoning.
'''
# Check if allowed:
self.__check_if_adding_files_allowed_right_now()
# Check if args ok:
mandatory_args = ['file_name', 'file_handle', 'file_size',
'checksum', 'publish_path', 'checksum_type',
'file_version']
utils.check_presence_of_mandatory_args(args, mandatory_args)
self.__enforce_integer_file_size(args)
self.__enforce_string_file_version(args)
# Add file:
self.__check_and_correct_handle_syntax(args)
self.__add_file(**args) | def function[add_file, parameter[self]]:
constant[
Adds a file's information to the set of files to be
published in this dataset.
:param file_name: Mandatory. The file name (string).
This information will simply be included in the
PID record, but not used for anything.
:param file_handle: Mandatory. The handle (PID) of
this file (string). It is included in the file's netcdf
header. It must bear the prefix that this library
(or rather, the servlet that consumes this
library's requests) has write access to.
:param file_size: Mandatory. The file size (as string or
integer. Will be transformed to integer). This
information will be included in the handle record
and used for consistency checks during republications
of files with the same handle.
:param checksum: Mandatory. The file's checksum. This
information will be included in the handle record
and used for consistency checks during republications
of files with the same handle.
:param checksum_type: Mandatory. The checksum type/method
(string), e.g. "MD5" or "SHA256". This information will
be included in the handle record and used for consistency
checks during republications of files with the same handle.
:param publish_path: Mandatory. The THREDDS publish path as
a string. This is part of the URL for accessing the file,
which will be part of the handle record. It will not be
accessed, neither by the library nor by the consumer.
The URL consists of the dataset's "data_node", the dataset's
"thredds_service_path", and this "publish_path". Redundant
slashes are removed. If the URL does not start with "http",
"http://" is added.
:param file_version: Mandatory. Any string. File versions
are not managed in the PID. This information will simply be
included in the PID record, but not used for any reasoning.
]
call[name[self].__check_if_adding_files_allowed_right_now, parameter[]]
variable[mandatory_args] assign[=] list[[<ast.Constant object at 0x7da2047ea620>, <ast.Constant object at 0x7da2047e85b0>, <ast.Constant object at 0x7da2047ebb20>, <ast.Constant object at 0x7da2047e90f0>, <ast.Constant object at 0x7da2047e95a0>, <ast.Constant object at 0x7da18fe93910>, <ast.Constant object at 0x7da18fe91390>]]
call[name[utils].check_presence_of_mandatory_args, parameter[name[args], name[mandatory_args]]]
call[name[self].__enforce_integer_file_size, parameter[name[args]]]
call[name[self].__enforce_string_file_version, parameter[name[args]]]
call[name[self].__check_and_correct_handle_syntax, parameter[name[args]]]
call[name[self].__add_file, parameter[]] | keyword[def] identifier[add_file] ( identifier[self] ,** identifier[args] ):
literal[string]
identifier[self] . identifier[__check_if_adding_files_allowed_right_now] ()
identifier[mandatory_args] =[ literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] ]
identifier[utils] . identifier[check_presence_of_mandatory_args] ( identifier[args] , identifier[mandatory_args] )
identifier[self] . identifier[__enforce_integer_file_size] ( identifier[args] )
identifier[self] . identifier[__enforce_string_file_version] ( identifier[args] )
identifier[self] . identifier[__check_and_correct_handle_syntax] ( identifier[args] )
identifier[self] . identifier[__add_file] (** identifier[args] ) | def add_file(self, **args):
"""
Adds a file's information to the set of files to be
published in this dataset.
:param file_name: Mandatory. The file name (string).
This information will simply be included in the
PID record, but not used for anything.
:param file_handle: Mandatory. The handle (PID) of
this file (string). It is included in the file's netcdf
header. It must bear the prefix that this library
(or rather, the servlet that consumes this
library's requests) has write access to.
:param file_size: Mandatory. The file size (as string or
integer. Will be transformed to integer). This
information will be included in the handle record
and used for consistency checks during republications
of files with the same handle.
:param checksum: Mandatory. The file's checksum. This
information will be included in the handle record
and used for consistency checks during republications
of files with the same handle.
:param checksum_type: Mandatory. The checksum type/method
(string), e.g. "MD5" or "SHA256". This information will
be included in the handle record and used for consistency
checks during republications of files with the same handle.
:param publish_path: Mandatory. The THREDDS publish path as
a string. This is part of the URL for accessing the file,
which will be part of the handle record. It will not be
accessed, neither by the library nor by the consumer.
The URL consists of the dataset's "data_node", the dataset's
"thredds_service_path", and this "publish_path". Redundant
slashes are removed. If the URL does not start with "http",
"http://" is added.
:param file_version: Mandatory. Any string. File versions
are not managed in the PID. This information will simply be
included in the PID record, but not used for any reasoning.
"""
# Check if allowed:
self.__check_if_adding_files_allowed_right_now()
# Check if args ok:
mandatory_args = ['file_name', 'file_handle', 'file_size', 'checksum', 'publish_path', 'checksum_type', 'file_version']
utils.check_presence_of_mandatory_args(args, mandatory_args)
self.__enforce_integer_file_size(args)
self.__enforce_string_file_version(args)
# Add file:
self.__check_and_correct_handle_syntax(args)
self.__add_file(**args) |
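An illustrative add_file call; every value below is invented, and coupler stands for whatever object defines the method:

coupler.add_file(
    file_name="tas_day_MPI-ESM_rcp45_r1i1p1_20060101.nc",
    file_handle="21.14100/abcd-ef01-2345",  # must use a writable prefix
    file_size="1048576",                    # str or int; coerced to int
    checksum="9e107d9d372bb6826bd81d3542a419d6",
    checksum_type="MD5",
    publish_path="esg_dataroot/output/tas_day.nc",
    file_version="1",
)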
def configure_modevasive(self):
"""
Installs the mod-evasive Apache module for combating DDoS attacks.
https://www.linode.com/docs/websites/apache-tips-and-tricks/modevasive-on-apache
"""
r = self.local_renderer
if r.env.modevasive_enabled:
self.install_packages()
# Write conf for each Ubuntu version since they don't conflict.
fn = r.render_to_file('apache/apache_modevasive.template.conf')
# Ubuntu 12.04
r.put(
local_path=fn,
remote_path='/etc/apache2/mods-available/mod-evasive.conf',
use_sudo=True)
# Ubuntu 14.04
r.put(
local_path=fn,
remote_path='/etc/apache2/mods-available/evasive.conf',
use_sudo=True)
self.enable_mod('evasive')
else:
# print('self.last_manifest:', self.last_manifest)
# print('a:', self.last_manifest.apache_modevasive_enabled)
# print('b:', self.last_manifest.modevasive_enabled)
if self.last_manifest.modevasive_enabled:
self.disable_mod('evasive') | def function[configure_modevasive, parameter[self]]:
constant[
Installs the mod-evasive Apache module for combating DDOS attacks.
https://www.linode.com/docs/websites/apache-tips-and-tricks/modevasive-on-apache
]
variable[r] assign[=] name[self].local_renderer
if name[r].env.modevasive_enabled begin[:]
call[name[self].install_packages, parameter[]]
variable[fn] assign[=] call[name[r].render_to_file, parameter[constant[apache/apache_modevasive.template.conf]]]
call[name[r].put, parameter[]]
call[name[r].put, parameter[]]
call[name[self].enable_mod, parameter[constant[evasive]]] | keyword[def] identifier[configure_modevasive] ( identifier[self] ):
literal[string]
identifier[r] = identifier[self] . identifier[local_renderer]
keyword[if] identifier[r] . identifier[env] . identifier[modevasive_enabled] :
identifier[self] . identifier[install_packages] ()
identifier[fn] = identifier[r] . identifier[render_to_file] ( literal[string] )
identifier[r] . identifier[put] (
identifier[local_path] = identifier[fn] ,
identifier[remote_path] = literal[string] ,
identifier[use_sudo] = keyword[True] )
identifier[r] . identifier[put] (
identifier[local_path] = identifier[fn] ,
identifier[remote_path] = literal[string] ,
identifier[use_sudo] = keyword[True] )
identifier[self] . identifier[enable_mod] ( literal[string] )
keyword[else] :
keyword[if] identifier[self] . identifier[last_manifest] . identifier[modevasive_enabled] :
identifier[self] . identifier[disable_mod] ( literal[string] ) | def configure_modevasive(self):
"""
Installs the mod-evasive Apache module for combating DDOS attacks.
https://www.linode.com/docs/websites/apache-tips-and-tricks/modevasive-on-apache
"""
r = self.local_renderer
if r.env.modevasive_enabled:
self.install_packages()
# Write conf for each Ubuntu version since they don't conflict.
fn = r.render_to_file('apache/apache_modevasive.template.conf')
# Ubuntu 12.04
r.put(local_path=fn, remote_path='/etc/apache2/mods-available/mod-evasive.conf', use_sudo=True)
# Ubuntu 14.04
r.put(local_path=fn, remote_path='/etc/apache2/mods-available/evasive.conf', use_sudo=True)
self.enable_mod('evasive') # depends on [control=['if'], data=[]]
# print('self.last_manifest:', self.last_manifest)
# print('a:', self.last_manifest.apache_modevasive_enabled)
# print('b:', self.last_manifest.modevasive_enabled)
elif self.last_manifest.modevasive_enabled:
self.disable_mod('evasive') # depends on [control=['if'], data=[]] |
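The else branch above is a manifest-diff pattern: consult the last deployed state and only disable the module if it was previously enabled, keeping the task idempotent. A distilled sketch of the idea (all names invented):

def reconcile_module(desired, previously_enabled, enable, disable):
    if desired:
        enable("evasive")       # enabling repeatedly is harmless
    elif previously_enabled:
        disable("evasive")      # only clean up what we turned on earlier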
def deployment_plans(self):
"""
Gets the Deployment Plans API client.
Returns:
DeploymentPlans:
"""
if not self.__deployment_plans:
self.__deployment_plans = DeploymentPlans(self.__connection)
return self.__deployment_plans | def function[deployment_plans, parameter[self]]:
constant[
Gets the Deployment Plans API client.
Returns:
DeploymentPlans:
]
if <ast.UnaryOp object at 0x7da18bcc9330> begin[:]
name[self].__deployment_plans assign[=] call[name[DeploymentPlans], parameter[name[self].__connection]]
return[name[self].__deployment_plans] | keyword[def] identifier[deployment_plans] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[__deployment_plans] :
identifier[self] . identifier[__deployment_plans] = identifier[DeploymentPlans] ( identifier[self] . identifier[__connection] )
keyword[return] identifier[self] . identifier[__deployment_plans] | def deployment_plans(self):
"""
Gets the Deployment Plans API client.
Returns:
DeploymentPlans:
"""
if not self.__deployment_plans:
self.__deployment_plans = DeploymentPlans(self.__connection) # depends on [control=['if'], data=[]]
return self.__deployment_plans |
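deployment_plans lazily builds the API client once and reuses it. On Python 3.8+ the same pattern can be written with functools.cached_property; a self-contained sketch with a stand-in class:

from functools import cached_property

class DeploymentPlans:  # stand-in for the real client class
    def __init__(self, connection):
        self.connection = connection

class OneViewClientSketch:
    def __init__(self, connection):
        self._connection = connection

    @cached_property
    def deployment_plans(self):
        # Built on first access, then cached on the instance.
        return DeploymentPlans(self._connection)

client = OneViewClientSketch(connection="conn")
assert client.deployment_plans is client.deployment_plans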
def BuildChecks(self, request):
"""Parses request and returns a list of filter callables.
Each callable will be called with the StatEntry and returns True if the
entry should be suppressed.
Args:
request: A FindSpec that describes the search.
Returns:
a list of callables which return True if the file is to be suppressed.
"""
result = []
if request.HasField("start_time") or request.HasField("end_time"):
def FilterTimestamp(file_stat, request=request):
return file_stat.HasField("st_mtime") and (
file_stat.st_mtime < request.start_time or
file_stat.st_mtime > request.end_time)
result.append(FilterTimestamp)
if request.HasField("min_file_size") or request.HasField("max_file_size"):
def FilterSize(file_stat, request=request):
return file_stat.HasField("st_size") and (
file_stat.st_size < request.min_file_size or
file_stat.st_size > request.max_file_size)
result.append(FilterSize)
if request.HasField("perm_mode"):
def FilterPerms(file_stat, request=request):
return (file_stat.st_mode & request.perm_mask) != request.perm_mode
result.append(FilterPerms)
if request.HasField("uid"):
def FilterUID(file_stat, request=request):
return file_stat.st_uid != request.uid
result.append(FilterUID)
if request.HasField("gid"):
def FilterGID(file_stat, request=request):
return file_stat.st_gid != request.gid
result.append(FilterGID)
if request.HasField("path_regex"):
regex = request.path_regex
def FilterPath(file_stat, regex=regex):
"""Suppress any filename not matching the regular expression."""
return not regex.Search(file_stat.pathspec.Basename())
result.append(FilterPath)
if request.HasField("data_regex"):
def FilterData(file_stat, **_):
"""Suppress files that do not match the content."""
return not self.TestFileContent(file_stat)
result.append(FilterData)
return result | def function[BuildChecks, parameter[self, request]]:
constant[Parses request and returns a list of filter callables.
Each callable will be called with the StatEntry and returns True if the
entry should be suppressed.
Args:
request: A FindSpec that describes the search.
Returns:
a list of callables which return True if the file is to be suppressed.
]
variable[result] assign[=] list[[]]
if <ast.BoolOp object at 0x7da204621ae0> begin[:]
def function[FilterTimestamp, parameter[file_stat, request]]:
return[<ast.BoolOp object at 0x7da2046234c0>]
call[name[result].append, parameter[name[FilterTimestamp]]]
if <ast.BoolOp object at 0x7da1b1c24e20> begin[:]
def function[FilterSize, parameter[file_stat, request]]:
return[<ast.BoolOp object at 0x7da1b1c24ee0>]
call[name[result].append, parameter[name[FilterSize]]]
if call[name[request].HasField, parameter[constant[perm_mode]]] begin[:]
def function[FilterPerms, parameter[file_stat, request]]:
return[compare[binary_operation[name[file_stat].st_mode <ast.BitAnd object at 0x7da2590d6b60> name[request].perm_mask] not_equal[!=] name[request].perm_mode]]
call[name[result].append, parameter[name[FilterPerms]]]
if call[name[request].HasField, parameter[constant[uid]]] begin[:]
def function[FilterUID, parameter[file_stat, request]]:
return[compare[name[file_stat].st_uid not_equal[!=] name[request].uid]]
call[name[result].append, parameter[name[FilterUID]]]
if call[name[request].HasField, parameter[constant[gid]]] begin[:]
def function[FilterGID, parameter[file_stat, request]]:
return[compare[name[file_stat].st_gid not_equal[!=] name[request].gid]]
call[name[result].append, parameter[name[FilterGID]]]
if call[name[request].HasField, parameter[constant[path_regex]]] begin[:]
variable[regex] assign[=] name[request].path_regex
def function[FilterPath, parameter[file_stat, regex]]:
constant[Suppress any filename not matching the regular expression.]
return[<ast.UnaryOp object at 0x7da1b1b2a440>]
call[name[result].append, parameter[name[FilterPath]]]
if call[name[request].HasField, parameter[constant[data_regex]]] begin[:]
def function[FilterData, parameter[file_stat]]:
constant[Suppress files that do not match the content.]
return[<ast.UnaryOp object at 0x7da1b1b2a4a0>]
call[name[result].append, parameter[name[FilterData]]]
return[name[result]] | keyword[def] identifier[BuildChecks] ( identifier[self] , identifier[request] ):
literal[string]
identifier[result] =[]
keyword[if] identifier[request] . identifier[HasField] ( literal[string] ) keyword[or] identifier[request] . identifier[HasField] ( literal[string] ):
keyword[def] identifier[FilterTimestamp] ( identifier[file_stat] , identifier[request] = identifier[request] ):
keyword[return] identifier[file_stat] . identifier[HasField] ( literal[string] ) keyword[and] (
identifier[file_stat] . identifier[st_mtime] < identifier[request] . identifier[start_time] keyword[or]
identifier[file_stat] . identifier[st_mtime] > identifier[request] . identifier[end_time] )
identifier[result] . identifier[append] ( identifier[FilterTimestamp] )
keyword[if] identifier[request] . identifier[HasField] ( literal[string] ) keyword[or] identifier[request] . identifier[HasField] ( literal[string] ):
keyword[def] identifier[FilterSize] ( identifier[file_stat] , identifier[request] = identifier[request] ):
keyword[return] identifier[file_stat] . identifier[HasField] ( literal[string] ) keyword[and] (
identifier[file_stat] . identifier[st_size] < identifier[request] . identifier[min_file_size] keyword[or]
identifier[file_stat] . identifier[st_size] > identifier[request] . identifier[max_file_size] )
identifier[result] . identifier[append] ( identifier[FilterSize] )
keyword[if] identifier[request] . identifier[HasField] ( literal[string] ):
keyword[def] identifier[FilterPerms] ( identifier[file_stat] , identifier[request] = identifier[request] ):
keyword[return] ( identifier[file_stat] . identifier[st_mode] & identifier[request] . identifier[perm_mask] )!= identifier[request] . identifier[perm_mode]
identifier[result] . identifier[append] ( identifier[FilterPerms] )
keyword[if] identifier[request] . identifier[HasField] ( literal[string] ):
keyword[def] identifier[FilterUID] ( identifier[file_stat] , identifier[request] = identifier[request] ):
keyword[return] identifier[file_stat] . identifier[st_uid] != identifier[request] . identifier[uid]
identifier[result] . identifier[append] ( identifier[FilterUID] )
keyword[if] identifier[request] . identifier[HasField] ( literal[string] ):
keyword[def] identifier[FilterGID] ( identifier[file_stat] , identifier[request] = identifier[request] ):
keyword[return] identifier[file_stat] . identifier[st_gid] != identifier[request] . identifier[gid]
identifier[result] . identifier[append] ( identifier[FilterGID] )
keyword[if] identifier[request] . identifier[HasField] ( literal[string] ):
identifier[regex] = identifier[request] . identifier[path_regex]
keyword[def] identifier[FilterPath] ( identifier[file_stat] , identifier[regex] = identifier[regex] ):
literal[string]
keyword[return] keyword[not] identifier[regex] . identifier[Search] ( identifier[file_stat] . identifier[pathspec] . identifier[Basename] ())
identifier[result] . identifier[append] ( identifier[FilterPath] )
keyword[if] identifier[request] . identifier[HasField] ( literal[string] ):
keyword[def] identifier[FilterData] ( identifier[file_stat] ,** identifier[_] ):
literal[string]
keyword[return] keyword[not] identifier[self] . identifier[TestFileContent] ( identifier[file_stat] )
identifier[result] . identifier[append] ( identifier[FilterData] )
keyword[return] identifier[result] | def BuildChecks(self, request):
"""Parses request and returns a list of filter callables.
Each callable will be called with the StatEntry and returns True if the
entry should be suppressed.
Args:
request: A FindSpec that describes the search.
Returns:
a list of callables which return True if the file is to be suppressed.
"""
result = []
if request.HasField('start_time') or request.HasField('end_time'):
def FilterTimestamp(file_stat, request=request):
return file_stat.HasField('st_mtime') and (file_stat.st_mtime < request.start_time or file_stat.st_mtime > request.end_time)
result.append(FilterTimestamp) # depends on [control=['if'], data=[]]
if request.HasField('min_file_size') or request.HasField('max_file_size'):
def FilterSize(file_stat, request=request):
return file_stat.HasField('st_size') and (file_stat.st_size < request.min_file_size or file_stat.st_size > request.max_file_size)
result.append(FilterSize) # depends on [control=['if'], data=[]]
if request.HasField('perm_mode'):
def FilterPerms(file_stat, request=request):
return file_stat.st_mode & request.perm_mask != request.perm_mode
result.append(FilterPerms) # depends on [control=['if'], data=[]]
if request.HasField('uid'):
def FilterUID(file_stat, request=request):
return file_stat.st_uid != request.uid
result.append(FilterUID) # depends on [control=['if'], data=[]]
if request.HasField('gid'):
def FilterGID(file_stat, request=request):
return file_stat.st_gid != request.gid
result.append(FilterGID) # depends on [control=['if'], data=[]]
if request.HasField('path_regex'):
regex = request.path_regex
def FilterPath(file_stat, regex=regex):
"""Suppress any filename not matching the regular expression."""
return not regex.Search(file_stat.pathspec.Basename())
result.append(FilterPath) # depends on [control=['if'], data=[]]
if request.HasField('data_regex'):
def FilterData(file_stat, **_):
"""Suppress files that do not match the content."""
return not self.TestFileContent(file_stat)
result.append(FilterData) # depends on [control=['if'], data=[]]
return result |
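Each nested filter in BuildChecks binds request=request as a default argument, freezing the value at definition time. The same idiom avoids the classic late-binding pitfall when callables are created in a loop:

frozen = [lambda x, n=n: x % n == 0 for n in (2, 3, 5)]
print([f(15) for f in frozen])  # [False, True, True] -- each n is captured

late = [lambda x: x % n == 0 for n in (2, 3, 5)]
print([f(15) for f in late])    # [True, True, True] -- all three see n == 5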
def do_auto_save(self):
"""
Delete current fit if auto_save==False,
unless current fit has explicitly been saved.
"""
if not self.auto_save.GetValue():
if self.current_fit:
if not self.current_fit.saved:
self.delete_fit(self.current_fit, specimen=self.s) | def function[do_auto_save, parameter[self]]:
constant[
Delete current fit if auto_save==False,
unless current fit has explicitly been saved.
]
if <ast.UnaryOp object at 0x7da2041da9e0> begin[:]
if name[self].current_fit begin[:]
if <ast.UnaryOp object at 0x7da2041d9960> begin[:]
call[name[self].delete_fit, parameter[name[self].current_fit]] | keyword[def] identifier[do_auto_save] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[auto_save] . identifier[GetValue] ():
keyword[if] identifier[self] . identifier[current_fit] :
keyword[if] keyword[not] identifier[self] . identifier[current_fit] . identifier[saved] :
identifier[self] . identifier[delete_fit] ( identifier[self] . identifier[current_fit] , identifier[specimen] = identifier[self] . identifier[s] ) | def do_auto_save(self):
"""
Delete current fit if auto_save==False,
unless current fit has explicitly been saved.
"""
if not self.auto_save.GetValue():
if self.current_fit:
if not self.current_fit.saved:
self.delete_fit(self.current_fit, specimen=self.s) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def get_thumbnail_paths(self):
"""
Helper function used to avoid processing thumbnail files during `os.walk`.
"""
thumbnail_path_tuples = []
# channel thumbnail
channel_info = self.get_channel_info()
chthumbnail_path = channel_info.get('thumbnail_chan_path', None)
if chthumbnail_path:
chthumbnail_path_tuple = path_to_tuple(chthumbnail_path, windows=self.winpaths)
thumbnail_path_tuples.append(chthumbnail_path_tuple)
# content thumbnails
for content_file_path_tuple, row in self.contentcache.items():
thumbnail_path = row.get('thumbnail_chan_path', None)
if thumbnail_path:
thumbnail_path_tuple = path_to_tuple(thumbnail_path, windows=self.winpaths)
thumbnail_path_tuples.append(thumbnail_path_tuple)
return thumbnail_path_tuples | def function[get_thumbnail_paths, parameter[self]]:
constant[
Helper function used to avoid processing thumbnail files during `os.walk`.
]
variable[thumbnail_path_tuples] assign[=] list[[]]
variable[channel_info] assign[=] call[name[self].get_channel_info, parameter[]]
variable[chthumbnail_path] assign[=] call[name[channel_info].get, parameter[constant[thumbnail_chan_path], constant[None]]]
if name[chthumbnail_path] begin[:]
variable[chthumbnail_path_tuple] assign[=] call[name[path_to_tuple], parameter[name[chthumbnail_path]]]
call[name[thumbnail_path_tuples].append, parameter[name[chthumbnail_path_tuple]]]
for taget[tuple[[<ast.Name object at 0x7da207f9a080>, <ast.Name object at 0x7da207f981f0>]]] in starred[call[name[self].contentcache.items, parameter[]]] begin[:]
variable[thumbnail_path] assign[=] call[name[row].get, parameter[constant[thumbnail_chan_path], constant[None]]]
if name[thumbnail_path] begin[:]
variable[thumbnail_path_tuple] assign[=] call[name[path_to_tuple], parameter[name[thumbnail_path]]]
call[name[thumbnail_path_tuples].append, parameter[name[thumbnail_path_tuple]]]
return[name[thumbnail_path_tuples]] | keyword[def] identifier[get_thumbnail_paths] ( identifier[self] ):
literal[string]
identifier[thumbnail_path_tuples] =[]
identifier[channel_info] = identifier[self] . identifier[get_channel_info] ()
identifier[chthumbnail_path] = identifier[channel_info] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[chthumbnail_path] :
identifier[chthumbnail_path_tuple] = identifier[path_to_tuple] ( identifier[chthumbnail_path] , identifier[windows] = identifier[self] . identifier[winpaths] )
identifier[thumbnail_path_tuples] . identifier[append] ( identifier[chthumbnail_path_tuple] )
keyword[for] identifier[content_file_path_tuple] , identifier[row] keyword[in] identifier[self] . identifier[contentcache] . identifier[items] ():
identifier[thumbnail_path] = identifier[row] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[thumbnail_path] :
identifier[thumbnail_path_tuple] = identifier[path_to_tuple] ( identifier[thumbnail_path] , identifier[windows] = identifier[self] . identifier[winpaths] )
identifier[thumbnail_path_tuples] . identifier[append] ( identifier[thumbnail_path_tuple] )
keyword[return] identifier[thumbnail_path_tuples] | def get_thumbnail_paths(self):
"""
Helper function used to avoid processing thumbnail files during `os.walk`.
"""
thumbnail_path_tuples = []
# channel thumbnail
channel_info = self.get_channel_info()
chthumbnail_path = channel_info.get('thumbnail_chan_path', None)
if chthumbnail_path:
chthumbnail_path_tuple = path_to_tuple(chthumbnail_path, windows=self.winpaths)
thumbnail_path_tuples.append(chthumbnail_path_tuple) # depends on [control=['if'], data=[]]
# content thumbnails
for (content_file_path_tuple, row) in self.contentcache.items():
thumbnail_path = row.get('thumbnail_chan_path', None)
if thumbnail_path:
thumbnail_path_tuple = path_to_tuple(thumbnail_path, windows=self.winpaths)
thumbnail_path_tuples.append(thumbnail_path_tuple) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return thumbnail_path_tuples |
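path_to_tuple is imported from the project's utils and is not shown in this row; a plausible pure-Python stand-in, for illustration only, is:

def path_to_tuple(path, windows=False):
    # Split a channel-relative path on the platform separator.
    sep = "\\" if windows else "/"
    return tuple(part for part in path.split(sep) if part)

print(path_to_tuple("channel/en/thumbnail.png"))
# ('channel', 'en', 'thumbnail.png')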
def rewrite_elife_authors_json(json_content, doi):
""" this does the work of rewriting elife authors json """
# Convert doi from testing doi if applicable
article_doi = elifetools.utils.convert_testing_doi(doi)
# Edge case fix an affiliation name
if article_doi == "10.7554/eLife.06956":
for i, ref in enumerate(json_content):
if ref.get("orcid") and ref.get("orcid") == "0000-0001-6798-0064":
json_content[i]["affiliations"][0]["name"] = ["Cambridge"]
# Edge case fix an ORCID
if article_doi == "10.7554/eLife.09376":
for i, ref in enumerate(json_content):
if ref.get("orcid") and ref.get("orcid") == "000-0001-7224-925X":
json_content[i]["orcid"] = "0000-0001-7224-925X"
# Edge case competing interests
if article_doi == "10.7554/eLife.00102":
for i, ref in enumerate(json_content):
if not ref.get("competingInterests"):
if ref["name"]["index"].startswith("Chen,"):
json_content[i]["competingInterests"] = "ZJC: Reviewing Editor, <i>eLife</i>"
elif ref["name"]["index"].startswith("Li,"):
json_content[i]["competingInterests"] = "The remaining authors have no competing interests to declare."
if article_doi == "10.7554/eLife.00270":
for i, ref in enumerate(json_content):
if not ref.get("competingInterests"):
if ref["name"]["index"].startswith("Patterson,"):
json_content[i]["competingInterests"] = "MP: Managing Executive Editor, <i>eLife</i>"
# Remainder of competing interests rewrites
elife_author_competing_interests = {}
elife_author_competing_interests["10.7554/eLife.00133"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.00190"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.00230"] = "The authors have declared that no competing interests exist"
elife_author_competing_interests["10.7554/eLife.00288"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.00352"] = "The author declares that no competing interest exist"
elife_author_competing_interests["10.7554/eLife.00362"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.00475"] = "The remaining authors have no competing interests to declare."
elife_author_competing_interests["10.7554/eLife.00592"] = "The other authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.00633"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.02725"] = "The other authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.02935"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.04126"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.04878"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.05322"] = "The other authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.06011"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.06416"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.07383"] = "The other authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.08421"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.08494"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.08648"] = "The other authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.08924"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.09083"] = "The other authors declare that no competing interests exists."
elife_author_competing_interests["10.7554/eLife.09102"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.09460"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.09591"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.09600"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.10113"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.10230"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.10453"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.10635"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.11407"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.11473"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.11750"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.12217"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.12620"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.12724"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.13023"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.13732"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.14116"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.14258"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.14694"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.15085"] = "The other authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.15312"] = "The other authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.16011"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.16940"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.17023"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.17092"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.17218"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.17267"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.17523"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.17556"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.17769"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.17834"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.18101"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.18515"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.18544"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.18648"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.19071"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.19334"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.19510"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.20183"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.20242"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.20375"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.20797"] = "The other authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.21454"] = "The authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.21491"] = "The other authors declare that no competing interests exist."
elife_author_competing_interests["10.7554/eLife.22187"] = "The authors declare that no competing interests exist."
if article_doi in elife_author_competing_interests:
for i, ref in enumerate(json_content):
if not ref.get("competingInterests"):
json_content[i]["competingInterests"] = elife_author_competing_interests[article_doi]
# Rewrite "other authors declare" ... competing interests statements using a string match
for i, ref in enumerate(json_content):
if (ref.get("competingInterests") and (
ref.get("competingInterests").startswith("The other author") or
ref.get("competingInterests").startswith("The others author") or
ref.get("competingInterests").startswith("The remaining authors") or
ref.get("competingInterests").startswith("The remaining have declared")
)):
json_content[i]["competingInterests"] = "No competing interests declared."
return json_content | def function[rewrite_elife_authors_json, parameter[json_content, doi]]:
constant[ this does the work of rewriting elife authors json ]
variable[article_doi] assign[=] call[name[elifetools].utils.convert_testing_doi, parameter[name[doi]]]
if compare[name[article_doi] equal[==] constant[10.7554/eLife.06956]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c6e4fd0>, <ast.Name object at 0x7da20c6e7730>]]] in starred[call[name[enumerate], parameter[name[json_content]]]] begin[:]
if <ast.BoolOp object at 0x7da20c6e5ba0> begin[:]
call[call[call[call[name[json_content]][name[i]]][constant[affiliations]]][constant[0]]][constant[name]] assign[=] list[[<ast.Constant object at 0x7da20c6e7cd0>]]
if compare[name[article_doi] equal[==] constant[10.7554/eLife.09376]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c6e52a0>, <ast.Name object at 0x7da20c6e60b0>]]] in starred[call[name[enumerate], parameter[name[json_content]]]] begin[:]
if <ast.BoolOp object at 0x7da20c6e4af0> begin[:]
call[call[name[json_content]][name[i]]][constant[orcid]] assign[=] constant[0000-0001-7224-925X]
if compare[name[article_doi] equal[==] constant[10.7554/eLife.00102]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c6e4c40>, <ast.Name object at 0x7da20c6e61a0>]]] in starred[call[name[enumerate], parameter[name[json_content]]]] begin[:]
if <ast.UnaryOp object at 0x7da20c6e6890> begin[:]
if call[call[call[name[ref]][constant[name]]][constant[index]].startswith, parameter[constant[Chen,]]] begin[:]
call[call[name[json_content]][name[i]]][constant[competingInterests]] assign[=] constant[ZJC: Reviewing Editor, <i>eLife</i>]
if compare[name[article_doi] equal[==] constant[10.7554/eLife.00270]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da2047e8fa0>, <ast.Name object at 0x7da2047ead10>]]] in starred[call[name[enumerate], parameter[name[json_content]]]] begin[:]
if <ast.UnaryOp object at 0x7da2047eb400> begin[:]
if call[call[call[name[ref]][constant[name]]][constant[index]].startswith, parameter[constant[Patterson,]]] begin[:]
call[call[name[json_content]][name[i]]][constant[competingInterests]] assign[=] constant[MP: Managing Executive Editor, <i>eLife</i>]
variable[elife_author_competing_interests] assign[=] dictionary[[], []]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.00133]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.00190]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.00230]] assign[=] constant[The authors have declared that no competing interests exist]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.00288]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.00352]] assign[=] constant[The author declares that no competing interest exist]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.00362]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.00475]] assign[=] constant[The remaining authors have no competing interests to declare.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.00592]] assign[=] constant[The other authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.00633]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.02725]] assign[=] constant[The other authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.02935]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.04126]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.04878]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.05322]] assign[=] constant[The other authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.06011]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.06416]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.07383]] assign[=] constant[The other authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.08421]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.08494]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.08648]] assign[=] constant[The other authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.08924]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.09083]] assign[=] constant[The other authors declare that no competing interests exists.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.09102]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.09460]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.09591]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.09600]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.10113]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.10230]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.10453]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.10635]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.11407]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.11473]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.11750]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.12217]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.12620]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.12724]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.13023]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.13732]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.14116]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.14258]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.14694]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.15085]] assign[=] constant[The other authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.15312]] assign[=] constant[The other authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.16011]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.16940]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.17023]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.17092]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.17218]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.17267]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.17523]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.17556]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.17769]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.17834]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.18101]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.18515]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.18544]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.18648]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.19071]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.19334]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.19510]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.20183]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.20242]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.20375]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.20797]] assign[=] constant[The other authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.21454]] assign[=] constant[The authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.21491]] assign[=] constant[The other authors declare that no competing interests exist.]
call[name[elife_author_competing_interests]][constant[10.7554/eLife.22187]] assign[=] constant[The authors declare that no competing interests exist.]
if compare[name[article_doi] in name[elife_author_competing_interests]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b26ad930>, <ast.Name object at 0x7da1b26adff0>]]] in starred[call[name[enumerate], parameter[name[json_content]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b26ae8c0> begin[:]
call[call[name[json_content]][name[i]]][constant[competingInterests]] assign[=] call[name[elife_author_competing_interests]][name[article_doi]]
for taget[tuple[[<ast.Name object at 0x7da1b26acdc0>, <ast.Name object at 0x7da1b26af910>]]] in starred[call[name[enumerate], parameter[name[json_content]]]] begin[:]
if <ast.BoolOp object at 0x7da1b26ae7a0> begin[:]
call[call[name[json_content]][name[i]]][constant[competingInterests]] assign[=] constant[No competing interests declared.]
return[name[json_content]] | keyword[def] identifier[rewrite_elife_authors_json] ( identifier[json_content] , identifier[doi] ):
literal[string]
identifier[article_doi] = identifier[elifetools] . identifier[utils] . identifier[convert_testing_doi] ( identifier[doi] )
keyword[if] identifier[article_doi] == literal[string] :
keyword[for] identifier[i] , identifier[ref] keyword[in] identifier[enumerate] ( identifier[json_content] ):
keyword[if] identifier[ref] . identifier[get] ( literal[string] ) keyword[and] identifier[ref] . identifier[get] ( literal[string] )== literal[string] :
identifier[json_content] [ identifier[i] ][ literal[string] ][ literal[int] ][ literal[string] ]=[ literal[string] ]
keyword[if] identifier[article_doi] == literal[string] :
keyword[for] identifier[i] , identifier[ref] keyword[in] identifier[enumerate] ( identifier[json_content] ):
keyword[if] identifier[ref] . identifier[get] ( literal[string] ) keyword[and] identifier[ref] . identifier[get] ( literal[string] )== literal[string] :
identifier[json_content] [ identifier[i] ][ literal[string] ]= literal[string]
keyword[if] identifier[article_doi] == literal[string] :
keyword[for] identifier[i] , identifier[ref] keyword[in] identifier[enumerate] ( identifier[json_content] ):
keyword[if] keyword[not] identifier[ref] . identifier[get] ( literal[string] ):
keyword[if] identifier[ref] [ literal[string] ][ literal[string] ]. identifier[startswith] ( literal[string] ):
identifier[json_content] [ identifier[i] ][ literal[string] ]= literal[string]
keyword[elif] identifier[ref] [ literal[string] ][ literal[string] ]. identifier[startswith] ( literal[string] ):
identifier[json_content] [ identifier[i] ][ literal[string] ]= literal[string]
keyword[if] identifier[article_doi] == literal[string] :
keyword[for] identifier[i] , identifier[ref] keyword[in] identifier[enumerate] ( identifier[json_content] ):
keyword[if] keyword[not] identifier[ref] . identifier[get] ( literal[string] ):
keyword[if] identifier[ref] [ literal[string] ][ literal[string] ]. identifier[startswith] ( literal[string] ):
identifier[json_content] [ identifier[i] ][ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] ={}
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
identifier[elife_author_competing_interests] [ literal[string] ]= literal[string]
keyword[if] identifier[article_doi] keyword[in] identifier[elife_author_competing_interests] :
keyword[for] identifier[i] , identifier[ref] keyword[in] identifier[enumerate] ( identifier[json_content] ):
keyword[if] keyword[not] identifier[ref] . identifier[get] ( literal[string] ):
identifier[json_content] [ identifier[i] ][ literal[string] ]= identifier[elife_author_competing_interests] [ identifier[article_doi] ]
keyword[for] identifier[i] , identifier[ref] keyword[in] identifier[enumerate] ( identifier[json_content] ):
keyword[if] ( identifier[ref] . identifier[get] ( literal[string] ) keyword[and] (
identifier[ref] . identifier[get] ( literal[string] ). identifier[startswith] ( literal[string] ) keyword[or]
identifier[ref] . identifier[get] ( literal[string] ). identifier[startswith] ( literal[string] ) keyword[or]
identifier[ref] . identifier[get] ( literal[string] ). identifier[startswith] ( literal[string] ) keyword[or]
identifier[ref] . identifier[get] ( literal[string] ). identifier[startswith] ( literal[string] )
)):
identifier[json_content] [ identifier[i] ][ literal[string] ]= literal[string]
keyword[return] identifier[json_content] | def rewrite_elife_authors_json(json_content, doi):
""" this does the work of rewriting elife authors json """
# Convert doi from testing doi if applicable
article_doi = elifetools.utils.convert_testing_doi(doi)
# Edge case fix an affiliation name
if article_doi == '10.7554/eLife.06956':
for (i, ref) in enumerate(json_content):
if ref.get('orcid') and ref.get('orcid') == '0000-0001-6798-0064':
json_content[i]['affiliations'][0]['name'] = ['Cambridge'] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# Edge case fix an ORCID
if article_doi == '10.7554/eLife.09376':
for (i, ref) in enumerate(json_content):
if ref.get('orcid') and ref.get('orcid') == '000-0001-7224-925X':
json_content[i]['orcid'] = '0000-0001-7224-925X' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# Edge case competing interests
if article_doi == '10.7554/eLife.00102':
for (i, ref) in enumerate(json_content):
if not ref.get('competingInterests'):
if ref['name']['index'].startswith('Chen,'):
json_content[i]['competingInterests'] = 'ZJC: Reviewing Editor, <i>eLife</i>' # depends on [control=['if'], data=[]]
elif ref['name']['index'].startswith('Li,'):
json_content[i]['competingInterests'] = 'The remaining authors have no competing interests to declare.' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
if article_doi == '10.7554/eLife.00270':
for (i, ref) in enumerate(json_content):
if not ref.get('competingInterests'):
if ref['name']['index'].startswith('Patterson,'):
json_content[i]['competingInterests'] = 'MP: Managing Executive Editor, <i>eLife</i>' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# Remainder of competing interests rewrites
elife_author_competing_interests = {}
elife_author_competing_interests['10.7554/eLife.00133'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.00190'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.00230'] = 'The authors have declared that no competing interests exist'
elife_author_competing_interests['10.7554/eLife.00288'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.00352'] = 'The author declares that no competing interest exist'
elife_author_competing_interests['10.7554/eLife.00362'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.00475'] = 'The remaining authors have no competing interests to declare.'
elife_author_competing_interests['10.7554/eLife.00592'] = 'The other authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.00633'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.02725'] = 'The other authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.02935'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.04126'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.04878'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.05322'] = 'The other authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.06011'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.06416'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.07383'] = 'The other authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.08421'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.08494'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.08648'] = 'The other authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.08924'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.09083'] = 'The other authors declare that no competing interests exists.'
elife_author_competing_interests['10.7554/eLife.09102'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.09460'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.09591'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.09600'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.10113'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.10230'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.10453'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.10635'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.11407'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.11473'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.11750'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.12217'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.12620'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.12724'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.13023'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.13732'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.14116'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.14258'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.14694'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.15085'] = 'The other authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.15312'] = 'The other authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.16011'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.16940'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.17023'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.17092'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.17218'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.17267'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.17523'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.17556'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.17769'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.17834'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.18101'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.18515'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.18544'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.18648'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.19071'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.19334'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.19510'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.20183'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.20242'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.20375'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.20797'] = 'The other authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.21454'] = 'The authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.21491'] = 'The other authors declare that no competing interests exist.'
elife_author_competing_interests['10.7554/eLife.22187'] = 'The authors declare that no competing interests exist.'
if article_doi in elife_author_competing_interests:
for (i, ref) in enumerate(json_content):
if not ref.get('competingInterests'):
json_content[i]['competingInterests'] = elife_author_competing_interests[article_doi] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['article_doi', 'elife_author_competing_interests']]
# Rewrite "other authors declare" ... competing interests statements using a string match
for (i, ref) in enumerate(json_content):
if ref.get('competingInterests') and (ref.get('competingInterests').startswith('The other author') or ref.get('competingInterests').startswith('The others author') or ref.get('competingInterests').startswith('The remaining authors') or ref.get('competingInterests').startswith('The remaining have declared')):
json_content[i]['competingInterests'] = 'No competing interests declared.' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return json_content |
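
A quick usage sketch for the row above. The author entries and DOI here are made up for illustration, and running it needs the elifetools package installed, since the function calls elifetools.utils.convert_testing_doi:

    # Hypothetical input: one author with no statement, one with an
    # "other authors" statement that gets normalised.
    authors = [
        {"name": {"index": "Doe, Jane"}},
        {"competingInterests": "The other authors declare that no competing interests exist."},
    ]
    out = rewrite_elife_authors_json(authors, "10.7554/eLife.00133")
    # out[0]["competingInterests"] is the per-article default statement;
    # out[1]["competingInterests"] becomes "No competing interests declared."
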
def __get_stat_display(self, stats, layer):
"""Return a dict of dict with all the stats display.
stats: Global stats dict
layer: ~ cs_status
"None": standalone or server mode
"Connected": Client is connected to a Glances server
"SNMP": Client is connected to a SNMP server
"Disconnected": Client is disconnected from the server
:returns: dict of dict
* key: plugin name
* value: dict returned by the get_stats_display Plugin method
"""
ret = {}
for p in stats.getPluginsList(enable=False):
if p == 'quicklook' or p == 'processlist':
# processlist is done later
# because we need to know how many processes could be displayed
continue
# Compute the plugin max size
plugin_max_width = None
if p in self._left_sidebar:
plugin_max_width = max(self._left_sidebar_min_width,
self.screen.getmaxyx()[1] - 105)
plugin_max_width = min(self._left_sidebar_max_width,
plugin_max_width)
# Get the view
ret[p] = stats.get_plugin(p).get_stats_display(args=self.args,
max_width=plugin_max_width)
return ret | def function[__get_stat_display, parameter[self, stats, layer]]:
    constant[Return a dict of dicts with all the stats displays.
    stats: Global stats dict
    layer: ~ cs_status
    "None": standalone or server mode
    "Connected": Client is connected to a Glances server
    "SNMP": Client is connected to an SNMP server
    "Disconnected": Client is disconnected from the server
    :returns: dict of dicts
    * key: plugin name
    * value: dict returned by the get_stats_display Plugin method
    ]
variable[ret] assign[=] dictionary[[], []]
for taget[name[p]] in starred[call[name[stats].getPluginsList, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da20c7c9240> begin[:]
continue
variable[plugin_max_width] assign[=] constant[None]
if compare[name[p] in name[self]._left_sidebar] begin[:]
variable[plugin_max_width] assign[=] call[name[max], parameter[name[self]._left_sidebar_min_width, binary_operation[call[call[name[self].screen.getmaxyx, parameter[]]][constant[1]] - constant[105]]]]
variable[plugin_max_width] assign[=] call[name[min], parameter[name[self]._left_sidebar_max_width, name[plugin_max_width]]]
call[name[ret]][name[p]] assign[=] call[call[name[stats].get_plugin, parameter[name[p]]].get_stats_display, parameter[]]
return[name[ret]] | keyword[def] identifier[__get_stat_display] ( identifier[self] , identifier[stats] , identifier[layer] ):
literal[string]
identifier[ret] ={}
keyword[for] identifier[p] keyword[in] identifier[stats] . identifier[getPluginsList] ( identifier[enable] = keyword[False] ):
keyword[if] identifier[p] == literal[string] keyword[or] identifier[p] == literal[string] :
keyword[continue]
identifier[plugin_max_width] = keyword[None]
keyword[if] identifier[p] keyword[in] identifier[self] . identifier[_left_sidebar] :
identifier[plugin_max_width] = identifier[max] ( identifier[self] . identifier[_left_sidebar_min_width] ,
identifier[self] . identifier[screen] . identifier[getmaxyx] ()[ literal[int] ]- literal[int] )
identifier[plugin_max_width] = identifier[min] ( identifier[self] . identifier[_left_sidebar_max_width] ,
identifier[plugin_max_width] )
identifier[ret] [ identifier[p] ]= identifier[stats] . identifier[get_plugin] ( identifier[p] ). identifier[get_stats_display] ( identifier[args] = identifier[self] . identifier[args] ,
identifier[max_width] = identifier[plugin_max_width] )
keyword[return] identifier[ret] | def __get_stat_display(self, stats, layer):
"""Return a dict of dict with all the stats display.
stats: Global stats dict
layer: ~ cs_status
"None": standalone or server mode
"Connected": Client is connected to a Glances server
"SNMP": Client is connected to a SNMP server
"Disconnected": Client is disconnected from the server
:returns: dict of dict
* key: plugin name
* value: dict returned by the get_stats_display Plugin method
"""
ret = {}
for p in stats.getPluginsList(enable=False):
if p == 'quicklook' or p == 'processlist':
# processlist is done later
# because we need to know how many processes could be displayed
continue # depends on [control=['if'], data=[]]
# Compute the plugin max size
plugin_max_width = None
if p in self._left_sidebar:
plugin_max_width = max(self._left_sidebar_min_width, self.screen.getmaxyx()[1] - 105)
plugin_max_width = min(self._left_sidebar_max_width, plugin_max_width) # depends on [control=['if'], data=[]]
# Get the view
ret[p] = stats.get_plugin(p).get_stats_display(args=self.args, max_width=plugin_max_width) # depends on [control=['for'], data=['p']]
return ret |
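
The sidebar sizing above clamps the plugin width between class-level bounds. Here is a standalone sketch of that clamp with illustrative bounds — the 23 and 34 defaults are assumptions, the real values live on the curses interface class:

    def clamp_sidebar_width(screen_cols, min_width=23, max_width=34):
        # Never narrower than min_width, never wider than max_width,
        # and leave 105 columns for the main panel.
        return min(max_width, max(min_width, screen_cols - 105))

    clamp_sidebar_width(150)  # -> 34 (wide terminal hits the max)
    clamp_sidebar_width(110)  # -> 23 (narrow terminal hits the min)
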
def dof(self):
"""Returns the DoF of the robot (with grippers)."""
dof = self.mujoco_robot.dof
if self.has_gripper_left:
dof += self.gripper_left.dof
if self.has_gripper_right:
dof += self.gripper_right.dof
return dof | def function[dof, parameter[self]]:
constant[Returns the DoF of the robot (with grippers).]
variable[dof] assign[=] name[self].mujoco_robot.dof
if name[self].has_gripper_left begin[:]
<ast.AugAssign object at 0x7da20c6a8eb0>
if name[self].has_gripper_right begin[:]
<ast.AugAssign object at 0x7da20c6a9d80>
return[name[dof]] | keyword[def] identifier[dof] ( identifier[self] ):
literal[string]
identifier[dof] = identifier[self] . identifier[mujoco_robot] . identifier[dof]
keyword[if] identifier[self] . identifier[has_gripper_left] :
identifier[dof] += identifier[self] . identifier[gripper_left] . identifier[dof]
keyword[if] identifier[self] . identifier[has_gripper_right] :
identifier[dof] += identifier[self] . identifier[gripper_right] . identifier[dof]
keyword[return] identifier[dof] | def dof(self):
"""Returns the DoF of the robot (with grippers)."""
dof = self.mujoco_robot.dof
if self.has_gripper_left:
dof += self.gripper_left.dof # depends on [control=['if'], data=[]]
if self.has_gripper_right:
dof += self.gripper_right.dof # depends on [control=['if'], data=[]]
return dof |
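
A toy sketch of the additive DoF logic with stand-in objects; the joint counts 14 and 1 are arbitrary, not taken from any real robot model:

    from types import SimpleNamespace

    robot = SimpleNamespace(
        mujoco_robot=SimpleNamespace(dof=14),
        has_gripper_left=True, gripper_left=SimpleNamespace(dof=1),
        has_gripper_right=True, gripper_right=SimpleNamespace(dof=1),
    )
    total = robot.mujoco_robot.dof
    if robot.has_gripper_left:
        total += robot.gripper_left.dof
    if robot.has_gripper_right:
        total += robot.gripper_right.dof
    assert total == 16  # 14 arm joints + 2 gripper joints
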
def _language_in_list(language, targets, min_score=80):
"""
A helper function to determine whether this language matches one of the
target languages, with a match score above a certain threshold.
The languages can be given as strings (language tags) or as Language
objects. `targets` can be any iterable of such languages.
"""
matched = best_match(language, targets, min_score=min_score)
return matched[1] > 0 | def function[_language_in_list, parameter[language, targets, min_score]]:
constant[
A helper function to determine whether this language matches one of the
target languages, with a match score above a certain threshold.
The languages can be given as strings (language tags) or as Language
objects. `targets` can be any iterable of such languages.
]
variable[matched] assign[=] call[name[best_match], parameter[name[language], name[targets]]]
return[compare[call[name[matched]][constant[1]] greater[>] constant[0]]] | keyword[def] identifier[_language_in_list] ( identifier[language] , identifier[targets] , identifier[min_score] = literal[int] ):
literal[string]
identifier[matched] = identifier[best_match] ( identifier[language] , identifier[targets] , identifier[min_score] = identifier[min_score] )
keyword[return] identifier[matched] [ literal[int] ]> literal[int] | def _language_in_list(language, targets, min_score=80):
"""
A helper function to determine whether this language matches one of the
target languages, with a match score above a certain threshold.
The languages can be given as strings (language tags) or as Language
objects. `targets` can be any iterable of such languages.
"""
matched = best_match(language, targets, min_score=min_score)
return matched[1] > 0 |
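
A hedged example of how the helper behaves, assuming a langcodes-style best_match(language, targets, min_score=...) that returns a (matched_tag, score) pair:

    targets = ['en', 'es', 'pt']
    _language_in_list('en-GB', targets)                # True: regional variant of 'en'
    _language_in_list('ja', targets)                   # False: score stays at 0
    _language_in_list('pt-BR', targets, min_score=95)  # same check, stricter threshold
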
def backend_inst_from_mod(mod, encoding, encoding_errors, kwargs):
"""Given a mod and a set of opts return an instantiated
Backend class.
"""
kw = dict(encoding=encoding, encoding_errors=encoding_errors,
kwargs=kwargs)
try:
klass = getattr(mod, "Backend")
except AttributeError:
raise AttributeError("%r mod does not define any backend class" % mod)
inst = klass(**kw)
try:
inst.check(title=False)
except Exception as err:
bin_mod = "fulltext.backends.__bin"
warn("can't use %r due to %r; use %r backend instead" % (
mod, str(err), bin_mod))
inst = import_mod(bin_mod).Backend(**kw)
inst.check(title=False)
LOGGER.debug("using %r" % inst)
return inst | def function[backend_inst_from_mod, parameter[mod, encoding, encoding_errors, kwargs]]:
    constant[Given a mod and a set of opts, return an instantiated
    Backend object.
]
variable[kw] assign[=] call[name[dict], parameter[]]
<ast.Try object at 0x7da2041d9e10>
variable[inst] assign[=] call[name[klass], parameter[]]
<ast.Try object at 0x7da2041da290>
call[name[LOGGER].debug, parameter[binary_operation[constant[using %r] <ast.Mod object at 0x7da2590d6920> name[inst]]]]
return[name[inst]] | keyword[def] identifier[backend_inst_from_mod] ( identifier[mod] , identifier[encoding] , identifier[encoding_errors] , identifier[kwargs] ):
literal[string]
identifier[kw] = identifier[dict] ( identifier[encoding] = identifier[encoding] , identifier[encoding_errors] = identifier[encoding_errors] ,
identifier[kwargs] = identifier[kwargs] )
keyword[try] :
identifier[klass] = identifier[getattr] ( identifier[mod] , literal[string] )
keyword[except] identifier[AttributeError] :
keyword[raise] identifier[AttributeError] ( literal[string] % identifier[mod] )
identifier[inst] = identifier[klass] (** identifier[kw] )
keyword[try] :
identifier[inst] . identifier[check] ( identifier[title] = keyword[False] )
keyword[except] identifier[Exception] keyword[as] identifier[err] :
identifier[bin_mod] = literal[string]
identifier[warn] ( literal[string] %(
identifier[mod] , identifier[str] ( identifier[err] ), identifier[bin_mod] ))
identifier[inst] = identifier[import_mod] ( identifier[bin_mod] ). identifier[Backend] (** identifier[kw] )
identifier[inst] . identifier[check] ( identifier[title] = keyword[False] )
identifier[LOGGER] . identifier[debug] ( literal[string] % identifier[inst] )
keyword[return] identifier[inst] | def backend_inst_from_mod(mod, encoding, encoding_errors, kwargs):
"""Given a mod and a set of opts return an instantiated
Backend class.
"""
kw = dict(encoding=encoding, encoding_errors=encoding_errors, kwargs=kwargs)
try:
klass = getattr(mod, 'Backend') # depends on [control=['try'], data=[]]
except AttributeError:
raise AttributeError('%r mod does not define any backend class' % mod) # depends on [control=['except'], data=[]]
inst = klass(**kw)
try:
inst.check(title=False) # depends on [control=['try'], data=[]]
except Exception as err:
bin_mod = 'fulltext.backends.__bin'
warn("can't use %r due to %r; use %r backend instead" % (mod, str(err), bin_mod))
inst = import_mod(bin_mod).Backend(**kw)
inst.check(title=False) # depends on [control=['except'], data=['err']]
LOGGER.debug('using %r' % inst)
return inst |
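
An illustrative call, reusing the fulltext.backends.__bin fallback module that the function itself names; the encoding values here are just examples:

    from importlib import import_module

    mod = import_module('fulltext.backends.__bin')
    backend = backend_inst_from_mod(mod, encoding='utf-8',
                                    encoding_errors='strict', kwargs={})
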
def all_points_membership_vectors(clusterer):
"""Predict soft cluster membership vectors for all points in the
    original dataset the clusterer was trained on. This function is more
    efficient than scoring points one at a time: it makes use of the fact
    that all points are already in the condensed tree, and processes them
    in bulk.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
This method does not work if the clusterer was trained
with ``metric='precomputed'``.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` of the original dataset is a member of
cluster ``j`` is in ``membership_vectors[i, j]``.
See Also
--------
:py:func:`hdbscan.predict.predict`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
clusters = np.array(sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
all_points = clusterer.prediction_data_.raw_data
# When no clusters found, return array of 0's
if clusters.size == 0:
return np.zeros(all_points.shape[0])
distance_vecs = all_points_dist_membership_vector(
all_points,
clusterer.prediction_data_.exemplars,
clusterer.prediction_data_.dist_metric)
outlier_vecs = all_points_outlier_membership_vector(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
in_cluster_probs = all_points_prob_in_some_cluster(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
result = distance_vecs * outlier_vecs
row_sums = result.sum(axis=1)
result = result / row_sums[:, np.newaxis]
result *= in_cluster_probs[:, np.newaxis]
return result | def function[all_points_membership_vectors, parameter[clusterer]]:
constant[Predict soft cluster membership vectors for all points in the
    original dataset the clusterer was trained on. This function is more
    efficient than scoring points one at a time: it makes use of the fact
    that all points are already in the condensed tree, and processes them
    in bulk.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
This method does not work if the clusterer was trained
with ``metric='precomputed'``.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` of the original dataset is a member of
cluster ``j`` is in ``membership_vectors[i, j]``.
See Also
--------
:py:func:`hdbscan.predict.predict`
:py:func:`hdbscan.predict.all_points_membership_vectors`
]
variable[clusters] assign[=] call[call[name[np].array, parameter[call[name[sorted], parameter[call[name[list], parameter[call[name[clusterer].condensed_tree_._select_clusters, parameter[]]]]]]]].astype, parameter[name[np].intp]]
variable[all_points] assign[=] name[clusterer].prediction_data_.raw_data
if compare[name[clusters].size equal[==] constant[0]] begin[:]
return[call[name[np].zeros, parameter[call[name[all_points].shape][constant[0]]]]]
variable[distance_vecs] assign[=] call[name[all_points_dist_membership_vector], parameter[name[all_points], name[clusterer].prediction_data_.exemplars, name[clusterer].prediction_data_.dist_metric]]
variable[outlier_vecs] assign[=] call[name[all_points_outlier_membership_vector], parameter[name[clusters], name[clusterer].condensed_tree_._raw_tree, name[clusterer].prediction_data_.leaf_max_lambdas, name[clusterer].prediction_data_.cluster_tree]]
variable[in_cluster_probs] assign[=] call[name[all_points_prob_in_some_cluster], parameter[name[clusters], name[clusterer].condensed_tree_._raw_tree, name[clusterer].prediction_data_.leaf_max_lambdas, name[clusterer].prediction_data_.cluster_tree]]
variable[result] assign[=] binary_operation[name[distance_vecs] * name[outlier_vecs]]
variable[row_sums] assign[=] call[name[result].sum, parameter[]]
variable[result] assign[=] binary_operation[name[result] / call[name[row_sums]][tuple[[<ast.Slice object at 0x7da18bc71f00>, <ast.Attribute object at 0x7da18bc737f0>]]]]
<ast.AugAssign object at 0x7da18bc739d0>
return[name[result]] | keyword[def] identifier[all_points_membership_vectors] ( identifier[clusterer] ):
literal[string]
identifier[clusters] = identifier[np] . identifier[array] ( identifier[sorted] ( identifier[list] ( identifier[clusterer] . identifier[condensed_tree_] . identifier[_select_clusters] ()))). identifier[astype] ( identifier[np] . identifier[intp] )
identifier[all_points] = identifier[clusterer] . identifier[prediction_data_] . identifier[raw_data]
keyword[if] identifier[clusters] . identifier[size] == literal[int] :
keyword[return] identifier[np] . identifier[zeros] ( identifier[all_points] . identifier[shape] [ literal[int] ])
identifier[distance_vecs] = identifier[all_points_dist_membership_vector] (
identifier[all_points] ,
identifier[clusterer] . identifier[prediction_data_] . identifier[exemplars] ,
identifier[clusterer] . identifier[prediction_data_] . identifier[dist_metric] )
identifier[outlier_vecs] = identifier[all_points_outlier_membership_vector] (
identifier[clusters] ,
identifier[clusterer] . identifier[condensed_tree_] . identifier[_raw_tree] ,
identifier[clusterer] . identifier[prediction_data_] . identifier[leaf_max_lambdas] ,
identifier[clusterer] . identifier[prediction_data_] . identifier[cluster_tree] )
identifier[in_cluster_probs] = identifier[all_points_prob_in_some_cluster] (
identifier[clusters] ,
identifier[clusterer] . identifier[condensed_tree_] . identifier[_raw_tree] ,
identifier[clusterer] . identifier[prediction_data_] . identifier[leaf_max_lambdas] ,
identifier[clusterer] . identifier[prediction_data_] . identifier[cluster_tree] )
identifier[result] = identifier[distance_vecs] * identifier[outlier_vecs]
identifier[row_sums] = identifier[result] . identifier[sum] ( identifier[axis] = literal[int] )
identifier[result] = identifier[result] / identifier[row_sums] [:, identifier[np] . identifier[newaxis] ]
identifier[result] *= identifier[in_cluster_probs] [:, identifier[np] . identifier[newaxis] ]
keyword[return] identifier[result] | def all_points_membership_vectors(clusterer):
"""Predict soft cluster membership vectors for all points in the
    original dataset the clusterer was trained on. This function is more
    efficient than scoring points one at a time: it makes use of the fact
    that all points are already in the condensed tree, and processes them
    in bulk.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
This method does not work if the clusterer was trained
with ``metric='precomputed'``.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` of the original dataset is a member of
cluster ``j`` is in ``membership_vectors[i, j]``.
See Also
--------
:py:func:`hdbscan.predict.predict`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
clusters = np.array(sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
all_points = clusterer.prediction_data_.raw_data
# When no clusters found, return array of 0's
if clusters.size == 0:
return np.zeros(all_points.shape[0]) # depends on [control=['if'], data=[]]
distance_vecs = all_points_dist_membership_vector(all_points, clusterer.prediction_data_.exemplars, clusterer.prediction_data_.dist_metric)
outlier_vecs = all_points_outlier_membership_vector(clusters, clusterer.condensed_tree_._raw_tree, clusterer.prediction_data_.leaf_max_lambdas, clusterer.prediction_data_.cluster_tree)
in_cluster_probs = all_points_prob_in_some_cluster(clusters, clusterer.condensed_tree_._raw_tree, clusterer.prediction_data_.leaf_max_lambdas, clusterer.prediction_data_.cluster_tree)
result = distance_vecs * outlier_vecs
row_sums = result.sum(axis=1)
result = result / row_sums[:, np.newaxis]
result *= in_cluster_probs[:, np.newaxis]
return result |
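
Typical end-to-end use, following the hdbscan soft-clustering pattern: prediction data must exist at fit time (or be generated afterwards) before the bulk membership call works:

    import numpy as np
    import hdbscan

    data = np.random.RandomState(0).randn(200, 2)
    clusterer = hdbscan.HDBSCAN(min_cluster_size=10, prediction_data=True).fit(data)
    membership = hdbscan.all_points_membership_vectors(clusterer)
    # membership has shape (n_samples, n_clusters); rows need not sum to 1,
    # since the missing mass is the chance the point belongs to no cluster.
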
def create_developer_certificate(self, authorization, body, **kwargs): # noqa: E501
"""Create a new developer certificate to connect to the bootstrap server. # noqa: E501
This REST API is intended to be used by customers to get a developer certificate (a certificate that can be flashed into multiple devices to connect to bootstrap server). **Note:** The number of developer certificates allowed per account is limited. Please see [Using your own certificate authority](/docs/current/mbed-cloud-deploy/instructions-for-factory-setup-and-device-provision.html#using-your-own-certificate-authority-with-mbed-cloud). **Example usage:** curl -X POST \"http://api.us-east-1.mbedcloud.com/v3/developer-certificates\" -H \"accept: application/json\" -H \"Authorization: Bearer THE_ACCESS_TOKEN\" -H \"content-type: application/json\" -d \"{ \\\"name\\\": \\\"THE_CERTIFICATE_NAME\\\", \\\"description\\\": \\\"THE_CERTIFICATE_DESCRIPTION\\\"}\" # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.create_developer_certificate(authorization, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str authorization: Bearer {Access Token}. (required)
:param DeveloperCertificateRequestData body: (required)
:return: DeveloperCertificateResponseData
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.create_developer_certificate_with_http_info(authorization, body, **kwargs) # noqa: E501
else:
(data) = self.create_developer_certificate_with_http_info(authorization, body, **kwargs) # noqa: E501
return data | def function[create_developer_certificate, parameter[self, authorization, body]]:
constant[Create a new developer certificate to connect to the bootstrap server. # noqa: E501
This REST API is intended to be used by customers to get a developer certificate (a certificate that can be flashed into multiple devices to connect to bootstrap server). **Note:** The number of developer certificates allowed per account is limited. Please see [Using your own certificate authority](/docs/current/mbed-cloud-deploy/instructions-for-factory-setup-and-device-provision.html#using-your-own-certificate-authority-with-mbed-cloud). **Example usage:** curl -X POST "http://api.us-east-1.mbedcloud.com/v3/developer-certificates" -H "accept: application/json" -H "Authorization: Bearer THE_ACCESS_TOKEN" -H "content-type: application/json" -d "{ \"name\": \"THE_CERTIFICATE_NAME\", \"description\": \"THE_CERTIFICATE_DESCRIPTION\"}" # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.create_developer_certificate(authorization, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str authorization: Bearer {Access Token}. (required)
:param DeveloperCertificateRequestData body: (required)
:return: DeveloperCertificateResponseData
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[asynchronous]]] begin[:]
return[call[name[self].create_developer_certificate_with_http_info, parameter[name[authorization], name[body]]]] | keyword[def] identifier[create_developer_certificate] ( identifier[self] , identifier[authorization] , identifier[body] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[create_developer_certificate_with_http_info] ( identifier[authorization] , identifier[body] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[create_developer_certificate_with_http_info] ( identifier[authorization] , identifier[body] ,** identifier[kwargs] )
keyword[return] identifier[data] | def create_developer_certificate(self, authorization, body, **kwargs): # noqa: E501
'Create a new developer certificate to connect to the bootstrap server. # noqa: E501\n\n This REST API is intended to be used by customers to get a developer certificate (a certificate that can be flashed into multiple devices to connect to bootstrap server). **Note:** The number of developer certificates allowed per account is limited. Please see [Using your own certificate authority](/docs/current/mbed-cloud-deploy/instructions-for-factory-setup-and-device-provision.html#using-your-own-certificate-authority-with-mbed-cloud). **Example usage:** curl -X POST "http://api.us-east-1.mbedcloud.com/v3/developer-certificates" -H "accept: application/json" -H "Authorization: Bearer THE_ACCESS_TOKEN" -H "content-type: application/json" -d "{ \\"name\\": \\"THE_CERTIFICATE_NAME\\", \\"description\\": \\"THE_CERTIFICATE_DESCRIPTION\\"}" # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass asynchronous=True\n >>> thread = api.create_developer_certificate(authorization, body, asynchronous=True)\n >>> result = thread.get()\n\n :param asynchronous bool\n :param str authorization: Bearer {Access Token}. (required)\n :param DeveloperCertificateRequestData body: (required)\n :return: DeveloperCertificateResponseData\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.create_developer_certificate_with_http_info(authorization, body, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.create_developer_certificate_with_http_info(authorization, body, **kwargs) # noqa: E501
return data |
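
A sketch of calling the generated client, with a placeholder token and body; the constructor arguments for DeveloperCertificateRequestData are assumed from the docstring above:

    body = DeveloperCertificateRequestData(name='my-dev-cert',
                                           description='bench devices')
    cert = api.create_developer_certificate('Bearer THE_ACCESS_TOKEN', body)
    # Or run it without blocking and collect the result later:
    thread = api.create_developer_certificate('Bearer THE_ACCESS_TOKEN', body,
                                              asynchronous=True)
    cert = thread.get()
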
def list_offers(access_token, subscription_id, location, publisher):
'''List available VM image offers from a publisher.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
location (str): Azure data center location. E.g. westus.
publisher (str): Publisher name, e.g. Canonical.
Returns:
HTTP response with JSON list of image offers.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.Compute/',
'locations/', location,
'/publishers/', publisher,
'/artifacttypes/vmimage/offers?api-version=', COMP_API])
return do_get(endpoint, access_token) | def function[list_offers, parameter[access_token, subscription_id, location, publisher]]:
constant[List available VM image offers from a publisher.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
location (str): Azure data center location. E.g. westus.
publisher (str): Publisher name, e.g. Canonical.
Returns:
HTTP response with JSON list of image offers.
]
variable[endpoint] assign[=] call[constant[].join, parameter[list[[<ast.Call object at 0x7da1b04d9c90>, <ast.Constant object at 0x7da1b04db400>, <ast.Name object at 0x7da1b04d8eb0>, <ast.Constant object at 0x7da1b04d8a00>, <ast.Constant object at 0x7da1b04da440>, <ast.Name object at 0x7da1b04d9630>, <ast.Constant object at 0x7da1b04d97e0>, <ast.Name object at 0x7da1b04d93c0>, <ast.Constant object at 0x7da1b04d9a50>, <ast.Name object at 0x7da1b04db640>]]]]
return[call[name[do_get], parameter[name[endpoint], name[access_token]]]] | keyword[def] identifier[list_offers] ( identifier[access_token] , identifier[subscription_id] , identifier[location] , identifier[publisher] ):
literal[string]
identifier[endpoint] = literal[string] . identifier[join] ([ identifier[get_rm_endpoint] (),
literal[string] , identifier[subscription_id] ,
literal[string] ,
literal[string] , identifier[location] ,
literal[string] , identifier[publisher] ,
literal[string] , identifier[COMP_API] ])
keyword[return] identifier[do_get] ( identifier[endpoint] , identifier[access_token] ) | def list_offers(access_token, subscription_id, location, publisher):
"""List available VM image offers from a publisher.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
location (str): Azure data center location. E.g. westus.
publisher (str): Publisher name, e.g. Canonical.
Returns:
HTTP response with JSON list of image offers.
"""
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/', 'locations/', location, '/publishers/', publisher, '/artifacttypes/vmimage/offers?api-version=', COMP_API])
return do_get(endpoint, access_token) |
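A hedged usage sketch for the helper above: the token and subscription id are placeholders, and the return value is treated as a requests-style HTTP response (which is what do_get hands back).

offers = list_offers('<access-token>',
                     '11111111-2222-3333-4444-555555555555',
                     'westus', 'Canonical')
print(offers.json())   # JSON list of image offers for the publisher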
def sanity_check_subsections(self):
"""
    This function goes through the ConfigParser and checks that any options
given in the [SECTION_NAME] section are not also given in any
[SECTION_NAME-SUBSECTION] sections.
"""
# Loop over the sections in the ini file
for section in self.sections():
            # [pegasus_profile] specifically is allowed to be overridden by
# sub-sections
if section == 'pegasus_profile':
continue
# Loop over the sections again
for section2 in self.sections():
# Check if any are subsections of section
if section2.startswith(section + '-'):
                    # Check for duplicate options whenever a subsection exists
self.check_duplicate_options(section, section2,
raise_error=True) | def function[sanity_check_subsections, parameter[self]]:
constant[
This function goes through the ConfigParset and checks that any options
given in the [SECTION_NAME] section are not also given in any
[SECTION_NAME-SUBSECTION] sections.
]
for taget[name[section]] in starred[call[name[self].sections, parameter[]]] begin[:]
if compare[name[section] equal[==] constant[pegasus_profile]] begin[:]
continue
for taget[name[section2]] in starred[call[name[self].sections, parameter[]]] begin[:]
if call[name[section2].startswith, parameter[binary_operation[name[section] + constant[-]]]] begin[:]
call[name[self].check_duplicate_options, parameter[name[section], name[section2]]] | keyword[def] identifier[sanity_check_subsections] ( identifier[self] ):
literal[string]
keyword[for] identifier[section] keyword[in] identifier[self] . identifier[sections] ():
keyword[if] identifier[section] == literal[string] :
keyword[continue]
keyword[for] identifier[section2] keyword[in] identifier[self] . identifier[sections] ():
keyword[if] identifier[section2] . identifier[startswith] ( identifier[section] + literal[string] ):
identifier[self] . identifier[check_duplicate_options] ( identifier[section] , identifier[section2] ,
identifier[raise_error] = keyword[True] ) | def sanity_check_subsections(self):
"""
This function goes through the ConfigParset and checks that any options
given in the [SECTION_NAME] section are not also given in any
[SECTION_NAME-SUBSECTION] sections.
"""
# Loop over the sections in the ini file
for section in self.sections():
            # [pegasus_profile] specifically is allowed to be overridden by
# sub-sections
if section == 'pegasus_profile':
continue # depends on [control=['if'], data=[]]
# Loop over the sections again
for section2 in self.sections():
# Check if any are subsections of section
if section2.startswith(section + '-'):
# Check for duplicate options whenever this exists
self.check_duplicate_options(section, section2, raise_error=True) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['section2']] # depends on [control=['for'], data=['section']] |
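To make the duplicate check concrete, here is a hypothetical ini layout (section and option names invented) that this method rejects, because [engine-gpu] re-defines an option already set in its parent [engine] section:

# [engine]
# threads = 4
# [engine-gpu]
# threads = 8   <- duplicate; check_duplicate_options(raise_error=True) raises
cp.sanity_check_subsections()   # cp is the parsed configuration instance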
def insert_entity(self, table_name, entity, timeout=None):
'''
Inserts a new entity into the table. Throws if an entity with the same
PartitionKey and RowKey already exists.
When inserting an entity into a table, you must specify values for the
PartitionKey and RowKey system properties. Together, these properties
form the primary key and must be unique within the table. Both the
PartitionKey and RowKey values must be string values; each key value may
be up to 64 KB in size. If you are using an integer value for the key
value, you should convert the integer to a fixed-width string, because
they are canonically sorted. For example, you should convert the value
1 to 0000001 to ensure proper sorting.
:param str table_name:
The name of the table to insert the entity into.
:param entity:
The entity to insert. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: a dict or :class:`~azure.storage.table.models.Entity`
:param int timeout:
The server timeout, expressed in seconds.
:return: The etag of the inserted entity.
:rtype: str
'''
_validate_not_none('table_name', table_name)
request = _insert_entity(entity)
request.host = self._get_host()
request.path = '/' + _to_str(table_name)
request.query += [('timeout', _int_to_str(timeout))]
response = self._perform_request(request)
return _extract_etag(response) | def function[insert_entity, parameter[self, table_name, entity, timeout]]:
constant[
Inserts a new entity into the table. Throws if an entity with the same
PartitionKey and RowKey already exists.
When inserting an entity into a table, you must specify values for the
PartitionKey and RowKey system properties. Together, these properties
form the primary key and must be unique within the table. Both the
PartitionKey and RowKey values must be string values; each key value may
be up to 64 KB in size. If you are using an integer value for the key
value, you should convert the integer to a fixed-width string, because
they are canonically sorted. For example, you should convert the value
1 to 0000001 to ensure proper sorting.
:param str table_name:
The name of the table to insert the entity into.
:param entity:
The entity to insert. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: a dict or :class:`~azure.storage.table.models.Entity`
:param int timeout:
The server timeout, expressed in seconds.
:return: The etag of the inserted entity.
:rtype: str
]
call[name[_validate_not_none], parameter[constant[table_name], name[table_name]]]
variable[request] assign[=] call[name[_insert_entity], parameter[name[entity]]]
name[request].host assign[=] call[name[self]._get_host, parameter[]]
name[request].path assign[=] binary_operation[constant[/] + call[name[_to_str], parameter[name[table_name]]]]
<ast.AugAssign object at 0x7da18dc980d0>
variable[response] assign[=] call[name[self]._perform_request, parameter[name[request]]]
return[call[name[_extract_etag], parameter[name[response]]]] | keyword[def] identifier[insert_entity] ( identifier[self] , identifier[table_name] , identifier[entity] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[_validate_not_none] ( literal[string] , identifier[table_name] )
identifier[request] = identifier[_insert_entity] ( identifier[entity] )
identifier[request] . identifier[host] = identifier[self] . identifier[_get_host] ()
identifier[request] . identifier[path] = literal[string] + identifier[_to_str] ( identifier[table_name] )
identifier[request] . identifier[query] +=[( literal[string] , identifier[_int_to_str] ( identifier[timeout] ))]
identifier[response] = identifier[self] . identifier[_perform_request] ( identifier[request] )
keyword[return] identifier[_extract_etag] ( identifier[response] ) | def insert_entity(self, table_name, entity, timeout=None):
"""
Inserts a new entity into the table. Throws if an entity with the same
PartitionKey and RowKey already exists.
When inserting an entity into a table, you must specify values for the
PartitionKey and RowKey system properties. Together, these properties
form the primary key and must be unique within the table. Both the
PartitionKey and RowKey values must be string values; each key value may
be up to 64 KB in size. If you are using an integer value for the key
value, you should convert the integer to a fixed-width string, because
they are canonically sorted. For example, you should convert the value
1 to 0000001 to ensure proper sorting.
:param str table_name:
The name of the table to insert the entity into.
:param entity:
The entity to insert. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: a dict or :class:`~azure.storage.table.models.Entity`
:param int timeout:
The server timeout, expressed in seconds.
:return: The etag of the inserted entity.
:rtype: str
"""
_validate_not_none('table_name', table_name)
request = _insert_entity(entity)
request.host = self._get_host()
request.path = '/' + _to_str(table_name)
request.query += [('timeout', _int_to_str(timeout))]
response = self._perform_request(request)
return _extract_etag(response) |
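A short sketch of the fixed-width-key advice from the docstring; table_service stands in for an instantiated table client and the table name is a placeholder.

entity = {
    'PartitionKey': 'tasks',
    'RowKey': '{:07d}'.format(1),   # '0000001' keeps string sort == numeric sort
    'description': 'take out the trash',
}
etag = table_service.insert_entity('mytable', entity)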
def _parse(self, chord):
""" parse a chord
:param str chord: Name of chord.
"""
root, quality, appended, on = parse(chord)
self._root = root
self._quality = quality
self._appended = appended
self._on = on | def function[_parse, parameter[self, chord]]:
constant[ parse a chord
:param str chord: Name of chord.
]
<ast.Tuple object at 0x7da1b0c60cd0> assign[=] call[name[parse], parameter[name[chord]]]
name[self]._root assign[=] name[root]
name[self]._quality assign[=] name[quality]
name[self]._appended assign[=] name[appended]
name[self]._on assign[=] name[on] | keyword[def] identifier[_parse] ( identifier[self] , identifier[chord] ):
literal[string]
identifier[root] , identifier[quality] , identifier[appended] , identifier[on] = identifier[parse] ( identifier[chord] )
identifier[self] . identifier[_root] = identifier[root]
identifier[self] . identifier[_quality] = identifier[quality]
identifier[self] . identifier[_appended] = identifier[appended]
identifier[self] . identifier[_on] = identifier[on] | def _parse(self, chord):
""" parse a chord
:param str chord: Name of chord.
"""
(root, quality, appended, on) = parse(chord)
self._root = root
self._quality = quality
self._appended = appended
self._on = on |
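Since _parse simply fans the tuple from parse() out into four attributes, a slash chord would roughly decompose as sketched below (assuming the usual Chord wrapper class around this method; attribute value types may differ):

c = Chord('C#m7/G#')   # calls self._parse('C#m7/G#') internally
# root 'C#', quality m7, no appended notes, bass note 'G#'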
def debug_show_reconstructed_similarity(
self,
data3d=None,
voxelsize=None,
seeds=None,
area_weight=1,
hard_constraints=True,
show=True,
bins=20,
slice_number=None,
):
"""
Show tlinks.
:param data3d: ndarray with input data
:param voxelsize:
:param seeds:
:param area_weight:
:param hard_constraints:
:param show:
        :param bins: number of histogram bins
:param slice_number:
:return:
"""
unariesalt = self.debug_get_reconstructed_similarity(
data3d,
voxelsize=voxelsize,
seeds=seeds,
area_weight=area_weight,
hard_constraints=hard_constraints,
return_unariesalt=True,
)
self._debug_show_unariesalt(
unariesalt, show=show, bins=bins, slice_number=slice_number
) | def function[debug_show_reconstructed_similarity, parameter[self, data3d, voxelsize, seeds, area_weight, hard_constraints, show, bins, slice_number]]:
constant[
Show tlinks.
:param data3d: ndarray with input data
:param voxelsize:
:param seeds:
:param area_weight:
:param hard_constraints:
:param show:
        :param bins: number of histogram bins
:param slice_number:
:return:
]
variable[unariesalt] assign[=] call[name[self].debug_get_reconstructed_similarity, parameter[name[data3d]]]
call[name[self]._debug_show_unariesalt, parameter[name[unariesalt]]] | keyword[def] identifier[debug_show_reconstructed_similarity] (
identifier[self] ,
identifier[data3d] = keyword[None] ,
identifier[voxelsize] = keyword[None] ,
identifier[seeds] = keyword[None] ,
identifier[area_weight] = literal[int] ,
identifier[hard_constraints] = keyword[True] ,
identifier[show] = keyword[True] ,
identifier[bins] = literal[int] ,
identifier[slice_number] = keyword[None] ,
):
literal[string]
identifier[unariesalt] = identifier[self] . identifier[debug_get_reconstructed_similarity] (
identifier[data3d] ,
identifier[voxelsize] = identifier[voxelsize] ,
identifier[seeds] = identifier[seeds] ,
identifier[area_weight] = identifier[area_weight] ,
identifier[hard_constraints] = identifier[hard_constraints] ,
identifier[return_unariesalt] = keyword[True] ,
)
identifier[self] . identifier[_debug_show_unariesalt] (
identifier[unariesalt] , identifier[show] = identifier[show] , identifier[bins] = identifier[bins] , identifier[slice_number] = identifier[slice_number]
) | def debug_show_reconstructed_similarity(self, data3d=None, voxelsize=None, seeds=None, area_weight=1, hard_constraints=True, show=True, bins=20, slice_number=None):
"""
Show tlinks.
:param data3d: ndarray with input data
:param voxelsize:
:param seeds:
:param area_weight:
:param hard_constraints:
:param show:
        :param bins: number of histogram bins
:param slice_number:
:return:
"""
unariesalt = self.debug_get_reconstructed_similarity(data3d, voxelsize=voxelsize, seeds=seeds, area_weight=area_weight, hard_constraints=hard_constraints, return_unariesalt=True)
self._debug_show_unariesalt(unariesalt, show=show, bins=bins, slice_number=slice_number) |
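A hedged call sketch: gc stands in for the segmentation object owning this method, and the image, voxel size, and seeds are placeholders. The method only forwards to debug_get_reconstructed_similarity and then renders the unary terms.

gc.debug_show_reconstructed_similarity(data3d=img3d, voxelsize=(1.0, 1.0, 1.0),
                                       seeds=seeds, bins=30, slice_number=12)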
def get_or_add(self, reltype, target_part):
"""
Return relationship of *reltype* to *target_part*, newly added if not
already present in collection.
"""
rel = self._get_matching(reltype, target_part)
if rel is None:
rId = self._next_rId
rel = self.add_relationship(reltype, target_part, rId)
return rel | def function[get_or_add, parameter[self, reltype, target_part]]:
constant[
Return relationship of *reltype* to *target_part*, newly added if not
already present in collection.
]
variable[rel] assign[=] call[name[self]._get_matching, parameter[name[reltype], name[target_part]]]
if compare[name[rel] is constant[None]] begin[:]
variable[rId] assign[=] name[self]._next_rId
variable[rel] assign[=] call[name[self].add_relationship, parameter[name[reltype], name[target_part], name[rId]]]
return[name[rel]] | keyword[def] identifier[get_or_add] ( identifier[self] , identifier[reltype] , identifier[target_part] ):
literal[string]
identifier[rel] = identifier[self] . identifier[_get_matching] ( identifier[reltype] , identifier[target_part] )
keyword[if] identifier[rel] keyword[is] keyword[None] :
identifier[rId] = identifier[self] . identifier[_next_rId]
identifier[rel] = identifier[self] . identifier[add_relationship] ( identifier[reltype] , identifier[target_part] , identifier[rId] )
keyword[return] identifier[rel] | def get_or_add(self, reltype, target_part):
"""
Return relationship of *reltype* to *target_part*, newly added if not
already present in collection.
"""
rel = self._get_matching(reltype, target_part)
if rel is None:
rId = self._next_rId
rel = self.add_relationship(reltype, target_part, rId) # depends on [control=['if'], data=['rel']]
return rel |
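The method is effectively an idempotent cache over relationships, which the sketch below illustrates (RT.IMAGE and image_part are invented names):

rel_a = rels.get_or_add(RT.IMAGE, image_part)   # created with the next free rId
rel_b = rels.get_or_add(RT.IMAGE, image_part)   # matched, no second add
assert rel_a is rel_b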
def avail_locations(call=None):
'''
Return available Packet datacenter locations.
CLI Example:
.. code-block:: bash
salt-cloud --list-locations packet-provider
salt-cloud -f avail_locations packet-provider
'''
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
vm_ = get_configured_provider()
manager = packet.Manager(auth_token=vm_['token'])
ret = {}
for facility in manager.list_facilities():
ret[facility.name] = facility.__dict__
return ret | def function[avail_locations, parameter[call]]:
constant[
Return available Packet datacenter locations.
CLI Example:
.. code-block:: bash
salt-cloud --list-locations packet-provider
salt-cloud -f avail_locations packet-provider
]
if compare[name[call] equal[==] constant[action]] begin[:]
<ast.Raise object at 0x7da1b21e58a0>
variable[vm_] assign[=] call[name[get_configured_provider], parameter[]]
variable[manager] assign[=] call[name[packet].Manager, parameter[]]
variable[ret] assign[=] dictionary[[], []]
for taget[name[facility]] in starred[call[name[manager].list_facilities, parameter[]]] begin[:]
call[name[ret]][name[facility].name] assign[=] name[facility].__dict__
return[name[ret]] | keyword[def] identifier[avail_locations] ( identifier[call] = keyword[None] ):
literal[string]
keyword[if] identifier[call] == literal[string] :
keyword[raise] identifier[SaltCloudException] (
literal[string]
)
identifier[vm_] = identifier[get_configured_provider] ()
identifier[manager] = identifier[packet] . identifier[Manager] ( identifier[auth_token] = identifier[vm_] [ literal[string] ])
identifier[ret] ={}
keyword[for] identifier[facility] keyword[in] identifier[manager] . identifier[list_facilities] ():
identifier[ret] [ identifier[facility] . identifier[name] ]= identifier[facility] . identifier[__dict__]
keyword[return] identifier[ret] | def avail_locations(call=None):
"""
Return available Packet datacenter locations.
CLI Example:
.. code-block:: bash
salt-cloud --list-locations packet-provider
salt-cloud -f avail_locations packet-provider
"""
if call == 'action':
raise SaltCloudException('The avail_locations function must be called with -f or --function.') # depends on [control=['if'], data=[]]
vm_ = get_configured_provider()
manager = packet.Manager(auth_token=vm_['token'])
ret = {}
for facility in manager.list_facilities():
ret[facility.name] = facility.__dict__ # depends on [control=['for'], data=['facility']]
return ret |
def move(self, d, add_tile=True):
"""
move and return the move score
"""
if d == Board.LEFT or d == Board.RIGHT:
chg, get = self.setLine, self.getLine
elif d == Board.UP or d == Board.DOWN:
chg, get = self.setCol, self.getCol
else:
return 0
moved = False
score = 0
for i in self.__size_range:
# save the original line/col
origin = get(i)
# move it
line = self.__moveLineOrCol(origin, d)
# merge adjacent tiles
collapsed, pts = self.__collapseLineOrCol(line, d)
# move it again (for when tiles are merged, because empty cells are
# inserted in the middle of the line/col)
new = self.__moveLineOrCol(collapsed, d)
# set it back in the board
chg(i, new)
# did it change?
if origin != new:
moved = True
score += pts
# don't add a new tile if nothing changed
if moved and add_tile:
self.addTile()
return score | def function[move, parameter[self, d, add_tile]]:
constant[
move and return the move score
]
if <ast.BoolOp object at 0x7da1b07af4f0> begin[:]
<ast.Tuple object at 0x7da1b07ad210> assign[=] tuple[[<ast.Attribute object at 0x7da1b07acd30>, <ast.Attribute object at 0x7da1b07acd60>]]
variable[moved] assign[=] constant[False]
variable[score] assign[=] constant[0]
for taget[name[i]] in starred[name[self].__size_range] begin[:]
variable[origin] assign[=] call[name[get], parameter[name[i]]]
variable[line] assign[=] call[name[self].__moveLineOrCol, parameter[name[origin], name[d]]]
<ast.Tuple object at 0x7da1b07adea0> assign[=] call[name[self].__collapseLineOrCol, parameter[name[line], name[d]]]
variable[new] assign[=] call[name[self].__moveLineOrCol, parameter[name[collapsed], name[d]]]
call[name[chg], parameter[name[i], name[new]]]
if compare[name[origin] not_equal[!=] name[new]] begin[:]
variable[moved] assign[=] constant[True]
<ast.AugAssign object at 0x7da1b0797070>
if <ast.BoolOp object at 0x7da1b0796020> begin[:]
call[name[self].addTile, parameter[]]
return[name[score]] | keyword[def] identifier[move] ( identifier[self] , identifier[d] , identifier[add_tile] = keyword[True] ):
literal[string]
keyword[if] identifier[d] == identifier[Board] . identifier[LEFT] keyword[or] identifier[d] == identifier[Board] . identifier[RIGHT] :
identifier[chg] , identifier[get] = identifier[self] . identifier[setLine] , identifier[self] . identifier[getLine]
keyword[elif] identifier[d] == identifier[Board] . identifier[UP] keyword[or] identifier[d] == identifier[Board] . identifier[DOWN] :
identifier[chg] , identifier[get] = identifier[self] . identifier[setCol] , identifier[self] . identifier[getCol]
keyword[else] :
keyword[return] literal[int]
identifier[moved] = keyword[False]
identifier[score] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[self] . identifier[__size_range] :
identifier[origin] = identifier[get] ( identifier[i] )
identifier[line] = identifier[self] . identifier[__moveLineOrCol] ( identifier[origin] , identifier[d] )
identifier[collapsed] , identifier[pts] = identifier[self] . identifier[__collapseLineOrCol] ( identifier[line] , identifier[d] )
identifier[new] = identifier[self] . identifier[__moveLineOrCol] ( identifier[collapsed] , identifier[d] )
identifier[chg] ( identifier[i] , identifier[new] )
keyword[if] identifier[origin] != identifier[new] :
identifier[moved] = keyword[True]
identifier[score] += identifier[pts]
keyword[if] identifier[moved] keyword[and] identifier[add_tile] :
identifier[self] . identifier[addTile] ()
keyword[return] identifier[score] | def move(self, d, add_tile=True):
"""
move and return the move score
"""
if d == Board.LEFT or d == Board.RIGHT:
(chg, get) = (self.setLine, self.getLine) # depends on [control=['if'], data=[]]
elif d == Board.UP or d == Board.DOWN:
(chg, get) = (self.setCol, self.getCol) # depends on [control=['if'], data=[]]
else:
return 0
moved = False
score = 0
for i in self.__size_range:
# save the original line/col
origin = get(i)
# move it
line = self.__moveLineOrCol(origin, d)
# merge adjacent tiles
(collapsed, pts) = self.__collapseLineOrCol(line, d)
# move it again (for when tiles are merged, because empty cells are
# inserted in the middle of the line/col)
new = self.__moveLineOrCol(collapsed, d)
# set it back in the board
chg(i, new)
# did it change?
if origin != new:
moved = True # depends on [control=['if'], data=[]]
score += pts # depends on [control=['for'], data=['i']]
# don't add a new tile if nothing changed
if moved and add_tile:
self.addTile() # depends on [control=['if'], data=[]]
return score |
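A hedged play sketch, assuming the enclosing Board class seeds itself with starting tiles on construction:

board = Board()
points = board.move(Board.LEFT)                  # merge pass; spawns a tile if it moved
points += board.move(Board.UP, add_tile=False)   # same pass without spawning a tile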
def _parse_metadata(self, message):
"""
Sets metadata in Legobot message
Args:
            message (dict): Full message from Discord websocket connection
Returns:
Legobot.Metadata
"""
metadata = Metadata(source=self.actor_urn).__dict__
if 'author' in message['d']:
metadata['source_user'] = message['d']['author']['username']
else:
metadata['source_user'] = None
if 'channel_id' in message['d']:
metadata['source_channel'] = message['d']['channel_id']
else:
metadata['source_channel'] = None
metadata['user_id'] = metadata['source_user']
metadata['display_name'] = metadata['source_user']
metadata['source_connector'] = 'discord'
return metadata | def function[_parse_metadata, parameter[self, message]]:
constant[
Sets metadata in Legobot message
Args:
            message (dict): Full message from Discord websocket connection
Returns:
Legobot.Metadata
]
variable[metadata] assign[=] call[name[Metadata], parameter[]].__dict__
if compare[constant[author] in call[name[message]][constant[d]]] begin[:]
call[name[metadata]][constant[source_user]] assign[=] call[call[call[name[message]][constant[d]]][constant[author]]][constant[username]]
if compare[constant[channel_id] in call[name[message]][constant[d]]] begin[:]
call[name[metadata]][constant[source_channel]] assign[=] call[call[name[message]][constant[d]]][constant[channel_id]]
call[name[metadata]][constant[user_id]] assign[=] call[name[metadata]][constant[source_user]]
call[name[metadata]][constant[display_name]] assign[=] call[name[metadata]][constant[source_user]]
call[name[metadata]][constant[source_connector]] assign[=] constant[discord]
return[name[metadata]] | keyword[def] identifier[_parse_metadata] ( identifier[self] , identifier[message] ):
literal[string]
identifier[metadata] = identifier[Metadata] ( identifier[source] = identifier[self] . identifier[actor_urn] ). identifier[__dict__]
keyword[if] literal[string] keyword[in] identifier[message] [ literal[string] ]:
identifier[metadata] [ literal[string] ]= identifier[message] [ literal[string] ][ literal[string] ][ literal[string] ]
keyword[else] :
identifier[metadata] [ literal[string] ]= keyword[None]
keyword[if] literal[string] keyword[in] identifier[message] [ literal[string] ]:
identifier[metadata] [ literal[string] ]= identifier[message] [ literal[string] ][ literal[string] ]
keyword[else] :
identifier[metadata] [ literal[string] ]= keyword[None]
identifier[metadata] [ literal[string] ]= identifier[metadata] [ literal[string] ]
identifier[metadata] [ literal[string] ]= identifier[metadata] [ literal[string] ]
identifier[metadata] [ literal[string] ]= literal[string]
keyword[return] identifier[metadata] | def _parse_metadata(self, message):
"""
Sets metadata in Legobot message
Args:
            message (dict): Full message from Discord websocket connection
Returns:
Legobot.Metadata
"""
metadata = Metadata(source=self.actor_urn).__dict__
if 'author' in message['d']:
metadata['source_user'] = message['d']['author']['username'] # depends on [control=['if'], data=[]]
else:
metadata['source_user'] = None
if 'channel_id' in message['d']:
metadata['source_channel'] = message['d']['channel_id'] # depends on [control=['if'], data=[]]
else:
metadata['source_channel'] = None
metadata['user_id'] = metadata['source_user']
metadata['display_name'] = metadata['source_user']
metadata['source_connector'] = 'discord'
return metadata |
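Tracing a minimal Discord payload through the method shows which fields land where (the connector instance lego is a placeholder):

raw = {'d': {'author': {'username': 'alice'}, 'channel_id': '42'}}
meta = lego._parse_metadata(raw)
# meta['source_user'] == 'alice'     (also copied to user_id and display_name)
# meta['source_channel'] == '42'
# meta['source_connector'] == 'discord'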
def rgb_to_hex(rgb):
"""
Utility function to convert (r,g,b) triples to hex.
http://ageo.co/1CFxXpO
Args:
rgb (tuple): A sequence of RGB values in the
range 0-255 or 0-1.
Returns:
str: The hex code for the colour.
"""
r, g, b = rgb[:3]
if (r < 0) or (g < 0) or (b < 0):
raise Exception("RGB values must all be 0-255 or 0-1")
if (r > 255) or (g > 255) or (b > 255):
raise Exception("RGB values must all be 0-255 or 0-1")
if (0 < r < 1) or (0 < g < 1) or (0 < b < 1):
if (r > 1) or (g > 1) or (b > 1):
raise Exception("RGB values must all be 0-255 or 0-1")
if (0 <= r <= 1) and (0 <= g <= 1) and (0 <= b <= 1):
rgb = tuple([int(round(val * 255)) for val in [r, g, b]])
else:
rgb = (int(r), int(g), int(b))
result = '#%02x%02x%02x' % rgb
return result.lower() | def function[rgb_to_hex, parameter[rgb]]:
constant[
Utility function to convert (r,g,b) triples to hex.
http://ageo.co/1CFxXpO
Args:
rgb (tuple): A sequence of RGB values in the
range 0-255 or 0-1.
Returns:
str: The hex code for the colour.
]
<ast.Tuple object at 0x7da1b0314e50> assign[=] call[name[rgb]][<ast.Slice object at 0x7da1b0317070>]
if <ast.BoolOp object at 0x7da1b0314df0> begin[:]
<ast.Raise object at 0x7da1b0314070>
if <ast.BoolOp object at 0x7da1b0317a00> begin[:]
<ast.Raise object at 0x7da1b0317190>
if <ast.BoolOp object at 0x7da1b0315450> begin[:]
if <ast.BoolOp object at 0x7da1b0314b50> begin[:]
<ast.Raise object at 0x7da1b0317940>
if <ast.BoolOp object at 0x7da1b03161a0> begin[:]
variable[rgb] assign[=] call[name[tuple], parameter[<ast.ListComp object at 0x7da1b0316950>]]
variable[result] assign[=] binary_operation[constant[#%02x%02x%02x] <ast.Mod object at 0x7da2590d6920> name[rgb]]
return[call[name[result].lower, parameter[]]] | keyword[def] identifier[rgb_to_hex] ( identifier[rgb] ):
literal[string]
identifier[r] , identifier[g] , identifier[b] = identifier[rgb] [: literal[int] ]
keyword[if] ( identifier[r] < literal[int] ) keyword[or] ( identifier[g] < literal[int] ) keyword[or] ( identifier[b] < literal[int] ):
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] ( identifier[r] > literal[int] ) keyword[or] ( identifier[g] > literal[int] ) keyword[or] ( identifier[b] > literal[int] ):
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] ( literal[int] < identifier[r] < literal[int] ) keyword[or] ( literal[int] < identifier[g] < literal[int] ) keyword[or] ( literal[int] < identifier[b] < literal[int] ):
keyword[if] ( identifier[r] > literal[int] ) keyword[or] ( identifier[g] > literal[int] ) keyword[or] ( identifier[b] > literal[int] ):
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] ( literal[int] <= identifier[r] <= literal[int] ) keyword[and] ( literal[int] <= identifier[g] <= literal[int] ) keyword[and] ( literal[int] <= identifier[b] <= literal[int] ):
identifier[rgb] = identifier[tuple] ([ identifier[int] ( identifier[round] ( identifier[val] * literal[int] )) keyword[for] identifier[val] keyword[in] [ identifier[r] , identifier[g] , identifier[b] ]])
keyword[else] :
identifier[rgb] =( identifier[int] ( identifier[r] ), identifier[int] ( identifier[g] ), identifier[int] ( identifier[b] ))
identifier[result] = literal[string] % identifier[rgb]
keyword[return] identifier[result] . identifier[lower] () | def rgb_to_hex(rgb):
"""
Utility function to convert (r,g,b) triples to hex.
http://ageo.co/1CFxXpO
Args:
rgb (tuple): A sequence of RGB values in the
range 0-255 or 0-1.
Returns:
str: The hex code for the colour.
"""
(r, g, b) = rgb[:3]
if r < 0 or g < 0 or b < 0:
raise Exception('RGB values must all be 0-255 or 0-1') # depends on [control=['if'], data=[]]
if r > 255 or g > 255 or b > 255:
raise Exception('RGB values must all be 0-255 or 0-1') # depends on [control=['if'], data=[]]
if 0 < r < 1 or 0 < g < 1 or 0 < b < 1:
if r > 1 or g > 1 or b > 1:
raise Exception('RGB values must all be 0-255 or 0-1') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if 0 <= r <= 1 and 0 <= g <= 1 and (0 <= b <= 1):
rgb = tuple([int(round(val * 255)) for val in [r, g, b]]) # depends on [control=['if'], data=[]]
else:
rgb = (int(r), int(g), int(b))
result = '#%02x%02x%02x' % rgb
return result.lower() |
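Worked examples of the three branches above (values chosen for illustration):

rgb_to_hex((255, 128, 0))     # -> '#ff8000' via the integer branch
rgb_to_hex((1.0, 0.5, 0.0))   # -> '#ff8000' after scaling the 0-1 branch
rgb_to_hex((0.5, 200, 10))    # raises: 0-1 and 0-255 values mixed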
def update_server_cert(self, cert_name, new_cert_name=None,
new_path=None):
"""
Updates the name and/or the path of the specified server certificate.
:type cert_name: string
:param cert_name: The name of the server certificate that you want
to update.
:type new_cert_name: string
:param new_cert_name: The new name for the server certificate.
Include this only if you are updating the
server certificate's name.
:type new_path: string
:param new_path: If provided, the path of the certificate will be
changed to this path.
"""
params = {'ServerCertificateName' : cert_name}
if new_cert_name:
params['NewServerCertificateName'] = new_cert_name
if new_path:
params['NewPath'] = new_path
return self.get_response('UpdateServerCertificate', params) | def function[update_server_cert, parameter[self, cert_name, new_cert_name, new_path]]:
constant[
Updates the name and/or the path of the specified server certificate.
:type cert_name: string
:param cert_name: The name of the server certificate that you want
to update.
:type new_cert_name: string
:param new_cert_name: The new name for the server certificate.
Include this only if you are updating the
server certificate's name.
:type new_path: string
:param new_path: If provided, the path of the certificate will be
changed to this path.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b26edf30>], [<ast.Name object at 0x7da1b26ee650>]]
if name[new_cert_name] begin[:]
call[name[params]][constant[NewServerCertificateName]] assign[=] name[new_cert_name]
if name[new_path] begin[:]
call[name[params]][constant[NewPath]] assign[=] name[new_path]
return[call[name[self].get_response, parameter[constant[UpdateServerCertificate], name[params]]]] | keyword[def] identifier[update_server_cert] ( identifier[self] , identifier[cert_name] , identifier[new_cert_name] = keyword[None] ,
identifier[new_path] = keyword[None] ):
literal[string]
identifier[params] ={ literal[string] : identifier[cert_name] }
keyword[if] identifier[new_cert_name] :
identifier[params] [ literal[string] ]= identifier[new_cert_name]
keyword[if] identifier[new_path] :
identifier[params] [ literal[string] ]= identifier[new_path]
keyword[return] identifier[self] . identifier[get_response] ( literal[string] , identifier[params] ) | def update_server_cert(self, cert_name, new_cert_name=None, new_path=None):
"""
Updates the name and/or the path of the specified server certificate.
:type cert_name: string
:param cert_name: The name of the server certificate that you want
to update.
:type new_cert_name: string
:param new_cert_name: The new name for the server certificate.
Include this only if you are updating the
server certificate's name.
:type new_path: string
:param new_path: If provided, the path of the certificate will be
changed to this path.
"""
params = {'ServerCertificateName': cert_name}
if new_cert_name:
params['NewServerCertificateName'] = new_cert_name # depends on [control=['if'], data=[]]
if new_path:
params['NewPath'] = new_path # depends on [control=['if'], data=[]]
return self.get_response('UpdateServerCertificate', params) |
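A hedged call sketch; the connection object, certificate names, and path below are placeholders:

conn.update_server_cert('legacy-cert',
                        new_cert_name='renewed-cert',
                        new_path='/prod/')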
def get_SZ(self):
"""Get the S and Z matrices using the current parameters.
"""
if self.psd_integrator is None:
(self._S, self._Z) = self.get_SZ_orient()
else:
scatter_outdated = self._scatter_signature != (self.thet0,
self.thet, self.phi0, self.phi, self.alpha, self.beta,
self.orient)
psd_outdated = self._psd_signature != (self.psd,)
outdated = scatter_outdated or psd_outdated
if outdated:
(self._S, self._Z) = self.psd_integrator(self.psd,
self.get_geometry())
self._set_scatter_signature()
self._set_psd_signature()
return (self._S, self._Z) | def function[get_SZ, parameter[self]]:
constant[Get the S and Z matrices using the current parameters.
]
if compare[name[self].psd_integrator is constant[None]] begin[:]
<ast.Tuple object at 0x7da2044c1f90> assign[=] call[name[self].get_SZ_orient, parameter[]]
return[tuple[[<ast.Attribute object at 0x7da2044c1a20>, <ast.Attribute object at 0x7da2044c2170>]]] | keyword[def] identifier[get_SZ] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[psd_integrator] keyword[is] keyword[None] :
( identifier[self] . identifier[_S] , identifier[self] . identifier[_Z] )= identifier[self] . identifier[get_SZ_orient] ()
keyword[else] :
identifier[scatter_outdated] = identifier[self] . identifier[_scatter_signature] !=( identifier[self] . identifier[thet0] ,
identifier[self] . identifier[thet] , identifier[self] . identifier[phi0] , identifier[self] . identifier[phi] , identifier[self] . identifier[alpha] , identifier[self] . identifier[beta] ,
identifier[self] . identifier[orient] )
identifier[psd_outdated] = identifier[self] . identifier[_psd_signature] !=( identifier[self] . identifier[psd] ,)
identifier[outdated] = identifier[scatter_outdated] keyword[or] identifier[psd_outdated]
keyword[if] identifier[outdated] :
( identifier[self] . identifier[_S] , identifier[self] . identifier[_Z] )= identifier[self] . identifier[psd_integrator] ( identifier[self] . identifier[psd] ,
identifier[self] . identifier[get_geometry] ())
identifier[self] . identifier[_set_scatter_signature] ()
identifier[self] . identifier[_set_psd_signature] ()
keyword[return] ( identifier[self] . identifier[_S] , identifier[self] . identifier[_Z] ) | def get_SZ(self):
"""Get the S and Z matrices using the current parameters.
"""
if self.psd_integrator is None:
(self._S, self._Z) = self.get_SZ_orient() # depends on [control=['if'], data=[]]
else:
scatter_outdated = self._scatter_signature != (self.thet0, self.thet, self.phi0, self.phi, self.alpha, self.beta, self.orient)
psd_outdated = self._psd_signature != (self.psd,)
outdated = scatter_outdated or psd_outdated
if outdated:
(self._S, self._Z) = self.psd_integrator(self.psd, self.get_geometry())
self._set_scatter_signature()
self._set_psd_signature() # depends on [control=['if'], data=[]]
return (self._S, self._Z) |
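The signature comparison above only matters when a psd_integrator is set; in that mode, repeated calls with unchanged geometry and PSD reuse the cached matrices, as sketched here (scatterer is a placeholder instance):

S, Z = scatterer.get_SZ()    # integrates and caches when signatures are stale
S2, Z2 = scatterer.get_SZ()  # nothing changed -> cached (self._S, self._Z)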
def m2m_changed(sender, instance, action, reverse, model, pk_set, using, **kwargs):
"""https://docs.djangoproject.com/es/1.10/ref/signals/#m2m-changed"""
try:
with transaction.atomic():
if not should_audit(instance):
return False
if action not in ("post_add", "post_remove", "post_clear"):
return False
object_json_repr = serializers.serialize("json", [instance])
if reverse:
event_type = CRUDEvent.M2M_CHANGE_REV
# add reverse M2M changes to event. must use json lib because
# django serializers ignore extra fields.
tmp_repr = json.loads(object_json_repr)
m2m_rev_field = _m2m_rev_field_name(instance._meta.concrete_model, model)
related_instances = getattr(instance, m2m_rev_field).all()
related_ids = [r.pk for r in related_instances]
tmp_repr[0]['m2m_rev_model'] = force_text(model._meta)
tmp_repr[0]['m2m_rev_pks'] = related_ids
tmp_repr[0]['m2m_rev_action'] = action
object_json_repr = json.dumps(tmp_repr)
else:
event_type = CRUDEvent.M2M_CHANGE
# user
try:
user = get_current_user()
# validate that the user still exists
user = get_user_model().objects.get(pk=user.pk)
except:
user = None
if isinstance(user, AnonymousUser):
user = None
c_t = ContentType.objects.get_for_model(instance)
sid = transaction.savepoint()
try:
with transaction.atomic():
crud_event = CRUDEvent.objects.create(
event_type=event_type,
object_repr=str(instance),
object_json_repr=object_json_repr,
content_type_id=c_t.id,
object_id=instance.pk,
user_id=getattr(user, 'id', None),
datetime=timezone.now(),
user_pk_as_string=str(user.pk) if user else user
)
except Exception as e:
logger.exception(
"easy audit had a pre-save exception on CRUDEvent creation. instance: {}, instance pk: {}".format(
instance, instance.pk))
transaction.savepoint_rollback(sid)
except Exception:
logger.exception('easy audit had an m2m-changed exception.') | def function[m2m_changed, parameter[sender, instance, action, reverse, model, pk_set, using]]:
constant[https://docs.djangoproject.com/es/1.10/ref/signals/#m2m-changed]
<ast.Try object at 0x7da1b1679960> | keyword[def] identifier[m2m_changed] ( identifier[sender] , identifier[instance] , identifier[action] , identifier[reverse] , identifier[model] , identifier[pk_set] , identifier[using] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[with] identifier[transaction] . identifier[atomic] ():
keyword[if] keyword[not] identifier[should_audit] ( identifier[instance] ):
keyword[return] keyword[False]
keyword[if] identifier[action] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[return] keyword[False]
identifier[object_json_repr] = identifier[serializers] . identifier[serialize] ( literal[string] ,[ identifier[instance] ])
keyword[if] identifier[reverse] :
identifier[event_type] = identifier[CRUDEvent] . identifier[M2M_CHANGE_REV]
identifier[tmp_repr] = identifier[json] . identifier[loads] ( identifier[object_json_repr] )
identifier[m2m_rev_field] = identifier[_m2m_rev_field_name] ( identifier[instance] . identifier[_meta] . identifier[concrete_model] , identifier[model] )
identifier[related_instances] = identifier[getattr] ( identifier[instance] , identifier[m2m_rev_field] ). identifier[all] ()
identifier[related_ids] =[ identifier[r] . identifier[pk] keyword[for] identifier[r] keyword[in] identifier[related_instances] ]
identifier[tmp_repr] [ literal[int] ][ literal[string] ]= identifier[force_text] ( identifier[model] . identifier[_meta] )
identifier[tmp_repr] [ literal[int] ][ literal[string] ]= identifier[related_ids]
identifier[tmp_repr] [ literal[int] ][ literal[string] ]= identifier[action]
identifier[object_json_repr] = identifier[json] . identifier[dumps] ( identifier[tmp_repr] )
keyword[else] :
identifier[event_type] = identifier[CRUDEvent] . identifier[M2M_CHANGE]
keyword[try] :
identifier[user] = identifier[get_current_user] ()
identifier[user] = identifier[get_user_model] (). identifier[objects] . identifier[get] ( identifier[pk] = identifier[user] . identifier[pk] )
keyword[except] :
identifier[user] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[user] , identifier[AnonymousUser] ):
identifier[user] = keyword[None]
identifier[c_t] = identifier[ContentType] . identifier[objects] . identifier[get_for_model] ( identifier[instance] )
identifier[sid] = identifier[transaction] . identifier[savepoint] ()
keyword[try] :
keyword[with] identifier[transaction] . identifier[atomic] ():
identifier[crud_event] = identifier[CRUDEvent] . identifier[objects] . identifier[create] (
identifier[event_type] = identifier[event_type] ,
identifier[object_repr] = identifier[str] ( identifier[instance] ),
identifier[object_json_repr] = identifier[object_json_repr] ,
identifier[content_type_id] = identifier[c_t] . identifier[id] ,
identifier[object_id] = identifier[instance] . identifier[pk] ,
identifier[user_id] = identifier[getattr] ( identifier[user] , literal[string] , keyword[None] ),
identifier[datetime] = identifier[timezone] . identifier[now] (),
identifier[user_pk_as_string] = identifier[str] ( identifier[user] . identifier[pk] ) keyword[if] identifier[user] keyword[else] identifier[user]
)
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[exception] (
literal[string] . identifier[format] (
identifier[instance] , identifier[instance] . identifier[pk] ))
identifier[transaction] . identifier[savepoint_rollback] ( identifier[sid] )
keyword[except] identifier[Exception] :
identifier[logger] . identifier[exception] ( literal[string] ) | def m2m_changed(sender, instance, action, reverse, model, pk_set, using, **kwargs):
"""https://docs.djangoproject.com/es/1.10/ref/signals/#m2m-changed"""
try:
with transaction.atomic():
if not should_audit(instance):
return False # depends on [control=['if'], data=[]]
if action not in ('post_add', 'post_remove', 'post_clear'):
return False # depends on [control=['if'], data=[]]
object_json_repr = serializers.serialize('json', [instance])
if reverse:
event_type = CRUDEvent.M2M_CHANGE_REV
# add reverse M2M changes to event. must use json lib because
# django serializers ignore extra fields.
tmp_repr = json.loads(object_json_repr)
m2m_rev_field = _m2m_rev_field_name(instance._meta.concrete_model, model)
related_instances = getattr(instance, m2m_rev_field).all()
related_ids = [r.pk for r in related_instances]
tmp_repr[0]['m2m_rev_model'] = force_text(model._meta)
tmp_repr[0]['m2m_rev_pks'] = related_ids
tmp_repr[0]['m2m_rev_action'] = action
object_json_repr = json.dumps(tmp_repr) # depends on [control=['if'], data=[]]
else:
event_type = CRUDEvent.M2M_CHANGE
# user
try:
user = get_current_user()
# validate that the user still exists
user = get_user_model().objects.get(pk=user.pk) # depends on [control=['try'], data=[]]
except:
user = None # depends on [control=['except'], data=[]]
if isinstance(user, AnonymousUser):
user = None # depends on [control=['if'], data=[]]
c_t = ContentType.objects.get_for_model(instance)
sid = transaction.savepoint()
try:
with transaction.atomic():
crud_event = CRUDEvent.objects.create(event_type=event_type, object_repr=str(instance), object_json_repr=object_json_repr, content_type_id=c_t.id, object_id=instance.pk, user_id=getattr(user, 'id', None), datetime=timezone.now(), user_pk_as_string=str(user.pk) if user else user) # depends on [control=['with'], data=[]] # depends on [control=['try'], data=[]]
except Exception as e:
logger.exception('easy audit had a pre-save exception on CRUDEvent creation. instance: {}, instance pk: {}'.format(instance, instance.pk))
transaction.savepoint_rollback(sid) # depends on [control=['except'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['try'], data=[]]
except Exception:
logger.exception('easy audit had an m2m-changed exception.') # depends on [control=['except'], data=[]] |
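Wiring the handler up uses Django's standard m2m_changed signal; the Article model below is hypothetical, while the connect() call itself is the documented Django API:

from django.db.models.signals import m2m_changed as m2m_changed_signal
m2m_changed_signal.connect(m2m_changed, sender=Article.tags.through)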
def must_exist(*components):
"""
Ensure path exists.
Arguments:
*components (str[]): Path components.
Returns:
str: File path.
Raises:
File404: If path does not exist.
"""
_path = path(*components)
if not exists(_path):
raise File404(_path)
return _path | def function[must_exist, parameter[]]:
constant[
Ensure path exists.
Arguments:
*components (str[]): Path components.
Returns:
str: File path.
Raises:
File404: If path does not exist.
]
variable[_path] assign[=] call[name[path], parameter[<ast.Starred object at 0x7da1b0a22ce0>]]
if <ast.UnaryOp object at 0x7da1b0a20610> begin[:]
<ast.Raise object at 0x7da1b09e98a0>
return[name[_path]] | keyword[def] identifier[must_exist] (* identifier[components] ):
literal[string]
identifier[_path] = identifier[path] (* identifier[components] )
keyword[if] keyword[not] identifier[exists] ( identifier[_path] ):
keyword[raise] identifier[File404] ( identifier[_path] )
keyword[return] identifier[_path] | def must_exist(*components):
"""
Ensure path exists.
Arguments:
*components (str[]): Path components.
Returns:
str: File path.
Raises:
File404: If path does not exist.
"""
_path = path(*components)
if not exists(_path):
raise File404(_path) # depends on [control=['if'], data=[]]
return _path |
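A minimal sketch, assuming path() above joins its components into a single filesystem path:

settings = must_exist('/etc', 'myapp', 'settings.json')  # File404 if missing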
def score(self, eval_data, eval_metric, num_batch=None, batch_end_callback=None,
score_end_callback=None,
reset=True, epoch=0, sparse_row_id_fn=None):
"""Runs prediction on ``eval_data`` and evaluates the performance according to
the given ``eval_metric``.
        Check out the `Module Tutorial <http://mxnet.io/tutorials/basic/module.html>`_ to see
        an end-to-end use case.
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
eval_metric : EvalMetric or list of EvalMetrics
Evaluation metric to use.
num_batch : int
Number of batches to run. Defaults to ``None``, indicating run until the `DataIter`
finishes.
batch_end_callback : function
Could also be a list of functions.
reset : bool
Defaults to ``True``. Indicates whether we should reset `eval_data` before starting
evaluating.
epoch : int
Defaults to 0. For compatibility, this will be passed to callbacks (if any).
During training, this will correspond to the training epoch number.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Examples
--------
>>> # An example of using score for prediction.
>>> # Evaluate accuracy on val_dataiter
>>> metric = mx.metric.Accuracy()
>>> mod.score(val_dataiter, metric)
>>> mod.score(val_dataiter, ['mse', 'acc'])
"""
assert self.binded and self.params_initialized
if reset:
eval_data.reset()
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
eval_metric.reset()
actual_num_batch = 0
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.prepare(eval_batch, sparse_row_id_fn=sparse_row_id_fn)
self.forward(eval_batch, is_train=False)
if isinstance(eval_batch, list):
self.update_metric(eval_metric, [eb.label for eb in eval_batch], pre_sliced=True)
else:
self.update_metric(eval_metric, eval_batch.label)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(batch_end_callback):
callback(batch_end_params)
actual_num_batch += 1
if score_end_callback:
params = BatchEndParam(epoch=epoch,
nbatch=actual_num_batch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(score_end_callback):
callback(params)
return eval_metric.get_name_value() | def function[score, parameter[self, eval_data, eval_metric, num_batch, batch_end_callback, score_end_callback, reset, epoch, sparse_row_id_fn]]:
constant[Runs prediction on ``eval_data`` and evaluates the performance according to
the given ``eval_metric``.
        Check out the `Module Tutorial <http://mxnet.io/tutorials/basic/module.html>`_ to see
        an end-to-end use case.
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
eval_metric : EvalMetric or list of EvalMetrics
Evaluation metric to use.
num_batch : int
Number of batches to run. Defaults to ``None``, indicating run until the `DataIter`
finishes.
batch_end_callback : function
Could also be a list of functions.
reset : bool
Defaults to ``True``. Indicates whether we should reset `eval_data` before starting
evaluating.
epoch : int
Defaults to 0. For compatibility, this will be passed to callbacks (if any).
During training, this will correspond to the training epoch number.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Examples
--------
>>> # An example of using score for prediction.
>>> # Evaluate accuracy on val_dataiter
>>> metric = mx.metric.Accuracy()
>>> mod.score(val_dataiter, metric)
>>> mod.score(val_dataiter, ['mse', 'acc'])
]
assert[<ast.BoolOp object at 0x7da1b2065690>]
if name[reset] begin[:]
call[name[eval_data].reset, parameter[]]
if <ast.UnaryOp object at 0x7da1b2065b10> begin[:]
variable[eval_metric] assign[=] call[name[metric].create, parameter[name[eval_metric]]]
call[name[eval_metric].reset, parameter[]]
variable[actual_num_batch] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b2065600>, <ast.Name object at 0x7da1b2064640>]]] in starred[call[name[enumerate], parameter[name[eval_data]]]] begin[:]
if <ast.BoolOp object at 0x7da1b2066110> begin[:]
break
call[name[self].prepare, parameter[name[eval_batch]]]
call[name[self].forward, parameter[name[eval_batch]]]
if call[name[isinstance], parameter[name[eval_batch], name[list]]] begin[:]
call[name[self].update_metric, parameter[name[eval_metric], <ast.ListComp object at 0x7da1b20675e0>]]
if compare[name[batch_end_callback] is_not constant[None]] begin[:]
variable[batch_end_params] assign[=] call[name[BatchEndParam], parameter[]]
for taget[name[callback]] in starred[call[name[_as_list], parameter[name[batch_end_callback]]]] begin[:]
call[name[callback], parameter[name[batch_end_params]]]
<ast.AugAssign object at 0x7da1b20f8c70>
if name[score_end_callback] begin[:]
variable[params] assign[=] call[name[BatchEndParam], parameter[]]
for taget[name[callback]] in starred[call[name[_as_list], parameter[name[score_end_callback]]]] begin[:]
call[name[callback], parameter[name[params]]]
return[call[name[eval_metric].get_name_value, parameter[]]] | keyword[def] identifier[score] ( identifier[self] , identifier[eval_data] , identifier[eval_metric] , identifier[num_batch] = keyword[None] , identifier[batch_end_callback] = keyword[None] ,
identifier[score_end_callback] = keyword[None] ,
identifier[reset] = keyword[True] , identifier[epoch] = literal[int] , identifier[sparse_row_id_fn] = keyword[None] ):
literal[string]
keyword[assert] identifier[self] . identifier[binded] keyword[and] identifier[self] . identifier[params_initialized]
keyword[if] identifier[reset] :
identifier[eval_data] . identifier[reset] ()
keyword[if] keyword[not] identifier[isinstance] ( identifier[eval_metric] , identifier[metric] . identifier[EvalMetric] ):
identifier[eval_metric] = identifier[metric] . identifier[create] ( identifier[eval_metric] )
identifier[eval_metric] . identifier[reset] ()
identifier[actual_num_batch] = literal[int]
keyword[for] identifier[nbatch] , identifier[eval_batch] keyword[in] identifier[enumerate] ( identifier[eval_data] ):
keyword[if] identifier[num_batch] keyword[is] keyword[not] keyword[None] keyword[and] identifier[nbatch] == identifier[num_batch] :
keyword[break]
identifier[self] . identifier[prepare] ( identifier[eval_batch] , identifier[sparse_row_id_fn] = identifier[sparse_row_id_fn] )
identifier[self] . identifier[forward] ( identifier[eval_batch] , identifier[is_train] = keyword[False] )
keyword[if] identifier[isinstance] ( identifier[eval_batch] , identifier[list] ):
identifier[self] . identifier[update_metric] ( identifier[eval_metric] ,[ identifier[eb] . identifier[label] keyword[for] identifier[eb] keyword[in] identifier[eval_batch] ], identifier[pre_sliced] = keyword[True] )
keyword[else] :
identifier[self] . identifier[update_metric] ( identifier[eval_metric] , identifier[eval_batch] . identifier[label] )
keyword[if] identifier[batch_end_callback] keyword[is] keyword[not] keyword[None] :
identifier[batch_end_params] = identifier[BatchEndParam] ( identifier[epoch] = identifier[epoch] ,
identifier[nbatch] = identifier[nbatch] ,
identifier[eval_metric] = identifier[eval_metric] ,
identifier[locals] = identifier[locals] ())
keyword[for] identifier[callback] keyword[in] identifier[_as_list] ( identifier[batch_end_callback] ):
identifier[callback] ( identifier[batch_end_params] )
identifier[actual_num_batch] += literal[int]
keyword[if] identifier[score_end_callback] :
identifier[params] = identifier[BatchEndParam] ( identifier[epoch] = identifier[epoch] ,
identifier[nbatch] = identifier[actual_num_batch] ,
identifier[eval_metric] = identifier[eval_metric] ,
identifier[locals] = identifier[locals] ())
keyword[for] identifier[callback] keyword[in] identifier[_as_list] ( identifier[score_end_callback] ):
identifier[callback] ( identifier[params] )
keyword[return] identifier[eval_metric] . identifier[get_name_value] () | def score(self, eval_data, eval_metric, num_batch=None, batch_end_callback=None, score_end_callback=None, reset=True, epoch=0, sparse_row_id_fn=None):
"""Runs prediction on ``eval_data`` and evaluates the performance according to
the given ``eval_metric``.
        Check out the `Module Tutorial <http://mxnet.io/tutorials/basic/module.html>`_ to see
        an end-to-end use case.
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
eval_metric : EvalMetric or list of EvalMetrics
Evaluation metric to use.
num_batch : int
Number of batches to run. Defaults to ``None``, indicating run until the `DataIter`
finishes.
batch_end_callback : function
Could also be a list of functions.
reset : bool
Defaults to ``True``. Indicates whether we should reset `eval_data` before starting
evaluating.
epoch : int
Defaults to 0. For compatibility, this will be passed to callbacks (if any).
During training, this will correspond to the training epoch number.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Examples
--------
>>> # An example of using score for prediction.
>>> # Evaluate accuracy on val_dataiter
>>> metric = mx.metric.Accuracy()
>>> mod.score(val_dataiter, metric)
>>> mod.score(val_dataiter, ['mse', 'acc'])
"""
assert self.binded and self.params_initialized
if reset:
eval_data.reset() # depends on [control=['if'], data=[]]
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric) # depends on [control=['if'], data=[]]
eval_metric.reset()
actual_num_batch = 0
for (nbatch, eval_batch) in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break # depends on [control=['if'], data=[]]
self.prepare(eval_batch, sparse_row_id_fn=sparse_row_id_fn)
self.forward(eval_batch, is_train=False)
if isinstance(eval_batch, list):
self.update_metric(eval_metric, [eb.label for eb in eval_batch], pre_sliced=True) # depends on [control=['if'], data=[]]
else:
self.update_metric(eval_metric, eval_batch.label)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch, nbatch=nbatch, eval_metric=eval_metric, locals=locals())
for callback in _as_list(batch_end_callback):
callback(batch_end_params) # depends on [control=['for'], data=['callback']] # depends on [control=['if'], data=['batch_end_callback']]
actual_num_batch += 1 # depends on [control=['for'], data=[]]
if score_end_callback:
params = BatchEndParam(epoch=epoch, nbatch=actual_num_batch, eval_metric=eval_metric, locals=locals())
for callback in _as_list(score_end_callback):
callback(params) # depends on [control=['for'], data=['callback']] # depends on [control=['if'], data=[]]
return eval_metric.get_name_value() |
def _preprocess(self, x, out=None):
"""Return the pre-processed version of ``x``.
C2C: use ``tmp_r`` or ``tmp_f`` (C2C operation)
R2C: use ``tmp_f`` (R2C operation)
HALFC: use ``tmp_r`` (R2R operation)
The result is stored in ``out`` if given, otherwise in
a temporary or a new array.
"""
if out is None:
if self.domain.field == ComplexNumbers():
out = self._tmp_r if self._tmp_r is not None else self._tmp_f
elif self.domain.field == RealNumbers() and not self.halfcomplex:
out = self._tmp_f
else:
out = self._tmp_r
return dft_preprocess_data(
x, shift=self.shifts, axes=self.axes, sign=self.sign,
out=out) | def function[_preprocess, parameter[self, x, out]]:
constant[Return the pre-processed version of ``x``.
C2C: use ``tmp_r`` or ``tmp_f`` (C2C operation)
R2C: use ``tmp_f`` (R2C operation)
HALFC: use ``tmp_r`` (R2R operation)
The result is stored in ``out`` if given, otherwise in
a temporary or a new array.
]
if compare[name[out] is constant[None]] begin[:]
if compare[name[self].domain.field equal[==] call[name[ComplexNumbers], parameter[]]] begin[:]
variable[out] assign[=] <ast.IfExp object at 0x7da1b1ec5480>
return[call[name[dft_preprocess_data], parameter[name[x]]]] | keyword[def] identifier[_preprocess] ( identifier[self] , identifier[x] , identifier[out] = keyword[None] ):
literal[string]
keyword[if] identifier[out] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[domain] . identifier[field] == identifier[ComplexNumbers] ():
identifier[out] = identifier[self] . identifier[_tmp_r] keyword[if] identifier[self] . identifier[_tmp_r] keyword[is] keyword[not] keyword[None] keyword[else] identifier[self] . identifier[_tmp_f]
keyword[elif] identifier[self] . identifier[domain] . identifier[field] == identifier[RealNumbers] () keyword[and] keyword[not] identifier[self] . identifier[halfcomplex] :
identifier[out] = identifier[self] . identifier[_tmp_f]
keyword[else] :
identifier[out] = identifier[self] . identifier[_tmp_r]
keyword[return] identifier[dft_preprocess_data] (
identifier[x] , identifier[shift] = identifier[self] . identifier[shifts] , identifier[axes] = identifier[self] . identifier[axes] , identifier[sign] = identifier[self] . identifier[sign] ,
identifier[out] = identifier[out] ) | def _preprocess(self, x, out=None):
"""Return the pre-processed version of ``x``.
C2C: use ``tmp_r`` or ``tmp_f`` (C2C operation)
R2C: use ``tmp_f`` (R2C operation)
HALFC: use ``tmp_r`` (R2R operation)
The result is stored in ``out`` if given, otherwise in
a temporary or a new array.
"""
if out is None:
if self.domain.field == ComplexNumbers():
out = self._tmp_r if self._tmp_r is not None else self._tmp_f # depends on [control=['if'], data=[]]
elif self.domain.field == RealNumbers() and (not self.halfcomplex):
out = self._tmp_f # depends on [control=['if'], data=[]]
else:
out = self._tmp_r # depends on [control=['if'], data=['out']]
return dft_preprocess_data(x, shift=self.shifts, axes=self.axes, sign=self.sign, out=out) |
def get_column(self, column_name, column_type, index, verbose=True):
"""Summary
Args:
column_name (TYPE): Description
column_type (TYPE): Description
index (TYPE): Description
Returns:
TYPE: Description
"""
return LazyOpResult(
grizzly_impl.get_column(
self.expr,
self.weld_type,
index
),
column_type,
1
) | def function[get_column, parameter[self, column_name, column_type, index, verbose]]:
constant[Summary
Args:
column_name (TYPE): Description
column_type (TYPE): Description
index (TYPE): Description
Returns:
TYPE: Description
]
return[call[name[LazyOpResult], parameter[call[name[grizzly_impl].get_column, parameter[name[self].expr, name[self].weld_type, name[index]]], name[column_type], constant[1]]]] | keyword[def] identifier[get_column] ( identifier[self] , identifier[column_name] , identifier[column_type] , identifier[index] , identifier[verbose] = keyword[True] ):
literal[string]
keyword[return] identifier[LazyOpResult] (
identifier[grizzly_impl] . identifier[get_column] (
identifier[self] . identifier[expr] ,
identifier[self] . identifier[weld_type] ,
identifier[index]
),
identifier[column_type] ,
literal[int]
) | def get_column(self, column_name, column_type, index, verbose=True):
"""Summary
Args:
column_name (TYPE): Description
column_type (TYPE): Description
index (TYPE): Description
Returns:
TYPE: Description
"""
return LazyOpResult(grizzly_impl.get_column(self.expr, self.weld_type, index), column_type, 1) |
def find_file(folder, filename):
"""
Find a file given a folder and a filename. If the filename can be
resolved directly, return it; otherwise walk the supplied folder.
"""
matches = []
if os.path.isabs(filename) and os.path.isfile(filename):
return filename
for root, _, filenames in os.walk(folder):
for fn in fnmatch.filter(filenames, filename):
matches.append(os.path.join(root, fn))
if not matches:
raise IOError('File %s could not be found' % filename)
return matches[-1] | def function[find_file, parameter[folder, filename]]:
constant[
Find a file given a folder and a filename. If the filename can be
resolved directly, return it; otherwise walk the supplied folder.
]
variable[matches] assign[=] list[[]]
if <ast.BoolOp object at 0x7da20c993310> begin[:]
return[name[filename]]
for taget[tuple[[<ast.Name object at 0x7da18fe928c0>, <ast.Name object at 0x7da18fe92650>, <ast.Name object at 0x7da18fe91ba0>]]] in starred[call[name[os].walk, parameter[name[folder]]]] begin[:]
for taget[name[fn]] in starred[call[name[fnmatch].filter, parameter[name[filenames], name[filename]]]] begin[:]
call[name[matches].append, parameter[call[name[os].path.join, parameter[name[root], name[fn]]]]]
if <ast.UnaryOp object at 0x7da20c9913f0> begin[:]
<ast.Raise object at 0x7da20c9901c0>
return[call[name[matches]][<ast.UnaryOp object at 0x7da20c993460>]] | keyword[def] identifier[find_file] ( identifier[folder] , identifier[filename] ):
literal[string]
identifier[matches] =[]
keyword[if] identifier[os] . identifier[path] . identifier[isabs] ( identifier[filename] ) keyword[and] identifier[os] . identifier[path] . identifier[isfile] ( identifier[filename] ):
keyword[return] identifier[filename]
keyword[for] identifier[root] , identifier[_] , identifier[filenames] keyword[in] identifier[os] . identifier[walk] ( identifier[folder] ):
keyword[for] identifier[fn] keyword[in] identifier[fnmatch] . identifier[filter] ( identifier[filenames] , identifier[filename] ):
identifier[matches] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[fn] ))
keyword[if] keyword[not] identifier[matches] :
keyword[raise] identifier[IOError] ( literal[string] % identifier[filename] )
keyword[return] identifier[matches] [- literal[int] ] | def find_file(folder, filename):
"""
Find a file given a folder and a filename. If the filename can be
resolved directly, return it; otherwise walk the supplied folder.
"""
matches = []
if os.path.isabs(filename) and os.path.isfile(filename):
return filename # depends on [control=['if'], data=[]]
for (root, _, filenames) in os.walk(folder):
for fn in fnmatch.filter(filenames, filename):
matches.append(os.path.join(root, fn)) # depends on [control=['for'], data=['fn']] # depends on [control=['for'], data=[]]
if not matches:
raise IOError('File %s could not be found' % filename) # depends on [control=['if'], data=[]]
return matches[-1] |
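# A minimal usage sketch for find_file above -- the directory layout is
# hypothetical and built on the fly; os and fnmatch are assumed imported as in the source.
import os, tempfile
with tempfile.TemporaryDirectory() as root:
    os.makedirs(os.path.join(root, 'nested'))
    target = os.path.join(root, 'nested', 'config.yaml')
    open(target, 'w').close()
    assert find_file(root, 'config.yaml') == target   # found by walking into 'nested'
    assert find_file(root, target) == target          # absolute existing path returns immediately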
def makeService(opt):
"""Make a service
:param opt: dictionary-like object with 'freq', 'config' and 'messages'
:returns: twisted.application.internet.TimerService that at opt['freq']
checks for stale processes in opt['config'], and sends
restart messages through opt['messages']
"""
restarter, path = beatcheck.parseConfig(opt)
pool = client.HTTPConnectionPool(reactor)
agent = client.Agent(reactor=reactor, pool=pool)
settings = Settings(reactor=reactor, agent=agent)
states = {}
checker = functools.partial(check, settings, states, path)
httpcheck = tainternet.TimerService(opt['freq'], run, restarter, checker)
httpcheck.setName('httpcheck')
return heart.wrapHeart(httpcheck) | def function[makeService, parameter[opt]]:
constant[Make a service
:param opt: dictionary-like object with 'freq', 'config' and 'messages'
:returns: twisted.application.internet.TimerService that at opt['freq']
checks for stale processes in opt['config'], and sends
restart messages through opt['messages']
]
<ast.Tuple object at 0x7da1b03b93f0> assign[=] call[name[beatcheck].parseConfig, parameter[name[opt]]]
variable[pool] assign[=] call[name[client].HTTPConnectionPool, parameter[name[reactor]]]
variable[agent] assign[=] call[name[client].Agent, parameter[]]
variable[settings] assign[=] call[name[Settings], parameter[]]
variable[states] assign[=] dictionary[[], []]
variable[checker] assign[=] call[name[functools].partial, parameter[name[check], name[settings], name[states], name[path]]]
variable[httpcheck] assign[=] call[name[tainternet].TimerService, parameter[call[name[opt]][constant[freq]], name[run], name[restarter], name[checker]]]
call[name[httpcheck].setName, parameter[constant[httpcheck]]]
return[call[name[heart].wrapHeart, parameter[name[httpcheck]]]] | keyword[def] identifier[makeService] ( identifier[opt] ):
literal[string]
identifier[restarter] , identifier[path] = identifier[beatcheck] . identifier[parseConfig] ( identifier[opt] )
identifier[pool] = identifier[client] . identifier[HTTPConnectionPool] ( identifier[reactor] )
identifier[agent] = identifier[client] . identifier[Agent] ( identifier[reactor] = identifier[reactor] , identifier[pool] = identifier[pool] )
identifier[settings] = identifier[Settings] ( identifier[reactor] = identifier[reactor] , identifier[agent] = identifier[agent] )
identifier[states] ={}
identifier[checker] = identifier[functools] . identifier[partial] ( identifier[check] , identifier[settings] , identifier[states] , identifier[path] )
identifier[httpcheck] = identifier[tainternet] . identifier[TimerService] ( identifier[opt] [ literal[string] ], identifier[run] , identifier[restarter] , identifier[checker] )
identifier[httpcheck] . identifier[setName] ( literal[string] )
keyword[return] identifier[heart] . identifier[wrapHeart] ( identifier[httpcheck] ) | def makeService(opt):
"""Make a service
:param opt: dictionary-like object with 'freq', 'config' and 'messages'
:returns: twisted.application.internet.TimerService that at opt['freq']
checks for stale processes in opt['config'], and sends
restart messages through opt['messages']
"""
(restarter, path) = beatcheck.parseConfig(opt)
pool = client.HTTPConnectionPool(reactor)
agent = client.Agent(reactor=reactor, pool=pool)
settings = Settings(reactor=reactor, agent=agent)
states = {}
checker = functools.partial(check, settings, states, path)
httpcheck = tainternet.TimerService(opt['freq'], run, restarter, checker)
httpcheck.setName('httpcheck')
return heart.wrapHeart(httpcheck) |
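# Hypothetical invocation sketch: per its docstring, makeService only needs a
# dict-like object with 'freq', 'config' and 'messages'; the values below are made up.
opt = {'freq': 30, 'config': '/etc/ncolony/procs', 'messages': '/etc/ncolony/messages'}
service = makeService(opt)   # a named TimerService, wrapped with a heartbeat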
def match_prefix(self, string):
"""
Do a partial match of the string with the grammar. The returned
:class:`Match` instance can contain multiple representations of the
match. This will never return `None`. If it doesn't match at all, the "trailing input"
part will capture all of the input.
:param string: The input string.
"""
# First try to match using `_re_prefix`. If nothing is found, use the patterns that
# also accept trailing characters.
for patterns in [self._re_prefix, self._re_prefix_with_trailing_input]:
matches = [(r, r.match(string)) for r in patterns]
matches = [(r, m) for r, m in matches if m]
if matches != []:
return Match(string, matches, self._group_names_to_nodes, self.unescape_funcs) | def function[match_prefix, parameter[self, string]]:
constant[
Do a partial match of the string with the grammar. The returned
:class:`Match` instance can contain multiple representations of the
match. This will never return `None`. If it doesn't match at all, the "trailing input"
part will capture all of the input.
:param string: The input string.
]
for taget[name[patterns]] in starred[list[[<ast.Attribute object at 0x7da1b085b7f0>, <ast.Attribute object at 0x7da1b085b3a0>]]] begin[:]
variable[matches] assign[=] <ast.ListComp object at 0x7da1b0859e70>
variable[matches] assign[=] <ast.ListComp object at 0x7da1b0859cf0>
if compare[name[matches] not_equal[!=] list[[]]] begin[:]
return[call[name[Match], parameter[name[string], name[matches], name[self]._group_names_to_nodes, name[self].unescape_funcs]]] | keyword[def] identifier[match_prefix] ( identifier[self] , identifier[string] ):
literal[string]
keyword[for] identifier[patterns] keyword[in] [ identifier[self] . identifier[_re_prefix] , identifier[self] . identifier[_re_prefix_with_trailing_input] ]:
identifier[matches] =[( identifier[r] , identifier[r] . identifier[match] ( identifier[string] )) keyword[for] identifier[r] keyword[in] identifier[patterns] ]
identifier[matches] =[( identifier[r] , identifier[m] ) keyword[for] identifier[r] , identifier[m] keyword[in] identifier[matches] keyword[if] identifier[m] ]
keyword[if] identifier[matches] !=[]:
keyword[return] identifier[Match] ( identifier[string] , identifier[matches] , identifier[self] . identifier[_group_names_to_nodes] , identifier[self] . identifier[unescape_funcs] ) | def match_prefix(self, string):
"""
Do a partial match of the string with the grammar. The returned
:class:`Match` instance can contain multiple representations of the
match. This will never return `None`. If it doesn't match at all, the "trailing input"
part will capture all of the input.
:param string: The input string.
"""
# First try to match using `_re_prefix`. If nothing is found, use the patterns that
# also accept trailing characters.
for patterns in [self._re_prefix, self._re_prefix_with_trailing_input]:
matches = [(r, r.match(string)) for r in patterns]
matches = [(r, m) for (r, m) in matches if m]
if matches != []:
return Match(string, matches, self._group_names_to_nodes, self.unescape_funcs) # depends on [control=['if'], data=['matches']] # depends on [control=['for'], data=['patterns']] |
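# The two-stage idea above (exact prefix patterns first, then patterns tolerating
# trailing input) sketched with plain re; these pattern lists are hypothetical
# stand-ins for _re_prefix and _re_prefix_with_trailing_input.
import re
strict = [re.compile(r'cd\s+\S*$')]    # whole input must be a valid prefix
tolerant = [re.compile(r'cd\s+\S*')]   # may leave trailing characters unmatched
def first_matches(string):
    for patterns in (strict, tolerant):
        found = [(p, p.match(string)) for p in patterns]
        found = [(p, m) for p, m in found if m]
        if found:
            return found
print(first_matches('cd /tmp')[0][1].group(0))        # strict pass matches: 'cd /tmp'
print(first_matches('cd /tmp && ls')[0][1].group(0))  # tolerant fallback matches 'cd /tmp'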
def get_badge(self, kind):
''' Get a badge given its kind if present'''
candidates = [b for b in self.badges if b.kind == kind]
return candidates[0] if candidates else None | def function[get_badge, parameter[self, kind]]:
constant[ Get a badge given its kind if present]
variable[candidates] assign[=] <ast.ListComp object at 0x7da20c76e500>
return[<ast.IfExp object at 0x7da20c76d360>] | keyword[def] identifier[get_badge] ( identifier[self] , identifier[kind] ):
literal[string]
identifier[candidates] =[ identifier[b] keyword[for] identifier[b] keyword[in] identifier[self] . identifier[badges] keyword[if] identifier[b] . identifier[kind] == identifier[kind] ]
keyword[return] identifier[candidates] [ literal[int] ] keyword[if] identifier[candidates] keyword[else] keyword[None] | def get_badge(self, kind):
""" Get a badge given its kind if present"""
candidates = [b for b in self.badges if b.kind == kind]
return candidates[0] if candidates else None |
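# Quick behaviour check for get_badge against a stand-in object; Badge and the
# 'verified' kind are invented for this sketch.
from collections import namedtuple
Badge = namedtuple('Badge', 'kind')
class Org(object):
    def __init__(self, badges):
        self.badges = badges
Org.get_badge = get_badge              # attach the function above as a method
org = Org([Badge('certified'), Badge('verified')])
print(org.get_badge('verified'))       # Badge(kind='verified')
print(org.get_badge('missing'))        # None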
def prepare(self):
"""No percolator XML for protein tables"""
self.target = self.fn
self.targetheader = reader.get_tsv_header(self.target)
self.decoyheader = reader.get_tsv_header(self.decoyfn) | def function[prepare, parameter[self]]:
constant[No percolator XML for protein tables]
name[self].target assign[=] name[self].fn
name[self].targetheader assign[=] call[name[reader].get_tsv_header, parameter[name[self].target]]
name[self].decoyheader assign[=] call[name[reader].get_tsv_header, parameter[name[self].decoyfn]] | keyword[def] identifier[prepare] ( identifier[self] ):
literal[string]
identifier[self] . identifier[target] = identifier[self] . identifier[fn]
identifier[self] . identifier[targetheader] = identifier[reader] . identifier[get_tsv_header] ( identifier[self] . identifier[target] )
identifier[self] . identifier[decoyheader] = identifier[reader] . identifier[get_tsv_header] ( identifier[self] . identifier[decoyfn] ) | def prepare(self):
"""No percolator XML for protein tables"""
self.target = self.fn
self.targetheader = reader.get_tsv_header(self.target)
self.decoyheader = reader.get_tsv_header(self.decoyfn) |
def connect_telnet(name, ip_address=None, user='micro', password='python'):
"""Connect to a MicroPython board via telnet."""
if ip_address is None:
try:
ip_address = socket.gethostbyname(name)
except socket.gaierror:
ip_address = name
if not QUIET:
if name == ip_address:
print('Connecting to (%s) ...' % ip_address)
else:
print('Connecting to %s (%s) ...' % (name, ip_address))
dev = DeviceNet(name, ip_address, user, password)
add_device(dev) | def function[connect_telnet, parameter[name, ip_address, user, password]]:
constant[Connect to a MicroPython board via telnet.]
if compare[name[ip_address] is constant[None]] begin[:]
<ast.Try object at 0x7da20c6c66b0>
if <ast.UnaryOp object at 0x7da20c6c51e0> begin[:]
if compare[name[name] equal[==] name[ip_address]] begin[:]
call[name[print], parameter[binary_operation[constant[Connecting to (%s) ...] <ast.Mod object at 0x7da2590d6920> name[ip_address]]]]
variable[dev] assign[=] call[name[DeviceNet], parameter[name[name], name[ip_address], name[user], name[password]]]
call[name[add_device], parameter[name[dev]]] | keyword[def] identifier[connect_telnet] ( identifier[name] , identifier[ip_address] = keyword[None] , identifier[user] = literal[string] , identifier[password] = literal[string] ):
literal[string]
keyword[if] identifier[ip_address] keyword[is] keyword[None] :
keyword[try] :
identifier[ip_address] = identifier[socket] . identifier[gethostbyname] ( identifier[name] )
keyword[except] identifier[socket] . identifier[gaierror] :
identifier[ip_address] = identifier[name]
keyword[if] keyword[not] identifier[QUIET] :
keyword[if] identifier[name] == identifier[ip_address] :
identifier[print] ( literal[string] % identifier[ip_address] )
keyword[else] :
identifier[print] ( literal[string] %( identifier[name] , identifier[ip_address] ))
identifier[dev] = identifier[DeviceNet] ( identifier[name] , identifier[ip_address] , identifier[user] , identifier[password] )
identifier[add_device] ( identifier[dev] ) | def connect_telnet(name, ip_address=None, user='micro', password='python'):
"""Connect to a MicroPython board via telnet."""
if ip_address is None:
try:
ip_address = socket.gethostbyname(name) # depends on [control=['try'], data=[]]
except socket.gaierror:
ip_address = name # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['ip_address']]
if not QUIET:
if name == ip_address:
print('Connecting to (%s) ...' % ip_address) # depends on [control=['if'], data=['ip_address']]
else:
print('Connecting to %s (%s) ...' % (name, ip_address)) # depends on [control=['if'], data=[]]
dev = DeviceNet(name, ip_address, user, password)
add_device(dev) |
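# The name-or-address fallback at the top of connect_telnet is reusable on its
# own; a standalone sketch of just that step.
import socket
def resolve(name):
    try:
        return socket.gethostbyname(name)   # DNS lookup for hostnames
    except socket.gaierror:
        return name                         # not resolvable: assume it is already an IP
print(resolve('localhost'))                 # typically '127.0.0.1'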
def format_dimension(dimension):
"""Formats the specified <dimension> XML tag for string output."""
result = ""
if "type" in dimension.attrib:
result += "[R" if dimension.attrib["type"] == "row" else "[C"
else:
result += "[C"
if "index" in dimension.attrib:
result += ":" + dimension.attrib["index"]
result += "] " + re.sub("\s+", " ", dimension.text.replace("\n", " "))
return result | def function[format_dimension, parameter[dimension]]:
constant[Formats the specified <dimension> XML tag for string output.]
variable[result] assign[=] constant[]
if compare[constant[type] in name[dimension].attrib] begin[:]
<ast.AugAssign object at 0x7da1b26ac8b0>
if compare[constant[index] in name[dimension].attrib] begin[:]
<ast.AugAssign object at 0x7da1b26afaf0>
<ast.AugAssign object at 0x7da1b26acca0>
return[name[result]] | keyword[def] identifier[format_dimension] ( identifier[dimension] ):
literal[string]
identifier[result] = literal[string]
keyword[if] literal[string] keyword[in] identifier[dimension] . identifier[attrib] :
identifier[result] += literal[string] keyword[if] identifier[dimension] . identifier[attrib] [ literal[string] ]== literal[string] keyword[else] literal[string]
keyword[else] :
identifier[result] += literal[string]
keyword[if] literal[string] keyword[in] identifier[dimension] . identifier[attrib] :
identifier[result] += literal[string] + identifier[dimension] . identifier[attrib] [ literal[string] ]
identifier[result] += literal[string] + identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[dimension] . identifier[text] . identifier[replace] ( literal[string] , literal[string] ))
keyword[return] identifier[result] | def format_dimension(dimension):
"""Formats the specified <dimension> XML tag for string output."""
result = ''
if 'type' in dimension.attrib:
result += '[R' if dimension.attrib['type'] == 'row' else '[C' # depends on [control=['if'], data=[]]
else:
result += '[C'
if 'index' in dimension.attrib:
result += ':' + dimension.attrib['index'] # depends on [control=['if'], data=[]]
result += '] ' + re.sub('\\s+', ' ', dimension.text.replace('\n', ' '))
return result |
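# Hedged example of what format_dimension produces; the XML fragment is made up.
import xml.etree.ElementTree as ET
dim = ET.fromstring('<dimension type="row" index="2">number of\n  atoms</dimension>')
print(format_dimension(dim))   # [R:2] number of atoms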
def load(filters="*.*", text='Select a file, FACEFACE!', default_directory='default_directory'):
"""
Pops up a dialog for opening a single file. Returns a string path or None.
"""
# make sure an "All files (*)" option is present among the filters
if not '*' in filters.split(';'): filters = filters + ";;All files (*)"
# if this type of pref doesn't exist, we need to make a new one
if default_directory in _settings.keys(): default = _settings[default_directory]
else: default = ""
# pop up the dialog
result = _qtw.QFileDialog.getOpenFileName(None,text,default,filters)
# If Qt5, take the zeroth element
if _s._qt.VERSION_INFO[0:5] == "PyQt5": result = result[0]
# Make sure it's a string
result = str(result)
if result == '': return None
else:
_settings[default_directory] = _os.path.split(result)[0]
return result | def function[load, parameter[filters, text, default_directory]]:
constant[
Pops up a dialog for opening a single file. Returns a string path or None.
]
if <ast.UnaryOp object at 0x7da18ede61d0> begin[:]
variable[filters] assign[=] binary_operation[name[filters] + constant[;;All files (*)]]
if compare[name[default_directory] in call[name[_settings].keys, parameter[]]] begin[:]
variable[default] assign[=] call[name[_settings]][name[default_directory]]
variable[result] assign[=] call[name[_qtw].QFileDialog.getOpenFileName, parameter[constant[None], name[text], name[default], name[filters]]]
if compare[call[name[_s]._qt.VERSION_INFO][<ast.Slice object at 0x7da1b26aee30>] equal[==] constant[PyQt5]] begin[:]
variable[result] assign[=] call[name[result]][constant[0]]
variable[result] assign[=] call[name[str], parameter[name[result]]]
if compare[name[result] equal[==] constant[]] begin[:]
return[constant[None]] | keyword[def] identifier[load] ( identifier[filters] = literal[string] , identifier[text] = literal[string] , identifier[default_directory] = literal[string] ):
literal[string]
keyword[if] keyword[not] literal[string] keyword[in] identifier[filters] . identifier[split] ( literal[string] ): identifier[filters] = identifier[filters] + literal[string]
keyword[if] identifier[default_directory] keyword[in] identifier[_settings] . identifier[keys] (): identifier[default] = identifier[_settings] [ identifier[default_directory] ]
keyword[else] : identifier[default] = literal[string]
identifier[result] = identifier[_qtw] . identifier[QFileDialog] . identifier[getOpenFileName] ( keyword[None] , identifier[text] , identifier[default] , identifier[filters] )
keyword[if] identifier[_s] . identifier[_qt] . identifier[VERSION_INFO] [ literal[int] : literal[int] ]== literal[string] : identifier[result] = identifier[result] [ literal[int] ]
identifier[result] = identifier[str] ( identifier[result] )
keyword[if] identifier[result] == literal[string] : keyword[return] keyword[None]
keyword[else] :
identifier[_settings] [ identifier[default_directory] ]= identifier[_os] . identifier[path] . identifier[split] ( identifier[result] )[ literal[int] ]
keyword[return] identifier[result] | def load(filters='*.*', text='Select a file, FACEFACE!', default_directory='default_directory'):
"""
Pops up a dialog for opening a single file. Returns a string path or None.
"""
# make sure an "All files (*)" option is present among the filters
if not '*' in filters.split(';'):
filters = filters + ';;All files (*)' # depends on [control=['if'], data=[]]
# if this type of pref doesn't exist, we need to make a new one
if default_directory in _settings.keys():
default = _settings[default_directory] # depends on [control=['if'], data=['default_directory']]
else:
default = ''
# pop up the dialog
result = _qtw.QFileDialog.getOpenFileName(None, text, default, filters)
# If Qt5, take the zeroth element
if _s._qt.VERSION_INFO[0:5] == 'PyQt5':
result = result[0] # depends on [control=['if'], data=[]]
# Make sure it's a string
result = str(result)
if result == '':
return None # depends on [control=['if'], data=[]]
else:
_settings[default_directory] = _os.path.split(result)[0]
return result |
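# The filter normalization is the only logic in load that runs without a GUI;
# a quick look at what it produces for a filter string lacking a catch-all entry.
filters = 'Images (*.png *.jpg)'
if not '*' in filters.split(';'): filters = filters + ";;All files (*)"
print(filters)   # Images (*.png *.jpg);;All files (*)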
def verify_dir_structure(full_path):
'''Check the given directory to see if it is usable by s2.
Checks that all required directories exist under the given
directory, and also checks that they are writable.
'''
if full_path == None:
return False
r = True
for d2c in PREDEFINED_DIR_NAMES:
#if d2c == "s2":
# d2c = ".s2"
cp2c = os.path.join(full_path, d2c) #complete path to check
if not os.path.isdir(cp2c):
r = False
break
else: #exists, let's check it's writable
if not os.access(cp2c, os.W_OK):
r = False
break
return r | def function[verify_dir_structure, parameter[full_path]]:
constant[Check the given directory to see if it is usable by s2.
Checks that all required directories exist under the given
directory, and also checks that they are writable.
]
if compare[name[full_path] equal[==] constant[None]] begin[:]
return[constant[False]]
variable[r] assign[=] constant[True]
for taget[name[d2c]] in starred[name[PREDEFINED_DIR_NAMES]] begin[:]
variable[cp2c] assign[=] call[name[os].path.join, parameter[name[full_path], name[d2c]]]
if <ast.UnaryOp object at 0x7da1b162b4f0> begin[:]
variable[r] assign[=] constant[False]
break
return[name[r]] | keyword[def] identifier[verify_dir_structure] ( identifier[full_path] ):
literal[string]
keyword[if] identifier[full_path] == keyword[None] :
keyword[return] keyword[False]
identifier[r] = keyword[True]
keyword[for] identifier[d2c] keyword[in] identifier[PREDEFINED_DIR_NAMES] :
identifier[cp2c] = identifier[os] . identifier[path] . identifier[join] ( identifier[full_path] , identifier[d2c] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[cp2c] ):
identifier[r] = keyword[False]
keyword[break]
keyword[else] :
keyword[if] keyword[not] identifier[os] . identifier[access] ( identifier[cp2c] , identifier[os] . identifier[W_OK] ):
identifier[r] = keyword[False]
keyword[break]
keyword[return] identifier[r] | def verify_dir_structure(full_path):
"""Check if given directory to see if it is usable by s2.
Checks that all required directories exist under the given
directory, and also checks that they are writable.
"""
if full_path == None:
return False # depends on [control=['if'], data=[]]
r = True
for d2c in PREDEFINED_DIR_NAMES:
#if d2c == "s2":
# d2c = ".s2"
cp2c = os.path.join(full_path, d2c) #complete path to check
if not os.path.isdir(cp2c):
r = False
break # depends on [control=['if'], data=[]] #exists, let's check it's writable
elif not os.access(cp2c, os.W_OK):
r = False
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['d2c']]
return r |
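# Small self-check for verify_dir_structure, assuming PREDEFINED_DIR_NAMES is in
# scope as in the source module; the temporary layout is illustrative only.
import os, tempfile
with tempfile.TemporaryDirectory() as root:
    print(verify_dir_structure(root))        # False: required subdirectories missing
    for d in PREDEFINED_DIR_NAMES:           # create each required, writable subdir
        os.makedirs(os.path.join(root, d))
    print(verify_dir_structure(root))        # True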
def iter_item_handles(self):
"""Return iterator over item handles."""
for abspath in self._ls_abspaths_with_cache(self._data_abspath):
try:
relpath = self._get_metadata_with_cache(abspath, "handle")
yield relpath
except IrodsNoMetaDataSetError:
pass | def function[iter_item_handles, parameter[self]]:
constant[Return iterator over item handles.]
for taget[name[abspath]] in starred[call[name[self]._ls_abspaths_with_cache, parameter[name[self]._data_abspath]]] begin[:]
<ast.Try object at 0x7da18dc99bd0> | keyword[def] identifier[iter_item_handles] ( identifier[self] ):
literal[string]
keyword[for] identifier[abspath] keyword[in] identifier[self] . identifier[_ls_abspaths_with_cache] ( identifier[self] . identifier[_data_abspath] ):
keyword[try] :
identifier[relpath] = identifier[self] . identifier[_get_metadata_with_cache] ( identifier[abspath] , literal[string] )
keyword[yield] identifier[relpath]
keyword[except] identifier[IrodsNoMetaDataSetError] :
keyword[pass] | def iter_item_handles(self):
"""Return iterator over item handles."""
for abspath in self._ls_abspaths_with_cache(self._data_abspath):
try:
relpath = self._get_metadata_with_cache(abspath, 'handle')
yield relpath # depends on [control=['try'], data=[]]
except IrodsNoMetaDataSetError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['abspath']] |
def from_string(values, separator, remove_duplicates = False):
"""
Splits specified string into elements using a separator and assigns
the elements to a newly created AnyValueArray.
:param values: a string value to be split and assigned to AnyValueArray
:param separator: a separator to split the string
:param remove_duplicates: (optional) true to remove duplicated elements
:return: a newly created AnyValueArray.
"""
result = AnyValueArray()
if values == None or len(values) == 0:
return result
items = str(values).split(separator)
for item in items:
if (item != None and len(item) > 0) or remove_duplicates == False:
result.append(item)
return result | def function[from_string, parameter[values, separator, remove_duplicates]]:
constant[
Splits specified string into elements using a separator and assigns
the elements to a newly created AnyValueArray.
:param values: a string value to be split and assigned to AnyValueArray
:param separator: a separator to split the string
:param remove_duplicates: (optional) true to remove duplicated elements
:return: a newly created AnyValueArray.
]
variable[result] assign[=] call[name[AnyValueArray], parameter[]]
if <ast.BoolOp object at 0x7da20e954700> begin[:]
return[name[result]]
variable[items] assign[=] call[call[name[str], parameter[name[values]]].split, parameter[name[separator]]]
for taget[name[item]] in starred[name[items]] begin[:]
if <ast.BoolOp object at 0x7da20e9572b0> begin[:]
call[name[result].append, parameter[name[item]]]
return[name[result]] | keyword[def] identifier[from_string] ( identifier[values] , identifier[separator] , identifier[remove_duplicates] = keyword[False] ):
literal[string]
identifier[result] = identifier[AnyValueArray] ()
keyword[if] identifier[values] == keyword[None] keyword[or] identifier[len] ( identifier[values] )== literal[int] :
keyword[return] identifier[result]
identifier[items] = identifier[str] ( identifier[values] ). identifier[split] ( identifier[separator] )
keyword[for] identifier[item] keyword[in] identifier[items] :
keyword[if] ( identifier[item] != keyword[None] keyword[and] identifier[len] ( identifier[item] )> literal[int] ) keyword[or] identifier[remove_duplicates] == keyword[False] :
identifier[result] . identifier[append] ( identifier[item] )
keyword[return] identifier[result] | def from_string(values, separator, remove_duplicates=False):
"""
Splits specified string into elements using a separator and assigns
the elements to a newly created AnyValueArray.
:param values: a string value to be split and assigned to AnyValueArray
:param separator: a separator to split the string
:param remove_duplicates: (optional) true to remove duplicated elements
:return: a newly created AnyValueArray.
"""
result = AnyValueArray()
if values == None or len(values) == 0:
return result # depends on [control=['if'], data=[]]
items = str(values).split(separator)
for item in items:
if item != None and len(item) > 0 or remove_duplicates == False:
result.append(item) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
return result |
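# Behaviour sketch for from_string, assuming it is exposed as a static method of
# AnyValueArray (as the signature suggests) and that AnyValueArray is list-like.
# Note the flag's actual effect in the code above: it skips empty elements rather
# than deduplicating.
arr = AnyValueArray.from_string('1,,2,,3', ',')
print(list(arr))   # ['1', '', '2', '', '3']  -- empties kept by default
arr = AnyValueArray.from_string('1,,2,,3', ',', remove_duplicates=True)
print(list(arr))   # ['1', '2', '3']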
def CutAtClosestPoint(self, p):
"""
Let x be the point on the polyline closest to p. Then
CutAtClosestPoint returns two new polylines, one representing
the polyline from the beginning up to x, and one representing
x onwards to the end of the polyline. x is the first point
returned in the second polyline.
"""
(closest, i) = self.GetClosestPoint(p)
tmp = [closest]
tmp.extend(self._points[i+1:])
return (Poly(self._points[0:i+1]),
Poly(tmp)) | def function[CutAtClosestPoint, parameter[self, p]]:
constant[
Let x be the point on the polyline closest to p. Then
CutAtClosestPoint returns two new polylines, one representing
the polyline from the beginning up to x, and one representing
x onwards to the end of the polyline. x is the first point
returned in the second polyline.
]
<ast.Tuple object at 0x7da1b17e6aa0> assign[=] call[name[self].GetClosestPoint, parameter[name[p]]]
variable[tmp] assign[=] list[[<ast.Name object at 0x7da1b17e4310>]]
call[name[tmp].extend, parameter[call[name[self]._points][<ast.Slice object at 0x7da1b17e6f80>]]]
return[tuple[[<ast.Call object at 0x7da1b17e4490>, <ast.Call object at 0x7da1b17e4250>]]] | keyword[def] identifier[CutAtClosestPoint] ( identifier[self] , identifier[p] ):
literal[string]
( identifier[closest] , identifier[i] )= identifier[self] . identifier[GetClosestPoint] ( identifier[p] )
identifier[tmp] =[ identifier[closest] ]
identifier[tmp] . identifier[extend] ( identifier[self] . identifier[_points] [ identifier[i] + literal[int] :])
keyword[return] ( identifier[Poly] ( identifier[self] . identifier[_points] [ literal[int] : identifier[i] + literal[int] ]),
identifier[Poly] ( identifier[tmp] )) | def CutAtClosestPoint(self, p):
"""
Let x be the point on the polyline closest to p. Then
CutAtClosestPoint returns two new polylines, one representing
the polyline from the beginning up to x, and one representing
x onwards to the end of the polyline. x is the first point
returned in the second polyline.
"""
(closest, i) = self.GetClosestPoint(p)
tmp = [closest]
tmp.extend(self._points[i + 1:])
return (Poly(self._points[0:i + 1]), Poly(tmp)) |
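# The splice itself is easy to see with plain lists; here closest and i stand in
# for GetClosestPoint's results on a hypothetical polyline.
points = [(0, 0), (10, 0), (10, 10)]
closest, i = (10, 5), 1                 # nearest point lies past vertex index 1
first = points[0:i + 1]                 # [(0, 0), (10, 0)]
second = [closest] + points[i + 1:]     # [(10, 5), (10, 10)] -- cut point starts the tail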
def decode_setid(encoded):
"""Decode setid as uint128"""
try:
lo, hi = struct.unpack('<QQ', b32decode(encoded.upper() + '======'))
except struct.error:
raise ValueError('Cannot decode {!r}'.format(encoded))
return (hi << 64) + lo | def function[decode_setid, parameter[encoded]]:
constant[Decode setid as uint128]
<ast.Try object at 0x7da1b26b44f0>
return[binary_operation[binary_operation[name[hi] <ast.LShift object at 0x7da2590d69e0> constant[64]] + name[lo]]] | keyword[def] identifier[decode_setid] ( identifier[encoded] ):
literal[string]
keyword[try] :
identifier[lo] , identifier[hi] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[b32decode] ( identifier[encoded] . identifier[upper] ()+ literal[string] ))
keyword[except] identifier[struct] . identifier[error] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[encoded] ))
keyword[return] ( identifier[hi] << literal[int] )+ identifier[lo] | def decode_setid(encoded):
"""Decode setid as uint128"""
try:
(lo, hi) = struct.unpack('<QQ', b32decode(encoded.upper() + '======')) # depends on [control=['try'], data=[]]
except struct.error:
raise ValueError('Cannot decode {!r}'.format(encoded)) # depends on [control=['except'], data=[]]
return (hi << 64) + lo |
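# Hedged inverse for round-tripping: encode_setid is not part of this excerpt, so
# the sketch reconstructs it from decode_setid's layout (little-endian lo/hi words,
# 26 base32 characters, padding stripped).
import struct
from base64 import b32encode
def encode_setid(value):
    lo, hi = value & ((1 << 64) - 1), value >> 64
    return b32encode(struct.pack('<QQ', lo, hi)).decode('ascii')[:26].lower()
setid = (42 << 64) + 7
assert decode_setid(encode_setid(setid)) == setid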
def parse_reports(self):
""" Find Picard InsertSizeMetrics reports and parse their data """
# Set up vars
self.picard_GCbias_data = dict()
self.picard_GCbiasSummary_data = dict()
# Go through logs and find Metrics
for f in self.find_log_files('picard/gcbias', filehandles=True):
s_name = None
gc_col = None
cov_col = None
for l in f['f']:
# New log starting
if 'GcBiasMetrics' in l and 'INPUT' in l:
s_name = None
# Pull sample name from input
fn_search = re.search(r"INPUT(?:=|\s+)(\[?[^\s]+\]?)", l, flags=re.IGNORECASE)
if fn_search:
s_name = os.path.basename(fn_search.group(1).strip('[]'))
s_name = self.clean_s_name(s_name, f['root'])
if s_name is not None:
if gc_col is not None and cov_col is not None:
try:
# Note that GC isn't always the first column.
s = l.strip("\n").split("\t")
self.picard_GCbias_data[s_name][int(s[gc_col])] = float(s[cov_col])
except IndexError:
s_name = None
gc_col = None
cov_col = None
if 'GcBiasDetailMetrics' in l and '## METRICS CLASS' in l:
if s_name in self.picard_GCbias_data:
log.debug("Duplicate sample name found in {}! Overwriting: {}".format(f['fn'], s_name))
self.add_data_source(f, s_name, section='GcBiasDetailMetrics')
self.picard_GCbias_data[s_name] = dict()
# Get header - find columns with the data we want
l = f['f'].readline()
s = l.strip("\n").split("\t")
gc_col = s.index('GC')
cov_col = s.index('NORMALIZED_COVERAGE')
if 'GcBiasSummaryMetrics' in l and '## METRICS CLASS' in l:
if s_name in self.picard_GCbias_data:
log.debug("Duplicate sample name found in {}! Overwriting: {}".format(f['fn'], s_name))
self.add_data_source(f, s_name, section='GcBiasSummaryMetrics')
self.picard_GCbiasSummary_data[s_name] = dict()
keys = f['f'].readline().rstrip("\n").split("\t")
vals = f['f'].readline().rstrip("\n").split("\t")
for i, k in enumerate(keys):
try:
self.picard_GCbiasSummary_data[s_name][k] = float(vals[i])
except ValueError:
self.picard_GCbiasSummary_data[s_name][k] = vals[i]
for s_name in list(self.picard_GCbias_data.keys()):
if len(self.picard_GCbias_data[s_name]) == 0:
self.picard_GCbias_data.pop(s_name, None)
log.debug("Removing {} as no data parsed".format(s_name))
for s_name in list(self.picard_GCbiasSummary_data.keys()):
if len(self.picard_GCbiasSummary_data[s_name]) == 0:
self.picard_GCbiasSummary_data.pop(s_name, None)
log.debug("Removing {} as no data parsed".format(s_name))
# Filter to strip out ignored sample names
self.picard_GCbias_data = self.ignore_samples(self.picard_GCbias_data)
if len(self.picard_GCbias_data) > 0:
# Plot the graph
pconfig = {
'id': 'picard_gcbias_plot',
'title': 'Picard: GC Coverage Bias',
'ylab': 'Normalized Coverage',
'xlab': '% GC',
'xmin': 0,
'xmax': 100,
'xDecimals': False,
'ymin': 0,
'yCeiling': 10,
'tt_label': '<b>{point.x} %GC</b>: {point.y:.2f}',
'yPlotLines': [
{'value': 1, 'color': '#999999', 'width': 2, 'dashStyle': 'LongDash'},
]
}
self.add_section (
name = 'GC Coverage Bias',
anchor = 'picard-gcbias',
description = 'This plot shows bias in coverage across regions of the genome with varying GC content.'\
' A perfect library would be a flat line at <code>y = 1</code>.',
plot = linegraph.plot(self.picard_GCbias_data, pconfig)
)
if len(self.picard_GCbiasSummary_data) > 0:
# Write parsed summary data to a file
self.write_data_file(self.picard_GCbiasSummary_data, 'multiqc_picard_gcbias')
# Return the number of detected samples to the parent module
return len(self.picard_GCbias_data) | def function[parse_reports, parameter[self]]:
constant[ Find Picard InsertSizeMetrics reports and parse their data ]
name[self].picard_GCbias_data assign[=] call[name[dict], parameter[]]
name[self].picard_GCbiasSummary_data assign[=] call[name[dict], parameter[]]
for taget[name[f]] in starred[call[name[self].find_log_files, parameter[constant[picard/gcbias]]]] begin[:]
variable[s_name] assign[=] constant[None]
variable[gc_col] assign[=] constant[None]
variable[cov_col] assign[=] constant[None]
for taget[name[l]] in starred[call[name[f]][constant[f]]] begin[:]
if <ast.BoolOp object at 0x7da18f7203d0> begin[:]
variable[s_name] assign[=] constant[None]
variable[fn_search] assign[=] call[name[re].search, parameter[constant[INPUT(?:=|\s+)(\[?[^\s]+\]?)], name[l]]]
if name[fn_search] begin[:]
variable[s_name] assign[=] call[name[os].path.basename, parameter[call[call[name[fn_search].group, parameter[constant[1]]].strip, parameter[constant[[]]]]]]
variable[s_name] assign[=] call[name[self].clean_s_name, parameter[name[s_name], call[name[f]][constant[root]]]]
if compare[name[s_name] is_not constant[None]] begin[:]
if <ast.BoolOp object at 0x7da18f7219f0> begin[:]
<ast.Try object at 0x7da18f720670>
if <ast.BoolOp object at 0x7da18c4ce860> begin[:]
if compare[name[s_name] in name[self].picard_GCbias_data] begin[:]
call[name[log].debug, parameter[call[constant[Duplicate sample name found in {}! Overwriting: {}].format, parameter[call[name[f]][constant[fn]], name[s_name]]]]]
call[name[self].add_data_source, parameter[name[f], name[s_name]]]
call[name[self].picard_GCbias_data][name[s_name]] assign[=] call[name[dict], parameter[]]
variable[l] assign[=] call[call[name[f]][constant[f]].readline, parameter[]]
variable[s] assign[=] call[call[name[l].strip, parameter[constant[
]]].split, parameter[constant[ ]]]
variable[gc_col] assign[=] call[name[s].index, parameter[constant[GC]]]
variable[cov_col] assign[=] call[name[s].index, parameter[constant[NORMALIZED_COVERAGE]]]
if <ast.BoolOp object at 0x7da18c4cce80> begin[:]
if compare[name[s_name] in name[self].picard_GCbias_data] begin[:]
call[name[log].debug, parameter[call[constant[Duplicate sample name found in {}! Overwriting: {}].format, parameter[call[name[f]][constant[fn]], name[s_name]]]]]
call[name[self].add_data_source, parameter[name[f], name[s_name]]]
call[name[self].picard_GCbiasSummary_data][name[s_name]] assign[=] call[name[dict], parameter[]]
variable[keys] assign[=] call[call[call[call[name[f]][constant[f]].readline, parameter[]].rstrip, parameter[constant[
]]].split, parameter[constant[ ]]]
variable[vals] assign[=] call[call[call[call[name[f]][constant[f]].readline, parameter[]].rstrip, parameter[constant[
]]].split, parameter[constant[ ]]]
for taget[tuple[[<ast.Name object at 0x7da18c4cc3d0>, <ast.Name object at 0x7da18c4cd7e0>]]] in starred[call[name[enumerate], parameter[name[keys]]]] begin[:]
<ast.Try object at 0x7da18c4cd9c0>
for taget[name[s_name]] in starred[call[name[list], parameter[call[name[self].picard_GCbias_data.keys, parameter[]]]]] begin[:]
if compare[call[name[len], parameter[call[name[self].picard_GCbias_data][name[s_name]]]] equal[==] constant[0]] begin[:]
call[name[self].picard_GCbias_data.pop, parameter[name[s_name], constant[None]]]
call[name[log].debug, parameter[call[constant[Removing {} as no data parsed].format, parameter[name[s_name]]]]]
for taget[name[s_name]] in starred[call[name[list], parameter[call[name[self].picard_GCbiasSummary_data.keys, parameter[]]]]] begin[:]
if compare[call[name[len], parameter[call[name[self].picard_GCbiasSummary_data][name[s_name]]]] equal[==] constant[0]] begin[:]
call[name[self].picard_GCbiasSummary_data.pop, parameter[name[s_name], constant[None]]]
call[name[log].debug, parameter[call[constant[Removing {} as no data parsed].format, parameter[name[s_name]]]]]
name[self].picard_GCbias_data assign[=] call[name[self].ignore_samples, parameter[name[self].picard_GCbias_data]]
if compare[call[name[len], parameter[name[self].picard_GCbias_data]] greater[>] constant[0]] begin[:]
variable[pconfig] assign[=] dictionary[[<ast.Constant object at 0x7da2047eb9a0>, <ast.Constant object at 0x7da2047ea7a0>, <ast.Constant object at 0x7da2047e9960>, <ast.Constant object at 0x7da2047e8c10>, <ast.Constant object at 0x7da2047e8100>, <ast.Constant object at 0x7da2047ebca0>, <ast.Constant object at 0x7da2047e8c70>, <ast.Constant object at 0x7da2047e8ac0>, <ast.Constant object at 0x7da2047e8250>, <ast.Constant object at 0x7da2047e9e40>, <ast.Constant object at 0x7da2047e8880>], [<ast.Constant object at 0x7da2047ea140>, <ast.Constant object at 0x7da2047e88b0>, <ast.Constant object at 0x7da2047ebb20>, <ast.Constant object at 0x7da2047eacb0>, <ast.Constant object at 0x7da2047ea560>, <ast.Constant object at 0x7da2047ebd90>, <ast.Constant object at 0x7da2047ea4d0>, <ast.Constant object at 0x7da18f09e860>, <ast.Constant object at 0x7da18f09f430>, <ast.Constant object at 0x7da18f09c430>, <ast.List object at 0x7da18f09e7d0>]]
call[name[self].add_section, parameter[]]
if compare[call[name[len], parameter[name[self].picard_GCbiasSummary_data]] greater[>] constant[0]] begin[:]
call[name[self].write_data_file, parameter[name[self].picard_GCbiasSummary_data, constant[multiqc_picard_gcbias]]]
return[call[name[len], parameter[name[self].picard_GCbias_data]]] | keyword[def] identifier[parse_reports] ( identifier[self] ):
literal[string]
identifier[self] . identifier[picard_GCbias_data] = identifier[dict] ()
identifier[self] . identifier[picard_GCbiasSummary_data] = identifier[dict] ()
keyword[for] identifier[f] keyword[in] identifier[self] . identifier[find_log_files] ( literal[string] , identifier[filehandles] = keyword[True] ):
identifier[s_name] = keyword[None]
identifier[gc_col] = keyword[None]
identifier[cov_col] = keyword[None]
keyword[for] identifier[l] keyword[in] identifier[f] [ literal[string] ]:
keyword[if] literal[string] keyword[in] identifier[l] keyword[and] literal[string] keyword[in] identifier[l] :
identifier[s_name] = keyword[None]
identifier[fn_search] = identifier[re] . identifier[search] ( literal[string] , identifier[l] , identifier[flags] = identifier[re] . identifier[IGNORECASE] )
keyword[if] identifier[fn_search] :
identifier[s_name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[fn_search] . identifier[group] ( literal[int] ). identifier[strip] ( literal[string] ))
identifier[s_name] = identifier[self] . identifier[clean_s_name] ( identifier[s_name] , identifier[f] [ literal[string] ])
keyword[if] identifier[s_name] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[gc_col] keyword[is] keyword[not] keyword[None] keyword[and] identifier[cov_col] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[s] = identifier[l] . identifier[strip] ( literal[string] ). identifier[split] ( literal[string] )
identifier[self] . identifier[picard_GCbias_data] [ identifier[s_name] ][ identifier[int] ( identifier[s] [ identifier[gc_col] ])]= identifier[float] ( identifier[s] [ identifier[cov_col] ])
keyword[except] identifier[IndexError] :
identifier[s_name] = keyword[None]
identifier[gc_col] = keyword[None]
identifier[cov_col] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[l] keyword[and] literal[string] keyword[in] identifier[l] :
keyword[if] identifier[s_name] keyword[in] identifier[self] . identifier[picard_GCbias_data] :
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[f] [ literal[string] ], identifier[s_name] ))
identifier[self] . identifier[add_data_source] ( identifier[f] , identifier[s_name] , identifier[section] = literal[string] )
identifier[self] . identifier[picard_GCbias_data] [ identifier[s_name] ]= identifier[dict] ()
identifier[l] = identifier[f] [ literal[string] ]. identifier[readline] ()
identifier[s] = identifier[l] . identifier[strip] ( literal[string] ). identifier[split] ( literal[string] )
identifier[gc_col] = identifier[s] . identifier[index] ( literal[string] )
identifier[cov_col] = identifier[s] . identifier[index] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[l] keyword[and] literal[string] keyword[in] identifier[l] :
keyword[if] identifier[s_name] keyword[in] identifier[self] . identifier[picard_GCbias_data] :
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[f] [ literal[string] ], identifier[s_name] ))
identifier[self] . identifier[add_data_source] ( identifier[f] , identifier[s_name] , identifier[section] = literal[string] )
identifier[self] . identifier[picard_GCbiasSummary_data] [ identifier[s_name] ]= identifier[dict] ()
identifier[keys] = identifier[f] [ literal[string] ]. identifier[readline] (). identifier[rstrip] ( literal[string] ). identifier[split] ( literal[string] )
identifier[vals] = identifier[f] [ literal[string] ]. identifier[readline] (). identifier[rstrip] ( literal[string] ). identifier[split] ( literal[string] )
keyword[for] identifier[i] , identifier[k] keyword[in] identifier[enumerate] ( identifier[keys] ):
keyword[try] :
identifier[self] . identifier[picard_GCbiasSummary_data] [ identifier[s_name] ][ identifier[k] ]= identifier[float] ( identifier[vals] [ identifier[i] ])
keyword[except] identifier[ValueError] :
identifier[self] . identifier[picard_GCbiasSummary_data] [ identifier[s_name] ][ identifier[k] ]= identifier[vals] [ identifier[i] ]
keyword[for] identifier[s_name] keyword[in] identifier[list] ( identifier[self] . identifier[picard_GCbias_data] . identifier[keys] ()):
keyword[if] identifier[len] ( identifier[self] . identifier[picard_GCbias_data] [ identifier[s_name] ])== literal[int] :
identifier[self] . identifier[picard_GCbias_data] . identifier[pop] ( identifier[s_name] , keyword[None] )
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[s_name] ))
keyword[for] identifier[s_name] keyword[in] identifier[list] ( identifier[self] . identifier[picard_GCbiasSummary_data] . identifier[keys] ()):
keyword[if] identifier[len] ( identifier[self] . identifier[picard_GCbiasSummary_data] [ identifier[s_name] ])== literal[int] :
identifier[self] . identifier[picard_GCbiasSummary_data] . identifier[pop] ( identifier[s_name] , keyword[None] )
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[s_name] ))
identifier[self] . identifier[picard_GCbias_data] = identifier[self] . identifier[ignore_samples] ( identifier[self] . identifier[picard_GCbias_data] )
keyword[if] identifier[len] ( identifier[self] . identifier[picard_GCbias_data] )> literal[int] :
identifier[pconfig] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : keyword[False] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[string] ,
literal[string] :[
{ literal[string] : literal[int] , literal[string] : literal[string] , literal[string] : literal[int] , literal[string] : literal[string] },
]
}
identifier[self] . identifier[add_section] (
identifier[name] = literal[string] ,
identifier[anchor] = literal[string] ,
identifier[description] = literal[string] literal[string] ,
identifier[plot] = identifier[linegraph] . identifier[plot] ( identifier[self] . identifier[picard_GCbias_data] , identifier[pconfig] )
)
keyword[if] identifier[len] ( identifier[self] . identifier[picard_GCbiasSummary_data] )> literal[int] :
identifier[self] . identifier[write_data_file] ( identifier[self] . identifier[picard_GCbiasSummary_data] , literal[string] )
keyword[return] identifier[len] ( identifier[self] . identifier[picard_GCbias_data] ) | def parse_reports(self):
""" Find Picard InsertSizeMetrics reports and parse their data """
# Set up vars
self.picard_GCbias_data = dict()
self.picard_GCbiasSummary_data = dict()
# Go through logs and find Metrics
for f in self.find_log_files('picard/gcbias', filehandles=True):
s_name = None
gc_col = None
cov_col = None
for l in f['f']:
# New log starting
if 'GcBiasMetrics' in l and 'INPUT' in l:
s_name = None
# Pull sample name from input
fn_search = re.search('INPUT(?:=|\\s+)(\\[?[^\\s]+\\]?)', l, flags=re.IGNORECASE)
if fn_search:
s_name = os.path.basename(fn_search.group(1).strip('[]'))
s_name = self.clean_s_name(s_name, f['root']) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if s_name is not None:
if gc_col is not None and cov_col is not None:
try:
# Note that GC isn't always the first column.
s = l.strip('\n').split('\t')
self.picard_GCbias_data[s_name][int(s[gc_col])] = float(s[cov_col]) # depends on [control=['try'], data=[]]
except IndexError:
s_name = None
gc_col = None
cov_col = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if 'GcBiasDetailMetrics' in l and '## METRICS CLASS' in l:
if s_name in self.picard_GCbias_data:
log.debug('Duplicate sample name found in {}! Overwriting: {}'.format(f['fn'], s_name)) # depends on [control=['if'], data=['s_name']]
self.add_data_source(f, s_name, section='GcBiasDetailMetrics')
self.picard_GCbias_data[s_name] = dict()
# Get header - find columns with the data we want
l = f['f'].readline()
s = l.strip('\n').split('\t')
gc_col = s.index('GC')
cov_col = s.index('NORMALIZED_COVERAGE') # depends on [control=['if'], data=[]]
if 'GcBiasSummaryMetrics' in l and '## METRICS CLASS' in l:
if s_name in self.picard_GCbias_data:
log.debug('Duplicate sample name found in {}! Overwriting: {}'.format(f['fn'], s_name)) # depends on [control=['if'], data=['s_name']]
self.add_data_source(f, s_name, section='GcBiasSummaryMetrics')
self.picard_GCbiasSummary_data[s_name] = dict()
keys = f['f'].readline().rstrip('\n').split('\t')
vals = f['f'].readline().rstrip('\n').split('\t')
for (i, k) in enumerate(keys):
try:
self.picard_GCbiasSummary_data[s_name][k] = float(vals[i]) # depends on [control=['try'], data=[]]
except ValueError:
self.picard_GCbiasSummary_data[s_name][k] = vals[i] # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['s_name']] # depends on [control=['for'], data=['l']]
for s_name in list(self.picard_GCbias_data.keys()):
if len(self.picard_GCbias_data[s_name]) == 0:
self.picard_GCbias_data.pop(s_name, None)
log.debug('Removing {} as no data parsed'.format(s_name)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['s_name']]
for s_name in list(self.picard_GCbiasSummary_data.keys()):
if len(self.picard_GCbiasSummary_data[s_name]) == 0:
self.picard_GCbiasSummary_data.pop(s_name, None)
log.debug('Removing {} as no data parsed'.format(s_name)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['s_name']] # depends on [control=['for'], data=['f']]
# Filter to strip out ignored sample names
self.picard_GCbias_data = self.ignore_samples(self.picard_GCbias_data)
if len(self.picard_GCbias_data) > 0:
# Plot the graph
pconfig = {'id': 'picard_gcbias_plot', 'title': 'Picard: GC Coverage Bias', 'ylab': 'Normalized Coverage', 'xlab': '% GC', 'xmin': 0, 'xmax': 100, 'xDecimals': False, 'ymin': 0, 'yCeiling': 10, 'tt_label': '<b>{point.x} %GC</b>: {point.y:.2f}', 'yPlotLines': [{'value': 1, 'color': '#999999', 'width': 2, 'dashStyle': 'LongDash'}]}
self.add_section(name='GC Coverage Bias', anchor='picard-gcbias', description='This plot shows bias in coverage across regions of the genome with varying GC content. A perfect library would be a flat line at <code>y = 1</code>.', plot=linegraph.plot(self.picard_GCbias_data, pconfig)) # depends on [control=['if'], data=[]]
if len(self.picard_GCbiasSummary_data) > 0:
# Write parsed summary data to a file
self.write_data_file(self.picard_GCbiasSummary_data, 'multiqc_picard_gcbias') # depends on [control=['if'], data=[]]
# Return the number of detected samples to the parent module
return len(self.picard_GCbias_data) |
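# The sample-name extraction in parse_reports hinges on one regex; a standalone
# check against a made-up Picard command line.
import os, re
line = 'INPUT=[/data/runs/sampleA.bam] CHART_OUTPUT=chart.pdf'
m = re.search(r"INPUT(?:=|\s+)(\[?[^\s]+\]?)", line, flags=re.IGNORECASE)
print(os.path.basename(m.group(1).strip('[]')))   # sampleA.bam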
def protorpc_to_endpoints_error(self, status, body):
"""Convert a ProtoRPC error to the format expected by Google Endpoints.
If the body does not contain a ProtoRPC message in state APPLICATION_ERROR,
the status and body will be returned unchanged.
Args:
status: HTTP status of the response from the backend
body: JSON-encoded error in format expected by Endpoints frontend.
Returns:
Tuple of (http status, body)
"""
try:
rpc_error = self.__PROTOJSON.decode_message(remote.RpcStatus, body)
except (ValueError, messages.ValidationError):
rpc_error = remote.RpcStatus()
if rpc_error.state == remote.RpcStatus.State.APPLICATION_ERROR:
# Try to map to HTTP error code.
error_class = _ERROR_NAME_MAP.get(rpc_error.error_name)
if error_class:
status, body = self.__write_error(error_class.http_status,
rpc_error.error_message)
return status, body | def function[protorpc_to_endpoints_error, parameter[self, status, body]]:
constant[Convert a ProtoRPC error to the format expected by Google Endpoints.
If the body does not contain a ProtoRPC message in state APPLICATION_ERROR,
the status and body will be returned unchanged.
Args:
status: HTTP status of the response from the backend
body: JSON-encoded error in format expected by Endpoints frontend.
Returns:
Tuple of (http status, body)
]
<ast.Try object at 0x7da1b0efeb90>
if compare[name[rpc_error].state equal[==] name[remote].RpcStatus.State.APPLICATION_ERROR] begin[:]
variable[error_class] assign[=] call[name[_ERROR_NAME_MAP].get, parameter[name[rpc_error].error_name]]
if name[error_class] begin[:]
<ast.Tuple object at 0x7da1b0efceb0> assign[=] call[name[self].__write_error, parameter[name[error_class].http_status, name[rpc_error].error_message]]
return[tuple[[<ast.Name object at 0x7da1b0efead0>, <ast.Name object at 0x7da1b0eff790>]]] | keyword[def] identifier[protorpc_to_endpoints_error] ( identifier[self] , identifier[status] , identifier[body] ):
literal[string]
keyword[try] :
identifier[rpc_error] = identifier[self] . identifier[__PROTOJSON] . identifier[decode_message] ( identifier[remote] . identifier[RpcStatus] , identifier[body] )
keyword[except] ( identifier[ValueError] , identifier[messages] . identifier[ValidationError] ):
identifier[rpc_error] = identifier[remote] . identifier[RpcStatus] ()
keyword[if] identifier[rpc_error] . identifier[state] == identifier[remote] . identifier[RpcStatus] . identifier[State] . identifier[APPLICATION_ERROR] :
identifier[error_class] = identifier[_ERROR_NAME_MAP] . identifier[get] ( identifier[rpc_error] . identifier[error_name] )
keyword[if] identifier[error_class] :
identifier[status] , identifier[body] = identifier[self] . identifier[__write_error] ( identifier[error_class] . identifier[http_status] ,
identifier[rpc_error] . identifier[error_message] )
keyword[return] identifier[status] , identifier[body] | def protorpc_to_endpoints_error(self, status, body):
"""Convert a ProtoRPC error to the format expected by Google Endpoints.
If the body does not contain a ProtoRPC message in state APPLICATION_ERROR
the status and body will be returned unchanged.
Args:
status: HTTP status of the response from the backend
body: JSON-encoded error in format expected by Endpoints frontend.
Returns:
Tuple of (http status, body)
"""
try:
rpc_error = self.__PROTOJSON.decode_message(remote.RpcStatus, body) # depends on [control=['try'], data=[]]
except (ValueError, messages.ValidationError):
rpc_error = remote.RpcStatus() # depends on [control=['except'], data=[]]
if rpc_error.state == remote.RpcStatus.State.APPLICATION_ERROR:
# Try to map to HTTP error code.
error_class = _ERROR_NAME_MAP.get(rpc_error.error_name)
if error_class:
(status, body) = self.__write_error(error_class.http_status, rpc_error.error_message) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return (status, body) |
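The remapping step above is the interesting part: decode the RpcStatus, and if it is an APPLICATION_ERROR, translate the ProtoRPC error name into an HTTP status. Below is a minimal standalone sketch of that lookup; the ERROR_NAME_MAP contents and the JSON error shape are illustrative assumptions, not the module's real _ERROR_NAME_MAP or __write_error output.

import json

# Hypothetical name-to-status table standing in for _ERROR_NAME_MAP.
ERROR_NAME_MAP = {'NotFoundException': 404, 'ForbiddenException': 403, 'BadRequestException': 400}

def map_application_error(status, error_name, error_message):
    """Return (status, body), remapping known application error names."""
    http_status = ERROR_NAME_MAP.get(error_name)
    if http_status is None:
        return status, error_message  # unknown name: pass through unchanged
    return http_status, json.dumps({'error': {'message': error_message}})

print(map_application_error(500, 'NotFoundException', 'no such entity'))
# (404, '{"error": {"message": "no such entity"}}')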
def max_width(*args, **kwargs):
"""Returns formatted text or context manager for textui:puts.
>>> from clint.textui import puts, max_width
>>> max_width('123 5678', 8)
'123 5678'
>>> max_width('123 5678', 7)
'123 \n5678'
>>> with max_width(7):
... puts('123 5678')
'123 \n5678'
"""
args = list(args)
if not args:
args.append(kwargs.get('string'))
args.append(kwargs.get('cols'))
args.append(kwargs.get('separator'))
elif len(args) == 1:
args.append(kwargs.get('cols'))
args.append(kwargs.get('separator'))
elif len(args) == 2:
args.append(kwargs.get('separator'))
string, cols, separator = args
if separator is None:
separator = '\n' # default value
if cols is None:
# cols must always be specified,
# because string may instead be supplied by the textui:puts function
string, cols = cols, string
if string is None:
MAX_WIDTHS.append((cols, separator))
return _max_width_context()
else:
return _max_width_formatter(string, cols, separator) | def function[max_width, parameter[]]:
constant[Returns formatted text or context manager for textui:puts.
>>> from clint.textui import puts, max_width
>>> max_width('123 5678', 8)
'123 5678'
>>> max_width('123 5678', 7)
'123
5678'
>>> with max_width(7):
... puts('123 5678')
'123
5678'
]
variable[args] assign[=] call[name[list], parameter[name[args]]]
if <ast.UnaryOp object at 0x7da1b1d9dea0> begin[:]
call[name[args].append, parameter[call[name[kwargs].get, parameter[constant[string]]]]]
call[name[args].append, parameter[call[name[kwargs].get, parameter[constant[cols]]]]]
call[name[args].append, parameter[call[name[kwargs].get, parameter[constant[separator]]]]]
<ast.Tuple object at 0x7da1b1d6e470> assign[=] name[args]
if compare[name[separator] is constant[None]] begin[:]
variable[separator] assign[=] constant[
]
if compare[name[cols] is constant[None]] begin[:]
<ast.Tuple object at 0x7da1b1d6ddb0> assign[=] tuple[[<ast.Name object at 0x7da1b1d6ebc0>, <ast.Name object at 0x7da1b1d6f160>]]
if compare[name[string] is constant[None]] begin[:]
call[name[MAX_WIDTHS].append, parameter[tuple[[<ast.Name object at 0x7da1b1d6e200>, <ast.Name object at 0x7da1b1d6fd90>]]]]
return[call[name[_max_width_context], parameter[]]] | keyword[def] identifier[max_width] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[args] = identifier[list] ( identifier[args] )
keyword[if] keyword[not] identifier[args] :
identifier[args] . identifier[append] ( identifier[kwargs] . identifier[get] ( literal[string] ))
identifier[args] . identifier[append] ( identifier[kwargs] . identifier[get] ( literal[string] ))
identifier[args] . identifier[append] ( identifier[kwargs] . identifier[get] ( literal[string] ))
keyword[elif] identifier[len] ( identifier[args] )== literal[int] :
identifier[args] . identifier[append] ( identifier[kwargs] . identifier[get] ( literal[string] ))
identifier[args] . identifier[append] ( identifier[kwargs] . identifier[get] ( literal[string] ))
keyword[elif] identifier[len] ( identifier[args] )== literal[int] :
identifier[args] . identifier[append] ( identifier[kwargs] . identifier[get] ( literal[string] ))
identifier[string] , identifier[cols] , identifier[separator] = identifier[args]
keyword[if] identifier[separator] keyword[is] keyword[None] :
identifier[separator] = literal[string]
keyword[if] identifier[cols] keyword[is] keyword[None] :
identifier[string] , identifier[cols] = identifier[cols] , identifier[string]
keyword[if] identifier[string] keyword[is] keyword[None] :
identifier[MAX_WIDTHS] . identifier[append] (( identifier[cols] , identifier[separator] ))
keyword[return] identifier[_max_width_context] ()
keyword[else] :
keyword[return] identifier[_max_width_formatter] ( identifier[string] , identifier[cols] , identifier[separator] ) | def max_width(*args, **kwargs):
"""Returns formatted text or context manager for textui:puts.
>>> from clint.textui import puts, max_width
>>> max_width('123 5678', 8)
'123 5678'
>>> max_width('123 5678', 7)
'123
5678'
>>> with max_width(7):
... puts('123 5678')
'123
5678'
"""
args = list(args)
if not args:
args.append(kwargs.get('string'))
args.append(kwargs.get('cols'))
args.append(kwargs.get('separator')) # depends on [control=['if'], data=[]]
elif len(args) == 1:
args.append(kwargs.get('cols'))
args.append(kwargs.get('separator')) # depends on [control=['if'], data=[]]
elif len(args) == 2:
args.append(kwargs.get('separator')) # depends on [control=['if'], data=[]]
(string, cols, separator) = args
if separator is None:
separator = '\n' # default value # depends on [control=['if'], data=['separator']]
if cols is None:
# cols must always be specified,
# because string may instead be supplied by the textui:puts function
(string, cols) = (cols, string) # depends on [control=['if'], data=['cols']]
if string is None:
MAX_WIDTHS.append((cols, separator))
return _max_width_context() # depends on [control=['if'], data=[]]
else:
return _max_width_formatter(string, cols, separator) |
def mark(self):
"""
Mark the unit of work as failed in the database and update the listener
so as to skip it next time.
"""
self.reliableListener.lastRun = extime.Time()
BatchProcessingError(
store=self.reliableListener.store,
processor=self.reliableListener.processor,
listener=self.reliableListener.listener,
item=self.workUnit,
error=self.failure.getErrorMessage()) | def function[mark, parameter[self]]:
constant[
Mark the unit of work as failed in the database and update the listener
so as to skip it next time.
]
name[self].reliableListener.lastRun assign[=] call[name[extime].Time, parameter[]]
call[name[BatchProcessingError], parameter[]] | keyword[def] identifier[mark] ( identifier[self] ):
literal[string]
identifier[self] . identifier[reliableListener] . identifier[lastRun] = identifier[extime] . identifier[Time] ()
identifier[BatchProcessingError] (
identifier[store] = identifier[self] . identifier[reliableListener] . identifier[store] ,
identifier[processor] = identifier[self] . identifier[reliableListener] . identifier[processor] ,
identifier[listener] = identifier[self] . identifier[reliableListener] . identifier[listener] ,
identifier[item] = identifier[self] . identifier[workUnit] ,
identifier[error] = identifier[self] . identifier[failure] . identifier[getErrorMessage] ()) | def mark(self):
"""
Mark the unit of work as failed in the database and update the listener
so as to skip it next time.
"""
self.reliableListener.lastRun = extime.Time()
BatchProcessingError(store=self.reliableListener.store, processor=self.reliableListener.processor, listener=self.reliableListener.listener, item=self.workUnit, error=self.failure.getErrorMessage()) |
def mkCuttingStock(s):
"""mkCuttingStock: convert a bin packing instance into cutting stock format"""
w,q = [],[] # list of different widths (sizes) of items, their quantities
for item in sorted(s):
if w == [] or item != w[-1]:
w.append(item)
q.append(1)
else:
q[-1] += 1
return w,q | def function[mkCuttingStock, parameter[s]]:
constant[mkCuttingStock: convert a bin packing instance into cutting stock format]
<ast.Tuple object at 0x7da1b18e3df0> assign[=] tuple[[<ast.List object at 0x7da1b18e0820>, <ast.List object at 0x7da1b18e01f0>]]
for taget[name[item]] in starred[call[name[sorted], parameter[name[s]]]] begin[:]
if <ast.BoolOp object at 0x7da1b18c01c0> begin[:]
call[name[w].append, parameter[name[item]]]
call[name[q].append, parameter[constant[1]]]
return[tuple[[<ast.Name object at 0x7da1b18c01f0>, <ast.Name object at 0x7da1b18c00d0>]]] | keyword[def] identifier[mkCuttingStock] ( identifier[s] ):
literal[string]
identifier[w] , identifier[q] =[],[]
keyword[for] identifier[item] keyword[in] identifier[sorted] ( identifier[s] ):
keyword[if] identifier[w] ==[] keyword[or] identifier[item] != identifier[w] [- literal[int] ]:
identifier[w] . identifier[append] ( identifier[item] )
identifier[q] . identifier[append] ( literal[int] )
keyword[else] :
identifier[q] [- literal[int] ]+= literal[int]
keyword[return] identifier[w] , identifier[q] | def mkCuttingStock(s):
"""mkCuttingStock: convert a bin packing instance into cutting stock format"""
(w, q) = ([], []) # list of different widths (sizes) of items, their quantities
for item in sorted(s):
if w == [] or item != w[-1]:
w.append(item)
q.append(1) # depends on [control=['if'], data=[]]
else:
q[-1] += 1 # depends on [control=['for'], data=['item']]
return (w, q) |
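A quick usage sketch (assuming mkCuttingStock above is in scope) makes the width/quantity pairing concrete; the sorted() call is what lets the run-length grouping work:

sizes = [3, 1, 3, 2, 1, 3]
w, q = mkCuttingStock(sizes)
print(w)  # [1, 2, 3]
print(q)  # [2, 1, 3] -- two items of size 1, one of size 2, three of size 3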
def extract(self, remotepath, subpath, saveaspath = None):
''' Usage: extract <remotepath> <subpath> [<saveaspath>]'''
rpath = get_pcs_path(remotepath)
topath = get_pcs_path(saveaspath)
if not saveaspath:
topath = os.path.dirname(rpath) + '/' + subpath
return self.__panapi_unzipcopy_file(rpath, subpath, topath) | def function[extract, parameter[self, remotepath, subpath, saveaspath]]:
constant[ Usage: extract <remotepath> <subpath> [<saveaspath>]]
variable[rpath] assign[=] call[name[get_pcs_path], parameter[name[remotepath]]]
variable[topath] assign[=] call[name[get_pcs_path], parameter[name[saveaspath]]]
if <ast.UnaryOp object at 0x7da1b1dc75e0> begin[:]
variable[topath] assign[=] binary_operation[binary_operation[call[name[os].path.dirname, parameter[name[rpath]]] + constant[/]] + name[subpath]]
return[call[name[self].__panapi_unzipcopy_file, parameter[name[rpath], name[subpath], name[topath]]]] | keyword[def] identifier[extract] ( identifier[self] , identifier[remotepath] , identifier[subpath] , identifier[saveaspath] = keyword[None] ):
literal[string]
identifier[rpath] = identifier[get_pcs_path] ( identifier[remotepath] )
identifier[topath] = identifier[get_pcs_path] ( identifier[saveaspath] )
keyword[if] keyword[not] identifier[saveaspath] :
identifier[topath] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[rpath] )+ literal[string] + identifier[subpath]
keyword[return] identifier[self] . identifier[__panapi_unzipcopy_file] ( identifier[rpath] , identifier[subpath] , identifier[topath] ) | def extract(self, remotepath, subpath, saveaspath=None):
""" Usage: extract <remotepath> <subpath> [<saveaspath>]"""
rpath = get_pcs_path(remotepath)
topath = get_pcs_path(saveaspath)
if not saveaspath:
topath = os.path.dirname(rpath) + '/' + subpath # depends on [control=['if'], data=[]]
return self.__panapi_unzipcopy_file(rpath, subpath, topath) |
def command(state, args):
"""Add an anime from an AniDB search."""
args = parser.parse_args(args[1:])
if args.watching:
rows = query.select.select(state.db, 'regexp IS NOT NULL', [], ['aid'])
aids = [anime.aid for anime in rows]
elif args.incomplete:
rows = query.select.select(state.db, 'enddate IS NULL', [], ['aid'])
aids = [anime.aid for anime in rows]
else:
aid = state.results.parse_aid(args.aid, default_key='db')
aids = [aid]
if not aids:
return
anime = request_anime(aids.pop())
query.update.add(state.db, anime)
print('Updated {} {}'.format(anime.aid, anime.title))
for aid in aids:
time.sleep(2)
anime = request_anime(aid)
query.update.add(state.db, anime)
print('Updated {} {}'.format(anime.aid, anime.title)) | def function[command, parameter[state, args]]:
constant[Add an anime from an AniDB search.]
variable[args] assign[=] call[name[parser].parse_args, parameter[call[name[args]][<ast.Slice object at 0x7da18ede63e0>]]]
if name[args].watching begin[:]
variable[rows] assign[=] call[name[query].select.select, parameter[name[state].db, constant[regexp IS NOT NULL], list[[]], list[[<ast.Constant object at 0x7da18ede6dd0>]]]]
variable[aids] assign[=] <ast.ListComp object at 0x7da18ede5ae0>
if <ast.UnaryOp object at 0x7da18ede6e00> begin[:]
return[None]
variable[anime] assign[=] call[name[request_anime], parameter[call[name[aids].pop, parameter[]]]]
call[name[query].update.add, parameter[name[state].db, name[anime]]]
call[name[print], parameter[call[constant[Updated {} {}].format, parameter[name[anime].aid, name[anime].title]]]]
for taget[name[aid]] in starred[name[aids]] begin[:]
call[name[time].sleep, parameter[constant[2]]]
variable[anime] assign[=] call[name[request_anime], parameter[name[aid]]]
call[name[query].update.add, parameter[name[state].db, name[anime]]]
call[name[print], parameter[call[constant[Updated {} {}].format, parameter[name[anime].aid, name[anime].title]]]] | keyword[def] identifier[command] ( identifier[state] , identifier[args] ):
literal[string]
identifier[args] = identifier[parser] . identifier[parse_args] ( identifier[args] [ literal[int] :])
keyword[if] identifier[args] . identifier[watching] :
identifier[rows] = identifier[query] . identifier[select] . identifier[select] ( identifier[state] . identifier[db] , literal[string] ,[],[ literal[string] ])
identifier[aids] =[ identifier[anime] . identifier[aid] keyword[for] identifier[anime] keyword[in] identifier[rows] ]
keyword[elif] identifier[args] . identifier[incomplete] :
identifier[rows] = identifier[query] . identifier[select] . identifier[select] ( identifier[state] . identifier[db] , literal[string] ,[],[ literal[string] ])
identifier[aids] =[ identifier[anime] . identifier[aid] keyword[for] identifier[anime] keyword[in] identifier[rows] ]
keyword[else] :
identifier[aid] = identifier[state] . identifier[results] . identifier[parse_aid] ( identifier[args] . identifier[aid] , identifier[default_key] = literal[string] )
identifier[aids] =[ identifier[aid] ]
keyword[if] keyword[not] identifier[aids] :
keyword[return]
identifier[anime] = identifier[request_anime] ( identifier[aids] . identifier[pop] ())
identifier[query] . identifier[update] . identifier[add] ( identifier[state] . identifier[db] , identifier[anime] )
identifier[print] ( literal[string] . identifier[format] ( identifier[anime] . identifier[aid] , identifier[anime] . identifier[title] ))
keyword[for] identifier[aid] keyword[in] identifier[aids] :
identifier[time] . identifier[sleep] ( literal[int] )
identifier[anime] = identifier[request_anime] ( identifier[aid] )
identifier[query] . identifier[update] . identifier[add] ( identifier[state] . identifier[db] , identifier[anime] )
identifier[print] ( literal[string] . identifier[format] ( identifier[anime] . identifier[aid] , identifier[anime] . identifier[title] )) | def command(state, args):
"""Add an anime from an AniDB search."""
args = parser.parse_args(args[1:])
if args.watching:
rows = query.select.select(state.db, 'regexp IS NOT NULL', [], ['aid'])
aids = [anime.aid for anime in rows] # depends on [control=['if'], data=[]]
elif args.incomplete:
rows = query.select.select(state.db, 'enddate IS NULL', [], ['aid'])
aids = [anime.aid for anime in rows] # depends on [control=['if'], data=[]]
else:
aid = state.results.parse_aid(args.aid, default_key='db')
aids = [aid]
if not aids:
return # depends on [control=['if'], data=[]]
anime = request_anime(aids.pop())
query.update.add(state.db, anime)
print('Updated {} {}'.format(anime.aid, anime.title))
for aid in aids:
time.sleep(2)
anime = request_anime(aid)
query.update.add(state.db, anime)
print('Updated {} {}'.format(anime.aid, anime.title)) # depends on [control=['for'], data=['aid']] |
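The loop shape above is a simple client-side rate limit: one aid is handled immediately (aids.pop() takes it from the tail), and every remaining request is preceded by a two-second sleep. The same pattern generically, with a stub handler standing in for the AniDB request:

import time

def process_with_rate_limit(items, handler, delay=2):
    """Handle one item at once, then pause before each remaining one."""
    items = list(items)
    if not items:
        return
    handler(items.pop())  # mirrors aids.pop(): the tail item goes first
    for item in items:
        time.sleep(delay)
        handler(item)

process_with_rate_limit([101, 102, 103], lambda aid: print('updated', aid), delay=0)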
def queries(self, last_updated_ms):
"""Get the updated queries."""
stats_logger.incr('queries')
if not g.user.get_id():
return json_error_response(
'Please login to access the queries.', status=403)
# Unix time, milliseconds.
last_updated_ms_int = int(float(last_updated_ms)) if last_updated_ms else 0
# UTC date time, same that is stored in the DB.
last_updated_dt = utils.EPOCH + timedelta(seconds=last_updated_ms_int / 1000)
sql_queries = (
db.session.query(Query)
.filter(
Query.user_id == g.user.get_id(),
Query.changed_on >= last_updated_dt,
)
.all()
)
dict_queries = {q.client_id: q.to_dict() for q in sql_queries}
now = int(round(time.time() * 1000))
unfinished_states = [
QueryStatus.PENDING,
QueryStatus.RUNNING,
]
queries_to_timeout = [
client_id for client_id, query_dict in dict_queries.items()
if (
query_dict['state'] in unfinished_states and (
now - query_dict['startDttm'] >
config.get('SQLLAB_ASYNC_TIME_LIMIT_SEC') * 1000
)
)
]
if queries_to_timeout:
update(Query).where(
and_(
Query.user_id == g.user.get_id(),
Query.client_id.in_(queries_to_timeout),
),
).values(state=QueryStatus.TIMED_OUT)
for client_id in queries_to_timeout:
dict_queries[client_id]['status'] = QueryStatus.TIMED_OUT
return json_success(
json.dumps(dict_queries, default=utils.json_int_dttm_ser)) | def function[queries, parameter[self, last_updated_ms]]:
constant[Get the updated queries.]
call[name[stats_logger].incr, parameter[constant[queries]]]
if <ast.UnaryOp object at 0x7da1b2061270> begin[:]
return[call[name[json_error_response], parameter[constant[Please login to access the queries.]]]]
variable[last_updated_ms_int] assign[=] <ast.IfExp object at 0x7da1b20600a0>
variable[last_updated_dt] assign[=] binary_operation[name[utils].EPOCH + call[name[timedelta], parameter[]]]
variable[sql_queries] assign[=] call[call[call[name[db].session.query, parameter[name[Query]]].filter, parameter[compare[name[Query].user_id equal[==] call[name[g].user.get_id, parameter[]]], compare[name[Query].changed_on greater_or_equal[>=] name[last_updated_dt]]]].all, parameter[]]
variable[dict_queries] assign[=] <ast.DictComp object at 0x7da1b2033730>
variable[now] assign[=] call[name[int], parameter[call[name[round], parameter[binary_operation[call[name[time].time, parameter[]] * constant[1000]]]]]]
variable[unfinished_states] assign[=] list[[<ast.Attribute object at 0x7da1b1e12920>, <ast.Attribute object at 0x7da1b1e13f40>]]
variable[queries_to_timeout] assign[=] <ast.ListComp object at 0x7da1b1e11d80>
if name[queries_to_timeout] begin[:]
call[call[call[name[update], parameter[name[Query]]].where, parameter[call[name[and_], parameter[compare[name[Query].user_id equal[==] call[name[g].user.get_id, parameter[]]], compare[name[Query].client_id in name[queries_to_timeout]]]]]].values, parameter[]]
for taget[name[client_id]] in starred[name[queries_to_timeout]] begin[:]
call[call[name[dict_queries]][name[client_id]]][constant[status]] assign[=] name[QueryStatus].TIMED_OUT
return[call[name[json_success], parameter[call[name[json].dumps, parameter[name[dict_queries]]]]]] | keyword[def] identifier[queries] ( identifier[self] , identifier[last_updated_ms] ):
literal[string]
identifier[stats_logger] . identifier[incr] ( literal[string] )
keyword[if] keyword[not] identifier[g] . identifier[user] . identifier[get_id] ():
keyword[return] identifier[json_error_response] (
literal[string] , identifier[status] = literal[int] )
identifier[last_updated_ms_int] = identifier[int] ( identifier[float] ( identifier[last_updated_ms] )) keyword[if] identifier[last_updated_ms] keyword[else] literal[int]
identifier[last_updated_dt] = identifier[utils] . identifier[EPOCH] + identifier[timedelta] ( identifier[seconds] = identifier[last_updated_ms_int] / literal[int] )
identifier[sql_queries] =(
identifier[db] . identifier[session] . identifier[query] ( identifier[Query] )
. identifier[filter] (
identifier[Query] . identifier[user_id] == identifier[g] . identifier[user] . identifier[get_id] (),
identifier[Query] . identifier[changed_on] >= identifier[last_updated_dt] ,
)
. identifier[all] ()
)
identifier[dict_queries] ={ identifier[q] . identifier[client_id] : identifier[q] . identifier[to_dict] () keyword[for] identifier[q] keyword[in] identifier[sql_queries] }
identifier[now] = identifier[int] ( identifier[round] ( identifier[time] . identifier[time] ()* literal[int] ))
identifier[unfinished_states] =[
identifier[QueryStatus] . identifier[PENDING] ,
identifier[QueryStatus] . identifier[RUNNING] ,
]
identifier[queries_to_timeout] =[
identifier[client_id] keyword[for] identifier[client_id] , identifier[query_dict] keyword[in] identifier[dict_queries] . identifier[items] ()
keyword[if] (
identifier[query_dict] [ literal[string] ] keyword[in] identifier[unfinished_states] keyword[and] (
identifier[now] - identifier[query_dict] [ literal[string] ]>
identifier[config] . identifier[get] ( literal[string] )* literal[int]
)
)
]
keyword[if] identifier[queries_to_timeout] :
identifier[update] ( identifier[Query] ). identifier[where] (
identifier[and_] (
identifier[Query] . identifier[user_id] == identifier[g] . identifier[user] . identifier[get_id] (),
identifier[Query] . identifier[client_id] keyword[in] identifier[queries_to_timeout] ,
),
). identifier[values] ( identifier[state] = identifier[QueryStatus] . identifier[TIMED_OUT] )
keyword[for] identifier[client_id] keyword[in] identifier[queries_to_timeout] :
identifier[dict_queries] [ identifier[client_id] ][ literal[string] ]= identifier[QueryStatus] . identifier[TIMED_OUT]
keyword[return] identifier[json_success] (
identifier[json] . identifier[dumps] ( identifier[dict_queries] , identifier[default] = identifier[utils] . identifier[json_int_dttm_ser] )) | def queries(self, last_updated_ms):
"""Get the updated queries."""
stats_logger.incr('queries')
if not g.user.get_id():
return json_error_response('Please login to access the queries.', status=403) # depends on [control=['if'], data=[]]
# Unix time, milliseconds.
last_updated_ms_int = int(float(last_updated_ms)) if last_updated_ms else 0
# UTC date time, same that is stored in the DB.
last_updated_dt = utils.EPOCH + timedelta(seconds=last_updated_ms_int / 1000)
sql_queries = db.session.query(Query).filter(Query.user_id == g.user.get_id(), Query.changed_on >= last_updated_dt).all()
dict_queries = {q.client_id: q.to_dict() for q in sql_queries}
now = int(round(time.time() * 1000))
unfinished_states = [QueryStatus.PENDING, QueryStatus.RUNNING]
queries_to_timeout = [client_id for (client_id, query_dict) in dict_queries.items() if query_dict['state'] in unfinished_states and now - query_dict['startDttm'] > config.get('SQLLAB_ASYNC_TIME_LIMIT_SEC') * 1000]
if queries_to_timeout:
update(Query).where(and_(Query.user_id == g.user.get_id(), Query.client_id.in_(queries_to_timeout))).values(state=QueryStatus.TIMED_OUT)
for client_id in queries_to_timeout:
dict_queries[client_id]['status'] = QueryStatus.TIMED_OUT # depends on [control=['for'], data=['client_id']] # depends on [control=['if'], data=[]]
return json_success(json.dumps(dict_queries, default=utils.json_int_dttm_ser)) |
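Everything in the timeout check is epoch milliseconds: now comes from time.time() * 1000, and a query is stale once now - startDttm exceeds the configured limit in seconds times 1000. A self-contained sketch of that arithmetic; the 60-second limit below is an assumed stand-in for SQLLAB_ASYNC_TIME_LIMIT_SEC:

import time

ASYNC_TIME_LIMIT_SEC = 60  # assumed config value, for illustration only

def is_timed_out(start_dttm_ms):
    """True once a query has been running longer than the limit."""
    now_ms = int(round(time.time() * 1000))
    return now_ms - start_dttm_ms > ASYNC_TIME_LIMIT_SEC * 1000

started = int(round(time.time() * 1000)) - 61_000  # pretend it started 61s ago
print(is_timed_out(started))  # True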
def is_locked(self, key):
"""
Checks the lock for the specified key. If the lock is acquired, returns ``true``. Otherwise, returns ``false``.
**Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
:param key: (object), the key that is checked for lock.
:return: (bool), ``true`` if lock is acquired, false otherwise.
"""
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(multi_map_is_locked_codec, key_data, key=key_data) | def function[is_locked, parameter[self, key]]:
constant[
Checks the lock for the specified key. If the lock is acquired, returns ``true``. Otherwise, returns ``false``.
**Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
:param key: (object), the key that is checked for lock.
:return: (bool), ``true`` if lock is acquired, false otherwise.
]
call[name[check_not_none], parameter[name[key], constant[key can't be None]]]
variable[key_data] assign[=] call[name[self]._to_data, parameter[name[key]]]
return[call[name[self]._encode_invoke_on_key, parameter[name[multi_map_is_locked_codec], name[key_data]]]] | keyword[def] identifier[is_locked] ( identifier[self] , identifier[key] ):
literal[string]
identifier[check_not_none] ( identifier[key] , literal[string] )
identifier[key_data] = identifier[self] . identifier[_to_data] ( identifier[key] )
keyword[return] identifier[self] . identifier[_encode_invoke_on_key] ( identifier[multi_map_is_locked_codec] , identifier[key_data] , identifier[key] = identifier[key_data] ) | def is_locked(self, key):
"""
Checks the lock for the specified key. If the lock is acquired, returns ``true``. Otherwise, returns ``false``.
**Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
:param key: (object), the key that is checked for lock.
:return: (bool), ``true`` if lock is acquired, false otherwise.
"""
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(multi_map_is_locked_codec, key_data, key=key_data) |
def get_argument(self, name):
"""Return single argument by name"""
val = self.arguments.get(name)
if val:
return val[0]
return None | def function[get_argument, parameter[self, name]]:
constant[Return single argument by name]
variable[val] assign[=] call[name[self].arguments.get, parameter[name[name]]]
if name[val] begin[:]
return[call[name[val]][constant[0]]]
return[constant[None]] | keyword[def] identifier[get_argument] ( identifier[self] , identifier[name] ):
literal[string]
identifier[val] = identifier[self] . identifier[arguments] . identifier[get] ( identifier[name] )
keyword[if] identifier[val] :
keyword[return] identifier[val] [ literal[int] ]
keyword[return] keyword[None] | def get_argument(self, name):
"""Return single argument by name"""
val = self.arguments.get(name)
if val:
return val[0] # depends on [control=['if'], data=[]]
return None |
def clone(self, value=univ.noValue, **kwargs):
"""Clone this instance.
If *value* is specified, use its tag as the component type selector,
and itself as the component value.
:param value: (Optional) the component value.
:type value: :py:obj:`pyasn1.type.base.Asn1ItemBase`
:return: the cloned instance.
:rtype: :py:obj:`pysnmp.proto.rfc1155.NetworkAddress`
:raise: :py:obj:`pysnmp.smi.error.SmiError`:
if the type of *value* is not allowed for this Choice instance.
"""
cloned = univ.Choice.clone(self, **kwargs)
if value is not univ.noValue:
if isinstance(value, NetworkAddress):
value = value.getComponent()
elif not isinstance(value, IpAddress):
# IpAddress is the only supported type, perhaps forever because
# this is SNMPv1.
value = IpAddress(value)
try:
tagSet = value.tagSet
except AttributeError:
raise PyAsn1Error('component value %r has no tag set' % (value,))
cloned.setComponentByType(tagSet, value)
return cloned | def function[clone, parameter[self, value]]:
constant[Clone this instance.
If *value* is specified, use its tag as the component type selector,
and itself as the component value.
:param value: (Optional) the component value.
:type value: :py:obj:`pyasn1.type.base.Asn1ItemBase`
:return: the cloned instance.
:rtype: :py:obj:`pysnmp.proto.rfc1155.NetworkAddress`
:raise: :py:obj:`pysnmp.smi.error.SmiError`:
if the type of *value* is not allowed for this Choice instance.
]
variable[cloned] assign[=] call[name[univ].Choice.clone, parameter[name[self]]]
if compare[name[value] is_not name[univ].noValue] begin[:]
if call[name[isinstance], parameter[name[value], name[NetworkAddress]]] begin[:]
variable[value] assign[=] call[name[value].getComponent, parameter[]]
<ast.Try object at 0x7da1b15f3100>
call[name[cloned].setComponentByType, parameter[name[tagSet], name[value]]]
return[name[cloned]] | keyword[def] identifier[clone] ( identifier[self] , identifier[value] = identifier[univ] . identifier[noValue] ,** identifier[kwargs] ):
literal[string]
identifier[cloned] = identifier[univ] . identifier[Choice] . identifier[clone] ( identifier[self] ,** identifier[kwargs] )
keyword[if] identifier[value] keyword[is] keyword[not] identifier[univ] . identifier[noValue] :
keyword[if] identifier[isinstance] ( identifier[value] , identifier[NetworkAddress] ):
identifier[value] = identifier[value] . identifier[getComponent] ()
keyword[elif] keyword[not] identifier[isinstance] ( identifier[value] , identifier[IpAddress] ):
identifier[value] = identifier[IpAddress] ( identifier[value] )
keyword[try] :
identifier[tagSet] = identifier[value] . identifier[tagSet]
keyword[except] identifier[AttributeError] :
keyword[raise] identifier[PyAsn1Error] ( literal[string] %( identifier[value] ,))
identifier[cloned] . identifier[setComponentByType] ( identifier[tagSet] , identifier[value] )
keyword[return] identifier[cloned] | def clone(self, value=univ.noValue, **kwargs):
"""Clone this instance.
If *value* is specified, use its tag as the component type selector,
and itself as the component value.
:param value: (Optional) the component value.
:type value: :py:obj:`pyasn1.type.base.Asn1ItemBase`
:return: the cloned instance.
:rtype: :py:obj:`pysnmp.proto.rfc1155.NetworkAddress`
:raise: :py:obj:`pysnmp.smi.error.SmiError`:
if the type of *value* is not allowed for this Choice instance.
"""
cloned = univ.Choice.clone(self, **kwargs)
if value is not univ.noValue:
if isinstance(value, NetworkAddress):
value = value.getComponent() # depends on [control=['if'], data=[]]
elif not isinstance(value, IpAddress):
# IpAddress is the only supported type, perhaps forever because
# this is SNMPv1.
value = IpAddress(value) # depends on [control=['if'], data=[]]
try:
tagSet = value.tagSet # depends on [control=['try'], data=[]]
except AttributeError:
raise PyAsn1Error('component value %r has no tag set' % (value,)) # depends on [control=['except'], data=[]]
cloned.setComponentByType(tagSet, value) # depends on [control=['if'], data=['value']]
return cloned |
def get_firefox_binary():
"""Gets the firefox binary
@rtype: FirefoxBinary
"""
browser_config = BrowserConfig()
constants_config = ConstantsConfig()
log_dir = os.path.join(constants_config.get('logs_dir'), 'firefox')
create_directory(log_dir)
log_path = os.path.join(log_dir, '{}_{}.log'.format(datetime.datetime.now().isoformat('_'), words.random_word()))
log_file = open(log_path, 'w')
log('Firefox log file: {}'.format(log_path))
binary = FirefoxBinary(log_file=log_file)
return binary | def function[get_firefox_binary, parameter[]]:
constant[Gets the firefox binary
@rtype: FirefoxBinary
]
variable[browser_config] assign[=] call[name[BrowserConfig], parameter[]]
variable[constants_config] assign[=] call[name[ConstantsConfig], parameter[]]
variable[log_dir] assign[=] call[name[os].path.join, parameter[call[name[constants_config].get, parameter[constant[logs_dir]]], constant[firefox]]]
call[name[create_directory], parameter[name[log_dir]]]
variable[log_path] assign[=] call[name[os].path.join, parameter[name[log_dir], call[constant[{}_{}.log].format, parameter[call[call[name[datetime].datetime.now, parameter[]].isoformat, parameter[constant[_]]], call[name[words].random_word, parameter[]]]]]]
variable[log_file] assign[=] call[name[open], parameter[name[log_path], constant[w]]]
call[name[log], parameter[call[constant[Firefox log file: {}].format, parameter[name[log_path]]]]]
variable[binary] assign[=] call[name[FirefoxBinary], parameter[]]
return[name[binary]] | keyword[def] identifier[get_firefox_binary] ():
literal[string]
identifier[browser_config] = identifier[BrowserConfig] ()
identifier[constants_config] = identifier[ConstantsConfig] ()
identifier[log_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[constants_config] . identifier[get] ( literal[string] ), literal[string] )
identifier[create_directory] ( identifier[log_dir] )
identifier[log_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[log_dir] , literal[string] . identifier[format] ( identifier[datetime] . identifier[datetime] . identifier[now] (). identifier[isoformat] ( literal[string] ), identifier[words] . identifier[random_word] ()))
identifier[log_file] = identifier[open] ( identifier[log_path] , literal[string] )
identifier[log] ( literal[string] . identifier[format] ( identifier[log_path] ))
identifier[binary] = identifier[FirefoxBinary] ( identifier[log_file] = identifier[log_file] )
keyword[return] identifier[binary] | def get_firefox_binary():
"""Gets the firefox binary
@rtype: FirefoxBinary
"""
browser_config = BrowserConfig()
constants_config = ConstantsConfig()
log_dir = os.path.join(constants_config.get('logs_dir'), 'firefox')
create_directory(log_dir)
log_path = os.path.join(log_dir, '{}_{}.log'.format(datetime.datetime.now().isoformat('_'), words.random_word()))
log_file = open(log_path, 'w')
log('Firefox log file: {}'.format(log_path))
binary = FirefoxBinary(log_file=log_file)
return binary |
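The log-file naming is the reusable piece here: an ISO timestamp plus a random suffix keeps concurrent browser runs from clobbering each other's logs. A standalone sketch of the same pattern, with uuid4 standing in for the project's words.random_word helper:

import datetime
import os
import uuid

def make_log_path(log_dir):
    """Build a collision-resistant log path: <iso-timestamp>_<random>.log"""
    os.makedirs(log_dir, exist_ok=True)
    stamp = datetime.datetime.now().isoformat('_')
    return os.path.join(log_dir, '{}_{}.log'.format(stamp, uuid.uuid4().hex[:8]))

print(make_log_path('/tmp/firefox-logs'))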
def in_64(library, session, space, offset, extended=False):
"""Reads in an 64-bit value from the specified memory space and offset.
Corresponds to viIn64* function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param space: Specifies the address space. (Constants.*SPACE*)
:param offset: Offset (in bytes) of the address or register from which to read.
:param extended: Use 64 bits offset independent of the platform.
:return: Data read from memory, return value of the library call.
:rtype: int, :class:`pyvisa.constants.StatusCode`
"""
value_64 = ViUInt64()
if extended:
ret = library.viIn64Ex(session, space, offset, byref(value_64))
else:
ret = library.viIn64(session, space, offset, byref(value_64))
return value_64.value, ret | def function[in_64, parameter[library, session, space, offset, extended]]:
constant[Reads in a 64-bit value from the specified memory space and offset.
Corresponds to viIn64* function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param space: Specifies the address space. (Constants.*SPACE*)
:param offset: Offset (in bytes) of the address or register from which to read.
:param extended: Use 64 bits offset independent of the platform.
:return: Data read from memory, return value of the library call.
:rtype: int, :class:`pyvisa.constants.StatusCode`
]
variable[value_64] assign[=] call[name[ViUInt64], parameter[]]
if name[extended] begin[:]
variable[ret] assign[=] call[name[library].viIn64Ex, parameter[name[session], name[space], name[offset], call[name[byref], parameter[name[value_64]]]]]
return[tuple[[<ast.Attribute object at 0x7da18dc9b760>, <ast.Name object at 0x7da18dc9a170>]]] | keyword[def] identifier[in_64] ( identifier[library] , identifier[session] , identifier[space] , identifier[offset] , identifier[extended] = keyword[False] ):
literal[string]
identifier[value_64] = identifier[ViUInt64] ()
keyword[if] identifier[extended] :
identifier[ret] = identifier[library] . identifier[viIn64Ex] ( identifier[session] , identifier[space] , identifier[offset] , identifier[byref] ( identifier[value_64] ))
keyword[else] :
identifier[ret] = identifier[library] . identifier[viIn64] ( identifier[session] , identifier[space] , identifier[offset] , identifier[byref] ( identifier[value_64] ))
keyword[return] identifier[value_64] . identifier[value] , identifier[ret] | def in_64(library, session, space, offset, extended=False):
"""Reads in an 64-bit value from the specified memory space and offset.
Corresponds to viIn64* function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param space: Specifies the address space. (Constants.*SPACE*)
:param offset: Offset (in bytes) of the address or register from which to read.
:param extended: Use 64 bits offset independent of the platform.
:return: Data read from memory, return value of the library call.
:rtype: int, :class:`pyvisa.constants.StatusCode`
"""
value_64 = ViUInt64()
if extended:
ret = library.viIn64Ex(session, space, offset, byref(value_64)) # depends on [control=['if'], data=[]]
else:
ret = library.viIn64(session, space, offset, byref(value_64))
return (value_64.value, ret) |
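The viIn64 call follows the classic ctypes out-parameter idiom: allocate a C integer, hand a pointer to the foreign function with byref(), and read .value afterwards. A library-free sketch of the same idiom, with memmove playing the role of the VISA function writing through the pointer:

import ctypes
from ctypes import byref, memmove, sizeof

def read_u64(source_bytes):
    """Fill a ctypes.c_uint64 through a pointer, as viIn64 does."""
    value_64 = ctypes.c_uint64()
    memmove(byref(value_64), source_bytes, sizeof(value_64))  # the "foreign call"
    return value_64.value

print(read_u64((1234567890).to_bytes(8, 'little')))  # 1234567890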
def _construct_options(options_bootstrapper, build_configuration):
"""Parse and register options.
:returns: An Options object representing the full set of runtime options.
"""
# Now that plugins and backends are loaded, we can gather the known scopes.
# Gather the optionables that are not scoped to any other. All known scopes are reachable
# via these optionables' known_scope_infos() methods.
top_level_optionables = (
{GlobalOptionsRegistrar} |
GlobalSubsystems.get() |
build_configuration.optionables() |
set(Goal.get_optionables())
)
# Now that we have the known scopes we can get the full options. `get_full_options` will
# sort and de-duplicate these for us.
known_scope_infos = [si
for optionable in top_level_optionables
for si in optionable.known_scope_infos()]
return options_bootstrapper.get_full_options(known_scope_infos) | def function[_construct_options, parameter[options_bootstrapper, build_configuration]]:
constant[Parse and register options.
:returns: An Options object representing the full set of runtime options.
]
variable[top_level_optionables] assign[=] binary_operation[binary_operation[binary_operation[<ast.Set object at 0x7da1b1e6a710> <ast.BitOr object at 0x7da2590d6aa0> call[name[GlobalSubsystems].get, parameter[]]] <ast.BitOr object at 0x7da2590d6aa0> call[name[build_configuration].optionables, parameter[]]] <ast.BitOr object at 0x7da2590d6aa0> call[name[set], parameter[call[name[Goal].get_optionables, parameter[]]]]]
variable[known_scope_infos] assign[=] <ast.ListComp object at 0x7da1b1e6b490>
return[call[name[options_bootstrapper].get_full_options, parameter[name[known_scope_infos]]]] | keyword[def] identifier[_construct_options] ( identifier[options_bootstrapper] , identifier[build_configuration] ):
literal[string]
identifier[top_level_optionables] =(
{ identifier[GlobalOptionsRegistrar] }|
identifier[GlobalSubsystems] . identifier[get] ()|
identifier[build_configuration] . identifier[optionables] ()|
identifier[set] ( identifier[Goal] . identifier[get_optionables] ())
)
identifier[known_scope_infos] =[ identifier[si]
keyword[for] identifier[optionable] keyword[in] identifier[top_level_optionables]
keyword[for] identifier[si] keyword[in] identifier[optionable] . identifier[known_scope_infos] ()]
keyword[return] identifier[options_bootstrapper] . identifier[get_full_options] ( identifier[known_scope_infos] ) | def _construct_options(options_bootstrapper, build_configuration):
"""Parse and register options.
:returns: An Options object representing the full set of runtime options.
"""
# Now that plugins and backends are loaded, we can gather the known scopes.
# Gather the optionables that are not scoped to any other. All known scopes are reachable
# via these optionables' known_scope_infos() methods.
top_level_optionables = {GlobalOptionsRegistrar} | GlobalSubsystems.get() | build_configuration.optionables() | set(Goal.get_optionables())
# Now that we have the known scopes we can get the full options. `get_full_options` will
# sort and de-duplicate these for us.
known_scope_infos = [si for optionable in top_level_optionables for si in optionable.known_scope_infos()]
return options_bootstrapper.get_full_options(known_scope_infos) |
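The comprehension at the end flattens one level of nesting: each optionable yields several scope infos, and they all land in a single list before de-duplication. The same shape on plain data:

# Each "optionable" contributes a list of scope infos; flatten them.
optionables = [['global', 'cache'], ['test', 'test.junit'], ['jvm']]
known_scopes = [scope for optionable in optionables for scope in optionable]
print(known_scopes)  # ['global', 'cache', 'test', 'test.junit', 'jvm']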
def parse_quantity(string):
"""
Parse quantity converts the value in a resources spec like:
resources:
requests:
cpu: "100m"
memory: "200Mi"
limits:
memory: "300Mi"
:param string: str
:return: float
"""
number, unit = '', ''
for char in string:
if char.isdigit() or char == '.':
number += char
else:
unit += char
return float(number) * FACTORS.get(unit, 1) | def function[parse_quantity, parameter[string]]:
constant[
Parse quantity converts the value in a resources spec like:
resources:
requests:
cpu: "100m"
memory: "200Mi"
limits:
memory: "300Mi"
:param string: str
:return: float
]
<ast.Tuple object at 0x7da18c4cf8e0> assign[=] tuple[[<ast.Constant object at 0x7da18c4cd990>, <ast.Constant object at 0x7da18c4cdc30>]]
for taget[name[char]] in starred[name[string]] begin[:]
if <ast.BoolOp object at 0x7da18c4cec20> begin[:]
<ast.AugAssign object at 0x7da18c4ce8f0>
return[binary_operation[call[name[float], parameter[name[number]]] * call[name[FACTORS].get, parameter[name[unit], constant[1]]]]] | keyword[def] identifier[parse_quantity] ( identifier[string] ):
literal[string]
identifier[number] , identifier[unit] = literal[string] , literal[string]
keyword[for] identifier[char] keyword[in] identifier[string] :
keyword[if] identifier[char] . identifier[isdigit] () keyword[or] identifier[char] == literal[string] :
identifier[number] += identifier[char]
keyword[else] :
identifier[unit] += identifier[char]
keyword[return] identifier[float] ( identifier[number] )* identifier[FACTORS] . identifier[get] ( identifier[unit] , literal[int] ) | def parse_quantity(string):
"""
Parse quantity converts the value in a resources spec like:
resources:
requests:
cpu: "100m"
memory: "200Mi"
limits:
memory: "300Mi"
:param string: str
:return: float
"""
(number, unit) = ('', '')
for char in string:
if char.isdigit() or char == '.':
number += char # depends on [control=['if'], data=[]]
else:
unit += char # depends on [control=['for'], data=['char']]
return float(number) * FACTORS.get(unit, 1) |
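FACTORS is defined elsewhere in the module; the subset below is an assumption so the digit/unit split can be exercised end to end (run together with the function above):

# Assumed subset of the module's FACTORS table, for illustration only.
FACTORS = {'m': 1e-3, 'K': 1e3, 'M': 1e6, 'Ki': 2**10, 'Mi': 2**20, 'Gi': 2**30}

print(parse_quantity('100m'))   # 0.1 (CPU: 100 millicores)
print(parse_quantity('200Mi'))  # 209715200.0 (memory: 200 MiB in bytes)
print(parse_quantity('1.5'))    # 1.5 (no unit, factor defaults to 1)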
def runtime_import(object_path):
"""Import at runtime."""
obj_module, obj_element = object_path.rsplit(".", 1)
loader = __import__(obj_module, globals(), locals(), [str(obj_element)])
return getattr(loader, obj_element) | def function[runtime_import, parameter[object_path]]:
constant[Import at runtime.]
<ast.Tuple object at 0x7da1b1d6d900> assign[=] call[name[object_path].rsplit, parameter[constant[.], constant[1]]]
variable[loader] assign[=] call[name[__import__], parameter[name[obj_module], call[name[globals], parameter[]], call[name[locals], parameter[]], list[[<ast.Call object at 0x7da1b1d6ebf0>]]]]
return[call[name[getattr], parameter[name[loader], name[obj_element]]]] | keyword[def] identifier[runtime_import] ( identifier[object_path] ):
literal[string]
identifier[obj_module] , identifier[obj_element] = identifier[object_path] . identifier[rsplit] ( literal[string] , literal[int] )
identifier[loader] = identifier[__import__] ( identifier[obj_module] , identifier[globals] (), identifier[locals] (),[ identifier[str] ( identifier[obj_element] )])
keyword[return] identifier[getattr] ( identifier[loader] , identifier[obj_element] ) | def runtime_import(object_path):
"""Import at runtime."""
(obj_module, obj_element) = object_path.rsplit('.', 1)
loader = __import__(obj_module, globals(), locals(), [str(obj_element)])
return getattr(loader, obj_element) |
def between(self, start, end):
"""Adds new `BETWEEN` condition
:param start: int or datetime compatible object (in SNOW user's timezone)
:param end: int or datetime compatible object (in SNOW user's timezone)
:raise:
- QueryTypeError: if start or end arguments is of an invalid type
"""
if hasattr(start, 'strftime') and hasattr(end, 'strftime'):
dt_between = (
'javascript:gs.dateGenerate("%(start)s")'
"@"
'javascript:gs.dateGenerate("%(end)s")'
) % {
'start': start.strftime('%Y-%m-%d %H:%M:%S'),
'end': end.strftime('%Y-%m-%d %H:%M:%S')
}
elif isinstance(start, int) and isinstance(end, int):
dt_between = '%d@%d' % (start, end)
else:
raise QueryTypeError("Expected `start` and `end` of type `int` "
"or instance of `datetime`, not %s and %s" % (type(start), type(end)))
return self._add_condition('BETWEEN', dt_between, types=[str]) | def function[between, parameter[self, start, end]]:
constant[Adds a new `BETWEEN` condition
:param start: int or datetime compatible object (in SNOW user's timezone)
:param end: int or datetime compatible object (in SNOW user's timezone)
:raise:
- QueryTypeError: if start or end arguments is of an invalid type
]
if <ast.BoolOp object at 0x7da1b07a02e0> begin[:]
variable[dt_between] assign[=] binary_operation[constant[javascript:gs.dateGenerate("%(start)s")@javascript:gs.dateGenerate("%(end)s")] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da1b07a26e0>, <ast.Constant object at 0x7da1b07a2a70>], [<ast.Call object at 0x7da1b07a20b0>, <ast.Call object at 0x7da1b07a1840>]]]
return[call[name[self]._add_condition, parameter[constant[BETWEEN], name[dt_between]]]] | keyword[def] identifier[between] ( identifier[self] , identifier[start] , identifier[end] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[start] , literal[string] ) keyword[and] identifier[hasattr] ( identifier[end] , literal[string] ):
identifier[dt_between] =(
literal[string]
literal[string]
literal[string]
)%{
literal[string] : identifier[start] . identifier[strftime] ( literal[string] ),
literal[string] : identifier[end] . identifier[strftime] ( literal[string] )
}
keyword[elif] identifier[isinstance] ( identifier[start] , identifier[int] ) keyword[and] identifier[isinstance] ( identifier[end] , identifier[int] ):
identifier[dt_between] = literal[string] %( identifier[start] , identifier[end] )
keyword[else] :
keyword[raise] identifier[QueryTypeError] ( literal[string]
literal[string] %( identifier[type] ( identifier[start] ), identifier[type] ( identifier[end] )))
keyword[return] identifier[self] . identifier[_add_condition] ( literal[string] , identifier[dt_between] , identifier[types] =[ identifier[str] ]) | def between(self, start, end):
"""Adds new `BETWEEN` condition
:param start: int or datetime compatible object (in SNOW user's timezone)
:param end: int or datetime compatible object (in SNOW user's timezone)
:raise:
- QueryTypeError: if start or end arguments is of an invalid type
"""
if hasattr(start, 'strftime') and hasattr(end, 'strftime'):
dt_between = 'javascript:gs.dateGenerate("%(start)s")@javascript:gs.dateGenerate("%(end)s")' % {'start': start.strftime('%Y-%m-%d %H:%M:%S'), 'end': end.strftime('%Y-%m-%d %H:%M:%S')} # depends on [control=['if'], data=[]]
elif isinstance(start, int) and isinstance(end, int):
dt_between = '%d@%d' % (start, end) # depends on [control=['if'], data=[]]
else:
raise QueryTypeError('Expected `start` and `end` of type `int` or instance of `datetime`, not %s and %s' % (type(start), type(end)))
return self._add_condition('BETWEEN', dt_between, types=[str]) |
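The datetime branch renders a ServiceNow-side expression: two gs.dateGenerate() calls joined by '@'. That string building can be exercised standalone, without the client class:

import datetime

def between_fragment(start, end):
    """Render the BETWEEN operand the way the datetime branch above does."""
    template = ('javascript:gs.dateGenerate("%(start)s")'
                '@'
                'javascript:gs.dateGenerate("%(end)s")')
    return template % {'start': start.strftime('%Y-%m-%d %H:%M:%S'),
                       'end': end.strftime('%Y-%m-%d %H:%M:%S')}

print(between_fragment(datetime.datetime(2019, 1, 1), datetime.datetime(2019, 1, 31, 23, 59, 59)))
# javascript:gs.dateGenerate("2019-01-01 00:00:00")@javascript:gs.dateGenerate("2019-01-31 23:59:59")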
def prepare(self, params):
"""
Prepare and return the required HTTP headers.
Base64-encode the parameters, sign them with the secret key,
create the HTTP headers, and return the whole payload.
Arguments:
params -- a dictionary of parameters
"""
jsonparams = json.dumps(params)
payload = base64.b64encode(jsonparams.encode())
signature = hmac.new(self.secret_key.encode(), payload,
hashlib.sha384).hexdigest()
return {'X-GEMINI-APIKEY': self.api_key,
'X-GEMINI-PAYLOAD': payload,
'X-GEMINI-SIGNATURE': signature} | def function[prepare, parameter[self, params]]:
constant[
Prepare and return the required HTTP headers.
Base64-encode the parameters, sign them with the secret key,
create the HTTP headers, and return the whole payload.
Arguments:
params -- a dictionary of parameters
]
variable[jsonparams] assign[=] call[name[json].dumps, parameter[name[params]]]
variable[payload] assign[=] call[name[base64].b64encode, parameter[call[name[jsonparams].encode, parameter[]]]]
variable[signature] assign[=] call[call[name[hmac].new, parameter[call[name[self].secret_key.encode, parameter[]], name[payload], name[hashlib].sha384]].hexdigest, parameter[]]
return[dictionary[[<ast.Constant object at 0x7da1b237fa30>, <ast.Constant object at 0x7da1b237c550>, <ast.Constant object at 0x7da1b237c250>], [<ast.Attribute object at 0x7da1b237f340>, <ast.Name object at 0x7da1b237e710>, <ast.Name object at 0x7da1b237dcc0>]]] | keyword[def] identifier[prepare] ( identifier[self] , identifier[params] ):
literal[string]
identifier[jsonparams] = identifier[json] . identifier[dumps] ( identifier[params] )
identifier[payload] = identifier[base64] . identifier[b64encode] ( identifier[jsonparams] . identifier[encode] ())
identifier[signature] = identifier[hmac] . identifier[new] ( identifier[self] . identifier[secret_key] . identifier[encode] (), identifier[payload] ,
identifier[hashlib] . identifier[sha384] ). identifier[hexdigest] ()
keyword[return] { literal[string] : identifier[self] . identifier[api_key] ,
literal[string] : identifier[payload] ,
literal[string] : identifier[signature] } | def prepare(self, params):
"""
Prepare and return the required HTTP headers.
Base64-encode the parameters, sign them with the secret key,
create the HTTP headers, and return the whole payload.
Arguments:
params -- a dictionary of parameters
"""
jsonparams = json.dumps(params)
payload = base64.b64encode(jsonparams.encode())
signature = hmac.new(self.secret_key.encode(), payload, hashlib.sha384).hexdigest()
return {'X-GEMINI-APIKEY': self.api_key, 'X-GEMINI-PAYLOAD': payload, 'X-GEMINI-SIGNATURE': signature} |
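The signing scheme itself needs none of the client state, so it can be checked standalone: JSON-encode, base64, then HMAC-SHA384 over the payload with the secret. The key and parameters below are dummies:

import base64
import hashlib
import hmac
import json

secret_key = 'dummy-secret'  # placeholder, never a real key
params = {'request': '/v1/order/status', 'nonce': 123456}

payload = base64.b64encode(json.dumps(params).encode())
signature = hmac.new(secret_key.encode(), payload, hashlib.sha384).hexdigest()

print(payload.decode())
print(len(signature), signature)  # 96 hex chars for SHA-384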
def string2latlon(lat_str, lon_str, format_str):
'''
Create a LatLon object from a pair of strings.
Inputs:
lat_str (str) - string representation of a latitude (e.g. '5 52 59.88 N')
lon_str (str) - string representation of a longitude (e.g. '162 4 59.88 W')
format_str (str) - format in which the coordinate strings are given (e.g.
for the above examples this would be 'd% %m% %S% %H'). See function
string2geocoord for a detailed explanation on how to specify formats.
Returns:
A LatLon object initialized with coordinate data from lat_str and lon_str
'''
lat = string2geocoord(lat_str, Latitude, format_str)
lon = string2geocoord(lon_str, Longitude, format_str)
new_latlon = LatLon(lat = lat, lon = lon)
return new_latlon | def function[string2latlon, parameter[lat_str, lon_str, format_str]]:
constant[
Create a LatLon object from a pair of strings.
Inputs:
lat_str (str) - string representation of a latitude (e.g. '5 52 59.88 N')
lon_str (str) - string representation of a longitude (e.g. '162 4 59.88 W')
format_str (str) - format in which the coordinate strings are given (e.g.
for the above examples this would be 'd% %m% %S% %H'). See function
string2geocoord for a detailed explanation on how to specify formats.
Returns:
A LatLon object initialized with coordinate data from lat_str and lon_str
]
variable[lat] assign[=] call[name[string2geocoord], parameter[name[lat_str], name[Latitude], name[format_str]]]
variable[lon] assign[=] call[name[string2geocoord], parameter[name[lon_str], name[Longitude], name[format_str]]]
variable[new_latlon] assign[=] call[name[LatLon], parameter[]]
return[name[new_latlon]] | keyword[def] identifier[string2latlon] ( identifier[lat_str] , identifier[lon_str] , identifier[format_str] ):
literal[string]
identifier[lat] = identifier[string2geocoord] ( identifier[lat_str] , identifier[Latitude] , identifier[format_str] )
identifier[lon] = identifier[string2geocoord] ( identifier[lon_str] , identifier[Longitude] , identifier[format_str] )
identifier[new_latlon] = identifier[LatLon] ( identifier[lat] = identifier[lat] , identifier[lon] = identifier[lon] )
keyword[return] identifier[new_latlon] | def string2latlon(lat_str, lon_str, format_str):
"""
Create a LatLon object from a pair of strings.
Inputs:
lat_str (str) - string representation of a latitude (e.g. '5 52 59.88 N')
lon_str (str) - string representation of a longitude (e.g. '162 4 59.88 W')
format_str (str) - format in which the coordinate strings are given (e.g.
for the above examples this would be 'd% %m% %S% %H'). See function
string2geocoord for a detailed explanation on how to specify formats.
Returns:
A LatLon object initialized with coordinate data from lat_str and lon_str
"""
lat = string2geocoord(lat_str, Latitude, format_str)
lon = string2geocoord(lon_str, Longitude, format_str)
new_latlon = LatLon(lat=lat, lon=lon)
return new_latlon |
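A usage sketch matching the docstring's own example; this assumes string2geocoord, Latitude, Longitude and LatLon from the same package are importable, as in the function above. In the format string, '%' splits tokens and the text between '% %' is the literal field separator (a space here):

point = string2latlon('5 52 59.88 N', '162 4 59.88 W', 'd% %m% %S% %H')
print(point)  # a LatLon at roughly 5.883 N, 162.083 W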
def load(cls,
config: Params,
serialization_dir: str,
weights_file: str = None,
cuda_device: int = -1) -> 'Model':
"""
Instantiates an already-trained model, based on the experiment
configuration and some optional overrides.
Parameters
----------
config: Params
The configuration that was used to train the model. It should definitely
have a `model` section, and should probably have a `trainer` section
as well.
serialization_dir: str = None
The directory containing the serialized weights, parameters, and vocabulary
of the model.
weights_file: str = None
By default we load the weights from `best.th` in the serialization
directory, but you can override that value here.
cuda_device: int = -1
By default we load the model on the CPU, but if you want to load it
for GPU usage you can specify the id of your GPU here
Returns
-------
model: Model
The model specified in the configuration, loaded with the serialized
vocabulary and the trained weights.
"""
# Peek at the class of the model.
model_type = config["model"]["type"]
# Load using an overridable _load method.
# This allows subclasses of Model to override _load.
# pylint: disable=protected-access
return cls.by_name(model_type)._load(config, serialization_dir, weights_file, cuda_device) | def function[load, parameter[cls, config, serialization_dir, weights_file, cuda_device]]:
constant[
Instantiates an already-trained model, based on the experiment
configuration and some optional overrides.
Parameters
----------
config: Params
The configuration that was used to train the model. It should definitely
have a `model` section, and should probably have a `trainer` section
as well.
serialization_dir: str = None
The directory containing the serialized weights, parameters, and vocabulary
of the model.
weights_file: str = None
By default we load the weights from `best.th` in the serialization
directory, but you can override that value here.
cuda_device: int = -1
By default we load the model on the CPU, but if you want to load it
for GPU usage you can specify the id of your GPU here
Returns
-------
model: Model
The model specified in the configuration, loaded with the serialized
vocabulary and the trained weights.
]
variable[model_type] assign[=] call[call[name[config]][constant[model]]][constant[type]]
return[call[call[name[cls].by_name, parameter[name[model_type]]]._load, parameter[name[config], name[serialization_dir], name[weights_file], name[cuda_device]]]] | keyword[def] identifier[load] ( identifier[cls] ,
identifier[config] : identifier[Params] ,
identifier[serialization_dir] : identifier[str] ,
identifier[weights_file] : identifier[str] = keyword[None] ,
identifier[cuda_device] : identifier[int] =- literal[int] )-> literal[string] :
literal[string]
identifier[model_type] = identifier[config] [ literal[string] ][ literal[string] ]
keyword[return] identifier[cls] . identifier[by_name] ( identifier[model_type] ). identifier[_load] ( identifier[config] , identifier[serialization_dir] , identifier[weights_file] , identifier[cuda_device] ) | def load(cls, config: Params, serialization_dir: str, weights_file: str=None, cuda_device: int=-1) -> 'Model':
"""
Instantiates an already-trained model, based on the experiment
configuration and some optional overrides.
Parameters
----------
config: Params
The configuration that was used to train the model. It should definitely
have a `model` section, and should probably have a `trainer` section
as well.
serialization_dir: str = None
The directory containing the serialized weights, parameters, and vocabulary
of the model.
weights_file: str = None
By default we load the weights from `best.th` in the serialization
directory, but you can override that value here.
cuda_device: int = -1
By default we load the model on the CPU, but if you want to load it
for GPU usage you can specify the id of your GPU here
Returns
-------
model: Model
The model specified in the configuration, loaded with the serialized
vocabulary and the trained weights.
"""
# Peak at the class of the model.
model_type = config['model']['type']
# Load using an overridable _load method.
# This allows subclasses of Model to override _load.
# pylint: disable=protected-access
return cls.by_name(model_type)._load(config, serialization_dir, weights_file, cuda_device) |
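
A minimal usage sketch for the loader above. The config path, directory layout, and the `Params.from_file` helper are assumptions about the surrounding framework, not guaranteed API:

    from allennlp.common.params import Params
    from allennlp.models.model import Model

    config = Params.from_file("experiment/config.json")  # training-time config (path illustrative)
    model = Model.load(config,
                       serialization_dir="experiment",   # holds vocabulary/ and best.th
                       weights_file=None,                # None -> default best.th
                       cuda_device=-1)                   # -1 keeps the model on CPU
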
def tasks(self, task_cls=None):
""" :meth:`.WTaskRegistryBase.tasks` implementation
"""
result = []
for tasks in self.__registry.values():
result.extend(tasks)
if task_cls is not None:
result = filter(lambda x: issubclass(x, task_cls), result)
return tuple(result) | def function[tasks, parameter[self, task_cls]]:
constant[ :meth:`.WTaskRegistryBase.tasks` implementation
]
variable[result] assign[=] list[[]]
for taget[name[tasks]] in starred[call[name[self].__registry.values, parameter[]]] begin[:]
call[name[result].extend, parameter[name[tasks]]]
if compare[name[task_cls] is_not constant[None]] begin[:]
variable[result] assign[=] call[name[filter], parameter[<ast.Lambda object at 0x7da2044c2b30>, name[result]]]
return[call[name[tuple], parameter[name[result]]]] | keyword[def] identifier[tasks] ( identifier[self] , identifier[task_cls] = keyword[None] ):
literal[string]
identifier[result] =[]
keyword[for] identifier[tasks] keyword[in] identifier[self] . identifier[__registry] . identifier[values] ():
identifier[result] . identifier[extend] ( identifier[tasks] )
keyword[if] identifier[task_cls] keyword[is] keyword[not] keyword[None] :
identifier[result] = identifier[filter] ( keyword[lambda] identifier[x] : identifier[issubclass] ( identifier[x] , identifier[task_cls] ), identifier[result] )
keyword[return] identifier[tuple] ( identifier[result] ) | def tasks(self, task_cls=None):
""" :meth:`.WTaskRegistryBase.tasks` implementation
"""
result = []
for tasks in self.__registry.values():
result.extend(tasks) # depends on [control=['for'], data=['tasks']]
if task_cls is not None:
result = filter(lambda x: issubclass(x, task_cls), result) # depends on [control=['if'], data=['task_cls']]
return tuple(result) |
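
For context, a call like the following would collect only the registered tasks of a given subclass; the concrete registry and task class names here are hypothetical:

    registry = WTaskRegistry()                       # hypothetical concrete registry
    http_tasks = registry.tasks(task_cls=WHTTPTask)  # only tasks subclassing WHTTPTask
    all_tasks = registry.tasks()                     # no filter: every registered task
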
def get_service_info(self):
"""
Get information about Workflow Execution Service. May
include information related (but not limited to) the
workflow descriptor formats, versions supported, the
        WES API versions supported, and general information about
        the service availability.
        :param str auth: String to send in the auth header.
        :param proto: Scheme where the server resides (http, https)
        :param host: Host and port where the request will be sent and where the WES server listens (default port 8080)
        :return: The body of the GET result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/service-info" % (self.proto, self.host),
headers=self.auth)
return wes_reponse(postresult) | def function[get_service_info, parameter[self]]:
constant[
Get information about Workflow Execution Service. May
include information related (but not limited to) the
workflow descriptor formats, versions supported, the
        WES API versions supported, and general information about
        the service availability.
        :param str auth: String to send in the auth header.
        :param proto: Scheme where the server resides (http, https)
        :param host: Host and port where the request will be sent and where the WES server listens (default port 8080)
        :return: The body of the GET result as a dictionary.
]
variable[postresult] assign[=] call[name[requests].get, parameter[binary_operation[constant[%s://%s/ga4gh/wes/v1/service-info] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da207f00b80>, <ast.Attribute object at 0x7da207f021d0>]]]]]
return[call[name[wes_reponse], parameter[name[postresult]]]] | keyword[def] identifier[get_service_info] ( identifier[self] ):
literal[string]
identifier[postresult] = identifier[requests] . identifier[get] ( literal[string] %( identifier[self] . identifier[proto] , identifier[self] . identifier[host] ),
identifier[headers] = identifier[self] . identifier[auth] )
keyword[return] identifier[wes_reponse] ( identifier[postresult] ) | def get_service_info(self):
"""
Get information about Workflow Execution Service. May
include information related (but not limited to) the
workflow descriptor formats, versions supported, the
        WES API versions supported, and general information about
        the service availability.
        :param str auth: String to send in the auth header.
        :param proto: Scheme where the server resides (http, https)
        :param host: Host and port where the request will be sent and where the WES server listens (default port 8080)
        :return: The body of the GET result as a dictionary.
"""
postresult = requests.get('%s://%s/ga4gh/wes/v1/service-info' % (self.proto, self.host), headers=self.auth)
return wes_reponse(postresult) |
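
A hedged example of calling this method; the client constructor and its service dict are assumptions inferred from the attributes (`proto`, `host`, `auth`) used above:

    client = WESClient({"auth": {}, "proto": "http", "host": "localhost:8080"})  # signature assumed
    info = client.get_service_info()
    print(info.get("supported_wes_versions"))
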
def as_xml(self,parent):
"""Create vcard-tmp XML representation of the field.
:Parameters:
- `parent`: parent node for the element
:Types:
- `parent`: `libxml2.xmlNode`
:return: xml node with the field data.
:returntype: `libxml2.xmlNode`"""
if self.value in ("public","private","confidental"):
n=parent.newChild(None,self.name.upper(),None)
n.newChild(None,self.value.upper(),None)
return n
return None | def function[as_xml, parameter[self, parent]]:
constant[Create vcard-tmp XML representation of the field.
:Parameters:
- `parent`: parent node for the element
:Types:
- `parent`: `libxml2.xmlNode`
:return: xml node with the field data.
:returntype: `libxml2.xmlNode`]
if compare[name[self].value in tuple[[<ast.Constant object at 0x7da18ede5c00>, <ast.Constant object at 0x7da18ede4a30>, <ast.Constant object at 0x7da18ede6770>]]] begin[:]
variable[n] assign[=] call[name[parent].newChild, parameter[constant[None], call[name[self].name.upper, parameter[]], constant[None]]]
call[name[n].newChild, parameter[constant[None], call[name[self].value.upper, parameter[]], constant[None]]]
return[name[n]]
return[constant[None]] | keyword[def] identifier[as_xml] ( identifier[self] , identifier[parent] ):
literal[string]
keyword[if] identifier[self] . identifier[value] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[n] = identifier[parent] . identifier[newChild] ( keyword[None] , identifier[self] . identifier[name] . identifier[upper] (), keyword[None] )
identifier[n] . identifier[newChild] ( keyword[None] , identifier[self] . identifier[value] . identifier[upper] (), keyword[None] )
keyword[return] identifier[n]
keyword[return] keyword[None] | def as_xml(self, parent):
"""Create vcard-tmp XML representation of the field.
:Parameters:
- `parent`: parent node for the element
:Types:
- `parent`: `libxml2.xmlNode`
:return: xml node with the field data.
:returntype: `libxml2.xmlNode`"""
if self.value in ('public', 'private', 'confidental'):
n = parent.newChild(None, self.name.upper(), None)
n.newChild(None, self.value.upper(), None)
return n # depends on [control=['if'], data=[]]
return None |
def p_expr1(p):
"""expr1 : MINUS expr %prec UMINUS
| PLUS expr %prec UMINUS
| NEG expr
| HANDLE ident
| PLUSPLUS ident
| MINUSMINUS ident
"""
p[0] = node.expr(op=p[1], args=node.expr_list([p[2]])) | def function[p_expr1, parameter[p]]:
constant[expr1 : MINUS expr %prec UMINUS
| PLUS expr %prec UMINUS
| NEG expr
| HANDLE ident
| PLUSPLUS ident
| MINUSMINUS ident
]
call[name[p]][constant[0]] assign[=] call[name[node].expr, parameter[]] | keyword[def] identifier[p_expr1] ( identifier[p] ):
literal[string]
identifier[p] [ literal[int] ]= identifier[node] . identifier[expr] ( identifier[op] = identifier[p] [ literal[int] ], identifier[args] = identifier[node] . identifier[expr_list] ([ identifier[p] [ literal[int] ]])) | def p_expr1(p):
"""expr1 : MINUS expr %prec UMINUS
| PLUS expr %prec UMINUS
| NEG expr
| HANDLE ident
| PLUSPLUS ident
| MINUSMINUS ident
"""
p[0] = node.expr(op=p[1], args=node.expr_list([p[2]])) |
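
In PLY, the docstring above is the grammar rule itself, and `%prec UMINUS` binds the unary forms to a pseudo-token from the parser's precedence table, declared roughly like this (the exact token set is an assumption):

    precedence = (
        ("left", "PLUS", "MINUS"),   # binary +/- bind loosely
        ("right", "UMINUS"),         # unary operators bind tightest
    )
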
def default_asset_manager(self):
"""
Returns the default asset manager using the current config.
This is only used if asset_manager is set to None in the constructor.
"""
cache_path = None
cache_directory = self.config['CACHE_DIRECTORY']
if cache_directory:
cache_directory = cache_directory.format(version=__version__)
cache_path = os.path.join(self.instance_path, cache_directory)
return GitHubAssetManager(
cache_path, self.config['STYLE_URLS'], self.quiet) | def function[default_asset_manager, parameter[self]]:
constant[
Returns the default asset manager using the current config.
This is only used if asset_manager is set to None in the constructor.
]
variable[cache_path] assign[=] constant[None]
variable[cache_directory] assign[=] call[name[self].config][constant[CACHE_DIRECTORY]]
if name[cache_directory] begin[:]
variable[cache_directory] assign[=] call[name[cache_directory].format, parameter[]]
variable[cache_path] assign[=] call[name[os].path.join, parameter[name[self].instance_path, name[cache_directory]]]
return[call[name[GitHubAssetManager], parameter[name[cache_path], call[name[self].config][constant[STYLE_URLS]], name[self].quiet]]] | keyword[def] identifier[default_asset_manager] ( identifier[self] ):
literal[string]
identifier[cache_path] = keyword[None]
identifier[cache_directory] = identifier[self] . identifier[config] [ literal[string] ]
keyword[if] identifier[cache_directory] :
identifier[cache_directory] = identifier[cache_directory] . identifier[format] ( identifier[version] = identifier[__version__] )
identifier[cache_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[instance_path] , identifier[cache_directory] )
keyword[return] identifier[GitHubAssetManager] (
identifier[cache_path] , identifier[self] . identifier[config] [ literal[string] ], identifier[self] . identifier[quiet] ) | def default_asset_manager(self):
"""
Returns the default asset manager using the current config.
This is only used if asset_manager is set to None in the constructor.
"""
cache_path = None
cache_directory = self.config['CACHE_DIRECTORY']
if cache_directory:
cache_directory = cache_directory.format(version=__version__)
cache_path = os.path.join(self.instance_path, cache_directory) # depends on [control=['if'], data=[]]
return GitHubAssetManager(cache_path, self.config['STYLE_URLS'], self.quiet) |
def setup_graph(self):
"""
        Creates our Graph and figures out which shared/global model to hook up to.
If we are in a global-model's setup procedure, we do not create
a new graph (return None as the context). We will instead use the already existing local replica graph
of the model.
Returns: None or the graph's as_default()-context.
"""
graph_default_context = None
# Single (non-distributed) mode.
if self.execution_type == "single":
self.graph = tf.Graph()
graph_default_context = self.graph.as_default()
graph_default_context.__enter__()
self.global_model = None
# Distributed tf
elif self.execution_type == "distributed":
# Parameter-server -> Do not build any graph.
if self.distributed_spec["job"] == "ps":
return None
# worker -> construct the global (main) model; the one hosted on the ps,
elif self.distributed_spec["job"] == "worker":
# The local replica model.
if self.is_local_model:
graph = tf.Graph()
graph_default_context = graph.as_default()
graph_default_context.__enter__()
                # Now that the graph is created and entered -> deepcopy ourselves and set up the global model first,
# then continue.
self.global_model = deepcopy(self)
# Switch on global construction/setup-mode for the pass to setup().
self.global_model.is_local_model = False
self.global_model.setup()
self.graph = graph
self.as_local_model()
self.scope += '-worker' + str(self.distributed_spec["task_index"])
# The global_model (whose Variables are hosted by the ps).
else:
self.graph = tf.get_default_graph() # lives in the same graph as local model
self.global_model = None
self.device = tf.train.replica_device_setter(
# Place its Variables on the parameter server(s) (round robin).
#ps_device="/job:ps", # default
# Train-ops for the global_model are hosted locally (on this worker's node).
worker_device=self.device,
cluster=self.distributed_spec["cluster_spec"]
)
else:
raise TensorForceError("Unsupported job type: {}!".format(self.distributed_spec["job"]))
else:
raise TensorForceError("Unsupported distributed type: {}!".format(self.distributed_spec["type"]))
return graph_default_context | def function[setup_graph, parameter[self]]:
constant[
        Creates our Graph and figures out which shared/global model to hook up to.
If we are in a global-model's setup procedure, we do not create
a new graph (return None as the context). We will instead use the already existing local replica graph
of the model.
Returns: None or the graph's as_default()-context.
]
variable[graph_default_context] assign[=] constant[None]
if compare[name[self].execution_type equal[==] constant[single]] begin[:]
name[self].graph assign[=] call[name[tf].Graph, parameter[]]
variable[graph_default_context] assign[=] call[name[self].graph.as_default, parameter[]]
call[name[graph_default_context].__enter__, parameter[]]
name[self].global_model assign[=] constant[None]
return[name[graph_default_context]] | keyword[def] identifier[setup_graph] ( identifier[self] ):
literal[string]
identifier[graph_default_context] = keyword[None]
keyword[if] identifier[self] . identifier[execution_type] == literal[string] :
identifier[self] . identifier[graph] = identifier[tf] . identifier[Graph] ()
identifier[graph_default_context] = identifier[self] . identifier[graph] . identifier[as_default] ()
identifier[graph_default_context] . identifier[__enter__] ()
identifier[self] . identifier[global_model] = keyword[None]
keyword[elif] identifier[self] . identifier[execution_type] == literal[string] :
keyword[if] identifier[self] . identifier[distributed_spec] [ literal[string] ]== literal[string] :
keyword[return] keyword[None]
keyword[elif] identifier[self] . identifier[distributed_spec] [ literal[string] ]== literal[string] :
keyword[if] identifier[self] . identifier[is_local_model] :
identifier[graph] = identifier[tf] . identifier[Graph] ()
identifier[graph_default_context] = identifier[graph] . identifier[as_default] ()
identifier[graph_default_context] . identifier[__enter__] ()
identifier[self] . identifier[global_model] = identifier[deepcopy] ( identifier[self] )
identifier[self] . identifier[global_model] . identifier[is_local_model] = keyword[False]
identifier[self] . identifier[global_model] . identifier[setup] ()
identifier[self] . identifier[graph] = identifier[graph]
identifier[self] . identifier[as_local_model] ()
identifier[self] . identifier[scope] += literal[string] + identifier[str] ( identifier[self] . identifier[distributed_spec] [ literal[string] ])
keyword[else] :
identifier[self] . identifier[graph] = identifier[tf] . identifier[get_default_graph] ()
identifier[self] . identifier[global_model] = keyword[None]
identifier[self] . identifier[device] = identifier[tf] . identifier[train] . identifier[replica_device_setter] (
identifier[worker_device] = identifier[self] . identifier[device] ,
identifier[cluster] = identifier[self] . identifier[distributed_spec] [ literal[string] ]
)
keyword[else] :
keyword[raise] identifier[TensorForceError] ( literal[string] . identifier[format] ( identifier[self] . identifier[distributed_spec] [ literal[string] ]))
keyword[else] :
keyword[raise] identifier[TensorForceError] ( literal[string] . identifier[format] ( identifier[self] . identifier[distributed_spec] [ literal[string] ]))
keyword[return] identifier[graph_default_context] | def setup_graph(self):
"""
        Creates our Graph and figures out which shared/global model to hook up to.
If we are in a global-model's setup procedure, we do not create
a new graph (return None as the context). We will instead use the already existing local replica graph
of the model.
Returns: None or the graph's as_default()-context.
"""
graph_default_context = None
# Single (non-distributed) mode.
if self.execution_type == 'single':
self.graph = tf.Graph()
graph_default_context = self.graph.as_default()
graph_default_context.__enter__()
self.global_model = None # depends on [control=['if'], data=[]]
# Distributed tf
elif self.execution_type == 'distributed':
# Parameter-server -> Do not build any graph.
if self.distributed_spec['job'] == 'ps':
return None # depends on [control=['if'], data=[]]
# worker -> construct the global (main) model; the one hosted on the ps,
elif self.distributed_spec['job'] == 'worker':
# The local replica model.
if self.is_local_model:
graph = tf.Graph()
graph_default_context = graph.as_default()
graph_default_context.__enter__()
                # Now that the graph is created and entered -> deepcopy ourselves and set up the global model first,
# then continue.
self.global_model = deepcopy(self)
# Switch on global construction/setup-mode for the pass to setup().
self.global_model.is_local_model = False
self.global_model.setup()
self.graph = graph
self.as_local_model()
self.scope += '-worker' + str(self.distributed_spec['task_index']) # depends on [control=['if'], data=[]]
else:
# The global_model (whose Variables are hosted by the ps).
self.graph = tf.get_default_graph() # lives in the same graph as local model
self.global_model = None
# Place its Variables on the parameter server(s) (round robin).
#ps_device="/job:ps", # default
# Train-ops for the global_model are hosted locally (on this worker's node).
self.device = tf.train.replica_device_setter(worker_device=self.device, cluster=self.distributed_spec['cluster_spec']) # depends on [control=['if'], data=[]]
else:
raise TensorForceError('Unsupported job type: {}!'.format(self.distributed_spec['job'])) # depends on [control=['if'], data=[]]
else:
raise TensorForceError('Unsupported distributed type: {}!'.format(self.distributed_spec['type']))
return graph_default_context |
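
The keys read from `self.distributed_spec` suggest a configuration shaped like the sketch below; the cluster addresses are placeholders and the exact schema is an inference from the lookups above:

    import tensorflow as tf

    distributed_spec = {
        "type": "distributed",
        "job": "worker",             # or "ps" for a parameter server
        "task_index": 0,
        "cluster_spec": tf.train.ClusterSpec({
            "ps": ["localhost:2222"],
            "worker": ["localhost:2223"],
        }),
    }
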
def refine (self, requirements):
""" Refines this set's properties using the requirements passed as an argument.
"""
assert isinstance(requirements, PropertySet)
if requirements not in self.refined_:
r = property.refine(self.all_, requirements.all_)
self.refined_[requirements] = create(r)
return self.refined_[requirements] | def function[refine, parameter[self, requirements]]:
constant[ Refines this set's properties using the requirements passed as an argument.
]
assert[call[name[isinstance], parameter[name[requirements], name[PropertySet]]]]
if compare[name[requirements] <ast.NotIn object at 0x7da2590d7190> name[self].refined_] begin[:]
variable[r] assign[=] call[name[property].refine, parameter[name[self].all_, name[requirements].all_]]
call[name[self].refined_][name[requirements]] assign[=] call[name[create], parameter[name[r]]]
return[call[name[self].refined_][name[requirements]]] | keyword[def] identifier[refine] ( identifier[self] , identifier[requirements] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[requirements] , identifier[PropertySet] )
keyword[if] identifier[requirements] keyword[not] keyword[in] identifier[self] . identifier[refined_] :
identifier[r] = identifier[property] . identifier[refine] ( identifier[self] . identifier[all_] , identifier[requirements] . identifier[all_] )
identifier[self] . identifier[refined_] [ identifier[requirements] ]= identifier[create] ( identifier[r] )
keyword[return] identifier[self] . identifier[refined_] [ identifier[requirements] ] | def refine(self, requirements):
""" Refines this set's properties using the requirements passed as an argument.
"""
assert isinstance(requirements, PropertySet)
if requirements not in self.refined_:
r = property.refine(self.all_, requirements.all_)
self.refined_[requirements] = create(r) # depends on [control=['if'], data=['requirements']]
return self.refined_[requirements] |
def clean_config(config):
"""Check if all values have defaults and replace errors with their default value
:param config: the configobj to clean
:type config: ConfigObj
:returns: None
:raises: ConfigError
The object is validated, so we need a spec file. All failed values will be replaced
by their default values. If default values are not specified in the spec, a
MissingDefaultError will be raised. If the replaced values still fail validation,
a ValueError is raised. This can occur if the default is of the wrong type.
If the object does not have a config spec, this function does nothing.
You are on your own then.
"""
if config.configspec is None:
return
vld = Validator()
validation = config.validate(vld, copy=True)
config.configspec.walk(check_default_values, validator=vld)
fix_errors(config, validation)
validation = config.validate(vld, copy=True)
if not (validation == True): # NOQA seems unpythonic but this validation evaluates that way only
msg = 'The config could not be fixed. Make sure that all default values have the right type!'
log.debug(msg)
raise ConfigError(msg) | def function[clean_config, parameter[config]]:
constant[Check if all values have defaults and replace errors with their default value
:param config: the configobj to clean
:type config: ConfigObj
:returns: None
:raises: ConfigError
The object is validated, so we need a spec file. All failed values will be replaced
by their default values. If default values are not specified in the spec, a
MissingDefaultError will be raised. If the replaced values still fail validation,
a ValueError is raised. This can occur if the default is of the wrong type.
If the object does not have a config spec, this function does nothing.
You are on your own then.
]
if compare[name[config].configspec is constant[None]] begin[:]
return[None]
variable[vld] assign[=] call[name[Validator], parameter[]]
variable[validation] assign[=] call[name[config].validate, parameter[name[vld]]]
call[name[config].configspec.walk, parameter[name[check_default_values]]]
call[name[fix_errors], parameter[name[config], name[validation]]]
variable[validation] assign[=] call[name[config].validate, parameter[name[vld]]]
if <ast.UnaryOp object at 0x7da1b1627460> begin[:]
variable[msg] assign[=] constant[The config could not be fixed. Make sure that all default values have the right type!]
call[name[log].debug, parameter[name[msg]]]
<ast.Raise object at 0x7da1b16435e0> | keyword[def] identifier[clean_config] ( identifier[config] ):
literal[string]
keyword[if] identifier[config] . identifier[configspec] keyword[is] keyword[None] :
keyword[return]
identifier[vld] = identifier[Validator] ()
identifier[validation] = identifier[config] . identifier[validate] ( identifier[vld] , identifier[copy] = keyword[True] )
identifier[config] . identifier[configspec] . identifier[walk] ( identifier[check_default_values] , identifier[validator] = identifier[vld] )
identifier[fix_errors] ( identifier[config] , identifier[validation] )
identifier[validation] = identifier[config] . identifier[validate] ( identifier[vld] , identifier[copy] = keyword[True] )
keyword[if] keyword[not] ( identifier[validation] == keyword[True] ):
identifier[msg] = literal[string]
identifier[log] . identifier[debug] ( identifier[msg] )
keyword[raise] identifier[ConfigError] ( identifier[msg] ) | def clean_config(config):
"""Check if all values have defaults and replace errors with their default value
:param config: the configobj to clean
:type config: ConfigObj
:returns: None
:raises: ConfigError
The object is validated, so we need a spec file. All failed values will be replaced
by their default values. If default values are not specified in the spec, a
MissingDefaultError will be raised. If the replaced values still fail validation,
a ValueError is raised. This can occur if the default is of the wrong type.
If the object does not have a config spec, this function does nothing.
You are on your own then.
"""
if config.configspec is None:
return # depends on [control=['if'], data=[]]
vld = Validator()
validation = config.validate(vld, copy=True)
config.configspec.walk(check_default_values, validator=vld)
fix_errors(config, validation)
validation = config.validate(vld, copy=True)
if not validation == True: # NOQA seems unpythonic but this validation evaluates that way only
msg = 'The config could not be fixed. Make sure that all default values have the right type!'
log.debug(msg)
raise ConfigError(msg) # depends on [control=['if'], data=[]] |
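
A minimal sketch of repairing a config with this helper, assuming a spec file `app.spec` that declares a default for every key:

    from configobj import ConfigObj

    config = ConfigObj("app.cfg", configspec="app.spec")
    clean_config(config)   # invalid values are reset to their spec defaults
    config.write()         # persist the repaired configuration
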
def read_dir(path, folder):
""" Returns a list of relative file paths to `path` for all files within `folder` """
full_path = os.path.join(path, folder)
fnames = glob(f"{full_path}/*.*")
directories = glob(f"{full_path}/*/")
if any(fnames):
return [os.path.relpath(f,path) for f in fnames]
elif any(directories):
raise FileNotFoundError("{} has subdirectories but contains no files. Is your directory structure is correct?".format(full_path))
else:
raise FileNotFoundError("{} folder doesn't exist or is empty".format(full_path)) | def function[read_dir, parameter[path, folder]]:
constant[ Returns a list of relative file paths to `path` for all files within `folder` ]
variable[full_path] assign[=] call[name[os].path.join, parameter[name[path], name[folder]]]
variable[fnames] assign[=] call[name[glob], parameter[<ast.JoinedStr object at 0x7da1b1d6fa00>]]
variable[directories] assign[=] call[name[glob], parameter[<ast.JoinedStr object at 0x7da1b1d6da20>]]
if call[name[any], parameter[name[fnames]]] begin[:]
return[<ast.ListComp object at 0x7da1b1d6e050>] | keyword[def] identifier[read_dir] ( identifier[path] , identifier[folder] ):
literal[string]
identifier[full_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[folder] )
identifier[fnames] = identifier[glob] ( literal[string] )
identifier[directories] = identifier[glob] ( literal[string] )
keyword[if] identifier[any] ( identifier[fnames] ):
keyword[return] [ identifier[os] . identifier[path] . identifier[relpath] ( identifier[f] , identifier[path] ) keyword[for] identifier[f] keyword[in] identifier[fnames] ]
keyword[elif] identifier[any] ( identifier[directories] ):
keyword[raise] identifier[FileNotFoundError] ( literal[string] . identifier[format] ( identifier[full_path] ))
keyword[else] :
keyword[raise] identifier[FileNotFoundError] ( literal[string] . identifier[format] ( identifier[full_path] )) | def read_dir(path, folder):
""" Returns a list of relative file paths to `path` for all files within `folder` """
full_path = os.path.join(path, folder)
fnames = glob(f'{full_path}/*.*')
directories = glob(f'{full_path}/*/')
if any(fnames):
return [os.path.relpath(f, path) for f in fnames] # depends on [control=['if'], data=[]]
elif any(directories):
        raise FileNotFoundError('{} has subdirectories but contains no files. Is your directory structure correct?'.format(full_path)) # depends on [control=['if'], data=[]]
else:
raise FileNotFoundError("{} folder doesn't exist or is empty".format(full_path)) |
def loads(cls, data, store_password, try_decrypt_keys=True):
"""
See :meth:`jks.jks.KeyStore.loads`.
:param bytes data: Byte string representation of the keystore to be loaded.
:param str password: Keystore password string
:param bool try_decrypt_keys: Whether to automatically try to decrypt any encountered key entries using the same password
as the keystore password.
:returns: A loaded :class:`UberKeyStore` instance, if the keystore could be successfully parsed and the supplied store password is correct.
                  If the ``try_decrypt_keys`` parameter was set to ``True``, any keys that could be successfully decrypted using the
                  store password have already been decrypted; otherwise, no attempt to decrypt any key entries is made.
:raises BadKeystoreFormatException: If the keystore is malformed in some way
:raises UnsupportedKeystoreVersionException: If the keystore contains an unknown format version number
:raises KeystoreSignatureException: If the keystore signature could not be verified using the supplied store password
:raises DecryptionFailureException: If the keystore contents could not be decrypted using the supplied store password
:raises DuplicateAliasException: If the keystore contains duplicate aliases
"""
# Uber keystores contain the same entry data as BKS keystores, except they wrap it differently:
# BKS = BKS_store || HMAC-SHA1(BKS_store)
# UBER = PBEWithSHAAndTwofish-CBC(BKS_store || SHA1(BKS_store))
#
# where BKS_store represents the entry format shared by both keystore types.
#
# The Twofish key size is 256 bits, the PBE key derivation scheme is that as outlined by PKCS#12 (RFC 7292),
# and the padding scheme for the Twofish cipher is PKCS#7.
try:
pos = 0
version = b4.unpack_from(data, pos)[0]; pos += 4
if version != 1:
raise UnsupportedKeystoreVersionException('Unsupported UBER keystore version; only v1 supported, found v'+repr(version))
salt, pos = cls._read_data(data, pos)
iteration_count = b4.unpack_from(data, pos)[0]; pos += 4
encrypted_bks_store = data[pos:]
try:
decrypted = rfc7292.decrypt_PBEWithSHAAndTwofishCBC(encrypted_bks_store, store_password, salt, iteration_count)
except BadDataLengthException as e:
raise BadKeystoreFormatException("Bad UBER keystore format: %s" % str(e))
except BadPaddingException as e:
raise DecryptionFailureException("Failed to decrypt UBER keystore: bad password?")
# Note: we can assume that the hash must be present at the last 20 bytes of the decrypted data (i.e. without first
        # parsing through to see where the entry data actually ends), because valid UBER keystore generators should not put
# any trailing bytes after the hash prior to encrypting.
hash_fn = hashlib.sha1
hash_digest_size = hash_fn().digest_size
bks_store = decrypted[:-hash_digest_size]
bks_hash = decrypted[-hash_digest_size:]
if len(bks_hash) != hash_digest_size:
raise BadKeystoreFormatException("Insufficient signature bytes; found %d bytes, expected %d bytes" % (len(bks_hash), hash_digest_size))
if hash_fn(bks_store).digest() != bks_hash:
raise KeystoreSignatureException("Hash mismatch; incorrect keystore password?")
store_type = "uber"
entries, size = cls._load_bks_entries(bks_store, store_type, store_password, try_decrypt_keys=try_decrypt_keys)
return cls(store_type, entries, version=version)
except struct.error as e:
raise BadKeystoreFormatException(e) | def function[loads, parameter[cls, data, store_password, try_decrypt_keys]]:
constant[
See :meth:`jks.jks.KeyStore.loads`.
:param bytes data: Byte string representation of the keystore to be loaded.
:param str password: Keystore password string
:param bool try_decrypt_keys: Whether to automatically try to decrypt any encountered key entries using the same password
as the keystore password.
:returns: A loaded :class:`UberKeyStore` instance, if the keystore could be successfully parsed and the supplied store password is correct.
                  If the ``try_decrypt_keys`` parameter was set to ``True``, any keys that could be successfully decrypted using the
                  store password have already been decrypted; otherwise, no attempt to decrypt any key entries is made.
:raises BadKeystoreFormatException: If the keystore is malformed in some way
:raises UnsupportedKeystoreVersionException: If the keystore contains an unknown format version number
:raises KeystoreSignatureException: If the keystore signature could not be verified using the supplied store password
:raises DecryptionFailureException: If the keystore contents could not be decrypted using the supplied store password
:raises DuplicateAliasException: If the keystore contains duplicate aliases
]
<ast.Try object at 0x7da1b0656ad0> | keyword[def] identifier[loads] ( identifier[cls] , identifier[data] , identifier[store_password] , identifier[try_decrypt_keys] = keyword[True] ):
literal[string]
keyword[try] :
identifier[pos] = literal[int]
identifier[version] = identifier[b4] . identifier[unpack_from] ( identifier[data] , identifier[pos] )[ literal[int] ]; identifier[pos] += literal[int]
keyword[if] identifier[version] != literal[int] :
keyword[raise] identifier[UnsupportedKeystoreVersionException] ( literal[string] + identifier[repr] ( identifier[version] ))
identifier[salt] , identifier[pos] = identifier[cls] . identifier[_read_data] ( identifier[data] , identifier[pos] )
identifier[iteration_count] = identifier[b4] . identifier[unpack_from] ( identifier[data] , identifier[pos] )[ literal[int] ]; identifier[pos] += literal[int]
identifier[encrypted_bks_store] = identifier[data] [ identifier[pos] :]
keyword[try] :
identifier[decrypted] = identifier[rfc7292] . identifier[decrypt_PBEWithSHAAndTwofishCBC] ( identifier[encrypted_bks_store] , identifier[store_password] , identifier[salt] , identifier[iteration_count] )
keyword[except] identifier[BadDataLengthException] keyword[as] identifier[e] :
keyword[raise] identifier[BadKeystoreFormatException] ( literal[string] % identifier[str] ( identifier[e] ))
keyword[except] identifier[BadPaddingException] keyword[as] identifier[e] :
keyword[raise] identifier[DecryptionFailureException] ( literal[string] )
identifier[hash_fn] = identifier[hashlib] . identifier[sha1]
identifier[hash_digest_size] = identifier[hash_fn] (). identifier[digest_size]
identifier[bks_store] = identifier[decrypted] [:- identifier[hash_digest_size] ]
identifier[bks_hash] = identifier[decrypted] [- identifier[hash_digest_size] :]
keyword[if] identifier[len] ( identifier[bks_hash] )!= identifier[hash_digest_size] :
keyword[raise] identifier[BadKeystoreFormatException] ( literal[string] %( identifier[len] ( identifier[bks_hash] ), identifier[hash_digest_size] ))
keyword[if] identifier[hash_fn] ( identifier[bks_store] ). identifier[digest] ()!= identifier[bks_hash] :
keyword[raise] identifier[KeystoreSignatureException] ( literal[string] )
identifier[store_type] = literal[string]
identifier[entries] , identifier[size] = identifier[cls] . identifier[_load_bks_entries] ( identifier[bks_store] , identifier[store_type] , identifier[store_password] , identifier[try_decrypt_keys] = identifier[try_decrypt_keys] )
keyword[return] identifier[cls] ( identifier[store_type] , identifier[entries] , identifier[version] = identifier[version] )
keyword[except] identifier[struct] . identifier[error] keyword[as] identifier[e] :
keyword[raise] identifier[BadKeystoreFormatException] ( identifier[e] ) | def loads(cls, data, store_password, try_decrypt_keys=True):
"""
See :meth:`jks.jks.KeyStore.loads`.
:param bytes data: Byte string representation of the keystore to be loaded.
:param str password: Keystore password string
:param bool try_decrypt_keys: Whether to automatically try to decrypt any encountered key entries using the same password
as the keystore password.
:returns: A loaded :class:`UberKeyStore` instance, if the keystore could be successfully parsed and the supplied store password is correct.
                  If the ``try_decrypt_keys`` parameter was set to ``True``, any keys that could be successfully decrypted using the
                  store password have already been decrypted; otherwise, no attempt to decrypt any key entries is made.
:raises BadKeystoreFormatException: If the keystore is malformed in some way
:raises UnsupportedKeystoreVersionException: If the keystore contains an unknown format version number
:raises KeystoreSignatureException: If the keystore signature could not be verified using the supplied store password
:raises DecryptionFailureException: If the keystore contents could not be decrypted using the supplied store password
:raises DuplicateAliasException: If the keystore contains duplicate aliases
"""
# Uber keystores contain the same entry data as BKS keystores, except they wrap it differently:
# BKS = BKS_store || HMAC-SHA1(BKS_store)
# UBER = PBEWithSHAAndTwofish-CBC(BKS_store || SHA1(BKS_store))
#
# where BKS_store represents the entry format shared by both keystore types.
#
# The Twofish key size is 256 bits, the PBE key derivation scheme is that as outlined by PKCS#12 (RFC 7292),
# and the padding scheme for the Twofish cipher is PKCS#7.
try:
pos = 0
version = b4.unpack_from(data, pos)[0]
pos += 4
if version != 1:
raise UnsupportedKeystoreVersionException('Unsupported UBER keystore version; only v1 supported, found v' + repr(version)) # depends on [control=['if'], data=['version']]
(salt, pos) = cls._read_data(data, pos)
iteration_count = b4.unpack_from(data, pos)[0]
pos += 4
encrypted_bks_store = data[pos:]
try:
decrypted = rfc7292.decrypt_PBEWithSHAAndTwofishCBC(encrypted_bks_store, store_password, salt, iteration_count) # depends on [control=['try'], data=[]]
except BadDataLengthException as e:
raise BadKeystoreFormatException('Bad UBER keystore format: %s' % str(e)) # depends on [control=['except'], data=['e']]
except BadPaddingException as e:
raise DecryptionFailureException('Failed to decrypt UBER keystore: bad password?') # depends on [control=['except'], data=[]]
# Note: we can assume that the hash must be present at the last 20 bytes of the decrypted data (i.e. without first
        # parsing through to see where the entry data actually ends), because valid UBER keystore generators should not put
# any trailing bytes after the hash prior to encrypting.
hash_fn = hashlib.sha1
hash_digest_size = hash_fn().digest_size
bks_store = decrypted[:-hash_digest_size]
bks_hash = decrypted[-hash_digest_size:]
if len(bks_hash) != hash_digest_size:
raise BadKeystoreFormatException('Insufficient signature bytes; found %d bytes, expected %d bytes' % (len(bks_hash), hash_digest_size)) # depends on [control=['if'], data=['hash_digest_size']]
if hash_fn(bks_store).digest() != bks_hash:
raise KeystoreSignatureException('Hash mismatch; incorrect keystore password?') # depends on [control=['if'], data=[]]
store_type = 'uber'
(entries, size) = cls._load_bks_entries(bks_store, store_type, store_password, try_decrypt_keys=try_decrypt_keys)
return cls(store_type, entries, version=version) # depends on [control=['try'], data=[]]
except struct.error as e:
raise BadKeystoreFormatException(e) # depends on [control=['except'], data=['e']] |
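
A hedged sketch of loading an UBER keystore from disk; the `entries` attribute name is an assumption based on the constructor call above, not documented API:

    with open("keystore.ubr", "rb") as f:
        store = UberKeyStore.loads(f.read(), "storepassword")
    for alias, entry in store.entries.items():   # attribute name assumed
        print(alias, type(entry).__name__)
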
def t_prepro_define_pragma_defargs_defargsopt_CONTINUE(self, t):
r'[_\\]\r?\n'
t.lexer.lineno += 1
t.value = t.value[1:]
t.type = 'NEWLINE'
return t | def function[t_prepro_define_pragma_defargs_defargsopt_CONTINUE, parameter[self, t]]:
constant[[_\\]\r?\n]
<ast.AugAssign object at 0x7da1b0652890>
name[t].value assign[=] call[name[t].value][<ast.Slice object at 0x7da1b06535e0>]
name[t].type assign[=] constant[NEWLINE]
return[name[t]] | keyword[def] identifier[t_prepro_define_pragma_defargs_defargsopt_CONTINUE] ( identifier[self] , identifier[t] ):
literal[string]
identifier[t] . identifier[lexer] . identifier[lineno] += literal[int]
identifier[t] . identifier[value] = identifier[t] . identifier[value] [ literal[int] :]
identifier[t] . identifier[type] = literal[string]
keyword[return] identifier[t] | def t_prepro_define_pragma_defargs_defargsopt_CONTINUE(self, t):
"""[_\\\\]\\r?\\n"""
t.lexer.lineno += 1
t.value = t.value[1:]
t.type = 'NEWLINE'
return t |
def _dasd_reverse_conversion(cls, val, **kwargs):
'''
converts DASD String values to the reg_sz value
'''
if val is not None:
if val.upper() == 'ADMINISTRATORS':
# "" also shows 'administrators' in the GUI
return '0'
elif val.upper() == 'ADMINISTRATORS AND POWER USERS':
return '1'
elif val.upper() == 'ADMINISTRATORS AND INTERACTIVE USERS':
return '2'
elif val.upper() == 'NOT DEFINED':
# a setting of anything other than nothing, 0, 1, 2 or if it
# doesn't exist show 'not defined'
return '9999'
else:
return 'Invalid Value'
else:
return 'Not Defined' | def function[_dasd_reverse_conversion, parameter[cls, val]]:
constant[
converts DASD String values to the reg_sz value
]
if compare[name[val] is_not constant[None]] begin[:]
if compare[call[name[val].upper, parameter[]] equal[==] constant[ADMINISTRATORS]] begin[:]
return[constant[0]] | keyword[def] identifier[_dasd_reverse_conversion] ( identifier[cls] , identifier[val] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[val] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[val] . identifier[upper] ()== literal[string] :
keyword[return] literal[string]
keyword[elif] identifier[val] . identifier[upper] ()== literal[string] :
keyword[return] literal[string]
keyword[elif] identifier[val] . identifier[upper] ()== literal[string] :
keyword[return] literal[string]
keyword[elif] identifier[val] . identifier[upper] ()== literal[string] :
keyword[return] literal[string]
keyword[else] :
keyword[return] literal[string]
keyword[else] :
keyword[return] literal[string] | def _dasd_reverse_conversion(cls, val, **kwargs):
"""
converts DASD String values to the reg_sz value
"""
if val is not None:
if val.upper() == 'ADMINISTRATORS':
# "" also shows 'administrators' in the GUI
return '0' # depends on [control=['if'], data=[]]
elif val.upper() == 'ADMINISTRATORS AND POWER USERS':
return '1' # depends on [control=['if'], data=[]]
elif val.upper() == 'ADMINISTRATORS AND INTERACTIVE USERS':
return '2' # depends on [control=['if'], data=[]]
elif val.upper() == 'NOT DEFINED':
# a setting of anything other than nothing, 0, 1, 2 or if it
# doesn't exist show 'not defined'
return '9999' # depends on [control=['if'], data=[]]
else:
return 'Invalid Value' # depends on [control=['if'], data=['val']]
else:
return 'Not Defined' |
def translate(self, vector):
"""Translates `Atom`.
Parameters
----------
vector : 3D Vector (tuple, list, numpy.array)
Vector used for translation.
inc_alt_states : bool, optional
            If true, will translate atoms in all states i.e. includes
alternate conformations for sidechains.
"""
vector = numpy.array(vector)
self._vector += numpy.array(vector)
return | def function[translate, parameter[self, vector]]:
constant[Translates `Atom`.
Parameters
----------
vector : 3D Vector (tuple, list, numpy.array)
Vector used for translation.
inc_alt_states : bool, optional
            If true, will translate atoms in all states i.e. includes
alternate conformations for sidechains.
]
variable[vector] assign[=] call[name[numpy].array, parameter[name[vector]]]
<ast.AugAssign object at 0x7da1b092c310>
return[None] | keyword[def] identifier[translate] ( identifier[self] , identifier[vector] ):
literal[string]
identifier[vector] = identifier[numpy] . identifier[array] ( identifier[vector] )
identifier[self] . identifier[_vector] += identifier[numpy] . identifier[array] ( identifier[vector] )
keyword[return] | def translate(self, vector):
"""Translates `Atom`.
Parameters
----------
vector : 3D Vector (tuple, list, numpy.array)
Vector used for translation.
inc_alt_states : bool, optional
            If true, will translate atoms in all states i.e. includes
alternate conformations for sidechains.
"""
vector = numpy.array(vector)
self._vector += numpy.array(vector)
return |
def read(*args):
"""Reads complete file contents."""
return io.open(os.path.join(HERE, *args), encoding="utf-8").read() | def function[read, parameter[]]:
constant[Reads complete file contents.]
return[call[call[name[io].open, parameter[call[name[os].path.join, parameter[name[HERE], <ast.Starred object at 0x7da1b26ad8a0>]]]].read, parameter[]]] | keyword[def] identifier[read] (* identifier[args] ):
literal[string]
keyword[return] identifier[io] . identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[HERE] ,* identifier[args] ), identifier[encoding] = literal[string] ). identifier[read] () | def read(*args):
"""Reads complete file contents."""
return io.open(os.path.join(HERE, *args), encoding='utf-8').read() |
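
Typical setup.py usage for a helper like this (the file name is illustrative):

    long_description = read("README.rst")
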
def to_swc(self):
"""
Prototype SWC file generator.
c.f. http://research.mssm.edu/cnic/swc.html
"""
from . import __version__
swc = """# ORIGINAL_SOURCE CloudVolume {}
# CREATURE
# REGION
# FIELD/LAYER
# TYPE
# CONTRIBUTOR {}
# REFERENCE
# RAW
# EXTRAS
# SOMA_AREA
# SHINKAGE_CORRECTION
# VERSION_NUMBER
# VERSION_DATE {}
# SCALE 1.0 1.0 1.0
""".format(
__version__,
", ".join([ str(_) for _ in self.vol.provenance.owners ]),
datetime.datetime.utcnow().isoformat()
)
skel = self.clone()
def parent(i):
coords = np.where( skel.edges == i )
edge = skel.edges[ coords[0][0] ]
if edge[0] == i:
return edge[1] + 1
return edge[0] + 1
for i in range(skel.vertices.shape[0]):
line = "{n} {T} {x} {y} {z} {R} {P}".format(
n=i+1,
T=skel.vertex_types[i],
x=skel.vertices[i][0],
y=skel.vertices[i][1],
z=skel.vertices[i][2],
R=skel.radii[i],
P=-1 if i == 0 else parent(i),
)
swc += line + '\n'
return swc | def function[to_swc, parameter[self]]:
constant[
Prototype SWC file generator.
c.f. http://research.mssm.edu/cnic/swc.html
]
from relative_module[None] import module[__version__]
variable[swc] assign[=] call[constant[# ORIGINAL_SOURCE CloudVolume {}
# CREATURE
# REGION
# FIELD/LAYER
# TYPE
# CONTRIBUTOR {}
# REFERENCE
# RAW
# EXTRAS
# SOMA_AREA
# SHINKAGE_CORRECTION
# VERSION_NUMBER
# VERSION_DATE {}
# SCALE 1.0 1.0 1.0
].format, parameter[name[__version__], call[constant[, ].join, parameter[<ast.ListComp object at 0x7da204566080>]], call[call[name[datetime].datetime.utcnow, parameter[]].isoformat, parameter[]]]]
variable[skel] assign[=] call[name[self].clone, parameter[]]
def function[parent, parameter[i]]:
variable[coords] assign[=] call[name[np].where, parameter[compare[name[skel].edges equal[==] name[i]]]]
variable[edge] assign[=] call[name[skel].edges][call[call[name[coords]][constant[0]]][constant[0]]]
if compare[call[name[edge]][constant[0]] equal[==] name[i]] begin[:]
return[binary_operation[call[name[edge]][constant[1]] + constant[1]]]
return[binary_operation[call[name[edge]][constant[0]] + constant[1]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[skel].vertices.shape][constant[0]]]]] begin[:]
variable[line] assign[=] call[constant[{n} {T} {x} {y} {z} {R} {P}].format, parameter[]]
<ast.AugAssign object at 0x7da1b0ef6860>
return[name[swc]] | keyword[def] identifier[to_swc] ( identifier[self] ):
literal[string]
keyword[from] . keyword[import] identifier[__version__]
identifier[swc] = literal[string] . identifier[format] (
identifier[__version__] ,
literal[string] . identifier[join] ([ identifier[str] ( identifier[_] ) keyword[for] identifier[_] keyword[in] identifier[self] . identifier[vol] . identifier[provenance] . identifier[owners] ]),
identifier[datetime] . identifier[datetime] . identifier[utcnow] (). identifier[isoformat] ()
)
identifier[skel] = identifier[self] . identifier[clone] ()
keyword[def] identifier[parent] ( identifier[i] ):
identifier[coords] = identifier[np] . identifier[where] ( identifier[skel] . identifier[edges] == identifier[i] )
identifier[edge] = identifier[skel] . identifier[edges] [ identifier[coords] [ literal[int] ][ literal[int] ]]
keyword[if] identifier[edge] [ literal[int] ]== identifier[i] :
keyword[return] identifier[edge] [ literal[int] ]+ literal[int]
keyword[return] identifier[edge] [ literal[int] ]+ literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[skel] . identifier[vertices] . identifier[shape] [ literal[int] ]):
identifier[line] = literal[string] . identifier[format] (
identifier[n] = identifier[i] + literal[int] ,
identifier[T] = identifier[skel] . identifier[vertex_types] [ identifier[i] ],
identifier[x] = identifier[skel] . identifier[vertices] [ identifier[i] ][ literal[int] ],
identifier[y] = identifier[skel] . identifier[vertices] [ identifier[i] ][ literal[int] ],
identifier[z] = identifier[skel] . identifier[vertices] [ identifier[i] ][ literal[int] ],
identifier[R] = identifier[skel] . identifier[radii] [ identifier[i] ],
identifier[P] =- literal[int] keyword[if] identifier[i] == literal[int] keyword[else] identifier[parent] ( identifier[i] ),
)
identifier[swc] += identifier[line] + literal[string]
keyword[return] identifier[swc] | def to_swc(self):
"""
Prototype SWC file generator.
c.f. http://research.mssm.edu/cnic/swc.html
"""
from . import __version__
swc = '# ORIGINAL_SOURCE CloudVolume {}\n# CREATURE \n# REGION\n# FIELD/LAYER\n# TYPE\n# CONTRIBUTOR {}\n# REFERENCE\n# RAW \n# EXTRAS \n# SOMA_AREA\n# SHINKAGE_CORRECTION \n# VERSION_NUMBER \n# VERSION_DATE {}\n# SCALE 1.0 1.0 1.0\n\n'.format(__version__, ', '.join([str(_) for _ in self.vol.provenance.owners]), datetime.datetime.utcnow().isoformat())
skel = self.clone()
def parent(i):
coords = np.where(skel.edges == i)
edge = skel.edges[coords[0][0]]
if edge[0] == i:
return edge[1] + 1 # depends on [control=['if'], data=[]]
return edge[0] + 1
for i in range(skel.vertices.shape[0]):
line = '{n} {T} {x} {y} {z} {R} {P}'.format(n=i + 1, T=skel.vertex_types[i], x=skel.vertices[i][0], y=skel.vertices[i][1], z=skel.vertices[i][2], R=skel.radii[i], P=-1 if i == 0 else parent(i))
swc += line + '\n' # depends on [control=['for'], data=['i']]
return swc |
def is_same_file (filename1, filename2):
"""Check if filename1 and filename2 point to the same file object.
    There can be false negatives, i.e. the result is False, but it is
the same file anyway. Reason is that network filesystems can create
different paths to the same physical file.
"""
if filename1 == filename2:
return True
if os.name == 'posix':
return os.path.samefile(filename1, filename2)
return is_same_filename(filename1, filename2) | def function[is_same_file, parameter[filename1, filename2]]:
constant[Check if filename1 and filename2 point to the same file object.
    There can be false negatives, i.e. the result is False, but it is
the same file anyway. Reason is that network filesystems can create
different paths to the same physical file.
]
if compare[name[filename1] equal[==] name[filename2]] begin[:]
return[constant[True]]
if compare[name[os].name equal[==] constant[posix]] begin[:]
return[call[name[os].path.samefile, parameter[name[filename1], name[filename2]]]]
return[call[name[is_same_filename], parameter[name[filename1], name[filename2]]]] | keyword[def] identifier[is_same_file] ( identifier[filename1] , identifier[filename2] ):
literal[string]
keyword[if] identifier[filename1] == identifier[filename2] :
keyword[return] keyword[True]
keyword[if] identifier[os] . identifier[name] == literal[string] :
keyword[return] identifier[os] . identifier[path] . identifier[samefile] ( identifier[filename1] , identifier[filename2] )
keyword[return] identifier[is_same_filename] ( identifier[filename1] , identifier[filename2] ) | def is_same_file(filename1, filename2):
"""Check if filename1 and filename2 point to the same file object.
    There can be false negatives, i.e. the result is False, but it is
the same file anyway. Reason is that network filesystems can create
different paths to the same physical file.
"""
if filename1 == filename2:
return True # depends on [control=['if'], data=[]]
if os.name == 'posix':
return os.path.samefile(filename1, filename2) # depends on [control=['if'], data=[]]
return is_same_filename(filename1, filename2) |
def prop2b(gm, pvinit, dt):
"""
    Given a central mass and the state of a massless body at time t_0,
this routine determines the state as predicted by a two-body
force model at time t_0 + dt.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/prop2b_c.html
:param gm: Gravity of the central mass.
:type gm: float
:param pvinit: Initial state from which to propagate a state.
:type pvinit: 6-Element Array of floats
:param dt: Time offset from initial state to propagate to.
:type dt: float
:return: The propagated state.
:rtype: 6-Element Array of floats
"""
gm = ctypes.c_double(gm)
pvinit = stypes.toDoubleVector(pvinit)
dt = ctypes.c_double(dt)
pvprop = stypes.emptyDoubleVector(6)
libspice.prop2b_c(gm, pvinit, dt, pvprop)
return stypes.cVectorToPython(pvprop) | def function[prop2b, parameter[gm, pvinit, dt]]:
constant[
    Given a central mass and the state of a massless body at time t_0,
this routine determines the state as predicted by a two-body
force model at time t_0 + dt.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/prop2b_c.html
:param gm: Gravity of the central mass.
:type gm: float
:param pvinit: Initial state from which to propagate a state.
:type pvinit: 6-Element Array of floats
:param dt: Time offset from initial state to propagate to.
:type dt: float
:return: The propagated state.
:rtype: 6-Element Array of floats
]
variable[gm] assign[=] call[name[ctypes].c_double, parameter[name[gm]]]
variable[pvinit] assign[=] call[name[stypes].toDoubleVector, parameter[name[pvinit]]]
variable[dt] assign[=] call[name[ctypes].c_double, parameter[name[dt]]]
variable[pvprop] assign[=] call[name[stypes].emptyDoubleVector, parameter[constant[6]]]
call[name[libspice].prop2b_c, parameter[name[gm], name[pvinit], name[dt], name[pvprop]]]
return[call[name[stypes].cVectorToPython, parameter[name[pvprop]]]] | keyword[def] identifier[prop2b] ( identifier[gm] , identifier[pvinit] , identifier[dt] ):
literal[string]
identifier[gm] = identifier[ctypes] . identifier[c_double] ( identifier[gm] )
identifier[pvinit] = identifier[stypes] . identifier[toDoubleVector] ( identifier[pvinit] )
identifier[dt] = identifier[ctypes] . identifier[c_double] ( identifier[dt] )
identifier[pvprop] = identifier[stypes] . identifier[emptyDoubleVector] ( literal[int] )
identifier[libspice] . identifier[prop2b_c] ( identifier[gm] , identifier[pvinit] , identifier[dt] , identifier[pvprop] )
keyword[return] identifier[stypes] . identifier[cVectorToPython] ( identifier[pvprop] ) | def prop2b(gm, pvinit, dt):
"""
    Given a central mass and the state of a massless body at time t_0,
this routine determines the state as predicted by a two-body
force model at time t_0 + dt.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/prop2b_c.html
:param gm: Gravity of the central mass.
:type gm: float
:param pvinit: Initial state from which to propagate a state.
:type pvinit: 6-Element Array of floats
:param dt: Time offset from initial state to propagate to.
:type dt: float
:return: The propagated state.
:rtype: 6-Element Array of floats
"""
gm = ctypes.c_double(gm)
pvinit = stypes.toDoubleVector(pvinit)
dt = ctypes.c_double(dt)
pvprop = stypes.emptyDoubleVector(6)
libspice.prop2b_c(gm, pvinit, dt, pvprop)
return stypes.cVectorToPython(pvprop) |
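
A self-contained example propagating a circular orbit a quarter period ahead; the Earth GM constant is an assumed value in km^3/s^2:

    import math

    gm = 398600.4418                                     # Earth GM, km^3/s^2 (assumed)
    r = 7000.0                                           # circular orbit radius, km
    v = math.sqrt(gm / r)                                # circular orbital speed
    state0 = [r, 0.0, 0.0, 0.0, v, 0.0]                  # position followed by velocity
    dt = 0.25 * 2.0 * math.pi * math.sqrt(r ** 3 / gm)   # one quarter of the period, s
    state1 = prop2b(gm, state0, dt)                      # ~[0, r, 0, -v, 0, 0]
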
def itemByPath( self, path, includeRoot = False ):
"""
Loads the items for the given path.
:param path | <str>
includeRoot | <bool>
"""
sep = self.separator()
path = nativestring(path).strip(sep)
if ( not path ):
if ( includeRoot ):
return self.invisibleRootItem()
else:
return None
splt = path.split(sep)
item = self.invisibleRootItem()
for part in splt:
next_item = None
for row in range(item.rowCount()):
child = item.child(row)
if ( child.text() == part ):
next_item = child
break
if ( not next_item ):
item = None
break
item = next_item
item.initialize()
return item | def function[itemByPath, parameter[self, path, includeRoot]]:
constant[
Loads the items for the given path.
:param path | <str>
includeRoot | <bool>
]
variable[sep] assign[=] call[name[self].separator, parameter[]]
variable[path] assign[=] call[call[name[nativestring], parameter[name[path]]].strip, parameter[name[sep]]]
if <ast.UnaryOp object at 0x7da18f00dff0> begin[:]
if name[includeRoot] begin[:]
return[call[name[self].invisibleRootItem, parameter[]]]
variable[splt] assign[=] call[name[path].split, parameter[name[sep]]]
variable[item] assign[=] call[name[self].invisibleRootItem, parameter[]]
for taget[name[part]] in starred[name[splt]] begin[:]
variable[next_item] assign[=] constant[None]
for taget[name[row]] in starred[call[name[range], parameter[call[name[item].rowCount, parameter[]]]]] begin[:]
variable[child] assign[=] call[name[item].child, parameter[name[row]]]
if compare[call[name[child].text, parameter[]] equal[==] name[part]] begin[:]
variable[next_item] assign[=] name[child]
break
if <ast.UnaryOp object at 0x7da204347490> begin[:]
variable[item] assign[=] constant[None]
break
variable[item] assign[=] name[next_item]
call[name[item].initialize, parameter[]]
return[name[item]] | keyword[def] identifier[itemByPath] ( identifier[self] , identifier[path] , identifier[includeRoot] = keyword[False] ):
literal[string]
identifier[sep] = identifier[self] . identifier[separator] ()
identifier[path] = identifier[nativestring] ( identifier[path] ). identifier[strip] ( identifier[sep] )
keyword[if] ( keyword[not] identifier[path] ):
keyword[if] ( identifier[includeRoot] ):
keyword[return] identifier[self] . identifier[invisibleRootItem] ()
keyword[else] :
keyword[return] keyword[None]
identifier[splt] = identifier[path] . identifier[split] ( identifier[sep] )
identifier[item] = identifier[self] . identifier[invisibleRootItem] ()
keyword[for] identifier[part] keyword[in] identifier[splt] :
identifier[next_item] = keyword[None]
keyword[for] identifier[row] keyword[in] identifier[range] ( identifier[item] . identifier[rowCount] ()):
identifier[child] = identifier[item] . identifier[child] ( identifier[row] )
keyword[if] ( identifier[child] . identifier[text] ()== identifier[part] ):
identifier[next_item] = identifier[child]
keyword[break]
keyword[if] ( keyword[not] identifier[next_item] ):
identifier[item] = keyword[None]
keyword[break]
identifier[item] = identifier[next_item]
identifier[item] . identifier[initialize] ()
keyword[return] identifier[item] | def itemByPath(self, path, includeRoot=False):
"""
Loads the items for the given path.
:param path | <str>
includeRoot | <bool>
"""
sep = self.separator()
path = nativestring(path).strip(sep)
if not path:
if includeRoot:
return self.invisibleRootItem() # depends on [control=['if'], data=[]]
else:
return None # depends on [control=['if'], data=[]]
splt = path.split(sep)
item = self.invisibleRootItem()
for part in splt:
next_item = None
for row in range(item.rowCount()):
child = item.child(row)
if child.text() == part:
next_item = child
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['row']]
if not next_item:
item = None
break # depends on [control=['if'], data=[]]
item = next_item
item.initialize() # depends on [control=['for'], data=['part']]
return item |
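A minimal usage sketch for itemByPath, assuming a concrete tree-model subclass whose separator() returns '/' and whose item texts match the path parts (the model class and path below are hypothetical):

model = ProjectTreeModel()                      # hypothetical subclass defining itemByPath
item = model.itemByPath('assets/textures/wood')
if item is not None:
    print(item.text())                          # -> 'wood'
else:
    print('path not found')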
def gridlines(ax, scale, multiple=None, horizontal_kwargs=None,
left_kwargs=None, right_kwargs=None, **kwargs):
"""
Plots grid lines excluding boundary.
Parameters
----------
ax: Matplotlib AxesSubplot, None
The subplot to draw on.
scale: float
Simplex scale size.
multiple: float, None
        Specifies which inner gridlines to draw. For example, if scale=30 and
multiple=6, only 5 inner gridlines will be drawn.
horizontal_kwargs: dict, None
Any kwargs to pass through to matplotlib for horizontal gridlines
left_kwargs: dict, None
Any kwargs to pass through to matplotlib for left parallel gridlines
right_kwargs: dict, None
Any kwargs to pass through to matplotlib for right parallel gridlines
kwargs:
Any kwargs to pass through to matplotlib, if not using
horizontal_kwargs, left_kwargs, or right_kwargs
"""
if 'linewidth' not in kwargs:
kwargs["linewidth"] = 0.5
if 'linestyle' not in kwargs:
kwargs["linestyle"] = ':'
horizontal_kwargs = merge_dicts(kwargs, horizontal_kwargs)
left_kwargs = merge_dicts(kwargs, left_kwargs)
right_kwargs = merge_dicts(kwargs, right_kwargs)
if not multiple:
multiple = 1.
## Draw grid-lines
# Parallel to horizontal axis
for i in arange(0, scale, multiple):
horizontal_line(ax, scale, i, **horizontal_kwargs)
# Parallel to left and right axes
for i in arange(0, scale + multiple, multiple):
left_parallel_line(ax, scale, i, **left_kwargs)
right_parallel_line(ax, scale, i, **right_kwargs)
return ax | def function[gridlines, parameter[ax, scale, multiple, horizontal_kwargs, left_kwargs, right_kwargs]]:
constant[
Plots grid lines excluding boundary.
Parameters
----------
ax: Matplotlib AxesSubplot, None
The subplot to draw on.
scale: float
Simplex scale size.
multiple: float, None
        Specifies which inner gridlines to draw. For example, if scale=30 and
multiple=6, only 5 inner gridlines will be drawn.
horizontal_kwargs: dict, None
Any kwargs to pass through to matplotlib for horizontal gridlines
left_kwargs: dict, None
Any kwargs to pass through to matplotlib for left parallel gridlines
right_kwargs: dict, None
Any kwargs to pass through to matplotlib for right parallel gridlines
kwargs:
Any kwargs to pass through to matplotlib, if not using
horizontal_kwargs, left_kwargs, or right_kwargs
]
if compare[constant[linewidth] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[linewidth]] assign[=] constant[0.5]
if compare[constant[linestyle] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[linestyle]] assign[=] constant[:]
variable[horizontal_kwargs] assign[=] call[name[merge_dicts], parameter[name[kwargs], name[horizontal_kwargs]]]
variable[left_kwargs] assign[=] call[name[merge_dicts], parameter[name[kwargs], name[left_kwargs]]]
variable[right_kwargs] assign[=] call[name[merge_dicts], parameter[name[kwargs], name[right_kwargs]]]
if <ast.UnaryOp object at 0x7da20e9b3d00> begin[:]
variable[multiple] assign[=] constant[1.0]
for taget[name[i]] in starred[call[name[arange], parameter[constant[0], name[scale], name[multiple]]]] begin[:]
call[name[horizontal_line], parameter[name[ax], name[scale], name[i]]]
for taget[name[i]] in starred[call[name[arange], parameter[constant[0], binary_operation[name[scale] + name[multiple]], name[multiple]]]] begin[:]
call[name[left_parallel_line], parameter[name[ax], name[scale], name[i]]]
call[name[right_parallel_line], parameter[name[ax], name[scale], name[i]]]
return[name[ax]] | keyword[def] identifier[gridlines] ( identifier[ax] , identifier[scale] , identifier[multiple] = keyword[None] , identifier[horizontal_kwargs] = keyword[None] ,
identifier[left_kwargs] = keyword[None] , identifier[right_kwargs] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= literal[int]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= literal[string]
identifier[horizontal_kwargs] = identifier[merge_dicts] ( identifier[kwargs] , identifier[horizontal_kwargs] )
identifier[left_kwargs] = identifier[merge_dicts] ( identifier[kwargs] , identifier[left_kwargs] )
identifier[right_kwargs] = identifier[merge_dicts] ( identifier[kwargs] , identifier[right_kwargs] )
keyword[if] keyword[not] identifier[multiple] :
identifier[multiple] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[arange] ( literal[int] , identifier[scale] , identifier[multiple] ):
identifier[horizontal_line] ( identifier[ax] , identifier[scale] , identifier[i] ,** identifier[horizontal_kwargs] )
keyword[for] identifier[i] keyword[in] identifier[arange] ( literal[int] , identifier[scale] + identifier[multiple] , identifier[multiple] ):
identifier[left_parallel_line] ( identifier[ax] , identifier[scale] , identifier[i] ,** identifier[left_kwargs] )
identifier[right_parallel_line] ( identifier[ax] , identifier[scale] , identifier[i] ,** identifier[right_kwargs] )
keyword[return] identifier[ax] | def gridlines(ax, scale, multiple=None, horizontal_kwargs=None, left_kwargs=None, right_kwargs=None, **kwargs):
"""
Plots grid lines excluding boundary.
Parameters
----------
ax: Matplotlib AxesSubplot, None
The subplot to draw on.
scale: float
Simplex scale size.
multiple: float, None
Specifies which inner gridelines to draw. For example, if scale=30 and
multiple=6, only 5 inner gridlines will be drawn.
horizontal_kwargs: dict, None
Any kwargs to pass through to matplotlib for horizontal gridlines
left_kwargs: dict, None
Any kwargs to pass through to matplotlib for left parallel gridlines
right_kwargs: dict, None
Any kwargs to pass through to matplotlib for right parallel gridlines
kwargs:
Any kwargs to pass through to matplotlib, if not using
horizontal_kwargs, left_kwargs, or right_kwargs
"""
if 'linewidth' not in kwargs:
kwargs['linewidth'] = 0.5 # depends on [control=['if'], data=['kwargs']]
if 'linestyle' not in kwargs:
kwargs['linestyle'] = ':' # depends on [control=['if'], data=['kwargs']]
horizontal_kwargs = merge_dicts(kwargs, horizontal_kwargs)
left_kwargs = merge_dicts(kwargs, left_kwargs)
right_kwargs = merge_dicts(kwargs, right_kwargs)
if not multiple:
multiple = 1.0 # depends on [control=['if'], data=[]]
## Draw grid-lines
# Parallel to horizontal axis
for i in arange(0, scale, multiple):
horizontal_line(ax, scale, i, **horizontal_kwargs) # depends on [control=['for'], data=['i']]
# Parallel to left and right axes
for i in arange(0, scale + multiple, multiple):
left_parallel_line(ax, scale, i, **left_kwargs)
right_parallel_line(ax, scale, i, **right_kwargs) # depends on [control=['for'], data=['i']]
return ax |
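A hedged usage sketch, assuming this gridlines helper lives in a ternary-plot module where horizontal_line, left_parallel_line, right_parallel_line, merge_dicts, and arange are already in scope (only matplotlib is imported here):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
scale = 30
# Dotted inner gridlines every 6 units; the triangle boundary is drawn separately.
gridlines(ax, scale, multiple=6, horizontal_kwargs={'color': 'gray'})
plt.show()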
def putcellslice(self, rownr, value, blc, trc, inc=[]):
"""Put into a slice of a table cell holding an array.
(see :func:`table.putcellslice`)"""
return self._table.putcellslice(self._column, rownr, value, blc, trc, inc) | def function[putcellslice, parameter[self, rownr, value, blc, trc, inc]]:
constant[Put into a slice of a table cell holding an array.
(see :func:`table.putcellslice`)]
return[call[name[self]._table.putcellslice, parameter[name[self]._column, name[rownr], name[value], name[blc], name[trc], name[inc]]]] | keyword[def] identifier[putcellslice] ( identifier[self] , identifier[rownr] , identifier[value] , identifier[blc] , identifier[trc] , identifier[inc] =[]):
literal[string]
keyword[return] identifier[self] . identifier[_table] . identifier[putcellslice] ( identifier[self] . identifier[_column] , identifier[rownr] , identifier[value] , identifier[blc] , identifier[trc] , identifier[inc] ) | def putcellslice(self, rownr, value, blc, trc, inc=[]):
"""Put into a slice of a table cell holding an array.
(see :func:`table.putcellslice`)"""
return self._table.putcellslice(self._column, rownr, value, blc, trc, inc) |
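A hedged sketch of calling this column-level wrapper, assuming a python-casacore table on disk with a complex array column named DATA (the file name and slice shape are illustrative):

import numpy as np
from casacore.tables import table, tablecolumn

t = table('my.ms', readonly=False)       # hypothetical table on disk
col = tablecolumn(t, 'DATA')
# Overwrite a 2x2 slice of row 0's array, from corner (0, 0) to (1, 1).
col.putcellslice(0, np.zeros((2, 2), dtype=complex), blc=[0, 0], trc=[1, 1])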
def dasopr(fname):
"""
Open a DAS file for reading.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dasopr_c.html
:param fname: Name of a DAS file to be opened.
:type fname: str
:return: Handle assigned to the opened DAS file.
:rtype: int
"""
fname = stypes.stringToCharP(fname)
handle = ctypes.c_int()
libspice.dasopr_c(fname, ctypes.byref(handle))
return handle.value | def function[dasopr, parameter[fname]]:
constant[
Open a DAS file for reading.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dasopr_c.html
:param fname: Name of a DAS file to be opened.
:type fname: str
:return: Handle assigned to the opened DAS file.
:rtype: int
]
variable[fname] assign[=] call[name[stypes].stringToCharP, parameter[name[fname]]]
variable[handle] assign[=] call[name[ctypes].c_int, parameter[]]
call[name[libspice].dasopr_c, parameter[name[fname], call[name[ctypes].byref, parameter[name[handle]]]]]
return[name[handle].value] | keyword[def] identifier[dasopr] ( identifier[fname] ):
literal[string]
identifier[fname] = identifier[stypes] . identifier[stringToCharP] ( identifier[fname] )
identifier[handle] = identifier[ctypes] . identifier[c_int] ()
identifier[libspice] . identifier[dasopr_c] ( identifier[fname] , identifier[ctypes] . identifier[byref] ( identifier[handle] ))
keyword[return] identifier[handle] . identifier[value] | def dasopr(fname):
"""
Open a DAS file for reading.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dasopr_c.html
:param fname: Name of a DAS file to be opened.
:type fname: str
:return: Handle assigned to the opened DAS file.
:rtype: int
"""
fname = stypes.stringToCharP(fname)
handle = ctypes.c_int()
libspice.dasopr_c(fname, ctypes.byref(handle))
return handle.value |
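A hedged usage sketch with spiceypy-style calls; the file name is hypothetical, and dascls is the documented counterpart for releasing the handle:

import spiceypy as spice

handle = spice.dasopr('sample.dla')      # hypothetical DAS/DLA file
try:
    pass                                 # read via the das*/dla* routines here
finally:
    spice.dascls(handle)                 # close the DAS handle when done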
def _let_to_py_ast(ctx: GeneratorContext, node: Let) -> GeneratedPyAST:
"""Return a Python AST Node for a `let*` expression."""
assert node.op == NodeOp.LET
with ctx.new_symbol_table("let"):
let_body_ast: List[ast.AST] = []
for binding in node.bindings:
init_node = binding.init
assert init_node is not None
init_ast = gen_py_ast(ctx, init_node)
binding_name = genname(munge(binding.name))
let_body_ast.extend(init_ast.dependencies)
let_body_ast.append(
ast.Assign(
targets=[ast.Name(id=binding_name, ctx=ast.Store())],
value=init_ast.node,
)
)
ctx.symbol_table.new_symbol(
sym.symbol(binding.name), binding_name, LocalType.LET
)
let_result_name = genname("let_result")
body_ast = _synthetic_do_to_py_ast(ctx, node.body)
let_body_ast.extend(map(statementize, body_ast.dependencies))
let_body_ast.append(
ast.Assign(
targets=[ast.Name(id=let_result_name, ctx=ast.Store())],
value=body_ast.node,
)
)
return GeneratedPyAST(
node=ast.Name(id=let_result_name, ctx=ast.Load()), dependencies=let_body_ast
) | def function[_let_to_py_ast, parameter[ctx, node]]:
constant[Return a Python AST Node for a `let*` expression.]
assert[compare[name[node].op equal[==] name[NodeOp].LET]]
with call[name[ctx].new_symbol_table, parameter[constant[let]]] begin[:]
<ast.AnnAssign object at 0x7da1b0285300>
for taget[name[binding]] in starred[name[node].bindings] begin[:]
variable[init_node] assign[=] name[binding].init
assert[compare[name[init_node] is_not constant[None]]]
variable[init_ast] assign[=] call[name[gen_py_ast], parameter[name[ctx], name[init_node]]]
variable[binding_name] assign[=] call[name[genname], parameter[call[name[munge], parameter[name[binding].name]]]]
call[name[let_body_ast].extend, parameter[name[init_ast].dependencies]]
call[name[let_body_ast].append, parameter[call[name[ast].Assign, parameter[]]]]
call[name[ctx].symbol_table.new_symbol, parameter[call[name[sym].symbol, parameter[name[binding].name]], name[binding_name], name[LocalType].LET]]
variable[let_result_name] assign[=] call[name[genname], parameter[constant[let_result]]]
variable[body_ast] assign[=] call[name[_synthetic_do_to_py_ast], parameter[name[ctx], name[node].body]]
call[name[let_body_ast].extend, parameter[call[name[map], parameter[name[statementize], name[body_ast].dependencies]]]]
call[name[let_body_ast].append, parameter[call[name[ast].Assign, parameter[]]]]
return[call[name[GeneratedPyAST], parameter[]]] | keyword[def] identifier[_let_to_py_ast] ( identifier[ctx] : identifier[GeneratorContext] , identifier[node] : identifier[Let] )-> identifier[GeneratedPyAST] :
literal[string]
keyword[assert] identifier[node] . identifier[op] == identifier[NodeOp] . identifier[LET]
keyword[with] identifier[ctx] . identifier[new_symbol_table] ( literal[string] ):
identifier[let_body_ast] : identifier[List] [ identifier[ast] . identifier[AST] ]=[]
keyword[for] identifier[binding] keyword[in] identifier[node] . identifier[bindings] :
identifier[init_node] = identifier[binding] . identifier[init]
keyword[assert] identifier[init_node] keyword[is] keyword[not] keyword[None]
identifier[init_ast] = identifier[gen_py_ast] ( identifier[ctx] , identifier[init_node] )
identifier[binding_name] = identifier[genname] ( identifier[munge] ( identifier[binding] . identifier[name] ))
identifier[let_body_ast] . identifier[extend] ( identifier[init_ast] . identifier[dependencies] )
identifier[let_body_ast] . identifier[append] (
identifier[ast] . identifier[Assign] (
identifier[targets] =[ identifier[ast] . identifier[Name] ( identifier[id] = identifier[binding_name] , identifier[ctx] = identifier[ast] . identifier[Store] ())],
identifier[value] = identifier[init_ast] . identifier[node] ,
)
)
identifier[ctx] . identifier[symbol_table] . identifier[new_symbol] (
identifier[sym] . identifier[symbol] ( identifier[binding] . identifier[name] ), identifier[binding_name] , identifier[LocalType] . identifier[LET]
)
identifier[let_result_name] = identifier[genname] ( literal[string] )
identifier[body_ast] = identifier[_synthetic_do_to_py_ast] ( identifier[ctx] , identifier[node] . identifier[body] )
identifier[let_body_ast] . identifier[extend] ( identifier[map] ( identifier[statementize] , identifier[body_ast] . identifier[dependencies] ))
identifier[let_body_ast] . identifier[append] (
identifier[ast] . identifier[Assign] (
identifier[targets] =[ identifier[ast] . identifier[Name] ( identifier[id] = identifier[let_result_name] , identifier[ctx] = identifier[ast] . identifier[Store] ())],
identifier[value] = identifier[body_ast] . identifier[node] ,
)
)
keyword[return] identifier[GeneratedPyAST] (
identifier[node] = identifier[ast] . identifier[Name] ( identifier[id] = identifier[let_result_name] , identifier[ctx] = identifier[ast] . identifier[Load] ()), identifier[dependencies] = identifier[let_body_ast]
) | def _let_to_py_ast(ctx: GeneratorContext, node: Let) -> GeneratedPyAST:
"""Return a Python AST Node for a `let*` expression."""
assert node.op == NodeOp.LET
with ctx.new_symbol_table('let'):
let_body_ast: List[ast.AST] = []
for binding in node.bindings:
init_node = binding.init
assert init_node is not None
init_ast = gen_py_ast(ctx, init_node)
binding_name = genname(munge(binding.name))
let_body_ast.extend(init_ast.dependencies)
let_body_ast.append(ast.Assign(targets=[ast.Name(id=binding_name, ctx=ast.Store())], value=init_ast.node))
ctx.symbol_table.new_symbol(sym.symbol(binding.name), binding_name, LocalType.LET) # depends on [control=['for'], data=['binding']]
let_result_name = genname('let_result')
body_ast = _synthetic_do_to_py_ast(ctx, node.body)
let_body_ast.extend(map(statementize, body_ast.dependencies))
let_body_ast.append(ast.Assign(targets=[ast.Name(id=let_result_name, ctx=ast.Store())], value=body_ast.node))
return GeneratedPyAST(node=ast.Name(id=let_result_name, ctx=ast.Load()), dependencies=let_body_ast) # depends on [control=['with'], data=[]] |
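To make the generated shape concrete, this is roughly the Python that a (let* [x 1] x) form lowers to under the scheme above; real identifiers come from genname(), so the suffixed names below are illustrative:

# One Assign per binding (preceded by that binding's dependencies) ...
x_123 = 1
# ... then the body result is captured under a fresh let_result name,
let_result_456 = x_123
# and the whole let* expression evaluates to that name.
let_result_456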
def set_servo_position(self, goalposition, goaltime, led):
""" Set the position of Herkulex
Enable torque using torque_on function before calling this
Args:
goalposition (int): The desired position, min-0 & max-1023
goaltime (int): the time taken to move from present
position to goalposition
led (int): the LED color
0x00 LED off
0x04 GREEN
0x08 BLUE
0x10 RED
"""
goalposition_msb = int(goalposition) >> 8
goalposition_lsb = int(goalposition) & 0xff
data = []
data.append(0x0C)
data.append(self.servoid)
data.append(I_JOG_REQ)
data.append(goalposition_lsb)
data.append(goalposition_msb)
data.append(led)
data.append(self.servoid)
data.append(goaltime)
send_data(data) | def function[set_servo_position, parameter[self, goalposition, goaltime, led]]:
constant[ Set the position of Herkulex
Enable torque using torque_on function before calling this
Args:
goalposition (int): The desired position, min-0 & max-1023
goaltime (int): the time taken to move from present
position to goalposition
led (int): the LED color
0x00 LED off
0x04 GREEN
0x08 BLUE
0x10 RED
]
variable[goalposition_msb] assign[=] binary_operation[call[name[int], parameter[name[goalposition]]] <ast.RShift object at 0x7da2590d6a40> constant[8]]
variable[goalposition_lsb] assign[=] binary_operation[call[name[int], parameter[name[goalposition]]] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]
variable[data] assign[=] list[[]]
call[name[data].append, parameter[constant[12]]]
call[name[data].append, parameter[name[self].servoid]]
call[name[data].append, parameter[name[I_JOG_REQ]]]
call[name[data].append, parameter[name[goalposition_lsb]]]
call[name[data].append, parameter[name[goalposition_msb]]]
call[name[data].append, parameter[name[led]]]
call[name[data].append, parameter[name[self].servoid]]
call[name[data].append, parameter[name[goaltime]]]
call[name[send_data], parameter[name[data]]] | keyword[def] identifier[set_servo_position] ( identifier[self] , identifier[goalposition] , identifier[goaltime] , identifier[led] ):
literal[string]
identifier[goalposition_msb] = identifier[int] ( identifier[goalposition] )>> literal[int]
identifier[goalposition_lsb] = identifier[int] ( identifier[goalposition] )& literal[int]
identifier[data] =[]
identifier[data] . identifier[append] ( literal[int] )
identifier[data] . identifier[append] ( identifier[self] . identifier[servoid] )
identifier[data] . identifier[append] ( identifier[I_JOG_REQ] )
identifier[data] . identifier[append] ( identifier[goalposition_lsb] )
identifier[data] . identifier[append] ( identifier[goalposition_msb] )
identifier[data] . identifier[append] ( identifier[led] )
identifier[data] . identifier[append] ( identifier[self] . identifier[servoid] )
identifier[data] . identifier[append] ( identifier[goaltime] )
identifier[send_data] ( identifier[data] ) | def set_servo_position(self, goalposition, goaltime, led):
""" Set the position of Herkulex
Enable torque using torque_on function before calling this
Args:
goalposition (int): The desired position, min-0 & max-1023
goaltime (int): the time taken to move from present
position to goalposition
led (int): the LED color
0x00 LED off
0x04 GREEN
0x08 BLUE
0x10 RED
"""
goalposition_msb = int(goalposition) >> 8
goalposition_lsb = int(goalposition) & 255
data = []
data.append(12)
data.append(self.servoid)
data.append(I_JOG_REQ)
data.append(goalposition_lsb)
data.append(goalposition_msb)
data.append(led)
data.append(self.servoid)
data.append(goaltime)
send_data(data) |
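A hedged usage sketch, assuming servo is an instance of the Herkulex servo class defining this method (torque_on() is the prerequisite the docstring names):

servo.torque_on()                         # enable torque before moving
servo.set_servo_position(512, 100, 0x04)  # mid position, green LED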
def earth_gyro(RAW_IMU,ATTITUDE):
'''return earth frame gyro vector'''
r = rotation(ATTITUDE)
accel = Vector3(degrees(RAW_IMU.xgyro), degrees(RAW_IMU.ygyro), degrees(RAW_IMU.zgyro)) * 0.001
return r * accel | def function[earth_gyro, parameter[RAW_IMU, ATTITUDE]]:
constant[return earth frame gyro vector]
variable[r] assign[=] call[name[rotation], parameter[name[ATTITUDE]]]
variable[accel] assign[=] binary_operation[call[name[Vector3], parameter[call[name[degrees], parameter[name[RAW_IMU].xgyro]], call[name[degrees], parameter[name[RAW_IMU].ygyro]], call[name[degrees], parameter[name[RAW_IMU].zgyro]]]] * constant[0.001]]
return[binary_operation[name[r] * name[accel]]] | keyword[def] identifier[earth_gyro] ( identifier[RAW_IMU] , identifier[ATTITUDE] ):
literal[string]
identifier[r] = identifier[rotation] ( identifier[ATTITUDE] )
identifier[accel] = identifier[Vector3] ( identifier[degrees] ( identifier[RAW_IMU] . identifier[xgyro] ), identifier[degrees] ( identifier[RAW_IMU] . identifier[ygyro] ), identifier[degrees] ( identifier[RAW_IMU] . identifier[zgyro] ))* literal[int]
keyword[return] identifier[r] * identifier[accel] | def earth_gyro(RAW_IMU, ATTITUDE):
"""return earth frame gyro vector"""
r = rotation(ATTITUDE)
accel = Vector3(degrees(RAW_IMU.xgyro), degrees(RAW_IMU.ygyro), degrees(RAW_IMU.zgyro)) * 0.001
return r * accel |
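A hedged sketch of feeding this from a live pymavlink connection (the endpoint string is illustrative):

from pymavlink import mavutil

conn = mavutil.mavlink_connection('udpin:0.0.0.0:14550')   # hypothetical endpoint
raw_imu = conn.recv_match(type='RAW_IMU', blocking=True)
attitude = conn.recv_match(type='ATTITUDE', blocking=True)
print(earth_gyro(raw_imu, attitude))     # earth-frame angular rates as a Vector3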
def notify(self, force_notify=None, use_email=None, use_sms=None, **kwargs):
"""Overridden to only call `notify` if model matches.
"""
notified = False
instance = kwargs.get("instance")
if instance._meta.label_lower == self.model:
notified = super().notify(
force_notify=force_notify,
use_email=use_email,
use_sms=use_sms,
**kwargs,
)
return notified | def function[notify, parameter[self, force_notify, use_email, use_sms]]:
constant[Overridden to only call `notify` if model matches.
]
variable[notified] assign[=] constant[False]
variable[instance] assign[=] call[name[kwargs].get, parameter[constant[instance]]]
if compare[name[instance]._meta.label_lower equal[==] name[self].model] begin[:]
variable[notified] assign[=] call[call[name[super], parameter[]].notify, parameter[]]
return[name[notified]] | keyword[def] identifier[notify] ( identifier[self] , identifier[force_notify] = keyword[None] , identifier[use_email] = keyword[None] , identifier[use_sms] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[notified] = keyword[False]
identifier[instance] = identifier[kwargs] . identifier[get] ( literal[string] )
keyword[if] identifier[instance] . identifier[_meta] . identifier[label_lower] == identifier[self] . identifier[model] :
identifier[notified] = identifier[super] (). identifier[notify] (
identifier[force_notify] = identifier[force_notify] ,
identifier[use_email] = identifier[use_email] ,
identifier[use_sms] = identifier[use_sms] ,
** identifier[kwargs] ,
)
keyword[return] identifier[notified] | def notify(self, force_notify=None, use_email=None, use_sms=None, **kwargs):
"""Overridden to only call `notify` if model matches.
"""
notified = False
instance = kwargs.get('instance')
if instance._meta.label_lower == self.model:
notified = super().notify(force_notify=force_notify, use_email=use_email, use_sms=use_sms, **kwargs) # depends on [control=['if'], data=[]]
return notified |
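A hedged sketch of the dispatch this guard enables: a subclass pins model, and notify() fires only when the signal instance's model label matches (the subclass, base class, and instance below are illustrative):

class AeNotification(ModelNotification):   # hypothetical base defining notify()
    name = 'ae_notification'
    model = 'myapp.ae'

notifier = AeNotification()
notifier.notify(instance=ae_instance)      # no-op unless ae_instance is a myapp.ae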
def create_datacenter(service_instance, datacenter_name):
'''
Creates a datacenter.
.. versionadded:: 2017.7.0
service_instance
The Service Instance Object
datacenter_name
The datacenter name
'''
root_folder = get_root_folder(service_instance)
log.trace('Creating datacenter \'%s\'', datacenter_name)
try:
dc_obj = root_folder.CreateDatacenter(datacenter_name)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
return dc_obj | def function[create_datacenter, parameter[service_instance, datacenter_name]]:
constant[
Creates a datacenter.
.. versionadded:: 2017.7.0
service_instance
The Service Instance Object
datacenter_name
The datacenter name
]
variable[root_folder] assign[=] call[name[get_root_folder], parameter[name[service_instance]]]
call[name[log].trace, parameter[constant[Creating datacenter '%s'], name[datacenter_name]]]
<ast.Try object at 0x7da1b1c81d80>
return[name[dc_obj]] | keyword[def] identifier[create_datacenter] ( identifier[service_instance] , identifier[datacenter_name] ):
literal[string]
identifier[root_folder] = identifier[get_root_folder] ( identifier[service_instance] )
identifier[log] . identifier[trace] ( literal[string] , identifier[datacenter_name] )
keyword[try] :
identifier[dc_obj] = identifier[root_folder] . identifier[CreateDatacenter] ( identifier[datacenter_name] )
keyword[except] identifier[vim] . identifier[fault] . identifier[NoPermission] keyword[as] identifier[exc] :
identifier[log] . identifier[exception] ( identifier[exc] )
keyword[raise] identifier[salt] . identifier[exceptions] . identifier[VMwareApiError] (
literal[string]
literal[string] . identifier[format] ( identifier[exc] . identifier[privilegeId] ))
keyword[except] identifier[vim] . identifier[fault] . identifier[VimFault] keyword[as] identifier[exc] :
identifier[log] . identifier[exception] ( identifier[exc] )
keyword[raise] identifier[salt] . identifier[exceptions] . identifier[VMwareApiError] ( identifier[exc] . identifier[msg] )
keyword[except] identifier[vmodl] . identifier[RuntimeFault] keyword[as] identifier[exc] :
identifier[log] . identifier[exception] ( identifier[exc] )
keyword[raise] identifier[salt] . identifier[exceptions] . identifier[VMwareRuntimeError] ( identifier[exc] . identifier[msg] )
keyword[return] identifier[dc_obj] | def create_datacenter(service_instance, datacenter_name):
"""
Creates a datacenter.
.. versionadded:: 2017.7.0
service_instance
The Service Instance Object
datacenter_name
The datacenter name
"""
root_folder = get_root_folder(service_instance)
log.trace("Creating datacenter '%s'", datacenter_name)
try:
dc_obj = root_folder.CreateDatacenter(datacenter_name) # depends on [control=['try'], data=[]]
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError('Not enough permissions. Required privilege: {}'.format(exc.privilegeId)) # depends on [control=['except'], data=['exc']]
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg) # depends on [control=['except'], data=['exc']]
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg) # depends on [control=['except'], data=['exc']]
return dc_obj |
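A hedged usage sketch, assuming a pyVmomi service instance is already connected (for example via salt.utils.vmware.get_service_instance; the host and credentials are illustrative):

si = get_service_instance(host='vcenter.example.com',
                          username='admin', password='secret',
                          protocol='https', port=443)
dc = create_datacenter(si, 'dc-east')
print(dc.name)                            # -> 'dc-east'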
def _check_json_data(self, json_data):
"""
Ensure that the request body is both a hash and has a data key.
:param json_data: The json data provided with the request
"""
if not isinstance(json_data, dict):
raise BadRequestError('Request body should be a JSON hash')
if 'data' not in json_data.keys():
raise BadRequestError('Request should contain data key') | def function[_check_json_data, parameter[self, json_data]]:
constant[
Ensure that the request body is both a hash and has a data key.
:param json_data: The json data provided with the request
]
if <ast.UnaryOp object at 0x7da1b0ed18d0> begin[:]
<ast.Raise object at 0x7da1b0ed20e0>
if compare[constant[data] <ast.NotIn object at 0x7da2590d7190> call[name[json_data].keys, parameter[]]] begin[:]
<ast.Raise object at 0x7da1b0ed0a00> | keyword[def] identifier[_check_json_data] ( identifier[self] , identifier[json_data] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[json_data] , identifier[dict] ):
keyword[raise] identifier[BadRequestError] ( literal[string] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[json_data] . identifier[keys] ():
keyword[raise] identifier[BadRequestError] ( literal[string] ) | def _check_json_data(self, json_data):
"""
Ensure that the request body is both a hash and has a data key.
:param json_data: The json data provided with the request
"""
if not isinstance(json_data, dict):
raise BadRequestError('Request body should be a JSON hash') # depends on [control=['if'], data=[]]
if 'data' not in json_data.keys():
raise BadRequestError('Request should contain data key') # depends on [control=['if'], data=[]] |
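A hedged sketch of the guard in action; handler stands in for any instance of the class defining this method, and BadRequestError comes from the surrounding package:

payload_ok = {'data': {'type': 'users', 'attributes': {}}}
handler._check_json_data(payload_ok)             # passes silently

for bad in (['not', 'a', 'hash'], {'meta': {}}):
    try:
        handler._check_json_data(bad)
    except BadRequestError as exc:
        print(exc)    # first the hash check fires, then the missing-data check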
def translate_to_dbus_type(typeof, value):
"""
Helper function to map values from their native Python types
to Dbus types.
:param type typeof: Target for type conversion e.g., 'dbus.Dictionary'
:param value: Value to assign using type 'typeof'
:return: 'value' converted to type 'typeof'
:rtype: typeof
"""
if ((isinstance(value, types.UnicodeType) or
isinstance(value, str)) and typeof is not dbus.String):
# FIXME: This is potentially dangerous since it evaluates
# a string in-situ
return typeof(eval(value))
else:
return typeof(value) | def function[translate_to_dbus_type, parameter[typeof, value]]:
constant[
Helper function to map values from their native Python types
to Dbus types.
:param type typeof: Target for type conversion e.g., 'dbus.Dictionary'
:param value: Value to assign using type 'typeof'
:return: 'value' converted to type 'typeof'
:rtype: typeof
]
if <ast.BoolOp object at 0x7da1b26493f0> begin[:]
return[call[name[typeof], parameter[call[name[eval], parameter[name[value]]]]]] | keyword[def] identifier[translate_to_dbus_type] ( identifier[typeof] , identifier[value] ):
literal[string]
keyword[if] (( identifier[isinstance] ( identifier[value] , identifier[types] . identifier[UnicodeType] ) keyword[or]
identifier[isinstance] ( identifier[value] , identifier[str] )) keyword[and] identifier[typeof] keyword[is] keyword[not] identifier[dbus] . identifier[String] ):
keyword[return] identifier[typeof] ( identifier[eval] ( identifier[value] ))
keyword[else] :
keyword[return] identifier[typeof] ( identifier[value] ) | def translate_to_dbus_type(typeof, value):
"""
Helper function to map values from their native Python types
to Dbus types.
:param type typeof: Target for type conversion e.g., 'dbus.Dictionary'
:param value: Value to assign using type 'typeof'
:return: 'value' converted to type 'typeof'
:rtype: typeof
"""
if (isinstance(value, types.UnicodeType) or isinstance(value, str)) and typeof is not dbus.String:
# FIXME: This is potentially dangerous since it evaluates
# a string in-situ
return typeof(eval(value)) # depends on [control=['if'], data=[]]
else:
return typeof(value) |
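A few illustrative conversions (note the in-situ eval the FIXME warns about; the dbus-python target types shown are standard):

import dbus

translate_to_dbus_type(dbus.UInt32, '42')      # eval('42') -> dbus.UInt32(42)
translate_to_dbus_type(dbus.Boolean, 'True')   # eval('True') -> dbus.Boolean(True)
translate_to_dbus_type(dbus.String, u'hello')  # dbus.String target: passed through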
def selected_subcategory(self):
"""Obtain the subcategory selected by user.
:returns: Metadata of the selected subcategory.
:rtype: dict, None
"""
item = self.lstSubcategories.currentItem()
try:
return definition(item.data(QtCore.Qt.UserRole))
except (AttributeError, NameError):
return None | def function[selected_subcategory, parameter[self]]:
constant[Obtain the subcategory selected by user.
:returns: Metadata of the selected subcategory.
:rtype: dict, None
]
variable[item] assign[=] call[name[self].lstSubcategories.currentItem, parameter[]]
<ast.Try object at 0x7da204344880> | keyword[def] identifier[selected_subcategory] ( identifier[self] ):
literal[string]
identifier[item] = identifier[self] . identifier[lstSubcategories] . identifier[currentItem] ()
keyword[try] :
keyword[return] identifier[definition] ( identifier[item] . identifier[data] ( identifier[QtCore] . identifier[Qt] . identifier[UserRole] ))
keyword[except] ( identifier[AttributeError] , identifier[NameError] ):
keyword[return] keyword[None] | def selected_subcategory(self):
"""Obtain the subcategory selected by user.
:returns: Metadata of the selected subcategory.
:rtype: dict, None
"""
item = self.lstSubcategories.currentItem()
try:
return definition(item.data(QtCore.Qt.UserRole)) # depends on [control=['try'], data=[]]
except (AttributeError, NameError):
return None # depends on [control=['except'], data=[]] |
def alert_policy_condition_path(cls, project, alert_policy, condition):
"""Return a fully-qualified alert_policy_condition string."""
return google.api_core.path_template.expand(
"projects/{project}/alertPolicies/{alert_policy}/conditions/{condition}",
project=project,
alert_policy=alert_policy,
condition=condition,
) | def function[alert_policy_condition_path, parameter[cls, project, alert_policy, condition]]:
constant[Return a fully-qualified alert_policy_condition string.]
return[call[name[google].api_core.path_template.expand, parameter[constant[projects/{project}/alertPolicies/{alert_policy}/conditions/{condition}]]]] | keyword[def] identifier[alert_policy_condition_path] ( identifier[cls] , identifier[project] , identifier[alert_policy] , identifier[condition] ):
literal[string]
keyword[return] identifier[google] . identifier[api_core] . identifier[path_template] . identifier[expand] (
literal[string] ,
identifier[project] = identifier[project] ,
identifier[alert_policy] = identifier[alert_policy] ,
identifier[condition] = identifier[condition] ,
) | def alert_policy_condition_path(cls, project, alert_policy, condition):
"""Return a fully-qualified alert_policy_condition string."""
return google.api_core.path_template.expand('projects/{project}/alertPolicies/{alert_policy}/conditions/{condition}', project=project, alert_policy=alert_policy, condition=condition) |
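A hedged sketch of the expansion, assuming this classmethod lives on the Stackdriver AlertPolicyServiceClient (the project and IDs are illustrative):

path = AlertPolicyServiceClient.alert_policy_condition_path(
    'my-project', '12345', '67890')
# -> 'projects/my-project/alertPolicies/12345/conditions/67890'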