code (stringlengths 75–104k) | code_sememe (stringlengths 47–309k) | token_type (stringlengths 215–214k) | code_dependency (stringlengths 75–155k)
---|---|---|---|
def atomic_write(filename, content, overwrite=True, permissions=0o0644, encoding='utf-8'):
'''
Write a file atomically by writing the file content to a
temporary location first, then renaming the file.
TODO: this relies pretty heavily on os.rename to ensure atomicity, but
os.rename does not silently overwrite files that already exist on
Windows natively. For now, the functionality provided here can only be
supported under Windows Subsystem for Linux on Windows 10 version 1607
and later.
:param filename: Filename
:type filename: str
:param content: File content
:type content: str
:param overwrite: Overwrite
:type overwrite: bool
    :param permissions: Octal permissions
    :type permissions: octal
    :param encoding: Text encoding used when ``content`` is a string
    :type encoding: str
'''
filename = os.path.expanduser(filename)
if not overwrite and os.path.exists(filename):
raise WriteError('file already exists: {0}'.format(filename))
dirname = os.path.dirname(filename)
with tf.NamedTemporaryFile(dir=dirname, prefix='.', delete=False) as tmp:
        if isinstance(content, six.string_types):
            # the temp file is opened in binary mode, so encode text to bytes
            tmp.write(content.encode(encoding))
        else:
            tmp.write(content)
os.chmod(tmp.name, permissions)
os.rename(tmp.name, filename) | def function[atomic_write, parameter[filename, content, overwrite, permissions, encoding]]:
constant[
Write a file atomically by writing the file content to a
temporary location first, then renaming the file.
TODO: this relies pretty heavily on os.rename to ensure atomicity, but
os.rename does not silently overwrite files that already exist on
Windows natively. For now, the functionality provided here can only be
supported under Windows Subsystem for Linux on Windows 10 version 1607
and later.
:param filename: Filename
:type filename: str
:param content: File content
:type content: str
:param overwrite: Overwrite
:type overwrite: bool
:param permissions: Octal permissions
:type permissions: octal
]
variable[filename] assign[=] call[name[os].path.expanduser, parameter[name[filename]]]
if <ast.BoolOp object at 0x7da18fe90460> begin[:]
<ast.Raise object at 0x7da18fe92290>
variable[dirname] assign[=] call[name[os].path.dirname, parameter[name[filename]]]
with call[name[tf].NamedTemporaryFile, parameter[]] begin[:]
if call[name[isinstance], parameter[name[content], name[six].string_types]] begin[:]
call[name[tmp].write, parameter[call[name[content].decode, parameter[name[encoding]]]]]
call[name[os].chmod, parameter[name[tmp].name, name[permissions]]]
call[name[os].rename, parameter[name[tmp].name, name[filename]]] | keyword[def] identifier[atomic_write] ( identifier[filename] , identifier[content] , identifier[overwrite] = keyword[True] , identifier[permissions] = literal[int] , identifier[encoding] = literal[string] ):
literal[string]
identifier[filename] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[filename] )
keyword[if] keyword[not] identifier[overwrite] keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[filename] ):
keyword[raise] identifier[WriteError] ( literal[string] . identifier[format] ( identifier[filename] ))
identifier[dirname] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[filename] )
keyword[with] identifier[tf] . identifier[NamedTemporaryFile] ( identifier[dir] = identifier[dirname] , identifier[prefix] = literal[string] , identifier[delete] = keyword[False] ) keyword[as] identifier[tmp] :
keyword[if] identifier[isinstance] ( identifier[content] , identifier[six] . identifier[string_types] ):
identifier[tmp] . identifier[write] ( identifier[content] . identifier[decode] ( identifier[encoding] ))
keyword[else] :
identifier[tmp] . identifier[write] ( identifier[content] )
identifier[os] . identifier[chmod] ( identifier[tmp] . identifier[name] , identifier[permissions] )
identifier[os] . identifier[rename] ( identifier[tmp] . identifier[name] , identifier[filename] ) | def atomic_write(filename, content, overwrite=True, permissions=420, encoding='utf-8'):
"""
Write a file atomically by writing the file content to a
temporary location first, then renaming the file.
TODO: this relies pretty heavily on os.rename to ensure atomicity, but
os.rename does not silently overwrite files that already exist on
Windows natively. For now, the functionality provided here can only be
supported under Windows Subsystem for Linux on Windows 10 version 1607
and later.
:param filename: Filename
:type filename: str
:param content: File content
:type content: str
:param overwrite: Overwrite
:type overwrite: bool
:param permissions: Octal permissions
:type permissions: octal
"""
filename = os.path.expanduser(filename)
if not overwrite and os.path.exists(filename):
raise WriteError('file already exists: {0}'.format(filename)) # depends on [control=['if'], data=[]]
dirname = os.path.dirname(filename)
with tf.NamedTemporaryFile(dir=dirname, prefix='.', delete=False) as tmp:
if isinstance(content, six.string_types):
tmp.write(content.encode(encoding)) # depends on [control=['if'], data=[]]
else:
tmp.write(content) # depends on [control=['with'], data=['tmp']]
os.chmod(tmp.name, permissions)
os.rename(tmp.name, filename) |
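A hedged usage sketch for `atomic_write` above. `WriteError` and the `tf`/`six` import aliases live elsewhere in the source module, so the stand-ins below are assumptions inlined to make the demo self-contained:

```python
import os
import tempfile as tf

import six


class WriteError(Exception):
    """Stand-in for the module's WriteError."""


# atomic_write(...) as defined above

path = os.path.expanduser('~/example.conf')
atomic_write(path, u'key = value\n', permissions=0o600)
with open(path) as f:
    print(f.read())   # -> key = value
os.remove(path)       # clean up the demo file
```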
def atmos_worker(srcs, window, ij, args):
"""A simple atmospheric correction user function."""
src = srcs[0]
rgb = src.read(window=window)
rgb = to_math_type(rgb)
atmos = simple_atmo(rgb, args["atmo"], args["contrast"], args["bias"])
# should be scaled 0 to 1, scale to outtype
return scale_dtype(atmos, args["out_dtype"]) | def function[atmos_worker, parameter[srcs, window, ij, args]]:
constant[A simple atmospheric correction user function.]
variable[src] assign[=] call[name[srcs]][constant[0]]
variable[rgb] assign[=] call[name[src].read, parameter[]]
variable[rgb] assign[=] call[name[to_math_type], parameter[name[rgb]]]
variable[atmos] assign[=] call[name[simple_atmo], parameter[name[rgb], call[name[args]][constant[atmo]], call[name[args]][constant[contrast]], call[name[args]][constant[bias]]]]
return[call[name[scale_dtype], parameter[name[atmos], call[name[args]][constant[out_dtype]]]]] | keyword[def] identifier[atmos_worker] ( identifier[srcs] , identifier[window] , identifier[ij] , identifier[args] ):
literal[string]
identifier[src] = identifier[srcs] [ literal[int] ]
identifier[rgb] = identifier[src] . identifier[read] ( identifier[window] = identifier[window] )
identifier[rgb] = identifier[to_math_type] ( identifier[rgb] )
identifier[atmos] = identifier[simple_atmo] ( identifier[rgb] , identifier[args] [ literal[string] ], identifier[args] [ literal[string] ], identifier[args] [ literal[string] ])
keyword[return] identifier[scale_dtype] ( identifier[atmos] , identifier[args] [ literal[string] ]) | def atmos_worker(srcs, window, ij, args):
"""A simple atmospheric correction user function."""
src = srcs[0]
rgb = src.read(window=window)
rgb = to_math_type(rgb)
atmos = simple_atmo(rgb, args['atmo'], args['contrast'], args['bias'])
# should be scaled 0 to 1, scale to outtype
return scale_dtype(atmos, args['out_dtype']) |
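The worker follows a scale-operate-rescale pattern: read integer digital numbers for a window, map them onto [0, 1] floats, apply the correction, then map back onto the output dtype. A self-contained numpy sketch of that pattern — the helpers are simplified stand-ins for `to_math_type`/`scale_dtype`, and the arithmetic is a placeholder for `simple_atmo`:

```python
import numpy as np


def to_math_type_sketch(arr):
    # simplified stand-in: map integer digital numbers onto [0, 1] floats
    return arr.astype(np.float64) / np.iinfo(arr.dtype).max


def scale_dtype_sketch(arr, dtype):
    # simplified stand-in: clip to [0, 1] and map back to the integer range
    return (np.clip(arr, 0, 1) * np.iinfo(dtype).max).astype(dtype)


rgb = np.random.randint(0, 256, (3, 4, 4), dtype=np.uint8)
scaled = to_math_type_sketch(rgb)     # floats in [0, 1]
corrected = scaled * 0.9 + 0.05       # placeholder for simple_atmo(...)
out = scale_dtype_sketch(corrected, np.uint16)
print(out.dtype, out.max())
```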
def from_file_path(cls, path_prefix):
"""Load the embedding matrix and the vocab from <path_prefix>.npy and <path_prefix>.vocab.
:param (str) path_prefix: path prefix of the saved files
"""
with cls._path_prefix_to_files(path_prefix, 'r') as (array_file, vocab_file):
return cls.from_files(array_file, vocab_file) | def function[from_file_path, parameter[cls, path_prefix]]:
constant[Load the embedding matrix and the vocab from <path_prefix>.npy and <path_prefix>.vocab.
:param (str) path_prefix: path prefix of the saved files
]
with call[name[cls]._path_prefix_to_files, parameter[name[path_prefix], constant[r]]] begin[:]
return[call[name[cls].from_files, parameter[name[array_file], name[vocab_file]]]] | keyword[def] identifier[from_file_path] ( identifier[cls] , identifier[path_prefix] ):
literal[string]
keyword[with] identifier[cls] . identifier[_path_prefix_to_files] ( identifier[path_prefix] , literal[string] ) keyword[as] ( identifier[array_file] , identifier[vocab_file] ):
keyword[return] identifier[cls] . identifier[from_files] ( identifier[array_file] , identifier[vocab_file] ) | def from_file_path(cls, path_prefix):
"""Load the embedding matrix and the vocab from <path_prefix>.npy and <path_prefix>.vocab.
:param (str) path_prefix: path prefix of the saved files
"""
with cls._path_prefix_to_files(path_prefix, 'r') as (array_file, vocab_file):
return cls.from_files(array_file, vocab_file) # depends on [control=['with'], data=[]] |
def field_value(self, value):
"""Validate against NodeType.
"""
if not self.is_array:
return self.field_type(value)
if isinstance(value, (list, tuple, set)):
return [self.field_type(item) for item in value]
return self.field_type(value) | def function[field_value, parameter[self, value]]:
constant[Validate against NodeType.
]
if <ast.UnaryOp object at 0x7da18fe90100> begin[:]
return[call[name[self].field_type, parameter[name[value]]]]
if call[name[isinstance], parameter[name[value], tuple[[<ast.Name object at 0x7da18fe93370>, <ast.Name object at 0x7da18fe91cf0>, <ast.Name object at 0x7da18fe907c0>]]]] begin[:]
return[<ast.ListComp object at 0x7da18fe92e30>]
return[call[name[self].field_type, parameter[name[value]]]] | keyword[def] identifier[field_value] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_array] :
keyword[return] identifier[self] . identifier[field_type] ( identifier[value] )
keyword[if] identifier[isinstance] ( identifier[value] ,( identifier[list] , identifier[tuple] , identifier[set] )):
keyword[return] [ identifier[self] . identifier[field_type] ( identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[value] ]
keyword[return] identifier[self] . identifier[field_type] ( identifier[value] ) | def field_value(self, value):
"""Validate against NodeType.
"""
if not self.is_array:
return self.field_type(value) # depends on [control=['if'], data=[]]
if isinstance(value, (list, tuple, set)):
return [self.field_type(item) for item in value] # depends on [control=['if'], data=[]]
return self.field_type(value) |
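A minimal sketch exercising the scalar and array branches; `Field` is a hypothetical host class standing in for whatever node machinery defines this method:

```python
class Field(object):
    """Hypothetical host class for the field_value method defined above."""

    def __init__(self, field_type, is_array=False):
        self.field_type = field_type
        self.is_array = is_array

    field_value = field_value  # bind the function above as a method


print(Field(int).field_value('42'))                       # -> 42
print(Field(int, is_array=True).field_value(['1', '2']))  # -> [1, 2]
print(Field(int, is_array=True).field_value('7'))         # -> 7 (scalar fallback)
```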
def check_crtf(self):
"""
Checks for CRTF compatibility.
"""
if self.region_type not in regions_attributes:
raise ValueError("'{0}' is not a valid region type in this package"
"supported by CRTF".format(self.region_type))
if self.coordsys not in valid_coordsys['CRTF']:
raise ValueError("'{0}' is not a valid coordinate reference frame in "
"astropy supported by CRTF".format(self.coordsys)) | def function[check_crtf, parameter[self]]:
constant[
Checks for CRTF compatibility.
]
if compare[name[self].region_type <ast.NotIn object at 0x7da2590d7190> name[regions_attributes]] begin[:]
<ast.Raise object at 0x7da207f9ba30>
if compare[name[self].coordsys <ast.NotIn object at 0x7da2590d7190> call[name[valid_coordsys]][constant[CRTF]]] begin[:]
<ast.Raise object at 0x7da2047eba30> | keyword[def] identifier[check_crtf] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[region_type] keyword[not] keyword[in] identifier[regions_attributes] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[region_type] ))
keyword[if] identifier[self] . identifier[coordsys] keyword[not] keyword[in] identifier[valid_coordsys] [ literal[string] ]:
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[coordsys] )) | def check_crtf(self):
"""
Checks for CRTF compatibility.
"""
if self.region_type not in regions_attributes:
raise ValueError("'{0}' is not a valid region type in this packagesupported by CRTF".format(self.region_type)) # depends on [control=['if'], data=[]]
if self.coordsys not in valid_coordsys['CRTF']:
raise ValueError("'{0}' is not a valid coordinate reference frame in astropy supported by CRTF".format(self.coordsys)) # depends on [control=['if'], data=[]] |
def GetK2Campaign(campaign, clobber=False, split=False,
epics_only=False, cadence='lc'):
'''
Return all stars in a given *K2* campaign.
:param campaign: The *K2* campaign number. If this is an :py:class:`int`, \
returns all targets in that campaign. If a :py:class:`float` in \
the form :py:obj:`X.Y`, runs the :py:obj:`Y^th` decile of campaign \
:py:obj:`X`.
:param bool clobber: If :py:obj:`True`, download and overwrite existing \
files. Default :py:obj:`False`
:param bool split: If :py:obj:`True` and :py:obj:`campaign` is an \
:py:class:`int`, returns each of the subcampaigns as a separate \
list. Default :py:obj:`False`
:param bool epics_only: If :py:obj:`True`, returns only the EPIC numbers. \
If :py:obj:`False`, returns metadata associated with each target. \
Default :py:obj:`False`
:param str cadence: Long (:py:obj:`lc`) or short (:py:obj:`sc`) cadence? \
Default :py:obj:`lc`.
'''
all = GetK2Stars(clobber=clobber)
if int(campaign) in all.keys():
all = all[int(campaign)]
else:
return []
if cadence == 'sc':
all = [a for a in all if a[3]]
if epics_only:
all = [a[0] for a in all]
if type(campaign) is int or type(campaign) is np.int64:
if not split:
return all
else:
all_split = list(Chunks(all, len(all) // 10))
# HACK: Sometimes we're left with a few targets
# dangling at the end. Insert them back evenly
# into the first few subcampaigns.
if len(all_split) > 10:
tmp1 = all_split[:10]
tmp2 = all_split[10:]
for n in range(len(tmp2)):
tmp1[n] = np.append(tmp1[n], tmp2[n])
all_split = tmp1
res = []
for subcampaign in range(10):
res.append(all_split[subcampaign])
return res
elif type(campaign) is float:
x, y = divmod(campaign, 1)
campaign = int(x)
subcampaign = round(y * 10)
return list(Chunks(all, len(all) // 10))[subcampaign]
else:
        raise Exception('Argument `campaign` must be an `int` ' +
                        'or a `float` in the form `X.Y`')
constant[
Return all stars in a given *K2* campaign.
:param campaign: The *K2* campaign number. If this is an :py:class:`int`, returns all targets in that campaign. If a :py:class:`float` in the form :py:obj:`X.Y`, runs the :py:obj:`Y^th` decile of campaign :py:obj:`X`.
:param bool clobber: If :py:obj:`True`, download and overwrite existing files. Default :py:obj:`False`
:param bool split: If :py:obj:`True` and :py:obj:`campaign` is an :py:class:`int`, returns each of the subcampaigns as a separate list. Default :py:obj:`False`
:param bool epics_only: If :py:obj:`True`, returns only the EPIC numbers. If :py:obj:`False`, returns metadata associated with each target. Default :py:obj:`False`
:param str cadence: Long (:py:obj:`lc`) or short (:py:obj:`sc`) cadence? Default :py:obj:`lc`.
]
variable[all] assign[=] call[name[GetK2Stars], parameter[]]
if compare[call[name[int], parameter[name[campaign]]] in call[name[all].keys, parameter[]]] begin[:]
variable[all] assign[=] call[name[all]][call[name[int], parameter[name[campaign]]]]
if compare[name[cadence] equal[==] constant[sc]] begin[:]
variable[all] assign[=] <ast.ListComp object at 0x7da1b0e6f760>
if name[epics_only] begin[:]
variable[all] assign[=] <ast.ListComp object at 0x7da1b0eae4a0>
if <ast.BoolOp object at 0x7da1b0eae5f0> begin[:]
if <ast.UnaryOp object at 0x7da1b0eae4d0> begin[:]
return[name[all]] | keyword[def] identifier[GetK2Campaign] ( identifier[campaign] , identifier[clobber] = keyword[False] , identifier[split] = keyword[False] ,
identifier[epics_only] = keyword[False] , identifier[cadence] = literal[string] ):
literal[string]
identifier[all] = identifier[GetK2Stars] ( identifier[clobber] = identifier[clobber] )
keyword[if] identifier[int] ( identifier[campaign] ) keyword[in] identifier[all] . identifier[keys] ():
identifier[all] = identifier[all] [ identifier[int] ( identifier[campaign] )]
keyword[else] :
keyword[return] []
keyword[if] identifier[cadence] == literal[string] :
identifier[all] =[ identifier[a] keyword[for] identifier[a] keyword[in] identifier[all] keyword[if] identifier[a] [ literal[int] ]]
keyword[if] identifier[epics_only] :
identifier[all] =[ identifier[a] [ literal[int] ] keyword[for] identifier[a] keyword[in] identifier[all] ]
keyword[if] identifier[type] ( identifier[campaign] ) keyword[is] identifier[int] keyword[or] identifier[type] ( identifier[campaign] ) keyword[is] identifier[np] . identifier[int64] :
keyword[if] keyword[not] identifier[split] :
keyword[return] identifier[all]
keyword[else] :
identifier[all_split] = identifier[list] ( identifier[Chunks] ( identifier[all] , identifier[len] ( identifier[all] )// literal[int] ))
keyword[if] identifier[len] ( identifier[all_split] )> literal[int] :
identifier[tmp1] = identifier[all_split] [: literal[int] ]
identifier[tmp2] = identifier[all_split] [ literal[int] :]
keyword[for] identifier[n] keyword[in] identifier[range] ( identifier[len] ( identifier[tmp2] )):
identifier[tmp1] [ identifier[n] ]= identifier[np] . identifier[append] ( identifier[tmp1] [ identifier[n] ], identifier[tmp2] [ identifier[n] ])
identifier[all_split] = identifier[tmp1]
identifier[res] =[]
keyword[for] identifier[subcampaign] keyword[in] identifier[range] ( literal[int] ):
identifier[res] . identifier[append] ( identifier[all_split] [ identifier[subcampaign] ])
keyword[return] identifier[res]
keyword[elif] identifier[type] ( identifier[campaign] ) keyword[is] identifier[float] :
identifier[x] , identifier[y] = identifier[divmod] ( identifier[campaign] , literal[int] )
identifier[campaign] = identifier[int] ( identifier[x] )
identifier[subcampaign] = identifier[round] ( identifier[y] * literal[int] )
keyword[return] identifier[list] ( identifier[Chunks] ( identifier[all] , identifier[len] ( identifier[all] )// literal[int] ))[ identifier[subcampaign] ]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] +
literal[string] ) | def GetK2Campaign(campaign, clobber=False, split=False, epics_only=False, cadence='lc'):
"""
Return all stars in a given *K2* campaign.
:param campaign: The *K2* campaign number. If this is an :py:class:`int`, returns all targets in that campaign. If a :py:class:`float` in the form :py:obj:`X.Y`, runs the :py:obj:`Y^th` decile of campaign :py:obj:`X`.
:param bool clobber: If :py:obj:`True`, download and overwrite existing files. Default :py:obj:`False`
:param bool split: If :py:obj:`True` and :py:obj:`campaign` is an :py:class:`int`, returns each of the subcampaigns as a separate list. Default :py:obj:`False`
:param bool epics_only: If :py:obj:`True`, returns only the EPIC numbers. If :py:obj:`False`, returns metadata associated with each target. Default :py:obj:`False`
:param str cadence: Long (:py:obj:`lc`) or short (:py:obj:`sc`) cadence? Default :py:obj:`lc`.
"""
all = GetK2Stars(clobber=clobber)
if int(campaign) in all.keys():
all = all[int(campaign)] # depends on [control=['if'], data=[]]
else:
return []
if cadence == 'sc':
all = [a for a in all if a[3]] # depends on [control=['if'], data=[]]
if epics_only:
all = [a[0] for a in all] # depends on [control=['if'], data=[]]
if type(campaign) is int or type(campaign) is np.int64:
if not split:
return all # depends on [control=['if'], data=[]]
else:
all_split = list(Chunks(all, len(all) // 10))
# HACK: Sometimes we're left with a few targets
# dangling at the end. Insert them back evenly
# into the first few subcampaigns.
if len(all_split) > 10:
tmp1 = all_split[:10]
tmp2 = all_split[10:]
for n in range(len(tmp2)):
tmp1[n] = np.append(tmp1[n], tmp2[n]) # depends on [control=['for'], data=['n']]
all_split = tmp1 # depends on [control=['if'], data=[]]
res = []
for subcampaign in range(10):
res.append(all_split[subcampaign]) # depends on [control=['for'], data=['subcampaign']]
return res # depends on [control=['if'], data=[]]
elif type(campaign) is float:
(x, y) = divmod(campaign, 1)
campaign = int(x)
subcampaign = round(y * 10)
return list(Chunks(all, len(all) // 10))[subcampaign] # depends on [control=['if'], data=[]]
else:
        raise Exception('Argument `campaign` must be an `int` ' + 'or a `float` in the form `X.Y`')
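The `X.Y` decile convention resolves through `divmod`; a short sketch of that arithmetic, with a stand-in for the module's `Chunks` helper, also shows why the dangling-targets hack above is needed when the list does not divide evenly:

```python
campaign = 6.3
x, y = divmod(campaign, 1)       # -> (6.0, 0.2999...)
print(int(x), round(y * 10))     # -> 6 3  (campaign 6, fourth decile)


def chunks_sketch(seq, n):
    # stand-in for the module's Chunks(): fixed-size runs of length n
    for i in range(0, len(seq), n):
        yield seq[i:i + n]


targets = list(range(105))
deciles = list(chunks_sketch(targets, len(targets) // 10))
print(len(deciles))              # -> 11: one dangling chunk of 5 targets
```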
def _search(self):
"""Returns all documents in the doc dict.
This function is not a part of the DocManager API, and is only used
to simulate searching all documents from a backend.
"""
results = []
for _id in self.doc_dict:
entry = self.doc_dict[_id]
if entry.doc is not None:
results.append(entry.merged_dict)
return results | def function[_search, parameter[self]]:
constant[Returns all documents in the doc dict.
This function is not a part of the DocManager API, and is only used
to simulate searching all documents from a backend.
]
variable[results] assign[=] list[[]]
for taget[name[_id]] in starred[name[self].doc_dict] begin[:]
variable[entry] assign[=] call[name[self].doc_dict][name[_id]]
if compare[name[entry].doc is_not constant[None]] begin[:]
call[name[results].append, parameter[name[entry].merged_dict]]
return[name[results]] | keyword[def] identifier[_search] ( identifier[self] ):
literal[string]
identifier[results] =[]
keyword[for] identifier[_id] keyword[in] identifier[self] . identifier[doc_dict] :
identifier[entry] = identifier[self] . identifier[doc_dict] [ identifier[_id] ]
keyword[if] identifier[entry] . identifier[doc] keyword[is] keyword[not] keyword[None] :
identifier[results] . identifier[append] ( identifier[entry] . identifier[merged_dict] )
keyword[return] identifier[results] | def _search(self):
"""Returns all documents in the doc dict.
This function is not a part of the DocManager API, and is only used
to simulate searching all documents from a backend.
"""
results = []
for _id in self.doc_dict:
entry = self.doc_dict[_id]
if entry.doc is not None:
results.append(entry.merged_dict) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['_id']]
return results |
def add_child(self, **kwargs):
"""Adds a child to the node."""
cls = get_result_class(self.__class__)
if len(kwargs) == 1 and 'instance' in kwargs:
# adding the passed (unsaved) instance to the tree
newobj = kwargs['instance']
if newobj.pk:
raise NodeAlreadySaved("Attempted to add a tree node that is "\
"already in the database")
else:
newobj = cls(**kwargs)
try:
newobj._cached_depth = self._cached_depth + 1
except AttributeError:
pass
if not cls.node_order_by:
try:
max = cls.objects.filter(parent=self).reverse(
)[0].sib_order
except IndexError:
max = 0
newobj.sib_order = max + 1
newobj.parent = self
newobj.save()
return newobj | def function[add_child, parameter[self]]:
constant[Adds a child to the node.]
variable[cls] assign[=] call[name[get_result_class], parameter[name[self].__class__]]
if <ast.BoolOp object at 0x7da20c794f10> begin[:]
variable[newobj] assign[=] call[name[kwargs]][constant[instance]]
if name[newobj].pk begin[:]
<ast.Raise object at 0x7da20c794a00>
<ast.Try object at 0x7da20c796680>
if <ast.UnaryOp object at 0x7da20c796350> begin[:]
<ast.Try object at 0x7da20c794bb0>
name[newobj].sib_order assign[=] binary_operation[name[max] + constant[1]]
name[newobj].parent assign[=] name[self]
call[name[newobj].save, parameter[]]
return[name[newobj]] | keyword[def] identifier[add_child] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[cls] = identifier[get_result_class] ( identifier[self] . identifier[__class__] )
keyword[if] identifier[len] ( identifier[kwargs] )== literal[int] keyword[and] literal[string] keyword[in] identifier[kwargs] :
identifier[newobj] = identifier[kwargs] [ literal[string] ]
keyword[if] identifier[newobj] . identifier[pk] :
keyword[raise] identifier[NodeAlreadySaved] ( literal[string] literal[string] )
keyword[else] :
identifier[newobj] = identifier[cls] (** identifier[kwargs] )
keyword[try] :
identifier[newobj] . identifier[_cached_depth] = identifier[self] . identifier[_cached_depth] + literal[int]
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[if] keyword[not] identifier[cls] . identifier[node_order_by] :
keyword[try] :
identifier[max] = identifier[cls] . identifier[objects] . identifier[filter] ( identifier[parent] = identifier[self] ). identifier[reverse] (
)[ literal[int] ]. identifier[sib_order]
keyword[except] identifier[IndexError] :
identifier[max] = literal[int]
identifier[newobj] . identifier[sib_order] = identifier[max] + literal[int]
identifier[newobj] . identifier[parent] = identifier[self]
identifier[newobj] . identifier[save] ()
keyword[return] identifier[newobj] | def add_child(self, **kwargs):
"""Adds a child to the node."""
cls = get_result_class(self.__class__)
if len(kwargs) == 1 and 'instance' in kwargs:
# adding the passed (unsaved) instance to the tree
newobj = kwargs['instance']
if newobj.pk:
raise NodeAlreadySaved('Attempted to add a tree node that is already in the database') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
newobj = cls(**kwargs)
try:
newobj._cached_depth = self._cached_depth + 1 # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
if not cls.node_order_by:
try:
max = cls.objects.filter(parent=self).reverse()[0].sib_order # depends on [control=['try'], data=[]]
except IndexError:
max = 0 # depends on [control=['except'], data=[]]
newobj.sib_order = max + 1 # depends on [control=['if'], data=[]]
newobj.parent = self
newobj.save()
return newobj |
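The ORM plumbing aside, the sibling-ordering rule is simple: without `node_order_by`, a new child takes max(existing `sib_order`) + 1. An in-memory sketch of just that rule, not the database-backed implementation:

```python
class NodeSketch(object):
    def __init__(self, parent=None):
        self.parent = parent
        self.children = []
        self.sib_order = 0

    def add_child(self):
        child = NodeSketch(parent=self)
        # same rule as above: one past the highest existing sibling order
        last = max((c.sib_order for c in self.children), default=0)
        child.sib_order = last + 1
        self.children.append(child)
        return child


root = NodeSketch()
a, b = root.add_child(), root.add_child()
print(a.sib_order, b.sib_order)   # -> 1 2
```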
def search_certificate(self, hash):
"""
Searches for a specific certificate using its hash
:param hash: certificate hash
:type hash: str
:return: dict
"""
c = CensysCertificates(api_id=self.__uid, api_secret=self.__api_key)
return c.view(hash) | def function[search_certificate, parameter[self, hash]]:
constant[
Searches for a specific certificate using its hash
:param hash: certificate hash
:type hash: str
:return: dict
]
variable[c] assign[=] call[name[CensysCertificates], parameter[]]
return[call[name[c].view, parameter[name[hash]]]] | keyword[def] identifier[search_certificate] ( identifier[self] , identifier[hash] ):
literal[string]
identifier[c] = identifier[CensysCertificates] ( identifier[api_id] = identifier[self] . identifier[__uid] , identifier[api_secret] = identifier[self] . identifier[__api_key] )
keyword[return] identifier[c] . identifier[view] ( identifier[hash] ) | def search_certificate(self, hash):
"""
Searches for a specific certificate using its hash
:param hash: certificate hash
:type hash: str
:return: dict
"""
c = CensysCertificates(api_id=self.__uid, api_secret=self.__api_key)
return c.view(hash) |
def _compile_lock(self, query, value):
"""
Compile the lock into SQL
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param value: The lock value
:type value: bool or str
:return: The compiled lock
:rtype: str
"""
if isinstance(value, basestring):
return value
if value is True:
return 'FOR UPDATE'
elif value is False:
return 'LOCK IN SHARE MODE' | def function[_compile_lock, parameter[self, query, value]]:
constant[
Compile the lock into SQL
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param value: The lock value
:type value: bool or str
:return: The compiled lock
:rtype: str
]
if call[name[isinstance], parameter[name[value], name[basestring]]] begin[:]
return[name[value]]
if compare[name[value] is constant[True]] begin[:]
return[constant[FOR UPDATE]] | keyword[def] identifier[_compile_lock] ( identifier[self] , identifier[query] , identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[basestring] ):
keyword[return] identifier[value]
keyword[if] identifier[value] keyword[is] keyword[True] :
keyword[return] literal[string]
keyword[elif] identifier[value] keyword[is] keyword[False] :
keyword[return] literal[string] | def _compile_lock(self, query, value):
"""
Compile the lock into SQL
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param value: The lock value
:type value: bool or str
:return: The compiled lock
:rtype: str
"""
if isinstance(value, basestring):
return value # depends on [control=['if'], data=[]]
if value is True:
return 'FOR UPDATE' # depends on [control=['if'], data=[]]
elif value is False:
return 'LOCK IN SHARE MODE' # depends on [control=['if'], data=[]] |
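A quick demonstration of the three accepted lock forms. `GrammarSketch` is a hypothetical host class, and `basestring` is shimmed for Python 3 since the method uses the Python 2 name:

```python
basestring = str  # Python 3 shim for the Python 2 name used above


class GrammarSketch(object):
    _compile_lock = _compile_lock  # bind the function above as a method


g = GrammarSketch()
print(g._compile_lock(None, True))          # -> FOR UPDATE
print(g._compile_lock(None, False))         # -> LOCK IN SHARE MODE
print(g._compile_lock(None, 'FOR SHARE'))   # custom clause passes through
```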
def get_sources(zone, permanent=True):
'''
List sources bound to a zone
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' firewalld.get_sources zone
'''
cmd = '--zone={0} --list-sources'.format(zone)
if permanent:
cmd += ' --permanent'
return __firewall_cmd(cmd).split() | def function[get_sources, parameter[zone, permanent]]:
constant[
List sources bound to a zone
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' firewalld.get_sources zone
]
variable[cmd] assign[=] call[constant[--zone={0} --list-sources].format, parameter[name[zone]]]
if name[permanent] begin[:]
<ast.AugAssign object at 0x7da2054a6500>
return[call[call[name[__firewall_cmd], parameter[name[cmd]]].split, parameter[]]] | keyword[def] identifier[get_sources] ( identifier[zone] , identifier[permanent] = keyword[True] ):
literal[string]
identifier[cmd] = literal[string] . identifier[format] ( identifier[zone] )
keyword[if] identifier[permanent] :
identifier[cmd] += literal[string]
keyword[return] identifier[__firewall_cmd] ( identifier[cmd] ). identifier[split] () | def get_sources(zone, permanent=True):
"""
List sources bound to a zone
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' firewalld.get_sources zone
"""
cmd = '--zone={0} --list-sources'.format(zone)
if permanent:
cmd += ' --permanent' # depends on [control=['if'], data=[]]
return __firewall_cmd(cmd).split() |
def sort_stats(stats,
sortedby='cpu_percent',
sortedby_secondary='memory_percent',
reverse=True):
"""Return the stats (dict) sorted by (sortedby).
Reverse the sort if reverse is True.
"""
if sortedby is None and sortedby_secondary is None:
# No need to sort...
return stats
# Check if a specific sort should be done
sort_lambda = _sort_lambda(sortedby=sortedby,
sortedby_secondary=sortedby_secondary)
if sort_lambda is not None:
# Specific sort
try:
stats.sort(key=sort_lambda, reverse=reverse)
except Exception:
# If an error is detected, fallback to cpu_percent
stats.sort(key=lambda process: (weighted(process['cpu_percent']),
weighted(process[sortedby_secondary])),
reverse=reverse)
else:
# Standard sort
try:
stats.sort(key=lambda process: (weighted(process[sortedby]),
weighted(process[sortedby_secondary])),
reverse=reverse)
except (KeyError, TypeError):
# Fallback to name
stats.sort(key=lambda process: process['name'] if process['name'] is not None else '~',
reverse=False)
return stats | def function[sort_stats, parameter[stats, sortedby, sortedby_secondary, reverse]]:
constant[Return the stats (dict) sorted by (sortedby).
Reverse the sort if reverse is True.
]
if <ast.BoolOp object at 0x7da18bc72620> begin[:]
return[name[stats]]
variable[sort_lambda] assign[=] call[name[_sort_lambda], parameter[]]
if compare[name[sort_lambda] is_not constant[None]] begin[:]
<ast.Try object at 0x7da1b21e1390>
return[name[stats]] | keyword[def] identifier[sort_stats] ( identifier[stats] ,
identifier[sortedby] = literal[string] ,
identifier[sortedby_secondary] = literal[string] ,
identifier[reverse] = keyword[True] ):
literal[string]
keyword[if] identifier[sortedby] keyword[is] keyword[None] keyword[and] identifier[sortedby_secondary] keyword[is] keyword[None] :
keyword[return] identifier[stats]
identifier[sort_lambda] = identifier[_sort_lambda] ( identifier[sortedby] = identifier[sortedby] ,
identifier[sortedby_secondary] = identifier[sortedby_secondary] )
keyword[if] identifier[sort_lambda] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[stats] . identifier[sort] ( identifier[key] = identifier[sort_lambda] , identifier[reverse] = identifier[reverse] )
keyword[except] identifier[Exception] :
identifier[stats] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[process] :( identifier[weighted] ( identifier[process] [ literal[string] ]),
identifier[weighted] ( identifier[process] [ identifier[sortedby_secondary] ])),
identifier[reverse] = identifier[reverse] )
keyword[else] :
keyword[try] :
identifier[stats] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[process] :( identifier[weighted] ( identifier[process] [ identifier[sortedby] ]),
identifier[weighted] ( identifier[process] [ identifier[sortedby_secondary] ])),
identifier[reverse] = identifier[reverse] )
keyword[except] ( identifier[KeyError] , identifier[TypeError] ):
identifier[stats] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[process] : identifier[process] [ literal[string] ] keyword[if] identifier[process] [ literal[string] ] keyword[is] keyword[not] keyword[None] keyword[else] literal[string] ,
identifier[reverse] = keyword[False] )
keyword[return] identifier[stats] | def sort_stats(stats, sortedby='cpu_percent', sortedby_secondary='memory_percent', reverse=True):
"""Return the stats (dict) sorted by (sortedby).
Reverse the sort if reverse is True.
"""
if sortedby is None and sortedby_secondary is None:
# No need to sort...
return stats # depends on [control=['if'], data=[]]
# Check if a specific sort should be done
sort_lambda = _sort_lambda(sortedby=sortedby, sortedby_secondary=sortedby_secondary)
if sort_lambda is not None:
# Specific sort
try:
stats.sort(key=sort_lambda, reverse=reverse) # depends on [control=['try'], data=[]]
except Exception:
# If an error is detected, fallback to cpu_percent
stats.sort(key=lambda process: (weighted(process['cpu_percent']), weighted(process[sortedby_secondary])), reverse=reverse) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['sort_lambda']]
else:
# Standard sort
try:
stats.sort(key=lambda process: (weighted(process[sortedby]), weighted(process[sortedby_secondary])), reverse=reverse) # depends on [control=['try'], data=[]]
except (KeyError, TypeError):
# Fallback to name
stats.sort(key=lambda process: process['name'] if process['name'] is not None else '~', reverse=False) # depends on [control=['except'], data=[]]
return stats |
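A minimal demo with plain process dicts. It assumes `weighted` and `_sort_lambda` from the surrounding module are available; ties on the primary key fall back to the secondary key:

```python
procs = [
    {'name': 'a', 'cpu_percent': 5.0, 'memory_percent': 1.0},
    {'name': 'b', 'cpu_percent': 5.0, 'memory_percent': 9.0},
    {'name': 'c', 'cpu_percent': 80.0, 'memory_percent': 2.0},
]
top = sort_stats(procs, sortedby='cpu_percent')
print([p['name'] for p in top])   # -> ['c', 'b', 'a']
```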
def concatenate(arrs, axis=0):
r"""Concatenate multiple values into a new unitized object.
This is essentially a unit-aware version of `numpy.concatenate`. All items
must be able to be converted to the same units. If an item has no units, it will be given
those of the rest of the collection, without conversion. The first units found in the
arguments is used as the final output units.
Parameters
----------
arrs : Sequence of arrays
The items to be joined together
axis : integer, optional
The array axis along which to join the arrays. Defaults to 0 (the first dimension)
Returns
-------
`pint.Quantity`
New container with the value passed in and units corresponding to the first item.
"""
dest = 'dimensionless'
for a in arrs:
if hasattr(a, 'units'):
dest = a.units
break
data = []
for a in arrs:
if hasattr(a, 'to'):
a = a.to(dest).magnitude
data.append(np.atleast_1d(a))
# Use masked array concatenate to ensure masks are preserved, but convert to an
# array if there are no masked values.
data = np.ma.concatenate(data, axis=axis)
if not np.any(data.mask):
data = np.asarray(data)
return units.Quantity(data, dest) | def function[concatenate, parameter[arrs, axis]]:
constant[Concatenate multiple values into a new unitized object.
This is essentially a unit-aware version of `numpy.concatenate`. All items
must be able to be converted to the same units. If an item has no units, it will be given
those of the rest of the collection, without conversion. The first units found in the
arguments is used as the final output units.
Parameters
----------
arrs : Sequence of arrays
The items to be joined together
axis : integer, optional
The array axis along which to join the arrays. Defaults to 0 (the first dimension)
Returns
-------
`pint.Quantity`
New container with the value passed in and units corresponding to the first item.
]
variable[dest] assign[=] constant[dimensionless]
for taget[name[a]] in starred[name[arrs]] begin[:]
if call[name[hasattr], parameter[name[a], constant[units]]] begin[:]
variable[dest] assign[=] name[a].units
break
variable[data] assign[=] list[[]]
for taget[name[a]] in starred[name[arrs]] begin[:]
if call[name[hasattr], parameter[name[a], constant[to]]] begin[:]
variable[a] assign[=] call[name[a].to, parameter[name[dest]]].magnitude
call[name[data].append, parameter[call[name[np].atleast_1d, parameter[name[a]]]]]
variable[data] assign[=] call[name[np].ma.concatenate, parameter[name[data]]]
if <ast.UnaryOp object at 0x7da1b22ba4a0> begin[:]
variable[data] assign[=] call[name[np].asarray, parameter[name[data]]]
return[call[name[units].Quantity, parameter[name[data], name[dest]]]] | keyword[def] identifier[concatenate] ( identifier[arrs] , identifier[axis] = literal[int] ):
literal[string]
identifier[dest] = literal[string]
keyword[for] identifier[a] keyword[in] identifier[arrs] :
keyword[if] identifier[hasattr] ( identifier[a] , literal[string] ):
identifier[dest] = identifier[a] . identifier[units]
keyword[break]
identifier[data] =[]
keyword[for] identifier[a] keyword[in] identifier[arrs] :
keyword[if] identifier[hasattr] ( identifier[a] , literal[string] ):
identifier[a] = identifier[a] . identifier[to] ( identifier[dest] ). identifier[magnitude]
identifier[data] . identifier[append] ( identifier[np] . identifier[atleast_1d] ( identifier[a] ))
identifier[data] = identifier[np] . identifier[ma] . identifier[concatenate] ( identifier[data] , identifier[axis] = identifier[axis] )
keyword[if] keyword[not] identifier[np] . identifier[any] ( identifier[data] . identifier[mask] ):
identifier[data] = identifier[np] . identifier[asarray] ( identifier[data] )
keyword[return] identifier[units] . identifier[Quantity] ( identifier[data] , identifier[dest] ) | def concatenate(arrs, axis=0):
"""Concatenate multiple values into a new unitized object.
This is essentially a unit-aware version of `numpy.concatenate`. All items
must be able to be converted to the same units. If an item has no units, it will be given
those of the rest of the collection, without conversion. The first units found in the
arguments is used as the final output units.
Parameters
----------
arrs : Sequence of arrays
The items to be joined together
axis : integer, optional
The array axis along which to join the arrays. Defaults to 0 (the first dimension)
Returns
-------
`pint.Quantity`
New container with the value passed in and units corresponding to the first item.
"""
dest = 'dimensionless'
for a in arrs:
if hasattr(a, 'units'):
dest = a.units
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']]
data = []
for a in arrs:
if hasattr(a, 'to'):
a = a.to(dest).magnitude # depends on [control=['if'], data=[]]
data.append(np.atleast_1d(a)) # depends on [control=['for'], data=['a']]
# Use masked array concatenate to ensure masks are preserved, but convert to an
# array if there are no masked values.
data = np.ma.concatenate(data, axis=axis)
if not np.any(data.mask):
data = np.asarray(data) # depends on [control=['if'], data=[]]
return units.Quantity(data, dest) |
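A short demo assuming the module's `units` global is a pint `UnitRegistry` (here a fresh one): mismatched units convert to the first units found, and unitless items adopt those units unconverted:

```python
import numpy as np
import pint

units = pint.UnitRegistry()  # assumption: same registry the module binds

a = np.array([1., 2.]) * units.meter
b = np.array([300., 400.]) * units.centimeter  # converted to meters
c = [7., 8.]                                   # unitless: adopts meters as-is
print(concatenate([a, b, c]))                  # [1. 2. 3. 4. 7. 8.] meter
```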
def from_file(filename):
"""Creates a new Spec object from a given file.
:param filename: The path to the spec file.
:return: A new Spec object.
"""
spec = Spec()
with open(filename, "r", encoding="utf-8") as f:
parse_context = {"current_subpackage": None}
for line in f:
spec, parse_context = _parse(spec, parse_context, line)
return spec | def function[from_file, parameter[filename]]:
constant[Creates a new Spec object from a given file.
:param filename: The path to the spec file.
:return: A new Spec object.
]
variable[spec] assign[=] call[name[Spec], parameter[]]
with call[name[open], parameter[name[filename], constant[r]]] begin[:]
variable[parse_context] assign[=] dictionary[[<ast.Constant object at 0x7da20c7c9fc0>], [<ast.Constant object at 0x7da20c7ca290>]]
for taget[name[line]] in starred[name[f]] begin[:]
<ast.Tuple object at 0x7da20c7cb400> assign[=] call[name[_parse], parameter[name[spec], name[parse_context], name[line]]]
return[name[spec]] | keyword[def] identifier[from_file] ( identifier[filename] ):
literal[string]
identifier[spec] = identifier[Spec] ()
keyword[with] identifier[open] ( identifier[filename] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] :
identifier[parse_context] ={ literal[string] : keyword[None] }
keyword[for] identifier[line] keyword[in] identifier[f] :
identifier[spec] , identifier[parse_context] = identifier[_parse] ( identifier[spec] , identifier[parse_context] , identifier[line] )
keyword[return] identifier[spec] | def from_file(filename):
"""Creates a new Spec object from a given file.
:param filename: The path to the spec file.
:return: A new Spec object.
"""
spec = Spec()
with open(filename, 'r', encoding='utf-8') as f:
parse_context = {'current_subpackage': None}
for line in f:
(spec, parse_context) = _parse(spec, parse_context, line) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']]
return spec |
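`Spec` and `_parse` are defined elsewhere in the module; a minimal sketch of the same accumulate-over-lines pattern, with a stub parser that only records section markers (an assumption, not the real spec-file grammar):

```python
class SpecSketch(object):
    def __init__(self):
        self.sections = []


def _parse_sketch(spec, parse_context, line):
    # stub: track only section markers such as %build or %package foo
    if line.startswith('%'):
        parse_context['current_subpackage'] = line.strip()
        spec.sections.append(line.strip())
    return spec, parse_context


spec, ctx = SpecSketch(), {'current_subpackage': None}
for line in ['Name: demo\n', '%build\n', 'make\n']:
    spec, ctx = _parse_sketch(spec, ctx, line)
print(spec.sections)   # -> ['%build']
```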
def all(self):
"""
Returns all the values joined together.
:return <int>
"""
out = 0
for key, value in self.items():
out |= value
return out | def function[all, parameter[self]]:
constant[
Returns all the values joined together.
:return <int>
]
variable[out] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da18f00d9f0>, <ast.Name object at 0x7da18f00ece0>]]] in starred[call[name[self].items, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da18f00c730>
return[name[out]] | keyword[def] identifier[all] ( identifier[self] ):
literal[string]
identifier[out] = literal[int]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[self] . identifier[items] ():
identifier[out] |= identifier[value]
keyword[return] identifier[out] | def all(self):
"""
Returns all the values joined together.
:return <int>
"""
out = 0
for (key, value) in self.items():
out |= value # depends on [control=['for'], data=[]]
return out |
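The container evidently maps names to bit flags, and `all()` ORs them together. A self-contained sketch with a hypothetical dict subclass hosting the same body:

```python
class FlagSet(dict):
    def all(self):
        # same body as above: OR every stored flag value together
        out = 0
        for key, value in self.items():
            out |= value
        return out


flags = FlagSet(read=0b001, write=0b010, execute=0b100)
print(bin(flags.all()))   # -> 0b111
```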
def delete_address_by_id(cls, address_id, **kwargs):
"""Delete Address
Delete an instance of Address by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_address_by_id(address_id, async=True)
>>> result = thread.get()
:param async bool
:param str address_id: ID of address to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_address_by_id_with_http_info(address_id, **kwargs)
else:
(data) = cls._delete_address_by_id_with_http_info(address_id, **kwargs)
return data | def function[delete_address_by_id, parameter[cls, address_id]]:
constant[Delete Address
Delete an instance of Address by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_address_by_id(address_id, async=True)
>>> result = thread.get()
:param async bool
:param str address_id: ID of address to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async]]] begin[:]
return[call[name[cls]._delete_address_by_id_with_http_info, parameter[name[address_id]]]] | keyword[def] identifier[delete_address_by_id] ( identifier[cls] , identifier[address_id] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[cls] . identifier[_delete_address_by_id_with_http_info] ( identifier[address_id] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[cls] . identifier[_delete_address_by_id_with_http_info] ( identifier[address_id] ,** identifier[kwargs] )
keyword[return] identifier[data] | def delete_address_by_id(cls, address_id, **kwargs):
"""Delete Address
Delete an instance of Address by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_address_by_id(address_id, async=True)
>>> result = thread.get()
:param async bool
:param str address_id: ID of address to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_address_by_id_with_http_info(address_id, **kwargs) # depends on [control=['if'], data=[]]
else:
data = cls._delete_address_by_id_with_http_info(address_id, **kwargs)
return data |
def set_result_type(self, result_type):
""" Sets 'result_type' parameter to specify what type of search \
results you would prefer to receive. The current default is “mixed.” \
Valid values include: \
- mixed: Include both popular and real time results \
- recent: return only the most recent results \
- popular: return only the most popular results \
:param result_type: A string containing one of \
the three valid result types
:raises: TwitterSearchException
"""
result_type = result_type.lower()
if result_type in ['mixed', 'recent', 'popular']:
self.arguments.update({'result_type': '%s' % result_type})
else:
raise TwitterSearchException(1003) | def function[set_result_type, parameter[self, result_type]]:
constant[ Sets 'result_type' parameter to specify what type of search results you would prefer to receive. The current default is “mixed.” Valid values include: - mixed: Include both popular and real time results - recent: return only the most recent results - popular: return only the most popular results
:param result_type: A string containing one of the three valid result types
:raises: TwitterSearchException
]
variable[result_type] assign[=] call[name[result_type].lower, parameter[]]
if compare[name[result_type] in list[[<ast.Constant object at 0x7da1b15286a0>, <ast.Constant object at 0x7da1b1529000>, <ast.Constant object at 0x7da1b1529f60>]]] begin[:]
call[name[self].arguments.update, parameter[dictionary[[<ast.Constant object at 0x7da1b15296f0>], [<ast.BinOp object at 0x7da1b152a7a0>]]]] | keyword[def] identifier[set_result_type] ( identifier[self] , identifier[result_type] ):
literal[string]
identifier[result_type] = identifier[result_type] . identifier[lower] ()
keyword[if] identifier[result_type] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[self] . identifier[arguments] . identifier[update] ({ literal[string] : literal[string] % identifier[result_type] })
keyword[else] :
keyword[raise] identifier[TwitterSearchException] ( literal[int] ) | def set_result_type(self, result_type):
""" Sets 'result_type' parameter to specify what type of search results you would prefer to receive. The current default is “mixed.” Valid values include: - mixed: Include both popular and real time results - recent: return only the most recent results - popular: return only the most popular results
:param result_type: A string containing one of the three valid result types
:raises: TwitterSearchException
"""
result_type = result_type.lower()
if result_type in ['mixed', 'recent', 'popular']:
self.arguments.update({'result_type': '%s' % result_type}) # depends on [control=['if'], data=['result_type']]
else:
raise TwitterSearchException(1003) |
def quaternion_conjugate(quaternion):
"""Return conjugate of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_conjugate(q0)
>>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])
True
"""
q = np.array(quaternion, dtype=np.float64, copy=True)
np.negative(q[1:], q[1:])
return q | def function[quaternion_conjugate, parameter[quaternion]]:
constant[Return conjugate of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_conjugate(q0)
>>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])
True
]
variable[q] assign[=] call[name[np].array, parameter[name[quaternion]]]
call[name[np].negative, parameter[call[name[q]][<ast.Slice object at 0x7da2054a4eb0>], call[name[q]][<ast.Slice object at 0x7da2054a6ec0>]]]
return[name[q]] | keyword[def] identifier[quaternion_conjugate] ( identifier[quaternion] ):
literal[string]
identifier[q] = identifier[np] . identifier[array] ( identifier[quaternion] , identifier[dtype] = identifier[np] . identifier[float64] , identifier[copy] = keyword[True] )
identifier[np] . identifier[negative] ( identifier[q] [ literal[int] :], identifier[q] [ literal[int] :])
keyword[return] identifier[q] | def quaternion_conjugate(quaternion):
"""Return conjugate of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_conjugate(q0)
>>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])
True
"""
q = np.array(quaternion, dtype=np.float64, copy=True)
np.negative(q[1:], q[1:])
return q |
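A quick check with a 90-degree rotation about z, using the scalar-first (w, x, y, z) ordering the doctest implies; the input is copied, and the norm is preserved:

```python
import numpy as np

q = np.array([np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)])
qc = quaternion_conjugate(q)
print(qc)                                                  # approx [ 0.7071 -0. -0. -0.7071]
print(np.allclose(np.linalg.norm(q), np.linalg.norm(qc)))  # True
print(q[3])                                                # original untouched (copy=True)
```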
def get_headers(data, extra_headers=None):
'''
    Takes the response data as well as any additional headers and returns a
    list of (name, value) header tuples suitable for passing to start_response()
'''
response_headers = {
'Content-Length': str(len(data)),
}
if extra_headers:
response_headers.update(extra_headers)
return list(response_headers.items()) | def function[get_headers, parameter[data, extra_headers]]:
constant[
Takes the response data as well as any additional headers and returns a
tuple of tuples of headers suitable for passing to start_response()
]
variable[response_headers] assign[=] dictionary[[<ast.Constant object at 0x7da18eb57580>], [<ast.Call object at 0x7da18eb55d20>]]
if name[extra_headers] begin[:]
call[name[response_headers].update, parameter[name[extra_headers]]]
return[call[name[list], parameter[call[name[response_headers].items, parameter[]]]]] | keyword[def] identifier[get_headers] ( identifier[data] , identifier[extra_headers] = keyword[None] ):
literal[string]
identifier[response_headers] ={
literal[string] : identifier[str] ( identifier[len] ( identifier[data] )),
}
keyword[if] identifier[extra_headers] :
identifier[response_headers] . identifier[update] ( identifier[extra_headers] )
keyword[return] identifier[list] ( identifier[response_headers] . identifier[items] ()) | def get_headers(data, extra_headers=None):
"""
Takes the response data as well as any additional headers and returns a
    list of (name, value) header tuples suitable for passing to start_response()
"""
response_headers = {'Content-Length': str(len(data))}
if extra_headers:
response_headers.update(extra_headers) # depends on [control=['if'], data=[]]
return list(response_headers.items()) |
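A tiny WSGI-flavored demo; dict insertion order (Python 3.7+) keeps `Content-Length` first:

```python
body = b'{"ok": true}'
headers = get_headers(body, extra_headers={'Content-Type': 'application/json'})
print(headers)
# -> [('Content-Length', '12'), ('Content-Type', 'application/json')]
# A WSGI app could pass this straight to start_response('200 OK', headers).
```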
def check(self):
"""
Determine how long until the next scheduled time for a Task.
Returns the number of seconds until the next scheduled time or zero
if the task needs to be run immediately.
If it's an hourly task and it's never been run, run it now.
If it's a daily task and it's never been run and the hour is right, run it now.
"""
boto.log.info('checking Task[%s]-now=%s, last=%s' % (self.name, self.now, self.last_executed))
if self.hourly and not self.last_executed:
return 0
if self.daily and not self.last_executed:
if int(self.hour) == self.now.hour:
return 0
else:
return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60
delta = self.now - self.last_executed
if self.hourly:
if delta.seconds >= 60*60:
return 0
else:
return 60*60 - delta.seconds
else:
if int(self.hour) == self.now.hour:
if delta.days >= 1:
return 0
else:
return 82800 # 23 hours, just to be safe
else:
return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60 | def function[check, parameter[self]]:
constant[
Determine how long until the next scheduled time for a Task.
Returns the number of seconds until the next scheduled time or zero
if the task needs to be run immediately.
If it's an hourly task and it's never been run, run it now.
If it's a daily task and it's never been run and the hour is right, run it now.
]
call[name[boto].log.info, parameter[binary_operation[constant[checking Task[%s]-now=%s, last=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b265a920>, <ast.Attribute object at 0x7da1b2658b80>, <ast.Attribute object at 0x7da1b265a860>]]]]]
if <ast.BoolOp object at 0x7da1b265bf40> begin[:]
return[constant[0]]
if <ast.BoolOp object at 0x7da1b2658040> begin[:]
if compare[call[name[int], parameter[name[self].hour]] equal[==] name[self].now.hour] begin[:]
return[constant[0]]
variable[delta] assign[=] binary_operation[name[self].now - name[self].last_executed]
if name[self].hourly begin[:]
if compare[name[delta].seconds greater_or_equal[>=] binary_operation[constant[60] * constant[60]]] begin[:]
return[constant[0]] | keyword[def] identifier[check] ( identifier[self] ):
literal[string]
identifier[boto] . identifier[log] . identifier[info] ( literal[string] %( identifier[self] . identifier[name] , identifier[self] . identifier[now] , identifier[self] . identifier[last_executed] ))
keyword[if] identifier[self] . identifier[hourly] keyword[and] keyword[not] identifier[self] . identifier[last_executed] :
keyword[return] literal[int]
keyword[if] identifier[self] . identifier[daily] keyword[and] keyword[not] identifier[self] . identifier[last_executed] :
keyword[if] identifier[int] ( identifier[self] . identifier[hour] )== identifier[self] . identifier[now] . identifier[hour] :
keyword[return] literal[int]
keyword[else] :
keyword[return] identifier[max] (( identifier[int] ( identifier[self] . identifier[hour] )- identifier[self] . identifier[now] . identifier[hour] ),( identifier[self] . identifier[now] . identifier[hour] - identifier[int] ( identifier[self] . identifier[hour] )))* literal[int] * literal[int]
identifier[delta] = identifier[self] . identifier[now] - identifier[self] . identifier[last_executed]
keyword[if] identifier[self] . identifier[hourly] :
keyword[if] identifier[delta] . identifier[seconds] >= literal[int] * literal[int] :
keyword[return] literal[int]
keyword[else] :
keyword[return] literal[int] * literal[int] - identifier[delta] . identifier[seconds]
keyword[else] :
keyword[if] identifier[int] ( identifier[self] . identifier[hour] )== identifier[self] . identifier[now] . identifier[hour] :
keyword[if] identifier[delta] . identifier[days] >= literal[int] :
keyword[return] literal[int]
keyword[else] :
keyword[return] literal[int]
keyword[else] :
keyword[return] identifier[max] (( identifier[int] ( identifier[self] . identifier[hour] )- identifier[self] . identifier[now] . identifier[hour] ),( identifier[self] . identifier[now] . identifier[hour] - identifier[int] ( identifier[self] . identifier[hour] )))* literal[int] * literal[int] | def check(self):
"""
Determine how long until the next scheduled time for a Task.
Returns the number of seconds until the next scheduled time or zero
if the task needs to be run immediately.
If it's an hourly task and it's never been run, run it now.
If it's a daily task and it's never been run and the hour is right, run it now.
"""
boto.log.info('checking Task[%s]-now=%s, last=%s' % (self.name, self.now, self.last_executed))
if self.hourly and (not self.last_executed):
return 0 # depends on [control=['if'], data=[]]
if self.daily and (not self.last_executed):
if int(self.hour) == self.now.hour:
return 0 # depends on [control=['if'], data=[]]
else:
return max(int(self.hour) - self.now.hour, self.now.hour - int(self.hour)) * 60 * 60 # depends on [control=['if'], data=[]]
delta = self.now - self.last_executed
if self.hourly:
if delta.seconds >= 60 * 60:
return 0 # depends on [control=['if'], data=[]]
else:
return 60 * 60 - delta.seconds # depends on [control=['if'], data=[]]
elif int(self.hour) == self.now.hour:
if delta.days >= 1:
return 0 # depends on [control=['if'], data=[]]
else:
return 82800 # 23 hours, just to be safe # depends on [control=['if'], data=[]]
else:
return max(int(self.hour) - self.now.hour, self.now.hour - int(self.hour)) * 60 * 60 |
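The hourly branch reduces to simple timedelta arithmetic; a stand-alone sketch of just that computation, outside the Task class:

```python
import datetime

now = datetime.datetime(2024, 1, 1, 12, 25)
last_executed = datetime.datetime(2024, 1, 1, 12, 0)
delta = now - last_executed

# same rule as the hourly branch above: run now if an hour has passed,
# otherwise wait out the remainder of the hour
wait = 0 if delta.seconds >= 60 * 60 else 60 * 60 - delta.seconds
print(wait)   # -> 2100 seconds (35 minutes left in the hour)
```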
def recursive_copy(source, dest):
'''
Recursively copy the source directory to the destination,
    leaving in place any files that the source does not explicitly overwrite.
(identical to cp -r on a unix machine)
'''
for root, _, files in salt.utils.path.os_walk(source):
path_from_source = root.replace(source, '').lstrip(os.sep)
target_directory = os.path.join(dest, path_from_source)
if not os.path.exists(target_directory):
os.makedirs(target_directory)
for name in files:
file_path_from_source = os.path.join(source, path_from_source, name)
target_path = os.path.join(target_directory, name)
shutil.copyfile(file_path_from_source, target_path) | def function[recursive_copy, parameter[source, dest]]:
constant[
Recursively copy the source directory to the destination,
leaving files with the source does not explicitly overwrite.
(identical to cp -r on a unix machine)
]
for taget[tuple[[<ast.Name object at 0x7da1b1c37430>, <ast.Name object at 0x7da1b1c36b60>, <ast.Name object at 0x7da1b1c34e80>]]] in starred[call[name[salt].utils.path.os_walk, parameter[name[source]]]] begin[:]
variable[path_from_source] assign[=] call[call[name[root].replace, parameter[name[source], constant[]]].lstrip, parameter[name[os].sep]]
variable[target_directory] assign[=] call[name[os].path.join, parameter[name[dest], name[path_from_source]]]
if <ast.UnaryOp object at 0x7da1b1c36b00> begin[:]
call[name[os].makedirs, parameter[name[target_directory]]]
for taget[name[name]] in starred[name[files]] begin[:]
variable[file_path_from_source] assign[=] call[name[os].path.join, parameter[name[source], name[path_from_source], name[name]]]
variable[target_path] assign[=] call[name[os].path.join, parameter[name[target_directory], name[name]]]
call[name[shutil].copyfile, parameter[name[file_path_from_source], name[target_path]]] | keyword[def] identifier[recursive_copy] ( identifier[source] , identifier[dest] ):
literal[string]
keyword[for] identifier[root] , identifier[_] , identifier[files] keyword[in] identifier[salt] . identifier[utils] . identifier[path] . identifier[os_walk] ( identifier[source] ):
identifier[path_from_source] = identifier[root] . identifier[replace] ( identifier[source] , literal[string] ). identifier[lstrip] ( identifier[os] . identifier[sep] )
identifier[target_directory] = identifier[os] . identifier[path] . identifier[join] ( identifier[dest] , identifier[path_from_source] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[target_directory] ):
identifier[os] . identifier[makedirs] ( identifier[target_directory] )
keyword[for] identifier[name] keyword[in] identifier[files] :
identifier[file_path_from_source] = identifier[os] . identifier[path] . identifier[join] ( identifier[source] , identifier[path_from_source] , identifier[name] )
identifier[target_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[target_directory] , identifier[name] )
identifier[shutil] . identifier[copyfile] ( identifier[file_path_from_source] , identifier[target_path] ) | def recursive_copy(source, dest):
"""
Recursively copy the source directory to the destination,
    leaving in place any files that the source does not explicitly overwrite.
(identical to cp -r on a unix machine)
"""
for (root, _, files) in salt.utils.path.os_walk(source):
path_from_source = root.replace(source, '').lstrip(os.sep)
target_directory = os.path.join(dest, path_from_source)
if not os.path.exists(target_directory):
os.makedirs(target_directory) # depends on [control=['if'], data=[]]
for name in files:
file_path_from_source = os.path.join(source, path_from_source, name)
target_path = os.path.join(target_directory, name)
shutil.copyfile(file_path_from_source, target_path) # depends on [control=['for'], data=['name']] # depends on [control=['for'], data=[]] |
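salt.utils.path.os_walk used above behaves like the standard library's os.walk, so a dependency-free sketch of the same copy-without-deleting traversal can be written with stdlib names only (recursive_copy_stdlib is an illustrative name):

import os
import shutil

def recursive_copy_stdlib(source, dest):
    # Walk the source tree, mirroring each directory and copying each file
    # into the destination without removing anything already there.
    for root, _, files in os.walk(source):
        rel = os.path.relpath(root, source)
        target_directory = dest if rel == '.' else os.path.join(dest, rel)
        os.makedirs(target_directory, exist_ok=True)
        for name in files:
            shutil.copyfile(os.path.join(root, name),
                            os.path.join(target_directory, name))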
def save(self, validate=False):
"""
Save the current values in all the widgets back to the persistent data storage.
:param validate: Whether to validate the data before saving.
Calling this while setting the `data` field (e.g. in a widget callback) will have no
effect.
        When validating data, it can throw an InvalidFields exception listing any fields that failed validation.
"""
# Don't allow this function to be called if we are already updating the
# data for the form.
if self._in_call:
return
# We're clear - pass on to all layouts/widgets.
invalid = []
for layout in self._layouts:
try:
layout.save(validate=validate)
except InvalidFields as exc:
invalid.extend(exc.fields)
# Check for any bad data and raise exception if needed.
if len(invalid) > 0:
raise InvalidFields(invalid) | def function[save, parameter[self, validate]]:
constant[
Save the current values in all the widgets back to the persistent data storage.
:param validate: Whether to validate the data before saving.
Calling this while setting the `data` field (e.g. in a widget callback) will have no
effect.
        When validating data, it can throw an InvalidFields exception listing any fields that failed validation.
]
if name[self]._in_call begin[:]
return[None]
variable[invalid] assign[=] list[[]]
for taget[name[layout]] in starred[name[self]._layouts] begin[:]
<ast.Try object at 0x7da1b1d4e170>
if compare[call[name[len], parameter[name[invalid]]] greater[>] constant[0]] begin[:]
<ast.Raise object at 0x7da1b1d4d090> | keyword[def] identifier[save] ( identifier[self] , identifier[validate] = keyword[False] ):
literal[string]
keyword[if] identifier[self] . identifier[_in_call] :
keyword[return]
identifier[invalid] =[]
keyword[for] identifier[layout] keyword[in] identifier[self] . identifier[_layouts] :
keyword[try] :
identifier[layout] . identifier[save] ( identifier[validate] = identifier[validate] )
keyword[except] identifier[InvalidFields] keyword[as] identifier[exc] :
identifier[invalid] . identifier[extend] ( identifier[exc] . identifier[fields] )
keyword[if] identifier[len] ( identifier[invalid] )> literal[int] :
keyword[raise] identifier[InvalidFields] ( identifier[invalid] ) | def save(self, validate=False):
"""
Save the current values in all the widgets back to the persistent data storage.
:param validate: Whether to validate the data before saving.
Calling this while setting the `data` field (e.g. in a widget callback) will have no
effect.
        When validating data, it can throw an InvalidFields exception listing any fields that failed validation.
"""
# Don't allow this function to be called if we are already updating the
# data for the form.
if self._in_call:
return # depends on [control=['if'], data=[]]
# We're clear - pass on to all layouts/widgets.
invalid = []
for layout in self._layouts:
try:
layout.save(validate=validate) # depends on [control=['try'], data=[]]
except InvalidFields as exc:
invalid.extend(exc.fields) # depends on [control=['except'], data=['exc']] # depends on [control=['for'], data=['layout']]
# Check for any bad data and raise exception if needed.
if len(invalid) > 0:
raise InvalidFields(invalid) # depends on [control=['if'], data=[]] |
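A typical caller runs save() with validation enabled and reports the offending widgets. In this sketch, frame is a hypothetical instance of the class above, and InvalidFields is assumed to expose the aggregated fields list, as the code suggests:

try:
    frame.save(validate=True)   # frame: hypothetical instance of the class above
except InvalidFields as exc:
    print('These fields failed validation:', exc.fields)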
def compcor( boldImage, ncompcor=4, quantile=0.975, mask=None, filter_type=False, degree=2 ):
"""
Compute noise components from the input image
ANTsR function: `compcor`
this is adapted from nipy code https://github.com/nipy/nipype/blob/e29ac95fc0fc00fedbcaa0adaf29d5878408ca7c/nipype/algorithms/confounds.py
Arguments
---------
boldImage: input time series image
ncompcor: number of noise components to return
quantile: quantile defining high-variance
mask: mask defining brain or specific tissues
    filter_type: type of filter to apply to time series before computing
noise components.
'polynomial' - Legendre polynomial basis
False - None (mean-removal only)
degree: order of polynomial used to remove trends from the timeseries
Returns
-------
dictionary containing:
components: a numpy array
basis: a numpy array containing the (non-constant) filter regressors
Example
-------
>>> cc = ants.compcor( ants.image_read(ants.get_ants_data("ch2")) )
"""
def compute_tSTD(M, quantile, x=0, axis=0):
stdM = np.std(M, axis=axis)
# set bad values to x
stdM[stdM == 0] = x
stdM[np.isnan(stdM)] = x
tt = round( quantile*100 )
threshold_std = np.percentile( stdM, tt )
# threshold_std = quantile( stdM, quantile )
return { 'tSTD': stdM, 'threshold_std': threshold_std}
if mask is None:
temp = utils.slice_image( boldImage, axis=boldImage.dimension-1, idx=0 )
mask = utils.get_mask( temp )
imagematrix = core.timeseries_to_matrix( boldImage, mask )
temp = compute_tSTD( imagematrix, quantile, 0 )
tsnrmask = core.make_image( mask, temp['tSTD'] )
tsnrmask = utils.threshold_image( tsnrmask, temp['threshold_std'], temp['tSTD'].max() )
M = core.timeseries_to_matrix( boldImage, tsnrmask )
components = None
basis = np.array([])
if filter_type in ('polynomial', False):
M, basis = regress_poly(degree, M)
# M = M / compute_tSTD(M, 1.)['tSTD']
# "The covariance matrix C = MMT was constructed and decomposed into its
# principal components using a singular value decomposition."
u, _, _ = linalg.svd(M, full_matrices=False)
if components is None:
components = u[:, :ncompcor]
else:
components = np.hstack((components, u[:, :ncompcor]))
if components is None and ncompcor > 0:
raise ValueError('No components found')
return { 'components': components, 'basis': basis } | def function[compcor, parameter[boldImage, ncompcor, quantile, mask, filter_type, degree]]:
constant[
Compute noise components from the input image
ANTsR function: `compcor`
this is adapted from nipy code https://github.com/nipy/nipype/blob/e29ac95fc0fc00fedbcaa0adaf29d5878408ca7c/nipype/algorithms/confounds.py
Arguments
---------
boldImage: input time series image
ncompcor: number of noise components to return
quantile: quantile defining high-variance
mask: mask defining brain or specific tissues
    filter_type: type of filter to apply to time series before computing
noise components.
'polynomial' - Legendre polynomial basis
False - None (mean-removal only)
degree: order of polynomial used to remove trends from the timeseries
Returns
-------
dictionary containing:
components: a numpy array
basis: a numpy array containing the (non-constant) filter regressors
Example
-------
>>> cc = ants.compcor( ants.image_read(ants.get_ants_data("ch2")) )
]
def function[compute_tSTD, parameter[M, quantile, x, axis]]:
variable[stdM] assign[=] call[name[np].std, parameter[name[M]]]
call[name[stdM]][compare[name[stdM] equal[==] constant[0]]] assign[=] name[x]
call[name[stdM]][call[name[np].isnan, parameter[name[stdM]]]] assign[=] name[x]
variable[tt] assign[=] call[name[round], parameter[binary_operation[name[quantile] * constant[100]]]]
variable[threshold_std] assign[=] call[name[np].percentile, parameter[name[stdM], name[tt]]]
return[dictionary[[<ast.Constant object at 0x7da2043441f0>, <ast.Constant object at 0x7da204347070>], [<ast.Name object at 0x7da2043457b0>, <ast.Name object at 0x7da2043458d0>]]]
if compare[name[mask] is constant[None]] begin[:]
variable[temp] assign[=] call[name[utils].slice_image, parameter[name[boldImage]]]
variable[mask] assign[=] call[name[utils].get_mask, parameter[name[temp]]]
variable[imagematrix] assign[=] call[name[core].timeseries_to_matrix, parameter[name[boldImage], name[mask]]]
variable[temp] assign[=] call[name[compute_tSTD], parameter[name[imagematrix], name[quantile], constant[0]]]
variable[tsnrmask] assign[=] call[name[core].make_image, parameter[name[mask], call[name[temp]][constant[tSTD]]]]
variable[tsnrmask] assign[=] call[name[utils].threshold_image, parameter[name[tsnrmask], call[name[temp]][constant[threshold_std]], call[call[name[temp]][constant[tSTD]].max, parameter[]]]]
variable[M] assign[=] call[name[core].timeseries_to_matrix, parameter[name[boldImage], name[tsnrmask]]]
variable[components] assign[=] constant[None]
variable[basis] assign[=] call[name[np].array, parameter[list[[]]]]
if compare[name[filter_type] in tuple[[<ast.Constant object at 0x7da2041d8430>, <ast.Constant object at 0x7da2041dafe0>]]] begin[:]
<ast.Tuple object at 0x7da2041d9c60> assign[=] call[name[regress_poly], parameter[name[degree], name[M]]]
<ast.Tuple object at 0x7da2041db0d0> assign[=] call[name[linalg].svd, parameter[name[M]]]
if compare[name[components] is constant[None]] begin[:]
variable[components] assign[=] call[name[u]][tuple[[<ast.Slice object at 0x7da20c76d180>, <ast.Slice object at 0x7da20c76ff40>]]]
if <ast.BoolOp object at 0x7da20c76fd90> begin[:]
<ast.Raise object at 0x7da20c76e9b0>
return[dictionary[[<ast.Constant object at 0x7da20c76c430>, <ast.Constant object at 0x7da20c76d060>], [<ast.Name object at 0x7da20c76fa90>, <ast.Name object at 0x7da20c76fd60>]]] | keyword[def] identifier[compcor] ( identifier[boldImage] , identifier[ncompcor] = literal[int] , identifier[quantile] = literal[int] , identifier[mask] = keyword[None] , identifier[filter_type] = keyword[False] , identifier[degree] = literal[int] ):
literal[string]
keyword[def] identifier[compute_tSTD] ( identifier[M] , identifier[quantile] , identifier[x] = literal[int] , identifier[axis] = literal[int] ):
identifier[stdM] = identifier[np] . identifier[std] ( identifier[M] , identifier[axis] = identifier[axis] )
identifier[stdM] [ identifier[stdM] == literal[int] ]= identifier[x]
identifier[stdM] [ identifier[np] . identifier[isnan] ( identifier[stdM] )]= identifier[x]
identifier[tt] = identifier[round] ( identifier[quantile] * literal[int] )
identifier[threshold_std] = identifier[np] . identifier[percentile] ( identifier[stdM] , identifier[tt] )
keyword[return] { literal[string] : identifier[stdM] , literal[string] : identifier[threshold_std] }
keyword[if] identifier[mask] keyword[is] keyword[None] :
identifier[temp] = identifier[utils] . identifier[slice_image] ( identifier[boldImage] , identifier[axis] = identifier[boldImage] . identifier[dimension] - literal[int] , identifier[idx] = literal[int] )
identifier[mask] = identifier[utils] . identifier[get_mask] ( identifier[temp] )
identifier[imagematrix] = identifier[core] . identifier[timeseries_to_matrix] ( identifier[boldImage] , identifier[mask] )
identifier[temp] = identifier[compute_tSTD] ( identifier[imagematrix] , identifier[quantile] , literal[int] )
identifier[tsnrmask] = identifier[core] . identifier[make_image] ( identifier[mask] , identifier[temp] [ literal[string] ])
identifier[tsnrmask] = identifier[utils] . identifier[threshold_image] ( identifier[tsnrmask] , identifier[temp] [ literal[string] ], identifier[temp] [ literal[string] ]. identifier[max] ())
identifier[M] = identifier[core] . identifier[timeseries_to_matrix] ( identifier[boldImage] , identifier[tsnrmask] )
identifier[components] = keyword[None]
identifier[basis] = identifier[np] . identifier[array] ([])
keyword[if] identifier[filter_type] keyword[in] ( literal[string] , keyword[False] ):
identifier[M] , identifier[basis] = identifier[regress_poly] ( identifier[degree] , identifier[M] )
identifier[u] , identifier[_] , identifier[_] = identifier[linalg] . identifier[svd] ( identifier[M] , identifier[full_matrices] = keyword[False] )
keyword[if] identifier[components] keyword[is] keyword[None] :
identifier[components] = identifier[u] [:,: identifier[ncompcor] ]
keyword[else] :
identifier[components] = identifier[np] . identifier[hstack] (( identifier[components] , identifier[u] [:,: identifier[ncompcor] ]))
keyword[if] identifier[components] keyword[is] keyword[None] keyword[and] identifier[ncompcor] > literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] { literal[string] : identifier[components] , literal[string] : identifier[basis] } | def compcor(boldImage, ncompcor=4, quantile=0.975, mask=None, filter_type=False, degree=2):
"""
Compute noise components from the input image
ANTsR function: `compcor`
this is adapted from nipy code https://github.com/nipy/nipype/blob/e29ac95fc0fc00fedbcaa0adaf29d5878408ca7c/nipype/algorithms/confounds.py
Arguments
---------
boldImage: input time series image
ncompcor: number of noise components to return
quantile: quantile defining high-variance
mask: mask defining brain or specific tissues
    filter_type: type of filter to apply to time series before computing
noise components.
'polynomial' - Legendre polynomial basis
False - None (mean-removal only)
degree: order of polynomial used to remove trends from the timeseries
Returns
-------
dictionary containing:
components: a numpy array
basis: a numpy array containing the (non-constant) filter regressors
Example
-------
>>> cc = ants.compcor( ants.image_read(ants.get_ants_data("ch2")) )
"""
def compute_tSTD(M, quantile, x=0, axis=0):
stdM = np.std(M, axis=axis)
# set bad values to x
stdM[stdM == 0] = x
stdM[np.isnan(stdM)] = x
tt = round(quantile * 100)
threshold_std = np.percentile(stdM, tt)
# threshold_std = quantile( stdM, quantile )
return {'tSTD': stdM, 'threshold_std': threshold_std}
if mask is None:
temp = utils.slice_image(boldImage, axis=boldImage.dimension - 1, idx=0)
mask = utils.get_mask(temp) # depends on [control=['if'], data=['mask']]
imagematrix = core.timeseries_to_matrix(boldImage, mask)
temp = compute_tSTD(imagematrix, quantile, 0)
tsnrmask = core.make_image(mask, temp['tSTD'])
tsnrmask = utils.threshold_image(tsnrmask, temp['threshold_std'], temp['tSTD'].max())
M = core.timeseries_to_matrix(boldImage, tsnrmask)
components = None
basis = np.array([])
if filter_type in ('polynomial', False):
(M, basis) = regress_poly(degree, M) # depends on [control=['if'], data=[]]
# M = M / compute_tSTD(M, 1.)['tSTD']
# "The covariance matrix C = MMT was constructed and decomposed into its
# principal components using a singular value decomposition."
(u, _, _) = linalg.svd(M, full_matrices=False)
if components is None:
components = u[:, :ncompcor] # depends on [control=['if'], data=['components']]
else:
components = np.hstack((components, u[:, :ncompcor]))
if components is None and ncompcor > 0:
raise ValueError('No components found') # depends on [control=['if'], data=[]]
return {'components': components, 'basis': basis} |
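compcor relies on regress_poly, which is not included in this snippet; in the nipype code it is adapted from, that helper detrends each column of M with a Legendre polynomial basis and also returns the non-constant regressors. A minimal sketch under that assumption (regress_poly_sketch is an illustrative name):

import numpy as np
from numpy.polynomial import legendre

def regress_poly_sketch(degree, M):
    # Legendre polynomials P_0..P_degree evaluated on [-1, 1], one per column.
    timepoints = M.shape[0]
    x = np.linspace(-1.0, 1.0, timepoints)
    basis = np.column_stack([legendre.legval(x, [0] * i + [1])
                             for i in range(degree + 1)])
    # Least-squares fit, then subtract the fitted trend from every column of M.
    beta = np.linalg.lstsq(basis, M, rcond=None)[0]
    residuals = M - basis @ beta
    return residuals, basis[:, 1:]  # drop the constant column, per the docstring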
def pay_cost(self, source, amount: int) -> int:
"""
        Make player pay `amount` mana.
Returns how much mana is spent, after temporary mana adjustments.
"""
if self.spells_cost_health and source.type == CardType.SPELL:
self.log("%s spells cost %i health", self, amount)
self.game.queue_actions(self, [Hit(self.hero, amount)])
return amount
if self.temp_mana:
# Coin, Innervate etc
used_temp = min(self.temp_mana, amount)
amount -= used_temp
self.temp_mana -= used_temp
self.log("%s pays %i mana", self, amount)
self.used_mana += amount
return amount | def function[pay_cost, parameter[self, source, amount]]:
constant[
Make player pay amount mana.
Returns how much mana is spent, after temporary mana adjustments.
]
if <ast.BoolOp object at 0x7da1b08e7340> begin[:]
call[name[self].log, parameter[constant[%s spells cost %i health], name[self], name[amount]]]
call[name[self].game.queue_actions, parameter[name[self], list[[<ast.Call object at 0x7da1b088f550>]]]]
return[name[amount]]
if name[self].temp_mana begin[:]
variable[used_temp] assign[=] call[name[min], parameter[name[self].temp_mana, name[amount]]]
<ast.AugAssign object at 0x7da1b074df00>
<ast.AugAssign object at 0x7da1b074e500>
call[name[self].log, parameter[constant[%s pays %i mana], name[self], name[amount]]]
<ast.AugAssign object at 0x7da1b074fc70>
return[name[amount]] | keyword[def] identifier[pay_cost] ( identifier[self] , identifier[source] , identifier[amount] : identifier[int] )-> identifier[int] :
literal[string]
keyword[if] identifier[self] . identifier[spells_cost_health] keyword[and] identifier[source] . identifier[type] == identifier[CardType] . identifier[SPELL] :
identifier[self] . identifier[log] ( literal[string] , identifier[self] , identifier[amount] )
identifier[self] . identifier[game] . identifier[queue_actions] ( identifier[self] ,[ identifier[Hit] ( identifier[self] . identifier[hero] , identifier[amount] )])
keyword[return] identifier[amount]
keyword[if] identifier[self] . identifier[temp_mana] :
identifier[used_temp] = identifier[min] ( identifier[self] . identifier[temp_mana] , identifier[amount] )
identifier[amount] -= identifier[used_temp]
identifier[self] . identifier[temp_mana] -= identifier[used_temp]
identifier[self] . identifier[log] ( literal[string] , identifier[self] , identifier[amount] )
identifier[self] . identifier[used_mana] += identifier[amount]
keyword[return] identifier[amount] | def pay_cost(self, source, amount: int) -> int:
"""
        Make player pay `amount` mana.
Returns how much mana is spent, after temporary mana adjustments.
"""
if self.spells_cost_health and source.type == CardType.SPELL:
self.log('%s spells cost %i health', self, amount)
self.game.queue_actions(self, [Hit(self.hero, amount)])
return amount # depends on [control=['if'], data=[]]
if self.temp_mana: # Coin, Innervate etc
used_temp = min(self.temp_mana, amount)
amount -= used_temp
self.temp_mana -= used_temp # depends on [control=['if'], data=[]]
self.log('%s pays %i mana', self, amount)
self.used_mana += amount
return amount |
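The temporary-mana branch is easiest to follow with concrete numbers: paying a 4-cost card while holding one temporary mana crystal (The Coin, say) leaves 3 mana to pay from the normal pool.

temp_mana, amount = 1, 4
used_temp = min(temp_mana, amount)  # 1: spend the temporary crystal first
amount -= used_temp                 # 3 mana still owed from the regular pool
temp_mana -= used_temp              # 0 temporary mana left
print(amount, temp_mana)            # 3 0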
def timeit(func):
'''
    Measure and log a function's execution time.
@timeit
def test():
time.sleep(1)
'''
def wapper(*args, **kwargs):
_start = time.time()
retval = func(*args, **kwargs)
_end = time.time()
logger.info('function %s() used : %.6f s' % (func.__name__, _end - _start))
return retval
return wapper | def function[timeit, parameter[func]]:
constant[
    Measure and log a function's execution time.
@timeit
def test():
time.sleep(1)
]
def function[wapper, parameter[]]:
variable[_start] assign[=] call[name[time].time, parameter[]]
variable[retval] assign[=] call[name[func], parameter[<ast.Starred object at 0x7da2044c1a80>]]
variable[_end] assign[=] call[name[time].time, parameter[]]
call[name[logger].info, parameter[binary_operation[constant[function %s() used : %.6f s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da2044c34c0>, <ast.BinOp object at 0x7da2044c38e0>]]]]]
return[name[retval]]
return[name[wapper]] | keyword[def] identifier[timeit] ( identifier[func] ):
literal[string]
keyword[def] identifier[wapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[_start] = identifier[time] . identifier[time] ()
identifier[retval] = identifier[func] (* identifier[args] ,** identifier[kwargs] )
identifier[_end] = identifier[time] . identifier[time] ()
identifier[logger] . identifier[info] ( literal[string] %( identifier[func] . identifier[__name__] , identifier[_end] - identifier[_start] ))
keyword[return] identifier[retval]
keyword[return] identifier[wapper] | def timeit(func):
"""
    Measure and log a function's execution time.
@timeit
def test():
time.sleep(1)
"""
def wapper(*args, **kwargs):
_start = time.time()
retval = func(*args, **kwargs)
_end = time.time()
logger.info('function %s() used : %.6f s' % (func.__name__, _end - _start))
return retval
return wapper |
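Applying the decorator is a one-liner; this usage sketch assumes logger is the module-level logging.Logger that the wrapper in timeit references:

import logging
import time

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

@timeit
def test():
    time.sleep(1)

test()  # logs something like: function test() used : 1.000123 s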
def _harmonic_number(x):
"""Compute the harmonic number from its analytic continuation.
Derivation from [here](
https://en.wikipedia.org/wiki/Digamma_function#Relation_to_harmonic_numbers)
and [Euler's constant](
https://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant).
Args:
x: input float.
Returns:
z: The analytic continuation of the harmonic number for the input.
"""
one = tf.ones([], dtype=x.dtype)
return tf.math.digamma(x + one) - tf.math.digamma(one) | def function[_harmonic_number, parameter[x]]:
constant[Compute the harmonic number from its analytic continuation.
Derivation from [here](
https://en.wikipedia.org/wiki/Digamma_function#Relation_to_harmonic_numbers)
and [Euler's constant](
https://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant).
Args:
x: input float.
Returns:
z: The analytic continuation of the harmonic number for the input.
]
variable[one] assign[=] call[name[tf].ones, parameter[list[[]]]]
return[binary_operation[call[name[tf].math.digamma, parameter[binary_operation[name[x] + name[one]]]] - call[name[tf].math.digamma, parameter[name[one]]]]] | keyword[def] identifier[_harmonic_number] ( identifier[x] ):
literal[string]
identifier[one] = identifier[tf] . identifier[ones] ([], identifier[dtype] = identifier[x] . identifier[dtype] )
keyword[return] identifier[tf] . identifier[math] . identifier[digamma] ( identifier[x] + identifier[one] )- identifier[tf] . identifier[math] . identifier[digamma] ( identifier[one] ) | def _harmonic_number(x):
"""Compute the harmonic number from its analytic continuation.
Derivation from [here](
https://en.wikipedia.org/wiki/Digamma_function#Relation_to_harmonic_numbers)
and [Euler's constant](
https://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant).
Args:
x: input float.
Returns:
z: The analytic continuation of the harmonic number for the input.
"""
one = tf.ones([], dtype=x.dtype)
return tf.math.digamma(x + one) - tf.math.digamma(one) |
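At integer n, digamma(n + 1) - digamma(1) equals the n-th harmonic number, so the continuation can be sanity-checked against a plain partial sum (assuming TensorFlow 2.x eager execution):

import tensorflow as tf

n = 10
analytic = _harmonic_number(tf.constant(float(n)))
direct = tf.reduce_sum(1.0 / tf.range(1.0, n + 1.0))  # 1 + 1/2 + ... + 1/10
print(float(analytic), float(direct))  # both ~2.9289683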
def _compute_and_transfer_to_final_run(self, process_name, start_timeperiod, end_timeperiod, job_record):
""" method computes new unit_of_work and transfers the job to STATE_FINAL_RUN
it also shares _fuzzy_ DuplicateKeyError logic from _compute_and_transfer_to_progress method"""
source_collection_name = context.process_context[process_name].source
start_id = self.ds.highest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)
end_id = self.ds.lowest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)
uow, transfer_to_final = self.insert_and_publish_uow(job_record, start_id, end_id)
self.update_job(job_record, uow, job.STATE_FINAL_RUN)
if transfer_to_final:
self._process_state_final_run(job_record) | def function[_compute_and_transfer_to_final_run, parameter[self, process_name, start_timeperiod, end_timeperiod, job_record]]:
constant[ method computes new unit_of_work and transfers the job to STATE_FINAL_RUN
it also shares _fuzzy_ DuplicateKeyError logic from _compute_and_transfer_to_progress method]
variable[source_collection_name] assign[=] call[name[context].process_context][name[process_name]].source
variable[start_id] assign[=] call[name[self].ds.highest_primary_key, parameter[name[source_collection_name], name[start_timeperiod], name[end_timeperiod]]]
variable[end_id] assign[=] call[name[self].ds.lowest_primary_key, parameter[name[source_collection_name], name[start_timeperiod], name[end_timeperiod]]]
<ast.Tuple object at 0x7da20c6c5120> assign[=] call[name[self].insert_and_publish_uow, parameter[name[job_record], name[start_id], name[end_id]]]
call[name[self].update_job, parameter[name[job_record], name[uow], name[job].STATE_FINAL_RUN]]
if name[transfer_to_final] begin[:]
call[name[self]._process_state_final_run, parameter[name[job_record]]] | keyword[def] identifier[_compute_and_transfer_to_final_run] ( identifier[self] , identifier[process_name] , identifier[start_timeperiod] , identifier[end_timeperiod] , identifier[job_record] ):
literal[string]
identifier[source_collection_name] = identifier[context] . identifier[process_context] [ identifier[process_name] ]. identifier[source]
identifier[start_id] = identifier[self] . identifier[ds] . identifier[highest_primary_key] ( identifier[source_collection_name] , identifier[start_timeperiod] , identifier[end_timeperiod] )
identifier[end_id] = identifier[self] . identifier[ds] . identifier[lowest_primary_key] ( identifier[source_collection_name] , identifier[start_timeperiod] , identifier[end_timeperiod] )
identifier[uow] , identifier[transfer_to_final] = identifier[self] . identifier[insert_and_publish_uow] ( identifier[job_record] , identifier[start_id] , identifier[end_id] )
identifier[self] . identifier[update_job] ( identifier[job_record] , identifier[uow] , identifier[job] . identifier[STATE_FINAL_RUN] )
keyword[if] identifier[transfer_to_final] :
identifier[self] . identifier[_process_state_final_run] ( identifier[job_record] ) | def _compute_and_transfer_to_final_run(self, process_name, start_timeperiod, end_timeperiod, job_record):
""" method computes new unit_of_work and transfers the job to STATE_FINAL_RUN
it also shares _fuzzy_ DuplicateKeyError logic from _compute_and_transfer_to_progress method"""
source_collection_name = context.process_context[process_name].source
start_id = self.ds.highest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)
end_id = self.ds.lowest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)
(uow, transfer_to_final) = self.insert_and_publish_uow(job_record, start_id, end_id)
self.update_job(job_record, uow, job.STATE_FINAL_RUN)
if transfer_to_final:
self._process_state_final_run(job_record) # depends on [control=['if'], data=[]] |
def disassemble(qobj):
"""Dissasemble a qobj and return the circuits, run_config, and user header
Args:
        qobj (Qobj): The input qobj object to disassemble
Returns:
circuits (list): A list of quantum circuits
        run_config (dict): The dict of the run config
user_qobj_header (dict): The dict of any user headers in the qobj
"""
run_config = qobj.config.to_dict()
user_qobj_header = qobj.header.to_dict()
circuits = _experiments_to_circuits(qobj)
return circuits, run_config, user_qobj_header | def function[disassemble, parameter[qobj]]:
constant[Disassemble a qobj and return the circuits, run_config, and user header
Args:
    qobj (Qobj): The input qobj object to disassemble
Returns:
circuits (list): A list of quantum circuits
    run_config (dict): The dict of the run config
user_qobj_header (dict): The dict of any user headers in the qobj
]
variable[run_config] assign[=] call[name[qobj].config.to_dict, parameter[]]
variable[user_qobj_header] assign[=] call[name[qobj].header.to_dict, parameter[]]
variable[circuits] assign[=] call[name[_experiments_to_circuits], parameter[name[qobj]]]
return[tuple[[<ast.Name object at 0x7da207f99d80>, <ast.Name object at 0x7da207f98e80>, <ast.Name object at 0x7da207f98c40>]]] | keyword[def] identifier[disassemble] ( identifier[qobj] ):
literal[string]
identifier[run_config] = identifier[qobj] . identifier[config] . identifier[to_dict] ()
identifier[user_qobj_header] = identifier[qobj] . identifier[header] . identifier[to_dict] ()
identifier[circuits] = identifier[_experiments_to_circuits] ( identifier[qobj] )
keyword[return] identifier[circuits] , identifier[run_config] , identifier[user_qobj_header] | def disassemble(qobj):
"""Dissasemble a qobj and return the circuits, run_config, and user header
Args:
qobj (Qobj): The input qobj object to dissasemble
Returns:
circuits (list): A list of quantum circuits
run_config (dict): The dist of the run config
user_qobj_header (dict): The dict of any user headers in the qobj
"""
run_config = qobj.config.to_dict()
user_qobj_header = qobj.header.to_dict()
circuits = _experiments_to_circuits(qobj)
return (circuits, run_config, user_qobj_header) |
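A round trip makes the three return values concrete. This sketch assumes a Qiskit Terra version old enough to still provide assemble() and the Qobj model; both were removed in later releases:

from qiskit import QuantumCircuit
from qiskit.compiler import assemble

qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])

qobj = assemble(qc, shots=1024)
circuits, run_config, header = disassemble(qobj)
print(run_config.get('shots'))  # 1024, pulled back out of qobj.config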
def confirm(message='Confirm (y or n) '):
"""
Display a confirmation prompt.
"""
assert isinstance(message, text_type)
app = create_confirm_application(message)
return run_application(app) | def function[confirm, parameter[message]]:
constant[
Display a confirmation prompt.
]
assert[call[name[isinstance], parameter[name[message], name[text_type]]]]
variable[app] assign[=] call[name[create_confirm_application], parameter[name[message]]]
return[call[name[run_application], parameter[name[app]]]] | keyword[def] identifier[confirm] ( identifier[message] = literal[string] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[message] , identifier[text_type] )
identifier[app] = identifier[create_confirm_application] ( identifier[message] )
keyword[return] identifier[run_application] ( identifier[app] ) | def confirm(message='Confirm (y or n) '):
"""
Display a confirmation prompt.
"""
assert isinstance(message, text_type)
app = create_confirm_application(message)
return run_application(app) |
def add(self, s, c, track=False):
"""
This function adds constraints to the backend solver.
:param c: A sequence of ASTs
:param s: A backend solver object
:param bool track: True to enable constraint tracking, which is used in unsat_core()
"""
return self._add(s, self.convert_list(c), track=track) | def function[add, parameter[self, s, c, track]]:
constant[
This function adds constraints to the backend solver.
:param c: A sequence of ASTs
:param s: A backend solver object
:param bool track: True to enable constraint tracking, which is used in unsat_core()
]
return[call[name[self]._add, parameter[name[s], call[name[self].convert_list, parameter[name[c]]]]]] | keyword[def] identifier[add] ( identifier[self] , identifier[s] , identifier[c] , identifier[track] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[_add] ( identifier[s] , identifier[self] . identifier[convert_list] ( identifier[c] ), identifier[track] = identifier[track] ) | def add(self, s, c, track=False):
"""
This function adds constraints to the backend solver.
:param c: A sequence of ASTs
:param s: A backend solver object
:param bool track: True to enable constraint tracking, which is used in unsat_core()
"""
return self._add(s, self.convert_list(c), track=track) |
def _get_point_value(self, loglstar):
"""Grab the first live point proposal in the queue."""
# If the queue is empty, refill it.
if self.nqueue <= 0:
self._fill_queue(loglstar)
# Grab the earliest entry.
u, v, logl, nc, blob = self.queue.pop(0)
self.used += 1 # add to the total number of used points
self.nqueue -= 1
return u, v, logl, nc, blob | def function[_get_point_value, parameter[self, loglstar]]:
constant[Grab the first live point proposal in the queue.]
if compare[name[self].nqueue less_or_equal[<=] constant[0]] begin[:]
call[name[self]._fill_queue, parameter[name[loglstar]]]
<ast.Tuple object at 0x7da1b1d4b0a0> assign[=] call[name[self].queue.pop, parameter[constant[0]]]
<ast.AugAssign object at 0x7da1b1d4ba90>
<ast.AugAssign object at 0x7da1b1d4a950>
return[tuple[[<ast.Name object at 0x7da1b1d4a0e0>, <ast.Name object at 0x7da1b1d4a470>, <ast.Name object at 0x7da1b1d490f0>, <ast.Name object at 0x7da1b1d4a8c0>, <ast.Name object at 0x7da1b1d48370>]]] | keyword[def] identifier[_get_point_value] ( identifier[self] , identifier[loglstar] ):
literal[string]
keyword[if] identifier[self] . identifier[nqueue] <= literal[int] :
identifier[self] . identifier[_fill_queue] ( identifier[loglstar] )
identifier[u] , identifier[v] , identifier[logl] , identifier[nc] , identifier[blob] = identifier[self] . identifier[queue] . identifier[pop] ( literal[int] )
identifier[self] . identifier[used] += literal[int]
identifier[self] . identifier[nqueue] -= literal[int]
keyword[return] identifier[u] , identifier[v] , identifier[logl] , identifier[nc] , identifier[blob] | def _get_point_value(self, loglstar):
"""Grab the first live point proposal in the queue."""
# If the queue is empty, refill it.
if self.nqueue <= 0:
self._fill_queue(loglstar) # depends on [control=['if'], data=[]]
# Grab the earliest entry.
(u, v, logl, nc, blob) = self.queue.pop(0)
self.used += 1 # add to the total number of used points
self.nqueue -= 1
return (u, v, logl, nc, blob) |
def hist_1d_index(x, shape):
"""
Fast 1d histogram of 1D indices with C++ inner loop optimization.
    It is more than two orders of magnitude faster than np.histogram().
The indices are given in coordinates and have to fit into a histogram of the dimensions shape.
Parameters
----------
x : array like
shape : tuple
tuple with x dimensions: (x,)
Returns
-------
np.ndarray with given shape
"""
if len(shape) != 1:
raise InvalidInputError('The shape has to describe a 1-d histogram')
# change memory alignment for c++ library
x = np.ascontiguousarray(x.astype(np.int32))
result = np.zeros(shape=shape, dtype=np.uint32)
analysis_functions.hist_1d(x, shape[0], result)
return result | def function[hist_1d_index, parameter[x, shape]]:
constant[
Fast 1d histogram of 1D indices with C++ inner loop optimization.
    It is more than two orders of magnitude faster than np.histogram().
The indices are given in coordinates and have to fit into a histogram of the dimensions shape.
Parameters
----------
x : array like
shape : tuple
tuple with x dimensions: (x,)
Returns
-------
np.ndarray with given shape
]
if compare[call[name[len], parameter[name[shape]]] not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da2045644c0>
variable[x] assign[=] call[name[np].ascontiguousarray, parameter[call[name[x].astype, parameter[name[np].int32]]]]
variable[result] assign[=] call[name[np].zeros, parameter[]]
call[name[analysis_functions].hist_1d, parameter[name[x], call[name[shape]][constant[0]], name[result]]]
return[name[result]] | keyword[def] identifier[hist_1d_index] ( identifier[x] , identifier[shape] ):
literal[string]
keyword[if] identifier[len] ( identifier[shape] )!= literal[int] :
keyword[raise] identifier[InvalidInputError] ( literal[string] )
identifier[x] = identifier[np] . identifier[ascontiguousarray] ( identifier[x] . identifier[astype] ( identifier[np] . identifier[int32] ))
identifier[result] = identifier[np] . identifier[zeros] ( identifier[shape] = identifier[shape] , identifier[dtype] = identifier[np] . identifier[uint32] )
identifier[analysis_functions] . identifier[hist_1d] ( identifier[x] , identifier[shape] [ literal[int] ], identifier[result] )
keyword[return] identifier[result] | def hist_1d_index(x, shape):
"""
Fast 1d histogram of 1D indices with C++ inner loop optimization.
    It is more than two orders of magnitude faster than np.histogram().
The indices are given in coordinates and have to fit into a histogram of the dimensions shape.
Parameters
----------
x : array like
shape : tuple
tuple with x dimensions: (x,)
Returns
-------
np.ndarray with given shape
"""
if len(shape) != 1:
raise InvalidInputError('The shape has to describe a 1-d histogram') # depends on [control=['if'], data=[]]
# change memory alignment for c++ library
x = np.ascontiguousarray(x.astype(np.int32))
result = np.zeros(shape=shape, dtype=np.uint32)
analysis_functions.hist_1d(x, shape[0], result)
return result |
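Since the counting kernel lives in the external analysis_functions module, a NumPy cross-check is useful: for in-range indices the result should match np.bincount pinned to the histogram length:

import numpy as np

x = np.array([0, 2, 2, 5, 9], dtype=np.int32)
shape = (10,)
reference = np.bincount(x, minlength=shape[0]).astype(np.uint32)
print(reference)  # [1 0 2 0 0 1 0 0 0 1]
# hist_1d_index(x, shape) is expected to produce the same counts.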
def get_column_at_index(self, index):
"""
        Returns a table column by its index
:param int index: the zero-indexed position of the column in the table
"""
if index is None:
return None
url = self.build_url(self._endpoints.get('get_column_index'))
response = self.session.post(url, data={'index': index})
if not response:
return None
return self.column_constructor(parent=self, **{self._cloud_data_key: response.json()}) | def function[get_column_at_index, parameter[self, index]]:
constant[
    Returns a table column by its index
:param int index: the zero-indexed position of the column in the table
]
if compare[name[index] is constant[None]] begin[:]
return[constant[None]]
variable[url] assign[=] call[name[self].build_url, parameter[call[name[self]._endpoints.get, parameter[constant[get_column_index]]]]]
variable[response] assign[=] call[name[self].session.post, parameter[name[url]]]
if <ast.UnaryOp object at 0x7da1b1b0d4b0> begin[:]
return[constant[None]]
return[call[name[self].column_constructor, parameter[]]] | keyword[def] identifier[get_column_at_index] ( identifier[self] , identifier[index] ):
literal[string]
keyword[if] identifier[index] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[url] = identifier[self] . identifier[build_url] ( identifier[self] . identifier[_endpoints] . identifier[get] ( literal[string] ))
identifier[response] = identifier[self] . identifier[session] . identifier[post] ( identifier[url] , identifier[data] ={ literal[string] : identifier[index] })
keyword[if] keyword[not] identifier[response] :
keyword[return] keyword[None]
keyword[return] identifier[self] . identifier[column_constructor] ( identifier[parent] = identifier[self] ,**{ identifier[self] . identifier[_cloud_data_key] : identifier[response] . identifier[json] ()}) | def get_column_at_index(self, index):
"""
Returns a table column by it's index
:param int index: the zero-indexed position of the column in the table
"""
if index is None:
return None # depends on [control=['if'], data=[]]
url = self.build_url(self._endpoints.get('get_column_index'))
response = self.session.post(url, data={'index': index})
if not response:
return None # depends on [control=['if'], data=[]]
return self.column_constructor(parent=self, **{self._cloud_data_key: response.json()}) |
def __move_line_or_selection(self, after_current_line=True):
"""Move current line or selected text"""
cursor = self.textCursor()
cursor.beginEditBlock()
start_pos, end_pos = self.__save_selection()
last_line = False
# ------ Select text
# Get selection start location
cursor.setPosition(start_pos)
cursor.movePosition(QTextCursor.StartOfBlock)
start_pos = cursor.position()
# Get selection end location
cursor.setPosition(end_pos)
if not cursor.atBlockStart() or end_pos == start_pos:
cursor.movePosition(QTextCursor.EndOfBlock)
cursor.movePosition(QTextCursor.NextBlock)
end_pos = cursor.position()
# Check if selection ends on the last line of the document
if cursor.atEnd():
if not cursor.atBlockStart() or end_pos == start_pos:
last_line = True
# ------ Stop if at document boundary
cursor.setPosition(start_pos)
if cursor.atStart() and not after_current_line:
# Stop if selection is already at top of the file while moving up
cursor.endEditBlock()
self.setTextCursor(cursor)
self.__restore_selection(start_pos, end_pos)
return
cursor.setPosition(end_pos, QTextCursor.KeepAnchor)
if last_line and after_current_line:
# Stop if selection is already at end of the file while moving down
cursor.endEditBlock()
self.setTextCursor(cursor)
self.__restore_selection(start_pos, end_pos)
return
# ------ Move text
sel_text = to_text_string(cursor.selectedText())
cursor.removeSelectedText()
if after_current_line:
# Shift selection down
text = to_text_string(cursor.block().text())
sel_text = os.linesep + sel_text[0:-1] # Move linesep at the start
cursor.movePosition(QTextCursor.EndOfBlock)
start_pos += len(text)+1
end_pos += len(text)
if not cursor.atEnd():
end_pos += 1
else:
# Shift selection up
if last_line:
# Remove the last linesep and add it to the selected text
cursor.deletePreviousChar()
sel_text = sel_text + os.linesep
cursor.movePosition(QTextCursor.StartOfBlock)
end_pos += 1
else:
cursor.movePosition(QTextCursor.PreviousBlock)
text = to_text_string(cursor.block().text())
start_pos -= len(text)+1
end_pos -= len(text)+1
cursor.insertText(sel_text)
cursor.endEditBlock()
self.setTextCursor(cursor)
self.__restore_selection(start_pos, end_pos) | def function[__move_line_or_selection, parameter[self, after_current_line]]:
constant[Move current line or selected text]
variable[cursor] assign[=] call[name[self].textCursor, parameter[]]
call[name[cursor].beginEditBlock, parameter[]]
<ast.Tuple object at 0x7da2054a4490> assign[=] call[name[self].__save_selection, parameter[]]
variable[last_line] assign[=] constant[False]
call[name[cursor].setPosition, parameter[name[start_pos]]]
call[name[cursor].movePosition, parameter[name[QTextCursor].StartOfBlock]]
variable[start_pos] assign[=] call[name[cursor].position, parameter[]]
call[name[cursor].setPosition, parameter[name[end_pos]]]
if <ast.BoolOp object at 0x7da2054a78b0> begin[:]
call[name[cursor].movePosition, parameter[name[QTextCursor].EndOfBlock]]
call[name[cursor].movePosition, parameter[name[QTextCursor].NextBlock]]
variable[end_pos] assign[=] call[name[cursor].position, parameter[]]
if call[name[cursor].atEnd, parameter[]] begin[:]
if <ast.BoolOp object at 0x7da2054a4b20> begin[:]
variable[last_line] assign[=] constant[True]
call[name[cursor].setPosition, parameter[name[start_pos]]]
if <ast.BoolOp object at 0x7da2054a7670> begin[:]
call[name[cursor].endEditBlock, parameter[]]
call[name[self].setTextCursor, parameter[name[cursor]]]
call[name[self].__restore_selection, parameter[name[start_pos], name[end_pos]]]
return[None]
call[name[cursor].setPosition, parameter[name[end_pos], name[QTextCursor].KeepAnchor]]
if <ast.BoolOp object at 0x7da20e961780> begin[:]
call[name[cursor].endEditBlock, parameter[]]
call[name[self].setTextCursor, parameter[name[cursor]]]
call[name[self].__restore_selection, parameter[name[start_pos], name[end_pos]]]
return[None]
variable[sel_text] assign[=] call[name[to_text_string], parameter[call[name[cursor].selectedText, parameter[]]]]
call[name[cursor].removeSelectedText, parameter[]]
if name[after_current_line] begin[:]
variable[text] assign[=] call[name[to_text_string], parameter[call[call[name[cursor].block, parameter[]].text, parameter[]]]]
variable[sel_text] assign[=] binary_operation[name[os].linesep + call[name[sel_text]][<ast.Slice object at 0x7da20e960190>]]
call[name[cursor].movePosition, parameter[name[QTextCursor].EndOfBlock]]
<ast.AugAssign object at 0x7da20e9627a0>
<ast.AugAssign object at 0x7da20e960610>
if <ast.UnaryOp object at 0x7da20e9625c0> begin[:]
<ast.AugAssign object at 0x7da20e961e70>
call[name[cursor].insertText, parameter[name[sel_text]]]
call[name[cursor].endEditBlock, parameter[]]
call[name[self].setTextCursor, parameter[name[cursor]]]
call[name[self].__restore_selection, parameter[name[start_pos], name[end_pos]]] | keyword[def] identifier[__move_line_or_selection] ( identifier[self] , identifier[after_current_line] = keyword[True] ):
literal[string]
identifier[cursor] = identifier[self] . identifier[textCursor] ()
identifier[cursor] . identifier[beginEditBlock] ()
identifier[start_pos] , identifier[end_pos] = identifier[self] . identifier[__save_selection] ()
identifier[last_line] = keyword[False]
identifier[cursor] . identifier[setPosition] ( identifier[start_pos] )
identifier[cursor] . identifier[movePosition] ( identifier[QTextCursor] . identifier[StartOfBlock] )
identifier[start_pos] = identifier[cursor] . identifier[position] ()
identifier[cursor] . identifier[setPosition] ( identifier[end_pos] )
keyword[if] keyword[not] identifier[cursor] . identifier[atBlockStart] () keyword[or] identifier[end_pos] == identifier[start_pos] :
identifier[cursor] . identifier[movePosition] ( identifier[QTextCursor] . identifier[EndOfBlock] )
identifier[cursor] . identifier[movePosition] ( identifier[QTextCursor] . identifier[NextBlock] )
identifier[end_pos] = identifier[cursor] . identifier[position] ()
keyword[if] identifier[cursor] . identifier[atEnd] ():
keyword[if] keyword[not] identifier[cursor] . identifier[atBlockStart] () keyword[or] identifier[end_pos] == identifier[start_pos] :
identifier[last_line] = keyword[True]
identifier[cursor] . identifier[setPosition] ( identifier[start_pos] )
keyword[if] identifier[cursor] . identifier[atStart] () keyword[and] keyword[not] identifier[after_current_line] :
identifier[cursor] . identifier[endEditBlock] ()
identifier[self] . identifier[setTextCursor] ( identifier[cursor] )
identifier[self] . identifier[__restore_selection] ( identifier[start_pos] , identifier[end_pos] )
keyword[return]
identifier[cursor] . identifier[setPosition] ( identifier[end_pos] , identifier[QTextCursor] . identifier[KeepAnchor] )
keyword[if] identifier[last_line] keyword[and] identifier[after_current_line] :
identifier[cursor] . identifier[endEditBlock] ()
identifier[self] . identifier[setTextCursor] ( identifier[cursor] )
identifier[self] . identifier[__restore_selection] ( identifier[start_pos] , identifier[end_pos] )
keyword[return]
identifier[sel_text] = identifier[to_text_string] ( identifier[cursor] . identifier[selectedText] ())
identifier[cursor] . identifier[removeSelectedText] ()
keyword[if] identifier[after_current_line] :
identifier[text] = identifier[to_text_string] ( identifier[cursor] . identifier[block] (). identifier[text] ())
identifier[sel_text] = identifier[os] . identifier[linesep] + identifier[sel_text] [ literal[int] :- literal[int] ]
identifier[cursor] . identifier[movePosition] ( identifier[QTextCursor] . identifier[EndOfBlock] )
identifier[start_pos] += identifier[len] ( identifier[text] )+ literal[int]
identifier[end_pos] += identifier[len] ( identifier[text] )
keyword[if] keyword[not] identifier[cursor] . identifier[atEnd] ():
identifier[end_pos] += literal[int]
keyword[else] :
keyword[if] identifier[last_line] :
identifier[cursor] . identifier[deletePreviousChar] ()
identifier[sel_text] = identifier[sel_text] + identifier[os] . identifier[linesep]
identifier[cursor] . identifier[movePosition] ( identifier[QTextCursor] . identifier[StartOfBlock] )
identifier[end_pos] += literal[int]
keyword[else] :
identifier[cursor] . identifier[movePosition] ( identifier[QTextCursor] . identifier[PreviousBlock] )
identifier[text] = identifier[to_text_string] ( identifier[cursor] . identifier[block] (). identifier[text] ())
identifier[start_pos] -= identifier[len] ( identifier[text] )+ literal[int]
identifier[end_pos] -= identifier[len] ( identifier[text] )+ literal[int]
identifier[cursor] . identifier[insertText] ( identifier[sel_text] )
identifier[cursor] . identifier[endEditBlock] ()
identifier[self] . identifier[setTextCursor] ( identifier[cursor] )
identifier[self] . identifier[__restore_selection] ( identifier[start_pos] , identifier[end_pos] ) | def __move_line_or_selection(self, after_current_line=True):
"""Move current line or selected text"""
cursor = self.textCursor()
cursor.beginEditBlock()
(start_pos, end_pos) = self.__save_selection()
last_line = False # ------ Select text
# Get selection start location
cursor.setPosition(start_pos)
cursor.movePosition(QTextCursor.StartOfBlock)
start_pos = cursor.position() # Get selection end location
cursor.setPosition(end_pos)
if not cursor.atBlockStart() or end_pos == start_pos:
cursor.movePosition(QTextCursor.EndOfBlock)
cursor.movePosition(QTextCursor.NextBlock) # depends on [control=['if'], data=[]]
end_pos = cursor.position() # Check if selection ends on the last line of the document
if cursor.atEnd():
if not cursor.atBlockStart() or end_pos == start_pos:
last_line = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # ------ Stop if at document boundary
cursor.setPosition(start_pos)
if cursor.atStart() and (not after_current_line): # Stop if selection is already at top of the file while moving up
cursor.endEditBlock()
self.setTextCursor(cursor)
self.__restore_selection(start_pos, end_pos)
return # depends on [control=['if'], data=[]]
cursor.setPosition(end_pos, QTextCursor.KeepAnchor)
if last_line and after_current_line: # Stop if selection is already at end of the file while moving down
cursor.endEditBlock()
self.setTextCursor(cursor)
self.__restore_selection(start_pos, end_pos)
return # depends on [control=['if'], data=[]] # ------ Move text
sel_text = to_text_string(cursor.selectedText())
cursor.removeSelectedText()
if after_current_line: # Shift selection down
text = to_text_string(cursor.block().text())
sel_text = os.linesep + sel_text[0:-1] # Move linesep at the start
cursor.movePosition(QTextCursor.EndOfBlock)
start_pos += len(text) + 1
end_pos += len(text)
if not cursor.atEnd():
end_pos += 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else: # Shift selection up
if last_line: # Remove the last linesep and add it to the selected text
cursor.deletePreviousChar()
sel_text = sel_text + os.linesep
cursor.movePosition(QTextCursor.StartOfBlock)
end_pos += 1 # depends on [control=['if'], data=[]]
else:
cursor.movePosition(QTextCursor.PreviousBlock)
text = to_text_string(cursor.block().text())
start_pos -= len(text) + 1
end_pos -= len(text) + 1
cursor.insertText(sel_text)
cursor.endEditBlock()
self.setTextCursor(cursor)
self.__restore_selection(start_pos, end_pos) |
def addFolderPermission(self, principal, isAllowed=True, folder=None):
"""
Assigns a new permission to a role (principal). The permission
on a parent resource is automatically inherited by all child
resources
Input:
        principal - name of role to assign or revoke access
isAllowed - boolean which allows access
Output:
JSON message as dictionary
"""
if folder is not None:
uURL = self._url + "/%s/%s" % (folder, "/permissions/add")
else:
uURL = self._url + "/permissions/add"
params = {
"f" : "json",
"principal" : principal,
"isAllowed" : isAllowed
}
return self._post(url=uURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | def function[addFolderPermission, parameter[self, principal, isAllowed, folder]]:
constant[
Assigns a new permission to a role (principal). The permission
on a parent resource is automatically inherited by all child
resources
Input:
    principal - name of role to assign or revoke access
isAllowed - boolean which allows access
Output:
JSON message as dictionary
]
if compare[name[folder] is_not constant[None]] begin[:]
variable[uURL] assign[=] binary_operation[name[self]._url + binary_operation[constant[/%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b12341c0>, <ast.Constant object at 0x7da1b1237490>]]]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b12349a0>, <ast.Constant object at 0x7da1b1235cc0>, <ast.Constant object at 0x7da1b1235930>], [<ast.Constant object at 0x7da1b1234880>, <ast.Name object at 0x7da1b1236f80>, <ast.Name object at 0x7da1b1236ad0>]]
return[call[name[self]._post, parameter[]]] | keyword[def] identifier[addFolderPermission] ( identifier[self] , identifier[principal] , identifier[isAllowed] = keyword[True] , identifier[folder] = keyword[None] ):
literal[string]
keyword[if] identifier[folder] keyword[is] keyword[not] keyword[None] :
identifier[uURL] = identifier[self] . identifier[_url] + literal[string] %( identifier[folder] , literal[string] )
keyword[else] :
identifier[uURL] = identifier[self] . identifier[_url] + literal[string]
identifier[params] ={
literal[string] : literal[string] ,
literal[string] : identifier[principal] ,
literal[string] : identifier[isAllowed]
}
keyword[return] identifier[self] . identifier[_post] ( identifier[url] = identifier[uURL] , identifier[param_dict] = identifier[params] ,
identifier[securityHandler] = identifier[self] . identifier[_securityHandler] ,
identifier[proxy_url] = identifier[self] . identifier[_proxy_url] ,
identifier[proxy_port] = identifier[self] . identifier[_proxy_port] ) | def addFolderPermission(self, principal, isAllowed=True, folder=None):
"""
Assigns a new permission to a role (principal). The permission
on a parent resource is automatically inherited by all child
resources
Input:
        principal - name of role to assign or revoke access
isAllowed - boolean which allows access
Output:
JSON message as dictionary
"""
if folder is not None:
uURL = self._url + '/%s/%s' % (folder, '/permissions/add') # depends on [control=['if'], data=['folder']]
else:
uURL = self._url + '/permissions/add'
params = {'f': 'json', 'principal': principal, 'isAllowed': isAllowed}
return self._post(url=uURL, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port) |
def list_straten_adapter(obj, request):
"""
Adapter for rendering a list of
:class:`crabpy.gateway.crab.Straat` to json.
"""
return {
'id': obj.id,
'label': obj.label,
'status': {
'id': obj.status.id,
'naam': obj.status.naam,
'definitie': obj.status.definitie
},
} | def function[list_straten_adapter, parameter[obj, request]]:
constant[
Adapter for rendering a list of
:class:`crabpy.gateway.crab.Straat` to json.
]
return[dictionary[[<ast.Constant object at 0x7da20c796560>, <ast.Constant object at 0x7da20c795c30>, <ast.Constant object at 0x7da204565f30>], [<ast.Attribute object at 0x7da204566290>, <ast.Attribute object at 0x7da204565720>, <ast.Dict object at 0x7da204566a70>]]] | keyword[def] identifier[list_straten_adapter] ( identifier[obj] , identifier[request] ):
literal[string]
keyword[return] {
literal[string] : identifier[obj] . identifier[id] ,
literal[string] : identifier[obj] . identifier[label] ,
literal[string] :{
literal[string] : identifier[obj] . identifier[status] . identifier[id] ,
literal[string] : identifier[obj] . identifier[status] . identifier[naam] ,
literal[string] : identifier[obj] . identifier[status] . identifier[definitie]
},
} | def list_straten_adapter(obj, request):
"""
Adapter for rendering a list of
:class:`crabpy.gateway.crab.Straat` to json.
"""
return {'id': obj.id, 'label': obj.label, 'status': {'id': obj.status.id, 'naam': obj.status.naam, 'definitie': obj.status.definitie}} |
def setKeyColor( self, key, color ):
"""
Sets the color used when rendering pie charts.
:param key | <str>
color | <QColor>
"""
self._keyColors[nativestring(key)] = QColor(color) | def function[setKeyColor, parameter[self, key, color]]:
constant[
Sets the color used when rendering pie charts.
:param key | <str>
color | <QColor>
]
call[name[self]._keyColors][call[name[nativestring], parameter[name[key]]]] assign[=] call[name[QColor], parameter[name[color]]] | keyword[def] identifier[setKeyColor] ( identifier[self] , identifier[key] , identifier[color] ):
literal[string]
identifier[self] . identifier[_keyColors] [ identifier[nativestring] ( identifier[key] )]= identifier[QColor] ( identifier[color] ) | def setKeyColor(self, key, color):
"""
Sets the color used when rendering pie charts.
:param key | <str>
color | <QColor>
"""
self._keyColors[nativestring(key)] = QColor(color) |
def sort_servers_closest(servers: Sequence[str]) -> Sequence[Tuple[str, float]]:
"""Sorts a list of servers by http round-trip time
Params:
servers: sequence of http server urls
Returns:
sequence of pairs of url,rtt in seconds, sorted by rtt, excluding failed servers
(possibly empty)
"""
if not {urlparse(url).scheme for url in servers}.issubset({'http', 'https'}):
raise TransportError('Invalid server urls')
get_rtt_jobs = set(
gevent.spawn(lambda url: (url, get_http_rtt(url)), server_url)
for server_url
in servers
)
# these tasks should never raise, returns None on errors
gevent.joinall(get_rtt_jobs, raise_error=False) # block and wait tasks
sorted_servers: List[Tuple[str, float]] = sorted(
(job.value for job in get_rtt_jobs if job.value[1] is not None),
key=itemgetter(1),
)
log.debug('Matrix homeserver RTT times', rtt_times=sorted_servers)
return sorted_servers | def function[sort_servers_closest, parameter[servers]]:
constant[Sorts a list of servers by http round-trip time
Params:
servers: sequence of http server urls
Returns:
sequence of pairs of url,rtt in seconds, sorted by rtt, excluding failed servers
(possibly empty)
]
if <ast.UnaryOp object at 0x7da1b19510f0> begin[:]
<ast.Raise object at 0x7da1b1950d30>
variable[get_rtt_jobs] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da1b19501f0>]]
call[name[gevent].joinall, parameter[name[get_rtt_jobs]]]
<ast.AnnAssign object at 0x7da1b1712200>
call[name[log].debug, parameter[constant[Matrix homeserver RTT times]]]
return[name[sorted_servers]] | keyword[def] identifier[sort_servers_closest] ( identifier[servers] : identifier[Sequence] [ identifier[str] ])-> identifier[Sequence] [ identifier[Tuple] [ identifier[str] , identifier[float] ]]:
literal[string]
keyword[if] keyword[not] { identifier[urlparse] ( identifier[url] ). identifier[scheme] keyword[for] identifier[url] keyword[in] identifier[servers] }. identifier[issubset] ({ literal[string] , literal[string] }):
keyword[raise] identifier[TransportError] ( literal[string] )
identifier[get_rtt_jobs] = identifier[set] (
identifier[gevent] . identifier[spawn] ( keyword[lambda] identifier[url] :( identifier[url] , identifier[get_http_rtt] ( identifier[url] )), identifier[server_url] )
keyword[for] identifier[server_url]
keyword[in] identifier[servers]
)
identifier[gevent] . identifier[joinall] ( identifier[get_rtt_jobs] , identifier[raise_error] = keyword[False] )
identifier[sorted_servers] : identifier[List] [ identifier[Tuple] [ identifier[str] , identifier[float] ]]= identifier[sorted] (
( identifier[job] . identifier[value] keyword[for] identifier[job] keyword[in] identifier[get_rtt_jobs] keyword[if] identifier[job] . identifier[value] [ literal[int] ] keyword[is] keyword[not] keyword[None] ),
identifier[key] = identifier[itemgetter] ( literal[int] ),
)
identifier[log] . identifier[debug] ( literal[string] , identifier[rtt_times] = identifier[sorted_servers] )
keyword[return] identifier[sorted_servers] | def sort_servers_closest(servers: Sequence[str]) -> Sequence[Tuple[str, float]]:
"""Sorts a list of servers by http round-trip time
Params:
servers: sequence of http server urls
Returns:
sequence of pairs of url,rtt in seconds, sorted by rtt, excluding failed servers
(possibly empty)
"""
if not {urlparse(url).scheme for url in servers}.issubset({'http', 'https'}):
raise TransportError('Invalid server urls') # depends on [control=['if'], data=[]]
get_rtt_jobs = set((gevent.spawn(lambda url: (url, get_http_rtt(url)), server_url) for server_url in servers))
# these tasks should never raise; they return None on errors
gevent.joinall(get_rtt_jobs, raise_error=False) # block and wait for the tasks
sorted_servers: List[Tuple[str, float]] = sorted((job.value for job in get_rtt_jobs if job.value[1] is not None), key=itemgetter(1))
log.debug('Matrix homeserver RTT times', rtt_times=sorted_servers)
return sorted_servers |
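A minimal call sketch, assuming gevent is installed and the get_http_rtt helper referenced above is in scope; unreachable servers drop out of the returned list rather than raising. The URLs are placeholders.

servers = ['https://matrix.org', 'https://matrix.example.invalid']
for url, rtt in sort_servers_closest(servers):
    # rtt is the measured round-trip time in seconds
    print('%s answered in %.3f s' % (url, rtt))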
def parse_entry_media_attributes(self, soup):
"""
Args:
soup: a bs4 element containing a row from the current media list
Return a dict of attributes of the media the row is about.
"""
row_info = {}
try:
start = utilities.parse_profile_date(soup.find('series_start').text)
except ValueError:
start = None
except:
if not self.session.suppress_parse_exceptions:
raise
if start is not None:
try:
row_info['aired'] = (start, utilities.parse_profile_date(soup.find('series_end').text))
except ValueError:
row_info['aired'] = (start, None)
except:
if not self.session.suppress_parse_exceptions:
raise
# look up the given media type's status terms.
status_terms = getattr(self.session, self.type)(1)._status_terms
try:
row_info['id'] = int(soup.find('series_' + self.type + 'db_id').text)
except:
if not self.session.suppress_parse_exceptions:
raise
try:
row_info['title'] = soup.find('series_title').text
except:
if not self.session.suppress_parse_exceptions:
raise
try:
row_info['status'] = status_terms[int(soup.find('series_status').text)]
except:
if not self.session.suppress_parse_exceptions:
raise
try:
row_info['picture'] = soup.find('series_image').text
except:
if not self.session.suppress_parse_exceptions:
raise
return row_info | def function[parse_entry_media_attributes, parameter[self, soup]]:
constant[
Args:
soup: a bs4 element containing a row from the current media list
Return a dict of attributes of the media the row is about.
]
variable[row_info] assign[=] dictionary[[], []]
<ast.Try object at 0x7da1b2587640>
if compare[name[start] is_not constant[None]] begin[:]
<ast.Try object at 0x7da1b2587be0>
variable[status_terms] assign[=] call[call[name[getattr], parameter[name[self].session, name[self].type]], parameter[constant[1]]]._status_terms
<ast.Try object at 0x7da18f812110>
<ast.Try object at 0x7da1b2584310>
<ast.Try object at 0x7da1b2584f10>
<ast.Try object at 0x7da1b25841f0>
return[name[row_info]] | keyword[def] identifier[parse_entry_media_attributes] ( identifier[self] , identifier[soup] ):
literal[string]
identifier[row_info] ={}
keyword[try] :
identifier[start] = identifier[utilities] . identifier[parse_profile_date] ( identifier[soup] . identifier[find] ( literal[string] ). identifier[text] )
keyword[except] identifier[ValueError] :
identifier[start] = keyword[None]
keyword[except] :
keyword[if] keyword[not] identifier[self] . identifier[session] . identifier[suppress_parse_exceptions] :
keyword[raise]
keyword[if] identifier[start] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[row_info] [ literal[string] ]=( identifier[start] , identifier[utilities] . identifier[parse_profile_date] ( identifier[soup] . identifier[find] ( literal[string] ). identifier[text] ))
keyword[except] identifier[ValueError] :
identifier[row_info] [ literal[string] ]=( identifier[start] , keyword[None] )
keyword[except] :
keyword[if] keyword[not] identifier[self] . identifier[session] . identifier[suppress_parse_exceptions] :
keyword[raise]
identifier[status_terms] = identifier[getattr] ( identifier[self] . identifier[session] , identifier[self] . identifier[type] )( literal[int] ). identifier[_status_terms]
keyword[try] :
identifier[row_info] [ literal[string] ]= identifier[int] ( identifier[soup] . identifier[find] ( literal[string] + identifier[self] . identifier[type] + literal[string] ). identifier[text] )
keyword[except] :
keyword[if] keyword[not] identifier[self] . identifier[session] . identifier[suppress_parse_exceptions] :
keyword[raise]
keyword[try] :
identifier[row_info] [ literal[string] ]= identifier[soup] . identifier[find] ( literal[string] ). identifier[text]
keyword[except] :
keyword[if] keyword[not] identifier[self] . identifier[session] . identifier[suppress_parse_exceptions] :
keyword[raise]
keyword[try] :
identifier[row_info] [ literal[string] ]= identifier[status_terms] [ identifier[int] ( identifier[soup] . identifier[find] ( literal[string] ). identifier[text] )]
keyword[except] :
keyword[if] keyword[not] identifier[self] . identifier[session] . identifier[suppress_parse_exceptions] :
keyword[raise]
keyword[try] :
identifier[row_info] [ literal[string] ]= identifier[soup] . identifier[find] ( literal[string] ). identifier[text]
keyword[except] :
keyword[if] keyword[not] identifier[self] . identifier[session] . identifier[suppress_parse_exceptions] :
keyword[raise]
keyword[return] identifier[row_info] | def parse_entry_media_attributes(self, soup):
"""
Args:
soup: a bs4 element containing a row from the current media list
Return a dict of attributes of the media the row is about.
"""
row_info = {}
try:
start = utilities.parse_profile_date(soup.find('series_start').text) # depends on [control=['try'], data=[]]
except ValueError:
start = None # depends on [control=['except'], data=[]]
except:
if not self.session.suppress_parse_exceptions:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
if start is not None:
try:
row_info['aired'] = (start, utilities.parse_profile_date(soup.find('series_end').text)) # depends on [control=['try'], data=[]]
except ValueError:
row_info['aired'] = (start, None) # depends on [control=['except'], data=[]]
except:
if not self.session.suppress_parse_exceptions:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['start']]
# look up the given media type's status terms.
status_terms = getattr(self.session, self.type)(1)._status_terms
try:
row_info['id'] = int(soup.find('series_' + self.type + 'db_id').text) # depends on [control=['try'], data=[]]
except:
if not self.session.suppress_parse_exceptions:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
try:
row_info['title'] = soup.find('series_title').text # depends on [control=['try'], data=[]]
except:
if not self.session.suppress_parse_exceptions:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
try:
row_info['status'] = status_terms[int(soup.find('series_status').text)] # depends on [control=['try'], data=[]]
except:
if not self.session.suppress_parse_exceptions:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
try:
row_info['picture'] = soup.find('series_image').text # depends on [control=['try'], data=[]]
except:
if not self.session.suppress_parse_exceptions:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
return row_info |
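Hedged sketch of feeding the parser one hand-written row, assuming bs4 (with lxml for the 'xml' parser) is installed and media_list is an instance of the list class this method belongs to, constructed with type 'anime'; the tag names follow the MyAnimeList export format the method reads.

from bs4 import BeautifulSoup

row = BeautifulSoup(
    '<entry>'
    '<series_animedb_id>1</series_animedb_id>'
    '<series_title>Cowboy Bebop</series_title>'
    '<series_status>2</series_status>'
    '<series_start>1998-04-03</series_start>'
    '<series_end>1999-04-24</series_end>'
    '<series_image>http://example.invalid/1.jpg</series_image>'
    '</entry>', 'xml')
print(media_list.parse_entry_media_attributes(row))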
def is_logged_in(self, name_id):
""" Check if user is in the cache
:param name_id: The identifier of the subject
"""
identity = self.users.get_identity(name_id)[0]
return bool(identity) | def function[is_logged_in, parameter[self, name_id]]:
constant[ Check if user is in the cache
:param name_id: The identifier of the subject
]
variable[identity] assign[=] call[call[name[self].users.get_identity, parameter[name[name_id]]]][constant[0]]
return[call[name[bool], parameter[name[identity]]]] | keyword[def] identifier[is_logged_in] ( identifier[self] , identifier[name_id] ):
literal[string]
identifier[identity] = identifier[self] . identifier[users] . identifier[get_identity] ( identifier[name_id] )[ literal[int] ]
keyword[return] identifier[bool] ( identifier[identity] ) | def is_logged_in(self, name_id):
""" Check if user is in the cache
:param name_id: The identifier of the subject
"""
identity = self.users.get_identity(name_id)[0]
return bool(identity) |
def sieve(self, ifos=None, description=None, segment=None,
segmentlist=None, exact_match=False):
"""
Return a Cache object with those CacheEntries that
contain the given patterns (or overlap, in the case of
segment or segmentlist). If exact_match is True, then
non-None ifos, description, and segment patterns must
match exactly, and a non-None segmentlist must contain
a segment which matches exactly.
It makes little sense to specify both segment and
segmentlist arguments, but it is not prohibited.
Bash-style wildcards (*?) are allowed for ifos and description.
"""
if exact_match:
segment_func = lambda e: e.segment == segment
segmentlist_func = lambda e: e.segment in segmentlist
else:
if ifos is not None: ifos = "*" + ifos + "*"
if description is not None: description = "*" + description + "*"
segment_func = lambda e: segment.intersects(e.segment)
segmentlist_func = lambda e: segmentlist.intersects_segment(e.segment)
c = self
if ifos is not None:
ifos_regexp = re.compile(fnmatch.translate(ifos))
c = [entry for entry in c if ifos_regexp.match(entry.observatory) is not None]
if description is not None:
descr_regexp = re.compile(fnmatch.translate(description))
c = [entry for entry in c if descr_regexp.match(entry.description) is not None]
if segment is not None:
c = [entry for entry in c if segment_func(entry)]
if segmentlist is not None:
# must coalesce for intersects_segment() to work
segmentlist.coalesce()
c = [entry for entry in c if segmentlist_func(entry)]
return self.__class__(c) | def function[sieve, parameter[self, ifos, description, segment, segmentlist, exact_match]]:
constant[
Return a Cache object with those CacheEntries that
contain the given patterns (or overlap, in the case of
segment or segmentlist). If exact_match is True, then
non-None ifos, description, and segment patterns must
match exactly, and a non-None segmentlist must contain
a segment which matches exactly.
It makes little sense to specify both segment and
segmentlist arguments, but it is not prohibited.
Bash-style wildcards (*?) are allowed for ifos and description.
]
if name[exact_match] begin[:]
variable[segment_func] assign[=] <ast.Lambda object at 0x7da20c7c8550>
variable[segmentlist_func] assign[=] <ast.Lambda object at 0x7da20c7c9510>
variable[c] assign[=] name[self]
if compare[name[ifos] is_not constant[None]] begin[:]
variable[ifos_regexp] assign[=] call[name[re].compile, parameter[call[name[fnmatch].translate, parameter[name[ifos]]]]]
variable[c] assign[=] <ast.ListComp object at 0x7da2041d8100>
if compare[name[description] is_not constant[None]] begin[:]
variable[descr_regexp] assign[=] call[name[re].compile, parameter[call[name[fnmatch].translate, parameter[name[description]]]]]
variable[c] assign[=] <ast.ListComp object at 0x7da20c7cb5b0>
if compare[name[segment] is_not constant[None]] begin[:]
variable[c] assign[=] <ast.ListComp object at 0x7da20c7cae30>
if compare[name[segmentlist] is_not constant[None]] begin[:]
call[name[segmentlist].coalesce, parameter[]]
variable[c] assign[=] <ast.ListComp object at 0x7da20c7c8f70>
return[call[name[self].__class__, parameter[name[c]]]] | keyword[def] identifier[sieve] ( identifier[self] , identifier[ifos] = keyword[None] , identifier[description] = keyword[None] , identifier[segment] = keyword[None] ,
identifier[segmentlist] = keyword[None] , identifier[exact_match] = keyword[False] ):
literal[string]
keyword[if] identifier[exact_match] :
identifier[segment_func] = keyword[lambda] identifier[e] : identifier[e] . identifier[segment] == identifier[segment]
identifier[segmentlist_func] = keyword[lambda] identifier[e] : identifier[e] . identifier[segment] keyword[in] identifier[segmentlist]
keyword[else] :
keyword[if] identifier[ifos] keyword[is] keyword[not] keyword[None] : identifier[ifos] = literal[string] + identifier[ifos] + literal[string]
keyword[if] identifier[description] keyword[is] keyword[not] keyword[None] : identifier[description] = literal[string] + identifier[description] + literal[string]
identifier[segment_func] = keyword[lambda] identifier[e] : identifier[segment] . identifier[intersects] ( identifier[e] . identifier[segment] )
identifier[segmentlist_func] = keyword[lambda] identifier[e] : identifier[segmentlist] . identifier[intersects_segment] ( identifier[e] . identifier[segment] )
identifier[c] = identifier[self]
keyword[if] identifier[ifos] keyword[is] keyword[not] keyword[None] :
identifier[ifos_regexp] = identifier[re] . identifier[compile] ( identifier[fnmatch] . identifier[translate] ( identifier[ifos] ))
identifier[c] =[ identifier[entry] keyword[for] identifier[entry] keyword[in] identifier[c] keyword[if] identifier[ifos_regexp] . identifier[match] ( identifier[entry] . identifier[observatory] ) keyword[is] keyword[not] keyword[None] ]
keyword[if] identifier[description] keyword[is] keyword[not] keyword[None] :
identifier[descr_regexp] = identifier[re] . identifier[compile] ( identifier[fnmatch] . identifier[translate] ( identifier[description] ))
identifier[c] =[ identifier[entry] keyword[for] identifier[entry] keyword[in] identifier[c] keyword[if] identifier[descr_regexp] . identifier[match] ( identifier[entry] . identifier[description] ) keyword[is] keyword[not] keyword[None] ]
keyword[if] identifier[segment] keyword[is] keyword[not] keyword[None] :
identifier[c] =[ identifier[entry] keyword[for] identifier[entry] keyword[in] identifier[c] keyword[if] identifier[segment_func] ( identifier[entry] )]
keyword[if] identifier[segmentlist] keyword[is] keyword[not] keyword[None] :
identifier[segmentlist] . identifier[coalesce] ()
identifier[c] =[ identifier[entry] keyword[for] identifier[entry] keyword[in] identifier[c] keyword[if] identifier[segmentlist_func] ( identifier[entry] )]
keyword[return] identifier[self] . identifier[__class__] ( identifier[c] ) | def sieve(self, ifos=None, description=None, segment=None, segmentlist=None, exact_match=False):
"""
Return a Cache object with those CacheEntries that
contain the given patterns (or overlap, in the case of
segment or segmentlist). If exact_match is True, then
non-None ifos, description, and segment patterns must
match exactly, and a non-None segmentlist must contain
a segment which matches exactly.
It makes little sense to specify both segment and
segmentlist arguments, but it is not prohibited.
Bash-style wildcards (*?) are allowed for ifos and description.
"""
if exact_match:
segment_func = lambda e: e.segment == segment
segmentlist_func = lambda e: e.segment in segmentlist # depends on [control=['if'], data=[]]
else:
if ifos is not None:
ifos = '*' + ifos + '*' # depends on [control=['if'], data=['ifos']]
if description is not None:
description = '*' + description + '*' # depends on [control=['if'], data=['description']]
segment_func = lambda e: segment.intersects(e.segment)
segmentlist_func = lambda e: segmentlist.intersects_segment(e.segment)
c = self
if ifos is not None:
ifos_regexp = re.compile(fnmatch.translate(ifos))
c = [entry for entry in c if ifos_regexp.match(entry.observatory) is not None] # depends on [control=['if'], data=['ifos']]
if description is not None:
descr_regexp = re.compile(fnmatch.translate(description))
c = [entry for entry in c if descr_regexp.match(entry.description) is not None] # depends on [control=['if'], data=['description']]
if segment is not None:
c = [entry for entry in c if segment_func(entry)] # depends on [control=['if'], data=[]]
if segmentlist is not None: # must coalesce for intersects_segment() to work
segmentlist.coalesce()
c = [entry for entry in c if segmentlist_func(entry)] # depends on [control=['if'], data=['segmentlist']]
return self.__class__(c) |
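Short usage sketch, assuming cache is a Cache of LAL-style CacheEntry objects; the filters combine with AND and use fnmatch-style wildcards when exact_match is False.

h1_cal = cache.sieve(ifos='H1', description='CAL')
print('%d matching entries' % len(h1_cal))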
def esc_split(text, delimiter=" ", maxsplit=-1, escape="\\", *, ignore_empty=False):
"""Escape-aware text splitting:
    Split text on a delimiter, recognizing escaped delimiters."""
is_escaped = False
split_count = 0
yval = []
for char in text:
if is_escaped:
is_escaped = False
yval.append(char)
else:
if char == escape:
is_escaped = True
elif char in delimiter and split_count != maxsplit:
if yval or not ignore_empty:
yield "".join(yval)
split_count += 1
yval = []
else:
yval.append(char)
yield "".join(yval) | def function[esc_split, parameter[text, delimiter, maxsplit, escape]]:
constant[Escape-aware text splitting:
    Split text on a delimiter, recognizing escaped delimiters.]
variable[is_escaped] assign[=] constant[False]
variable[split_count] assign[=] constant[0]
variable[yval] assign[=] list[[]]
for taget[name[char]] in starred[name[text]] begin[:]
if name[is_escaped] begin[:]
variable[is_escaped] assign[=] constant[False]
call[name[yval].append, parameter[name[char]]]
<ast.Yield object at 0x7da1b0146740> | keyword[def] identifier[esc_split] ( identifier[text] , identifier[delimiter] = literal[string] , identifier[maxsplit] =- literal[int] , identifier[escape] = literal[string] ,*, identifier[ignore_empty] = keyword[False] ):
literal[string]
identifier[is_escaped] = keyword[False]
identifier[split_count] = literal[int]
identifier[yval] =[]
keyword[for] identifier[char] keyword[in] identifier[text] :
keyword[if] identifier[is_escaped] :
identifier[is_escaped] = keyword[False]
identifier[yval] . identifier[append] ( identifier[char] )
keyword[else] :
keyword[if] identifier[char] == identifier[escape] :
identifier[is_escaped] = keyword[True]
keyword[elif] identifier[char] keyword[in] identifier[delimiter] keyword[and] identifier[split_count] != identifier[maxsplit] :
keyword[if] identifier[yval] keyword[or] keyword[not] identifier[ignore_empty] :
keyword[yield] literal[string] . identifier[join] ( identifier[yval] )
identifier[split_count] += literal[int]
identifier[yval] =[]
keyword[else] :
identifier[yval] . identifier[append] ( identifier[char] )
keyword[yield] literal[string] . identifier[join] ( identifier[yval] ) | def esc_split(text, delimiter=' ', maxsplit=-1, escape='\\', *, ignore_empty=False):
"""Escape-aware text splitting:
    Split text on a delimiter, recognizing escaped delimiters."""
is_escaped = False
split_count = 0
yval = []
for char in text:
if is_escaped:
is_escaped = False
yval.append(char) # depends on [control=['if'], data=[]]
elif char == escape:
is_escaped = True # depends on [control=['if'], data=[]]
elif char in delimiter and split_count != maxsplit:
if yval or not ignore_empty:
yield ''.join(yval)
split_count += 1 # depends on [control=['if'], data=[]]
yval = [] # depends on [control=['if'], data=[]]
else:
yval.append(char) # depends on [control=['for'], data=['char']]
yield ''.join(yval) |
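esc_split is a pure generator, so a worked example runs as-is: the backslash-escaped spaces survive as literal spaces inside the first field, and ignore_empty suppresses the empty field between adjacent delimiters.

parts = list(esc_split(r'one\ long\ arg two three'))
assert parts == ['one long arg', 'two', 'three']
assert list(esc_split('a,,b', delimiter=',', ignore_empty=True)) == ['a', 'b']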
def calculate_first_digit(number):
""" This function calculates the first check digit of a
cpf or cnpj.
:param number: cpf (length 9) or cnpj (length 12)
string to check the first digit. Only numbers.
:type number: string
:returns: string -- the first digit
"""
sum = 0
if len(number) == 9:
weights = CPF_WEIGHTS[0]
else:
weights = CNPJ_WEIGHTS[0]
for i in range(len(number)):
sum = sum + int(number[i]) * weights[i]
rest_division = sum % DIVISOR
if rest_division < 2:
return '0'
return str(11 - rest_division) | def function[calculate_first_digit, parameter[number]]:
constant[ This function calculates the first check digit of a
cpf or cnpj.
:param number: cpf (length 9) or cnpj (length 12)
string to check the first digit. Only numbers.
:type number: string
:returns: string -- the first digit
]
variable[sum] assign[=] constant[0]
if compare[call[name[len], parameter[name[number]]] equal[==] constant[9]] begin[:]
variable[weights] assign[=] call[name[CPF_WEIGHTS]][constant[0]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[number]]]]]] begin[:]
variable[sum] assign[=] binary_operation[name[sum] + binary_operation[call[name[int], parameter[call[name[number]][name[i]]]] * call[name[weights]][name[i]]]]
variable[rest_division] assign[=] binary_operation[name[sum] <ast.Mod object at 0x7da2590d6920> name[DIVISOR]]
if compare[name[rest_division] less[<] constant[2]] begin[:]
return[constant[0]]
return[call[name[str], parameter[binary_operation[constant[11] - name[rest_division]]]]] | keyword[def] identifier[calculate_first_digit] ( identifier[number] ):
literal[string]
identifier[sum] = literal[int]
keyword[if] identifier[len] ( identifier[number] )== literal[int] :
identifier[weights] = identifier[CPF_WEIGHTS] [ literal[int] ]
keyword[else] :
identifier[weights] = identifier[CNPJ_WEIGHTS] [ literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[number] )):
identifier[sum] = identifier[sum] + identifier[int] ( identifier[number] [ identifier[i] ])* identifier[weights] [ identifier[i] ]
identifier[rest_division] = identifier[sum] % identifier[DIVISOR]
keyword[if] identifier[rest_division] < literal[int] :
keyword[return] literal[string]
keyword[return] identifier[str] ( literal[int] - identifier[rest_division] ) | def calculate_first_digit(number):
""" This function calculates the first check digit of a
cpf or cnpj.
:param number: cpf (length 9) or cnpj (length 12)
string to check the first digit. Only numbers.
:type number: string
:returns: string -- the first digit
"""
sum = 0
if len(number) == 9:
weights = CPF_WEIGHTS[0] # depends on [control=['if'], data=[]]
else:
weights = CNPJ_WEIGHTS[0]
for i in range(len(number)):
sum = sum + int(number[i]) * weights[i] # depends on [control=['for'], data=['i']]
rest_division = sum % DIVISOR
if rest_division < 2:
return '0' # depends on [control=['if'], data=[]]
return str(11 - rest_division) |
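Worked check under the standard CPF/CNPJ weight tables; the constants below reproduce what the surrounding module is assumed to define (DIVISOR is 11). For the CPF prefix 111444777 the weighted sum is 162, 162 % 11 = 8, and 11 - 8 = 3, so the first check digit is '3' (the full CPF being 111.444.777-35).

CPF_WEIGHTS = ([10, 9, 8, 7, 6, 5, 4, 3, 2],
               [11, 10, 9, 8, 7, 6, 5, 4, 3, 2])
CNPJ_WEIGHTS = ([5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2],
                [6, 5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2])
DIVISOR = 11

assert calculate_first_digit('111444777') == '3'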
def session(self, sid):
"""Return the user session for a client with context manager syntax.
:param sid: The session id of the client.
This is a context manager that returns the user session dictionary for
the client. Any changes that are made to this dictionary inside the
context manager block are saved back to the session. Example usage::
@eio.on('connect')
async def on_connect(sid, environ):
username = authenticate_user(environ)
if not username:
return False
async with eio.session(sid) as session:
session['username'] = username
@eio.on('message')
async def on_message(sid, msg):
async with eio.session(sid) as session:
print('received message from ', session['username'])
"""
class _session_context_manager(object):
def __init__(self, server, sid):
self.server = server
self.sid = sid
self.session = None
async def __aenter__(self):
self.session = await self.server.get_session(sid)
return self.session
async def __aexit__(self, *args):
await self.server.save_session(sid, self.session)
return _session_context_manager(self, sid) | def function[session, parameter[self, sid]]:
constant[Return the user session for a client with context manager syntax.
:param sid: The session id of the client.
This is a context manager that returns the user session dictionary for
the client. Any changes that are made to this dictionary inside the
context manager block are saved back to the session. Example usage::
@eio.on('connect')
async def on_connect(sid, environ):
username = authenticate_user(environ)
if not username:
return False
async with eio.session(sid) as session:
session['username'] = username
@eio.on('message')
async def on_message(sid, msg):
async with eio.session(sid) as session:
print('received message from ', session['username'])
]
class class[_session_context_manager, parameter[]] begin[:]
def function[__init__, parameter[self, server, sid]]:
name[self].server assign[=] name[server]
name[self].sid assign[=] name[sid]
name[self].session assign[=] constant[None]
<ast.AsyncFunctionDef object at 0x7da1b0888ac0>
<ast.AsyncFunctionDef object at 0x7da1b0888eb0>
return[call[name[_session_context_manager], parameter[name[self], name[sid]]]] | keyword[def] identifier[session] ( identifier[self] , identifier[sid] ):
literal[string]
keyword[class] identifier[_session_context_manager] ( identifier[object] ):
keyword[def] identifier[__init__] ( identifier[self] , identifier[server] , identifier[sid] ):
identifier[self] . identifier[server] = identifier[server]
identifier[self] . identifier[sid] = identifier[sid]
identifier[self] . identifier[session] = keyword[None]
keyword[async] keyword[def] identifier[__aenter__] ( identifier[self] ):
identifier[self] . identifier[session] = keyword[await] identifier[self] . identifier[server] . identifier[get_session] ( identifier[sid] )
keyword[return] identifier[self] . identifier[session]
keyword[async] keyword[def] identifier[__aexit__] ( identifier[self] ,* identifier[args] ):
keyword[await] identifier[self] . identifier[server] . identifier[save_session] ( identifier[sid] , identifier[self] . identifier[session] )
keyword[return] identifier[_session_context_manager] ( identifier[self] , identifier[sid] ) | def session(self, sid):
"""Return the user session for a client with context manager syntax.
:param sid: The session id of the client.
This is a context manager that returns the user session dictionary for
the client. Any changes that are made to this dictionary inside the
context manager block are saved back to the session. Example usage::
@eio.on('connect')
async def on_connect(sid, environ):
username = authenticate_user(environ)
if not username:
return False
async with eio.session(sid) as session:
session['username'] = username
@eio.on('message')
async def on_message(sid, msg):
async with eio.session(sid) as session:
print('received message from ', session['username'])
"""
class _session_context_manager(object):
def __init__(self, server, sid):
self.server = server
self.sid = sid
self.session = None
async def __aenter__(self):
self.session = await self.server.get_session(sid)
return self.session
async def __aexit__(self, *args):
await self.server.save_session(sid, self.session)
return _session_context_manager(self, sid) |
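A sketch of the corrected usage pattern, assuming eio is the async server instance exposing this helper; since the returned manager only implements __aenter__/__aexit__, it must be entered with `async with` from a coroutine.

@eio.on('message')
async def on_message(sid, msg):
    async with eio.session(sid) as session:
        print('received message from', session.get('username'))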
def image_import(self, image_name, url, image_meta, remote_host=None):
"""Import the image specified in url to SDK image repository, and
create a record in image db, the imported images are located in
image_repository/prov_method/os_version/image_name/, for example,
/opt/sdk/images/netboot/rhel7.2/90685d2b-167bimage/0100"""
image_info = []
try:
image_info = self._ImageDbOperator.image_query_record(image_name)
except exception.SDKObjectNotExistError:
msg = ("The image record %s doens't exist in SDK image datebase,"
" will import the image and create record now" % image_name)
LOG.info(msg)
# Ensure the specified image does not exist in the image DB
if image_info:
msg = ("The image name %s has already exist in SDK image "
"database, please check if they are same image or consider"
" to use a different image name for import" % image_name)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=13, img=image_name)
try:
image_os_version = image_meta['os_version'].lower()
target_folder = self._pathutils.create_import_image_repository(
image_os_version, const.IMAGE_TYPE['DEPLOY'],
image_name)
except Exception as err:
msg = ('Failed to create repository to store image %(img)s with '
'error: %(err)s, please make sure there are enough space '
'on zvmsdk server and proper permission to create the '
'repository' % {'img': image_name,
'err': six.text_type(err)})
LOG.error(msg)
raise exception.SDKImageOperationError(rs=14, msg=msg)
try:
import_image_fn = urlparse.urlparse(url).path.split('/')[-1]
import_image_fpath = '/'.join([target_folder, import_image_fn])
self._scheme2backend(urlparse.urlparse(url).scheme).image_import(
image_name, url,
import_image_fpath,
remote_host=remote_host)
# Check md5 after import to ensure a correct image was imported
# TODO: change to query the image record by name in the DB
expect_md5sum = image_meta.get('md5sum')
real_md5sum = self._get_md5sum(import_image_fpath)
if expect_md5sum and expect_md5sum != real_md5sum:
msg = ("The md5sum after import is not same as source image,"
" the image has been broken")
LOG.error(msg)
raise exception.SDKImageOperationError(rs=4)
# After import to the image repository, figure out whether the image
# is a single-disk or a multiple-disk image; if multiple-disk,
# extract it, and if single-disk, rename the file to match the
# specific vdev
# TODO: (nafei) use sub-function to check the image type
image_type = 'rootonly'
if image_type == 'rootonly':
final_image_fpath = '/'.join([target_folder,
CONF.zvm.user_root_vdev])
os.rename(import_image_fpath, final_image_fpath)
elif image_type == 'alldisks':
# For a multiple-disk image, extract it; after extraction the
# content under the image folder looks like: 0100, 0101, 0102,
# and remove the archive file 0100-0101-0102.tgz
pass
# TODO: put multiple disk image into consideration, update the
# disk_size_units and image_size db field
disk_size_units = self._get_disk_size_units(final_image_fpath)
image_size = self._get_image_size(final_image_fpath)
# TODO: update the real_md5sum field to include each disk image
self._ImageDbOperator.image_add_record(image_name,
image_os_version,
real_md5sum,
disk_size_units,
image_size,
image_type)
LOG.info("Image %s is import successfully" % image_name)
except Exception:
# Cleanup the image from image repository
self._pathutils.clean_temp_folder(target_folder)
raise | def function[image_import, parameter[self, image_name, url, image_meta, remote_host]]:
constant[Import the image specified in url to the SDK image repository and
create a record in the image db. The imported images are located in
image_repository/prov_method/os_version/image_name/, for example,
/opt/sdk/images/netboot/rhel7.2/90685d2b-167bimage/0100]
variable[image_info] assign[=] list[[]]
<ast.Try object at 0x7da204567400>
if name[image_info] begin[:]
variable[msg] assign[=] binary_operation[constant[The image name %s has already exist in SDK image database, please check if they are same image or consider to use a different image name for import] <ast.Mod object at 0x7da2590d6920> name[image_name]]
call[name[LOG].error, parameter[name[msg]]]
<ast.Raise object at 0x7da204566cb0>
<ast.Try object at 0x7da2045659f0>
<ast.Try object at 0x7da1b196d150> | keyword[def] identifier[image_import] ( identifier[self] , identifier[image_name] , identifier[url] , identifier[image_meta] , identifier[remote_host] = keyword[None] ):
literal[string]
identifier[image_info] =[]
keyword[try] :
identifier[image_info] = identifier[self] . identifier[_ImageDbOperator] . identifier[image_query_record] ( identifier[image_name] )
keyword[except] identifier[exception] . identifier[SDKObjectNotExistError] :
identifier[msg] =( literal[string]
literal[string] % identifier[image_name] )
identifier[LOG] . identifier[info] ( identifier[msg] )
keyword[if] identifier[image_info] :
identifier[msg] =( literal[string]
literal[string]
literal[string] % identifier[image_name] )
identifier[LOG] . identifier[error] ( identifier[msg] )
keyword[raise] identifier[exception] . identifier[SDKImageOperationError] ( identifier[rs] = literal[int] , identifier[img] = identifier[image_name] )
keyword[try] :
identifier[image_os_version] = identifier[image_meta] [ literal[string] ]. identifier[lower] ()
identifier[target_folder] = identifier[self] . identifier[_pathutils] . identifier[create_import_image_repository] (
identifier[image_os_version] , identifier[const] . identifier[IMAGE_TYPE] [ literal[string] ],
identifier[image_name] )
keyword[except] identifier[Exception] keyword[as] identifier[err] :
identifier[msg] =( literal[string]
literal[string]
literal[string]
literal[string] %{ literal[string] : identifier[image_name] ,
literal[string] : identifier[six] . identifier[text_type] ( identifier[err] )})
identifier[LOG] . identifier[error] ( identifier[msg] )
keyword[raise] identifier[exception] . identifier[SDKImageOperationError] ( identifier[rs] = literal[int] , identifier[msg] = identifier[msg] )
keyword[try] :
identifier[import_image_fn] = identifier[urlparse] . identifier[urlparse] ( identifier[url] ). identifier[path] . identifier[split] ( literal[string] )[- literal[int] ]
identifier[import_image_fpath] = literal[string] . identifier[join] ([ identifier[target_folder] , identifier[import_image_fn] ])
identifier[self] . identifier[_scheme2backend] ( identifier[urlparse] . identifier[urlparse] ( identifier[url] ). identifier[scheme] ). identifier[image_import] (
identifier[image_name] , identifier[url] ,
identifier[import_image_fpath] ,
identifier[remote_host] = identifier[remote_host] )
identifier[expect_md5sum] = identifier[image_meta] . identifier[get] ( literal[string] )
identifier[real_md5sum] = identifier[self] . identifier[_get_md5sum] ( identifier[import_image_fpath] )
keyword[if] identifier[expect_md5sum] keyword[and] identifier[expect_md5sum] != identifier[real_md5sum] :
identifier[msg] =( literal[string]
literal[string] )
identifier[LOG] . identifier[error] ( identifier[msg] )
keyword[raise] identifier[exception] . identifier[SDKImageOperationError] ( identifier[rs] = literal[int] )
identifier[image_type] = literal[string]
keyword[if] identifier[image_type] == literal[string] :
identifier[final_image_fpath] = literal[string] . identifier[join] ([ identifier[target_folder] ,
identifier[CONF] . identifier[zvm] . identifier[user_root_vdev] ])
identifier[os] . identifier[rename] ( identifier[import_image_fpath] , identifier[final_image_fpath] )
keyword[elif] identifier[image_type] == literal[string] :
keyword[pass]
identifier[disk_size_units] = identifier[self] . identifier[_get_disk_size_units] ( identifier[final_image_fpath] )
identifier[image_size] = identifier[self] . identifier[_get_image_size] ( identifier[final_image_fpath] )
identifier[self] . identifier[_ImageDbOperator] . identifier[image_add_record] ( identifier[image_name] ,
identifier[image_os_version] ,
identifier[real_md5sum] ,
identifier[disk_size_units] ,
identifier[image_size] ,
identifier[image_type] )
identifier[LOG] . identifier[info] ( literal[string] % identifier[image_name] )
keyword[except] identifier[Exception] :
identifier[self] . identifier[_pathutils] . identifier[clean_temp_folder] ( identifier[target_folder] )
keyword[raise] | def image_import(self, image_name, url, image_meta, remote_host=None):
"""Import the image specified in url to SDK image repository, and
create a record in image db, the imported images are located in
image_repository/prov_method/os_version/image_name/, for example,
/opt/sdk/images/netboot/rhel7.2/90685d2b-167bimage/0100"""
image_info = []
try:
image_info = self._ImageDbOperator.image_query_record(image_name) # depends on [control=['try'], data=[]]
except exception.SDKObjectNotExistError:
msg = "The image record %s doens't exist in SDK image datebase, will import the image and create record now" % image_name
LOG.info(msg) # depends on [control=['except'], data=[]]
# Ensure the specified image does not exist in the image DB
if image_info:
msg = 'The image name %s already exists in the SDK image database, please check if it is the same image or consider using a different image name for import' % image_name
LOG.error(msg)
raise exception.SDKImageOperationError(rs=13, img=image_name) # depends on [control=['if'], data=[]]
try:
image_os_version = image_meta['os_version'].lower()
target_folder = self._pathutils.create_import_image_repository(image_os_version, const.IMAGE_TYPE['DEPLOY'], image_name) # depends on [control=['try'], data=[]]
except Exception as err:
msg = 'Failed to create repository to store image %(img)s with error: %(err)s, please make sure there are enough space on zvmsdk server and proper permission to create the repository' % {'img': image_name, 'err': six.text_type(err)}
LOG.error(msg)
raise exception.SDKImageOperationError(rs=14, msg=msg) # depends on [control=['except'], data=['err']]
try:
import_image_fn = urlparse.urlparse(url).path.split('/')[-1]
import_image_fpath = '/'.join([target_folder, import_image_fn])
self._scheme2backend(urlparse.urlparse(url).scheme).image_import(image_name, url, import_image_fpath, remote_host=remote_host)
# Check md5 after import to ensure a correct image was imported
# TODO: change to query the image record by name in the DB
expect_md5sum = image_meta.get('md5sum')
real_md5sum = self._get_md5sum(import_image_fpath)
if expect_md5sum and expect_md5sum != real_md5sum:
msg = 'The md5sum after import is not the same as the source image, the image has been corrupted'
LOG.error(msg)
raise exception.SDKImageOperationError(rs=4) # depends on [control=['if'], data=[]]
# After import to the image repository, figure out whether the image
# is a single-disk or a multiple-disk image; if multiple-disk,
# extract it, and if single-disk, rename the file to match the
# specific vdev
# TODO: (nafei) use sub-function to check the image type
image_type = 'rootonly'
if image_type == 'rootonly':
final_image_fpath = '/'.join([target_folder, CONF.zvm.user_root_vdev])
os.rename(import_image_fpath, final_image_fpath) # depends on [control=['if'], data=[]]
elif image_type == 'alldisks':
# For a multiple-disk image, extract it; after extraction the
# content under the image folder looks like: 0100, 0101, 0102,
# and remove the archive file 0100-0101-0102.tgz
pass # depends on [control=['if'], data=[]]
# TODO: put multiple disk image into consideration, update the
# disk_size_units and image_size db field
disk_size_units = self._get_disk_size_units(final_image_fpath)
image_size = self._get_image_size(final_image_fpath)
# TODO: update the real_md5sum field to include each disk image
self._ImageDbOperator.image_add_record(image_name, image_os_version, real_md5sum, disk_size_units, image_size, image_type)
LOG.info('Image %s imported successfully' % image_name) # depends on [control=['try'], data=[]]
except Exception:
# Cleanup the image from image repository
self._pathutils.clean_temp_folder(target_folder)
raise # depends on [control=['except'], data=[]] |
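Hypothetical invocation of the importer, assuming ops is the image-handling object this method lives on; the image name, file URL, and checksum below are placeholders, not real artifacts.

ops.image_import(
    'rhel7.2-netboot',
    'file:///var/tmp/rhel7.2.img',
    {'os_version': 'rhel7.2', 'md5sum': '<md5-of-source-image>'},
    remote_host=None)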
def write_message(self, message, binary=False, locked=True):
''' Override parent write_message with a version that acquires a
write lock before writing.
'''
if locked:
with (yield self.write_lock.acquire()):
yield super(WSHandler, self).write_message(message, binary)
else:
yield super(WSHandler, self).write_message(message, binary) | def function[write_message, parameter[self, message, binary, locked]]:
constant[ Override parent write_message with a version that acquires a
write lock before writing.
]
if name[locked] begin[:]
with <ast.Yield object at 0x7da1b21d69b0> begin[:]
<ast.Yield object at 0x7da1b21d47f0> | keyword[def] identifier[write_message] ( identifier[self] , identifier[message] , identifier[binary] = keyword[False] , identifier[locked] = keyword[True] ):
literal[string]
keyword[if] identifier[locked] :
keyword[with] ( keyword[yield] identifier[self] . identifier[write_lock] . identifier[acquire] ()):
keyword[yield] identifier[super] ( identifier[WSHandler] , identifier[self] ). identifier[write_message] ( identifier[message] , identifier[binary] )
keyword[else] :
keyword[yield] identifier[super] ( identifier[WSHandler] , identifier[self] ). identifier[write_message] ( identifier[message] , identifier[binary] ) | def write_message(self, message, binary=False, locked=True):
""" Override parent write_message with a version that acquires a
write lock before writing.
"""
if locked:
with (yield self.write_lock.acquire()):
yield super(WSHandler, self).write_message(message, binary) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
else:
yield super(WSHandler, self).write_message(message, binary) |
def words(self, quantity: int = 5) -> List[str]:
"""Generate lis of the random words.
:param quantity: Quantity of words. Default is 5.
:return: Word list.
:Example:
[science, network, god, octopus, love]
"""
words = self._data['words'].get('normal')
words_list = [self.random.choice(words) for _ in range(quantity)]
return words_list | def function[words, parameter[self, quantity]]:
constant[Generate a list of random words.
:param quantity: Quantity of words. Default is 5.
:return: Word list.
:Example:
[science, network, god, octopus, love]
]
variable[words] assign[=] call[call[name[self]._data][constant[words]].get, parameter[constant[normal]]]
variable[words_list] assign[=] <ast.ListComp object at 0x7da20e9b05b0>
return[name[words_list]] | keyword[def] identifier[words] ( identifier[self] , identifier[quantity] : identifier[int] = literal[int] )-> identifier[List] [ identifier[str] ]:
literal[string]
identifier[words] = identifier[self] . identifier[_data] [ literal[string] ]. identifier[get] ( literal[string] )
identifier[words_list] =[ identifier[self] . identifier[random] . identifier[choice] ( identifier[words] ) keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[quantity] )]
keyword[return] identifier[words_list] | def words(self, quantity: int=5) -> List[str]:
"""Generate lis of the random words.
:param quantity: Quantity of words. Default is 5.
:return: Word list.
:Example:
[science, network, god, octopus, love]
"""
words = self._data['words'].get('normal')
words_list = [self.random.choice(words) for _ in range(quantity)]
return words_list |
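Sketch, assuming this is the mimesis-style Text provider with the English word data bundled; the sample output is illustrative.

from mimesis import Text

text = Text('en')
print(text.words(quantity=3))  # e.g. ['science', 'network', 'god']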
async def list_pools() -> list:
"""
Lists names of created pool ledgers
:return: A list of created pool ledgers.
"""
logger = logging.getLogger(__name__)
logger.debug("list_pools: >>> ")
if not hasattr(list_pools, "cb"):
logger.debug("list_pools: Creating callback")
list_pools.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
res = await do_call('indy_list_pools',
list_pools.cb)
res = json.loads(res.decode())
logger.debug("list_pools: <<< res: %r", res)
return res | <ast.AsyncFunctionDef object at 0x7da18f00c910> | keyword[async] keyword[def] identifier[list_pools] ()-> keyword[None] :
literal[string]
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
identifier[logger] . identifier[debug] ( literal[string] )
keyword[if] keyword[not] identifier[hasattr] ( identifier[list_pools] , literal[string] ):
identifier[logger] . identifier[debug] ( literal[string] )
identifier[list_pools] . identifier[cb] = identifier[create_cb] ( identifier[CFUNCTYPE] ( keyword[None] , identifier[c_int32] , identifier[c_int32] , identifier[c_char_p] ))
identifier[res] = keyword[await] identifier[do_call] ( literal[string] ,
identifier[list_pools] . identifier[cb] )
identifier[res] = identifier[json] . identifier[loads] ( identifier[res] . identifier[decode] ())
identifier[logger] . identifier[debug] ( literal[string] , identifier[res] )
keyword[return] identifier[res] | async def list_pools() -> None:
"""
Lists names of created pool ledgers
:return: A list of created pool ledgers.
"""
logger = logging.getLogger(__name__)
logger.debug('list_pools: >>> ')
if not hasattr(list_pools, 'cb'):
logger.debug('list_pools: Creating callback')
list_pools.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p)) # depends on [control=['if'], data=[]]
res = await do_call('indy_list_pools', list_pools.cb)
res = json.loads(res.decode())
logger.debug('list_pools: <<< res: %r', res)
return res |
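Sketch of driving the coroutine from synchronous code, assuming python3-indy is installed and the libindy shared library is loadable; the printed shape is illustrative.

import asyncio

pools = asyncio.get_event_loop().run_until_complete(list_pools())
print(pools)  # e.g. [{'pool': 'sandbox'}]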
def group_by_allele(self, locus):
'''
Split the PileupCollection by the alleles suggested by the reads at the
specified locus.
If a read has an insertion immediately following the locus, then the
insertion is included in the allele. For example, if locus is the
1-base range [5,6), one allele might be "AGA", indicating that at
locus 5 some read has an "A" followed by a 2-base insertion ("GA"). If
a read has a deletion at the specified locus, the allele is the empty
string.
The given locus may include any number of bases. If the locus includes
multiple bases, then the alleles consist of all bases aligning to that
range in any read. Note that only sequences actually sequenced in a
particular read are included. For example, if one read has "ATT" at a
locus and another read has "GCC", then the alleles are "ATT" and
"GCC", but not "GTT". That is, the bases in each allele are phased. For
this reason, only reads that overlap the entire locus are included.
If the locus is an empty interval (e.g. [5,5) ), then the alleles
consist only of inserted bases. In this example, only bases inserted
immediately after locus 5 would be included (but *not* the base
actually at locus 5). In the previous insertion example, the allele
would be "GA", indicating a 2-base insertion. Reads that have no
insertion at that position (matches or deletions) would have the empty
string as their allele.
Parameters
----------
locus : Locus
The reference locus, encompassing 0 or more bases.
Returns
----------
A dict of string -> PileupCollection. The keys are nucleotide strings
giving the bases sequenced at the locus, and the values are
PileupCollection instances of the alignments that support that allele.
'''
locus = to_locus(locus)
read_to_allele = None
loci = []
if locus.positions:
# Our locus includes at least one reference base.
for position in locus.positions:
base_position = Locus.from_interbase_coordinates(
locus.contig, position)
loci.append(base_position)
new_read_to_allele = {}
for element in self.pileups[base_position]:
allele_prefix = ""
key = alignment_key(element.alignment)
if read_to_allele is not None:
try:
allele_prefix = read_to_allele[key]
except KeyError:
continue
allele = allele_prefix + element.bases
new_read_to_allele[key] = allele
read_to_allele = new_read_to_allele
else:
# Our locus is between reference bases.
position_before = Locus.from_interbase_coordinates(
locus.contig, locus.start)
loci.append(position_before)
read_to_allele = {}
for element in self.pileups[position_before]:
allele = element.bases[1:]
read_to_allele[alignment_key(element.alignment)] = allele
split = defaultdict(lambda: PileupCollection(pileups={}, parent=self))
for locus in loci:
pileup = self.pileups[locus]
for e in pileup.elements:
key = read_to_allele.get(alignment_key(e.alignment))
if key is not None:
if locus in split[key].pileups:
split[key].pileups[locus].append(e)
else:
split[key].pileups[locus] = Pileup(locus, [e])
# Sort by number of reads (descending). Break ties with the
# lexicographic ordering of the allele string.
def sorter(pair):
(allele, pileup_collection) = pair
return (-1 * pileup_collection.num_reads(), allele)
return OrderedDict(sorted(split.items(), key=sorter)) | def function[group_by_allele, parameter[self, locus]]:
constant[
Split the PileupCollection by the alleles suggested by the reads at the
specified locus.
If a read has an insertion immediately following the locus, then the
insertion is included in the allele. For example, if locus is the
1-base range [5,6), one allele might be "AGA", indicating that at
locus 5 some read has an "A" followed by a 2-base insertion ("GA"). If
a read has a deletion at the specified locus, the allele is the empty
string.
The given locus may include any number of bases. If the locus includes
multiple bases, then the alleles consist of all bases aligning to that
range in any read. Note that only sequences actually sequenced in a
particular read are included. For example, if one read has "ATT" at a
locus and another read has "GCC", then the alleles are "ATT" and
"GCC", but not "GTT". That is, the bases in each allele are phased. For
this reason, only reads that overlap the entire locus are included.
If the locus is an empty interval (e.g. [5,5) ), then the alleles
consist only of inserted bases. In this example, only bases inserted
immediately after locus 5 would be included (but *not* the base
actually at locus 5). In the previous insertion example, the allele
would be "GA", indicating a 2-base insertion. Reads that have no
insertion at that position (matches or deletions) would have the empty
string as their allele.
Parameters
----------
locus : Locus
The reference locus, encompassing 0 or more bases.
Returns
----------
A dict of string -> PileupCollection. The keys are nucleotide strings
giving the bases sequenced at the locus, and the values are
PileupCollection instances of the alignments that support that allele.
]
variable[locus] assign[=] call[name[to_locus], parameter[name[locus]]]
variable[read_to_allele] assign[=] constant[None]
variable[loci] assign[=] list[[]]
if name[locus].positions begin[:]
for taget[name[position]] in starred[name[locus].positions] begin[:]
variable[base_position] assign[=] call[name[Locus].from_interbase_coordinates, parameter[name[locus].contig, name[position]]]
call[name[loci].append, parameter[name[base_position]]]
variable[new_read_to_allele] assign[=] dictionary[[], []]
for taget[name[element]] in starred[call[name[self].pileups][name[base_position]]] begin[:]
variable[allele_prefix] assign[=] constant[]
variable[key] assign[=] call[name[alignment_key], parameter[name[element].alignment]]
if compare[name[read_to_allele] is_not constant[None]] begin[:]
<ast.Try object at 0x7da2041da860>
variable[allele] assign[=] binary_operation[name[allele_prefix] + name[element].bases]
call[name[new_read_to_allele]][name[key]] assign[=] name[allele]
variable[read_to_allele] assign[=] name[new_read_to_allele]
variable[split] assign[=] call[name[defaultdict], parameter[<ast.Lambda object at 0x7da2041d9ae0>]]
for taget[name[locus]] in starred[name[loci]] begin[:]
variable[pileup] assign[=] call[name[self].pileups][name[locus]]
for taget[name[e]] in starred[name[pileup].elements] begin[:]
variable[key] assign[=] call[name[read_to_allele].get, parameter[call[name[alignment_key], parameter[name[e].alignment]]]]
if compare[name[key] is_not constant[None]] begin[:]
if compare[name[locus] in call[name[split]][name[key]].pileups] begin[:]
call[call[call[name[split]][name[key]].pileups][name[locus]].append, parameter[name[e]]]
def function[sorter, parameter[pair]]:
<ast.Tuple object at 0x7da2041dac50> assign[=] name[pair]
return[tuple[[<ast.BinOp object at 0x7da2041db160>, <ast.Name object at 0x7da2041db460>]]]
return[call[name[OrderedDict], parameter[call[name[sorted], parameter[call[name[split].items, parameter[]]]]]]] | keyword[def] identifier[group_by_allele] ( identifier[self] , identifier[locus] ):
literal[string]
identifier[locus] = identifier[to_locus] ( identifier[locus] )
identifier[read_to_allele] = keyword[None]
identifier[loci] =[]
keyword[if] identifier[locus] . identifier[positions] :
keyword[for] identifier[position] keyword[in] identifier[locus] . identifier[positions] :
identifier[base_position] = identifier[Locus] . identifier[from_interbase_coordinates] (
identifier[locus] . identifier[contig] , identifier[position] )
identifier[loci] . identifier[append] ( identifier[base_position] )
identifier[new_read_to_allele] ={}
keyword[for] identifier[element] keyword[in] identifier[self] . identifier[pileups] [ identifier[base_position] ]:
identifier[allele_prefix] = literal[string]
identifier[key] = identifier[alignment_key] ( identifier[element] . identifier[alignment] )
keyword[if] identifier[read_to_allele] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[allele_prefix] = identifier[read_to_allele] [ identifier[key] ]
keyword[except] identifier[KeyError] :
keyword[continue]
identifier[allele] = identifier[allele_prefix] + identifier[element] . identifier[bases]
identifier[new_read_to_allele] [ identifier[key] ]= identifier[allele]
identifier[read_to_allele] = identifier[new_read_to_allele]
keyword[else] :
identifier[position_before] = identifier[Locus] . identifier[from_interbase_coordinates] (
identifier[locus] . identifier[contig] , identifier[locus] . identifier[start] )
identifier[loci] . identifier[append] ( identifier[position_before] )
identifier[read_to_allele] ={}
keyword[for] identifier[element] keyword[in] identifier[self] . identifier[pileups] [ identifier[position_before] ]:
identifier[allele] = identifier[element] . identifier[bases] [ literal[int] :]
identifier[read_to_allele] [ identifier[alignment_key] ( identifier[element] . identifier[alignment] )]= identifier[allele]
identifier[split] = identifier[defaultdict] ( keyword[lambda] : identifier[PileupCollection] ( identifier[pileups] ={}, identifier[parent] = identifier[self] ))
keyword[for] identifier[locus] keyword[in] identifier[loci] :
identifier[pileup] = identifier[self] . identifier[pileups] [ identifier[locus] ]
keyword[for] identifier[e] keyword[in] identifier[pileup] . identifier[elements] :
identifier[key] = identifier[read_to_allele] . identifier[get] ( identifier[alignment_key] ( identifier[e] . identifier[alignment] ))
keyword[if] identifier[key] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[locus] keyword[in] identifier[split] [ identifier[key] ]. identifier[pileups] :
identifier[split] [ identifier[key] ]. identifier[pileups] [ identifier[locus] ]. identifier[append] ( identifier[e] )
keyword[else] :
identifier[split] [ identifier[key] ]. identifier[pileups] [ identifier[locus] ]= identifier[Pileup] ( identifier[locus] ,[ identifier[e] ])
keyword[def] identifier[sorter] ( identifier[pair] ):
( identifier[allele] , identifier[pileup_collection] )= identifier[pair]
keyword[return] (- literal[int] * identifier[pileup_collection] . identifier[num_reads] (), identifier[allele] )
keyword[return] identifier[OrderedDict] ( identifier[sorted] ( identifier[split] . identifier[items] (), identifier[key] = identifier[sorter] )) | def group_by_allele(self, locus):
"""
Split the PileupCollection by the alleles suggested by the reads at the
specified locus.
If a read has an insertion immediately following the locus, then the
insertion is included in the allele. For example, if locus is the
1-base range [5,6), one allele might be "AGA", indicating that at
locus 5 some read has an "A" followed by a 2-base insertion ("GA"). If
a read has a deletion at the specified locus, the allele is the empty
string.
The given locus may include any number of bases. If the locus includes
multiple bases, then the alleles consist of all bases aligning to that
range in any read. Note that only sequences actually sequenced in a
particular read are included. For example, if one read has "ATT" at a
locus and another read has "GCC", then the alleles are "ATT" and
"GCC", but not "GTT". That is, the bases in each allele are phased. For
this reason, only reads that overlap the entire locus are included.
If the locus is an empty interval (e.g. [5,5) ), then the alleles
consist only of inserted bases. In this example, only bases inserted
immediately after locus 5 would be included (but *not* the base
actually at locus 5). In the previous insertion example, the allele
would be "GA", indicating a 2-base insertion. Reads that have no
insertion at that position (matches or deletions) would have the empty
string as their allele.
Parameters
----------
locus : Locus
The reference locus, encompassing 0 or more bases.
Returns
----------
A dict of string -> PileupCollection. The keys are nucleotide strings
giving the bases sequenced at the locus, and the values are
PileupCollection instances of the alignments that support that allele.
"""
locus = to_locus(locus)
read_to_allele = None
loci = []
if locus.positions:
# Our locus includes at least one reference base.
for position in locus.positions:
base_position = Locus.from_interbase_coordinates(locus.contig, position)
loci.append(base_position)
new_read_to_allele = {}
for element in self.pileups[base_position]:
allele_prefix = ''
key = alignment_key(element.alignment)
if read_to_allele is not None:
try:
allele_prefix = read_to_allele[key] # depends on [control=['try'], data=[]]
except KeyError:
continue # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['read_to_allele']]
allele = allele_prefix + element.bases
new_read_to_allele[key] = allele # depends on [control=['for'], data=['element']]
read_to_allele = new_read_to_allele # depends on [control=['for'], data=['position']] # depends on [control=['if'], data=[]]
else:
# Our locus is between reference bases.
position_before = Locus.from_interbase_coordinates(locus.contig, locus.start)
loci.append(position_before)
read_to_allele = {}
for element in self.pileups[position_before]:
allele = element.bases[1:]
read_to_allele[alignment_key(element.alignment)] = allele # depends on [control=['for'], data=['element']]
split = defaultdict(lambda : PileupCollection(pileups={}, parent=self))
for locus in loci:
pileup = self.pileups[locus]
for e in pileup.elements:
key = read_to_allele.get(alignment_key(e.alignment))
if key is not None:
if locus in split[key].pileups:
split[key].pileups[locus].append(e) # depends on [control=['if'], data=['locus']]
else:
split[key].pileups[locus] = Pileup(locus, [e]) # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=['e']] # depends on [control=['for'], data=['locus']]
# Sort by number of reads (descending). Break ties with the
# lexicographic ordering of the allele string.
def sorter(pair):
(allele, pileup_collection) = pair
return (-1 * pileup_collection.num_reads(), allele)
return OrderedDict(sorted(split.items(), key=sorter)) |
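A minimal standalone sketch of the ordering convention used at the end of group_by_allele above: bucket items by a key, then order the buckets by size (descending), breaking ties on the key string. The read and allele names here are illustrative stand-ins, not objects from the real pileup API.

from collections import OrderedDict, defaultdict

reads = [('r1', 'A'), ('r2', 'A'), ('r3', 'AGA'), ('r4', '')]
buckets = defaultdict(list)
for read_name, allele in reads:
    buckets[allele].append(read_name)

def sorter(pair):
    (allele, grouped_reads) = pair
    # Negate the count so bigger groups sort first; the allele string breaks ties.
    return (-1 * len(grouped_reads), allele)

ordered = OrderedDict(sorted(buckets.items(), key=sorter))
assert list(ordered) == ['A', '', 'AGA']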
def py_to_couch_validate(key, val):
"""
Validates the individual parameter key and value.
"""
if key not in RESULT_ARG_TYPES:
raise CloudantArgumentError(116, key)
# pylint: disable=unidiomatic-typecheck
# Validate argument values and ensure that a boolean is not passed in
# if an integer is expected
if (not isinstance(val, RESULT_ARG_TYPES[key]) or
(type(val) is bool and int in RESULT_ARG_TYPES[key])):
raise CloudantArgumentError(117, key, RESULT_ARG_TYPES[key])
if key == 'keys':
for key_list_val in val:
if (not isinstance(key_list_val, RESULT_ARG_TYPES['key']) or
type(key_list_val) is bool):
raise CloudantArgumentError(134, RESULT_ARG_TYPES['key'])
if key == 'stale':
if val not in ('ok', 'update_after'):
raise CloudantArgumentError(135, val) | def function[py_to_couch_validate, parameter[key, val]]:
constant[
Validates the individual parameter key and value.
]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[RESULT_ARG_TYPES]] begin[:]
<ast.Raise object at 0x7da1b2346d40>
if <ast.BoolOp object at 0x7da1b23466b0> begin[:]
<ast.Raise object at 0x7da1b23468f0>
if compare[name[key] equal[==] constant[keys]] begin[:]
for taget[name[key_list_val]] in starred[name[val]] begin[:]
if <ast.BoolOp object at 0x7da1b2346740> begin[:]
<ast.Raise object at 0x7da1b2347070>
if compare[name[key] equal[==] constant[stale]] begin[:]
if compare[name[val] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b2346590>, <ast.Constant object at 0x7da1b2346200>]]] begin[:]
<ast.Raise object at 0x7da1b2345a80> | keyword[def] identifier[py_to_couch_validate] ( identifier[key] , identifier[val] ):
literal[string]
keyword[if] identifier[key] keyword[not] keyword[in] identifier[RESULT_ARG_TYPES] :
keyword[raise] identifier[CloudantArgumentError] ( literal[int] , identifier[key] )
keyword[if] ( keyword[not] identifier[isinstance] ( identifier[val] , identifier[RESULT_ARG_TYPES] [ identifier[key] ]) keyword[or]
( identifier[type] ( identifier[val] ) keyword[is] identifier[bool] keyword[and] identifier[int] keyword[in] identifier[RESULT_ARG_TYPES] [ identifier[key] ])):
keyword[raise] identifier[CloudantArgumentError] ( literal[int] , identifier[key] , identifier[RESULT_ARG_TYPES] [ identifier[key] ])
keyword[if] identifier[key] == literal[string] :
keyword[for] identifier[key_list_val] keyword[in] identifier[val] :
keyword[if] ( keyword[not] identifier[isinstance] ( identifier[key_list_val] , identifier[RESULT_ARG_TYPES] [ literal[string] ]) keyword[or]
identifier[type] ( identifier[key_list_val] ) keyword[is] identifier[bool] ):
keyword[raise] identifier[CloudantArgumentError] ( literal[int] , identifier[RESULT_ARG_TYPES] [ literal[string] ])
keyword[if] identifier[key] == literal[string] :
keyword[if] identifier[val] keyword[not] keyword[in] ( literal[string] , literal[string] ):
keyword[raise] identifier[CloudantArgumentError] ( literal[int] , identifier[val] ) | def py_to_couch_validate(key, val):
"""
Validates the individual parameter key and value.
"""
if key not in RESULT_ARG_TYPES:
raise CloudantArgumentError(116, key) # depends on [control=['if'], data=['key']]
# pylint: disable=unidiomatic-typecheck
# Validate argument values and ensure that a boolean is not passed in
# if an integer is expected
if not isinstance(val, RESULT_ARG_TYPES[key]) or (type(val) is bool and int in RESULT_ARG_TYPES[key]):
raise CloudantArgumentError(117, key, RESULT_ARG_TYPES[key]) # depends on [control=['if'], data=[]]
if key == 'keys':
for key_list_val in val:
if not isinstance(key_list_val, RESULT_ARG_TYPES['key']) or type(key_list_val) is bool:
raise CloudantArgumentError(134, RESULT_ARG_TYPES['key']) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key_list_val']] # depends on [control=['if'], data=[]]
if key == 'stale':
if val not in ('ok', 'update_after'):
raise CloudantArgumentError(135, val) # depends on [control=['if'], data=['val']] # depends on [control=['if'], data=[]] |
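The `type(val) is bool` guard above exists because bool is a subclass of int in Python, so a plain isinstance check would let True through wherever an int is expected. A small self-contained illustration of the gotcha; the ARG_TYPES table here is a stand-in for RESULT_ARG_TYPES, not the real Cloudant table.

ARG_TYPES = {'limit': (int,)}

val = True
assert isinstance(val, ARG_TYPES['limit'])            # passes: bool is an int
rejected = (not isinstance(val, ARG_TYPES['limit']) or
            (type(val) is bool and int in ARG_TYPES['limit']))
assert rejected                                       # the extra guard catches it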
def corpora(self, full=False):
'''Return list of corpora owned by user.
If `full=True`, it'll download all pages returned by the HTTP server'''
url = self.base_url + self.CORPORA_PAGE
class_ = Corpus
results = self._retrieve_resources(url, class_, full)
return results | def function[corpora, parameter[self, full]]:
constant[Return list of corpora owned by user.
If `full=True`, it'll download all pages returned by the HTTP server]
variable[url] assign[=] binary_operation[name[self].base_url + name[self].CORPORA_PAGE]
variable[class_] assign[=] name[Corpus]
variable[results] assign[=] call[name[self]._retrieve_resources, parameter[name[url], name[class_], name[full]]]
return[name[results]] | keyword[def] identifier[corpora] ( identifier[self] , identifier[full] = keyword[False] ):
literal[string]
identifier[url] = identifier[self] . identifier[base_url] + identifier[self] . identifier[CORPORA_PAGE]
identifier[class_] = identifier[Corpus]
identifier[results] = identifier[self] . identifier[_retrieve_resources] ( identifier[url] , identifier[class_] , identifier[full] )
keyword[return] identifier[results] | def corpora(self, full=False):
"""Return list of corpora owned by user.
If `full=True`, it'll download all pages returned by the HTTP server"""
url = self.base_url + self.CORPORA_PAGE
class_ = Corpus
results = self._retrieve_resources(url, class_, full)
return results |
def apply_single_tag_set(tag_set, selection):
"""All servers matching one tag set.
A tag set is a dict. A server matches if its tags are a superset:
A server tagged {'a': '1', 'b': '2'} matches the tag set {'a': '1'}.
The empty tag set {} matches any server.
"""
def tags_match(server_tags):
for key, value in tag_set.items():
if key not in server_tags or server_tags[key] != value:
return False
return True
return selection.with_server_descriptions(
[s for s in selection.server_descriptions if tags_match(s.tags)]) | def function[apply_single_tag_set, parameter[tag_set, selection]]:
constant[All servers matching one tag set.
A tag set is a dict. A server matches if its tags are a superset:
A server tagged {'a': '1', 'b': '2'} matches the tag set {'a': '1'}.
The empty tag set {} matches any server.
]
def function[tags_match, parameter[server_tags]]:
for taget[tuple[[<ast.Name object at 0x7da18ede6fb0>, <ast.Name object at 0x7da18ede69e0>]]] in starred[call[name[tag_set].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da18ede5120> begin[:]
return[constant[False]]
return[constant[True]]
return[call[name[selection].with_server_descriptions, parameter[<ast.ListComp object at 0x7da20e955a80>]]] | keyword[def] identifier[apply_single_tag_set] ( identifier[tag_set] , identifier[selection] ):
literal[string]
keyword[def] identifier[tags_match] ( identifier[server_tags] ):
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[tag_set] . identifier[items] ():
keyword[if] identifier[key] keyword[not] keyword[in] identifier[server_tags] keyword[or] identifier[server_tags] [ identifier[key] ]!= identifier[value] :
keyword[return] keyword[False]
keyword[return] keyword[True]
keyword[return] identifier[selection] . identifier[with_server_descriptions] (
[ identifier[s] keyword[for] identifier[s] keyword[in] identifier[selection] . identifier[server_descriptions] keyword[if] identifier[tags_match] ( identifier[s] . identifier[tags] )]) | def apply_single_tag_set(tag_set, selection):
"""All servers matching one tag set.
A tag set is a dict. A server matches if its tags are a superset:
A server tagged {'a': '1', 'b': '2'} matches the tag set {'a': '1'}.
The empty tag set {} matches any server.
"""
def tags_match(server_tags):
for (key, value) in tag_set.items():
if key not in server_tags or server_tags[key] != value:
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return True
return selection.with_server_descriptions([s for s in selection.server_descriptions if tags_match(s.tags)]) |
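A self-contained sketch of the superset test that tags_match performs: a server matches when every key/value pair in the tag set appears in its tags, and the empty tag set matches everything. Plain dicts stand in for the driver's selection and server-description objects.

def tags_match(tag_set, server_tags):
    # Every pair in the tag set must be present in the server's tags.
    return all(server_tags.get(k) == v for k, v in tag_set.items())

servers = [{'a': '1', 'b': '2'}, {'a': '2'}, {}]
assert [tags_match({'a': '1'}, s) for s in servers] == [True, False, False]
assert all(tags_match({}, s) for s in servers)    # {} matches any server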
def to_table_data(self):
"""
:raises ValueError:
:raises pytablereader.error.ValidationError:
"""
self._validate_source_data()
for table_key, json_records in six.iteritems(self._buffer):
headers = sorted(six.viewkeys(json_records))
self._loader.inc_table_count()
self._table_key = table_key
yield TableData(
self._make_table_name(),
headers,
zip(*[json_records.get(header) for header in headers]),
dp_extractor=self._loader.dp_extractor,
type_hints=self._extract_type_hints(headers),
) | def function[to_table_data, parameter[self]]:
constant[
:raises ValueError:
:raises pytablereader.error.ValidationError:
]
call[name[self]._validate_source_data, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b0fd82b0>, <ast.Name object at 0x7da1b0fd9a50>]]] in starred[call[name[six].iteritems, parameter[name[self]._buffer]]] begin[:]
variable[headers] assign[=] call[name[sorted], parameter[call[name[six].viewkeys, parameter[name[json_records]]]]]
call[name[self]._loader.inc_table_count, parameter[]]
name[self]._table_key assign[=] name[table_key]
<ast.Yield object at 0x7da1b0fd81f0> | keyword[def] identifier[to_table_data] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_validate_source_data] ()
keyword[for] identifier[table_key] , identifier[json_records] keyword[in] identifier[six] . identifier[iteritems] ( identifier[self] . identifier[_buffer] ):
identifier[headers] = identifier[sorted] ( identifier[six] . identifier[viewkeys] ( identifier[json_records] ))
identifier[self] . identifier[_loader] . identifier[inc_table_count] ()
identifier[self] . identifier[_table_key] = identifier[table_key]
keyword[yield] identifier[TableData] (
identifier[self] . identifier[_make_table_name] (),
identifier[headers] ,
identifier[zip] (*[ identifier[json_records] . identifier[get] ( identifier[header] ) keyword[for] identifier[header] keyword[in] identifier[headers] ]),
identifier[dp_extractor] = identifier[self] . identifier[_loader] . identifier[dp_extractor] ,
identifier[type_hints] = identifier[self] . identifier[_extract_type_hints] ( identifier[headers] ),
) | def to_table_data(self):
"""
:raises ValueError:
:raises pytablereader.error.ValidationError:
"""
self._validate_source_data()
for (table_key, json_records) in six.iteritems(self._buffer):
headers = sorted(six.viewkeys(json_records))
self._loader.inc_table_count()
self._table_key = table_key
yield TableData(self._make_table_name(), headers, zip(*[json_records.get(header) for header in headers]), dp_extractor=self._loader.dp_extractor, type_hints=self._extract_type_hints(headers)) # depends on [control=['for'], data=[]] |
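The zip(*[...]) expression above transposes column-oriented records into row tuples. A minimal sketch with a plain dict standing in for the loader's buffer:

json_records = {'name': ['a', 'b'], 'count': [1, 2]}
headers = sorted(json_records)                     # ['count', 'name']
rows = list(zip(*[json_records.get(h) for h in headers]))
assert rows == [(1, 'a'), (2, 'b')]                # one tuple per table row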
def group_consecutives(data, stepsize=1):
"""
    Return a list of lists of consecutive numbers found in data (a list of numbers).
References:
http://stackoverflow.com/questions/7352684/how-to-find-the-groups-of-consecutive-elements-from-an-array-in-numpy
"""
run = []
result = [run]
expect = None
for item in data:
if (item == expect) or (expect is None):
run.append(item)
else:
run = [item]
result.append(run)
expect = item + stepsize
return result | def function[group_consecutives, parameter[data, stepsize]]:
constant[
    Return a list of lists of consecutive numbers found in data (a list of numbers).
References:
http://stackoverflow.com/questions/7352684/how-to-find-the-groups-of-consecutive-elements-from-an-array-in-numpy
]
variable[run] assign[=] list[[]]
variable[result] assign[=] list[[<ast.Name object at 0x7da1b24eaa70>]]
variable[expect] assign[=] constant[None]
for taget[name[item]] in starred[name[data]] begin[:]
if <ast.BoolOp object at 0x7da1b24eb280> begin[:]
call[name[run].append, parameter[name[item]]]
variable[expect] assign[=] binary_operation[name[item] + name[stepsize]]
return[name[result]] | keyword[def] identifier[group_consecutives] ( identifier[data] , identifier[stepsize] = literal[int] ):
literal[string]
identifier[run] =[]
identifier[result] =[ identifier[run] ]
identifier[expect] = keyword[None]
keyword[for] identifier[item] keyword[in] identifier[data] :
keyword[if] ( identifier[item] == identifier[expect] ) keyword[or] ( identifier[expect] keyword[is] keyword[None] ):
identifier[run] . identifier[append] ( identifier[item] )
keyword[else] :
identifier[run] =[ identifier[item] ]
identifier[result] . identifier[append] ( identifier[run] )
identifier[expect] = identifier[item] + identifier[stepsize]
keyword[return] identifier[result] | def group_consecutives(data, stepsize=1):
"""
    Return a list of lists of consecutive numbers found in data (a list of numbers).
References:
http://stackoverflow.com/questions/7352684/how-to-find-the-groups-of-consecutive-elements-from-an-array-in-numpy
"""
run = []
result = [run]
expect = None
for item in data:
if item == expect or expect is None:
run.append(item) # depends on [control=['if'], data=[]]
else:
run = [item]
result.append(run)
expect = item + stepsize # depends on [control=['for'], data=['item']]
return result |
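Example usage of group_consecutives as defined above; values separated by more than stepsize start a new sublist.

assert group_consecutives([1, 2, 3, 7, 8, 10]) == [[1, 2, 3], [7, 8], [10]]
assert group_consecutives([0, 2, 4, 5], stepsize=2) == [[0, 2, 4], [5]]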
def get_version_status(
package_descriptors, targets, repos_data,
strip_version=False, strip_os_code_name=False):
"""
For each package and target check if it is affected by a sync.
This is the case when the package version in the testing repo is different
from the version in the main repo.
:return: a dict indexed by package names containing
dicts indexed by targets containing
a list of status strings (one for each repo)
"""
status = {}
for package_descriptor in package_descriptors.values():
pkg_name = package_descriptor.pkg_name
debian_pkg_name = package_descriptor.debian_pkg_name
ref_version = package_descriptor.version
if strip_version:
ref_version = _strip_version_suffix(ref_version)
status[pkg_name] = {}
for target in targets:
statuses = []
for repo_data in repos_data:
version = repo_data.get(target, {}).get(debian_pkg_name, None)
if strip_version:
version = _strip_version_suffix(version)
if strip_os_code_name:
version = _strip_os_code_name_suffix(
version, target.os_code_name)
if ref_version:
if not version:
statuses.append('missing')
elif version.startswith(ref_version): # including equal
statuses.append('equal')
else:
if _version_is_gt_other(version, ref_version):
statuses.append('higher')
else:
statuses.append('lower')
else:
if not version:
statuses.append('ignore')
else:
statuses.append('obsolete')
status[pkg_name][target] = statuses
return status | def function[get_version_status, parameter[package_descriptors, targets, repos_data, strip_version, strip_os_code_name]]:
constant[
For each package and target check if it is affected by a sync.
This is the case when the package version in the testing repo is different
from the version in the main repo.
:return: a dict indexed by package names containing
dicts indexed by targets containing
a list of status strings (one for each repo)
]
variable[status] assign[=] dictionary[[], []]
for taget[name[package_descriptor]] in starred[call[name[package_descriptors].values, parameter[]]] begin[:]
variable[pkg_name] assign[=] name[package_descriptor].pkg_name
variable[debian_pkg_name] assign[=] name[package_descriptor].debian_pkg_name
variable[ref_version] assign[=] name[package_descriptor].version
if name[strip_version] begin[:]
variable[ref_version] assign[=] call[name[_strip_version_suffix], parameter[name[ref_version]]]
call[name[status]][name[pkg_name]] assign[=] dictionary[[], []]
for taget[name[target]] in starred[name[targets]] begin[:]
variable[statuses] assign[=] list[[]]
for taget[name[repo_data]] in starred[name[repos_data]] begin[:]
variable[version] assign[=] call[call[name[repo_data].get, parameter[name[target], dictionary[[], []]]].get, parameter[name[debian_pkg_name], constant[None]]]
if name[strip_version] begin[:]
variable[version] assign[=] call[name[_strip_version_suffix], parameter[name[version]]]
if name[strip_os_code_name] begin[:]
variable[version] assign[=] call[name[_strip_os_code_name_suffix], parameter[name[version], name[target].os_code_name]]
if name[ref_version] begin[:]
if <ast.UnaryOp object at 0x7da1b00dddb0> begin[:]
call[name[statuses].append, parameter[constant[missing]]]
call[call[name[status]][name[pkg_name]]][name[target]] assign[=] name[statuses]
return[name[status]] | keyword[def] identifier[get_version_status] (
identifier[package_descriptors] , identifier[targets] , identifier[repos_data] ,
identifier[strip_version] = keyword[False] , identifier[strip_os_code_name] = keyword[False] ):
literal[string]
identifier[status] ={}
keyword[for] identifier[package_descriptor] keyword[in] identifier[package_descriptors] . identifier[values] ():
identifier[pkg_name] = identifier[package_descriptor] . identifier[pkg_name]
identifier[debian_pkg_name] = identifier[package_descriptor] . identifier[debian_pkg_name]
identifier[ref_version] = identifier[package_descriptor] . identifier[version]
keyword[if] identifier[strip_version] :
identifier[ref_version] = identifier[_strip_version_suffix] ( identifier[ref_version] )
identifier[status] [ identifier[pkg_name] ]={}
keyword[for] identifier[target] keyword[in] identifier[targets] :
identifier[statuses] =[]
keyword[for] identifier[repo_data] keyword[in] identifier[repos_data] :
identifier[version] = identifier[repo_data] . identifier[get] ( identifier[target] ,{}). identifier[get] ( identifier[debian_pkg_name] , keyword[None] )
keyword[if] identifier[strip_version] :
identifier[version] = identifier[_strip_version_suffix] ( identifier[version] )
keyword[if] identifier[strip_os_code_name] :
identifier[version] = identifier[_strip_os_code_name_suffix] (
identifier[version] , identifier[target] . identifier[os_code_name] )
keyword[if] identifier[ref_version] :
keyword[if] keyword[not] identifier[version] :
identifier[statuses] . identifier[append] ( literal[string] )
keyword[elif] identifier[version] . identifier[startswith] ( identifier[ref_version] ):
identifier[statuses] . identifier[append] ( literal[string] )
keyword[else] :
keyword[if] identifier[_version_is_gt_other] ( identifier[version] , identifier[ref_version] ):
identifier[statuses] . identifier[append] ( literal[string] )
keyword[else] :
identifier[statuses] . identifier[append] ( literal[string] )
keyword[else] :
keyword[if] keyword[not] identifier[version] :
identifier[statuses] . identifier[append] ( literal[string] )
keyword[else] :
identifier[statuses] . identifier[append] ( literal[string] )
identifier[status] [ identifier[pkg_name] ][ identifier[target] ]= identifier[statuses]
keyword[return] identifier[status] | def get_version_status(package_descriptors, targets, repos_data, strip_version=False, strip_os_code_name=False):
"""
For each package and target check if it is affected by a sync.
This is the case when the package version in the testing repo is different
from the version in the main repo.
:return: a dict indexed by package names containing
dicts indexed by targets containing
a list of status strings (one for each repo)
"""
status = {}
for package_descriptor in package_descriptors.values():
pkg_name = package_descriptor.pkg_name
debian_pkg_name = package_descriptor.debian_pkg_name
ref_version = package_descriptor.version
if strip_version:
ref_version = _strip_version_suffix(ref_version) # depends on [control=['if'], data=[]]
status[pkg_name] = {}
for target in targets:
statuses = []
for repo_data in repos_data:
version = repo_data.get(target, {}).get(debian_pkg_name, None)
if strip_version:
version = _strip_version_suffix(version) # depends on [control=['if'], data=[]]
if strip_os_code_name:
version = _strip_os_code_name_suffix(version, target.os_code_name) # depends on [control=['if'], data=[]]
if ref_version:
if not version:
statuses.append('missing') # depends on [control=['if'], data=[]]
elif version.startswith(ref_version): # including equal
statuses.append('equal') # depends on [control=['if'], data=[]]
elif _version_is_gt_other(version, ref_version):
statuses.append('higher') # depends on [control=['if'], data=[]]
else:
statuses.append('lower') # depends on [control=['if'], data=[]]
elif not version:
statuses.append('ignore') # depends on [control=['if'], data=[]]
else:
statuses.append('obsolete') # depends on [control=['for'], data=['repo_data']]
status[pkg_name][target] = statuses # depends on [control=['for'], data=['target']] # depends on [control=['for'], data=['package_descriptor']]
return status |
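A hedged sketch of driving get_version_status with hand-rolled stand-ins: PkgDesc and the 'xenial' target string are illustrative only; the real caller passes descriptor and target objects from the buildfarm tooling. With both strip flags left at False, the _strip_* helpers are never invoked.

from collections import namedtuple

PkgDesc = namedtuple('PkgDesc', 'pkg_name debian_pkg_name version')
descriptors = {'foo': PkgDesc('foo', 'ros-foo', '1.2.0')}
targets = ['xenial']                               # any hashable target works here
repos_data = [{'xenial': {'ros-foo': '1.2.0'}},    # main repo: same version
              {'xenial': {}}]                      # testing repo: package absent
status = get_version_status(descriptors, targets, repos_data)
assert status == {'foo': {'xenial': ['equal', 'missing']}}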
def subclass_of(*args):
"""
This type validation function can be used in two modes:
    * providing two arguments (c, ref_type), it returns `True` if issubclass(c, ref_type) and raises an IsWrongType
error if not. If ref_type is a set of types, any match with one of the included types will do
* providing a single argument (ref_type), this is a function generator. It returns a validation function to check
that `subclass_of(c, ref_type)`.
:param args:
:return:
"""
if len(args) == 2:
# Standard mode
typ, ref_type = args
if not isinstance(ref_type, set):
# ref_type is a single type
if issubclass(typ, ref_type):
return True
else:
raise IsWrongType(wrong_value=typ, ref_type=ref_type)
else:
# ref_type is a set
match = False
# test against each of the provided types
for ref in ref_type:
if issubclass(typ, ref):
match = True
break
if match:
return True
else:
raise IsWrongType(wrong_value=typ, ref_type=ref_type,
help_msg='Value should be a subclass of any of {ref_type}')
elif len(args) == 1:
# Function generator mode
ref_type = args[0]
if not isinstance(ref_type, set):
def subclass_of_ref(x):
if issubclass(x, ref_type):
return True
else:
raise IsWrongType(wrong_value=x, ref_type=ref_type)
else:
# ref_type is a set
def subclass_of_ref(x):
match = False
# test against each of the provided types
for ref in ref_type:
if issubclass(x, ref):
match = True
break
if match:
return True
else:
raise IsWrongType(wrong_value=x, ref_type=ref_type,
help_msg='Value should be a subclass of any of {ref_type}')
subclass_of_ref.__name__ = 'subclass_of_{}'.format(ref_type)
return subclass_of_ref
else:
raise TypeError('subclass_of expected 2 (normal) or 1 (function generator) arguments, got ' + str(len(args))) | def function[subclass_of, parameter[]]:
constant[
This type validation function can be used in two modes:
    * providing two arguments (c, ref_type), it returns `True` if issubclass(c, ref_type) and raises an IsWrongType
error if not. If ref_type is a set of types, any match with one of the included types will do
* providing a single argument (ref_type), this is a function generator. It returns a validation function to check
that `subclass_of(c, ref_type)`.
:param args:
:return:
]
if compare[call[name[len], parameter[name[args]]] equal[==] constant[2]] begin[:]
<ast.Tuple object at 0x7da1b0ff23b0> assign[=] name[args]
if <ast.UnaryOp object at 0x7da1b0ff27d0> begin[:]
if call[name[issubclass], parameter[name[typ], name[ref_type]]] begin[:]
return[constant[True]] | keyword[def] identifier[subclass_of] (* identifier[args] ):
literal[string]
keyword[if] identifier[len] ( identifier[args] )== literal[int] :
identifier[typ] , identifier[ref_type] = identifier[args]
keyword[if] keyword[not] identifier[isinstance] ( identifier[ref_type] , identifier[set] ):
keyword[if] identifier[issubclass] ( identifier[typ] , identifier[ref_type] ):
keyword[return] keyword[True]
keyword[else] :
keyword[raise] identifier[IsWrongType] ( identifier[wrong_value] = identifier[typ] , identifier[ref_type] = identifier[ref_type] )
keyword[else] :
identifier[match] = keyword[False]
keyword[for] identifier[ref] keyword[in] identifier[ref_type] :
keyword[if] identifier[issubclass] ( identifier[typ] , identifier[ref] ):
identifier[match] = keyword[True]
keyword[break]
keyword[if] identifier[match] :
keyword[return] keyword[True]
keyword[else] :
keyword[raise] identifier[IsWrongType] ( identifier[wrong_value] = identifier[typ] , identifier[ref_type] = identifier[ref_type] ,
identifier[help_msg] = literal[string] )
keyword[elif] identifier[len] ( identifier[args] )== literal[int] :
identifier[ref_type] = identifier[args] [ literal[int] ]
keyword[if] keyword[not] identifier[isinstance] ( identifier[ref_type] , identifier[set] ):
keyword[def] identifier[subclass_of_ref] ( identifier[x] ):
keyword[if] identifier[issubclass] ( identifier[x] , identifier[ref_type] ):
keyword[return] keyword[True]
keyword[else] :
keyword[raise] identifier[IsWrongType] ( identifier[wrong_value] = identifier[x] , identifier[ref_type] = identifier[ref_type] )
keyword[else] :
keyword[def] identifier[subclass_of_ref] ( identifier[x] ):
identifier[match] = keyword[False]
keyword[for] identifier[ref] keyword[in] identifier[ref_type] :
keyword[if] identifier[issubclass] ( identifier[x] , identifier[ref] ):
identifier[match] = keyword[True]
keyword[break]
keyword[if] identifier[match] :
keyword[return] keyword[True]
keyword[else] :
keyword[raise] identifier[IsWrongType] ( identifier[wrong_value] = identifier[x] , identifier[ref_type] = identifier[ref_type] ,
identifier[help_msg] = literal[string] )
identifier[subclass_of_ref] . identifier[__name__] = literal[string] . identifier[format] ( identifier[ref_type] )
keyword[return] identifier[subclass_of_ref]
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] + identifier[str] ( identifier[len] ( identifier[args] ))) | def subclass_of(*args):
"""
This type validation function can be used in two modes:
    * providing two arguments (c, ref_type), it returns `True` if issubclass(c, ref_type) and raises an IsWrongType
error if not. If ref_type is a set of types, any match with one of the included types will do
* providing a single argument (ref_type), this is a function generator. It returns a validation function to check
that `subclass_of(c, ref_type)`.
:param args:
:return:
"""
if len(args) == 2:
# Standard mode
(typ, ref_type) = args
if not isinstance(ref_type, set):
# ref_type is a single type
if issubclass(typ, ref_type):
return True # depends on [control=['if'], data=[]]
else:
raise IsWrongType(wrong_value=typ, ref_type=ref_type) # depends on [control=['if'], data=[]]
else:
# ref_type is a set
match = False
# test against each of the provided types
for ref in ref_type:
if issubclass(typ, ref):
match = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ref']]
if match:
return True # depends on [control=['if'], data=[]]
else:
raise IsWrongType(wrong_value=typ, ref_type=ref_type, help_msg='Value should be a subclass of any of {ref_type}') # depends on [control=['if'], data=[]]
elif len(args) == 1:
# Function generator mode
ref_type = args[0]
if not isinstance(ref_type, set):
def subclass_of_ref(x):
if issubclass(x, ref_type):
return True # depends on [control=['if'], data=[]]
else:
raise IsWrongType(wrong_value=x, ref_type=ref_type) # depends on [control=['if'], data=[]]
else:
# ref_type is a set
def subclass_of_ref(x):
match = False
# test against each of the provided types
for ref in ref_type:
if issubclass(x, ref):
match = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ref']]
if match:
return True # depends on [control=['if'], data=[]]
else:
raise IsWrongType(wrong_value=x, ref_type=ref_type, help_msg='Value should be a subclass of any of {ref_type}')
subclass_of_ref.__name__ = 'subclass_of_{}'.format(ref_type)
return subclass_of_ref # depends on [control=['if'], data=[]]
else:
raise TypeError('subclass_of expected 2 (normal) or 1 (function generator) arguments, got ' + str(len(args))) |
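Example usage of subclass_of in its two modes; only success paths are exercised here, since the IsWrongType error class lives elsewhere in the library.

assert subclass_of(bool, int)              # standard mode: bool subclasses int
assert subclass_of(bool, {int, str})       # set mode: any member may match
is_int_like = subclass_of(int)             # generator mode returns a checker
assert is_int_like(bool)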
def eta_from_seebeck(seeb,Lambda):
"""
    Takes a Seebeck coefficient value and adjusts the analytic Seebeck coefficient until the two are equal.
    Returns: eta, the reduced chemical potential at which the two Seebeck coefficients match
"""
from scipy.optimize import fsolve
out = fsolve(lambda x: (seebeck_spb(x,Lambda) - abs(seeb)) ** 2, 1.,full_output=True)
return out[0][0] | def function[eta_from_seebeck, parameter[seeb, Lambda]]:
constant[
    Takes a Seebeck coefficient value and adjusts the analytic Seebeck coefficient until the two are equal.
    Returns: eta, the reduced chemical potential at which the two Seebeck coefficients match
]
from relative_module[scipy.optimize] import module[fsolve]
variable[out] assign[=] call[name[fsolve], parameter[<ast.Lambda object at 0x7da1b1cd6aa0>, constant[1.0]]]
return[call[call[name[out]][constant[0]]][constant[0]]] | keyword[def] identifier[eta_from_seebeck] ( identifier[seeb] , identifier[Lambda] ):
literal[string]
keyword[from] identifier[scipy] . identifier[optimize] keyword[import] identifier[fsolve]
identifier[out] = identifier[fsolve] ( keyword[lambda] identifier[x] :( identifier[seebeck_spb] ( identifier[x] , identifier[Lambda] )- identifier[abs] ( identifier[seeb] ))** literal[int] , literal[int] , identifier[full_output] = keyword[True] )
keyword[return] identifier[out] [ literal[int] ][ literal[int] ] | def eta_from_seebeck(seeb, Lambda):
"""
    Takes a Seebeck coefficient value and adjusts the analytic Seebeck coefficient until the two are equal.
    Returns: eta, the reduced chemical potential at which the two Seebeck coefficients match
"""
from scipy.optimize import fsolve
out = fsolve(lambda x: (seebeck_spb(x, Lambda) - abs(seeb)) ** 2, 1.0, full_output=True)
return out[0][0] |
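The root-finding pattern above, shown with a toy linear stand-in for seebeck_spb (the real analytic single-parabolic-band model is defined elsewhere in this module). Squaring the residual makes the zero of the wrapped function coincide with the eta at which the two values match, at the cost of a degenerate root that fsolve approaches more slowly.

from scipy.optimize import fsolve

def toy_seebeck(eta, Lambda):
    return 100.0 * (eta + Lambda)      # illustrative stand-in model only

target = 250.0                         # matching eta is 2.0 for Lambda = 0.5
out = fsolve(lambda x: (toy_seebeck(x, 0.5) - abs(target)) ** 2, 1.0,
             full_output=True)
assert abs(out[0][0] - 2.0) < 1e-2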
def get_location(self, location):
"""
        For an index location, return a dict of the index and value. This is optimized for speed because
        it does not need to look up the index location with a search. It also accepts relative indexing
        from the end of the Series in standard Python notation [-3, -2, -1]
        :param location: index location as a standard Python positive or negative integer
:return: dictionary
"""
return {self.index_name: self._index[location], self.data_name: self._data[location]} | def function[get_location, parameter[self, location]]:
constant[
    For an index location, return a dict of the index and value. This is optimized for speed because
    it does not need to look up the index location with a search. It also accepts relative indexing
    from the end of the Series in standard Python notation [-3, -2, -1]
    :param location: index location as a standard Python positive or negative integer
:return: dictionary
]
return[dictionary[[<ast.Attribute object at 0x7da20c7c9f90>, <ast.Attribute object at 0x7da20c7c8eb0>], [<ast.Subscript object at 0x7da20c7cb0a0>, <ast.Subscript object at 0x7da20c7cbe50>]]] | keyword[def] identifier[get_location] ( identifier[self] , identifier[location] ):
literal[string]
keyword[return] { identifier[self] . identifier[index_name] : identifier[self] . identifier[_index] [ identifier[location] ], identifier[self] . identifier[data_name] : identifier[self] . identifier[_data] [ identifier[location] ]} | def get_location(self, location):
"""
    For an index location, return a dict of the index and value. This is optimized for speed because
    it does not need to look up the index location with a search. It also accepts relative indexing
    from the end of the Series in standard Python notation [-3, -2, -1]
    :param location: index location as a standard Python positive or negative integer
:return: dictionary
"""
return {self.index_name: self._index[location], self.data_name: self._data[location]} |
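A runnable sketch of the O(1) positional lookup get_location relies on: plain list indexing accepts negative positions, so no search is needed. MiniSeries is a stand-in that mirrors only the attributes the method touches and borrows the function defined above as a method.

class MiniSeries:
    index_name, data_name = 'index', 'data'
    def __init__(self, index, data):
        self._index, self._data = index, data

MiniSeries.get_location = get_location       # attach the function as a method

s = MiniSeries([10, 11, 12], ['a', 'b', 'c'])
assert s.get_location(0) == {'index': 10, 'data': 'a'}
assert s.get_location(-1) == {'index': 12, 'data': 'c'}   # relative indexing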
async def call_async(self, method_name: str, *args, rpc_timeout: float = None, **kwargs):
"""
Send JSON RPC request to a backend socket and receive reply (asynchronously)
:param method_name: Method name
:param args: Args that will be passed to the remote function
:param float rpc_timeout: Timeout in seconds for Server response, set to None to disable the timeout
:param kwargs: Keyword args that will be passed to the remote function
"""
# if an rpc_timeout override is not specified, use the one set in the Client attributes
if rpc_timeout is None:
rpc_timeout = self.rpc_timeout
if rpc_timeout:
# Implementation note: this simply wraps the call in a timeout and converts to the built-in TimeoutError
try:
return await asyncio.wait_for(self._call_async(method_name, *args, **kwargs), timeout=rpc_timeout)
except asyncio.TimeoutError:
raise TimeoutError(f"Timeout on client {self.endpoint}, method name {method_name}, class info: {self}")
else:
return await self._call_async(method_name, *args, **kwargs) | <ast.AsyncFunctionDef object at 0x7da1b0efa110> | keyword[async] keyword[def] identifier[call_async] ( identifier[self] , identifier[method_name] : identifier[str] ,* identifier[args] , identifier[rpc_timeout] : identifier[float] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[rpc_timeout] keyword[is] keyword[None] :
identifier[rpc_timeout] = identifier[self] . identifier[rpc_timeout]
keyword[if] identifier[rpc_timeout] :
keyword[try] :
keyword[return] keyword[await] identifier[asyncio] . identifier[wait_for] ( identifier[self] . identifier[_call_async] ( identifier[method_name] ,* identifier[args] ,** identifier[kwargs] ), identifier[timeout] = identifier[rpc_timeout] )
keyword[except] identifier[asyncio] . identifier[TimeoutError] :
keyword[raise] identifier[TimeoutError] ( literal[string] )
keyword[else] :
keyword[return] keyword[await] identifier[self] . identifier[_call_async] ( identifier[method_name] ,* identifier[args] ,** identifier[kwargs] ) | async def call_async(self, method_name: str, *args, rpc_timeout: float=None, **kwargs):
"""
Send JSON RPC request to a backend socket and receive reply (asynchronously)
:param method_name: Method name
:param args: Args that will be passed to the remote function
:param float rpc_timeout: Timeout in seconds for Server response, set to None to disable the timeout
:param kwargs: Keyword args that will be passed to the remote function
"""
# if an rpc_timeout override is not specified, use the one set in the Client attributes
if rpc_timeout is None:
rpc_timeout = self.rpc_timeout # depends on [control=['if'], data=['rpc_timeout']]
if rpc_timeout:
# Implementation note: this simply wraps the call in a timeout and converts to the built-in TimeoutError
try:
return await asyncio.wait_for(self._call_async(method_name, *args, **kwargs), timeout=rpc_timeout) # depends on [control=['try'], data=[]]
except asyncio.TimeoutError:
raise TimeoutError(f'Timeout on client {self.endpoint}, method name {method_name}, class info: {self}') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
return await self._call_async(method_name, *args, **kwargs) |
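A self-contained sketch of the timeout wrapper call_async applies to its RPC: asyncio.wait_for cancels the awaited call and raises asyncio.TimeoutError, which is then surfaced as the built-in TimeoutError. The endpoint string is a placeholder.

import asyncio

async def slow_rpc():
    await asyncio.sleep(10)                    # stands in for the remote call

async def main():
    try:
        await asyncio.wait_for(slow_rpc(), timeout=0.01)
    except asyncio.TimeoutError:
        raise TimeoutError('Timeout on client <endpoint>, method name slow_rpc')

try:
    asyncio.run(main())
except TimeoutError as exc:
    print(exc)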
def order_by_next_occurrence(self):
"""
:return: A list of events in order of minimum occurrence greater than
now (or overlapping now in the case of drop-in events).
This is an expensive operation - use with as small a source
queryset as possible.
Events with no upcoming occurrence appear last (in order of their first
occurrence). Events with no occurrences at all appear right at the end
(in no order). To remove these, use "with_upcoming_occurrences" or
"with_upcoming_or_no_occurrences".
"""
qs = self.prefetch_related('occurrences')
def _sort(x):
try:
# If there's an upcoming occurrence, use it.
return x.get_next_occurrence().start
except AttributeError:
try:
# If there's any occurrence, use the first one, plus 1000 years.
return x.get_first_occurrence().start + timedelta(days=1000*365)
except AttributeError:
                # If no occurrence, use a localised datetime.max (minus a
# few days to avoid overflow)
return make_aware(datetime.max-timedelta(2))
sorted_qs = sorted(qs, key=_sort)
return sorted_qs | def function[order_by_next_occurrence, parameter[self]]:
constant[
:return: A list of events in order of minimum occurrence greater than
now (or overlapping now in the case of drop-in events).
This is an expensive operation - use with as small a source
queryset as possible.
Events with no upcoming occurrence appear last (in order of their first
occurrence). Events with no occurrences at all appear right at the end
(in no order). To remove these, use "with_upcoming_occurrences" or
"with_upcoming_or_no_occurrences".
]
variable[qs] assign[=] call[name[self].prefetch_related, parameter[constant[occurrences]]]
def function[_sort, parameter[x]]:
<ast.Try object at 0x7da1b0ebc220>
variable[sorted_qs] assign[=] call[name[sorted], parameter[name[qs]]]
return[name[sorted_qs]] | keyword[def] identifier[order_by_next_occurrence] ( identifier[self] ):
literal[string]
identifier[qs] = identifier[self] . identifier[prefetch_related] ( literal[string] )
keyword[def] identifier[_sort] ( identifier[x] ):
keyword[try] :
keyword[return] identifier[x] . identifier[get_next_occurrence] (). identifier[start]
keyword[except] identifier[AttributeError] :
keyword[try] :
keyword[return] identifier[x] . identifier[get_first_occurrence] (). identifier[start] + identifier[timedelta] ( identifier[days] = literal[int] * literal[int] )
keyword[except] identifier[AttributeError] :
keyword[return] identifier[make_aware] ( identifier[datetime] . identifier[max] - identifier[timedelta] ( literal[int] ))
identifier[sorted_qs] = identifier[sorted] ( identifier[qs] , identifier[key] = identifier[_sort] )
keyword[return] identifier[sorted_qs] | def order_by_next_occurrence(self):
"""
:return: A list of events in order of minimum occurrence greater than
now (or overlapping now in the case of drop-in events).
This is an expensive operation - use with as small a source
queryset as possible.
Events with no upcoming occurrence appear last (in order of their first
occurrence). Events with no occurrences at all appear right at the end
(in no order). To remove these, use "with_upcoming_occurrences" or
"with_upcoming_or_no_occurrences".
"""
qs = self.prefetch_related('occurrences')
def _sort(x):
try:
# If there's an upcoming occurrence, use it.
return x.get_next_occurrence().start # depends on [control=['try'], data=[]]
except AttributeError:
try:
# If there's any occurrence, use the first one, plus 1000 years.
return x.get_first_occurrence().start + timedelta(days=1000 * 365) # depends on [control=['try'], data=[]]
except AttributeError:
                # If no occurrence, use a localised datetime.max (minus a
# few days to avoid overflow)
return make_aware(datetime.max - timedelta(2)) # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
sorted_qs = sorted(qs, key=_sort)
return sorted_qs |
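The layered try/except sort key above, reduced to a standalone sketch: dict lookups and KeyError stand in for the attribute access and AttributeError of the original, and naive datetimes replace the aware ones. Items with an upcoming time sort first, items with only a past time are pushed roughly 1000 years back, and items with no time at all land at the very end.

from datetime import datetime, timedelta

def sort_key(item):
    try:
        return item['next']                          # upcoming occurrence
    except KeyError:
        try:
            return item['first'] + timedelta(days=1000 * 365)
        except KeyError:
            return datetime.max - timedelta(2)       # no occurrences at all

events = [{}, {'first': datetime(2000, 1, 1)}, {'next': datetime(2030, 1, 1)}]
ordered = sorted(events, key=sort_key)
assert ordered[0] == {'next': datetime(2030, 1, 1)} and ordered[-1] == {}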
def ensure_contexted(func):
"""
    This decorator ensures that an instance of the
Evtx class is used within a context statement. That is,
that the `with` statement is used, or `__enter__()`
and `__exit__()` are called explicitly.
"""
@wraps(func)
def wrapped(self, *args, **kwargs):
if self._buf is None:
raise TypeError("An Evtx object must be used with"
" a context (see the `with` statement).")
else:
return func(self, *args, **kwargs)
return wrapped | def function[ensure_contexted, parameter[func]]:
constant[
    This decorator ensures that an instance of the
Evtx class is used within a context statement. That is,
that the `with` statement is used, or `__enter__()`
and `__exit__()` are called explicitly.
]
def function[wrapped, parameter[self]]:
if compare[name[self]._buf is constant[None]] begin[:]
<ast.Raise object at 0x7da1b20e65f0>
return[name[wrapped]] | keyword[def] identifier[ensure_contexted] ( identifier[func] ):
literal[string]
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapped] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
keyword[if] identifier[self] . identifier[_buf] keyword[is] keyword[None] :
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] )
keyword[else] :
keyword[return] identifier[func] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapped] | def ensure_contexted(func):
"""
    This decorator ensures that an instance of the
Evtx class is used within a context statement. That is,
that the `with` statement is used, or `__enter__()`
and `__exit__()` are called explicitly.
"""
@wraps(func)
def wrapped(self, *args, **kwargs):
if self._buf is None:
raise TypeError('An Evtx object must be used with a context (see the `with` statement).') # depends on [control=['if'], data=[]]
else:
return func(self, *args, **kwargs)
return wrapped |
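A runnable sketch of ensure_contexted guarding a minimal stand-in class; this assumes functools.wraps is imported in the module where the decorator is defined. The stub's __enter__ sets _buf, which is exactly the attribute the decorator inspects.

class StubEvtx:
    def __init__(self):
        self._buf = None
    def __enter__(self):
        self._buf = b'mapped'
        return self
    def __exit__(self, *exc_info):
        self._buf = None
    @ensure_contexted
    def read(self):
        return self._buf

with StubEvtx() as evtx:
    assert evtx.read() == b'mapped'      # fine inside the context
try:
    StubEvtx().read()                    # no context entered
except TypeError as exc:
    print(exc)                           # "An Evtx object must be used with..."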
def _parse_hosts(self, hosts):
"""
Return hosts parsed into a tuple of tuples.
:param hosts: String or list of hosts
"""
# Default host
if hosts is None:
return
# If it's a string, we allow comma separated strings
if isinstance(hosts, six.string_types):
# Split comma-separated list
hosts = [host.strip() for host in hosts.split(',')]
# Split host and port
hosts = [host.split(':') for host in hosts]
# Coerce ports to int
hosts = [(host[0], int(host[1])) for host in hosts]
# The python-etcd client explicitly checks for a tuple type
return tuple(hosts) | def function[_parse_hosts, parameter[self, hosts]]:
constant[
Return hosts parsed into a tuple of tuples.
:param hosts: String or list of hosts
]
if compare[name[hosts] is constant[None]] begin[:]
return[None]
if call[name[isinstance], parameter[name[hosts], name[six].string_types]] begin[:]
variable[hosts] assign[=] <ast.ListComp object at 0x7da204346f80>
variable[hosts] assign[=] <ast.ListComp object at 0x7da1b00f4ac0>
variable[hosts] assign[=] <ast.ListComp object at 0x7da1b00f4190>
return[call[name[tuple], parameter[name[hosts]]]] | keyword[def] identifier[_parse_hosts] ( identifier[self] , identifier[hosts] ):
literal[string]
keyword[if] identifier[hosts] keyword[is] keyword[None] :
keyword[return]
keyword[if] identifier[isinstance] ( identifier[hosts] , identifier[six] . identifier[string_types] ):
identifier[hosts] =[ identifier[host] . identifier[strip] () keyword[for] identifier[host] keyword[in] identifier[hosts] . identifier[split] ( literal[string] )]
identifier[hosts] =[ identifier[host] . identifier[split] ( literal[string] ) keyword[for] identifier[host] keyword[in] identifier[hosts] ]
identifier[hosts] =[( identifier[host] [ literal[int] ], identifier[int] ( identifier[host] [ literal[int] ])) keyword[for] identifier[host] keyword[in] identifier[hosts] ]
keyword[return] identifier[tuple] ( identifier[hosts] ) | def _parse_hosts(self, hosts):
"""
Return hosts parsed into a tuple of tuples.
:param hosts: String or list of hosts
"""
# Default host
if hosts is None:
return # depends on [control=['if'], data=[]]
# If it's a string, we allow comma separated strings
if isinstance(hosts, six.string_types):
# Split comma-separated list
hosts = [host.strip() for host in hosts.split(',')]
# Split host and port
hosts = [host.split(':') for host in hosts]
# Coerce ports to int
hosts = [(host[0], int(host[1])) for host in hosts] # depends on [control=['if'], data=[]]
# The python-etcd client explicitly checks for a tuple type
return tuple(hosts) |
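A standalone rendering of the parsing steps above, with plain str in place of six.string_types so the sketch runs without six installed. As in the original, a list input skips the string handling and is only coerced to a tuple.

def parse_hosts(hosts):
    if hosts is None:
        return None
    if isinstance(hosts, str):
        hosts = [host.strip() for host in hosts.split(',')]    # split the list
        hosts = [host.split(':') for host in hosts]            # host / port
        hosts = [(host[0], int(host[1])) for host in hosts]    # ports to int
    return tuple(hosts)

assert parse_hosts('etcd-1:2379, etcd-2:2380') == (('etcd-1', 2379),
                                                   ('etcd-2', 2380))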
def add_after(self):
"""Returns a builder inserting a new block after the current block"""
idx = self._container.structure.index(self)
return BlockBuilder(self._container, idx+1) | def function[add_after, parameter[self]]:
constant[Returns a builder inserting a new block after the current block]
variable[idx] assign[=] call[name[self]._container.structure.index, parameter[name[self]]]
return[call[name[BlockBuilder], parameter[name[self]._container, binary_operation[name[idx] + constant[1]]]]] | keyword[def] identifier[add_after] ( identifier[self] ):
literal[string]
identifier[idx] = identifier[self] . identifier[_container] . identifier[structure] . identifier[index] ( identifier[self] )
keyword[return] identifier[BlockBuilder] ( identifier[self] . identifier[_container] , identifier[idx] + literal[int] ) | def add_after(self):
"""Returns a builder inserting a new block after the current block"""
idx = self._container.structure.index(self)
return BlockBuilder(self._container, idx + 1) |
def wait_for_initial_conf(self, timeout=1.0):
"""Wait initial configuration from the arbiter.
Basically sleep 1.0 and check if new_conf is here
:param timeout: timeout to wait
:type timeout: int
:return: None
"""
logger.info("Waiting for initial configuration")
        # The arbiter has not yet set our have_conf parameter
_ts = time.time()
while not self.new_conf and not self.interrupted:
# Make a pause and check if the system time changed
_, _ = self.make_a_pause(timeout, check_time_change=True)
if not self.interrupted:
logger.info("Got initial configuration, waited for: %.2f seconds", time.time() - _ts)
statsmgr.timer('configuration.initial', time.time() - _ts)
else:
logger.info("Interrupted before getting the initial configuration") | def function[wait_for_initial_conf, parameter[self, timeout]]:
    constant[Wait for the initial configuration from the arbiter.
    Basically sleep 1.0 second at a time and check whether new_conf has arrived
:param timeout: timeout to wait
:type timeout: int
:return: None
]
call[name[logger].info, parameter[constant[Waiting for initial configuration]]]
variable[_ts] assign[=] call[name[time].time, parameter[]]
while <ast.BoolOp object at 0x7da18fe92ad0> begin[:]
<ast.Tuple object at 0x7da18fe910f0> assign[=] call[name[self].make_a_pause, parameter[name[timeout]]]
if <ast.UnaryOp object at 0x7da18fe93d30> begin[:]
call[name[logger].info, parameter[constant[Got initial configuration, waited for: %.2f seconds], binary_operation[call[name[time].time, parameter[]] - name[_ts]]]]
call[name[statsmgr].timer, parameter[constant[configuration.initial], binary_operation[call[name[time].time, parameter[]] - name[_ts]]]] | keyword[def] identifier[wait_for_initial_conf] ( identifier[self] , identifier[timeout] = literal[int] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] )
identifier[_ts] = identifier[time] . identifier[time] ()
keyword[while] keyword[not] identifier[self] . identifier[new_conf] keyword[and] keyword[not] identifier[self] . identifier[interrupted] :
identifier[_] , identifier[_] = identifier[self] . identifier[make_a_pause] ( identifier[timeout] , identifier[check_time_change] = keyword[True] )
keyword[if] keyword[not] identifier[self] . identifier[interrupted] :
identifier[logger] . identifier[info] ( literal[string] , identifier[time] . identifier[time] ()- identifier[_ts] )
identifier[statsmgr] . identifier[timer] ( literal[string] , identifier[time] . identifier[time] ()- identifier[_ts] )
keyword[else] :
identifier[logger] . identifier[info] ( literal[string] ) | def wait_for_initial_conf(self, timeout=1.0):
"""Wait initial configuration from the arbiter.
Basically sleep 1.0 and check if new_conf is here
:param timeout: timeout to wait
:type timeout: int
:return: None
"""
logger.info('Waiting for initial configuration')
    # The arbiter has not yet set our have_conf parameter
_ts = time.time()
while not self.new_conf and (not self.interrupted):
# Make a pause and check if the system time changed
(_, _) = self.make_a_pause(timeout, check_time_change=True) # depends on [control=['while'], data=[]]
if not self.interrupted:
logger.info('Got initial configuration, waited for: %.2f seconds', time.time() - _ts)
statsmgr.timer('configuration.initial', time.time() - _ts) # depends on [control=['if'], data=[]]
else:
logger.info('Interrupted before getting the initial configuration') |
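The wait loop above in miniature: poll until a flag flips or an interrupt is raised, then report the elapsed time. The instantly arriving configuration is simulated here; in the daemon it is delivered by the arbiter.

import time

new_conf, interrupted = None, False
_ts = time.time()
while not new_conf and not interrupted:
    time.sleep(0.01)               # stand-in for make_a_pause(timeout)
    new_conf = {'conf': 1}         # simulated arrival of the configuration
print('waited for: %.2f seconds' % (time.time() - _ts))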
def get_change(self, change_id):
"""
Get information about a proposed set of changes, as submitted
by the change_rrsets method.
Returns a Python data structure with status information about the
changes.
:type change_id: str
:param change_id: The unique identifier for the set of changes.
This ID is returned in the response to the change_rrsets method.
"""
uri = '/%s/change/%s' % (self.Version, change_id)
response = self.make_request('GET', uri)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e | def function[get_change, parameter[self, change_id]]:
constant[
Get information about a proposed set of changes, as submitted
by the change_rrsets method.
Returns a Python data structure with status information about the
changes.
:type change_id: str
:param change_id: The unique identifier for the set of changes.
This ID is returned in the response to the change_rrsets method.
]
variable[uri] assign[=] binary_operation[constant[/%s/change/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b26a51e0>, <ast.Name object at 0x7da1b26a5870>]]]
variable[response] assign[=] call[name[self].make_request, parameter[constant[GET], name[uri]]]
variable[body] assign[=] call[name[response].read, parameter[]]
call[name[boto].log.debug, parameter[name[body]]]
if compare[name[response].status greater_or_equal[>=] constant[300]] begin[:]
<ast.Raise object at 0x7da1b26a4670>
variable[e] assign[=] call[name[boto].jsonresponse.Element, parameter[]]
variable[h] assign[=] call[name[boto].jsonresponse.XmlHandler, parameter[name[e], constant[None]]]
call[name[h].parse, parameter[name[body]]]
return[name[e]] | keyword[def] identifier[get_change] ( identifier[self] , identifier[change_id] ):
literal[string]
identifier[uri] = literal[string] %( identifier[self] . identifier[Version] , identifier[change_id] )
identifier[response] = identifier[self] . identifier[make_request] ( literal[string] , identifier[uri] )
identifier[body] = identifier[response] . identifier[read] ()
identifier[boto] . identifier[log] . identifier[debug] ( identifier[body] )
keyword[if] identifier[response] . identifier[status] >= literal[int] :
keyword[raise] identifier[exception] . identifier[DNSServerError] ( identifier[response] . identifier[status] ,
identifier[response] . identifier[reason] ,
identifier[body] )
identifier[e] = identifier[boto] . identifier[jsonresponse] . identifier[Element] ()
identifier[h] = identifier[boto] . identifier[jsonresponse] . identifier[XmlHandler] ( identifier[e] , keyword[None] )
identifier[h] . identifier[parse] ( identifier[body] )
keyword[return] identifier[e] | def get_change(self, change_id):
"""
Get information about a proposed set of changes, as submitted
by the change_rrsets method.
Returns a Python data structure with status information about the
changes.
:type change_id: str
:param change_id: The unique identifier for the set of changes.
This ID is returned in the response to the change_rrsets method.
"""
uri = '/%s/change/%s' % (self.Version, change_id)
response = self.make_request('GET', uri)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status, response.reason, body) # depends on [control=['if'], data=[]]
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e |
def get_term_category_frequencies(self, scatterchartdata):
'''
Applies the ranker in scatterchartdata to term-category frequencies.
Parameters
----------
scatterchartdata : ScatterChartData
Returns
-------
pd.DataFrame
'''
term_ranker = scatterchartdata.term_ranker(self)
if scatterchartdata.use_non_text_features:
term_ranker.use_non_text_features()
return term_ranker.get_ranks() | def function[get_term_category_frequencies, parameter[self, scatterchartdata]]:
constant[
Applies the ranker in scatterchartdata to term-category frequencies.
Parameters
----------
scatterchartdata : ScatterChartData
Returns
-------
pd.DataFrame
]
variable[term_ranker] assign[=] call[name[scatterchartdata].term_ranker, parameter[name[self]]]
if name[scatterchartdata].use_non_text_features begin[:]
call[name[term_ranker].use_non_text_features, parameter[]]
return[call[name[term_ranker].get_ranks, parameter[]]] | keyword[def] identifier[get_term_category_frequencies] ( identifier[self] , identifier[scatterchartdata] ):
literal[string]
identifier[term_ranker] = identifier[scatterchartdata] . identifier[term_ranker] ( identifier[self] )
keyword[if] identifier[scatterchartdata] . identifier[use_non_text_features] :
identifier[term_ranker] . identifier[use_non_text_features] ()
keyword[return] identifier[term_ranker] . identifier[get_ranks] () | def get_term_category_frequencies(self, scatterchartdata):
"""
Applies the ranker in scatterchartdata to term-category frequencies.
Parameters
----------
scatterchartdata : ScatterChartData
Returns
-------
pd.DataFrame
"""
term_ranker = scatterchartdata.term_ranker(self)
if scatterchartdata.use_non_text_features:
term_ranker.use_non_text_features() # depends on [control=['if'], data=[]]
return term_ranker.get_ranks() |
def connection_from_url(self, url, pool_kwargs=None):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url`.
If ``pool_kwargs`` is not provided and a new pool needs to be
constructed, ``self.connection_pool_kw`` is used to initialize
the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
is provided, it is used instead. Note that if a new pool does not
need to be created for the request, the provided ``pool_kwargs`` are
not used.
"""
u = parse_url(url)
return self.connection_from_host(u.host, port=u.port, scheme=u.scheme,
pool_kwargs=pool_kwargs) | def function[connection_from_url, parameter[self, url, pool_kwargs]]:
constant[
Similar to :func:`urllib3.connectionpool.connection_from_url`.
If ``pool_kwargs`` is not provided and a new pool needs to be
constructed, ``self.connection_pool_kw`` is used to initialize
the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
is provided, it is used instead. Note that if a new pool does not
need to be created for the request, the provided ``pool_kwargs`` are
not used.
]
variable[u] assign[=] call[name[parse_url], parameter[name[url]]]
return[call[name[self].connection_from_host, parameter[name[u].host]]] | keyword[def] identifier[connection_from_url] ( identifier[self] , identifier[url] , identifier[pool_kwargs] = keyword[None] ):
literal[string]
identifier[u] = identifier[parse_url] ( identifier[url] )
keyword[return] identifier[self] . identifier[connection_from_host] ( identifier[u] . identifier[host] , identifier[port] = identifier[u] . identifier[port] , identifier[scheme] = identifier[u] . identifier[scheme] ,
identifier[pool_kwargs] = identifier[pool_kwargs] ) | def connection_from_url(self, url, pool_kwargs=None):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url`.
If ``pool_kwargs`` is not provided and a new pool needs to be
constructed, ``self.connection_pool_kw`` is used to initialize
the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
is provided, it is used instead. Note that if a new pool does not
need to be created for the request, the provided ``pool_kwargs`` are
not used.
"""
u = parse_url(url)
return self.connection_from_host(u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs) |
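urllib3's parse_url splits out exactly the pieces the pool manager keys its pools on; a quick illustration of the host/port/scheme extraction used above.

from urllib3.util import parse_url

u = parse_url('https://example.com:8443/path?q=1')
assert (u.host, u.port, u.scheme) == ('example.com', 8443, 'https')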
def _got_cons_input(self, handle):
"""Callback for handle events detected by ipmi session
"""
self._addpendingdata(handle.read())
if not self.awaitingack:
self._sendpendingoutput() | def function[_got_cons_input, parameter[self, handle]]:
constant[Callback for handle events detected by ipmi session
]
call[name[self]._addpendingdata, parameter[call[name[handle].read, parameter[]]]]
if <ast.UnaryOp object at 0x7da18dc07e50> begin[:]
call[name[self]._sendpendingoutput, parameter[]] | keyword[def] identifier[_got_cons_input] ( identifier[self] , identifier[handle] ):
literal[string]
identifier[self] . identifier[_addpendingdata] ( identifier[handle] . identifier[read] ())
keyword[if] keyword[not] identifier[self] . identifier[awaitingack] :
identifier[self] . identifier[_sendpendingoutput] () | def _got_cons_input(self, handle):
"""Callback for handle events detected by ipmi session
"""
self._addpendingdata(handle.read())
if not self.awaitingack:
self._sendpendingoutput() # depends on [control=['if'], data=[]] |
def _get_rule(cls):
# type: (_MetaRule) -> (List[object], List[object])
"""
Get rule on the Rule class.
    :param cls: Rule class from which to return the rule.
    :return: Rule inside the Rule class.
    :raise RuleNotDefinedException: If the rule is not defined.
    :raise CantCreateSingleRuleException: If the rule consists of more than one rule.
    :raise NotASingleSymbolException: If the number of symbols on the left is more than one.
"""
if cls._traverse:
return (cls.left, cls.right)
if len(cls.rules) > 1:
raise CantCreateSingleRuleException(cls)
return cls.rules[0] | def function[_get_rule, parameter[cls]]:
constant[
Get rule on the Rule class.
:param cls: Rule for which return the rule.
:return: Rule inside the Rule class.
:raise RuleNotDefinedException: If the rule is not defined.
:raise CantCreateSingleRuleException: If the rule consists of more rules.
:raise NotASingleSymbolException: If number of symbols on the left is more.
]
if name[cls]._traverse begin[:]
return[tuple[[<ast.Attribute object at 0x7da1b2768610>, <ast.Attribute object at 0x7da1b27748b0>]]]
if compare[call[name[len], parameter[name[cls].rules]] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da1b2774340>
return[call[name[cls].rules][constant[0]]] | keyword[def] identifier[_get_rule] ( identifier[cls] ):
literal[string]
keyword[if] identifier[cls] . identifier[_traverse] :
keyword[return] ( identifier[cls] . identifier[left] , identifier[cls] . identifier[right] )
keyword[if] identifier[len] ( identifier[cls] . identifier[rules] )> literal[int] :
keyword[raise] identifier[CantCreateSingleRuleException] ( identifier[cls] )
keyword[return] identifier[cls] . identifier[rules] [ literal[int] ] | def _get_rule(cls):
# type: (_MetaRule) -> (List[object], List[object])
    '\n        Get rule on the Rule class.\n        :param cls: Rule class from which to return the rule.\n        :return: Rule inside the Rule class.\n        :raise RuleNotDefinedException: If the rule is not defined.\n        :raise CantCreateSingleRuleException: If the rule consists of more than one rule.\n        :raise NotASingleSymbolException: If the number of symbols on the left is more than one.\n        '
if cls._traverse:
return (cls.left, cls.right) # depends on [control=['if'], data=[]]
if len(cls.rules) > 1:
raise CantCreateSingleRuleException(cls) # depends on [control=['if'], data=[]]
return cls.rules[0] |
async def cli_handler(loop):
"""Application starts here."""
parser = argparse.ArgumentParser()
parser.add_argument('command', nargs='+',
help='commands, help, ...')
parser.add_argument('--name', help='apple tv name',
dest='name', default='Apple TV')
parser.add_argument('--address', help='device ip address or hostname',
dest='address', default=None)
parser.add_argument('--protocol', action=TransformProtocol,
help='protocol to use (values: dmap, mrp)',
dest='protocol', default=None)
parser.add_argument('--port', help='port when connecting',
dest='port', type=_in_range(0, 65535),
default=0)
parser.add_argument('-t', '--scan-timeout', help='timeout when scanning',
dest='scan_timeout', type=_in_range(1, 100),
metavar='TIMEOUT', default=3)
parser.add_argument('--version', action='version',
help='version of atvremote and pyatv',
version='%(prog)s {0}'.format(const.__version__))
pairing = parser.add_argument_group('pairing')
pairing.add_argument('--remote-name', help='remote pairing name',
dest='remote_name', default='pyatv')
pairing.add_argument('-p', '--pin', help='pairing pin code',
dest='pin_code', metavar='PIN', default=1234,
type=_in_range(0, 9999, allow_none=True))
pairing.add_argument('--pairing-guid',
help='pairing guid (16 chars hex)',
dest='pairing_guid', default=None)
parser.add_argument('-a', '--autodiscover', action='store_true',
help='automatically find a device',
dest='autodiscover', default=False)
parser.add_argument('--device_credentials', help='credentials to device',
dest='device_credentials', default=None)
airplay = parser.add_argument_group('airplay')
airplay.add_argument('--airplay_credentials',
help='credentials for airplay',
dest='airplay_credentials', default=None)
debug = parser.add_argument_group('debugging')
debug.add_argument('-v', '--verbose', help='increase output verbosity',
action='store_true', dest='verbose')
debug.add_argument('--debug', help='print debug information',
action='store_true', dest='debug')
args = parser.parse_args()
loglevel = logging.WARNING
if args.verbose:
loglevel = logging.INFO
if args.debug:
loglevel = logging.DEBUG
logging.basicConfig(level=loglevel,
format='%(levelname)s: %(message)s')
logging.getLogger('requests').setLevel(logging.WARNING)
cmds = retrieve_commands(GlobalCommands)
if args.command[0] in cmds:
glob_cmds = GlobalCommands(args, loop)
return (await _exec_command(
glob_cmds, args.command[0], print_result=False))
if args.autodiscover:
if not await _autodiscover_device(args, loop):
return 1
return await _handle_commands(args, loop)
if args.address:
return await _handle_commands(args, loop)
logging.error('To autodiscover an Apple TV, add -a')
return 1 | <ast.AsyncFunctionDef object at 0x7da2054a4070> | keyword[async] keyword[def] identifier[cli_handler] ( identifier[loop] ):
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ()
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ,
identifier[dest] = literal[string] , identifier[default] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ,
identifier[dest] = literal[string] , identifier[default] = keyword[None] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[action] = identifier[TransformProtocol] ,
identifier[help] = literal[string] ,
identifier[dest] = literal[string] , identifier[default] = keyword[None] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ,
identifier[dest] = literal[string] , identifier[type] = identifier[_in_range] ( literal[int] , literal[int] ),
identifier[default] = literal[int] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[help] = literal[string] ,
identifier[dest] = literal[string] , identifier[type] = identifier[_in_range] ( literal[int] , literal[int] ),
identifier[metavar] = literal[string] , identifier[default] = literal[int] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] ,
identifier[version] = literal[string] . identifier[format] ( identifier[const] . identifier[__version__] ))
identifier[pairing] = identifier[parser] . identifier[add_argument_group] ( literal[string] )
identifier[pairing] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ,
identifier[dest] = literal[string] , identifier[default] = literal[string] )
identifier[pairing] . identifier[add_argument] ( literal[string] , literal[string] , identifier[help] = literal[string] ,
identifier[dest] = literal[string] , identifier[metavar] = literal[string] , identifier[default] = literal[int] ,
identifier[type] = identifier[_in_range] ( literal[int] , literal[int] , identifier[allow_none] = keyword[True] ))
identifier[pairing] . identifier[add_argument] ( literal[string] ,
identifier[help] = literal[string] ,
identifier[dest] = literal[string] , identifier[default] = keyword[None] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] ,
identifier[dest] = literal[string] , identifier[default] = keyword[False] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ,
identifier[dest] = literal[string] , identifier[default] = keyword[None] )
identifier[airplay] = identifier[parser] . identifier[add_argument_group] ( literal[string] )
identifier[airplay] . identifier[add_argument] ( literal[string] ,
identifier[help] = literal[string] ,
identifier[dest] = literal[string] , identifier[default] = keyword[None] )
identifier[debug] = identifier[parser] . identifier[add_argument_group] ( literal[string] )
identifier[debug] . identifier[add_argument] ( literal[string] , literal[string] , identifier[help] = literal[string] ,
identifier[action] = literal[string] , identifier[dest] = literal[string] )
identifier[debug] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ,
identifier[action] = literal[string] , identifier[dest] = literal[string] )
identifier[args] = identifier[parser] . identifier[parse_args] ()
identifier[loglevel] = identifier[logging] . identifier[WARNING]
keyword[if] identifier[args] . identifier[verbose] :
identifier[loglevel] = identifier[logging] . identifier[INFO]
keyword[if] identifier[args] . identifier[debug] :
identifier[loglevel] = identifier[logging] . identifier[DEBUG]
identifier[logging] . identifier[basicConfig] ( identifier[level] = identifier[loglevel] ,
identifier[format] = literal[string] )
identifier[logging] . identifier[getLogger] ( literal[string] ). identifier[setLevel] ( identifier[logging] . identifier[WARNING] )
identifier[cmds] = identifier[retrieve_commands] ( identifier[GlobalCommands] )
keyword[if] identifier[args] . identifier[command] [ literal[int] ] keyword[in] identifier[cmds] :
identifier[glob_cmds] = identifier[GlobalCommands] ( identifier[args] , identifier[loop] )
keyword[return] ( keyword[await] identifier[_exec_command] (
identifier[glob_cmds] , identifier[args] . identifier[command] [ literal[int] ], identifier[print_result] = keyword[False] ))
keyword[if] identifier[args] . identifier[autodiscover] :
keyword[if] keyword[not] keyword[await] identifier[_autodiscover_device] ( identifier[args] , identifier[loop] ):
keyword[return] literal[int]
keyword[return] keyword[await] identifier[_handle_commands] ( identifier[args] , identifier[loop] )
keyword[if] identifier[args] . identifier[address] :
keyword[return] keyword[await] identifier[_handle_commands] ( identifier[args] , identifier[loop] )
identifier[logging] . identifier[error] ( literal[string] )
keyword[return] literal[int] | async def cli_handler(loop):
"""Application starts here."""
parser = argparse.ArgumentParser()
parser.add_argument('command', nargs='+', help='commands, help, ...')
parser.add_argument('--name', help='apple tv name', dest='name', default='Apple TV')
parser.add_argument('--address', help='device ip address or hostname', dest='address', default=None)
parser.add_argument('--protocol', action=TransformProtocol, help='protocol to use (values: dmap, mrp)', dest='protocol', default=None)
parser.add_argument('--port', help='port when connecting', dest='port', type=_in_range(0, 65535), default=0)
parser.add_argument('-t', '--scan-timeout', help='timeout when scanning', dest='scan_timeout', type=_in_range(1, 100), metavar='TIMEOUT', default=3)
parser.add_argument('--version', action='version', help='version of atvremote and pyatv', version='%(prog)s {0}'.format(const.__version__))
pairing = parser.add_argument_group('pairing')
pairing.add_argument('--remote-name', help='remote pairing name', dest='remote_name', default='pyatv')
pairing.add_argument('-p', '--pin', help='pairing pin code', dest='pin_code', metavar='PIN', default=1234, type=_in_range(0, 9999, allow_none=True))
pairing.add_argument('--pairing-guid', help='pairing guid (16 chars hex)', dest='pairing_guid', default=None)
parser.add_argument('-a', '--autodiscover', action='store_true', help='automatically find a device', dest='autodiscover', default=False)
parser.add_argument('--device_credentials', help='credentials to device', dest='device_credentials', default=None)
airplay = parser.add_argument_group('airplay')
airplay.add_argument('--airplay_credentials', help='credentials for airplay', dest='airplay_credentials', default=None)
debug = parser.add_argument_group('debugging')
debug.add_argument('-v', '--verbose', help='increase output verbosity', action='store_true', dest='verbose')
debug.add_argument('--debug', help='print debug information', action='store_true', dest='debug')
args = parser.parse_args()
loglevel = logging.WARNING
if args.verbose:
loglevel = logging.INFO # depends on [control=['if'], data=[]]
if args.debug:
loglevel = logging.DEBUG # depends on [control=['if'], data=[]]
logging.basicConfig(level=loglevel, format='%(levelname)s: %(message)s')
logging.getLogger('requests').setLevel(logging.WARNING)
cmds = retrieve_commands(GlobalCommands)
if args.command[0] in cmds:
glob_cmds = GlobalCommands(args, loop)
return await _exec_command(glob_cmds, args.command[0], print_result=False) # depends on [control=['if'], data=[]]
if args.autodiscover:
if not await _autodiscover_device(args, loop):
return 1 # depends on [control=['if'], data=[]]
return await _handle_commands(args, loop) # depends on [control=['if'], data=[]]
if args.address:
return await _handle_commands(args, loop) # depends on [control=['if'], data=[]]
logging.error('To autodiscover an Apple TV, add -a')
return 1 |
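The `_in_range(...)` values passed as argparse `type` above come from a helper that is not part of this record; a plausible reconstruction (hypothetical, not the project's actual code) returns a validator that argparse calls with the raw string:

import argparse

def _in_range(minimum, maximum, allow_none=False):
    def validator(raw):
        # argparse passes the raw command-line string to `type`.
        if allow_none and raw is None:
            return None
        value = int(raw)
        if not minimum <= value <= maximum:
            raise argparse.ArgumentTypeError(
                '{0} is not in range [{1}, {2}]'.format(value, minimum, maximum))
        return value
    return validator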
def parse_gntp(data, password=None):
"""Attempt to parse a message as a GNTP message
:param string data: Message to be parsed
:param string password: Optional password to be used to verify the message
"""
data = gntp.shim.u(data)
match = GNTP_INFO_LINE_SHORT.match(data)
if not match:
raise errors.ParseError('INVALID_GNTP_INFO')
info = match.groupdict()
if info['messagetype'] == 'REGISTER':
return GNTPRegister(data, password=password)
elif info['messagetype'] == 'NOTIFY':
return GNTPNotice(data, password=password)
elif info['messagetype'] == 'SUBSCRIBE':
return GNTPSubscribe(data, password=password)
elif info['messagetype'] == '-OK':
return GNTPOK(data)
elif info['messagetype'] == '-ERROR':
return GNTPError(data)
raise errors.ParseError('INVALID_GNTP_MESSAGE') | def function[parse_gntp, parameter[data, password]]:
constant[Attempt to parse a message as a GNTP message
:param string data: Message to be parsed
:param string password: Optional password to be used to verify the message
]
variable[data] assign[=] call[name[gntp].shim.u, parameter[name[data]]]
variable[match] assign[=] call[name[GNTP_INFO_LINE_SHORT].match, parameter[name[data]]]
if <ast.UnaryOp object at 0x7da207f021a0> begin[:]
<ast.Raise object at 0x7da207f00ca0>
variable[info] assign[=] call[name[match].groupdict, parameter[]]
if compare[call[name[info]][constant[messagetype]] equal[==] constant[REGISTER]] begin[:]
return[call[name[GNTPRegister], parameter[name[data]]]]
<ast.Raise object at 0x7da207f01f00> | keyword[def] identifier[parse_gntp] ( identifier[data] , identifier[password] = keyword[None] ):
literal[string]
identifier[data] = identifier[gntp] . identifier[shim] . identifier[u] ( identifier[data] )
identifier[match] = identifier[GNTP_INFO_LINE_SHORT] . identifier[match] ( identifier[data] )
keyword[if] keyword[not] identifier[match] :
keyword[raise] identifier[errors] . identifier[ParseError] ( literal[string] )
identifier[info] = identifier[match] . identifier[groupdict] ()
keyword[if] identifier[info] [ literal[string] ]== literal[string] :
keyword[return] identifier[GNTPRegister] ( identifier[data] , identifier[password] = identifier[password] )
keyword[elif] identifier[info] [ literal[string] ]== literal[string] :
keyword[return] identifier[GNTPNotice] ( identifier[data] , identifier[password] = identifier[password] )
keyword[elif] identifier[info] [ literal[string] ]== literal[string] :
keyword[return] identifier[GNTPSubscribe] ( identifier[data] , identifier[password] = identifier[password] )
keyword[elif] identifier[info] [ literal[string] ]== literal[string] :
keyword[return] identifier[GNTPOK] ( identifier[data] )
keyword[elif] identifier[info] [ literal[string] ]== literal[string] :
keyword[return] identifier[GNTPError] ( identifier[data] )
keyword[raise] identifier[errors] . identifier[ParseError] ( literal[string] ) | def parse_gntp(data, password=None):
"""Attempt to parse a message as a GNTP message
:param string data: Message to be parsed
:param string password: Optional password to be used to verify the message
"""
data = gntp.shim.u(data)
match = GNTP_INFO_LINE_SHORT.match(data)
if not match:
raise errors.ParseError('INVALID_GNTP_INFO') # depends on [control=['if'], data=[]]
info = match.groupdict()
if info['messagetype'] == 'REGISTER':
return GNTPRegister(data, password=password) # depends on [control=['if'], data=[]]
elif info['messagetype'] == 'NOTIFY':
return GNTPNotice(data, password=password) # depends on [control=['if'], data=[]]
elif info['messagetype'] == 'SUBSCRIBE':
return GNTPSubscribe(data, password=password) # depends on [control=['if'], data=[]]
elif info['messagetype'] == '-OK':
return GNTPOK(data) # depends on [control=['if'], data=[]]
elif info['messagetype'] == '-ERROR':
return GNTPError(data) # depends on [control=['if'], data=[]]
raise errors.ParseError('INVALID_GNTP_MESSAGE') |
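For reference, a GNTP information line looks roughly like `GNTP/1.0 REGISTER NONE` (version, message type, encryption spec), which is what `GNTP_INFO_LINE_SHORT` matches. The if/elif chain above can also be written as a table-driven dispatch; this sketch reuses the names from the record and is equivalent, with unknown types falling through to the same error:

_MESSAGE_TYPES = {
    'REGISTER': lambda data, password: GNTPRegister(data, password=password),
    'NOTIFY': lambda data, password: GNTPNotice(data, password=password),
    'SUBSCRIBE': lambda data, password: GNTPSubscribe(data, password=password),
    '-OK': lambda data, password: GNTPOK(data),
    '-ERROR': lambda data, password: GNTPError(data),
}

def parse_gntp_dispatch(data, password=None):
    # Same parsing steps as parse_gntp above, with a lookup table
    # replacing the if/elif chain.
    data = gntp.shim.u(data)
    match = GNTP_INFO_LINE_SHORT.match(data)
    if not match:
        raise errors.ParseError('INVALID_GNTP_INFO')
    factory = _MESSAGE_TYPES.get(match.groupdict()['messagetype'])
    if factory is None:
        raise errors.ParseError('INVALID_GNTP_MESSAGE')
    return factory(data, password)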
def check_file_names(samples, raw_dir, options):
"""Check if all files are present.
:param samples: a list of tuples with the family ID as first element (str)
and sample ID as last element (str).
:param raw_dir: the directory containing the raw files.
:param options: the options.
:type samples: list of tuples
:type raw_dir: str
:type options: argparse.Namespace
    :returns: a dict mapping each sample ID (the individual ID, or the full
              family/individual ID when ``use_full_ids`` is set) to the name
              of its raw file.
"""
file_names = {}
for sample in samples:
the_sample = None
try:
the_sample = sample[1]
except IndexError:
msg = ("problematic samples file should include both family and "
"individual IDs")
raise ProgramError(msg)
if options.use_full_ids:
the_sample = options.full_ids_delimiter.join(sample)
file_name = os.path.join(raw_dir, "{}.txt".format(the_sample))
if not os.path.isfile(file_name):
file_name += ".gz"
if not os.path.isfile(file_name):
msg = "can't find file for sample {}".format(the_sample)
raise ProgramError(msg)
file_names[the_sample] = file_name
return file_names | def function[check_file_names, parameter[samples, raw_dir, options]]:
constant[Check if all files are present.
:param samples: a list of tuples with the family ID as first element (str)
and sample ID as last element (str).
:param raw_dir: the directory containing the raw files.
:param options: the options.
:type samples: list of tuples
:type raw_dir: str
:type options: argparse.Namespace
:returns: a dict containing samples as key (a tuple with the family ID as
first element and sample ID as last element) and the name of the
raw file as element.
]
variable[file_names] assign[=] dictionary[[], []]
for taget[name[sample]] in starred[name[samples]] begin[:]
variable[the_sample] assign[=] constant[None]
<ast.Try object at 0x7da1b0ada770>
if name[options].use_full_ids begin[:]
variable[the_sample] assign[=] call[name[options].full_ids_delimiter.join, parameter[name[sample]]]
variable[file_name] assign[=] call[name[os].path.join, parameter[name[raw_dir], call[constant[{}.txt].format, parameter[name[the_sample]]]]]
if <ast.UnaryOp object at 0x7da1b09a91e0> begin[:]
<ast.AugAssign object at 0x7da1b09a85b0>
if <ast.UnaryOp object at 0x7da1b09a9390> begin[:]
variable[msg] assign[=] call[constant[can't find file for sample {}].format, parameter[name[the_sample]]]
<ast.Raise object at 0x7da1b098b040>
call[name[file_names]][name[the_sample]] assign[=] name[file_name]
return[name[file_names]] | keyword[def] identifier[check_file_names] ( identifier[samples] , identifier[raw_dir] , identifier[options] ):
literal[string]
identifier[file_names] ={}
keyword[for] identifier[sample] keyword[in] identifier[samples] :
identifier[the_sample] = keyword[None]
keyword[try] :
identifier[the_sample] = identifier[sample] [ literal[int] ]
keyword[except] identifier[IndexError] :
identifier[msg] =( literal[string]
literal[string] )
keyword[raise] identifier[ProgramError] ( identifier[msg] )
keyword[if] identifier[options] . identifier[use_full_ids] :
identifier[the_sample] = identifier[options] . identifier[full_ids_delimiter] . identifier[join] ( identifier[sample] )
identifier[file_name] = identifier[os] . identifier[path] . identifier[join] ( identifier[raw_dir] , literal[string] . identifier[format] ( identifier[the_sample] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[file_name] ):
identifier[file_name] += literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[file_name] ):
identifier[msg] = literal[string] . identifier[format] ( identifier[the_sample] )
keyword[raise] identifier[ProgramError] ( identifier[msg] )
identifier[file_names] [ identifier[the_sample] ]= identifier[file_name]
keyword[return] identifier[file_names] | def check_file_names(samples, raw_dir, options):
"""Check if all files are present.
:param samples: a list of tuples with the family ID as first element (str)
and sample ID as last element (str).
:param raw_dir: the directory containing the raw files.
:param options: the options.
:type samples: list of tuples
:type raw_dir: str
:type options: argparse.Namespace
    :returns: a dict mapping each sample ID (the individual ID, or the full
              family/individual ID when ``use_full_ids`` is set) to the name
              of its raw file.
"""
file_names = {}
for sample in samples:
the_sample = None
try:
the_sample = sample[1] # depends on [control=['try'], data=[]]
except IndexError:
msg = 'problematic samples file should include both family and individual IDs'
raise ProgramError(msg) # depends on [control=['except'], data=[]]
if options.use_full_ids:
the_sample = options.full_ids_delimiter.join(sample) # depends on [control=['if'], data=[]]
file_name = os.path.join(raw_dir, '{}.txt'.format(the_sample))
if not os.path.isfile(file_name):
file_name += '.gz'
if not os.path.isfile(file_name):
msg = "can't find file for sample {}".format(the_sample)
raise ProgramError(msg) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
file_names[the_sample] = file_name # depends on [control=['for'], data=['sample']]
return file_names |
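A small driver for the function above, with made-up IDs and a made-up directory; as a sketch it raises ProgramError unless raw_data/FAM1_IND1.txt (or .txt.gz) and friends actually exist:

from argparse import Namespace

samples = [('FAM1', 'IND1'), ('FAM1', 'IND2')]
options = Namespace(use_full_ids=True, full_ids_delimiter='_')
# Looks for raw_data/FAM1_IND1.txt, raw_data/FAM1_IND2.txt (or the .gz variants).
file_names = check_file_names(samples, 'raw_data', options)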
def _descr_str(descr, data, region):
"""Add additional useful information from data to description string.
"""
if data:
name = dd.get_sample_name(data)
if name:
descr = "{0} : {1}".format(descr, name)
elif "work_bam" in data:
descr = "{0} : {1}".format(descr, os.path.basename(data["work_bam"]))
if region:
descr = "{0} : {1}".format(descr, region)
return descr | def function[_descr_str, parameter[descr, data, region]]:
constant[Add additional useful information from data to description string.
]
if name[data] begin[:]
variable[name] assign[=] call[name[dd].get_sample_name, parameter[name[data]]]
if name[name] begin[:]
variable[descr] assign[=] call[constant[{0} : {1}].format, parameter[name[descr], name[name]]]
if name[region] begin[:]
variable[descr] assign[=] call[constant[{0} : {1}].format, parameter[name[descr], name[region]]]
return[name[descr]] | keyword[def] identifier[_descr_str] ( identifier[descr] , identifier[data] , identifier[region] ):
literal[string]
keyword[if] identifier[data] :
identifier[name] = identifier[dd] . identifier[get_sample_name] ( identifier[data] )
keyword[if] identifier[name] :
identifier[descr] = literal[string] . identifier[format] ( identifier[descr] , identifier[name] )
keyword[elif] literal[string] keyword[in] identifier[data] :
identifier[descr] = literal[string] . identifier[format] ( identifier[descr] , identifier[os] . identifier[path] . identifier[basename] ( identifier[data] [ literal[string] ]))
keyword[if] identifier[region] :
identifier[descr] = literal[string] . identifier[format] ( identifier[descr] , identifier[region] )
keyword[return] identifier[descr] | def _descr_str(descr, data, region):
"""Add additional useful information from data to description string.
"""
if data:
name = dd.get_sample_name(data)
if name:
descr = '{0} : {1}'.format(descr, name) # depends on [control=['if'], data=[]]
elif 'work_bam' in data:
descr = '{0} : {1}'.format(descr, os.path.basename(data['work_bam'])) # depends on [control=['if'], data=['data']] # depends on [control=['if'], data=[]]
if region:
descr = '{0} : {1}'.format(descr, region) # depends on [control=['if'], data=[]]
return descr |
def get_statistics(self):
"""
Gather basic stats about the Knowledge Base and its contents.
:return: a dictionary
"""
statistics = {
"number_authors": 0,
"number_author_names": 0,
"number_author_abbreviations": 0,
"number_works": 0,
"number_work_titles": 0,
"number_title_abbreviations": 0,
"number_opus_maximum":0,
}
for author in self.get_authors():
if author.get_urn() is not None:
            opmax = self.get_opus_maximum_of(author.get_urn()) is not None
if opmax:
statistics["number_opus_maximum"] += 1
statistics["number_authors"] += 1
statistics["number_author_names"] += len(author.get_names())
statistics["number_author_abbreviations"] += len(
author.get_abbreviations()
)
for work in author.get_works():
statistics["number_works"] += 1
statistics["number_work_titles"] += len(work.get_titles())
statistics["number_title_abbreviations"] += len(
work.get_abbreviations()
)
return statistics | def function[get_statistics, parameter[self]]:
constant[
Gather basic stats about the Knowledge Base and its contents.
:return: a dictionary
]
variable[statistics] assign[=] dictionary[[<ast.Constant object at 0x7da18dc9a890>, <ast.Constant object at 0x7da18dc9a1d0>, <ast.Constant object at 0x7da18dc99c30>, <ast.Constant object at 0x7da18dc99510>, <ast.Constant object at 0x7da18dc99ae0>, <ast.Constant object at 0x7da18dc9a680>, <ast.Constant object at 0x7da18dc9a350>], [<ast.Constant object at 0x7da18dc9bd30>, <ast.Constant object at 0x7da18dc99cf0>, <ast.Constant object at 0x7da18dc98670>, <ast.Constant object at 0x7da18dc9b9d0>, <ast.Constant object at 0x7da18dc98130>, <ast.Constant object at 0x7da18dc9b2b0>, <ast.Constant object at 0x7da18dc9bf40>]]
for taget[name[author]] in starred[call[name[self].get_authors, parameter[]]] begin[:]
if compare[call[name[author].get_urn, parameter[]] is_not constant[None]] begin[:]
variable[opmax] assign[=] <ast.IfExp object at 0x7da18dc98fd0>
if name[opmax] begin[:]
<ast.AugAssign object at 0x7da18dc9a110>
<ast.AugAssign object at 0x7da18dc9b760>
<ast.AugAssign object at 0x7da18dc98e80>
<ast.AugAssign object at 0x7da18dc9bc70>
for taget[name[work]] in starred[call[name[author].get_works, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da18dc998d0>
<ast.AugAssign object at 0x7da18dc9b730>
<ast.AugAssign object at 0x7da18dc9ab30>
return[name[statistics]] | keyword[def] identifier[get_statistics] ( identifier[self] ):
literal[string]
identifier[statistics] ={
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
}
keyword[for] identifier[author] keyword[in] identifier[self] . identifier[get_authors] ():
keyword[if] identifier[author] . identifier[get_urn] () keyword[is] keyword[not] keyword[None] :
identifier[opmax] = keyword[True] keyword[if] identifier[self] . identifier[get_opus_maximum_of] ( identifier[author] . identifier[get_urn] ()) keyword[is] keyword[not] keyword[None] keyword[else] keyword[False]
keyword[if] identifier[opmax] :
identifier[statistics] [ literal[string] ]+= literal[int]
identifier[statistics] [ literal[string] ]+= literal[int]
identifier[statistics] [ literal[string] ]+= identifier[len] ( identifier[author] . identifier[get_names] ())
identifier[statistics] [ literal[string] ]+= identifier[len] (
identifier[author] . identifier[get_abbreviations] ()
)
keyword[for] identifier[work] keyword[in] identifier[author] . identifier[get_works] ():
identifier[statistics] [ literal[string] ]+= literal[int]
identifier[statistics] [ literal[string] ]+= identifier[len] ( identifier[work] . identifier[get_titles] ())
identifier[statistics] [ literal[string] ]+= identifier[len] (
identifier[work] . identifier[get_abbreviations] ()
)
keyword[return] identifier[statistics] | def get_statistics(self):
"""
Gather basic stats about the Knowledge Base and its contents.
:return: a dictionary
"""
statistics = {'number_authors': 0, 'number_author_names': 0, 'number_author_abbreviations': 0, 'number_works': 0, 'number_work_titles': 0, 'number_title_abbreviations': 0, 'number_opus_maximum': 0}
for author in self.get_authors():
if author.get_urn() is not None:
            opmax = self.get_opus_maximum_of(author.get_urn()) is not None
if opmax:
statistics['number_opus_maximum'] += 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
statistics['number_authors'] += 1
statistics['number_author_names'] += len(author.get_names())
statistics['number_author_abbreviations'] += len(author.get_abbreviations())
for work in author.get_works():
statistics['number_works'] += 1
statistics['number_work_titles'] += len(work.get_titles())
statistics['number_title_abbreviations'] += len(work.get_abbreviations()) # depends on [control=['for'], data=['work']] # depends on [control=['for'], data=['author']]
return statistics |
def prune_by_ngram_count_per_work(self, minimum=None, maximum=None,
label=None):
"""Removes results rows if the n-gram count for all works bearing that
n-gram is outside the range specified by `minimum` and
`maximum`.
That is, if a single witness of a single work has an n-gram
count that falls within the specified range, all result rows
for that n-gram are kept.
If `label` is specified, the works checked are restricted to
those associated with `label`.
:param minimum: minimum n-gram count
:type minimum: `int`
:param maximum: maximum n-gram count
:type maximum: `int`
:param label: optional label to restrict requirement to
:type label: `str`
"""
self._logger.info('Pruning results by n-gram count per work')
matches = self._matches
keep_ngrams = matches[constants.NGRAM_FIELDNAME].unique()
if label is not None:
matches = matches[matches[constants.LABEL_FIELDNAME] == label]
if minimum and maximum:
keep_ngrams = matches[
(matches[constants.COUNT_FIELDNAME] >= minimum) &
(matches[constants.COUNT_FIELDNAME] <= maximum)][
constants.NGRAM_FIELDNAME].unique()
elif minimum:
keep_ngrams = matches[
matches[constants.COUNT_FIELDNAME] >= minimum][
constants.NGRAM_FIELDNAME].unique()
    elif maximum:
        # Mask must come from the label-filtered frame to stay index-aligned.
        keep_ngrams = matches[
            matches[constants.COUNT_FIELDNAME] <= maximum][
            constants.NGRAM_FIELDNAME].unique()
self._matches = self._matches[self._matches[
constants.NGRAM_FIELDNAME].isin(keep_ngrams)] | def function[prune_by_ngram_count_per_work, parameter[self, minimum, maximum, label]]:
constant[Removes results rows if the n-gram count for all works bearing that
n-gram is outside the range specified by `minimum` and
`maximum`.
That is, if a single witness of a single work has an n-gram
count that falls within the specified range, all result rows
for that n-gram are kept.
If `label` is specified, the works checked are restricted to
those associated with `label`.
:param minimum: minimum n-gram count
:type minimum: `int`
:param maximum: maximum n-gram count
:type maximum: `int`
:param label: optional label to restrict requirement to
:type label: `str`
]
call[name[self]._logger.info, parameter[constant[Pruning results by n-gram count per work]]]
variable[matches] assign[=] name[self]._matches
variable[keep_ngrams] assign[=] call[call[name[matches]][name[constants].NGRAM_FIELDNAME].unique, parameter[]]
if compare[name[label] is_not constant[None]] begin[:]
variable[matches] assign[=] call[name[matches]][compare[call[name[matches]][name[constants].LABEL_FIELDNAME] equal[==] name[label]]]
if <ast.BoolOp object at 0x7da1b19c0040> begin[:]
variable[keep_ngrams] assign[=] call[call[call[name[matches]][binary_operation[compare[call[name[matches]][name[constants].COUNT_FIELDNAME] greater_or_equal[>=] name[minimum]] <ast.BitAnd object at 0x7da2590d6b60> compare[call[name[matches]][name[constants].COUNT_FIELDNAME] less_or_equal[<=] name[maximum]]]]][name[constants].NGRAM_FIELDNAME].unique, parameter[]]
name[self]._matches assign[=] call[name[self]._matches][call[call[name[self]._matches][name[constants].NGRAM_FIELDNAME].isin, parameter[name[keep_ngrams]]]] | keyword[def] identifier[prune_by_ngram_count_per_work] ( identifier[self] , identifier[minimum] = keyword[None] , identifier[maximum] = keyword[None] ,
identifier[label] = keyword[None] ):
literal[string]
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] )
identifier[matches] = identifier[self] . identifier[_matches]
identifier[keep_ngrams] = identifier[matches] [ identifier[constants] . identifier[NGRAM_FIELDNAME] ]. identifier[unique] ()
keyword[if] identifier[label] keyword[is] keyword[not] keyword[None] :
identifier[matches] = identifier[matches] [ identifier[matches] [ identifier[constants] . identifier[LABEL_FIELDNAME] ]== identifier[label] ]
keyword[if] identifier[minimum] keyword[and] identifier[maximum] :
identifier[keep_ngrams] = identifier[matches] [
( identifier[matches] [ identifier[constants] . identifier[COUNT_FIELDNAME] ]>= identifier[minimum] )&
( identifier[matches] [ identifier[constants] . identifier[COUNT_FIELDNAME] ]<= identifier[maximum] )][
identifier[constants] . identifier[NGRAM_FIELDNAME] ]. identifier[unique] ()
keyword[elif] identifier[minimum] :
identifier[keep_ngrams] = identifier[matches] [
identifier[matches] [ identifier[constants] . identifier[COUNT_FIELDNAME] ]>= identifier[minimum] ][
identifier[constants] . identifier[NGRAM_FIELDNAME] ]. identifier[unique] ()
keyword[elif] identifier[maximum] :
identifier[keep_ngrams] = identifier[matches] [
identifier[self] . identifier[_matches] [ identifier[constants] . identifier[COUNT_FIELDNAME] ]<= identifier[maximum] ][
identifier[constants] . identifier[NGRAM_FIELDNAME] ]. identifier[unique] ()
identifier[self] . identifier[_matches] = identifier[self] . identifier[_matches] [ identifier[self] . identifier[_matches] [
identifier[constants] . identifier[NGRAM_FIELDNAME] ]. identifier[isin] ( identifier[keep_ngrams] )] | def prune_by_ngram_count_per_work(self, minimum=None, maximum=None, label=None):
"""Removes results rows if the n-gram count for all works bearing that
n-gram is outside the range specified by `minimum` and
`maximum`.
That is, if a single witness of a single work has an n-gram
count that falls within the specified range, all result rows
for that n-gram are kept.
If `label` is specified, the works checked are restricted to
those associated with `label`.
:param minimum: minimum n-gram count
:type minimum: `int`
:param maximum: maximum n-gram count
:type maximum: `int`
:param label: optional label to restrict requirement to
:type label: `str`
"""
self._logger.info('Pruning results by n-gram count per work')
matches = self._matches
keep_ngrams = matches[constants.NGRAM_FIELDNAME].unique()
if label is not None:
matches = matches[matches[constants.LABEL_FIELDNAME] == label] # depends on [control=['if'], data=['label']]
if minimum and maximum:
keep_ngrams = matches[(matches[constants.COUNT_FIELDNAME] >= minimum) & (matches[constants.COUNT_FIELDNAME] <= maximum)][constants.NGRAM_FIELDNAME].unique() # depends on [control=['if'], data=[]]
elif minimum:
keep_ngrams = matches[matches[constants.COUNT_FIELDNAME] >= minimum][constants.NGRAM_FIELDNAME].unique() # depends on [control=['if'], data=[]]
elif maximum:
        keep_ngrams = matches[matches[constants.COUNT_FIELDNAME] <= maximum][constants.NGRAM_FIELDNAME].unique() # depends on [control=['if'], data=[]]
self._matches = self._matches[self._matches[constants.NGRAM_FIELDNAME].isin(keep_ngrams)] |
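The keep-if-any-row-is-in-range rule is easy to check on a toy frame; 'ngram' and 'count' stand in for the NGRAM/COUNT field-name constants used above:

import pandas as pd

matches = pd.DataFrame({'ngram': ['ab', 'ab', 'cd', 'cd'],
                        'count': [1, 7, 1, 2]})
minimum, maximum = 3, 10
keep = matches[(matches['count'] >= minimum)
               & (matches['count'] <= maximum)]['ngram'].unique()
pruned = matches[matches['ngram'].isin(keep)]
# 'ab' keeps both rows because one count (7) is in range; 'cd' is dropped entirely.
assert set(pruned['ngram']) == {'ab'}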
def render(self, data):
''' Renders the reports based on data.content_type's value.
Arguments:
data (ReportViewRequestData): The report data. data.content_type
is used to determine how the reports are rendered.
Returns:
HTTPResponse: The rendered version of the report.
'''
renderers = {
"text/csv": self._render_as_csv,
"text/html": self._render_as_html,
None: self._render_as_html,
}
render = renderers[data.content_type]
return render(data) | def function[render, parameter[self, data]]:
constant[ Renders the reports based on data.content_type's value.
Arguments:
data (ReportViewRequestData): The report data. data.content_type
is used to determine how the reports are rendered.
Returns:
HTTPResponse: The rendered version of the report.
]
variable[renderers] assign[=] dictionary[[<ast.Constant object at 0x7da1b01bac20>, <ast.Constant object at 0x7da1b01bb850>, <ast.Constant object at 0x7da1b01baaa0>], [<ast.Attribute object at 0x7da1b01bb250>, <ast.Attribute object at 0x7da1b01bb940>, <ast.Attribute object at 0x7da1b01b8160>]]
variable[render] assign[=] call[name[renderers]][name[data].content_type]
return[call[name[render], parameter[name[data]]]] | keyword[def] identifier[render] ( identifier[self] , identifier[data] ):
literal[string]
identifier[renderers] ={
literal[string] : identifier[self] . identifier[_render_as_csv] ,
literal[string] : identifier[self] . identifier[_render_as_html] ,
keyword[None] : identifier[self] . identifier[_render_as_html] ,
}
identifier[render] = identifier[renderers] [ identifier[data] . identifier[content_type] ]
keyword[return] identifier[render] ( identifier[data] ) | def render(self, data):
""" Renders the reports based on data.content_type's value.
Arguments:
data (ReportViewRequestData): The report data. data.content_type
is used to determine how the reports are rendered.
Returns:
HTTPResponse: The rendered version of the report.
"""
renderers = {'text/csv': self._render_as_csv, 'text/html': self._render_as_html, None: self._render_as_html}
render = renderers[data.content_type]
return render(data) |
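A standalone variant of the same content-type dispatch (the renderer functions here are placeholders, not the class's real methods); using dict.get with a default also covers unknown content types, on which the mapping above would raise KeyError:

def render_as_csv(data):
    return 'csv: {0!r}'.format(data)

def render_as_html(data):
    return 'html: {0!r}'.format(data)

RENDERERS = {'text/csv': render_as_csv, 'text/html': render_as_html}

def render(data, content_type=None):
    # Unknown or missing content types fall back to HTML.
    return RENDERERS.get(content_type, render_as_html)(data)

assert render({'x': 1}).startswith('html')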
async def generate_widget_large(
self,
bot_id: int = None,
top: str = '2C2F33',
mid: str = '23272A',
user: str = 'FFFFFF',
cert: str = 'FFFFFF',
data: str = 'FFFFFF',
label: str = '99AAB5',
highlight: str = '2C2F33'
):
"""This function is a coroutine.
    Generates a custom large widget. Do not add `#` to the color codes (e.g. #FF00FF becomes FF00FF).
Parameters
==========
bot_id: int
The bot_id of the bot you wish to make a widget for.
top: str
The hex color code of the top bar.
mid: str
The hex color code of the main section.
user: str
The hex color code of the username text.
cert: str
The hex color code of the certified text (if applicable).
data: str
The hex color code of the statistics (numbers only e.g. 44) of the bot.
label: str
The hex color code of the description (text e.g. guild count) of the statistics.
highlight: str
The hex color code of the data boxes.
Returns
=======
URL of the widget: str
"""
url = 'https://discordbots.org/api/widget/{0}.png?topcolor={1}&middlecolor={2}&usernamecolor={3}&certifiedcolor={4}&datacolor={5}&labelcolor={6}&highlightcolor={7}'
if bot_id is None:
bot_id = self.bot_id
url = url.format(bot_id, top, mid, user, cert, data, label, highlight)
return url | <ast.AsyncFunctionDef object at 0x7da1b024f2b0> | keyword[async] keyword[def] identifier[generate_widget_large] (
identifier[self] ,
identifier[bot_id] : identifier[int] = keyword[None] ,
identifier[top] : identifier[str] = literal[string] ,
identifier[mid] : identifier[str] = literal[string] ,
identifier[user] : identifier[str] = literal[string] ,
identifier[cert] : identifier[str] = literal[string] ,
identifier[data] : identifier[str] = literal[string] ,
identifier[label] : identifier[str] = literal[string] ,
identifier[highlight] : identifier[str] = literal[string]
):
literal[string]
identifier[url] = literal[string]
keyword[if] identifier[bot_id] keyword[is] keyword[None] :
identifier[bot_id] = identifier[self] . identifier[bot_id]
identifier[url] = identifier[url] . identifier[format] ( identifier[bot_id] , identifier[top] , identifier[mid] , identifier[user] , identifier[cert] , identifier[data] , identifier[label] , identifier[highlight] )
keyword[return] identifier[url] | async def generate_widget_large(self, bot_id: int=None, top: str='2C2F33', mid: str='23272A', user: str='FFFFFF', cert: str='FFFFFF', data: str='FFFFFF', label: str='99AAB5', highlight: str='2C2F33'):
"""This function is a coroutine.
    Generates a custom large widget. Do not add `#` to the color codes (e.g. #FF00FF becomes FF00FF).
Parameters
==========
bot_id: int
The bot_id of the bot you wish to make a widget for.
top: str
The hex color code of the top bar.
mid: str
The hex color code of the main section.
user: str
The hex color code of the username text.
cert: str
The hex color code of the certified text (if applicable).
data: str
The hex color code of the statistics (numbers only e.g. 44) of the bot.
label: str
The hex color code of the description (text e.g. guild count) of the statistics.
highlight: str
The hex color code of the data boxes.
Returns
=======
URL of the widget: str
"""
url = 'https://discordbots.org/api/widget/{0}.png?topcolor={1}&middlecolor={2}&usernamecolor={3}&certifiedcolor={4}&datacolor={5}&labelcolor={6}&highlightcolor={7}'
if bot_id is None:
bot_id = self.bot_id # depends on [control=['if'], data=['bot_id']]
url = url.format(bot_id, top, mid, user, cert, data, label, highlight)
return url |
def clear_to_enc_filename(fname):
"""
    Convert the filename of a cleartext file to the corresponding encrypted filename.
:param fname:
:return: filename of encrypted secret file if found, else None
"""
if not fname.lower().endswith('.json'):
raise CredkeepException('Invalid filetype')
if fname.lower().endswith('.enc.json'):
raise CredkeepException('File already encrypted')
enc_fname = fname[:-4] + 'enc.json'
return enc_fname if exists(enc_fname) else None | def function[clear_to_enc_filename, parameter[fname]]:
constant[
Converts the filename of a cleartext file and convert it to an encrypted filename
:param fname:
:return: filename of encrypted secret file if found, else None
]
if <ast.UnaryOp object at 0x7da207f99780> begin[:]
<ast.Raise object at 0x7da207f99ed0>
if call[call[name[fname].lower, parameter[]].endswith, parameter[constant[.enc.json]]] begin[:]
<ast.Raise object at 0x7da207f98c10>
variable[enc_fname] assign[=] binary_operation[call[name[fname]][<ast.Slice object at 0x7da207f98d60>] + constant[enc.json]]
return[<ast.IfExp object at 0x7da207f9b460>] | keyword[def] identifier[clear_to_enc_filename] ( identifier[fname] ):
literal[string]
keyword[if] keyword[not] identifier[fname] . identifier[lower] (). identifier[endswith] ( literal[string] ):
keyword[raise] identifier[CredkeepException] ( literal[string] )
keyword[if] identifier[fname] . identifier[lower] (). identifier[endswith] ( literal[string] ):
keyword[raise] identifier[CredkeepException] ( literal[string] )
identifier[enc_fname] = identifier[fname] [:- literal[int] ]+ literal[string]
keyword[return] identifier[enc_fname] keyword[if] identifier[exists] ( identifier[enc_fname] ) keyword[else] keyword[None] | def clear_to_enc_filename(fname):
"""
Converts the filename of a cleartext file and convert it to an encrypted filename
:param fname:
:return: filename of encrypted secret file if found, else None
"""
if not fname.lower().endswith('.json'):
raise CredkeepException('Invalid filetype') # depends on [control=['if'], data=[]]
if fname.lower().endswith('.enc.json'):
raise CredkeepException('File already encrypted') # depends on [control=['if'], data=[]]
enc_fname = fname[:-4] + 'enc.json'
return enc_fname if exists(enc_fname) else None |
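The slicing in fname[:-4] drops the trailing 'json' but keeps the dot, which is what makes the 'enc.json' suffix line up:

fname = 'secrets.json'
# 'secrets.json'[:-4] == 'secrets.' -> 'secrets.' + 'enc.json'
assert fname[:-4] + 'enc.json' == 'secrets.enc.json'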
def _create_stdout_logger():
""" create a logger to stdout """
log = logging.getLogger(__name__)
out_hdlr = logging.StreamHandler(sys.stdout)
out_hdlr.setFormatter(logging.Formatter('%(message)s'))
out_hdlr.setLevel(logging.INFO)
log.addHandler(out_hdlr)
log.setLevel(logging.INFO) | def function[_create_stdout_logger, parameter[]]:
constant[ create a logger to stdout ]
variable[log] assign[=] call[name[logging].getLogger, parameter[name[__name__]]]
variable[out_hdlr] assign[=] call[name[logging].StreamHandler, parameter[name[sys].stdout]]
call[name[out_hdlr].setFormatter, parameter[call[name[logging].Formatter, parameter[constant[%(message)s]]]]]
call[name[out_hdlr].setLevel, parameter[name[logging].INFO]]
call[name[log].addHandler, parameter[name[out_hdlr]]]
call[name[log].setLevel, parameter[name[logging].INFO]] | keyword[def] identifier[_create_stdout_logger] ():
literal[string]
identifier[log] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
identifier[out_hdlr] = identifier[logging] . identifier[StreamHandler] ( identifier[sys] . identifier[stdout] )
identifier[out_hdlr] . identifier[setFormatter] ( identifier[logging] . identifier[Formatter] ( literal[string] ))
identifier[out_hdlr] . identifier[setLevel] ( identifier[logging] . identifier[INFO] )
identifier[log] . identifier[addHandler] ( identifier[out_hdlr] )
identifier[log] . identifier[setLevel] ( identifier[logging] . identifier[INFO] ) | def _create_stdout_logger():
""" create a logger to stdout """
log = logging.getLogger(__name__)
out_hdlr = logging.StreamHandler(sys.stdout)
out_hdlr.setFormatter(logging.Formatter('%(message)s'))
out_hdlr.setLevel(logging.INFO)
log.addHandler(out_hdlr)
log.setLevel(logging.INFO) |
def boxes_intersect(box1, box2):
"""Determines if two rectangles, each input as a tuple
(xmin, xmax, ymin, ymax), intersect."""
xmin1, xmax1, ymin1, ymax1 = box1
xmin2, xmax2, ymin2, ymax2 = box2
if interval_intersection_width(xmin1, xmax1, xmin2, xmax2) and \
interval_intersection_width(ymin1, ymax1, ymin2, ymax2):
return True
else:
return False | def function[boxes_intersect, parameter[box1, box2]]:
constant[Determines if two rectangles, each input as a tuple
(xmin, xmax, ymin, ymax), intersect.]
<ast.Tuple object at 0x7da20cabd870> assign[=] name[box1]
<ast.Tuple object at 0x7da20cabfa30> assign[=] name[box2]
if <ast.BoolOp object at 0x7da20cabe050> begin[:]
return[constant[True]] | keyword[def] identifier[boxes_intersect] ( identifier[box1] , identifier[box2] ):
literal[string]
identifier[xmin1] , identifier[xmax1] , identifier[ymin1] , identifier[ymax1] = identifier[box1]
identifier[xmin2] , identifier[xmax2] , identifier[ymin2] , identifier[ymax2] = identifier[box2]
keyword[if] identifier[interval_intersection_width] ( identifier[xmin1] , identifier[xmax1] , identifier[xmin2] , identifier[xmax2] ) keyword[and] identifier[interval_intersection_width] ( identifier[ymin1] , identifier[ymax1] , identifier[ymin2] , identifier[ymax2] ):
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False] | def boxes_intersect(box1, box2):
"""Determines if two rectangles, each input as a tuple
(xmin, xmax, ymin, ymax), intersect."""
(xmin1, xmax1, ymin1, ymax1) = box1
(xmin2, xmax2, ymin2, ymax2) = box2
if interval_intersection_width(xmin1, xmax1, xmin2, xmax2) and interval_intersection_width(ymin1, ymax1, ymin2, ymax2):
return True # depends on [control=['if'], data=[]]
else:
return False |
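interval_intersection_width is not defined in this record; a plausible sketch (an assumption, inferred from the call sites above) is the standard clamped-overlap formula:

def interval_intersection_width(a_min, a_max, b_min, b_max):
    # Overlap width of [a_min, a_max] and [b_min, b_max], clamped at zero.
    return max(0, min(a_max, b_max) - max(a_min, b_min))

# Overlapping boxes share positive width on both axes; disjoint ones do not.
assert boxes_intersect((0, 2, 0, 2), (1, 3, 1, 3)) is True
assert boxes_intersect((0, 1, 0, 1), (2, 3, 2, 3)) is False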
def check_smtp_domain (self, mail):
"""
Check a single mail address.
"""
from dns.exception import DNSException
log.debug(LOG_CHECK, "checking mail address %r", mail)
mail = strformat.ascii_safe(mail)
username, domain = mail.rsplit('@', 1)
log.debug(LOG_CHECK, "looking up MX mailhost %r", domain)
try:
answers = resolver.query(domain, 'MX')
except DNSException:
answers = []
if len(answers) == 0:
self.add_warning(_("No MX mail host for %(domain)s found.") %
{'domain': domain},
tag=WARN_MAIL_NO_MX_HOST)
try:
answers = resolver.query(domain, 'A')
except DNSException:
answers = []
if len(answers) == 0:
self.set_result(_("No host for %(domain)s found.") %
{'domain': domain}, valid=False,
overwrite=True)
return
# set preference to zero
mxdata = [(0, rdata.to_text(omit_final_dot=True))
for rdata in answers]
else:
from dns.rdtypes.mxbase import MXBase
mxdata = [(rdata.preference,
rdata.exchange.to_text(omit_final_dot=True))
for rdata in answers if isinstance(rdata, MXBase)]
if not mxdata:
self.set_result(
_("Got invalid DNS answer %(answer)s for %(domain)s.") %
{'answer': answers, 'domain': domain}, valid=False,
overwrite=True)
return
# sort according to preference (lower preference means this
# host should be preferred)
mxdata.sort()
# debug output
log.debug(LOG_CHECK, "found %d MX mailhosts:", len(answers))
for preference, host in mxdata:
log.debug(LOG_CHECK, "MX host %r, preference %d", host, preference)
pass
self.set_result(_("Valid mail address syntax")) | def function[check_smtp_domain, parameter[self, mail]]:
constant[
Check a single mail address.
]
from relative_module[dns.exception] import module[DNSException]
call[name[log].debug, parameter[name[LOG_CHECK], constant[checking mail address %r], name[mail]]]
variable[mail] assign[=] call[name[strformat].ascii_safe, parameter[name[mail]]]
<ast.Tuple object at 0x7da18bccaa40> assign[=] call[name[mail].rsplit, parameter[constant[@], constant[1]]]
call[name[log].debug, parameter[name[LOG_CHECK], constant[looking up MX mailhost %r], name[domain]]]
<ast.Try object at 0x7da18dc071f0>
if compare[call[name[len], parameter[name[answers]]] equal[==] constant[0]] begin[:]
call[name[self].add_warning, parameter[binary_operation[call[name[_], parameter[constant[No MX mail host for %(domain)s found.]]] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da18dc07130>], [<ast.Name object at 0x7da18dc04250>]]]]]
<ast.Try object at 0x7da18dc04d00>
if compare[call[name[len], parameter[name[answers]]] equal[==] constant[0]] begin[:]
call[name[self].set_result, parameter[binary_operation[call[name[_], parameter[constant[No host for %(domain)s found.]]] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da18dc04790>], [<ast.Name object at 0x7da18dc06020>]]]]]
return[None]
variable[mxdata] assign[=] <ast.ListComp object at 0x7da18dc04d90>
call[name[log].debug, parameter[name[LOG_CHECK], constant[found %d MX mailhosts:], call[name[len], parameter[name[answers]]]]]
for taget[tuple[[<ast.Name object at 0x7da18dc05ea0>, <ast.Name object at 0x7da18dc05c30>]]] in starred[name[mxdata]] begin[:]
call[name[log].debug, parameter[name[LOG_CHECK], constant[MX host %r, preference %d], name[host], name[preference]]]
pass
call[name[self].set_result, parameter[call[name[_], parameter[constant[Valid mail address syntax]]]]] | keyword[def] identifier[check_smtp_domain] ( identifier[self] , identifier[mail] ):
literal[string]
keyword[from] identifier[dns] . identifier[exception] keyword[import] identifier[DNSException]
identifier[log] . identifier[debug] ( identifier[LOG_CHECK] , literal[string] , identifier[mail] )
identifier[mail] = identifier[strformat] . identifier[ascii_safe] ( identifier[mail] )
identifier[username] , identifier[domain] = identifier[mail] . identifier[rsplit] ( literal[string] , literal[int] )
identifier[log] . identifier[debug] ( identifier[LOG_CHECK] , literal[string] , identifier[domain] )
keyword[try] :
identifier[answers] = identifier[resolver] . identifier[query] ( identifier[domain] , literal[string] )
keyword[except] identifier[DNSException] :
identifier[answers] =[]
keyword[if] identifier[len] ( identifier[answers] )== literal[int] :
identifier[self] . identifier[add_warning] ( identifier[_] ( literal[string] )%
{ literal[string] : identifier[domain] },
identifier[tag] = identifier[WARN_MAIL_NO_MX_HOST] )
keyword[try] :
identifier[answers] = identifier[resolver] . identifier[query] ( identifier[domain] , literal[string] )
keyword[except] identifier[DNSException] :
identifier[answers] =[]
keyword[if] identifier[len] ( identifier[answers] )== literal[int] :
identifier[self] . identifier[set_result] ( identifier[_] ( literal[string] )%
{ literal[string] : identifier[domain] }, identifier[valid] = keyword[False] ,
identifier[overwrite] = keyword[True] )
keyword[return]
identifier[mxdata] =[( literal[int] , identifier[rdata] . identifier[to_text] ( identifier[omit_final_dot] = keyword[True] ))
keyword[for] identifier[rdata] keyword[in] identifier[answers] ]
keyword[else] :
keyword[from] identifier[dns] . identifier[rdtypes] . identifier[mxbase] keyword[import] identifier[MXBase]
identifier[mxdata] =[( identifier[rdata] . identifier[preference] ,
identifier[rdata] . identifier[exchange] . identifier[to_text] ( identifier[omit_final_dot] = keyword[True] ))
keyword[for] identifier[rdata] keyword[in] identifier[answers] keyword[if] identifier[isinstance] ( identifier[rdata] , identifier[MXBase] )]
keyword[if] keyword[not] identifier[mxdata] :
identifier[self] . identifier[set_result] (
identifier[_] ( literal[string] )%
{ literal[string] : identifier[answers] , literal[string] : identifier[domain] }, identifier[valid] = keyword[False] ,
identifier[overwrite] = keyword[True] )
keyword[return]
identifier[mxdata] . identifier[sort] ()
identifier[log] . identifier[debug] ( identifier[LOG_CHECK] , literal[string] , identifier[len] ( identifier[answers] ))
keyword[for] identifier[preference] , identifier[host] keyword[in] identifier[mxdata] :
identifier[log] . identifier[debug] ( identifier[LOG_CHECK] , literal[string] , identifier[host] , identifier[preference] )
keyword[pass]
identifier[self] . identifier[set_result] ( identifier[_] ( literal[string] )) | def check_smtp_domain(self, mail):
"""
Check a single mail address.
"""
from dns.exception import DNSException
log.debug(LOG_CHECK, 'checking mail address %r', mail)
mail = strformat.ascii_safe(mail)
(username, domain) = mail.rsplit('@', 1)
log.debug(LOG_CHECK, 'looking up MX mailhost %r', domain)
try:
answers = resolver.query(domain, 'MX') # depends on [control=['try'], data=[]]
except DNSException:
answers = [] # depends on [control=['except'], data=[]]
if len(answers) == 0:
self.add_warning(_('No MX mail host for %(domain)s found.') % {'domain': domain}, tag=WARN_MAIL_NO_MX_HOST)
try:
answers = resolver.query(domain, 'A') # depends on [control=['try'], data=[]]
except DNSException:
answers = [] # depends on [control=['except'], data=[]]
if len(answers) == 0:
self.set_result(_('No host for %(domain)s found.') % {'domain': domain}, valid=False, overwrite=True)
return # depends on [control=['if'], data=[]]
# set preference to zero
mxdata = [(0, rdata.to_text(omit_final_dot=True)) for rdata in answers] # depends on [control=['if'], data=[]]
else:
from dns.rdtypes.mxbase import MXBase
mxdata = [(rdata.preference, rdata.exchange.to_text(omit_final_dot=True)) for rdata in answers if isinstance(rdata, MXBase)]
if not mxdata:
self.set_result(_('Got invalid DNS answer %(answer)s for %(domain)s.') % {'answer': answers, 'domain': domain}, valid=False, overwrite=True)
return # depends on [control=['if'], data=[]]
# sort according to preference (lower preference means this
# host should be preferred)
mxdata.sort()
# debug output
log.debug(LOG_CHECK, 'found %d MX mailhosts:', len(answers))
for (preference, host) in mxdata:
log.debug(LOG_CHECK, 'MX host %r, preference %d', host, preference)
pass # depends on [control=['for'], data=[]]
self.set_result(_('Valid mail address syntax')) |
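A standalone sketch of the MX lookup pattern used above; note that dnspython 2.x renamed resolver.query to resolver.resolve (the record uses the older spelling), and the sort mirrors the preference-order comment:

from dns import resolver
from dns.exception import DNSException

def mx_hosts(domain):
    try:
        answers = resolver.resolve(domain, 'MX')  # resolver.query in dnspython < 2.0
    except DNSException:
        return []
    # Lower preference sorts first, i.e. the host to try first.
    return sorted((r.preference, r.exchange.to_text(omit_final_dot=True))
                  for r in answers)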
def read_fields(self, template, offset=0):
"""
Return a tuple containing the C-struct fields in this stream
specified by *template* and starting at *offset*.
"""
self._file.seek(offset)
bufr = self._file.read(calcsize(template))
return unpack_from(template, bufr) | def function[read_fields, parameter[self, template, offset]]:
constant[
Return a tuple containing the C-struct fields in this stream
specified by *template* and starting at *offset*.
]
call[name[self]._file.seek, parameter[name[offset]]]
variable[bufr] assign[=] call[name[self]._file.read, parameter[call[name[calcsize], parameter[name[template]]]]]
return[call[name[unpack_from], parameter[name[template], name[bufr]]]] | keyword[def] identifier[read_fields] ( identifier[self] , identifier[template] , identifier[offset] = literal[int] ):
literal[string]
identifier[self] . identifier[_file] . identifier[seek] ( identifier[offset] )
identifier[bufr] = identifier[self] . identifier[_file] . identifier[read] ( identifier[calcsize] ( identifier[template] ))
keyword[return] identifier[unpack_from] ( identifier[template] , identifier[bufr] ) | def read_fields(self, template, offset=0):
"""
Return a tuple containing the C-struct fields in this stream
specified by *template* and starting at *offset*.
"""
self._file.seek(offset)
bufr = self._file.read(calcsize(template))
return unpack_from(template, bufr) |
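The calcsize/unpack_from pairing above is plain struct usage; for example, the template '<HI' denotes a little-endian uint16 followed by a uint32 (6 bytes in standard sizes):

import struct
from io import BytesIO

stream = BytesIO(b'\x01\x00\x2a\x00\x00\x00extra')
template = '<HI'
buf = stream.read(struct.calcsize(template))  # reads exactly 6 bytes
assert struct.unpack_from(template, buf) == (1, 42)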
def p_int(self, tree):
''' V ::= INTEGER '''
tree.value = int(tree.attr)
tree.svalue = tree.attr | def function[p_int, parameter[self, tree]]:
constant[ V ::= INTEGER ]
name[tree].value assign[=] call[name[int], parameter[name[tree].attr]]
name[tree].svalue assign[=] name[tree].attr | keyword[def] identifier[p_int] ( identifier[self] , identifier[tree] ):
literal[string]
identifier[tree] . identifier[value] = identifier[int] ( identifier[tree] . identifier[attr] )
identifier[tree] . identifier[svalue] = identifier[tree] . identifier[attr] | def p_int(self, tree):
""" V ::= INTEGER """
tree.value = int(tree.attr)
tree.svalue = tree.attr |
def reset(self):
"""Reset the tough connection.
If a reset is not possible, tries to reopen the connection.
It will not complain if the connection is already closed.
"""
try:
self._con.reset()
self._transaction = False
self._setsession()
self._usage = 0
except Exception:
try:
self.reopen()
except Exception:
try:
self.rollback()
except Exception:
pass | def function[reset, parameter[self]]:
constant[Reset the tough connection.
If a reset is not possible, tries to reopen the connection.
It will not complain if the connection is already closed.
]
<ast.Try object at 0x7da207f02da0> | keyword[def] identifier[reset] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[self] . identifier[_con] . identifier[reset] ()
identifier[self] . identifier[_transaction] = keyword[False]
identifier[self] . identifier[_setsession] ()
identifier[self] . identifier[_usage] = literal[int]
keyword[except] identifier[Exception] :
keyword[try] :
identifier[self] . identifier[reopen] ()
keyword[except] identifier[Exception] :
keyword[try] :
identifier[self] . identifier[rollback] ()
keyword[except] identifier[Exception] :
keyword[pass] | def reset(self):
"""Reset the tough connection.
If a reset is not possible, tries to reopen the connection.
It will not complain if the connection is already closed.
"""
try:
self._con.reset()
self._transaction = False
self._setsession()
self._usage = 0 # depends on [control=['try'], data=[]]
except Exception:
try:
self.reopen() # depends on [control=['try'], data=[]]
except Exception:
try:
self.rollback() # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] |
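The nested try/except ladder in reset degrades gracefully: reset, then reopen, then rollback, and finally give up silently. A minimal sketch of the same pattern with a hypothetical connection object:

def robust_reset(con):
    # try progressively weaker recovery steps; swallow the final failure
    try:
        con.reset()
    except Exception:
        try:
            con.reopen()
        except Exception:
            try:
                con.rollback()
            except Exception:
                pass  # connection is unusable; the caller decides what next

class _DeadConnection:
    def reset(self):
        raise RuntimeError('connection lost')
    def reopen(self):
        raise RuntimeError('server gone')
    def rollback(self):
        pass

robust_reset(_DeadConnection())  # falls through to rollback() without raising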
def compileGLShader(self, pchShaderName, pchVertexShader, pchFragmentShader):
"""
Purpose: Compiles a GL shader program and returns the handle. Returns 0 if
the shader couldn't be compiled for some reason.
"""
unProgramID = glCreateProgram()
nSceneVertexShader = glCreateShader(GL_VERTEX_SHADER)
glShaderSource( nSceneVertexShader, pchVertexShader)
glCompileShader( nSceneVertexShader )
vShaderCompiled = glGetShaderiv( nSceneVertexShader, GL_COMPILE_STATUS)
if not vShaderCompiled:
dprintf("%s - Unable to compile vertex shader %d!\n" % (pchShaderName, nSceneVertexShader) )
glDeleteProgram( unProgramID )
glDeleteShader( nSceneVertexShader )
return 0
glAttachShader( unProgramID, nSceneVertexShader)
glDeleteShader( nSceneVertexShader ) # the program hangs onto this once it's attached
nSceneFragmentShader = glCreateShader(GL_FRAGMENT_SHADER)
glShaderSource( nSceneFragmentShader, pchFragmentShader)
glCompileShader( nSceneFragmentShader )
fShaderCompiled = glGetShaderiv( nSceneFragmentShader, GL_COMPILE_STATUS)
if not fShaderCompiled:
dprintf("%s - Unable to compile fragment shader %d!\n" % ( pchShaderName, nSceneFragmentShader) )
glDeleteProgram( unProgramID )
glDeleteShader( nSceneFragmentShader )
return 0
glAttachShader( unProgramID, nSceneFragmentShader )
glDeleteShader( nSceneFragmentShader ) # the program hangs onto this once it's attached
glLinkProgram( unProgramID )
programSuccess = glGetProgramiv( unProgramID, GL_LINK_STATUS)
if not programSuccess:
dprintf("%s - Error linking program %d!\n" % (pchShaderName, unProgramID) )
glDeleteProgram( unProgramID )
return 0
glUseProgram( unProgramID )
glUseProgram( 0 )
return unProgramID | def function[compileGLShader, parameter[self, pchShaderName, pchVertexShader, pchFragmentShader]]:
constant[
Purpose: Compiles a GL shader program and returns the handle. Returns 0 if
the shader couldn't be compiled for some reason.
]
variable[unProgramID] assign[=] call[name[glCreateProgram], parameter[]]
variable[nSceneVertexShader] assign[=] call[name[glCreateShader], parameter[name[GL_VERTEX_SHADER]]]
call[name[glShaderSource], parameter[name[nSceneVertexShader], name[pchVertexShader]]]
call[name[glCompileShader], parameter[name[nSceneVertexShader]]]
variable[vShaderCompiled] assign[=] call[name[glGetShaderiv], parameter[name[nSceneVertexShader], name[GL_COMPILE_STATUS]]]
if <ast.UnaryOp object at 0x7da207f00580> begin[:]
call[name[dprintf], parameter[binary_operation[constant[%s - Unable to compile vertex shader %d!
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da207f03880>, <ast.Name object at 0x7da207f00be0>]]]]]
call[name[glDeleteProgram], parameter[name[unProgramID]]]
call[name[glDeleteShader], parameter[name[nSceneVertexShader]]]
return[constant[0]]
call[name[glAttachShader], parameter[name[unProgramID], name[nSceneVertexShader]]]
call[name[glDeleteShader], parameter[name[nSceneVertexShader]]]
variable[nSceneFragmentShader] assign[=] call[name[glCreateShader], parameter[name[GL_FRAGMENT_SHADER]]]
call[name[glShaderSource], parameter[name[nSceneFragmentShader], name[pchFragmentShader]]]
call[name[glCompileShader], parameter[name[nSceneFragmentShader]]]
variable[fShaderCompiled] assign[=] call[name[glGetShaderiv], parameter[name[nSceneFragmentShader], name[GL_COMPILE_STATUS]]]
if <ast.UnaryOp object at 0x7da207f015a0> begin[:]
call[name[dprintf], parameter[binary_operation[constant[%s - Unable to compile fragment shader %d!
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da207f03340>, <ast.Name object at 0x7da207f01c30>]]]]]
call[name[glDeleteProgram], parameter[name[unProgramID]]]
call[name[glDeleteShader], parameter[name[nSceneFragmentShader]]]
return[constant[0]]
call[name[glAttachShader], parameter[name[unProgramID], name[nSceneFragmentShader]]]
call[name[glDeleteShader], parameter[name[nSceneFragmentShader]]]
call[name[glLinkProgram], parameter[name[unProgramID]]]
variable[programSuccess] assign[=] call[name[glGetProgramiv], parameter[name[unProgramID], name[GL_LINK_STATUS]]]
if <ast.UnaryOp object at 0x7da207f01390> begin[:]
call[name[dprintf], parameter[binary_operation[constant[%s - Error linking program %d!
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da207f02b00>, <ast.Name object at 0x7da207f023b0>]]]]]
call[name[glDeleteProgram], parameter[name[unProgramID]]]
return[constant[0]]
call[name[glUseProgram], parameter[name[unProgramID]]]
call[name[glUseProgram], parameter[constant[0]]]
return[name[unProgramID]] | keyword[def] identifier[compileGLShader] ( identifier[self] , identifier[pchShaderName] , identifier[pchVertexShader] , identifier[pchFragmentShader] ):
literal[string]
identifier[unProgramID] = identifier[glCreateProgram] ()
identifier[nSceneVertexShader] = identifier[glCreateShader] ( identifier[GL_VERTEX_SHADER] )
identifier[glShaderSource] ( identifier[nSceneVertexShader] , identifier[pchVertexShader] )
identifier[glCompileShader] ( identifier[nSceneVertexShader] )
identifier[vShaderCompiled] = identifier[glGetShaderiv] ( identifier[nSceneVertexShader] , identifier[GL_COMPILE_STATUS] )
keyword[if] keyword[not] identifier[vShaderCompiled] :
identifier[dprintf] ( literal[string] %( identifier[pchShaderName] , identifier[nSceneVertexShader] ))
identifier[glDeleteProgram] ( identifier[unProgramID] )
identifier[glDeleteShader] ( identifier[nSceneVertexShader] )
keyword[return] literal[int]
identifier[glAttachShader] ( identifier[unProgramID] , identifier[nSceneVertexShader] )
identifier[glDeleteShader] ( identifier[nSceneVertexShader] )
identifier[nSceneFragmentShader] = identifier[glCreateShader] ( identifier[GL_FRAGMENT_SHADER] )
identifier[glShaderSource] ( identifier[nSceneFragmentShader] , identifier[pchFragmentShader] )
identifier[glCompileShader] ( identifier[nSceneFragmentShader] )
identifier[fShaderCompiled] = identifier[glGetShaderiv] ( identifier[nSceneFragmentShader] , identifier[GL_COMPILE_STATUS] )
keyword[if] keyword[not] identifier[fShaderCompiled] :
identifier[dprintf] ( literal[string] %( identifier[pchShaderName] , identifier[nSceneFragmentShader] ))
identifier[glDeleteProgram] ( identifier[unProgramID] )
identifier[glDeleteShader] ( identifier[nSceneFragmentShader] )
keyword[return] literal[int]
identifier[glAttachShader] ( identifier[unProgramID] , identifier[nSceneFragmentShader] )
identifier[glDeleteShader] ( identifier[nSceneFragmentShader] )
identifier[glLinkProgram] ( identifier[unProgramID] )
identifier[programSuccess] = identifier[glGetProgramiv] ( identifier[unProgramID] , identifier[GL_LINK_STATUS] )
keyword[if] keyword[not] identifier[programSuccess] :
identifier[dprintf] ( literal[string] %( identifier[pchShaderName] , identifier[unProgramID] ))
identifier[glDeleteProgram] ( identifier[unProgramID] )
keyword[return] literal[int]
identifier[glUseProgram] ( identifier[unProgramID] )
identifier[glUseProgram] ( literal[int] )
keyword[return] identifier[unProgramID] | def compileGLShader(self, pchShaderName, pchVertexShader, pchFragmentShader):
"""
Purpose: Compiles a GL shader program and returns the handle. Returns 0 if
the shader couldn't be compiled for some reason.
"""
unProgramID = glCreateProgram()
nSceneVertexShader = glCreateShader(GL_VERTEX_SHADER)
glShaderSource(nSceneVertexShader, pchVertexShader)
glCompileShader(nSceneVertexShader)
vShaderCompiled = glGetShaderiv(nSceneVertexShader, GL_COMPILE_STATUS)
if not vShaderCompiled:
dprintf('%s - Unable to compile vertex shader %d!\n' % (pchShaderName, nSceneVertexShader))
glDeleteProgram(unProgramID)
glDeleteShader(nSceneVertexShader)
return 0 # depends on [control=['if'], data=[]]
glAttachShader(unProgramID, nSceneVertexShader)
glDeleteShader(nSceneVertexShader) # the program hangs onto this once it's attached
nSceneFragmentShader = glCreateShader(GL_FRAGMENT_SHADER)
glShaderSource(nSceneFragmentShader, pchFragmentShader)
glCompileShader(nSceneFragmentShader)
fShaderCompiled = glGetShaderiv(nSceneFragmentShader, GL_COMPILE_STATUS)
if not fShaderCompiled:
dprintf('%s - Unable to compile fragment shader %d!\n' % (pchShaderName, nSceneFragmentShader))
glDeleteProgram(unProgramID)
glDeleteShader(nSceneFragmentShader)
return 0 # depends on [control=['if'], data=[]]
glAttachShader(unProgramID, nSceneFragmentShader)
glDeleteShader(nSceneFragmentShader) # the program hangs onto this once it's attached
glLinkProgram(unProgramID)
programSuccess = glGetProgramiv(unProgramID, GL_LINK_STATUS)
if not programSuccess:
dprintf('%s - Error linking program %d!\n' % (pchShaderName, unProgramID))
glDeleteProgram(unProgramID)
return 0 # depends on [control=['if'], data=[]]
glUseProgram(unProgramID)
glUseProgram(0)
return unProgramID |
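An illustrative call site; compileGLShader needs a current OpenGL context (a GLUT or Qt window, for example), 'renderer' is a placeholder for the object defining the method, and the GLSL sources are minimal stand-ins, not the application's real shaders.

VERTEX_SRC = """#version 330 core
layout(location = 0) in vec3 pos;
void main() { gl_Position = vec4(pos, 1.0); }
"""
FRAGMENT_SRC = """#version 330 core
out vec4 color;
void main() { color = vec4(1.0); }
"""

program = renderer.compileGLShader('scene', VERTEX_SRC, FRAGMENT_SRC)
if program == 0:
    raise RuntimeError('shader compilation or linking failed')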
def _synthesize(self, text_file):
"""
Synthesize text into a WAVE file.
Return a tuple consisting of:
1. the handler of the generated audio file
2. the path of the generated audio file
3. the list of anchors, that is, a list of floats
each representing the start time of the corresponding
text fragment in the generated wave file
``[start_1, start_2, ..., start_n]``
4. a tuple describing the format of the audio file
:param text_file: the text to be synthesized
:type text_file: :class:`~aeneas.textfile.TextFile`
:rtype: tuple (handler, string, list, tuple)
"""
handler, path = gf.tmp_file(suffix=u".wav", root=self.rconf[RuntimeConfiguration.TMP_PATH])
result = self.synthesizer.synthesize(text_file, path)
return (handler, path, result[0], self.synthesizer.output_audio_format) | def function[_synthesize, parameter[self, text_file]]:
constant[
Synthesize text into a WAVE file.
Return a tuple consisting of:
1. the handler of the generated audio file
2. the path of the generated audio file
3. the list of anchors, that is, a list of floats
each representing the start time of the corresponding
text fragment in the generated wave file
``[start_1, start_2, ..., start_n]``
4. a tuple describing the format of the audio file
:param text_file: the text to be synthesized
:type text_file: :class:`~aeneas.textfile.TextFile`
:rtype: tuple (handler, string, list, tuple)
]
<ast.Tuple object at 0x7da1b16dd420> assign[=] call[name[gf].tmp_file, parameter[]]
variable[result] assign[=] call[name[self].synthesizer.synthesize, parameter[name[text_file], name[path]]]
return[tuple[[<ast.Name object at 0x7da207f02200>, <ast.Name object at 0x7da207f00cd0>, <ast.Subscript object at 0x7da207f020b0>, <ast.Attribute object at 0x7da207f00a30>]]] | keyword[def] identifier[_synthesize] ( identifier[self] , identifier[text_file] ):
literal[string]
identifier[handler] , identifier[path] = identifier[gf] . identifier[tmp_file] ( identifier[suffix] = literal[string] , identifier[root] = identifier[self] . identifier[rconf] [ identifier[RuntimeConfiguration] . identifier[TMP_PATH] ])
identifier[result] = identifier[self] . identifier[synthesizer] . identifier[synthesize] ( identifier[text_file] , identifier[path] )
keyword[return] ( identifier[handler] , identifier[path] , identifier[result] [ literal[int] ], identifier[self] . identifier[synthesizer] . identifier[output_audio_format] ) | def _synthesize(self, text_file):
"""
Synthesize text into a WAVE file.
Return a tuple consisting of:
1. the handler of the generated audio file
2. the path of the generated audio file
3. the list of anchors, that is, a list of floats
each representing the start time of the corresponding
text fragment in the generated wave file
``[start_1, start_2, ..., start_n]``
4. a tuple describing the format of the audio file
:param text_file: the text to be synthesized
:type text_file: :class:`~aeneas.textfile.TextFile`
:rtype: tuple (handler, string, list)
"""
(handler, path) = gf.tmp_file(suffix=u'.wav', root=self.rconf[RuntimeConfiguration.TMP_PATH])
result = self.synthesizer.synthesize(text_file, path)
return (handler, path, result[0], self.synthesizer.output_audio_format) |
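A hedged caller sketch for _synthesize: 'task' stands in for the object owning the method and 'text_file' for an aeneas TextFile; the point is that the caller owns the temporary WAVE file and must release it.

import os

handler, path, anchors, audio_format = task._synthesize(text_file)
try:
    # anchors[i] is the start time of fragment i in the generated WAVE file
    for start in anchors:
        print(start)
finally:
    os.close(handler)
    os.remove(path)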
def emit_signal(sender=None, namespace=None):
"""
@emit_signal
A decorator to mark a method or function as a signal emitter
It will turn the function into a decorator that can be used to
receive signals with: $fn_name.pre.connect, $fn_name.post.connect
*pre will execute before running the function
*post will run after running the function
**observe is an alias to post.connect
:param sender: string to be the sender.
If empty, it will use the function __module__+__fn_name,
or method __module__+__class_name__+__fn_name__
:param namespace: The namespace. If None, it will use the global namespace
:return:
"""
if not namespace:
namespace = __signals_namespace
def decorator(fn):
fname = sender
if not fname:
fnargs = inspect.getargspec(fn).args
fname = fn.__module__
if 'self' in fnargs or 'cls' in fnargs:
caller = inspect.currentframe().f_back
fname += "_" + caller.f_code.co_name
fname += "__" + fn.__name__
# pre and post
fn.pre = namespace.signal('pre_%s' % fname)
fn.post = namespace.signal('post_%s' % fname)
# alias to post.connect
fn.observe = fn.post.connect
def send(action, *a, **kw):
sig_name = "%s_%s" % (action, fname)
result = kw.pop("result", None)
kw.update(inspect.getcallargs(fn, *a, **kw))
sendkw = {
"kwargs": {k: v for k, v in kw.items() if k in kw.keys()},
"sender": fn.__name__,
"emitter": kw.get('self', kw.get('cls', fn))
}
if action == 'post':
namespace.signal(sig_name).send(result, **sendkw)
else:
namespace.signal(sig_name).send(**sendkw)
@functools.wraps(fn)
def wrapper(*args, **kwargs):
send('pre', *args, **kwargs)
result = fn(*args, **kwargs)
kwargs["result"] = result
send('post', *args, **kwargs)
return result
return wrapper
return decorator | def function[emit_signal, parameter[sender, namespace]]:
constant[
@emit_signal
A decorator to mark a method or function as a signal emitter
It will turn the function into a decorator that can be used to
receive signals with: $fn_name.pre.connect, $fn_name.post.connect
*pre will execute before running the function
*post will run after running the function
**observe is an alias to post.connect
:param sender: string to be the sender.
If empty, it will use the function __module__+__fn_name,
or method __module__+__class_name__+__fn_name__
:param namespace: The namespace. If None, it will use the global namespace
:return:
]
if <ast.UnaryOp object at 0x7da20c7951e0> begin[:]
variable[namespace] assign[=] name[__signals_namespace]
def function[decorator, parameter[fn]]:
variable[fname] assign[=] name[sender]
if <ast.UnaryOp object at 0x7da20c7952d0> begin[:]
variable[fnargs] assign[=] call[name[inspect].getargspec, parameter[name[fn]]].args
variable[fname] assign[=] name[fn].__module__
if <ast.BoolOp object at 0x7da18eb55630> begin[:]
variable[caller] assign[=] call[name[inspect].currentframe, parameter[]].f_back
<ast.AugAssign object at 0x7da18eb57430>
<ast.AugAssign object at 0x7da18eb57310>
name[fn].pre assign[=] call[name[namespace].signal, parameter[binary_operation[constant[pre_%s] <ast.Mod object at 0x7da2590d6920> name[fname]]]]
name[fn].post assign[=] call[name[namespace].signal, parameter[binary_operation[constant[post_%s] <ast.Mod object at 0x7da2590d6920> name[fname]]]]
name[fn].observe assign[=] name[fn].post.connect
def function[send, parameter[action]]:
variable[sig_name] assign[=] binary_operation[constant[%s_%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c794d60>, <ast.Name object at 0x7da20c796890>]]]
variable[result] assign[=] call[name[kw].pop, parameter[constant[result], constant[None]]]
call[name[kw].update, parameter[call[name[inspect].getcallargs, parameter[name[fn], <ast.Starred object at 0x7da18bcc8af0>]]]]
variable[sendkw] assign[=] dictionary[[<ast.Constant object at 0x7da18bccafb0>, <ast.Constant object at 0x7da18bcc8760>, <ast.Constant object at 0x7da18bcca9b0>], [<ast.DictComp object at 0x7da18bcca7a0>, <ast.Attribute object at 0x7da18bccb670>, <ast.Call object at 0x7da18bccb730>]]
if compare[name[action] equal[==] constant[post]] begin[:]
call[call[name[namespace].signal, parameter[name[sig_name]]].send, parameter[name[result]]]
def function[wrapper, parameter[]]:
call[name[send], parameter[constant[pre], <ast.Starred object at 0x7da18bccb8b0>]]
variable[result] assign[=] call[name[fn], parameter[<ast.Starred object at 0x7da18bcc81c0>]]
call[name[kwargs]][constant[result]] assign[=] name[result]
call[name[send], parameter[constant[post], <ast.Starred object at 0x7da18bccbca0>]]
return[name[result]]
return[name[wrapper]]
return[name[decorator]] | keyword[def] identifier[emit_signal] ( identifier[sender] = keyword[None] , identifier[namespace] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[namespace] :
identifier[namespace] = identifier[__signals_namespace]
keyword[def] identifier[decorator] ( identifier[fn] ):
identifier[fname] = identifier[sender]
keyword[if] keyword[not] identifier[fname] :
identifier[fnargs] = identifier[inspect] . identifier[getargspec] ( identifier[fn] ). identifier[args]
identifier[fname] = identifier[fn] . identifier[__module__]
keyword[if] literal[string] keyword[in] identifier[fnargs] keyword[or] literal[string] keyword[in] identifier[fnargs] :
identifier[caller] = identifier[inspect] . identifier[currentframe] (). identifier[f_back]
identifier[fname] += literal[string] + identifier[caller] . identifier[f_code] . identifier[co_name]
identifier[fname] += literal[string] + identifier[fn] . identifier[__name__]
identifier[fn] . identifier[pre] = identifier[namespace] . identifier[signal] ( literal[string] % identifier[fname] )
identifier[fn] . identifier[post] = identifier[namespace] . identifier[signal] ( literal[string] % identifier[fname] )
identifier[fn] . identifier[observe] = identifier[fn] . identifier[post] . identifier[connect]
keyword[def] identifier[send] ( identifier[action] ,* identifier[a] ,** identifier[kw] ):
identifier[sig_name] = literal[string] %( identifier[action] , identifier[fname] )
identifier[result] = identifier[kw] . identifier[pop] ( literal[string] , keyword[None] )
identifier[kw] . identifier[update] ( identifier[inspect] . identifier[getcallargs] ( identifier[fn] ,* identifier[a] ,** identifier[kw] ))
identifier[sendkw] ={
literal[string] :{ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kw] . identifier[items] () keyword[if] identifier[k] keyword[in] identifier[kw] . identifier[keys] ()},
literal[string] : identifier[fn] . identifier[__name__] ,
literal[string] : identifier[kw] . identifier[get] ( literal[string] , identifier[kw] . identifier[get] ( literal[string] , identifier[fn] ))
}
keyword[if] identifier[action] == literal[string] :
identifier[namespace] . identifier[signal] ( identifier[sig_name] ). identifier[send] ( identifier[result] ,** identifier[sendkw] )
keyword[else] :
identifier[namespace] . identifier[signal] ( identifier[sig_name] ). identifier[send] (** identifier[sendkw] )
@ identifier[functools] . identifier[wraps] ( identifier[fn] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[send] ( literal[string] ,* identifier[args] ,** identifier[kwargs] )
identifier[result] = identifier[fn] (* identifier[args] ,** identifier[kwargs] )
identifier[kwargs] [ literal[string] ]= identifier[result]
identifier[send] ( literal[string] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[result]
keyword[return] identifier[wrapper]
keyword[return] identifier[decorator] | def emit_signal(sender=None, namespace=None):
"""
@emit_signal
A decorator to mark a method or function as a signal emitter
It will turn the function into a decorator that can be used to
receive signals with: $fn_name.pre.connect, $fn_name.post.connect
*pre will execute before running the function
*post will run after running the function
**observe is an alias to post.connect
:param sender: string to be the sender.
If empty, it will use the function __module__+__fn_name,
or method __module__+__class_name__+__fn_name__
:param namespace: The namespace. If None, it will use the global namespace
:return:
"""
if not namespace:
namespace = __signals_namespace # depends on [control=['if'], data=[]]
def decorator(fn):
fname = sender
if not fname:
fnargs = inspect.getargspec(fn).args
fname = fn.__module__
if 'self' in fnargs or 'cls' in fnargs:
caller = inspect.currentframe().f_back
fname += '_' + caller.f_code.co_name # depends on [control=['if'], data=[]]
fname += '__' + fn.__name__ # depends on [control=['if'], data=[]]
# pre and post
fn.pre = namespace.signal('pre_%s' % fname)
fn.post = namespace.signal('post_%s' % fname)
# alias to post.connect
fn.observe = fn.post.connect
def send(action, *a, **kw):
sig_name = '%s_%s' % (action, fname)
result = kw.pop('result', None)
kw.update(inspect.getcallargs(fn, *a, **kw))
sendkw = {'kwargs': {k: v for (k, v) in kw.items() if k in kw.keys()}, 'sender': fn.__name__, 'emitter': kw.get('self', kw.get('cls', fn))}
if action == 'post':
namespace.signal(sig_name).send(result, **sendkw) # depends on [control=['if'], data=[]]
else:
namespace.signal(sig_name).send(**sendkw)
@functools.wraps(fn)
def wrapper(*args, **kwargs):
send('pre', *args, **kwargs)
result = fn(*args, **kwargs)
kwargs['result'] = result
send('post', *args, **kwargs)
return result
return wrapper
return decorator |
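A usage sketch, assuming the module above is importable; the receiver signature follows the send() calls: post receivers get the function result as the positional argument, plus kwargs, sender and emitter keywords.

@emit_signal()
def save_record(record):
    return {'saved': record}

@save_record.observe  # alias for save_record.post.connect
def on_saved(result, kwargs=None, sender=None, emitter=None):
    print('saved:', result, 'call args:', kwargs)

save_record({'id': 1})  # fires pre, runs the function, then fires post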
def promote(self):
""" Mark object as alive, so it won't be collected during next
run of the garbage collector.
"""
if self.expiry is not None:
self.promoted = self.time_module.time() + self.expiry | def function[promote, parameter[self]]:
constant[ Mark object as alive, so it won't be collected during the next
run of the garbage collector.
]
if compare[name[self].expiry is_not constant[None]] begin[:]
name[self].promoted assign[=] binary_operation[call[name[self].time_module.time, parameter[]] + name[self].expiry] | keyword[def] identifier[promote] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[expiry] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[promoted] = identifier[self] . identifier[time_module] . identifier[time] ()+ identifier[self] . identifier[expiry] | def promote(self):
""" Mark object as alive, so it won't be collected during next
run of the garbage collector.
"""
if self.expiry is not None:
self.promoted = self.time_module.time() + self.expiry # depends on [control=['if'], data=[]] |
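A minimal sketch of the promote/expiry pattern around this method; the Entry class and the expired() helper are invented for the demo, while time_module stays injectable for tests as in the original.

import time

class Entry:
    time_module = time

    def __init__(self, expiry=None):
        self.expiry = expiry
        self.promoted = None
        self.promote()

    def promote(self):
        if self.expiry is not None:
            self.promoted = self.time_module.time() + self.expiry

    def expired(self):
        return (self.promoted is not None
                and self.time_module.time() > self.promoted)

e = Entry(expiry=0.01)
time.sleep(0.02)
print(e.expired())  # True: never re-promoted, so a collector may drop it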
def endpoint_from_flag(flag):
"""The object used for interacting with relations tied to a flag, or None.
"""
relation_name = None
value = _get_flag_value(flag)
if isinstance(value, dict) and 'relation' in value:
# old-style RelationBase
relation_name = value['relation']
elif flag.startswith('endpoint.'):
# new-style Endpoint
relation_name = flag.split('.')[1]
elif '.' in flag:
# might be an unprefixed new-style Endpoint
relation_name = flag.split('.')[0]
if relation_name not in hookenv.relation_types():
return None
if relation_name:
factory = relation_factory(relation_name)
if factory:
return factory.from_flag(flag)
return None | def function[endpoint_from_flag, parameter[flag]]:
constant[The object used for interacting with relations tied to a flag, or None.
]
variable[relation_name] assign[=] constant[None]
variable[value] assign[=] call[name[_get_flag_value], parameter[name[flag]]]
if <ast.BoolOp object at 0x7da1b1a3cee0> begin[:]
variable[relation_name] assign[=] call[name[value]][constant[relation]]
if name[relation_name] begin[:]
variable[factory] assign[=] call[name[relation_factory], parameter[name[relation_name]]]
if name[factory] begin[:]
return[call[name[factory].from_flag, parameter[name[flag]]]]
return[constant[None]] | keyword[def] identifier[endpoint_from_flag] ( identifier[flag] ):
literal[string]
identifier[relation_name] = keyword[None]
identifier[value] = identifier[_get_flag_value] ( identifier[flag] )
keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ) keyword[and] literal[string] keyword[in] identifier[value] :
identifier[relation_name] = identifier[value] [ literal[string] ]
keyword[elif] identifier[flag] . identifier[startswith] ( literal[string] ):
identifier[relation_name] = identifier[flag] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[elif] literal[string] keyword[in] identifier[flag] :
identifier[relation_name] = identifier[flag] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[if] identifier[relation_name] keyword[not] keyword[in] identifier[hookenv] . identifier[relation_types] ():
keyword[return] keyword[None]
keyword[if] identifier[relation_name] :
identifier[factory] = identifier[relation_factory] ( identifier[relation_name] )
keyword[if] identifier[factory] :
keyword[return] identifier[factory] . identifier[from_flag] ( identifier[flag] )
keyword[return] keyword[None] | def endpoint_from_flag(flag):
"""The object used for interacting with relations tied to a flag, or None.
"""
relation_name = None
value = _get_flag_value(flag)
if isinstance(value, dict) and 'relation' in value:
# old-style RelationBase
relation_name = value['relation'] # depends on [control=['if'], data=[]]
elif flag.startswith('endpoint.'):
# new-style Endpoint
relation_name = flag.split('.')[1] # depends on [control=['if'], data=[]]
elif '.' in flag:
# might be an unprefixed new-style Endpoint
relation_name = flag.split('.')[0]
if relation_name not in hookenv.relation_types():
return None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['flag']]
if relation_name:
factory = relation_factory(relation_name)
if factory:
return factory.from_flag(flag) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return None |
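A pure-Python demo of the flag-name extraction rules above; the hookenv relation lookup needs a Juju runtime, so it is stubbed with a fixed set of known relation names.

def relation_name_of(flag, known=('db', 'website')):
    if flag.startswith('endpoint.'):
        return flag.split('.')[1]      # new-style Endpoint
    if '.' in flag:
        name = flag.split('.')[0]      # maybe an unprefixed Endpoint
        return name if name in known else None
    return None

for f in ('endpoint.db.joined', 'db.connected', 'nope.x', 'standalone'):
    print(f, '->', relation_name_of(f))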
def map_pvc(self, port1, vpi1, vci1, port2, vpi2, vci2):
"""
Creates a new Virtual Channel connection (unidirectional).
:param port1: input port
:param vpi1: input vpi
:param vci1: input vci
:param port2: output port
:param vpi2: output vpi
:param vci2: output vci
"""
if port1 not in self._nios:
return
if port2 not in self._nios:
return
nio1 = self._nios[port1]
nio2 = self._nios[port2]
yield from self._hypervisor.send('atmsw create_vcc "{name}" {input_nio} {input_vpi} {input_vci} {output_nio} {output_vpi} {output_vci}'.format(name=self._name,
input_nio=nio1,
input_vpi=vpi1,
input_vci=vci1,
output_nio=nio2,
output_vpi=vpi2,
output_vci=vci2))
log.info('ATM switch "{name}" [{id}]: VCC from port {port1} VPI {vpi1} VCI {vci1} to port {port2} VPI {vpi2} VCI {vci2} created'.format(name=self._name,
id=self._id,
port1=port1,
vpi1=vpi1,
vci1=vci1,
port2=port2,
vpi2=vpi2,
vci2=vci2))
self._active_mappings[(port1, vpi1, vci1)] = (port2, vpi2, vci2) | def function[map_pvc, parameter[self, port1, vpi1, vci1, port2, vpi2, vci2]]:
constant[
Creates a new Virtual Channel connection (unidirectional).
:param port1: input port
:param vpi1: input vpi
:param vci1: input vci
:param port2: output port
:param vpi2: output vpi
:param vci2: output vci
]
if compare[name[port1] <ast.NotIn object at 0x7da2590d7190> name[self]._nios] begin[:]
return[None]
if compare[name[port2] <ast.NotIn object at 0x7da2590d7190> name[self]._nios] begin[:]
return[None]
variable[nio1] assign[=] call[name[self]._nios][name[port1]]
variable[nio2] assign[=] call[name[self]._nios][name[port2]]
<ast.YieldFrom object at 0x7da207f013f0>
call[name[log].info, parameter[call[constant[ATM switch "{name}" [{id}]: VCC from port {port1} VPI {vpi1} VCI {vci1} to port {port2} VPI {vpi2} VCI {vci2} created].format, parameter[]]]]
call[name[self]._active_mappings][tuple[[<ast.Name object at 0x7da20c6c64a0>, <ast.Name object at 0x7da20c6c4ca0>, <ast.Name object at 0x7da20c6c5510>]]] assign[=] tuple[[<ast.Name object at 0x7da20c6c7220>, <ast.Name object at 0x7da20c6c4a90>, <ast.Name object at 0x7da20c6c7e50>]] | keyword[def] identifier[map_pvc] ( identifier[self] , identifier[port1] , identifier[vpi1] , identifier[vci1] , identifier[port2] , identifier[vpi2] , identifier[vci2] ):
literal[string]
keyword[if] identifier[port1] keyword[not] keyword[in] identifier[self] . identifier[_nios] :
keyword[return]
keyword[if] identifier[port2] keyword[not] keyword[in] identifier[self] . identifier[_nios] :
keyword[return]
identifier[nio1] = identifier[self] . identifier[_nios] [ identifier[port1] ]
identifier[nio2] = identifier[self] . identifier[_nios] [ identifier[port2] ]
keyword[yield] keyword[from] identifier[self] . identifier[_hypervisor] . identifier[send] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[_name] ,
identifier[input_nio] = identifier[nio1] ,
identifier[input_vpi] = identifier[vpi1] ,
identifier[input_vci] = identifier[vci1] ,
identifier[output_nio] = identifier[nio2] ,
identifier[output_vpi] = identifier[vpi2] ,
identifier[output_vci] = identifier[vci2] ))
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[_name] ,
identifier[id] = identifier[self] . identifier[_id] ,
identifier[port1] = identifier[port1] ,
identifier[vpi1] = identifier[vpi1] ,
identifier[vci1] = identifier[vci1] ,
identifier[port2] = identifier[port2] ,
identifier[vpi2] = identifier[vpi2] ,
identifier[vci2] = identifier[vci2] ))
identifier[self] . identifier[_active_mappings] [( identifier[port1] , identifier[vpi1] , identifier[vci1] )]=( identifier[port2] , identifier[vpi2] , identifier[vci2] ) | def map_pvc(self, port1, vpi1, vci1, port2, vpi2, vci2):
"""
Creates a new Virtual Channel connection (unidirectional).
:param port1: input port
:param vpi1: input vpi
:param vci1: input vci
:param port2: output port
:param vpi2: output vpi
:param vci2: output vci
"""
if port1 not in self._nios:
return # depends on [control=['if'], data=[]]
if port2 not in self._nios:
return # depends on [control=['if'], data=[]]
nio1 = self._nios[port1]
nio2 = self._nios[port2]
yield from self._hypervisor.send('atmsw create_vcc "{name}" {input_nio} {input_vpi} {input_vci} {output_nio} {output_vpi} {output_vci}'.format(name=self._name, input_nio=nio1, input_vpi=vpi1, input_vci=vci1, output_nio=nio2, output_vpi=vpi2, output_vci=vci2))
log.info('ATM switch "{name}" [{id}]: VCC from port {port1} VPI {vpi1} VCI {vci1} to port {port2} VPI {vpi2} VCI {vci2} created'.format(name=self._name, id=self._id, port1=port1, vpi1=vpi1, vci1=vci1, port2=port2, vpi2=vpi2, vci2=vci2))
self._active_mappings[port1, vpi1, vci1] = (port2, vpi2, vci2) |
def action_is_greedy(action, isoptional=False):
''' Returns True if action will necessarily consume the next argument.
isoptional indicates whether the argument is an optional (starts with -).
'''
num_consumed_args = _num_consumed_args.get(action, 0)
if action.option_strings:
if not isoptional and not action_is_satisfied(action):
return True
return action.nargs == REMAINDER
else:
return action.nargs == REMAINDER and num_consumed_args >= 1 | def function[action_is_greedy, parameter[action, isoptional]]:
constant[ Returns True if action will necessarily consume the next argument.
isoptional indicates whether the argument is an optional (starts with -).
]
variable[num_consumed_args] assign[=] call[name[_num_consumed_args].get, parameter[name[action], constant[0]]]
if name[action].option_strings begin[:]
if <ast.BoolOp object at 0x7da18fe90670> begin[:]
return[constant[True]]
return[compare[name[action].nargs equal[==] name[REMAINDER]]] | keyword[def] identifier[action_is_greedy] ( identifier[action] , identifier[isoptional] = keyword[False] ):
literal[string]
identifier[num_consumed_args] = identifier[_num_consumed_args] . identifier[get] ( identifier[action] , literal[int] )
keyword[if] identifier[action] . identifier[option_strings] :
keyword[if] keyword[not] identifier[isoptional] keyword[and] keyword[not] identifier[action_is_satisfied] ( identifier[action] ):
keyword[return] keyword[True]
keyword[return] identifier[action] . identifier[nargs] == identifier[REMAINDER]
keyword[else] :
keyword[return] identifier[action] . identifier[nargs] == identifier[REMAINDER] keyword[and] identifier[num_consumed_args] >= literal[int] | def action_is_greedy(action, isoptional=False):
""" Returns True if action will necessarily consume the next argument.
isoptional indicates whether the argument is an optional (starts with -).
"""
num_consumed_args = _num_consumed_args.get(action, 0)
if action.option_strings:
if not isoptional and (not action_is_satisfied(action)):
return True # depends on [control=['if'], data=[]]
return action.nargs == REMAINDER # depends on [control=['if'], data=[]]
else:
return action.nargs == REMAINDER and num_consumed_args >= 1 |
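A standalone check of the REMAINDER semantics the helper relies on; _num_consumed_args is module-internal bookkeeping and is not reproduced here.

import argparse
from argparse import REMAINDER

parser = argparse.ArgumentParser()
opt = parser.add_argument('--flag')
rest = parser.add_argument('cmd', nargs=REMAINDER)

print(opt.option_strings)        # ['--flag'] -> an optional
print(rest.nargs == REMAINDER)   # True       -> greedy positional
print(parser.parse_args(['--flag', 'x', 'a', '-b']))  # cmd=['a', '-b']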
def get(self, endpoint):
""" Todo """
r = self.http.request('GET',
self._api_base.format(endpoint),
headers={'Authorization': 'Bot '+self.token})
if r.status == 200:
return json.loads(r.data.decode('utf-8'))
else:
return {} | def function[get, parameter[self, endpoint]]:
constant[ Send an authenticated GET request to the given endpoint and return the decoded JSON body, or an empty dict on a non-200 response. ]
variable[r] assign[=] call[name[self].http.request, parameter[constant[GET], call[name[self]._api_base.format, parameter[name[endpoint]]]]]
if compare[name[r].status equal[==] constant[200]] begin[:]
return[call[name[json].loads, parameter[call[name[r].data.decode, parameter[constant[utf-8]]]]]] | keyword[def] identifier[get] ( identifier[self] , identifier[endpoint] ):
literal[string]
identifier[r] = identifier[self] . identifier[http] . identifier[request] ( literal[string] ,
identifier[self] . identifier[_api_base] . identifier[format] ( identifier[endpoint] ),
identifier[headers] ={ literal[string] : literal[string] + identifier[self] . identifier[token] })
keyword[if] identifier[r] . identifier[status] == literal[int] :
keyword[return] identifier[json] . identifier[loads] ( identifier[r] . identifier[data] . identifier[decode] ( literal[string] ))
keyword[else] :
keyword[return] {} | def get(self, endpoint):
""" Todo """
r = self.http.request('GET', self._api_base.format(endpoint), headers={'Authorization': 'Bot ' + self.token})
if r.status == 200:
return json.loads(r.data.decode('utf-8')) # depends on [control=['if'], data=[]]
else:
return {} |
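An equivalent standalone request with urllib3 (the HTTP client used above); the endpoint and token are placeholders and the base URL is an assumption.

import json
import urllib3

http = urllib3.PoolManager()
token = 'YOUR_BOT_TOKEN'
r = http.request('GET', 'https://discord.com/api/v8/gateway',
                 headers={'Authorization': 'Bot ' + token})
data = json.loads(r.data.decode('utf-8')) if r.status == 200 else {}
print(data)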
def instantiate(self, seed=0, serial_id=0, preset='default', extra_args=None) -> gym.Env:
""" Make a single environment compatible with the experiments """
settings = self.get_preset(preset)
return wrapped_env_maker(self.envname, seed, serial_id, **settings) | def function[instantiate, parameter[self, seed, serial_id, preset, extra_args]]:
constant[ Make a single environment compatible with the experiments ]
variable[settings] assign[=] call[name[self].get_preset, parameter[name[preset]]]
return[call[name[wrapped_env_maker], parameter[name[self].envname, name[seed], name[serial_id]]]] | keyword[def] identifier[instantiate] ( identifier[self] , identifier[seed] = literal[int] , identifier[serial_id] = literal[int] , identifier[preset] = literal[string] , identifier[extra_args] = keyword[None] )-> identifier[gym] . identifier[Env] :
literal[string]
identifier[settings] = identifier[self] . identifier[get_preset] ( identifier[preset] )
keyword[return] identifier[wrapped_env_maker] ( identifier[self] . identifier[envname] , identifier[seed] , identifier[serial_id] ,** identifier[settings] ) | def instantiate(self, seed=0, serial_id=0, preset='default', extra_args=None) -> gym.Env:
""" Make a single environment compatible with the experiments """
settings = self.get_preset(preset)
return wrapped_env_maker(self.envname, seed, serial_id, **settings) |
def listRoleIds(self, *args, **kwargs):
"""
List Role IDs
If no limit is given, the roleIds of all roles are returned. Since this
list may become long, callers can use the `limit` and `continuationToken`
query arguments to page through the responses.
This method gives output: ``v1/list-role-ids-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listRoleIds"], *args, **kwargs) | def function[listRoleIds, parameter[self]]:
constant[
List Role IDs
If no limit is given, the roleIds of all roles are returned. Since this
list may become long, callers can use the `limit` and `continuationToken`
query arguments to page through the responses.
This method gives output: ``v1/list-role-ids-response.json#``
This method is ``stable``
]
return[call[name[self]._makeApiCall, parameter[call[name[self].funcinfo][constant[listRoleIds]], <ast.Starred object at 0x7da18eb54280>]]] | keyword[def] identifier[listRoleIds] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[_makeApiCall] ( identifier[self] . identifier[funcinfo] [ literal[string] ],* identifier[args] ,** identifier[kwargs] ) | def listRoleIds(self, *args, **kwargs):
"""
List Role IDs
If no limit is given, the roleIds of all roles are returned. Since this
list may become long, callers can use the `limit` and `continuationToken`
query arguments to page through the responses.
This method gives output: ``v1/list-role-ids-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo['listRoleIds'], *args, **kwargs) |
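A hedged pagination loop over listRoleIds; how the limit and continuationToken query arguments are forwarded depends on the concrete client, so client.listRoleIds(**kwargs) is an assumed signature, not a documented one.

role_ids, continuation = [], None
while True:
    kwargs = {'limit': 100}
    if continuation:
        kwargs['continuationToken'] = continuation
    page = client.listRoleIds(**kwargs)      # 'client' is hypothetical
    role_ids.extend(page.get('roleIds', []))
    continuation = page.get('continuationToken')
    if not continuation:
        break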
def path(self, value):
"""
Setter for **self.__path** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format("path", value)
self.__path = value | def function[path, parameter[self, value]]:
constant[
Setter for **self.__path** attribute.
:param value: Attribute value.
:type value: unicode
]
if compare[name[value] is_not constant[None]] begin[:]
assert[compare[call[name[type], parameter[name[value]]] is name[unicode]]]
name[self].__path assign[=] name[value] | keyword[def] identifier[path] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[type] ( identifier[value] ) keyword[is] identifier[unicode] , literal[string] . identifier[format] ( literal[string] , identifier[value] )
identifier[self] . identifier[__path] = identifier[value] | def path(self, value):
"""
Setter for **self.__path** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format('path', value) # depends on [control=['if'], data=['value']]
self.__path = value |
def _get_variable_name_or_register(var, variables, names, params, prefix):
'''
Args:
var (~nnabla.Variable)
variables (OrderedDict)
names (dict): Force name table, Variable -> str
params (dict): NdArray -> str
prefix(str)
'''
if var not in variables.values():
vname = prefix
if var.data in params:
vname = params[var.data]
elif var in names:
vname = names[var]
vname = _get_unique_variable_name(vname, variables)
variables[vname] = var
else:
vname = list(variables.keys())[list(variables.values()).index(var)]
return vname | def function[_get_variable_name_or_register, parameter[var, variables, names, params, prefix]]:
constant[
Args:
var (~nnabla.Variable)
variables (OrderedDict)
names (dict): Force name table, Variable -> str
params (dict): NdArray -> str
prefix(str)
]
if compare[name[var] <ast.NotIn object at 0x7da2590d7190> call[name[variables].values, parameter[]]] begin[:]
variable[vname] assign[=] name[prefix]
if compare[name[var].data in name[params]] begin[:]
variable[vname] assign[=] call[name[params]][name[var].data]
variable[vname] assign[=] call[name[_get_unique_variable_name], parameter[name[vname], name[variables]]]
call[name[variables]][name[vname]] assign[=] name[var]
return[name[vname]] | keyword[def] identifier[_get_variable_name_or_register] ( identifier[var] , identifier[variables] , identifier[names] , identifier[params] , identifier[prefix] ):
literal[string]
keyword[if] identifier[var] keyword[not] keyword[in] identifier[variables] . identifier[values] ():
identifier[vname] = identifier[prefix]
keyword[if] identifier[var] . identifier[data] keyword[in] identifier[params] :
identifier[vname] = identifier[params] [ identifier[var] . identifier[data] ]
keyword[elif] identifier[var] keyword[in] identifier[names] :
identifier[vname] = identifier[names] [ identifier[var] ]
identifier[vname] = identifier[_get_unique_variable_name] ( identifier[vname] , identifier[variables] )
identifier[variables] [ identifier[vname] ]= identifier[var]
keyword[else] :
identifier[vname] = identifier[list] ( identifier[variables] . identifier[keys] ())[ identifier[list] ( identifier[variables] . identifier[values] ()). identifier[index] ( identifier[var] )]
keyword[return] identifier[vname] | def _get_variable_name_or_register(var, variables, names, params, prefix):
"""
Args:
var (~nnabla.Variable)
variables (OrderedDict)
names (dict): Force name table, Variable -> str
params (dict): NdArray -> str
prefix(str)
"""
if var not in variables.values():
vname = prefix
if var.data in params:
vname = params[var.data] # depends on [control=['if'], data=['params']]
elif var in names:
vname = names[var] # depends on [control=['if'], data=['var', 'names']]
vname = _get_unique_variable_name(vname, variables)
variables[vname] = var # depends on [control=['if'], data=['var']]
else:
vname = list(variables.keys())[list(variables.values()).index(var)]
return vname |
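The reverse lookup in the else branch, shown on a plain dict; values().index() is a linear scan, which is acceptable for the variable counts involved here.

variables = {'x0': 'a', 'x1': 'b', 'x2': 'c'}
var = 'b'
vname = list(variables.keys())[list(variables.values()).index(var)]
print(vname)  # x1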
def get_thumbnail(file_, geometry_string, **options):
"""
A shortcut for the Backend ``get_thumbnail`` method
"""
return default.backend.get_thumbnail(file_, geometry_string, **options) | def function[get_thumbnail, parameter[file_, geometry_string]]:
constant[
A shortcut for the Backend ``get_thumbnail`` method
]
return[call[name[default].backend.get_thumbnail, parameter[name[file_], name[geometry_string]]]] | keyword[def] identifier[get_thumbnail] ( identifier[file_] , identifier[geometry_string] ,** identifier[options] ):
literal[string]
keyword[return] identifier[default] . identifier[backend] . identifier[get_thumbnail] ( identifier[file_] , identifier[geometry_string] ,** identifier[options] ) | def get_thumbnail(file_, geometry_string, **options):
"""
A shortcut for the Backend ``get_thumbnail`` method
"""
return default.backend.get_thumbnail(file_, geometry_string, **options) |
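A typical sorl-thumbnail call through this shortcut; crop and quality are documented sorl options, while profile.photo is an assumed Django file field and a configured Django project is required.

from sorl.thumbnail import get_thumbnail

im = get_thumbnail(profile.photo, '100x100', crop='center', quality=90)
print(im.url, im.width, im.height)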
def parse(argv=None):
"""
Parse some arguments using the parser.
"""
if argv is None:
argv = sys.argv[1:]
# Evade http://bugs.python.org/issue9253
if not argv or argv[0] not in {"run", "transform"}:
argv = ["run"] + argv
arguments = _clean(_parser.parse_args(argv))
return arguments | def function[parse, parameter[argv]]:
constant[
Parse some arguments using the parser.
]
if compare[name[argv] is constant[None]] begin[:]
variable[argv] assign[=] call[name[sys].argv][<ast.Slice object at 0x7da20c794610>]
if <ast.BoolOp object at 0x7da20c795900> begin[:]
variable[argv] assign[=] binary_operation[list[[<ast.Constant object at 0x7da20c7968f0>]] + name[argv]]
variable[arguments] assign[=] call[name[_clean], parameter[call[name[_parser].parse_args, parameter[name[argv]]]]]
return[name[arguments]] | keyword[def] identifier[parse] ( identifier[argv] = keyword[None] ):
literal[string]
keyword[if] identifier[argv] keyword[is] keyword[None] :
identifier[argv] = identifier[sys] . identifier[argv] [ literal[int] :]
keyword[if] keyword[not] identifier[argv] keyword[or] identifier[argv] [ literal[int] ] keyword[not] keyword[in] { literal[string] , literal[string] }:
identifier[argv] =[ literal[string] ]+ identifier[argv]
identifier[arguments] = identifier[_clean] ( identifier[_parser] . identifier[parse_args] ( identifier[argv] ))
keyword[return] identifier[arguments] | def parse(argv=None):
"""
Parse some arguments using the parser.
"""
if argv is None:
argv = sys.argv[1:] # depends on [control=['if'], data=['argv']]
# Evade http://bugs.python.org/issue9253
if not argv or argv[0] not in {'run', 'transform'}:
argv = ['run'] + argv # depends on [control=['if'], data=[]]
arguments = _clean(_parser.parse_args(argv))
return arguments |
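The issue-9253 workaround in isolation: prepend the default subcommand whenever the first token is not a known command.

def normalize(argv):
    if not argv or argv[0] not in {'run', 'transform'}:
        argv = ['run'] + argv
    return argv

print(normalize(['--fast']))           # ['run', '--fast']
print(normalize(['transform', 'x']))   # ['transform', 'x']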
def read_lines_from_file(file_path: str) -> List[str]:
""" Read text lines from a file """
# check if the file exists?
with open(file_path) as csv_file:
content = csv_file.readlines()
return content | def function[read_lines_from_file, parameter[file_path]]:
constant[ Read text lines from a file ]
with call[name[open], parameter[name[file_path]]] begin[:]
variable[content] assign[=] call[name[csv_file].readlines, parameter[]]
return[name[content]] | keyword[def] identifier[read_lines_from_file] ( identifier[file_path] : identifier[str] )-> identifier[List] [ identifier[str] ]:
literal[string]
keyword[with] identifier[open] ( identifier[file_path] ) keyword[as] identifier[csv_file] :
identifier[content] = identifier[csv_file] . identifier[readlines] ()
keyword[return] identifier[content] | def read_lines_from_file(file_path: str) -> List[str]:
""" Read text lines from a file """
# check if the file exists?
with open(file_path) as csv_file:
content = csv_file.readlines() # depends on [control=['with'], data=['csv_file']]
return content |
def run(self):
"""Run until there are no more events.
This only looks at events scheduled through the event loop.
"""
self._stop = False
while not self._stop:
have_sources = self._timers or self._readers or self._writers
if not self._processor.pending and not have_sources:
break
events = QEventLoop.AllEvents
if not self._processor.pending:
events |= QEventLoop.WaitForMoreEvents
self._qapp.processEvents(events)
if self._processor.pending:
self._processor.run() | def function[run, parameter[self]]:
constant[Run until there are no more events.
This only looks at events scheduled through the event loop.
]
name[self]._stop assign[=] constant[False]
while <ast.UnaryOp object at 0x7da20c7c8520> begin[:]
variable[have_sources] assign[=] <ast.BoolOp object at 0x7da20c7ca920>
if <ast.BoolOp object at 0x7da20c7ca4a0> begin[:]
break
variable[events] assign[=] name[QEventLoop].AllEvents
if <ast.UnaryOp object at 0x7da20c7cbc40> begin[:]
<ast.AugAssign object at 0x7da20c7cbd00>
call[name[self]._qapp.processEvents, parameter[name[events]]]
if name[self]._processor.pending begin[:]
call[name[self]._processor.run, parameter[]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_stop] = keyword[False]
keyword[while] keyword[not] identifier[self] . identifier[_stop] :
identifier[have_sources] = identifier[self] . identifier[_timers] keyword[or] identifier[self] . identifier[_readers] keyword[or] identifier[self] . identifier[_writers]
keyword[if] keyword[not] identifier[self] . identifier[_processor] . identifier[pending] keyword[and] keyword[not] identifier[have_sources] :
keyword[break]
identifier[events] = identifier[QEventLoop] . identifier[AllEvents]
keyword[if] keyword[not] identifier[self] . identifier[_processor] . identifier[pending] :
identifier[events] |= identifier[QEventLoop] . identifier[WaitForMoreEvents]
identifier[self] . identifier[_qapp] . identifier[processEvents] ( identifier[events] )
keyword[if] identifier[self] . identifier[_processor] . identifier[pending] :
identifier[self] . identifier[_processor] . identifier[run] () | def run(self):
"""Run until there are no more events.
This only looks at events scheduled through the event loop.
"""
self._stop = False
while not self._stop:
have_sources = self._timers or self._readers or self._writers
if not self._processor.pending and (not have_sources):
break # depends on [control=['if'], data=[]]
events = QEventLoop.AllEvents
if not self._processor.pending:
events |= QEventLoop.WaitForMoreEvents # depends on [control=['if'], data=[]]
self._qapp.processEvents(events)
if self._processor.pending:
self._processor.run() # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] |