code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
---|---|---|---|
def save(self, *args):
    """Pickle the cached call to ``<file_root>.pkl``.

    Parameters
    ----------
    *args:
        All but the last argument are inputs to the cached function. The
        last is the actual value of the function.
    """
    target = self.file_root + '.pkl'
    with open(target, "wb") as handle:
        # Highest protocol keeps the cache file compact and fast to load.
        pickle.dump(args, handle, protocol=pickle.HIGHEST_PROTOCOL)
constant[ Save cache to file using pickle.
Parameters
----------
*args:
All but the last argument are inputs to the cached function. The
last is the actual value of the function.
]
with call[name[open], parameter[binary_operation[name[self].file_root + constant[.pkl]], constant[wb]]] begin[:]
call[name[pickle].dump, parameter[name[args], name[f]]] | keyword[def] identifier[save] ( identifier[self] ,* identifier[args] ):
literal[string]
keyword[with] identifier[open] ( identifier[self] . identifier[file_root] + literal[string] , literal[string] ) keyword[as] identifier[f] :
identifier[pickle] . identifier[dump] ( identifier[args] , identifier[f] , identifier[protocol] = identifier[pickle] . identifier[HIGHEST_PROTOCOL] ) | def save(self, *args):
""" Save cache to file using pickle.
Parameters
----------
*args:
All but the last argument are inputs to the cached function. The
last is the actual value of the function.
"""
with open(self.file_root + '.pkl', 'wb') as f:
pickle.dump(args, f, protocol=pickle.HIGHEST_PROTOCOL) # depends on [control=['with'], data=['f']] |
def distribute_covar_matrix_to_match_covariance_type(
        tied_cv, covariance_type, n_components):
    """Create all the covariance matrices from a given template."""
    if covariance_type == 'spherical':
        # One scalar variance (the template's grand mean) per feature.
        spherical_row = tied_cv.mean() * np.ones(tied_cv.shape[1])
        return np.tile(spherical_row, (n_components, 1))
    if covariance_type == 'tied':
        # All components share the template matrix itself.
        return tied_cv
    if covariance_type == 'diag':
        # Each component keeps only the template's diagonal.
        return np.tile(np.diag(tied_cv), (n_components, 1))
    if covariance_type == 'full':
        # Each component gets a full copy of the template.
        return np.tile(tied_cv, (n_components, 1, 1))
    raise ValueError("covariance_type must be one of " +
                     "'spherical', 'tied', 'diag', 'full'")
constant[Create all the covariance matrices from a given template.]
if compare[name[covariance_type] equal[==] constant[spherical]] begin[:]
variable[cv] assign[=] call[name[np].tile, parameter[binary_operation[call[name[tied_cv].mean, parameter[]] * call[name[np].ones, parameter[call[name[tied_cv].shape][constant[1]]]]], tuple[[<ast.Name object at 0x7da1b1d5f190>, <ast.Constant object at 0x7da1b1d5e230>]]]]
return[name[cv]] | keyword[def] identifier[distribute_covar_matrix_to_match_covariance_type] (
identifier[tied_cv] , identifier[covariance_type] , identifier[n_components] ):
literal[string]
keyword[if] identifier[covariance_type] == literal[string] :
identifier[cv] = identifier[np] . identifier[tile] ( identifier[tied_cv] . identifier[mean] ()* identifier[np] . identifier[ones] ( identifier[tied_cv] . identifier[shape] [ literal[int] ]),
( identifier[n_components] , literal[int] ))
keyword[elif] identifier[covariance_type] == literal[string] :
identifier[cv] = identifier[tied_cv]
keyword[elif] identifier[covariance_type] == literal[string] :
identifier[cv] = identifier[np] . identifier[tile] ( identifier[np] . identifier[diag] ( identifier[tied_cv] ),( identifier[n_components] , literal[int] ))
keyword[elif] identifier[covariance_type] == literal[string] :
identifier[cv] = identifier[np] . identifier[tile] ( identifier[tied_cv] ,( identifier[n_components] , literal[int] , literal[int] ))
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] +
literal[string] )
keyword[return] identifier[cv] | def distribute_covar_matrix_to_match_covariance_type(tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template."""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]), (n_components, 1)) # depends on [control=['if'], data=[]]
elif covariance_type == 'tied':
cv = tied_cv # depends on [control=['if'], data=[]]
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1)) # depends on [control=['if'], data=[]]
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1)) # depends on [control=['if'], data=[]]
else:
raise ValueError('covariance_type must be one of ' + "'spherical', 'tied', 'diag', 'full'")
return cv |
def _get_rows(self, table):
"""Returns rows from table"""
childnodes = table.childNodes
qname_childnodes = [(s.qname[1], s) for s in childnodes]
return [node for name, node in qname_childnodes
if name == u'table-row'] | def function[_get_rows, parameter[self, table]]:
constant[Returns rows from table]
variable[childnodes] assign[=] name[table].childNodes
variable[qname_childnodes] assign[=] <ast.ListComp object at 0x7da1b26adb70>
return[<ast.ListComp object at 0x7da1b26ae6e0>] | keyword[def] identifier[_get_rows] ( identifier[self] , identifier[table] ):
literal[string]
identifier[childnodes] = identifier[table] . identifier[childNodes]
identifier[qname_childnodes] =[( identifier[s] . identifier[qname] [ literal[int] ], identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[childnodes] ]
keyword[return] [ identifier[node] keyword[for] identifier[name] , identifier[node] keyword[in] identifier[qname_childnodes]
keyword[if] identifier[name] == literal[string] ] | def _get_rows(self, table):
"""Returns rows from table"""
childnodes = table.childNodes
qname_childnodes = [(s.qname[1], s) for s in childnodes]
return [node for (name, node) in qname_childnodes if name == u'table-row'] |
def get_exchange_group_info(self, symprec=1e-2, angle_tolerance=5.0):
    """
    Symmetry information of the Hamiltonian describing the exchange
    energy of the system.

    Relative directions of the magnetic moments are taken into account
    (via the spin-decorated structure) but their absolute direction is
    not. This is not strictly accurate (e.g. some/many atoms will have
    zero magnetic moments), but defining symmetry this way is a useful
    way of keeping track of distinct magnetic orderings within pymatgen.

    :param symprec: same as SpacegroupAnalyzer
    :param angle_tolerance: same as SpacegroupAnalyzer
    :return: spacegroup_symbol, international_number
    """
    spin_structure = self.get_structure_with_spin()
    return spin_structure.get_space_group_info(
        symprec=symprec, angle_tolerance=angle_tolerance)
constant[
Returns the information on the symmetry of the Hamiltonian
describing the exchange energy of the system, taking into
account relative direction of magnetic moments but not their
absolute direction.
This is not strictly accurate (e.g. some/many atoms will
have zero magnetic moments), but defining symmetry this
way is a useful way of keeping track of distinct magnetic
orderings within pymatgen.
:param symprec: same as SpacegroupAnalyzer
:param angle_tolerance: same as SpacegroupAnalyzer
:return: spacegroup_symbol, international_number
]
variable[structure] assign[=] call[name[self].get_structure_with_spin, parameter[]]
return[call[name[structure].get_space_group_info, parameter[]]] | keyword[def] identifier[get_exchange_group_info] ( identifier[self] , identifier[symprec] = literal[int] , identifier[angle_tolerance] = literal[int] ):
literal[string]
identifier[structure] = identifier[self] . identifier[get_structure_with_spin] ()
keyword[return] identifier[structure] . identifier[get_space_group_info] (
identifier[symprec] = identifier[symprec] , identifier[angle_tolerance] = identifier[angle_tolerance]
) | def get_exchange_group_info(self, symprec=0.01, angle_tolerance=5.0):
"""
Returns the information on the symmetry of the Hamiltonian
describing the exchange energy of the system, taking into
account relative direction of magnetic moments but not their
absolute direction.
This is not strictly accurate (e.g. some/many atoms will
have zero magnetic moments), but defining symmetry this
way is a useful way of keeping track of distinct magnetic
orderings within pymatgen.
:param symprec: same as SpacegroupAnalyzer
:param angle_tolerance: same as SpacegroupAnalyzer
:return: spacegroup_symbol, international_number
"""
structure = self.get_structure_with_spin()
return structure.get_space_group_info(symprec=symprec, angle_tolerance=angle_tolerance) |
def get_volumes_for_instance(self, arg, device=None):
    """
    Return all EC2 Volume objects attached to ``arg`` instance name or ID.

    May specify ``device`` to limit to the (single) volume attached as that
    device.
    """
    instance_id = self.get(arg).id
    search = {'attachment.instance-id': instance_id}
    if device is not None:
        # Narrow the search to the volume mounted at this device path.
        search['attachment.device'] = device
    return self.get_all_volumes(filters=search)
constant[
Return all EC2 Volume objects attached to ``arg`` instance name or ID.
May specify ``device`` to limit to the (single) volume attached as that
device.
]
variable[instance] assign[=] call[name[self].get, parameter[name[arg]]]
variable[filters] assign[=] dictionary[[<ast.Constant object at 0x7da18bc726e0>], [<ast.Attribute object at 0x7da18bc704c0>]]
if compare[name[device] is_not constant[None]] begin[:]
call[name[filters]][constant[attachment.device]] assign[=] name[device]
return[call[name[self].get_all_volumes, parameter[]]] | keyword[def] identifier[get_volumes_for_instance] ( identifier[self] , identifier[arg] , identifier[device] = keyword[None] ):
literal[string]
identifier[instance] = identifier[self] . identifier[get] ( identifier[arg] )
identifier[filters] ={ literal[string] : identifier[instance] . identifier[id] }
keyword[if] identifier[device] keyword[is] keyword[not] keyword[None] :
identifier[filters] [ literal[string] ]= identifier[device]
keyword[return] identifier[self] . identifier[get_all_volumes] ( identifier[filters] = identifier[filters] ) | def get_volumes_for_instance(self, arg, device=None):
"""
Return all EC2 Volume objects attached to ``arg`` instance name or ID.
May specify ``device`` to limit to the (single) volume attached as that
device.
"""
instance = self.get(arg)
filters = {'attachment.instance-id': instance.id}
if device is not None:
filters['attachment.device'] = device # depends on [control=['if'], data=['device']]
return self.get_all_volumes(filters=filters) |
def run_mutation_aggregator(job, mutation_results, univ_options):
    """
    Aggregate all the called mutations.

    :param dict mutation_results: Dict of dicts of the various mutation callers in a per
           chromosome format
    :param dict univ_options: Dict of universal options used by almost all tools
    :returns: fsID for the merged mutations file
    :rtype: toil.fileStore.FileID
    """
    # One child job merges the per-caller calls for each chromosome.
    per_chrom = {
        chrom: job.addChildJobFn(merge_perchrom_mutations, chrom,
                                 mutation_results, univ_options).rv()
        for chrom in mutation_results['mutect']}
    # A follow-on then merges the per-chromosome VCFs into one file.
    merged = job.addFollowOnJobFn(merge_perchrom_vcfs, per_chrom, 'merged',
                                  univ_options)
    job.fileStore.logToMaster('Aggregated mutations for %s successfully' % univ_options['patient'])
    return merged.rv()
constant[
Aggregate all the called mutations.
:param dict mutation_results: Dict of dicts of the various mutation callers in a per chromosome
format
:param dict univ_options: Dict of universal options used by almost all tools
:returns: fsID for the merged mutations file
:rtype: toil.fileStore.FileID
]
variable[out] assign[=] dictionary[[], []]
for taget[name[chrom]] in starred[call[call[name[mutation_results]][constant[mutect]].keys, parameter[]]] begin[:]
call[name[out]][name[chrom]] assign[=] call[call[name[job].addChildJobFn, parameter[name[merge_perchrom_mutations], name[chrom], name[mutation_results], name[univ_options]]].rv, parameter[]]
variable[merged_snvs] assign[=] call[name[job].addFollowOnJobFn, parameter[name[merge_perchrom_vcfs], name[out], constant[merged], name[univ_options]]]
call[name[job].fileStore.logToMaster, parameter[binary_operation[constant[Aggregated mutations for %s successfully] <ast.Mod object at 0x7da2590d6920> call[name[univ_options]][constant[patient]]]]]
return[call[name[merged_snvs].rv, parameter[]]] | keyword[def] identifier[run_mutation_aggregator] ( identifier[job] , identifier[mutation_results] , identifier[univ_options] ):
literal[string]
identifier[out] ={}
keyword[for] identifier[chrom] keyword[in] identifier[mutation_results] [ literal[string] ]. identifier[keys] ():
identifier[out] [ identifier[chrom] ]= identifier[job] . identifier[addChildJobFn] ( identifier[merge_perchrom_mutations] , identifier[chrom] , identifier[mutation_results] ,
identifier[univ_options] ). identifier[rv] ()
identifier[merged_snvs] = identifier[job] . identifier[addFollowOnJobFn] ( identifier[merge_perchrom_vcfs] , identifier[out] , literal[string] , identifier[univ_options] )
identifier[job] . identifier[fileStore] . identifier[logToMaster] ( literal[string] % identifier[univ_options] [ literal[string] ])
keyword[return] identifier[merged_snvs] . identifier[rv] () | def run_mutation_aggregator(job, mutation_results, univ_options):
"""
Aggregate all the called mutations.
:param dict mutation_results: Dict of dicts of the various mutation callers in a per chromosome
format
:param dict univ_options: Dict of universal options used by almost all tools
:returns: fsID for the merged mutations file
:rtype: toil.fileStore.FileID
"""
# Setup an input data structure for the merge function
out = {}
for chrom in mutation_results['mutect'].keys():
out[chrom] = job.addChildJobFn(merge_perchrom_mutations, chrom, mutation_results, univ_options).rv() # depends on [control=['for'], data=['chrom']]
merged_snvs = job.addFollowOnJobFn(merge_perchrom_vcfs, out, 'merged', univ_options)
job.fileStore.logToMaster('Aggregated mutations for %s successfully' % univ_options['patient'])
return merged_snvs.rv() |
def dict_match(self, match_dict):
    '''
    Accept a dictionary of keys and return the current state of the
    specified keys
    '''
    matches = {}
    current = self.list_keys()
    for _status, patterns in six.iteritems(match_dict):
        for pattern in salt.utils.data.sorted_ignorecase(patterns):
            for key_dir in (self.ACC, self.PEND, self.REJ, self.DEN):
                if not key_dir:
                    continue
                # Record the pattern under every key directory it matches.
                if fnmatch.filter(current.get(key_dir, []), pattern):
                    matches.setdefault(key_dir, []).append(pattern)
    return matches
constant[
Accept a dictionary of keys and return the current state of the
specified keys
]
variable[ret] assign[=] dictionary[[], []]
variable[cur_keys] assign[=] call[name[self].list_keys, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da204345270>, <ast.Name object at 0x7da204345840>]]] in starred[call[name[six].iteritems, parameter[name[match_dict]]]] begin[:]
for taget[name[key]] in starred[call[name[salt].utils.data.sorted_ignorecase, parameter[name[keys]]]] begin[:]
for taget[name[keydir]] in starred[tuple[[<ast.Attribute object at 0x7da204344e50>, <ast.Attribute object at 0x7da204344910>, <ast.Attribute object at 0x7da204346ec0>, <ast.Attribute object at 0x7da204345810>]]] begin[:]
if <ast.BoolOp object at 0x7da204344610> begin[:]
call[call[name[ret].setdefault, parameter[name[keydir], list[[]]]].append, parameter[name[key]]]
return[name[ret]] | keyword[def] identifier[dict_match] ( identifier[self] , identifier[match_dict] ):
literal[string]
identifier[ret] ={}
identifier[cur_keys] = identifier[self] . identifier[list_keys] ()
keyword[for] identifier[status] , identifier[keys] keyword[in] identifier[six] . identifier[iteritems] ( identifier[match_dict] ):
keyword[for] identifier[key] keyword[in] identifier[salt] . identifier[utils] . identifier[data] . identifier[sorted_ignorecase] ( identifier[keys] ):
keyword[for] identifier[keydir] keyword[in] ( identifier[self] . identifier[ACC] , identifier[self] . identifier[PEND] , identifier[self] . identifier[REJ] , identifier[self] . identifier[DEN] ):
keyword[if] identifier[keydir] keyword[and] identifier[fnmatch] . identifier[filter] ( identifier[cur_keys] . identifier[get] ( identifier[keydir] ,[]), identifier[key] ):
identifier[ret] . identifier[setdefault] ( identifier[keydir] ,[]). identifier[append] ( identifier[key] )
keyword[return] identifier[ret] | def dict_match(self, match_dict):
"""
Accept a dictionary of keys and return the current state of the
specified keys
"""
ret = {}
cur_keys = self.list_keys()
for (status, keys) in six.iteritems(match_dict):
for key in salt.utils.data.sorted_ignorecase(keys):
for keydir in (self.ACC, self.PEND, self.REJ, self.DEN):
if keydir and fnmatch.filter(cur_keys.get(keydir, []), key):
ret.setdefault(keydir, []).append(key) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['keydir']] # depends on [control=['for'], data=['key']] # depends on [control=['for'], data=[]]
return ret |
def sample(self):
    """
    Draw a state from a demonstration.

    A sampling scheme is first chosen at random, with probabilities given
    by ``self.scheme_ratios``; the corresponding sampling method is then
    looked up by name and invoked.
    """
    draw = random.uniform(0, 1)
    # First scheme whose cumulative mixing ratio exceeds the random draw.
    exceeded = np.cumsum(self.scheme_ratios) > draw
    for idx, hit in enumerate(exceeded):
        if hit:
            break
    scheme_name = self.sampling_schemes[idx]
    chosen_method = getattr(self, self.sample_method_dict[scheme_name])
    return chosen_method()
constant[
This is the core sampling method. Samples a state from a
demonstration, in accordance with the configuration.
]
variable[seed] assign[=] call[name[random].uniform, parameter[constant[0], constant[1]]]
variable[ratio] assign[=] call[name[np].cumsum, parameter[name[self].scheme_ratios]]
variable[ratio] assign[=] compare[name[ratio] greater[>] name[seed]]
for taget[tuple[[<ast.Name object at 0x7da20c6abd00>, <ast.Name object at 0x7da20c6a8f40>]]] in starred[call[name[enumerate], parameter[name[ratio]]]] begin[:]
if name[v] begin[:]
break
variable[sample_method] assign[=] call[name[getattr], parameter[name[self], call[name[self].sample_method_dict][call[name[self].sampling_schemes][name[i]]]]]
return[call[name[sample_method], parameter[]]] | keyword[def] identifier[sample] ( identifier[self] ):
literal[string]
identifier[seed] = identifier[random] . identifier[uniform] ( literal[int] , literal[int] )
identifier[ratio] = identifier[np] . identifier[cumsum] ( identifier[self] . identifier[scheme_ratios] )
identifier[ratio] = identifier[ratio] > identifier[seed]
keyword[for] identifier[i] , identifier[v] keyword[in] identifier[enumerate] ( identifier[ratio] ):
keyword[if] identifier[v] :
keyword[break]
identifier[sample_method] = identifier[getattr] ( identifier[self] , identifier[self] . identifier[sample_method_dict] [ identifier[self] . identifier[sampling_schemes] [ identifier[i] ]])
keyword[return] identifier[sample_method] () | def sample(self):
"""
This is the core sampling method. Samples a state from a
demonstration, in accordance with the configuration.
"""
# chooses a sampling scheme randomly based on the mixing ratios
seed = random.uniform(0, 1)
ratio = np.cumsum(self.scheme_ratios)
ratio = ratio > seed
for (i, v) in enumerate(ratio):
if v:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
sample_method = getattr(self, self.sample_method_dict[self.sampling_schemes[i]])
return sample_method() |
def _mnl_utility_transform(systematic_utilities, *args, **kwargs):
"""
Parameters
----------
systematic_utilities : 1D ndarray.
Should contain the systematic utilities for each each available
alternative for each observation.
Returns
-------
`systematic_utilities[:, None]`
"""
# Be sure to return a 2D array since other functions will be expecting this
if len(systematic_utilities.shape) == 1:
systematic_utilities = systematic_utilities[:, np.newaxis]
return systematic_utilities | def function[_mnl_utility_transform, parameter[systematic_utilities]]:
constant[
Parameters
----------
systematic_utilities : 1D ndarray.
Should contain the systematic utilities for each each available
alternative for each observation.
Returns
-------
`systematic_utilities[:, None]`
]
if compare[call[name[len], parameter[name[systematic_utilities].shape]] equal[==] constant[1]] begin[:]
variable[systematic_utilities] assign[=] call[name[systematic_utilities]][tuple[[<ast.Slice object at 0x7da18f58e7a0>, <ast.Attribute object at 0x7da18f58c1f0>]]]
return[name[systematic_utilities]] | keyword[def] identifier[_mnl_utility_transform] ( identifier[systematic_utilities] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[len] ( identifier[systematic_utilities] . identifier[shape] )== literal[int] :
identifier[systematic_utilities] = identifier[systematic_utilities] [:, identifier[np] . identifier[newaxis] ]
keyword[return] identifier[systematic_utilities] | def _mnl_utility_transform(systematic_utilities, *args, **kwargs):
"""
Parameters
----------
systematic_utilities : 1D ndarray.
Should contain the systematic utilities for each each available
alternative for each observation.
Returns
-------
`systematic_utilities[:, None]`
"""
# Be sure to return a 2D array since other functions will be expecting this
if len(systematic_utilities.shape) == 1:
systematic_utilities = systematic_utilities[:, np.newaxis] # depends on [control=['if'], data=[]]
return systematic_utilities |
def from_string(string):
    """
    Reads an Incar object from a string.

    Args:
        string (str): Incar string

    Returns:
        Incar object
    """
    key_val_re = re.compile(r'(\w+)\s*=\s*(.*)')
    params = {}
    # Each physical line may carry several ';'-separated KEY = VALUE pairs.
    for line in clean_lines(string.splitlines()):
        for statement in line.split(';'):
            match = key_val_re.match(statement.strip())
            if not match:
                continue
            key = match.group(1).strip()
            params[key] = Incar.proc_val(key, match.group(2).strip())
    return Incar(params)
constant[
Reads an Incar object from a string.
Args:
string (str): Incar string
Returns:
Incar object
]
variable[lines] assign[=] call[name[list], parameter[call[name[clean_lines], parameter[call[name[string].splitlines, parameter[]]]]]]
variable[params] assign[=] dictionary[[], []]
for taget[name[line]] in starred[name[lines]] begin[:]
for taget[name[sline]] in starred[call[name[line].split, parameter[constant[;]]]] begin[:]
variable[m] assign[=] call[name[re].match, parameter[constant[(\w+)\s*=\s*(.*)], call[name[sline].strip, parameter[]]]]
if name[m] begin[:]
variable[key] assign[=] call[call[name[m].group, parameter[constant[1]]].strip, parameter[]]
variable[val] assign[=] call[call[name[m].group, parameter[constant[2]]].strip, parameter[]]
variable[val] assign[=] call[name[Incar].proc_val, parameter[name[key], name[val]]]
call[name[params]][name[key]] assign[=] name[val]
return[call[name[Incar], parameter[name[params]]]] | keyword[def] identifier[from_string] ( identifier[string] ):
literal[string]
identifier[lines] = identifier[list] ( identifier[clean_lines] ( identifier[string] . identifier[splitlines] ()))
identifier[params] ={}
keyword[for] identifier[line] keyword[in] identifier[lines] :
keyword[for] identifier[sline] keyword[in] identifier[line] . identifier[split] ( literal[string] ):
identifier[m] = identifier[re] . identifier[match] ( literal[string] , identifier[sline] . identifier[strip] ())
keyword[if] identifier[m] :
identifier[key] = identifier[m] . identifier[group] ( literal[int] ). identifier[strip] ()
identifier[val] = identifier[m] . identifier[group] ( literal[int] ). identifier[strip] ()
identifier[val] = identifier[Incar] . identifier[proc_val] ( identifier[key] , identifier[val] )
identifier[params] [ identifier[key] ]= identifier[val]
keyword[return] identifier[Incar] ( identifier[params] ) | def from_string(string):
"""
Reads an Incar object from a string.
Args:
string (str): Incar string
Returns:
Incar object
"""
lines = list(clean_lines(string.splitlines()))
params = {}
for line in lines:
for sline in line.split(';'):
m = re.match('(\\w+)\\s*=\\s*(.*)', sline.strip())
if m:
key = m.group(1).strip()
val = m.group(2).strip()
val = Incar.proc_val(key, val)
params[key] = val # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sline']] # depends on [control=['for'], data=['line']]
return Incar(params) |
def memory_allocation(library, session, size, extended=False):
    """Allocates memory from a resource's memory region.

    Corresponds to viMemAlloc* functions of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param size: Specifies the size of the allocation.
    :param extended: Use 64 bits offset independent of the platform.
    :return: offset of the allocated memory, return value of the library call.
    :rtype: offset, :class:`pyvisa.constants.StatusCode`
    """
    offset = ViBusAddress()
    # viMemAllocEx is the 64-bit-offset variant of viMemAlloc.
    allocator = library.viMemAllocEx if extended else library.viMemAlloc
    ret = allocator(session, size, byref(offset))
    return offset, ret
return offset, ret | def function[memory_allocation, parameter[library, session, size, extended]]:
constant[Allocates memory from a resource's memory region.
Corresponds to viMemAlloc* functions of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param size: Specifies the size of the allocation.
:param extended: Use 64 bits offset independent of the platform.
:return: offset of the allocated memory, return value of the library call.
:rtype: offset, :class:`pyvisa.constants.StatusCode`
]
variable[offset] assign[=] call[name[ViBusAddress], parameter[]]
if name[extended] begin[:]
variable[ret] assign[=] call[name[library].viMemAllocEx, parameter[name[session], name[size], call[name[byref], parameter[name[offset]]]]]
return[tuple[[<ast.Name object at 0x7da20c6c4760>, <ast.Name object at 0x7da20c6c4100>]]] | keyword[def] identifier[memory_allocation] ( identifier[library] , identifier[session] , identifier[size] , identifier[extended] = keyword[False] ):
literal[string]
identifier[offset] = identifier[ViBusAddress] ()
keyword[if] identifier[extended] :
identifier[ret] = identifier[library] . identifier[viMemAllocEx] ( identifier[session] , identifier[size] , identifier[byref] ( identifier[offset] ))
keyword[else] :
identifier[ret] = identifier[library] . identifier[viMemAlloc] ( identifier[session] , identifier[size] , identifier[byref] ( identifier[offset] ))
keyword[return] identifier[offset] , identifier[ret] | def memory_allocation(library, session, size, extended=False):
"""Allocates memory from a resource's memory region.
Corresponds to viMemAlloc* functions of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param size: Specifies the size of the allocation.
:param extended: Use 64 bits offset independent of the platform.
:return: offset of the allocated memory, return value of the library call.
:rtype: offset, :class:`pyvisa.constants.StatusCode`
"""
offset = ViBusAddress()
if extended:
ret = library.viMemAllocEx(session, size, byref(offset)) # depends on [control=['if'], data=[]]
else:
ret = library.viMemAlloc(session, size, byref(offset))
return (offset, ret) |
def get_oceania_temp_and_alt(wxlist: [str]) -> ([str], [str], [str]):  # type: ignore
    """
    Get Temperature and Altimeter lists for Oceania TAFs
    """
    found = {'T': [], 'Q': []}  # type: ignore
    # Peel off the temperature ('T') then altimeter ('Q') digit groups.
    for marker in ('T', 'Q'):
        if marker in wxlist:
            wxlist, found[marker] = _get_digit_list(wxlist, wxlist.index(marker))
    return wxlist, found['T'], found['Q']
constant[
Get Temperature and Altimeter lists for Oceania TAFs
]
<ast.Tuple object at 0x7da20c7c8d00> assign[=] tuple[[<ast.List object at 0x7da20c7c8040>, <ast.List object at 0x7da20c7c94e0>]]
if compare[constant[T] in name[wxlist]] begin[:]
<ast.Tuple object at 0x7da20c7ca8c0> assign[=] call[name[_get_digit_list], parameter[name[wxlist], call[name[wxlist].index, parameter[constant[T]]]]]
if compare[constant[Q] in name[wxlist]] begin[:]
<ast.Tuple object at 0x7da18f720c70> assign[=] call[name[_get_digit_list], parameter[name[wxlist], call[name[wxlist].index, parameter[constant[Q]]]]]
return[tuple[[<ast.Name object at 0x7da18f7201f0>, <ast.Name object at 0x7da18f723d60>, <ast.Name object at 0x7da18f7231f0>]]] | keyword[def] identifier[get_oceania_temp_and_alt] ( identifier[wxlist] :[ identifier[str] ])->([ identifier[str] ],[ identifier[str] ],[ identifier[str] ]):
literal[string]
identifier[tlist] , identifier[qlist] =[],[]
keyword[if] literal[string] keyword[in] identifier[wxlist] :
identifier[wxlist] , identifier[tlist] = identifier[_get_digit_list] ( identifier[wxlist] , identifier[wxlist] . identifier[index] ( literal[string] ))
keyword[if] literal[string] keyword[in] identifier[wxlist] :
identifier[wxlist] , identifier[qlist] = identifier[_get_digit_list] ( identifier[wxlist] , identifier[wxlist] . identifier[index] ( literal[string] ))
keyword[return] identifier[wxlist] , identifier[tlist] , identifier[qlist] | def get_oceania_temp_and_alt(wxlist: [str]) -> ([str], [str], [str]): # type: ignore
'\n Get Temperature and Altimeter lists for Oceania TAFs\n '
(tlist, qlist) = ([], []) # type: ignore
if 'T' in wxlist:
(wxlist, tlist) = _get_digit_list(wxlist, wxlist.index('T')) # depends on [control=['if'], data=['wxlist']]
if 'Q' in wxlist:
(wxlist, qlist) = _get_digit_list(wxlist, wxlist.index('Q')) # depends on [control=['if'], data=['wxlist']]
return (wxlist, tlist, qlist) |
def synchronizeLayout(primary, secondary, surface_size):
    """Synchronizes given layouts by normalizing height by using
    max height of given layouts to avoid transistion dirty effects.

    :param primary: Primary layout used.
    :param secondary: Secondary layout used.
    :param surface_size: Target surface size on which layout will be displayed.
    """
    for layout in (primary, secondary):
        layout.configure_bound(surface_size)
    # Both layouts adopt the smaller of the two key sizes.
    if primary.key_size < secondary.key_size:
        logging.warning('Normalizing key size from secondary to primary')
        secondary.key_size = primary.key_size
    elif primary.key_size > secondary.key_size:
        logging.warning('Normalizing key size from primary to secondary')
        primary.key_size = secondary.key_size
    # The shorter layout is resized to match the taller one.
    if primary.size[1] > secondary.size[1]:
        logging.warning('Normalizing layout size from secondary to primary')
        secondary.set_size(primary.size, surface_size)
    elif primary.size[1] < secondary.size[1]:
        logging.warning('Normalizing layout size from primary to secondary')
        primary.set_size(secondary.size, surface_size)
constant[Synchronizes given layouts by normalizing height by using
max height of given layouts to avoid transistion dirty effects.
:param primary: Primary layout used.
:param secondary: Secondary layout used.
:param surface_size: Target surface size on which layout will be displayed.
]
call[name[primary].configure_bound, parameter[name[surface_size]]]
call[name[secondary].configure_bound, parameter[name[surface_size]]]
if compare[name[primary].key_size less[<] name[secondary].key_size] begin[:]
call[name[logging].warning, parameter[constant[Normalizing key size from secondary to primary]]]
name[secondary].key_size assign[=] name[primary].key_size
if compare[call[name[primary].size][constant[1]] greater[>] call[name[secondary].size][constant[1]]] begin[:]
call[name[logging].warning, parameter[constant[Normalizing layout size from secondary to primary]]]
call[name[secondary].set_size, parameter[name[primary].size, name[surface_size]]] | keyword[def] identifier[synchronizeLayout] ( identifier[primary] , identifier[secondary] , identifier[surface_size] ):
literal[string]
identifier[primary] . identifier[configure_bound] ( identifier[surface_size] )
identifier[secondary] . identifier[configure_bound] ( identifier[surface_size] )
keyword[if] ( identifier[primary] . identifier[key_size] < identifier[secondary] . identifier[key_size] ):
identifier[logging] . identifier[warning] ( literal[string] )
identifier[secondary] . identifier[key_size] = identifier[primary] . identifier[key_size]
keyword[elif] ( identifier[primary] . identifier[key_size] > identifier[secondary] . identifier[key_size] ):
identifier[logging] . identifier[warning] ( literal[string] )
identifier[primary] . identifier[key_size] = identifier[secondary] . identifier[key_size]
keyword[if] ( identifier[primary] . identifier[size] [ literal[int] ]> identifier[secondary] . identifier[size] [ literal[int] ]):
identifier[logging] . identifier[warning] ( literal[string] )
identifier[secondary] . identifier[set_size] ( identifier[primary] . identifier[size] , identifier[surface_size] )
keyword[elif] ( identifier[primary] . identifier[size] [ literal[int] ]< identifier[secondary] . identifier[size] [ literal[int] ]):
identifier[logging] . identifier[warning] ( literal[string] )
identifier[primary] . identifier[set_size] ( identifier[secondary] . identifier[size] , identifier[surface_size] ) | def synchronizeLayout(primary, secondary, surface_size):
"""Synchronizes given layouts by normalizing height by using
max height of given layouts to avoid transistion dirty effects.
:param primary: Primary layout used.
:param secondary: Secondary layout used.
:param surface_size: Target surface size on which layout will be displayed.
"""
primary.configure_bound(surface_size)
secondary.configure_bound(surface_size)
# Check for key size.
if primary.key_size < secondary.key_size:
logging.warning('Normalizing key size from secondary to primary')
secondary.key_size = primary.key_size # depends on [control=['if'], data=[]]
elif primary.key_size > secondary.key_size:
logging.warning('Normalizing key size from primary to secondary')
primary.key_size = secondary.key_size # depends on [control=['if'], data=[]]
if primary.size[1] > secondary.size[1]:
logging.warning('Normalizing layout size from secondary to primary')
secondary.set_size(primary.size, surface_size) # depends on [control=['if'], data=[]]
elif primary.size[1] < secondary.size[1]:
logging.warning('Normalizing layout size from primary to secondary')
primary.set_size(secondary.size, surface_size) # depends on [control=['if'], data=[]] |
def error_keys_not_found(self, keys):
    """Log an error describing which nested key is missing from this dict.

    First logs the filename stored under ``self['meta']['location']``
    (falling back to ``self['location']``), then logs the requested key
    path indented one level per key, marking the missing leaf, e.g.::

        meta:
          location: <- this value is missing

    :param keys: sequence of keys describing the nested lookup path,
        outermost key first.
    """
    try:
        log.error("Filename: {0}".format(self['meta']['location']))
    except (KeyError, TypeError):
        # No 'meta' section (or it is not a mapping) -- fall back to the
        # top-level location.  Was a bare ``except:``, which would also
        # have swallowed KeyboardInterrupt/SystemExit.
        log.error("Filename: {0}".format(self['location']))
    log.error("Key '{0}' does not exist".format('.'.join(keys)))
    indent = ""
    last_index = len(keys) - 1
    for i, k in enumerate(keys):
        if i == last_index:
            log.error(indent + k + ": <- this value is missing")
        else:
            log.error(indent + k + ":")
        indent += "  "
constant[
Check if the requested keys are found in the dict.
:param keys: keys to be looked for
]
<ast.Try object at 0x7da20cabda80>
call[name[log].error, parameter[call[constant[Key '{0}' does not exist].format, parameter[call[constant[.].join, parameter[name[keys]]]]]]]
variable[indent] assign[=] constant[]
variable[last_index] assign[=] binary_operation[call[name[len], parameter[name[keys]]] - constant[1]]
for taget[tuple[[<ast.Name object at 0x7da20cabfc70>, <ast.Name object at 0x7da20cabf430>]]] in starred[call[name[enumerate], parameter[name[keys]]]] begin[:]
if compare[name[i] equal[==] name[last_index]] begin[:]
call[name[log].error, parameter[binary_operation[binary_operation[name[indent] + name[k]] + constant[: <- this value is missing]]]]
<ast.AugAssign object at 0x7da20c992a40> | keyword[def] identifier[error_keys_not_found] ( identifier[self] , identifier[keys] ):
literal[string]
keyword[try] :
identifier[log] . identifier[error] ( literal[string] . identifier[format] ( identifier[self] [ literal[string] ][ literal[string] ]))
keyword[except] :
identifier[log] . identifier[error] ( literal[string] . identifier[format] ( identifier[self] [ literal[string] ]))
identifier[log] . identifier[error] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[keys] )))
identifier[indent] = literal[string]
identifier[last_index] = identifier[len] ( identifier[keys] )- literal[int]
keyword[for] identifier[i] , identifier[k] keyword[in] identifier[enumerate] ( identifier[keys] ):
keyword[if] identifier[i] == identifier[last_index] :
identifier[log] . identifier[error] ( identifier[indent] + identifier[k] + literal[string] )
keyword[else] :
identifier[log] . identifier[error] ( identifier[indent] + identifier[k] + literal[string] )
identifier[indent] += literal[string] | def error_keys_not_found(self, keys):
"""
Check if the requested keys are found in the dict.
:param keys: keys to be looked for
"""
try:
log.error('Filename: {0}'.format(self['meta']['location'])) # depends on [control=['try'], data=[]]
except:
log.error('Filename: {0}'.format(self['location'])) # depends on [control=['except'], data=[]]
log.error("Key '{0}' does not exist".format('.'.join(keys)))
indent = ''
last_index = len(keys) - 1
for (i, k) in enumerate(keys):
if i == last_index:
log.error(indent + k + ': <- this value is missing') # depends on [control=['if'], data=[]]
else:
log.error(indent + k + ':')
indent += ' ' # depends on [control=['for'], data=[]] |
def check_auto_merge_labeler(repo: GithubRepository, pull_id: int
                             ) -> Optional[CannotAutomergeError]:
    """Check that the automerge label was applied by someone with write access.

    Fetches the issue's event history, finds the most recent event that
    attached one of the AUTO_MERGE_LABELS, and verifies that the actor of
    that event is a collaborator with write permission.

    References:
        https://developer.github.com/v3/issues/events/#list-events-for-an-issue

    :param repo: the repository the pull request belongs to.
    :param pull_id: the pull request (issue) number to inspect.
    :return: a CannotAutomergeError when the label was never added,
        otherwise the result of the collaborator write-permission check.
    :raises RuntimeError: if the GitHub API request does not return 200.
    """
    url = "https://api.github.com/repos/{}/{}/issues/{}/events".format(
        repo.organization, repo.name, pull_id)
    # Authenticate via the Authorization header rather than the
    # ?access_token= query parameter: tokens embedded in URLs leak into
    # logs and proxies, and query-parameter authentication is deprecated
    # by GitHub.
    response = requests.get(
        url,
        headers={'Authorization': 'token {}'.format(repo.access_token)})
    if response.status_code != 200:
        raise RuntimeError(
            'Event check failed. Code: {}. Content: {}.'.format(
                response.status_code, response.content))
    payload = json.JSONDecoder().decode(response.content.decode())
    relevant = [event
                for event in payload
                if event['event'] == 'labeled' and
                event['label']['name'] in AUTO_MERGE_LABELS]
    if not relevant:
        return CannotAutomergeError('"automerge" label was never added.')
    return check_collaborator_has_write(repo, relevant[-1]['actor']['login'])
constant[
References:
https://developer.github.com/v3/issues/events/#list-events-for-an-issue
]
variable[url] assign[=] call[constant[https://api.github.com/repos/{}/{}/issues/{}/events?access_token={}].format, parameter[name[repo].organization, name[repo].name, name[pull_id], name[repo].access_token]]
variable[response] assign[=] call[name[requests].get, parameter[name[url]]]
if compare[name[response].status_code not_equal[!=] constant[200]] begin[:]
<ast.Raise object at 0x7da1b1cc3880>
variable[payload] assign[=] call[call[name[json].JSONDecoder, parameter[]].decode, parameter[call[name[response].content.decode, parameter[]]]]
variable[relevant] assign[=] <ast.ListComp object at 0x7da1b1cc0df0>
if <ast.UnaryOp object at 0x7da1b1cc2110> begin[:]
return[call[name[CannotAutomergeError], parameter[constant["automerge" label was never added.]]]]
return[call[name[check_collaborator_has_write], parameter[name[repo], call[call[call[name[relevant]][<ast.UnaryOp object at 0x7da1b1cc2410>]][constant[actor]]][constant[login]]]]] | keyword[def] identifier[check_auto_merge_labeler] ( identifier[repo] : identifier[GithubRepository] , identifier[pull_id] : identifier[int]
)-> identifier[Optional] [ identifier[CannotAutomergeError] ]:
literal[string]
identifier[url] =( literal[string]
literal[string] . identifier[format] ( identifier[repo] . identifier[organization] ,
identifier[repo] . identifier[name] ,
identifier[pull_id] ,
identifier[repo] . identifier[access_token] ))
identifier[response] = identifier[requests] . identifier[get] ( identifier[url] )
keyword[if] identifier[response] . identifier[status_code] != literal[int] :
keyword[raise] identifier[RuntimeError] (
literal[string] . identifier[format] (
identifier[response] . identifier[status_code] , identifier[response] . identifier[content] ))
identifier[payload] = identifier[json] . identifier[JSONDecoder] (). identifier[decode] ( identifier[response] . identifier[content] . identifier[decode] ())
identifier[relevant] =[ identifier[event]
keyword[for] identifier[event] keyword[in] identifier[payload]
keyword[if] identifier[event] [ literal[string] ]== literal[string] keyword[and]
identifier[event] [ literal[string] ][ literal[string] ] keyword[in] identifier[AUTO_MERGE_LABELS] ]
keyword[if] keyword[not] identifier[relevant] :
keyword[return] identifier[CannotAutomergeError] ( literal[string] )
keyword[return] identifier[check_collaborator_has_write] ( identifier[repo] , identifier[relevant] [- literal[int] ][ literal[string] ][ literal[string] ]) | def check_auto_merge_labeler(repo: GithubRepository, pull_id: int) -> Optional[CannotAutomergeError]:
"""
References:
https://developer.github.com/v3/issues/events/#list-events-for-an-issue
"""
url = 'https://api.github.com/repos/{}/{}/issues/{}/events?access_token={}'.format(repo.organization, repo.name, pull_id, repo.access_token)
response = requests.get(url)
if response.status_code != 200:
raise RuntimeError('Event check failed. Code: {}. Content: {}.'.format(response.status_code, response.content)) # depends on [control=['if'], data=[]]
payload = json.JSONDecoder().decode(response.content.decode())
relevant = [event for event in payload if event['event'] == 'labeled' and event['label']['name'] in AUTO_MERGE_LABELS]
if not relevant:
return CannotAutomergeError('"automerge" label was never added.') # depends on [control=['if'], data=[]]
return check_collaborator_has_write(repo, relevant[-1]['actor']['login']) |
def load_module_from_file_object(fp, filename='<unknown>', code_objects=None, fast_load=False,
                                 get_code=True):
    """load a module from a file object without importing it.
    See :func:load_module for a list of return values.

    :param fp: open binary file object positioned at the start of the
        bytecode file; it is always closed before this function returns.
    :param filename: name used only in error messages.
    :param code_objects: optional cache dict handed to the unmarshaller;
        a fresh dict is created when None.
    :param fast_load: when True use the faster xdis.marsh loader instead
        of the cross-version unmarshaller.
    :param get_code: when False skip unmarshalling and return None for
        the code object.
    """
    if code_objects is None:
        code_objects = {}
    timestamp = 0
    try:
        # First 4 bytes: the magic number identifying the Python version.
        magic = fp.read(4)
        magic_int = magics.magic2int(magic)
        # For reasons I don't understand, PyPy 3.2 stores a magic
        # of '0'...  The two values below are for Python 2.x and 3.x respectively
        if magic[0:1] in ['0', b'0']:
            magic = magics.int2magic(3180+7)
        try:
            # FIXME: use the internal routine below
            float_version = float(magics.versions[magic][:3])
            # float_version = magics.magic_int2float(magic_int)
        except KeyError:
            # Magic number not in our table; classify the failure before
            # giving up.
            if magic_int in (2657, 22138):
                raise ImportError("This smells like Pyston which is not supported.")
            if len(magic) >= 2:
                raise ImportError("Unknown magic number %s in %s" %
                                  (ord(magic[0:1])+256*ord(magic[1:2]), filename))
            else:
                raise ImportError("Bad magic number: '%s'" % magic)
        # Interim (pre-release) bytecode formats are rejected outright.
        if magic_int in (3010, 3020, 3030, 3040, 3050, 3060, 3061, 3361, 3371):
            raise ImportError("%s is interim Python %s (%d) bytecode which is "
                              "not supported.\nFinal released versions are "
                              "supported." % (
                                  filename, magics.versions[magic],
                                  magics.magic2int(magic)))
        elif magic_int == 62135:
            # Dropbox-modified bytecode gets its own fixer; rewind so the
            # fixer sees the whole file.  (The outer finally below still
            # closes fp afterwards.)
            fp.seek(0)
            return fix_dropbox_pyc(fp)
        elif magic_int == 62215:
            raise ImportError("%s is a dropbox-hacked Python %s (bytecode %d).\n"
                              "See https://github.com/kholia/dedrop for how to "
                              "decrypt." % (
                                  filename, magics.versions[magic],
                                  magics.magic2int(magic)))
        try:
            # print version
            # Next header field: normally a 4-byte timestamp, but some
            # magics (presumably the PEP 552 hash-based .pyc layouts --
            # TODO confirm) carry other words instead.
            ts = fp.read(4)
            my_magic_int = magics.magic2int(imp.get_magic())
            magic_int = magics.magic2int(magic)
            if magic_int == 3393:
                timestamp = 0
                _ = unpack("<I", ts)[0]  # hash word 1
                _ = unpack("<I", fp.read(4))[0]  # hash word 2
            elif magic_int in (3394, 3401):
                timestamp = 0
                _ = unpack("<I", fp.read(4))[0]  # pep552_bits
            else:
                timestamp = unpack("<I", ts)[0]
            # Note: a higher magic number doesn't necessarily mean a later
            # release.  At Python 3.0 the magic number decreased
            # significantly.  Hence the range below. Also note inclusion of
            # the size info, occurred within a Python major/minor
            # release. Hence the test on the magic value rather than
            # PYTHON_VERSION, although PYTHON_VERSION would probably work.
            if 3200 <= magic_int < 20121 and magic_int not in (5892, 11913, 39170, 39171):
                source_size = unpack("<I", fp.read(4))[0]  # size mod 2**32
            else:
                source_size = None
            if get_code:
                if my_magic_int == magic_int:
                    # Bytecode matches the running interpreter, so the
                    # native marshal module can decode it directly.
                    bytecode = fp.read()
                    co = marshal.loads(bytecode)
                elif fast_load:
                    co = xdis.marsh.load(fp, magics.magicint2version[magic_int])
                else:
                    # Cross-version path: decode with xdis's own
                    # unmarshaller.
                    co = xdis.unmarshal.load_code(fp, magic_int, code_objects)
                pass
            else:
                co = None
        except:
            # Re-raise any unmarshalling failure as ImportError, after
            # printing the traceback for diagnosis.
            kind, msg = sys.exc_info()[0:2]
            import traceback
            traceback.print_exc()
            raise ImportError("Ill-formed bytecode file %s\n%s; %s"
                              % (filename, kind, msg))
    finally:
        fp.close()
    return float_version, timestamp, magic_int, co, is_pypy(magic_int), source_size
constant[load a module from a file object without importing it.
See :func:load_module for a list of return values.
]
if compare[name[code_objects] is constant[None]] begin[:]
variable[code_objects] assign[=] dictionary[[], []]
variable[timestamp] assign[=] constant[0]
<ast.Try object at 0x7da1b064f3d0>
return[tuple[[<ast.Name object at 0x7da1b06a1270>, <ast.Name object at 0x7da1b06a12a0>, <ast.Name object at 0x7da1b06a12d0>, <ast.Name object at 0x7da1b06a1300>, <ast.Call object at 0x7da1b06a1330>, <ast.Name object at 0x7da1b06a13c0>]]] | keyword[def] identifier[load_module_from_file_object] ( identifier[fp] , identifier[filename] = literal[string] , identifier[code_objects] = keyword[None] , identifier[fast_load] = keyword[False] ,
identifier[get_code] = keyword[True] ):
literal[string]
keyword[if] identifier[code_objects] keyword[is] keyword[None] :
identifier[code_objects] ={}
identifier[timestamp] = literal[int]
keyword[try] :
identifier[magic] = identifier[fp] . identifier[read] ( literal[int] )
identifier[magic_int] = identifier[magics] . identifier[magic2int] ( identifier[magic] )
keyword[if] identifier[magic] [ literal[int] : literal[int] ] keyword[in] [ literal[string] , literal[string] ]:
identifier[magic] = identifier[magics] . identifier[int2magic] ( literal[int] + literal[int] )
keyword[try] :
identifier[float_version] = identifier[float] ( identifier[magics] . identifier[versions] [ identifier[magic] ][: literal[int] ])
keyword[except] identifier[KeyError] :
keyword[if] identifier[magic_int] keyword[in] ( literal[int] , literal[int] ):
keyword[raise] identifier[ImportError] ( literal[string] )
keyword[if] identifier[len] ( identifier[magic] )>= literal[int] :
keyword[raise] identifier[ImportError] ( literal[string] %
( identifier[ord] ( identifier[magic] [ literal[int] : literal[int] ])+ literal[int] * identifier[ord] ( identifier[magic] [ literal[int] : literal[int] ]), identifier[filename] ))
keyword[else] :
keyword[raise] identifier[ImportError] ( literal[string] % identifier[magic] )
keyword[if] identifier[magic_int] keyword[in] ( literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ):
keyword[raise] identifier[ImportError] ( literal[string]
literal[string]
literal[string] %(
identifier[filename] , identifier[magics] . identifier[versions] [ identifier[magic] ],
identifier[magics] . identifier[magic2int] ( identifier[magic] )))
keyword[elif] identifier[magic_int] == literal[int] :
identifier[fp] . identifier[seek] ( literal[int] )
keyword[return] identifier[fix_dropbox_pyc] ( identifier[fp] )
keyword[elif] identifier[magic_int] == literal[int] :
keyword[raise] identifier[ImportError] ( literal[string]
literal[string]
literal[string] %(
identifier[filename] , identifier[magics] . identifier[versions] [ identifier[magic] ],
identifier[magics] . identifier[magic2int] ( identifier[magic] )))
keyword[try] :
identifier[ts] = identifier[fp] . identifier[read] ( literal[int] )
identifier[my_magic_int] = identifier[magics] . identifier[magic2int] ( identifier[imp] . identifier[get_magic] ())
identifier[magic_int] = identifier[magics] . identifier[magic2int] ( identifier[magic] )
keyword[if] identifier[magic_int] == literal[int] :
identifier[timestamp] = literal[int]
identifier[_] = identifier[unpack] ( literal[string] , identifier[ts] )[ literal[int] ]
identifier[_] = identifier[unpack] ( literal[string] , identifier[fp] . identifier[read] ( literal[int] ))[ literal[int] ]
keyword[elif] identifier[magic_int] keyword[in] ( literal[int] , literal[int] ):
identifier[timestamp] = literal[int]
identifier[_] = identifier[unpack] ( literal[string] , identifier[fp] . identifier[read] ( literal[int] ))[ literal[int] ]
keyword[else] :
identifier[timestamp] = identifier[unpack] ( literal[string] , identifier[ts] )[ literal[int] ]
keyword[if] literal[int] <= identifier[magic_int] < literal[int] keyword[and] identifier[magic_int] keyword[not] keyword[in] ( literal[int] , literal[int] , literal[int] , literal[int] ):
identifier[source_size] = identifier[unpack] ( literal[string] , identifier[fp] . identifier[read] ( literal[int] ))[ literal[int] ]
keyword[else] :
identifier[source_size] = keyword[None]
keyword[if] identifier[get_code] :
keyword[if] identifier[my_magic_int] == identifier[magic_int] :
identifier[bytecode] = identifier[fp] . identifier[read] ()
identifier[co] = identifier[marshal] . identifier[loads] ( identifier[bytecode] )
keyword[elif] identifier[fast_load] :
identifier[co] = identifier[xdis] . identifier[marsh] . identifier[load] ( identifier[fp] , identifier[magics] . identifier[magicint2version] [ identifier[magic_int] ])
keyword[else] :
identifier[co] = identifier[xdis] . identifier[unmarshal] . identifier[load_code] ( identifier[fp] , identifier[magic_int] , identifier[code_objects] )
keyword[pass]
keyword[else] :
identifier[co] = keyword[None]
keyword[except] :
identifier[kind] , identifier[msg] = identifier[sys] . identifier[exc_info] ()[ literal[int] : literal[int] ]
keyword[import] identifier[traceback]
identifier[traceback] . identifier[print_exc] ()
keyword[raise] identifier[ImportError] ( literal[string]
%( identifier[filename] , identifier[kind] , identifier[msg] ))
keyword[finally] :
identifier[fp] . identifier[close] ()
keyword[return] identifier[float_version] , identifier[timestamp] , identifier[magic_int] , identifier[co] , identifier[is_pypy] ( identifier[magic_int] ), identifier[source_size] | def load_module_from_file_object(fp, filename='<unknown>', code_objects=None, fast_load=False, get_code=True):
"""load a module from a file object without importing it.
See :func:load_module for a list of return values.
"""
if code_objects is None:
code_objects = {} # depends on [control=['if'], data=['code_objects']]
timestamp = 0
try:
magic = fp.read(4)
magic_int = magics.magic2int(magic)
# For reasons I don't understand, PyPy 3.2 stores a magic
# of '0'... The two values below are for Python 2.x and 3.x respectively
if magic[0:1] in ['0', b'0']:
magic = magics.int2magic(3180 + 7) # depends on [control=['if'], data=[]]
try:
# FIXME: use the internal routine below
float_version = float(magics.versions[magic][:3]) # depends on [control=['try'], data=[]]
# float_version = magics.magic_int2float(magic_int)
except KeyError:
if magic_int in (2657, 22138):
raise ImportError('This smells like Pyston which is not supported.') # depends on [control=['if'], data=[]]
if len(magic) >= 2:
raise ImportError('Unknown magic number %s in %s' % (ord(magic[0:1]) + 256 * ord(magic[1:2]), filename)) # depends on [control=['if'], data=[]]
else:
raise ImportError("Bad magic number: '%s'" % magic) # depends on [control=['except'], data=[]]
if magic_int in (3010, 3020, 3030, 3040, 3050, 3060, 3061, 3361, 3371):
raise ImportError('%s is interim Python %s (%d) bytecode which is not supported.\nFinal released versions are supported.' % (filename, magics.versions[magic], magics.magic2int(magic))) # depends on [control=['if'], data=[]]
elif magic_int == 62135:
fp.seek(0)
return fix_dropbox_pyc(fp) # depends on [control=['if'], data=[]]
elif magic_int == 62215:
raise ImportError('%s is a dropbox-hacked Python %s (bytecode %d).\nSee https://github.com/kholia/dedrop for how to decrypt.' % (filename, magics.versions[magic], magics.magic2int(magic))) # depends on [control=['if'], data=[]]
try:
# print version
ts = fp.read(4)
my_magic_int = magics.magic2int(imp.get_magic())
magic_int = magics.magic2int(magic)
if magic_int == 3393:
timestamp = 0
_ = unpack('<I', ts)[0] # hash word 1
_ = unpack('<I', fp.read(4))[0] # hash word 2 # depends on [control=['if'], data=[]]
elif magic_int in (3394, 3401):
timestamp = 0
_ = unpack('<I', fp.read(4))[0] # pep552_bits # depends on [control=['if'], data=[]]
else:
timestamp = unpack('<I', ts)[0]
# Note: a higher magic number doesn't necessarily mean a later
# release. At Python 3.0 the magic number decreased
# significantly. Hence the range below. Also note inclusion of
# the size info, occurred within a Python major/minor
# release. Hence the test on the magic value rather than
# PYTHON_VERSION, although PYTHON_VERSION would probably work.
if 3200 <= magic_int < 20121 and magic_int not in (5892, 11913, 39170, 39171):
source_size = unpack('<I', fp.read(4))[0] # size mod 2**32 # depends on [control=['if'], data=[]]
else:
source_size = None
if get_code:
if my_magic_int == magic_int:
bytecode = fp.read()
co = marshal.loads(bytecode) # depends on [control=['if'], data=[]]
elif fast_load:
co = xdis.marsh.load(fp, magics.magicint2version[magic_int]) # depends on [control=['if'], data=[]]
else:
co = xdis.unmarshal.load_code(fp, magic_int, code_objects)
pass # depends on [control=['if'], data=[]]
else:
co = None # depends on [control=['try'], data=[]]
except:
(kind, msg) = sys.exc_info()[0:2]
import traceback
traceback.print_exc()
raise ImportError('Ill-formed bytecode file %s\n%s; %s' % (filename, kind, msg)) # depends on [control=['except'], data=[]] # depends on [control=['try'], data=[]]
finally:
fp.close()
return (float_version, timestamp, magic_int, co, is_pypy(magic_int), source_size) |
def to_json(el, schema=None):
    """Convert an element to VDOM JSON
    If you wish to validate the JSON, pass in a schema via the schema keyword
    argument. If a schema is provided, this raises a ValidationError if JSON
    does not match the schema.
    """
    if type(el) is str:
        converted = el
    elif type(el) is list:
        # Convert each child (without propagating the schema).
        converted = [to_json(child) for child in el]
    elif type(el) is dict:
        assert 'tagName' in el
        # Work on a shallow copy and fill in the required VDOM fields.
        converted = dict(el)
        converted.setdefault('attributes', {})
        converted.setdefault('children', [])
    elif isinstance(el, VDOM):
        converted = el.to_dict()
    else:
        converted = el
    if schema:
        try:
            validate(instance=converted, schema=schema, cls=Draft4Validator)
        except ValidationError as e:
            raise ValidationError(_validate_err_template.format(schema, e))
    return converted
constant[Convert an element to VDOM JSON
If you wish to validate the JSON, pass in a schema via the schema keyword
argument. If a schema is provided, this raises a ValidationError if JSON
does not match the schema.
]
if compare[call[name[type], parameter[name[el]]] is name[str]] begin[:]
variable[json_el] assign[=] name[el]
if name[schema] begin[:]
<ast.Try object at 0x7da1b11113f0>
return[name[json_el]] | keyword[def] identifier[to_json] ( identifier[el] , identifier[schema] = keyword[None] ):
literal[string]
keyword[if] identifier[type] ( identifier[el] ) keyword[is] identifier[str] :
identifier[json_el] = identifier[el]
keyword[elif] identifier[type] ( identifier[el] ) keyword[is] identifier[list] :
identifier[json_el] = identifier[list] ( identifier[map] ( identifier[to_json] , identifier[el] ))
keyword[elif] identifier[type] ( identifier[el] ) keyword[is] identifier[dict] :
keyword[assert] literal[string] keyword[in] identifier[el]
identifier[json_el] = identifier[el] . identifier[copy] ()
keyword[if] literal[string] keyword[not] keyword[in] identifier[el] :
identifier[json_el] [ literal[string] ]={}
keyword[if] literal[string] keyword[not] keyword[in] identifier[el] :
identifier[json_el] [ literal[string] ]=[]
keyword[elif] identifier[isinstance] ( identifier[el] , identifier[VDOM] ):
identifier[json_el] = identifier[el] . identifier[to_dict] ()
keyword[else] :
identifier[json_el] = identifier[el]
keyword[if] identifier[schema] :
keyword[try] :
identifier[validate] ( identifier[instance] = identifier[json_el] , identifier[schema] = identifier[schema] , identifier[cls] = identifier[Draft4Validator] )
keyword[except] identifier[ValidationError] keyword[as] identifier[e] :
keyword[raise] identifier[ValidationError] ( identifier[_validate_err_template] . identifier[format] ( identifier[schema] , identifier[e] ))
keyword[return] identifier[json_el] | def to_json(el, schema=None):
"""Convert an element to VDOM JSON
If you wish to validate the JSON, pass in a schema via the schema keyword
argument. If a schema is provided, this raises a ValidationError if JSON
does not match the schema.
"""
if type(el) is str:
json_el = el # depends on [control=['if'], data=[]]
elif type(el) is list:
json_el = list(map(to_json, el)) # depends on [control=['if'], data=['list']]
elif type(el) is dict:
assert 'tagName' in el
json_el = el.copy()
if 'attributes' not in el:
json_el['attributes'] = {} # depends on [control=['if'], data=[]]
if 'children' not in el:
json_el['children'] = [] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(el, VDOM):
json_el = el.to_dict() # depends on [control=['if'], data=[]]
else:
json_el = el
if schema:
try:
validate(instance=json_el, schema=schema, cls=Draft4Validator) # depends on [control=['try'], data=[]]
except ValidationError as e:
raise ValidationError(_validate_err_template.format(schema, e)) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
return json_el |
def _make_authorization_header(region,
                               service,
                               canonical_request,
                               credentials,
                               instant):
    """
    Build the AWS Signature Version 4 value for an C{Authorization} header.
    @param region: The AWS region name (e.g., C{'us-east-1'}).
    @type region: L{str}
    @param service: The AWS service's name (e.g., C{'s3'}).
    @type service: L{str}
    @param canonical_request: The canonical form of the request.
    @type canonical_request: L{_CanonicalRequest} (use
        L{_CanonicalRequest.from_payload_and_headers})
    @param credentials: The AWS credentials.
    @type credentials: L{txaws.credentials.AWSCredentials}
    @param instant: The current UTC date and time
    @type instant: A naive local L{datetime.datetime} (as returned by
        L{datetime.datetime.utcnow})
    @return: A value suitable for use in an C{Authorization} header
    @rtype: L{bytes}
    """
    amz_date = makeAMZDate(instant)
    date_stamp = makeDateStamp(instant)
    # The credential scope ties the signature to one day/region/service.
    credential_scope = _CredentialScope(
        date_stamp=date_stamp,
        region=region,
        service=service,
    )
    # Derive the signing key and sign the canonical request token.
    signing_key = getSignatureKey(
        credentials.secret_key, date_stamp, region, service)
    signature = _SignableAWS4HMAC256Token(
        amz_date,
        credential_scope,
        canonical_request,
    ).signature(signing_key)
    serialized_credential = _Credential(
        access_key=credentials.access_key,
        credential_scope=credential_scope,
    ).serialize()
    # Assemble "<ALGORITHM> Credential=..., SignedHeaders=..., Signature=...".
    fields = [
        b"Credential=%s" % (serialized_credential,),
        b"SignedHeaders=%s" % (canonical_request.signed_headers,),
        b"Signature=%s" % (signature,),
    ]
    return b"%s " % (_SignableAWS4HMAC256Token.ALGORITHM,) + b", ".join(fields)
constant[
Construct an AWS version 4 authorization value for use in an
C{Authorization} header.
@param region: The AWS region name (e.g., C{'us-east-1'}).
@type region: L{str}
@param service: The AWS service's name (e.g., C{'s3'}).
@type service: L{str}
@param canonical_request: The canonical form of the request.
@type canonical_request: L{_CanonicalRequest} (use
L{_CanonicalRequest.from_payload_and_headers})
@param credentials: The AWS credentials.
@type credentials: L{txaws.credentials.AWSCredentials}
@param instant: The current UTC date and time
@type instant: A naive local L{datetime.datetime} (as returned by
L{datetime.datetime.utcnow})
@return: A value suitable for use in an C{Authorization} header
@rtype: L{bytes}
]
variable[date_stamp] assign[=] call[name[makeDateStamp], parameter[name[instant]]]
variable[amz_date] assign[=] call[name[makeAMZDate], parameter[name[instant]]]
variable[scope] assign[=] call[name[_CredentialScope], parameter[]]
variable[signable] assign[=] call[name[_SignableAWS4HMAC256Token], parameter[name[amz_date], name[scope], name[canonical_request]]]
variable[signature] assign[=] call[name[signable].signature, parameter[call[name[getSignatureKey], parameter[name[credentials].secret_key, name[date_stamp], name[region], name[service]]]]]
variable[v4credential] assign[=] call[name[_Credential], parameter[]]
return[binary_operation[binary_operation[constant[b'%s '] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c6e4670>]]] + call[constant[b', '].join, parameter[list[[<ast.BinOp object at 0x7da20c6e6bf0>, <ast.BinOp object at 0x7da20c6e5fc0>, <ast.BinOp object at 0x7da20c6e4af0>]]]]]] | keyword[def] identifier[_make_authorization_header] ( identifier[region] ,
identifier[service] ,
identifier[canonical_request] ,
identifier[credentials] ,
identifier[instant] ):
literal[string]
identifier[date_stamp] = identifier[makeDateStamp] ( identifier[instant] )
identifier[amz_date] = identifier[makeAMZDate] ( identifier[instant] )
identifier[scope] = identifier[_CredentialScope] (
identifier[date_stamp] = identifier[date_stamp] ,
identifier[region] = identifier[region] ,
identifier[service] = identifier[service]
)
identifier[signable] = identifier[_SignableAWS4HMAC256Token] (
identifier[amz_date] ,
identifier[scope] ,
identifier[canonical_request] ,
)
identifier[signature] = identifier[signable] . identifier[signature] (
identifier[getSignatureKey] ( identifier[credentials] . identifier[secret_key] ,
identifier[date_stamp] ,
identifier[region] ,
identifier[service] )
)
identifier[v4credential] = identifier[_Credential] (
identifier[access_key] = identifier[credentials] . identifier[access_key] ,
identifier[credential_scope] = identifier[scope] ,
)
keyword[return] (
literal[string] %( identifier[_SignableAWS4HMAC256Token] . identifier[ALGORITHM] ,)+
literal[string] . identifier[join] ([
literal[string] %( identifier[v4credential] . identifier[serialize] (),),
literal[string] %( identifier[canonical_request] . identifier[signed_headers] ,),
literal[string] %( identifier[signature] ,),
])) | def _make_authorization_header(region, service, canonical_request, credentials, instant):
"""
Construct an AWS version 4 authorization value for use in an
C{Authorization} header.
@param region: The AWS region name (e.g., C{'us-east-1'}).
@type region: L{str}
@param service: The AWS service's name (e.g., C{'s3'}).
@type service: L{str}
@param canonical_request: The canonical form of the request.
@type canonical_request: L{_CanonicalRequest} (use
L{_CanonicalRequest.from_payload_and_headers})
@param credentials: The AWS credentials.
@type credentials: L{txaws.credentials.AWSCredentials}
@param instant: The current UTC date and time
@type instant: A naive local L{datetime.datetime} (as returned by
L{datetime.datetime.utcnow})
@return: A value suitable for use in an C{Authorization} header
@rtype: L{bytes}
"""
date_stamp = makeDateStamp(instant)
amz_date = makeAMZDate(instant)
scope = _CredentialScope(date_stamp=date_stamp, region=region, service=service)
signable = _SignableAWS4HMAC256Token(amz_date, scope, canonical_request)
signature = signable.signature(getSignatureKey(credentials.secret_key, date_stamp, region, service))
v4credential = _Credential(access_key=credentials.access_key, credential_scope=scope)
return b'%s ' % (_SignableAWS4HMAC256Token.ALGORITHM,) + b', '.join([b'Credential=%s' % (v4credential.serialize(),), b'SignedHeaders=%s' % (canonical_request.signed_headers,), b'Signature=%s' % (signature,)]) |
def novo2(args):
    """
    %prog novo2 trimmed projectname
    Reference-free tGBS pipeline v2.
    """
    parser = OptionParser(novo2.__doc__)
    parser.set_fastq_names()
    parser.set_align(pctid=95)
    options, args = parser.parse_args(args)
    if len(args) != 2:
        sys.exit(not parser.print_help())
    trimmed, prefix = args
    pctid = options.pctid
    reads, samples = scan_read_files(trimmed, options.names)
    # Workspace layout: clustering output and per-sample allele counts.
    cluster_dir = "uclust"
    counts_dir = "allele_counts"
    for workdir in (cluster_dir, counts_dir):
        mkdir(workdir)
    maker = MakeManager()
    # Stage 0: cluster reads within each individual sample.
    cluster_files = []
    for sample in samples:
        sample_reads = [r for r in reads
                        if op.basename(r).split(".")[0] == sample]
        clust_out = op.join(cluster_dir, sample + ".P{0}.clustS".format(pctid))
        cmd = ("python -m jcvi.apps.uclust cluster --cpus=8"
               + " {0} {1}".format(sample, " ".join(sample_reads))
               + " --outdir={0}".format(cluster_dir)
               + " --pctid={0}".format(pctid))
        maker.add(sample_reads, clust_out, cmd)
        cluster_files.append(clust_out)
    # Stage 1: derive one consensus per cluster, per sample.
    consensus_files = []
    for sample, clust_out in zip(samples, cluster_files):
        cons_out = op.join(cluster_dir, sample + ".P{0}.consensus".format(pctid))
        cmd = ("python -m jcvi.apps.uclust consensus"
               + " {0}".format(clust_out))
        maker.add(clust_out, cons_out, cmd)
        consensus_files.append(cons_out)
    # Stage 2: cluster the per-sample consensus sequences across samples.
    merged_clust = prefix + ".P{0}.clustS".format(pctid)
    cmd = ("python -m jcvi.apps.uclust mcluster {0}".format(" ".join(consensus_files))
           + " --prefix={0}".format(prefix))
    maker.add(consensus_files, merged_clust, cmd)
    # Stage 3: call the cross-sample consensus loci.
    loci_file = prefix + ".P{0}.loci".format(pctid)
    cmd = ("python -m jcvi.apps.uclust mconsensus {0}".format(" ".join(consensus_files))
           + " --prefix={0}".format(prefix))
    maker.add(consensus_files + [merged_clust], loci_file, cmd)
    maker.write()
constant[
%prog novo2 trimmed projectname
Reference-free tGBS pipeline v2.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[novo2].__doc__]]
call[name[p].set_fastq_names, parameter[]]
call[name[p].set_align, parameter[]]
<ast.Tuple object at 0x7da20c76ffd0> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da20c76d9c0>]]
<ast.Tuple object at 0x7da20c76dde0> assign[=] name[args]
variable[pctid] assign[=] name[opts].pctid
<ast.Tuple object at 0x7da20c76e410> assign[=] call[name[scan_read_files], parameter[name[trimmed], name[opts].names]]
variable[clustdir] assign[=] constant[uclust]
variable[acdir] assign[=] constant[allele_counts]
for taget[name[d]] in starred[tuple[[<ast.Name object at 0x7da20c76dcf0>, <ast.Name object at 0x7da20c76c310>]]] begin[:]
call[name[mkdir], parameter[name[d]]]
variable[mm] assign[=] call[name[MakeManager], parameter[]]
variable[clustfiles] assign[=] list[[]]
for taget[name[s]] in starred[name[samples]] begin[:]
variable[flist] assign[=] <ast.ListComp object at 0x7da18dc06740>
variable[outfile] assign[=] binary_operation[name[s] + call[constant[.P{0}.clustS].format, parameter[name[pctid]]]]
variable[outfile] assign[=] call[name[op].join, parameter[name[clustdir], name[outfile]]]
variable[cmd] assign[=] constant[python -m jcvi.apps.uclust cluster --cpus=8]
<ast.AugAssign object at 0x7da18dc05f30>
<ast.AugAssign object at 0x7da18dc06080>
<ast.AugAssign object at 0x7da18dc040a0>
call[name[mm].add, parameter[name[flist], name[outfile], name[cmd]]]
call[name[clustfiles].append, parameter[name[outfile]]]
variable[allcons] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18dc05ae0>, <ast.Name object at 0x7da18dc075e0>]]] in starred[call[name[zip], parameter[name[samples], name[clustfiles]]]] begin[:]
variable[outfile] assign[=] binary_operation[name[s] + call[constant[.P{0}.consensus].format, parameter[name[pctid]]]]
variable[outfile] assign[=] call[name[op].join, parameter[name[clustdir], name[outfile]]]
variable[cmd] assign[=] constant[python -m jcvi.apps.uclust consensus]
<ast.AugAssign object at 0x7da18dc05960>
call[name[mm].add, parameter[name[clustfile], name[outfile], name[cmd]]]
call[name[allcons].append, parameter[name[outfile]]]
variable[clustSfile] assign[=] binary_operation[name[pf] + call[constant[.P{0}.clustS].format, parameter[name[pctid]]]]
variable[cmd] assign[=] call[constant[python -m jcvi.apps.uclust mcluster {0}].format, parameter[call[constant[ ].join, parameter[name[allcons]]]]]
<ast.AugAssign object at 0x7da20e962530>
call[name[mm].add, parameter[name[allcons], name[clustSfile], name[cmd]]]
variable[locifile] assign[=] binary_operation[name[pf] + call[constant[.P{0}.loci].format, parameter[name[pctid]]]]
variable[cmd] assign[=] call[constant[python -m jcvi.apps.uclust mconsensus {0}].format, parameter[call[constant[ ].join, parameter[name[allcons]]]]]
<ast.AugAssign object at 0x7da20e963be0>
call[name[mm].add, parameter[binary_operation[name[allcons] + list[[<ast.Name object at 0x7da20e961ff0>]]], name[locifile], name[cmd]]]
call[name[mm].write, parameter[]] | keyword[def] identifier[novo2] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[novo2] . identifier[__doc__] )
identifier[p] . identifier[set_fastq_names] ()
identifier[p] . identifier[set_align] ( identifier[pctid] = literal[int] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[trimmed] , identifier[pf] = identifier[args]
identifier[pctid] = identifier[opts] . identifier[pctid]
identifier[reads] , identifier[samples] = identifier[scan_read_files] ( identifier[trimmed] , identifier[opts] . identifier[names] )
identifier[clustdir] = literal[string]
identifier[acdir] = literal[string]
keyword[for] identifier[d] keyword[in] ( identifier[clustdir] , identifier[acdir] ):
identifier[mkdir] ( identifier[d] )
identifier[mm] = identifier[MakeManager] ()
identifier[clustfiles] =[]
keyword[for] identifier[s] keyword[in] identifier[samples] :
identifier[flist] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[reads] keyword[if] identifier[op] . identifier[basename] ( identifier[x] ). identifier[split] ( literal[string] )[ literal[int] ]== identifier[s] ]
identifier[outfile] = identifier[s] + literal[string] . identifier[format] ( identifier[pctid] )
identifier[outfile] = identifier[op] . identifier[join] ( identifier[clustdir] , identifier[outfile] )
identifier[cmd] = literal[string]
identifier[cmd] += literal[string] . identifier[format] ( identifier[s] , literal[string] . identifier[join] ( identifier[flist] ))
identifier[cmd] += literal[string] . identifier[format] ( identifier[clustdir] )
identifier[cmd] += literal[string] . identifier[format] ( identifier[pctid] )
identifier[mm] . identifier[add] ( identifier[flist] , identifier[outfile] , identifier[cmd] )
identifier[clustfiles] . identifier[append] ( identifier[outfile] )
identifier[allcons] =[]
keyword[for] identifier[s] , identifier[clustfile] keyword[in] identifier[zip] ( identifier[samples] , identifier[clustfiles] ):
identifier[outfile] = identifier[s] + literal[string] . identifier[format] ( identifier[pctid] )
identifier[outfile] = identifier[op] . identifier[join] ( identifier[clustdir] , identifier[outfile] )
identifier[cmd] = literal[string]
identifier[cmd] += literal[string] . identifier[format] ( identifier[clustfile] )
identifier[mm] . identifier[add] ( identifier[clustfile] , identifier[outfile] , identifier[cmd] )
identifier[allcons] . identifier[append] ( identifier[outfile] )
identifier[clustSfile] = identifier[pf] + literal[string] . identifier[format] ( identifier[pctid] )
identifier[cmd] = literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[allcons] ))
identifier[cmd] += literal[string] . identifier[format] ( identifier[pf] )
identifier[mm] . identifier[add] ( identifier[allcons] , identifier[clustSfile] , identifier[cmd] )
identifier[locifile] = identifier[pf] + literal[string] . identifier[format] ( identifier[pctid] )
identifier[cmd] = literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[allcons] ))
identifier[cmd] += literal[string] . identifier[format] ( identifier[pf] )
identifier[mm] . identifier[add] ( identifier[allcons] +[ identifier[clustSfile] ], identifier[locifile] , identifier[cmd] )
identifier[mm] . identifier[write] () | def novo2(args):
"""
%prog novo2 trimmed projectname
Reference-free tGBS pipeline v2.
"""
p = OptionParser(novo2.__doc__)
p.set_fastq_names()
p.set_align(pctid=95)
(opts, args) = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(trimmed, pf) = args
pctid = opts.pctid
(reads, samples) = scan_read_files(trimmed, opts.names)
# Set up directory structure
clustdir = 'uclust'
acdir = 'allele_counts'
for d in (clustdir, acdir):
mkdir(d) # depends on [control=['for'], data=['d']]
mm = MakeManager()
clustfiles = []
# Step 0 - clustering within sample
for s in samples:
flist = [x for x in reads if op.basename(x).split('.')[0] == s]
outfile = s + '.P{0}.clustS'.format(pctid)
outfile = op.join(clustdir, outfile)
cmd = 'python -m jcvi.apps.uclust cluster --cpus=8'
cmd += ' {0} {1}'.format(s, ' '.join(flist))
cmd += ' --outdir={0}'.format(clustdir)
cmd += ' --pctid={0}'.format(pctid)
mm.add(flist, outfile, cmd)
clustfiles.append(outfile) # depends on [control=['for'], data=['s']]
# Step 1 - make consensus within sample
allcons = []
for (s, clustfile) in zip(samples, clustfiles):
outfile = s + '.P{0}.consensus'.format(pctid)
outfile = op.join(clustdir, outfile)
cmd = 'python -m jcvi.apps.uclust consensus'
cmd += ' {0}'.format(clustfile)
mm.add(clustfile, outfile, cmd)
allcons.append(outfile) # depends on [control=['for'], data=[]]
# Step 2 - clustering across samples
clustSfile = pf + '.P{0}.clustS'.format(pctid)
cmd = 'python -m jcvi.apps.uclust mcluster {0}'.format(' '.join(allcons))
cmd += ' --prefix={0}'.format(pf)
mm.add(allcons, clustSfile, cmd)
# Step 3 - make consensus across samples
locifile = pf + '.P{0}.loci'.format(pctid)
cmd = 'python -m jcvi.apps.uclust mconsensus {0}'.format(' '.join(allcons))
cmd += ' --prefix={0}'.format(pf)
mm.add(allcons + [clustSfile], locifile, cmd)
mm.write() |
def grep_file(query, item):
    """Return the lines of file *item* that match the regex *query*.

    Parameters
    ----------
    query : str
        Regular expression searched for (``re.search``) in each line.
    item : str
        Path of the file to scan.

    Returns
    -------
    list of str
        Matching lines (newline included), each prefixed as
        ``"<item>: <line>"``.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original left the handle to be reclaimed by the garbage collector.
    with open(item) as handle:
        return ['%s: %s' % (item, line) for line in handle
                if re.search(query, line)]
constant[This function performs the actual grep on a given file.]
return[<ast.ListComp object at 0x7da1b18039a0>] | keyword[def] identifier[grep_file] ( identifier[query] , identifier[item] ):
literal[string]
keyword[return] [ literal[string] %( identifier[item] , identifier[line] ) keyword[for] identifier[line] keyword[in] identifier[open] ( identifier[item] )
keyword[if] identifier[re] . identifier[search] ( identifier[query] , identifier[line] )] | def grep_file(query, item):
"""This function performs the actual grep on a given file."""
return ['%s: %s' % (item, line) for line in open(item) if re.search(query, line)] |
def recv(self, maxsize=None):
    '''
    Receive data from the terminal as a (``stdout``, ``stderr``) tuple. If
    any of those is ``None`` we can no longer communicate with the
    terminal's child process.
    '''
    # Default to a 1024-byte read; any request below one byte is clamped
    # up to one so the underlying read size stays valid.
    size = 1024 if maxsize is None else max(maxsize, 1)
    return self._recv(size)
constant[
Receive data from the terminal as a (``stdout``, ``stderr``) tuple. If
any of those is ``None`` we can no longer communicate with the
terminal's child process.
]
if compare[name[maxsize] is constant[None]] begin[:]
variable[maxsize] assign[=] constant[1024]
return[call[name[self]._recv, parameter[name[maxsize]]]] | keyword[def] identifier[recv] ( identifier[self] , identifier[maxsize] = keyword[None] ):
literal[string]
keyword[if] identifier[maxsize] keyword[is] keyword[None] :
identifier[maxsize] = literal[int]
keyword[elif] identifier[maxsize] < literal[int] :
identifier[maxsize] = literal[int]
keyword[return] identifier[self] . identifier[_recv] ( identifier[maxsize] ) | def recv(self, maxsize=None):
"""
Receive data from the terminal as a (``stdout``, ``stderr``) tuple. If
any of those is ``None`` we can no longer communicate with the
terminal's child process.
"""
if maxsize is None:
maxsize = 1024 # depends on [control=['if'], data=['maxsize']]
elif maxsize < 1:
maxsize = 1 # depends on [control=['if'], data=['maxsize']]
return self._recv(maxsize) |
def walk_files_relative_path(self, relativePath=""):
    """
    Walk the repository and yield all found files relative path joined with file name.

    :parameters:
        #. relativePath (str): The relative path from which start the walk.
    """
    def emit_files(node, base):
        # dict.__getitem__ is invoked explicitly so that any overridden
        # __getitem__ on the stored mapping type is bypassed.
        subdirs = dict.__getitem__(node, 'directories')
        fileNames = dict.__getitem__(node, 'files')
        for fileName in sorted(fileNames):
            yield os.path.join(base, fileName)
        for dirName in sorted(dict.keys(subdirs)):
            subNode = subdirs.__getitem__(dirName)
            for relPath in emit_files(subNode, os.path.join(base, dirName)):
                yield relPath
    info, errorMessage = self.get_directory_info(relativePath)
    assert info is not None, errorMessage
    return emit_files(info, '')
constant[
Walk the repository and yield all found files relative path joined with file name.
:parameters:
#. relativePath (str): The relative path from which start the walk.
]
def function[walk_files, parameter[directory, relativePath]]:
variable[directories] assign[=] call[name[dict].__getitem__, parameter[name[directory], constant[directories]]]
variable[files] assign[=] call[name[dict].__getitem__, parameter[name[directory], constant[files]]]
for taget[name[f]] in starred[call[name[sorted], parameter[name[files]]]] begin[:]
<ast.Yield object at 0x7da207f9af80>
for taget[name[k]] in starred[call[name[sorted], parameter[call[name[dict].keys, parameter[name[directories]]]]]] begin[:]
variable[path] assign[=] call[name[os].path.join, parameter[name[relativePath], name[k]]]
variable[dir] assign[=] call[name[directories].__getitem__, parameter[name[k]]]
for taget[name[e]] in starred[call[name[walk_files], parameter[name[dir], name[path]]]] begin[:]
<ast.Yield object at 0x7da207f991e0>
<ast.Tuple object at 0x7da207f98dc0> assign[=] call[name[self].get_directory_info, parameter[name[relativePath]]]
assert[compare[name[dir] is_not constant[None]]]
return[call[name[walk_files], parameter[name[dir]]]] | keyword[def] identifier[walk_files_relative_path] ( identifier[self] , identifier[relativePath] = literal[string] ):
literal[string]
keyword[def] identifier[walk_files] ( identifier[directory] , identifier[relativePath] ):
identifier[directories] = identifier[dict] . identifier[__getitem__] ( identifier[directory] , literal[string] )
identifier[files] = identifier[dict] . identifier[__getitem__] ( identifier[directory] , literal[string] )
keyword[for] identifier[f] keyword[in] identifier[sorted] ( identifier[files] ):
keyword[yield] identifier[os] . identifier[path] . identifier[join] ( identifier[relativePath] , identifier[f] )
keyword[for] identifier[k] keyword[in] identifier[sorted] ( identifier[dict] . identifier[keys] ( identifier[directories] )):
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[relativePath] , identifier[k] )
identifier[dir] = identifier[directories] . identifier[__getitem__] ( identifier[k] )
keyword[for] identifier[e] keyword[in] identifier[walk_files] ( identifier[dir] , identifier[path] ):
keyword[yield] identifier[e]
identifier[dir] , identifier[errorMessage] = identifier[self] . identifier[get_directory_info] ( identifier[relativePath] )
keyword[assert] identifier[dir] keyword[is] keyword[not] keyword[None] , identifier[errorMessage]
keyword[return] identifier[walk_files] ( identifier[dir] , identifier[relativePath] = literal[string] ) | def walk_files_relative_path(self, relativePath=''):
"""
Walk the repository and yield all found files relative path joined with file name.
:parameters:
#. relativePath (str): The relative path from which start the walk.
"""
def walk_files(directory, relativePath):
directories = dict.__getitem__(directory, 'directories')
files = dict.__getitem__(directory, 'files')
for f in sorted(files):
yield os.path.join(relativePath, f) # depends on [control=['for'], data=['f']]
for k in sorted(dict.keys(directories)):
path = os.path.join(relativePath, k)
dir = directories.__getitem__(k)
for e in walk_files(dir, path):
yield e # depends on [control=['for'], data=['e']] # depends on [control=['for'], data=['k']]
(dir, errorMessage) = self.get_directory_info(relativePath)
assert dir is not None, errorMessage
return walk_files(dir, relativePath='') |
def create_data_iters_and_vocab(args: argparse.Namespace,
                                max_seq_len_source: int,
                                max_seq_len_target: int,
                                resume_training: bool,
                                output_folder: str) -> Tuple['data_io.BaseParallelSampleIter',
                                                             'data_io.BaseParallelSampleIter',
                                                             'data_io.DataConfig', Dict]:
    """
    Create the data iterators and the vocabularies.

    :param args: Arguments as returned by argparse.
    :param max_seq_len_source: Source maximum sequence length (not referenced
        by this implementation, which reads images on the source side).
    :param max_seq_len_target: Target maximum sequence length.
    :param resume_training: Whether to resume training.
    :param output_folder: Output folder.
    :return: The data iterators (train, validation, config_data) as well as the source and target vocabularies.
    """
    # num_words / word_min_count come as (source, target) pairs; only the
    # target side is used here, and a limit of 0 means "no vocabulary limit".
    _, num_words_target = args.num_words
    num_words_target = num_words_target if num_words_target > 0 else None
    _, word_min_count_target = args.word_min_count
    # Count devices for batch sharding: each explicit device id contributes 1,
    # while a negative entry -di contributes di (a request for di devices).
    batch_num_devices = 1 if args.use_cpu else sum(-di if di < 0 else 1 for di in args.device_ids)
    batch_by_words = args.batch_type == C.BATCH_TYPE_WORD
    either_raw_or_prepared_error_msg = "Either specify a raw training corpus with %s or a preprocessed corpus " \
                                       "with %s." % (C.TRAINING_ARG_TARGET,
                                                     C.TRAINING_ARG_PREPARED_DATA)
    # Note: ignore args.prepared_data for the moment -- only a raw target
    # corpus is accepted by this image-captioning path.
    utils.check_condition(args.prepared_data is None and args.target is not None,
                          either_raw_or_prepared_error_msg)
    if resume_training:
        # Load the existing vocab created when starting the training run.
        target_vocab = vocab.vocab_from_json(os.path.join(output_folder, C.VOCAB_TRG_NAME))
        # Recover the vocabulary path from the existing config file:
        data_info = cast(data_io.DataInfo, Config.load(os.path.join(output_folder, C.DATA_INFO)))
        target_vocab_path = data_info.target_vocab
    else:
        # Load vocab (creating it from the training target if no path given):
        target_vocab_path = args.target_vocab
        # Note: We do not care about the source vocab for images, that is why some inputs are mocked
        target_vocab = vocab.load_or_create_vocab(data=args.target,
                                                  vocab_path=target_vocab_path,
                                                  num_words=num_words_target,
                                                  word_min_count=word_min_count_target)
    # Build the train/validation iterators over (image, caption) pairs.
    train_iter, validation_iter, config_data, data_info = data_io_image.get_training_image_text_data_iters(
        source_root=args.source_root,
        source=os.path.abspath(args.source),
        target=os.path.abspath(args.target),
        validation_source_root=args.validation_source_root,
        validation_source=os.path.abspath(args.validation_source),
        validation_target=os.path.abspath(args.validation_target),
        vocab_target=target_vocab,
        vocab_target_path=target_vocab_path,
        batch_size=args.batch_size,
        batch_by_words=batch_by_words,
        batch_num_devices=batch_num_devices,
        source_image_size=args.source_image_size,
        max_seq_len_target=max_seq_len_target,
        bucketing=not args.no_bucketing,
        bucket_width=args.bucket_width,
        use_feature_loader=args.image_preextracted_features,
        preload_features=args.load_all_features_to_memory
    )
    # Persist the data configuration next to the model outputs.
    data_info_fname = os.path.join(output_folder, C.DATA_INFO)
    logger.info("Writing data config to '%s'", data_info_fname)
    # Removing objects that cannot be saved:
    data_info.sources = None
    data_info.save(data_info_fname)
    return train_iter, validation_iter, config_data, target_vocab
constant[
Create the data iterators and the vocabularies.
:param args: Arguments as returned by argparse.
:param max_seq_len_source: Source maximum sequence length.
:param max_seq_len_target: Target maximum sequence length.
:param resume_training: Whether to resume training.
:param output_folder: Output folder.
:return: The data iterators (train, validation, config_data) as well as the source and target vocabularies.
]
<ast.Tuple object at 0x7da1b1d766b0> assign[=] name[args].num_words
variable[num_words_target] assign[=] <ast.IfExp object at 0x7da1b1d76440>
<ast.Tuple object at 0x7da1b1d772e0> assign[=] name[args].word_min_count
variable[batch_num_devices] assign[=] <ast.IfExp object at 0x7da1b1d75a80>
variable[batch_by_words] assign[=] compare[name[args].batch_type equal[==] name[C].BATCH_TYPE_WORD]
variable[either_raw_or_prepared_error_msg] assign[=] binary_operation[constant[Either specify a raw training corpus with %s or a preprocessed corpus with %s.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1d74e20>, <ast.Attribute object at 0x7da1b1d757b0>]]]
call[name[utils].check_condition, parameter[<ast.BoolOp object at 0x7da1b1d75360>, name[either_raw_or_prepared_error_msg]]]
if name[resume_training] begin[:]
variable[target_vocab] assign[=] call[name[vocab].vocab_from_json, parameter[call[name[os].path.join, parameter[name[output_folder], name[C].VOCAB_TRG_NAME]]]]
variable[data_info] assign[=] call[name[cast], parameter[name[data_io].DataInfo, call[name[Config].load, parameter[call[name[os].path.join, parameter[name[output_folder], name[C].DATA_INFO]]]]]]
variable[target_vocab_path] assign[=] name[data_info].target_vocab
<ast.Tuple object at 0x7da1b1d6e260> assign[=] call[name[data_io_image].get_training_image_text_data_iters, parameter[]]
variable[data_info_fname] assign[=] call[name[os].path.join, parameter[name[output_folder], name[C].DATA_INFO]]
call[name[logger].info, parameter[constant[Writing data config to '%s'], name[data_info_fname]]]
name[data_info].sources assign[=] constant[None]
call[name[data_info].save, parameter[name[data_info_fname]]]
return[tuple[[<ast.Name object at 0x7da1b1d6d7e0>, <ast.Name object at 0x7da1b1d6ec50>, <ast.Name object at 0x7da1b1d6dba0>, <ast.Name object at 0x7da1b1d6ee60>]]] | keyword[def] identifier[create_data_iters_and_vocab] ( identifier[args] : identifier[argparse] . identifier[Namespace] ,
identifier[max_seq_len_source] : identifier[int] ,
identifier[max_seq_len_target] : identifier[int] ,
identifier[resume_training] : identifier[bool] ,
identifier[output_folder] : identifier[str] )-> identifier[Tuple] [ literal[string] ,
literal[string] ,
literal[string] , identifier[Dict] ]:
literal[string]
identifier[_] , identifier[num_words_target] = identifier[args] . identifier[num_words]
identifier[num_words_target] = identifier[num_words_target] keyword[if] identifier[num_words_target] > literal[int] keyword[else] keyword[None]
identifier[_] , identifier[word_min_count_target] = identifier[args] . identifier[word_min_count]
identifier[batch_num_devices] = literal[int] keyword[if] identifier[args] . identifier[use_cpu] keyword[else] identifier[sum] (- identifier[di] keyword[if] identifier[di] < literal[int] keyword[else] literal[int] keyword[for] identifier[di] keyword[in] identifier[args] . identifier[device_ids] )
identifier[batch_by_words] = identifier[args] . identifier[batch_type] == identifier[C] . identifier[BATCH_TYPE_WORD]
identifier[either_raw_or_prepared_error_msg] = literal[string] literal[string] %( identifier[C] . identifier[TRAINING_ARG_TARGET] ,
identifier[C] . identifier[TRAINING_ARG_PREPARED_DATA] )
identifier[utils] . identifier[check_condition] ( identifier[args] . identifier[prepared_data] keyword[is] keyword[None] keyword[and] identifier[args] . identifier[target] keyword[is] keyword[not] keyword[None] ,
identifier[either_raw_or_prepared_error_msg] )
keyword[if] identifier[resume_training] :
identifier[target_vocab] = identifier[vocab] . identifier[vocab_from_json] ( identifier[os] . identifier[path] . identifier[join] ( identifier[output_folder] , identifier[C] . identifier[VOCAB_TRG_NAME] ))
identifier[data_info] = identifier[cast] ( identifier[data_io] . identifier[DataInfo] , identifier[Config] . identifier[load] ( identifier[os] . identifier[path] . identifier[join] ( identifier[output_folder] , identifier[C] . identifier[DATA_INFO] )))
identifier[target_vocab_path] = identifier[data_info] . identifier[target_vocab]
keyword[else] :
identifier[target_vocab_path] = identifier[args] . identifier[target_vocab]
identifier[target_vocab] = identifier[vocab] . identifier[load_or_create_vocab] ( identifier[data] = identifier[args] . identifier[target] ,
identifier[vocab_path] = identifier[target_vocab_path] ,
identifier[num_words] = identifier[num_words_target] ,
identifier[word_min_count] = identifier[word_min_count_target] )
identifier[train_iter] , identifier[validation_iter] , identifier[config_data] , identifier[data_info] = identifier[data_io_image] . identifier[get_training_image_text_data_iters] (
identifier[source_root] = identifier[args] . identifier[source_root] ,
identifier[source] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[args] . identifier[source] ),
identifier[target] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[args] . identifier[target] ),
identifier[validation_source_root] = identifier[args] . identifier[validation_source_root] ,
identifier[validation_source] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[args] . identifier[validation_source] ),
identifier[validation_target] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[args] . identifier[validation_target] ),
identifier[vocab_target] = identifier[target_vocab] ,
identifier[vocab_target_path] = identifier[target_vocab_path] ,
identifier[batch_size] = identifier[args] . identifier[batch_size] ,
identifier[batch_by_words] = identifier[batch_by_words] ,
identifier[batch_num_devices] = identifier[batch_num_devices] ,
identifier[source_image_size] = identifier[args] . identifier[source_image_size] ,
identifier[max_seq_len_target] = identifier[max_seq_len_target] ,
identifier[bucketing] = keyword[not] identifier[args] . identifier[no_bucketing] ,
identifier[bucket_width] = identifier[args] . identifier[bucket_width] ,
identifier[use_feature_loader] = identifier[args] . identifier[image_preextracted_features] ,
identifier[preload_features] = identifier[args] . identifier[load_all_features_to_memory]
)
identifier[data_info_fname] = identifier[os] . identifier[path] . identifier[join] ( identifier[output_folder] , identifier[C] . identifier[DATA_INFO] )
identifier[logger] . identifier[info] ( literal[string] , identifier[data_info_fname] )
identifier[data_info] . identifier[sources] = keyword[None]
identifier[data_info] . identifier[save] ( identifier[data_info_fname] )
keyword[return] identifier[train_iter] , identifier[validation_iter] , identifier[config_data] , identifier[target_vocab] | def create_data_iters_and_vocab(args: argparse.Namespace, max_seq_len_source: int, max_seq_len_target: int, resume_training: bool, output_folder: str) -> Tuple['data_io.BaseParallelSampleIter', 'data_io.BaseParallelSampleIter', 'data_io.DataConfig', Dict]:
"""
Create the data iterators and the vocabularies.
:param args: Arguments as returned by argparse.
:param max_seq_len_source: Source maximum sequence length.
:param max_seq_len_target: Target maximum sequence length.
:param resume_training: Whether to resume training.
:param output_folder: Output folder.
:return: The data iterators (train, validation, config_data) as well as the source and target vocabularies.
"""
(_, num_words_target) = args.num_words
num_words_target = num_words_target if num_words_target > 0 else None
(_, word_min_count_target) = args.word_min_count
batch_num_devices = 1 if args.use_cpu else sum((-di if di < 0 else 1 for di in args.device_ids))
batch_by_words = args.batch_type == C.BATCH_TYPE_WORD
either_raw_or_prepared_error_msg = 'Either specify a raw training corpus with %s or a preprocessed corpus with %s.' % (C.TRAINING_ARG_TARGET, C.TRAINING_ARG_PREPARED_DATA)
# Note: ignore args.prepared_data for the moment
utils.check_condition(args.prepared_data is None and args.target is not None, either_raw_or_prepared_error_msg)
if resume_training:
# Load the existing vocab created when starting the training run.
target_vocab = vocab.vocab_from_json(os.path.join(output_folder, C.VOCAB_TRG_NAME))
# Recover the vocabulary path from the existing config file:
data_info = cast(data_io.DataInfo, Config.load(os.path.join(output_folder, C.DATA_INFO)))
target_vocab_path = data_info.target_vocab # depends on [control=['if'], data=[]]
else:
# Load vocab:
target_vocab_path = args.target_vocab
# Note: We do not care about the source vocab for images, that is why some inputs are mocked
target_vocab = vocab.load_or_create_vocab(data=args.target, vocab_path=target_vocab_path, num_words=num_words_target, word_min_count=word_min_count_target)
(train_iter, validation_iter, config_data, data_info) = data_io_image.get_training_image_text_data_iters(source_root=args.source_root, source=os.path.abspath(args.source), target=os.path.abspath(args.target), validation_source_root=args.validation_source_root, validation_source=os.path.abspath(args.validation_source), validation_target=os.path.abspath(args.validation_target), vocab_target=target_vocab, vocab_target_path=target_vocab_path, batch_size=args.batch_size, batch_by_words=batch_by_words, batch_num_devices=batch_num_devices, source_image_size=args.source_image_size, max_seq_len_target=max_seq_len_target, bucketing=not args.no_bucketing, bucket_width=args.bucket_width, use_feature_loader=args.image_preextracted_features, preload_features=args.load_all_features_to_memory)
data_info_fname = os.path.join(output_folder, C.DATA_INFO)
logger.info("Writing data config to '%s'", data_info_fname)
# Removing objects that cannot be saved:
data_info.sources = None
data_info.save(data_info_fname)
return (train_iter, validation_iter, config_data, target_vocab) |
def make_column_suffixes(self):
    """Return the tuple of column suffixes for this ingredient.

    The suffixes are appended to `id` when the query is generated.
    Explicitly configured suffixes always win; otherwise they are derived
    from the number of columns and whether formatters are present.
    """
    if self.column_suffixes:
        return self.column_suffixes
    count = len(self.columns)
    if count == 0:
        return ()
    if count == 1:
        # A formatted single column keeps its unformatted value as `_raw`.
        return ('_raw',) if self.formatters else ('',)
    if count == 2:
        return ('_id', '_raw') if self.formatters else ('_id', '')
    raise BadIngredient(
        'column_suffixes must be supplied if there is '
        'more than one column'
    )
constant[ Make sure we have the right column suffixes. These will be appended
to `id` when generating the query.
]
if name[self].column_suffixes begin[:]
return[name[self].column_suffixes]
if compare[call[name[len], parameter[name[self].columns]] equal[==] constant[0]] begin[:]
return[tuple[[]]] | keyword[def] identifier[make_column_suffixes] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[column_suffixes] :
keyword[return] identifier[self] . identifier[column_suffixes]
keyword[if] identifier[len] ( identifier[self] . identifier[columns] )== literal[int] :
keyword[return] ()
keyword[elif] identifier[len] ( identifier[self] . identifier[columns] )== literal[int] :
keyword[if] identifier[self] . identifier[formatters] :
keyword[return] literal[string] ,
keyword[else] :
keyword[return] literal[string] ,
keyword[elif] identifier[len] ( identifier[self] . identifier[columns] )== literal[int] :
keyword[if] identifier[self] . identifier[formatters] :
keyword[return] literal[string] , literal[string] ,
keyword[else] :
keyword[return] literal[string] , literal[string] ,
keyword[else] :
keyword[raise] identifier[BadIngredient] (
literal[string]
literal[string]
) | def make_column_suffixes(self):
""" Make sure we have the right column suffixes. These will be appended
to `id` when generating the query.
"""
if self.column_suffixes:
return self.column_suffixes # depends on [control=['if'], data=[]]
if len(self.columns) == 0:
return () # depends on [control=['if'], data=[]]
elif len(self.columns) == 1:
if self.formatters:
return ('_raw',) # depends on [control=['if'], data=[]]
else:
return ('',) # depends on [control=['if'], data=[]]
elif len(self.columns) == 2:
if self.formatters:
return ('_id', '_raw') # depends on [control=['if'], data=[]]
else:
return ('_id', '') # depends on [control=['if'], data=[]]
else:
raise BadIngredient('column_suffixes must be supplied if there is more than one column') |
def updateTargetState(self, newState):
"""
Updates the system target state and propagates that to all devices.
:param newState:
:return:
"""
self._targetStateProvider.state = loadTargetState(newState, self._targetStateProvider.state)
for device in self.deviceController.getDevices():
self.updateDeviceState(device.payload) | def function[updateTargetState, parameter[self, newState]]:
constant[
Updates the system target state and propagates that to all devices.
:param newState:
:return:
]
name[self]._targetStateProvider.state assign[=] call[name[loadTargetState], parameter[name[newState], name[self]._targetStateProvider.state]]
for taget[name[device]] in starred[call[name[self].deviceController.getDevices, parameter[]]] begin[:]
call[name[self].updateDeviceState, parameter[name[device].payload]] | keyword[def] identifier[updateTargetState] ( identifier[self] , identifier[newState] ):
literal[string]
identifier[self] . identifier[_targetStateProvider] . identifier[state] = identifier[loadTargetState] ( identifier[newState] , identifier[self] . identifier[_targetStateProvider] . identifier[state] )
keyword[for] identifier[device] keyword[in] identifier[self] . identifier[deviceController] . identifier[getDevices] ():
identifier[self] . identifier[updateDeviceState] ( identifier[device] . identifier[payload] ) | def updateTargetState(self, newState):
"""
Updates the system target state and propagates that to all devices.
:param newState:
:return:
"""
self._targetStateProvider.state = loadTargetState(newState, self._targetStateProvider.state)
for device in self.deviceController.getDevices():
self.updateDeviceState(device.payload) # depends on [control=['for'], data=['device']] |
def start(self, max):
"""
Displays the progress bar for a given maximum value.
:param float max: Maximum value of the progress bar.
"""
try:
self.widget.max = max
display(self.widget)
except:
pass | def function[start, parameter[self, max]]:
constant[
Displays the progress bar for a given maximum value.
:param float max: Maximum value of the progress bar.
]
<ast.Try object at 0x7da1b0c00490> | keyword[def] identifier[start] ( identifier[self] , identifier[max] ):
literal[string]
keyword[try] :
identifier[self] . identifier[widget] . identifier[max] = identifier[max]
identifier[display] ( identifier[self] . identifier[widget] )
keyword[except] :
keyword[pass] | def start(self, max):
"""
Displays the progress bar for a given maximum value.
:param float max: Maximum value of the progress bar.
"""
try:
self.widget.max = max
display(self.widget) # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]] |
async def pin_chat_message(self, chat_id: typing.Union[base.Integer, base.String], message_id: base.Integer,
disable_notification: typing.Union[base.Boolean, None] = None) -> base.Boolean:
"""
Use this method to pin a message in a supergroup.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Source: https://core.telegram.org/bots/api#pinchatmessage
:param chat_id: Unique identifier for the target chat or username of the target supergroup
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param message_id: Identifier of a message to pin
:type message_id: :obj:`base.Integer`
:param disable_notification: Pass True, if it is not necessary to send a notification to
all group members about the new pinned message
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:return: Returns True on success
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.PIN_CHAT_MESSAGE, payload)
return result | <ast.AsyncFunctionDef object at 0x7da1b17aabf0> | keyword[async] keyword[def] identifier[pin_chat_message] ( identifier[self] , identifier[chat_id] : identifier[typing] . identifier[Union] [ identifier[base] . identifier[Integer] , identifier[base] . identifier[String] ], identifier[message_id] : identifier[base] . identifier[Integer] ,
identifier[disable_notification] : identifier[typing] . identifier[Union] [ identifier[base] . identifier[Boolean] , keyword[None] ]= keyword[None] )-> identifier[base] . identifier[Boolean] :
literal[string]
identifier[payload] = identifier[generate_payload] (** identifier[locals] ())
identifier[result] = keyword[await] identifier[self] . identifier[request] ( identifier[api] . identifier[Methods] . identifier[PIN_CHAT_MESSAGE] , identifier[payload] )
keyword[return] identifier[result] | async def pin_chat_message(self, chat_id: typing.Union[base.Integer, base.String], message_id: base.Integer, disable_notification: typing.Union[base.Boolean, None]=None) -> base.Boolean:
"""
Use this method to pin a message in a supergroup.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Source: https://core.telegram.org/bots/api#pinchatmessage
:param chat_id: Unique identifier for the target chat or username of the target supergroup
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param message_id: Identifier of a message to pin
:type message_id: :obj:`base.Integer`
:param disable_notification: Pass True, if it is not necessary to send a notification to
all group members about the new pinned message
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:return: Returns True on success
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.PIN_CHAT_MESSAGE, payload)
return result |
def _determine_nTrackIterations(self,nTrackIterations):
"""Determine a good value for nTrackIterations based on the misalignment between stream and orbit; just based on some rough experience for now"""
if not nTrackIterations is None:
self.nTrackIterations= nTrackIterations
return None
if numpy.fabs(self.misalignment(quantity=False)) < 1./180.*numpy.pi:
self.nTrackIterations= 0
elif numpy.fabs(self.misalignment(quantity=False)) >= 1./180.*numpy.pi \
and numpy.fabs(self.misalignment(quantity=False)) < 3./180.*numpy.pi:
self.nTrackIterations= 1
elif numpy.fabs(self.misalignment(quantity=False)) >= 3./180.*numpy.pi:
self.nTrackIterations= 2
return None | def function[_determine_nTrackIterations, parameter[self, nTrackIterations]]:
constant[Determine a good value for nTrackIterations based on the misalignment between stream and orbit; just based on some rough experience for now]
if <ast.UnaryOp object at 0x7da1b0da2e90> begin[:]
name[self].nTrackIterations assign[=] name[nTrackIterations]
return[constant[None]]
if compare[call[name[numpy].fabs, parameter[call[name[self].misalignment, parameter[]]]] less[<] binary_operation[binary_operation[constant[1.0] / constant[180.0]] * name[numpy].pi]] begin[:]
name[self].nTrackIterations assign[=] constant[0]
return[constant[None]] | keyword[def] identifier[_determine_nTrackIterations] ( identifier[self] , identifier[nTrackIterations] ):
literal[string]
keyword[if] keyword[not] identifier[nTrackIterations] keyword[is] keyword[None] :
identifier[self] . identifier[nTrackIterations] = identifier[nTrackIterations]
keyword[return] keyword[None]
keyword[if] identifier[numpy] . identifier[fabs] ( identifier[self] . identifier[misalignment] ( identifier[quantity] = keyword[False] ))< literal[int] / literal[int] * identifier[numpy] . identifier[pi] :
identifier[self] . identifier[nTrackIterations] = literal[int]
keyword[elif] identifier[numpy] . identifier[fabs] ( identifier[self] . identifier[misalignment] ( identifier[quantity] = keyword[False] ))>= literal[int] / literal[int] * identifier[numpy] . identifier[pi] keyword[and] identifier[numpy] . identifier[fabs] ( identifier[self] . identifier[misalignment] ( identifier[quantity] = keyword[False] ))< literal[int] / literal[int] * identifier[numpy] . identifier[pi] :
identifier[self] . identifier[nTrackIterations] = literal[int]
keyword[elif] identifier[numpy] . identifier[fabs] ( identifier[self] . identifier[misalignment] ( identifier[quantity] = keyword[False] ))>= literal[int] / literal[int] * identifier[numpy] . identifier[pi] :
identifier[self] . identifier[nTrackIterations] = literal[int]
keyword[return] keyword[None] | def _determine_nTrackIterations(self, nTrackIterations):
"""Determine a good value for nTrackIterations based on the misalignment between stream and orbit; just based on some rough experience for now"""
if not nTrackIterations is None:
self.nTrackIterations = nTrackIterations
return None # depends on [control=['if'], data=[]]
if numpy.fabs(self.misalignment(quantity=False)) < 1.0 / 180.0 * numpy.pi:
self.nTrackIterations = 0 # depends on [control=['if'], data=[]]
elif numpy.fabs(self.misalignment(quantity=False)) >= 1.0 / 180.0 * numpy.pi and numpy.fabs(self.misalignment(quantity=False)) < 3.0 / 180.0 * numpy.pi:
self.nTrackIterations = 1 # depends on [control=['if'], data=[]]
elif numpy.fabs(self.misalignment(quantity=False)) >= 3.0 / 180.0 * numpy.pi:
self.nTrackIterations = 2 # depends on [control=['if'], data=[]]
return None |
def refresh(func):
"""
Decorator that can be applied to model method that forces a refresh of the model.
Note this decorator ensures the state of the model is what is currently within
the database and therefore overwrites any current field changes.
For example, assume we have the following model:
.. code-block:: python
class MyModel(models.Model):
counter = models.IntegerField()
@refresh
def my_method(self):
print counter
Then the following is performed:
.. code-block:: python
i = MyModel.objects.create(counter=1)
i.counter = 3
i.my_method()
# prints 1
This behavior is useful in a distributed system, such as celery, where
"asserting the world is the responsibility of the task" - see http://celery.readthedocs.org/en/latest/userguide/tasks.html?highlight=model#state
Note that the refresh of the model uses the approach outlined in https://github.com/planop/django/blob/ticket_901/django/db/models/base.py#L1012
which was discovered after from https://code.djangoproject.com/ticket/901#comment:29
which is a Django ticket which discusses a specific method 'refresh' on a
model.
"""
@wraps(func)
def inner(self, *args, **kwargs):
# Refresh the model instance - see https://github.com/planop/django/blob/ticket_901/django/db/models/base.py#L1012
new_self = self.__class__._base_manager.using(self._state.db).get(pk=self.pk)
for f in self.__class__._meta.fields:
setattr(self, f.name, getattr(new_self, f.name))
return func(self, *args, **kwargs)
return inner | def function[refresh, parameter[func]]:
constant[
Decorator that can be applied to model method that forces a refresh of the model.
Note this decorator ensures the state of the model is what is currently within
the database and therefore overwrites any current field changes.
For example, assume we have the following model:
.. code-block:: python
class MyModel(models.Model):
counter = models.IntegerField()
@refresh
def my_method(self):
print counter
Then the following is performed:
.. code-block:: python
i = MyModel.objects.create(counter=1)
i.counter = 3
i.my_method()
# prints 1
This behavior is useful in a distributed system, such as celery, where
"asserting the world is the responsibility of the task" - see http://celery.readthedocs.org/en/latest/userguide/tasks.html?highlight=model#state
Note that the refresh of the model uses the approach outlined in https://github.com/planop/django/blob/ticket_901/django/db/models/base.py#L1012
which was discovered after from https://code.djangoproject.com/ticket/901#comment:29
which is a Django ticket which discusses a specific method 'refresh' on a
model.
]
def function[inner, parameter[self]]:
variable[new_self] assign[=] call[call[name[self].__class__._base_manager.using, parameter[name[self]._state.db]].get, parameter[]]
for taget[name[f]] in starred[name[self].__class__._meta.fields] begin[:]
call[name[setattr], parameter[name[self], name[f].name, call[name[getattr], parameter[name[new_self], name[f].name]]]]
return[call[name[func], parameter[name[self], <ast.Starred object at 0x7da18f58e170>]]]
return[name[inner]] | keyword[def] identifier[refresh] ( identifier[func] ):
literal[string]
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[inner] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
identifier[new_self] = identifier[self] . identifier[__class__] . identifier[_base_manager] . identifier[using] ( identifier[self] . identifier[_state] . identifier[db] ). identifier[get] ( identifier[pk] = identifier[self] . identifier[pk] )
keyword[for] identifier[f] keyword[in] identifier[self] . identifier[__class__] . identifier[_meta] . identifier[fields] :
identifier[setattr] ( identifier[self] , identifier[f] . identifier[name] , identifier[getattr] ( identifier[new_self] , identifier[f] . identifier[name] ))
keyword[return] identifier[func] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[inner] | def refresh(func):
"""
Decorator that can be applied to model method that forces a refresh of the model.
Note this decorator ensures the state of the model is what is currently within
the database and therefore overwrites any current field changes.
For example, assume we have the following model:
.. code-block:: python
class MyModel(models.Model):
counter = models.IntegerField()
@refresh
def my_method(self):
print counter
Then the following is performed:
.. code-block:: python
i = MyModel.objects.create(counter=1)
i.counter = 3
i.my_method()
# prints 1
This behavior is useful in a distributed system, such as celery, where
"asserting the world is the responsibility of the task" - see http://celery.readthedocs.org/en/latest/userguide/tasks.html?highlight=model#state
Note that the refresh of the model uses the approach outlined in https://github.com/planop/django/blob/ticket_901/django/db/models/base.py#L1012
which was discovered after from https://code.djangoproject.com/ticket/901#comment:29
which is a Django ticket which discusses a specific method 'refresh' on a
model.
"""
@wraps(func)
def inner(self, *args, **kwargs):
# Refresh the model instance - see https://github.com/planop/django/blob/ticket_901/django/db/models/base.py#L1012
new_self = self.__class__._base_manager.using(self._state.db).get(pk=self.pk)
for f in self.__class__._meta.fields:
setattr(self, f.name, getattr(new_self, f.name)) # depends on [control=['for'], data=['f']]
return func(self, *args, **kwargs)
return inner |
def list_resource_commands(self):
"""Returns a list of multi-commands for each resource type.
"""
resource_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.pardir,
'resources'
))
answer = set([])
for _, name, _ in pkgutil.iter_modules([resource_path]):
res = tower_cli.get_resource(name)
if not getattr(res, 'internal', False):
answer.add(name)
return sorted(answer) | def function[list_resource_commands, parameter[self]]:
constant[Returns a list of multi-commands for each resource type.
]
variable[resource_path] assign[=] call[name[os].path.abspath, parameter[call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[__file__]]], name[os].pardir, constant[resources]]]]]
variable[answer] assign[=] call[name[set], parameter[list[[]]]]
for taget[tuple[[<ast.Name object at 0x7da18dc9bcd0>, <ast.Name object at 0x7da18dc997e0>, <ast.Name object at 0x7da18dc9b2e0>]]] in starred[call[name[pkgutil].iter_modules, parameter[list[[<ast.Name object at 0x7da18dc99de0>]]]]] begin[:]
variable[res] assign[=] call[name[tower_cli].get_resource, parameter[name[name]]]
if <ast.UnaryOp object at 0x7da207f01fc0> begin[:]
call[name[answer].add, parameter[name[name]]]
return[call[name[sorted], parameter[name[answer]]]] | keyword[def] identifier[list_resource_commands] ( identifier[self] ):
literal[string]
identifier[resource_path] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[join] (
identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ),
identifier[os] . identifier[pardir] ,
literal[string]
))
identifier[answer] = identifier[set] ([])
keyword[for] identifier[_] , identifier[name] , identifier[_] keyword[in] identifier[pkgutil] . identifier[iter_modules] ([ identifier[resource_path] ]):
identifier[res] = identifier[tower_cli] . identifier[get_resource] ( identifier[name] )
keyword[if] keyword[not] identifier[getattr] ( identifier[res] , literal[string] , keyword[False] ):
identifier[answer] . identifier[add] ( identifier[name] )
keyword[return] identifier[sorted] ( identifier[answer] ) | def list_resource_commands(self):
"""Returns a list of multi-commands for each resource type.
"""
resource_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'resources'))
answer = set([])
for (_, name, _) in pkgutil.iter_modules([resource_path]):
res = tower_cli.get_resource(name)
if not getattr(res, 'internal', False):
answer.add(name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return sorted(answer) |
def send_im(self, user, text):
"""
Sends a message to a user as an IM
* user - The user to send to. This can be a SlackUser object, a user id, or the username (without the @)
* text - String to send
"""
if isinstance(user, SlackUser):
user = user.id
channelid = self._find_im_channel(user)
else:
channelid = user.id
self.send_message(channelid, text) | def function[send_im, parameter[self, user, text]]:
constant[
Sends a message to a user as an IM
* user - The user to send to. This can be a SlackUser object, a user id, or the username (without the @)
* text - String to send
]
if call[name[isinstance], parameter[name[user], name[SlackUser]]] begin[:]
variable[user] assign[=] name[user].id
variable[channelid] assign[=] call[name[self]._find_im_channel, parameter[name[user]]]
call[name[self].send_message, parameter[name[channelid], name[text]]] | keyword[def] identifier[send_im] ( identifier[self] , identifier[user] , identifier[text] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[user] , identifier[SlackUser] ):
identifier[user] = identifier[user] . identifier[id]
identifier[channelid] = identifier[self] . identifier[_find_im_channel] ( identifier[user] )
keyword[else] :
identifier[channelid] = identifier[user] . identifier[id]
identifier[self] . identifier[send_message] ( identifier[channelid] , identifier[text] ) | def send_im(self, user, text):
"""
Sends a message to a user as an IM
* user - The user to send to. This can be a SlackUser object, a user id, or the username (without the @)
* text - String to send
"""
if isinstance(user, SlackUser):
user = user.id
channelid = self._find_im_channel(user) # depends on [control=['if'], data=[]]
else:
channelid = user.id
self.send_message(channelid, text) |
def send(self, tid, out_sid, company_code, session, sender_id=None, cancel_id=None, feature=None):
'''taobao.logistics.offline.send 自己联系物流(线下物流)发货
用户调用该接口可实现自己联系发货(线下物流),使用该接口发货,交易订单状态会直接变成卖家已发货。不支持货到付款、在线下单类型的订单。'''
request = TOPRequest('taobao.logistics.offline.send')
request['tid'] = tid
request['out_sid'] = out_sid
request['company_code'] = company_code
if feature!=None: request['feature'] = feature
if sender_id!=None: request['sender_id'] = sender_id
if cancel_id!=None: request['cancel_id'] = cancel_id
self.create(self.execute(request, session))
return self.shipping | def function[send, parameter[self, tid, out_sid, company_code, session, sender_id, cancel_id, feature]]:
constant[taobao.logistics.offline.send 自己联系物流(线下物流)发货
用户调用该接口可实现自己联系发货(线下物流),使用该接口发货,交易订单状态会直接变成卖家已发货。不支持货到付款、在线下单类型的订单。]
variable[request] assign[=] call[name[TOPRequest], parameter[constant[taobao.logistics.offline.send]]]
call[name[request]][constant[tid]] assign[=] name[tid]
call[name[request]][constant[out_sid]] assign[=] name[out_sid]
call[name[request]][constant[company_code]] assign[=] name[company_code]
if compare[name[feature] not_equal[!=] constant[None]] begin[:]
call[name[request]][constant[feature]] assign[=] name[feature]
if compare[name[sender_id] not_equal[!=] constant[None]] begin[:]
call[name[request]][constant[sender_id]] assign[=] name[sender_id]
if compare[name[cancel_id] not_equal[!=] constant[None]] begin[:]
call[name[request]][constant[cancel_id]] assign[=] name[cancel_id]
call[name[self].create, parameter[call[name[self].execute, parameter[name[request], name[session]]]]]
return[name[self].shipping] | keyword[def] identifier[send] ( identifier[self] , identifier[tid] , identifier[out_sid] , identifier[company_code] , identifier[session] , identifier[sender_id] = keyword[None] , identifier[cancel_id] = keyword[None] , identifier[feature] = keyword[None] ):
literal[string]
identifier[request] = identifier[TOPRequest] ( literal[string] )
identifier[request] [ literal[string] ]= identifier[tid]
identifier[request] [ literal[string] ]= identifier[out_sid]
identifier[request] [ literal[string] ]= identifier[company_code]
keyword[if] identifier[feature] != keyword[None] : identifier[request] [ literal[string] ]= identifier[feature]
keyword[if] identifier[sender_id] != keyword[None] : identifier[request] [ literal[string] ]= identifier[sender_id]
keyword[if] identifier[cancel_id] != keyword[None] : identifier[request] [ literal[string] ]= identifier[cancel_id]
identifier[self] . identifier[create] ( identifier[self] . identifier[execute] ( identifier[request] , identifier[session] ))
keyword[return] identifier[self] . identifier[shipping] | def send(self, tid, out_sid, company_code, session, sender_id=None, cancel_id=None, feature=None):
"""taobao.logistics.offline.send 自己联系物流(线下物流)发货
用户调用该接口可实现自己联系发货(线下物流),使用该接口发货,交易订单状态会直接变成卖家已发货。不支持货到付款、在线下单类型的订单。"""
request = TOPRequest('taobao.logistics.offline.send')
request['tid'] = tid
request['out_sid'] = out_sid
request['company_code'] = company_code
if feature != None:
request['feature'] = feature # depends on [control=['if'], data=['feature']]
if sender_id != None:
request['sender_id'] = sender_id # depends on [control=['if'], data=['sender_id']]
if cancel_id != None:
request['cancel_id'] = cancel_id # depends on [control=['if'], data=['cancel_id']]
self.create(self.execute(request, session))
return self.shipping |
def calc_radius(latitude, ellipsoid='WGS84'):
"""Calculate earth radius for a given latitude.
This function is most useful when dealing with datasets that are very
localised and require the accuracy of an ellipsoid model without the
complexity of code necessary to actually use one. The results are meant to
be used as a :data:`BODY_RADIUS` replacement when the simple geocentric
value is not good enough.
The original use for ``calc_radius`` is to set a more accurate radius value
for use with trigpointing databases that are keyed on the OSGB36 datum, but
it has been expanded to cover other ellipsoids.
Args:
latitude (float): Latitude to calculate earth radius for
ellipsoid (tuple of float): Ellipsoid model to use for calculation
Returns:
float: Approximated Earth radius at the given latitude
"""
ellipsoids = {
'Airy (1830)': (6377.563, 6356.257), # Ordnance Survey default
'Bessel': (6377.397, 6356.079),
'Clarke (1880)': (6378.249145, 6356.51486955),
'FAI sphere': (6371, 6371), # Idealised
'GRS-67': (6378.160, 6356.775),
'International': (6378.388, 6356.912),
'Krasovsky': (6378.245, 6356.863),
'NAD27': (6378.206, 6356.584),
'WGS66': (6378.145, 6356.758),
'WGS72': (6378.135, 6356.751),
'WGS84': (6378.137, 6356.752), # GPS default
}
# Equatorial radius, polar radius
major, minor = ellipsoids[ellipsoid]
# eccentricity of the ellipsoid
eccentricity = 1 - (minor ** 2 / major ** 2)
sl = math.sin(math.radians(latitude))
return (major * (1 - eccentricity)) / (1 - eccentricity * sl ** 2) ** 1.5 | def function[calc_radius, parameter[latitude, ellipsoid]]:
constant[Calculate earth radius for a given latitude.
This function is most useful when dealing with datasets that are very
localised and require the accuracy of an ellipsoid model without the
complexity of code necessary to actually use one. The results are meant to
be used as a :data:`BODY_RADIUS` replacement when the simple geocentric
value is not good enough.
The original use for ``calc_radius`` is to set a more accurate radius value
for use with trigpointing databases that are keyed on the OSGB36 datum, but
it has been expanded to cover other ellipsoids.
Args:
latitude (float): Latitude to calculate earth radius for
ellipsoid (tuple of float): Ellipsoid model to use for calculation
Returns:
float: Approximated Earth radius at the given latitude
]
variable[ellipsoids] assign[=] dictionary[[<ast.Constant object at 0x7da18f00fac0>, <ast.Constant object at 0x7da18f00f850>, <ast.Constant object at 0x7da18f00d420>, <ast.Constant object at 0x7da18f00d210>, <ast.Constant object at 0x7da18f00dd80>, <ast.Constant object at 0x7da18f00c940>, <ast.Constant object at 0x7da18f00e6b0>, <ast.Constant object at 0x7da18f00e560>, <ast.Constant object at 0x7da18f00ed10>, <ast.Constant object at 0x7da18f00eb90>, <ast.Constant object at 0x7da18f00db70>], [<ast.Tuple object at 0x7da18f00ff70>, <ast.Tuple object at 0x7da18f00c580>, <ast.Tuple object at 0x7da18f00f580>, <ast.Tuple object at 0x7da18f00ea40>, <ast.Tuple object at 0x7da18f00cfa0>, <ast.Tuple object at 0x7da18f00d510>, <ast.Tuple object at 0x7da18f00de40>, <ast.Tuple object at 0x7da18f00c460>, <ast.Tuple object at 0x7da18f00f670>, <ast.Tuple object at 0x7da18f00ca90>, <ast.Tuple object at 0x7da18f00f460>]]
<ast.Tuple object at 0x7da18f00f730> assign[=] call[name[ellipsoids]][name[ellipsoid]]
variable[eccentricity] assign[=] binary_operation[constant[1] - binary_operation[binary_operation[name[minor] ** constant[2]] / binary_operation[name[major] ** constant[2]]]]
variable[sl] assign[=] call[name[math].sin, parameter[call[name[math].radians, parameter[name[latitude]]]]]
return[binary_operation[binary_operation[name[major] * binary_operation[constant[1] - name[eccentricity]]] / binary_operation[binary_operation[constant[1] - binary_operation[name[eccentricity] * binary_operation[name[sl] ** constant[2]]]] ** constant[1.5]]]] | keyword[def] identifier[calc_radius] ( identifier[latitude] , identifier[ellipsoid] = literal[string] ):
literal[string]
identifier[ellipsoids] ={
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
}
identifier[major] , identifier[minor] = identifier[ellipsoids] [ identifier[ellipsoid] ]
identifier[eccentricity] = literal[int] -( identifier[minor] ** literal[int] / identifier[major] ** literal[int] )
identifier[sl] = identifier[math] . identifier[sin] ( identifier[math] . identifier[radians] ( identifier[latitude] ))
keyword[return] ( identifier[major] *( literal[int] - identifier[eccentricity] ))/( literal[int] - identifier[eccentricity] * identifier[sl] ** literal[int] )** literal[int] | def calc_radius(latitude, ellipsoid='WGS84'):
"""Calculate earth radius for a given latitude.
This function is most useful when dealing with datasets that are very
localised and require the accuracy of an ellipsoid model without the
complexity of code necessary to actually use one. The results are meant to
be used as a :data:`BODY_RADIUS` replacement when the simple geocentric
value is not good enough.
The original use for ``calc_radius`` is to set a more accurate radius value
for use with trigpointing databases that are keyed on the OSGB36 datum, but
it has been expanded to cover other ellipsoids.
Args:
latitude (float): Latitude to calculate earth radius for
ellipsoid (tuple of float): Ellipsoid model to use for calculation
Returns:
float: Approximated Earth radius at the given latitude
""" # Ordnance Survey default
# Idealised
# GPS default
ellipsoids = {'Airy (1830)': (6377.563, 6356.257), 'Bessel': (6377.397, 6356.079), 'Clarke (1880)': (6378.249145, 6356.51486955), 'FAI sphere': (6371, 6371), 'GRS-67': (6378.16, 6356.775), 'International': (6378.388, 6356.912), 'Krasovsky': (6378.245, 6356.863), 'NAD27': (6378.206, 6356.584), 'WGS66': (6378.145, 6356.758), 'WGS72': (6378.135, 6356.751), 'WGS84': (6378.137, 6356.752)}
# Equatorial radius, polar radius
(major, minor) = ellipsoids[ellipsoid]
# eccentricity of the ellipsoid
eccentricity = 1 - minor ** 2 / major ** 2
sl = math.sin(math.radians(latitude))
return major * (1 - eccentricity) / (1 - eccentricity * sl ** 2) ** 1.5 |
def render_toolbar(context, config):
"""Render the toolbar for the given config."""
quill_config = getattr(quill_app, config)
t = template.loader.get_template(quill_config['toolbar_template'])
return t.render(context) | def function[render_toolbar, parameter[context, config]]:
constant[Render the toolbar for the given config.]
variable[quill_config] assign[=] call[name[getattr], parameter[name[quill_app], name[config]]]
variable[t] assign[=] call[name[template].loader.get_template, parameter[call[name[quill_config]][constant[toolbar_template]]]]
return[call[name[t].render, parameter[name[context]]]] | keyword[def] identifier[render_toolbar] ( identifier[context] , identifier[config] ):
literal[string]
identifier[quill_config] = identifier[getattr] ( identifier[quill_app] , identifier[config] )
identifier[t] = identifier[template] . identifier[loader] . identifier[get_template] ( identifier[quill_config] [ literal[string] ])
keyword[return] identifier[t] . identifier[render] ( identifier[context] ) | def render_toolbar(context, config):
"""Render the toolbar for the given config."""
quill_config = getattr(quill_app, config)
t = template.loader.get_template(quill_config['toolbar_template'])
return t.render(context) |
def verify(self, obj):
"""Verify that the object conforms to this verifier's schema
Args:
obj (object): A python object to verify
Returns:
bytes or byterray: The decoded byte buffer
Raises:
ValidationError: If there is a problem verifying the object, a
ValidationError is thrown with at least the reason key set indicating
the reason for the lack of validation.
"""
if self.encoding == 'none' and not isinstance(obj, (bytes, bytearray)):
raise ValidationError('Byte object was not either bytes or a bytearray', type=obj.__class__.__name__)
elif self.encoding == 'base64':
try:
data = base64.b64decode(obj)
return data
except TypeError:
raise ValidationError("Could not decode base64 encoded bytes", obj=obj)
elif self.encoding == 'hex':
try:
data = binascii.unhexlify(obj)
return data
except TypeError:
raise ValidationError("Could not decode hex encoded bytes", obj=obj)
return obj | def function[verify, parameter[self, obj]]:
constant[Verify that the object conforms to this verifier's schema
Args:
obj (object): A python object to verify
Returns:
bytes or byterray: The decoded byte buffer
Raises:
ValidationError: If there is a problem verifying the object, a
ValidationError is thrown with at least the reason key set indicating
the reason for the lack of validation.
]
if <ast.BoolOp object at 0x7da20c76cd00> begin[:]
<ast.Raise object at 0x7da18f722fb0>
return[name[obj]] | keyword[def] identifier[verify] ( identifier[self] , identifier[obj] ):
literal[string]
keyword[if] identifier[self] . identifier[encoding] == literal[string] keyword[and] keyword[not] identifier[isinstance] ( identifier[obj] ,( identifier[bytes] , identifier[bytearray] )):
keyword[raise] identifier[ValidationError] ( literal[string] , identifier[type] = identifier[obj] . identifier[__class__] . identifier[__name__] )
keyword[elif] identifier[self] . identifier[encoding] == literal[string] :
keyword[try] :
identifier[data] = identifier[base64] . identifier[b64decode] ( identifier[obj] )
keyword[return] identifier[data]
keyword[except] identifier[TypeError] :
keyword[raise] identifier[ValidationError] ( literal[string] , identifier[obj] = identifier[obj] )
keyword[elif] identifier[self] . identifier[encoding] == literal[string] :
keyword[try] :
identifier[data] = identifier[binascii] . identifier[unhexlify] ( identifier[obj] )
keyword[return] identifier[data]
keyword[except] identifier[TypeError] :
keyword[raise] identifier[ValidationError] ( literal[string] , identifier[obj] = identifier[obj] )
keyword[return] identifier[obj] | def verify(self, obj):
"""Verify that the object conforms to this verifier's schema
Args:
obj (object): A python object to verify
Returns:
bytes or byterray: The decoded byte buffer
Raises:
ValidationError: If there is a problem verifying the object, a
ValidationError is thrown with at least the reason key set indicating
the reason for the lack of validation.
"""
if self.encoding == 'none' and (not isinstance(obj, (bytes, bytearray))):
raise ValidationError('Byte object was not either bytes or a bytearray', type=obj.__class__.__name__) # depends on [control=['if'], data=[]]
elif self.encoding == 'base64':
try:
data = base64.b64decode(obj)
return data # depends on [control=['try'], data=[]]
except TypeError:
raise ValidationError('Could not decode base64 encoded bytes', obj=obj) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif self.encoding == 'hex':
try:
data = binascii.unhexlify(obj)
return data # depends on [control=['try'], data=[]]
except TypeError:
raise ValidationError('Could not decode hex encoded bytes', obj=obj) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return obj |
def number_to_day(self, day_number):
    """Returns localized day name by its CRON number

    Args:
        day_number: Number of a day (CRON convention, 0 = Sunday)

    Returns:
        Localized day name corresponding to day_number

    Raises:
        IndexError: When day_number is not found
    """
    # calendar.day_name starts the week on Monday while CRON starts on
    # Sunday, so rotate the week by one position before indexing.
    cron_ordered = [calendar.day_name[(cron_day + 6) % 7] for cron_day in range(7)]
    return cron_ordered[day_number]
constant[Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found
]
return[call[list[[<ast.Subscript object at 0x7da1b04c9990>, <ast.Subscript object at 0x7da1b04ca3e0>, <ast.Subscript object at 0x7da1b04ca4d0>, <ast.Subscript object at 0x7da1b04ca260>, <ast.Subscript object at 0x7da1b04ca5f0>, <ast.Subscript object at 0x7da1b04ca080>, <ast.Subscript object at 0x7da1b04ca410>]]][name[day_number]]] | keyword[def] identifier[number_to_day] ( identifier[self] , identifier[day_number] ):
literal[string]
keyword[return] [
identifier[calendar] . identifier[day_name] [ literal[int] ],
identifier[calendar] . identifier[day_name] [ literal[int] ],
identifier[calendar] . identifier[day_name] [ literal[int] ],
identifier[calendar] . identifier[day_name] [ literal[int] ],
identifier[calendar] . identifier[day_name] [ literal[int] ],
identifier[calendar] . identifier[day_name] [ literal[int] ],
identifier[calendar] . identifier[day_name] [ literal[int] ]
][ identifier[day_number] ] | def number_to_day(self, day_number):
"""Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found
"""
return [calendar.day_name[6], calendar.day_name[0], calendar.day_name[1], calendar.day_name[2], calendar.day_name[3], calendar.day_name[4], calendar.day_name[5]][day_number] |
def loadJSON(self, jdata):
    """
    Loads JSON data for this column type.

    :param jdata: <dict>
    """
    super(StringColumn, self).loadJSON(jdata)

    # restore the length constraint, keeping the current one when the
    # payload has no (or a falsey) 'maxLength' entry
    new_length = jdata.get('maxLength')
    self.__maxLength = new_length if new_length else self.__maxLength
constant[
Loads JSON data for this column type.
:param jdata: <dict>
]
call[call[name[super], parameter[name[StringColumn], name[self]]].loadJSON, parameter[name[jdata]]]
name[self].__maxLength assign[=] <ast.BoolOp object at 0x7da1b255e3e0> | keyword[def] identifier[loadJSON] ( identifier[self] , identifier[jdata] ):
literal[string]
identifier[super] ( identifier[StringColumn] , identifier[self] ). identifier[loadJSON] ( identifier[jdata] )
identifier[self] . identifier[__maxLength] = identifier[jdata] . identifier[get] ( literal[string] ) keyword[or] identifier[self] . identifier[__maxLength] | def loadJSON(self, jdata):
"""
Loads JSON data for this column type.
:param jdata: <dict>
"""
super(StringColumn, self).loadJSON(jdata)
# load additional info
self.__maxLength = jdata.get('maxLength') or self.__maxLength |
def index(self, date):
    """ Returns the index of a date in the table. """
    # Each table row is a (start_jd, end_jd, ruler) triple; report the
    # position of the first row whose interval contains the date, or
    # None when no interval matches.
    return next(
        (position for position, (start, end, _ruler) in enumerate(self.table)
         if start <= date.jd <= end),
        None,
    )
constant[ Returns the index of a date in the table. ]
for taget[tuple[[<ast.Name object at 0x7da1b2346f20>, <ast.Tuple object at 0x7da1b2345090>]]] in starred[call[name[enumerate], parameter[name[self].table]]] begin[:]
if compare[name[start] less_or_equal[<=] name[date].jd] begin[:]
return[name[i]]
return[constant[None]] | keyword[def] identifier[index] ( identifier[self] , identifier[date] ):
literal[string]
keyword[for] ( identifier[i] ,( identifier[start] , identifier[end] , identifier[ruler] )) keyword[in] identifier[enumerate] ( identifier[self] . identifier[table] ):
keyword[if] identifier[start] <= identifier[date] . identifier[jd] <= identifier[end] :
keyword[return] identifier[i]
keyword[return] keyword[None] | def index(self, date):
""" Returns the index of a date in the table. """
for (i, (start, end, ruler)) in enumerate(self.table):
if start <= date.jd <= end:
return i # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return None |
def to_netflux(flux):
    r"""Compute the netflux from the gross flux.

    f_ij^{+}=max{0, f_ij-f_ji}
    for all pairs i,j

    Parameters
    ----------
    flux : (M, M) ndarray
        Matrix of flux values between pairs of states.

    Returns
    -------
    netflux : (M, M) ndarray
        Matrix of netflux values between pairs of states.
    """
    # The antisymmetric part f_ij - f_ji is positive in exactly one of the
    # two directions; clamping at zero keeps only that direction.
    antisymmetric = flux - np.transpose(flux)
    return np.maximum(antisymmetric, 0)
constant[Compute the netflux from the gross flux.
f_ij^{+}=max{0, f_ij-f_ji}
for all pairs i,j
Parameters
----------
flux : (M, M) ndarray
Matrix of flux values between pairs of states.
Returns
-------
netflux : (M, M) ndarray
Matrix of netflux values between pairs of states.
]
variable[netflux] assign[=] binary_operation[name[flux] - call[name[np].transpose, parameter[name[flux]]]]
constant[Set negative fluxes to zero]
variable[ind] assign[=] compare[name[netflux] less[<] constant[0.0]]
call[name[netflux]][name[ind]] assign[=] constant[0.0]
return[name[netflux]] | keyword[def] identifier[to_netflux] ( identifier[flux] ):
literal[string]
identifier[netflux] = identifier[flux] - identifier[np] . identifier[transpose] ( identifier[flux] )
literal[string]
identifier[ind] =( identifier[netflux] < literal[int] )
identifier[netflux] [ identifier[ind] ]= literal[int]
keyword[return] identifier[netflux] | def to_netflux(flux):
"""Compute the netflux from the gross flux.
f_ij^{+}=max{0, f_ij-f_ji}
for all pairs i,j
Parameters
----------
flux : (M, M) ndarray
Matrix of flux values between pairs of states.
Returns
-------
netflux : (M, M) ndarray
Matrix of netflux values between pairs of states.
"""
netflux = flux - np.transpose(flux)
'Set negative fluxes to zero'
ind = netflux < 0.0
netflux[ind] = 0.0
return netflux |
def remove_from_model(self, remove_orphans=False):
    """Removes the reaction from a model.

    This removes all associations between a reaction the associated
    model, metabolites and genes.

    The change is reverted upon exit when using the model as a context.

    Parameters
    ----------
    remove_orphans : bool
        Remove orphaned genes and metabolites from the model as well
    """
    # Delegate to the owning model, which knows how to detach the
    # reaction from its metabolites and genes.
    model = self._model
    model.remove_reactions([self], remove_orphans=remove_orphans)
constant[Removes the reaction from a model.
This removes all associations between a reaction the associated
model, metabolites and genes.
The change is reverted upon exit when using the model as a context.
Parameters
----------
remove_orphans : bool
Remove orphaned genes and metabolites from the model as well
]
call[name[self]._model.remove_reactions, parameter[list[[<ast.Name object at 0x7da1b0002230>]]]] | keyword[def] identifier[remove_from_model] ( identifier[self] , identifier[remove_orphans] = keyword[False] ):
literal[string]
identifier[self] . identifier[_model] . identifier[remove_reactions] ([ identifier[self] ], identifier[remove_orphans] = identifier[remove_orphans] ) | def remove_from_model(self, remove_orphans=False):
"""Removes the reaction from a model.
This removes all associations between a reaction the associated
model, metabolites and genes.
The change is reverted upon exit when using the model as a context.
Parameters
----------
remove_orphans : bool
Remove orphaned genes and metabolites from the model as well
"""
self._model.remove_reactions([self], remove_orphans=remove_orphans) |
def proximal_step(self, gradf=None):
    """Compute proximal update (gradient descent + constraint).

    Variables are mapped back and forth between input and
    frequency domains.

    Parameters
    ----------
    gradf : ndarray, optional
        Gradient in the frequency domain. Computed via ``eval_grad()``
        when not supplied.

    Returns
    -------
    ndarray
        The frequency-domain gradient used for this step.
    """
    if gradf is None:
        gradf = self.eval_grad()
    # Gradient step in the frequency domain with step size 1/L
    # (L is presumably a Lipschitz-constant estimate -- confirm).
    # Slice assignment updates Vf in place, preserving the array object.
    self.Vf[:] = self.Yf - (1. / self.L) * gradf
    # Back to the spatial domain (inverse real FFT over the cri.axisN
    # axes) so the proximal operator can be applied there.
    V = sl.irfftn(self.Vf, self.cri.Nv, self.cri.axisN)
    self.X[:] = self.eval_proxop(V)
    # Keep the frequency-domain representation of X in sync.
    self.Xf = sl.rfftn(self.X, None, self.cri.axisN)
    return gradf
constant[Compute proximal update (gradient descent + constraint).
Variables are mapped back and forth between input and
frequency domains.
]
if compare[name[gradf] is constant[None]] begin[:]
variable[gradf] assign[=] call[name[self].eval_grad, parameter[]]
call[name[self].Vf][<ast.Slice object at 0x7da1b07f81f0>] assign[=] binary_operation[name[self].Yf - binary_operation[binary_operation[constant[1.0] / name[self].L] * name[gradf]]]
variable[V] assign[=] call[name[sl].irfftn, parameter[name[self].Vf, name[self].cri.Nv, name[self].cri.axisN]]
call[name[self].X][<ast.Slice object at 0x7da1b07f9de0>] assign[=] call[name[self].eval_proxop, parameter[name[V]]]
name[self].Xf assign[=] call[name[sl].rfftn, parameter[name[self].X, constant[None], name[self].cri.axisN]]
return[name[gradf]] | keyword[def] identifier[proximal_step] ( identifier[self] , identifier[gradf] = keyword[None] ):
literal[string]
keyword[if] identifier[gradf] keyword[is] keyword[None] :
identifier[gradf] = identifier[self] . identifier[eval_grad] ()
identifier[self] . identifier[Vf] [:]= identifier[self] . identifier[Yf] -( literal[int] / identifier[self] . identifier[L] )* identifier[gradf]
identifier[V] = identifier[sl] . identifier[irfftn] ( identifier[self] . identifier[Vf] , identifier[self] . identifier[cri] . identifier[Nv] , identifier[self] . identifier[cri] . identifier[axisN] )
identifier[self] . identifier[X] [:]= identifier[self] . identifier[eval_proxop] ( identifier[V] )
identifier[self] . identifier[Xf] = identifier[sl] . identifier[rfftn] ( identifier[self] . identifier[X] , keyword[None] , identifier[self] . identifier[cri] . identifier[axisN] )
keyword[return] identifier[gradf] | def proximal_step(self, gradf=None):
"""Compute proximal update (gradient descent + constraint).
Variables are mapped back and forth between input and
frequency domains.
"""
if gradf is None:
gradf = self.eval_grad() # depends on [control=['if'], data=['gradf']]
self.Vf[:] = self.Yf - 1.0 / self.L * gradf
V = sl.irfftn(self.Vf, self.cri.Nv, self.cri.axisN)
self.X[:] = self.eval_proxop(V)
self.Xf = sl.rfftn(self.X, None, self.cri.axisN)
return gradf |
def run_command(self):
    """Replication factor command, checks replication factor settings and compare it with
    min.isr in the cluster."""
    metadata = get_topic_partition_metadata(self.cluster_config.broker_list)
    offending_topics = _find_topics_with_wrong_rp(
        metadata,
        self.zk,
        self.args.default_min_isr,
    )
    # Any offending topic makes the whole check critical.
    if offending_topics:
        errcode = status_code.CRITICAL
    else:
        errcode = status_code.OK
    out = _prepare_output(offending_topics, self.args.verbose)
    return errcode, out
constant[Replication factor command, checks replication factor settings and compare it with
min.isr in the cluster.]
variable[topics] assign[=] call[name[get_topic_partition_metadata], parameter[name[self].cluster_config.broker_list]]
variable[topics_with_wrong_rf] assign[=] call[name[_find_topics_with_wrong_rp], parameter[name[topics], name[self].zk, name[self].args.default_min_isr]]
variable[errcode] assign[=] <ast.IfExp object at 0x7da1b07b1420>
variable[out] assign[=] call[name[_prepare_output], parameter[name[topics_with_wrong_rf], name[self].args.verbose]]
return[tuple[[<ast.Name object at 0x7da1b07b2860>, <ast.Name object at 0x7da1b07b2ec0>]]] | keyword[def] identifier[run_command] ( identifier[self] ):
literal[string]
identifier[topics] = identifier[get_topic_partition_metadata] ( identifier[self] . identifier[cluster_config] . identifier[broker_list] )
identifier[topics_with_wrong_rf] = identifier[_find_topics_with_wrong_rp] (
identifier[topics] ,
identifier[self] . identifier[zk] ,
identifier[self] . identifier[args] . identifier[default_min_isr] ,
)
identifier[errcode] = identifier[status_code] . identifier[OK] keyword[if] keyword[not] identifier[topics_with_wrong_rf] keyword[else] identifier[status_code] . identifier[CRITICAL]
identifier[out] = identifier[_prepare_output] ( identifier[topics_with_wrong_rf] , identifier[self] . identifier[args] . identifier[verbose] )
keyword[return] identifier[errcode] , identifier[out] | def run_command(self):
"""Replication factor command, checks replication factor settings and compare it with
min.isr in the cluster."""
topics = get_topic_partition_metadata(self.cluster_config.broker_list)
topics_with_wrong_rf = _find_topics_with_wrong_rp(topics, self.zk, self.args.default_min_isr)
errcode = status_code.OK if not topics_with_wrong_rf else status_code.CRITICAL
out = _prepare_output(topics_with_wrong_rf, self.args.verbose)
return (errcode, out) |
def check_loops_in_grpah(self, current=None, visited=None):
    '''
    Recursively verify that the field graph is acyclic.

    :param current: current node to check if visited (defaults to the root)
    :param visited: list of fields already seen on the path from the root
    :raise: KittyException if loop found
    '''
    # Avoid the mutable-default-argument pitfall: build a fresh list per call.
    if visited is None:
        visited = []
    if current in visited:
        path = ' -> '.join(v.get_name() for v in (visited + [current]))
        raise KittyException('loop detected in model: %s' % path)
    # Use an identity check so a falsy-but-valid node is not silently
    # replaced by the root (the old truthiness test did exactly that).
    if current is None:
        current = self._root
    # Recurse into every outgoing connection, extending the path.
    for conn in self._graph[current.hash()]:
        self.check_loops_in_grpah(conn.dst, visited + [conn.src])
constant[
:param current: current node to check if visited
:param visited: list of visited fields
:raise: KittyException if loop found
]
if compare[name[current] in name[visited]] begin[:]
variable[path] assign[=] call[constant[ -> ].join, parameter[<ast.GeneratorExp object at 0x7da207f03340>]]
<ast.Raise object at 0x7da207f03460>
variable[current] assign[=] <ast.IfExp object at 0x7da207f017e0>
for taget[name[conn]] in starred[call[name[self]._graph][call[name[current].hash, parameter[]]]] begin[:]
call[name[self].check_loops_in_grpah, parameter[name[conn].dst, binary_operation[name[visited] + list[[<ast.Attribute object at 0x7da207f00820>]]]]] | keyword[def] identifier[check_loops_in_grpah] ( identifier[self] , identifier[current] = keyword[None] , identifier[visited] =[]):
literal[string]
keyword[if] identifier[current] keyword[in] identifier[visited] :
identifier[path] = literal[string] . identifier[join] ( identifier[v] . identifier[get_name] () keyword[for] identifier[v] keyword[in] ( identifier[visited] +[ identifier[current] ]))
keyword[raise] identifier[KittyException] ( literal[string] % identifier[path] )
identifier[current] = identifier[current] keyword[if] identifier[current] keyword[else] identifier[self] . identifier[_root]
keyword[for] identifier[conn] keyword[in] identifier[self] . identifier[_graph] [ identifier[current] . identifier[hash] ()]:
identifier[self] . identifier[check_loops_in_grpah] ( identifier[conn] . identifier[dst] , identifier[visited] +[ identifier[conn] . identifier[src] ]) | def check_loops_in_grpah(self, current=None, visited=[]):
"""
:param current: current node to check if visited
:param visited: list of visited fields
:raise: KittyException if loop found
"""
if current in visited:
path = ' -> '.join((v.get_name() for v in visited + [current]))
raise KittyException('loop detected in model: %s' % path) # depends on [control=['if'], data=['current', 'visited']]
current = current if current else self._root
for conn in self._graph[current.hash()]:
self.check_loops_in_grpah(conn.dst, visited + [conn.src]) # depends on [control=['for'], data=['conn']] |
def init_db_conn(connection_name, HOSTS=None):
    """
    Register an Elasticsearch client in the shared pool for each
    connection string defined in the configuration file.
    """
    # Wrap the raw elasticsearch client and publish it in the pool
    # under the configured connection name.
    client = elasticsearch.Elasticsearch(hosts=HOSTS)
    el_pool.connections[connection_name] = ElasticSearchClient(client)
constant[
Initialize a redis connection by each connection string
defined in the configuration file
]
variable[el] assign[=] call[name[elasticsearch].Elasticsearch, parameter[]]
call[name[el_pool].connections][name[connection_name]] assign[=] call[name[ElasticSearchClient], parameter[name[el]]] | keyword[def] identifier[init_db_conn] ( identifier[connection_name] , identifier[HOSTS] = keyword[None] ):
literal[string]
identifier[el] = identifier[elasticsearch] . identifier[Elasticsearch] ( identifier[hosts] = identifier[HOSTS] )
identifier[el_pool] . identifier[connections] [ identifier[connection_name] ]= identifier[ElasticSearchClient] ( identifier[el] ) | def init_db_conn(connection_name, HOSTS=None):
"""
Initialize a redis connection by each connection string
defined in the configuration file
"""
el = elasticsearch.Elasticsearch(hosts=HOSTS)
el_pool.connections[connection_name] = ElasticSearchClient(el) |
def show_as(**mappings):
    """
    Show a set of request and/or response fields in logs using a different key.

    Example:

        @show_as(id="foo_id")
        def create_foo():
            return Foo(id=uuid4())
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Record the rename mapping on the per-request context so the
            # logging layer applies it to both request and response fields.
            g.show_request_fields = g.show_response_fields = mappings
            return func(*args, **kwargs)

        return wrapper

    return decorator
constant[
Show a set of request and/or response fields in logs using a different key.
Example:
@show_as(id="foo_id")
def create_foo():
return Foo(id=uuid4())
]
def function[decorator, parameter[func]]:
def function[wrapper, parameter[]]:
name[g].show_request_fields assign[=] name[mappings]
name[g].show_response_fields assign[=] name[mappings]
return[call[name[func], parameter[<ast.Starred object at 0x7da1b0c64af0>]]]
return[name[wrapper]]
return[name[decorator]] | keyword[def] identifier[show_as] (** identifier[mappings] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[func] ):
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[g] . identifier[show_request_fields] = identifier[mappings]
identifier[g] . identifier[show_response_fields] = identifier[mappings]
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapper]
keyword[return] identifier[decorator] | def show_as(**mappings):
"""
Show a set of request and/or response fields in logs using a different key.
Example:
@show_as(id="foo_id")
def create_foo():
return Foo(id=uuid4())
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
g.show_request_fields = mappings
g.show_response_fields = mappings
return func(*args, **kwargs)
return wrapper
return decorator |
async def _handle_home(self, request: Request) -> Response:
    """Home page request handler.

    Renders a minimal HTML landing page that links to the /metrics
    endpoint.

    :param request: the incoming HTTP request (not inspected here).
    :return: an HTML response titled with the application name.
    """
    # Combine name and description for the page title when a
    # description was configured.
    if self.description:
        title = f'{self.name} - {self.description}'
    else:
        title = self.name
    text = dedent(
        f'''<!DOCTYPE html>
        <html>
          <head>
            <title>{title}</title>
          </head>
          <body>
            <h1>{title}</h1>
            <p>
              Metric are exported at the
              <a href="/metrics">/metrics</a> endpoint.
            </p>
          </body>
        </html>
        ''')
    return Response(content_type='text/html', text=text)
literal[string]
keyword[if] identifier[self] . identifier[description] :
identifier[title] = literal[string]
keyword[else] :
identifier[title] = identifier[self] . identifier[name]
identifier[text] = identifier[dedent] (
literal[string] )
keyword[return] identifier[Response] ( identifier[content_type] = literal[string] , identifier[text] = identifier[text] ) | async def _handle_home(self, request: Request) -> Response:
"""Home page request handler."""
if self.description:
title = f'{self.name} - {self.description}' # depends on [control=['if'], data=[]]
else:
title = self.name
text = dedent(f'<!DOCTYPE html>\n <html>\n <head>\n <title>{title}</title>\n </head>\n <body>\n <h1>{title}</h1>\n <p>\n Metric are exported at the\n <a href="/metrics">/metrics</a> endpoint.\n </p>\n </body>\n </html>\n ')
return Response(content_type='text/html', text=text) |
def get_value_with_source(self, layer=None):
    """Returns a tuple of the value's source and the value at the specified
    layer. If no layer is specified then the outer layer is used.

    Parameters
    ----------
    layer : str
        Name of the layer to use. If None then the outermost where the value
        exists will be used.

    Raises
    ------
    KeyError
        If the value is not set for the specified layer
    """
    if layer:
        return self._values[layer]
    # Search from the outermost (last-registered) layer inwards.
    # A distinct loop variable is used so the KeyError below reports the
    # layer that was actually requested; the old code shadowed `layer`
    # and raised with whatever layer happened to be tried last.
    for candidate in reversed(self._layers):
        if candidate in self._values:
            return self._values[candidate]
    raise KeyError(layer)
constant[Returns a tuple of the value's source and the value at the specified
layer. If no layer is specified then the outer layer is used.
Parameters
----------
layer : str
Name of the layer to use. If None then the outermost where the value
exists will be used.
Raises
------
KeyError
If the value is not set for the specified layer
]
if name[layer] begin[:]
return[call[name[self]._values][name[layer]]]
for taget[name[layer]] in starred[call[name[reversed], parameter[name[self]._layers]]] begin[:]
if compare[name[layer] in name[self]._values] begin[:]
return[call[name[self]._values][name[layer]]]
<ast.Raise object at 0x7da20c6aa290> | keyword[def] identifier[get_value_with_source] ( identifier[self] , identifier[layer] = keyword[None] ):
literal[string]
keyword[if] identifier[layer] :
keyword[return] identifier[self] . identifier[_values] [ identifier[layer] ]
keyword[for] identifier[layer] keyword[in] identifier[reversed] ( identifier[self] . identifier[_layers] ):
keyword[if] identifier[layer] keyword[in] identifier[self] . identifier[_values] :
keyword[return] identifier[self] . identifier[_values] [ identifier[layer] ]
keyword[raise] identifier[KeyError] ( identifier[layer] ) | def get_value_with_source(self, layer=None):
"""Returns a tuple of the value's source and the value at the specified
layer. If no layer is specified then the outer layer is used.
Parameters
----------
layer : str
Name of the layer to use. If None then the outermost where the value
exists will be used.
Raises
------
KeyError
If the value is not set for the specified layer
"""
if layer:
return self._values[layer] # depends on [control=['if'], data=[]]
for layer in reversed(self._layers):
if layer in self._values:
return self._values[layer] # depends on [control=['if'], data=['layer']] # depends on [control=['for'], data=['layer']]
raise KeyError(layer) |
def _handle_parameter(self, default):
    """Handle a case where a parameter is at the head of the tokens.

    *default* is the value to use if no parameter name is defined.
    """
    key = None
    showkey = False  # becomes True only for explicitly named parameters
    self._push()
    while self._tokens:
        token = self._tokens.pop()
        if isinstance(token, tokens.TemplateParamEquals):
            # Everything written so far was the parameter name, not the
            # value; bank it as the key and start a fresh buffer for the
            # value.
            key = self._pop()
            showkey = True
            self._push()
        elif isinstance(token, (tokens.TemplateParamSeparator,
                                tokens.TemplateClose)):
            # The separator/close token belongs to the caller's loop, so
            # push it back before returning.
            self._tokens.append(token)
            value = self._pop()
            if key is None:
                # Unnamed parameter: fall back to its positional index.
                key = Wikicode(SmartList([Text(str(default))]))
            return Parameter(key, value, showkey)
        else:
            self._write(self._handle_token(token))
    raise ParserError("_handle_parameter() missed a close token")
constant[Handle a case where a parameter is at the head of the tokens.
*default* is the value to use if no parameter name is defined.
]
variable[key] assign[=] constant[None]
variable[showkey] assign[=] constant[False]
call[name[self]._push, parameter[]]
while name[self]._tokens begin[:]
variable[token] assign[=] call[name[self]._tokens.pop, parameter[]]
if call[name[isinstance], parameter[name[token], name[tokens].TemplateParamEquals]] begin[:]
variable[key] assign[=] call[name[self]._pop, parameter[]]
variable[showkey] assign[=] constant[True]
call[name[self]._push, parameter[]]
<ast.Raise object at 0x7da20c794490> | keyword[def] identifier[_handle_parameter] ( identifier[self] , identifier[default] ):
literal[string]
identifier[key] = keyword[None]
identifier[showkey] = keyword[False]
identifier[self] . identifier[_push] ()
keyword[while] identifier[self] . identifier[_tokens] :
identifier[token] = identifier[self] . identifier[_tokens] . identifier[pop] ()
keyword[if] identifier[isinstance] ( identifier[token] , identifier[tokens] . identifier[TemplateParamEquals] ):
identifier[key] = identifier[self] . identifier[_pop] ()
identifier[showkey] = keyword[True]
identifier[self] . identifier[_push] ()
keyword[elif] identifier[isinstance] ( identifier[token] ,( identifier[tokens] . identifier[TemplateParamSeparator] ,
identifier[tokens] . identifier[TemplateClose] )):
identifier[self] . identifier[_tokens] . identifier[append] ( identifier[token] )
identifier[value] = identifier[self] . identifier[_pop] ()
keyword[if] identifier[key] keyword[is] keyword[None] :
identifier[key] = identifier[Wikicode] ( identifier[SmartList] ([ identifier[Text] ( identifier[str] ( identifier[default] ))]))
keyword[return] identifier[Parameter] ( identifier[key] , identifier[value] , identifier[showkey] )
keyword[else] :
identifier[self] . identifier[_write] ( identifier[self] . identifier[_handle_token] ( identifier[token] ))
keyword[raise] identifier[ParserError] ( literal[string] ) | def _handle_parameter(self, default):
"""Handle a case where a parameter is at the head of the tokens.
*default* is the value to use if no parameter name is defined.
"""
key = None
showkey = False
self._push()
while self._tokens:
token = self._tokens.pop()
if isinstance(token, tokens.TemplateParamEquals):
key = self._pop()
showkey = True
self._push() # depends on [control=['if'], data=[]]
elif isinstance(token, (tokens.TemplateParamSeparator, tokens.TemplateClose)):
self._tokens.append(token)
value = self._pop()
if key is None:
key = Wikicode(SmartList([Text(str(default))])) # depends on [control=['if'], data=['key']]
return Parameter(key, value, showkey) # depends on [control=['if'], data=[]]
else:
self._write(self._handle_token(token)) # depends on [control=['while'], data=[]]
raise ParserError('_handle_parameter() missed a close token') |
def intersection(self, other):
    """
    Create a new DateRange representing the maximal range enclosed by this range and other
    """
    # Openness of the new start: take the flag of the later start (a
    # None start means unbounded, so the other range's flag wins); on a
    # tie the endpoint is open if either endpoint is open.
    if self.start is None:
        startopen = other.startopen
    elif other.start is None:
        startopen = self.startopen
    elif self.start < other.start:
        startopen = other.startopen
    elif self.start > other.start:
        startopen = self.startopen
    else:
        startopen = self.startopen or other.startopen

    # Symmetrically, the new end takes the flag of the earlier end.
    if self.end is None:
        endopen = other.endopen
    elif other.end is None:
        endopen = self.endopen
    elif self.end > other.end:
        endopen = other.endopen
    elif self.end < other.end:
        endopen = self.endopen
    else:
        endopen = self.endopen or other.endopen

    # None bounds are unbounded, so the other range's bound carries over.
    if other.start is None:
        new_start = self.start
    elif self.start is None:
        new_start = other.start
    else:
        new_start = max(self.start, other.start)

    if other.end is None:
        new_end = self.end
    elif self.end is None:
        new_end = other.end
    else:
        new_end = min(self.end, other.end)

    interval = INTERVAL_LOOKUP[(startopen, endopen)]
    return DateRange(new_start, new_end, interval)
constant[
Create a new DateRange representing the maximal range enclosed by this range and other
]
variable[startopen] assign[=] <ast.IfExp object at 0x7da20c76ca00>
variable[endopen] assign[=] <ast.IfExp object at 0x7da1b2346770>
variable[new_start] assign[=] <ast.IfExp object at 0x7da1b23452d0>
variable[new_end] assign[=] <ast.IfExp object at 0x7da1b2344340>
variable[interval] assign[=] call[name[INTERVAL_LOOKUP]][tuple[[<ast.Name object at 0x7da1b2344ca0>, <ast.Name object at 0x7da1b2346e00>]]]
return[call[name[DateRange], parameter[name[new_start], name[new_end], name[interval]]]] | keyword[def] identifier[intersection] ( identifier[self] , identifier[other] ):
literal[string]
identifier[startopen] = identifier[other] . identifier[startopen] keyword[if] identifier[self] . identifier[start] keyword[is] keyword[None] keyword[else] identifier[self] . identifier[startopen] keyword[if] identifier[other] . identifier[start] keyword[is] keyword[None] keyword[else] identifier[other] . identifier[startopen] keyword[if] identifier[self] . identifier[start] < identifier[other] . identifier[start] keyword[else] identifier[self] . identifier[startopen] keyword[if] identifier[self] . identifier[start] > identifier[other] . identifier[start] keyword[else] ( identifier[self] . identifier[startopen] keyword[or] identifier[other] . identifier[startopen] )
identifier[endopen] = identifier[other] . identifier[endopen] keyword[if] identifier[self] . identifier[end] keyword[is] keyword[None] keyword[else] identifier[self] . identifier[endopen] keyword[if] identifier[other] . identifier[end] keyword[is] keyword[None] keyword[else] identifier[other] . identifier[endopen] keyword[if] identifier[self] . identifier[end] > identifier[other] . identifier[end] keyword[else] identifier[self] . identifier[endopen] keyword[if] identifier[self] . identifier[end] < identifier[other] . identifier[end] keyword[else] ( identifier[self] . identifier[endopen] keyword[or] identifier[other] . identifier[endopen] )
identifier[new_start] = identifier[self] . identifier[start] keyword[if] identifier[other] . identifier[start] keyword[is] keyword[None] keyword[else] identifier[other] . identifier[start] keyword[if] identifier[self] . identifier[start] keyword[is] keyword[None] keyword[else] identifier[max] ( identifier[self] . identifier[start] , identifier[other] . identifier[start] )
identifier[new_end] = identifier[self] . identifier[end] keyword[if] identifier[other] . identifier[end] keyword[is] keyword[None] keyword[else] identifier[other] . identifier[end] keyword[if] identifier[self] . identifier[end] keyword[is] keyword[None] keyword[else] identifier[min] ( identifier[self] . identifier[end] , identifier[other] . identifier[end] )
identifier[interval] = identifier[INTERVAL_LOOKUP] [( identifier[startopen] , identifier[endopen] )]
keyword[return] identifier[DateRange] ( identifier[new_start] , identifier[new_end] , identifier[interval] ) | def intersection(self, other):
"""
Create a new DateRange representing the maximal range enclosed by this range and other
"""
startopen = other.startopen if self.start is None else self.startopen if other.start is None else other.startopen if self.start < other.start else self.startopen if self.start > other.start else self.startopen or other.startopen
endopen = other.endopen if self.end is None else self.endopen if other.end is None else other.endopen if self.end > other.end else self.endopen if self.end < other.end else self.endopen or other.endopen
new_start = self.start if other.start is None else other.start if self.start is None else max(self.start, other.start)
new_end = self.end if other.end is None else other.end if self.end is None else min(self.end, other.end)
interval = INTERVAL_LOOKUP[startopen, endopen]
return DateRange(new_start, new_end, interval) |
def _update_record(self, identifier, rtype=None, name=None, content=None):
    """Update a record in the local zone file.

    Returns `False` if no matching record is found, `True` when the
    update was applied.
    """
    updated = False
    # TODO: some providers allow content-based updates without supplying an
    # ID, so `identifier` is optional here. When it is absent, try to
    # resolve it uniquely from the record type and name.
    if not identifier and rtype and name:
        matches = self._list_records(rtype, name)
        if len(matches) == 1:
            identifier = matches[0]["id"]
    if identifier and content:
        # `autosave=True` persists the zone back to disk on context exit.
        with localzone.manage(self.filename, self.origin, autosave=True) as zone:
            updated = bool(zone.update_record(identifier, content))  # pylint: disable=no-member
    LOGGER.debug("update_record: %s", updated)
    return updated
constant[
Update a record. Returns `False` if no matching record is found.
]
variable[result] assign[=] constant[False]
if <ast.BoolOp object at 0x7da1b1d35d50> begin[:]
variable[records] assign[=] call[name[self]._list_records, parameter[name[rtype], name[name]]]
if compare[call[name[len], parameter[name[records]]] equal[==] constant[1]] begin[:]
variable[identifier] assign[=] call[call[name[records]][constant[0]]][constant[id]]
if <ast.BoolOp object at 0x7da1b1d35f30> begin[:]
with call[name[localzone].manage, parameter[name[self].filename, name[self].origin]] begin[:]
if call[name[zone].update_record, parameter[name[identifier], name[content]]] begin[:]
variable[result] assign[=] constant[True]
call[name[LOGGER].debug, parameter[constant[update_record: %s], name[result]]]
return[name[result]] | keyword[def] identifier[_update_record] ( identifier[self] , identifier[identifier] , identifier[rtype] = keyword[None] , identifier[name] = keyword[None] , identifier[content] = keyword[None] ):
literal[string]
identifier[result] = keyword[False]
keyword[if] keyword[not] identifier[identifier] keyword[and] identifier[rtype] keyword[and] identifier[name] :
identifier[records] = identifier[self] . identifier[_list_records] ( identifier[rtype] , identifier[name] )
keyword[if] identifier[len] ( identifier[records] )== literal[int] :
identifier[identifier] = identifier[records] [ literal[int] ][ literal[string] ]
keyword[if] identifier[identifier] keyword[and] identifier[content] :
keyword[with] identifier[localzone] . identifier[manage] ( identifier[self] . identifier[filename] , identifier[self] . identifier[origin] , identifier[autosave] = keyword[True] ) keyword[as] identifier[zone] :
keyword[if] identifier[zone] . identifier[update_record] ( identifier[identifier] , identifier[content] ):
identifier[result] = keyword[True]
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[result] )
keyword[return] identifier[result] | def _update_record(self, identifier, rtype=None, name=None, content=None):
"""
Update a record. Returns `False` if no matching record is found.
"""
result = False
# TODO: some providers allow content-based updates without supplying an
# ID, and therefore `identifier` is here optional. If we don't receive
# an ID, look it up.
if not identifier and rtype and name:
records = self._list_records(rtype, name)
if len(records) == 1:
identifier = records[0]['id'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if identifier and content:
with localzone.manage(self.filename, self.origin, autosave=True) as zone:
if zone.update_record(identifier, content): # pylint: disable=no-member
result = True # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['zone']] # depends on [control=['if'], data=[]]
LOGGER.debug('update_record: %s', result)
return result |
def hash_sha256(buf):
    """AuthenticationHelper.hash

    Return the SHA-256 digest of *buf* as a zero-padded, 64-character
    lowercase hex string.
    """
    digest = hashlib.sha256(buf).hexdigest()
    # Left-pad with '0' to a fixed width of 64 hex characters.
    return digest.rjust(64, '0')
constant[AuthenticationHelper.hash]
variable[a] assign[=] call[call[name[hashlib].sha256, parameter[name[buf]]].hexdigest, parameter[]]
return[binary_operation[binary_operation[binary_operation[constant[64] - call[name[len], parameter[name[a]]]] * constant[0]] + name[a]]] | keyword[def] identifier[hash_sha256] ( identifier[buf] ):
literal[string]
identifier[a] = identifier[hashlib] . identifier[sha256] ( identifier[buf] ). identifier[hexdigest] ()
keyword[return] ( literal[int] - identifier[len] ( identifier[a] ))* literal[string] + identifier[a] | def hash_sha256(buf):
"""AuthenticationHelper.hash"""
a = hashlib.sha256(buf).hexdigest()
return (64 - len(a)) * '0' + a |
def percent_point(self, U):
    """Given a cumulated distribution value, returns a value in original space.

    Inverts the fitted Gaussian CDF (the "percent point" / quantile
    function).

    Arguments:
        U: `np.ndarray` of shape (n, 1) and values in [0,1]

    Returns:
        `np.ndarray`: Estimated values in original space.
    """
    self.check_fit()
    loc, scale = self.mean, self.std
    return norm.ppf(U, loc=loc, scale=scale)
constant[Given a cumulated distribution value, returns a value in original space.
Arguments:
U: `np.ndarray` of shape (n, 1) and values in [0,1]
Returns:
`np.ndarray`: Estimated values in original space.
]
call[name[self].check_fit, parameter[]]
return[call[name[norm].ppf, parameter[name[U]]]] | keyword[def] identifier[percent_point] ( identifier[self] , identifier[U] ):
literal[string]
identifier[self] . identifier[check_fit] ()
keyword[return] identifier[norm] . identifier[ppf] ( identifier[U] , identifier[loc] = identifier[self] . identifier[mean] , identifier[scale] = identifier[self] . identifier[std] ) | def percent_point(self, U):
"""Given a cumulated distribution value, returns a value in original space.
Arguments:
U: `np.ndarray` of shape (n, 1) and values in [0,1]
Returns:
`np.ndarray`: Estimated values in original space.
"""
self.check_fit()
return norm.ppf(U, loc=self.mean, scale=self.std) |
def get_info_from_service(service, zconf):
    """Resolve a mDNS service name to its ServiceInfo via *zconf*.

    Returns None when resolution fails or the service is unknown.
    """
    info = None
    try:
        info = zconf.get_service_info('_googlecast._tcp.local.',
                                      service)
        if info:
            _LOGGER.debug(
                "get_info_from_service resolved service %s to service_info %s",
                service, info)
    except IOError:
        # Best-effort lookup: a socket error during resolution simply
        # means the service could not be resolved.
        pass
    return info
constant[ Resolve service_info from service. ]
variable[service_info] assign[=] constant[None]
<ast.Try object at 0x7da18bc71f90>
return[name[service_info]] | keyword[def] identifier[get_info_from_service] ( identifier[service] , identifier[zconf] ):
literal[string]
identifier[service_info] = keyword[None]
keyword[try] :
identifier[service_info] = identifier[zconf] . identifier[get_service_info] ( literal[string] ,
identifier[service] )
keyword[if] identifier[service_info] :
identifier[_LOGGER] . identifier[debug] (
literal[string] ,
identifier[service] , identifier[service_info] )
keyword[except] identifier[IOError] :
keyword[pass]
keyword[return] identifier[service_info] | def get_info_from_service(service, zconf):
""" Resolve service_info from service. """
service_info = None
try:
service_info = zconf.get_service_info('_googlecast._tcp.local.', service)
if service_info:
_LOGGER.debug('get_info_from_service resolved service %s to service_info %s', service, service_info) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except IOError:
pass # depends on [control=['except'], data=[]]
return service_info |
def mod(cmd, params):
    """
    Mod management command
    rqalpha mod list \n
    rqalpha mod install xxx \n
    rqalpha mod uninstall xxx \n
    rqalpha mod enable xxx \n
    rqalpha mod disable xxx \n

    :param cmd: sub-command name; one of "list", "install", "uninstall",
        "enable" or "disable".
    :param params: extra command-line arguments forwarded to the handler.
    """
    # Each nested function below is one sub-command handler.  Dispatch
    # happens at the bottom via ``locals()[cmd](params)``, so the handler
    # names must match the accepted `cmd` strings exactly -- which is why
    # ``list`` deliberately shadows the builtin here.
    def list(params):
        """
        List all mod configuration
        """
        from tabulate import tabulate
        from rqalpha.utils.config import get_mod_conf
        mod_config = get_mod_conf()
        table = []
        # One row per mod: [name, "enabled"/"disabled"].
        for mod_name, mod in six.iteritems(mod_config['mod']):
            table.append([
                mod_name,
                ("enabled" if mod['enabled'] else "disabled")
            ])
        headers = [
            "name",
            "status"
        ]
        six.print_(tabulate(table, headers=headers, tablefmt="psql"))
        six.print_("You can use `rqalpha mod list/install/uninstall/enable/disable` to manage your mods")
    def install(params):
        """
        Install third-party Mod
        """
        # pip >= 10 moved its internals under `pip._internal`; fall back
        # to the pre-10 import layout when that fails.
        try:
            from pip._internal import main as pip_main
            from pip._internal.commands.install import InstallCommand
        except ImportError:
            from pip import main as pip_main
            from pip.commands.install import InstallCommand
        params = [param for param in params]
        # Let pip's own parser split options from the mod names to install.
        options, mod_list = InstallCommand().parse_args(params)
        mod_list = [mod_name for mod_name in mod_list if mod_name != "."]
        params = ["install"] + params
        # Normalize each requested mod name to the `rqalpha_mod_` prefix
        # in-place in the pip argument list; system mods are refused.
        for mod_name in mod_list:
            mod_name_index = params.index(mod_name)
            if mod_name.startswith("rqalpha_mod_sys_"):
                six.print_('System Mod can not be installed or uninstalled')
                return
            if "rqalpha_mod_" in mod_name:
                lib_name = mod_name
            else:
                lib_name = "rqalpha_mod_" + mod_name
            params[mod_name_index] = lib_name
        # Install Mod
        installed_result = pip_main(params)
        # Export config
        from rqalpha.utils.config import load_yaml, user_mod_conf_path
        user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}}
        if installed_result == 0:
            # A zero exit status from pip means the install succeeded.
            if len(mod_list) == 0:
                # Local-development install (`pip install -e .`).  The
                # (Chinese) note below explains the requirements: run the
                # command from the custom Mod's root directory, the Mod
                # must ship a `setup.py`, and its package / import names
                # must follow the `rqalpha-mod-*` / `rqalpha_mod_*`
                # naming convention.
                """
                主要是方便 `pip install -e .` 这种方式 本地调试 Mod 使用,需要满足以下条件:
                1. `rqalpha mod install -e .` 命令是在对应 自定义 Mod 的根目录下
                2. 该 Mod 必须包含 `setup.py` 文件(否则也不能正常的 `pip install -e .` 来安装)
                3. 该 Mod 包名必须按照 RQAlpha 的规范来命名,具体规则如下
                * 必须以 `rqalpha-mod-` 来开头,比如 `rqalpha-mod-xxx-yyy`
                * 对应import的库名必须要 `rqalpha_mod_` 来开头,并且需要和包名后半部分一致,但是 `-` 需要替换为 `_`, 比如 `rqalpha_mod_xxx_yyy`
                """
                mod_name = _detect_package_name_from_dir(params)
                mod_name = mod_name.replace("-", "_").replace("rqalpha_mod_", "")
                mod_list.append(mod_name)
            # Record every freshly installed mod in the user config,
            # disabled by default.
            for mod_name in mod_list:
                if "rqalpha_mod_" in mod_name:
                    mod_name = mod_name.replace("rqalpha_mod_", "")
                if "==" in mod_name:
                    # Strip a pinned version ("name==1.0") down to the name.
                    mod_name = mod_name.split('==')[0]
                user_conf['mod'][mod_name] = {}
                user_conf['mod'][mod_name]['enabled'] = False
            dump_config(user_mod_conf_path(), user_conf)
        return installed_result
    def uninstall(params):
        """
        Uninstall third-party Mod
        """
        try:
            from pip._internal import main as pip_main
            from pip._internal.commands.uninstall import UninstallCommand
        except ImportError:
            # be compatible with pip < 10.0
            from pip import main as pip_main
            from pip.commands.uninstall import UninstallCommand
        params = [param for param in params]
        # Let pip's own parser split options from the mod names to remove.
        options, mod_list = UninstallCommand().parse_args(params)
        params = ["uninstall"] + params
        # Normalize names to the `rqalpha_mod_` prefix; system mods refused.
        for mod_name in mod_list:
            mod_name_index = params.index(mod_name)
            if mod_name.startswith("rqalpha_mod_sys_"):
                six.print_('System Mod can not be installed or uninstalled')
                return
            if "rqalpha_mod_" in mod_name:
                lib_name = mod_name
            else:
                lib_name = "rqalpha_mod_" + mod_name
            params[mod_name_index] = lib_name
        # Uninstall Mod
        uninstalled_result = pip_main(params)
        # Remove Mod Config
        from rqalpha.utils.config import user_mod_conf_path, load_yaml
        user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}}
        for mod_name in mod_list:
            if "rqalpha_mod_" in mod_name:
                mod_name = mod_name.replace("rqalpha_mod_", "")
            del user_conf['mod'][mod_name]
        dump_config(user_mod_conf_path(), user_conf)
        return uninstalled_result
    def enable(params):
        """
        enable mod
        """
        mod_name = params[0]
        if "rqalpha_mod_" in mod_name:
            mod_name = mod_name.replace("rqalpha_mod_", "")
        # check whether is installed
        module_name = "rqalpha_mod_" + mod_name
        if module_name.startswith("rqalpha_mod_sys_"):
            # System mods live inside the rqalpha package itself.
            module_name = "rqalpha.mod." + module_name
        try:
            import_module(module_name)
        except ImportError:
            # Not importable yet -- try installing it first; bail out on
            # failure so we never enable a missing mod.
            installed_result = install([module_name])
            if installed_result != 0:
                return
        from rqalpha.utils.config import user_mod_conf_path, load_yaml
        user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}}
        try:
            user_conf['mod'][mod_name]['enabled'] = True
        except KeyError:
            # First time this mod appears in the user config.
            user_conf['mod'][mod_name] = {'enabled': True}
        dump_config(user_mod_conf_path(), user_conf)
    def disable(params):
        """
        disable mod
        """
        mod_name = params[0]
        if "rqalpha_mod_" in mod_name:
            mod_name = mod_name.replace("rqalpha_mod_", "")
        from rqalpha.utils.config import user_mod_conf_path, load_yaml
        user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}}
        try:
            user_conf['mod'][mod_name]['enabled'] = False
        except KeyError:
            # First time this mod appears in the user config.
            user_conf['mod'][mod_name] = {'enabled': False}
        dump_config(user_mod_conf_path(), user_conf)
    # Dispatch to the handler whose name equals `cmd`.
    locals()[cmd](params)
constant[
Mod management command
rqalpha mod list
rqalpha mod install xxx
rqalpha mod uninstall xxx
rqalpha mod enable xxx
rqalpha mod disable xxx
]
def function[list, parameter[params]]:
constant[
List all mod configuration
]
from relative_module[tabulate] import module[tabulate]
from relative_module[rqalpha.utils.config] import module[get_mod_conf]
variable[mod_config] assign[=] call[name[get_mod_conf], parameter[]]
variable[table] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b2114430>, <ast.Name object at 0x7da1b21140d0>]]] in starred[call[name[six].iteritems, parameter[call[name[mod_config]][constant[mod]]]]] begin[:]
call[name[table].append, parameter[list[[<ast.Name object at 0x7da1b2116a40>, <ast.IfExp object at 0x7da1b21168f0>]]]]
variable[headers] assign[=] list[[<ast.Constant object at 0x7da1b2116a10>, <ast.Constant object at 0x7da1b2116c50>]]
call[name[six].print_, parameter[call[name[tabulate], parameter[name[table]]]]]
call[name[six].print_, parameter[constant[You can use `rqalpha mod list/install/uninstall/enable/disable` to manage your mods]]]
def function[install, parameter[params]]:
constant[
Install third-party Mod
]
<ast.Try object at 0x7da1b2116f50>
variable[params] assign[=] <ast.ListComp object at 0x7da1b2114ca0>
<ast.Tuple object at 0x7da1b2115360> assign[=] call[call[name[InstallCommand], parameter[]].parse_args, parameter[name[params]]]
variable[mod_list] assign[=] <ast.ListComp object at 0x7da1b2114c10>
variable[params] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b21173d0>]] + name[params]]
for taget[name[mod_name]] in starred[name[mod_list]] begin[:]
variable[mod_name_index] assign[=] call[name[params].index, parameter[name[mod_name]]]
if call[name[mod_name].startswith, parameter[constant[rqalpha_mod_sys_]]] begin[:]
call[name[six].print_, parameter[constant[System Mod can not be installed or uninstalled]]]
return[None]
if compare[constant[rqalpha_mod_] in name[mod_name]] begin[:]
variable[lib_name] assign[=] name[mod_name]
call[name[params]][name[mod_name_index]] assign[=] name[lib_name]
variable[installed_result] assign[=] call[name[pip_main], parameter[name[params]]]
from relative_module[rqalpha.utils.config] import module[load_yaml], module[user_mod_conf_path]
variable[user_conf] assign[=] <ast.IfExp object at 0x7da1b2116ad0>
if compare[name[installed_result] equal[==] constant[0]] begin[:]
if compare[call[name[len], parameter[name[mod_list]]] equal[==] constant[0]] begin[:]
constant[
主要是方便 `pip install -e .` 这种方式 本地调试 Mod 使用,需要满足以下条件:
1. `rqalpha mod install -e .` 命令是在对应 自定义 Mod 的根目录下
2. 该 Mod 必须包含 `setup.py` 文件(否则也不能正常的 `pip install -e .` 来安装)
3. 该 Mod 包名必须按照 RQAlpha 的规范来命名,具体规则如下
* 必须以 `rqalpha-mod-` 来开头,比如 `rqalpha-mod-xxx-yyy`
* 对应import的库名必须要 `rqalpha_mod_` 来开头,并且需要和包名后半部分一致,但是 `-` 需要替换为 `_`, 比如 `rqalpha_mod_xxx_yyy`
]
variable[mod_name] assign[=] call[name[_detect_package_name_from_dir], parameter[name[params]]]
variable[mod_name] assign[=] call[call[name[mod_name].replace, parameter[constant[-], constant[_]]].replace, parameter[constant[rqalpha_mod_], constant[]]]
call[name[mod_list].append, parameter[name[mod_name]]]
for taget[name[mod_name]] in starred[name[mod_list]] begin[:]
if compare[constant[rqalpha_mod_] in name[mod_name]] begin[:]
variable[mod_name] assign[=] call[name[mod_name].replace, parameter[constant[rqalpha_mod_], constant[]]]
if compare[constant[==] in name[mod_name]] begin[:]
variable[mod_name] assign[=] call[call[name[mod_name].split, parameter[constant[==]]]][constant[0]]
call[call[name[user_conf]][constant[mod]]][name[mod_name]] assign[=] dictionary[[], []]
call[call[call[name[user_conf]][constant[mod]]][name[mod_name]]][constant[enabled]] assign[=] constant[False]
call[name[dump_config], parameter[call[name[user_mod_conf_path], parameter[]], name[user_conf]]]
return[name[installed_result]]
def function[uninstall, parameter[params]]:
constant[
Uninstall third-party Mod
]
<ast.Try object at 0x7da1b2114eb0>
variable[params] assign[=] <ast.ListComp object at 0x7da1b21169e0>
<ast.Tuple object at 0x7da1b2114400> assign[=] call[call[name[UninstallCommand], parameter[]].parse_args, parameter[name[params]]]
variable[params] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b2116d40>]] + name[params]]
for taget[name[mod_name]] in starred[name[mod_list]] begin[:]
variable[mod_name_index] assign[=] call[name[params].index, parameter[name[mod_name]]]
if call[name[mod_name].startswith, parameter[constant[rqalpha_mod_sys_]]] begin[:]
call[name[six].print_, parameter[constant[System Mod can not be installed or uninstalled]]]
return[None]
if compare[constant[rqalpha_mod_] in name[mod_name]] begin[:]
variable[lib_name] assign[=] name[mod_name]
call[name[params]][name[mod_name_index]] assign[=] name[lib_name]
variable[uninstalled_result] assign[=] call[name[pip_main], parameter[name[params]]]
from relative_module[rqalpha.utils.config] import module[user_mod_conf_path], module[load_yaml]
variable[user_conf] assign[=] <ast.IfExp object at 0x7da1b212e6e0>
for taget[name[mod_name]] in starred[name[mod_list]] begin[:]
if compare[constant[rqalpha_mod_] in name[mod_name]] begin[:]
variable[mod_name] assign[=] call[name[mod_name].replace, parameter[constant[rqalpha_mod_], constant[]]]
<ast.Delete object at 0x7da1b212dff0>
call[name[dump_config], parameter[call[name[user_mod_conf_path], parameter[]], name[user_conf]]]
return[name[uninstalled_result]]
def function[enable, parameter[params]]:
constant[
enable mod
]
variable[mod_name] assign[=] call[name[params]][constant[0]]
if compare[constant[rqalpha_mod_] in name[mod_name]] begin[:]
variable[mod_name] assign[=] call[name[mod_name].replace, parameter[constant[rqalpha_mod_], constant[]]]
variable[module_name] assign[=] binary_operation[constant[rqalpha_mod_] + name[mod_name]]
if call[name[module_name].startswith, parameter[constant[rqalpha_mod_sys_]]] begin[:]
variable[module_name] assign[=] binary_operation[constant[rqalpha.mod.] + name[module_name]]
<ast.Try object at 0x7da1b212f280>
from relative_module[rqalpha.utils.config] import module[user_mod_conf_path], module[load_yaml]
variable[user_conf] assign[=] <ast.IfExp object at 0x7da1b212fa00>
<ast.Try object at 0x7da1b212fcd0>
call[name[dump_config], parameter[call[name[user_mod_conf_path], parameter[]], name[user_conf]]]
def function[disable, parameter[params]]:
constant[
disable mod
]
variable[mod_name] assign[=] call[name[params]][constant[0]]
if compare[constant[rqalpha_mod_] in name[mod_name]] begin[:]
variable[mod_name] assign[=] call[name[mod_name].replace, parameter[constant[rqalpha_mod_], constant[]]]
from relative_module[rqalpha.utils.config] import module[user_mod_conf_path], module[load_yaml]
variable[user_conf] assign[=] <ast.IfExp object at 0x7da1b212f130>
<ast.Try object at 0x7da1b212de40>
call[name[dump_config], parameter[call[name[user_mod_conf_path], parameter[]], name[user_conf]]]
call[call[call[name[locals], parameter[]]][name[cmd]], parameter[name[params]]] | keyword[def] identifier[mod] ( identifier[cmd] , identifier[params] ):
literal[string]
keyword[def] identifier[list] ( identifier[params] ):
literal[string]
keyword[from] identifier[tabulate] keyword[import] identifier[tabulate]
keyword[from] identifier[rqalpha] . identifier[utils] . identifier[config] keyword[import] identifier[get_mod_conf]
identifier[mod_config] = identifier[get_mod_conf] ()
identifier[table] =[]
keyword[for] identifier[mod_name] , identifier[mod] keyword[in] identifier[six] . identifier[iteritems] ( identifier[mod_config] [ literal[string] ]):
identifier[table] . identifier[append] ([
identifier[mod_name] ,
( literal[string] keyword[if] identifier[mod] [ literal[string] ] keyword[else] literal[string] )
])
identifier[headers] =[
literal[string] ,
literal[string]
]
identifier[six] . identifier[print_] ( identifier[tabulate] ( identifier[table] , identifier[headers] = identifier[headers] , identifier[tablefmt] = literal[string] ))
identifier[six] . identifier[print_] ( literal[string] )
keyword[def] identifier[install] ( identifier[params] ):
literal[string]
keyword[try] :
keyword[from] identifier[pip] . identifier[_internal] keyword[import] identifier[main] keyword[as] identifier[pip_main]
keyword[from] identifier[pip] . identifier[_internal] . identifier[commands] . identifier[install] keyword[import] identifier[InstallCommand]
keyword[except] identifier[ImportError] :
keyword[from] identifier[pip] keyword[import] identifier[main] keyword[as] identifier[pip_main]
keyword[from] identifier[pip] . identifier[commands] . identifier[install] keyword[import] identifier[InstallCommand]
identifier[params] =[ identifier[param] keyword[for] identifier[param] keyword[in] identifier[params] ]
identifier[options] , identifier[mod_list] = identifier[InstallCommand] (). identifier[parse_args] ( identifier[params] )
identifier[mod_list] =[ identifier[mod_name] keyword[for] identifier[mod_name] keyword[in] identifier[mod_list] keyword[if] identifier[mod_name] != literal[string] ]
identifier[params] =[ literal[string] ]+ identifier[params]
keyword[for] identifier[mod_name] keyword[in] identifier[mod_list] :
identifier[mod_name_index] = identifier[params] . identifier[index] ( identifier[mod_name] )
keyword[if] identifier[mod_name] . identifier[startswith] ( literal[string] ):
identifier[six] . identifier[print_] ( literal[string] )
keyword[return]
keyword[if] literal[string] keyword[in] identifier[mod_name] :
identifier[lib_name] = identifier[mod_name]
keyword[else] :
identifier[lib_name] = literal[string] + identifier[mod_name]
identifier[params] [ identifier[mod_name_index] ]= identifier[lib_name]
identifier[installed_result] = identifier[pip_main] ( identifier[params] )
keyword[from] identifier[rqalpha] . identifier[utils] . identifier[config] keyword[import] identifier[load_yaml] , identifier[user_mod_conf_path]
identifier[user_conf] = identifier[load_yaml] ( identifier[user_mod_conf_path] ()) keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[user_mod_conf_path] ()) keyword[else] { literal[string] :{}}
keyword[if] identifier[installed_result] == literal[int] :
keyword[if] identifier[len] ( identifier[mod_list] )== literal[int] :
literal[string]
identifier[mod_name] = identifier[_detect_package_name_from_dir] ( identifier[params] )
identifier[mod_name] = identifier[mod_name] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )
identifier[mod_list] . identifier[append] ( identifier[mod_name] )
keyword[for] identifier[mod_name] keyword[in] identifier[mod_list] :
keyword[if] literal[string] keyword[in] identifier[mod_name] :
identifier[mod_name] = identifier[mod_name] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] literal[string] keyword[in] identifier[mod_name] :
identifier[mod_name] = identifier[mod_name] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[user_conf] [ literal[string] ][ identifier[mod_name] ]={}
identifier[user_conf] [ literal[string] ][ identifier[mod_name] ][ literal[string] ]= keyword[False]
identifier[dump_config] ( identifier[user_mod_conf_path] (), identifier[user_conf] )
keyword[return] identifier[installed_result]
keyword[def] identifier[uninstall] ( identifier[params] ):
literal[string]
keyword[try] :
keyword[from] identifier[pip] . identifier[_internal] keyword[import] identifier[main] keyword[as] identifier[pip_main]
keyword[from] identifier[pip] . identifier[_internal] . identifier[commands] . identifier[uninstall] keyword[import] identifier[UninstallCommand]
keyword[except] identifier[ImportError] :
keyword[from] identifier[pip] keyword[import] identifier[main] keyword[as] identifier[pip_main]
keyword[from] identifier[pip] . identifier[commands] . identifier[uninstall] keyword[import] identifier[UninstallCommand]
identifier[params] =[ identifier[param] keyword[for] identifier[param] keyword[in] identifier[params] ]
identifier[options] , identifier[mod_list] = identifier[UninstallCommand] (). identifier[parse_args] ( identifier[params] )
identifier[params] =[ literal[string] ]+ identifier[params]
keyword[for] identifier[mod_name] keyword[in] identifier[mod_list] :
identifier[mod_name_index] = identifier[params] . identifier[index] ( identifier[mod_name] )
keyword[if] identifier[mod_name] . identifier[startswith] ( literal[string] ):
identifier[six] . identifier[print_] ( literal[string] )
keyword[return]
keyword[if] literal[string] keyword[in] identifier[mod_name] :
identifier[lib_name] = identifier[mod_name]
keyword[else] :
identifier[lib_name] = literal[string] + identifier[mod_name]
identifier[params] [ identifier[mod_name_index] ]= identifier[lib_name]
identifier[uninstalled_result] = identifier[pip_main] ( identifier[params] )
keyword[from] identifier[rqalpha] . identifier[utils] . identifier[config] keyword[import] identifier[user_mod_conf_path] , identifier[load_yaml]
identifier[user_conf] = identifier[load_yaml] ( identifier[user_mod_conf_path] ()) keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[user_mod_conf_path] ()) keyword[else] { literal[string] :{}}
keyword[for] identifier[mod_name] keyword[in] identifier[mod_list] :
keyword[if] literal[string] keyword[in] identifier[mod_name] :
identifier[mod_name] = identifier[mod_name] . identifier[replace] ( literal[string] , literal[string] )
keyword[del] identifier[user_conf] [ literal[string] ][ identifier[mod_name] ]
identifier[dump_config] ( identifier[user_mod_conf_path] (), identifier[user_conf] )
keyword[return] identifier[uninstalled_result]
keyword[def] identifier[enable] ( identifier[params] ):
literal[string]
identifier[mod_name] = identifier[params] [ literal[int] ]
keyword[if] literal[string] keyword[in] identifier[mod_name] :
identifier[mod_name] = identifier[mod_name] . identifier[replace] ( literal[string] , literal[string] )
identifier[module_name] = literal[string] + identifier[mod_name]
keyword[if] identifier[module_name] . identifier[startswith] ( literal[string] ):
identifier[module_name] = literal[string] + identifier[module_name]
keyword[try] :
identifier[import_module] ( identifier[module_name] )
keyword[except] identifier[ImportError] :
identifier[installed_result] = identifier[install] ([ identifier[module_name] ])
keyword[if] identifier[installed_result] != literal[int] :
keyword[return]
keyword[from] identifier[rqalpha] . identifier[utils] . identifier[config] keyword[import] identifier[user_mod_conf_path] , identifier[load_yaml]
identifier[user_conf] = identifier[load_yaml] ( identifier[user_mod_conf_path] ()) keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[user_mod_conf_path] ()) keyword[else] { literal[string] :{}}
keyword[try] :
identifier[user_conf] [ literal[string] ][ identifier[mod_name] ][ literal[string] ]= keyword[True]
keyword[except] identifier[KeyError] :
identifier[user_conf] [ literal[string] ][ identifier[mod_name] ]={ literal[string] : keyword[True] }
identifier[dump_config] ( identifier[user_mod_conf_path] (), identifier[user_conf] )
keyword[def] identifier[disable] ( identifier[params] ):
literal[string]
identifier[mod_name] = identifier[params] [ literal[int] ]
keyword[if] literal[string] keyword[in] identifier[mod_name] :
identifier[mod_name] = identifier[mod_name] . identifier[replace] ( literal[string] , literal[string] )
keyword[from] identifier[rqalpha] . identifier[utils] . identifier[config] keyword[import] identifier[user_mod_conf_path] , identifier[load_yaml]
identifier[user_conf] = identifier[load_yaml] ( identifier[user_mod_conf_path] ()) keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[user_mod_conf_path] ()) keyword[else] { literal[string] :{}}
keyword[try] :
identifier[user_conf] [ literal[string] ][ identifier[mod_name] ][ literal[string] ]= keyword[False]
keyword[except] identifier[KeyError] :
identifier[user_conf] [ literal[string] ][ identifier[mod_name] ]={ literal[string] : keyword[False] }
identifier[dump_config] ( identifier[user_mod_conf_path] (), identifier[user_conf] )
identifier[locals] ()[ identifier[cmd] ]( identifier[params] ) | def mod(cmd, params):
"""
Mod management command
rqalpha mod list
rqalpha mod install xxx
rqalpha mod uninstall xxx
rqalpha mod enable xxx
rqalpha mod disable xxx
"""
def list(params):
"""
List all mod configuration
"""
from tabulate import tabulate
from rqalpha.utils.config import get_mod_conf
mod_config = get_mod_conf()
table = []
for (mod_name, mod) in six.iteritems(mod_config['mod']):
table.append([mod_name, 'enabled' if mod['enabled'] else 'disabled']) # depends on [control=['for'], data=[]]
headers = ['name', 'status']
six.print_(tabulate(table, headers=headers, tablefmt='psql'))
six.print_('You can use `rqalpha mod list/install/uninstall/enable/disable` to manage your mods')
def install(params):
"""
Install third-party Mod
"""
try:
from pip._internal import main as pip_main
from pip._internal.commands.install import InstallCommand # depends on [control=['try'], data=[]]
except ImportError:
from pip import main as pip_main
from pip.commands.install import InstallCommand # depends on [control=['except'], data=[]]
params = [param for param in params]
(options, mod_list) = InstallCommand().parse_args(params)
mod_list = [mod_name for mod_name in mod_list if mod_name != '.']
params = ['install'] + params
for mod_name in mod_list:
mod_name_index = params.index(mod_name)
if mod_name.startswith('rqalpha_mod_sys_'):
six.print_('System Mod can not be installed or uninstalled')
return # depends on [control=['if'], data=[]]
if 'rqalpha_mod_' in mod_name:
lib_name = mod_name # depends on [control=['if'], data=['mod_name']]
else:
lib_name = 'rqalpha_mod_' + mod_name
params[mod_name_index] = lib_name # depends on [control=['for'], data=['mod_name']]
# Install Mod
installed_result = pip_main(params)
# Export config
from rqalpha.utils.config import load_yaml, user_mod_conf_path
user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}}
if installed_result == 0:
# 如果为0,则说明安装成功
if len(mod_list) == 0:
'\n 主要是方便 `pip install -e .` 这种方式 本地调试 Mod 使用,需要满足以下条件:\n 1. `rqalpha mod install -e .` 命令是在对应 自定义 Mod 的根目录下\n 2. 该 Mod 必须包含 `setup.py` 文件(否则也不能正常的 `pip install -e .` 来安装)\n 3. 该 Mod 包名必须按照 RQAlpha 的规范来命名,具体规则如下\n * 必须以 `rqalpha-mod-` 来开头,比如 `rqalpha-mod-xxx-yyy`\n * 对应import的库名必须要 `rqalpha_mod_` 来开头,并且需要和包名后半部分一致,但是 `-` 需要替换为 `_`, 比如 `rqalpha_mod_xxx_yyy`\n '
mod_name = _detect_package_name_from_dir(params)
mod_name = mod_name.replace('-', '_').replace('rqalpha_mod_', '')
mod_list.append(mod_name) # depends on [control=['if'], data=[]]
for mod_name in mod_list:
if 'rqalpha_mod_' in mod_name:
mod_name = mod_name.replace('rqalpha_mod_', '') # depends on [control=['if'], data=['mod_name']]
if '==' in mod_name:
mod_name = mod_name.split('==')[0] # depends on [control=['if'], data=['mod_name']]
user_conf['mod'][mod_name] = {}
user_conf['mod'][mod_name]['enabled'] = False # depends on [control=['for'], data=['mod_name']]
dump_config(user_mod_conf_path(), user_conf) # depends on [control=['if'], data=[]]
return installed_result
def uninstall(params):
    """
    Uninstall third-party Mod

    ``params`` are pip-style uninstall arguments.  Each listed mod name is
    normalized to its full ``rqalpha_mod_`` package name before being handed
    to pip, and the mod's entry is removed from the user mod config on
    success.  Returns pip's exit status (0 on success), or None if a system
    mod was requested.
    """
    # pip >= 10.0 moved its programmatic entry points under pip._internal
    try:
        from pip._internal import main as pip_main
        from pip._internal.commands.uninstall import UninstallCommand # depends on [control=['try'], data=[]]
    except ImportError:
        # be compatible with pip < 10.0
        from pip import main as pip_main
        from pip.commands.uninstall import UninstallCommand # depends on [control=['except'], data=[]]
    # work on a copy so the caller's argument list is not mutated in place
    params = [param for param in params]
    (options, mod_list) = UninstallCommand().parse_args(params)
    params = ['uninstall'] + params
    for mod_name in mod_list:
        mod_name_index = params.index(mod_name)
        # system mods may not be uninstalled
        if mod_name.startswith('rqalpha_mod_sys_'):
            six.print_('System Mod can not be installed or uninstalled')
            return # depends on [control=['if'], data=[]]
        # normalize short names to the full package name for pip
        if 'rqalpha_mod_' in mod_name:
            lib_name = mod_name # depends on [control=['if'], data=['mod_name']]
        else:
            lib_name = 'rqalpha_mod_' + mod_name
        params[mod_name_index] = lib_name # depends on [control=['for'], data=['mod_name']]
    # Uninstall Mod
    uninstalled_result = pip_main(params)
    # Remove Mod Config
    from rqalpha.utils.config import user_mod_conf_path, load_yaml
    user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}}
    for mod_name in mod_list:
        if 'rqalpha_mod_' in mod_name:
            mod_name = mod_name.replace('rqalpha_mod_', '') # depends on [control=['if'], data=['mod_name']]
        # NOTE(review): raises KeyError if the mod was never recorded in the config
        del user_conf['mod'][mod_name] # depends on [control=['for'], data=['mod_name']]
    dump_config(user_mod_conf_path(), user_conf)
    return uninstalled_result
def enable(params):
    """
    enable mod

    ``params[0]`` is the mod name, with or without the ``rqalpha_mod_``
    prefix.  If the mod cannot be imported it is installed first; the mod
    is then marked ``enabled`` in the user mod config.
    """
    mod_name = params[0]
    if 'rqalpha_mod_' in mod_name:
        mod_name = mod_name.replace('rqalpha_mod_', '') # depends on [control=['if'], data=['mod_name']]
    # check whether is installed
    module_name = 'rqalpha_mod_' + mod_name
    # system mods live inside the rqalpha package itself
    if module_name.startswith('rqalpha_mod_sys_'):
        module_name = 'rqalpha.mod.' + module_name # depends on [control=['if'], data=[]]
    try:
        import_module(module_name) # depends on [control=['try'], data=[]]
    except ImportError:
        # install on demand when the mod is not importable; abort on failure
        installed_result = install([module_name])
        if installed_result != 0:
            return # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
    from rqalpha.utils.config import user_mod_conf_path, load_yaml
    user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}}
    try:
        user_conf['mod'][mod_name]['enabled'] = True # depends on [control=['try'], data=[]]
    except KeyError:
        # create the entry when the mod has no config section yet
        user_conf['mod'][mod_name] = {'enabled': True} # depends on [control=['except'], data=[]]
    dump_config(user_mod_conf_path(), user_conf)
def disable(params):
    """
    disable mod

    ``params[0]`` is the mod name, with or without the ``rqalpha_mod_``
    prefix; the mod is marked ``enabled: False`` in the user mod config.
    """
    mod_name = params[0]
    if 'rqalpha_mod_' in mod_name:
        mod_name = mod_name.replace('rqalpha_mod_', '') # depends on [control=['if'], data=['mod_name']]
    from rqalpha.utils.config import user_mod_conf_path, load_yaml
    user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}}
    try:
        user_conf['mod'][mod_name]['enabled'] = False # depends on [control=['try'], data=[]]
    except KeyError:
        # create the entry when the mod has no config section yet
        user_conf['mod'][mod_name] = {'enabled': False} # depends on [control=['except'], data=[]]
    dump_config(user_mod_conf_path(), user_conf)
locals()[cmd](params) |
def _is_leap_year(year):
"""Determine if a year is leap year.
Parameters
----------
year : numeric
Returns
-------
isleap : array of bools
"""
isleap = ((np.mod(year, 4) == 0) &
((np.mod(year, 100) != 0) | (np.mod(year, 400) == 0)))
return isleap | def function[_is_leap_year, parameter[year]]:
constant[Determine if a year is leap year.
Parameters
----------
year : numeric
Returns
-------
isleap : array of bools
]
variable[isleap] assign[=] binary_operation[compare[call[name[np].mod, parameter[name[year], constant[4]]] equal[==] constant[0]] <ast.BitAnd object at 0x7da2590d6b60> binary_operation[compare[call[name[np].mod, parameter[name[year], constant[100]]] not_equal[!=] constant[0]] <ast.BitOr object at 0x7da2590d6aa0> compare[call[name[np].mod, parameter[name[year], constant[400]]] equal[==] constant[0]]]]
return[name[isleap]] | keyword[def] identifier[_is_leap_year] ( identifier[year] ):
literal[string]
identifier[isleap] =(( identifier[np] . identifier[mod] ( identifier[year] , literal[int] )== literal[int] )&
(( identifier[np] . identifier[mod] ( identifier[year] , literal[int] )!= literal[int] )|( identifier[np] . identifier[mod] ( identifier[year] , literal[int] )== literal[int] )))
keyword[return] identifier[isleap] | def _is_leap_year(year):
"""Determine if a year is leap year.
Parameters
----------
year : numeric
Returns
-------
isleap : array of bools
"""
isleap = (np.mod(year, 4) == 0) & ((np.mod(year, 100) != 0) | (np.mod(year, 400) == 0))
return isleap |
def send_video(self, *args, **kwargs):
    """See :func:`send_video`"""
    # Fold the instance-level overrides into the keyword arguments,
    # then delegate to the module-level ``send_video`` and execute it.
    merged_kwargs = self._merge_overrides(**kwargs)
    return send_video(*args, **merged_kwargs).run()
constant[See :func:`send_video`]
return[call[call[name[send_video], parameter[<ast.Starred object at 0x7da18dc9b820>]].run, parameter[]]] | keyword[def] identifier[send_video] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[send_video] (* identifier[args] ,** identifier[self] . identifier[_merge_overrides] (** identifier[kwargs] )). identifier[run] () | def send_video(self, *args, **kwargs):
"""See :func:`send_video`"""
return send_video(*args, **self._merge_overrides(**kwargs)).run() |
def create_new_metadata(self, rsa_public_key):
    # type: (EncryptionMetadata,
    #        cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey)
    #        -> None
    """Create new metadata entries for encryption (upload)
    :param EncryptionMetadata self: this
    :param cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey:
        rsa public key
    """
    self._rsa_public_key = rsa_public_key
    # independent random keys: one symmetric key, one signing key
    key_bytes = blobxfer.operations.crypto._AES256_KEYLENGTH_BYTES
    self._symkey = os.urandom(key_bytes)
    self._signkey = os.urandom(key_bytes)
    # random initialization vector for content encryption
    self.content_encryption_iv = os.urandom(AES256_BLOCKSIZE_BYTES)
    self.encryption_agent = EncryptionAgent(
        encryption_algorithm=EncryptionMetadata._ENCRYPTION_ALGORITHM,
        protocol=EncryptionMetadata._ENCRYPTION_PROTOCOL_VERSION,
    )
    self.encryption_mode = EncryptionMetadata._ENCRYPTION_MODE
constant[Create new metadata entries for encryption (upload)
:param EncryptionMetadata self: this
:param cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey:
rsa public key
]
name[self]._rsa_public_key assign[=] name[rsa_public_key]
name[self]._symkey assign[=] call[name[os].urandom, parameter[name[blobxfer].operations.crypto._AES256_KEYLENGTH_BYTES]]
name[self]._signkey assign[=] call[name[os].urandom, parameter[name[blobxfer].operations.crypto._AES256_KEYLENGTH_BYTES]]
name[self].content_encryption_iv assign[=] call[name[os].urandom, parameter[name[AES256_BLOCKSIZE_BYTES]]]
name[self].encryption_agent assign[=] call[name[EncryptionAgent], parameter[]]
name[self].encryption_mode assign[=] name[EncryptionMetadata]._ENCRYPTION_MODE | keyword[def] identifier[create_new_metadata] ( identifier[self] , identifier[rsa_public_key] ):
literal[string]
identifier[self] . identifier[_rsa_public_key] = identifier[rsa_public_key]
identifier[self] . identifier[_symkey] = identifier[os] . identifier[urandom] (
identifier[blobxfer] . identifier[operations] . identifier[crypto] . identifier[_AES256_KEYLENGTH_BYTES] )
identifier[self] . identifier[_signkey] = identifier[os] . identifier[urandom] (
identifier[blobxfer] . identifier[operations] . identifier[crypto] . identifier[_AES256_KEYLENGTH_BYTES] )
identifier[self] . identifier[content_encryption_iv] = identifier[os] . identifier[urandom] ( identifier[AES256_BLOCKSIZE_BYTES] )
identifier[self] . identifier[encryption_agent] = identifier[EncryptionAgent] (
identifier[encryption_algorithm] = identifier[EncryptionMetadata] . identifier[_ENCRYPTION_ALGORITHM] ,
identifier[protocol] = identifier[EncryptionMetadata] . identifier[_ENCRYPTION_PROTOCOL_VERSION] ,
)
identifier[self] . identifier[encryption_mode] = identifier[EncryptionMetadata] . identifier[_ENCRYPTION_MODE] | def create_new_metadata(self, rsa_public_key):
# type: (EncryptionMetadata,
# cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey)
# -> None
'Create new metadata entries for encryption (upload)\n :param EncryptionMetadata self: this\n :param cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey:\n rsa public key\n '
self._rsa_public_key = rsa_public_key
self._symkey = os.urandom(blobxfer.operations.crypto._AES256_KEYLENGTH_BYTES)
self._signkey = os.urandom(blobxfer.operations.crypto._AES256_KEYLENGTH_BYTES)
self.content_encryption_iv = os.urandom(AES256_BLOCKSIZE_BYTES)
self.encryption_agent = EncryptionAgent(encryption_algorithm=EncryptionMetadata._ENCRYPTION_ALGORITHM, protocol=EncryptionMetadata._ENCRYPTION_PROTOCOL_VERSION)
self.encryption_mode = EncryptionMetadata._ENCRYPTION_MODE |
def _set_bind_interfaces(self, v, load=False):
    """
    Setter method for bind_interfaces, mapped from YANG variable /overlay_service_policy_state/bind_interfaces (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_bind_interfaces is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_bind_interfaces() directly.
    YANG Description: Overlay Service Policy Binding Interfaces
    """
    # Unwrap values that arrive pre-wrapped in a YANG dynamic type.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # Coerce the value into the generated container class; YANGDynClass
    # raises TypeError/ValueError when the value cannot be represented.
    try:
      t = YANGDynClass(v,base=bind_interfaces.bind_interfaces, is_container='container', presence=False, yang_name="bind-interfaces", rest_name="bind-interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ssm-overlay-service-policy-bind-intf', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
      # Re-raise with the structured error payload expected by pyangbind callers.
      raise ValueError({
        'error-string': """bind_interfaces must be of a type compatible with container""",
        'defined-type': "container",
        'generated-type': """YANGDynClass(base=bind_interfaces.bind_interfaces, is_container='container', presence=False, yang_name="bind-interfaces", rest_name="bind-interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ssm-overlay-service-policy-bind-intf', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='container', is_config=False)""",
        })
    self.__bind_interfaces = t
    # Notify the parent data tree, if registered, that this node changed.
    if hasattr(self, '_set'):
      self._set()
constant[
Setter method for bind_interfaces, mapped from YANG variable /overlay_service_policy_state/bind_interfaces (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bind_interfaces is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bind_interfaces() directly.
YANG Description: Overlay Service Policy Binding Interfaces
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18bc70130>
name[self].__bind_interfaces assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_bind_interfaces] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[bind_interfaces] . identifier[bind_interfaces] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[False] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__bind_interfaces] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_bind_interfaces(self, v, load=False):
"""
Setter method for bind_interfaces, mapped from YANG variable /overlay_service_policy_state/bind_interfaces (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bind_interfaces is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bind_interfaces() directly.
YANG Description: Overlay Service Policy Binding Interfaces
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=bind_interfaces.bind_interfaces, is_container='container', presence=False, yang_name='bind-interfaces', rest_name='bind-interfaces', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ssm-overlay-service-policy-bind-intf', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='container', is_config=False) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'bind_interfaces must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=bind_interfaces.bind_interfaces, is_container=\'container\', presence=False, yang_name="bind-interfaces", rest_name="bind-interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'callpoint\': u\'ssm-overlay-service-policy-bind-intf\', u\'cli-suppress-show-path\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-ssm-operational\', defining_module=\'brocade-ssm-operational\', yang_type=\'container\', is_config=False)'}) # depends on [control=['except'], data=[]]
self.__bind_interfaces = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def parse_exiobase3(path):
    """ Parses the public EXIOBASE 3 system
    This parser works with either the compressed zip
    archive as downloaded or the extracted system.
    Note
    ----
    The exiobase 3 parser does so far not include
    population and characterization data.
    Parameters
    ----------
    path : string or pathlib.Path
        Path to the folder with the EXIOBASE files
        or the compressed archive.
    Returns
    -------
    IOSystem
        A IOSystem with the parsed exiobase 3 data
    """
    io = load_all(path)
    # need to rename the final demand satellite,
    # wrong name in the standard distribution
    try:
        io.satellite.FY = io.satellite.F_hh.copy()
        del io.satellite.F_hh
    except AttributeError:
        # no F_hh attribute present -- nothing to rename
        pass
    # some ixi in the exiobase 3.4 official distribution
    # have a country name mixup. Clean it here:
    # Mapping is three-letter -> two-letter country codes; the WWx entries
    # are the EXIOBASE rest-of-world aggregate regions renamed to Wx form.
    io.rename_regions(
        {'AUS': 'AU',
         'AUT': 'AT',
         'BEL': 'BE',
         'BGR': 'BG',
         'BRA': 'BR',
         'CAN': 'CA',
         'CHE': 'CH',
         'CHN': 'CN',
         'CYP': 'CY',
         'CZE': 'CZ',
         'DEU': 'DE',
         'DNK': 'DK',
         'ESP': 'ES',
         'EST': 'EE',
         'FIN': 'FI',
         'FRA': 'FR',
         'GBR': 'GB',
         'GRC': 'GR',
         'HRV': 'HR',
         'HUN': 'HU',
         'IDN': 'ID',
         'IND': 'IN',
         'IRL': 'IE',
         'ITA': 'IT',
         'JPN': 'JP',
         'KOR': 'KR',
         'LTU': 'LT',
         'LUX': 'LU',
         'LVA': 'LV',
         'MEX': 'MX',
         'MLT': 'MT',
         'NLD': 'NL',
         'NOR': 'NO',
         'POL': 'PL',
         'PRT': 'PT',
         'ROM': 'RO',
         'RUS': 'RU',
         'SVK': 'SK',
         'SVN': 'SI',
         'SWE': 'SE',
         'TUR': 'TR',
         'TWN': 'TW',
         'USA': 'US',
         'ZAF': 'ZA',
         'WWA': 'WA',
         'WWE': 'WE',
         'WWF': 'WF',
         'WWL': 'WL',
         'WWM': 'WM'})
    return io
constant[ Parses the public EXIOBASE 3 system
This parser works with either the compressed zip
archive as downloaded or the extracted system.
Note
----
The exiobase 3 parser does so far not include
population and characterization data.
Parameters
----------
path : string or pathlib.Path
Path to the folder with the EXIOBASE files
or the compressed archive.
Returns
-------
IOSystem
A IOSystem with the parsed exiobase 3 data
]
variable[io] assign[=] call[name[load_all], parameter[name[path]]]
<ast.Try object at 0x7da1b0625f90>
call[name[io].rename_regions, parameter[dictionary[[<ast.Constant object at 0x7da1b06267a0>, <ast.Constant object at 0x7da1b06258d0>, <ast.Constant object at 0x7da1b06271c0>, <ast.Constant object at 0x7da1b06272e0>, <ast.Constant object at 0x7da1b0627760>, <ast.Constant object at 0x7da1b0625db0>, <ast.Constant object at 0x7da1b0626650>, <ast.Constant object at 0x7da1b0627a00>, <ast.Constant object at 0x7da1b0627eb0>, <ast.Constant object at 0x7da1b06259c0>, <ast.Constant object at 0x7da1b0626440>, <ast.Constant object at 0x7da1b06257e0>, <ast.Constant object at 0x7da1b0626b90>, <ast.Constant object at 0x7da1b0627c10>, <ast.Constant object at 0x7da1b0626350>, <ast.Constant object at 0x7da1b0625720>, <ast.Constant object at 0x7da1b0626a70>, <ast.Constant object at 0x7da1b0626ad0>, <ast.Constant object at 0x7da1b0627bb0>, <ast.Constant object at 0x7da1b0625960>, <ast.Constant object at 0x7da1b0627d90>, <ast.Constant object at 0x7da1b0625e40>, <ast.Constant object at 0x7da1b06261a0>, <ast.Constant object at 0x7da1b06264d0>, <ast.Constant object at 0x7da1b0625f60>, <ast.Constant object at 0x7da1b0626aa0>, <ast.Constant object at 0x7da1b0627100>, <ast.Constant object at 0x7da1b06276d0>, <ast.Constant object at 0x7da1b06262c0>, <ast.Constant object at 0x7da1b0626260>, <ast.Constant object at 0x7da1b0626320>, <ast.Constant object at 0x7da1b0627220>, <ast.Constant object at 0x7da1b0627520>, <ast.Constant object at 0x7da1b0627160>, <ast.Constant object at 0x7da1b0625780>, <ast.Constant object at 0x7da1b0626620>, <ast.Constant object at 0x7da1b06256c0>, <ast.Constant object at 0x7da1b0627a30>, <ast.Constant object at 0x7da1b0625cc0>, <ast.Constant object at 0x7da1b0627be0>, <ast.Constant object at 0x7da1b0627ee0>, <ast.Constant object at 0x7da1b0627d00>, <ast.Constant object at 0x7da1b0626e90>, <ast.Constant object at 0x7da1b0627640>, <ast.Constant object at 0x7da1b06257b0>, <ast.Constant object at 0x7da1b06268c0>, <ast.Constant object at 0x7da1b06266e0>, <ast.Constant object 
at 0x7da1b06263b0>, <ast.Constant object at 0x7da1b0626f80>], [<ast.Constant object at 0x7da1b06278e0>, <ast.Constant object at 0x7da1b0626d10>, <ast.Constant object at 0x7da1b0626560>, <ast.Constant object at 0x7da1b0627cd0>, <ast.Constant object at 0x7da1b0627580>, <ast.Constant object at 0x7da1b0627430>, <ast.Constant object at 0x7da1b06261d0>, <ast.Constant object at 0x7da1b0625750>, <ast.Constant object at 0x7da1b06270d0>, <ast.Constant object at 0x7da1b0625ed0>, <ast.Constant object at 0x7da1b0627880>, <ast.Constant object at 0x7da1b0626f20>, <ast.Constant object at 0x7da1b0627ac0>, <ast.Constant object at 0x7da1b0627a60>, <ast.Constant object at 0x7da1b0627400>, <ast.Constant object at 0x7da1b0626da0>, <ast.Constant object at 0x7da1b06267d0>, <ast.Constant object at 0x7da1b0625a80>, <ast.Constant object at 0x7da1b06255d0>, <ast.Constant object at 0x7da1b0625f30>, <ast.Constant object at 0x7da1b0627fd0>, <ast.Constant object at 0x7da1b06263e0>, <ast.Constant object at 0x7da1b0625840>, <ast.Constant object at 0x7da1b0627730>, <ast.Constant object at 0x7da1b0625fc0>, <ast.Constant object at 0x7da1b0627910>, <ast.Constant object at 0x7da1b06262f0>, <ast.Constant object at 0x7da1b0626380>, <ast.Constant object at 0x7da18ede6710>, <ast.Constant object at 0x7da18ede6dd0>, <ast.Constant object at 0x7da18ede7b50>, <ast.Constant object at 0x7da18ede5000>, <ast.Constant object at 0x7da18ede5de0>, <ast.Constant object at 0x7da18ede4c40>, <ast.Constant object at 0x7da18ede6d70>, <ast.Constant object at 0x7da18ede6c50>, <ast.Constant object at 0x7da18ede6200>, <ast.Constant object at 0x7da18ede4d30>, <ast.Constant object at 0x7da18ede4640>, <ast.Constant object at 0x7da18ede62f0>, <ast.Constant object at 0x7da18ede7820>, <ast.Constant object at 0x7da18ede7dc0>, <ast.Constant object at 0x7da18ede59f0>, <ast.Constant object at 0x7da18ede71c0>, <ast.Constant object at 0x7da18ede7610>, <ast.Constant object at 0x7da18ede6650>, <ast.Constant object at 0x7da18ede43a0>, 
<ast.Constant object at 0x7da18ede7790>, <ast.Constant object at 0x7da18ede6740>]]]]
return[name[io]] | keyword[def] identifier[parse_exiobase3] ( identifier[path] ):
literal[string]
identifier[io] = identifier[load_all] ( identifier[path] )
keyword[try] :
identifier[io] . identifier[satellite] . identifier[FY] = identifier[io] . identifier[satellite] . identifier[F_hh] . identifier[copy] ()
keyword[del] identifier[io] . identifier[satellite] . identifier[F_hh]
keyword[except] identifier[AttributeError] :
keyword[pass]
identifier[io] . identifier[rename_regions] (
{ literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] })
keyword[return] identifier[io] | def parse_exiobase3(path):
""" Parses the public EXIOBASE 3 system
This parser works with either the compressed zip
archive as downloaded or the extracted system.
Note
----
The exiobase 3 parser does so far not include
population and characterization data.
Parameters
----------
path : string or pathlib.Path
Path to the folder with the EXIOBASE files
or the compressed archive.
Returns
-------
IOSystem
A IOSystem with the parsed exiobase 3 data
"""
io = load_all(path)
# need to rename the final demand satellite,
# wrong name in the standard distribution
try:
io.satellite.FY = io.satellite.F_hh.copy()
del io.satellite.F_hh # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
# some ixi in the exiobase 3.4 official distribution
# have a country name mixup. Clean it here:
io.rename_regions({'AUS': 'AU', 'AUT': 'AT', 'BEL': 'BE', 'BGR': 'BG', 'BRA': 'BR', 'CAN': 'CA', 'CHE': 'CH', 'CHN': 'CN', 'CYP': 'CY', 'CZE': 'CZ', 'DEU': 'DE', 'DNK': 'DK', 'ESP': 'ES', 'EST': 'EE', 'FIN': 'FI', 'FRA': 'FR', 'GBR': 'GB', 'GRC': 'GR', 'HRV': 'HR', 'HUN': 'HU', 'IDN': 'ID', 'IND': 'IN', 'IRL': 'IE', 'ITA': 'IT', 'JPN': 'JP', 'KOR': 'KR', 'LTU': 'LT', 'LUX': 'LU', 'LVA': 'LV', 'MEX': 'MX', 'MLT': 'MT', 'NLD': 'NL', 'NOR': 'NO', 'POL': 'PL', 'PRT': 'PT', 'ROM': 'RO', 'RUS': 'RU', 'SVK': 'SK', 'SVN': 'SI', 'SWE': 'SE', 'TUR': 'TR', 'TWN': 'TW', 'USA': 'US', 'ZAF': 'ZA', 'WWA': 'WA', 'WWE': 'WE', 'WWF': 'WF', 'WWL': 'WL', 'WWM': 'WM'})
return io |
def count_params(self):
    """Return the total number of parameters in the network.

    Iterates over ``self.all_params`` and multiplies out each parameter's
    shape as reported by ``get_shape()``.  Dimensions that cannot be
    converted to ``int`` (e.g. unknown/dynamic dimensions) are counted as
    a factor of 1; falsy dimensions (0 / None) are skipped, leaving the
    running product unchanged.

    Returns
    -------
    int
        Total number of scalar parameters across all parameter tensors.
    """
    n_params = 0
    # fix: drop the unused enumerate() index from the original loop
    for p in self.all_params:
        n = 1
        for s in p.get_shape():
            try:
                s = int(s)
            except (TypeError, ValueError):
                # int() only raises these; narrower than the original
                # blanket ``except Exception``.  Unknown dimension -> 1.
                s = 1
            if s:
                n = n * s
        n_params = n_params + n
    return n_params
constant[Returns the number of parameters in the network.]
variable[n_params] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da18bc72950>, <ast.Name object at 0x7da18bc71e10>]]] in starred[call[name[enumerate], parameter[name[self].all_params]]] begin[:]
variable[n] assign[=] constant[1]
for taget[name[s]] in starred[call[name[p].get_shape, parameter[]]] begin[:]
<ast.Try object at 0x7da18bc739d0>
if name[s] begin[:]
variable[n] assign[=] binary_operation[name[n] * name[s]]
variable[n_params] assign[=] binary_operation[name[n_params] + name[n]]
return[name[n_params]] | keyword[def] identifier[count_params] ( identifier[self] ):
literal[string]
identifier[n_params] = literal[int]
keyword[for] identifier[_i] , identifier[p] keyword[in] identifier[enumerate] ( identifier[self] . identifier[all_params] ):
identifier[n] = literal[int]
keyword[for] identifier[s] keyword[in] identifier[p] . identifier[get_shape] ():
keyword[try] :
identifier[s] = identifier[int] ( identifier[s] )
keyword[except] identifier[Exception] :
identifier[s] = literal[int]
keyword[if] identifier[s] :
identifier[n] = identifier[n] * identifier[s]
identifier[n_params] = identifier[n_params] + identifier[n]
keyword[return] identifier[n_params] | def count_params(self):
"""Returns the number of parameters in the network."""
n_params = 0
for (_i, p) in enumerate(self.all_params):
n = 1
# for s in p.eval().shape:
for s in p.get_shape():
try:
s = int(s) # depends on [control=['try'], data=[]]
except Exception:
s = 1 # depends on [control=['except'], data=[]]
if s:
n = n * s # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['s']]
n_params = n_params + n # depends on [control=['for'], data=[]]
return n_params |
def oscillating_setpoint(_square_wave=False, shift=0):
    """A basic example of a target that you may want to approximate.
    If you have a thermostat, this is a temperature setting.
    This target can't change too often

    Parameters
    ----------
    _square_wave : bool
        If True, yield a square wave alternating between 50 and 20 every
        150 samples.  Otherwise yield a sum of two sinusoids centred on 20.
    shift : float
        Phase shift applied to both sinusoids (unused for the square wave).

    Yields
    ------
    float or int
        The next setpoint value (never terminates).
    """
    import math
    c = 0
    # fix: the original hard-coded 3.1415926 as an approximation of pi
    # even though math is imported; use the exact constant instead.
    # Also hoist the loop-invariant _square_wave test out of the loop.
    if _square_wave:
        while 1:
            # high (50) for 150 samples, then low (20) for 150 samples
            yield ((c % 300) < 150) * 30 + 20
            c += 1
    else:
        two_pi = 2.0 * math.pi
        while 1:
            yield (10 * math.sin(two_pi * c + shift)
                   + 20 + 5 * math.sin(two_pi * c * 3 + shift))
            c += .001
constant[A basic example of a target that you may want to approximate.
If you have a thermostat, this is a temperature setting.
This target can't change too often
]
import module[math]
variable[c] assign[=] constant[0]
while constant[1] begin[:]
if name[_square_wave] begin[:]
<ast.Yield object at 0x7da1b2345ff0>
<ast.AugAssign object at 0x7da1b2347400> | keyword[def] identifier[oscillating_setpoint] ( identifier[_square_wave] = keyword[False] , identifier[shift] = literal[int] ):
literal[string]
keyword[import] identifier[math]
identifier[c] = literal[int]
keyword[while] literal[int] :
keyword[if] identifier[_square_wave] :
keyword[yield] (( identifier[c] % literal[int] )< literal[int] )* literal[int] + literal[int]
identifier[c] += literal[int]
keyword[else] :
keyword[yield] literal[int] * identifier[math] . identifier[sin] ( literal[int] * literal[int] * identifier[c] + identifier[shift] )+ literal[int] + literal[int] * identifier[math] . identifier[sin] ( literal[int] * literal[int] * identifier[c] * literal[int] + identifier[shift] )
identifier[c] += literal[int] | def oscillating_setpoint(_square_wave=False, shift=0):
"""A basic example of a target that you may want to approximate.
If you have a thermostat, this is a temperature setting.
This target can't change too often
"""
import math
c = 0
while 1:
if _square_wave:
yield ((c % 300 < 150) * 30 + 20)
c += 1 # depends on [control=['if'], data=[]]
else:
yield (10 * math.sin(2 * 3.1415926 * c + shift) + 20 + 5 * math.sin(2 * 3.1415926 * c * 3 + shift))
c += 0.001 # depends on [control=['while'], data=[]] |
def next_state_scope(self, next_state_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]:
    '''Returns a partial scope with current next state-fluents.

    Args:
        next_state_fluents (Sequence[tf.Tensor]): The next state fluents.

    Returns:
        A mapping from next state fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
    '''
    # Pair each canonical next-state fluent name with its tensor, in order.
    ordering = self.rddl.domain.next_state_fluent_ordering
    return {name: fluent for name, fluent in zip(ordering, next_state_fluents)}
constant[Returns a partial scope with current next state-fluents.
Args:
next_state_fluents (Sequence[tf.Tensor]): The next state fluents.
Returns:
A mapping from next state fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
]
return[call[name[dict], parameter[call[name[zip], parameter[name[self].rddl.domain.next_state_fluent_ordering, name[next_state_fluents]]]]]] | keyword[def] identifier[next_state_scope] ( identifier[self] , identifier[next_state_fluents] : identifier[Sequence] [ identifier[tf] . identifier[Tensor] ])-> identifier[Dict] [ identifier[str] , identifier[TensorFluent] ]:
literal[string]
keyword[return] identifier[dict] ( identifier[zip] ( identifier[self] . identifier[rddl] . identifier[domain] . identifier[next_state_fluent_ordering] , identifier[next_state_fluents] )) | def next_state_scope(self, next_state_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]:
"""Returns a partial scope with current next state-fluents.
Args:
next_state_fluents (Sequence[tf.Tensor]): The next state fluents.
Returns:
A mapping from next state fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
"""
return dict(zip(self.rddl.domain.next_state_fluent_ordering, next_state_fluents)) |
def load(path_or_file, validate=True, strict=True, fmt='auto'):
r"""Load a JAMS Annotation from a file.
Parameters
----------
path_or_file : str or file-like
Path to the JAMS file to load
OR
An open file handle to load from.
validate : bool
Attempt to validate the JAMS object
strict : bool
if `validate == True`, enforce strict schema validation
fmt : str ['auto', 'jams', 'jamz']
The encoding format of the input
If `auto`, encoding is inferred from the file name.
If the input is an open file handle, `jams` encoding
is used.
Returns
-------
jam : JAMS
The loaded JAMS object
Raises
------
SchemaError
if `validate == True`, `strict==True`, and validation fails
See also
--------
JAMS.validate
JAMS.save
Examples
--------
>>> # Load a jams object from a file name
>>> J = jams.load('data.jams')
>>> # Or from an open file descriptor
>>> with open('data.jams', 'r') as fdesc:
... J = jams.load(fdesc)
>>> # Non-strict validation
>>> J = jams.load('data.jams', strict=False)
>>> # No validation at all
>>> J = jams.load('data.jams', validate=False)
"""
with _open(path_or_file, mode='r', fmt=fmt) as fdesc:
jam = JAMS(**json.load(fdesc))
if validate:
jam.validate(strict=strict)
return jam | def function[load, parameter[path_or_file, validate, strict, fmt]]:
constant[Load a JAMS Annotation from a file.
Parameters
----------
path_or_file : str or file-like
Path to the JAMS file to load
OR
An open file handle to load from.
validate : bool
Attempt to validate the JAMS object
strict : bool
if `validate == True`, enforce strict schema validation
fmt : str ['auto', 'jams', 'jamz']
The encoding format of the input
If `auto`, encoding is inferred from the file name.
If the input is an open file handle, `jams` encoding
is used.
Returns
-------
jam : JAMS
The loaded JAMS object
Raises
------
SchemaError
if `validate == True`, `strict==True`, and validation fails
See also
--------
JAMS.validate
JAMS.save
Examples
--------
>>> # Load a jams object from a file name
>>> J = jams.load('data.jams')
>>> # Or from an open file descriptor
>>> with open('data.jams', 'r') as fdesc:
... J = jams.load(fdesc)
>>> # Non-strict validation
>>> J = jams.load('data.jams', strict=False)
>>> # No validation at all
>>> J = jams.load('data.jams', validate=False)
]
with call[name[_open], parameter[name[path_or_file]]] begin[:]
variable[jam] assign[=] call[name[JAMS], parameter[]]
if name[validate] begin[:]
call[name[jam].validate, parameter[]]
return[name[jam]] | keyword[def] identifier[load] ( identifier[path_or_file] , identifier[validate] = keyword[True] , identifier[strict] = keyword[True] , identifier[fmt] = literal[string] ):
literal[string]
keyword[with] identifier[_open] ( identifier[path_or_file] , identifier[mode] = literal[string] , identifier[fmt] = identifier[fmt] ) keyword[as] identifier[fdesc] :
identifier[jam] = identifier[JAMS] (** identifier[json] . identifier[load] ( identifier[fdesc] ))
keyword[if] identifier[validate] :
identifier[jam] . identifier[validate] ( identifier[strict] = identifier[strict] )
keyword[return] identifier[jam] | def load(path_or_file, validate=True, strict=True, fmt='auto'):
"""Load a JAMS Annotation from a file.
Parameters
----------
path_or_file : str or file-like
Path to the JAMS file to load
OR
An open file handle to load from.
validate : bool
Attempt to validate the JAMS object
strict : bool
if `validate == True`, enforce strict schema validation
fmt : str ['auto', 'jams', 'jamz']
The encoding format of the input
If `auto`, encoding is inferred from the file name.
If the input is an open file handle, `jams` encoding
is used.
Returns
-------
jam : JAMS
The loaded JAMS object
Raises
------
SchemaError
if `validate == True`, `strict==True`, and validation fails
See also
--------
JAMS.validate
JAMS.save
Examples
--------
>>> # Load a jams object from a file name
>>> J = jams.load('data.jams')
>>> # Or from an open file descriptor
>>> with open('data.jams', 'r') as fdesc:
... J = jams.load(fdesc)
>>> # Non-strict validation
>>> J = jams.load('data.jams', strict=False)
>>> # No validation at all
>>> J = jams.load('data.jams', validate=False)
"""
with _open(path_or_file, mode='r', fmt=fmt) as fdesc:
jam = JAMS(**json.load(fdesc)) # depends on [control=['with'], data=['fdesc']]
if validate:
jam.validate(strict=strict) # depends on [control=['if'], data=[]]
return jam |
def zrevrangebyscore(self, key, max=float('inf'), min=float('-inf'),
*, exclude=None, withscores=False,
offset=None, count=None, encoding=_NOTSET):
"""Return a range of members in a sorted set, by score,
with scores ordered from high to low.
:raises TypeError: if min or max is not float or int
:raises TypeError: if both offset and count are not specified
:raises TypeError: if offset is not int
:raises TypeError: if count is not int
"""
if not isinstance(min, (int, float)):
raise TypeError("min argument must be int or float")
if not isinstance(max, (int, float)):
raise TypeError("max argument must be int or float")
if (offset is not None and count is None) or \
(count is not None and offset is None):
raise TypeError("offset and count must both be specified")
if offset is not None and not isinstance(offset, int):
raise TypeError("offset argument must be int")
if count is not None and not isinstance(count, int):
raise TypeError("count argument must be int")
min, max = _encode_min_max(exclude, min, max)
args = []
if withscores:
args = [b'WITHSCORES']
if offset is not None and count is not None:
args.extend([b'LIMIT', offset, count])
fut = self.execute(b'ZREVRANGEBYSCORE', key, max, min, *args,
encoding=encoding)
if withscores:
return wait_convert(fut, pairs_int_or_float)
return fut | def function[zrevrangebyscore, parameter[self, key, max, min]]:
constant[Return a range of members in a sorted set, by score,
with scores ordered from high to low.
:raises TypeError: if min or max is not float or int
:raises TypeError: if both offset and count are not specified
:raises TypeError: if offset is not int
:raises TypeError: if count is not int
]
if <ast.UnaryOp object at 0x7da2054a7070> begin[:]
<ast.Raise object at 0x7da2054a6ec0>
if <ast.UnaryOp object at 0x7da2054a5cc0> begin[:]
<ast.Raise object at 0x7da2054a7190>
if <ast.BoolOp object at 0x7da2054a72b0> begin[:]
<ast.Raise object at 0x7da18ede5a80>
if <ast.BoolOp object at 0x7da20e956e00> begin[:]
<ast.Raise object at 0x7da1b235b970>
if <ast.BoolOp object at 0x7da1b2358d30> begin[:]
<ast.Raise object at 0x7da1b23583d0>
<ast.Tuple object at 0x7da1b235b790> assign[=] call[name[_encode_min_max], parameter[name[exclude], name[min], name[max]]]
variable[args] assign[=] list[[]]
if name[withscores] begin[:]
variable[args] assign[=] list[[<ast.Constant object at 0x7da1b2358100>]]
if <ast.BoolOp object at 0x7da1b235b4f0> begin[:]
call[name[args].extend, parameter[list[[<ast.Constant object at 0x7da1b2358d60>, <ast.Name object at 0x7da1b235b640>, <ast.Name object at 0x7da1b23598a0>]]]]
variable[fut] assign[=] call[name[self].execute, parameter[constant[b'ZREVRANGEBYSCORE'], name[key], name[max], name[min], <ast.Starred object at 0x7da1b2358df0>]]
if name[withscores] begin[:]
return[call[name[wait_convert], parameter[name[fut], name[pairs_int_or_float]]]]
return[name[fut]] | keyword[def] identifier[zrevrangebyscore] ( identifier[self] , identifier[key] , identifier[max] = identifier[float] ( literal[string] ), identifier[min] = identifier[float] ( literal[string] ),
*, identifier[exclude] = keyword[None] , identifier[withscores] = keyword[False] ,
identifier[offset] = keyword[None] , identifier[count] = keyword[None] , identifier[encoding] = identifier[_NOTSET] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[min] ,( identifier[int] , identifier[float] )):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[max] ,( identifier[int] , identifier[float] )):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] ( identifier[offset] keyword[is] keyword[not] keyword[None] keyword[and] identifier[count] keyword[is] keyword[None] ) keyword[or] ( identifier[count] keyword[is] keyword[not] keyword[None] keyword[and] identifier[offset] keyword[is] keyword[None] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[offset] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[offset] , identifier[int] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[count] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[count] , identifier[int] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[min] , identifier[max] = identifier[_encode_min_max] ( identifier[exclude] , identifier[min] , identifier[max] )
identifier[args] =[]
keyword[if] identifier[withscores] :
identifier[args] =[ literal[string] ]
keyword[if] identifier[offset] keyword[is] keyword[not] keyword[None] keyword[and] identifier[count] keyword[is] keyword[not] keyword[None] :
identifier[args] . identifier[extend] ([ literal[string] , identifier[offset] , identifier[count] ])
identifier[fut] = identifier[self] . identifier[execute] ( literal[string] , identifier[key] , identifier[max] , identifier[min] ,* identifier[args] ,
identifier[encoding] = identifier[encoding] )
keyword[if] identifier[withscores] :
keyword[return] identifier[wait_convert] ( identifier[fut] , identifier[pairs_int_or_float] )
keyword[return] identifier[fut] | def zrevrangebyscore(self, key, max=float('inf'), min=float('-inf'), *, exclude=None, withscores=False, offset=None, count=None, encoding=_NOTSET):
"""Return a range of members in a sorted set, by score,
with scores ordered from high to low.
:raises TypeError: if min or max is not float or int
:raises TypeError: if both offset and count are not specified
:raises TypeError: if offset is not int
:raises TypeError: if count is not int
"""
if not isinstance(min, (int, float)):
raise TypeError('min argument must be int or float') # depends on [control=['if'], data=[]]
if not isinstance(max, (int, float)):
raise TypeError('max argument must be int or float') # depends on [control=['if'], data=[]]
if offset is not None and count is None or (count is not None and offset is None):
raise TypeError('offset and count must both be specified') # depends on [control=['if'], data=[]]
if offset is not None and (not isinstance(offset, int)):
raise TypeError('offset argument must be int') # depends on [control=['if'], data=[]]
if count is not None and (not isinstance(count, int)):
raise TypeError('count argument must be int') # depends on [control=['if'], data=[]]
(min, max) = _encode_min_max(exclude, min, max)
args = []
if withscores:
args = [b'WITHSCORES'] # depends on [control=['if'], data=[]]
if offset is not None and count is not None:
args.extend([b'LIMIT', offset, count]) # depends on [control=['if'], data=[]]
fut = self.execute(b'ZREVRANGEBYSCORE', key, max, min, *args, encoding=encoding)
if withscores:
return wait_convert(fut, pairs_int_or_float) # depends on [control=['if'], data=[]]
return fut |
def child_cardinality(self, child):
""" Return the cardinality of a child element
:param child: The name of the child element
:return: The cardinality as a 2-tuple (min, max).
The max value is either a number or the string "unbounded".
The min value is always a number.
"""
for prop, klassdef in self.c_children.values():
if child == prop:
if isinstance(klassdef, list):
try:
_min = self.c_cardinality["min"]
except KeyError:
_min = 1
try:
_max = self.c_cardinality["max"]
except KeyError:
_max = "unbounded"
return _min, _max
else:
return 1, 1
return None | def function[child_cardinality, parameter[self, child]]:
constant[ Return the cardinality of a child element
:param child: The name of the child element
:return: The cardinality as a 2-tuple (min, max).
The max value is either a number or the string "unbounded".
The min value is always a number.
]
for taget[tuple[[<ast.Name object at 0x7da1b206b7c0>, <ast.Name object at 0x7da1b206b190>]]] in starred[call[name[self].c_children.values, parameter[]]] begin[:]
if compare[name[child] equal[==] name[prop]] begin[:]
if call[name[isinstance], parameter[name[klassdef], name[list]]] begin[:]
<ast.Try object at 0x7da1b20687f0>
<ast.Try object at 0x7da1b20b44f0>
return[tuple[[<ast.Name object at 0x7da1b20b7310>, <ast.Name object at 0x7da1b20b46a0>]]]
return[constant[None]] | keyword[def] identifier[child_cardinality] ( identifier[self] , identifier[child] ):
literal[string]
keyword[for] identifier[prop] , identifier[klassdef] keyword[in] identifier[self] . identifier[c_children] . identifier[values] ():
keyword[if] identifier[child] == identifier[prop] :
keyword[if] identifier[isinstance] ( identifier[klassdef] , identifier[list] ):
keyword[try] :
identifier[_min] = identifier[self] . identifier[c_cardinality] [ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[_min] = literal[int]
keyword[try] :
identifier[_max] = identifier[self] . identifier[c_cardinality] [ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[_max] = literal[string]
keyword[return] identifier[_min] , identifier[_max]
keyword[else] :
keyword[return] literal[int] , literal[int]
keyword[return] keyword[None] | def child_cardinality(self, child):
""" Return the cardinality of a child element
:param child: The name of the child element
:return: The cardinality as a 2-tuple (min, max).
The max value is either a number or the string "unbounded".
The min value is always a number.
"""
for (prop, klassdef) in self.c_children.values():
if child == prop:
if isinstance(klassdef, list):
try:
_min = self.c_cardinality['min'] # depends on [control=['try'], data=[]]
except KeyError:
_min = 1 # depends on [control=['except'], data=[]]
try:
_max = self.c_cardinality['max'] # depends on [control=['try'], data=[]]
except KeyError:
_max = 'unbounded' # depends on [control=['except'], data=[]]
return (_min, _max) # depends on [control=['if'], data=[]]
else:
return (1, 1) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return None |
def mdaOnes(shap, dtype=numpy.float, mask=None):
"""
One constructor for masked distributed array
@param shap the shape of the array
@param dtype the numpy data type
@param mask mask array (or None if all data elements are valid)
"""
res = MaskedDistArray(shap, dtype)
res[:] = 1
res.mask = mask
return res | def function[mdaOnes, parameter[shap, dtype, mask]]:
constant[
One constructor for masked distributed array
@param shap the shape of the array
@param dtype the numpy data type
@param mask mask array (or None if all data elements are valid)
]
variable[res] assign[=] call[name[MaskedDistArray], parameter[name[shap], name[dtype]]]
call[name[res]][<ast.Slice object at 0x7da204622e30>] assign[=] constant[1]
name[res].mask assign[=] name[mask]
return[name[res]] | keyword[def] identifier[mdaOnes] ( identifier[shap] , identifier[dtype] = identifier[numpy] . identifier[float] , identifier[mask] = keyword[None] ):
literal[string]
identifier[res] = identifier[MaskedDistArray] ( identifier[shap] , identifier[dtype] )
identifier[res] [:]= literal[int]
identifier[res] . identifier[mask] = identifier[mask]
keyword[return] identifier[res] | def mdaOnes(shap, dtype=numpy.float, mask=None):
"""
One constructor for masked distributed array
@param shap the shape of the array
@param dtype the numpy data type
@param mask mask array (or None if all data elements are valid)
"""
res = MaskedDistArray(shap, dtype)
res[:] = 1
res.mask = mask
return res |
def inverse(self):
"""Inverse of this operator.
The inverse of ``scalar * op`` is given by
``op.inverse * 1/scalar`` if ``scalar != 0``. If ``scalar == 0``,
the inverse is not defined.
``OperatorLeftScalarMult(op, s).inverse ==
OperatorRightScalarMult(op.inverse, 1/s)``
Examples
--------
>>> space = odl.rn(3)
>>> operator = odl.IdentityOperator(space)
>>> left_mul_op = OperatorLeftScalarMult(operator, 3)
>>> left_mul_op.inverse([3, 3, 3])
rn(3).element([ 1., 1., 1.])
"""
if self.scalar == 0.0:
raise ZeroDivisionError('{} not invertible'.format(self))
return self.operator.inverse * (1.0 / self.scalar) | def function[inverse, parameter[self]]:
constant[Inverse of this operator.
The inverse of ``scalar * op`` is given by
``op.inverse * 1/scalar`` if ``scalar != 0``. If ``scalar == 0``,
the inverse is not defined.
``OperatorLeftScalarMult(op, s).inverse ==
OperatorRightScalarMult(op.inverse, 1/s)``
Examples
--------
>>> space = odl.rn(3)
>>> operator = odl.IdentityOperator(space)
>>> left_mul_op = OperatorLeftScalarMult(operator, 3)
>>> left_mul_op.inverse([3, 3, 3])
rn(3).element([ 1., 1., 1.])
]
if compare[name[self].scalar equal[==] constant[0.0]] begin[:]
<ast.Raise object at 0x7da18f58e5c0>
return[binary_operation[name[self].operator.inverse * binary_operation[constant[1.0] / name[self].scalar]]] | keyword[def] identifier[inverse] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[scalar] == literal[int] :
keyword[raise] identifier[ZeroDivisionError] ( literal[string] . identifier[format] ( identifier[self] ))
keyword[return] identifier[self] . identifier[operator] . identifier[inverse] *( literal[int] / identifier[self] . identifier[scalar] ) | def inverse(self):
"""Inverse of this operator.
The inverse of ``scalar * op`` is given by
``op.inverse * 1/scalar`` if ``scalar != 0``. If ``scalar == 0``,
the inverse is not defined.
``OperatorLeftScalarMult(op, s).inverse ==
OperatorRightScalarMult(op.inverse, 1/s)``
Examples
--------
>>> space = odl.rn(3)
>>> operator = odl.IdentityOperator(space)
>>> left_mul_op = OperatorLeftScalarMult(operator, 3)
>>> left_mul_op.inverse([3, 3, 3])
rn(3).element([ 1., 1., 1.])
"""
if self.scalar == 0.0:
raise ZeroDivisionError('{} not invertible'.format(self)) # depends on [control=['if'], data=[]]
return self.operator.inverse * (1.0 / self.scalar) |
def parse_unicode(self, i, wide=False):
"""Parse Unicode."""
text = self.get_wide_unicode(i) if wide else self.get_narrow_unicode(i)
value = int(text, 16)
single = self.get_single_stack()
if self.span_stack:
text = self.convert_case(chr(value), self.span_stack[-1])
value = ord(self.convert_case(text, single)) if single is not None else ord(text)
elif single:
value = ord(self.convert_case(chr(value), single))
if self.use_format and value in _CURLY_BRACKETS_ORD:
self.handle_format(chr(value), i)
elif value <= 0xFF:
self.result.append('\\%03o' % value)
else:
self.result.append(chr(value)) | def function[parse_unicode, parameter[self, i, wide]]:
constant[Parse Unicode.]
variable[text] assign[=] <ast.IfExp object at 0x7da18ede7d60>
variable[value] assign[=] call[name[int], parameter[name[text], constant[16]]]
variable[single] assign[=] call[name[self].get_single_stack, parameter[]]
if name[self].span_stack begin[:]
variable[text] assign[=] call[name[self].convert_case, parameter[call[name[chr], parameter[name[value]]], call[name[self].span_stack][<ast.UnaryOp object at 0x7da18ede5510>]]]
variable[value] assign[=] <ast.IfExp object at 0x7da18ede59f0>
if <ast.BoolOp object at 0x7da18ede78e0> begin[:]
call[name[self].handle_format, parameter[call[name[chr], parameter[name[value]]], name[i]]] | keyword[def] identifier[parse_unicode] ( identifier[self] , identifier[i] , identifier[wide] = keyword[False] ):
literal[string]
identifier[text] = identifier[self] . identifier[get_wide_unicode] ( identifier[i] ) keyword[if] identifier[wide] keyword[else] identifier[self] . identifier[get_narrow_unicode] ( identifier[i] )
identifier[value] = identifier[int] ( identifier[text] , literal[int] )
identifier[single] = identifier[self] . identifier[get_single_stack] ()
keyword[if] identifier[self] . identifier[span_stack] :
identifier[text] = identifier[self] . identifier[convert_case] ( identifier[chr] ( identifier[value] ), identifier[self] . identifier[span_stack] [- literal[int] ])
identifier[value] = identifier[ord] ( identifier[self] . identifier[convert_case] ( identifier[text] , identifier[single] )) keyword[if] identifier[single] keyword[is] keyword[not] keyword[None] keyword[else] identifier[ord] ( identifier[text] )
keyword[elif] identifier[single] :
identifier[value] = identifier[ord] ( identifier[self] . identifier[convert_case] ( identifier[chr] ( identifier[value] ), identifier[single] ))
keyword[if] identifier[self] . identifier[use_format] keyword[and] identifier[value] keyword[in] identifier[_CURLY_BRACKETS_ORD] :
identifier[self] . identifier[handle_format] ( identifier[chr] ( identifier[value] ), identifier[i] )
keyword[elif] identifier[value] <= literal[int] :
identifier[self] . identifier[result] . identifier[append] ( literal[string] % identifier[value] )
keyword[else] :
identifier[self] . identifier[result] . identifier[append] ( identifier[chr] ( identifier[value] )) | def parse_unicode(self, i, wide=False):
"""Parse Unicode."""
text = self.get_wide_unicode(i) if wide else self.get_narrow_unicode(i)
value = int(text, 16)
single = self.get_single_stack()
if self.span_stack:
text = self.convert_case(chr(value), self.span_stack[-1])
value = ord(self.convert_case(text, single)) if single is not None else ord(text) # depends on [control=['if'], data=[]]
elif single:
value = ord(self.convert_case(chr(value), single)) # depends on [control=['if'], data=[]]
if self.use_format and value in _CURLY_BRACKETS_ORD:
self.handle_format(chr(value), i) # depends on [control=['if'], data=[]]
elif value <= 255:
self.result.append('\\%03o' % value) # depends on [control=['if'], data=['value']]
else:
self.result.append(chr(value)) |
def replace(path,
pattern,
repl,
count=0,
flags=8,
bufsize=1,
append_if_not_found=False,
prepend_if_not_found=False,
not_found_content=None,
backup='.bak',
dry_run=False,
search_only=False,
show_changes=True,
ignore_if_missing=False,
preserve_inode=True,
backslash_literal=False,
):
'''
.. versionadded:: 0.17.0
Replace occurrences of a pattern in a file. If ``show_changes`` is
``True``, then a diff of what changed will be returned, otherwise a
``True`` will be returned when changes are made, and ``False`` when
no changes are made.
This is a pure Python implementation that wraps Python's :py:func:`~re.sub`.
path
Filesystem path to the file to be edited. If a symlink is specified, it
will be resolved to its target.
pattern
A regular expression, to be matched using Python's
:py:func:`~re.search`.
repl
The replacement text
count : 0
Maximum number of pattern occurrences to be replaced. If count is a
positive integer ``n``, only ``n`` occurrences will be replaced,
otherwise all occurrences will be replaced.
flags (list or int)
A list of flags defined in the ``re`` module documentation from the
Python standard library. Each list item should be a string that will
correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
corresponding to the XOR (``|``) of all the desired flags. Defaults to
8 (which supports 'MULTILINE').
bufsize (int or str)
How much of the file to buffer into memory at once. The
default value ``1`` processes one line at a time. The special value
``file`` may be specified which will read the entire file into memory
before processing.
append_if_not_found : False
.. versionadded:: 2014.7.0
If set to ``True``, and pattern is not found, then the content will be
appended to the file.
prepend_if_not_found : False
.. versionadded:: 2014.7.0
If set to ``True`` and pattern is not found, then the content will be
prepended to the file.
not_found_content
.. versionadded:: 2014.7.0
Content to use for append/prepend if not found. If None (default), uses
``repl``. Useful when ``repl`` uses references to group in pattern.
backup : .bak
The file extension to use for a backup of the file before editing. Set
to ``False`` to skip making a backup.
dry_run : False
If set to ``True``, no changes will be made to the file, the function
will just return the changes that would have been made (or a
``True``/``False`` value if ``show_changes`` is set to ``False``).
search_only : False
If set to true, this no changes will be performed on the file, and this
function will simply return ``True`` if the pattern was matched, and
``False`` if not.
show_changes : True
If ``True``, return a diff of changes made. Otherwise, return ``True``
if changes were made, and ``False`` if not.
.. note::
Using this option will store two copies of the file in memory (the
original version and the edited version) in order to generate the
diff. This may not normally be a concern, but could impact
performance if used with large files.
ignore_if_missing : False
.. versionadded:: 2015.8.0
If set to ``True``, this function will simply return ``False``
if the file doesn't exist. Otherwise, an error will be thrown.
preserve_inode : True
.. versionadded:: 2015.8.0
Preserve the inode of the file, so that any hard links continue to
share the inode with the original filename. This works by *copying* the
file, reading from the copy, and writing to the file at the original
inode. If ``False``, the file will be *moved* rather than copied, and a
new file will be written to a new inode, but using the original
filename. Hard links will then share an inode with the backup, instead
(if using ``backup`` to create a backup copy).
backslash_literal : False
.. versionadded:: 2016.11.7
Interpret backslashes as literal backslashes for the repl and not
escape characters. This will help when using append/prepend so that
the backslashes are not interpreted for the repl on the second run of
the state.
If an equal sign (``=``) appears in an argument to a Salt command it is
interpreted as a keyword argument in the format ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: bash
salt '*' file.replace /path/to/file pattern='=' repl=':'
salt '*' file.replace /path/to/file pattern="bind-address\\s*=" repl='bind-address:'
CLI Examples:
.. code-block:: bash
salt '*' file.replace /etc/httpd/httpd.conf pattern='LogLevel warn' repl='LogLevel info'
salt '*' file.replace /some/file pattern='before' repl='after' flags='[MULTILINE, IGNORECASE]'
'''
symlink = False
if is_link(path):
symlink = True
target_path = os.readlink(path)
given_path = os.path.expanduser(path)
path = os.path.realpath(os.path.expanduser(path))
if not os.path.exists(path):
if ignore_if_missing:
return False
else:
raise SaltInvocationError('File not found: {0}'.format(path))
if not __utils__['files.is_text'](path):
raise SaltInvocationError(
'Cannot perform string replacements on a binary file: {0}'
.format(path)
)
if search_only and (append_if_not_found or prepend_if_not_found):
raise SaltInvocationError(
'search_only cannot be used with append/prepend_if_not_found'
)
if append_if_not_found and prepend_if_not_found:
raise SaltInvocationError(
'Only one of append and prepend_if_not_found is permitted'
)
flags_num = _get_flags(flags)
cpattern = re.compile(salt.utils.stringutils.to_bytes(pattern), flags_num)
filesize = os.path.getsize(path)
if bufsize == 'file':
bufsize = filesize
# Search the file; track if any changes have been made for the return val
has_changes = False
orig_file = [] # used for show_changes and change detection
new_file = [] # used for show_changes and change detection
if not salt.utils.platform.is_windows():
pre_user = get_user(path)
pre_group = get_group(path)
pre_mode = salt.utils.files.normalize_mode(get_mode(path))
# Avoid TypeErrors by forcing repl to be bytearray related to mmap
# Replacement text may contains integer: 123 for example
repl = salt.utils.stringutils.to_bytes(six.text_type(repl))
if not_found_content:
not_found_content = salt.utils.stringutils.to_bytes(not_found_content)
found = False
temp_file = None
content = salt.utils.stringutils.to_unicode(not_found_content) \
if not_found_content and (prepend_if_not_found or append_if_not_found) \
else salt.utils.stringutils.to_unicode(repl)
try:
# First check the whole file, determine whether to make the replacement
# Searching first avoids modifying the time stamp if there are no changes
r_data = None
# Use a read-only handle to open the file
with salt.utils.files.fopen(path,
mode='rb',
buffering=bufsize) as r_file:
try:
# mmap throws a ValueError if the file is empty.
r_data = mmap.mmap(r_file.fileno(),
0,
access=mmap.ACCESS_READ)
except (ValueError, mmap.error):
# size of file in /proc is 0, but contains data
r_data = salt.utils.stringutils.to_bytes("".join(r_file))
if search_only:
# Just search; bail as early as a match is found
if re.search(cpattern, r_data):
return True # `with` block handles file closure
else:
return False
else:
result, nrepl = re.subn(cpattern,
repl.replace('\\', '\\\\') if backslash_literal else repl,
r_data,
count)
# found anything? (even if no change)
if nrepl > 0:
found = True
# Identity check the potential change
has_changes = True if pattern != repl else has_changes
if prepend_if_not_found or append_if_not_found:
# Search for content, to avoid pre/appending the
# content if it was pre/appended in a previous run.
if re.search(salt.utils.stringutils.to_bytes('^{0}($|(?=\r\n))'.format(re.escape(content))),
r_data,
flags=flags_num):
# Content was found, so set found.
found = True
orig_file = r_data.read(filesize).splitlines(True) \
if isinstance(r_data, mmap.mmap) \
else r_data.splitlines(True)
new_file = result.splitlines(True)
except (OSError, IOError) as exc:
raise CommandExecutionError(
"Unable to open file '{0}'. "
"Exception: {1}".format(path, exc)
)
finally:
if r_data and isinstance(r_data, mmap.mmap):
r_data.close()
if has_changes and not dry_run:
# Write the replacement text in this block.
try:
# Create a copy to read from and to use as a backup later
temp_file = _mkstemp_copy(path=path,
preserve_inode=preserve_inode)
except (OSError, IOError) as exc:
raise CommandExecutionError("Exception: {0}".format(exc))
r_data = None
try:
# Open the file in write mode
with salt.utils.files.fopen(path,
mode='w',
buffering=bufsize) as w_file:
try:
# Open the temp file in read mode
with salt.utils.files.fopen(temp_file,
mode='r',
buffering=bufsize) as r_file:
r_data = mmap.mmap(r_file.fileno(),
0,
access=mmap.ACCESS_READ)
result, nrepl = re.subn(cpattern,
repl.replace('\\', '\\\\') if backslash_literal else repl,
r_data,
count)
try:
w_file.write(salt.utils.stringutils.to_str(result))
except (OSError, IOError) as exc:
raise CommandExecutionError(
"Unable to write file '{0}'. Contents may "
"be truncated. Temporary file contains copy "
"at '{1}'. "
"Exception: {2}".format(path, temp_file, exc)
)
except (OSError, IOError) as exc:
raise CommandExecutionError("Exception: {0}".format(exc))
finally:
if r_data and isinstance(r_data, mmap.mmap):
r_data.close()
except (OSError, IOError) as exc:
raise CommandExecutionError("Exception: {0}".format(exc))
if not found and (append_if_not_found or prepend_if_not_found):
if not_found_content is None:
not_found_content = repl
if prepend_if_not_found:
new_file.insert(0, not_found_content + salt.utils.stringutils.to_bytes(os.linesep))
else:
# append_if_not_found
# Make sure we have a newline at the end of the file
if new_file:
if not new_file[-1].endswith(salt.utils.stringutils.to_bytes(os.linesep)):
new_file[-1] += salt.utils.stringutils.to_bytes(os.linesep)
new_file.append(not_found_content + salt.utils.stringutils.to_bytes(os.linesep))
has_changes = True
if not dry_run:
try:
# Create a copy to read from and for later use as a backup
temp_file = _mkstemp_copy(path=path,
preserve_inode=preserve_inode)
except (OSError, IOError) as exc:
raise CommandExecutionError("Exception: {0}".format(exc))
# write new content in the file while avoiding partial reads
try:
fh_ = salt.utils.atomicfile.atomic_open(path, 'wb')
for line in new_file:
fh_.write(salt.utils.stringutils.to_bytes(line))
finally:
fh_.close()
if backup and has_changes and not dry_run:
# keep the backup only if it was requested
# and only if there were any changes
backup_name = '{0}{1}'.format(path, backup)
try:
shutil.move(temp_file, backup_name)
except (OSError, IOError) as exc:
raise CommandExecutionError(
"Unable to move the temp file '{0}' to the "
"backup file '{1}'. "
"Exception: {2}".format(path, temp_file, exc)
)
if symlink:
symlink_backup = '{0}{1}'.format(given_path, backup)
target_backup = '{0}{1}'.format(target_path, backup)
# Always clobber any existing symlink backup
# to match the behaviour of the 'backup' option
try:
os.symlink(target_backup, symlink_backup)
except OSError:
os.remove(symlink_backup)
os.symlink(target_backup, symlink_backup)
except Exception:
raise CommandExecutionError(
"Unable create backup symlink '{0}'. "
"Target was '{1}'. "
"Exception: {2}".format(symlink_backup, target_backup,
exc)
)
elif temp_file:
try:
os.remove(temp_file)
except (OSError, IOError) as exc:
raise CommandExecutionError(
"Unable to delete temp file '{0}'. "
"Exception: {1}".format(temp_file, exc)
)
if not dry_run and not salt.utils.platform.is_windows():
check_perms(path, None, pre_user, pre_group, pre_mode)
differences = __utils__['stringutils.get_diff'](orig_file, new_file)
if show_changes:
return differences
# We may have found a regex line match but don't need to change the line
# (for situations where the pattern also matches the repl). Revert the
# has_changes flag to False if the final result is unchanged.
if not differences:
has_changes = False
return has_changes | def function[replace, parameter[path, pattern, repl, count, flags, bufsize, append_if_not_found, prepend_if_not_found, not_found_content, backup, dry_run, search_only, show_changes, ignore_if_missing, preserve_inode, backslash_literal]]:
constant[
.. versionadded:: 0.17.0
Replace occurrences of a pattern in a file. If ``show_changes`` is
``True``, then a diff of what changed will be returned, otherwise a
``True`` will be returned when changes are made, and ``False`` when
no changes are made.
This is a pure Python implementation that wraps Python's :py:func:`~re.sub`.
path
Filesystem path to the file to be edited. If a symlink is specified, it
will be resolved to its target.
pattern
A regular expression, to be matched using Python's
:py:func:`~re.search`.
repl
The replacement text
count : 0
Maximum number of pattern occurrences to be replaced. If count is a
positive integer ``n``, only ``n`` occurrences will be replaced,
otherwise all occurrences will be replaced.
flags (list or int)
A list of flags defined in the ``re`` module documentation from the
Python standard library. Each list item should be a string that will
correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
corresponding to the XOR (``|``) of all the desired flags. Defaults to
8 (which supports 'MULTILINE').
bufsize (int or str)
How much of the file to buffer into memory at once. The
default value ``1`` processes one line at a time. The special value
``file`` may be specified which will read the entire file into memory
before processing.
append_if_not_found : False
.. versionadded:: 2014.7.0
If set to ``True``, and pattern is not found, then the content will be
appended to the file.
prepend_if_not_found : False
.. versionadded:: 2014.7.0
If set to ``True`` and pattern is not found, then the content will be
prepended to the file.
not_found_content
.. versionadded:: 2014.7.0
Content to use for append/prepend if not found. If None (default), uses
``repl``. Useful when ``repl`` uses references to group in pattern.
backup : .bak
The file extension to use for a backup of the file before editing. Set
to ``False`` to skip making a backup.
dry_run : False
If set to ``True``, no changes will be made to the file, the function
will just return the changes that would have been made (or a
``True``/``False`` value if ``show_changes`` is set to ``False``).
search_only : False
If set to true, this no changes will be performed on the file, and this
function will simply return ``True`` if the pattern was matched, and
``False`` if not.
show_changes : True
If ``True``, return a diff of changes made. Otherwise, return ``True``
if changes were made, and ``False`` if not.
.. note::
Using this option will store two copies of the file in memory (the
original version and the edited version) in order to generate the
diff. This may not normally be a concern, but could impact
performance if used with large files.
ignore_if_missing : False
.. versionadded:: 2015.8.0
If set to ``True``, this function will simply return ``False``
if the file doesn't exist. Otherwise, an error will be thrown.
preserve_inode : True
.. versionadded:: 2015.8.0
Preserve the inode of the file, so that any hard links continue to
share the inode with the original filename. This works by *copying* the
file, reading from the copy, and writing to the file at the original
inode. If ``False``, the file will be *moved* rather than copied, and a
new file will be written to a new inode, but using the original
filename. Hard links will then share an inode with the backup, instead
(if using ``backup`` to create a backup copy).
backslash_literal : False
.. versionadded:: 2016.11.7
Interpret backslashes as literal backslashes for the repl and not
escape characters. This will help when using append/prepend so that
the backslashes are not interpreted for the repl on the second run of
the state.
If an equal sign (``=``) appears in an argument to a Salt command it is
interpreted as a keyword argument in the format ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: bash
salt '*' file.replace /path/to/file pattern='=' repl=':'
salt '*' file.replace /path/to/file pattern="bind-address\s*=" repl='bind-address:'
CLI Examples:
.. code-block:: bash
salt '*' file.replace /etc/httpd/httpd.conf pattern='LogLevel warn' repl='LogLevel info'
salt '*' file.replace /some/file pattern='before' repl='after' flags='[MULTILINE, IGNORECASE]'
]
variable[symlink] assign[=] constant[False]
if call[name[is_link], parameter[name[path]]] begin[:]
variable[symlink] assign[=] constant[True]
variable[target_path] assign[=] call[name[os].readlink, parameter[name[path]]]
variable[given_path] assign[=] call[name[os].path.expanduser, parameter[name[path]]]
variable[path] assign[=] call[name[os].path.realpath, parameter[call[name[os].path.expanduser, parameter[name[path]]]]]
if <ast.UnaryOp object at 0x7da2043461d0> begin[:]
if name[ignore_if_missing] begin[:]
return[constant[False]]
if <ast.UnaryOp object at 0x7da204347f70> begin[:]
<ast.Raise object at 0x7da204345720>
if <ast.BoolOp object at 0x7da204347e80> begin[:]
<ast.Raise object at 0x7da2043473d0>
if <ast.BoolOp object at 0x7da2043452a0> begin[:]
<ast.Raise object at 0x7da204344820>
variable[flags_num] assign[=] call[name[_get_flags], parameter[name[flags]]]
variable[cpattern] assign[=] call[name[re].compile, parameter[call[name[salt].utils.stringutils.to_bytes, parameter[name[pattern]]], name[flags_num]]]
variable[filesize] assign[=] call[name[os].path.getsize, parameter[name[path]]]
if compare[name[bufsize] equal[==] constant[file]] begin[:]
variable[bufsize] assign[=] name[filesize]
variable[has_changes] assign[=] constant[False]
variable[orig_file] assign[=] list[[]]
variable[new_file] assign[=] list[[]]
if <ast.UnaryOp object at 0x7da20ec06260> begin[:]
variable[pre_user] assign[=] call[name[get_user], parameter[name[path]]]
variable[pre_group] assign[=] call[name[get_group], parameter[name[path]]]
variable[pre_mode] assign[=] call[name[salt].utils.files.normalize_mode, parameter[call[name[get_mode], parameter[name[path]]]]]
variable[repl] assign[=] call[name[salt].utils.stringutils.to_bytes, parameter[call[name[six].text_type, parameter[name[repl]]]]]
if name[not_found_content] begin[:]
variable[not_found_content] assign[=] call[name[salt].utils.stringutils.to_bytes, parameter[name[not_found_content]]]
variable[found] assign[=] constant[False]
variable[temp_file] assign[=] constant[None]
variable[content] assign[=] <ast.IfExp object at 0x7da20c6e68c0>
<ast.Try object at 0x7da20c6e6710>
if <ast.BoolOp object at 0x7da20c6e7940> begin[:]
<ast.Try object at 0x7da20eb294b0>
variable[r_data] assign[=] constant[None]
<ast.Try object at 0x7da20ed9bd30>
if <ast.BoolOp object at 0x7da1b2346740> begin[:]
if compare[name[not_found_content] is constant[None]] begin[:]
variable[not_found_content] assign[=] name[repl]
if name[prepend_if_not_found] begin[:]
call[name[new_file].insert, parameter[constant[0], binary_operation[name[not_found_content] + call[name[salt].utils.stringutils.to_bytes, parameter[name[os].linesep]]]]]
variable[has_changes] assign[=] constant[True]
if <ast.UnaryOp object at 0x7da1b2345b70> begin[:]
<ast.Try object at 0x7da1b23476a0>
<ast.Try object at 0x7da1b2344f10>
if <ast.BoolOp object at 0x7da1b2347910> begin[:]
variable[backup_name] assign[=] call[constant[{0}{1}].format, parameter[name[path], name[backup]]]
<ast.Try object at 0x7da1b2344520>
if name[symlink] begin[:]
variable[symlink_backup] assign[=] call[constant[{0}{1}].format, parameter[name[given_path], name[backup]]]
variable[target_backup] assign[=] call[constant[{0}{1}].format, parameter[name[target_path], name[backup]]]
<ast.Try object at 0x7da1b2345210>
if <ast.BoolOp object at 0x7da1b26ac100> begin[:]
call[name[check_perms], parameter[name[path], constant[None], name[pre_user], name[pre_group], name[pre_mode]]]
variable[differences] assign[=] call[call[name[__utils__]][constant[stringutils.get_diff]], parameter[name[orig_file], name[new_file]]]
if name[show_changes] begin[:]
return[name[differences]]
if <ast.UnaryOp object at 0x7da1b21e26e0> begin[:]
variable[has_changes] assign[=] constant[False]
return[name[has_changes]] | keyword[def] identifier[replace] ( identifier[path] ,
identifier[pattern] ,
identifier[repl] ,
identifier[count] = literal[int] ,
identifier[flags] = literal[int] ,
identifier[bufsize] = literal[int] ,
identifier[append_if_not_found] = keyword[False] ,
identifier[prepend_if_not_found] = keyword[False] ,
identifier[not_found_content] = keyword[None] ,
identifier[backup] = literal[string] ,
identifier[dry_run] = keyword[False] ,
identifier[search_only] = keyword[False] ,
identifier[show_changes] = keyword[True] ,
identifier[ignore_if_missing] = keyword[False] ,
identifier[preserve_inode] = keyword[True] ,
identifier[backslash_literal] = keyword[False] ,
):
literal[string]
identifier[symlink] = keyword[False]
keyword[if] identifier[is_link] ( identifier[path] ):
identifier[symlink] = keyword[True]
identifier[target_path] = identifier[os] . identifier[readlink] ( identifier[path] )
identifier[given_path] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[path] )
identifier[path] = identifier[os] . identifier[path] . identifier[realpath] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[path] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
keyword[if] identifier[ignore_if_missing] :
keyword[return] keyword[False]
keyword[else] :
keyword[raise] identifier[SaltInvocationError] ( literal[string] . identifier[format] ( identifier[path] ))
keyword[if] keyword[not] identifier[__utils__] [ literal[string] ]( identifier[path] ):
keyword[raise] identifier[SaltInvocationError] (
literal[string]
. identifier[format] ( identifier[path] )
)
keyword[if] identifier[search_only] keyword[and] ( identifier[append_if_not_found] keyword[or] identifier[prepend_if_not_found] ):
keyword[raise] identifier[SaltInvocationError] (
literal[string]
)
keyword[if] identifier[append_if_not_found] keyword[and] identifier[prepend_if_not_found] :
keyword[raise] identifier[SaltInvocationError] (
literal[string]
)
identifier[flags_num] = identifier[_get_flags] ( identifier[flags] )
identifier[cpattern] = identifier[re] . identifier[compile] ( identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_bytes] ( identifier[pattern] ), identifier[flags_num] )
identifier[filesize] = identifier[os] . identifier[path] . identifier[getsize] ( identifier[path] )
keyword[if] identifier[bufsize] == literal[string] :
identifier[bufsize] = identifier[filesize]
identifier[has_changes] = keyword[False]
identifier[orig_file] =[]
identifier[new_file] =[]
keyword[if] keyword[not] identifier[salt] . identifier[utils] . identifier[platform] . identifier[is_windows] ():
identifier[pre_user] = identifier[get_user] ( identifier[path] )
identifier[pre_group] = identifier[get_group] ( identifier[path] )
identifier[pre_mode] = identifier[salt] . identifier[utils] . identifier[files] . identifier[normalize_mode] ( identifier[get_mode] ( identifier[path] ))
identifier[repl] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_bytes] ( identifier[six] . identifier[text_type] ( identifier[repl] ))
keyword[if] identifier[not_found_content] :
identifier[not_found_content] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_bytes] ( identifier[not_found_content] )
identifier[found] = keyword[False]
identifier[temp_file] = keyword[None]
identifier[content] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[not_found_content] ) keyword[if] identifier[not_found_content] keyword[and] ( identifier[prepend_if_not_found] keyword[or] identifier[append_if_not_found] ) keyword[else] identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[repl] )
keyword[try] :
identifier[r_data] = keyword[None]
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[path] ,
identifier[mode] = literal[string] ,
identifier[buffering] = identifier[bufsize] ) keyword[as] identifier[r_file] :
keyword[try] :
identifier[r_data] = identifier[mmap] . identifier[mmap] ( identifier[r_file] . identifier[fileno] (),
literal[int] ,
identifier[access] = identifier[mmap] . identifier[ACCESS_READ] )
keyword[except] ( identifier[ValueError] , identifier[mmap] . identifier[error] ):
identifier[r_data] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_bytes] ( literal[string] . identifier[join] ( identifier[r_file] ))
keyword[if] identifier[search_only] :
keyword[if] identifier[re] . identifier[search] ( identifier[cpattern] , identifier[r_data] ):
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False]
keyword[else] :
identifier[result] , identifier[nrepl] = identifier[re] . identifier[subn] ( identifier[cpattern] ,
identifier[repl] . identifier[replace] ( literal[string] , literal[string] ) keyword[if] identifier[backslash_literal] keyword[else] identifier[repl] ,
identifier[r_data] ,
identifier[count] )
keyword[if] identifier[nrepl] > literal[int] :
identifier[found] = keyword[True]
identifier[has_changes] = keyword[True] keyword[if] identifier[pattern] != identifier[repl] keyword[else] identifier[has_changes]
keyword[if] identifier[prepend_if_not_found] keyword[or] identifier[append_if_not_found] :
keyword[if] identifier[re] . identifier[search] ( identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_bytes] ( literal[string] . identifier[format] ( identifier[re] . identifier[escape] ( identifier[content] ))),
identifier[r_data] ,
identifier[flags] = identifier[flags_num] ):
identifier[found] = keyword[True]
identifier[orig_file] = identifier[r_data] . identifier[read] ( identifier[filesize] ). identifier[splitlines] ( keyword[True] ) keyword[if] identifier[isinstance] ( identifier[r_data] , identifier[mmap] . identifier[mmap] ) keyword[else] identifier[r_data] . identifier[splitlines] ( keyword[True] )
identifier[new_file] = identifier[result] . identifier[splitlines] ( keyword[True] )
keyword[except] ( identifier[OSError] , identifier[IOError] ) keyword[as] identifier[exc] :
keyword[raise] identifier[CommandExecutionError] (
literal[string]
literal[string] . identifier[format] ( identifier[path] , identifier[exc] )
)
keyword[finally] :
keyword[if] identifier[r_data] keyword[and] identifier[isinstance] ( identifier[r_data] , identifier[mmap] . identifier[mmap] ):
identifier[r_data] . identifier[close] ()
keyword[if] identifier[has_changes] keyword[and] keyword[not] identifier[dry_run] :
keyword[try] :
identifier[temp_file] = identifier[_mkstemp_copy] ( identifier[path] = identifier[path] ,
identifier[preserve_inode] = identifier[preserve_inode] )
keyword[except] ( identifier[OSError] , identifier[IOError] ) keyword[as] identifier[exc] :
keyword[raise] identifier[CommandExecutionError] ( literal[string] . identifier[format] ( identifier[exc] ))
identifier[r_data] = keyword[None]
keyword[try] :
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[path] ,
identifier[mode] = literal[string] ,
identifier[buffering] = identifier[bufsize] ) keyword[as] identifier[w_file] :
keyword[try] :
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[temp_file] ,
identifier[mode] = literal[string] ,
identifier[buffering] = identifier[bufsize] ) keyword[as] identifier[r_file] :
identifier[r_data] = identifier[mmap] . identifier[mmap] ( identifier[r_file] . identifier[fileno] (),
literal[int] ,
identifier[access] = identifier[mmap] . identifier[ACCESS_READ] )
identifier[result] , identifier[nrepl] = identifier[re] . identifier[subn] ( identifier[cpattern] ,
identifier[repl] . identifier[replace] ( literal[string] , literal[string] ) keyword[if] identifier[backslash_literal] keyword[else] identifier[repl] ,
identifier[r_data] ,
identifier[count] )
keyword[try] :
identifier[w_file] . identifier[write] ( identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_str] ( identifier[result] ))
keyword[except] ( identifier[OSError] , identifier[IOError] ) keyword[as] identifier[exc] :
keyword[raise] identifier[CommandExecutionError] (
literal[string]
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[path] , identifier[temp_file] , identifier[exc] )
)
keyword[except] ( identifier[OSError] , identifier[IOError] ) keyword[as] identifier[exc] :
keyword[raise] identifier[CommandExecutionError] ( literal[string] . identifier[format] ( identifier[exc] ))
keyword[finally] :
keyword[if] identifier[r_data] keyword[and] identifier[isinstance] ( identifier[r_data] , identifier[mmap] . identifier[mmap] ):
identifier[r_data] . identifier[close] ()
keyword[except] ( identifier[OSError] , identifier[IOError] ) keyword[as] identifier[exc] :
keyword[raise] identifier[CommandExecutionError] ( literal[string] . identifier[format] ( identifier[exc] ))
keyword[if] keyword[not] identifier[found] keyword[and] ( identifier[append_if_not_found] keyword[or] identifier[prepend_if_not_found] ):
keyword[if] identifier[not_found_content] keyword[is] keyword[None] :
identifier[not_found_content] = identifier[repl]
keyword[if] identifier[prepend_if_not_found] :
identifier[new_file] . identifier[insert] ( literal[int] , identifier[not_found_content] + identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_bytes] ( identifier[os] . identifier[linesep] ))
keyword[else] :
keyword[if] identifier[new_file] :
keyword[if] keyword[not] identifier[new_file] [- literal[int] ]. identifier[endswith] ( identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_bytes] ( identifier[os] . identifier[linesep] )):
identifier[new_file] [- literal[int] ]+= identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_bytes] ( identifier[os] . identifier[linesep] )
identifier[new_file] . identifier[append] ( identifier[not_found_content] + identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_bytes] ( identifier[os] . identifier[linesep] ))
identifier[has_changes] = keyword[True]
keyword[if] keyword[not] identifier[dry_run] :
keyword[try] :
identifier[temp_file] = identifier[_mkstemp_copy] ( identifier[path] = identifier[path] ,
identifier[preserve_inode] = identifier[preserve_inode] )
keyword[except] ( identifier[OSError] , identifier[IOError] ) keyword[as] identifier[exc] :
keyword[raise] identifier[CommandExecutionError] ( literal[string] . identifier[format] ( identifier[exc] ))
keyword[try] :
identifier[fh_] = identifier[salt] . identifier[utils] . identifier[atomicfile] . identifier[atomic_open] ( identifier[path] , literal[string] )
keyword[for] identifier[line] keyword[in] identifier[new_file] :
identifier[fh_] . identifier[write] ( identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_bytes] ( identifier[line] ))
keyword[finally] :
identifier[fh_] . identifier[close] ()
keyword[if] identifier[backup] keyword[and] identifier[has_changes] keyword[and] keyword[not] identifier[dry_run] :
identifier[backup_name] = literal[string] . identifier[format] ( identifier[path] , identifier[backup] )
keyword[try] :
identifier[shutil] . identifier[move] ( identifier[temp_file] , identifier[backup_name] )
keyword[except] ( identifier[OSError] , identifier[IOError] ) keyword[as] identifier[exc] :
keyword[raise] identifier[CommandExecutionError] (
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[path] , identifier[temp_file] , identifier[exc] )
)
keyword[if] identifier[symlink] :
identifier[symlink_backup] = literal[string] . identifier[format] ( identifier[given_path] , identifier[backup] )
identifier[target_backup] = literal[string] . identifier[format] ( identifier[target_path] , identifier[backup] )
keyword[try] :
identifier[os] . identifier[symlink] ( identifier[target_backup] , identifier[symlink_backup] )
keyword[except] identifier[OSError] :
identifier[os] . identifier[remove] ( identifier[symlink_backup] )
identifier[os] . identifier[symlink] ( identifier[target_backup] , identifier[symlink_backup] )
keyword[except] identifier[Exception] :
keyword[raise] identifier[CommandExecutionError] (
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[symlink_backup] , identifier[target_backup] ,
identifier[exc] )
)
keyword[elif] identifier[temp_file] :
keyword[try] :
identifier[os] . identifier[remove] ( identifier[temp_file] )
keyword[except] ( identifier[OSError] , identifier[IOError] ) keyword[as] identifier[exc] :
keyword[raise] identifier[CommandExecutionError] (
literal[string]
literal[string] . identifier[format] ( identifier[temp_file] , identifier[exc] )
)
keyword[if] keyword[not] identifier[dry_run] keyword[and] keyword[not] identifier[salt] . identifier[utils] . identifier[platform] . identifier[is_windows] ():
identifier[check_perms] ( identifier[path] , keyword[None] , identifier[pre_user] , identifier[pre_group] , identifier[pre_mode] )
identifier[differences] = identifier[__utils__] [ literal[string] ]( identifier[orig_file] , identifier[new_file] )
keyword[if] identifier[show_changes] :
keyword[return] identifier[differences]
keyword[if] keyword[not] identifier[differences] :
identifier[has_changes] = keyword[False]
keyword[return] identifier[has_changes] | def replace(path, pattern, repl, count=0, flags=8, bufsize=1, append_if_not_found=False, prepend_if_not_found=False, not_found_content=None, backup='.bak', dry_run=False, search_only=False, show_changes=True, ignore_if_missing=False, preserve_inode=True, backslash_literal=False):
"""
.. versionadded:: 0.17.0
Replace occurrences of a pattern in a file. If ``show_changes`` is
``True``, then a diff of what changed will be returned, otherwise a
``True`` will be returned when changes are made, and ``False`` when
no changes are made.
This is a pure Python implementation that wraps Python's :py:func:`~re.sub`.
path
Filesystem path to the file to be edited. If a symlink is specified, it
will be resolved to its target.
pattern
A regular expression, to be matched using Python's
:py:func:`~re.search`.
repl
The replacement text
count : 0
Maximum number of pattern occurrences to be replaced. If count is a
positive integer ``n``, only ``n`` occurrences will be replaced,
otherwise all occurrences will be replaced.
flags (list or int)
A list of flags defined in the ``re`` module documentation from the
Python standard library. Each list item should be a string that will
correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
corresponding to the XOR (``|``) of all the desired flags. Defaults to
8 (which supports 'MULTILINE').
bufsize (int or str)
How much of the file to buffer into memory at once. The
default value ``1`` processes one line at a time. The special value
``file`` may be specified which will read the entire file into memory
before processing.
append_if_not_found : False
.. versionadded:: 2014.7.0
If set to ``True``, and pattern is not found, then the content will be
appended to the file.
prepend_if_not_found : False
.. versionadded:: 2014.7.0
If set to ``True`` and pattern is not found, then the content will be
prepended to the file.
not_found_content
.. versionadded:: 2014.7.0
Content to use for append/prepend if not found. If None (default), uses
``repl``. Useful when ``repl`` uses references to group in pattern.
backup : .bak
The file extension to use for a backup of the file before editing. Set
to ``False`` to skip making a backup.
dry_run : False
If set to ``True``, no changes will be made to the file, the function
will just return the changes that would have been made (or a
``True``/``False`` value if ``show_changes`` is set to ``False``).
search_only : False
If set to true, this no changes will be performed on the file, and this
function will simply return ``True`` if the pattern was matched, and
``False`` if not.
show_changes : True
If ``True``, return a diff of changes made. Otherwise, return ``True``
if changes were made, and ``False`` if not.
.. note::
Using this option will store two copies of the file in memory (the
original version and the edited version) in order to generate the
diff. This may not normally be a concern, but could impact
performance if used with large files.
ignore_if_missing : False
.. versionadded:: 2015.8.0
If set to ``True``, this function will simply return ``False``
if the file doesn't exist. Otherwise, an error will be thrown.
preserve_inode : True
.. versionadded:: 2015.8.0
Preserve the inode of the file, so that any hard links continue to
share the inode with the original filename. This works by *copying* the
file, reading from the copy, and writing to the file at the original
inode. If ``False``, the file will be *moved* rather than copied, and a
new file will be written to a new inode, but using the original
filename. Hard links will then share an inode with the backup, instead
(if using ``backup`` to create a backup copy).
backslash_literal : False
.. versionadded:: 2016.11.7
Interpret backslashes as literal backslashes for the repl and not
escape characters. This will help when using append/prepend so that
the backslashes are not interpreted for the repl on the second run of
the state.
If an equal sign (``=``) appears in an argument to a Salt command it is
interpreted as a keyword argument in the format ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: bash
salt '*' file.replace /path/to/file pattern='=' repl=':'
salt '*' file.replace /path/to/file pattern="bind-address\\s*=" repl='bind-address:'
CLI Examples:
.. code-block:: bash
salt '*' file.replace /etc/httpd/httpd.conf pattern='LogLevel warn' repl='LogLevel info'
salt '*' file.replace /some/file pattern='before' repl='after' flags='[MULTILINE, IGNORECASE]'
"""
symlink = False
if is_link(path):
symlink = True
target_path = os.readlink(path)
given_path = os.path.expanduser(path) # depends on [control=['if'], data=[]]
path = os.path.realpath(os.path.expanduser(path))
if not os.path.exists(path):
if ignore_if_missing:
return False # depends on [control=['if'], data=[]]
else:
raise SaltInvocationError('File not found: {0}'.format(path)) # depends on [control=['if'], data=[]]
if not __utils__['files.is_text'](path):
raise SaltInvocationError('Cannot perform string replacements on a binary file: {0}'.format(path)) # depends on [control=['if'], data=[]]
if search_only and (append_if_not_found or prepend_if_not_found):
raise SaltInvocationError('search_only cannot be used with append/prepend_if_not_found') # depends on [control=['if'], data=[]]
if append_if_not_found and prepend_if_not_found:
raise SaltInvocationError('Only one of append and prepend_if_not_found is permitted') # depends on [control=['if'], data=[]]
flags_num = _get_flags(flags)
cpattern = re.compile(salt.utils.stringutils.to_bytes(pattern), flags_num)
filesize = os.path.getsize(path)
if bufsize == 'file':
bufsize = filesize # depends on [control=['if'], data=['bufsize']]
# Search the file; track if any changes have been made for the return val
has_changes = False
orig_file = [] # used for show_changes and change detection
new_file = [] # used for show_changes and change detection
if not salt.utils.platform.is_windows():
pre_user = get_user(path)
pre_group = get_group(path)
pre_mode = salt.utils.files.normalize_mode(get_mode(path)) # depends on [control=['if'], data=[]]
# Avoid TypeErrors by forcing repl to be bytearray related to mmap
# Replacement text may contains integer: 123 for example
repl = salt.utils.stringutils.to_bytes(six.text_type(repl))
if not_found_content:
not_found_content = salt.utils.stringutils.to_bytes(not_found_content) # depends on [control=['if'], data=[]]
found = False
temp_file = None
content = salt.utils.stringutils.to_unicode(not_found_content) if not_found_content and (prepend_if_not_found or append_if_not_found) else salt.utils.stringutils.to_unicode(repl)
try:
# First check the whole file, determine whether to make the replacement
# Searching first avoids modifying the time stamp if there are no changes
r_data = None
# Use a read-only handle to open the file
with salt.utils.files.fopen(path, mode='rb', buffering=bufsize) as r_file:
try:
# mmap throws a ValueError if the file is empty.
r_data = mmap.mmap(r_file.fileno(), 0, access=mmap.ACCESS_READ) # depends on [control=['try'], data=[]]
except (ValueError, mmap.error):
# size of file in /proc is 0, but contains data
r_data = salt.utils.stringutils.to_bytes(''.join(r_file)) # depends on [control=['except'], data=[]]
if search_only:
# Just search; bail as early as a match is found
if re.search(cpattern, r_data):
return True # `with` block handles file closure # depends on [control=['if'], data=[]]
else:
return False # depends on [control=['if'], data=[]]
else:
(result, nrepl) = re.subn(cpattern, repl.replace('\\', '\\\\') if backslash_literal else repl, r_data, count)
# found anything? (even if no change)
if nrepl > 0:
found = True
# Identity check the potential change
has_changes = True if pattern != repl else has_changes # depends on [control=['if'], data=[]]
if prepend_if_not_found or append_if_not_found:
# Search for content, to avoid pre/appending the
# content if it was pre/appended in a previous run.
if re.search(salt.utils.stringutils.to_bytes('^{0}($|(?=\r\n))'.format(re.escape(content))), r_data, flags=flags_num):
# Content was found, so set found.
found = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
orig_file = r_data.read(filesize).splitlines(True) if isinstance(r_data, mmap.mmap) else r_data.splitlines(True)
new_file = result.splitlines(True) # depends on [control=['with'], data=['r_file']] # depends on [control=['try'], data=[]]
except (OSError, IOError) as exc:
raise CommandExecutionError("Unable to open file '{0}'. Exception: {1}".format(path, exc)) # depends on [control=['except'], data=['exc']]
finally:
if r_data and isinstance(r_data, mmap.mmap):
r_data.close() # depends on [control=['if'], data=[]]
if has_changes and (not dry_run):
# Write the replacement text in this block.
try:
# Create a copy to read from and to use as a backup later
temp_file = _mkstemp_copy(path=path, preserve_inode=preserve_inode) # depends on [control=['try'], data=[]]
except (OSError, IOError) as exc:
raise CommandExecutionError('Exception: {0}'.format(exc)) # depends on [control=['except'], data=['exc']]
r_data = None
try:
# Open the file in write mode
with salt.utils.files.fopen(path, mode='w', buffering=bufsize) as w_file:
try:
# Open the temp file in read mode
with salt.utils.files.fopen(temp_file, mode='r', buffering=bufsize) as r_file:
r_data = mmap.mmap(r_file.fileno(), 0, access=mmap.ACCESS_READ)
(result, nrepl) = re.subn(cpattern, repl.replace('\\', '\\\\') if backslash_literal else repl, r_data, count)
try:
w_file.write(salt.utils.stringutils.to_str(result)) # depends on [control=['try'], data=[]]
except (OSError, IOError) as exc:
raise CommandExecutionError("Unable to write file '{0}'. Contents may be truncated. Temporary file contains copy at '{1}'. Exception: {2}".format(path, temp_file, exc)) # depends on [control=['except'], data=['exc']] # depends on [control=['with'], data=['r_file']] # depends on [control=['try'], data=[]]
except (OSError, IOError) as exc:
raise CommandExecutionError('Exception: {0}'.format(exc)) # depends on [control=['except'], data=['exc']]
finally:
if r_data and isinstance(r_data, mmap.mmap):
r_data.close() # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['w_file']] # depends on [control=['try'], data=[]]
except (OSError, IOError) as exc:
raise CommandExecutionError('Exception: {0}'.format(exc)) # depends on [control=['except'], data=['exc']] # depends on [control=['if'], data=[]]
if not found and (append_if_not_found or prepend_if_not_found):
if not_found_content is None:
not_found_content = repl # depends on [control=['if'], data=['not_found_content']]
if prepend_if_not_found:
new_file.insert(0, not_found_content + salt.utils.stringutils.to_bytes(os.linesep)) # depends on [control=['if'], data=[]]
else:
# append_if_not_found
# Make sure we have a newline at the end of the file
if new_file:
if not new_file[-1].endswith(salt.utils.stringutils.to_bytes(os.linesep)):
new_file[-1] += salt.utils.stringutils.to_bytes(os.linesep) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
new_file.append(not_found_content + salt.utils.stringutils.to_bytes(os.linesep))
has_changes = True
if not dry_run:
try:
# Create a copy to read from and for later use as a backup
temp_file = _mkstemp_copy(path=path, preserve_inode=preserve_inode) # depends on [control=['try'], data=[]]
except (OSError, IOError) as exc:
raise CommandExecutionError('Exception: {0}'.format(exc)) # depends on [control=['except'], data=['exc']]
# write new content in the file while avoiding partial reads
try:
fh_ = salt.utils.atomicfile.atomic_open(path, 'wb')
for line in new_file:
fh_.write(salt.utils.stringutils.to_bytes(line)) # depends on [control=['for'], data=['line']] # depends on [control=['try'], data=[]]
finally:
fh_.close() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if backup and has_changes and (not dry_run):
# keep the backup only if it was requested
# and only if there were any changes
backup_name = '{0}{1}'.format(path, backup)
try:
shutil.move(temp_file, backup_name) # depends on [control=['try'], data=[]]
except (OSError, IOError) as exc:
raise CommandExecutionError("Unable to move the temp file '{0}' to the backup file '{1}'. Exception: {2}".format(path, temp_file, exc)) # depends on [control=['except'], data=['exc']]
if symlink:
symlink_backup = '{0}{1}'.format(given_path, backup)
target_backup = '{0}{1}'.format(target_path, backup)
# Always clobber any existing symlink backup
# to match the behaviour of the 'backup' option
try:
os.symlink(target_backup, symlink_backup) # depends on [control=['try'], data=[]]
except OSError:
os.remove(symlink_backup)
os.symlink(target_backup, symlink_backup) # depends on [control=['except'], data=[]]
except Exception:
raise CommandExecutionError("Unable create backup symlink '{0}'. Target was '{1}'. Exception: {2}".format(symlink_backup, target_backup, exc)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif temp_file:
try:
os.remove(temp_file) # depends on [control=['try'], data=[]]
except (OSError, IOError) as exc:
raise CommandExecutionError("Unable to delete temp file '{0}'. Exception: {1}".format(temp_file, exc)) # depends on [control=['except'], data=['exc']] # depends on [control=['if'], data=[]]
if not dry_run and (not salt.utils.platform.is_windows()):
check_perms(path, None, pre_user, pre_group, pre_mode) # depends on [control=['if'], data=[]]
differences = __utils__['stringutils.get_diff'](orig_file, new_file)
if show_changes:
return differences # depends on [control=['if'], data=[]]
# We may have found a regex line match but don't need to change the line
# (for situations where the pattern also matches the repl). Revert the
# has_changes flag to False if the final result is unchanged.
if not differences:
has_changes = False # depends on [control=['if'], data=[]]
return has_changes |
def start_time(self):
    """Return the dataset's start timestamp as a ``datetime``.

    Builds a naive ``datetime`` from the components exposed by the
    ``dt`` accessor of the dataset's ``time`` variable.
    """
    time_parts = self.nc['time'].dt
    return datetime(
        time_parts.year,
        time_parts.month,
        time_parts.day,
        time_parts.hour,
        time_parts.minute,
        time_parts.second,
        time_parts.microsecond,
    )
constant[Start timestamp of the dataset]
variable[dt] assign[=] call[name[self].nc][constant[time]].dt
return[call[name[datetime], parameter[]]] | keyword[def] identifier[start_time] ( identifier[self] ):
literal[string]
identifier[dt] = identifier[self] . identifier[nc] [ literal[string] ]. identifier[dt]
keyword[return] identifier[datetime] ( identifier[year] = identifier[dt] . identifier[year] , identifier[month] = identifier[dt] . identifier[month] , identifier[day] = identifier[dt] . identifier[day] ,
identifier[hour] = identifier[dt] . identifier[hour] , identifier[minute] = identifier[dt] . identifier[minute] ,
identifier[second] = identifier[dt] . identifier[second] , identifier[microsecond] = identifier[dt] . identifier[microsecond] ) | def start_time(self):
"""Start timestamp of the dataset"""
dt = self.nc['time'].dt
return datetime(year=dt.year, month=dt.month, day=dt.day, hour=dt.hour, minute=dt.minute, second=dt.second, microsecond=dt.microsecond) |
def set_session(self, session):
    """
    Set the session to be used when the TLS/SSL connection is established.

    :param session: A Session instance representing the session to use.
    :returns: None

    .. versionadded:: 0.14
    """
    if not isinstance(session, Session):
        raise TypeError("session must be a Session instance")
    # Hand the wrapped OpenSSL session object to the underlying SSL*.
    ok = _lib.SSL_set_session(self._ssl, session._session)
    if not ok:
        _raise_current_error()
constant[
Set the session to be used when the TLS/SSL connection is established.
:param session: A Session instance representing the session to use.
:returns: None
.. versionadded:: 0.14
]
if <ast.UnaryOp object at 0x7da1b0258670> begin[:]
<ast.Raise object at 0x7da1b025b550>
variable[result] assign[=] call[name[_lib].SSL_set_session, parameter[name[self]._ssl, name[session]._session]]
if <ast.UnaryOp object at 0x7da1b0259fc0> begin[:]
call[name[_raise_current_error], parameter[]] | keyword[def] identifier[set_session] ( identifier[self] , identifier[session] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[session] , identifier[Session] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[result] = identifier[_lib] . identifier[SSL_set_session] ( identifier[self] . identifier[_ssl] , identifier[session] . identifier[_session] )
keyword[if] keyword[not] identifier[result] :
identifier[_raise_current_error] () | def set_session(self, session):
"""
Set the session to be used when the TLS/SSL connection is established.
:param session: A Session instance representing the session to use.
:returns: None
.. versionadded:: 0.14
"""
if not isinstance(session, Session):
raise TypeError('session must be a Session instance') # depends on [control=['if'], data=[]]
result = _lib.SSL_set_session(self._ssl, session._session)
if not result:
_raise_current_error() # depends on [control=['if'], data=[]] |
def _handle_resize(self, signum=None, frame=None):
    """Signal handler: refresh the cached terminal width after a resize.

    ``signum``/``frame`` follow the ``signal`` handler signature and are
    ignored; only the new width is recorded.
    """
    width, _height = utils.get_terminal_size()
    self.term_width = width
self.term_width = w | def function[_handle_resize, parameter[self, signum, frame]]:
constant[Tries to catch resize signals sent from the terminal.]
<ast.Tuple object at 0x7da20c796050> assign[=] call[name[utils].get_terminal_size, parameter[]]
name[self].term_width assign[=] name[w] | keyword[def] identifier[_handle_resize] ( identifier[self] , identifier[signum] = keyword[None] , identifier[frame] = keyword[None] ):
literal[string]
identifier[w] , identifier[h] = identifier[utils] . identifier[get_terminal_size] ()
identifier[self] . identifier[term_width] = identifier[w] | def _handle_resize(self, signum=None, frame=None):
"""Tries to catch resize signals sent from the terminal."""
(w, h) = utils.get_terminal_size()
self.term_width = w |
def _phase_kuramoto(self, teta, t, argv):
"""!
@brief Returns result of phase calculation for specified oscillator in the network.
@param[in] teta (double): Phase of the oscillator that is differentiated.
@param[in] t (double): Current time of simulation.
@param[in] argv (tuple): Index of the oscillator in the list.
@return (double) New phase for specified oscillator (don't assign here).
"""
index = argv;
phase = 0;
for k in range(0, self._num_osc):
if (self.has_connection(index, k) == True):
phase += math.sin(self._phases[k] - teta);
return ( self._freq[index] + (phase * self._weight / self._num_osc) ); | def function[_phase_kuramoto, parameter[self, teta, t, argv]]:
constant[!
@brief Returns result of phase calculation for specified oscillator in the network.
@param[in] teta (double): Phase of the oscillator that is differentiated.
@param[in] t (double): Current time of simulation.
@param[in] argv (tuple): Index of the oscillator in the list.
@return (double) New phase for specified oscillator (don't assign here).
]
variable[index] assign[=] name[argv]
variable[phase] assign[=] constant[0]
for taget[name[k]] in starred[call[name[range], parameter[constant[0], name[self]._num_osc]]] begin[:]
if compare[call[name[self].has_connection, parameter[name[index], name[k]]] equal[==] constant[True]] begin[:]
<ast.AugAssign object at 0x7da1b01900a0>
return[binary_operation[call[name[self]._freq][name[index]] + binary_operation[binary_operation[name[phase] * name[self]._weight] / name[self]._num_osc]]] | keyword[def] identifier[_phase_kuramoto] ( identifier[self] , identifier[teta] , identifier[t] , identifier[argv] ):
literal[string]
identifier[index] = identifier[argv] ;
identifier[phase] = literal[int] ;
keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[_num_osc] ):
keyword[if] ( identifier[self] . identifier[has_connection] ( identifier[index] , identifier[k] )== keyword[True] ):
identifier[phase] += identifier[math] . identifier[sin] ( identifier[self] . identifier[_phases] [ identifier[k] ]- identifier[teta] );
keyword[return] ( identifier[self] . identifier[_freq] [ identifier[index] ]+( identifier[phase] * identifier[self] . identifier[_weight] / identifier[self] . identifier[_num_osc] )); | def _phase_kuramoto(self, teta, t, argv):
"""!
@brief Returns result of phase calculation for specified oscillator in the network.
@param[in] teta (double): Phase of the oscillator that is differentiated.
@param[in] t (double): Current time of simulation.
@param[in] argv (tuple): Index of the oscillator in the list.
@return (double) New phase for specified oscillator (don't assign here).
"""
index = argv
phase = 0
for k in range(0, self._num_osc):
if self.has_connection(index, k) == True:
phase += math.sin(self._phases[k] - teta) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']]
return self._freq[index] + phase * self._weight / self._num_osc |
def wait_idle(self, timeout=1.0):
    """Wait until the rpc queue is empty.

    This method may be called either from within the event loop or from
    outside of it.  If it is called outside of the event loop it will
    block the calling thread until the rpc queue is temporarily empty.
    If it is called from within the event loop it will return an awaitable
    object that can be used to wait for the same condition.

    The awaitable object will already have a timeout if the timeout
    parameter is passed.

    Args:
        timeout (float): The maximum number of seconds to wait.

    Raises:
        TimeoutExpiredError: If the queues/events do not drain within
            ``timeout`` seconds.
    """
    async def _awaiter():
        # Drain condition: every work queue fully joined plus every
        # registered event set.  queue.join() yields an awaitable per queue.
        background_work = {x.join() for x in self._work_queues}
        for event in self._events:
            if not event.is_set():
                background_work.add(event.wait())

        # asyncio.wait does not raise on timeout; unfinished awaitables
        # are returned in ``pending`` instead, so check it explicitly.
        _done, pending = await asyncio.wait(background_work, timeout=timeout)
        if len(pending) > 0:
            raise TimeoutExpiredError("Timeout waiting for event loop to become idle", pending=pending)

    if self._on_emulation_thread():
        # Inside the loop: return an awaitable (with its own timeout)
        # rather than blocking the loop thread on itself.
        return asyncio.wait_for(_awaiter(), timeout=timeout)

    # Outside the loop: block this thread until the coroutine finishes.
    self.run_task_external(_awaiter())
    return None
constant[Wait until the rpc queue is empty.
This method may be called either from within the event loop or from
outside of it. If it is called outside of the event loop it will
block the calling thread until the rpc queue is temporarily empty.
If it is called from within the event loop it will return an awaitable
object that can be used to wait for the same condition.
The awaitable object will already have a timeout if the timeout
parameter is passed.
Args:
timeout (float): The maximum number of seconds to wait.
]
<ast.AsyncFunctionDef object at 0x7da18f720b20>
if call[name[self]._on_emulation_thread, parameter[]] begin[:]
return[call[name[asyncio].wait_for, parameter[call[name[_awaiter], parameter[]]]]]
call[name[self].run_task_external, parameter[call[name[_awaiter], parameter[]]]]
return[constant[None]] | keyword[def] identifier[wait_idle] ( identifier[self] , identifier[timeout] = literal[int] ):
literal[string]
keyword[async] keyword[def] identifier[_awaiter] ():
identifier[background_work] ={ identifier[x] . identifier[join] () keyword[for] identifier[x] keyword[in] identifier[self] . identifier[_work_queues] }
keyword[for] identifier[event] keyword[in] identifier[self] . identifier[_events] :
keyword[if] keyword[not] identifier[event] . identifier[is_set] ():
identifier[background_work] . identifier[add] ( identifier[event] . identifier[wait] ())
identifier[_done] , identifier[pending] = keyword[await] identifier[asyncio] . identifier[wait] ( identifier[background_work] , identifier[timeout] = identifier[timeout] )
keyword[if] identifier[len] ( identifier[pending] )> literal[int] :
keyword[raise] identifier[TimeoutExpiredError] ( literal[string] , identifier[pending] = identifier[pending] )
keyword[if] identifier[self] . identifier[_on_emulation_thread] ():
keyword[return] identifier[asyncio] . identifier[wait_for] ( identifier[_awaiter] (), identifier[timeout] = identifier[timeout] )
identifier[self] . identifier[run_task_external] ( identifier[_awaiter] ())
keyword[return] keyword[None] | def wait_idle(self, timeout=1.0):
"""Wait until the rpc queue is empty.
This method may be called either from within the event loop or from
outside of it. If it is called outside of the event loop it will
block the calling thread until the rpc queue is temporarily empty.
If it is called from within the event loop it will return an awaitable
object that can be used to wait for the same condition.
The awaitable object will already have a timeout if the timeout
parameter is passed.
Args:
timeout (float): The maximum number of seconds to wait.
"""
async def _awaiter():
background_work = {x.join() for x in self._work_queues}
for event in self._events:
if not event.is_set():
background_work.add(event.wait()) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['event']]
(_done, pending) = await asyncio.wait(background_work, timeout=timeout)
if len(pending) > 0:
raise TimeoutExpiredError('Timeout waiting for event loop to become idle', pending=pending) # depends on [control=['if'], data=[]]
if self._on_emulation_thread():
return asyncio.wait_for(_awaiter(), timeout=timeout) # depends on [control=['if'], data=[]]
self.run_task_external(_awaiter())
return None |
def pre_filter(self, conditions, user):
    """Restrict ``conditions`` to items enabled for this user.

    An item qualifies when its group is one of the user's Django auth
    groups.
    """
    user_groups = user.groups.all()
    return conditions.filter(group__in=user_groups)
constant[ Returns all of the items from conditions which are enabled by a
user being member of a Django Auth Group. ]
return[call[name[conditions].filter, parameter[]]] | keyword[def] identifier[pre_filter] ( identifier[self] , identifier[conditions] , identifier[user] ):
literal[string]
keyword[return] identifier[conditions] . identifier[filter] ( identifier[group__in] = identifier[user] . identifier[groups] . identifier[all] ()) | def pre_filter(self, conditions, user):
""" Returns all of the items from conditions which are enabled by a
user being member of a Django Auth Group. """
return conditions.filter(group__in=user.groups.all()) |
def do_GET(self):
    """Serve a GET request.

    ``send_head`` writes the response headers and returns an open
    file-like object for the body (or a falsy value when there is
    nothing to stream).  The body is copied to the client and the
    source is always closed, even if the copy fails mid-transfer.
    """
    f = self.send_head()
    if f:
        try:
            self.copyfile(f, self.wfile)
        finally:
            # Close even when the client disconnects and copyfile
            # raises; otherwise the file handle leaks.
            f.close()
constant[Serve a GET request.]
variable[f] assign[=] call[name[self].send_head, parameter[]]
if name[f] begin[:]
call[name[self].copyfile, parameter[name[f], name[self].wfile]]
call[name[f].close, parameter[]] | keyword[def] identifier[do_GET] ( identifier[self] ):
literal[string]
identifier[f] = identifier[self] . identifier[send_head] ()
keyword[if] identifier[f] :
identifier[self] . identifier[copyfile] ( identifier[f] , identifier[self] . identifier[wfile] )
identifier[f] . identifier[close] () | def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
self.copyfile(f, self.wfile)
f.close() # depends on [control=['if'], data=[]] |
def prob_classify(self, text):
    """Return the label probability distribution for a string of text.

    Example:
    ::
        >>> classifier = MaxEntClassifier(train_data)
        >>> prob_dist = classifier.prob_classify("I feel happy this morning.")
        >>> prob_dist.max()
        'positive'
        >>> prob_dist.prob("positive")
        0.7

    :rtype: nltk.probability.DictionaryProbDist
    """
    # Feature extraction feeds directly into the wrapped NLTK classifier.
    return self.classifier.prob_classify(self.extract_features(text))
constant[Return the label probability distribution for classifying a string
of text.
Example:
::
>>> classifier = MaxEntClassifier(train_data)
>>> prob_dist = classifier.prob_classify("I feel happy this morning.")
>>> prob_dist.max()
'positive'
>>> prob_dist.prob("positive")
0.7
:rtype: nltk.probability.DictionaryProbDist
]
variable[feats] assign[=] call[name[self].extract_features, parameter[name[text]]]
return[call[name[self].classifier.prob_classify, parameter[name[feats]]]] | keyword[def] identifier[prob_classify] ( identifier[self] , identifier[text] ):
literal[string]
identifier[feats] = identifier[self] . identifier[extract_features] ( identifier[text] )
keyword[return] identifier[self] . identifier[classifier] . identifier[prob_classify] ( identifier[feats] ) | def prob_classify(self, text):
"""Return the label probability distribution for classifying a string
of text.
Example:
::
>>> classifier = MaxEntClassifier(train_data)
>>> prob_dist = classifier.prob_classify("I feel happy this morning.")
>>> prob_dist.max()
'positive'
>>> prob_dist.prob("positive")
0.7
:rtype: nltk.probability.DictionaryProbDist
"""
feats = self.extract_features(text)
return self.classifier.prob_classify(feats) |
def accept_record(self, record):
    """Accept a record for inclusion in the community.

    Consumes the pending inclusion request for ``record`` and adds the
    record, stamping the acceptance time.

    :param record: Record object.
    """
    with db.session.begin_nested():
        pending = InclusionRequest.get(self.id, record.id)
        if pending is None:
            # No request was ever filed for this record/community pair.
            raise InclusionRequestMissingError(community=self,
                                               record=record)
        pending.delete()
        self.add_record(record)
        self.last_record_accepted = datetime.utcnow()
constant[Accept a record for inclusion in the community.
:param record: Record object.
]
with call[name[db].session.begin_nested, parameter[]] begin[:]
variable[req] assign[=] call[name[InclusionRequest].get, parameter[name[self].id, name[record].id]]
if compare[name[req] is constant[None]] begin[:]
<ast.Raise object at 0x7da204346fe0>
call[name[req].delete, parameter[]]
call[name[self].add_record, parameter[name[record]]]
name[self].last_record_accepted assign[=] call[name[datetime].utcnow, parameter[]] | keyword[def] identifier[accept_record] ( identifier[self] , identifier[record] ):
literal[string]
keyword[with] identifier[db] . identifier[session] . identifier[begin_nested] ():
identifier[req] = identifier[InclusionRequest] . identifier[get] ( identifier[self] . identifier[id] , identifier[record] . identifier[id] )
keyword[if] identifier[req] keyword[is] keyword[None] :
keyword[raise] identifier[InclusionRequestMissingError] ( identifier[community] = identifier[self] ,
identifier[record] = identifier[record] )
identifier[req] . identifier[delete] ()
identifier[self] . identifier[add_record] ( identifier[record] )
identifier[self] . identifier[last_record_accepted] = identifier[datetime] . identifier[utcnow] () | def accept_record(self, record):
"""Accept a record for inclusion in the community.
:param record: Record object.
"""
with db.session.begin_nested():
req = InclusionRequest.get(self.id, record.id)
if req is None:
raise InclusionRequestMissingError(community=self, record=record) # depends on [control=['if'], data=[]]
req.delete()
self.add_record(record)
self.last_record_accepted = datetime.utcnow() # depends on [control=['with'], data=[]] |
def subdict(name, *keys, **kw):
    """
    Build a trafaret key that validates a group of related keys together.

    Takes a result `name`, any number of keys as positional args and a
    keyword argument `trafaret`, which receives the collected sub-dict
    once every individual key validated cleanly.

    Use it like:

        def check_passwords_equal(data):
            if data['password'] != data['password_confirm']:
                return t.DataError('Passwords are not equal')
            return data['password']

        passwords_key = subdict(
            'password',
            t.Key('password', trafaret=check_password),
            t.Key('password_confirm', trafaret=check_password),
            trafaret=check_passwords_equal,
        )

        signup_trafaret = t.Dict(
            t.Key('email', trafaret=t.Email),
            passwords_key,
        )
    """
    trafaret = kw.pop('trafaret')  # keyword-only, the py2-compatible way

    def inner(data, context=None):
        failed = False
        emitted = []
        touched_names = set()
        gathered = {}
        for key in keys:
            for out_key, value, names in key(data, context=context):
                touched_names.update(names)
                emitted.append((out_key, value, names))
                if isinstance(value, t.DataError):
                    failed = True
                else:
                    gathered[out_key] = value
        if failed:
            # Propagate every per-key result (errors included) untouched.
            for item in emitted:
                yield item
        elif gathered:
            # All keys valid: run the group trafaret over the collected dict.
            yield name, t.catch(trafaret, gathered), touched_names

    return inner
constant[
Subdict key.
Takes a `name`, any number of keys as args and keyword argument `trafaret`.
Use it like:
def check_passwords_equal(data):
if data['password'] != data['password_confirm']:
return t.DataError('Passwords are not equal')
return data['password']
passwords_key = subdict(
'password',
t.Key('password', trafaret=check_password),
t.Key('password_confirm', trafaret=check_password),
trafaret=check_passwords_equal,
)
signup_trafaret = t.Dict(
t.Key('email', trafaret=t.Email),
passwords_key,
)
]
variable[trafaret] assign[=] call[name[kw].pop, parameter[constant[trafaret]]]
def function[inner, parameter[data, context]]:
variable[errors] assign[=] constant[False]
variable[preserve_output] assign[=] list[[]]
variable[touched] assign[=] call[name[set], parameter[]]
variable[collect] assign[=] dictionary[[], []]
for taget[name[key]] in starred[name[keys]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1192620>, <ast.Name object at 0x7da1b1190fd0>, <ast.Name object at 0x7da1b1192b00>]]] in starred[call[name[key], parameter[name[data]]]] begin[:]
call[name[touched].update, parameter[name[names]]]
call[name[preserve_output].append, parameter[tuple[[<ast.Name object at 0x7da1b11919c0>, <ast.Name object at 0x7da1b1192320>, <ast.Name object at 0x7da1b1193be0>]]]]
if call[name[isinstance], parameter[name[v], name[t].DataError]] begin[:]
variable[errors] assign[=] constant[True]
if name[errors] begin[:]
for taget[name[out]] in starred[name[preserve_output]] begin[:]
<ast.Yield object at 0x7da1b12bf190>
return[name[inner]] | keyword[def] identifier[subdict] ( identifier[name] ,* identifier[keys] ,** identifier[kw] ):
literal[string]
identifier[trafaret] = identifier[kw] . identifier[pop] ( literal[string] )
keyword[def] identifier[inner] ( identifier[data] , identifier[context] = keyword[None] ):
identifier[errors] = keyword[False]
identifier[preserve_output] =[]
identifier[touched] = identifier[set] ()
identifier[collect] ={}
keyword[for] identifier[key] keyword[in] identifier[keys] :
keyword[for] identifier[k] , identifier[v] , identifier[names] keyword[in] identifier[key] ( identifier[data] , identifier[context] = identifier[context] ):
identifier[touched] . identifier[update] ( identifier[names] )
identifier[preserve_output] . identifier[append] (( identifier[k] , identifier[v] , identifier[names] ))
keyword[if] identifier[isinstance] ( identifier[v] , identifier[t] . identifier[DataError] ):
identifier[errors] = keyword[True]
keyword[else] :
identifier[collect] [ identifier[k] ]= identifier[v]
keyword[if] identifier[errors] :
keyword[for] identifier[out] keyword[in] identifier[preserve_output] :
keyword[yield] identifier[out]
keyword[elif] identifier[collect] :
keyword[yield] identifier[name] , identifier[t] . identifier[catch] ( identifier[trafaret] , identifier[collect] ), identifier[touched]
keyword[return] identifier[inner] | def subdict(name, *keys, **kw):
"""
Subdict key.
Takes a `name`, any number of keys as args and keyword argument `trafaret`.
Use it like:
def check_passwords_equal(data):
if data['password'] != data['password_confirm']:
return t.DataError('Passwords are not equal')
return data['password']
passwords_key = subdict(
'password',
t.Key('password', trafaret=check_password),
t.Key('password_confirm', trafaret=check_password),
trafaret=check_passwords_equal,
)
signup_trafaret = t.Dict(
t.Key('email', trafaret=t.Email),
passwords_key,
)
"""
trafaret = kw.pop('trafaret') # coz py2k
def inner(data, context=None):
errors = False
preserve_output = []
touched = set()
collect = {}
for key in keys:
for (k, v, names) in key(data, context=context):
touched.update(names)
preserve_output.append((k, v, names))
if isinstance(v, t.DataError):
errors = True # depends on [control=['if'], data=[]]
else:
collect[k] = v # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['key']]
if errors:
for out in preserve_output:
yield out # depends on [control=['for'], data=['out']] # depends on [control=['if'], data=[]]
elif collect:
yield (name, t.catch(trafaret, collect), touched) # depends on [control=['if'], data=[]]
return inner |
def read_from_file(filename):
    """Parse a CP2K input file into a :class:`CP2KInputFile`.

    Arguments:
     | ``filename``  --  the filename of the input file

    Use as follows::

        >>> if = CP2KInputFile.read_from_file("somefile.inp")
        >>> for section in if:
        ...     print section.name
    """
    with open(filename) as stream:
        parsed = CP2KInputFile()
        try:
            # load_children signals exhaustion of the stream by raising
            # EOFError, so keep pulling sections until that happens.
            while True:
                parsed.load_children(stream)
        except EOFError:
            pass
    return parsed
constant[
Arguments:
| ``filename`` -- the filename of the input file
Use as follows::
>>> if = CP2KInputFile.read_from_file("somefile.inp")
>>> for section in if:
... print section.name
]
with call[name[open], parameter[name[filename]]] begin[:]
variable[result] assign[=] call[name[CP2KInputFile], parameter[]]
<ast.Try object at 0x7da20c6aa050>
return[name[result]] | keyword[def] identifier[read_from_file] ( identifier[filename] ):
literal[string]
keyword[with] identifier[open] ( identifier[filename] ) keyword[as] identifier[f] :
identifier[result] = identifier[CP2KInputFile] ()
keyword[try] :
keyword[while] keyword[True] :
identifier[result] . identifier[load_children] ( identifier[f] )
keyword[except] identifier[EOFError] :
keyword[pass]
keyword[return] identifier[result] | def read_from_file(filename):
"""
Arguments:
| ``filename`` -- the filename of the input file
Use as follows::
>>> if = CP2KInputFile.read_from_file("somefile.inp")
>>> for section in if:
... print section.name
"""
with open(filename) as f:
result = CP2KInputFile()
try:
while True:
result.load_children(f) # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except EOFError:
pass # depends on [control=['except'], data=[]] # depends on [control=['with'], data=['f']]
return result |
def metric_griffiths_2004(logliks):
    """
    Estimate the marginal likelihood as the harmonic mean of the
    log-likelihood values `logliks`, following:

    Thomas L. Griffiths and Mark Steyvers. 2004. Finding scientific topics.
    Proceedings of the National Academy of Sciences 101, suppl 1: 5228-5235.
    http://doi.org/10.1073/pnas.0307752101

    Burnin values should already be removed from `logliks`.

    Note: requires gmpy2 package for multiple-precision arithmetic to avoid
    numerical underflow. See https://github.com/aleaxit/gmpy
    """
    import gmpy2

    # Median trick (Ponweiser's 2012 diploma thesis, p. 36): shift by the
    # median log-likelihood before exponentiating to avoid underflow.
    shift = np.median(logliks)
    weights = [gmpy2.exp(shift - ll) for ll in logliks]
    mean_weight = gmpy2.mpfr(0)
    count = len(weights)
    for w in weights:
        mean_weight += w / count
    # After log() the value fits in an ordinary Python float again.
    return float(shift - gmpy2.log(mean_weight))
constant[
Thomas L. Griffiths and Mark Steyvers. 2004. Finding scientific topics. Proceedings of the National Academy
of Sciences 101, suppl 1: 5228–5235. http://doi.org/10.1073/pnas.0307752101
Calculates the harmonic mean of the loglikelihood values `logliks` as in Griffiths, Steyvers 2004. Burnin values
should already be removed from `logliks`.
Note: requires gmpy2 package for multiple-precision arithmetic to avoid numerical underflow.
see https://github.com/aleaxit/gmpy
]
import module[gmpy2]
variable[ll_med] assign[=] call[name[np].median, parameter[name[logliks]]]
variable[ps] assign[=] <ast.ListComp object at 0x7da20c7c8be0>
variable[ps_mean] assign[=] call[name[gmpy2].mpfr, parameter[constant[0]]]
for taget[name[p]] in starred[name[ps]] begin[:]
<ast.AugAssign object at 0x7da20c7c9930>
return[call[name[float], parameter[binary_operation[name[ll_med] - call[name[gmpy2].log, parameter[name[ps_mean]]]]]]] | keyword[def] identifier[metric_griffiths_2004] ( identifier[logliks] ):
literal[string]
keyword[import] identifier[gmpy2]
identifier[ll_med] = identifier[np] . identifier[median] ( identifier[logliks] )
identifier[ps] =[ identifier[gmpy2] . identifier[exp] ( identifier[ll_med] - identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[logliks] ]
identifier[ps_mean] = identifier[gmpy2] . identifier[mpfr] ( literal[int] )
keyword[for] identifier[p] keyword[in] identifier[ps] :
identifier[ps_mean] += identifier[p] / identifier[len] ( identifier[ps] )
keyword[return] identifier[float] ( identifier[ll_med] - identifier[gmpy2] . identifier[log] ( identifier[ps_mean] )) | def metric_griffiths_2004(logliks):
"""
Thomas L. Griffiths and Mark Steyvers. 2004. Finding scientific topics. Proceedings of the National Academy
of Sciences 101, suppl 1: 5228–5235. http://doi.org/10.1073/pnas.0307752101
Calculates the harmonic mean of the loglikelihood values `logliks` as in Griffiths, Steyvers 2004. Burnin values
should already be removed from `logliks`.
Note: requires gmpy2 package for multiple-precision arithmetic to avoid numerical underflow.
see https://github.com/aleaxit/gmpy
"""
import gmpy2
# using median trick as in Martin Ponweiser's Diploma Thesis 2012, p.36
ll_med = np.median(logliks)
ps = [gmpy2.exp(ll_med - x) for x in logliks]
ps_mean = gmpy2.mpfr(0)
for p in ps:
ps_mean += p / len(ps) # depends on [control=['for'], data=['p']]
return float(ll_med - gmpy2.log(ps_mean)) # after taking the log() we can use a Python float() again |
def add_random_file_from_present_folder(machine_ip, port, zone):
"""Add a random non-py file from this folder and subfolders to soco"""
# Make a list of music files, right now it is done by collection all files
# below the current folder whose extension does not start with .py
# This will probably need to be modded for other pusposes.
music_files = []
print('Looking for music files')
for path, dirs, files in os.walk('.'):
for file_ in files:
if not os.path.splitext(file_)[1].startswith('.py'):
music_files.append(os.path.relpath(os.path.join(path, file_)))
print('Found:', music_files[-1])
random_file = choice(music_files)
# urlencode all the path parts (but not the /'s)
random_file = os.path.join(
*[quote(part) for part in os.path.split(random_file)]
)
print('\nPlaying random file:', random_file)
netpath = 'http://{}:{}/{}'.format(machine_ip, port, random_file)
number_in_queue = zone.add_uri_to_queue(netpath)
# play_from_queue indexes are 0-based
zone.play_from_queue(number_in_queue - 1) | def function[add_random_file_from_present_folder, parameter[machine_ip, port, zone]]:
constant[Add a random non-py file from this folder and subfolders to soco]
variable[music_files] assign[=] list[[]]
call[name[print], parameter[constant[Looking for music files]]]
for taget[tuple[[<ast.Name object at 0x7da20e960f40>, <ast.Name object at 0x7da20e963d60>, <ast.Name object at 0x7da20e962320>]]] in starred[call[name[os].walk, parameter[constant[.]]]] begin[:]
for taget[name[file_]] in starred[name[files]] begin[:]
if <ast.UnaryOp object at 0x7da20e960700> begin[:]
call[name[music_files].append, parameter[call[name[os].path.relpath, parameter[call[name[os].path.join, parameter[name[path], name[file_]]]]]]]
call[name[print], parameter[constant[Found:], call[name[music_files]][<ast.UnaryOp object at 0x7da20e962230>]]]
variable[random_file] assign[=] call[name[choice], parameter[name[music_files]]]
variable[random_file] assign[=] call[name[os].path.join, parameter[<ast.Starred object at 0x7da18ede42e0>]]
call[name[print], parameter[constant[
Playing random file:], name[random_file]]]
variable[netpath] assign[=] call[constant[http://{}:{}/{}].format, parameter[name[machine_ip], name[port], name[random_file]]]
variable[number_in_queue] assign[=] call[name[zone].add_uri_to_queue, parameter[name[netpath]]]
call[name[zone].play_from_queue, parameter[binary_operation[name[number_in_queue] - constant[1]]]] | keyword[def] identifier[add_random_file_from_present_folder] ( identifier[machine_ip] , identifier[port] , identifier[zone] ):
literal[string]
identifier[music_files] =[]
identifier[print] ( literal[string] )
keyword[for] identifier[path] , identifier[dirs] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( literal[string] ):
keyword[for] identifier[file_] keyword[in] identifier[files] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[splitext] ( identifier[file_] )[ literal[int] ]. identifier[startswith] ( literal[string] ):
identifier[music_files] . identifier[append] ( identifier[os] . identifier[path] . identifier[relpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[file_] )))
identifier[print] ( literal[string] , identifier[music_files] [- literal[int] ])
identifier[random_file] = identifier[choice] ( identifier[music_files] )
identifier[random_file] = identifier[os] . identifier[path] . identifier[join] (
*[ identifier[quote] ( identifier[part] ) keyword[for] identifier[part] keyword[in] identifier[os] . identifier[path] . identifier[split] ( identifier[random_file] )]
)
identifier[print] ( literal[string] , identifier[random_file] )
identifier[netpath] = literal[string] . identifier[format] ( identifier[machine_ip] , identifier[port] , identifier[random_file] )
identifier[number_in_queue] = identifier[zone] . identifier[add_uri_to_queue] ( identifier[netpath] )
identifier[zone] . identifier[play_from_queue] ( identifier[number_in_queue] - literal[int] ) | def add_random_file_from_present_folder(machine_ip, port, zone):
"""Add a random non-py file from this folder and subfolders to soco"""
# Make a list of music files, right now it is done by collection all files
# below the current folder whose extension does not start with .py
# This will probably need to be modded for other pusposes.
music_files = []
print('Looking for music files')
for (path, dirs, files) in os.walk('.'):
for file_ in files:
if not os.path.splitext(file_)[1].startswith('.py'):
music_files.append(os.path.relpath(os.path.join(path, file_)))
print('Found:', music_files[-1]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['file_']] # depends on [control=['for'], data=[]]
random_file = choice(music_files)
# urlencode all the path parts (but not the /'s)
random_file = os.path.join(*[quote(part) for part in os.path.split(random_file)])
print('\nPlaying random file:', random_file)
netpath = 'http://{}:{}/{}'.format(machine_ip, port, random_file)
number_in_queue = zone.add_uri_to_queue(netpath)
# play_from_queue indexes are 0-based
zone.play_from_queue(number_in_queue - 1) |
def get_max_days_to_liquidate_by_ticker(positions, market_data,
max_bar_consumption=0.2,
capital_base=1e6,
mean_volume_window=5,
last_n_days=None):
"""
Finds the longest estimated liquidation time for each traded
name over the course of backtest (or last n days of the backtest).
Parameters
----------
positions: pd.DataFrame
Contains daily position values including cash
- See full explanation in tears.create_full_tear_sheet
market_data : pd.Panel
Panel with items axis of 'price' and 'volume' DataFrames.
The major and minor axes should match those of the
the passed positions DataFrame (same dates and symbols).
max_bar_consumption : float
Max proportion of a daily bar that can be consumed in the
process of liquidating a position.
capital_base : integer
Capital base multiplied by portfolio allocation to compute
position value that needs liquidating.
mean_volume_window : float
Trailing window to use in mean volume calculation.
last_n_days : integer
Compute for only the last n days of the passed backtest data.
Returns
-------
days_to_liquidate : pd.DataFrame
Max Number of days required to fully liquidate each traded name.
Index of symbols. Columns for days_to_liquidate and the corresponding
date and position_alloc on that day.
"""
dtlp = days_to_liquidate_positions(positions, market_data,
max_bar_consumption=max_bar_consumption,
capital_base=capital_base,
mean_volume_window=mean_volume_window)
if last_n_days is not None:
dtlp = dtlp.loc[dtlp.index.max() - pd.Timedelta(days=last_n_days):]
pos_alloc = pos.get_percent_alloc(positions)
pos_alloc = pos_alloc.drop('cash', axis=1)
liq_desc = pd.DataFrame()
liq_desc['days_to_liquidate'] = dtlp.unstack()
liq_desc['pos_alloc_pct'] = pos_alloc.unstack() * 100
liq_desc.index.levels[0].name = 'symbol'
liq_desc.index.levels[1].name = 'date'
worst_liq = liq_desc.reset_index().sort_values(
'days_to_liquidate', ascending=False).groupby('symbol').first()
return worst_liq | def function[get_max_days_to_liquidate_by_ticker, parameter[positions, market_data, max_bar_consumption, capital_base, mean_volume_window, last_n_days]]:
constant[
Finds the longest estimated liquidation time for each traded
name over the course of backtest (or last n days of the backtest).
Parameters
----------
positions: pd.DataFrame
Contains daily position values including cash
- See full explanation in tears.create_full_tear_sheet
market_data : pd.Panel
Panel with items axis of 'price' and 'volume' DataFrames.
The major and minor axes should match those of the
the passed positions DataFrame (same dates and symbols).
max_bar_consumption : float
Max proportion of a daily bar that can be consumed in the
process of liquidating a position.
capital_base : integer
Capital base multiplied by portfolio allocation to compute
position value that needs liquidating.
mean_volume_window : float
Trailing window to use in mean volume calculation.
last_n_days : integer
Compute for only the last n days of the passed backtest data.
Returns
-------
days_to_liquidate : pd.DataFrame
Max Number of days required to fully liquidate each traded name.
Index of symbols. Columns for days_to_liquidate and the corresponding
date and position_alloc on that day.
]
variable[dtlp] assign[=] call[name[days_to_liquidate_positions], parameter[name[positions], name[market_data]]]
if compare[name[last_n_days] is_not constant[None]] begin[:]
variable[dtlp] assign[=] call[name[dtlp].loc][<ast.Slice object at 0x7da1b02b53c0>]
variable[pos_alloc] assign[=] call[name[pos].get_percent_alloc, parameter[name[positions]]]
variable[pos_alloc] assign[=] call[name[pos_alloc].drop, parameter[constant[cash]]]
variable[liq_desc] assign[=] call[name[pd].DataFrame, parameter[]]
call[name[liq_desc]][constant[days_to_liquidate]] assign[=] call[name[dtlp].unstack, parameter[]]
call[name[liq_desc]][constant[pos_alloc_pct]] assign[=] binary_operation[call[name[pos_alloc].unstack, parameter[]] * constant[100]]
call[name[liq_desc].index.levels][constant[0]].name assign[=] constant[symbol]
call[name[liq_desc].index.levels][constant[1]].name assign[=] constant[date]
variable[worst_liq] assign[=] call[call[call[call[name[liq_desc].reset_index, parameter[]].sort_values, parameter[constant[days_to_liquidate]]].groupby, parameter[constant[symbol]]].first, parameter[]]
return[name[worst_liq]] | keyword[def] identifier[get_max_days_to_liquidate_by_ticker] ( identifier[positions] , identifier[market_data] ,
identifier[max_bar_consumption] = literal[int] ,
identifier[capital_base] = literal[int] ,
identifier[mean_volume_window] = literal[int] ,
identifier[last_n_days] = keyword[None] ):
literal[string]
identifier[dtlp] = identifier[days_to_liquidate_positions] ( identifier[positions] , identifier[market_data] ,
identifier[max_bar_consumption] = identifier[max_bar_consumption] ,
identifier[capital_base] = identifier[capital_base] ,
identifier[mean_volume_window] = identifier[mean_volume_window] )
keyword[if] identifier[last_n_days] keyword[is] keyword[not] keyword[None] :
identifier[dtlp] = identifier[dtlp] . identifier[loc] [ identifier[dtlp] . identifier[index] . identifier[max] ()- identifier[pd] . identifier[Timedelta] ( identifier[days] = identifier[last_n_days] ):]
identifier[pos_alloc] = identifier[pos] . identifier[get_percent_alloc] ( identifier[positions] )
identifier[pos_alloc] = identifier[pos_alloc] . identifier[drop] ( literal[string] , identifier[axis] = literal[int] )
identifier[liq_desc] = identifier[pd] . identifier[DataFrame] ()
identifier[liq_desc] [ literal[string] ]= identifier[dtlp] . identifier[unstack] ()
identifier[liq_desc] [ literal[string] ]= identifier[pos_alloc] . identifier[unstack] ()* literal[int]
identifier[liq_desc] . identifier[index] . identifier[levels] [ literal[int] ]. identifier[name] = literal[string]
identifier[liq_desc] . identifier[index] . identifier[levels] [ literal[int] ]. identifier[name] = literal[string]
identifier[worst_liq] = identifier[liq_desc] . identifier[reset_index] (). identifier[sort_values] (
literal[string] , identifier[ascending] = keyword[False] ). identifier[groupby] ( literal[string] ). identifier[first] ()
keyword[return] identifier[worst_liq] | def get_max_days_to_liquidate_by_ticker(positions, market_data, max_bar_consumption=0.2, capital_base=1000000.0, mean_volume_window=5, last_n_days=None):
"""
Finds the longest estimated liquidation time for each traded
name over the course of backtest (or last n days of the backtest).
Parameters
----------
positions: pd.DataFrame
Contains daily position values including cash
- See full explanation in tears.create_full_tear_sheet
market_data : pd.Panel
Panel with items axis of 'price' and 'volume' DataFrames.
The major and minor axes should match those of the
the passed positions DataFrame (same dates and symbols).
max_bar_consumption : float
Max proportion of a daily bar that can be consumed in the
process of liquidating a position.
capital_base : integer
Capital base multiplied by portfolio allocation to compute
position value that needs liquidating.
mean_volume_window : float
Trailing window to use in mean volume calculation.
last_n_days : integer
Compute for only the last n days of the passed backtest data.
Returns
-------
days_to_liquidate : pd.DataFrame
Max Number of days required to fully liquidate each traded name.
Index of symbols. Columns for days_to_liquidate and the corresponding
date and position_alloc on that day.
"""
dtlp = days_to_liquidate_positions(positions, market_data, max_bar_consumption=max_bar_consumption, capital_base=capital_base, mean_volume_window=mean_volume_window)
if last_n_days is not None:
dtlp = dtlp.loc[dtlp.index.max() - pd.Timedelta(days=last_n_days):] # depends on [control=['if'], data=['last_n_days']]
pos_alloc = pos.get_percent_alloc(positions)
pos_alloc = pos_alloc.drop('cash', axis=1)
liq_desc = pd.DataFrame()
liq_desc['days_to_liquidate'] = dtlp.unstack()
liq_desc['pos_alloc_pct'] = pos_alloc.unstack() * 100
liq_desc.index.levels[0].name = 'symbol'
liq_desc.index.levels[1].name = 'date'
worst_liq = liq_desc.reset_index().sort_values('days_to_liquidate', ascending=False).groupby('symbol').first()
return worst_liq |
def overlaps(self, other, permissive=False):
"""
Test if intervals have any overlapping value.
If 'permissive' is set to True (default is False), then [1, 2) and [2, 3] are considered as having
an overlap on value 2 (but not [1, 2) and (2, 3]).
:param other: an atomic interval.
:param permissive: set to True to consider contiguous intervals as well.
:return True if intervals overlap, False otherwise.
"""
if not isinstance(other, AtomicInterval):
raise TypeError('Only AtomicInterval instances are supported.')
if self._lower > other.lower:
first, second = other, self
else:
first, second = self, other
if first._upper == second._lower:
if permissive:
return first._right == CLOSED or second._left == CLOSED
else:
return first._right == CLOSED and second._left == CLOSED
return first._upper > second._lower | def function[overlaps, parameter[self, other, permissive]]:
constant[
Test if intervals have any overlapping value.
If 'permissive' is set to True (default is False), then [1, 2) and [2, 3] are considered as having
an overlap on value 2 (but not [1, 2) and (2, 3]).
:param other: an atomic interval.
:param permissive: set to True to consider contiguous intervals as well.
:return True if intervals overlap, False otherwise.
]
if <ast.UnaryOp object at 0x7da1b1235bd0> begin[:]
<ast.Raise object at 0x7da1b12345e0>
if compare[name[self]._lower greater[>] name[other].lower] begin[:]
<ast.Tuple object at 0x7da1b12351b0> assign[=] tuple[[<ast.Name object at 0x7da1b1237340>, <ast.Name object at 0x7da1b12357e0>]]
if compare[name[first]._upper equal[==] name[second]._lower] begin[:]
if name[permissive] begin[:]
return[<ast.BoolOp object at 0x7da1b1235ae0>]
return[compare[name[first]._upper greater[>] name[second]._lower]] | keyword[def] identifier[overlaps] ( identifier[self] , identifier[other] , identifier[permissive] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[other] , identifier[AtomicInterval] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[self] . identifier[_lower] > identifier[other] . identifier[lower] :
identifier[first] , identifier[second] = identifier[other] , identifier[self]
keyword[else] :
identifier[first] , identifier[second] = identifier[self] , identifier[other]
keyword[if] identifier[first] . identifier[_upper] == identifier[second] . identifier[_lower] :
keyword[if] identifier[permissive] :
keyword[return] identifier[first] . identifier[_right] == identifier[CLOSED] keyword[or] identifier[second] . identifier[_left] == identifier[CLOSED]
keyword[else] :
keyword[return] identifier[first] . identifier[_right] == identifier[CLOSED] keyword[and] identifier[second] . identifier[_left] == identifier[CLOSED]
keyword[return] identifier[first] . identifier[_upper] > identifier[second] . identifier[_lower] | def overlaps(self, other, permissive=False):
"""
Test if intervals have any overlapping value.
If 'permissive' is set to True (default is False), then [1, 2) and [2, 3] are considered as having
an overlap on value 2 (but not [1, 2) and (2, 3]).
:param other: an atomic interval.
:param permissive: set to True to consider contiguous intervals as well.
:return True if intervals overlap, False otherwise.
"""
if not isinstance(other, AtomicInterval):
raise TypeError('Only AtomicInterval instances are supported.') # depends on [control=['if'], data=[]]
if self._lower > other.lower:
(first, second) = (other, self) # depends on [control=['if'], data=[]]
else:
(first, second) = (self, other)
if first._upper == second._lower:
if permissive:
return first._right == CLOSED or second._left == CLOSED # depends on [control=['if'], data=[]]
else:
return first._right == CLOSED and second._left == CLOSED # depends on [control=['if'], data=[]]
return first._upper > second._lower |
def get_cursor_position(self, file_path):
"""
Gets the cached cursor position for file_path
:param file_path: path of the file in the cache
:return: Cached cursor position or (0, 0)
"""
try:
map = json.loads(self._settings.value('cachedCursorPosition'))
except TypeError:
map = {}
try:
pos = map[file_path]
except KeyError:
pos = 0
if isinstance(pos, list):
# changed in pyqode 2.6.3, now we store the cursor position
# instead of the line and column (faster)
pos = 0
return pos | def function[get_cursor_position, parameter[self, file_path]]:
constant[
Gets the cached cursor position for file_path
:param file_path: path of the file in the cache
:return: Cached cursor position or (0, 0)
]
<ast.Try object at 0x7da18f09df90>
<ast.Try object at 0x7da18f09e8f0>
if call[name[isinstance], parameter[name[pos], name[list]]] begin[:]
variable[pos] assign[=] constant[0]
return[name[pos]] | keyword[def] identifier[get_cursor_position] ( identifier[self] , identifier[file_path] ):
literal[string]
keyword[try] :
identifier[map] = identifier[json] . identifier[loads] ( identifier[self] . identifier[_settings] . identifier[value] ( literal[string] ))
keyword[except] identifier[TypeError] :
identifier[map] ={}
keyword[try] :
identifier[pos] = identifier[map] [ identifier[file_path] ]
keyword[except] identifier[KeyError] :
identifier[pos] = literal[int]
keyword[if] identifier[isinstance] ( identifier[pos] , identifier[list] ):
identifier[pos] = literal[int]
keyword[return] identifier[pos] | def get_cursor_position(self, file_path):
"""
Gets the cached cursor position for file_path
:param file_path: path of the file in the cache
:return: Cached cursor position or (0, 0)
"""
try:
map = json.loads(self._settings.value('cachedCursorPosition')) # depends on [control=['try'], data=[]]
except TypeError:
map = {} # depends on [control=['except'], data=[]]
try:
pos = map[file_path] # depends on [control=['try'], data=[]]
except KeyError:
pos = 0 # depends on [control=['except'], data=[]]
if isinstance(pos, list):
# changed in pyqode 2.6.3, now we store the cursor position
# instead of the line and column (faster)
pos = 0 # depends on [control=['if'], data=[]]
return pos |
def accepts(self): # type: Union[Iterable[Type[T]], Type[Any]]
"""The types of objects the data sink can store."""
types = set()
any_dispatch = False
try:
types.update(getattr(self.__class__, "put")._accepts)
any_dispatch = True
except AttributeError:
pass
try:
types.update(getattr(self.__class__, "put_many")._accepts)
any_dispatch = True
except AttributeError:
pass
return types if any_dispatch else TYPE_WILDCARD | def function[accepts, parameter[self]]:
constant[The types of objects the data sink can store.]
variable[types] assign[=] call[name[set], parameter[]]
variable[any_dispatch] assign[=] constant[False]
<ast.Try object at 0x7da1b1932cb0>
<ast.Try object at 0x7da1b1931c90>
return[<ast.IfExp object at 0x7da1b1933310>] | keyword[def] identifier[accepts] ( identifier[self] ):
literal[string]
identifier[types] = identifier[set] ()
identifier[any_dispatch] = keyword[False]
keyword[try] :
identifier[types] . identifier[update] ( identifier[getattr] ( identifier[self] . identifier[__class__] , literal[string] ). identifier[_accepts] )
identifier[any_dispatch] = keyword[True]
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[try] :
identifier[types] . identifier[update] ( identifier[getattr] ( identifier[self] . identifier[__class__] , literal[string] ). identifier[_accepts] )
identifier[any_dispatch] = keyword[True]
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[return] identifier[types] keyword[if] identifier[any_dispatch] keyword[else] identifier[TYPE_WILDCARD] | def accepts(self): # type: Union[Iterable[Type[T]], Type[Any]]
'The types of objects the data sink can store.'
types = set()
any_dispatch = False
try:
types.update(getattr(self.__class__, 'put')._accepts)
any_dispatch = True # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
try:
types.update(getattr(self.__class__, 'put_many')._accepts)
any_dispatch = True # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
return types if any_dispatch else TYPE_WILDCARD |
def _classify_segment(self, address, length):
"""Determine how a new data segment fits into our existing world
Params:
address (int): The address we wish to classify
length (int): The length of the segment
Returns:
int: One of SparseMemoryMap.prepended
"""
end_address = address + length - 1
_, start_seg = self._find_address(address)
_, end_seg = self._find_address(end_address)
if start_seg is not None or end_seg is not None:
raise ArgumentError("Overlapping segments are not yet supported", address=address, length=length)
return DisjointSegment() | def function[_classify_segment, parameter[self, address, length]]:
constant[Determine how a new data segment fits into our existing world
Params:
address (int): The address we wish to classify
length (int): The length of the segment
Returns:
int: One of SparseMemoryMap.prepended
]
variable[end_address] assign[=] binary_operation[binary_operation[name[address] + name[length]] - constant[1]]
<ast.Tuple object at 0x7da20e9b3d00> assign[=] call[name[self]._find_address, parameter[name[address]]]
<ast.Tuple object at 0x7da20e9b19f0> assign[=] call[name[self]._find_address, parameter[name[end_address]]]
if <ast.BoolOp object at 0x7da20e9b1fc0> begin[:]
<ast.Raise object at 0x7da204621270>
return[call[name[DisjointSegment], parameter[]]] | keyword[def] identifier[_classify_segment] ( identifier[self] , identifier[address] , identifier[length] ):
literal[string]
identifier[end_address] = identifier[address] + identifier[length] - literal[int]
identifier[_] , identifier[start_seg] = identifier[self] . identifier[_find_address] ( identifier[address] )
identifier[_] , identifier[end_seg] = identifier[self] . identifier[_find_address] ( identifier[end_address] )
keyword[if] identifier[start_seg] keyword[is] keyword[not] keyword[None] keyword[or] identifier[end_seg] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[ArgumentError] ( literal[string] , identifier[address] = identifier[address] , identifier[length] = identifier[length] )
keyword[return] identifier[DisjointSegment] () | def _classify_segment(self, address, length):
"""Determine how a new data segment fits into our existing world
Params:
address (int): The address we wish to classify
length (int): The length of the segment
Returns:
int: One of SparseMemoryMap.prepended
"""
end_address = address + length - 1
(_, start_seg) = self._find_address(address)
(_, end_seg) = self._find_address(end_address)
if start_seg is not None or end_seg is not None:
raise ArgumentError('Overlapping segments are not yet supported', address=address, length=length) # depends on [control=['if'], data=[]]
return DisjointSegment() |
def parse(self, element):
"""Extracts the values from the specified XML element that is being converted."""
#All the children of this element are what we are trying to parse.
result = []
for child in element:
if child.tag in self.lines:
values = { child.tag: self.lines[child.tag].parse(child) }
result.append(values)
return result | def function[parse, parameter[self, element]]:
constant[Extracts the values from the specified XML element that is being converted.]
variable[result] assign[=] list[[]]
for taget[name[child]] in starred[name[element]] begin[:]
if compare[name[child].tag in name[self].lines] begin[:]
variable[values] assign[=] dictionary[[<ast.Attribute object at 0x7da20c796200>], [<ast.Call object at 0x7da20c795270>]]
call[name[result].append, parameter[name[values]]]
return[name[result]] | keyword[def] identifier[parse] ( identifier[self] , identifier[element] ):
literal[string]
identifier[result] =[]
keyword[for] identifier[child] keyword[in] identifier[element] :
keyword[if] identifier[child] . identifier[tag] keyword[in] identifier[self] . identifier[lines] :
identifier[values] ={ identifier[child] . identifier[tag] : identifier[self] . identifier[lines] [ identifier[child] . identifier[tag] ]. identifier[parse] ( identifier[child] )}
identifier[result] . identifier[append] ( identifier[values] )
keyword[return] identifier[result] | def parse(self, element):
"""Extracts the values from the specified XML element that is being converted."""
#All the children of this element are what we are trying to parse.
result = []
for child in element:
if child.tag in self.lines:
values = {child.tag: self.lines[child.tag].parse(child)}
result.append(values) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']]
return result |
def _preprocess_nodes_for_pydot(nodes_with_data):
"""throw away all node attributes, except for 'label'"""
for (node_id, attrs) in nodes_with_data:
if 'label' in attrs:
yield (quote_for_pydot(node_id),
{'label': quote_for_pydot(attrs['label'])})
else:
yield (quote_for_pydot(node_id), {}) | def function[_preprocess_nodes_for_pydot, parameter[nodes_with_data]]:
constant[throw away all node attributes, except for 'label']
for taget[tuple[[<ast.Name object at 0x7da204620e20>, <ast.Name object at 0x7da2046214e0>]]] in starred[name[nodes_with_data]] begin[:]
if compare[constant[label] in name[attrs]] begin[:]
<ast.Yield object at 0x7da204623430> | keyword[def] identifier[_preprocess_nodes_for_pydot] ( identifier[nodes_with_data] ):
literal[string]
keyword[for] ( identifier[node_id] , identifier[attrs] ) keyword[in] identifier[nodes_with_data] :
keyword[if] literal[string] keyword[in] identifier[attrs] :
keyword[yield] ( identifier[quote_for_pydot] ( identifier[node_id] ),
{ literal[string] : identifier[quote_for_pydot] ( identifier[attrs] [ literal[string] ])})
keyword[else] :
keyword[yield] ( identifier[quote_for_pydot] ( identifier[node_id] ),{}) | def _preprocess_nodes_for_pydot(nodes_with_data):
"""throw away all node attributes, except for 'label'"""
for (node_id, attrs) in nodes_with_data:
if 'label' in attrs:
yield (quote_for_pydot(node_id), {'label': quote_for_pydot(attrs['label'])}) # depends on [control=['if'], data=['attrs']]
else:
yield (quote_for_pydot(node_id), {}) # depends on [control=['for'], data=[]] |
def printrec(recst):
""" Pretty-printing rtsp strings
"""
try:
recst = recst.decode('UTF-8')
except AttributeError:
pass
recs=[ x for x in recst.split('\r\n') if x ]
for rec in recs:
print(rec)
print("\n") | def function[printrec, parameter[recst]]:
constant[ Pretty-printing rtsp strings
]
<ast.Try object at 0x7da20cabc400>
variable[recs] assign[=] <ast.ListComp object at 0x7da20cabeef0>
for taget[name[rec]] in starred[name[recs]] begin[:]
call[name[print], parameter[name[rec]]]
call[name[print], parameter[constant[
]]] | keyword[def] identifier[printrec] ( identifier[recst] ):
literal[string]
keyword[try] :
identifier[recst] = identifier[recst] . identifier[decode] ( literal[string] )
keyword[except] identifier[AttributeError] :
keyword[pass]
identifier[recs] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[recst] . identifier[split] ( literal[string] ) keyword[if] identifier[x] ]
keyword[for] identifier[rec] keyword[in] identifier[recs] :
identifier[print] ( identifier[rec] )
identifier[print] ( literal[string] ) | def printrec(recst):
""" Pretty-printing rtsp strings
"""
try:
recst = recst.decode('UTF-8') # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
recs = [x for x in recst.split('\r\n') if x]
for rec in recs:
print(rec) # depends on [control=['for'], data=['rec']]
print('\n') |
def release_client(self, cb):
"""
Return a Connection object to the pool
:param Connection cb: the client to release
"""
if cb:
self._q.put(cb, True)
self._clients_in_use -= 1 | def function[release_client, parameter[self, cb]]:
constant[
Return a Connection object to the pool
:param Connection cb: the client to release
]
if name[cb] begin[:]
call[name[self]._q.put, parameter[name[cb], constant[True]]]
<ast.AugAssign object at 0x7da20c795270> | keyword[def] identifier[release_client] ( identifier[self] , identifier[cb] ):
literal[string]
keyword[if] identifier[cb] :
identifier[self] . identifier[_q] . identifier[put] ( identifier[cb] , keyword[True] )
identifier[self] . identifier[_clients_in_use] -= literal[int] | def release_client(self, cb):
"""
Return a Connection object to the pool
:param Connection cb: the client to release
"""
if cb:
self._q.put(cb, True)
self._clients_in_use -= 1 # depends on [control=['if'], data=[]] |
def iter_edges(self, cached_content=None):
"""
Iterate over the list of edges of a tree. Each egde is represented as a
tuple of two elements, each containing the list of nodes separated by
the edge.
"""
if not cached_content:
cached_content = self.get_cached_content()
all_leaves = cached_content[self]
for n, side1 in six.iteritems(cached_content):
yield (side1, all_leaves - side1) | def function[iter_edges, parameter[self, cached_content]]:
constant[
Iterate over the list of edges of a tree. Each egde is represented as a
tuple of two elements, each containing the list of nodes separated by
the edge.
]
if <ast.UnaryOp object at 0x7da1b0e2ee00> begin[:]
variable[cached_content] assign[=] call[name[self].get_cached_content, parameter[]]
variable[all_leaves] assign[=] call[name[cached_content]][name[self]]
for taget[tuple[[<ast.Name object at 0x7da1b0e2eb60>, <ast.Name object at 0x7da1b0e2eb30>]]] in starred[call[name[six].iteritems, parameter[name[cached_content]]]] begin[:]
<ast.Yield object at 0x7da1b0e2ea10> | keyword[def] identifier[iter_edges] ( identifier[self] , identifier[cached_content] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[cached_content] :
identifier[cached_content] = identifier[self] . identifier[get_cached_content] ()
identifier[all_leaves] = identifier[cached_content] [ identifier[self] ]
keyword[for] identifier[n] , identifier[side1] keyword[in] identifier[six] . identifier[iteritems] ( identifier[cached_content] ):
keyword[yield] ( identifier[side1] , identifier[all_leaves] - identifier[side1] ) | def iter_edges(self, cached_content=None):
"""
Iterate over the list of edges of a tree. Each egde is represented as a
tuple of two elements, each containing the list of nodes separated by
the edge.
"""
if not cached_content:
cached_content = self.get_cached_content() # depends on [control=['if'], data=[]]
all_leaves = cached_content[self]
for (n, side1) in six.iteritems(cached_content):
yield (side1, all_leaves - side1) # depends on [control=['for'], data=[]] |
def run(self):
    """
    Main interface. Instantiate the SlackAPI, connect to RTM
    and start the client.
    """
    api = SlackAPI(token=self.token)
    session = api.rtm_start()
    client_factory = SlackClientFactory(session['url'])
    # Wire the protocol class up with everything it needs at runtime.
    client_factory.protocol = SlackClientProtocol
    client_factory.protocol.slack = api
    client_factory.protocol.channel_layer = self.channel_layer
    client_factory.channel_name = self.channel_name
    # Enter the factory's event loop.
    client_factory.run()
constant[
Main interface. Instantiate the SlackAPI, connect to RTM
and start the client.
]
variable[slack] assign[=] call[name[SlackAPI], parameter[]]
variable[rtm] assign[=] call[name[slack].rtm_start, parameter[]]
variable[factory] assign[=] call[name[SlackClientFactory], parameter[call[name[rtm]][constant[url]]]]
name[factory].protocol assign[=] name[SlackClientProtocol]
name[factory].protocol.slack assign[=] name[slack]
name[factory].protocol.channel_layer assign[=] name[self].channel_layer
name[factory].channel_name assign[=] name[self].channel_name
call[name[factory].run, parameter[]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[slack] = identifier[SlackAPI] ( identifier[token] = identifier[self] . identifier[token] )
identifier[rtm] = identifier[slack] . identifier[rtm_start] ()
identifier[factory] = identifier[SlackClientFactory] ( identifier[rtm] [ literal[string] ])
identifier[factory] . identifier[protocol] = identifier[SlackClientProtocol]
identifier[factory] . identifier[protocol] . identifier[slack] = identifier[slack]
identifier[factory] . identifier[protocol] . identifier[channel_layer] = identifier[self] . identifier[channel_layer]
identifier[factory] . identifier[channel_name] = identifier[self] . identifier[channel_name]
identifier[factory] . identifier[run] () | def run(self):
"""
Main interface. Instantiate the SlackAPI, connect to RTM
and start the client.
"""
slack = SlackAPI(token=self.token)
rtm = slack.rtm_start()
factory = SlackClientFactory(rtm['url'])
# Attach attributes
factory.protocol = SlackClientProtocol
factory.protocol.slack = slack
factory.protocol.channel_layer = self.channel_layer
factory.channel_name = self.channel_name
# Here we go
factory.run() |
def _set_passive(self, v, load=False):
    """
    Setter method for passive, mapped from YANG variable /rbridge_id/openflow/logical_instance/passive (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_passive is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_passive() directly.
    YANG Description: Passive controller connection

    Args:
        v: Value to assign; coerced into the generated ``passive`` container type.
        load: Accepted for interface compatibility; not read in this body.

    Raises:
        ValueError: if ``v`` cannot be coerced into the container type.
    """
    # NOTE: this looks like pyangbind-generated code -- prefer regenerating
    # from the YANG model over hand-editing.
    # If v was already wrapped by YANGDynClass, unwrap it to its base type
    # before re-wrapping below.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the value with the full YANG metadata for this container.
      t = YANGDynClass(v,base=passive.passive, is_container='container', presence=False, yang_name="passive", rest_name="passive", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Passive controller connection', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      # Surface a uniform error payload describing the expected YANG type.
      raise ValueError({
        'error-string': """passive must be of a type compatible with container""",
        'defined-type': "container",
        'generated-type': """YANGDynClass(base=passive.passive, is_container='container', presence=False, yang_name="passive", rest_name="passive", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Passive controller connection', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)""",
      })
    # Store on the name-mangled attribute; presumably read back by the
    # matching getter -- confirm against the rest of the generated class.
    self.__passive = t
    # Notify the parent object, when it supports it, that config changed.
    if hasattr(self, '_set'):
      self._set()
constant[
Setter method for passive, mapped from YANG variable /rbridge_id/openflow/logical_instance/passive (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_passive is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_passive() directly.
YANG Description: Passive controller connection
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da1b2596c20>
name[self].__passive assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_passive] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[passive] . identifier[passive] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__passive] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_passive(self, v, load=False):
"""
Setter method for passive, mapped from YANG variable /rbridge_id/openflow/logical_instance/passive (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_passive is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_passive() directly.
YANG Description: Passive controller connection
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=passive.passive, is_container='container', presence=False, yang_name='passive', rest_name='passive', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Passive controller connection', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'passive must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=passive.passive, is_container=\'container\', presence=False, yang_name="passive", rest_name="passive", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Passive controller connection\', u\'cli-incomplete-no\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-openflow\', defining_module=\'brocade-openflow\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__passive = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def determine_2(self, container_name, container_alias, meta, val):
    """Default the alias to the name of the container."""
    # An explicitly provided alias always wins.
    if container_alias is not NotSpecified:
        return container_alias
    # Otherwise derive one from the name: keep everything after the last
    # ":" (the whole name when there is no colon) and replace "/" so the
    # result is usable as a flat identifier.
    derived = container_name.rpartition(":")[2]
    return derived.replace('/', '-')
constant["Default the alias to the name of the container]
if compare[name[container_alias] is_not name[NotSpecified]] begin[:]
return[name[container_alias]]
return[call[call[name[container_name]][<ast.Slice object at 0x7da1b26af130>].replace, parameter[constant[/], constant[-]]]] | keyword[def] identifier[determine_2] ( identifier[self] , identifier[container_name] , identifier[container_alias] , identifier[meta] , identifier[val] ):
literal[string]
keyword[if] identifier[container_alias] keyword[is] keyword[not] identifier[NotSpecified] :
keyword[return] identifier[container_alias]
keyword[return] identifier[container_name] [ identifier[container_name] . identifier[rfind] ( literal[string] )+ literal[int] :]. identifier[replace] ( literal[string] , literal[string] ) | def determine_2(self, container_name, container_alias, meta, val):
""""Default the alias to the name of the container"""
if container_alias is not NotSpecified:
return container_alias # depends on [control=['if'], data=['container_alias']]
return container_name[container_name.rfind(':') + 1:].replace('/', '-') |
def update_many(path, points):
    """update_many(path,points)

    path is a string
    points is a list of (timestamp,value) points
    """
    # Nothing to write for an empty (or None) update batch.
    if not points:
        return
    # Normalize to (int, float) pairs and order newest-first, as
    # file_update_many expects.
    points = [(int(t), float(v)) for (t, v) in points]
    points.sort(key=lambda p: p[0], reverse=True)
    # The context manager guarantees the handle is closed even when
    # file_update_many raises, replacing the manual fh=None/try/finally
    # bookkeeping of the previous version.
    with open(path, 'r+b') as fh:
        return file_update_many(fh, points)
constant[update_many(path,points)
path is a string
points is a list of (timestamp,value) points
]
if <ast.UnaryOp object at 0x7da1b23448e0> begin[:]
return[None]
variable[points] assign[=] <ast.ListComp object at 0x7da1b2345960>
call[name[points].sort, parameter[]]
variable[fh] assign[=] constant[None]
<ast.Try object at 0x7da20c76e050> | keyword[def] identifier[update_many] ( identifier[path] , identifier[points] ):
literal[string]
keyword[if] keyword[not] identifier[points] : keyword[return]
identifier[points] =[( identifier[int] ( identifier[t] ), identifier[float] ( identifier[v] )) keyword[for] ( identifier[t] , identifier[v] ) keyword[in] identifier[points] ]
identifier[points] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[p] : identifier[p] [ literal[int] ], identifier[reverse] = keyword[True] )
identifier[fh] = keyword[None]
keyword[try] :
identifier[fh] = identifier[open] ( identifier[path] , literal[string] )
keyword[return] identifier[file_update_many] ( identifier[fh] , identifier[points] )
keyword[finally] :
keyword[if] identifier[fh] :
identifier[fh] . identifier[close] () | def update_many(path, points):
"""update_many(path,points)
path is a string
points is a list of (timestamp,value) points
"""
if not points:
return # depends on [control=['if'], data=[]]
points = [(int(t), float(v)) for (t, v) in points]
points.sort(key=lambda p: p[0], reverse=True) #order points by timestamp, newest first
fh = None
try:
fh = open(path, 'r+b')
return file_update_many(fh, points) # depends on [control=['try'], data=[]]
finally:
if fh:
fh.close() # depends on [control=['if'], data=[]] |
def wireshark(pktlist, *args):
    """Run wireshark on a list of packets"""
    # Dump the packets to a temporary capture file first.
    capture = get_temp_file()
    wrpcap(capture, pktlist)
    # Launch wireshark against the capture, forwarding any extra CLI args.
    command = [conf.prog.wireshark, "-r", capture]
    command.extend(args)
    subprocess.Popen(command)
constant[Run wireshark on a list of packets]
variable[fname] assign[=] call[name[get_temp_file], parameter[]]
call[name[wrpcap], parameter[name[fname], name[pktlist]]]
call[name[subprocess].Popen, parameter[binary_operation[list[[<ast.Attribute object at 0x7da1b12a96c0>, <ast.Constant object at 0x7da1b12abf40>, <ast.Name object at 0x7da1b12a8a00>]] + call[name[list], parameter[name[args]]]]]] | keyword[def] identifier[wireshark] ( identifier[pktlist] ,* identifier[args] ):
literal[string]
identifier[fname] = identifier[get_temp_file] ()
identifier[wrpcap] ( identifier[fname] , identifier[pktlist] )
identifier[subprocess] . identifier[Popen] ([ identifier[conf] . identifier[prog] . identifier[wireshark] , literal[string] , identifier[fname] ]+ identifier[list] ( identifier[args] )) | def wireshark(pktlist, *args):
"""Run wireshark on a list of packets"""
fname = get_temp_file()
wrpcap(fname, pktlist)
subprocess.Popen([conf.prog.wireshark, '-r', fname] + list(args)) |
def _generateGraphData(data, oldData=None):
    """
    Process the data from i3visio structures to generate nodes and edges.

    This function uses the networkx graph library. It will create a new node
    for each i3visio.<something> entity while it will add properties for
    all the attributes starting with "@".

    Args:
    -----
        data: List of i3visio structures; each is a dict with "value",
            "type" and "attributes" keys.
        oldData: A graph structure representing the previous information.
            Defaults to a fresh empty graph. ``None`` is used as the
            sentinel because the old ``oldData=nx.Graph()`` default was a
            mutable default argument shared across every call, silently
            accumulating nodes between unrelated invocations.

    Returns:
    --------
        A graph structure representing the updated information.
    """
    def _addNewNode(ent, g):
        """
        Wrap the creation of a node.

        Args:
        -----
            ent: The i3visio-like entity used as the identifier, e.g.
                {"value": "i3visio", "type": "i3visio.alias"}.
            g: The graph in which the entity will be stored.

        Returns:
        --------
            The label used to represent this element.
        """
        try:
            label = unicode(ent["value"])
        except UnicodeEncodeError:
            # Value cannot be handled as unicode text; fall back to str.
            label = str(ent["value"])
        g.add_node(label)
        g.node[label]["type"] = ent["type"]
        return label

    def _processAttributes(elems, g):
        """
        Process a list of elements to obtain new attributes.

        Args:
        -----
            elems: List of i3visio-like entities.
            g: The graph in which the entities will be stored.

        Returns:
        --------
            newAtts: Dict of attributes (to be stored as node properties
                of the given entity).
            newEntities: List of new entities (to be linked to the given
                entity as their own nodes).
        """
        newAtts = {}
        newEntities = []
        for att in elems:
            if att["type"][0] == "@":
                # Plain attribute: strip the leading "@" and any "_".
                attName = str(att["type"][1:]).replace('_', '')
                # Store numeric attribute values as ints when possible.
                try:
                    newAtts[attName] = int(att["value"])
                except (TypeError, ValueError):
                    newAtts[attName] = att["value"]
            elif att["type"][:8] == "i3visio.":
                # Nested entity: it becomes its own node in the graph.
                ent = {
                    "value": att["value"],
                    "type": att["type"].replace("i3visio.", "i3visio_"),
                }
                newEntities.append(ent)
                hashLabel = _addNewNode(ent, g)
                # Recurse so the nested entity's own attributes and
                # sub-entities are processed as well.
                newAttsInAttributes, newEntitiesInAttributes = _processAttributes(att["attributes"], g)
                g.node[hashLabel].update(newAttsInAttributes)
                # NOTE(review): the edge endpoint below is the JSON dump of
                # the entity dict, not the node label _addNewNode created for
                # it. Kept as-is for backwards compatibility, but it looks
                # inconsistent with the value-label edges built in the outer
                # loop -- confirm upstream before changing.
                for new in newEntitiesInAttributes:
                    g.add_edge(hashLabel, json.dumps(new))
            else:
                # An unexpected type: silently ignored.
                pass
        return newAtts, newEntities

    # Fix: create the graph lazily instead of via a mutable default argument.
    graphData = oldData if oldData is not None else nx.Graph()
    # Iterating through the results.
    for elem in data:
        # A dict representing the pair: type, value entity.
        ent = {
            "value": elem["value"],
            "type": elem["type"],
        }
        new_node = _addNewNode(ent, graphData)
        # Grab the "@..." properties and the nested entities.
        newAtts, newEntities = _processAttributes(elem["attributes"], graphData)
        graphData.node[new_node].update(newAtts)
        # Link every top-level nested entity to the node it hangs from.
        for other_node in newEntities:
            try:
                other_node = unicode(other_node["value"])
            except UnicodeEncodeError:
                other_node = str(other_node["value"])
            graphData.add_edge(new_node, other_node)
    return graphData
constant[
Processing the data from i3visio structures to generate nodes and edges
This function uses the networkx graph library. It will create a new node
for each and i3visio.<something> entities while it will add properties for
all the attribute starting with "@".
Args:
-----
d: The i3visio structures containing a list of
oldData: A graph structure representing the previous information.
Returns:
--------
A graph structure representing the updated information.
]
def function[_addNewNode, parameter[ent, g]]:
constant[
Wraps the creation of a node
Args:
-----
ent: The hi3visio-like entities to be used as the identifier.
ent = {
"value":"i3visio",
"type":"i3visio.alias,
}
g: The graph in which the entity will be stored.
Returns:
-------
The label used to represent this element.
]
<ast.Try object at 0x7da1b1108ca0>
call[name[g].add_node, parameter[name[label]]]
call[call[name[g].node][name[label]]][constant[type]] assign[=] call[name[ent]][constant[type]]
return[name[label]]
def function[_processAttributes, parameter[elems, g]]:
constant[
Function that processes a list of elements to obtain new attributes.
Args:
-----
elems: List of i3visio-like entities.
g: The graph in which the entity will be stored.
Returns:
--------
newAtts: Dict of attributes (to be stored as attributes for the
given entity).
newEntities: List of new Entities (to be stored as attributes for
the given entity).
]
variable[newAtts] assign[=] dictionary[[], []]
variable[newEntities] assign[=] list[[]]
for taget[name[att]] in starred[name[elems]] begin[:]
if compare[call[call[name[att]][constant[type]]][constant[0]] equal[==] constant[@]] begin[:]
variable[attName] assign[=] call[call[name[str], parameter[call[call[name[att]][constant[type]]][<ast.Slice object at 0x7da18ede78e0>]]].replace, parameter[constant[_], constant[]]]
<ast.Try object at 0x7da18ede52d0>
return[tuple[[<ast.Name object at 0x7da1b12bb2b0>, <ast.Name object at 0x7da1b12bafe0>]]]
variable[graphData] assign[=] name[oldData]
for taget[name[elem]] in starred[name[data]] begin[:]
variable[ent] assign[=] dictionary[[<ast.Constant object at 0x7da1b12b9ed0>, <ast.Constant object at 0x7da1b12b9c60>], [<ast.Subscript object at 0x7da1b12b8e50>, <ast.Subscript object at 0x7da1b12b90c0>]]
variable[new_node] assign[=] call[name[_addNewNode], parameter[name[ent], name[graphData]]]
<ast.Tuple object at 0x7da1b12b9180> assign[=] call[name[_processAttributes], parameter[call[name[elem]][constant[attributes]], name[graphData]]]
call[call[name[graphData].node][name[new_node]].update, parameter[name[newAtts]]]
for taget[name[other_node]] in starred[name[newEntities]] begin[:]
variable[serEnt] assign[=] call[name[json].dumps, parameter[name[new_node]]]
<ast.Try object at 0x7da1b1121b10>
call[name[graphData].add_edge, parameter[name[new_node], name[other_node]]]
<ast.Try object at 0x7da1b11237c0>
return[name[graphData]] | keyword[def] identifier[_generateGraphData] ( identifier[data] , identifier[oldData] = identifier[nx] . identifier[Graph] ()):
literal[string]
keyword[def] identifier[_addNewNode] ( identifier[ent] , identifier[g] ):
literal[string]
keyword[try] :
identifier[label] = identifier[unicode] ( identifier[ent] [ literal[string] ])
keyword[except] identifier[UnicodeEncodeError] keyword[as] identifier[e] :
identifier[label] = identifier[str] ( identifier[ent] [ literal[string] ])
identifier[g] . identifier[add_node] ( identifier[label] )
identifier[g] . identifier[node] [ identifier[label] ][ literal[string] ]= identifier[ent] [ literal[string] ]
keyword[return] identifier[label]
keyword[def] identifier[_processAttributes] ( identifier[elems] , identifier[g] ):
literal[string]
identifier[newAtts] ={}
identifier[newEntities] =[]
keyword[for] identifier[att] keyword[in] identifier[elems] :
keyword[if] identifier[att] [ literal[string] ][ literal[int] ]== literal[string] :
identifier[attName] = identifier[str] ( identifier[att] [ literal[string] ][ literal[int] :]). identifier[replace] ( literal[string] , literal[string] )
keyword[try] :
identifier[newAtts] [ identifier[attName] ]= identifier[int] ( identifier[att] [ literal[string] ])
keyword[except] :
identifier[newAtts] [ identifier[attName] ]= identifier[att] [ literal[string] ]
keyword[elif] identifier[att] [ literal[string] ][: literal[int] ]== literal[string] :
identifier[ent] ={
literal[string] : identifier[att] [ literal[string] ],
literal[string] : identifier[att] [ literal[string] ]. identifier[replace] ( literal[string] , literal[string] ),
}
identifier[newEntities] . identifier[append] ( identifier[ent] )
identifier[hashLabel] = identifier[_addNewNode] ( identifier[ent] , identifier[g] )
identifier[newAttsInAttributes] , identifier[newEntitiesInAttributes] = identifier[_processAttributes] ( identifier[att] [ literal[string] ], identifier[g] )
identifier[g] . identifier[node] [ identifier[hashLabel] ]. identifier[update] ( identifier[newAttsInAttributes] )
keyword[for] identifier[new] keyword[in] identifier[newEntitiesInAttributes] :
identifier[graphData] . identifier[add_edge] ( identifier[hashLabel] , identifier[json] . identifier[dumps] ( identifier[new] ))
keyword[try] :
keyword[pass]
keyword[except] :
keyword[pass]
keyword[else] :
keyword[pass]
keyword[return] identifier[newAtts] , identifier[newEntities]
identifier[graphData] = identifier[oldData]
keyword[for] identifier[elem] keyword[in] identifier[data] :
identifier[ent] ={
literal[string] : identifier[elem] [ literal[string] ],
literal[string] : identifier[elem] [ literal[string] ],
}
identifier[new_node] = identifier[_addNewNode] ( identifier[ent] , identifier[graphData] )
identifier[newAtts] , identifier[newEntities] = identifier[_processAttributes] ( identifier[elem] [ literal[string] ], identifier[graphData] )
identifier[graphData] . identifier[node] [ identifier[new_node] ]. identifier[update] ( identifier[newAtts] )
keyword[for] identifier[other_node] keyword[in] identifier[newEntities] :
identifier[serEnt] = identifier[json] . identifier[dumps] ( identifier[new_node] )
keyword[try] :
identifier[other_node] = identifier[unicode] ( identifier[other_node] [ literal[string] ])
keyword[except] identifier[UnicodeEncodeError] keyword[as] identifier[e] :
identifier[other_node] = identifier[str] ( identifier[other_node] [ literal[string] ])
identifier[graphData] . identifier[add_edge] ( identifier[new_node] , identifier[other_node] )
keyword[try] :
keyword[pass]
keyword[except] :
keyword[pass]
keyword[return] identifier[graphData] | def _generateGraphData(data, oldData=nx.Graph()):
"""
Processing the data from i3visio structures to generate nodes and edges
This function uses the networkx graph library. It will create a new node
for each and i3visio.<something> entities while it will add properties for
all the attribute starting with "@".
Args:
-----
d: The i3visio structures containing a list of
oldData: A graph structure representing the previous information.
Returns:
--------
A graph structure representing the updated information.
"""
def _addNewNode(ent, g):
"""
Wraps the creation of a node
Args:
-----
ent: The hi3visio-like entities to be used as the identifier.
ent = {
"value":"i3visio",
"type":"i3visio.alias,
}
g: The graph in which the entity will be stored.
Returns:
-------
The label used to represent this element.
"""
try:
label = unicode(ent['value']) # depends on [control=['try'], data=[]]
except UnicodeEncodeError as e:
# Printing that an error was found
label = str(ent['value']) # depends on [control=['except'], data=[]]
g.add_node(label)
g.node[label]['type'] = ent['type']
return label
def _processAttributes(elems, g):
"""
Function that processes a list of elements to obtain new attributes.
Args:
-----
elems: List of i3visio-like entities.
g: The graph in which the entity will be stored.
Returns:
--------
newAtts: Dict of attributes (to be stored as attributes for the
given entity).
newEntities: List of new Entities (to be stored as attributes for
the given entity).
"""
newAtts = {}
newEntities = []
for att in elems:
# If it is an attribute
if att['type'][0] == '@':
# Removing the @ and the _ of the attributes
attName = str(att['type'][1:]).replace('_', '')
try:
newAtts[attName] = int(att['value']) # depends on [control=['try'], data=[]]
except:
newAtts[attName] = att['value'] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif att['type'][:8] == 'i3visio.':
# Creating a dict to represent the pair: type, value entity.
ent = {'value': att['value'], 'type': att['type'].replace('i3visio.', 'i3visio_')}
# Appending the new Entity to the entity list
newEntities.append(ent)
# Appending the new node
hashLabel = _addNewNode(ent, g)
# Make this recursive to link the attributes in each and every att
(newAttsInAttributes, newEntitiesInAttributes) = _processAttributes(att['attributes'], g)
# Updating the attributes to the current entity
g.node[hashLabel].update(newAttsInAttributes)
# Creating the edges (the new entities have also been created in the _processAttributes
for new in newEntitiesInAttributes:
graphData.add_edge(hashLabel, json.dumps(new))
try:
# Here, we would add the properties of the edge
#graphData.edge[hashLabel][json.dumps(new)]["@times_seen"] +=1
pass # depends on [control=['try'], data=[]]
except:
# If the attribute does not exist, we would initialize it
#graphData.edge[hashLabel][json.dumps(new)]["@times_seen"] = 1
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['new']] # depends on [control=['if'], data=[]]
else:
# An unexpected type
pass # depends on [control=['for'], data=['att']]
return (newAtts, newEntities)
graphData = oldData
# Iterating through the results
for elem in data:
# Creating a dict to represent the pair: type, value entity.
ent = {'value': elem['value'], 'type': elem['type']}
# Appending the new node
new_node = _addNewNode(ent, graphData)
# Processing the attributes to grab the attributes (starting with "@..." and entities)
(newAtts, newEntities) = _processAttributes(elem['attributes'], graphData)
# Updating the attributes to the current entity
graphData.node[new_node].update(newAtts)
# Creating the edges (the new entities have also been created in the _processAttributes
for other_node in newEntities:
# Serializing the second entity
serEnt = json.dumps(new_node)
try:
other_node = unicode(other_node['value']) # depends on [control=['try'], data=[]]
except UnicodeEncodeError as e:
# Printing that an error was found
other_node = str(other_node['value']) # depends on [control=['except'], data=[]]
# Adding the edge
graphData.add_edge(new_node, other_node)
try:
# Here, we would add the properties of the edge
#graphData.edge[hashLabel][hashLabelSeconds]["times_seen"] +=1
pass # depends on [control=['try'], data=[]]
except:
# If the attribute does not exist, we would initialize it
#graphData.edge[hashLabel][hashLabelSeconds]["times_seen"] = 1
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['other_node']] # depends on [control=['for'], data=['elem']]
return graphData |
def _gen_array_table(self):
    """
    2D array describing each registered array
    together with headers - for use in __str__
    """
    headers = ['Array Name', 'Size', 'Type', 'Shape']
    # Reify arrays to work out their actual size
    reified = self.arrays(reify=True)

    def _row(ary):
        # Actual byte size comes from the reified counterpart.
        size = hcu.fmt_bytes(hcu.array_bytes(reified[ary.name]))
        # Render the shape tuple without spaces or quotes.
        shape_str = '(%s)' % ','.join(str(d) for d in ary.shape)
        return [ary.name, size, np.dtype(ary.dtype).name, shape_str]

    # One row per array, ordered case-insensitively by name.
    ordered = sorted(self.arrays().itervalues(),
                     key=lambda a: a.name.upper())
    return [_row(a) for a in ordered], headers
constant[
2D array describing each registered array
together with headers - for use in __str__
]
variable[headers] assign[=] list[[<ast.Constant object at 0x7da204622b60>, <ast.Constant object at 0x7da204623190>, <ast.Constant object at 0x7da204621bd0>, <ast.Constant object at 0x7da2046202e0>]]
variable[reified_arrays] assign[=] call[name[self].arrays, parameter[]]
variable[table] assign[=] list[[]]
for taget[name[array]] in starred[call[name[sorted], parameter[call[call[name[self].arrays, parameter[]].itervalues, parameter[]]]]] begin[:]
variable[nbytes] assign[=] call[name[hcu].array_bytes, parameter[call[name[reified_arrays]][name[array].name]]]
variable[sshape] assign[=] binary_operation[constant[(%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b26add20>]]]
call[name[table].append, parameter[list[[<ast.Attribute object at 0x7da1b26ac6d0>, <ast.Call object at 0x7da1b26adba0>, <ast.Attribute object at 0x7da1b26aee60>, <ast.Name object at 0x7da1b26ae7d0>]]]]
return[tuple[[<ast.Name object at 0x7da204622da0>, <ast.Name object at 0x7da204622080>]]] | keyword[def] identifier[_gen_array_table] ( identifier[self] ):
literal[string]
identifier[headers] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[reified_arrays] = identifier[self] . identifier[arrays] ( identifier[reify] = keyword[True] )
identifier[table] =[]
keyword[for] identifier[array] keyword[in] identifier[sorted] ( identifier[self] . identifier[arrays] (). identifier[itervalues] (),
identifier[key] = keyword[lambda] identifier[aval] : identifier[aval] . identifier[name] . identifier[upper] ()):
identifier[nbytes] = identifier[hcu] . identifier[array_bytes] ( identifier[reified_arrays] [ identifier[array] . identifier[name] ])
identifier[sshape] = literal[string] %( literal[string] . identifier[join] ( identifier[map] ( identifier[str] , identifier[array] . identifier[shape] )),)
identifier[table] . identifier[append] ([ identifier[array] . identifier[name] ,
identifier[hcu] . identifier[fmt_bytes] ( identifier[nbytes] ),
identifier[np] . identifier[dtype] ( identifier[array] . identifier[dtype] ). identifier[name] ,
identifier[sshape] ])
keyword[return] identifier[table] , identifier[headers] | def _gen_array_table(self):
"""
2D array describing each registered array
together with headers - for use in __str__
"""
headers = ['Array Name', 'Size', 'Type', 'Shape']
# Reify arrays to work out their actual size
reified_arrays = self.arrays(reify=True)
table = []
for array in sorted(self.arrays().itervalues(), key=lambda aval: aval.name.upper()):
# Get the actual size of the array
nbytes = hcu.array_bytes(reified_arrays[array.name])
# Print shape tuples without spaces and single quotes
sshape = '(%s)' % (','.join(map(str, array.shape)),)
table.append([array.name, hcu.fmt_bytes(nbytes), np.dtype(array.dtype).name, sshape]) # depends on [control=['for'], data=['array']]
return (table, headers) |
def unlist(ctx, unlist_account, account):
    """ Remove an account from any list
    """
    target = Account(account, blockchain_instance=ctx.blockchain)
    # Build the removal transaction and print it.
    tx = target.nolist(unlist_account)
    print_tx(tx)
constant[ Remove an account from any list
]
variable[account] assign[=] call[name[Account], parameter[name[account]]]
call[name[print_tx], parameter[call[name[account].nolist, parameter[name[unlist_account]]]]] | keyword[def] identifier[unlist] ( identifier[ctx] , identifier[unlist_account] , identifier[account] ):
literal[string]
identifier[account] = identifier[Account] ( identifier[account] , identifier[blockchain_instance] = identifier[ctx] . identifier[blockchain] )
identifier[print_tx] ( identifier[account] . identifier[nolist] ( identifier[unlist_account] )) | def unlist(ctx, unlist_account, account):
""" Remove an account from any list
"""
account = Account(account, blockchain_instance=ctx.blockchain)
print_tx(account.nolist(unlist_account)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.