| code | code_sememe | token_type | code_dependency |
|---|---|---|---|
| string (75 to 104k chars) | string (47 to 309k chars) | string (215 to 214k chars) | string (75 to 155k chars) |
def attributes(self, params=None):
"""
Gets the attributes from a Group/Indicator or Victim
Yields: attribute json
"""
if params is None:
params = {}
if not self.can_update():
self._tcex.handle_error(910, [self.type])
for a in self.tc_requests.attributes(
self.api_type, self.api_sub_type, self.unique_id, owner=self.owner, params=params
):
yield a | def function[attributes, parameter[self, params]]:
constant[
Gets the attributes from a Group/Indicator or Victim
Yields: attribute json
]
if compare[name[params] is constant[None]] begin[:]
variable[params] assign[=] dictionary[[], []]
if <ast.UnaryOp object at 0x7da2044c1330> begin[:]
call[name[self]._tcex.handle_error, parameter[constant[910], list[[<ast.Attribute object at 0x7da2044c0940>]]]]
for taget[name[a]] in starred[call[name[self].tc_requests.attributes, parameter[name[self].api_type, name[self].api_sub_type, name[self].unique_id]]] begin[:]
<ast.Yield object at 0x7da2044c1780> | keyword[def] identifier[attributes] ( identifier[self] , identifier[params] = keyword[None] ):
literal[string]
keyword[if] identifier[params] keyword[is] keyword[None] :
identifier[params] ={}
keyword[if] keyword[not] identifier[self] . identifier[can_update] ():
identifier[self] . identifier[_tcex] . identifier[handle_error] ( literal[int] ,[ identifier[self] . identifier[type] ])
keyword[for] identifier[a] keyword[in] identifier[self] . identifier[tc_requests] . identifier[attributes] (
identifier[self] . identifier[api_type] , identifier[self] . identifier[api_sub_type] , identifier[self] . identifier[unique_id] , identifier[owner] = identifier[self] . identifier[owner] , identifier[params] = identifier[params]
):
keyword[yield] identifier[a] | def attributes(self, params=None):
"""
Gets the attributes from a Group/Indicator or Victim
Yields: attribute json
"""
if params is None:
params = {} # depends on [control=['if'], data=['params']]
if not self.can_update():
self._tcex.handle_error(910, [self.type]) # depends on [control=['if'], data=[]]
for a in self.tc_requests.attributes(self.api_type, self.api_sub_type, self.unique_id, owner=self.owner, params=params):
yield a # depends on [control=['for'], data=['a']] |
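
A minimal, self-contained sketch of how a generator like this is typically consumed. The stub classes below are hypothetical stand-ins for the real tcex objects, which are not part of this row; only the delegation-and-yield pattern is taken from the code above.

```python
class TcRequestsStub:
    """Hypothetical stand-in for the tcex request layer."""
    def attributes(self, api_type, api_sub_type, unique_id, owner=None, params=None):
        # Pretend the remote API returned two attribute records.
        yield {'type': 'Description', 'value': 'demo'}
        yield {'type': 'Source', 'value': 'unit-test'}

class IndicatorStub:
    api_type, api_sub_type, unique_id, owner = 'indicators', 'addresses', '1.2.3.4', 'Demo Org'
    tc_requests = TcRequestsStub()

    def attributes(self, params=None):
        if params is None:
            params = {}
        # Delegate to the request layer and stream results back to the caller.
        yield from self.tc_requests.attributes(
            self.api_type, self.api_sub_type, self.unique_id,
            owner=self.owner, params=params)

for attr in IndicatorStub().attributes():
    print(attr['type'], '->', attr['value'])
```
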
def write_fix_accuracy(self, accuracy=None):
"""
Write the GPS fix accuracy header::
writer.write_fix_accuracy()
# -> HFFXA500
writer.write_fix_accuracy(25)
# -> HFFXA025
:param accuracy: the estimated GPS fix accuracy in meters (optional)
"""
if accuracy is None:
accuracy = 500
accuracy = int(accuracy)
if not 0 < accuracy < 1000:
raise ValueError('Invalid fix accuracy')
self.write_fr_header('FXA', '%03d' % accuracy) | def function[write_fix_accuracy, parameter[self, accuracy]]:
constant[
Write the GPS fix accuracy header::
writer.write_fix_accuracy()
# -> HFFXA500
writer.write_fix_accuracy(25)
# -> HFFXA025
:param accuracy: the estimated GPS fix accuracy in meters (optional)
]
if compare[name[accuracy] is constant[None]] begin[:]
variable[accuracy] assign[=] constant[500]
variable[accuracy] assign[=] call[name[int], parameter[name[accuracy]]]
if <ast.UnaryOp object at 0x7da1b04ffc40> begin[:]
<ast.Raise object at 0x7da1b04ffbb0>
call[name[self].write_fr_header, parameter[constant[FXA], binary_operation[constant[%03d] <ast.Mod object at 0x7da2590d6920> name[accuracy]]]] | keyword[def] identifier[write_fix_accuracy] ( identifier[self] , identifier[accuracy] = keyword[None] ):
literal[string]
keyword[if] identifier[accuracy] keyword[is] keyword[None] :
identifier[accuracy] = literal[int]
identifier[accuracy] = identifier[int] ( identifier[accuracy] )
keyword[if] keyword[not] literal[int] < identifier[accuracy] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[write_fr_header] ( literal[string] , literal[string] % identifier[accuracy] ) | def write_fix_accuracy(self, accuracy=None):
"""
Write the GPS fix accuracy header::
writer.write_fix_accuracy()
# -> HFFXA500
writer.write_fix_accuracy(25)
# -> HFFXA025
:param accuracy: the estimated GPS fix accuracy in meters (optional)
"""
if accuracy is None:
accuracy = 500 # depends on [control=['if'], data=['accuracy']]
accuracy = int(accuracy)
if not 0 < accuracy < 1000:
raise ValueError('Invalid fix accuracy') # depends on [control=['if'], data=[]]
self.write_fr_header('FXA', '%03d' % accuracy) |
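
To make the docstring examples concrete, here is a standalone sketch of the string the writer presumably emits; the `HF` record prefix is inferred from the `HFFXA500` example, not from code shown in this row.

```python
def format_fix_accuracy(accuracy=None):
    """Build the IGC fix-accuracy header line, e.g. 'HFFXA025'."""
    accuracy = 500 if accuracy is None else int(accuracy)
    if not 0 < accuracy < 1000:
        raise ValueError('Invalid fix accuracy')
    return 'HF' + 'FXA' + '%03d' % accuracy

assert format_fix_accuracy() == 'HFFXA500'
assert format_fix_accuracy(25) == 'HFFXA025'
```
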
def resolve(self, var, context):
"""Resolves a variable out of context if it's not in quotes"""
if var is None:
return var
if var[0] in ('"', "'") and var[-1] == var[0]:
return var[1:-1]
else:
return template.Variable(var).resolve(context) | def function[resolve, parameter[self, var, context]]:
constant[Resolves a variable out of context if it's not in quotes]
if compare[name[var] is constant[None]] begin[:]
return[name[var]]
if <ast.BoolOp object at 0x7da1b0651390> begin[:]
return[call[name[var]][<ast.Slice object at 0x7da1b0651660>]] | keyword[def] identifier[resolve] ( identifier[self] , identifier[var] , identifier[context] ):
literal[string]
keyword[if] identifier[var] keyword[is] keyword[None] :
keyword[return] identifier[var]
keyword[if] identifier[var] [ literal[int] ] keyword[in] ( literal[string] , literal[string] ) keyword[and] identifier[var] [- literal[int] ]== identifier[var] [ literal[int] ]:
keyword[return] identifier[var] [ literal[int] :- literal[int] ]
keyword[else] :
keyword[return] identifier[template] . identifier[Variable] ( identifier[var] ). identifier[resolve] ( identifier[context] ) | def resolve(self, var, context):
"""Resolves a variable out of context if it's not in quotes"""
if var is None:
return var # depends on [control=['if'], data=['var']]
if var[0] in ('"', "'") and var[-1] == var[0]:
return var[1:-1] # depends on [control=['if'], data=[]]
else:
return template.Variable(var).resolve(context) |
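
A quick demonstration of the quoting rule: quoted arguments are returned as string literals, unquoted ones are resolved against the context. A plain dict stands in for Django's context object here, so the sketch runs without Django.

```python
def resolve(var, context):
    if var is None:
        return var
    if var[0] in ('"', "'") and var[-1] == var[0]:
        return var[1:-1]  # quoted: strip the quotes, return literally
    return context[var]   # the original uses template.Variable(var).resolve(context)

ctx = {'user': 'alice'}
print(resolve("'user'", ctx))  # user  (literal)
print(resolve('user', ctx))    # alice (context lookup)
```
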
def annotate(head, list):
"""Add '/' suffixes to directories."""
for i in range(len(list)):
if os.path.isdir(os.path.join(head, list[i])):
list[i] = list[i] + '/' | def function[annotate, parameter[head, list]]:
constant[Add '/' suffixes to directories.]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[list]]]]]] begin[:]
if call[name[os].path.isdir, parameter[call[name[os].path.join, parameter[name[head], call[name[list]][name[i]]]]]] begin[:]
call[name[list]][name[i]] assign[=] binary_operation[call[name[list]][name[i]] + constant[/]] | keyword[def] identifier[annotate] ( identifier[head] , identifier[list] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[list] )):
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[head] , identifier[list] [ identifier[i] ])):
identifier[list] [ identifier[i] ]= identifier[list] [ identifier[i] ]+ literal[string] | def annotate(head, list):
"""Add '/' suffixes to directories."""
for i in range(len(list)):
if os.path.isdir(os.path.join(head, list[i])):
list[i] = list[i] + '/' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] |
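
A runnable illustration on a throwaway directory tree (the parameter is renamed to `entries` here to avoid shadowing the built-in `list`):

```python
import os
import tempfile

def annotate(head, entries):
    """Add '/' suffixes to directories."""
    for i in range(len(entries)):
        if os.path.isdir(os.path.join(head, entries[i])):
            entries[i] = entries[i] + '/'

with tempfile.TemporaryDirectory() as head:
    os.mkdir(os.path.join(head, 'pkg'))
    open(os.path.join(head, 'setup.py'), 'w').close()
    names = sorted(os.listdir(head))
    annotate(head, names)
    print(names)  # ['pkg/', 'setup.py']
```
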
def send_sms(self, text, **kw):
"""
Send an SMS. Since Free only allows us to send SMSes to ourselves, you
don't have to provide your phone number.
"""
params = {
'user': self._user,
'pass': self._passwd,
'msg': text
}
kw.setdefault("verify", False)
if not kw["verify"]:
# remove SSL warning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
res = requests.get(FreeClient.BASE_URL, params=params, **kw)
return FreeResponse(res.status_code) | def function[send_sms, parameter[self, text]]:
constant[
Send an SMS. Since Free only allows us to send SMSes to ourselves, you
don't have to provide your phone number.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b1018fa0>, <ast.Constant object at 0x7da1b1018040>, <ast.Constant object at 0x7da1b1018ca0>], [<ast.Attribute object at 0x7da1b1018af0>, <ast.Attribute object at 0x7da1b1018940>, <ast.Name object at 0x7da1b1018d30>]]
call[name[kw].setdefault, parameter[constant[verify], constant[False]]]
if <ast.UnaryOp object at 0x7da1b1190d90> begin[:]
call[name[requests].packages.urllib3.disable_warnings, parameter[name[InsecureRequestWarning]]]
variable[res] assign[=] call[name[requests].get, parameter[name[FreeClient].BASE_URL]]
return[call[name[FreeResponse], parameter[name[res].status_code]]] | keyword[def] identifier[send_sms] ( identifier[self] , identifier[text] ,** identifier[kw] ):
literal[string]
identifier[params] ={
literal[string] : identifier[self] . identifier[_user] ,
literal[string] : identifier[self] . identifier[_passwd] ,
literal[string] : identifier[text]
}
identifier[kw] . identifier[setdefault] ( literal[string] , keyword[False] )
keyword[if] keyword[not] identifier[kw] [ literal[string] ]:
identifier[requests] . identifier[packages] . identifier[urllib3] . identifier[disable_warnings] ( identifier[InsecureRequestWarning] )
identifier[res] = identifier[requests] . identifier[get] ( identifier[FreeClient] . identifier[BASE_URL] , identifier[params] = identifier[params] ,** identifier[kw] )
keyword[return] identifier[FreeResponse] ( identifier[res] . identifier[status_code] ) | def send_sms(self, text, **kw):
"""
Send an SMS. Since Free only allows us to send SMSes to ourselves, you
don't have to provide your phone number.
"""
params = {'user': self._user, 'pass': self._passwd, 'msg': text}
kw.setdefault('verify', False)
if not kw['verify']:
# remove SSL warning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning) # depends on [control=['if'], data=[]]
res = requests.get(FreeClient.BASE_URL, params=params, **kw)
return FreeResponse(res.status_code) |
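
For context, a minimal sketch of the request this method performs. The endpoint below is Free Mobile's public SMS gateway; treat it and the placeholder credentials as assumptions, since `FreeClient.BASE_URL` is not shown in this row.

```python
import requests

BASE_URL = 'https://smsapi.free-mobile.fr/sendmsg'  # assumed value of FreeClient.BASE_URL

def send_sms(user, passwd, text, verify=True):
    """Send an SMS to your own Free Mobile number via a single GET request."""
    params = {'user': user, 'pass': passwd, 'msg': text}
    return requests.get(BASE_URL, params=params, verify=verify)

# resp = send_sms('12345678', 'api-key-from-your-account', 'hello')
# print(resp.status_code)  # 200 on success
```
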
def data_vectors(self):
"""The per-sample data in a vector.
Returns:
dict: A dict where the keys are the fields in the record and the
values are the corresponding arrays.
Examples:
>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,
energy=[-1, 1])
>>> sampleset.data_vectors['energy']
array([-1, 1])
Note that this is equivalent to, and less performant than:
>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,
energy=[-1, 1])
>>> sampleset.record['energy']
array([-1, 1])
"""
return {field: self.record[field] for field in self.record.dtype.names
if field != 'sample'} | def function[data_vectors, parameter[self]]:
constant[The per-sample data in a vector.
Returns:
dict: A dict where the keys are the fields in the record and the
values are the corresponding arrays.
Examples:
>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,
energy=[-1, 1])
>>> sampleset.data_vectors['energy']
array([-1, 1])
Note that this is equivalent to, and less performant than:
>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,
energy=[-1, 1])
>>> sampleset.record['energy']
array([-1, 1])
]
return[<ast.DictComp object at 0x7da1b0716b90>] | keyword[def] identifier[data_vectors] ( identifier[self] ):
literal[string]
keyword[return] { identifier[field] : identifier[self] . identifier[record] [ identifier[field] ] keyword[for] identifier[field] keyword[in] identifier[self] . identifier[record] . identifier[dtype] . identifier[names]
keyword[if] identifier[field] != literal[string] } | def data_vectors(self):
"""The per-sample data in a vector.
Returns:
dict: A dict where the keys are the fields in the record and the
values are the corresponding arrays.
Examples:
>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,
energy=[-1, 1])
>>> sampleset.data_vectors['energy']
array([-1, 1])
Note that this is equivalent to, and less performant than:
>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,
energy=[-1, 1])
>>> sampleset.record['energy']
array([-1, 1])
"""
return {field: self.record[field] for field in self.record.dtype.names if field != 'sample'} |
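
The same comprehension can be exercised on a plain NumPy record array, without dimod installed:

```python
import numpy as np

record = np.rec.fromarrays(
    [np.array([0, 1]), np.array([-1.0, 1.0]), np.array([1, 1])],
    names=['sample', 'energy', 'num_occurrences'])

# Keep every per-sample vector except the samples themselves.
data_vectors = {field: record[field] for field in record.dtype.names
                if field != 'sample'}
print(data_vectors['energy'])  # [-1.  1.]
```
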
def build(cli, path, package):
"""Build CLI dynamically based on the package structure.
"""
for _, name, ispkg in iter_modules(path):
module = import_module(f'.{name}', package)
if ispkg:
build(cli.group(name)(module.group),
module.__path__,
module.__package__)
else:
cli.command(name)(module.command) | def function[build, parameter[cli, path, package]]:
constant[Build CLI dynamically based on the package structure.
]
for taget[tuple[[<ast.Name object at 0x7da2054a5660>, <ast.Name object at 0x7da2054a6ef0>, <ast.Name object at 0x7da2054a5b40>]]] in starred[call[name[iter_modules], parameter[name[path]]]] begin[:]
variable[module] assign[=] call[name[import_module], parameter[<ast.JoinedStr object at 0x7da2054a54e0>, name[package]]]
if name[ispkg] begin[:]
call[name[build], parameter[call[call[name[cli].group, parameter[name[name]]], parameter[name[module].group]], name[module].__path__, name[module].__package__]] | keyword[def] identifier[build] ( identifier[cli] , identifier[path] , identifier[package] ):
literal[string]
keyword[for] identifier[_] , identifier[name] , identifier[ispkg] keyword[in] identifier[iter_modules] ( identifier[path] ):
identifier[module] = identifier[import_module] ( literal[string] , identifier[package] )
keyword[if] identifier[ispkg] :
identifier[build] ( identifier[cli] . identifier[group] ( identifier[name] )( identifier[module] . identifier[group] ),
identifier[module] . identifier[__path__] ,
identifier[module] . identifier[__package__] )
keyword[else] :
identifier[cli] . identifier[command] ( identifier[name] )( identifier[module] . identifier[command] ) | def build(cli, path, package):
"""Build CLI dynamically based on the package structure.
"""
for (_, name, ispkg) in iter_modules(path):
module = import_module(f'.{name}', package)
if ispkg:
build(cli.group(name)(module.group), module.__path__, module.__package__) # depends on [control=['if'], data=[]]
else:
cli.command(name)(module.command) # depends on [control=['for'], data=[]] |
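
To see the builder in action without a pre-existing project, the sketch below fabricates a one-module package at runtime and hands it to `build` (as defined above). The package name `commands` and the convention that leaf modules expose a plain `command` function are assumptions consistent with the attribute names used by the builder.

```python
import os
import sys
import tempfile
import click

# Build a throwaway package on disk: commands/hello.py exposing `command`.
root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, 'commands'))
open(os.path.join(root, 'commands', '__init__.py'), 'w').close()
with open(os.path.join(root, 'commands', 'hello.py'), 'w') as f:
    f.write("import click\n\ndef command():\n    click.echo('hi')\n")
sys.path.insert(0, root)

import commands

@click.group()
def cli():
    """Root of the dynamically assembled CLI."""

build(cli, commands.__path__, commands.__package__)  # build() from the row above
cli(['hello'], standalone_mode=False)  # prints: hi
```
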
def create_symmetric_key(self, algorithm, length):
"""
Create a symmetric key.
Args:
algorithm(CryptographicAlgorithm): An enumeration specifying the
algorithm for which the created key will be compliant.
length(int): The length of the key to be created. This value must
be compliant with the constraints of the provided algorithm.
Returns:
dict: A dictionary containing the key data, with the following
key/value fields:
* value - the bytes of the key
* format - a KeyFormatType enumeration for the bytes format
Raises:
InvalidField: Raised when the algorithm is unsupported or the
length is incompatible with the algorithm.
CryptographicFailure: Raised when the key generation process
fails.
Example:
>>> engine = CryptographyEngine()
>>> key = engine.create_symmetric_key(
... CryptographicAlgorithm.AES, 256)
"""
if algorithm not in self._symmetric_key_algorithms.keys():
raise exceptions.InvalidField(
"The cryptographic algorithm {0} is not a supported symmetric "
"key algorithm.".format(algorithm)
)
cryptography_algorithm = self._symmetric_key_algorithms.get(algorithm)
if length not in cryptography_algorithm.key_sizes:
raise exceptions.InvalidField(
"The cryptographic length ({0}) is not valid for "
"the cryptographic algorithm ({1}).".format(
length, algorithm.name
)
)
self.logger.info(
"Generating a {0} symmetric key with length: {1}".format(
algorithm.name, length
)
)
key_bytes = os.urandom(length // 8)
try:
cryptography_algorithm(key_bytes)
except Exception as e:
self.logger.exception(e)
raise exceptions.CryptographicFailure(
"Invalid bytes for the provided cryptographic algorithm.")
return {'value': key_bytes, 'format': enums.KeyFormatType.RAW} | def function[create_symmetric_key, parameter[self, algorithm, length]]:
constant[
Create a symmetric key.
Args:
algorithm(CryptographicAlgorithm): An enumeration specifying the
algorithm for which the created key will be compliant.
length(int): The length of the key to be created. This value must
be compliant with the constraints of the provided algorithm.
Returns:
dict: A dictionary containing the key data, with the following
key/value fields:
* value - the bytes of the key
* format - a KeyFormatType enumeration for the bytes format
Raises:
InvalidField: Raised when the algorithm is unsupported or the
length is incompatible with the algorithm.
CryptographicFailure: Raised when the key generation process
fails.
Example:
>>> engine = CryptographyEngine()
>>> key = engine.create_symmetric_key(
... CryptographicAlgorithm.AES, 256)
]
if compare[name[algorithm] <ast.NotIn object at 0x7da2590d7190> call[name[self]._symmetric_key_algorithms.keys, parameter[]]] begin[:]
<ast.Raise object at 0x7da18f720880>
variable[cryptography_algorithm] assign[=] call[name[self]._symmetric_key_algorithms.get, parameter[name[algorithm]]]
if compare[name[length] <ast.NotIn object at 0x7da2590d7190> name[cryptography_algorithm].key_sizes] begin[:]
<ast.Raise object at 0x7da18f7225f0>
call[name[self].logger.info, parameter[call[constant[Generating a {0} symmetric key with length: {1}].format, parameter[name[algorithm].name, name[length]]]]]
variable[key_bytes] assign[=] call[name[os].urandom, parameter[binary_operation[name[length] <ast.FloorDiv object at 0x7da2590d6bc0> constant[8]]]]
<ast.Try object at 0x7da1b0296500>
return[dictionary[[<ast.Constant object at 0x7da1b02941c0>, <ast.Constant object at 0x7da1b0297100>], [<ast.Name object at 0x7da1b02954b0>, <ast.Attribute object at 0x7da1b02945e0>]]] | keyword[def] identifier[create_symmetric_key] ( identifier[self] , identifier[algorithm] , identifier[length] ):
literal[string]
keyword[if] identifier[algorithm] keyword[not] keyword[in] identifier[self] . identifier[_symmetric_key_algorithms] . identifier[keys] ():
keyword[raise] identifier[exceptions] . identifier[InvalidField] (
literal[string]
literal[string] . identifier[format] ( identifier[algorithm] )
)
identifier[cryptography_algorithm] = identifier[self] . identifier[_symmetric_key_algorithms] . identifier[get] ( identifier[algorithm] )
keyword[if] identifier[length] keyword[not] keyword[in] identifier[cryptography_algorithm] . identifier[key_sizes] :
keyword[raise] identifier[exceptions] . identifier[InvalidField] (
literal[string]
literal[string] . identifier[format] (
identifier[length] , identifier[algorithm] . identifier[name]
)
)
identifier[self] . identifier[logger] . identifier[info] (
literal[string] . identifier[format] (
identifier[algorithm] . identifier[name] , identifier[length]
)
)
identifier[key_bytes] = identifier[os] . identifier[urandom] ( identifier[length] // literal[int] )
keyword[try] :
identifier[cryptography_algorithm] ( identifier[key_bytes] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[logger] . identifier[exception] ( identifier[e] )
keyword[raise] identifier[exceptions] . identifier[CryptographicFailure] (
literal[string] )
keyword[return] { literal[string] : identifier[key_bytes] , literal[string] : identifier[enums] . identifier[KeyFormatType] . identifier[RAW] } | def create_symmetric_key(self, algorithm, length):
"""
Create a symmetric key.
Args:
algorithm(CryptographicAlgorithm): An enumeration specifying the
algorithm for which the created key will be compliant.
length(int): The length of the key to be created. This value must
be compliant with the constraints of the provided algorithm.
Returns:
dict: A dictionary containing the key data, with the following
key/value fields:
* value - the bytes of the key
* format - a KeyFormatType enumeration for the bytes format
Raises:
InvalidField: Raised when the algorithm is unsupported or the
length is incompatible with the algorithm.
CryptographicFailure: Raised when the key generation process
fails.
Example:
>>> engine = CryptographyEngine()
>>> key = engine.create_symmetric_key(
... CryptographicAlgorithm.AES, 256)
"""
if algorithm not in self._symmetric_key_algorithms.keys():
raise exceptions.InvalidField('The cryptographic algorithm {0} is not a supported symmetric key algorithm.'.format(algorithm)) # depends on [control=['if'], data=['algorithm']]
cryptography_algorithm = self._symmetric_key_algorithms.get(algorithm)
if length not in cryptography_algorithm.key_sizes:
raise exceptions.InvalidField('The cryptographic length ({0}) is not valid for the cryptographic algorithm ({1}).'.format(length, algorithm.name)) # depends on [control=['if'], data=['length']]
self.logger.info('Generating a {0} symmetric key with length: {1}'.format(algorithm.name, length))
key_bytes = os.urandom(length // 8)
try:
cryptography_algorithm(key_bytes) # depends on [control=['try'], data=[]]
except Exception as e:
self.logger.exception(e)
raise exceptions.CryptographicFailure('Invalid bytes for the provided cryptographic algorithm.') # depends on [control=['except'], data=['e']]
return {'value': key_bytes, 'format': enums.KeyFormatType.RAW} |
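
The generate-then-validate pattern can be reproduced directly against the `cryptography` package, which is presumably what `_symmetric_key_algorithms` maps to (an assumption; the mapping itself is not shown in this row):

```python
import os
from cryptography.hazmat.primitives.ciphers import algorithms

def create_aes_key(length_bits=256):
    if length_bits not in algorithms.AES.key_sizes:
        raise ValueError('invalid AES key length: %d' % length_bits)
    key_bytes = os.urandom(length_bits // 8)
    algorithms.AES(key_bytes)  # raises if the byte string is not a valid key
    return key_bytes

print(len(create_aes_key(256)))  # 32
```
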
def get_action(self, action_name, action_id):
"""
Get an action.
action_name -- name of the action
action_id -- ID of the action
Returns the requested action if found, else None.
"""
if action_name not in self.actions:
return None
for action in self.actions[action_name]:
if action.id == action_id:
return action
return None | def function[get_action, parameter[self, action_name, action_id]]:
constant[
Get an action.
action_name -- name of the action
action_id -- ID of the action
Returns the requested action if found, else None.
]
if compare[name[action_name] <ast.NotIn object at 0x7da2590d7190> name[self].actions] begin[:]
return[constant[None]]
for taget[name[action]] in starred[call[name[self].actions][name[action_name]]] begin[:]
if compare[name[action].id equal[==] name[action_id]] begin[:]
return[name[action]]
return[constant[None]] | keyword[def] identifier[get_action] ( identifier[self] , identifier[action_name] , identifier[action_id] ):
literal[string]
keyword[if] identifier[action_name] keyword[not] keyword[in] identifier[self] . identifier[actions] :
keyword[return] keyword[None]
keyword[for] identifier[action] keyword[in] identifier[self] . identifier[actions] [ identifier[action_name] ]:
keyword[if] identifier[action] . identifier[id] == identifier[action_id] :
keyword[return] identifier[action]
keyword[return] keyword[None] | def get_action(self, action_name, action_id):
"""
Get an action.
action_name -- name of the action
action_id -- ID of the action
Returns the requested action if found, else None.
"""
if action_name not in self.actions:
return None # depends on [control=['if'], data=[]]
for action in self.actions[action_name]:
if action.id == action_id:
return action # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['action']]
return None |
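
A tiny self-contained demo of the lookup. The surrounding class is invented here; only the shape of `actions` (a name mapped to a list of objects searched linearly by `id`) comes from the method above.

```python
from types import SimpleNamespace

class ActionStore:
    def __init__(self):
        self.actions = {'fade': [SimpleNamespace(id='a1'), SimpleNamespace(id='a2')]}

    def get_action(self, action_name, action_id):
        if action_name not in self.actions:
            return None
        for action in self.actions[action_name]:
            if action.id == action_id:
                return action
        return None

store = ActionStore()
print(store.get_action('fade', 'a2'))   # namespace(id='a2')
print(store.get_action('blink', 'a1'))  # None
```
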
def mean(self):
"""Compute a total score for each model over all the tests.
Uses the `norm_scores` attribute, since otherwise direct comparison
across different kinds of scores would not be possible.
"""
return np.dot(np.array(self.norm_scores), self.weights) | def function[mean, parameter[self]]:
constant[Compute a total score for each model over all the tests.
Uses the `norm_scores` attribute, since otherwise direct comparison
across different kinds of scores would not be possible.
]
return[call[name[np].dot, parameter[call[name[np].array, parameter[name[self].norm_scores]], name[self].weights]]] | keyword[def] identifier[mean] ( identifier[self] ):
literal[string]
keyword[return] identifier[np] . identifier[dot] ( identifier[np] . identifier[array] ( identifier[self] . identifier[norm_scores] ), identifier[self] . identifier[weights] ) | def mean(self):
"""Compute a total score for each model over all the tests.
Uses the `norm_scores` attribute, since otherwise direct comparison
across different kinds of scores would not be possible.
"""
return np.dot(np.array(self.norm_scores), self.weights) |
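
A worked instance of the weighted dot product with made-up scores:

```python
import numpy as np

norm_scores = [0.9, 0.5, 0.7]          # one normalized score per test
weights = np.array([0.5, 0.25, 0.25])  # test weights, summing to 1
print(np.dot(np.array(norm_scores), weights))  # 0.75
```
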
def edge_iterator(edges=[], edges_fn=None):
"""Yield documents from edge for loading into ArangoDB"""
for edge in itertools.chain(edges, files.read_edges(edges_fn)):
subj = copy.deepcopy(edge["edge"]["subject"])
subj_id = str(utils._create_hash_from_doc(subj))
subj["_key"] = subj_id
obj = copy.deepcopy(edge["edge"]["object"])
obj_id = str(utils._create_hash_from_doc(obj))
obj["_key"] = obj_id
relation = copy.deepcopy(edge["edge"]["relation"])
relation["_from"] = f"nodes/{subj_id}"
relation["_to"] = f"nodes/{obj_id}"
# Create edge _key
relation_hash = copy.deepcopy(relation)
relation_hash.pop("edge_dt", None)
relation_hash.pop("edge_hash", None)
relation_hash.pop("nanopub_dt", None)
relation_hash.pop("nanopub_url", None)
relation_hash.pop("subject_canon", None)
relation_hash.pop("object_canon", None)
relation_hash.pop("public_flag", None)
relation_hash.pop("metadata", None)
relation_id = str(utils._create_hash_from_doc(relation_hash))
relation["_key"] = relation_id
if edge.get("nanopub_id", None):
if "metadata" not in relation:
relation["metadata"] = {}
relation["metadata"]["nanopub_id"] = edge["nanopub_id"]
yield ("nodes", subj)
yield ("nodes", obj)
yield ("edges", relation) | def function[edge_iterator, parameter[edges, edges_fn]]:
constant[Yield documents from edge for loading into ArangoDB]
for taget[name[edge]] in starred[call[name[itertools].chain, parameter[name[edges], call[name[files].read_edges, parameter[name[edges_fn]]]]]] begin[:]
variable[subj] assign[=] call[name[copy].deepcopy, parameter[call[call[name[edge]][constant[edge]]][constant[subject]]]]
variable[subj_id] assign[=] call[name[str], parameter[call[name[utils]._create_hash_from_doc, parameter[name[subj]]]]]
call[name[subj]][constant[_key]] assign[=] name[subj_id]
variable[obj] assign[=] call[name[copy].deepcopy, parameter[call[call[name[edge]][constant[edge]]][constant[object]]]]
variable[obj_id] assign[=] call[name[str], parameter[call[name[utils]._create_hash_from_doc, parameter[name[obj]]]]]
call[name[obj]][constant[_key]] assign[=] name[obj_id]
variable[relation] assign[=] call[name[copy].deepcopy, parameter[call[call[name[edge]][constant[edge]]][constant[relation]]]]
call[name[relation]][constant[_from]] assign[=] <ast.JoinedStr object at 0x7da18f810a30>
call[name[relation]][constant[_to]] assign[=] <ast.JoinedStr object at 0x7da18f811810>
variable[relation_hash] assign[=] call[name[copy].deepcopy, parameter[name[relation]]]
call[name[relation_hash].pop, parameter[constant[edge_dt], constant[None]]]
call[name[relation_hash].pop, parameter[constant[edge_hash], constant[None]]]
call[name[relation_hash].pop, parameter[constant[nanopub_dt], constant[None]]]
call[name[relation_hash].pop, parameter[constant[nanopub_url], constant[None]]]
call[name[relation_hash].pop, parameter[constant[subject_canon], constant[None]]]
call[name[relation_hash].pop, parameter[constant[object_canon], constant[None]]]
call[name[relation_hash].pop, parameter[constant[public_flag], constant[None]]]
call[name[relation_hash].pop, parameter[constant[metadata], constant[None]]]
variable[relation_id] assign[=] call[name[str], parameter[call[name[utils]._create_hash_from_doc, parameter[name[relation_hash]]]]]
call[name[relation]][constant[_key]] assign[=] name[relation_id]
if call[name[edge].get, parameter[constant[nanopub_id], constant[None]]] begin[:]
if compare[constant[metadata] <ast.NotIn object at 0x7da2590d7190> name[relation]] begin[:]
call[name[relation]][constant[metadata]] assign[=] dictionary[[], []]
call[call[name[relation]][constant[metadata]]][constant[nanopub_id]] assign[=] call[name[edge]][constant[nanopub_id]]
<ast.Yield object at 0x7da1b23465c0>
<ast.Yield object at 0x7da1b2344b50>
<ast.Yield object at 0x7da1b23473d0> | keyword[def] identifier[edge_iterator] ( identifier[edges] =[], identifier[edges_fn] = keyword[None] ):
literal[string]
keyword[for] identifier[edge] keyword[in] identifier[itertools] . identifier[chain] ( identifier[edges] , identifier[files] . identifier[read_edges] ( identifier[edges_fn] )):
identifier[subj] = identifier[copy] . identifier[deepcopy] ( identifier[edge] [ literal[string] ][ literal[string] ])
identifier[subj_id] = identifier[str] ( identifier[utils] . identifier[_create_hash_from_doc] ( identifier[subj] ))
identifier[subj] [ literal[string] ]= identifier[subj_id]
identifier[obj] = identifier[copy] . identifier[deepcopy] ( identifier[edge] [ literal[string] ][ literal[string] ])
identifier[obj_id] = identifier[str] ( identifier[utils] . identifier[_create_hash_from_doc] ( identifier[obj] ))
identifier[obj] [ literal[string] ]= identifier[obj_id]
identifier[relation] = identifier[copy] . identifier[deepcopy] ( identifier[edge] [ literal[string] ][ literal[string] ])
identifier[relation] [ literal[string] ]= literal[string]
identifier[relation] [ literal[string] ]= literal[string]
identifier[relation_hash] = identifier[copy] . identifier[deepcopy] ( identifier[relation] )
identifier[relation_hash] . identifier[pop] ( literal[string] , keyword[None] )
identifier[relation_hash] . identifier[pop] ( literal[string] , keyword[None] )
identifier[relation_hash] . identifier[pop] ( literal[string] , keyword[None] )
identifier[relation_hash] . identifier[pop] ( literal[string] , keyword[None] )
identifier[relation_hash] . identifier[pop] ( literal[string] , keyword[None] )
identifier[relation_hash] . identifier[pop] ( literal[string] , keyword[None] )
identifier[relation_hash] . identifier[pop] ( literal[string] , keyword[None] )
identifier[relation_hash] . identifier[pop] ( literal[string] , keyword[None] )
identifier[relation_id] = identifier[str] ( identifier[utils] . identifier[_create_hash_from_doc] ( identifier[relation_hash] ))
identifier[relation] [ literal[string] ]= identifier[relation_id]
keyword[if] identifier[edge] . identifier[get] ( literal[string] , keyword[None] ):
keyword[if] literal[string] keyword[not] keyword[in] identifier[relation] :
identifier[relation] [ literal[string] ]={}
identifier[relation] [ literal[string] ][ literal[string] ]= identifier[edge] [ literal[string] ]
keyword[yield] ( literal[string] , identifier[subj] )
keyword[yield] ( literal[string] , identifier[obj] )
keyword[yield] ( literal[string] , identifier[relation] ) | def edge_iterator(edges=[], edges_fn=None):
"""Yield documents from edge for loading into ArangoDB"""
for edge in itertools.chain(edges, files.read_edges(edges_fn)):
subj = copy.deepcopy(edge['edge']['subject'])
subj_id = str(utils._create_hash_from_doc(subj))
subj['_key'] = subj_id
obj = copy.deepcopy(edge['edge']['object'])
obj_id = str(utils._create_hash_from_doc(obj))
obj['_key'] = obj_id
relation = copy.deepcopy(edge['edge']['relation'])
relation['_from'] = f'nodes/{subj_id}'
relation['_to'] = f'nodes/{obj_id}'
# Create edge _key
relation_hash = copy.deepcopy(relation)
relation_hash.pop('edge_dt', None)
relation_hash.pop('edge_hash', None)
relation_hash.pop('nanopub_dt', None)
relation_hash.pop('nanopub_url', None)
relation_hash.pop('subject_canon', None)
relation_hash.pop('object_canon', None)
relation_hash.pop('public_flag', None)
relation_hash.pop('metadata', None)
relation_id = str(utils._create_hash_from_doc(relation_hash))
relation['_key'] = relation_id
if edge.get('nanopub_id', None):
if 'metadata' not in relation:
relation['metadata'] = {} # depends on [control=['if'], data=['relation']]
relation['metadata']['nanopub_id'] = edge['nanopub_id'] # depends on [control=['if'], data=[]]
yield ('nodes', subj)
yield ('nodes', obj)
yield ('edges', relation) # depends on [control=['for'], data=['edge']] |
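
The keying scheme above rests on content hashing: volatile fields are stripped before the `_key` is computed, so reloading an identical edge overwrites its ArangoDB document rather than duplicating it. A sketch with a stand-in hash function (the real `utils._create_hash_from_doc` is not shown in this row):

```python
import copy
import hashlib
import json

def doc_hash(doc):
    # Stand-in for utils._create_hash_from_doc: stable JSON, then md5.
    return hashlib.md5(json.dumps(doc, sort_keys=True).encode()).hexdigest()

relation = {'relation': 'increases', 'edge_dt': '2019-01-01', 'edge_hash': 'x'}
relation_hash = copy.deepcopy(relation)
for volatile in ('edge_dt', 'edge_hash'):
    relation_hash.pop(volatile, None)  # timestamps etc. must not change the key

print(doc_hash(relation_hash) == doc_hash({'relation': 'increases'}))  # True
```
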
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry.
"""
return (self in IPv4Network('0.0.0.0/8') or
self in IPv4Network('10.0.0.0/8') or
self in IPv4Network('127.0.0.0/8') or
self in IPv4Network('169.254.0.0/16') or
self in IPv4Network('172.16.0.0/12') or
self in IPv4Network('192.0.0.0/29') or
self in IPv4Network('192.0.0.170/31') or
self in IPv4Network('192.0.2.0/24') or
self in IPv4Network('192.168.0.0/16') or
self in IPv4Network('198.18.0.0/15') or
self in IPv4Network('198.51.100.0/24') or
self in IPv4Network('203.0.113.0/24') or
self in IPv4Network('240.0.0.0/4') or
self in IPv4Network('255.255.255.255/32')) | def function[is_private, parameter[self]]:
constant[Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry.
]
return[<ast.BoolOp object at 0x7da204345de0>] | keyword[def] identifier[is_private] ( identifier[self] ):
literal[string]
keyword[return] ( identifier[self] keyword[in] identifier[IPv4Network] ( literal[string] ) keyword[or]
identifier[self] keyword[in] identifier[IPv4Network] ( literal[string] ) keyword[or]
identifier[self] keyword[in] identifier[IPv4Network] ( literal[string] ) keyword[or]
identifier[self] keyword[in] identifier[IPv4Network] ( literal[string] ) keyword[or]
identifier[self] keyword[in] identifier[IPv4Network] ( literal[string] ) keyword[or]
identifier[self] keyword[in] identifier[IPv4Network] ( literal[string] ) keyword[or]
identifier[self] keyword[in] identifier[IPv4Network] ( literal[string] ) keyword[or]
identifier[self] keyword[in] identifier[IPv4Network] ( literal[string] ) keyword[or]
identifier[self] keyword[in] identifier[IPv4Network] ( literal[string] ) keyword[or]
identifier[self] keyword[in] identifier[IPv4Network] ( literal[string] ) keyword[or]
identifier[self] keyword[in] identifier[IPv4Network] ( literal[string] ) keyword[or]
identifier[self] keyword[in] identifier[IPv4Network] ( literal[string] ) keyword[or]
identifier[self] keyword[in] identifier[IPv4Network] ( literal[string] ) keyword[or]
identifier[self] keyword[in] identifier[IPv4Network] ( literal[string] )) | def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry.
"""
return self in IPv4Network('0.0.0.0/8') or self in IPv4Network('10.0.0.0/8') or self in IPv4Network('127.0.0.0/8') or (self in IPv4Network('169.254.0.0/16')) or (self in IPv4Network('172.16.0.0/12')) or (self in IPv4Network('192.0.0.0/29')) or (self in IPv4Network('192.0.0.170/31')) or (self in IPv4Network('192.0.2.0/24')) or (self in IPv4Network('192.168.0.0/16')) or (self in IPv4Network('198.18.0.0/15')) or (self in IPv4Network('198.51.100.0/24')) or (self in IPv4Network('203.0.113.0/24')) or (self in IPv4Network('240.0.0.0/4')) or (self in IPv4Network('255.255.255.255/32')) |
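
The standard library's `ipaddress` module implements the same registry checks; a few spot checks against the networks listed above:

```python
from ipaddress import IPv4Address, IPv4Network, ip_address

print(IPv4Address('10.1.2.3') in IPv4Network('10.0.0.0/8'))         # True
print(IPv4Address('192.168.0.1') in IPv4Network('192.168.0.0/16'))  # True
print(ip_address('8.8.8.8').is_private)                             # False
```
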
def oil(data_set='three_phase_oil_flow'):
"""The three phase oil data from Bishop and James (1993)."""
if not data_available(data_set):
download_data(data_set)
oil_train_file = os.path.join(data_path, data_set, 'DataTrn.txt')
oil_trainlbls_file = os.path.join(data_path, data_set, 'DataTrnLbls.txt')
oil_test_file = os.path.join(data_path, data_set, 'DataTst.txt')
oil_testlbls_file = os.path.join(data_path, data_set, 'DataTstLbls.txt')
oil_valid_file = os.path.join(data_path, data_set, 'DataVdn.txt')
oil_validlbls_file = os.path.join(data_path, data_set, 'DataVdnLbls.txt')
fid = open(oil_train_file)
X = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_test_file)
Xtest = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_valid_file)
Xvalid = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_trainlbls_file)
Y = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
fid = open(oil_testlbls_file)
Ytest = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
fid = open(oil_validlbls_file)
Yvalid = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'Xvalid': Xvalid, 'Yvalid': Yvalid}, data_set) | def function[oil, parameter[data_set]]:
constant[The three phase oil data from Bishop and James (1993).]
if <ast.UnaryOp object at 0x7da1b1c6bbe0> begin[:]
call[name[download_data], parameter[name[data_set]]]
variable[oil_train_file] assign[=] call[name[os].path.join, parameter[name[data_path], name[data_set], constant[DataTrn.txt]]]
variable[oil_trainlbls_file] assign[=] call[name[os].path.join, parameter[name[data_path], name[data_set], constant[DataTrnLbls.txt]]]
variable[oil_test_file] assign[=] call[name[os].path.join, parameter[name[data_path], name[data_set], constant[DataTst.txt]]]
variable[oil_testlbls_file] assign[=] call[name[os].path.join, parameter[name[data_path], name[data_set], constant[DataTstLbls.txt]]]
variable[oil_valid_file] assign[=] call[name[os].path.join, parameter[name[data_path], name[data_set], constant[DataVdn.txt]]]
variable[oil_validlbls_file] assign[=] call[name[os].path.join, parameter[name[data_path], name[data_set], constant[DataVdnLbls.txt]]]
variable[fid] assign[=] call[name[open], parameter[name[oil_train_file]]]
variable[X] assign[=] call[call[name[np].fromfile, parameter[name[fid]]].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da1b1c69bd0>, <ast.Constant object at 0x7da1b1c6b640>]]]]
call[name[fid].close, parameter[]]
variable[fid] assign[=] call[name[open], parameter[name[oil_test_file]]]
variable[Xtest] assign[=] call[call[name[np].fromfile, parameter[name[fid]]].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da1b1c6b160>, <ast.Constant object at 0x7da1b1c694e0>]]]]
call[name[fid].close, parameter[]]
variable[fid] assign[=] call[name[open], parameter[name[oil_valid_file]]]
variable[Xvalid] assign[=] call[call[name[np].fromfile, parameter[name[fid]]].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da1b1c6afb0>, <ast.Constant object at 0x7da1b1c6b5b0>]]]]
call[name[fid].close, parameter[]]
variable[fid] assign[=] call[name[open], parameter[name[oil_trainlbls_file]]]
variable[Y] assign[=] binary_operation[binary_operation[call[call[name[np].fromfile, parameter[name[fid]]].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da1b1c6b0a0>, <ast.Constant object at 0x7da1b1c693f0>]]]] * constant[2.0]] - constant[1.0]]
call[name[fid].close, parameter[]]
variable[fid] assign[=] call[name[open], parameter[name[oil_testlbls_file]]]
variable[Ytest] assign[=] binary_operation[binary_operation[call[call[name[np].fromfile, parameter[name[fid]]].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da1b1c6afe0>, <ast.Constant object at 0x7da1b1c68f40>]]]] * constant[2.0]] - constant[1.0]]
call[name[fid].close, parameter[]]
variable[fid] assign[=] call[name[open], parameter[name[oil_validlbls_file]]]
variable[Yvalid] assign[=] binary_operation[binary_operation[call[call[name[np].fromfile, parameter[name[fid]]].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da1b1c6be80>, <ast.Constant object at 0x7da1b1c68e20>]]]] * constant[2.0]] - constant[1.0]]
call[name[fid].close, parameter[]]
return[call[name[data_details_return], parameter[dictionary[[<ast.Constant object at 0x7da1b1c687f0>, <ast.Constant object at 0x7da1b1c69270>, <ast.Constant object at 0x7da1b1c68f10>, <ast.Constant object at 0x7da1b1c68eb0>, <ast.Constant object at 0x7da1b1c64760>, <ast.Constant object at 0x7da1b1c646d0>, <ast.Constant object at 0x7da1b1c65b10>], [<ast.Name object at 0x7da1b1c657e0>, <ast.Name object at 0x7da1b1c65e10>, <ast.Name object at 0x7da1b1c64370>, <ast.Name object at 0x7da1b1c65f00>, <ast.Name object at 0x7da1b1c65ae0>, <ast.Name object at 0x7da1b1c65de0>, <ast.Name object at 0x7da1b1c65b40>]], name[data_set]]]] | keyword[def] identifier[oil] ( identifier[data_set] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[data_available] ( identifier[data_set] ):
identifier[download_data] ( identifier[data_set] )
identifier[oil_train_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[data_set] , literal[string] )
identifier[oil_trainlbls_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[data_set] , literal[string] )
identifier[oil_test_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[data_set] , literal[string] )
identifier[oil_testlbls_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[data_set] , literal[string] )
identifier[oil_valid_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[data_set] , literal[string] )
identifier[oil_validlbls_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[data_set] , literal[string] )
identifier[fid] = identifier[open] ( identifier[oil_train_file] )
identifier[X] = identifier[np] . identifier[fromfile] ( identifier[fid] , identifier[sep] = literal[string] ). identifier[reshape] ((- literal[int] , literal[int] ))
identifier[fid] . identifier[close] ()
identifier[fid] = identifier[open] ( identifier[oil_test_file] )
identifier[Xtest] = identifier[np] . identifier[fromfile] ( identifier[fid] , identifier[sep] = literal[string] ). identifier[reshape] ((- literal[int] , literal[int] ))
identifier[fid] . identifier[close] ()
identifier[fid] = identifier[open] ( identifier[oil_valid_file] )
identifier[Xvalid] = identifier[np] . identifier[fromfile] ( identifier[fid] , identifier[sep] = literal[string] ). identifier[reshape] ((- literal[int] , literal[int] ))
identifier[fid] . identifier[close] ()
identifier[fid] = identifier[open] ( identifier[oil_trainlbls_file] )
identifier[Y] = identifier[np] . identifier[fromfile] ( identifier[fid] , identifier[sep] = literal[string] ). identifier[reshape] ((- literal[int] , literal[int] ))* literal[int] - literal[int]
identifier[fid] . identifier[close] ()
identifier[fid] = identifier[open] ( identifier[oil_testlbls_file] )
identifier[Ytest] = identifier[np] . identifier[fromfile] ( identifier[fid] , identifier[sep] = literal[string] ). identifier[reshape] ((- literal[int] , literal[int] ))* literal[int] - literal[int]
identifier[fid] . identifier[close] ()
identifier[fid] = identifier[open] ( identifier[oil_validlbls_file] )
identifier[Yvalid] = identifier[np] . identifier[fromfile] ( identifier[fid] , identifier[sep] = literal[string] ). identifier[reshape] ((- literal[int] , literal[int] ))* literal[int] - literal[int]
identifier[fid] . identifier[close] ()
keyword[return] identifier[data_details_return] ({ literal[string] : identifier[X] , literal[string] : identifier[Y] , literal[string] : identifier[Xtest] , literal[string] : identifier[Ytest] , literal[string] : identifier[Xtest] , literal[string] : identifier[Xvalid] , literal[string] : identifier[Yvalid] }, identifier[data_set] ) | def oil(data_set='three_phase_oil_flow'):
"""The three phase oil data from Bishop and James (1993)."""
if not data_available(data_set):
download_data(data_set) # depends on [control=['if'], data=[]]
oil_train_file = os.path.join(data_path, data_set, 'DataTrn.txt')
oil_trainlbls_file = os.path.join(data_path, data_set, 'DataTrnLbls.txt')
oil_test_file = os.path.join(data_path, data_set, 'DataTst.txt')
oil_testlbls_file = os.path.join(data_path, data_set, 'DataTstLbls.txt')
oil_valid_file = os.path.join(data_path, data_set, 'DataVdn.txt')
oil_validlbls_file = os.path.join(data_path, data_set, 'DataVdnLbls.txt')
fid = open(oil_train_file)
X = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_test_file)
Xtest = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_valid_file)
Xvalid = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_trainlbls_file)
Y = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2.0 - 1.0
fid.close()
fid = open(oil_testlbls_file)
Ytest = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2.0 - 1.0
fid.close()
fid = open(oil_validlbls_file)
Yvalid = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2.0 - 1.0
fid.close()
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'Xvalid': Xvalid, 'Yvalid': Yvalid}, data_set) |
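
The two transforms doing the real work above are text-to-matrix parsing and the {0, 1} to {-1, +1} label mapping. Here is the label mapping in isolation, using `np.loadtxt` on an in-memory buffer since `np.fromfile` needs a real file:

```python
import io
import numpy as np

buf = io.StringIO('1 0 0\n0 1 0\n')
labels01 = np.loadtxt(buf).reshape((-1, 3))
labels_pm1 = labels01 * 2.0 - 1.0  # one-hot {0, 1} -> {-1, +1}
print(labels_pm1)
# [[ 1. -1. -1.]
#  [-1.  1. -1.]]
```
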
def md2tvd(self, kind='linear'):
"""
Provides a transformation and interpolation function that converts
MD to TVD.
Args:
kind (str): The kind of interpolation to do, e.g. 'linear',
'cubic', 'nearest'.
Returns:
function.
"""
if self.position is None:
return lambda x: x
return interp1d(self.md, self.tvd,
kind=kind,
assume_sorted=True,
fill_value="extrapolate",
bounds_error=False) | def function[md2tvd, parameter[self, kind]]:
constant[
Provides a transformation and interpolation function that converts
MD to TVD.
Args:
kind (str): The kind of interpolation to do, e.g. 'linear',
'cubic', 'nearest'.
Returns:
function.
]
if compare[name[self].position is constant[None]] begin[:]
return[<ast.Lambda object at 0x7da1b23efa00>]
return[call[name[interp1d], parameter[name[self].md, name[self].tvd]]] | keyword[def] identifier[md2tvd] ( identifier[self] , identifier[kind] = literal[string] ):
literal[string]
keyword[if] identifier[self] . identifier[position] keyword[is] keyword[None] :
keyword[return] keyword[lambda] identifier[x] : identifier[x]
keyword[return] identifier[interp1d] ( identifier[self] . identifier[md] , identifier[self] . identifier[tvd] ,
identifier[kind] = identifier[kind] ,
identifier[assume_sorted] = keyword[True] ,
identifier[fill_value] = literal[string] ,
identifier[bounds_error] = keyword[False] ) | def md2tvd(self, kind='linear'):
"""
Provides a transformation and interpolation function that converts
MD to TVD.
Args:
kind (str): The kind of interpolation to do, e.g. 'linear',
'cubic', 'nearest'.
Returns:
function.
"""
if self.position is None:
return lambda x: x # depends on [control=['if'], data=[]]
return interp1d(self.md, self.tvd, kind=kind, assume_sorted=True, fill_value='extrapolate', bounds_error=False) |
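
A quick demonstration with a made-up deviation survey (md and tvd in metres):

```python
import numpy as np
from scipy.interpolate import interp1d

md = np.array([0.0, 500.0, 1000.0, 1500.0])
tvd = np.array([0.0, 499.0, 980.0, 1400.0])
md2tvd = interp1d(md, tvd, kind='linear', assume_sorted=True,
                  fill_value='extrapolate', bounds_error=False)
print(float(md2tvd(750.0)))  # 739.5, linear between the bracketing samples
```
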
def get_user(self, attr_map=None):
"""
Returns a UserObj (or whatever the self.user_class is) by using the
user's access token.
:param attr_map: Dictionary map from Cognito attributes to attribute
names we would like to show to our users
:return:
"""
user = self.client.get_user(
AccessToken=self.access_token
)
user_metadata = {
'username': user.get('Username'),
'id_token': self.id_token,
'access_token': self.access_token,
'refresh_token': self.refresh_token,
}
return self.get_user_obj(username=self.username,
attribute_list=user.get('UserAttributes'),
metadata=user_metadata,attr_map=attr_map) | def function[get_user, parameter[self, attr_map]]:
constant[
Returns a UserObj (or whatever the self.user_class is) by using the
user's access token.
:param attr_map: Dictionary map from Cognito attributes to attribute
names we would like to show to our users
:return:
]
variable[user] assign[=] call[name[self].client.get_user, parameter[]]
variable[user_metadata] assign[=] dictionary[[<ast.Constant object at 0x7da1b1eccb80>, <ast.Constant object at 0x7da1b1ece890>, <ast.Constant object at 0x7da1b1eccdc0>, <ast.Constant object at 0x7da1b1ecca30>], [<ast.Call object at 0x7da1b1ece200>, <ast.Attribute object at 0x7da1b1ececb0>, <ast.Attribute object at 0x7da1b1ecec50>, <ast.Attribute object at 0x7da1b1eccbb0>]]
return[call[name[self].get_user_obj, parameter[]]] | keyword[def] identifier[get_user] ( identifier[self] , identifier[attr_map] = keyword[None] ):
literal[string]
identifier[user] = identifier[self] . identifier[client] . identifier[get_user] (
identifier[AccessToken] = identifier[self] . identifier[access_token]
)
identifier[user_metadata] ={
literal[string] : identifier[user] . identifier[get] ( literal[string] ),
literal[string] : identifier[self] . identifier[id_token] ,
literal[string] : identifier[self] . identifier[access_token] ,
literal[string] : identifier[self] . identifier[refresh_token] ,
}
keyword[return] identifier[self] . identifier[get_user_obj] ( identifier[username] = identifier[self] . identifier[username] ,
identifier[attribute_list] = identifier[user] . identifier[get] ( literal[string] ),
identifier[metadata] = identifier[user_metadata] , identifier[attr_map] = identifier[attr_map] ) | def get_user(self, attr_map=None):
"""
Returns a UserObj (or whatever the self.user_class is) by using the
user's access token.
:param attr_map: Dictionary map from Cognito attributes to attribute
names we would like to show to our users
:return:
"""
user = self.client.get_user(AccessToken=self.access_token)
user_metadata = {'username': user.get('Username'), 'id_token': self.id_token, 'access_token': self.access_token, 'refresh_token': self.refresh_token}
return self.get_user_obj(username=self.username, attribute_list=user.get('UserAttributes'), metadata=user_metadata, attr_map=attr_map) |
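
Shape-wise, the boto3 `get_user` response looks like the stub below; no network call is made here and the token values are placeholders:

```python
fake_response = {
    'Username': 'bob',
    'UserAttributes': [{'Name': 'email', 'Value': 'bob@example.com'}],
}
user_metadata = {
    'username': fake_response.get('Username'),
    'id_token': '<id-token>',
    'access_token': '<access-token>',
    'refresh_token': '<refresh-token>',
}
# A typical attr_map step: Cognito's list of {Name, Value} pairs -> plain dict.
attrs = {a['Name']: a['Value'] for a in fake_response['UserAttributes']}
print(attrs['email'], user_metadata['username'])  # bob@example.com bob
```
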
def add_embedding(self, name, W, b, input_dim, output_channels, has_bias,
input_name, output_name):
"""
Add an embedding layer to the model.
Parameters
----------
name: str
The name of this layer
W: numpy.array
Weight matrix of shape (output_channels, input_dim).
b: numpy.array
Bias vector of shape (output_channels, ).
input_dim: int
Size of the vocabulary (1 + maximum integer index of the words).
output_channels: int
Number of output channels.
has_bias: boolean
Whether the bias vector of this layer is ignored in the spec.
- If True, the bias vector of this layer is not ignored.
- If False, the bias vector is ignored.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_inner_product
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
# Fill in the parameters
spec_layer_params = spec_layer.embedding
spec_layer_params.inputDim = input_dim
spec_layer_params.outputChannels = output_channels
spec_layer_params.hasBias = has_bias
weights = spec_layer_params.weights
weights.floatValue.extend(map(float, W.flatten()))
if has_bias:
bias = spec_layer_params.bias
bias.floatValue.extend(map(float, b.flatten())) | def function[add_embedding, parameter[self, name, W, b, input_dim, output_channels, has_bias, input_name, output_name]]:
constant[
Add an embedding layer to the model.
Parameters
----------
name: str
The name of this layer
W: numpy.array
Weight matrix of shape (output_channels, input_dim).
b: numpy.array
Bias vector of shape (output_channels, ).
input_dim: int
Size of the vocabulary (1 + maximum integer index of the words).
output_channels: int
Number of output channels.
has_bias: boolean
Whether the bias vector of this layer is ignored in the spec.
- If True, the bias vector of this layer is not ignored.
- If False, the bias vector is ignored.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_inner_product
]
variable[spec] assign[=] name[self].spec
variable[nn_spec] assign[=] name[self].nn_spec
variable[spec_layer] assign[=] call[name[nn_spec].layers.add, parameter[]]
name[spec_layer].name assign[=] name[name]
call[name[spec_layer].input.append, parameter[name[input_name]]]
call[name[spec_layer].output.append, parameter[name[output_name]]]
variable[spec_layer_params] assign[=] name[spec_layer].embedding
name[spec_layer_params].inputDim assign[=] name[input_dim]
name[spec_layer_params].outputChannels assign[=] name[output_channels]
name[spec_layer_params].hasBias assign[=] name[has_bias]
variable[weights] assign[=] name[spec_layer_params].weights
call[name[weights].floatValue.extend, parameter[call[name[map], parameter[name[float], call[name[W].flatten, parameter[]]]]]]
if name[has_bias] begin[:]
variable[bias] assign[=] name[spec_layer_params].bias
call[name[bias].floatValue.extend, parameter[call[name[map], parameter[name[float], call[name[b].flatten, parameter[]]]]]] | keyword[def] identifier[add_embedding] ( identifier[self] , identifier[name] , identifier[W] , identifier[b] , identifier[input_dim] , identifier[output_channels] , identifier[has_bias] ,
identifier[input_name] , identifier[output_name] ):
literal[string]
identifier[spec] = identifier[self] . identifier[spec]
identifier[nn_spec] = identifier[self] . identifier[nn_spec]
identifier[spec_layer] = identifier[nn_spec] . identifier[layers] . identifier[add] ()
identifier[spec_layer] . identifier[name] = identifier[name]
identifier[spec_layer] . identifier[input] . identifier[append] ( identifier[input_name] )
identifier[spec_layer] . identifier[output] . identifier[append] ( identifier[output_name] )
identifier[spec_layer_params] = identifier[spec_layer] . identifier[embedding]
identifier[spec_layer_params] . identifier[inputDim] = identifier[input_dim]
identifier[spec_layer_params] . identifier[outputChannels] = identifier[output_channels]
identifier[spec_layer_params] . identifier[hasBias] = identifier[has_bias]
identifier[weights] = identifier[spec_layer_params] . identifier[weights]
identifier[weights] . identifier[floatValue] . identifier[extend] ( identifier[map] ( identifier[float] , identifier[W] . identifier[flatten] ()))
keyword[if] identifier[has_bias] :
identifier[bias] = identifier[spec_layer_params] . identifier[bias]
identifier[bias] . identifier[floatValue] . identifier[extend] ( identifier[map] ( identifier[float] , identifier[b] . identifier[flatten] ())) | def add_embedding(self, name, W, b, input_dim, output_channels, has_bias, input_name, output_name):
"""
Add an embedding layer to the model.
Parameters
----------
name: str
The name of this layer
W: numpy.array
Weight matrix of shape (output_channels, input_dim).
b: numpy.array
Bias vector of shape (output_channels, ).
input_dim: int
Size of the vocabulary (1 + maximum integer index of the words).
output_channels: int
Number of output channels.
has_bias: boolean
Whether the bias vector of this layer is ignored in the spec.
- If True, the bias vector of this layer is not ignored.
- If False, the bias vector is ignored.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_inner_product
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
# Fill in the parameters
spec_layer_params = spec_layer.embedding
spec_layer_params.inputDim = input_dim
spec_layer_params.outputChannels = output_channels
spec_layer_params.hasBias = has_bias
weights = spec_layer_params.weights
weights.floatValue.extend(map(float, W.flatten()))
if has_bias:
bias = spec_layer_params.bias
bias.floatValue.extend(map(float, b.flatten())) # depends on [control=['if'], data=[]] |
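
A shape check worth running before calling this method, since the weights are flattened row-major into the spec:

```python
import numpy as np

input_dim, output_channels = 1000, 64
W = np.random.rand(output_channels, input_dim)  # (output_channels, input_dim)
b = np.zeros(output_channels)                   # (output_channels,)
assert W.shape == (output_channels, input_dim)
assert b.shape == (output_channels,)
flat = list(map(float, W.flatten()))  # row-major, exactly what extend(...) stores
print(len(flat))  # 64000
```
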
def get(version: str) -> 'Protocol':
"""
Return enum instance corresponding to input version value ('1.6' etc.)
"""
return Protocol.V_13 if version == Protocol.V_13.value.name else Protocol.DEFAULT | def function[get, parameter[version]]:
constant[
Return enum instance corresponding to input version value ('1.6' etc.)
]
return[<ast.IfExp object at 0x7da2041d8a30>] | keyword[def] identifier[get] ( identifier[version] : identifier[str] )-> literal[string] :
literal[string]
keyword[return] identifier[Protocol] . identifier[V_13] keyword[if] identifier[version] == identifier[Protocol] . identifier[V_13] . identifier[value] . identifier[name] keyword[else] identifier[Protocol] . identifier[DEFAULT] | def get(version: str) -> 'Protocol':
"""
Return enum instance corresponding to input version value ('1.6' etc.)
"""
return Protocol.V_13 if version == Protocol.V_13.value.name else Protocol.DEFAULT |
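
A self-contained model of the lookup with a hypothetical enum. The real `Protocol` members and their value objects are not shown in this row, so the `Spec` value type is an assumption; only the comparison against `Protocol.V_13.value.name` is taken from the code above.

```python
from collections import namedtuple
from enum import Enum

Spec = namedtuple('Spec', 'name')

class Protocol(Enum):
    V_13 = Spec('1.3')
    V_16 = Spec('1.6')
    DEFAULT = V_16  # alias: same value as V_16

    @staticmethod
    def get(version: str) -> 'Protocol':
        return Protocol.V_13 if version == Protocol.V_13.value.name else Protocol.DEFAULT

print(Protocol.get('1.3'))  # Protocol.V_13
print(Protocol.get('1.6'))  # Protocol.V_16 (DEFAULT is an alias of V_16)
```
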
def write_copy_button(self, text, text_to_copy):
"""Writes a button with 'text' which can be used
to copy 'text_to_copy' to the clipboard when it's clicked."""
self.write_copy_script = True
self.write('<button onclick="cp(\'{}\');">{}</button>'
.format(text_to_copy, text)) | def function[write_copy_button, parameter[self, text, text_to_copy]]:
constant[Writes a button with 'text' which can be used
to copy 'text_to_copy' to the clipboard when it's clicked.]
name[self].write_copy_script assign[=] constant[True]
call[name[self].write, parameter[call[constant[<button onclick="cp('{}');">{}</button>].format, parameter[name[text_to_copy], name[text]]]]] | keyword[def] identifier[write_copy_button] ( identifier[self] , identifier[text] , identifier[text_to_copy] ):
literal[string]
identifier[self] . identifier[write_copy_script] = keyword[True]
identifier[self] . identifier[write] ( literal[string]
. identifier[format] ( identifier[text_to_copy] , identifier[text] )) | def write_copy_button(self, text, text_to_copy):
"""Writes a button with 'text' which can be used
to copy 'text_to_copy' to clipboard when it's clicked."""
self.write_copy_script = True
self.write('<button onclick="cp(\'{}\');">{}</button>'.format(text_to_copy, text)) |
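
For the button to work, a `cp` helper has to be emitted once per page, which is presumably what the `write_copy_script` flag arranges elsewhere; the script below is an assumption about its content, using the browser clipboard API. Note the copied value is interpolated into the onclick attribute unescaped, so it must not contain quotes.

```python
copy_script = (
    '<script>'
    'function cp(t){ navigator.clipboard.writeText(t); }'
    '</script>'
)
button = '<button onclick="cp(\'{}\');">{}</button>'.format('hello', 'Copy greeting')
print(copy_script)
print(button)  # <button onclick="cp('hello');">Copy greeting</button>
```
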
def _get_cycles(graph_dict, path, visited, result, vertice):
"""recursive function doing the real work for get_cycles"""
if vertice in path:
cycle = [vertice]
for node in path[::-1]:
if node == vertice:
break
cycle.insert(0, node)
# make a canonical representation
start_from = min(cycle)
index = cycle.index(start_from)
cycle = cycle[index:] + cycle[0:index]
# append it to result if not already in
if cycle not in result:
result.append(cycle)
return
path.append(vertice)
try:
for node in graph_dict[vertice]:
# don't check already visited nodes again
if node not in visited:
_get_cycles(graph_dict, path, visited, result, node)
visited.add(node)
except KeyError:
pass
path.pop() | def function[_get_cycles, parameter[graph_dict, path, visited, result, vertice]]:
constant[recursive function doing the real work for get_cycles]
if compare[name[vertice] in name[path]] begin[:]
variable[cycle] assign[=] list[[<ast.Name object at 0x7da1b059eda0>]]
for taget[name[node]] in starred[call[name[path]][<ast.Slice object at 0x7da1b059dd80>]] begin[:]
if compare[name[node] equal[==] name[vertice]] begin[:]
break
call[name[cycle].insert, parameter[constant[0], name[node]]]
variable[start_from] assign[=] call[name[min], parameter[name[cycle]]]
variable[index] assign[=] call[name[cycle].index, parameter[name[start_from]]]
variable[cycle] assign[=] binary_operation[call[name[cycle]][<ast.Slice object at 0x7da1b03511b0>] + call[name[cycle]][<ast.Slice object at 0x7da1b03533a0>]]
if compare[name[cycle] <ast.NotIn object at 0x7da2590d7190> name[result]] begin[:]
call[name[result].append, parameter[name[cycle]]]
return[None]
call[name[path].append, parameter[name[vertice]]]
<ast.Try object at 0x7da1b0350ee0>
call[name[path].pop, parameter[]] | keyword[def] identifier[_get_cycles] ( identifier[graph_dict] , identifier[path] , identifier[visited] , identifier[result] , identifier[vertice] ):
literal[string]
keyword[if] identifier[vertice] keyword[in] identifier[path] :
identifier[cycle] =[ identifier[vertice] ]
keyword[for] identifier[node] keyword[in] identifier[path] [::- literal[int] ]:
keyword[if] identifier[node] == identifier[vertice] :
keyword[break]
identifier[cycle] . identifier[insert] ( literal[int] , identifier[node] )
identifier[start_from] = identifier[min] ( identifier[cycle] )
identifier[index] = identifier[cycle] . identifier[index] ( identifier[start_from] )
identifier[cycle] = identifier[cycle] [ identifier[index] :]+ identifier[cycle] [ literal[int] : identifier[index] ]
keyword[if] identifier[cycle] keyword[not] keyword[in] identifier[result] :
identifier[result] . identifier[append] ( identifier[cycle] )
keyword[return]
identifier[path] . identifier[append] ( identifier[vertice] )
keyword[try] :
keyword[for] identifier[node] keyword[in] identifier[graph_dict] [ identifier[vertice] ]:
keyword[if] identifier[node] keyword[not] keyword[in] identifier[visited] :
identifier[_get_cycles] ( identifier[graph_dict] , identifier[path] , identifier[visited] , identifier[result] , identifier[node] )
identifier[visited] . identifier[add] ( identifier[node] )
keyword[except] identifier[KeyError] :
keyword[pass]
identifier[path] . identifier[pop] () | def _get_cycles(graph_dict, path, visited, result, vertice):
"""recursive function doing the real work for get_cycles"""
if vertice in path:
cycle = [vertice]
for node in path[::-1]:
if node == vertice:
break # depends on [control=['if'], data=[]]
cycle.insert(0, node) # depends on [control=['for'], data=['node']]
# make a canonical representation
start_from = min(cycle)
index = cycle.index(start_from)
cycle = cycle[index:] + cycle[0:index]
# append it to result if not already in
if cycle not in result:
result.append(cycle) # depends on [control=['if'], data=['cycle', 'result']]
return # depends on [control=['if'], data=['vertice', 'path']]
path.append(vertice)
try:
for node in graph_dict[vertice]:
# don't check already visited nodes again
if node not in visited:
_get_cycles(graph_dict, path, visited, result, node)
visited.add(node) # depends on [control=['if'], data=['node', 'visited']] # depends on [control=['for'], data=['node']] # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
path.pop() |
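The public entry point for _get_cycles is not part of this row; a minimal driver consistent with the recursive worker might look like the sketch below (the wrapper name get_cycles and its signature are assumptions).

def get_cycles(graph_dict, vertices=None):
    """Hypothetical wrapper: return the cycles of a directed graph given
    as an adjacency dict, using _get_cycles above for the traversal."""
    result = []
    visited = set()
    for vertice in vertices or graph_dict:
        _get_cycles(graph_dict, [], visited, result, vertice)
    return result

# get_cycles({1: (2,), 2: (3,), 3: (1,)}) -> [[1, 2, 3]]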
def add_data_file(self, from_fp, timestamp=None, content_type=None):
# type: (IO, Optional[datetime.datetime], Optional[str]) -> Text
"""Copy inputs to data/ folder."""
self.self_check()
tmp_dir, tmp_prefix = os.path.split(self.temp_prefix)
with tempfile.NamedTemporaryFile(
prefix=tmp_prefix, dir=tmp_dir, delete=False) as tmp:
checksum = checksum_copy(from_fp, tmp)
# Calculate hash-based file path
folder = os.path.join(self.folder, DATA, checksum[0:2])
path = os.path.join(folder, checksum)
# os.rename assumed safe, as our temp file should
# be in same file system as our temp folder
if not os.path.isdir(folder):
os.makedirs(folder)
os.rename(tmp.name, path)
# Relative posix path
# (to avoid \ on Windows)
rel_path = _posix_path(os.path.relpath(path, self.folder))
# Register in bagit checksum
if Hasher == hashlib.sha1:
self._add_to_bagit(rel_path, sha1=checksum)
else:
_logger.warning(
u"[provenance] Unknown hash method %s for bagit manifest",
Hasher)
# Inefficient, bagit support needs to checksum again
self._add_to_bagit(rel_path)
_logger.debug(u"[provenance] Added data file %s", path)
if timestamp is not None:
self._file_provenance[rel_path] = self._self_made(timestamp)
_logger.debug(u"[provenance] Relative path for data file %s", rel_path)
if content_type is not None:
self._content_types[rel_path] = content_type
return rel_path | def function[add_data_file, parameter[self, from_fp, timestamp, content_type]]:
constant[Copy inputs to data/ folder.]
call[name[self].self_check, parameter[]]
<ast.Tuple object at 0x7da18bc72ad0> assign[=] call[name[os].path.split, parameter[name[self].temp_prefix]]
with call[name[tempfile].NamedTemporaryFile, parameter[]] begin[:]
variable[checksum] assign[=] call[name[checksum_copy], parameter[name[from_fp], name[tmp]]]
variable[folder] assign[=] call[name[os].path.join, parameter[name[self].folder, name[DATA], call[name[checksum]][<ast.Slice object at 0x7da18bc70610>]]]
variable[path] assign[=] call[name[os].path.join, parameter[name[folder], name[checksum]]]
if <ast.UnaryOp object at 0x7da2047eb520> begin[:]
call[name[os].makedirs, parameter[name[folder]]]
call[name[os].rename, parameter[name[tmp].name, name[path]]]
variable[rel_path] assign[=] call[name[_posix_path], parameter[call[name[os].path.relpath, parameter[name[path], name[self].folder]]]]
if compare[name[Hasher] equal[==] name[hashlib].sha1] begin[:]
call[name[self]._add_to_bagit, parameter[name[rel_path]]]
call[name[_logger].debug, parameter[constant[[provenance] Added data file %s], name[path]]]
if compare[name[timestamp] is_not constant[None]] begin[:]
call[name[self]._file_provenance][name[rel_path]] assign[=] call[name[self]._self_made, parameter[name[timestamp]]]
call[name[_logger].debug, parameter[constant[[provenance] Relative path for data file %s], name[rel_path]]]
if compare[name[content_type] is_not constant[None]] begin[:]
call[name[self]._content_types][name[rel_path]] assign[=] name[content_type]
return[name[rel_path]] | keyword[def] identifier[add_data_file] ( identifier[self] , identifier[from_fp] , identifier[timestamp] = keyword[None] , identifier[content_type] = keyword[None] ):
literal[string]
identifier[self] . identifier[self_check] ()
identifier[tmp_dir] , identifier[tmp_prefix] = identifier[os] . identifier[path] . identifier[split] ( identifier[self] . identifier[temp_prefix] )
keyword[with] identifier[tempfile] . identifier[NamedTemporaryFile] (
identifier[prefix] = identifier[tmp_prefix] , identifier[dir] = identifier[tmp_dir] , identifier[delete] = keyword[False] ) keyword[as] identifier[tmp] :
identifier[checksum] = identifier[checksum_copy] ( identifier[from_fp] , identifier[tmp] )
identifier[folder] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[folder] , identifier[DATA] , identifier[checksum] [ literal[int] : literal[int] ])
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[folder] , identifier[checksum] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[folder] ):
identifier[os] . identifier[makedirs] ( identifier[folder] )
identifier[os] . identifier[rename] ( identifier[tmp] . identifier[name] , identifier[path] )
identifier[rel_path] = identifier[_posix_path] ( identifier[os] . identifier[path] . identifier[relpath] ( identifier[path] , identifier[self] . identifier[folder] ))
keyword[if] identifier[Hasher] == identifier[hashlib] . identifier[sha1] :
identifier[self] . identifier[_add_to_bagit] ( identifier[rel_path] , identifier[sha1] = identifier[checksum] )
keyword[else] :
identifier[_logger] . identifier[warning] (
literal[string] ,
identifier[Hasher] )
identifier[self] . identifier[_add_to_bagit] ( identifier[rel_path] )
identifier[_logger] . identifier[debug] ( literal[string] , identifier[path] )
keyword[if] identifier[timestamp] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_file_provenance] [ identifier[rel_path] ]= identifier[self] . identifier[_self_made] ( identifier[timestamp] )
identifier[_logger] . identifier[debug] ( literal[string] , identifier[rel_path] )
keyword[if] identifier[content_type] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_content_types] [ identifier[rel_path] ]= identifier[content_type]
keyword[return] identifier[rel_path] | def add_data_file(self, from_fp, timestamp=None, content_type=None):
# type: (IO, Optional[datetime.datetime], Optional[str]) -> Text
'Copy inputs to data/ folder.'
self.self_check()
(tmp_dir, tmp_prefix) = os.path.split(self.temp_prefix)
with tempfile.NamedTemporaryFile(prefix=tmp_prefix, dir=tmp_dir, delete=False) as tmp:
checksum = checksum_copy(from_fp, tmp) # depends on [control=['with'], data=['tmp']]
# Calculate hash-based file path
folder = os.path.join(self.folder, DATA, checksum[0:2])
path = os.path.join(folder, checksum)
# os.rename assumed safe, as our temp file should
# be in same file system as our temp folder
if not os.path.isdir(folder):
os.makedirs(folder) # depends on [control=['if'], data=[]]
os.rename(tmp.name, path)
# Relative posix path
# (to avoid \ on Windows)
rel_path = _posix_path(os.path.relpath(path, self.folder))
# Register in bagit checksum
if Hasher == hashlib.sha1:
self._add_to_bagit(rel_path, sha1=checksum) # depends on [control=['if'], data=[]]
else:
_logger.warning(u'[provenance] Unknown hash method %s for bagit manifest', Hasher)
# Inefficient, bagit support needs to checksum again
self._add_to_bagit(rel_path)
_logger.debug(u'[provenance] Added data file %s', path)
if timestamp is not None:
self._file_provenance[rel_path] = self._self_made(timestamp) # depends on [control=['if'], data=['timestamp']]
_logger.debug(u'[provenance] Relative path for data file %s', rel_path)
if content_type is not None:
self._content_types[rel_path] = content_type # depends on [control=['if'], data=['content_type']]
return rel_path |
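add_data_file relies on a checksum_copy helper that is not shown in this row; below is a plausible sketch, assuming it streams the source into the open temp file while hashing with the module-level Hasher and returns the hex digest (the helper body and chunk size are assumptions).

def checksum_copy(from_fp, to_fp, chunk_size=64 * 1024):
    # Hypothetical helper matching the call above: copy from_fp into to_fp
    # in chunks while hashing, then return the hex checksum.
    hasher = Hasher()  # module-level hash constructor, e.g. hashlib.sha1
    while True:
        chunk = from_fp.read(chunk_size)
        if not chunk:
            break
        hasher.update(chunk)
        to_fp.write(chunk)
    return hasher.hexdigest()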
def fetch_gpml_usps_resampled_data(transpose_data=True, data_home=None):
"""
Fetch the USPS handwritten digits dataset from the internet and parse
appropriately into python arrays
>>> usps_resampled = fetch_gpml_usps_resampled_data()
>>> usps_resampled.train.targets.shape
(4649,)
>>> usps_resampled.train.targets # doctest: +ELLIPSIS
array([6, 0, 1, ..., 9, 2, 7])
>>> usps_resampled.train.data.shape
(4649, 256)
>>> np.all(-1 <= usps_resampled.train.data)
True
>>> np.all(usps_resampled.train.data < 1)
True
>>> usps_resampled.test.targets.shape
(4649,)
>>> usps_resampled.test.data.shape
(4649, 256)
>>> usps_resampled = fetch_gpml_usps_resampled_data(transpose_data=False)
>>> usps_resampled.train.data.shape
(256, 4649)
"""
data_home = get_data_home(data_home=data_home)
data_filename = os.path.join(data_home,
'usps_resampled/usps_resampled.mat')
if not os.path.exists(data_filename):
r = requests.get('http://www.gaussianprocess.org/gpml/data/'
'usps_resampled.tar.bz2')
with tarfile.open(fileobj=BytesIO(r.content)) as tar_infile:
tar_infile.extract('usps_resampled/usps_resampled.mat',
path=data_home)
matlab_dict = loadmat(data_filename)
train_data = matlab_dict['train_patterns']
test_data = matlab_dict['test_patterns']
if transpose_data:
train_data = train_data.T
test_data = test_data.T
train_targets = matlab_dict['train_labels'].T
train_targets = np.argwhere(train_targets == 1)[:, 1]
test_targets = matlab_dict['test_labels'].T
test_targets = np.argwhere(test_targets == 1)[:, 1]
train_bunch = Bunch(data=train_data,
targets=train_targets)
test_bunch = Bunch(data=test_data,
targets=test_targets)
return Bunch(train=train_bunch, test=test_bunch) | def function[fetch_gpml_usps_resampled_data, parameter[transpose_data, data_home]]:
constant[
Fetch the USPS handwritten digits dataset from the internet and parse
appropriately into python arrays
>>> usps_resampled = fetch_gpml_usps_resampled_data()
>>> usps_resampled.train.targets.shape
(4649,)
>>> usps_resampled.train.targets # doctest: +ELLIPSIS
array([6, 0, 1, ..., 9, 2, 7])
>>> usps_resampled.train.data.shape
(4649, 256)
>>> np.all(-1 <= usps_resampled.train.data)
True
>>> np.all(usps_resampled.train.data < 1)
True
>>> usps_resampled.test.targets.shape
(4649,)
>>> usps_resampled.test.data.shape
(4649, 256)
>>> usps_resampled = fetch_gpml_usps_resampled_data(transpose_data=False)
>>> usps_resampled.train.data.shape
(256, 4649)
]
variable[data_home] assign[=] call[name[get_data_home], parameter[]]
variable[data_filename] assign[=] call[name[os].path.join, parameter[name[data_home], constant[usps_resampled/usps_resampled.mat]]]
if <ast.UnaryOp object at 0x7da2043451b0> begin[:]
variable[r] assign[=] call[name[requests].get, parameter[constant[http://www.gaussianprocess.org/gpml/data/usps_resampled.tar.bz2]]]
with call[name[tarfile].open, parameter[]] begin[:]
call[name[tar_infile].extract, parameter[constant[usps_resampled/usps_resampled.mat]]]
variable[matlab_dict] assign[=] call[name[loadmat], parameter[name[data_filename]]]
variable[train_data] assign[=] call[name[matlab_dict]][constant[train_patterns]]
variable[test_data] assign[=] call[name[matlab_dict]][constant[test_patterns]]
if name[transpose_data] begin[:]
variable[train_data] assign[=] name[train_data].T
variable[test_data] assign[=] name[test_data].T
variable[train_targets] assign[=] call[name[matlab_dict]][constant[train_labels]].T
variable[train_targets] assign[=] call[call[name[np].argwhere, parameter[compare[name[train_targets] equal[==] constant[1]]]]][tuple[[<ast.Slice object at 0x7da2043447f0>, <ast.Constant object at 0x7da2043470a0>]]]
variable[test_targets] assign[=] call[name[matlab_dict]][constant[test_labels]].T
variable[test_targets] assign[=] call[call[name[np].argwhere, parameter[compare[name[test_targets] equal[==] constant[1]]]]][tuple[[<ast.Slice object at 0x7da2043453f0>, <ast.Constant object at 0x7da204345480>]]]
variable[train_bunch] assign[=] call[name[Bunch], parameter[]]
variable[test_bunch] assign[=] call[name[Bunch], parameter[]]
return[call[name[Bunch], parameter[]]] | keyword[def] identifier[fetch_gpml_usps_resampled_data] ( identifier[transpose_data] = keyword[True] , identifier[data_home] = keyword[None] ):
literal[string]
identifier[data_home] = identifier[get_data_home] ( identifier[data_home] = identifier[data_home] )
identifier[data_filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_home] ,
literal[string] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[data_filename] ):
identifier[r] = identifier[requests] . identifier[get] ( literal[string]
literal[string] )
keyword[with] identifier[tarfile] . identifier[open] ( identifier[fileobj] = identifier[BytesIO] ( identifier[r] . identifier[content] )) keyword[as] identifier[tar_infile] :
identifier[tar_infile] . identifier[extract] ( literal[string] ,
identifier[path] = identifier[data_home] )
identifier[matlab_dict] = identifier[loadmat] ( identifier[data_filename] )
identifier[train_data] = identifier[matlab_dict] [ literal[string] ]
identifier[test_data] = identifier[matlab_dict] [ literal[string] ]
keyword[if] identifier[transpose_data] :
identifier[train_data] = identifier[train_data] . identifier[T]
identifier[test_data] = identifier[test_data] . identifier[T]
identifier[train_targets] = identifier[matlab_dict] [ literal[string] ]. identifier[T]
identifier[train_targets] = identifier[np] . identifier[argwhere] ( identifier[train_targets] == literal[int] )[:, literal[int] ]
identifier[test_targets] = identifier[matlab_dict] [ literal[string] ]. identifier[T]
identifier[test_targets] = identifier[np] . identifier[argwhere] ( identifier[test_targets] == literal[int] )[:, literal[int] ]
identifier[train_bunch] = identifier[Bunch] ( identifier[data] = identifier[train_data] ,
identifier[targets] = identifier[train_targets] )
identifier[test_bunch] = identifier[Bunch] ( identifier[data] = identifier[test_data] ,
identifier[targets] = identifier[test_targets] )
keyword[return] identifier[Bunch] ( identifier[train] = identifier[train_bunch] , identifier[test] = identifier[test_bunch] ) | def fetch_gpml_usps_resampled_data(transpose_data=True, data_home=None):
"""
Fetch the USPS handwritten digits dataset from the internet and parse
appropriately into python arrays
>>> usps_resampled = fetch_gpml_usps_resampled_data()
>>> usps_resampled.train.targets.shape
(4649,)
>>> usps_resampled.train.targets # doctest: +ELLIPSIS
array([6, 0, 1, ..., 9, 2, 7])
>>> usps_resampled.train.data.shape
(4649, 256)
>>> np.all(-1 <= usps_resampled.train.data)
True
>>> np.all(usps_resampled.train.data < 1)
True
>>> usps_resampled.test.targets.shape
(4649,)
>>> usps_resampled.test.data.shape
(4649, 256)
>>> usps_resampled = fetch_gpml_usps_resampled_data(transpose_data=False)
>>> usps_resampled.train.data.shape
(256, 4649)
"""
data_home = get_data_home(data_home=data_home)
data_filename = os.path.join(data_home, 'usps_resampled/usps_resampled.mat')
if not os.path.exists(data_filename):
r = requests.get('http://www.gaussianprocess.org/gpml/data/usps_resampled.tar.bz2')
with tarfile.open(fileobj=BytesIO(r.content)) as tar_infile:
tar_infile.extract('usps_resampled/usps_resampled.mat', path=data_home) # depends on [control=['with'], data=['tar_infile']] # depends on [control=['if'], data=[]]
matlab_dict = loadmat(data_filename)
train_data = matlab_dict['train_patterns']
test_data = matlab_dict['test_patterns']
if transpose_data:
train_data = train_data.T
test_data = test_data.T # depends on [control=['if'], data=[]]
train_targets = matlab_dict['train_labels'].T
train_targets = np.argwhere(train_targets == 1)[:, 1]
test_targets = matlab_dict['test_labels'].T
test_targets = np.argwhere(test_targets == 1)[:, 1]
train_bunch = Bunch(data=train_data, targets=train_targets)
test_bunch = Bunch(data=test_data, targets=test_targets)
return Bunch(train=train_bunch, test=test_bunch) |
def schedCoroSafePend(self, coro):
'''
Schedules a coroutine to run as soon as possible on the same event loop that this Base is running on.
Note:
This method may *not* be run inside an event loop
'''
if __debug__:
import synapse.lib.threads as s_threads # avoid import cycle
assert s_threads.iden() != self.tid
task = asyncio.run_coroutine_threadsafe(coro, self.loop)
return task.result() | def function[schedCoroSafePend, parameter[self, coro]]:
constant[
Schedules a coroutine to run as soon as possible on the same event loop that this Base is running on.
Note:
This method may *not* be run inside an event loop
]
if name[__debug__] begin[:]
import module[synapse.lib.threads] as alias[s_threads]
assert[compare[call[name[s_threads].iden, parameter[]] not_equal[!=] name[self].tid]]
variable[task] assign[=] call[name[asyncio].run_coroutine_threadsafe, parameter[name[coro], name[self].loop]]
return[call[name[task].result, parameter[]]] | keyword[def] identifier[schedCoroSafePend] ( identifier[self] , identifier[coro] ):
literal[string]
keyword[if] identifier[__debug__] :
keyword[import] identifier[synapse] . identifier[lib] . identifier[threads] keyword[as] identifier[s_threads]
keyword[assert] identifier[s_threads] . identifier[iden] ()!= identifier[self] . identifier[tid]
identifier[task] = identifier[asyncio] . identifier[run_coroutine_threadsafe] ( identifier[coro] , identifier[self] . identifier[loop] )
keyword[return] identifier[task] . identifier[result] () | def schedCoroSafePend(self, coro):
"""
Schedules a coroutine to run as soon as possible on the same event loop that this Base is running on.
Note:
This method may *not* be run inside an event loop
"""
if __debug__:
import synapse.lib.threads as s_threads # avoid import cycle
assert s_threads.iden() != self.tid # depends on [control=['if'], data=[]]
task = asyncio.run_coroutine_threadsafe(coro, self.loop)
return task.result() |
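A usage sketch for schedCoroSafePend; the Base instance and the coroutine are assumptions. The point of the assert is that the caller must be on a plain worker thread, never on the Base's own loop thread.

import threading

async def fetch_status():
    return 'ok'

def worker(base):
    # runs on a worker thread; the coroutine executes on base's loop
    print(base.schedCoroSafePend(fetch_status()))

threading.Thread(target=worker, args=(base,)).start()  # base constructed elsewhere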
def are_equal(self, mol1, mol2):
"""
Compare the bond table of the two molecules.
Args:
mol1: first molecule. pymatgen Molecule object.
mol2: second molecule. pymatgen Molecule object.
"""
b1 = set(self._get_bonds(mol1))
b2 = set(self._get_bonds(mol2))
return b1 == b2 | def function[are_equal, parameter[self, mol1, mol2]]:
constant[
Compare the bond table of the two molecules.
Args:
mol1: first molecule. pymatgen Molecule object.
mol2: second molecule. pymatgen Molecule object.
]
variable[b1] assign[=] call[name[set], parameter[call[name[self]._get_bonds, parameter[name[mol1]]]]]
variable[b2] assign[=] call[name[set], parameter[call[name[self]._get_bonds, parameter[name[mol2]]]]]
return[compare[name[b1] equal[==] name[b2]]] | keyword[def] identifier[are_equal] ( identifier[self] , identifier[mol1] , identifier[mol2] ):
literal[string]
identifier[b1] = identifier[set] ( identifier[self] . identifier[_get_bonds] ( identifier[mol1] ))
identifier[b2] = identifier[set] ( identifier[self] . identifier[_get_bonds] ( identifier[mol2] ))
keyword[return] identifier[b1] == identifier[b2] | def are_equal(self, mol1, mol2):
"""
Compare the bond table of the two molecules.
Args:
mol1: first molecule. pymatgen Molecule object.
mol2: second molecule. pymatgen Molecule object.
"""
b1 = set(self._get_bonds(mol1))
b2 = set(self._get_bonds(mol2))
return b1 == b2 |
def upload(self, fd, name=None, folder_key=None, filedrop_key=None,
path=None, action_on_duplicate=None):
"""Upload file, returns UploadResult object
fd -- file-like object to upload from, expects exclusive access
name -- file name
folder_key -- folderkey of the target folder
path -- path to file relative to folder_key
filedrop_key -- filedrop to use instead of folder_key
action_on_duplicate -- skip, keep, replace
"""
# Get file handle content length in the most reliable way
fd.seek(0, os.SEEK_END)
size = fd.tell()
fd.seek(0, os.SEEK_SET)
if size > UPLOAD_SIMPLE_LIMIT_BYTES:
resumable = True
else:
resumable = False
logger.debug("Calculating checksum")
hash_info = compute_hash_info(fd)
if hash_info.size != size:
# Has the file changed between computing the hash
# and calling upload()?
raise ValueError("hash_info.size mismatch")
upload_info = _UploadInfo(fd=fd, name=name, folder_key=folder_key,
hash_info=hash_info, size=size, path=path,
filedrop_key=filedrop_key,
action_on_duplicate=action_on_duplicate)
# Check whether file is present
check_result = self._upload_check(upload_info, resumable)
upload_result = None
upload_func = None
folder_key = check_result.get('folder_key', None)
if folder_key is not None:
# We know precisely what folder_key to use, drop path
upload_info.folder_key = folder_key
upload_info.path = None
if check_result['hash_exists'] == 'yes':
# file exists somewhere in MediaFire
if check_result['in_folder'] == 'yes' and \
check_result['file_exists'] == 'yes':
# file exists in this directory
different_hash = check_result.get('different_hash', 'no')
if different_hash == 'no':
# file is already there
upload_func = self._upload_none
if not upload_func:
# different hash or in other folder
upload_func = self._upload_instant
if not upload_func:
if resumable:
resumable_upload_info = check_result['resumable_upload']
upload_info.hash_info = compute_hash_info(
fd, int(resumable_upload_info['unit_size']))
upload_func = self._upload_resumable
else:
upload_func = self._upload_simple
# Retry retriable exceptions
retries = UPLOAD_RETRY_COUNT
while retries > 0:
try:
# Provide check_result to avoid calling API twice
upload_result = upload_func(upload_info, check_result)
except (RetriableUploadError, MediaFireConnectionError):
retries -= 1
logger.exception("%s failed (%d retries left)",
upload_func.__name__, retries)
# Refresh check_result for next iteration
check_result = self._upload_check(upload_info, resumable)
except Exception:
logger.exception("%s failed", upload_func)
break
else:
break
if upload_result is None:
raise UploadError("Upload failed")
return upload_result | def function[upload, parameter[self, fd, name, folder_key, filedrop_key, path, action_on_duplicate]]:
constant[Upload file, returns UploadResult object
fd -- file-like object to upload from, expects exclusive access
name -- file name
folder_key -- folderkey of the target folder
path -- path to file relative to folder_key
filedrop_key -- filedrop to use instead of folder_key
action_on_duplicate -- skip, keep, replace
]
call[name[fd].seek, parameter[constant[0], name[os].SEEK_END]]
variable[size] assign[=] call[name[fd].tell, parameter[]]
call[name[fd].seek, parameter[constant[0], name[os].SEEK_SET]]
if compare[name[size] greater[>] name[UPLOAD_SIMPLE_LIMIT_BYTES]] begin[:]
variable[resumable] assign[=] constant[True]
call[name[logger].debug, parameter[constant[Calculating checksum]]]
variable[hash_info] assign[=] call[name[compute_hash_info], parameter[name[fd]]]
if compare[name[hash_info].size not_equal[!=] name[size]] begin[:]
<ast.Raise object at 0x7da1b0e5b0d0>
variable[upload_info] assign[=] call[name[_UploadInfo], parameter[]]
variable[check_result] assign[=] call[name[self]._upload_check, parameter[name[upload_info], name[resumable]]]
variable[upload_result] assign[=] constant[None]
variable[upload_func] assign[=] constant[None]
variable[folder_key] assign[=] call[name[check_result].get, parameter[constant[folder_key], constant[None]]]
if compare[name[folder_key] is_not constant[None]] begin[:]
name[upload_info].folder_key assign[=] name[folder_key]
name[upload_info].path assign[=] constant[None]
if compare[call[name[check_result]][constant[hash_exists]] equal[==] constant[yes]] begin[:]
if <ast.BoolOp object at 0x7da1b0e59090> begin[:]
variable[different_hash] assign[=] call[name[check_result].get, parameter[constant[different_hash], constant[no]]]
if compare[name[different_hash] equal[==] constant[no]] begin[:]
variable[upload_func] assign[=] name[self]._upload_none
if <ast.UnaryOp object at 0x7da1b0e580d0> begin[:]
variable[upload_func] assign[=] name[self]._upload_instant
if <ast.UnaryOp object at 0x7da1b0e5b340> begin[:]
if name[resumable] begin[:]
variable[resumable_upload_info] assign[=] call[name[check_result]][constant[resumable_upload]]
name[upload_info].hash_info assign[=] call[name[compute_hash_info], parameter[name[fd], call[name[int], parameter[call[name[resumable_upload_info]][constant[unit_size]]]]]]
variable[upload_func] assign[=] name[self]._upload_resumable
variable[retries] assign[=] name[UPLOAD_RETRY_COUNT]
while compare[name[retries] greater[>] constant[0]] begin[:]
<ast.Try object at 0x7da1b0e58be0>
if compare[name[upload_result] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0e591e0>
return[name[upload_result]] | keyword[def] identifier[upload] ( identifier[self] , identifier[fd] , identifier[name] = keyword[None] , identifier[folder_key] = keyword[None] , identifier[filedrop_key] = keyword[None] ,
identifier[path] = keyword[None] , identifier[action_on_duplicate] = keyword[None] ):
literal[string]
identifier[fd] . identifier[seek] ( literal[int] , identifier[os] . identifier[SEEK_END] )
identifier[size] = identifier[fd] . identifier[tell] ()
identifier[fd] . identifier[seek] ( literal[int] , identifier[os] . identifier[SEEK_SET] )
keyword[if] identifier[size] > identifier[UPLOAD_SIMPLE_LIMIT_BYTES] :
identifier[resumable] = keyword[True]
keyword[else] :
identifier[resumable] = keyword[False]
identifier[logger] . identifier[debug] ( literal[string] )
identifier[hash_info] = identifier[compute_hash_info] ( identifier[fd] )
keyword[if] identifier[hash_info] . identifier[size] != identifier[size] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[upload_info] = identifier[_UploadInfo] ( identifier[fd] = identifier[fd] , identifier[name] = identifier[name] , identifier[folder_key] = identifier[folder_key] ,
identifier[hash_info] = identifier[hash_info] , identifier[size] = identifier[size] , identifier[path] = identifier[path] ,
identifier[filedrop_key] = identifier[filedrop_key] ,
identifier[action_on_duplicate] = identifier[action_on_duplicate] )
identifier[check_result] = identifier[self] . identifier[_upload_check] ( identifier[upload_info] , identifier[resumable] )
identifier[upload_result] = keyword[None]
identifier[upload_func] = keyword[None]
identifier[folder_key] = identifier[check_result] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[folder_key] keyword[is] keyword[not] keyword[None] :
identifier[upload_info] . identifier[folder_key] = identifier[folder_key]
identifier[upload_info] . identifier[path] = keyword[None]
keyword[if] identifier[check_result] [ literal[string] ]== literal[string] :
keyword[if] identifier[check_result] [ literal[string] ]== literal[string] keyword[and] identifier[check_result] [ literal[string] ]== literal[string] :
identifier[different_hash] = identifier[check_result] . identifier[get] ( literal[string] , literal[string] )
keyword[if] identifier[different_hash] == literal[string] :
identifier[upload_func] = identifier[self] . identifier[_upload_none]
keyword[if] keyword[not] identifier[upload_func] :
identifier[upload_func] = identifier[self] . identifier[_upload_instant]
keyword[if] keyword[not] identifier[upload_func] :
keyword[if] identifier[resumable] :
identifier[resumable_upload_info] = identifier[check_result] [ literal[string] ]
identifier[upload_info] . identifier[hash_info] = identifier[compute_hash_info] (
identifier[fd] , identifier[int] ( identifier[resumable_upload_info] [ literal[string] ]))
identifier[upload_func] = identifier[self] . identifier[_upload_resumable]
keyword[else] :
identifier[upload_func] = identifier[self] . identifier[_upload_simple]
identifier[retries] = identifier[UPLOAD_RETRY_COUNT]
keyword[while] identifier[retries] > literal[int] :
keyword[try] :
identifier[upload_result] = identifier[upload_func] ( identifier[upload_info] , identifier[check_result] )
keyword[except] ( identifier[RetriableUploadError] , identifier[MediaFireConnectionError] ):
identifier[retries] -= literal[int]
identifier[logger] . identifier[exception] ( literal[string] ,
identifier[upload_func] . identifier[__name__] , identifier[retries] )
identifier[check_result] = identifier[self] . identifier[_upload_check] ( identifier[upload_info] , identifier[resumable] )
keyword[except] identifier[Exception] :
identifier[logger] . identifier[exception] ( literal[string] , identifier[upload_func] )
keyword[break]
keyword[else] :
keyword[break]
keyword[if] identifier[upload_result] keyword[is] keyword[None] :
keyword[raise] identifier[UploadError] ( literal[string] )
keyword[return] identifier[upload_result] | def upload(self, fd, name=None, folder_key=None, filedrop_key=None, path=None, action_on_duplicate=None):
"""Upload file, returns UploadResult object
fd -- file-like object to upload from, expects exclusive access
name -- file name
folder_key -- folderkey of the target folder
path -- path to file relative to folder_key
filedrop_key -- filedrop to use instead of folder_key
action_on_duplicate -- skip, keep, replace
"""
# Get file handle content length in the most reliable way
fd.seek(0, os.SEEK_END)
size = fd.tell()
fd.seek(0, os.SEEK_SET)
if size > UPLOAD_SIMPLE_LIMIT_BYTES:
resumable = True # depends on [control=['if'], data=[]]
else:
resumable = False
logger.debug('Calculating checksum')
hash_info = compute_hash_info(fd)
if hash_info.size != size:
# Has the file changed between computing the hash
# and calling upload()?
raise ValueError('hash_info.size mismatch') # depends on [control=['if'], data=[]]
upload_info = _UploadInfo(fd=fd, name=name, folder_key=folder_key, hash_info=hash_info, size=size, path=path, filedrop_key=filedrop_key, action_on_duplicate=action_on_duplicate)
# Check whether file is present
check_result = self._upload_check(upload_info, resumable)
upload_result = None
upload_func = None
folder_key = check_result.get('folder_key', None)
if folder_key is not None:
# We know precisely what folder_key to use, drop path
upload_info.folder_key = folder_key
upload_info.path = None # depends on [control=['if'], data=['folder_key']]
if check_result['hash_exists'] == 'yes':
# file exists somewhere in MediaFire
if check_result['in_folder'] == 'yes' and check_result['file_exists'] == 'yes':
# file exists in this directory
different_hash = check_result.get('different_hash', 'no')
if different_hash == 'no':
# file is already there
upload_func = self._upload_none # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not upload_func:
# different hash or in other folder
upload_func = self._upload_instant # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not upload_func:
if resumable:
resumable_upload_info = check_result['resumable_upload']
upload_info.hash_info = compute_hash_info(fd, int(resumable_upload_info['unit_size']))
upload_func = self._upload_resumable # depends on [control=['if'], data=[]]
else:
upload_func = self._upload_simple # depends on [control=['if'], data=[]]
# Retry retriable exceptions
retries = UPLOAD_RETRY_COUNT
while retries > 0:
try:
# Provide check_result to avoid calling API twice
upload_result = upload_func(upload_info, check_result) # depends on [control=['try'], data=[]]
except (RetriableUploadError, MediaFireConnectionError):
retries -= 1
logger.exception('%s failed (%d retries left)', upload_func.__name__, retries)
# Refresh check_result for next iteration
check_result = self._upload_check(upload_info, resumable) # depends on [control=['except'], data=[]]
except Exception:
logger.exception('%s failed', upload_func)
break # depends on [control=['except'], data=[]]
else:
break # depends on [control=['while'], data=['retries']]
if upload_result is None:
raise UploadError('Upload failed') # depends on [control=['if'], data=[]]
return upload_result |
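A hedged call sketch for upload(); the uploader instance and folder key are made up, and only parameters documented in the docstring are passed.

with open('backup.tar.gz', 'rb') as fd:
    result = uploader.upload(fd,
                             name='backup.tar.gz',
                             folder_key='abcd1234',  # hypothetical folderkey
                             action_on_duplicate='replace')
print(result)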
def weighted_sample(bn, e):
"""Sample an event from bn that's consistent with the evidence e;
return the event and its weight, the likelihood that the event
accords to the evidence."""
w = 1
event = dict(e) # boldface x in Fig. 14.15
for node in bn.nodes:
Xi = node.variable
if Xi in e:
w *= node.p(e[Xi], event)
else:
event[Xi] = node.sample(event)
return event, w | def function[weighted_sample, parameter[bn, e]]:
constant[Sample an event from bn that's consistent with the evidence e;
return the event and its weight, the likelihood that the event
accords to the evidence.]
variable[w] assign[=] constant[1]
variable[event] assign[=] call[name[dict], parameter[name[e]]]
for taget[name[node]] in starred[name[bn].nodes] begin[:]
variable[Xi] assign[=] name[node].variable
if compare[name[Xi] in name[e]] begin[:]
<ast.AugAssign object at 0x7da1b021d2d0>
return[tuple[[<ast.Name object at 0x7da1b021d420>, <ast.Name object at 0x7da1b021f310>]]] | keyword[def] identifier[weighted_sample] ( identifier[bn] , identifier[e] ):
literal[string]
identifier[w] = literal[int]
identifier[event] = identifier[dict] ( identifier[e] )
keyword[for] identifier[node] keyword[in] identifier[bn] . identifier[nodes] :
identifier[Xi] = identifier[node] . identifier[variable]
keyword[if] identifier[Xi] keyword[in] identifier[e] :
identifier[w] *= identifier[node] . identifier[p] ( identifier[e] [ identifier[Xi] ], identifier[event] )
keyword[else] :
identifier[event] [ identifier[Xi] ]= identifier[node] . identifier[sample] ( identifier[event] )
keyword[return] identifier[event] , identifier[w] | def weighted_sample(bn, e):
"""Sample an event from bn that's consistent with the evidence e;
return the event and its weight, the likelihood that the event
accords to the evidence."""
w = 1
event = dict(e) # boldface x in Fig. 14.15
for node in bn.nodes:
Xi = node.variable
if Xi in e:
w *= node.p(e[Xi], event) # depends on [control=['if'], data=['Xi', 'e']]
else:
event[Xi] = node.sample(event) # depends on [control=['for'], data=['node']]
return (event, w) |
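weighted_sample is the inner step of likelihood weighting; a minimal outer loop consistent with it is sketched below (the estimator name and the Bayes-net interface are assumptions, following the AIMA convention that the Fig. 14.15 comment suggests).

from collections import defaultdict

def likelihood_weighting(X, e, bn, N=1000):
    # Hypothetical driver: estimate P(X | e) by accumulating the weights
    # of N events drawn with weighted_sample.
    W = defaultdict(float)
    for _ in range(N):
        event, weight = weighted_sample(bn, e)
        W[event[X]] += weight
    total = sum(W.values())
    return {value: w / total for value, w in W.items()}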
def keystone(*arg):
"""
Decorator for adding a function to process keystone notifications.
If event_type includes a wildcard, {pattern: function} is put into the process_wildcard dict;
otherwise {event_type: function} is put into the process dict.
:param arg: event_type of notification
"""
check_event_type(Openstack.Keystone, *arg)
event_type = arg[0]
def decorator(func):
if event_type.find("*") != -1:
event_type_pattern = pre_compile(event_type)
keystone_customer_process_wildcard[event_type_pattern] = func
else:
keystone_customer_process[event_type] = func
log.info("add function {0} to process event_type:{1}".format(func.__name__, event_type))
@functools.wraps(func)
def wrapper(*args, **kwargs):
func(*args, **kwargs)
return wrapper
return decorator | def function[keystone, parameter[]]:
constant[
Decorator for adding a function to process keystone notifications.
If event_type includes a wildcard, {pattern: function} is put into the process_wildcard dict;
otherwise {event_type: function} is put into the process dict.
:param arg: event_type of notification
]
call[name[check_event_type], parameter[name[Openstack].Keystone, <ast.Starred object at 0x7da20c6e6740>]]
variable[event_type] assign[=] call[name[arg]][constant[0]]
def function[decorator, parameter[func]]:
if compare[call[name[event_type].find, parameter[constant[*]]] not_equal[!=] <ast.UnaryOp object at 0x7da20c6e4e20>] begin[:]
variable[event_type_pattern] assign[=] call[name[pre_compile], parameter[name[event_type]]]
call[name[keystone_customer_process_wildcard]][name[event_type_pattern]] assign[=] name[func]
call[name[log].info, parameter[call[constant[add function {0} to process event_type:{1}].format, parameter[name[func].__name__, name[event_type]]]]]
def function[wrapper, parameter[]]:
call[name[func], parameter[<ast.Starred object at 0x7da20c6e5cc0>]]
return[name[wrapper]]
return[name[decorator]] | keyword[def] identifier[keystone] (* identifier[arg] ):
literal[string]
identifier[check_event_type] ( identifier[Openstack] . identifier[Keystone] ,* identifier[arg] )
identifier[event_type] = identifier[arg] [ literal[int] ]
keyword[def] identifier[decorator] ( identifier[func] ):
keyword[if] identifier[event_type] . identifier[find] ( literal[string] )!=- literal[int] :
identifier[event_type_pattern] = identifier[pre_compile] ( identifier[event_type] )
identifier[keystone_customer_process_wildcard] [ identifier[event_type_pattern] ]= identifier[func]
keyword[else] :
identifier[keystone_customer_process] [ identifier[event_type] ]= identifier[func]
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[func] . identifier[__name__] , identifier[event_type] ))
@ identifier[functools] . identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapper]
keyword[return] identifier[decorator] | def keystone(*arg):
"""
Decorator for adding a function to process keystone notifications.
If event_type includes a wildcard, {pattern: function} is put into the process_wildcard dict;
otherwise {event_type: function} is put into the process dict.
:param arg: event_type of notification
"""
check_event_type(Openstack.Keystone, *arg)
event_type = arg[0]
def decorator(func):
if event_type.find('*') != -1:
event_type_pattern = pre_compile(event_type)
keystone_customer_process_wildcard[event_type_pattern] = func # depends on [control=['if'], data=[]]
else:
keystone_customer_process[event_type] = func
log.info('add function {0} to process event_type:{1}'.format(func.__name__, event_type))
@functools.wraps(func)
def wrapper(*args, **kwargs):
func(*args, **kwargs)
return wrapper
return decorator |
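A usage sketch for the keystone decorator above; the event-type strings and handler bodies are assumptions, not values the project necessarily accepts.

@keystone('identity.user.created')
def on_user_created(notification):
    print('new user:', notification)

@keystone('identity.project.*')  # wildcard form, stored pre-compiled
def on_project_event(notification):
    print('project event:', notification)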
def patch_namespaced_custom_object_status(self, group, version, namespace, plural, name, body, **kwargs): # noqa: E501
"""patch_namespaced_custom_object_status # noqa: E501
partially update status of the specified namespace scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_custom_object_status(group, version, namespace, plural, name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str namespace: The custom resource's namespace (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:param UNKNOWN_BASE_TYPE body: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_custom_object_status_with_http_info(group, version, namespace, plural, name, body, **kwargs) # noqa: E501
else:
(data) = self.patch_namespaced_custom_object_status_with_http_info(group, version, namespace, plural, name, body, **kwargs) # noqa: E501
return data | def function[patch_namespaced_custom_object_status, parameter[self, group, version, namespace, plural, name, body]]:
constant[patch_namespaced_custom_object_status # noqa: E501
partially update status of the specified namespace scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_custom_object_status(group, version, namespace, plural, name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str namespace: The custom resource's namespace (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:param UNKNOWN_BASE_TYPE body: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].patch_namespaced_custom_object_status_with_http_info, parameter[name[group], name[version], name[namespace], name[plural], name[name], name[body]]]] | keyword[def] identifier[patch_namespaced_custom_object_status] ( identifier[self] , identifier[group] , identifier[version] , identifier[namespace] , identifier[plural] , identifier[name] , identifier[body] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[patch_namespaced_custom_object_status_with_http_info] ( identifier[group] , identifier[version] , identifier[namespace] , identifier[plural] , identifier[name] , identifier[body] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[patch_namespaced_custom_object_status_with_http_info] ( identifier[group] , identifier[version] , identifier[namespace] , identifier[plural] , identifier[name] , identifier[body] ,** identifier[kwargs] )
keyword[return] identifier[data] | def patch_namespaced_custom_object_status(self, group, version, namespace, plural, name, body, **kwargs): # noqa: E501
"patch_namespaced_custom_object_status # noqa: E501\n\n partially update status of the specified namespace scoped custom object # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.patch_namespaced_custom_object_status(group, version, namespace, plural, name, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str group: the custom resource's group (required)\n :param str version: the custom resource's version (required)\n :param str namespace: The custom resource's namespace (required)\n :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)\n :param str name: the custom object's name (required)\n :param UNKNOWN_BASE_TYPE body: (required)\n :return: object\n If the method is called asynchronously,\n returns the request thread.\n "
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_custom_object_status_with_http_info(group, version, namespace, plural, name, body, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.patch_namespaced_custom_object_status_with_http_info(group, version, namespace, plural, name, body, **kwargs) # noqa: E501
return data |
def antipode(lon, lat):
"""
Calculates the antipode (opposite point on the globe) of the given point or
points. Input and output is expected to be in radians.
Parameters
----------
lon : number or sequence of numbers
Longitude in radians
lat : number or sequence of numbers
Latitude in radians
Returns
-------
lon, lat : arrays
Sequences (regardless of whether or not the input was a single value or
a sequence) of longitude and latitude in radians.
"""
x, y, z = sph2cart(lon, lat)
return cart2sph(-x, -y, -z) | def function[antipode, parameter[lon, lat]]:
constant[
Calculates the antipode (opposite point on the globe) of the given point or
points. Input and output is expected to be in radians.
Parameters
----------
lon : number or sequence of numbers
Longitude in radians
lat : number or sequence of numbers
Latitude in radians
Returns
-------
lon, lat : arrays
Sequences (regardless of whether or not the input was a single value or
a sequence) of longitude and latitude in radians.
]
<ast.Tuple object at 0x7da204622c20> assign[=] call[name[sph2cart], parameter[name[lon], name[lat]]]
return[call[name[cart2sph], parameter[<ast.UnaryOp object at 0x7da2046206d0>, <ast.UnaryOp object at 0x7da20e9b0dc0>, <ast.UnaryOp object at 0x7da20e9b14e0>]]] | keyword[def] identifier[antipode] ( identifier[lon] , identifier[lat] ):
literal[string]
identifier[x] , identifier[y] , identifier[z] = identifier[sph2cart] ( identifier[lon] , identifier[lat] )
keyword[return] identifier[cart2sph] (- identifier[x] ,- identifier[y] ,- identifier[z] ) | def antipode(lon, lat):
"""
Calculates the antipode (opposite point on the globe) of the given point or
points. Input and output is expected to be in radians.
Parameters
----------
lon : number or sequence of numbers
Longitude in radians
lat : number or sequence of numbers
Latitude in radians
Returns
-------
lon, lat : arrays
Sequences (regardless of whether or not the input was a single value or
a sequence) of longitude and latitude in radians.
"""
(x, y, z) = sph2cart(lon, lat)
return cart2sph(-x, -y, -z) |
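A quick numeric check of antipode, assuming sph2cart/cart2sph follow the usual unit-sphere conventions (longitude from atan2, latitude from arcsin).

import numpy as np

lon, lat = np.radians(30.0), np.radians(45.0)
alon, alat = antipode(lon, lat)
print(np.degrees(alon), np.degrees(alat))  # roughly -150.0 -45.0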
def _parse_meta(self, meta):
"""
Parse the _meta element from a dynamic host inventory output.
"""
for hostname, hostvars in meta.get('hostvars', {}).items():
for var_key, var_val in hostvars.items():
self._get_host(hostname)['hostvars'][var_key] = var_val | def function[_parse_meta, parameter[self, meta]]:
constant[
Parse the _meta element from a dynamic host inventory output.
]
for taget[tuple[[<ast.Name object at 0x7da1b1dfac80>, <ast.Name object at 0x7da1b1dfabf0>]]] in starred[call[call[name[meta].get, parameter[constant[hostvars], dictionary[[], []]]].items, parameter[]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1df8c70>, <ast.Name object at 0x7da1b1df8d60>]]] in starred[call[name[hostvars].items, parameter[]]] begin[:]
call[call[call[name[self]._get_host, parameter[name[hostname]]]][constant[hostvars]]][name[var_key]] assign[=] name[var_val] | keyword[def] identifier[_parse_meta] ( identifier[self] , identifier[meta] ):
literal[string]
keyword[for] identifier[hostname] , identifier[hostvars] keyword[in] identifier[meta] . identifier[get] ( literal[string] ,{}). identifier[items] ():
keyword[for] identifier[var_key] , identifier[var_val] keyword[in] identifier[hostvars] . identifier[items] ():
identifier[self] . identifier[_get_host] ( identifier[hostname] )[ literal[string] ][ identifier[var_key] ]= identifier[var_val] | def _parse_meta(self, meta):
"""
Parse the _meta element from a dynamic host inventory output.
"""
for (hostname, hostvars) in meta.get('hostvars', {}).items():
for (var_key, var_val) in hostvars.items():
self._get_host(hostname)['hostvars'][var_key] = var_val # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] |
def create(self, params, args, data):
# type: (str, dict, dict) -> AppModel
"""
POST /resource/model_cls/
data
Create new resource
"""
ctx = self._create_context(params, args, data)
model = self._create_one(ctx)
self._save_one(model, ctx)
return self._return_saved_one(model, ctx) | def function[create, parameter[self, params, args, data]]:
constant[
POST /resource/model_cls/
data
Create new resource
]
variable[ctx] assign[=] call[name[self]._create_context, parameter[name[params], name[args], name[data]]]
variable[model] assign[=] call[name[self]._create_one, parameter[name[ctx]]]
call[name[self]._save_one, parameter[name[model], name[ctx]]]
return[call[name[self]._return_saved_one, parameter[name[model], name[ctx]]]] | keyword[def] identifier[create] ( identifier[self] , identifier[params] , identifier[args] , identifier[data] ):
literal[string]
identifier[ctx] = identifier[self] . identifier[_create_context] ( identifier[params] , identifier[args] , identifier[data] )
identifier[model] = identifier[self] . identifier[_create_one] ( identifier[ctx] )
identifier[self] . identifier[_save_one] ( identifier[model] , identifier[ctx] )
keyword[return] identifier[self] . identifier[_return_saved_one] ( identifier[model] , identifier[ctx] ) | def create(self, params, args, data):
# type: (str, dict, dict) -> AppModel
'\n POST /resource/model_cls/\n data\n\n Create new resource\n '
ctx = self._create_context(params, args, data)
model = self._create_one(ctx)
self._save_one(model, ctx)
return self._return_saved_one(model, ctx) |
def runRemoteCommand(self, cmd, args, abandonOnFailure=True,
evaluateCommand=lambda cmd: cmd.didFail()):
"""generic RemoteCommand boilerplate"""
cmd = remotecommand.RemoteCommand(cmd, args)
if hasattr(self, "rc_log"):
cmd.useLog(self.rc_log, False)
d = self.runCommand(cmd)
def commandComplete(cmd):
if abandonOnFailure and cmd.didFail():
raise buildstep.BuildStepFailed()
return evaluateCommand(cmd)
d.addCallback(lambda res: commandComplete(cmd))
return d | def function[runRemoteCommand, parameter[self, cmd, args, abandonOnFailure, evaluateCommand]]:
constant[generic RemoteCommand boilerplate]
variable[cmd] assign[=] call[name[remotecommand].RemoteCommand, parameter[name[cmd], name[args]]]
if call[name[hasattr], parameter[name[self], constant[rc_log]]] begin[:]
call[name[cmd].useLog, parameter[name[self].rc_log, constant[False]]]
variable[d] assign[=] call[name[self].runCommand, parameter[name[cmd]]]
def function[commandComplete, parameter[cmd]]:
if <ast.BoolOp object at 0x7da1b2099150> begin[:]
<ast.Raise object at 0x7da1b2099c30>
return[call[name[evaluateCommand], parameter[name[cmd]]]]
call[name[d].addCallback, parameter[<ast.Lambda object at 0x7da1b20981f0>]]
return[name[d]] | keyword[def] identifier[runRemoteCommand] ( identifier[self] , identifier[cmd] , identifier[args] , identifier[abandonOnFailure] = keyword[True] ,
identifier[evaluateCommand] = keyword[lambda] identifier[cmd] : identifier[cmd] . identifier[didFail] ()):
literal[string]
identifier[cmd] = identifier[remotecommand] . identifier[RemoteCommand] ( identifier[cmd] , identifier[args] )
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[cmd] . identifier[useLog] ( identifier[self] . identifier[rc_log] , keyword[False] )
identifier[d] = identifier[self] . identifier[runCommand] ( identifier[cmd] )
keyword[def] identifier[commandComplete] ( identifier[cmd] ):
keyword[if] identifier[abandonOnFailure] keyword[and] identifier[cmd] . identifier[didFail] ():
keyword[raise] identifier[buildstep] . identifier[BuildStepFailed] ()
keyword[return] identifier[evaluateCommand] ( identifier[cmd] )
identifier[d] . identifier[addCallback] ( keyword[lambda] identifier[res] : identifier[commandComplete] ( identifier[cmd] ))
keyword[return] identifier[d] | def runRemoteCommand(self, cmd, args, abandonOnFailure=True, evaluateCommand=lambda cmd: cmd.didFail()):
"""generic RemoteCommand boilerplate"""
cmd = remotecommand.RemoteCommand(cmd, args)
if hasattr(self, 'rc_log'):
cmd.useLog(self.rc_log, False) # depends on [control=['if'], data=[]]
d = self.runCommand(cmd)
def commandComplete(cmd):
if abandonOnFailure and cmd.didFail():
raise buildstep.BuildStepFailed() # depends on [control=['if'], data=[]]
return evaluateCommand(cmd)
d.addCallback(lambda res: commandComplete(cmd))
return d |
def parse_args():
"""Parse arguments."""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Diagnose script for checking the current system.')
choices = ['python', 'pip', 'mxnet', 'os', 'hardware', 'network']
for choice in choices:
parser.add_argument('--' + choice, default=1, type=int,
help='Diagnose {}.'.format(choice))
parser.add_argument('--region', default='', type=str,
help="Additional sites in which region(s) to test. \
Specify 'cn' for example to test mirror sites in China.")
parser.add_argument('--timeout', default=10, type=int,
help="Connection test timeout threshold, 0 to disable.")
args = parser.parse_args()
return args | def function[parse_args, parameter[]]:
constant[Parse arguments.]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
variable[choices] assign[=] list[[<ast.Constant object at 0x7da20e955a50>, <ast.Constant object at 0x7da20e9547c0>, <ast.Constant object at 0x7da20e955540>, <ast.Constant object at 0x7da20e956530>, <ast.Constant object at 0x7da20e954d00>, <ast.Constant object at 0x7da20e9544f0>]]
for taget[name[choice]] in starred[name[choices]] begin[:]
call[name[parser].add_argument, parameter[binary_operation[constant[--] + name[choice]]]]
call[name[parser].add_argument, parameter[constant[--region]]]
call[name[parser].add_argument, parameter[constant[--timeout]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[]]
return[name[args]] | keyword[def] identifier[parse_args] ():
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] (
identifier[formatter_class] = identifier[argparse] . identifier[ArgumentDefaultsHelpFormatter] ,
identifier[description] = literal[string] )
identifier[choices] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[for] identifier[choice] keyword[in] identifier[choices] :
identifier[parser] . identifier[add_argument] ( literal[string] + identifier[choice] , identifier[default] = literal[int] , identifier[type] = identifier[int] ,
identifier[help] = literal[string] . identifier[format] ( identifier[choice] ))
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[default] = literal[string] , identifier[type] = identifier[str] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[default] = literal[int] , identifier[type] = identifier[int] ,
identifier[help] = literal[string] )
identifier[args] = identifier[parser] . identifier[parse_args] ()
keyword[return] identifier[args] | def parse_args():
"""Parse arguments."""
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description='Diagnose script for checking the current system.')
choices = ['python', 'pip', 'mxnet', 'os', 'hardware', 'network']
for choice in choices:
parser.add_argument('--' + choice, default=1, type=int, help='Diagnose {}.'.format(choice)) # depends on [control=['for'], data=['choice']]
parser.add_argument('--region', default='', type=str, help="Additional sites in which region(s) to test. Specify 'cn' for example to test mirror sites in China.")
parser.add_argument('--timeout', default=10, type=int, help='Connection test timeout threshold, 0 to disable.')
args = parser.parse_args()
return args |
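A minimal invocation sketch for the parser above, assuming argparse is imported at module level as in the original script; the script name and flag values below are invented for illustration:

import sys

sys.argv = ['diagnose.py', '--network', '0', '--region', 'cn', '--timeout', '5']
args = parse_args()
assert args.network == 0 and args.region == 'cn' and args.timeout == 5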
def toVertical(self,R,phi=None):
"""
NAME:
toVertical
PURPOSE:
convert a 3D potential into a linear (vertical) potential at R
INPUT:
R - Galactocentric radius at which to create the vertical potential (can be Quantity)
phi= (None) Galactocentric azimuth at which to create the vertical potential (can be Quantity); required for non-axisymmetric potential
OUTPUT:
linear (vertical) potential
        HISTORY:
unknown
"""
if _APY_LOADED and isinstance(R,units.Quantity):
R= R.to(units.kpc).value/self._ro
if _APY_LOADED and isinstance(phi,units.Quantity):
phi= phi.to(units.rad).value
from galpy.potential import toVerticalPotential
return toVerticalPotential(self,R,phi=phi) | def function[toVertical, parameter[self, R, phi]]:
constant[
NAME:
toVertical
PURPOSE:
convert a 3D potential into a linear (vertical) potential at R
INPUT:
R - Galactocentric radius at which to create the vertical potential (can be Quantity)
phi= (None) Galactocentric azimuth at which to create the vertical potential (can be Quantity); required for non-axisymmetric potential
OUTPUT:
linear (vertical) potential
        HISTORY:
unknown
]
if <ast.BoolOp object at 0x7da1b0c96050> begin[:]
variable[R] assign[=] binary_operation[call[name[R].to, parameter[name[units].kpc]].value / name[self]._ro]
if <ast.BoolOp object at 0x7da1b0c97c10> begin[:]
variable[phi] assign[=] call[name[phi].to, parameter[name[units].rad]].value
from relative_module[galpy.potential] import module[toVerticalPotential]
return[call[name[toVerticalPotential], parameter[name[self], name[R]]]] | keyword[def] identifier[toVertical] ( identifier[self] , identifier[R] , identifier[phi] = keyword[None] ):
literal[string]
keyword[if] identifier[_APY_LOADED] keyword[and] identifier[isinstance] ( identifier[R] , identifier[units] . identifier[Quantity] ):
identifier[R] = identifier[R] . identifier[to] ( identifier[units] . identifier[kpc] ). identifier[value] / identifier[self] . identifier[_ro]
keyword[if] identifier[_APY_LOADED] keyword[and] identifier[isinstance] ( identifier[phi] , identifier[units] . identifier[Quantity] ):
identifier[phi] = identifier[phi] . identifier[to] ( identifier[units] . identifier[rad] ). identifier[value]
keyword[from] identifier[galpy] . identifier[potential] keyword[import] identifier[toVerticalPotential]
keyword[return] identifier[toVerticalPotential] ( identifier[self] , identifier[R] , identifier[phi] = identifier[phi] ) | def toVertical(self, R, phi=None):
"""
NAME:
toVertical
PURPOSE:
convert a 3D potential into a linear (vertical) potential at R
INPUT:
R - Galactocentric radius at which to create the vertical potential (can be Quantity)
phi= (None) Galactocentric azimuth at which to create the vertical potential (can be Quantity); required for non-axisymmetric potential
OUTPUT:
linear (vertical) potential
        HISTORY:
unknown
"""
if _APY_LOADED and isinstance(R, units.Quantity):
R = R.to(units.kpc).value / self._ro # depends on [control=['if'], data=[]]
if _APY_LOADED and isinstance(phi, units.Quantity):
phi = phi.to(units.rad).value # depends on [control=['if'], data=[]]
from galpy.potential import toVerticalPotential
return toVerticalPotential(self, R, phi=phi) |
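A hedged usage sketch, assuming galpy is installed; the MiyamotoNagaiPotential and its parameter values are just an illustrative choice, not part of the method above:

from galpy.potential import MiyamotoNagaiPotential

mp = MiyamotoNagaiPotential(a=0.5, b=0.0375, normalize=1.)
vert = mp.toVertical(1.0)  # linear (vertical) potential at R = 1 in natural units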
def split_results(self):
"""
Convenience method to separate failed and successful results.
.. versionadded:: 2.0.0
This function will split the results of the failed operation
(see :attr:`.all_results`) into "good" and "bad" dictionaries.
The intent is for the application to handle any successful
results in a success code path, and handle any failed results
in a "retry" code path. For example
.. code-block:: python
try:
cb.add_multi(docs)
except CouchbaseTransientError as e:
# Temporary failure or server OOM
_, fail = e.split_results()
# Sleep for a bit to reduce the load on the server
time.sleep(0.5)
# Try to add only the failed results again
cb.add_multi(fail)
Of course, in the example above, the second retry may fail as
well, and a more robust implementation is left as an exercise
to the reader.
:return: A tuple of ( `ok`, `bad` ) dictionaries.
"""
ret_ok, ret_fail = {}, {}
count = 0
nokey_prefix = ([""] + sorted(filter(bool, self.all_results.keys())))[-1]
for key, v in self.all_results.items():
if not key:
key = nokey_prefix + ":nokey:" + str(count)
count += 1
success = getattr(v,'success', True)
if success:
ret_ok[key] = v
else:
ret_fail[key] = v
return ret_ok, ret_fail | def function[split_results, parameter[self]]:
constant[
Convenience method to separate failed and successful results.
.. versionadded:: 2.0.0
This function will split the results of the failed operation
(see :attr:`.all_results`) into "good" and "bad" dictionaries.
The intent is for the application to handle any successful
results in a success code path, and handle any failed results
in a "retry" code path. For example
.. code-block:: python
try:
cb.add_multi(docs)
except CouchbaseTransientError as e:
# Temporary failure or server OOM
_, fail = e.split_results()
# Sleep for a bit to reduce the load on the server
time.sleep(0.5)
# Try to add only the failed results again
cb.add_multi(fail)
Of course, in the example above, the second retry may fail as
well, and a more robust implementation is left as an exercise
to the reader.
:return: A tuple of ( `ok`, `bad` ) dictionaries.
]
<ast.Tuple object at 0x7da18f723ee0> assign[=] tuple[[<ast.Dict object at 0x7da18f723370>, <ast.Dict object at 0x7da18f722e90>]]
variable[count] assign[=] constant[0]
variable[nokey_prefix] assign[=] call[binary_operation[list[[<ast.Constant object at 0x7da18f7209a0>]] + call[name[sorted], parameter[call[name[filter], parameter[name[bool], call[name[self].all_results.keys, parameter[]]]]]]]][<ast.UnaryOp object at 0x7da18f722d70>]
for taget[tuple[[<ast.Name object at 0x7da18f7212a0>, <ast.Name object at 0x7da18f723280>]]] in starred[call[name[self].all_results.items, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b26aee90> begin[:]
variable[key] assign[=] binary_operation[binary_operation[name[nokey_prefix] + constant[:nokey:]] + call[name[str], parameter[name[count]]]]
<ast.AugAssign object at 0x7da18f721030>
variable[success] assign[=] call[name[getattr], parameter[name[v], constant[success], constant[True]]]
if name[success] begin[:]
call[name[ret_ok]][name[key]] assign[=] name[v]
return[tuple[[<ast.Name object at 0x7da18f720370>, <ast.Name object at 0x7da18f721450>]]] | keyword[def] identifier[split_results] ( identifier[self] ):
literal[string]
identifier[ret_ok] , identifier[ret_fail] ={},{}
identifier[count] = literal[int]
identifier[nokey_prefix] =([ literal[string] ]+ identifier[sorted] ( identifier[filter] ( identifier[bool] , identifier[self] . identifier[all_results] . identifier[keys] ())))[- literal[int] ]
keyword[for] identifier[key] , identifier[v] keyword[in] identifier[self] . identifier[all_results] . identifier[items] ():
keyword[if] keyword[not] identifier[key] :
identifier[key] = identifier[nokey_prefix] + literal[string] + identifier[str] ( identifier[count] )
identifier[count] += literal[int]
identifier[success] = identifier[getattr] ( identifier[v] , literal[string] , keyword[True] )
keyword[if] identifier[success] :
identifier[ret_ok] [ identifier[key] ]= identifier[v]
keyword[else] :
identifier[ret_fail] [ identifier[key] ]= identifier[v]
keyword[return] identifier[ret_ok] , identifier[ret_fail] | def split_results(self):
"""
Convenience method to separate failed and successful results.
.. versionadded:: 2.0.0
This function will split the results of the failed operation
(see :attr:`.all_results`) into "good" and "bad" dictionaries.
The intent is for the application to handle any successful
results in a success code path, and handle any failed results
in a "retry" code path. For example
.. code-block:: python
try:
cb.add_multi(docs)
except CouchbaseTransientError as e:
# Temporary failure or server OOM
_, fail = e.split_results()
# Sleep for a bit to reduce the load on the server
time.sleep(0.5)
# Try to add only the failed results again
cb.add_multi(fail)
Of course, in the example above, the second retry may fail as
well, and a more robust implementation is left as an exercise
to the reader.
:return: A tuple of ( `ok`, `bad` ) dictionaries.
"""
(ret_ok, ret_fail) = ({}, {})
count = 0
nokey_prefix = ([''] + sorted(filter(bool, self.all_results.keys())))[-1]
for (key, v) in self.all_results.items():
if not key:
key = nokey_prefix + ':nokey:' + str(count)
count += 1 # depends on [control=['if'], data=[]]
success = getattr(v, 'success', True)
if success:
ret_ok[key] = v # depends on [control=['if'], data=[]]
else:
ret_fail[key] = v # depends on [control=['for'], data=[]]
return (ret_ok, ret_fail) |
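A standalone sketch that calls split_results as a plain function on a stand-in object; SimpleNamespace fakes the couchbase result types, which only need a truthy or falsy `success` attribute here:

from types import SimpleNamespace

err = SimpleNamespace(all_results={
    'a': SimpleNamespace(success=True),
    'b': SimpleNamespace(success=False),
})
ok, bad = split_results(err)
assert list(ok) == ['a'] and list(bad) == ['b']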
def get_crime_category(self, id, date=None):
"""
Get a particular crime category by ID, valid at a particular date. Uses
the crime-categories_ API call.
:rtype: CrimeCategory
:param str id: The ID of the crime category to get.
:param date: The date that the given crime category is valid for (the
latest date is used if ``None``).
:type date: str or None
:return: A crime category with the given ID which is valid for the
specified date (or at the latest date, if ``None``).
"""
try:
return self._get_crime_categories(date=date)[id]
except KeyError:
raise InvalidCategoryException(
'Category %s not found for %s' % (id, date)) | def function[get_crime_category, parameter[self, id, date]]:
constant[
Get a particular crime category by ID, valid at a particular date. Uses
the crime-categories_ API call.
:rtype: CrimeCategory
:param str id: The ID of the crime category to get.
:param date: The date that the given crime category is valid for (the
latest date is used if ``None``).
:type date: str or None
:return: A crime category with the given ID which is valid for the
specified date (or at the latest date, if ``None``).
]
<ast.Try object at 0x7da1b0393520> | keyword[def] identifier[get_crime_category] ( identifier[self] , identifier[id] , identifier[date] = keyword[None] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[_get_crime_categories] ( identifier[date] = identifier[date] )[ identifier[id] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[InvalidCategoryException] (
literal[string] %( identifier[id] , identifier[date] )) | def get_crime_category(self, id, date=None):
"""
Get a particular crime category by ID, valid at a particular date. Uses
the crime-categories_ API call.
:rtype: CrimeCategory
:param str id: The ID of the crime category to get.
:param date: The date that the given crime category is valid for (the
latest date is used if ``None``).
:type date: str or None
:return: A crime category with the given ID which is valid for the
specified date (or at the latest date, if ``None``).
"""
try:
return self._get_crime_categories(date=date)[id] # depends on [control=['try'], data=[]]
except KeyError:
raise InvalidCategoryException('Category %s not found for %s' % (id, date)) # depends on [control=['except'], data=[]] |
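A stand-in sketch exercising only the lookup path; the fake client and the category data are invented, and the InvalidCategoryException branch is never reached:

from types import SimpleNamespace

client = SimpleNamespace(_get_crime_categories=lambda date=None: {'all-crime': 'All crime'})
print(get_crime_category(client, 'all-crime'))  # -> All crime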
def _flatten_action_profile(action_profile, indptr):
"""
Flatten the given action profile.
Parameters
----------
action_profile : array_like(int or array_like(float, ndim=1))
        Profile of actions of the N players, where each player i's action
is a pure action (int) or a mixed action (array_like of floats
of length n_i).
indptr : array_like(int, ndim=1)
Array of index pointers of length N+1, where `indptr[0] = 0` and
`indptr[i+1] = indptr[i] + n_i`.
Returns
-------
out : ndarray(float, ndim=1)
Array of flattened mixed action profile of length equal to n_0 +
... + n_N-1, where `out[indptr[i]:indptr[i+1]]` contains player
i's mixed action.
"""
N = len(indptr) - 1
out = np.empty(indptr[-1])
for i in range(N):
if isinstance(action_profile[i], numbers.Integral): # pure action
num_actions = indptr[i+1] - indptr[i]
mixed_action = pure2mixed(num_actions, action_profile[i])
else: # mixed action
mixed_action = action_profile[i]
out[indptr[i]:indptr[i+1]] = mixed_action
return out | def function[_flatten_action_profile, parameter[action_profile, indptr]]:
constant[
Flatten the given action profile.
Parameters
----------
action_profile : array_like(int or array_like(float, ndim=1))
        Profile of actions of the N players, where each player i's action
is a pure action (int) or a mixed action (array_like of floats
of length n_i).
indptr : array_like(int, ndim=1)
Array of index pointers of length N+1, where `indptr[0] = 0` and
`indptr[i+1] = indptr[i] + n_i`.
Returns
-------
out : ndarray(float, ndim=1)
Array of flattened mixed action profile of length equal to n_0 +
... + n_N-1, where `out[indptr[i]:indptr[i+1]]` contains player
i's mixed action.
]
variable[N] assign[=] binary_operation[call[name[len], parameter[name[indptr]]] - constant[1]]
variable[out] assign[=] call[name[np].empty, parameter[call[name[indptr]][<ast.UnaryOp object at 0x7da1b1cb18d0>]]]
for taget[name[i]] in starred[call[name[range], parameter[name[N]]]] begin[:]
if call[name[isinstance], parameter[call[name[action_profile]][name[i]], name[numbers].Integral]] begin[:]
variable[num_actions] assign[=] binary_operation[call[name[indptr]][binary_operation[name[i] + constant[1]]] - call[name[indptr]][name[i]]]
variable[mixed_action] assign[=] call[name[pure2mixed], parameter[name[num_actions], call[name[action_profile]][name[i]]]]
call[name[out]][<ast.Slice object at 0x7da204566200>] assign[=] name[mixed_action]
return[name[out]] | keyword[def] identifier[_flatten_action_profile] ( identifier[action_profile] , identifier[indptr] ):
literal[string]
identifier[N] = identifier[len] ( identifier[indptr] )- literal[int]
identifier[out] = identifier[np] . identifier[empty] ( identifier[indptr] [- literal[int] ])
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[N] ):
keyword[if] identifier[isinstance] ( identifier[action_profile] [ identifier[i] ], identifier[numbers] . identifier[Integral] ):
identifier[num_actions] = identifier[indptr] [ identifier[i] + literal[int] ]- identifier[indptr] [ identifier[i] ]
identifier[mixed_action] = identifier[pure2mixed] ( identifier[num_actions] , identifier[action_profile] [ identifier[i] ])
keyword[else] :
identifier[mixed_action] = identifier[action_profile] [ identifier[i] ]
identifier[out] [ identifier[indptr] [ identifier[i] ]: identifier[indptr] [ identifier[i] + literal[int] ]]= identifier[mixed_action]
keyword[return] identifier[out] | def _flatten_action_profile(action_profile, indptr):
"""
Flatten the given action profile.
Parameters
----------
action_profile : array_like(int or array_like(float, ndim=1))
        Profile of actions of the N players, where each player i's action
is a pure action (int) or a mixed action (array_like of floats
of length n_i).
indptr : array_like(int, ndim=1)
Array of index pointers of length N+1, where `indptr[0] = 0` and
`indptr[i+1] = indptr[i] + n_i`.
Returns
-------
out : ndarray(float, ndim=1)
Array of flattened mixed action profile of length equal to n_0 +
... + n_N-1, where `out[indptr[i]:indptr[i+1]]` contains player
i's mixed action.
"""
N = len(indptr) - 1
out = np.empty(indptr[-1])
for i in range(N):
if isinstance(action_profile[i], numbers.Integral): # pure action
num_actions = indptr[i + 1] - indptr[i]
mixed_action = pure2mixed(num_actions, action_profile[i]) # depends on [control=['if'], data=[]]
else: # mixed action
mixed_action = action_profile[i]
out[indptr[i]:indptr[i + 1]] = mixed_action # depends on [control=['for'], data=['i']]
return out |
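A worked example, assuming numpy is imported as np and a pure2mixed helper (one-hot encoding of a pure action, as in quantecon.game_theory) is in scope:

import numpy as np

# Two players with 2 and 3 actions, so indptr = [0, 2, 5]:
profile = [1, [0.2, 0.3, 0.5]]   # player 0 plays pure action 1, player 1 mixes
out = _flatten_action_profile(profile, np.array([0, 2, 5]))
# out -> array([0. , 1. , 0.2, 0.3, 0.5])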
def fingerprint(self):
"""A total graph fingerprint
The result is invariant under permutation of the vertex indexes. The
chance that two different (molecular) graphs yield the same
fingerprint is small but not zero. (See unit tests.)"""
if self.num_vertices == 0:
return np.zeros(20, np.ubyte)
else:
return sum(self.vertex_fingerprints) | def function[fingerprint, parameter[self]]:
constant[A total graph fingerprint
The result is invariant under permutation of the vertex indexes. The
chance that two different (molecular) graphs yield the same
fingerprint is small but not zero. (See unit tests.)]
if compare[name[self].num_vertices equal[==] constant[0]] begin[:]
return[call[name[np].zeros, parameter[constant[20], name[np].ubyte]]] | keyword[def] identifier[fingerprint] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[num_vertices] == literal[int] :
keyword[return] identifier[np] . identifier[zeros] ( literal[int] , identifier[np] . identifier[ubyte] )
keyword[else] :
keyword[return] identifier[sum] ( identifier[self] . identifier[vertex_fingerprints] ) | def fingerprint(self):
"""A total graph fingerprint
The result is invariant under permutation of the vertex indexes. The
chance that two different (molecular) graphs yield the same
fingerprint is small but not zero. (See unit tests.)"""
if self.num_vertices == 0:
return np.zeros(20, np.ubyte) # depends on [control=['if'], data=[]]
else:
return sum(self.vertex_fingerprints) |
def convert2(self, imtls, sids):
"""
Convert a probability map into a composite array of shape (N,)
and dtype `imtls.dt`.
:param imtls:
DictArray instance
:param sids:
the IDs of the sites we are interested in
:returns:
an array of curves of shape (N,)
"""
assert self.shape_z == 1, self.shape_z
curves = numpy.zeros(len(sids), imtls.dt)
for imt in curves.dtype.names:
curves_by_imt = curves[imt]
for i, sid in numpy.ndenumerate(sids):
try:
pcurve = self[sid]
except KeyError:
pass # the poes will be zeros
else:
curves_by_imt[i] = pcurve.array[imtls(imt), 0]
return curves | def function[convert2, parameter[self, imtls, sids]]:
constant[
Convert a probability map into a composite array of shape (N,)
and dtype `imtls.dt`.
:param imtls:
DictArray instance
:param sids:
the IDs of the sites we are interested in
:returns:
an array of curves of shape (N,)
]
assert[compare[name[self].shape_z equal[==] constant[1]]]
variable[curves] assign[=] call[name[numpy].zeros, parameter[call[name[len], parameter[name[sids]]], name[imtls].dt]]
for taget[name[imt]] in starred[name[curves].dtype.names] begin[:]
variable[curves_by_imt] assign[=] call[name[curves]][name[imt]]
for taget[tuple[[<ast.Name object at 0x7da204621b40>, <ast.Name object at 0x7da18dc062c0>]]] in starred[call[name[numpy].ndenumerate, parameter[name[sids]]]] begin[:]
<ast.Try object at 0x7da18dc069b0>
return[name[curves]] | keyword[def] identifier[convert2] ( identifier[self] , identifier[imtls] , identifier[sids] ):
literal[string]
keyword[assert] identifier[self] . identifier[shape_z] == literal[int] , identifier[self] . identifier[shape_z]
identifier[curves] = identifier[numpy] . identifier[zeros] ( identifier[len] ( identifier[sids] ), identifier[imtls] . identifier[dt] )
keyword[for] identifier[imt] keyword[in] identifier[curves] . identifier[dtype] . identifier[names] :
identifier[curves_by_imt] = identifier[curves] [ identifier[imt] ]
keyword[for] identifier[i] , identifier[sid] keyword[in] identifier[numpy] . identifier[ndenumerate] ( identifier[sids] ):
keyword[try] :
identifier[pcurve] = identifier[self] [ identifier[sid] ]
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[else] :
identifier[curves_by_imt] [ identifier[i] ]= identifier[pcurve] . identifier[array] [ identifier[imtls] ( identifier[imt] ), literal[int] ]
keyword[return] identifier[curves] | def convert2(self, imtls, sids):
"""
Convert a probability map into a composite array of shape (N,)
and dtype `imtls.dt`.
:param imtls:
DictArray instance
:param sids:
the IDs of the sites we are interested in
:returns:
an array of curves of shape (N,)
"""
assert self.shape_z == 1, self.shape_z
curves = numpy.zeros(len(sids), imtls.dt)
for imt in curves.dtype.names:
curves_by_imt = curves[imt]
for (i, sid) in numpy.ndenumerate(sids):
try:
pcurve = self[sid] # depends on [control=['try'], data=[]]
except KeyError:
pass # the poes will be zeros # depends on [control=['except'], data=[]]
else:
curves_by_imt[i] = pcurve.array[imtls(imt), 0] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['imt']]
return curves |
def Get_RpRs(d, **kwargs):
'''
Returns the value of the planet radius over the stellar radius
for a given depth :py:obj:`d`, given
the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`.
'''
if ps is None:
raise Exception("Unable to import `pysyzygy`.")
def Depth(RpRs, **kwargs):
return 1 - ps.Transit(RpRs=RpRs, **kwargs)([kwargs.get('t0', 0.)])
def DiffSq(r):
return 1.e10 * (d - Depth(r, **kwargs)) ** 2
return fmin(DiffSq, [np.sqrt(d)], disp=False) | def function[Get_RpRs, parameter[d]]:
constant[
Returns the value of the planet radius over the stellar radius
for a given depth :py:obj:`d`, given
the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`.
]
if compare[name[ps] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0e96b00>
def function[Depth, parameter[RpRs]]:
return[binary_operation[constant[1] - call[call[name[ps].Transit, parameter[]], parameter[list[[<ast.Call object at 0x7da1b0e97220>]]]]]]
def function[DiffSq, parameter[r]]:
return[binary_operation[constant[10000000000.0] * binary_operation[binary_operation[name[d] - call[name[Depth], parameter[name[r]]]] ** constant[2]]]]
return[call[name[fmin], parameter[name[DiffSq], list[[<ast.Call object at 0x7da1b0e96800>]]]]] | keyword[def] identifier[Get_RpRs] ( identifier[d] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[ps] keyword[is] keyword[None] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[def] identifier[Depth] ( identifier[RpRs] ,** identifier[kwargs] ):
keyword[return] literal[int] - identifier[ps] . identifier[Transit] ( identifier[RpRs] = identifier[RpRs] ,** identifier[kwargs] )([ identifier[kwargs] . identifier[get] ( literal[string] , literal[int] )])
keyword[def] identifier[DiffSq] ( identifier[r] ):
keyword[return] literal[int] *( identifier[d] - identifier[Depth] ( identifier[r] ,** identifier[kwargs] ))** literal[int]
keyword[return] identifier[fmin] ( identifier[DiffSq] ,[ identifier[np] . identifier[sqrt] ( identifier[d] )], identifier[disp] = keyword[False] ) | def Get_RpRs(d, **kwargs):
"""
Returns the value of the planet radius over the stellar radius
for a given depth :py:obj:`d`, given
the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`.
"""
if ps is None:
raise Exception('Unable to import `pysyzygy`.') # depends on [control=['if'], data=[]]
def Depth(RpRs, **kwargs):
return 1 - ps.Transit(RpRs=RpRs, **kwargs)([kwargs.get('t0', 0.0)])
def DiffSq(r):
return 10000000000.0 * (d - Depth(r, **kwargs)) ** 2
return fmin(DiffSq, [np.sqrt(d)], disp=False) |
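A sanity check on the optimizer's seed rather than the full pysyzygy fit: for a box-shaped transit the depth is roughly d = (Rp/Rs)**2, which is exactly the starting guess handed to fmin:

import numpy as np

d = 0.01            # a 1% deep transit
print(np.sqrt(d))   # 0.1 -- fmin then refines this against the model light curve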
def mount_share(share_path):
"""Mounts a share at /Volumes
Args:
share_path: String URL with all auth info to connect to file share.
Returns:
The mount point or raises an error.
"""
sh_url = CFURLCreateWithString(None, share_path, None)
# Set UI to reduced interaction
open_options = {NetFS.kNAUIOptionKey: NetFS.kNAUIOptionNoUI}
# Allow mounting sub-directories of root shares
mount_options = {NetFS.kNetFSAllowSubMountsKey: True}
# Build our connected pointers for our results
result, output = NetFS.NetFSMountURLSync(sh_url, None, None, None,
open_options, mount_options, None)
# Check if it worked
if result != 0:
raise Exception('Error mounting url "%s": %s' % (share_path, output))
# Return the mountpath
return str(output[0]) | def function[mount_share, parameter[share_path]]:
constant[Mounts a share at /Volumes
Args:
share_path: String URL with all auth info to connect to file share.
Returns:
The mount point or raises an error.
]
variable[sh_url] assign[=] call[name[CFURLCreateWithString], parameter[constant[None], name[share_path], constant[None]]]
variable[open_options] assign[=] dictionary[[<ast.Attribute object at 0x7da1b2346740>], [<ast.Attribute object at 0x7da1b23449d0>]]
variable[mount_options] assign[=] dictionary[[<ast.Attribute object at 0x7da1b2344100>], [<ast.Constant object at 0x7da1b2345c00>]]
<ast.Tuple object at 0x7da1b2345ea0> assign[=] call[name[NetFS].NetFSMountURLSync, parameter[name[sh_url], constant[None], constant[None], constant[None], name[open_options], name[mount_options], constant[None]]]
if compare[name[result] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da1b2344310>
return[call[name[str], parameter[call[name[output]][constant[0]]]]] | keyword[def] identifier[mount_share] ( identifier[share_path] ):
literal[string]
identifier[sh_url] = identifier[CFURLCreateWithString] ( keyword[None] , identifier[share_path] , keyword[None] )
identifier[open_options] ={ identifier[NetFS] . identifier[kNAUIOptionKey] : identifier[NetFS] . identifier[kNAUIOptionNoUI] }
identifier[mount_options] ={ identifier[NetFS] . identifier[kNetFSAllowSubMountsKey] : keyword[True] }
identifier[result] , identifier[output] = identifier[NetFS] . identifier[NetFSMountURLSync] ( identifier[sh_url] , keyword[None] , keyword[None] , keyword[None] ,
identifier[open_options] , identifier[mount_options] , keyword[None] )
keyword[if] identifier[result] != literal[int] :
keyword[raise] identifier[Exception] ( literal[string] %( identifier[share_path] , identifier[output] ))
keyword[return] identifier[str] ( identifier[output] [ literal[int] ]) | def mount_share(share_path):
"""Mounts a share at /Volumes
Args:
share_path: String URL with all auth info to connect to file share.
Returns:
The mount point or raises an error.
"""
sh_url = CFURLCreateWithString(None, share_path, None)
# Set UI to reduced interaction
open_options = {NetFS.kNAUIOptionKey: NetFS.kNAUIOptionNoUI}
# Allow mounting sub-directories of root shares
mount_options = {NetFS.kNetFSAllowSubMountsKey: True}
# Build our connected pointers for our results
(result, output) = NetFS.NetFSMountURLSync(sh_url, None, None, None, open_options, mount_options, None)
# Check if it worked
if result != 0:
raise Exception('Error mounting url "%s": %s' % (share_path, output)) # depends on [control=['if'], data=[]]
# Return the mountpath
return str(output[0]) |
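A hedged call sketch (macOS only, and it needs PyObjC's NetFS plus CFURLCreateWithString in scope); the share URL below is invented:

mount_point = mount_share('smb://user:secret@fileserver/staff')
print(mount_point)  # e.g. /Volumes/staff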
def merge_dicts(src, patch):
"""Merge contents of dict `patch` into `src`."""
for key in patch:
if key in src:
if isinstance(src[key], dict) and isinstance(patch[key], dict):
merge_dicts(src[key], patch[key])
else:
src[key] = merge_values(src[key], patch[key])
else:
src[key] = patch[key]
return src | def function[merge_dicts, parameter[src, patch]]:
constant[Merge contents of dict `patch` into `src`.]
for taget[name[key]] in starred[name[patch]] begin[:]
if compare[name[key] in name[src]] begin[:]
if <ast.BoolOp object at 0x7da1b06c8e50> begin[:]
call[name[merge_dicts], parameter[call[name[src]][name[key]], call[name[patch]][name[key]]]]
return[name[src]] | keyword[def] identifier[merge_dicts] ( identifier[src] , identifier[patch] ):
literal[string]
keyword[for] identifier[key] keyword[in] identifier[patch] :
keyword[if] identifier[key] keyword[in] identifier[src] :
keyword[if] identifier[isinstance] ( identifier[src] [ identifier[key] ], identifier[dict] ) keyword[and] identifier[isinstance] ( identifier[patch] [ identifier[key] ], identifier[dict] ):
identifier[merge_dicts] ( identifier[src] [ identifier[key] ], identifier[patch] [ identifier[key] ])
keyword[else] :
identifier[src] [ identifier[key] ]= identifier[merge_values] ( identifier[src] [ identifier[key] ], identifier[patch] [ identifier[key] ])
keyword[else] :
identifier[src] [ identifier[key] ]= identifier[patch] [ identifier[key] ]
keyword[return] identifier[src] | def merge_dicts(src, patch):
"""Merge contents of dict `patch` into `src`."""
for key in patch:
if key in src:
if isinstance(src[key], dict) and isinstance(patch[key], dict):
merge_dicts(src[key], patch[key]) # depends on [control=['if'], data=[]]
else:
src[key] = merge_values(src[key], patch[key]) # depends on [control=['if'], data=['key', 'src']]
else:
src[key] = patch[key] # depends on [control=['for'], data=['key']]
return src |
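A quick example; the patch below only adds keys and merges nested dicts, so the separate merge_values helper (assumed to resolve scalar conflicts) is never invoked:

src = {'a': 1, 'b': {'x': 1}}
patch = {'b': {'y': 2}, 'c': 3}
merge_dicts(src, patch)
# src -> {'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3}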
def _filter_or_exclude(self, negate, *args, **kwargs):
"""
Overrides default behavior to handle linguist fields.
"""
from .models import Translation
new_args = self.get_cleaned_args(args)
new_kwargs = self.get_cleaned_kwargs(kwargs)
translation_args = self.get_translation_args(args)
translation_kwargs = self.get_translation_kwargs(kwargs)
has_linguist_args = self.has_linguist_args(args)
has_linguist_kwargs = self.has_linguist_kwargs(kwargs)
if translation_args or translation_kwargs:
ids = list(
set(
Translation.objects.filter(
*translation_args, **translation_kwargs
).values_list("object_id", flat=True)
)
)
if ids:
new_kwargs["id__in"] = ids
has_kwargs = has_linguist_kwargs and not (new_kwargs or new_args)
has_args = has_linguist_args and not (new_args or new_kwargs)
# No translations but we looked for translations?
# Returns empty queryset.
if has_kwargs or has_args:
return self._clone().none()
return super(QuerySetMixin, self)._filter_or_exclude(
negate, *new_args, **new_kwargs
) | def function[_filter_or_exclude, parameter[self, negate]]:
constant[
Overrides default behavior to handle linguist fields.
]
from relative_module[models] import module[Translation]
variable[new_args] assign[=] call[name[self].get_cleaned_args, parameter[name[args]]]
variable[new_kwargs] assign[=] call[name[self].get_cleaned_kwargs, parameter[name[kwargs]]]
variable[translation_args] assign[=] call[name[self].get_translation_args, parameter[name[args]]]
variable[translation_kwargs] assign[=] call[name[self].get_translation_kwargs, parameter[name[kwargs]]]
variable[has_linguist_args] assign[=] call[name[self].has_linguist_args, parameter[name[args]]]
variable[has_linguist_kwargs] assign[=] call[name[self].has_linguist_kwargs, parameter[name[kwargs]]]
if <ast.BoolOp object at 0x7da1b2847a90> begin[:]
variable[ids] assign[=] call[name[list], parameter[call[name[set], parameter[call[call[name[Translation].objects.filter, parameter[<ast.Starred object at 0x7da1b28443a0>]].values_list, parameter[constant[object_id]]]]]]]
if name[ids] begin[:]
call[name[new_kwargs]][constant[id__in]] assign[=] name[ids]
variable[has_kwargs] assign[=] <ast.BoolOp object at 0x7da1b28441f0>
variable[has_args] assign[=] <ast.BoolOp object at 0x7da1b272f550>
if <ast.BoolOp object at 0x7da1b272f2b0> begin[:]
return[call[call[name[self]._clone, parameter[]].none, parameter[]]]
return[call[call[name[super], parameter[name[QuerySetMixin], name[self]]]._filter_or_exclude, parameter[name[negate], <ast.Starred object at 0x7da1b272f9a0>]]] | keyword[def] identifier[_filter_or_exclude] ( identifier[self] , identifier[negate] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[from] . identifier[models] keyword[import] identifier[Translation]
identifier[new_args] = identifier[self] . identifier[get_cleaned_args] ( identifier[args] )
identifier[new_kwargs] = identifier[self] . identifier[get_cleaned_kwargs] ( identifier[kwargs] )
identifier[translation_args] = identifier[self] . identifier[get_translation_args] ( identifier[args] )
identifier[translation_kwargs] = identifier[self] . identifier[get_translation_kwargs] ( identifier[kwargs] )
identifier[has_linguist_args] = identifier[self] . identifier[has_linguist_args] ( identifier[args] )
identifier[has_linguist_kwargs] = identifier[self] . identifier[has_linguist_kwargs] ( identifier[kwargs] )
keyword[if] identifier[translation_args] keyword[or] identifier[translation_kwargs] :
identifier[ids] = identifier[list] (
identifier[set] (
identifier[Translation] . identifier[objects] . identifier[filter] (
* identifier[translation_args] ,** identifier[translation_kwargs]
). identifier[values_list] ( literal[string] , identifier[flat] = keyword[True] )
)
)
keyword[if] identifier[ids] :
identifier[new_kwargs] [ literal[string] ]= identifier[ids]
identifier[has_kwargs] = identifier[has_linguist_kwargs] keyword[and] keyword[not] ( identifier[new_kwargs] keyword[or] identifier[new_args] )
identifier[has_args] = identifier[has_linguist_args] keyword[and] keyword[not] ( identifier[new_args] keyword[or] identifier[new_kwargs] )
keyword[if] identifier[has_kwargs] keyword[or] identifier[has_args] :
keyword[return] identifier[self] . identifier[_clone] (). identifier[none] ()
keyword[return] identifier[super] ( identifier[QuerySetMixin] , identifier[self] ). identifier[_filter_or_exclude] (
identifier[negate] ,* identifier[new_args] ,** identifier[new_kwargs]
) | def _filter_or_exclude(self, negate, *args, **kwargs):
"""
Overrides default behavior to handle linguist fields.
"""
from .models import Translation
new_args = self.get_cleaned_args(args)
new_kwargs = self.get_cleaned_kwargs(kwargs)
translation_args = self.get_translation_args(args)
translation_kwargs = self.get_translation_kwargs(kwargs)
has_linguist_args = self.has_linguist_args(args)
has_linguist_kwargs = self.has_linguist_kwargs(kwargs)
if translation_args or translation_kwargs:
ids = list(set(Translation.objects.filter(*translation_args, **translation_kwargs).values_list('object_id', flat=True)))
if ids:
new_kwargs['id__in'] = ids # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
has_kwargs = has_linguist_kwargs and (not (new_kwargs or new_args))
has_args = has_linguist_args and (not (new_args or new_kwargs))
# No translations but we looked for translations?
# Returns empty queryset.
if has_kwargs or has_args:
return self._clone().none() # depends on [control=['if'], data=[]]
return super(QuerySetMixin, self)._filter_or_exclude(negate, *new_args, **new_kwargs) |
def create_or_replace_primary_key(self,
table: str,
fieldnames: Sequence[str]) -> int:
"""Make a primary key, or replace it if it exists."""
# *** create_or_replace_primary_key: Uses code specific to MySQL
sql = """
SELECT COUNT(*)
FROM information_schema.table_constraints
WHERE table_name=?
AND table_schema={}
AND constraint_name='PRIMARY'
""".format(self.get_current_schema_expr())
# http://forums.mysql.com/read.php?10,114742,114748#msg-114748
row = self.fetchone(sql, table)
has_pk_already = True if row[0] >= 1 else False
drop_pk_if_exists = " DROP PRIMARY KEY," if has_pk_already else ""
fieldlist = ",".join([self.delimit(f) for f in fieldnames])
sql = ("ALTER TABLE " + self.delimit(table) +
drop_pk_if_exists +
" ADD PRIMARY KEY(" + fieldlist + ")")
# http://stackoverflow.com/questions/8859353
return self.db_exec(sql) | def function[create_or_replace_primary_key, parameter[self, table, fieldnames]]:
constant[Make a primary key, or replace it if it exists.]
variable[sql] assign[=] call[constant[
SELECT COUNT(*)
FROM information_schema.table_constraints
WHERE table_name=?
AND table_schema={}
AND constraint_name='PRIMARY'
].format, parameter[call[name[self].get_current_schema_expr, parameter[]]]]
variable[row] assign[=] call[name[self].fetchone, parameter[name[sql], name[table]]]
variable[has_pk_already] assign[=] <ast.IfExp object at 0x7da1b184b8e0>
variable[drop_pk_if_exists] assign[=] <ast.IfExp object at 0x7da1b184b5b0>
variable[fieldlist] assign[=] call[constant[,].join, parameter[<ast.ListComp object at 0x7da1b184b100>]]
variable[sql] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[ALTER TABLE ] + call[name[self].delimit, parameter[name[table]]]] + name[drop_pk_if_exists]] + constant[ ADD PRIMARY KEY(]] + name[fieldlist]] + constant[)]]
return[call[name[self].db_exec, parameter[name[sql]]]] | keyword[def] identifier[create_or_replace_primary_key] ( identifier[self] ,
identifier[table] : identifier[str] ,
identifier[fieldnames] : identifier[Sequence] [ identifier[str] ])-> identifier[int] :
literal[string]
identifier[sql] = literal[string] . identifier[format] ( identifier[self] . identifier[get_current_schema_expr] ())
identifier[row] = identifier[self] . identifier[fetchone] ( identifier[sql] , identifier[table] )
identifier[has_pk_already] = keyword[True] keyword[if] identifier[row] [ literal[int] ]>= literal[int] keyword[else] keyword[False]
identifier[drop_pk_if_exists] = literal[string] keyword[if] identifier[has_pk_already] keyword[else] literal[string]
identifier[fieldlist] = literal[string] . identifier[join] ([ identifier[self] . identifier[delimit] ( identifier[f] ) keyword[for] identifier[f] keyword[in] identifier[fieldnames] ])
identifier[sql] =( literal[string] + identifier[self] . identifier[delimit] ( identifier[table] )+
identifier[drop_pk_if_exists] +
literal[string] + identifier[fieldlist] + literal[string] )
keyword[return] identifier[self] . identifier[db_exec] ( identifier[sql] ) | def create_or_replace_primary_key(self, table: str, fieldnames: Sequence[str]) -> int:
"""Make a primary key, or replace it if it exists."""
# *** create_or_replace_primary_key: Uses code specific to MySQL
sql = "\n SELECT COUNT(*)\n FROM information_schema.table_constraints\n WHERE table_name=?\n AND table_schema={}\n AND constraint_name='PRIMARY'\n ".format(self.get_current_schema_expr())
# http://forums.mysql.com/read.php?10,114742,114748#msg-114748
row = self.fetchone(sql, table)
has_pk_already = True if row[0] >= 1 else False
drop_pk_if_exists = ' DROP PRIMARY KEY,' if has_pk_already else ''
fieldlist = ','.join([self.delimit(f) for f in fieldnames])
sql = 'ALTER TABLE ' + self.delimit(table) + drop_pk_if_exists + ' ADD PRIMARY KEY(' + fieldlist + ')'
# http://stackoverflow.com/questions/8859353
return self.db_exec(sql) |
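A sketch with a stand-in database object so the generated SQL can be inspected; every attribute on the fake is an assumption about the real class, not its actual API:

from types import SimpleNamespace

fake = SimpleNamespace(
    get_current_schema_expr=lambda: 'DATABASE()',
    fetchone=lambda sql, table: (0,),   # pretend no primary key exists yet
    delimit=lambda f: '`%s`' % f,
    db_exec=lambda sql: print(sql) or 0,
)
create_or_replace_primary_key(fake, 'patient', ['id', 'visit'])
# prints: ALTER TABLE `patient` ADD PRIMARY KEY(`id`,`visit`)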
def send_activation_email(self, user):
"""
Send the activation email. The activation key is the username,
signed using TimestampSigner.
"""
activation_key = self.get_activation_key(user)
context = self.get_email_context(activation_key)
context['user'] = user
subject = render_to_string(
template_name=self.email_subject_template,
context=context,
request=self.request
)
# Force subject to a single line to avoid header-injection
# issues.
subject = ''.join(subject.splitlines())
message = render_to_string(
template_name=self.email_body_template,
context=context,
request=self.request
)
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL) | def function[send_activation_email, parameter[self, user]]:
constant[
Send the activation email. The activation key is the username,
signed using TimestampSigner.
]
variable[activation_key] assign[=] call[name[self].get_activation_key, parameter[name[user]]]
variable[context] assign[=] call[name[self].get_email_context, parameter[name[activation_key]]]
call[name[context]][constant[user]] assign[=] name[user]
variable[subject] assign[=] call[name[render_to_string], parameter[]]
variable[subject] assign[=] call[constant[].join, parameter[call[name[subject].splitlines, parameter[]]]]
variable[message] assign[=] call[name[render_to_string], parameter[]]
call[name[user].email_user, parameter[name[subject], name[message], name[settings].DEFAULT_FROM_EMAIL]] | keyword[def] identifier[send_activation_email] ( identifier[self] , identifier[user] ):
literal[string]
identifier[activation_key] = identifier[self] . identifier[get_activation_key] ( identifier[user] )
identifier[context] = identifier[self] . identifier[get_email_context] ( identifier[activation_key] )
identifier[context] [ literal[string] ]= identifier[user]
identifier[subject] = identifier[render_to_string] (
identifier[template_name] = identifier[self] . identifier[email_subject_template] ,
identifier[context] = identifier[context] ,
identifier[request] = identifier[self] . identifier[request]
)
identifier[subject] = literal[string] . identifier[join] ( identifier[subject] . identifier[splitlines] ())
identifier[message] = identifier[render_to_string] (
identifier[template_name] = identifier[self] . identifier[email_body_template] ,
identifier[context] = identifier[context] ,
identifier[request] = identifier[self] . identifier[request]
)
identifier[user] . identifier[email_user] ( identifier[subject] , identifier[message] , identifier[settings] . identifier[DEFAULT_FROM_EMAIL] ) | def send_activation_email(self, user):
"""
Send the activation email. The activation key is the username,
signed using TimestampSigner.
"""
activation_key = self.get_activation_key(user)
context = self.get_email_context(activation_key)
context['user'] = user
subject = render_to_string(template_name=self.email_subject_template, context=context, request=self.request)
# Force subject to a single line to avoid header-injection
# issues.
subject = ''.join(subject.splitlines())
message = render_to_string(template_name=self.email_body_template, context=context, request=self.request)
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL) |
def extrude_triangulation(vertices,
faces,
height,
**kwargs):
"""
Turn a 2D triangulation into a watertight Trimesh.
Parameters
----------
vertices : (n, 2) float
2D vertices
faces : (m, 3) int
Triangle indexes of vertices
height : float
Distance to extrude triangulation
**kwargs:
passed to Trimesh
Returns
---------
mesh : trimesh.Trimesh
Mesh created from extrusion
"""
vertices = np.asanyarray(vertices, dtype=np.float64)
height = float(height)
faces = np.asanyarray(faces, dtype=np.int64)
if not util.is_shape(vertices, (-1, 2)):
raise ValueError('Vertices must be (n,2)')
if not util.is_shape(faces, (-1, 3)):
raise ValueError('Faces must be (n,3)')
if np.abs(height) < tol.merge:
raise ValueError('Height must be nonzero!')
# make sure triangulation winding is pointing up
normal_test = normals(
[util.stack_3D(vertices[faces[0]])])[0]
normal_dot = np.dot(normal_test,
[0.0, 0.0, np.sign(height)])[0]
# make sure the triangulation is aligned with the sign of
# the height we've been passed
if normal_dot < 0.0:
faces = np.fliplr(faces)
# stack the (n,3) faces into (3*n, 2) edges
edges = faces_to_edges(faces)
edges_sorted = np.sort(edges, axis=1)
# edges which only occur once are on the boundary of the polygon
# since the triangulation may have subdivided the boundary of the
# shapely polygon, we need to find it again
edges_unique = grouping.group_rows(edges_sorted, require_count=1)
# (n, 2, 2) set of line segments (positions, not references)
boundary = vertices[edges[edges_unique]]
# we are creating two vertical triangles for every 2D line segment
# on the boundary of the 2D triangulation
vertical = np.tile(boundary.reshape((-1, 2)), 2).reshape((-1, 2))
vertical = np.column_stack((vertical,
np.tile([0, height, 0, height],
len(boundary))))
vertical_faces = np.tile([3, 1, 2, 2, 1, 0],
(len(boundary), 1))
vertical_faces += np.arange(len(boundary)).reshape((-1, 1)) * 4
vertical_faces = vertical_faces.reshape((-1, 3))
# stack the (n,2) vertices with zeros to make them (n, 3)
vertices_3D = util.stack_3D(vertices)
# a sequence of zero- indexed faces, which will then be appended
# with offsets to create the final mesh
faces_seq = [faces[:, ::-1],
faces.copy(),
vertical_faces]
vertices_seq = [vertices_3D,
vertices_3D.copy() + [0.0, 0, height],
vertical]
mesh = Trimesh(*util.append_faces(vertices_seq,
faces_seq),
process=True,
**kwargs)
assert mesh.volume > 0.0
return mesh | def function[extrude_triangulation, parameter[vertices, faces, height]]:
constant[
Turn a 2D triangulation into a watertight Trimesh.
Parameters
----------
vertices : (n, 2) float
2D vertices
faces : (m, 3) int
Triangle indexes of vertices
height : float
Distance to extrude triangulation
**kwargs:
passed to Trimesh
Returns
---------
mesh : trimesh.Trimesh
Mesh created from extrusion
]
variable[vertices] assign[=] call[name[np].asanyarray, parameter[name[vertices]]]
variable[height] assign[=] call[name[float], parameter[name[height]]]
variable[faces] assign[=] call[name[np].asanyarray, parameter[name[faces]]]
if <ast.UnaryOp object at 0x7da20c9911b0> begin[:]
<ast.Raise object at 0x7da20c991de0>
if <ast.UnaryOp object at 0x7da20c993460> begin[:]
<ast.Raise object at 0x7da20c993190>
if compare[call[name[np].abs, parameter[name[height]]] less[<] name[tol].merge] begin[:]
<ast.Raise object at 0x7da20c9927d0>
variable[normal_test] assign[=] call[call[name[normals], parameter[list[[<ast.Call object at 0x7da20c993b20>]]]]][constant[0]]
variable[normal_dot] assign[=] call[call[name[np].dot, parameter[name[normal_test], list[[<ast.Constant object at 0x7da20c993790>, <ast.Constant object at 0x7da20c9912d0>, <ast.Call object at 0x7da20c990190>]]]]][constant[0]]
if compare[name[normal_dot] less[<] constant[0.0]] begin[:]
variable[faces] assign[=] call[name[np].fliplr, parameter[name[faces]]]
variable[edges] assign[=] call[name[faces_to_edges], parameter[name[faces]]]
variable[edges_sorted] assign[=] call[name[np].sort, parameter[name[edges]]]
variable[edges_unique] assign[=] call[name[grouping].group_rows, parameter[name[edges_sorted]]]
variable[boundary] assign[=] call[name[vertices]][call[name[edges]][name[edges_unique]]]
variable[vertical] assign[=] call[call[name[np].tile, parameter[call[name[boundary].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da20c7ca620>, <ast.Constant object at 0x7da20c7ca1d0>]]]], constant[2]]].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da20c7cb7f0>, <ast.Constant object at 0x7da20c7c8520>]]]]
variable[vertical] assign[=] call[name[np].column_stack, parameter[tuple[[<ast.Name object at 0x7da20c7cac20>, <ast.Call object at 0x7da20c7c9a80>]]]]
variable[vertical_faces] assign[=] call[name[np].tile, parameter[list[[<ast.Constant object at 0x7da20c7c9f60>, <ast.Constant object at 0x7da20c7cb1f0>, <ast.Constant object at 0x7da20c7cb880>, <ast.Constant object at 0x7da20c7c9c60>, <ast.Constant object at 0x7da20c7ca410>, <ast.Constant object at 0x7da20c7c8b20>]], tuple[[<ast.Call object at 0x7da20c7c8580>, <ast.Constant object at 0x7da20c7c8310>]]]]
<ast.AugAssign object at 0x7da20c7c91e0>
variable[vertical_faces] assign[=] call[name[vertical_faces].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da20c7c9fc0>, <ast.Constant object at 0x7da20c7caf80>]]]]
variable[vertices_3D] assign[=] call[name[util].stack_3D, parameter[name[vertices]]]
variable[faces_seq] assign[=] list[[<ast.Subscript object at 0x7da20c7c98d0>, <ast.Call object at 0x7da20c7cbc70>, <ast.Name object at 0x7da20c7cb010>]]
variable[vertices_seq] assign[=] list[[<ast.Name object at 0x7da20c7cada0>, <ast.BinOp object at 0x7da20c7ca9b0>, <ast.Name object at 0x7da20c7ca800>]]
variable[mesh] assign[=] call[name[Trimesh], parameter[<ast.Starred object at 0x7da20c7cb670>]]
assert[compare[name[mesh].volume greater[>] constant[0.0]]]
return[name[mesh]] | keyword[def] identifier[extrude_triangulation] ( identifier[vertices] ,
identifier[faces] ,
identifier[height] ,
** identifier[kwargs] ):
literal[string]
identifier[vertices] = identifier[np] . identifier[asanyarray] ( identifier[vertices] , identifier[dtype] = identifier[np] . identifier[float64] )
identifier[height] = identifier[float] ( identifier[height] )
identifier[faces] = identifier[np] . identifier[asanyarray] ( identifier[faces] , identifier[dtype] = identifier[np] . identifier[int64] )
keyword[if] keyword[not] identifier[util] . identifier[is_shape] ( identifier[vertices] ,(- literal[int] , literal[int] )):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[util] . identifier[is_shape] ( identifier[faces] ,(- literal[int] , literal[int] )):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[np] . identifier[abs] ( identifier[height] )< identifier[tol] . identifier[merge] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[normal_test] = identifier[normals] (
[ identifier[util] . identifier[stack_3D] ( identifier[vertices] [ identifier[faces] [ literal[int] ]])])[ literal[int] ]
identifier[normal_dot] = identifier[np] . identifier[dot] ( identifier[normal_test] ,
[ literal[int] , literal[int] , identifier[np] . identifier[sign] ( identifier[height] )])[ literal[int] ]
keyword[if] identifier[normal_dot] < literal[int] :
identifier[faces] = identifier[np] . identifier[fliplr] ( identifier[faces] )
identifier[edges] = identifier[faces_to_edges] ( identifier[faces] )
identifier[edges_sorted] = identifier[np] . identifier[sort] ( identifier[edges] , identifier[axis] = literal[int] )
identifier[edges_unique] = identifier[grouping] . identifier[group_rows] ( identifier[edges_sorted] , identifier[require_count] = literal[int] )
identifier[boundary] = identifier[vertices] [ identifier[edges] [ identifier[edges_unique] ]]
identifier[vertical] = identifier[np] . identifier[tile] ( identifier[boundary] . identifier[reshape] ((- literal[int] , literal[int] )), literal[int] ). identifier[reshape] ((- literal[int] , literal[int] ))
identifier[vertical] = identifier[np] . identifier[column_stack] (( identifier[vertical] ,
identifier[np] . identifier[tile] ([ literal[int] , identifier[height] , literal[int] , identifier[height] ],
identifier[len] ( identifier[boundary] ))))
identifier[vertical_faces] = identifier[np] . identifier[tile] ([ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ],
( identifier[len] ( identifier[boundary] ), literal[int] ))
identifier[vertical_faces] += identifier[np] . identifier[arange] ( identifier[len] ( identifier[boundary] )). identifier[reshape] ((- literal[int] , literal[int] ))* literal[int]
identifier[vertical_faces] = identifier[vertical_faces] . identifier[reshape] ((- literal[int] , literal[int] ))
identifier[vertices_3D] = identifier[util] . identifier[stack_3D] ( identifier[vertices] )
identifier[faces_seq] =[ identifier[faces] [:,::- literal[int] ],
identifier[faces] . identifier[copy] (),
identifier[vertical_faces] ]
identifier[vertices_seq] =[ identifier[vertices_3D] ,
identifier[vertices_3D] . identifier[copy] ()+[ literal[int] , literal[int] , identifier[height] ],
identifier[vertical] ]
identifier[mesh] = identifier[Trimesh] (* identifier[util] . identifier[append_faces] ( identifier[vertices_seq] ,
identifier[faces_seq] ),
identifier[process] = keyword[True] ,
** identifier[kwargs] )
keyword[assert] identifier[mesh] . identifier[volume] > literal[int]
keyword[return] identifier[mesh] | def extrude_triangulation(vertices, faces, height, **kwargs):
"""
Turn a 2D triangulation into a watertight Trimesh.
Parameters
----------
vertices : (n, 2) float
2D vertices
faces : (m, 3) int
Triangle indexes of vertices
height : float
Distance to extrude triangulation
**kwargs:
passed to Trimesh
Returns
---------
mesh : trimesh.Trimesh
Mesh created from extrusion
"""
vertices = np.asanyarray(vertices, dtype=np.float64)
height = float(height)
faces = np.asanyarray(faces, dtype=np.int64)
if not util.is_shape(vertices, (-1, 2)):
raise ValueError('Vertices must be (n,2)') # depends on [control=['if'], data=[]]
if not util.is_shape(faces, (-1, 3)):
raise ValueError('Faces must be (n,3)') # depends on [control=['if'], data=[]]
if np.abs(height) < tol.merge:
raise ValueError('Height must be nonzero!') # depends on [control=['if'], data=[]]
# make sure triangulation winding is pointing up
normal_test = normals([util.stack_3D(vertices[faces[0]])])[0]
normal_dot = np.dot(normal_test, [0.0, 0.0, np.sign(height)])[0]
# make sure the triangulation is aligned with the sign of
# the height we've been passed
if normal_dot < 0.0:
faces = np.fliplr(faces) # depends on [control=['if'], data=[]]
# stack the (n,3) faces into (3*n, 2) edges
edges = faces_to_edges(faces)
edges_sorted = np.sort(edges, axis=1)
# edges which only occur once are on the boundary of the polygon
# since the triangulation may have subdivided the boundary of the
# shapely polygon, we need to find it again
edges_unique = grouping.group_rows(edges_sorted, require_count=1)
# (n, 2, 2) set of line segments (positions, not references)
boundary = vertices[edges[edges_unique]]
# we are creating two vertical triangles for every 2D line segment
# on the boundary of the 2D triangulation
vertical = np.tile(boundary.reshape((-1, 2)), 2).reshape((-1, 2))
vertical = np.column_stack((vertical, np.tile([0, height, 0, height], len(boundary))))
vertical_faces = np.tile([3, 1, 2, 2, 1, 0], (len(boundary), 1))
vertical_faces += np.arange(len(boundary)).reshape((-1, 1)) * 4
vertical_faces = vertical_faces.reshape((-1, 3))
# stack the (n,2) vertices with zeros to make them (n, 3)
vertices_3D = util.stack_3D(vertices)
# a sequence of zero- indexed faces, which will then be appended
# with offsets to create the final mesh
faces_seq = [faces[:, ::-1], faces.copy(), vertical_faces]
vertices_seq = [vertices_3D, vertices_3D.copy() + [0.0, 0, height], vertical]
mesh = Trimesh(*util.append_faces(vertices_seq, faces_seq), process=True, **kwargs)
assert mesh.volume > 0.0
return mesh |
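A worked example, assuming the surrounding trimesh module context (Trimesh, util, grouping, tol) is importable:

import numpy as np

v = np.array([[0, 0], [1, 0], [0, 1]], dtype=float)   # unit right triangle
f = np.array([[0, 1, 2]])
m = extrude_triangulation(v, f, height=1.0)
print(m.volume)   # 0.5 -- the triangle's area times the extrusion height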
def get_permissions_for_role(role, brain_or_object):
"""Return the permissions of the role which are granted on the object
Code extracted from `IRoleManager.permissionsOfRole`
:param role: The role to check the permission
:param brain_or_object: Catalog brain or object
:returns: List of permissions of the role
"""
obj = api.get_object(brain_or_object)
# Raise an error if the role is invalid
valid_roles = get_valid_roles_for(obj)
if role not in valid_roles:
raise ValueError("The Role '{}' is invalid.".format(role))
out = []
for item in obj.ac_inherited_permissions(1):
name, value = item[:2]
# Permission maps a named permission to a set of attribute names
permission = Permission(name, value, obj)
if role in permission.getRoles():
out.append(name)
return out | def function[get_permissions_for_role, parameter[role, brain_or_object]]:
constant[Return the permissions of the role which are granted on the object
Code extracted from `IRoleManager.permissionsOfRole`
:param role: The role to check the permission
:param brain_or_object: Catalog brain or object
:returns: List of permissions of the role
]
variable[obj] assign[=] call[name[api].get_object, parameter[name[brain_or_object]]]
variable[valid_roles] assign[=] call[name[get_valid_roles_for], parameter[name[obj]]]
if compare[name[role] <ast.NotIn object at 0x7da2590d7190> name[valid_roles]] begin[:]
<ast.Raise object at 0x7da1b1d652a0>
variable[out] assign[=] list[[]]
for taget[name[item]] in starred[call[name[obj].ac_inherited_permissions, parameter[constant[1]]]] begin[:]
<ast.Tuple object at 0x7da1b1d64fa0> assign[=] call[name[item]][<ast.Slice object at 0x7da1b1d67940>]
variable[permission] assign[=] call[name[Permission], parameter[name[name], name[value], name[obj]]]
if compare[name[role] in call[name[permission].getRoles, parameter[]]] begin[:]
call[name[out].append, parameter[name[name]]]
return[name[out]] | keyword[def] identifier[get_permissions_for_role] ( identifier[role] , identifier[brain_or_object] ):
literal[string]
identifier[obj] = identifier[api] . identifier[get_object] ( identifier[brain_or_object] )
identifier[valid_roles] = identifier[get_valid_roles_for] ( identifier[obj] )
keyword[if] identifier[role] keyword[not] keyword[in] identifier[valid_roles] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[role] ))
identifier[out] =[]
keyword[for] identifier[item] keyword[in] identifier[obj] . identifier[ac_inherited_permissions] ( literal[int] ):
identifier[name] , identifier[value] = identifier[item] [: literal[int] ]
identifier[permission] = identifier[Permission] ( identifier[name] , identifier[value] , identifier[obj] )
keyword[if] identifier[role] keyword[in] identifier[permission] . identifier[getRoles] ():
identifier[out] . identifier[append] ( identifier[name] )
keyword[return] identifier[out] | def get_permissions_for_role(role, brain_or_object):
"""Return the permissions of the role which are granted on the object
Code extracted from `IRoleManager.permissionsOfRole`
:param role: The role to check the permission
:param brain_or_object: Catalog brain or object
:returns: List of permissions of the role
"""
obj = api.get_object(brain_or_object)
# Raise an error if the role is invalid
valid_roles = get_valid_roles_for(obj)
if role not in valid_roles:
raise ValueError("The Role '{}' is invalid.".format(role)) # depends on [control=['if'], data=['role']]
out = []
for item in obj.ac_inherited_permissions(1):
(name, value) = item[:2]
# Permission maps a named permission to a set of attribute names
permission = Permission(name, value, obj)
if role in permission.getRoles():
out.append(name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
return out |
def solveServiceArea(self,facilities,method="POST",
barriers=None,
polylineBarriers=None,
polygonBarriers=None,
travelMode=None,
attributeParameterValues=None,
defaultBreaks=None,
excludeSourcesFromPolygons=None,
mergeSimilarPolygonRanges=None,
outputLines=None,
outputPolygons=None,
overlapLines=None,
overlapPolygons=None,
splitLinesAtBreaks=None,
splitPolygonsAtBreaks=None,
trimOuterPolygon=None,
trimPolygonDistance=None,
trimPolygonDistanceUnits=None,
returnFacilities=False,
returnBarriers=False,
returnPolylineBarriers=False,
returnPolygonBarriers=False,
outSR=None,
accumulateAttributeNames=None,
impedanceAttributeName=None,
restrictionAttributeNames=None,
restrictUTurns=None,
outputGeometryPrecision=None,
outputGeometryPrecisionUnits='esriUnknownUnits',
useHierarchy=None,
timeOfDay=None,
timeOfDayIsUTC=None,
travelDirection=None,
returnZ=False):
""" The solve service area operation is performed on a network layer
        resource of type service area (layerType is esriNAServerServiceAreaLayer).
You can provide arguments to the solve service area operation as
query parameters.
Inputs:
facilities - The set of facilities loaded as network locations
during analysis. Facilities can be specified using
a simple comma / semi-colon based syntax or as a
JSON structure. If facilities are not specified,
preloaded facilities from the map document are used
in the analysis. If an empty json object is passed
('{}') preloaded facilities are ignored.
barriers - The set of barriers loaded as network locations during
analysis. Barriers can be specified using a simple
comma/semicolon-based syntax or as a JSON structure.
If barriers are not specified, preloaded barriers from
the map document are used in the analysis. If an empty
json object is passed ('{}'), preloaded barriers are
ignored.
polylineBarriers - The set of polyline barriers loaded as network
locations during analysis. If polyline barriers
are not specified, preloaded polyline barriers
from the map document are used in the analysis.
If an empty json object is passed ('{}'),
preloaded polyline barriers are ignored.
polygonBarriers - The set of polygon barriers loaded as network
locations during analysis. If polygon barriers
are not specified, preloaded polygon barriers
from the map document are used in the analysis.
If an empty json object is passed ('{}'),
preloaded polygon barriers are ignored.
travelMode - Travel modes provide override values that help you
quickly and consistently model a vehicle or mode of
transportation. The chosen travel mode must be
preconfigured on the network dataset that the
service area service references.
attributeParameterValues - A set of attribute parameter values that
can be parameterized to determine which
network elements can be used by a vehicle.
defaultBreaks - A comma-separated list of doubles. The default is
defined in the network analysis layer.
excludeSourcesFromPolygons - A comma-separated list of string names.
The default is defined in the network
analysis layer.
mergeSimilarPolygonRanges - If true, similar ranges will be merged
in the result polygons. The default is
defined in the network analysis layer.
            outputLines - The type of line(s) generated. The default is as
defined in the network analysis layer.
outputPolygons - The type of polygon(s) generated. The default is
as defined in the network analysis layer.
overlapLines - Indicates if the lines should overlap from multiple
facilities. The default is defined in the network
analysis layer.
overlapPolygons - Indicates if the polygons for all facilities
should overlap. The default is defined in the
network analysis layer.
splitLinesAtBreaks - If true, lines will be split at breaks. The
default is defined in the network analysis
layer.
splitPolygonsAtBreaks - If true, polygons will be split at breaks.
The default is defined in the network
analysis layer.
trimOuterPolygon - If true, the outermost polygon (at the maximum
break value) will be trimmed. The default is
defined in the network analysis layer.
trimPolygonDistance - If polygons are being trimmed, provides the
distance to trim. The default is defined in
the network analysis layer.
trimPolygonDistanceUnits - If polygons are being trimmed, specifies
the units of the trimPolygonDistance. The
default is defined in the network analysis
layer.
returnFacilities - If true, facilities will be returned with the
analysis results. Default is false.
returnBarriers - If true, barriers will be returned with the analysis
results. Default is false.
returnPolylineBarriers - If true, polyline barriers will be returned
with the analysis results. Default is false.
returnPolygonBarriers - If true, polygon barriers will be returned
with the analysis results. Default is false.
outSR - The well-known ID of the spatial reference for the geometries
returned with the analysis results. If outSR is not specified,
the geometries are returned in the spatial reference of the map.
accumulateAttributeNames - The list of network attribute names to be
accumulated with the analysis. The default
is as defined in the network analysis layer.
The value should be specified as a comma
separated list of attribute names. You can
also specify a value of none to indicate that
no network attributes should be accumulated.
impedanceAttributeName - The network attribute name to be used as the
impedance attribute in analysis. The default
is as defined in the network analysis layer.
restrictionAttributeNames - The list of network attribute names to be
used as restrictions with the analysis. The
default is as defined in the network analysis
layer. The value should be specified as a
comma separated list of attribute names.
You can also specify a value of none to
indicate that no network attributes should
be used as restrictions.
restrictUTurns - Specifies how U-Turns should be restricted in the
analysis. The default is as defined in the network
analysis layer. Values: esriNFSBAllowBacktrack |
esriNFSBAtDeadEndsOnly | esriNFSBNoBacktrack |
esriNFSBAtDeadEndsAndIntersections
outputGeometryPrecision - The precision of the output geometry after
generalization. If 0, no generalization of
output geometry is performed. The default is
as defined in the network service configuration.
outputGeometryPrecisionUnits - The units of the output geometry precision.
The default value is esriUnknownUnits.
Values: esriUnknownUnits | esriCentimeters |
esriDecimalDegrees | esriDecimeters |
esriFeet | esriInches | esriKilometers |
esriMeters | esriMiles | esriMillimeters |
esriNauticalMiles | esriPoints | esriYards
useHierarchy - If true, the hierarchy attribute for the network should be
used in analysis. The default is as defined in the network
layer. This cannot be used in conjunction with outputLines.
timeOfDay - The date and time at the facility. If travelDirection is set
to esriNATravelDirectionToFacility, the timeOfDay value
                        specifies the arrival time at the facility. If travelDirection
is set to esriNATravelDirectionFromFacility, the timeOfDay
value is the departure time from the facility. The time zone
for timeOfDay is specified by timeOfDayIsUTC.
timeOfDayIsUTC - The time zone or zones of the timeOfDay parameter. When
set to false, which is the default value, the timeOfDay
parameter refers to the time zone or zones in which the
facilities are located. Therefore, the start or end times
of the service areas are staggered by time zone.
travelDirection - Options for traveling to or from the facility. The
default is defined in the network analysis layer.
returnZ - If true, Z values will be included in saPolygons and saPolylines
geometry if the network dataset is Z-aware.
"""
if not self.layerType == "esriNAServerServiceAreaLayer":
raise ValueError("The solveServiceArea operation is supported on a network "
"layer of Service Area type only")
url = self._url + "/solveServiceArea"
params = {
"f" : "json",
"facilities": facilities
}
if not barriers is None:
params['barriers'] = barriers
if not polylineBarriers is None:
params['polylineBarriers'] = polylineBarriers
if not polygonBarriers is None:
params['polygonBarriers'] = polygonBarriers
if not travelMode is None:
params['travelMode'] = travelMode
if not attributeParameterValues is None:
params['attributeParameterValues'] = attributeParameterValues
if not defaultBreaks is None:
params['defaultBreaks'] = defaultBreaks
if not excludeSourcesFromPolygons is None:
params['excludeSourcesFromPolygons'] = excludeSourcesFromPolygons
if not mergeSimilarPolygonRanges is None:
params['mergeSimilarPolygonRanges'] = mergeSimilarPolygonRanges
if not outputLines is None:
params['outputLines'] = outputLines
if not outputPolygons is None:
params['outputPolygons'] = outputPolygons
if not overlapLines is None:
params['overlapLines'] = overlapLines
if not overlapPolygons is None:
params['overlapPolygons'] = overlapPolygons
if not splitLinesAtBreaks is None:
params['splitLinesAtBreaks'] = splitLinesAtBreaks
if not splitPolygonsAtBreaks is None:
params['splitPolygonsAtBreaks'] = splitPolygonsAtBreaks
if not trimOuterPolygon is None:
params['trimOuterPolygon'] = trimOuterPolygon
if not trimPolygonDistance is None:
params['trimPolygonDistance'] = trimPolygonDistance
if not trimPolygonDistanceUnits is None:
params['trimPolygonDistanceUnits'] = trimPolygonDistanceUnits
if not returnFacilities is None:
params['returnFacilities'] = returnFacilities
if not returnBarriers is None:
params['returnBarriers'] = returnBarriers
if not returnPolylineBarriers is None:
params['returnPolylineBarriers'] = returnPolylineBarriers
if not returnPolygonBarriers is None:
params['returnPolygonBarriers'] = returnPolygonBarriers
if not outSR is None:
params['outSR'] = outSR
if not accumulateAttributeNames is None:
params['accumulateAttributeNames'] = accumulateAttributeNames
if not impedanceAttributeName is None:
params['impedanceAttributeName'] = impedanceAttributeName
if not restrictionAttributeNames is None:
params['restrictionAttributeNames'] = restrictionAttributeNames
if not restrictUTurns is None:
params['restrictUTurns'] = restrictUTurns
if not outputGeometryPrecision is None:
params['outputGeometryPrecision'] = outputGeometryPrecision
if not outputGeometryPrecisionUnits is None:
params['outputGeometryPrecisionUnits'] = outputGeometryPrecisionUnits
if not useHierarchy is None:
params['useHierarchy'] = useHierarchy
if not timeOfDay is None:
params['timeOfDay'] = timeOfDay
if not timeOfDayIsUTC is None:
params['timeOfDayIsUTC'] = timeOfDayIsUTC
if not travelDirection is None:
params['travelDirection'] = travelDirection
if not returnZ is None:
params['returnZ'] = returnZ
if method.lower() == "post":
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
else:
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | def function[solveServiceArea, parameter[self, facilities, method, barriers, polylineBarriers, polygonBarriers, travelMode, attributeParameterValues, defaultBreaks, excludeSourcesFromPolygons, mergeSimilarPolygonRanges, outputLines, outputPolygons, overlapLines, overlapPolygons, splitLinesAtBreaks, splitPolygonsAtBreaks, trimOuterPolygon, trimPolygonDistance, trimPolygonDistanceUnits, returnFacilities, returnBarriers, returnPolylineBarriers, returnPolygonBarriers, outSR, accumulateAttributeNames, impedanceAttributeName, restrictionAttributeNames, restrictUTurns, outputGeometryPrecision, outputGeometryPrecisionUnits, useHierarchy, timeOfDay, timeOfDayIsUTC, travelDirection, returnZ]]:
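
A hedged sketch of a call to the method above, assuming `layer` is an instance whose layerType is esriNAServerServiceAreaLayer; the facility geometry, attribute names, and break values are invented for illustration:

# Facilities passed as a JSON structure, per the docstring; the
# coordinates and name below are placeholders.
facilities = {
    "features": [
        {"geometry": {"x": -117.15, "y": 32.71},
         "attributes": {"Name": "Fire Station 1"}}
    ]
}
result = layer.solveServiceArea(
    facilities=facilities,
    defaultBreaks="5,10,15",   # comma-separated list of doubles
    outSR=4326,                # return geometries in WGS84
    returnFacilities=True,
    method="POST")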
constant[ The solve service area operation is performed on a network layer
        resource of type service area (layerType is esriNAServerServiceAreaLayer).
You can provide arguments to the solve service area operation as
query parameters.
Inputs:
facilities - The set of facilities loaded as network locations
during analysis. Facilities can be specified using
a simple comma / semi-colon based syntax or as a
JSON structure. If facilities are not specified,
preloaded facilities from the map document are used
in the analysis. If an empty json object is passed
('{}') preloaded facilities are ignored.
barriers - The set of barriers loaded as network locations during
analysis. Barriers can be specified using a simple
comma/semicolon-based syntax or as a JSON structure.
If barriers are not specified, preloaded barriers from
the map document are used in the analysis. If an empty
json object is passed ('{}'), preloaded barriers are
ignored.
polylineBarriers - The set of polyline barriers loaded as network
locations during analysis. If polyline barriers
are not specified, preloaded polyline barriers
from the map document are used in the analysis.
If an empty json object is passed ('{}'),
preloaded polyline barriers are ignored.
polygonBarriers - The set of polygon barriers loaded as network
locations during analysis. If polygon barriers
are not specified, preloaded polygon barriers
from the map document are used in the analysis.
If an empty json object is passed ('{}'),
preloaded polygon barriers are ignored.
travelMode - Travel modes provide override values that help you
quickly and consistently model a vehicle or mode of
transportation. The chosen travel mode must be
preconfigured on the network dataset that the
service area service references.
attributeParameterValues - A set of attribute parameter values that
can be parameterized to determine which
network elements can be used by a vehicle.
defaultBreaks - A comma-separated list of doubles. The default is
defined in the network analysis layer.
excludeSourcesFromPolygons - A comma-separated list of string names.
The default is defined in the network
analysis layer.
mergeSimilarPolygonRanges - If true, similar ranges will be merged
in the result polygons. The default is
defined in the network analysis layer.
            outputLines - The type of line(s) generated. The default is as
defined in the network analysis layer.
outputPolygons - The type of polygon(s) generated. The default is
as defined in the network analysis layer.
overlapLines - Indicates if the lines should overlap from multiple
facilities. The default is defined in the network
analysis layer.
overlapPolygons - Indicates if the polygons for all facilities
should overlap. The default is defined in the
network analysis layer.
splitLinesAtBreaks - If true, lines will be split at breaks. The
default is defined in the network analysis
layer.
splitPolygonsAtBreaks - If true, polygons will be split at breaks.
The default is defined in the network
analysis layer.
trimOuterPolygon - If true, the outermost polygon (at the maximum
break value) will be trimmed. The default is
defined in the network analysis layer.
trimPolygonDistance - If polygons are being trimmed, provides the
distance to trim. The default is defined in
the network analysis layer.
trimPolygonDistanceUnits - If polygons are being trimmed, specifies
the units of the trimPolygonDistance. The
default is defined in the network analysis
layer.
returnFacilities - If true, facilities will be returned with the
analysis results. Default is false.
returnBarriers - If true, barriers will be returned with the analysis
results. Default is false.
returnPolylineBarriers - If true, polyline barriers will be returned
with the analysis results. Default is false.
returnPolygonBarriers - If true, polygon barriers will be returned
with the analysis results. Default is false.
outSR - The well-known ID of the spatial reference for the geometries
returned with the analysis results. If outSR is not specified,
the geometries are returned in the spatial reference of the map.
accumulateAttributeNames - The list of network attribute names to be
accumulated with the analysis. The default
is as defined in the network analysis layer.
The value should be specified as a comma
separated list of attribute names. You can
also specify a value of none to indicate that
no network attributes should be accumulated.
impedanceAttributeName - The network attribute name to be used as the
impedance attribute in analysis. The default
is as defined in the network analysis layer.
restrictionAttributeNames - The list of network attribute names to be
used as restrictions with the analysis. The
default is as defined in the network analysis
layer. The value should be specified as a
comma separated list of attribute names.
You can also specify a value of none to
indicate that no network attributes should
be used as restrictions.
restrictUTurns - Specifies how U-Turns should be restricted in the
analysis. The default is as defined in the network
analysis layer. Values: esriNFSBAllowBacktrack |
esriNFSBAtDeadEndsOnly | esriNFSBNoBacktrack |
esriNFSBAtDeadEndsAndIntersections
outputGeometryPrecision - The precision of the output geometry after
generalization. If 0, no generalization of
output geometry is performed. The default is
as defined in the network service configuration.
outputGeometryPrecisionUnits - The units of the output geometry precision.
The default value is esriUnknownUnits.
Values: esriUnknownUnits | esriCentimeters |
esriDecimalDegrees | esriDecimeters |
esriFeet | esriInches | esriKilometers |
esriMeters | esriMiles | esriMillimeters |
esriNauticalMiles | esriPoints | esriYards
useHierarchy - If true, the hierarchy attribute for the network should be
used in analysis. The default is as defined in the network
layer. This cannot be used in conjunction with outputLines.
timeOfDay - The date and time at the facility. If travelDirection is set
to esriNATravelDirectionToFacility, the timeOfDay value
                        specifies the arrival time at the facility. If travelDirection
is set to esriNATravelDirectionFromFacility, the timeOfDay
value is the departure time from the facility. The time zone
for timeOfDay is specified by timeOfDayIsUTC.
timeOfDayIsUTC - The time zone or zones of the timeOfDay parameter. When
set to false, which is the default value, the timeOfDay
parameter refers to the time zone or zones in which the
facilities are located. Therefore, the start or end times
of the service areas are staggered by time zone.
travelDirection - Options for traveling to or from the facility. The
default is defined in the network analysis layer.
returnZ - If true, Z values will be included in saPolygons and saPolylines
geometry if the network dataset is Z-aware.
]
if <ast.UnaryOp object at 0x7da1b1291c60> begin[:]
<ast.Raise object at 0x7da1b1291b70>
variable[url] assign[=] binary_operation[name[self]._url + constant[/solveServiceArea]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b1291900>, <ast.Constant object at 0x7da1b12918d0>], [<ast.Constant object at 0x7da1b12918a0>, <ast.Name object at 0x7da1b1291870>]]
if <ast.UnaryOp object at 0x7da1b1291810> begin[:]
call[name[params]][constant[barriers]] assign[=] name[barriers]
if <ast.UnaryOp object at 0x7da1b1291630> begin[:]
call[name[params]][constant[polylineBarriers]] assign[=] name[polylineBarriers]
if <ast.UnaryOp object at 0x7da1b1291450> begin[:]
call[name[params]][constant[polygonBarriers]] assign[=] name[polygonBarriers]
if <ast.UnaryOp object at 0x7da1b1291270> begin[:]
call[name[params]][constant[travelMode]] assign[=] name[travelMode]
if <ast.UnaryOp object at 0x7da1b1291090> begin[:]
call[name[params]][constant[attributeParameterValues]] assign[=] name[attributeParameterValues]
if <ast.UnaryOp object at 0x7da1b1290eb0> begin[:]
call[name[params]][constant[defaultBreaks]] assign[=] name[defaultBreaks]
if <ast.UnaryOp object at 0x7da1b1290cd0> begin[:]
call[name[params]][constant[excludeSourcesFromPolygons]] assign[=] name[excludeSourcesFromPolygons]
if <ast.UnaryOp object at 0x7da1b1290af0> begin[:]
call[name[params]][constant[mergeSimilarPolygonRanges]] assign[=] name[mergeSimilarPolygonRanges]
if <ast.UnaryOp object at 0x7da1b1290910> begin[:]
call[name[params]][constant[outputLines]] assign[=] name[outputLines]
if <ast.UnaryOp object at 0x7da1b1290730> begin[:]
call[name[params]][constant[outputPolygons]] assign[=] name[outputPolygons]
if <ast.UnaryOp object at 0x7da1b1290550> begin[:]
call[name[params]][constant[overlapLines]] assign[=] name[overlapLines]
if <ast.UnaryOp object at 0x7da1b1290370> begin[:]
call[name[params]][constant[overlapPolygons]] assign[=] name[overlapPolygons]
if <ast.UnaryOp object at 0x7da1b1290190> begin[:]
call[name[params]][constant[splitLinesAtBreaks]] assign[=] name[splitLinesAtBreaks]
if <ast.UnaryOp object at 0x7da18bc721a0> begin[:]
call[name[params]][constant[splitPolygonsAtBreaks]] assign[=] name[splitPolygonsAtBreaks]
if <ast.UnaryOp object at 0x7da18bc721d0> begin[:]
call[name[params]][constant[trimOuterPolygon]] assign[=] name[trimOuterPolygon]
if <ast.UnaryOp object at 0x7da18bc731c0> begin[:]
call[name[params]][constant[trimPolygonDistance]] assign[=] name[trimPolygonDistance]
if <ast.UnaryOp object at 0x7da18bc70df0> begin[:]
call[name[params]][constant[trimPolygonDistanceUnits]] assign[=] name[trimPolygonDistanceUnits]
if <ast.UnaryOp object at 0x7da18bc73a60> begin[:]
call[name[params]][constant[returnFacilities]] assign[=] name[returnFacilities]
if <ast.UnaryOp object at 0x7da18bc71750> begin[:]
call[name[params]][constant[returnBarriers]] assign[=] name[returnBarriers]
if <ast.UnaryOp object at 0x7da18bc73220> begin[:]
call[name[params]][constant[returnPolylineBarriers]] assign[=] name[returnPolylineBarriers]
if <ast.UnaryOp object at 0x7da18bc71390> begin[:]
call[name[params]][constant[returnPolygonBarriers]] assign[=] name[returnPolygonBarriers]
if <ast.UnaryOp object at 0x7da18bc71e10> begin[:]
call[name[params]][constant[outSR]] assign[=] name[outSR]
if <ast.UnaryOp object at 0x7da18bc703a0> begin[:]
call[name[params]][constant[accumulateAttributeNames]] assign[=] name[accumulateAttributeNames]
if <ast.UnaryOp object at 0x7da18bc715d0> begin[:]
call[name[params]][constant[impedanceAttributeName]] assign[=] name[impedanceAttributeName]
if <ast.UnaryOp object at 0x7da18bc722c0> begin[:]
call[name[params]][constant[restrictionAttributeNames]] assign[=] name[restrictionAttributeNames]
if <ast.UnaryOp object at 0x7da1b1237070> begin[:]
call[name[params]][constant[restrictUTurns]] assign[=] name[restrictUTurns]
if <ast.UnaryOp object at 0x7da1b1234490> begin[:]
call[name[params]][constant[outputGeometryPrecision]] assign[=] name[outputGeometryPrecision]
if <ast.UnaryOp object at 0x7da1b1237fa0> begin[:]
call[name[params]][constant[outputGeometryPrecisionUnits]] assign[=] name[outputGeometryPrecisionUnits]
if <ast.UnaryOp object at 0x7da1b1234a30> begin[:]
call[name[params]][constant[useHierarchy]] assign[=] name[useHierarchy]
if <ast.UnaryOp object at 0x7da1b1237a60> begin[:]
call[name[params]][constant[timeOfDay]] assign[=] name[timeOfDay]
if <ast.UnaryOp object at 0x7da1b1237280> begin[:]
call[name[params]][constant[timeOfDayIsUTC]] assign[=] name[timeOfDayIsUTC]
if <ast.UnaryOp object at 0x7da1b1236f80> begin[:]
call[name[params]][constant[travelDirection]] assign[=] name[travelDirection]
if <ast.UnaryOp object at 0x7da1b1234610> begin[:]
call[name[params]][constant[returnZ]] assign[=] name[returnZ]
if compare[call[name[method].lower, parameter[]] equal[==] constant[post]] begin[:]
return[call[name[self]._post, parameter[]]] | keyword[def] identifier[solveServiceArea] ( identifier[self] , identifier[facilities] , identifier[method] = literal[string] ,
identifier[barriers] = keyword[None] ,
identifier[polylineBarriers] = keyword[None] ,
identifier[polygonBarriers] = keyword[None] ,
identifier[travelMode] = keyword[None] ,
identifier[attributeParameterValues] = keyword[None] ,
identifier[defaultBreaks] = keyword[None] ,
identifier[excludeSourcesFromPolygons] = keyword[None] ,
identifier[mergeSimilarPolygonRanges] = keyword[None] ,
identifier[outputLines] = keyword[None] ,
identifier[outputPolygons] = keyword[None] ,
identifier[overlapLines] = keyword[None] ,
identifier[overlapPolygons] = keyword[None] ,
identifier[splitLinesAtBreaks] = keyword[None] ,
identifier[splitPolygonsAtBreaks] = keyword[None] ,
identifier[trimOuterPolygon] = keyword[None] ,
identifier[trimPolygonDistance] = keyword[None] ,
identifier[trimPolygonDistanceUnits] = keyword[None] ,
identifier[returnFacilities] = keyword[False] ,
identifier[returnBarriers] = keyword[False] ,
identifier[returnPolylineBarriers] = keyword[False] ,
identifier[returnPolygonBarriers] = keyword[False] ,
identifier[outSR] = keyword[None] ,
identifier[accumulateAttributeNames] = keyword[None] ,
identifier[impedanceAttributeName] = keyword[None] ,
identifier[restrictionAttributeNames] = keyword[None] ,
identifier[restrictUTurns] = keyword[None] ,
identifier[outputGeometryPrecision] = keyword[None] ,
identifier[outputGeometryPrecisionUnits] = literal[string] ,
identifier[useHierarchy] = keyword[None] ,
identifier[timeOfDay] = keyword[None] ,
identifier[timeOfDayIsUTC] = keyword[None] ,
identifier[travelDirection] = keyword[None] ,
identifier[returnZ] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[layerType] == literal[string] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[url] = identifier[self] . identifier[_url] + literal[string]
identifier[params] ={
literal[string] : literal[string] ,
literal[string] : identifier[facilities]
}
keyword[if] keyword[not] identifier[barriers] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[barriers]
keyword[if] keyword[not] identifier[polylineBarriers] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[polylineBarriers]
keyword[if] keyword[not] identifier[polygonBarriers] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[polygonBarriers]
keyword[if] keyword[not] identifier[travelMode] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[travelMode]
keyword[if] keyword[not] identifier[attributeParameterValues] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[attributeParameterValues]
keyword[if] keyword[not] identifier[defaultBreaks] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[defaultBreaks]
keyword[if] keyword[not] identifier[excludeSourcesFromPolygons] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[excludeSourcesFromPolygons]
keyword[if] keyword[not] identifier[mergeSimilarPolygonRanges] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[mergeSimilarPolygonRanges]
keyword[if] keyword[not] identifier[outputLines] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[outputLines]
keyword[if] keyword[not] identifier[outputPolygons] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[outputPolygons]
keyword[if] keyword[not] identifier[overlapLines] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[overlapLines]
keyword[if] keyword[not] identifier[overlapPolygons] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[overlapPolygons]
keyword[if] keyword[not] identifier[splitLinesAtBreaks] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[splitLinesAtBreaks]
keyword[if] keyword[not] identifier[splitPolygonsAtBreaks] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[splitPolygonsAtBreaks]
keyword[if] keyword[not] identifier[trimOuterPolygon] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[trimOuterPolygon]
keyword[if] keyword[not] identifier[trimPolygonDistance] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[trimPolygonDistance]
keyword[if] keyword[not] identifier[trimPolygonDistanceUnits] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[trimPolygonDistanceUnits]
keyword[if] keyword[not] identifier[returnFacilities] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[returnFacilities]
keyword[if] keyword[not] identifier[returnBarriers] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[returnBarriers]
keyword[if] keyword[not] identifier[returnPolylineBarriers] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[returnPolylineBarriers]
keyword[if] keyword[not] identifier[returnPolygonBarriers] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[returnPolygonBarriers]
keyword[if] keyword[not] identifier[outSR] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[outSR]
keyword[if] keyword[not] identifier[accumulateAttributeNames] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[accumulateAttributeNames]
keyword[if] keyword[not] identifier[impedanceAttributeName] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[impedanceAttributeName]
keyword[if] keyword[not] identifier[restrictionAttributeNames] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[restrictionAttributeNames]
keyword[if] keyword[not] identifier[restrictUTurns] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[restrictUTurns]
keyword[if] keyword[not] identifier[outputGeometryPrecision] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[outputGeometryPrecision]
keyword[if] keyword[not] identifier[outputGeometryPrecisionUnits] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[outputGeometryPrecisionUnits]
keyword[if] keyword[not] identifier[useHierarchy] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[useHierarchy]
keyword[if] keyword[not] identifier[timeOfDay] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[timeOfDay]
keyword[if] keyword[not] identifier[timeOfDayIsUTC] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[timeOfDayIsUTC]
keyword[if] keyword[not] identifier[travelDirection] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[travelDirection]
keyword[if] keyword[not] identifier[returnZ] keyword[is] keyword[None] :
identifier[params] [ literal[string] ]= identifier[returnZ]
keyword[if] identifier[method] . identifier[lower] ()== literal[string] :
keyword[return] identifier[self] . identifier[_post] ( identifier[url] = identifier[url] ,
identifier[param_dict] = identifier[params] ,
identifier[securityHandler] = identifier[self] . identifier[_securityHandler] ,
identifier[proxy_url] = identifier[self] . identifier[_proxy_url] ,
identifier[proxy_port] = identifier[self] . identifier[_proxy_port] )
keyword[else] :
keyword[return] identifier[self] . identifier[_get] ( identifier[url] = identifier[url] ,
identifier[param_dict] = identifier[params] ,
identifier[securityHandler] = identifier[self] . identifier[_securityHandler] ,
identifier[proxy_url] = identifier[self] . identifier[_proxy_url] ,
identifier[proxy_port] = identifier[self] . identifier[_proxy_port] ) | def solveServiceArea(self, facilities, method='POST', barriers=None, polylineBarriers=None, polygonBarriers=None, travelMode=None, attributeParameterValues=None, defaultBreaks=None, excludeSourcesFromPolygons=None, mergeSimilarPolygonRanges=None, outputLines=None, outputPolygons=None, overlapLines=None, overlapPolygons=None, splitLinesAtBreaks=None, splitPolygonsAtBreaks=None, trimOuterPolygon=None, trimPolygonDistance=None, trimPolygonDistanceUnits=None, returnFacilities=False, returnBarriers=False, returnPolylineBarriers=False, returnPolygonBarriers=False, outSR=None, accumulateAttributeNames=None, impedanceAttributeName=None, restrictionAttributeNames=None, restrictUTurns=None, outputGeometryPrecision=None, outputGeometryPrecisionUnits='esriUnknownUnits', useHierarchy=None, timeOfDay=None, timeOfDayIsUTC=None, travelDirection=None, returnZ=False):
""" The solve service area operation is performed on a network layer
        resource of type service area (layerType is esriNAServerServiceAreaLayer).
You can provide arguments to the solve service area operation as
query parameters.
Inputs:
facilities - The set of facilities loaded as network locations
during analysis. Facilities can be specified using
a simple comma / semi-colon based syntax or as a
JSON structure. If facilities are not specified,
preloaded facilities from the map document are used
in the analysis. If an empty json object is passed
('{}') preloaded facilities are ignored.
barriers - The set of barriers loaded as network locations during
analysis. Barriers can be specified using a simple
comma/semicolon-based syntax or as a JSON structure.
If barriers are not specified, preloaded barriers from
the map document are used in the analysis. If an empty
json object is passed ('{}'), preloaded barriers are
ignored.
polylineBarriers - The set of polyline barriers loaded as network
locations during analysis. If polyline barriers
are not specified, preloaded polyline barriers
from the map document are used in the analysis.
If an empty json object is passed ('{}'),
preloaded polyline barriers are ignored.
polygonBarriers - The set of polygon barriers loaded as network
locations during analysis. If polygon barriers
are not specified, preloaded polygon barriers
from the map document are used in the analysis.
If an empty json object is passed ('{}'),
preloaded polygon barriers are ignored.
travelMode - Travel modes provide override values that help you
quickly and consistently model a vehicle or mode of
transportation. The chosen travel mode must be
preconfigured on the network dataset that the
service area service references.
attributeParameterValues - A set of attribute parameter values that
can be parameterized to determine which
network elements can be used by a vehicle.
defaultBreaks - A comma-separated list of doubles. The default is
defined in the network analysis layer.
excludeSourcesFromPolygons - A comma-separated list of string names.
The default is defined in the network
analysis layer.
mergeSimilarPolygonRanges - If true, similar ranges will be merged
in the result polygons. The default is
defined in the network analysis layer.
            outputLines - The type of line(s) generated. The default is as
defined in the network analysis layer.
outputPolygons - The type of polygon(s) generated. The default is
as defined in the network analysis layer.
overlapLines - Indicates if the lines should overlap from multiple
facilities. The default is defined in the network
analysis layer.
overlapPolygons - Indicates if the polygons for all facilities
should overlap. The default is defined in the
network analysis layer.
splitLinesAtBreaks - If true, lines will be split at breaks. The
default is defined in the network analysis
layer.
splitPolygonsAtBreaks - If true, polygons will be split at breaks.
The default is defined in the network
analysis layer.
trimOuterPolygon - If true, the outermost polygon (at the maximum
break value) will be trimmed. The default is
defined in the network analysis layer.
trimPolygonDistance - If polygons are being trimmed, provides the
distance to trim. The default is defined in
the network analysis layer.
trimPolygonDistanceUnits - If polygons are being trimmed, specifies
the units of the trimPolygonDistance. The
default is defined in the network analysis
layer.
returnFacilities - If true, facilities will be returned with the
analysis results. Default is false.
returnBarriers - If true, barriers will be returned with the analysis
results. Default is false.
returnPolylineBarriers - If true, polyline barriers will be returned
with the analysis results. Default is false.
returnPolygonBarriers - If true, polygon barriers will be returned
with the analysis results. Default is false.
outSR - The well-known ID of the spatial reference for the geometries
returned with the analysis results. If outSR is not specified,
the geometries are returned in the spatial reference of the map.
accumulateAttributeNames - The list of network attribute names to be
accumulated with the analysis. The default
is as defined in the network analysis layer.
The value should be specified as a comma
separated list of attribute names. You can
also specify a value of none to indicate that
no network attributes should be accumulated.
impedanceAttributeName - The network attribute name to be used as the
impedance attribute in analysis. The default
is as defined in the network analysis layer.
restrictionAttributeNames - The list of network attribute names to be
used as restrictions with the analysis. The
default is as defined in the network analysis
layer. The value should be specified as a
comma separated list of attribute names.
You can also specify a value of none to
indicate that no network attributes should
be used as restrictions.
restrictUTurns - Specifies how U-Turns should be restricted in the
analysis. The default is as defined in the network
analysis layer. Values: esriNFSBAllowBacktrack |
esriNFSBAtDeadEndsOnly | esriNFSBNoBacktrack |
esriNFSBAtDeadEndsAndIntersections
outputGeometryPrecision - The precision of the output geometry after
generalization. If 0, no generalization of
output geometry is performed. The default is
as defined in the network service configuration.
outputGeometryPrecisionUnits - The units of the output geometry precision.
The default value is esriUnknownUnits.
Values: esriUnknownUnits | esriCentimeters |
esriDecimalDegrees | esriDecimeters |
esriFeet | esriInches | esriKilometers |
esriMeters | esriMiles | esriMillimeters |
esriNauticalMiles | esriPoints | esriYards
useHierarchy - If true, the hierarchy attribute for the network should be
used in analysis. The default is as defined in the network
layer. This cannot be used in conjunction with outputLines.
timeOfDay - The date and time at the facility. If travelDirection is set
to esriNATravelDirectionToFacility, the timeOfDay value
                        specifies the arrival time at the facility. If travelDirection
is set to esriNATravelDirectionFromFacility, the timeOfDay
value is the departure time from the facility. The time zone
for timeOfDay is specified by timeOfDayIsUTC.
timeOfDayIsUTC - The time zone or zones of the timeOfDay parameter. When
set to false, which is the default value, the timeOfDay
parameter refers to the time zone or zones in which the
facilities are located. Therefore, the start or end times
of the service areas are staggered by time zone.
travelDirection - Options for traveling to or from the facility. The
default is defined in the network analysis layer.
returnZ - If true, Z values will be included in saPolygons and saPolylines
geometry if the network dataset is Z-aware.
"""
if not self.layerType == 'esriNAServerServiceAreaLayer':
raise ValueError('The solveServiceArea operation is supported on a network layer of Service Area type only') # depends on [control=['if'], data=[]]
url = self._url + '/solveServiceArea'
params = {'f': 'json', 'facilities': facilities}
if not barriers is None:
params['barriers'] = barriers # depends on [control=['if'], data=[]]
if not polylineBarriers is None:
params['polylineBarriers'] = polylineBarriers # depends on [control=['if'], data=[]]
if not polygonBarriers is None:
params['polygonBarriers'] = polygonBarriers # depends on [control=['if'], data=[]]
if not travelMode is None:
params['travelMode'] = travelMode # depends on [control=['if'], data=[]]
if not attributeParameterValues is None:
params['attributeParameterValues'] = attributeParameterValues # depends on [control=['if'], data=[]]
if not defaultBreaks is None:
params['defaultBreaks'] = defaultBreaks # depends on [control=['if'], data=[]]
if not excludeSourcesFromPolygons is None:
params['excludeSourcesFromPolygons'] = excludeSourcesFromPolygons # depends on [control=['if'], data=[]]
if not mergeSimilarPolygonRanges is None:
params['mergeSimilarPolygonRanges'] = mergeSimilarPolygonRanges # depends on [control=['if'], data=[]]
if not outputLines is None:
params['outputLines'] = outputLines # depends on [control=['if'], data=[]]
if not outputPolygons is None:
params['outputPolygons'] = outputPolygons # depends on [control=['if'], data=[]]
if not overlapLines is None:
params['overlapLines'] = overlapLines # depends on [control=['if'], data=[]]
if not overlapPolygons is None:
params['overlapPolygons'] = overlapPolygons # depends on [control=['if'], data=[]]
if not splitLinesAtBreaks is None:
params['splitLinesAtBreaks'] = splitLinesAtBreaks # depends on [control=['if'], data=[]]
if not splitPolygonsAtBreaks is None:
params['splitPolygonsAtBreaks'] = splitPolygonsAtBreaks # depends on [control=['if'], data=[]]
if not trimOuterPolygon is None:
params['trimOuterPolygon'] = trimOuterPolygon # depends on [control=['if'], data=[]]
if not trimPolygonDistance is None:
params['trimPolygonDistance'] = trimPolygonDistance # depends on [control=['if'], data=[]]
if not trimPolygonDistanceUnits is None:
params['trimPolygonDistanceUnits'] = trimPolygonDistanceUnits # depends on [control=['if'], data=[]]
if not returnFacilities is None:
params['returnFacilities'] = returnFacilities # depends on [control=['if'], data=[]]
if not returnBarriers is None:
params['returnBarriers'] = returnBarriers # depends on [control=['if'], data=[]]
if not returnPolylineBarriers is None:
params['returnPolylineBarriers'] = returnPolylineBarriers # depends on [control=['if'], data=[]]
if not returnPolygonBarriers is None:
params['returnPolygonBarriers'] = returnPolygonBarriers # depends on [control=['if'], data=[]]
if not outSR is None:
params['outSR'] = outSR # depends on [control=['if'], data=[]]
if not accumulateAttributeNames is None:
params['accumulateAttributeNames'] = accumulateAttributeNames # depends on [control=['if'], data=[]]
if not impedanceAttributeName is None:
params['impedanceAttributeName'] = impedanceAttributeName # depends on [control=['if'], data=[]]
if not restrictionAttributeNames is None:
params['restrictionAttributeNames'] = restrictionAttributeNames # depends on [control=['if'], data=[]]
if not restrictUTurns is None:
params['restrictUTurns'] = restrictUTurns # depends on [control=['if'], data=[]]
if not outputGeometryPrecision is None:
params['outputGeometryPrecision'] = outputGeometryPrecision # depends on [control=['if'], data=[]]
if not outputGeometryPrecisionUnits is None:
params['outputGeometryPrecisionUnits'] = outputGeometryPrecisionUnits # depends on [control=['if'], data=[]]
if not useHierarchy is None:
params['useHierarchy'] = useHierarchy # depends on [control=['if'], data=[]]
if not timeOfDay is None:
params['timeOfDay'] = timeOfDay # depends on [control=['if'], data=[]]
if not timeOfDayIsUTC is None:
params['timeOfDayIsUTC'] = timeOfDayIsUTC # depends on [control=['if'], data=[]]
if not travelDirection is None:
params['travelDirection'] = travelDirection # depends on [control=['if'], data=[]]
if not returnZ is None:
params['returnZ'] = returnZ # depends on [control=['if'], data=[]]
if method.lower() == 'post':
return self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port) # depends on [control=['if'], data=[]]
else:
return self._get(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port) |
def metablockLength(self):
"""Read MNIBBLES and meta block length;
        if the block is empty, skip it and return True.
"""
self.MLEN = self.verboseRead(MetablockLengthAlphabet())
if self.MLEN:
return False
        # empty block; skip it and return True
self.verboseRead(ReservedAlphabet())
MSKIP = self.verboseRead(SkipLengthAlphabet())
self.verboseRead(FillerAlphabet(streamPos=self.stream.pos))
self.stream.pos += 8*MSKIP
print("Skipping to {:x}".format(self.stream.pos>>3))
return True | def function[metablockLength, parameter[self]]:
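
The alphabet reader classes used above (MetablockLengthAlphabet, SkipLengthAlphabet, and friends) are assumed to be defined elsewhere in this codebase; the part that stands alone is the bit-level bookkeeping, where the stream position is counted in bits and MSKIP in bytes:

# Standalone sketch of the skip arithmetic: advance by MSKIP bytes and
# report the new position as a byte offset (pos >> 3 drops the bit part).
pos = 128                  # hypothetical bit position, i.e. byte 0x10
MSKIP = 4                  # bytes to skip
pos += 8 * MSKIP
print("Skipping to {:x}".format(pos >> 3))   # -> Skipping to 14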
constant[Read MNIBBLES and meta block length;
if empty block, skip block and return true.
]
name[self].MLEN assign[=] call[name[self].verboseRead, parameter[call[name[MetablockLengthAlphabet], parameter[]]]]
if name[self].MLEN begin[:]
return[constant[False]]
call[name[self].verboseRead, parameter[call[name[ReservedAlphabet], parameter[]]]]
variable[MSKIP] assign[=] call[name[self].verboseRead, parameter[call[name[SkipLengthAlphabet], parameter[]]]]
call[name[self].verboseRead, parameter[call[name[FillerAlphabet], parameter[]]]]
<ast.AugAssign object at 0x7da204623a60>
call[name[print], parameter[call[constant[Skipping to {:x}].format, parameter[binary_operation[name[self].stream.pos <ast.RShift object at 0x7da2590d6a40> constant[3]]]]]]
return[constant[True]] | keyword[def] identifier[metablockLength] ( identifier[self] ):
literal[string]
identifier[self] . identifier[MLEN] = identifier[self] . identifier[verboseRead] ( identifier[MetablockLengthAlphabet] ())
keyword[if] identifier[self] . identifier[MLEN] :
keyword[return] keyword[False]
identifier[self] . identifier[verboseRead] ( identifier[ReservedAlphabet] ())
identifier[MSKIP] = identifier[self] . identifier[verboseRead] ( identifier[SkipLengthAlphabet] ())
identifier[self] . identifier[verboseRead] ( identifier[FillerAlphabet] ( identifier[streamPos] = identifier[self] . identifier[stream] . identifier[pos] ))
identifier[self] . identifier[stream] . identifier[pos] += literal[int] * identifier[MSKIP]
identifier[print] ( literal[string] . identifier[format] ( identifier[self] . identifier[stream] . identifier[pos] >> literal[int] ))
keyword[return] keyword[True] | def metablockLength(self):
"""Read MNIBBLES and meta block length;
if empty block, skip block and return true.
"""
self.MLEN = self.verboseRead(MetablockLengthAlphabet())
if self.MLEN:
return False # depends on [control=['if'], data=[]]
    # empty block; skip it and return True
self.verboseRead(ReservedAlphabet())
MSKIP = self.verboseRead(SkipLengthAlphabet())
self.verboseRead(FillerAlphabet(streamPos=self.stream.pos))
self.stream.pos += 8 * MSKIP
print('Skipping to {:x}'.format(self.stream.pos >> 3))
return True |
def SETNAE(cpu, dest):
"""
Sets byte if not above or equal.
:param cpu: current CPU.
:param dest: destination operand.
"""
dest.write(Operators.ITEBV(dest.size, cpu.CF, 1, 0)) | def function[SETNAE, parameter[cpu, dest]]:
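
Operators.ITEBV is assumed here to behave as an if-then-else over bitvectors of the given width, as in symbolic-execution frameworks; a concrete-only Python analogue of the write above might look like this:

def itebv(size, cond, true_value, false_value):
    # Concrete sketch only; the real operator also accepts symbolic conditions.
    mask = (1 << size) - 1
    return (true_value if cond else false_value) & mask

# SETNAE stores 1 exactly when CF is set (the last compare was "below"):
assert itebv(8, True, 1, 0) == 1
assert itebv(8, False, 1, 0) == 0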
constant[
Sets byte if not above or equal.
:param cpu: current CPU.
:param dest: destination operand.
]
call[name[dest].write, parameter[call[name[Operators].ITEBV, parameter[name[dest].size, name[cpu].CF, constant[1], constant[0]]]]] | keyword[def] identifier[SETNAE] ( identifier[cpu] , identifier[dest] ):
literal[string]
identifier[dest] . identifier[write] ( identifier[Operators] . identifier[ITEBV] ( identifier[dest] . identifier[size] , identifier[cpu] . identifier[CF] , literal[int] , literal[int] )) | def SETNAE(cpu, dest):
"""
Sets byte if not above or equal.
:param cpu: current CPU.
:param dest: destination operand.
"""
dest.write(Operators.ITEBV(dest.size, cpu.CF, 1, 0)) |
def _encode_multipart_formdata(fields, files):
"""
Create a multipart encoded form for use in PUTing and POSTing.
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------A_vEry_UnlikelY_bouNdary_$'
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append(str('Content-Disposition: form-data; name="%s"' % key))
L.append('')
L.append(value)
for (key, filename, value) in files:
L.append('--' + BOUNDARY)
L.append(str('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)))
L.append('Content-Type: %s' % get_content_type(filename))
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body | def function[_encode_multipart_formdata, parameter[fields, files]]:
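
A usage sketch for the encoder above; note it relies on a get_content_type helper defined elsewhere. The field names, filename, host, and path are placeholders, and since the docstring targets the Python 2 httplib API, http.client is used below as a rough modern equivalent:

fields = [("author", "alice"), ("comment", "initial upload")]
files = [("attachment", "notes.txt", "hello world\n")]
content_type, body = _encode_multipart_formdata(fields, files)

import http.client
conn = http.client.HTTPConnection("example.com")
conn.request("POST", "/upload", body.encode("utf-8"),
             {"Content-Type": content_type})
response = conn.getresponse()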
constant[
Create a multipart encoded form for use in PUTing and POSTing.
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
]
variable[BOUNDARY] assign[=] constant[----------A_vEry_UnlikelY_bouNdary_$]
variable[CRLF] assign[=] constant[
]
variable[L] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0aa0c10>, <ast.Name object at 0x7da1b0aa0c40>]]] in starred[name[fields]] begin[:]
call[name[L].append, parameter[binary_operation[constant[--] + name[BOUNDARY]]]]
call[name[L].append, parameter[call[name[str], parameter[binary_operation[constant[Content-Disposition: form-data; name="%s"] <ast.Mod object at 0x7da2590d6920> name[key]]]]]]
call[name[L].append, parameter[constant[]]]
call[name[L].append, parameter[name[value]]]
for taget[tuple[[<ast.Name object at 0x7da1b0aa0970>, <ast.Name object at 0x7da1b0aa2ec0>, <ast.Name object at 0x7da1b0aa2d70>]]] in starred[name[files]] begin[:]
call[name[L].append, parameter[binary_operation[constant[--] + name[BOUNDARY]]]]
call[name[L].append, parameter[call[name[str], parameter[binary_operation[constant[Content-Disposition: form-data; name="%s"; filename="%s"] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0aa0730>, <ast.Name object at 0x7da1b0aa0d30>]]]]]]]
call[name[L].append, parameter[binary_operation[constant[Content-Type: %s] <ast.Mod object at 0x7da2590d6920> call[name[get_content_type], parameter[name[filename]]]]]]
call[name[L].append, parameter[constant[]]]
call[name[L].append, parameter[name[value]]]
call[name[L].append, parameter[binary_operation[binary_operation[constant[--] + name[BOUNDARY]] + constant[--]]]]
call[name[L].append, parameter[constant[]]]
variable[body] assign[=] call[name[CRLF].join, parameter[name[L]]]
variable[content_type] assign[=] binary_operation[constant[multipart/form-data; boundary=%s] <ast.Mod object at 0x7da2590d6920> name[BOUNDARY]]
return[tuple[[<ast.Name object at 0x7da1b0aa0520>, <ast.Name object at 0x7da1b0a705e0>]]] | keyword[def] identifier[_encode_multipart_formdata] ( identifier[fields] , identifier[files] ):
literal[string]
identifier[BOUNDARY] = literal[string]
identifier[CRLF] = literal[string]
identifier[L] =[]
keyword[for] ( identifier[key] , identifier[value] ) keyword[in] identifier[fields] :
identifier[L] . identifier[append] ( literal[string] + identifier[BOUNDARY] )
identifier[L] . identifier[append] ( identifier[str] ( literal[string] % identifier[key] ))
identifier[L] . identifier[append] ( literal[string] )
identifier[L] . identifier[append] ( identifier[value] )
keyword[for] ( identifier[key] , identifier[filename] , identifier[value] ) keyword[in] identifier[files] :
identifier[L] . identifier[append] ( literal[string] + identifier[BOUNDARY] )
identifier[L] . identifier[append] ( identifier[str] ( literal[string] %( identifier[key] , identifier[filename] )))
identifier[L] . identifier[append] ( literal[string] % identifier[get_content_type] ( identifier[filename] ))
identifier[L] . identifier[append] ( literal[string] )
identifier[L] . identifier[append] ( identifier[value] )
identifier[L] . identifier[append] ( literal[string] + identifier[BOUNDARY] + literal[string] )
identifier[L] . identifier[append] ( literal[string] )
identifier[body] = identifier[CRLF] . identifier[join] ( identifier[L] )
identifier[content_type] = literal[string] % identifier[BOUNDARY]
keyword[return] identifier[content_type] , identifier[body] | def _encode_multipart_formdata(fields, files):
"""
Create a multipart encoded form for use in PUTing and POSTing.
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------A_vEry_UnlikelY_bouNdary_$'
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append(str('Content-Disposition: form-data; name="%s"' % key))
L.append('')
L.append(value) # depends on [control=['for'], data=[]]
for (key, filename, value) in files:
L.append('--' + BOUNDARY)
L.append(str('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)))
L.append('Content-Type: %s' % get_content_type(filename))
L.append('')
L.append(value) # depends on [control=['for'], data=[]]
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return (content_type, body) |
def main_subtask(module_name, method_prefixs=["task_"], optional_params={}):
"""
http://stackoverflow.com/questions/3217673/why-use-argparse-rather-than-optparse
As of 2.7, optparse is deprecated, and will hopefully go away in the future
"""
parser = argparse.ArgumentParser(description="")
parser.add_argument('method_name', help='')
for optional_param_key, optional_param_help in optional_params.items():
parser.add_argument(optional_param_key,
required=False,
help=optional_param_help)
# parser.add_argument('--reset_cache', required=False, help='')
args = parser.parse_args()
for prefix in method_prefixs:
if args.method_name.startswith(prefix):
if prefix == "test_":
# Remove all handlers associated with the root logger object.
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
# Reconfigure logging again, this time with a file.
logging.basicConfig(format='[%(levelname)s][%(asctime)s][%(module)s][%(funcName)s][%(lineno)s] %(message)s', level=logging.DEBUG) # noqa
# http://stackoverflow.com/questions/17734618/dynamic-method-call-in-python-2-7-using-strings-of-method-names
            the_method = getattr(sys.modules[module_name], args.method_name, None)
            if the_method:
                the_method(args=vars(args))
                logging.info("done")
                return
logging.info("unsupported") | def function[main_subtask, parameter[module_name, method_prefixs, optional_params]]:
constant[
http://stackoverflow.com/questions/3217673/why-use-argparse-rather-than-optparse
As of 2.7, optparse is deprecated, and will hopefully go away in the future
]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[method_name]]]
for taget[tuple[[<ast.Name object at 0x7da1b23724a0>, <ast.Name object at 0x7da1b2373190>]]] in starred[call[name[optional_params].items, parameter[]]] begin[:]
call[name[parser].add_argument, parameter[name[optional_param_key]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[]]
for taget[name[prefix]] in starred[name[method_prefixs]] begin[:]
if call[name[args].method_name.startswith, parameter[name[prefix]]] begin[:]
if compare[name[prefix] equal[==] constant[test_]] begin[:]
for taget[name[handler]] in starred[call[name[logging].root.handlers][<ast.Slice object at 0x7da1b2371ae0>]] begin[:]
call[name[logging].root.removeHandler, parameter[name[handler]]]
call[name[logging].basicConfig, parameter[]]
variable[the_method] assign[=] call[name[getattr], parameter[call[name[sys].modules][name[module_name]], name[args].method_name]]
if name[the_method] begin[:]
call[name[the_method], parameter[]]
call[name[logging].info, parameter[constant[done]]]
return[None]
call[name[logging].info, parameter[constant[unsupported]]] | keyword[def] identifier[main_subtask] ( identifier[module_name] , identifier[method_prefixs] =[ literal[string] ], identifier[optional_params] ={}):
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] )
keyword[for] identifier[optional_param_key] , identifier[optional_param_help] keyword[in] identifier[optional_params] . identifier[items] ():
identifier[parser] . identifier[add_argument] ( identifier[optional_param_key] ,
identifier[required] = keyword[False] ,
identifier[help] = identifier[optional_param_help] )
identifier[args] = identifier[parser] . identifier[parse_args] ()
keyword[for] identifier[prefix] keyword[in] identifier[method_prefixs] :
keyword[if] identifier[args] . identifier[method_name] . identifier[startswith] ( identifier[prefix] ):
keyword[if] identifier[prefix] == literal[string] :
keyword[for] identifier[handler] keyword[in] identifier[logging] . identifier[root] . identifier[handlers] [:]:
identifier[logging] . identifier[root] . identifier[removeHandler] ( identifier[handler] )
identifier[logging] . identifier[basicConfig] ( identifier[format] = literal[string] , identifier[level] = identifier[logging] . identifier[DEBUG] )
identifier[the_method] = identifier[getattr] ( identifier[sys] . identifier[modules] [ identifier[module_name] ], identifier[args] . identifier[method_name] )
keyword[if] identifier[the_method] :
identifier[the_method] ( identifier[args] = identifier[vars] ( identifier[args] ))
identifier[logging] . identifier[info] ( literal[string] )
keyword[return]
keyword[else] :
keyword[break]
identifier[logging] . identifier[info] ( literal[string] ) | def main_subtask(module_name, method_prefixs=['task_'], optional_params={}):
"""
http://stackoverflow.com/questions/3217673/why-use-argparse-rather-than-optparse
As of 2.7, optparse is deprecated, and will hopefully go away in the future
"""
parser = argparse.ArgumentParser(description='')
parser.add_argument('method_name', help='')
for (optional_param_key, optional_param_help) in optional_params.items():
parser.add_argument(optional_param_key, required=False, help=optional_param_help) # depends on [control=['for'], data=[]]
# parser.add_argument('--reset_cache', required=False, help='')
args = parser.parse_args()
for prefix in method_prefixs:
if args.method_name.startswith(prefix):
if prefix == 'test_':
# Remove all handlers associated with the root logger object.
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler) # depends on [control=['for'], data=['handler']]
# Reconfigure logging again, this time with a file.
logging.basicConfig(format='[%(levelname)s][%(asctime)s][%(module)s][%(funcName)s][%(lineno)s] %(message)s', level=logging.DEBUG) # noqa # depends on [control=['if'], data=[]]
# http://stackoverflow.com/questions/17734618/dynamic-method-call-in-python-2-7-using-strings-of-method-names
the_method = getattr(sys.modules[module_name], args.method_name)
if the_method:
the_method(args=vars(args))
logging.info('done')
return # depends on [control=['if'], data=[]]
else:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['prefix']]
logging.info('unsupported') |
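A usage sketch for the dispatcher above; the module and task names are hypothetical, not from the original source.

# Hypothetical caller module (illustrative names only): it defines
# task_*/test_* functions and lets main_subtask dispatch by CLI name.
import logging

def task_greet(args=None):
    logging.info('hello, args=%s', args)

if __name__ == '__main__':
    # `python mymodule.py task_greet` resolves task_greet via getattr and calls it
    main_subtask(__name__, method_prefixs=['task_', 'test_'])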
def crop_coords(img, padding):
"""Find coordinates describing extent of non-zero portion of image, padded"""
coords = np.nonzero(img)
empty_axis_exists = np.any([len(arr) == 0 for arr in coords])
if empty_axis_exists:
end_coords = img.shape
beg_coords = np.zeros((1, img.ndim)).astype(int).flatten()
else:
min_coords = np.array([arr.min() for arr in coords])
max_coords = np.array([arr.max() for arr in coords])
beg_coords = np.fmax(0, min_coords - padding)
end_coords = np.fmin(img.shape, max_coords + padding)
return beg_coords, end_coords | def function[crop_coords, parameter[img, padding]]:
constant[Find coordinates describing extent of non-zero portion of image, padded]
variable[coords] assign[=] call[name[np].nonzero, parameter[name[img]]]
variable[empty_axis_exists] assign[=] call[name[np].any, parameter[<ast.ListComp object at 0x7da18bccac80>]]
if name[empty_axis_exists] begin[:]
variable[end_coords] assign[=] name[img].shape
variable[beg_coords] assign[=] call[call[call[name[np].zeros, parameter[tuple[[<ast.Constant object at 0x7da18bccb340>, <ast.Attribute object at 0x7da18bccb3d0>]]]].astype, parameter[name[int]]].flatten, parameter[]]
return[tuple[[<ast.Name object at 0x7da18bcc9c00>, <ast.Name object at 0x7da18bcc80a0>]]] | keyword[def] identifier[crop_coords] ( identifier[img] , identifier[padding] ):
literal[string]
identifier[coords] = identifier[np] . identifier[nonzero] ( identifier[img] )
identifier[empty_axis_exists] = identifier[np] . identifier[any] ([ identifier[len] ( identifier[arr] )== literal[int] keyword[for] identifier[arr] keyword[in] identifier[coords] ])
keyword[if] identifier[empty_axis_exists] :
identifier[end_coords] = identifier[img] . identifier[shape]
identifier[beg_coords] = identifier[np] . identifier[zeros] (( literal[int] , identifier[img] . identifier[ndim] )). identifier[astype] ( identifier[int] ). identifier[flatten] ()
keyword[else] :
identifier[min_coords] = identifier[np] . identifier[array] ([ identifier[arr] . identifier[min] () keyword[for] identifier[arr] keyword[in] identifier[coords] ])
identifier[max_coords] = identifier[np] . identifier[array] ([ identifier[arr] . identifier[max] () keyword[for] identifier[arr] keyword[in] identifier[coords] ])
identifier[beg_coords] = identifier[np] . identifier[fmax] ( literal[int] , identifier[min_coords] - identifier[padding] )
identifier[end_coords] = identifier[np] . identifier[fmin] ( identifier[img] . identifier[shape] , identifier[max_coords] + identifier[padding] )
keyword[return] identifier[beg_coords] , identifier[end_coords] | def crop_coords(img, padding):
"""Find coordinates describing extent of non-zero portion of image, padded"""
coords = np.nonzero(img)
empty_axis_exists = np.any([len(arr) == 0 for arr in coords])
if empty_axis_exists:
end_coords = img.shape
beg_coords = np.zeros((1, img.ndim)).astype(int).flatten() # depends on [control=['if'], data=[]]
else:
min_coords = np.array([arr.min() for arr in coords])
max_coords = np.array([arr.max() for arr in coords])
beg_coords = np.fmax(0, min_coords - padding)
end_coords = np.fmin(img.shape, max_coords + padding)
return (beg_coords, end_coords) |
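A quick worked example of crop_coords on a toy image; numpy is assumed imported as np, as in the body above.

import numpy as np

img = np.zeros((8, 8), dtype=int)
img[3:5, 2:6] = 1                       # non-zero block: rows 3-4, cols 2-5
beg, end = crop_coords(img, padding=1)
# beg == array([2, 1]), end == array([5, 6]): min/max indices +/- padding,
# clipped to the image bounds
cropped = img[beg[0]:end[0], beg[1]:end[1]]   # shape (3, 5)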
def select_lamb(self, lamb=None, out=bool):
""" Return a wavelength index array
Return a boolean or integer index array, hereafter called 'ind'
The array refers to the reference time vector self.ddataRef['lamb']
Parameters
----------
lamb : None / float / np.ndarray / list / tuple
The wavelength values to be selected:
- None : ind matches all wavelength values
- float : ind is True only for the wavelength closest to lamb
- np.ndarray : ind True for the wavelengths closest to each value in lamb
- list (len()==2): ind True for wavelengths in [lamb[0],lamb[1]]
- tuple (len()==2): ind True for wavelengths outside ]lamb[0];lamb[1][
out : type
Specifies the type of the output index array:
- bool : return a boolean array of shape (self.ddataRef['nlamb'],)
- int : return the array as integers indices
Return
------
ind : np.ndarray
The array of indices, of the dtype specified by the keyword argument out
"""
if not self._isSpectral():
msg = ""
raise Exception(msg)
assert out in [bool,int]
ind = _select_ind(lamb, self._ddataRef['lamb'], self._ddataRef['nlamb'])
if out is int:
ind = ind.nonzero()[0]
return ind | def function[select_lamb, parameter[self, lamb, out]]:
constant[ Return a wavelength index array
Return a boolean or integer index array, hereafter called 'ind'
The array refers to the reference time vector self.ddataRef['lamb']
Parameters
----------
lamb : None / float / np.ndarray / list / tuple
The time values to be selected:
- None : ind matches all wavelength values
- float : ind is True only for the wavelength closest to lamb
- np.ndarray : ind True only for the wavelength closest to lamb
- list (len()==2): ind True for wavelength in [lamb[0],lamb[1]]
- tuple (len()==2): ind True for wavelength outside ]t[0];t[1][
out : type
Specifies the type of the output index array:
- bool : return a boolean array of shape (self.ddataRef['nlamb'],)
- int : return the array as integers indices
Return
------
ind : np.ndarray
The array of indices, of dtype specified by keywordarg out
]
if <ast.UnaryOp object at 0x7da207f011b0> begin[:]
variable[msg] assign[=] constant[]
<ast.Raise object at 0x7da207f01240>
assert[compare[name[out] in list[[<ast.Name object at 0x7da207f02bf0>, <ast.Name object at 0x7da207f01930>]]]]
variable[ind] assign[=] call[name[_select_ind], parameter[name[lamb], call[name[self]._ddataRef][constant[lamb]], call[name[self]._ddataRef][constant[nlamb]]]]
if compare[name[out] is name[int]] begin[:]
variable[ind] assign[=] call[call[name[ind].nonzero, parameter[]]][constant[0]]
return[name[ind]] | keyword[def] identifier[select_lamb] ( identifier[self] , identifier[lamb] = keyword[None] , identifier[out] = identifier[bool] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_isSpectral] ():
identifier[msg] = literal[string]
keyword[raise] identifier[Exception] ( identifier[msg] )
keyword[assert] identifier[out] keyword[in] [ identifier[bool] , identifier[int] ]
identifier[ind] = identifier[_select_ind] ( identifier[lamb] , identifier[self] . identifier[_ddataRef] [ literal[string] ], identifier[self] . identifier[_ddataRef] [ literal[string] ])
keyword[if] identifier[out] keyword[is] identifier[int] :
identifier[ind] = identifier[ind] . identifier[nonzero] ()[ literal[int] ]
keyword[return] identifier[ind] | def select_lamb(self, lamb=None, out=bool):
""" Return a wavelength index array
Return a boolean or integer index array, hereafter called 'ind'
The array refers to the reference time vector self.ddataRef['lamb']
Parameters
----------
lamb : None / float / np.ndarray / list / tuple
The wavelength values to be selected:
- None : ind matches all wavelength values
- float : ind is True only for the wavelength closest to lamb
- np.ndarray : ind True for the wavelengths closest to each value in lamb
- list (len()==2): ind True for wavelengths in [lamb[0],lamb[1]]
- tuple (len()==2): ind True for wavelengths outside ]lamb[0];lamb[1][
out : type
Specifies the type of the output index array:
- bool : return a boolean array of shape (self.ddataRef['nlamb'],)
- int : return the array as integers indices
Return
------
ind : np.ndarray
The array of indices, of dtype specified by keywordarg out
"""
if not self._isSpectral():
msg = 'select_lamb() is only available for spectral data'
raise Exception(msg) # depends on [control=['if'], data=[]]
assert out in [bool, int]
ind = _select_ind(lamb, self._ddataRef['lamb'], self._ddataRef['nlamb'])
if out is int:
ind = ind.nonzero()[0] # depends on [control=['if'], data=[]]
return ind |
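_select_ind is defined elsewhere in the module and not shown in this entry; a minimal sketch consistent with the documented semantics (nearest sample for scalars/arrays, inclusive interval for lists, exclusive complement for tuples) might look like:

import numpy as np

def _select_ind_sketch(v, ref, nRef):
    # Hypothetical stand-in for _select_ind, matching the docstring above
    ind = np.zeros((nRef,), dtype=bool)
    if v is None:
        ind[:] = True
    elif isinstance(v, (int, float, np.ndarray)):
        for vv in np.atleast_1d(v):
            ind[np.argmin(np.abs(ref - vv))] = True   # nearest sample(s)
    elif isinstance(v, list):
        ind = (ref >= v[0]) & (ref <= v[1])           # inside [v[0], v[1]]
    elif isinstance(v, tuple):
        ind = (ref < v[0]) | (ref > v[1])             # outside ]v[0], v[1][
    return ind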
def get(key, profile=None):
'''
Get a value from memcached
'''
conn = salt.utils.memcached.get_conn(profile)
return salt.utils.memcached.get(conn, key) | def function[get, parameter[key, profile]]:
constant[
Get a value from memcached
]
variable[conn] assign[=] call[name[salt].utils.memcached.get_conn, parameter[name[profile]]]
return[call[name[salt].utils.memcached.get, parameter[name[conn], name[key]]]] | keyword[def] identifier[get] ( identifier[key] , identifier[profile] = keyword[None] ):
literal[string]
identifier[conn] = identifier[salt] . identifier[utils] . identifier[memcached] . identifier[get_conn] ( identifier[profile] )
keyword[return] identifier[salt] . identifier[utils] . identifier[memcached] . identifier[get] ( identifier[conn] , identifier[key] ) | def get(key, profile=None):
"""
Get a value from memcached
"""
conn = salt.utils.memcached.get_conn(profile)
return salt.utils.memcached.get(conn, key) |
def get_possible_paths(base_path, path_regex):
"""
Looks for path_regex within base_path. Each match is append
in the returned list.
path_regex may contain subfolder structure.
If any part of the folder structure is a
:param base_path: str
:param path_regex: str
:return list of strings
"""
if not path_regex:
return []
if len(path_regex) < 1:
return []
if path_regex[0] == os.sep:
path_regex = path_regex[1:]
    if os.sep in path_regex:
        # split by os.sep and recurse into every matching subfolder
        node_names = path_regex.partition(os.sep)
        first_node = node_names[0]
        rest_nodes = node_names[2]
        folder_names = filter_list(os.listdir(base_path), first_node)
        files = []
        for nom in folder_names:
            new_base = op.join(base_path, nom)
            if op.isdir(new_base):
                # recursive results are already full paths; extend instead of
                # overwriting so matches from every subfolder are kept
                files.extend(get_possible_paths(new_base, rest_nodes))
        return files
    rest_files = filter_list(os.listdir(base_path), path_regex)
    files = [op.join(base_path, f) for f in rest_files]
return files | def function[get_possible_paths, parameter[base_path, path_regex]]:
constant[
Looks for path_regex within base_path. Each match is append
in the returned list.
path_regex may contain subfolder structure.
If any part of the folder structure is a
:param base_path: str
:param path_regex: str
:return list of strings
]
if <ast.UnaryOp object at 0x7da1afe38730> begin[:]
return[list[[]]]
if compare[call[name[len], parameter[name[path_regex]]] less[<] constant[1]] begin[:]
return[list[[]]]
if compare[call[name[path_regex]][constant[0]] equal[==] name[os].sep] begin[:]
variable[path_regex] assign[=] call[name[path_regex]][<ast.Slice object at 0x7da1afe386d0>]
variable[rest_files] assign[=] constant[]
if compare[name[os].sep in name[path_regex]] begin[:]
variable[node_names] assign[=] call[name[path_regex].partition, parameter[name[os].sep]]
variable[first_node] assign[=] call[name[node_names]][constant[0]]
variable[rest_nodes] assign[=] call[name[node_names]][constant[2]]
variable[folder_names] assign[=] call[name[filter_list], parameter[call[name[os].listdir, parameter[name[base_path]]], name[first_node]]]
for taget[name[nom]] in starred[name[folder_names]] begin[:]
variable[new_base] assign[=] call[name[op].join, parameter[name[base_path], name[nom]]]
if call[name[op].isdir, parameter[name[new_base]]] begin[:]
variable[rest_files] assign[=] call[name[get_possible_paths], parameter[name[new_base], name[rest_nodes]]]
variable[files] assign[=] list[[]]
if name[rest_files] begin[:]
variable[files] assign[=] <ast.ListComp object at 0x7da1afe0e020>
return[name[files]] | keyword[def] identifier[get_possible_paths] ( identifier[base_path] , identifier[path_regex] ):
literal[string]
keyword[if] keyword[not] identifier[path_regex] :
keyword[return] []
keyword[if] identifier[len] ( identifier[path_regex] )< literal[int] :
keyword[return] []
keyword[if] identifier[path_regex] [ literal[int] ]== identifier[os] . identifier[sep] :
identifier[path_regex] = identifier[path_regex] [ literal[int] :]
identifier[rest_files] = literal[string]
keyword[if] identifier[os] . identifier[sep] keyword[in] identifier[path_regex] :
identifier[node_names] = identifier[path_regex] . identifier[partition] ( identifier[os] . identifier[sep] )
identifier[first_node] = identifier[node_names] [ literal[int] ]
identifier[rest_nodes] = identifier[node_names] [ literal[int] ]
identifier[folder_names] = identifier[filter_list] ( identifier[os] . identifier[listdir] ( identifier[base_path] ), identifier[first_node] )
keyword[for] identifier[nom] keyword[in] identifier[folder_names] :
identifier[new_base] = identifier[op] . identifier[join] ( identifier[base_path] , identifier[nom] )
keyword[if] identifier[op] . identifier[isdir] ( identifier[new_base] ):
identifier[rest_files] = identifier[get_possible_paths] ( identifier[new_base] , identifier[rest_nodes] )
keyword[else] :
identifier[rest_files] = identifier[filter_list] ( identifier[os] . identifier[listdir] ( identifier[base_path] ), identifier[path_regex] )
identifier[files] =[]
keyword[if] identifier[rest_files] :
identifier[files] =[ identifier[op] . identifier[join] ( identifier[base_path] , identifier[f] ) keyword[for] identifier[f] keyword[in] identifier[rest_files] ]
keyword[return] identifier[files] | def get_possible_paths(base_path, path_regex):
"""
Looks for path_regex within base_path. Each match is append
in the returned list.
path_regex may contain subfolder structure.
If any part of the folder structure is a
:param base_path: str
:param path_regex: str
:return list of strings
"""
if not path_regex:
return [] # depends on [control=['if'], data=[]]
if len(path_regex) < 1:
return [] # depends on [control=['if'], data=[]]
if path_regex[0] == os.sep:
path_regex = path_regex[1:] # depends on [control=['if'], data=[]]
rest_files = ''
if os.sep in path_regex:
#split by os.sep
node_names = path_regex.partition(os.sep)
first_node = node_names[0]
rest_nodes = node_names[2]
folder_names = filter_list(os.listdir(base_path), first_node)
for nom in folder_names:
new_base = op.join(base_path, nom)
if op.isdir(new_base):
rest_files = get_possible_paths(new_base, rest_nodes) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['nom']] # depends on [control=['if'], data=['path_regex']]
else:
rest_files = filter_list(os.listdir(base_path), path_regex)
files = []
if rest_files:
files = [op.join(base_path, f) for f in rest_files] # depends on [control=['if'], data=[]]
return files |
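filter_list is not shown in this entry; it presumably keeps directory entries matching a regex. A hypothetical stand-in plus a usage sketch with illustrative paths:

import os
import re

def filter_list(names, regex):
    # Hypothetical stand-in: keep only entries that fully match the pattern
    return [n for n in names if re.match(regex + r'$', n)]

# e.g. collect every experiments/run_<n>/log.txt that exists:
# paths = get_possible_paths('experiments', r'run_\d+' + os.sep + r'log\.txt')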
def from_verb(cls, verb):
"""
Constructs a :class:`Spoolverb` instance from the string
representation of the given verb.
Args:
verb (str): representation of the verb e.g.:
``'ASCRIBESPOOL01LOAN12/150526150528'``. Can also be in
binary format (:obj:`bytes`): ``b'ASCRIBESPOOL01PIECE'``.
Returns:
:class:`Spoolverb` instance.
"""
pattern = r'^(?P<meta>[A-Z]+)(?P<version>\d+)(?P<action>[A-Z]+)(?P<arg1>\d+)?(\/(?P<arg2>\d+))?$'
try:
verb = verb.decode()
except AttributeError:
pass
match = re.match(pattern, verb)
if not match:
raise SpoolverbError('Invalid spoolverb: {}'.format(verb))
data = match.groupdict()
meta = data['meta']
version = data['version']
action = data['action']
if action == 'EDITIONS':
num_editions = data['arg1']
return cls(meta=meta, version=version, action=action, num_editions=int(num_editions))
elif action == 'LOAN':
# TODO Review. Workaround for piece loans
try:
edition_num = int(data['arg1'])
except TypeError:
edition_num = 0
loan_start = data['arg2'][:6]
loan_end = data['arg2'][6:]
return cls(meta=meta, version=version, action=action, edition_num=int(edition_num),
loan_start=loan_start, loan_end=loan_end)
elif action in ['FUEL', 'PIECE', 'CONSIGNEDREGISTRATION']:
# no edition number for these verbs
return cls(meta=meta, version=version, action=action)
else:
edition_num = data['arg1']
return cls(meta=meta, version=version, action=action, edition_num=int(edition_num)) | def function[from_verb, parameter[cls, verb]]:
constant[
Constructs a :class:`Spoolverb` instance from the string
representation of the given verb.
Args:
verb (str): representation of the verb e.g.:
``'ASCRIBESPOOL01LOAN12/150526150528'``. Can also be in
binary format (:obj:`bytes`): ``b'ASCRIBESPOOL01PIECE'``.
Returns:
:class:`Spoolverb` instance.
]
variable[pattern] assign[=] constant[^(?P<meta>[A-Z]+)(?P<version>\d+)(?P<action>[A-Z]+)(?P<arg1>\d+)?(\/(?P<arg2>\d+))?$]
<ast.Try object at 0x7da1b092e5c0>
variable[match] assign[=] call[name[re].match, parameter[name[pattern], name[verb]]]
if <ast.UnaryOp object at 0x7da1b092f130> begin[:]
<ast.Raise object at 0x7da1b092dc00>
variable[data] assign[=] call[name[match].groupdict, parameter[]]
variable[meta] assign[=] call[name[data]][constant[meta]]
variable[version] assign[=] call[name[data]][constant[version]]
variable[action] assign[=] call[name[data]][constant[action]]
if compare[name[action] equal[==] constant[EDITIONS]] begin[:]
variable[num_editions] assign[=] call[name[data]][constant[arg1]]
return[call[name[cls], parameter[]]] | keyword[def] identifier[from_verb] ( identifier[cls] , identifier[verb] ):
literal[string]
identifier[pattern] = literal[string]
keyword[try] :
identifier[verb] = identifier[verb] . identifier[decode] ()
keyword[except] identifier[AttributeError] :
keyword[pass]
identifier[match] = identifier[re] . identifier[match] ( identifier[pattern] , identifier[verb] )
keyword[if] keyword[not] identifier[match] :
keyword[raise] identifier[SpoolverbError] ( literal[string] . identifier[format] ( identifier[verb] ))
identifier[data] = identifier[match] . identifier[groupdict] ()
identifier[meta] = identifier[data] [ literal[string] ]
identifier[version] = identifier[data] [ literal[string] ]
identifier[action] = identifier[data] [ literal[string] ]
keyword[if] identifier[action] == literal[string] :
identifier[num_editions] = identifier[data] [ literal[string] ]
keyword[return] identifier[cls] ( identifier[meta] = identifier[meta] , identifier[version] = identifier[version] , identifier[action] = identifier[action] , identifier[num_editions] = identifier[int] ( identifier[num_editions] ))
keyword[elif] identifier[action] == literal[string] :
keyword[try] :
identifier[edition_num] = identifier[int] ( identifier[data] [ literal[string] ])
keyword[except] identifier[TypeError] :
identifier[edition_num] = literal[int]
identifier[loan_start] = identifier[data] [ literal[string] ][: literal[int] ]
identifier[loan_end] = identifier[data] [ literal[string] ][ literal[int] :]
keyword[return] identifier[cls] ( identifier[meta] = identifier[meta] , identifier[version] = identifier[version] , identifier[action] = identifier[action] , identifier[edition_num] = identifier[int] ( identifier[edition_num] ),
identifier[loan_start] = identifier[loan_start] , identifier[loan_end] = identifier[loan_end] )
keyword[elif] identifier[action] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[return] identifier[cls] ( identifier[meta] = identifier[meta] , identifier[version] = identifier[version] , identifier[action] = identifier[action] )
keyword[else] :
identifier[edition_num] = identifier[data] [ literal[string] ]
keyword[return] identifier[cls] ( identifier[meta] = identifier[meta] , identifier[version] = identifier[version] , identifier[action] = identifier[action] , identifier[edition_num] = identifier[int] ( identifier[edition_num] )) | def from_verb(cls, verb):
"""
Constructs a :class:`Spoolverb` instance from the string
representation of the given verb.
Args:
verb (str): representation of the verb e.g.:
``'ASCRIBESPOOL01LOAN12/150526150528'``. Can also be in
binary format (:obj:`bytes`): ``b'ASCRIBESPOOL01PIECE'``.
Returns:
:class:`Spoolverb` instance.
"""
pattern = '^(?P<meta>[A-Z]+)(?P<version>\\d+)(?P<action>[A-Z]+)(?P<arg1>\\d+)?(\\/(?P<arg2>\\d+))?$'
try:
verb = verb.decode() # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
match = re.match(pattern, verb)
if not match:
raise SpoolverbError('Invalid spoolverb: {}'.format(verb)) # depends on [control=['if'], data=[]]
data = match.groupdict()
meta = data['meta']
version = data['version']
action = data['action']
if action == 'EDITIONS':
num_editions = data['arg1']
return cls(meta=meta, version=version, action=action, num_editions=int(num_editions)) # depends on [control=['if'], data=['action']]
elif action == 'LOAN':
# TODO Review. Workaround for piece loans
try:
edition_num = int(data['arg1']) # depends on [control=['try'], data=[]]
except TypeError:
edition_num = 0 # depends on [control=['except'], data=[]]
loan_start = data['arg2'][:6]
loan_end = data['arg2'][6:]
return cls(meta=meta, version=version, action=action, edition_num=int(edition_num), loan_start=loan_start, loan_end=loan_end) # depends on [control=['if'], data=['action']]
elif action in ['FUEL', 'PIECE', 'CONSIGNEDREGISTRATION']:
# no edition number for these verbs
return cls(meta=meta, version=version, action=action) # depends on [control=['if'], data=['action']]
else:
edition_num = data['arg1']
return cls(meta=meta, version=version, action=action, edition_num=int(edition_num)) |
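Round-tripping the docstring's example verb; the attribute names below assume the constructor stores its keyword arguments unchanged.

verb = Spoolverb.from_verb('ASCRIBESPOOL01LOAN12/150526150528')
# verb.action == 'LOAN', verb.edition_num == 12
# verb.loan_start == '150526', verb.loan_end == '150528'
piece = Spoolverb.from_verb(b'ASCRIBESPOOL01PIECE')   # bytes input is decoded first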
def constructTotalCounts(self, logger):
'''
This function constructs the total count for each valid character in the array or loads them if they already exist.
These will always be stored in '<DIR>/totalCounts.p', a pickled file
'''
self.totalSize = self.bwt.shape[0]
abtFN = self.dirName+'/totalCounts.p'
if os.path.exists(abtFN):
fp = open(abtFN, 'rb')  # binary mode for pickle
self.totalCounts = pickle.load(fp)
fp.close()
else:
chunkSize = 2**20
if logger is not None:
logger.info('First time calculation of \'%s\'' % abtFN)
#figure out the counts using the standard counting techniques, one chunk at a time
self.totalCounts = [0]*self.vcLen
i = 0
while i*chunkSize < self.bwt.shape[0]:
self.totalCounts = np.add(self.totalCounts, np.bincount(self.bwt[i*chunkSize:(i+1)*chunkSize], minlength=self.vcLen))
i += 1
#save the total count to '<DIR>/totalCounts.p'
fp = open(abtFN, 'wb')
pickle.dump(self.totalCounts, fp)
fp.close() | def function[constructTotalCounts, parameter[self, logger]]:
constant[
This function constructs the total count for each valid character in the array or loads them if they already exist.
These will always be stored in '<DIR>/totalCounts.p', a pickled file
]
name[self].totalSize assign[=] call[name[self].bwt.shape][constant[0]]
variable[abtFN] assign[=] binary_operation[name[self].dirName + constant[/totalCounts.p]]
if call[name[os].path.exists, parameter[name[abtFN]]] begin[:]
variable[fp] assign[=] call[name[open], parameter[name[abtFN], constant[r]]]
name[self].totalCounts assign[=] call[name[pickle].load, parameter[name[fp]]]
call[name[fp].close, parameter[]] | keyword[def] identifier[constructTotalCounts] ( identifier[self] , identifier[logger] ):
literal[string]
identifier[self] . identifier[totalSize] = identifier[self] . identifier[bwt] . identifier[shape] [ literal[int] ]
identifier[abtFN] = identifier[self] . identifier[dirName] + literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[abtFN] ):
identifier[fp] = identifier[open] ( identifier[abtFN] , literal[string] )
identifier[self] . identifier[totalCounts] = identifier[pickle] . identifier[load] ( identifier[fp] )
identifier[fp] . identifier[close] ()
keyword[else] :
identifier[chunkSize] = literal[int] ** literal[int]
keyword[if] identifier[logger] != keyword[None] :
identifier[logger] . identifier[info] ( literal[string] % identifier[abtFN] )
identifier[self] . identifier[totalCounts] =[ literal[int] ]* identifier[self] . identifier[vcLen]
identifier[i] = literal[int]
keyword[while] identifier[i] * identifier[chunkSize] < identifier[self] . identifier[bwt] . identifier[shape] [ literal[int] ]:
identifier[self] . identifier[totalCounts] = identifier[np] . identifier[add] ( identifier[self] . identifier[totalCounts] , identifier[np] . identifier[bincount] ( identifier[self] . identifier[bwt] [ identifier[i] * identifier[chunkSize] :( identifier[i] + literal[int] )* identifier[chunkSize] ], identifier[minlength] = identifier[self] . identifier[vcLen] ))
identifier[i] += literal[int]
identifier[fp] = identifier[open] ( identifier[abtFN] , literal[string] )
identifier[pickle] . identifier[dump] ( identifier[self] . identifier[totalCounts] , identifier[fp] )
identifier[fp] . identifier[close] () | def constructTotalCounts(self, logger):
"""
This function constructs the total count for each valid character in the array or loads them if they already exist.
These will always be stored in '<DIR>/totalCounts.p', a pickled file
"""
self.totalSize = self.bwt.shape[0]
abtFN = self.dirName + '/totalCounts.p'
if os.path.exists(abtFN):
fp = open(abtFN, 'r')
self.totalCounts = pickle.load(fp)
fp.close() # depends on [control=['if'], data=[]]
else:
chunkSize = 2 ** 20
if logger != None:
logger.info("First time calculation of '%s'" % abtFN) # depends on [control=['if'], data=['logger']]
#figure out the counts using the standard counting techniques, one chunk at a time
self.totalCounts = [0] * self.vcLen
i = 0
while i * chunkSize < self.bwt.shape[0]:
self.totalCounts = np.add(self.totalCounts, np.bincount(self.bwt[i * chunkSize:(i + 1) * chunkSize], minlength=self.vcLen))
i += 1 # depends on [control=['while'], data=[]]
#save the total count to '<DIR>/totalCounts.p'
fp = open(abtFN, 'w+')
pickle.dump(self.totalCounts, fp)
fp.close() |
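The chunked counting idiom above, standalone; the alphabet size and data are illustrative.

import numpy as np

bwt = np.random.randint(0, 6, size=5_000_000).astype(np.uint8)
vcLen, chunkSize = 6, 2 ** 20
totalCounts = np.zeros(vcLen, dtype=np.int64)
for i in range(0, bwt.shape[0], chunkSize):
    # one chunk at a time keeps peak memory bounded for very large BWTs
    totalCounts += np.bincount(bwt[i:i + chunkSize], minlength=vcLen)
assert totalCounts.sum() == bwt.shape[0]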
def weld_str_startswith(array, pat):
"""Check which elements start with pattern.
Parameters
----------
array : numpy.ndarray or WeldObject
Input data.
pat : str
To check for.
Returns
-------
WeldObject
Representation of this computation.
"""
obj_id, weld_obj = create_weld_object(array)
pat_id = get_weld_obj_id(weld_obj, pat)
"""alternative implementation for reference
let res = result(
for(zip(slice(e, 0L, lenPat), {pat}),
merger[i64, +],
|b: merger[i64, +], i: i64, e: {{i8, i8}}|
if(e.$0 == e.$1,
merge(b, 1L),
merge(b, 0L)
)
)
);
res == lenPat
"""
weld_template = """let lenPat = len({pat});
map({array},
|e: vec[i8]|
let lenString = len(e);
if(lenPat > lenString,
false,
iterate({{0L, true}},
|q|
let found = lookup(e, q.$0) == lookup({pat}, q.$0);
{{
{{q.$0 + 1L, found}},
q.$0 + 1L < lenPat &&
found == true
}}
).$1
)
)"""
weld_obj.weld_code = weld_template.format(array=obj_id,
pat=pat_id)
return weld_obj | def function[weld_str_startswith, parameter[array, pat]]:
constant[Check which elements start with pattern.
Parameters
----------
array : numpy.ndarray or WeldObject
Input data.
pat : str
To check for.
Returns
-------
WeldObject
Representation of this computation.
]
<ast.Tuple object at 0x7da1b0966110> assign[=] call[name[create_weld_object], parameter[name[array]]]
variable[pat_id] assign[=] call[name[get_weld_obj_id], parameter[name[weld_obj], name[pat]]]
constant[alternative implementation for reference
let res = result(
for(zip(slice(e, 0L, lenPat), {pat}),
merger[i64, +],
|b: merger[i64, +], i: i64, e: {{i8, i8}}|
if(e.$0 == e.$1,
merge(b, 1L),
merge(b, 0L)
)
)
);
res == lenPat
]
variable[weld_template] assign[=] constant[let lenPat = len({pat});
map({array},
|e: vec[i8]|
let lenString = len(e);
if(lenPat > lenString,
false,
iterate({{0L, true}},
|q|
let found = lookup(e, q.$0) == lookup({pat}, q.$0);
{{
{{q.$0 + 1L, found}},
q.$0 + 1L < lenPat &&
found == true
}}
).$1
)
)]
name[weld_obj].weld_code assign[=] call[name[weld_template].format, parameter[]]
return[name[weld_obj]] | keyword[def] identifier[weld_str_startswith] ( identifier[array] , identifier[pat] ):
literal[string]
identifier[obj_id] , identifier[weld_obj] = identifier[create_weld_object] ( identifier[array] )
identifier[pat_id] = identifier[get_weld_obj_id] ( identifier[weld_obj] , identifier[pat] )
literal[string]
identifier[weld_template] = literal[string]
identifier[weld_obj] . identifier[weld_code] = identifier[weld_template] . identifier[format] ( identifier[array] = identifier[obj_id] ,
identifier[pat] = identifier[pat_id] )
keyword[return] identifier[weld_obj] | def weld_str_startswith(array, pat):
"""Check which elements start with pattern.
Parameters
----------
array : numpy.ndarray or WeldObject
Input data.
pat : str
To check for.
Returns
-------
WeldObject
Representation of this computation.
"""
(obj_id, weld_obj) = create_weld_object(array)
pat_id = get_weld_obj_id(weld_obj, pat)
'alternative implementation for reference\n let res = result(\n for(zip(slice(e, 0L, lenPat), {pat}),\n merger[i64, +],\n |b: merger[i64, +], i: i64, e: {{i8, i8}}|\n if(e.$0 == e.$1, \n merge(b, 1L), \n merge(b, 0L)\n )\n )\n );\n res == lenPat\n '
weld_template = 'let lenPat = len({pat});\nmap({array},\n |e: vec[i8]|\n let lenString = len(e);\n if(lenPat > lenString,\n false,\n iterate({{0L, true}}, \n |q| \n let found = lookup(e, q.$0) == lookup({pat}, q.$0);\n {{\n {{q.$0 + 1L, found}}, \n q.$0 + 1L < lenPat &&\n found == true\n }}\n ).$1\n )\n)'
weld_obj.weld_code = weld_template.format(array=obj_id, pat=pat_id)
return weld_obj |
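For readers unfamiliar with Weld, the `iterate` body above corresponds to this byte-by-byte prefix check in plain Python:

def startswith_plain(e: bytes, pat: bytes) -> bool:
    # Same control flow as the Weld template: stop at the first mismatching
    # byte, succeed once the whole pattern has been compared
    if len(pat) > len(e):
        return False
    i, found = 0, True
    while i < len(pat) and found:
        found = e[i] == pat[i]
        i += 1
    return found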
def getChecks(self, **parameters):
"""Pulls all checks from pingdom
Optional Parameters:
* limit -- Limits the number of returned probes to the
specified quantity.
Type: Integer (max 25000)
Default: 25000
* offset -- Offset for listing (requires limit.)
Type: Integer
Default: 0
* tags -- Filter listing by tag/s
Type: String
Default: None
"""
# Warn user about unhandled parameters
for key in parameters:
if key not in ['limit', 'offset', 'tags']:
sys.stderr.write('%s not a valid argument for getChecks()\n'
% key)
response = self.request('GET', 'checks', parameters)
return [PingdomCheck(self, x) for x in response.json()['checks']] | def function[getChecks, parameter[self]]:
constant[Pulls all checks from pingdom
Optional Parameters:
* limit -- Limits the number of returned probes to the
specified quantity.
Type: Integer (max 25000)
Default: 25000
* offset -- Offset for listing (requires limit.)
Type: Integer
Default: 0
* tags -- Filter listing by tag/s
Type: String
Default: None
]
for taget[name[key]] in starred[name[parameters]] begin[:]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b0e63dc0>, <ast.Constant object at 0x7da1b0f51000>, <ast.Constant object at 0x7da1b0f50730>]]] begin[:]
call[name[sys].stderr.write, parameter[binary_operation[constant[%s not a valid argument for getChecks()
] <ast.Mod object at 0x7da2590d6920> name[key]]]]
variable[response] assign[=] call[name[self].request, parameter[constant[GET], constant[checks], name[parameters]]]
return[<ast.ListComp object at 0x7da1b0f51210>] | keyword[def] identifier[getChecks] ( identifier[self] ,** identifier[parameters] ):
literal[string]
keyword[for] identifier[key] keyword[in] identifier[parameters] :
keyword[if] identifier[key] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string]
% identifier[key] )
identifier[response] = identifier[self] . identifier[request] ( literal[string] , literal[string] , identifier[parameters] )
keyword[return] [ identifier[PingdomCheck] ( identifier[self] , identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[response] . identifier[json] ()[ literal[string] ]] | def getChecks(self, **parameters):
"""Pulls all checks from pingdom
Optional Parameters:
* limit -- Limits the number of returned probes to the
specified quantity.
Type: Integer (max 25000)
Default: 25000
* offset -- Offset for listing (requires limit.)
Type: Integer
Default: 0
* tags -- Filter listing by tag/s
Type: String
Default: None
"""
# Warn user about unhandled parameters
for key in parameters:
if key not in ['limit', 'offset', 'tags']:
sys.stderr.write('%s not a valid argument for getChecks()\n' % key) # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=['key']]
response = self.request('GET', 'checks', parameters)
return [PingdomCheck(self, x) for x in response.json()['checks']] |
def _parse_doc(doc):
"""Parse a docstring.
Parse a docstring and extract three components; headline, description,
and map of arguments to help texts.
Args:
doc: docstring.
Returns:
a dictionary.
"""
lines = doc.split("\n")
descriptions = list(itertools.takewhile(_checker(_KEYWORDS), lines))
if len(descriptions) < 3:
description = lines[0]
else:
description = "{0}\n\n{1}".format(
lines[0], textwrap.dedent("\n".join(descriptions[2:])))
args = list(itertools.takewhile(
_checker(_KEYWORDS_OTHERS),
itertools.dropwhile(_checker(_KEYWORDS_ARGS), lines)))
argmap = {}
if len(args) > 1:
for pair in args[1:]:
kv = [v.strip() for v in pair.split(":")]
if len(kv) >= 2:
argmap[kv[0]] = ":".join(kv[1:])
return dict(headline=descriptions[0], description=description, args=argmap) | def function[_parse_doc, parameter[doc]]:
constant[Parse a docstring.
Parse a docstring and extract three components; headline, description,
and map of arguments to help texts.
Args:
doc: docstring.
Returns:
a dictionary.
]
variable[lines] assign[=] call[name[doc].split, parameter[constant[
]]]
variable[descriptions] assign[=] call[name[list], parameter[call[name[itertools].takewhile, parameter[call[name[_checker], parameter[name[_KEYWORDS]]], name[lines]]]]]
if compare[call[name[len], parameter[name[descriptions]]] less[<] constant[3]] begin[:]
variable[description] assign[=] call[name[lines]][constant[0]]
variable[args] assign[=] call[name[list], parameter[call[name[itertools].takewhile, parameter[call[name[_checker], parameter[name[_KEYWORDS_OTHERS]]], call[name[itertools].dropwhile, parameter[call[name[_checker], parameter[name[_KEYWORDS_ARGS]]], name[lines]]]]]]]
variable[argmap] assign[=] dictionary[[], []]
if compare[call[name[len], parameter[name[args]]] greater[>] constant[1]] begin[:]
for taget[name[pair]] in starred[call[name[args]][<ast.Slice object at 0x7da18ede5f30>]] begin[:]
variable[kv] assign[=] <ast.ListComp object at 0x7da18ede7820>
if compare[call[name[len], parameter[name[kv]]] greater_or_equal[>=] constant[2]] begin[:]
call[name[argmap]][call[name[kv]][constant[0]]] assign[=] call[constant[:].join, parameter[call[name[kv]][<ast.Slice object at 0x7da18ede6f50>]]]
return[call[name[dict], parameter[]]] | keyword[def] identifier[_parse_doc] ( identifier[doc] ):
literal[string]
identifier[lines] = identifier[doc] . identifier[split] ( literal[string] )
identifier[descriptions] = identifier[list] ( identifier[itertools] . identifier[takewhile] ( identifier[_checker] ( identifier[_KEYWORDS] ), identifier[lines] ))
keyword[if] identifier[len] ( identifier[descriptions] )< literal[int] :
identifier[description] = identifier[lines] [ literal[int] ]
keyword[else] :
identifier[description] = literal[string] . identifier[format] (
identifier[lines] [ literal[int] ], identifier[textwrap] . identifier[dedent] ( literal[string] . identifier[join] ( identifier[descriptions] [ literal[int] :])))
identifier[args] = identifier[list] ( identifier[itertools] . identifier[takewhile] (
identifier[_checker] ( identifier[_KEYWORDS_OTHERS] ),
identifier[itertools] . identifier[dropwhile] ( identifier[_checker] ( identifier[_KEYWORDS_ARGS] ), identifier[lines] )))
identifier[argmap] ={}
keyword[if] identifier[len] ( identifier[args] )> literal[int] :
keyword[for] identifier[pair] keyword[in] identifier[args] [ literal[int] :]:
identifier[kv] =[ identifier[v] . identifier[strip] () keyword[for] identifier[v] keyword[in] identifier[pair] . identifier[split] ( literal[string] )]
keyword[if] identifier[len] ( identifier[kv] )>= literal[int] :
identifier[argmap] [ identifier[kv] [ literal[int] ]]= literal[string] . identifier[join] ( identifier[kv] [ literal[int] :])
keyword[return] identifier[dict] ( identifier[headline] = identifier[descriptions] [ literal[int] ], identifier[description] = identifier[description] , identifier[args] = identifier[argmap] ) | def _parse_doc(doc):
"""Parse a docstring.
Parse a docstring and extract three components; headline, description,
and map of arguments to help texts.
Args:
doc: docstring.
Returns:
a dictionary.
"""
lines = doc.split('\n')
descriptions = list(itertools.takewhile(_checker(_KEYWORDS), lines))
if len(descriptions) < 3:
description = lines[0] # depends on [control=['if'], data=[]]
else:
description = '{0}\n\n{1}'.format(lines[0], textwrap.dedent('\n'.join(descriptions[2:])))
args = list(itertools.takewhile(_checker(_KEYWORDS_OTHERS), itertools.dropwhile(_checker(_KEYWORDS_ARGS), lines)))
argmap = {}
if len(args) > 1:
for pair in args[1:]:
kv = [v.strip() for v in pair.split(':')]
if len(kv) >= 2:
argmap[kv[0]] = ':'.join(kv[1:]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pair']] # depends on [control=['if'], data=[]]
return dict(headline=descriptions[0], description=description, args=argmap) |
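A worked example; the module-level _KEYWORDS* sets and _checker are defined outside this entry, so the exact section boundaries are assumed to be headers like 'Args:' and 'Returns:'.

sample = '''Fetch a record.
Looks the record up by key.
Args:
    key: lookup key.
    default: value returned on a miss.
Returns:
    the stored record.
'''
info = _parse_doc(sample)
# info['headline'] -> 'Fetch a record.'
# info['args'] -> {'key': 'lookup key.', 'default': 'value returned on a miss.'}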
def name_to_object(repo, name, return_ref=False):
"""
:return: object specified by the given name, hexshas (short and long)
as well as references are supported
:param return_ref: if name specifies a reference, we will return the reference
instead of the object. Otherwise it will raise BadObject or BadName
"""
hexsha = None
# is it a hexsha? Try the most common lengths, which are 7 to 40
if repo.re_hexsha_shortened.match(name):
if len(name) != 40:
# find long sha for short sha
hexsha = short_to_long(repo.odb, name)
else:
hexsha = name
# END handle short shas
# END find sha if it matches
# if we couldn't find an object for what seemed to be a short hexsha
# try to find it as reference anyway, it could be named 'aaa' for instance
if hexsha is None:
for base in ('%s', 'refs/%s', 'refs/tags/%s', 'refs/heads/%s', 'refs/remotes/%s', 'refs/remotes/%s/HEAD'):
try:
hexsha = SymbolicReference.dereference_recursive(repo, base % name)
if return_ref:
return SymbolicReference(repo, base % name)
# END handle symbolic ref
break
except ValueError:
pass
# END for each base
# END handle hexsha
# didn't find any ref, this is an error
if return_ref:
raise BadObject("Couldn't find reference named %r" % name)
# END handle return ref
# tried everything ? fail
if hexsha is None:
raise BadName(name)
# END assert hexsha was found
return Object.new_from_sha(repo, hex_to_bin(hexsha)) | def function[name_to_object, parameter[repo, name, return_ref]]:
constant[
:return: object specified by the given name, hexshas ( short and long )
as well as references are supported
:param return_ref: if name specifies a reference, we will return the reference
instead of the object. Otherwise it will raise BadObject or BadName
]
variable[hexsha] assign[=] constant[None]
if call[name[repo].re_hexsha_shortened.match, parameter[name[name]]] begin[:]
if compare[call[name[len], parameter[name[name]]] not_equal[!=] constant[40]] begin[:]
variable[hexsha] assign[=] call[name[short_to_long], parameter[name[repo].odb, name[name]]]
if compare[name[hexsha] is constant[None]] begin[:]
for taget[name[base]] in starred[tuple[[<ast.Constant object at 0x7da1b1d65510>, <ast.Constant object at 0x7da1b1d67250>, <ast.Constant object at 0x7da1b1d67040>, <ast.Constant object at 0x7da1b1d64730>, <ast.Constant object at 0x7da1b1d67760>, <ast.Constant object at 0x7da1b1d67d30>]]] begin[:]
<ast.Try object at 0x7da1b1d649d0>
if name[return_ref] begin[:]
<ast.Raise object at 0x7da18bcca500>
if compare[name[hexsha] is constant[None]] begin[:]
<ast.Raise object at 0x7da18bcc80d0>
return[call[name[Object].new_from_sha, parameter[name[repo], call[name[hex_to_bin], parameter[name[hexsha]]]]]] | keyword[def] identifier[name_to_object] ( identifier[repo] , identifier[name] , identifier[return_ref] = keyword[False] ):
literal[string]
identifier[hexsha] = keyword[None]
keyword[if] identifier[repo] . identifier[re_hexsha_shortened] . identifier[match] ( identifier[name] ):
keyword[if] identifier[len] ( identifier[name] )!= literal[int] :
identifier[hexsha] = identifier[short_to_long] ( identifier[repo] . identifier[odb] , identifier[name] )
keyword[else] :
identifier[hexsha] = identifier[name]
keyword[if] identifier[hexsha] keyword[is] keyword[None] :
keyword[for] identifier[base] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[try] :
identifier[hexsha] = identifier[SymbolicReference] . identifier[dereference_recursive] ( identifier[repo] , identifier[base] % identifier[name] )
keyword[if] identifier[return_ref] :
keyword[return] identifier[SymbolicReference] ( identifier[repo] , identifier[base] % identifier[name] )
keyword[break]
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[if] identifier[return_ref] :
keyword[raise] identifier[BadObject] ( literal[string] % identifier[name] )
keyword[if] identifier[hexsha] keyword[is] keyword[None] :
keyword[raise] identifier[BadName] ( identifier[name] )
keyword[return] identifier[Object] . identifier[new_from_sha] ( identifier[repo] , identifier[hex_to_bin] ( identifier[hexsha] )) | def name_to_object(repo, name, return_ref=False):
"""
:return: object specified by the given name, hexshas (short and long)
as well as references are supported
:param return_ref: if name specifies a reference, we will return the reference
instead of the object. Otherwise it will raise BadObject or BadName
"""
hexsha = None
# is it a hexsha ? Try the most common ones, which is 7 to 40
if repo.re_hexsha_shortened.match(name):
if len(name) != 40:
# find long sha for short sha
hexsha = short_to_long(repo.odb, name) # depends on [control=['if'], data=[]]
else:
hexsha = name # depends on [control=['if'], data=[]]
# END handle short shas
# END find sha if it matches
# if we couldn't find an object for what seemed to be a short hexsha
# try to find it as reference anyway, it could be named 'aaa' for instance
if hexsha is None:
for base in ('%s', 'refs/%s', 'refs/tags/%s', 'refs/heads/%s', 'refs/remotes/%s', 'refs/remotes/%s/HEAD'):
try:
hexsha = SymbolicReference.dereference_recursive(repo, base % name)
if return_ref:
return SymbolicReference(repo, base % name) # depends on [control=['if'], data=[]]
# END handle symbolic ref
break # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['base']] # depends on [control=['if'], data=['hexsha']]
# END for each base
# END handle hexsha
# didn't find any ref, this is an error
if return_ref:
raise BadObject("Couldn't find reference named %r" % name) # depends on [control=['if'], data=[]]
# END handle return ref
# tried everything ? fail
if hexsha is None:
raise BadName(name) # depends on [control=['if'], data=[]]
# END assert hexsha was found
return Object.new_from_sha(repo, hex_to_bin(hexsha)) |
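The probe order matters: short names shadow longer ref paths. For name == 'develop' the loop tries, in order:

bases = ('%s', 'refs/%s', 'refs/tags/%s', 'refs/heads/%s',
         'refs/remotes/%s', 'refs/remotes/%s/HEAD')
print([base % 'develop' for base in bases])
# ['develop', 'refs/develop', 'refs/tags/develop', 'refs/heads/develop',
#  'refs/remotes/develop', 'refs/remotes/develop/HEAD']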
def _unpack_storm_date(date):
'''
given a packed storm date field, unpack and return 'YYYY-MM-DD' string.
'''
year = (date & 0x7f) + 2000 # 7 bits
day = (date >> 7) & 0x01f # 5 bits
month = (date >> 12) & 0x0f # 4 bits
return "%s-%s-%s" % (year, month, day) | def function[_unpack_storm_date, parameter[date]]:
constant[
given a packed storm date field, unpack and return 'YYYY-MM-DD' string.
]
variable[year] assign[=] binary_operation[binary_operation[name[date] <ast.BitAnd object at 0x7da2590d6b60> constant[127]] + constant[2000]]
variable[day] assign[=] binary_operation[binary_operation[name[date] <ast.RShift object at 0x7da2590d6a40> constant[7]] <ast.BitAnd object at 0x7da2590d6b60> constant[31]]
variable[month] assign[=] binary_operation[binary_operation[name[date] <ast.RShift object at 0x7da2590d6a40> constant[12]] <ast.BitAnd object at 0x7da2590d6b60> constant[15]]
return[binary_operation[constant[%s-%s-%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b01c2a70>, <ast.Name object at 0x7da1b01c2c20>, <ast.Name object at 0x7da1b01c2860>]]]] | keyword[def] identifier[_unpack_storm_date] ( identifier[date] ):
literal[string]
identifier[year] =( identifier[date] & literal[int] )+ literal[int]
identifier[day] =( identifier[date] >> literal[int] )& literal[int]
identifier[month] =( identifier[date] >> literal[int] )& literal[int]
keyword[return] literal[string] %( identifier[year] , identifier[month] , identifier[day] ) | def _unpack_storm_date(date):
"""
given a packed storm date field, unpack and return 'YYYY-MM-DD' string.
"""
year = (date & 127) + 2000 # 7 bits
day = date >> 7 & 31 # 5 bits
month = date >> 12 & 15 # 4 bits
return '%04d-%02d-%02d' % (year, month, day)  # zero-padded to match 'YYYY-MM-DD'
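A round-trip check of the bit layout (month in bits 12-15, day in bits 7-11, year offset from 2000 in bits 0-6); the packer is written here for illustration and relies on the zero-padded format above.

def _pack_storm_date(year, month, day):
    # Inverse of the unpacker above, for testing only
    return (month << 12) | (day << 7) | (year - 2000)

assert _unpack_storm_date(_pack_storm_date(2016, 9, 23)) == '2016-09-23'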
def xarray_to_ndarray(data, *, var_names=None, combined=True):
"""Take xarray data and unpacks into variables and data into list and numpy array respectively.
Assumes that chain and draw are in coordinates
Parameters
----------
data: xarray.DataSet
Data in an xarray from an InferenceData object. Examples include posterior or sample_stats
var_names: iter
Should be a subset of data.data_vars not including chain and draws. Defaults to all of them
combined: bool
Whether to combine chains into one array
Returns
-------
var_names: list
List of variable names
data: np.array
Data values
"""
unpacked_data, unpacked_var_names = [], []
# Merge chains and variables
for var_name, selection, data_array in xarray_var_iter(
data, var_names=var_names, combined=combined
):
unpacked_data.append(data_array.flatten())
unpacked_var_names.append(make_label(var_name, selection))
return unpacked_var_names, np.array(unpacked_data) | def function[xarray_to_ndarray, parameter[data]]:
constant[Take xarray data and unpacks into variables and data into list and numpy array respectively.
Assumes that chain and draw are in coordinates
Parameters
----------
data: xarray.DataSet
Data in an xarray from an InferenceData object. Examples include posterior or sample_stats
var_names: iter
Should be a subset of data.data_vars not including chain and draws. Defaults to all of them
combined: bool
Whether to combine chain into one array
Returns
-------
var_names: list
List of variable names
data: np.array
Data values
]
<ast.Tuple object at 0x7da1b1b298d0> assign[=] tuple[[<ast.List object at 0x7da1b1b28a60>, <ast.List object at 0x7da1b1b29f90>]]
for taget[tuple[[<ast.Name object at 0x7da1b1b29f30>, <ast.Name object at 0x7da1b1b28a90>, <ast.Name object at 0x7da1b1b2a110>]]] in starred[call[name[xarray_var_iter], parameter[name[data]]]] begin[:]
call[name[unpacked_data].append, parameter[call[name[data_array].flatten, parameter[]]]]
call[name[unpacked_var_names].append, parameter[call[name[make_label], parameter[name[var_name], name[selection]]]]]
return[tuple[[<ast.Name object at 0x7da1b1b0ee60>, <ast.Call object at 0x7da1b1b0db10>]]] | keyword[def] identifier[xarray_to_ndarray] ( identifier[data] ,*, identifier[var_names] = keyword[None] , identifier[combined] = keyword[True] ):
literal[string]
identifier[unpacked_data] , identifier[unpacked_var_names] ,=[],[]
keyword[for] identifier[var_name] , identifier[selection] , identifier[data_array] keyword[in] identifier[xarray_var_iter] (
identifier[data] , identifier[var_names] = identifier[var_names] , identifier[combined] = identifier[combined]
):
identifier[unpacked_data] . identifier[append] ( identifier[data_array] . identifier[flatten] ())
identifier[unpacked_var_names] . identifier[append] ( identifier[make_label] ( identifier[var_name] , identifier[selection] ))
keyword[return] identifier[unpacked_var_names] , identifier[np] . identifier[array] ( identifier[unpacked_data] ) | def xarray_to_ndarray(data, *, var_names=None, combined=True):
"""Take xarray data and unpacks into variables and data into list and numpy array respectively.
Assumes that chain and draw are in coordinates
Parameters
----------
data: xarray.DataSet
Data in an xarray from an InferenceData object. Examples include posterior or sample_stats
var_names: iter
Should be a subset of data.data_vars not including chain and draws. Defaults to all of them
combined: bool
Whether to combine chains into one array
Returns
-------
var_names: list
List of variable names
data: np.array
Data values
"""
(unpacked_data, unpacked_var_names) = ([], [])
# Merge chains and variables
for (var_name, selection, data_array) in xarray_var_iter(data, var_names=var_names, combined=combined):
unpacked_data.append(data_array.flatten())
unpacked_var_names.append(make_label(var_name, selection)) # depends on [control=['for'], data=[]]
return (unpacked_var_names, np.array(unpacked_data)) |
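A usage sketch; xarray_var_iter and make_label are sibling helpers from the same module, and the dataset below is illustrative.

import numpy as np
import xarray as xr

posterior = xr.Dataset(
    {'mu': (('chain', 'draw'), np.random.randn(4, 500))},
    coords={'chain': np.arange(4), 'draw': np.arange(500)},
)
names, values = xarray_to_ndarray(posterior)
# names -> ['mu']; values.shape -> (1, 2000) since combined=True merges chains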
def clone(self) -> "Event":
"""
Clone the event
Returns:
:class:`slack.events.Event`
"""
return self.__class__(copy.deepcopy(self.event), copy.deepcopy(self.metadata)) | def function[clone, parameter[self]]:
constant[
Clone the event
Returns:
:class:`slack.events.Event`
]
return[call[name[self].__class__, parameter[call[name[copy].deepcopy, parameter[name[self].event]], call[name[copy].deepcopy, parameter[name[self].metadata]]]]] | keyword[def] identifier[clone] ( identifier[self] )-> literal[string] :
literal[string]
keyword[return] identifier[self] . identifier[__class__] ( identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[event] ), identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[metadata] )) | def clone(self) -> 'Event':
"""
Clone the event
Returns:
:class:`slack.events.Event`
"""
return self.__class__(copy.deepcopy(self.event), copy.deepcopy(self.metadata)) |
def response_from_prediction(self, y_pred, single=True):
"""Turns a model's prediction in *y_pred* into a JSON
response.
"""
result = y_pred.tolist()
if single:
result = result[0]
response = {
'metadata': get_metadata(),
'result': result,
}
return make_ujson_response(response, status_code=200) | def function[response_from_prediction, parameter[self, y_pred, single]]:
constant[Turns a model's prediction in *y_pred* into a JSON
response.
]
variable[result] assign[=] call[name[y_pred].tolist, parameter[]]
if name[single] begin[:]
variable[result] assign[=] call[name[result]][constant[0]]
variable[response] assign[=] dictionary[[<ast.Constant object at 0x7da20c7c9180>, <ast.Constant object at 0x7da20c7c8670>], [<ast.Call object at 0x7da20c7c9ff0>, <ast.Name object at 0x7da20c7c98a0>]]
return[call[name[make_ujson_response], parameter[name[response]]]] | keyword[def] identifier[response_from_prediction] ( identifier[self] , identifier[y_pred] , identifier[single] = keyword[True] ):
literal[string]
identifier[result] = identifier[y_pred] . identifier[tolist] ()
keyword[if] identifier[single] :
identifier[result] = identifier[result] [ literal[int] ]
identifier[response] ={
literal[string] : identifier[get_metadata] (),
literal[string] : identifier[result] ,
}
keyword[return] identifier[make_ujson_response] ( identifier[response] , identifier[status_code] = literal[int] ) | def response_from_prediction(self, y_pred, single=True):
"""Turns a model's prediction in *y_pred* into a JSON
response.
"""
result = y_pred.tolist()
if single:
result = result[0] # depends on [control=['if'], data=[]]
response = {'metadata': get_metadata(), 'result': result}
return make_ujson_response(response, status_code=200) |
def get_default_blocks(self, top=False):
"""
Return a list of column default block tuples (URL, verbose name).
Used for quick add block buttons.
"""
default_blocks = []
for block_model, block_name in self.glitter_page.default_blocks:
block = apps.get_model(block_model)
base_url = reverse('block_admin:{}_{}_add'.format(
block._meta.app_label, block._meta.model_name,
), kwargs={
'version_id': self.glitter_page.version.id,
})
block_qs = {
'column': self.name,
'top': top,
}
block_url = '{}?{}'.format(base_url, urlencode(block_qs))
block_text = capfirst(force_text(block._meta.verbose_name))
default_blocks.append((block_url, block_text))
return default_blocks | def function[get_default_blocks, parameter[self, top]]:
constant[
Return a list of column default block tuples (URL, verbose name).
Used for quick add block buttons.
]
variable[default_blocks] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b1123b50>, <ast.Name object at 0x7da1b1123eb0>]]] in starred[name[self].glitter_page.default_blocks] begin[:]
variable[block] assign[=] call[name[apps].get_model, parameter[name[block_model]]]
variable[base_url] assign[=] call[name[reverse], parameter[call[constant[block_admin:{}_{}_add].format, parameter[name[block]._meta.app_label, name[block]._meta.model_name]]]]
variable[block_qs] assign[=] dictionary[[<ast.Constant object at 0x7da1b1123010>, <ast.Constant object at 0x7da1b1120460>], [<ast.Attribute object at 0x7da204565c90>, <ast.Name object at 0x7da204565f30>]]
variable[block_url] assign[=] call[constant[{}?{}].format, parameter[name[base_url], call[name[urlencode], parameter[name[block_qs]]]]]
variable[block_text] assign[=] call[name[capfirst], parameter[call[name[force_text], parameter[name[block]._meta.verbose_name]]]]
call[name[default_blocks].append, parameter[tuple[[<ast.Name object at 0x7da2045674f0>, <ast.Name object at 0x7da204566ad0>]]]]
return[name[default_blocks]] | keyword[def] identifier[get_default_blocks] ( identifier[self] , identifier[top] = keyword[False] ):
literal[string]
identifier[default_blocks] =[]
keyword[for] identifier[block_model] , identifier[block_name] keyword[in] identifier[self] . identifier[glitter_page] . identifier[default_blocks] :
identifier[block] = identifier[apps] . identifier[get_model] ( identifier[block_model] )
identifier[base_url] = identifier[reverse] ( literal[string] . identifier[format] (
identifier[block] . identifier[_meta] . identifier[app_label] , identifier[block] . identifier[_meta] . identifier[model_name] ,
), identifier[kwargs] ={
literal[string] : identifier[self] . identifier[glitter_page] . identifier[version] . identifier[id] ,
})
identifier[block_qs] ={
literal[string] : identifier[self] . identifier[name] ,
literal[string] : identifier[top] ,
}
identifier[block_url] = literal[string] . identifier[format] ( identifier[base_url] , identifier[urlencode] ( identifier[block_qs] ))
identifier[block_text] = identifier[capfirst] ( identifier[force_text] ( identifier[block] . identifier[_meta] . identifier[verbose_name] ))
identifier[default_blocks] . identifier[append] (( identifier[block_url] , identifier[block_text] ))
keyword[return] identifier[default_blocks] | def get_default_blocks(self, top=False):
"""
Return a list of column default block tuples (URL, verbose name).
Used for quick add block buttons.
"""
default_blocks = []
for (block_model, block_name) in self.glitter_page.default_blocks:
block = apps.get_model(block_model)
base_url = reverse('block_admin:{}_{}_add'.format(block._meta.app_label, block._meta.model_name), kwargs={'version_id': self.glitter_page.version.id})
block_qs = {'column': self.name, 'top': top}
block_url = '{}?{}'.format(base_url, urlencode(block_qs))
block_text = capfirst(force_text(block._meta.verbose_name))
default_blocks.append((block_url, block_text)) # depends on [control=['for'], data=[]]
return default_blocks |
def get_locale_choices(locale_dir):
"""
Get a list of locale file names in the given locale dir.
"""
#/
file_name_s = os.listdir(locale_dir)
#/
choice_s = []
for file_name in file_name_s:
if file_name.endswith(I18n.TT_FILE_EXT_STXT):
file_name_noext, _ = os.path.splitext(file_name)
if file_name_noext:
choice_s.append(file_name_noext)
#/
choice_s = sorted(choice_s)
#/
return choice_s | def function[get_locale_choices, parameter[locale_dir]]:
constant[
Get a list of locale file names in the given locale dir.
]
variable[file_name_s] assign[=] call[name[os].listdir, parameter[name[locale_dir]]]
variable[choice_s] assign[=] list[[]]
for taget[name[file_name]] in starred[name[file_name_s]] begin[:]
if call[name[file_name].endswith, parameter[name[I18n].TT_FILE_EXT_STXT]] begin[:]
<ast.Tuple object at 0x7da1b168c5e0> assign[=] call[name[os].path.splitext, parameter[name[file_name]]]
if name[file_name_noext] begin[:]
call[name[choice_s].append, parameter[name[file_name_noext]]]
variable[choice_s] assign[=] call[name[sorted], parameter[name[choice_s]]]
return[name[choice_s]] | keyword[def] identifier[get_locale_choices] ( identifier[locale_dir] ):
literal[string]
identifier[file_name_s] = identifier[os] . identifier[listdir] ( identifier[locale_dir] )
identifier[choice_s] =[]
keyword[for] identifier[file_name] keyword[in] identifier[file_name_s] :
keyword[if] identifier[file_name] . identifier[endswith] ( identifier[I18n] . identifier[TT_FILE_EXT_STXT] ):
identifier[file_name_noext] , identifier[_] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[file_name] )
keyword[if] identifier[file_name_noext] :
identifier[choice_s] . identifier[append] ( identifier[file_name_noext] )
identifier[choice_s] = identifier[sorted] ( identifier[choice_s] )
keyword[return] identifier[choice_s] | def get_locale_choices(locale_dir):
"""
Get a list of locale file names in the given locale dir.
"""
#/
file_name_s = os.listdir(locale_dir)
#/
choice_s = []
for file_name in file_name_s:
if file_name.endswith(I18n.TT_FILE_EXT_STXT):
(file_name_noext, _) = os.path.splitext(file_name)
if file_name_noext:
choice_s.append(file_name_noext) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['file_name']]
#/
choice_s = sorted(choice_s)
#/
return choice_s |
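A minimal usage sketch for get_locale_choices above; the '.stxt' extension is an assumption, since I18n.TT_FILE_EXT_STXT is defined elsewhere:
import os, tempfile

locale_dir = tempfile.mkdtemp()
for name in ('en.stxt', 'fr.stxt', 'README.txt'):
    open(os.path.join(locale_dir, name), 'w').close()   # create empty stub files
# get_locale_choices(locale_dir) -> ['en', 'fr']   (sorted; non-matching files skipped)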
def obfn_dfd(self):
r"""Compute data fidelity term :math:`(1/2) \sum_k \| W (\sum_m
\mathbf{d}_m * \mathbf{x}_{k,m} - \mathbf{s}_k) \|_2^2`
"""
Ef = self.eval_Rf(self.Xf)
E = sl.irfftn(Ef, self.cri.Nv, self.cri.axisN)
return (np.linalg.norm(self.W * E)**2) / 2.0 | def function[obfn_dfd, parameter[self]]:
constant[Compute data fidelity term :math:`(1/2) \sum_k \| W (\sum_m
\mathbf{d}_m * \mathbf{x}_{k,m} - \mathbf{s}_k) \|_2^2`
]
variable[Ef] assign[=] call[name[self].eval_Rf, parameter[name[self].Xf]]
variable[E] assign[=] call[name[sl].irfftn, parameter[name[Ef], name[self].cri.Nv, name[self].cri.axisN]]
return[binary_operation[binary_operation[call[name[np].linalg.norm, parameter[binary_operation[name[self].W * name[E]]]] ** constant[2]] / constant[2.0]]] | keyword[def] identifier[obfn_dfd] ( identifier[self] ):
literal[string]
identifier[Ef] = identifier[self] . identifier[eval_Rf] ( identifier[self] . identifier[Xf] )
identifier[E] = identifier[sl] . identifier[irfftn] ( identifier[Ef] , identifier[self] . identifier[cri] . identifier[Nv] , identifier[self] . identifier[cri] . identifier[axisN] )
keyword[return] ( identifier[np] . identifier[linalg] . identifier[norm] ( identifier[self] . identifier[W] * identifier[E] )** literal[int] )/ literal[int] | def obfn_dfd(self):
"""Compute data fidelity term :math:`(1/2) \\sum_k \\| W (\\sum_m
\\mathbf{d}_m * \\mathbf{x}_{k,m} - \\mathbf{s}_k) \\|_2^2`
"""
Ef = self.eval_Rf(self.Xf)
E = sl.irfftn(Ef, self.cri.Nv, self.cri.axisN)
return np.linalg.norm(self.W * E) ** 2 / 2.0 |
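Restating what the body above computes, with \hat{R} the frequency-domain residual (eval_Rf), \mathcal{F}^{-1} the inverse transform (irfftn) and \odot elementwise weighting:
E = \mathcal{F}^{-1}\big(\hat{R}(\hat{X})\big), \qquad \mathrm{obfn\_dfd} = \tfrac{1}{2}\,\lVert W \odot E \rVert_2^2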
def get_locales(self):
"""Get a list of supported locales.
Computes the list using ``I18N_LANGUAGES`` configuration variable.
"""
if self._locales_cache is None:
langs = [self.babel.default_locale]
for l, dummy_title in current_app.config.get('I18N_LANGUAGES', []):
langs.append(self.babel.load_locale(l))
self._locales_cache = langs
return self._locales_cache | def function[get_locales, parameter[self]]:
constant[Get a list of supported locales.
Computes the list using ``I18N_LANGUAGES`` configuration variable.
]
if compare[name[self]._locales_cache is constant[None]] begin[:]
variable[langs] assign[=] list[[<ast.Attribute object at 0x7da18f813400>]]
for taget[tuple[[<ast.Name object at 0x7da18f8105b0>, <ast.Name object at 0x7da1b26ae9e0>]]] in starred[call[name[current_app].config.get, parameter[constant[I18N_LANGUAGES], list[[]]]]] begin[:]
call[name[langs].append, parameter[call[name[self].babel.load_locale, parameter[name[l]]]]]
name[self]._locales_cache assign[=] name[langs]
return[name[self]._locales_cache] | keyword[def] identifier[get_locales] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_locales_cache] keyword[is] keyword[None] :
identifier[langs] =[ identifier[self] . identifier[babel] . identifier[default_locale] ]
keyword[for] identifier[l] , identifier[dummy_title] keyword[in] identifier[current_app] . identifier[config] . identifier[get] ( literal[string] ,[]):
identifier[langs] . identifier[append] ( identifier[self] . identifier[babel] . identifier[load_locale] ( identifier[l] ))
identifier[self] . identifier[_locales_cache] = identifier[langs]
keyword[return] identifier[self] . identifier[_locales_cache] | def get_locales(self):
"""Get a list of supported locales.
Computes the list using ``I18N_LANGUAGES`` configuration variable.
"""
if self._locales_cache is None:
langs = [self.babel.default_locale]
for (l, dummy_title) in current_app.config.get('I18N_LANGUAGES', []):
langs.append(self.babel.load_locale(l)) # depends on [control=['for'], data=[]]
self._locales_cache = langs # depends on [control=['if'], data=[]]
return self._locales_cache |
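A hedged sketch of the lazy cache above; the Flask app context and config values are illustrative:
# current_app.config['I18N_LANGUAGES'] = [('fr', 'French'), ('de', 'German')]
# ext.get_locales() -> [<default locale>, <Locale 'fr'>, <Locale 'de'>]
# subsequent calls return the cached list without re-reading the config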
def _advapi32_create_handles(cipher, key, iv):
"""
Creates an HCRYPTPROV and HCRYPTKEY for symmetric encryption/decryption. The
HCRYPTPROV must be released by close_context_handle() and the
HCRYPTKEY must be released by advapi32.CryptDestroyKey() when done.
:param cipher:
A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
"rc2", "rc4"
:param key:
A byte string of the symmetric key
:param iv:
The initialization vector - a byte string - unused for RC4
:return:
A tuple of (HCRYPTPROV, HCRYPTKEY)
"""
context_handle = None
if cipher == 'aes':
algorithm_id = {
16: Advapi32Const.CALG_AES_128,
24: Advapi32Const.CALG_AES_192,
32: Advapi32Const.CALG_AES_256,
}[len(key)]
else:
algorithm_id = {
'des': Advapi32Const.CALG_DES,
'tripledes_2key': Advapi32Const.CALG_3DES_112,
'tripledes_3key': Advapi32Const.CALG_3DES,
'rc2': Advapi32Const.CALG_RC2,
'rc4': Advapi32Const.CALG_RC4,
}[cipher]
provider = Advapi32Const.MS_ENH_RSA_AES_PROV
context_handle = open_context_handle(provider, verify_only=False)
blob_header_pointer = struct(advapi32, 'BLOBHEADER')
blob_header = unwrap(blob_header_pointer)
blob_header.bType = Advapi32Const.PLAINTEXTKEYBLOB
blob_header.bVersion = Advapi32Const.CUR_BLOB_VERSION
blob_header.reserved = 0
blob_header.aiKeyAlg = algorithm_id
blob_struct_pointer = struct(advapi32, 'PLAINTEXTKEYBLOB')
blob_struct = unwrap(blob_struct_pointer)
blob_struct.hdr = blob_header
blob_struct.dwKeySize = len(key)
blob = struct_bytes(blob_struct_pointer) + key
flags = 0
if cipher in set(['rc2', 'rc4']) and len(key) == 5:
flags = Advapi32Const.CRYPT_NO_SALT
key_handle_pointer = new(advapi32, 'HCRYPTKEY *')
res = advapi32.CryptImportKey(
context_handle,
blob,
len(blob),
null(),
flags,
key_handle_pointer
)
handle_error(res)
key_handle = unwrap(key_handle_pointer)
if cipher == 'rc2':
buf = new(advapi32, 'DWORD *', len(key) * 8)
res = advapi32.CryptSetKeyParam(
key_handle,
Advapi32Const.KP_EFFECTIVE_KEYLEN,
buf,
0
)
handle_error(res)
if cipher != 'rc4':
res = advapi32.CryptSetKeyParam(
key_handle,
Advapi32Const.KP_IV,
iv,
0
)
handle_error(res)
buf = new(advapi32, 'DWORD *', Advapi32Const.CRYPT_MODE_CBC)
res = advapi32.CryptSetKeyParam(
key_handle,
Advapi32Const.KP_MODE,
buf,
0
)
handle_error(res)
buf = new(advapi32, 'DWORD *', Advapi32Const.PKCS5_PADDING)
res = advapi32.CryptSetKeyParam(
key_handle,
Advapi32Const.KP_PADDING,
buf,
0
)
handle_error(res)
return (context_handle, key_handle) | def function[_advapi32_create_handles, parameter[cipher, key, iv]]:
constant[
Creates an HCRYPTPROV and HCRYPTKEY for symmetric encryption/decryption. The
HCRYPTPROV must be released by close_context_handle() and the
HCRYPTKEY must be released by advapi32.CryptDestroyKey() when done.
:param cipher:
A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
"rc2", "rc4"
:param key:
A byte string of the symmetric key
:param iv:
The initialization vector - a byte string - unused for RC4
:return:
A tuple of (HCRYPTPROV, HCRYPTKEY)
]
variable[context_handle] assign[=] constant[None]
if compare[name[cipher] equal[==] constant[aes]] begin[:]
variable[algorithm_id] assign[=] call[dictionary[[<ast.Constant object at 0x7da1b0084520>, <ast.Constant object at 0x7da1b00840d0>, <ast.Constant object at 0x7da1b0084610>], [<ast.Attribute object at 0x7da1b00d7730>, <ast.Attribute object at 0x7da1b00d7700>, <ast.Attribute object at 0x7da1b00d7070>]]][call[name[len], parameter[name[key]]]]
variable[provider] assign[=] name[Advapi32Const].MS_ENH_RSA_AES_PROV
variable[context_handle] assign[=] call[name[open_context_handle], parameter[name[provider]]]
variable[blob_header_pointer] assign[=] call[name[struct], parameter[name[advapi32], constant[BLOBHEADER]]]
variable[blob_header] assign[=] call[name[unwrap], parameter[name[blob_header_pointer]]]
name[blob_header].bType assign[=] name[Advapi32Const].PLAINTEXTKEYBLOB
name[blob_header].bVersion assign[=] name[Advapi32Const].CUR_BLOB_VERSION
name[blob_header].reserved assign[=] constant[0]
name[blob_header].aiKeyAlg assign[=] name[algorithm_id]
variable[blob_struct_pointer] assign[=] call[name[struct], parameter[name[advapi32], constant[PLAINTEXTKEYBLOB]]]
variable[blob_struct] assign[=] call[name[unwrap], parameter[name[blob_struct_pointer]]]
name[blob_struct].hdr assign[=] name[blob_header]
name[blob_struct].dwKeySize assign[=] call[name[len], parameter[name[key]]]
variable[blob] assign[=] binary_operation[call[name[struct_bytes], parameter[name[blob_struct_pointer]]] + name[key]]
variable[flags] assign[=] constant[0]
if <ast.BoolOp object at 0x7da1b00d5480> begin[:]
variable[flags] assign[=] name[Advapi32Const].CRYPT_NO_SALT
variable[key_handle_pointer] assign[=] call[name[new], parameter[name[advapi32], constant[HCRYPTKEY *]]]
variable[res] assign[=] call[name[advapi32].CryptImportKey, parameter[name[context_handle], name[blob], call[name[len], parameter[name[blob]]], call[name[null], parameter[]], name[flags], name[key_handle_pointer]]]
call[name[handle_error], parameter[name[res]]]
variable[key_handle] assign[=] call[name[unwrap], parameter[name[key_handle_pointer]]]
if compare[name[cipher] equal[==] constant[rc2]] begin[:]
variable[buf] assign[=] call[name[new], parameter[name[advapi32], constant[DWORD *], binary_operation[call[name[len], parameter[name[key]]] * constant[8]]]]
variable[res] assign[=] call[name[advapi32].CryptSetKeyParam, parameter[name[key_handle], name[Advapi32Const].KP_EFFECTIVE_KEYLEN, name[buf], constant[0]]]
call[name[handle_error], parameter[name[res]]]
if compare[name[cipher] not_equal[!=] constant[rc4]] begin[:]
variable[res] assign[=] call[name[advapi32].CryptSetKeyParam, parameter[name[key_handle], name[Advapi32Const].KP_IV, name[iv], constant[0]]]
call[name[handle_error], parameter[name[res]]]
variable[buf] assign[=] call[name[new], parameter[name[advapi32], constant[DWORD *], name[Advapi32Const].CRYPT_MODE_CBC]]
variable[res] assign[=] call[name[advapi32].CryptSetKeyParam, parameter[name[key_handle], name[Advapi32Const].KP_MODE, name[buf], constant[0]]]
call[name[handle_error], parameter[name[res]]]
variable[buf] assign[=] call[name[new], parameter[name[advapi32], constant[DWORD *], name[Advapi32Const].PKCS5_PADDING]]
variable[res] assign[=] call[name[advapi32].CryptSetKeyParam, parameter[name[key_handle], name[Advapi32Const].KP_PADDING, name[buf], constant[0]]]
call[name[handle_error], parameter[name[res]]]
return[tuple[[<ast.Name object at 0x7da1aff3db70>, <ast.Name object at 0x7da1aff3d9f0>]]] | keyword[def] identifier[_advapi32_create_handles] ( identifier[cipher] , identifier[key] , identifier[iv] ):
literal[string]
identifier[context_handle] = keyword[None]
keyword[if] identifier[cipher] == literal[string] :
identifier[algorithm_id] ={
literal[int] : identifier[Advapi32Const] . identifier[CALG_AES_128] ,
literal[int] : identifier[Advapi32Const] . identifier[CALG_AES_192] ,
literal[int] : identifier[Advapi32Const] . identifier[CALG_AES_256] ,
}[ identifier[len] ( identifier[key] )]
keyword[else] :
identifier[algorithm_id] ={
literal[string] : identifier[Advapi32Const] . identifier[CALG_DES] ,
literal[string] : identifier[Advapi32Const] . identifier[CALG_3DES_112] ,
literal[string] : identifier[Advapi32Const] . identifier[CALG_3DES] ,
literal[string] : identifier[Advapi32Const] . identifier[CALG_RC2] ,
literal[string] : identifier[Advapi32Const] . identifier[CALG_RC4] ,
}[ identifier[cipher] ]
identifier[provider] = identifier[Advapi32Const] . identifier[MS_ENH_RSA_AES_PROV]
identifier[context_handle] = identifier[open_context_handle] ( identifier[provider] , identifier[verify_only] = keyword[False] )
identifier[blob_header_pointer] = identifier[struct] ( identifier[advapi32] , literal[string] )
identifier[blob_header] = identifier[unwrap] ( identifier[blob_header_pointer] )
identifier[blob_header] . identifier[bType] = identifier[Advapi32Const] . identifier[PLAINTEXTKEYBLOB]
identifier[blob_header] . identifier[bVersion] = identifier[Advapi32Const] . identifier[CUR_BLOB_VERSION]
identifier[blob_header] . identifier[reserved] = literal[int]
identifier[blob_header] . identifier[aiKeyAlg] = identifier[algorithm_id]
identifier[blob_struct_pointer] = identifier[struct] ( identifier[advapi32] , literal[string] )
identifier[blob_struct] = identifier[unwrap] ( identifier[blob_struct_pointer] )
identifier[blob_struct] . identifier[hdr] = identifier[blob_header]
identifier[blob_struct] . identifier[dwKeySize] = identifier[len] ( identifier[key] )
identifier[blob] = identifier[struct_bytes] ( identifier[blob_struct_pointer] )+ identifier[key]
identifier[flags] = literal[int]
keyword[if] identifier[cipher] keyword[in] identifier[set] ([ literal[string] , literal[string] ]) keyword[and] identifier[len] ( identifier[key] )== literal[int] :
identifier[flags] = identifier[Advapi32Const] . identifier[CRYPT_NO_SALT]
identifier[key_handle_pointer] = identifier[new] ( identifier[advapi32] , literal[string] )
identifier[res] = identifier[advapi32] . identifier[CryptImportKey] (
identifier[context_handle] ,
identifier[blob] ,
identifier[len] ( identifier[blob] ),
identifier[null] (),
identifier[flags] ,
identifier[key_handle_pointer]
)
identifier[handle_error] ( identifier[res] )
identifier[key_handle] = identifier[unwrap] ( identifier[key_handle_pointer] )
keyword[if] identifier[cipher] == literal[string] :
identifier[buf] = identifier[new] ( identifier[advapi32] , literal[string] , identifier[len] ( identifier[key] )* literal[int] )
identifier[res] = identifier[advapi32] . identifier[CryptSetKeyParam] (
identifier[key_handle] ,
identifier[Advapi32Const] . identifier[KP_EFFECTIVE_KEYLEN] ,
identifier[buf] ,
literal[int]
)
identifier[handle_error] ( identifier[res] )
keyword[if] identifier[cipher] != literal[string] :
identifier[res] = identifier[advapi32] . identifier[CryptSetKeyParam] (
identifier[key_handle] ,
identifier[Advapi32Const] . identifier[KP_IV] ,
identifier[iv] ,
literal[int]
)
identifier[handle_error] ( identifier[res] )
identifier[buf] = identifier[new] ( identifier[advapi32] , literal[string] , identifier[Advapi32Const] . identifier[CRYPT_MODE_CBC] )
identifier[res] = identifier[advapi32] . identifier[CryptSetKeyParam] (
identifier[key_handle] ,
identifier[Advapi32Const] . identifier[KP_MODE] ,
identifier[buf] ,
literal[int]
)
identifier[handle_error] ( identifier[res] )
identifier[buf] = identifier[new] ( identifier[advapi32] , literal[string] , identifier[Advapi32Const] . identifier[PKCS5_PADDING] )
identifier[res] = identifier[advapi32] . identifier[CryptSetKeyParam] (
identifier[key_handle] ,
identifier[Advapi32Const] . identifier[KP_PADDING] ,
identifier[buf] ,
literal[int]
)
identifier[handle_error] ( identifier[res] )
keyword[return] ( identifier[context_handle] , identifier[key_handle] ) | def _advapi32_create_handles(cipher, key, iv):
"""
Creates an HCRYPTPROV and HCRYPTKEY for symmetric encryption/decryption. The
HCRYPTPROV must be released by close_context_handle() and the
HCRYPTKEY must be released by advapi32.CryptDestroyKey() when done.
:param cipher:
A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
"rc2", "rc4"
:param key:
A byte string of the symmetric key
:param iv:
The initialization vector - a byte string - unused for RC4
:return:
A tuple of (HCRYPTPROV, HCRYPTKEY)
"""
context_handle = None
if cipher == 'aes':
algorithm_id = {16: Advapi32Const.CALG_AES_128, 24: Advapi32Const.CALG_AES_192, 32: Advapi32Const.CALG_AES_256}[len(key)] # depends on [control=['if'], data=[]]
else:
algorithm_id = {'des': Advapi32Const.CALG_DES, 'tripledes_2key': Advapi32Const.CALG_3DES_112, 'tripledes_3key': Advapi32Const.CALG_3DES, 'rc2': Advapi32Const.CALG_RC2, 'rc4': Advapi32Const.CALG_RC4}[cipher]
provider = Advapi32Const.MS_ENH_RSA_AES_PROV
context_handle = open_context_handle(provider, verify_only=False)
blob_header_pointer = struct(advapi32, 'BLOBHEADER')
blob_header = unwrap(blob_header_pointer)
blob_header.bType = Advapi32Const.PLAINTEXTKEYBLOB
blob_header.bVersion = Advapi32Const.CUR_BLOB_VERSION
blob_header.reserved = 0
blob_header.aiKeyAlg = algorithm_id
blob_struct_pointer = struct(advapi32, 'PLAINTEXTKEYBLOB')
blob_struct = unwrap(blob_struct_pointer)
blob_struct.hdr = blob_header
blob_struct.dwKeySize = len(key)
blob = struct_bytes(blob_struct_pointer) + key
flags = 0
if cipher in set(['rc2', 'rc4']) and len(key) == 5:
flags = Advapi32Const.CRYPT_NO_SALT # depends on [control=['if'], data=[]]
key_handle_pointer = new(advapi32, 'HCRYPTKEY *')
res = advapi32.CryptImportKey(context_handle, blob, len(blob), null(), flags, key_handle_pointer)
handle_error(res)
key_handle = unwrap(key_handle_pointer)
if cipher == 'rc2':
buf = new(advapi32, 'DWORD *', len(key) * 8)
res = advapi32.CryptSetKeyParam(key_handle, Advapi32Const.KP_EFFECTIVE_KEYLEN, buf, 0)
handle_error(res) # depends on [control=['if'], data=[]]
if cipher != 'rc4':
res = advapi32.CryptSetKeyParam(key_handle, Advapi32Const.KP_IV, iv, 0)
handle_error(res)
buf = new(advapi32, 'DWORD *', Advapi32Const.CRYPT_MODE_CBC)
res = advapi32.CryptSetKeyParam(key_handle, Advapi32Const.KP_MODE, buf, 0)
handle_error(res)
buf = new(advapi32, 'DWORD *', Advapi32Const.PKCS5_PADDING)
res = advapi32.CryptSetKeyParam(key_handle, Advapi32Const.KP_PADDING, buf, 0)
handle_error(res) # depends on [control=['if'], data=[]]
return (context_handle, key_handle) |
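A hedged lifecycle sketch for the handle pair returned above (Windows only; the AES-128 key/IV lengths are illustrative):
# context_handle, key_handle = _advapi32_create_handles('aes', b'\x00' * 16, b'\x01' * 16)
# try:
#     ...  # CryptEncrypt/CryptDecrypt using key_handle
# finally:
#     advapi32.CryptDestroyKey(key_handle)   # required, per the docstring
#     close_context_handle(context_handle)   # required, per the docstring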
def containsPval(self, paramName, value):
"""Returns true if *value* for parameter type *paramName* is in the
auto parameters
:param paramName: the name of the auto-parameter to match, e.g. 'frequency'
:type paramName: str
:param value: the value of the parameter to search for
:returns: bool
"""
# this may break if there are two parameters in the model with
# the same parameter type!!!
params = self._autoParams.allData()
steps = self.autoParamRanges()
pnames = [p['parameter'] for p in params]
if paramName in pnames:
pidx = pnames.index(paramName)
return value in steps[pidx]
else:
return False | def function[containsPval, parameter[self, paramName, value]]:
constant[Returns true if *value* for parameter type *paramName* is in the
auto parameters
:param paramName: the name of the auto-parameter to match, e.g. 'frequency'
:type paramName: str
:param value: the value of the parameter to search for
:returns: bool
]
variable[params] assign[=] call[name[self]._autoParams.allData, parameter[]]
variable[steps] assign[=] call[name[self].autoParamRanges, parameter[]]
variable[pnames] assign[=] <ast.ListComp object at 0x7da18fe90be0>
if compare[name[paramName] in name[pnames]] begin[:]
variable[pidx] assign[=] call[name[pnames].index, parameter[name[paramName]]]
return[compare[name[value] in call[name[steps]][name[pidx]]]] | keyword[def] identifier[containsPval] ( identifier[self] , identifier[paramName] , identifier[value] ):
literal[string]
identifier[params] = identifier[self] . identifier[_autoParams] . identifier[allData] ()
identifier[steps] = identifier[self] . identifier[autoParamRanges] ()
identifier[pnames] =[ identifier[p] [ literal[string] ] keyword[for] identifier[p] keyword[in] identifier[params] ]
keyword[if] identifier[paramName] keyword[in] identifier[pnames] :
identifier[pidx] = identifier[pnames] . identifier[index] ( identifier[paramName] )
keyword[return] identifier[value] keyword[in] identifier[steps] [ identifier[pidx] ]
keyword[else] :
keyword[return] keyword[False] | def containsPval(self, paramName, value):
"""Returns true if *value* for parameter type *paramName* is in the
auto parameters
:param paramName: the name of the auto-parameter to match, e.g. 'frequency'
:type paramName: str
:param value: the value of the parameter to search for
:returns: bool
""" # this may break if there are two parameters in the model with
# the same parameter type!!!
params = self._autoParams.allData()
steps = self.autoParamRanges()
pnames = [p['parameter'] for p in params]
if paramName in pnames:
pidx = pnames.index(paramName)
return value in steps[pidx] # depends on [control=['if'], data=['paramName', 'pnames']]
else:
return False |
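A hedged sketch of the lookup above, assuming a single auto-parameter row named 'frequency' whose expanded range is [1000, 2000, 4000]:
# model.containsPval('frequency', 2000) -> True   (value is among the steps)
# model.containsPval('frequency', 3000) -> False  (name matches, value absent)
# model.containsPval('intensity', 2000) -> False  (no such parameter row)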
def read(self, vals):
"""Read values.
Args:
vals (list): list of strings representing values
"""
i = 0
if len(vals[i]) == 0:
self.city = None
else:
self.city = vals[i]
i += 1
if len(vals[i]) == 0:
self.state_province_region = None
else:
self.state_province_region = vals[i]
i += 1
if len(vals[i]) == 0:
self.country = None
else:
self.country = vals[i]
i += 1
if len(vals[i]) == 0:
self.source = None
else:
self.source = vals[i]
i += 1
if len(vals[i]) == 0:
self.wmo = None
else:
self.wmo = vals[i]
i += 1
if len(vals[i]) == 0:
self.latitude = None
else:
self.latitude = vals[i]
i += 1
if len(vals[i]) == 0:
self.longitude = None
else:
self.longitude = vals[i]
i += 1
if len(vals[i]) == 0:
self.timezone = None
else:
self.timezone = vals[i]
i += 1
if len(vals[i]) == 0:
self.elevation = None
else:
self.elevation = vals[i]
i += 1 | def function[read, parameter[self, vals]]:
constant[Read values.
Args:
vals (list): list of strings representing values
]
variable[i] assign[=] constant[0]
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].city assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f9f6a0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].state_province_region assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f9fdf0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].country assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f9e6e0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].source assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f9f370>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].wmo assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f9c3a0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].latitude assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f9cd90>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].longitude assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f9ef50>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].timezone assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f9fc70>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].elevation assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f9dc60> | keyword[def] identifier[read] ( identifier[self] , identifier[vals] ):
literal[string]
identifier[i] = literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[city] = keyword[None]
keyword[else] :
identifier[self] . identifier[city] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[state_province_region] = keyword[None]
keyword[else] :
identifier[self] . identifier[state_province_region] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[country] = keyword[None]
keyword[else] :
identifier[self] . identifier[country] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[source] = keyword[None]
keyword[else] :
identifier[self] . identifier[source] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[wmo] = keyword[None]
keyword[else] :
identifier[self] . identifier[wmo] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[latitude] = keyword[None]
keyword[else] :
identifier[self] . identifier[latitude] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[longitude] = keyword[None]
keyword[else] :
identifier[self] . identifier[longitude] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[timezone] = keyword[None]
keyword[else] :
identifier[self] . identifier[timezone] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[elevation] = keyword[None]
keyword[else] :
identifier[self] . identifier[elevation] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int] | def read(self, vals):
"""Read values.
Args:
vals (list): list of strings representing values
"""
i = 0
if len(vals[i]) == 0:
self.city = None # depends on [control=['if'], data=[]]
else:
self.city = vals[i]
i += 1
if len(vals[i]) == 0:
self.state_province_region = None # depends on [control=['if'], data=[]]
else:
self.state_province_region = vals[i]
i += 1
if len(vals[i]) == 0:
self.country = None # depends on [control=['if'], data=[]]
else:
self.country = vals[i]
i += 1
if len(vals[i]) == 0:
self.source = None # depends on [control=['if'], data=[]]
else:
self.source = vals[i]
i += 1
if len(vals[i]) == 0:
self.wmo = None # depends on [control=['if'], data=[]]
else:
self.wmo = vals[i]
i += 1
if len(vals[i]) == 0:
self.latitude = None # depends on [control=['if'], data=[]]
else:
self.latitude = vals[i]
i += 1
if len(vals[i]) == 0:
self.longitude = None # depends on [control=['if'], data=[]]
else:
self.longitude = vals[i]
i += 1
if len(vals[i]) == 0:
self.timezone = None # depends on [control=['if'], data=[]]
else:
self.timezone = vals[i]
i += 1
if len(vals[i]) == 0:
self.elevation = None # depends on [control=['if'], data=[]]
else:
self.elevation = vals[i]
i += 1 |
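A minimal sketch of the positional parsing above, assuming `header` is an instance of the enclosing class (values follow the EPW-style field order and are illustrative):
vals = ['Denver', 'CO', 'USA', 'TMY3', '724666', '39.74', '-105.18', '-7.0', '1829.0']
header.read(vals)
# header.city == 'Denver' ... header.elevation == '1829.0' (values stay strings);
# an empty field ('') would be stored as None instead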
def get_text(html_content, display_images=False, deduplicate_captions=False, display_links=False):
'''
:param html_content: the HTML content to convert.
:returns:
a text representation of the html content.
'''
html_content = html_content.strip()
if not html_content:
return ""
# strip XML declaration, if necessary
if html_content.startswith('<?xml '):
html_content = RE_STRIP_XML_DECLARATION.sub('', html_content, count=1)
html_tree = fromstring(html_content)
parser = Inscriptis(html_tree, display_images=display_images, deduplicate_captions=deduplicate_captions, display_links=display_links)
return parser.get_text() | def function[get_text, parameter[html_content, display_images, deduplicate_captions, display_links]]:
constant[
:param html_content: the HTML content to convert.
:returns:
a text representation of the html content.
]
variable[html_content] assign[=] call[name[html_content].strip, parameter[]]
if <ast.UnaryOp object at 0x7da1b0408520> begin[:]
return[constant[]]
if call[name[html_content].startswith, parameter[constant[<?xml ]]] begin[:]
variable[html_content] assign[=] call[name[RE_STRIP_XML_DECLARATION].sub, parameter[constant[], name[html_content]]]
variable[html_tree] assign[=] call[name[fromstring], parameter[name[html_content]]]
variable[parser] assign[=] call[name[Inscriptis], parameter[name[html_tree]]]
return[call[name[parser].get_text, parameter[]]] | keyword[def] identifier[get_text] ( identifier[html_content] , identifier[display_images] = keyword[False] , identifier[deduplicate_captions] = keyword[False] , identifier[display_links] = keyword[False] ):
literal[string]
identifier[html_content] = identifier[html_content] . identifier[strip] ()
keyword[if] keyword[not] identifier[html_content] :
keyword[return] literal[string]
keyword[if] identifier[html_content] . identifier[startswith] ( literal[string] ):
identifier[html_content] = identifier[RE_STRIP_XML_DECLARATION] . identifier[sub] ( literal[string] , identifier[html_content] , identifier[count] = literal[int] )
identifier[html_tree] = identifier[fromstring] ( identifier[html_content] )
identifier[parser] = identifier[Inscriptis] ( identifier[html_tree] , identifier[display_images] = identifier[display_images] , identifier[deduplicate_captions] = identifier[deduplicate_captions] , identifier[display_links] = identifier[display_links] )
keyword[return] identifier[parser] . identifier[get_text] () | def get_text(html_content, display_images=False, deduplicate_captions=False, display_links=False):
"""
:param html_content: the HTML content to convert.
:returns:
a text representation of the html content.
"""
html_content = html_content.strip()
if not html_content:
return '' # depends on [control=['if'], data=[]]
# strip XML declaration, if necessary
if html_content.startswith('<?xml '):
html_content = RE_STRIP_XML_DECLARATION.sub('', html_content, count=1) # depends on [control=['if'], data=[]]
html_tree = fromstring(html_content)
parser = Inscriptis(html_tree, display_images=display_images, deduplicate_captions=deduplicate_captions, display_links=display_links)
return parser.get_text() |
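A short usage sketch for get_text as defined above:
html = '<html><body><h1>Hello</h1><p>World</p></body></html>'
print(get_text(html))
# parses the markup with lxml's fromstring and prints a plain-text rendering;
# display_images/display_links/deduplicate_captions toggle the Inscriptis options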
def get_banks_by_assessment_offered(self, assessment_offered_id):
"""Gets the list of ``Banks`` mapped to an ``AssessmentOffered``.
arg: assessment_offered_id (osid.id.Id): ``Id`` of an
``AssessmentOffered``
return: (osid.assessment.BankList) - list of banks
raise: NotFound - ``assessment_offered_id`` is not found
raise: NullArgument - ``assessment_offered_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_bins_by_resource
mgr = self._get_provider_manager('ASSESSMENT', local=True)
lookup_session = mgr.get_bank_lookup_session(proxy=self._proxy)
return lookup_session.get_banks_by_ids(
self.get_bank_ids_by_assessment_offered(assessment_offered_id)) | def function[get_banks_by_assessment_offered, parameter[self, assessment_offered_id]]:
constant[Gets the list of ``Banks`` mapped to an ``AssessmentOffered``.
arg: assessment_offered_id (osid.id.Id): ``Id`` of an
``AssessmentOffered``
return: (osid.assessment.BankList) - list of banks
raise: NotFound - ``assessment_offered_id`` is not found
raise: NullArgument - ``assessment_offered_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
]
variable[mgr] assign[=] call[name[self]._get_provider_manager, parameter[constant[ASSESSMENT]]]
variable[lookup_session] assign[=] call[name[mgr].get_bank_lookup_session, parameter[]]
return[call[name[lookup_session].get_banks_by_ids, parameter[call[name[self].get_bank_ids_by_assessment_offered, parameter[name[assessment_offered_id]]]]]] | keyword[def] identifier[get_banks_by_assessment_offered] ( identifier[self] , identifier[assessment_offered_id] ):
literal[string]
identifier[mgr] = identifier[self] . identifier[_get_provider_manager] ( literal[string] , identifier[local] = keyword[True] )
identifier[lookup_session] = identifier[mgr] . identifier[get_bank_lookup_session] ( identifier[proxy] = identifier[self] . identifier[_proxy] )
keyword[return] identifier[lookup_session] . identifier[get_banks_by_ids] (
identifier[self] . identifier[get_bank_ids_by_assessment_offered] ( identifier[assessment_offered_id] )) | def get_banks_by_assessment_offered(self, assessment_offered_id):
"""Gets the list of ``Banks`` mapped to an ``AssessmentOffered``.
arg: assessment_offered_id (osid.id.Id): ``Id`` of an
``AssessmentOffered``
return: (osid.assessment.BankList) - list of banks
raise: NotFound - ``assessment_offered_id`` is not found
raise: NullArgument - ``assessment_offered_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_bins_by_resource
mgr = self._get_provider_manager('ASSESSMENT', local=True)
lookup_session = mgr.get_bank_lookup_session(proxy=self._proxy)
return lookup_session.get_banks_by_ids(self.get_bank_ids_by_assessment_offered(assessment_offered_id)) |
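A hedged OSID usage sketch; the session and Id objects are illustrative:
# banks = session.get_banks_by_assessment_offered(offered_id)
# for bank in banks:
#     print(bank.get_display_name())
# raises NotFound if offered_id does not resolve to an AssessmentOffered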
def set_pkg_licenses_concluded(self, doc, licenses):
"""Sets the package's concluded licenses.
licenses - License info.
Raises CardinalityError if already defined.
Raises OrderError if no package previously defined.
Raises SPDXValueError if data malformed.
"""
self.assert_package_exists()
if not self.package_conc_lics_set:
self.package_conc_lics_set = True
if validations.validate_lics_conc(licenses):
doc.package.conc_lics = licenses
return True
else:
raise SPDXValueError('Package::ConcludedLicenses')
else:
raise CardinalityError('Package::ConcludedLicenses') | def function[set_pkg_licenses_concluded, parameter[self, doc, licenses]]:
constant[Sets the package's concluded licenses.
licenses - License info.
Raises CardinalityError if already defined.
Raises OrderError if no package previously defined.
Raises SPDXValueError if data malformed.
]
call[name[self].assert_package_exists, parameter[]]
if <ast.UnaryOp object at 0x7da1b0168100> begin[:]
name[self].package_conc_lics_set assign[=] constant[True]
if call[name[validations].validate_lics_conc, parameter[name[licenses]]] begin[:]
name[doc].package.conc_lics assign[=] name[licenses]
return[constant[True]] | keyword[def] identifier[set_pkg_licenses_concluded] ( identifier[self] , identifier[doc] , identifier[licenses] ):
literal[string]
identifier[self] . identifier[assert_package_exists] ()
keyword[if] keyword[not] identifier[self] . identifier[package_conc_lics_set] :
identifier[self] . identifier[package_conc_lics_set] = keyword[True]
keyword[if] identifier[validations] . identifier[validate_lics_conc] ( identifier[licenses] ):
identifier[doc] . identifier[package] . identifier[conc_lics] = identifier[licenses]
keyword[return] keyword[True]
keyword[else] :
keyword[raise] identifier[SPDXValueError] ( literal[string] )
keyword[else] :
keyword[raise] identifier[CardinalityError] ( literal[string] ) | def set_pkg_licenses_concluded(self, doc, licenses):
"""Sets the package's concluded licenses.
licenses - License info.
Raises CardinalityError if already defined.
Raises OrderError if no package previously defined.
Raises SPDXValueError if data malformed.
"""
self.assert_package_exists()
if not self.package_conc_lics_set:
self.package_conc_lics_set = True
if validations.validate_lics_conc(licenses):
doc.package.conc_lics = licenses
return True # depends on [control=['if'], data=[]]
else:
raise SPDXValueError('Package::ConcludedLicenses') # depends on [control=['if'], data=[]]
else:
raise CardinalityError('Package::ConcludedLicenses') |
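A hedged builder sketch; the license object shown is an assumption about the surrounding SPDX tooling:
# builder.set_pkg_licenses_concluded(doc, license_obj)   # returns True, sets doc.package.conc_lics
# builder.set_pkg_licenses_concluded(doc, license_obj)
# -> raises CardinalityError('Package::ConcludedLicenses') on the second call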
def add_arguments(cls, parser, sys_arg_list=None):
"""
Arguments for the ICMPecho health monitor plugin.
"""
parser.add_argument('--icmp_check_interval',
dest='icmp_check_interval',
required=False, default=2, type=float,
help="ICMPecho interval in seconds, default 2 "
"(only for 'icmpecho' health monitor plugin)")
return ["icmp_check_interval"] | def function[add_arguments, parameter[cls, parser, sys_arg_list]]:
constant[
Arguments for the ICMPecho health monitor plugin.
]
call[name[parser].add_argument, parameter[constant[--icmp_check_interval]]]
return[list[[<ast.Constant object at 0x7da18dc9aa10>]]] | keyword[def] identifier[add_arguments] ( identifier[cls] , identifier[parser] , identifier[sys_arg_list] = keyword[None] ):
literal[string]
identifier[parser] . identifier[add_argument] ( literal[string] ,
identifier[dest] = literal[string] ,
identifier[required] = keyword[False] , identifier[default] = literal[int] , identifier[type] = identifier[float] ,
identifier[help] = literal[string]
literal[string] )
keyword[return] [ literal[string] ] | def add_arguments(cls, parser, sys_arg_list=None):
"""
Arguments for the ICMPecho health monitor plugin.
"""
parser.add_argument('--icmp_check_interval', dest='icmp_check_interval', required=False, default=2, type=float, help="ICMPecho interval in seconds, default 2 (only for 'icmpecho' health monitor plugin)")
return ['icmp_check_interval'] |
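A minimal sketch wiring the plugin option above into a parser; the plugin class name is assumed:
import argparse

parser = argparse.ArgumentParser()
conf_keys = Icmpecho.add_arguments(parser)   # classmethod; plugin class name assumed
args = parser.parse_args(['--icmp_check_interval', '0.5'])
# args.icmp_check_interval == 0.5 and conf_keys == ['icmp_check_interval']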
def set_do_not_order_list(self, restricted_list, on_error='fail'):
"""Set a restriction on which assets can be ordered.
Parameters
----------
restricted_list : container[Asset], SecurityList
The assets that cannot be ordered.
"""
if isinstance(restricted_list, SecurityList):
warnings.warn(
"`set_do_not_order_list(security_lists.leveraged_etf_list)` "
"is deprecated. Use `set_asset_restrictions("
"security_lists.restrict_leveraged_etfs)` instead.",
category=DeprecationWarning,
stacklevel=2
)
restrictions = SecurityListRestrictions(restricted_list)
else:
warnings.warn(
"`set_do_not_order_list(container_of_assets)` is deprecated. "
"Create a zipline.finance.asset_restrictions."
"StaticRestrictions object with a container of assets and use "
"`set_asset_restrictions(StaticRestrictions("
"container_of_assets))` instead.",
category=DeprecationWarning,
stacklevel=2
)
restrictions = StaticRestrictions(restricted_list)
self.set_asset_restrictions(restrictions, on_error) | def function[set_do_not_order_list, parameter[self, restricted_list, on_error]]:
constant[Set a restriction on which assets can be ordered.
Parameters
----------
restricted_list : container[Asset], SecurityList
The assets that cannot be ordered.
]
if call[name[isinstance], parameter[name[restricted_list], name[SecurityList]]] begin[:]
call[name[warnings].warn, parameter[constant[`set_do_not_order_list(security_lists.leveraged_etf_list)` is deprecated. Use `set_asset_restrictions(security_lists.restrict_leveraged_etfs)` instead.]]]
variable[restrictions] assign[=] call[name[SecurityListRestrictions], parameter[name[restricted_list]]]
call[name[self].set_asset_restrictions, parameter[name[restrictions], name[on_error]]] | keyword[def] identifier[set_do_not_order_list] ( identifier[self] , identifier[restricted_list] , identifier[on_error] = literal[string] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[restricted_list] , identifier[SecurityList] ):
identifier[warnings] . identifier[warn] (
literal[string]
literal[string]
literal[string] ,
identifier[category] = identifier[DeprecationWarning] ,
identifier[stacklevel] = literal[int]
)
identifier[restrictions] = identifier[SecurityListRestrictions] ( identifier[restricted_list] )
keyword[else] :
identifier[warnings] . identifier[warn] (
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] ,
identifier[category] = identifier[DeprecationWarning] ,
identifier[stacklevel] = literal[int]
)
identifier[restrictions] = identifier[StaticRestrictions] ( identifier[restricted_list] )
identifier[self] . identifier[set_asset_restrictions] ( identifier[restrictions] , identifier[on_error] ) | def set_do_not_order_list(self, restricted_list, on_error='fail'):
"""Set a restriction on which assets can be ordered.
Parameters
----------
restricted_list : container[Asset], SecurityList
The assets that cannot be ordered.
"""
if isinstance(restricted_list, SecurityList):
warnings.warn('`set_do_not_order_list(security_lists.leveraged_etf_list)` is deprecated. Use `set_asset_restrictions(security_lists.restrict_leveraged_etfs)` instead.', category=DeprecationWarning, stacklevel=2)
restrictions = SecurityListRestrictions(restricted_list) # depends on [control=['if'], data=[]]
else:
warnings.warn('`set_do_not_order_list(container_of_assets)` is deprecated. Create a zipline.finance.asset_restrictions.StaticRestrictions object with a container of assets and use `set_asset_restrictions(StaticRestrictions(container_of_assets))` instead.', category=DeprecationWarning, stacklevel=2)
restrictions = StaticRestrictions(restricted_list)
self.set_asset_restrictions(restrictions, on_error) |
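A hedged sketch of the deprecated call and the replacement named in its own warning text:
# inside a zipline algorithm's initialize():
#     set_do_not_order_list(security_lists.leveraged_etf_list)        # deprecated form
#     set_asset_restrictions(security_lists.restrict_leveraged_etfs)  # preferred form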
def add_signature(key, inputs, outputs):
"""Adds a signature to current graph.
Args:
key: Signature key as a string.
inputs: Signature inputs as a map from string to Tensor or SparseTensor.
outputs: Signature outputs as a map from string to Tensor or SparseTensor.
(Recall that a Variable is not a Tensor, but Variable.value() is.)
Raises:
TypeError: if the arguments have the wrong types.
"""
_check_dict_maps_to_tensors_or_sparse_tensors(inputs)
_check_dict_maps_to_tensors_or_sparse_tensors(outputs)
input_info = {
input_name: tf_v1.saved_model.utils.build_tensor_info(tensor)
for input_name, tensor in inputs.items()
}
output_info = {
output_name: tf_v1.saved_model.utils.build_tensor_info(tensor)
for output_name, tensor in outputs.items()
}
signature = tf_v1.saved_model.signature_def_utils.build_signature_def(
input_info, output_info)
tf_v1.add_to_collection(_SIGNATURE_COLLECTION, (key, signature)) | def function[add_signature, parameter[key, inputs, outputs]]:
constant[Adds a signature to current graph.
Args:
key: Signature key as a string.
inputs: Signature inputs as a map from string to Tensor or SparseTensor.
outputs: Signature outputs as a map from string to Tensor or SparseTensor.
(Recall that a Variable is not a Tensor, but Variable.value() is.)
Raises:
TypeError: if the arguments have the wrong types.
]
call[name[_check_dict_maps_to_tensors_or_sparse_tensors], parameter[name[inputs]]]
call[name[_check_dict_maps_to_tensors_or_sparse_tensors], parameter[name[outputs]]]
variable[input_info] assign[=] <ast.DictComp object at 0x7da1b20bbd60>
variable[output_info] assign[=] <ast.DictComp object at 0x7da1b1f606d0>
variable[signature] assign[=] call[name[tf_v1].saved_model.signature_def_utils.build_signature_def, parameter[name[input_info], name[output_info]]]
call[name[tf_v1].add_to_collection, parameter[name[_SIGNATURE_COLLECTION], tuple[[<ast.Name object at 0x7da1b1f702e0>, <ast.Name object at 0x7da1b1f73190>]]]] | keyword[def] identifier[add_signature] ( identifier[key] , identifier[inputs] , identifier[outputs] ):
literal[string]
identifier[_check_dict_maps_to_tensors_or_sparse_tensors] ( identifier[inputs] )
identifier[_check_dict_maps_to_tensors_or_sparse_tensors] ( identifier[outputs] )
identifier[input_info] ={
identifier[input_name] : identifier[tf_v1] . identifier[saved_model] . identifier[utils] . identifier[build_tensor_info] ( identifier[tensor] )
keyword[for] identifier[input_name] , identifier[tensor] keyword[in] identifier[inputs] . identifier[items] ()
}
identifier[output_info] ={
identifier[output_name] : identifier[tf_v1] . identifier[saved_model] . identifier[utils] . identifier[build_tensor_info] ( identifier[tensor] )
keyword[for] identifier[output_name] , identifier[tensor] keyword[in] identifier[outputs] . identifier[items] ()
}
identifier[signature] = identifier[tf_v1] . identifier[saved_model] . identifier[signature_def_utils] . identifier[build_signature_def] (
identifier[input_info] , identifier[output_info] )
identifier[tf_v1] . identifier[add_to_collection] ( identifier[_SIGNATURE_COLLECTION] ,( identifier[key] , identifier[signature] )) | def add_signature(key, inputs, outputs):
"""Adds a signature to current graph.
Args:
key: Signature key as a string.
inputs: Signature inputs as a map from string to Tensor or SparseTensor.
outputs: Signature outputs as a map from string to Tensor or SparseTensor.
(Recall that a Variable is not a Tensor, but Variable.value() is.)
Raises:
TypeError: if the arguments have the wrong types.
"""
_check_dict_maps_to_tensors_or_sparse_tensors(inputs)
_check_dict_maps_to_tensors_or_sparse_tensors(outputs)
input_info = {input_name: tf_v1.saved_model.utils.build_tensor_info(tensor) for (input_name, tensor) in inputs.items()}
output_info = {output_name: tf_v1.saved_model.utils.build_tensor_info(tensor) for (output_name, tensor) in outputs.items()}
signature = tf_v1.saved_model.signature_def_utils.build_signature_def(input_info, output_info)
tf_v1.add_to_collection(_SIGNATURE_COLLECTION, (key, signature)) |
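A minimal TF1-style sketch for add_signature, assuming tensorflow is imported as tf with tf_v1 = tf.compat.v1 (as the tf_v1 references above imply):
x = tf_v1.placeholder(tf.float32, shape=[None, 3])
y = tf.square(x)
add_signature('default', inputs={'x': x}, outputs={'y': y})
# builds TensorInfo for each entry and stores (key, SignatureDef) in the
# module's signature collection for later export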
def discard(self, element):
"""Remove an element. Do not raise an exception if absent."""
key = self._transform(element)
if key in self._elements:
del self._elements[key] | def function[discard, parameter[self, element]]:
constant[Remove an element. Do not raise an exception if absent.]
variable[key] assign[=] call[name[self]._transform, parameter[name[element]]]
if compare[name[key] in name[self]._elements] begin[:]
<ast.Delete object at 0x7da204564a90> | keyword[def] identifier[discard] ( identifier[self] , identifier[element] ):
literal[string]
identifier[key] = identifier[self] . identifier[_transform] ( identifier[element] )
keyword[if] identifier[key] keyword[in] identifier[self] . identifier[_elements] :
keyword[del] identifier[self] . identifier[_elements] [ identifier[key] ] | def discard(self, element):
"""Remove an element. Do not raise an exception if absent."""
key = self._transform(element)
if key in self._elements:
del self._elements[key] # depends on [control=['if'], data=['key']] |
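A short sketch of the transform-keyed removal above; the container class and its case-folding transform are hypothetical:
s = CanonicalSet(transform=str.lower)   # hypothetical class using this mixin
s.add('Apple')
s.discard('APPLE')    # removed: both values transform to the key 'apple'
s.discard('banana')   # absent key: silently ignored, no KeyError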
def element_should_contain_text(self, locator, expected, message=''):
"""Verifies element identified by ``locator`` contains text ``expected``.
If you wish to assert an exact (not a substring) match on the text
of the element, use `Element Text Should Be`.
Key attributes for arbitrary elements are ``id`` and ``xpath``. ``message`` can be used to override the default error message.
New in AppiumLibrary 1.4.
"""
self._info("Verifying element '%s' contains text '%s'."
% (locator, expected))
actual = self._get_text(locator)
if not expected in actual:
if not message:
message = "Element '%s' should have contained text '%s' but "\
"its text was '%s'." % (locator, expected, actual)
raise AssertionError(message) | def function[element_should_contain_text, parameter[self, locator, expected, message]]:
constant[Verifies element identified by ``locator`` contains text ``expected``.
If you wish to assert an exact (not a substring) match on the text
of the element, use `Element Text Should Be`.
Key attributes for arbitrary elements are ``id`` and ``xpath``. ``message`` can be used to override the default error message.
New in AppiumLibrary 1.4.
]
call[name[self]._info, parameter[binary_operation[constant[Verifying element '%s' contains text '%s'.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f8119f0>, <ast.Name object at 0x7da18f811540>]]]]]
variable[actual] assign[=] call[name[self]._get_text, parameter[name[locator]]]
if <ast.UnaryOp object at 0x7da18f8134f0> begin[:]
if <ast.UnaryOp object at 0x7da18f8122c0> begin[:]
variable[message] assign[=] binary_operation[constant[Element '%s' should have contained text '%s' but its text was '%s'.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f8105e0>, <ast.Name object at 0x7da18f812830>, <ast.Name object at 0x7da18f8138b0>]]]
<ast.Raise object at 0x7da18f813be0> | keyword[def] identifier[element_should_contain_text] ( identifier[self] , identifier[locator] , identifier[expected] , identifier[message] = literal[string] ):
literal[string]
identifier[self] . identifier[_info] ( literal[string]
%( identifier[locator] , identifier[expected] ))
identifier[actual] = identifier[self] . identifier[_get_text] ( identifier[locator] )
keyword[if] keyword[not] identifier[expected] keyword[in] identifier[actual] :
keyword[if] keyword[not] identifier[message] :
identifier[message] = literal[string] literal[string] %( identifier[locator] , identifier[expected] , identifier[actual] )
keyword[raise] identifier[AssertionError] ( identifier[message] ) | def element_should_contain_text(self, locator, expected, message=''):
"""Verifies element identified by ``locator`` contains text ``expected``.
If you wish to assert an exact (not a substring) match on the text
of the element, use `Element Text Should Be`.
Key attributes for arbitrary elements are ``id`` and ``xpath``. ``message`` can be used to override the default error message.
New in AppiumLibrary 1.4.
"""
self._info("Verifying element '%s' contains text '%s'." % (locator, expected))
actual = self._get_text(locator)
if not expected in actual:
if not message:
message = "Element '%s' should have contained text '%s' but its text was '%s'." % (locator, expected, actual) # depends on [control=['if'], data=[]]
raise AssertionError(message) # depends on [control=['if'], data=[]] |
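A hedged Robot Framework usage line for the keyword above (locator, expected text and custom message are illustrative):
# Element Should Contain Text    id=welcome_banner    Welcome    Banner text was wrong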
def check_str_length(str_to_check, limit=MAX_LENGTH):
"""Check the length of a string. If exceeds limit, then truncate it.
:type str_to_check: str
:param str_to_check: String to check.
:type limit: int
:param limit: The upper limit of the length.
:rtype: tuple
:returns: The string itself if the length limit is not exceeded, or the
truncated string and the truncated byte count if it is.
"""
str_bytes = str_to_check.encode(UTF8)
str_len = len(str_bytes)
truncated_byte_count = 0
if str_len > limit:
truncated_byte_count = str_len - limit
str_bytes = str_bytes[:limit]
result = str(str_bytes.decode(UTF8, errors='ignore'))
return (result, truncated_byte_count) | def function[check_str_length, parameter[str_to_check, limit]]:
constant[Check the length of a string. If it exceeds the limit, truncate it.
:type str_to_check: str
:param str_to_check: String to check.
:type limit: int
:param limit: The upper limit of the length.
:rtype: tuple
:returns: The string itself if the length limit is not exceeded, or the
truncated string and the truncated byte count if it is.
]
variable[str_bytes] assign[=] call[name[str_to_check].encode, parameter[name[UTF8]]]
variable[str_len] assign[=] call[name[len], parameter[name[str_bytes]]]
variable[truncated_byte_count] assign[=] constant[0]
if compare[name[str_len] greater[>] name[limit]] begin[:]
variable[truncated_byte_count] assign[=] binary_operation[name[str_len] - name[limit]]
variable[str_bytes] assign[=] call[name[str_bytes]][<ast.Slice object at 0x7da204622080>]
variable[result] assign[=] call[name[str], parameter[call[name[str_bytes].decode, parameter[name[UTF8]]]]]
return[tuple[[<ast.Name object at 0x7da204623910>, <ast.Name object at 0x7da204620e50>]]] | keyword[def] identifier[check_str_length] ( identifier[str_to_check] , identifier[limit] = identifier[MAX_LENGTH] ):
literal[string]
identifier[str_bytes] = identifier[str_to_check] . identifier[encode] ( identifier[UTF8] )
identifier[str_len] = identifier[len] ( identifier[str_bytes] )
identifier[truncated_byte_count] = literal[int]
keyword[if] identifier[str_len] > identifier[limit] :
identifier[truncated_byte_count] = identifier[str_len] - identifier[limit]
identifier[str_bytes] = identifier[str_bytes] [: identifier[limit] ]
identifier[result] = identifier[str] ( identifier[str_bytes] . identifier[decode] ( identifier[UTF8] , identifier[errors] = literal[string] ))
keyword[return] ( identifier[result] , identifier[truncated_byte_count] ) | def check_str_length(str_to_check, limit=MAX_LENGTH):
"""Check the length of a string. If exceeds limit, then truncate it.
:type str_to_check: str
:param str_to_check: String to check.
:type limit: int
:param limit: The upper limit of the length.
:rtype: tuple
:returns: The string itself if the length limit is not exceeded, or the
truncated string and the truncated byte count if it is.
"""
str_bytes = str_to_check.encode(UTF8)
str_len = len(str_bytes)
truncated_byte_count = 0
if str_len > limit:
truncated_byte_count = str_len - limit
str_bytes = str_bytes[:limit] # depends on [control=['if'], data=['str_len', 'limit']]
result = str(str_bytes.decode(UTF8, errors='ignore'))
return (result, truncated_byte_count) |
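A worked example of the truncation above:
result, truncated = check_str_length('a' * 300, limit=256)
# len(result.encode('utf-8')) == 256 and truncated == 44; note that a
# multi-byte character cut at the boundary is dropped by errors='ignore'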
def fuzzer(buffer, fuzz_factor=101):
"""Fuzz given buffer.
Take a buffer of bytes, create a copy, and replace some bytes
with random values. Number of bytes to modify depends on fuzz_factor.
This code is taken from Charlie Miller's fuzzer code.
:param buffer: the data to fuzz.
:type buffer: byte array
:param fuzz_factor: degree of fuzzing.
:type fuzz_factor: int
:return: fuzzed buffer.
:rtype: byte array
"""
buf = deepcopy(buffer)
num_writes = number_of_bytes_to_modify(len(buf), fuzz_factor)
for _ in range(num_writes):
random_byte = random.randrange(256)
random_position = random.randrange(len(buf))
buf[random_position] = random_byte
return buf | def function[fuzzer, parameter[buffer, fuzz_factor]]:
constant[Fuzz given buffer.
Take a buffer of bytes, create a copy, and replace some bytes
with random values. Number of bytes to modify depends on fuzz_factor.
This code is taken from Charlie Miller's fuzzer code.
:param buffer: the data to fuzz.
:type buffer: byte array
:param fuzz_factor: degree of fuzzing.
:type fuzz_factor: int
:return: fuzzed buffer.
:rtype: byte array
]
variable[buf] assign[=] call[name[deepcopy], parameter[name[buffer]]]
variable[num_writes] assign[=] call[name[number_of_bytes_to_modify], parameter[call[name[len], parameter[name[buf]]], name[fuzz_factor]]]
for taget[name[_]] in starred[call[name[range], parameter[name[num_writes]]]] begin[:]
variable[random_byte] assign[=] call[name[random].randrange, parameter[constant[256]]]
variable[random_position] assign[=] call[name[random].randrange, parameter[call[name[len], parameter[name[buf]]]]]
call[name[buf]][name[random_position]] assign[=] name[random_byte]
return[name[buf]] | keyword[def] identifier[fuzzer] ( identifier[buffer] , identifier[fuzz_factor] = literal[int] ):
literal[string]
identifier[buf] = identifier[deepcopy] ( identifier[buffer] )
identifier[num_writes] = identifier[number_of_bytes_to_modify] ( identifier[len] ( identifier[buf] ), identifier[fuzz_factor] )
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[num_writes] ):
identifier[random_byte] = identifier[random] . identifier[randrange] ( literal[int] )
identifier[random_position] = identifier[random] . identifier[randrange] ( identifier[len] ( identifier[buf] ))
identifier[buf] [ identifier[random_position] ]= identifier[random_byte]
keyword[return] identifier[buf] | def fuzzer(buffer, fuzz_factor=101):
"""Fuzz given buffer.
Take a buffer of bytes, create a copy, and replace some bytes
with random values. Number of bytes to modify depends on fuzz_factor.
This code is taken from Charlie Miller's fuzzer code.
:param buffer: the data to fuzz.
:type buffer: byte array
:param fuzz_factor: degree of fuzzing.
:type fuzz_factor: int
:return: fuzzed buffer.
:rtype: byte array
"""
buf = deepcopy(buffer)
num_writes = number_of_bytes_to_modify(len(buf), fuzz_factor)
for _ in range(num_writes):
random_byte = random.randrange(256)
random_position = random.randrange(len(buf))
buf[random_position] = random_byte # depends on [control=['for'], data=[]]
return buf |
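A hedged usage sketch; the buffer must support item assignment (e.g. bytearray), and the mutation count comes from number_of_bytes_to_modify, defined elsewhere:
seed = bytearray(b'GIF89a' + bytes(100))
mutated = fuzzer(seed, fuzz_factor=10)
# `seed` is left intact (a deep copy is mutated); assuming that helper returns
# roughly len(buf)/fuzz_factor, about 10 positions now hold random byte values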
def get_direction(self, direction, rev=False):
"""
Translate a direction in compass degrees into 'up' or 'down'.
"""
if (direction < 90.0) or (direction >= 270.0):
if not rev:
return 'up'
else:
return 'down'
elif (90.0 <= direction < 270.0):
if not rev:
return 'down'
else:
return 'up'
else:
return 'none' | def function[get_direction, parameter[self, direction, rev]]:
constant[
Translate a direction in compass degrees into 'up' or 'down'.
]
if <ast.BoolOp object at 0x7da1b0c24d90> begin[:]
if <ast.UnaryOp object at 0x7da1b0c271f0> begin[:]
return[constant[up]] | keyword[def] identifier[get_direction] ( identifier[self] , identifier[direction] , identifier[rev] = keyword[False] ):
literal[string]
keyword[if] ( identifier[direction] < literal[int] ) keyword[or] ( identifier[direction] >= literal[int] ):
keyword[if] keyword[not] identifier[rev] :
keyword[return] literal[string]
keyword[else] :
keyword[return] literal[string]
keyword[elif] ( literal[int] <= identifier[direction] < literal[int] ):
keyword[if] keyword[not] identifier[rev] :
keyword[return] literal[string]
keyword[else] :
keyword[return] literal[string]
keyword[else] :
keyword[return] literal[string] | def get_direction(self, direction, rev=False):
"""
Translate a direction in compass degrees into 'up' or 'down'.
"""
if direction < 90.0 or direction >= 270.0:
if not rev:
return 'up' # depends on [control=['if'], data=[]]
else:
return 'down' # depends on [control=['if'], data=[]]
elif 90.0 <= direction < 270.0:
if not rev:
return 'down' # depends on [control=['if'], data=[]]
else:
return 'up' # depends on [control=['if'], data=[]]
else:
return 'none' |
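Worked values for the branches above:
# get_direction(45.0)            -> 'up'    (direction < 90)
# get_direction(180.0)           -> 'down'  (90 <= direction < 270)
# get_direction(300.0, rev=True) -> 'down'  (direction >= 270, sense reversed)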
def alternator(*pipes):
''' a lot like zip, just instead of:
(a,b),(a,b),(a,b)
it works more like:
a,b,a,b,a,b,a
until one of the pipes ends '''
try:
for p in cycle(map(iter, pipes)):
yield next(p)
except StopIteration:
pass | def function[alternator, parameter[]]:
constant[ a lot like zip, just instead of:
(a,b),(a,b),(a,b)
it works more like:
a,b,a,b,a,b,a
until one of the pipes ends ]
<ast.Try object at 0x7da20c7966b0> | keyword[def] identifier[alternator] (* identifier[pipes] ):
literal[string]
keyword[try] :
keyword[for] identifier[p] keyword[in] identifier[cycle] ( identifier[map] ( identifier[iter] , identifier[pipes] )):
keyword[yield] identifier[next] ( identifier[p] )
keyword[except] identifier[StopIteration] :
keyword[pass] | def alternator(*pipes):
""" a lot like zip, just instead of:
(a,b),(a,b),(a,b)
it works more like:
a,b,a,b,a,b,a
until one of the pipes ends """
try:
for p in cycle(map(iter, pipes)):
yield next(p) # depends on [control=['for'], data=['p']] # depends on [control=['try'], data=[]]
except StopIteration:
pass # depends on [control=['except'], data=[]] |
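alternator relies on itertools.cycle being imported in its module; a usage sketch with made-up pipes:

from itertools import cycle

print(list(alternator([1, 2, 3], "ab")))
# -> [1, 'a', 2, 'b', 3]  (stops as soon as the shorter pipe is exhausted)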
def omg(args):
"""
%prog omg weightsfile
Run Sankoff's OMG algorithm to get orthologs. Download OMG code at:
<http://137.122.149.195/IsbraSoftware/OMGMec.html>
    This script only writes the partitions, but does not launch OMGMec. You may need to:
$ parallel "java -cp ~/code/OMGMec TestOMGMec {} 4 > {}.out" ::: work/gf?????
Then followed by omgparse() to get the gene lists.
"""
p = OptionParser(omg.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
weightsfiles = args
groupfile = group(weightsfiles + ["--outfile=groups"])
weights = get_weights(weightsfiles)
info = get_info()
fp = open(groupfile)
work = "work"
mkdir(work)
for i, row in enumerate(fp):
gf = op.join(work, "gf{0:05d}".format(i))
genes = row.rstrip().split(",")
fw = open(gf, "w")
contents = ""
npairs = 0
for gene in genes:
gene_pairs = weights[gene]
for a, b, c in gene_pairs:
if b not in genes:
continue
contents += "weight {0}".format(c) + '\n'
contents += info[a] + '\n'
contents += info[b] + '\n\n'
npairs += 1
header = "a group of genes :length ={0}".format(npairs)
print(header, file=fw)
print(contents, file=fw)
fw.close() | def function[omg, parameter[args]]:
constant[
%prog omg weightsfile
Run Sankoff's OMG algorithm to get orthologs. Download OMG code at:
<http://137.122.149.195/IsbraSoftware/OMGMec.html>
    This script only writes the partitions, but does not launch OMGMec. You may need to:
$ parallel "java -cp ~/code/OMGMec TestOMGMec {} 4 > {}.out" ::: work/gf?????
Then followed by omgparse() to get the gene lists.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[omg].__doc__]]
<ast.Tuple object at 0x7da1b09bf310> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] less[<] constant[1]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b09bec80>]]
variable[weightsfiles] assign[=] name[args]
variable[groupfile] assign[=] call[name[group], parameter[binary_operation[name[weightsfiles] + list[[<ast.Constant object at 0x7da1b09bd0f0>]]]]]
variable[weights] assign[=] call[name[get_weights], parameter[name[weightsfiles]]]
variable[info] assign[=] call[name[get_info], parameter[]]
variable[fp] assign[=] call[name[open], parameter[name[groupfile]]]
variable[work] assign[=] constant[work]
call[name[mkdir], parameter[name[work]]]
for taget[tuple[[<ast.Name object at 0x7da1b09bc940>, <ast.Name object at 0x7da1b09be8c0>]]] in starred[call[name[enumerate], parameter[name[fp]]]] begin[:]
variable[gf] assign[=] call[name[op].join, parameter[name[work], call[constant[gf{0:05d}].format, parameter[name[i]]]]]
variable[genes] assign[=] call[call[name[row].rstrip, parameter[]].split, parameter[constant[,]]]
variable[fw] assign[=] call[name[open], parameter[name[gf], constant[w]]]
variable[contents] assign[=] constant[]
variable[npairs] assign[=] constant[0]
for taget[name[gene]] in starred[name[genes]] begin[:]
variable[gene_pairs] assign[=] call[name[weights]][name[gene]]
for taget[tuple[[<ast.Name object at 0x7da1b09bdf90>, <ast.Name object at 0x7da1b09bd270>, <ast.Name object at 0x7da1b09be1d0>]]] in starred[name[gene_pairs]] begin[:]
if compare[name[b] <ast.NotIn object at 0x7da2590d7190> name[genes]] begin[:]
continue
<ast.AugAssign object at 0x7da1b09bc670>
<ast.AugAssign object at 0x7da1b09bf520>
<ast.AugAssign object at 0x7da1b09bfd60>
<ast.AugAssign object at 0x7da1b09bc640>
variable[header] assign[=] call[constant[a group of genes :length ={0}].format, parameter[name[npairs]]]
call[name[print], parameter[name[header]]]
call[name[print], parameter[name[contents]]]
call[name[fw].close, parameter[]] | keyword[def] identifier[omg] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[omg] . identifier[__doc__] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )< literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[weightsfiles] = identifier[args]
identifier[groupfile] = identifier[group] ( identifier[weightsfiles] +[ literal[string] ])
identifier[weights] = identifier[get_weights] ( identifier[weightsfiles] )
identifier[info] = identifier[get_info] ()
identifier[fp] = identifier[open] ( identifier[groupfile] )
identifier[work] = literal[string]
identifier[mkdir] ( identifier[work] )
keyword[for] identifier[i] , identifier[row] keyword[in] identifier[enumerate] ( identifier[fp] ):
identifier[gf] = identifier[op] . identifier[join] ( identifier[work] , literal[string] . identifier[format] ( identifier[i] ))
identifier[genes] = identifier[row] . identifier[rstrip] (). identifier[split] ( literal[string] )
identifier[fw] = identifier[open] ( identifier[gf] , literal[string] )
identifier[contents] = literal[string]
identifier[npairs] = literal[int]
keyword[for] identifier[gene] keyword[in] identifier[genes] :
identifier[gene_pairs] = identifier[weights] [ identifier[gene] ]
keyword[for] identifier[a] , identifier[b] , identifier[c] keyword[in] identifier[gene_pairs] :
keyword[if] identifier[b] keyword[not] keyword[in] identifier[genes] :
keyword[continue]
identifier[contents] += literal[string] . identifier[format] ( identifier[c] )+ literal[string]
identifier[contents] += identifier[info] [ identifier[a] ]+ literal[string]
identifier[contents] += identifier[info] [ identifier[b] ]+ literal[string]
identifier[npairs] += literal[int]
identifier[header] = literal[string] . identifier[format] ( identifier[npairs] )
identifier[print] ( identifier[header] , identifier[file] = identifier[fw] )
identifier[print] ( identifier[contents] , identifier[file] = identifier[fw] )
identifier[fw] . identifier[close] () | def omg(args):
"""
%prog omg weightsfile
Run Sankoff's OMG algorithm to get orthologs. Download OMG code at:
<http://137.122.149.195/IsbraSoftware/OMGMec.html>
This script only writes the partitions, but not launch OMGMec. You may need to:
$ parallel "java -cp ~/code/OMGMec TestOMGMec {} 4 > {}.out" ::: work/gf?????
Then followed by omgparse() to get the gene lists.
"""
p = OptionParser(omg.__doc__)
(opts, args) = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
weightsfiles = args
groupfile = group(weightsfiles + ['--outfile=groups'])
weights = get_weights(weightsfiles)
info = get_info()
fp = open(groupfile)
work = 'work'
mkdir(work)
for (i, row) in enumerate(fp):
gf = op.join(work, 'gf{0:05d}'.format(i))
genes = row.rstrip().split(',')
fw = open(gf, 'w')
contents = ''
npairs = 0
for gene in genes:
gene_pairs = weights[gene]
for (a, b, c) in gene_pairs:
if b not in genes:
continue # depends on [control=['if'], data=[]]
contents += 'weight {0}'.format(c) + '\n'
contents += info[a] + '\n'
contents += info[b] + '\n\n'
npairs += 1 # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['gene']]
header = 'a group of genes :length ={0}'.format(npairs)
print(header, file=fw)
print(contents, file=fw)
fw.close() # depends on [control=['for'], data=[]] |
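A toy illustration of the partition-file text the loop above produces, using hypothetical weights/info data in place of the parsed weights files:

weights = {"geneA": [("geneA", "geneB", 95)], "geneB": []}
info = {"geneA": "geneA chr1 100", "geneB": "geneB chr2 200"}
genes = ["geneA", "geneB"]

contents, npairs = "", 0
for gene in genes:
    for a, b, c in weights[gene]:
        if b not in genes:
            continue
        contents += "weight {0}".format(c) + "\n" + info[a] + "\n" + info[b] + "\n\n"
        npairs += 1
print("a group of genes :length ={0}".format(npairs))
print(contents)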
def popup(self, title, callfn, initialdir=None):
"""Let user select a directory."""
super(DirectorySelection, self).popup(title, callfn, initialdir) | def function[popup, parameter[self, title, callfn, initialdir]]:
constant[Let user select a directory.]
call[call[name[super], parameter[name[DirectorySelection], name[self]]].popup, parameter[name[title], name[callfn], name[initialdir]]] | keyword[def] identifier[popup] ( identifier[self] , identifier[title] , identifier[callfn] , identifier[initialdir] = keyword[None] ):
literal[string]
identifier[super] ( identifier[DirectorySelection] , identifier[self] ). identifier[popup] ( identifier[title] , identifier[callfn] , identifier[initialdir] ) | def popup(self, title, callfn, initialdir=None):
"""Let user select a directory."""
super(DirectorySelection, self).popup(title, callfn, initialdir) |
def emit_nicknames(self):
"""
Send the nickname list to the Websocket. Called whenever the
nicknames list changes.
"""
nicknames = [{"nickname": name, "color": color(name)}
for name in sorted(self.nicknames.keys())]
self.namespace.emit("nicknames", nicknames) | def function[emit_nicknames, parameter[self]]:
constant[
Send the nickname list to the Websocket. Called whenever the
nicknames list changes.
]
variable[nicknames] assign[=] <ast.ListComp object at 0x7da1b0fe4370>
call[name[self].namespace.emit, parameter[constant[nicknames], name[nicknames]]] | keyword[def] identifier[emit_nicknames] ( identifier[self] ):
literal[string]
identifier[nicknames] =[{ literal[string] : identifier[name] , literal[string] : identifier[color] ( identifier[name] )}
keyword[for] identifier[name] keyword[in] identifier[sorted] ( identifier[self] . identifier[nicknames] . identifier[keys] ())]
identifier[self] . identifier[namespace] . identifier[emit] ( literal[string] , identifier[nicknames] ) | def emit_nicknames(self):
"""
Send the nickname list to the Websocket. Called whenever the
nicknames list changes.
"""
nicknames = [{'nickname': name, 'color': color(name)} for name in sorted(self.nicknames.keys())]
self.namespace.emit('nicknames', nicknames) |
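A self-contained sketch of the payload this builds; the color helper below is a hypothetical stand-in for the real one, and the nickname set is invented.

def color(name):
    # hypothetical: derive a stable hex color from the nickname
    return "#{:06x}".format(abs(hash(name)) % 0xFFFFFF)

nicknames = {"alice": object(), "bob": object()}
payload = [{"nickname": name, "color": color(name)}
           for name in sorted(nicknames.keys())]
print(payload)  # what would be emitted as the "nicknames" event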
def _login(self, username, password, client_id, client_secret):
"""Performs login with the provided credentials"""
url = self.api_url + self.auth_token_url
auth_string = '%s:%s' % (client_id, client_secret)
authorization = base64.b64encode(auth_string.encode()).decode()
headers = {
'Authorization': "Basic " + authorization,
'Content-Type': "application/x-www-form-urlencoded"
}
params = {
'username': str(username),
'password': str(password),
# 'client_id': client_id,
'grant_type': 'password',
'response_type': 'token'
}
return self.session.post(url, params=params, headers=headers) | def function[_login, parameter[self, username, password, client_id, client_secret]]:
constant[Performs login with the provided credentials]
variable[url] assign[=] binary_operation[name[self].api_url + name[self].auth_token_url]
variable[auth_string] assign[=] binary_operation[constant[%s:%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c7951b0>, <ast.Name object at 0x7da20c796410>]]]
variable[authorization] assign[=] call[call[name[base64].b64encode, parameter[call[name[auth_string].encode, parameter[]]]].decode, parameter[]]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da20c7940a0>, <ast.Constant object at 0x7da20c795e40>], [<ast.BinOp object at 0x7da20c795d80>, <ast.Constant object at 0x7da1b2299ba0>]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b229ae60>, <ast.Constant object at 0x7da1b2299210>, <ast.Constant object at 0x7da1b22991e0>, <ast.Constant object at 0x7da1b2299360>], [<ast.Call object at 0x7da1b229b8e0>, <ast.Call object at 0x7da1b229ab60>, <ast.Constant object at 0x7da1b2298ac0>, <ast.Constant object at 0x7da1b229bb20>]]
return[call[name[self].session.post, parameter[name[url]]]] | keyword[def] identifier[_login] ( identifier[self] , identifier[username] , identifier[password] , identifier[client_id] , identifier[client_secret] ):
literal[string]
identifier[url] = identifier[self] . identifier[api_url] + identifier[self] . identifier[auth_token_url]
identifier[auth_string] = literal[string] %( identifier[client_id] , identifier[client_secret] )
identifier[authorization] = identifier[base64] . identifier[b64encode] ( identifier[auth_string] . identifier[encode] ()). identifier[decode] ()
identifier[headers] ={
literal[string] : literal[string] + identifier[authorization] ,
literal[string] : literal[string]
}
identifier[params] ={
literal[string] : identifier[str] ( identifier[username] ),
literal[string] : identifier[str] ( identifier[password] ),
literal[string] : literal[string] ,
literal[string] : literal[string]
}
keyword[return] identifier[self] . identifier[session] . identifier[post] ( identifier[url] , identifier[params] = identifier[params] , identifier[headers] = identifier[headers] ) | def _login(self, username, password, client_id, client_secret):
"""Performs login with the provided credentials"""
url = self.api_url + self.auth_token_url
auth_string = '%s:%s' % (client_id, client_secret)
authorization = base64.b64encode(auth_string.encode()).decode()
headers = {'Authorization': 'Basic ' + authorization, 'Content-Type': 'application/x-www-form-urlencoded'}
# 'client_id': client_id,
params = {'username': str(username), 'password': str(password), 'grant_type': 'password', 'response_type': 'token'}
return self.session.post(url, params=params, headers=headers) |
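The Basic authorization header assembled above can be reproduced on its own; the credentials are obviously made up.

import base64

client_id, client_secret = "my-client", "my-secret"
authorization = base64.b64encode(("%s:%s" % (client_id, client_secret)).encode()).decode()
print("Basic " + authorization)
# -> Basic bXktY2xpZW50Om15LXNlY3JldA==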
def _request_sender(self, packet: dict):
"""
Sends a request to a server from a ServiceClient
auto dispatch method called from self.send()
"""
node_id = self._get_node_id_for_packet(packet)
client_protocol = self._client_protocols.get(node_id)
if node_id and client_protocol:
if client_protocol.is_connected():
packet['to'] = node_id
client_protocol.send(packet)
return True
else:
self._logger.error('Client protocol is not connected for packet %s', packet)
raise ClientDisconnected()
else:
# No node found to send request
self._logger.error('Out of %s, Client Not found for packet %s', self._client_protocols.keys(), packet)
raise ClientNotFoundError() | def function[_request_sender, parameter[self, packet]]:
constant[
Sends a request to a server from a ServiceClient
auto dispatch method called from self.send()
]
variable[node_id] assign[=] call[name[self]._get_node_id_for_packet, parameter[name[packet]]]
variable[client_protocol] assign[=] call[name[self]._client_protocols.get, parameter[name[node_id]]]
if <ast.BoolOp object at 0x7da20c76e1a0> begin[:]
if call[name[client_protocol].is_connected, parameter[]] begin[:]
call[name[packet]][constant[to]] assign[=] name[node_id]
call[name[client_protocol].send, parameter[name[packet]]]
return[constant[True]] | keyword[def] identifier[_request_sender] ( identifier[self] , identifier[packet] : identifier[dict] ):
literal[string]
identifier[node_id] = identifier[self] . identifier[_get_node_id_for_packet] ( identifier[packet] )
identifier[client_protocol] = identifier[self] . identifier[_client_protocols] . identifier[get] ( identifier[node_id] )
keyword[if] identifier[node_id] keyword[and] identifier[client_protocol] :
keyword[if] identifier[client_protocol] . identifier[is_connected] ():
identifier[packet] [ literal[string] ]= identifier[node_id]
identifier[client_protocol] . identifier[send] ( identifier[packet] )
keyword[return] keyword[True]
keyword[else] :
identifier[self] . identifier[_logger] . identifier[error] ( literal[string] , identifier[packet] )
keyword[raise] identifier[ClientDisconnected] ()
keyword[else] :
identifier[self] . identifier[_logger] . identifier[error] ( literal[string] , identifier[self] . identifier[_client_protocols] . identifier[keys] (), identifier[packet] )
keyword[raise] identifier[ClientNotFoundError] () | def _request_sender(self, packet: dict):
"""
Sends a request to a server from a ServiceClient
auto dispatch method called from self.send()
"""
node_id = self._get_node_id_for_packet(packet)
client_protocol = self._client_protocols.get(node_id)
if node_id and client_protocol:
if client_protocol.is_connected():
packet['to'] = node_id
client_protocol.send(packet)
return True # depends on [control=['if'], data=[]]
else:
self._logger.error('Client protocol is not connected for packet %s', packet)
raise ClientDisconnected() # depends on [control=['if'], data=[]]
else:
# No node found to send request
self._logger.error('Out of %s, Client Not found for packet %s', self._client_protocols.keys(), packet)
raise ClientNotFoundError() |
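A stub-based sketch of the connected branch; the protocol object here is hypothetical, standing in for the real client transport.

class _StubProtocol:
    def __init__(self):
        self.sent = []
    def is_connected(self):
        return True
    def send(self, packet):
        self.sent.append(packet)

protocol = _StubProtocol()
packet = {"endpoint": "users.get", "payload": {}}
if protocol.is_connected():       # mirrors the branch in _request_sender
    packet["to"] = "node-1"
    protocol.send(packet)
print(protocol.sent)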
def env(ctx, *args, **kwargs):
"""
print debug info about running environment
"""
import sys, platform, os, shutil
from pkg_resources import get_distribution, working_set
print("\n##################\n")
print("Information about the running environment of brother_ql.")
print("(Please provide this information when reporting any issue.)\n")
# computer
print("About the computer:")
for attr in ('platform', 'processor', 'release', 'system', 'machine', 'architecture'):
print(' * '+attr.title()+':', getattr(platform, attr)())
# Python
print("About the installed Python version:")
py_version = str(sys.version).replace('\n', ' ')
print(" *", py_version)
# brother_ql
print("About the brother_ql package:")
pkg = get_distribution('brother_ql')
print(" * package location:", pkg.location)
print(" * package version: ", pkg.version)
try:
cli_loc = shutil.which('brother_ql')
except:
cli_loc = 'unknown'
print(" * brother_ql CLI path:", cli_loc)
# brother_ql's requirements
print("About the requirements of brother_ql:")
fmt = " {req:14s} | {spec:10s} | {ins_vers:17s}"
print(fmt.format(req='requirement', spec='requested', ins_vers='installed version'))
print(fmt.format(req='-' * 14, spec='-'*10, ins_vers='-'*17))
requirements = list(pkg.requires())
requirements.sort(key=lambda x: x.project_name)
for req in requirements:
proj = req.project_name
req_pkg = get_distribution(proj)
spec = ' '.join(req.specs[0]) if req.specs else 'any'
print(fmt.format(req=proj, spec=spec, ins_vers=req_pkg.version))
print("\n##################\n") | def function[env, parameter[ctx]]:
constant[
print debug info about running environment
]
import module[sys], module[platform], module[os], module[shutil]
from relative_module[pkg_resources] import module[get_distribution], module[working_set]
call[name[print], parameter[constant[
##################
]]]
call[name[print], parameter[constant[Information about the running environment of brother_ql.]]]
call[name[print], parameter[constant[(Please provide this information when reporting any issue.)
]]]
call[name[print], parameter[constant[About the computer:]]]
for taget[name[attr]] in starred[tuple[[<ast.Constant object at 0x7da207f01030>, <ast.Constant object at 0x7da207f00dc0>, <ast.Constant object at 0x7da207f03eb0>, <ast.Constant object at 0x7da207f036d0>, <ast.Constant object at 0x7da207f02410>, <ast.Constant object at 0x7da207f01600>]]] begin[:]
call[name[print], parameter[binary_operation[binary_operation[constant[ * ] + call[name[attr].title, parameter[]]] + constant[:]], call[call[name[getattr], parameter[name[platform], name[attr]]], parameter[]]]]
call[name[print], parameter[constant[About the installed Python version:]]]
variable[py_version] assign[=] call[call[name[str], parameter[name[sys].version]].replace, parameter[constant[
], constant[ ]]]
call[name[print], parameter[constant[ *], name[py_version]]]
call[name[print], parameter[constant[About the brother_ql package:]]]
variable[pkg] assign[=] call[name[get_distribution], parameter[constant[brother_ql]]]
call[name[print], parameter[constant[ * package location:], name[pkg].location]]
call[name[print], parameter[constant[ * package version: ], name[pkg].version]]
<ast.Try object at 0x7da207f036a0>
call[name[print], parameter[constant[ * brother_ql CLI path:], name[cli_loc]]]
call[name[print], parameter[constant[About the requirements of brother_ql:]]]
variable[fmt] assign[=] constant[ {req:14s} | {spec:10s} | {ins_vers:17s}]
call[name[print], parameter[call[name[fmt].format, parameter[]]]]
call[name[print], parameter[call[name[fmt].format, parameter[]]]]
variable[requirements] assign[=] call[name[list], parameter[call[name[pkg].requires, parameter[]]]]
call[name[requirements].sort, parameter[]]
for taget[name[req]] in starred[name[requirements]] begin[:]
variable[proj] assign[=] name[req].project_name
variable[req_pkg] assign[=] call[name[get_distribution], parameter[name[proj]]]
variable[spec] assign[=] <ast.IfExp object at 0x7da207f00f10>
call[name[print], parameter[call[name[fmt].format, parameter[]]]]
call[name[print], parameter[constant[
##################
]]] | keyword[def] identifier[env] ( identifier[ctx] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[import] identifier[sys] , identifier[platform] , identifier[os] , identifier[shutil]
keyword[from] identifier[pkg_resources] keyword[import] identifier[get_distribution] , identifier[working_set]
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[for] identifier[attr] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ):
identifier[print] ( literal[string] + identifier[attr] . identifier[title] ()+ literal[string] , identifier[getattr] ( identifier[platform] , identifier[attr] )())
identifier[print] ( literal[string] )
identifier[py_version] = identifier[str] ( identifier[sys] . identifier[version] ). identifier[replace] ( literal[string] , literal[string] )
identifier[print] ( literal[string] , identifier[py_version] )
identifier[print] ( literal[string] )
identifier[pkg] = identifier[get_distribution] ( literal[string] )
identifier[print] ( literal[string] , identifier[pkg] . identifier[location] )
identifier[print] ( literal[string] , identifier[pkg] . identifier[version] )
keyword[try] :
identifier[cli_loc] = identifier[shutil] . identifier[which] ( literal[string] )
keyword[except] :
identifier[cli_loc] = literal[string]
identifier[print] ( literal[string] , identifier[cli_loc] )
identifier[print] ( literal[string] )
identifier[fmt] = literal[string]
identifier[print] ( identifier[fmt] . identifier[format] ( identifier[req] = literal[string] , identifier[spec] = literal[string] , identifier[ins_vers] = literal[string] ))
identifier[print] ( identifier[fmt] . identifier[format] ( identifier[req] = literal[string] * literal[int] , identifier[spec] = literal[string] * literal[int] , identifier[ins_vers] = literal[string] * literal[int] ))
identifier[requirements] = identifier[list] ( identifier[pkg] . identifier[requires] ())
identifier[requirements] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[project_name] )
keyword[for] identifier[req] keyword[in] identifier[requirements] :
identifier[proj] = identifier[req] . identifier[project_name]
identifier[req_pkg] = identifier[get_distribution] ( identifier[proj] )
identifier[spec] = literal[string] . identifier[join] ( identifier[req] . identifier[specs] [ literal[int] ]) keyword[if] identifier[req] . identifier[specs] keyword[else] literal[string]
identifier[print] ( identifier[fmt] . identifier[format] ( identifier[req] = identifier[proj] , identifier[spec] = identifier[spec] , identifier[ins_vers] = identifier[req_pkg] . identifier[version] ))
identifier[print] ( literal[string] ) | def env(ctx, *args, **kwargs):
"""
print debug info about running environment
"""
import sys, platform, os, shutil
from pkg_resources import get_distribution, working_set
print('\n##################\n')
print('Information about the running environment of brother_ql.')
print('(Please provide this information when reporting any issue.)\n')
# computer
print('About the computer:')
for attr in ('platform', 'processor', 'release', 'system', 'machine', 'architecture'):
print(' * ' + attr.title() + ':', getattr(platform, attr)()) # depends on [control=['for'], data=['attr']]
# Python
print('About the installed Python version:')
py_version = str(sys.version).replace('\n', ' ')
print(' *', py_version)
# brother_ql
print('About the brother_ql package:')
pkg = get_distribution('brother_ql')
print(' * package location:', pkg.location)
print(' * package version: ', pkg.version)
try:
cli_loc = shutil.which('brother_ql') # depends on [control=['try'], data=[]]
except:
cli_loc = 'unknown' # depends on [control=['except'], data=[]]
print(' * brother_ql CLI path:', cli_loc)
# brother_ql's requirements
print('About the requirements of brother_ql:')
fmt = ' {req:14s} | {spec:10s} | {ins_vers:17s}'
print(fmt.format(req='requirement', spec='requested', ins_vers='installed version'))
print(fmt.format(req='-' * 14, spec='-' * 10, ins_vers='-' * 17))
requirements = list(pkg.requires())
requirements.sort(key=lambda x: x.project_name)
for req in requirements:
proj = req.project_name
req_pkg = get_distribution(proj)
spec = ' '.join(req.specs[0]) if req.specs else 'any'
print(fmt.format(req=proj, spec=spec, ins_vers=req_pkg.version)) # depends on [control=['for'], data=['req']]
print('\n##################\n') |
def get_host_binds(container_map, config_name, config, instance, policy, named_volumes):
"""
Generates the list of host volumes and named volumes (where applicable) for the host config ``bind`` argument
during container creation.
:param container_map: Container map.
:type container_map: dockermap.map.config.main.ContainerMap
:param config: Container configuration.
:type config: dockermap.map.config.container.ContainerConfiguration
:param instance: Instance name. Pass ``None`` if not applicable.
:type instance: unicode | str
:return: List of shared volumes with host volumes and the read-only flag.
:rtype: list[unicode | str]
"""
def volume_str(paths, readonly):
return '{0[1]}:{0[0]}:{1}'.format(paths, 'ro' if readonly else 'rw')
def _attached_volume(vol):
parent_name = config_name if use_attached_parent_name else None
volume_name = aname(map_name, vol.name, parent_name=parent_name)
if isinstance(vol, UsedVolume):
path = resolve_value(vol.path)
else:
path = resolve_value(default_paths.get(vol.name))
return volume_str((path, volume_name), vol.readonly)
def _used_volume(vol):
if use_attached_parent_name:
parent_name, __, alias = vol.name.partition('.')
else:
alias = vol.name
parent_name = None
if alias not in default_paths:
return None
volume_name = aname(map_name, alias, parent_name=parent_name)
if isinstance(vol, UsedVolume):
path = resolve_value(vol.path)
else:
path = resolve_value(default_paths[alias])
return volume_str((path, volume_name), vol.readonly)
aname = policy.aname
map_name = container_map.name
use_attached_parent_name = container_map.use_attached_parent_name
default_paths = policy.default_volume_paths[map_name]
bind = [volume_str(get_shared_volume_path(container_map, shared_volume, instance), shared_volume.readonly)
for shared_volume in config.binds]
if named_volumes:
bind.extend(map(_attached_volume, config.attaches))
bind.extend(filter(None, map(_used_volume, config.uses)))
return bind | def function[get_host_binds, parameter[container_map, config_name, config, instance, policy, named_volumes]]:
constant[
Generates the list of host volumes and named volumes (where applicable) for the host config ``bind`` argument
during container creation.
:param container_map: Container map.
:type container_map: dockermap.map.config.main.ContainerMap
:param config: Container configuration.
:type config: dockermap.map.config.container.ContainerConfiguration
:param instance: Instance name. Pass ``None`` if not applicable.
:type instance: unicode | str
:return: List of shared volumes with host volumes and the read-only flag.
:rtype: list[unicode | str]
]
def function[volume_str, parameter[paths, readonly]]:
return[call[constant[{0[1]}:{0[0]}:{1}].format, parameter[name[paths], <ast.IfExp object at 0x7da1b2796aa0>]]]
def function[_attached_volume, parameter[vol]]:
variable[parent_name] assign[=] <ast.IfExp object at 0x7da1b2794e50>
variable[volume_name] assign[=] call[name[aname], parameter[name[map_name], name[vol].name]]
if call[name[isinstance], parameter[name[vol], name[UsedVolume]]] begin[:]
variable[path] assign[=] call[name[resolve_value], parameter[name[vol].path]]
return[call[name[volume_str], parameter[tuple[[<ast.Name object at 0x7da1b2795f60>, <ast.Name object at 0x7da1b2795990>]], name[vol].readonly]]]
def function[_used_volume, parameter[vol]]:
if name[use_attached_parent_name] begin[:]
<ast.Tuple object at 0x7da1b2797670> assign[=] call[name[vol].name.partition, parameter[constant[.]]]
if compare[name[alias] <ast.NotIn object at 0x7da2590d7190> name[default_paths]] begin[:]
return[constant[None]]
variable[volume_name] assign[=] call[name[aname], parameter[name[map_name], name[alias]]]
if call[name[isinstance], parameter[name[vol], name[UsedVolume]]] begin[:]
variable[path] assign[=] call[name[resolve_value], parameter[name[vol].path]]
return[call[name[volume_str], parameter[tuple[[<ast.Name object at 0x7da1b2795660>, <ast.Name object at 0x7da1b2796fb0>]], name[vol].readonly]]]
variable[aname] assign[=] name[policy].aname
variable[map_name] assign[=] name[container_map].name
variable[use_attached_parent_name] assign[=] name[container_map].use_attached_parent_name
variable[default_paths] assign[=] call[name[policy].default_volume_paths][name[map_name]]
variable[bind] assign[=] <ast.ListComp object at 0x7da1b2794be0>
if name[named_volumes] begin[:]
call[name[bind].extend, parameter[call[name[map], parameter[name[_attached_volume], name[config].attaches]]]]
call[name[bind].extend, parameter[call[name[filter], parameter[constant[None], call[name[map], parameter[name[_used_volume], name[config].uses]]]]]]
return[name[bind]] | keyword[def] identifier[get_host_binds] ( identifier[container_map] , identifier[config_name] , identifier[config] , identifier[instance] , identifier[policy] , identifier[named_volumes] ):
literal[string]
keyword[def] identifier[volume_str] ( identifier[paths] , identifier[readonly] ):
keyword[return] literal[string] . identifier[format] ( identifier[paths] , literal[string] keyword[if] identifier[readonly] keyword[else] literal[string] )
keyword[def] identifier[_attached_volume] ( identifier[vol] ):
identifier[parent_name] = identifier[config_name] keyword[if] identifier[use_attached_parent_name] keyword[else] keyword[None]
identifier[volume_name] = identifier[aname] ( identifier[map_name] , identifier[vol] . identifier[name] , identifier[parent_name] = identifier[parent_name] )
keyword[if] identifier[isinstance] ( identifier[vol] , identifier[UsedVolume] ):
identifier[path] = identifier[resolve_value] ( identifier[vol] . identifier[path] )
keyword[else] :
identifier[path] = identifier[resolve_value] ( identifier[default_paths] . identifier[get] ( identifier[vol] . identifier[name] ))
keyword[return] identifier[volume_str] (( identifier[path] , identifier[volume_name] ), identifier[vol] . identifier[readonly] )
keyword[def] identifier[_used_volume] ( identifier[vol] ):
keyword[if] identifier[use_attached_parent_name] :
identifier[parent_name] , identifier[__] , identifier[alias] = identifier[vol] . identifier[name] . identifier[partition] ( literal[string] )
keyword[else] :
identifier[alias] = identifier[vol] . identifier[name]
identifier[parent_name] = keyword[None]
keyword[if] identifier[alias] keyword[not] keyword[in] identifier[default_paths] :
keyword[return] keyword[None]
identifier[volume_name] = identifier[aname] ( identifier[map_name] , identifier[alias] , identifier[parent_name] = identifier[parent_name] )
keyword[if] identifier[isinstance] ( identifier[vol] , identifier[UsedVolume] ):
identifier[path] = identifier[resolve_value] ( identifier[vol] . identifier[path] )
keyword[else] :
identifier[path] = identifier[resolve_value] ( identifier[default_paths] [ identifier[alias] ])
keyword[return] identifier[volume_str] (( identifier[path] , identifier[volume_name] ), identifier[vol] . identifier[readonly] )
identifier[aname] = identifier[policy] . identifier[aname]
identifier[map_name] = identifier[container_map] . identifier[name]
identifier[use_attached_parent_name] = identifier[container_map] . identifier[use_attached_parent_name]
identifier[default_paths] = identifier[policy] . identifier[default_volume_paths] [ identifier[map_name] ]
identifier[bind] =[ identifier[volume_str] ( identifier[get_shared_volume_path] ( identifier[container_map] , identifier[shared_volume] , identifier[instance] ), identifier[shared_volume] . identifier[readonly] )
keyword[for] identifier[shared_volume] keyword[in] identifier[config] . identifier[binds] ]
keyword[if] identifier[named_volumes] :
identifier[bind] . identifier[extend] ( identifier[map] ( identifier[_attached_volume] , identifier[config] . identifier[attaches] ))
identifier[bind] . identifier[extend] ( identifier[filter] ( keyword[None] , identifier[map] ( identifier[_used_volume] , identifier[config] . identifier[uses] )))
keyword[return] identifier[bind] | def get_host_binds(container_map, config_name, config, instance, policy, named_volumes):
"""
Generates the list of host volumes and named volumes (where applicable) for the host config ``bind`` argument
during container creation.
:param container_map: Container map.
:type container_map: dockermap.map.config.main.ContainerMap
:param config: Container configuration.
:type config: dockermap.map.config.container.ContainerConfiguration
:param instance: Instance name. Pass ``None`` if not applicable.
:type instance: unicode | str
:return: List of shared volumes with host volumes and the read-only flag.
:rtype: list[unicode | str]
"""
def volume_str(paths, readonly):
return '{0[1]}:{0[0]}:{1}'.format(paths, 'ro' if readonly else 'rw')
def _attached_volume(vol):
parent_name = config_name if use_attached_parent_name else None
volume_name = aname(map_name, vol.name, parent_name=parent_name)
if isinstance(vol, UsedVolume):
path = resolve_value(vol.path) # depends on [control=['if'], data=[]]
else:
path = resolve_value(default_paths.get(vol.name))
return volume_str((path, volume_name), vol.readonly)
def _used_volume(vol):
if use_attached_parent_name:
(parent_name, __, alias) = vol.name.partition('.') # depends on [control=['if'], data=[]]
else:
alias = vol.name
parent_name = None
if alias not in default_paths:
return None # depends on [control=['if'], data=[]]
volume_name = aname(map_name, alias, parent_name=parent_name)
if isinstance(vol, UsedVolume):
path = resolve_value(vol.path) # depends on [control=['if'], data=[]]
else:
path = resolve_value(default_paths[alias])
return volume_str((path, volume_name), vol.readonly)
aname = policy.aname
map_name = container_map.name
use_attached_parent_name = container_map.use_attached_parent_name
default_paths = policy.default_volume_paths[map_name]
bind = [volume_str(get_shared_volume_path(container_map, shared_volume, instance), shared_volume.readonly) for shared_volume in config.binds]
if named_volumes:
bind.extend(map(_attached_volume, config.attaches))
bind.extend(filter(None, map(_used_volume, config.uses))) # depends on [control=['if'], data=[]]
return bind |
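The bind-string layout produced by the inner volume_str helper is easy to sanity-check in isolation; the paths are made up.

def volume_str(paths, readonly):
    return '{0[1]}:{0[0]}:{1}'.format(paths, 'ro' if readonly else 'rw')

# paths is (container_path, host_path); the host side leads in the bind string
print(volume_str(('/var/lib/app', '/srv/app-data'), False))
# -> /srv/app-data:/var/lib/app:rw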
def assign_parser(self, name):
'''
Restricts parsing
        **name** is the name of the parser class
NB: this is the PUBLIC method
@procedure
'''
for n, p in list(self.Parsers.items()):
if n != name:
del self.Parsers[n]
if len(self.Parsers) != 1:
raise RuntimeError('Parser cannot be assigned!') | def function[assign_parser, parameter[self, name]]:
constant[
Restricts parsing
        **name** is the name of the parser class
NB: this is the PUBLIC method
@procedure
]
for taget[tuple[[<ast.Name object at 0x7da18f7206a0>, <ast.Name object at 0x7da18f720a60>]]] in starred[call[name[list], parameter[call[name[self].Parsers.items, parameter[]]]]] begin[:]
if compare[name[n] not_equal[!=] name[name]] begin[:]
<ast.Delete object at 0x7da1b19ecf10>
if compare[call[name[len], parameter[name[self].Parsers]] not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da1b19eca60> | keyword[def] identifier[assign_parser] ( identifier[self] , identifier[name] ):
literal[string]
keyword[for] identifier[n] , identifier[p] keyword[in] identifier[list] ( identifier[self] . identifier[Parsers] . identifier[items] ()):
keyword[if] identifier[n] != identifier[name] :
keyword[del] identifier[self] . identifier[Parsers] [ identifier[n] ]
keyword[if] identifier[len] ( identifier[self] . identifier[Parsers] )!= literal[int] :
keyword[raise] identifier[RuntimeError] ( literal[string] ) | def assign_parser(self, name):
"""
Restricts parsing
    **name** is the name of the parser class
NB: this is the PUBLIC method
@procedure
"""
for (n, p) in list(self.Parsers.items()):
if n != name:
del self.Parsers[n] # depends on [control=['if'], data=['n']] # depends on [control=['for'], data=[]]
if len(self.Parsers) != 1:
raise RuntimeError('Parser cannot be assigned!') # depends on [control=['if'], data=[]] |
def xpath(self, *args, **kwargs):
""" Perform XPath on the passage XML
:param args: Ordered arguments for etree._Element().xpath()
:param kwargs: Named arguments
:return: Result list
:rtype: list(etree._Element)
"""
if "smart_strings" not in kwargs:
kwargs["smart_strings"] = False
return self.resource.xpath(*args, **kwargs) | def function[xpath, parameter[self]]:
constant[ Perform XPath on the passage XML
:param args: Ordered arguments for etree._Element().xpath()
:param kwargs: Named arguments
:return: Result list
:rtype: list(etree._Element)
]
if compare[constant[smart_strings] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[smart_strings]] assign[=] constant[False]
return[call[name[self].resource.xpath, parameter[<ast.Starred object at 0x7da1b235c760>]]] | keyword[def] identifier[xpath] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= keyword[False]
keyword[return] identifier[self] . identifier[resource] . identifier[xpath] (* identifier[args] ,** identifier[kwargs] ) | def xpath(self, *args, **kwargs):
""" Perform XPath on the passage XML
:param args: Ordered arguments for etree._Element().xpath()
:param kwargs: Named arguments
:return: Result list
:rtype: list(etree._Element)
"""
if 'smart_strings' not in kwargs:
kwargs['smart_strings'] = False # depends on [control=['if'], data=['kwargs']]
return self.resource.xpath(*args, **kwargs) |
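A usage sketch assuming lxml as the backing library (suggested by the etree._Element return type); the injected smart_strings=False default simply makes XPath string results plain str objects.

from lxml import etree

root = etree.fromstring("<doc><item>a</item><item>b</item></doc>")
print(root.xpath("//item/text()", smart_strings=False))  # -> ['a', 'b']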
def nl_cb_call(cb, type_, msg):
"""Call a callback function.
https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink-private/netlink.h#L136
Positional arguments:
cb -- nl_cb class instance.
type_ -- callback type integer (e.g. NL_CB_MSG_OUT).
msg -- Netlink message (nl_msg class instance).
Returns:
Integer from the callback function (like NL_OK, NL_SKIP, etc).
"""
cb.cb_active = type_
ret = cb.cb_set[type_](msg, cb.cb_args[type_])
cb.cb_active = 10 + 1 # NL_CB_TYPE_MAX + 1
return int(ret) | def function[nl_cb_call, parameter[cb, type_, msg]]:
constant[Call a callback function.
https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink-private/netlink.h#L136
Positional arguments:
cb -- nl_cb class instance.
type_ -- callback type integer (e.g. NL_CB_MSG_OUT).
msg -- Netlink message (nl_msg class instance).
Returns:
Integer from the callback function (like NL_OK, NL_SKIP, etc).
]
name[cb].cb_active assign[=] name[type_]
variable[ret] assign[=] call[call[name[cb].cb_set][name[type_]], parameter[name[msg], call[name[cb].cb_args][name[type_]]]]
name[cb].cb_active assign[=] binary_operation[constant[10] + constant[1]]
return[call[name[int], parameter[name[ret]]]] | keyword[def] identifier[nl_cb_call] ( identifier[cb] , identifier[type_] , identifier[msg] ):
literal[string]
identifier[cb] . identifier[cb_active] = identifier[type_]
identifier[ret] = identifier[cb] . identifier[cb_set] [ identifier[type_] ]( identifier[msg] , identifier[cb] . identifier[cb_args] [ identifier[type_] ])
identifier[cb] . identifier[cb_active] = literal[int] + literal[int]
keyword[return] identifier[int] ( identifier[ret] ) | def nl_cb_call(cb, type_, msg):
"""Call a callback function.
https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink-private/netlink.h#L136
Positional arguments:
cb -- nl_cb class instance.
type_ -- callback type integer (e.g. NL_CB_MSG_OUT).
msg -- Netlink message (nl_msg class instance).
Returns:
Integer from the callback function (like NL_OK, NL_SKIP, etc).
"""
cb.cb_active = type_
ret = cb.cb_set[type_](msg, cb.cb_args[type_])
cb.cb_active = 10 + 1 # NL_CB_TYPE_MAX + 1
return int(ret) |
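A self-contained exercise of the dispatch table; the stub class and the type constant below are illustrative, not libnl3's real definitions.

NL_CB_MSG_IN = 2  # illustrative callback-type constant

class _StubCb:
    def __init__(self):
        self.cb_set = {NL_CB_MSG_IN: lambda msg, arg: 0}  # 0 ~ NL_OK
        self.cb_args = {NL_CB_MSG_IN: None}
        self.cb_active = 10 + 1

print(nl_cb_call(_StubCb(), NL_CB_MSG_IN, {"fake": "msg"}))  # -> 0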
def to_export(export):
"""Serializes export to id string
:param export: object to serialize
:return: string id
"""
from sevenbridges.models.storage_export import Export
if not export:
raise SbgError('Export is required!')
elif isinstance(export, Export):
return export.id
elif isinstance(export, six.string_types):
return export
else:
raise SbgError('Invalid export parameter!') | def function[to_export, parameter[export]]:
constant[Serializes export to id string
:param export: object to serialize
:return: string id
]
from relative_module[sevenbridges.models.storage_export] import module[Export]
if <ast.UnaryOp object at 0x7da2041d8400> begin[:]
<ast.Raise object at 0x7da2041da5c0> | keyword[def] identifier[to_export] ( identifier[export] ):
literal[string]
keyword[from] identifier[sevenbridges] . identifier[models] . identifier[storage_export] keyword[import] identifier[Export]
keyword[if] keyword[not] identifier[export] :
keyword[raise] identifier[SbgError] ( literal[string] )
keyword[elif] identifier[isinstance] ( identifier[export] , identifier[Export] ):
keyword[return] identifier[export] . identifier[id]
keyword[elif] identifier[isinstance] ( identifier[export] , identifier[six] . identifier[string_types] ):
keyword[return] identifier[export]
keyword[else] :
keyword[raise] identifier[SbgError] ( literal[string] ) | def to_export(export):
"""Serializes export to id string
:param export: object to serialize
:return: string id
"""
from sevenbridges.models.storage_export import Export
if not export:
raise SbgError('Export is required!') # depends on [control=['if'], data=[]]
elif isinstance(export, Export):
return export.id # depends on [control=['if'], data=[]]
elif isinstance(export, six.string_types):
return export # depends on [control=['if'], data=[]]
else:
raise SbgError('Invalid export parameter!') |
def dump_header(iterable, allow_token=True):
"""Dump an HTTP header again. This is the reversal of
:func:`parse_list_header`, :func:`parse_set_header` and
:func:`parse_dict_header`. This also quotes strings that include an
equals sign unless you pass it as dict of key, value pairs.
>>> dump_header({'foo': 'bar baz'})
'foo="bar baz"'
>>> dump_header(('foo', 'bar baz'))
'foo, "bar baz"'
:param iterable: the iterable or dict of values to quote.
:param allow_token: if set to `False` tokens as values are disallowed.
See :func:`quote_header_value` for more details.
"""
if isinstance(iterable, dict):
items = []
for key, value in iteritems(iterable):
if value is None:
items.append(key)
else:
items.append(
"%s=%s" % (key, quote_header_value(value, allow_token=allow_token))
)
else:
items = [quote_header_value(x, allow_token=allow_token) for x in iterable]
return ", ".join(items) | def function[dump_header, parameter[iterable, allow_token]]:
constant[Dump an HTTP header again. This is the reversal of
:func:`parse_list_header`, :func:`parse_set_header` and
:func:`parse_dict_header`. This also quotes strings that include an
equals sign unless you pass it as dict of key, value pairs.
>>> dump_header({'foo': 'bar baz'})
'foo="bar baz"'
>>> dump_header(('foo', 'bar baz'))
'foo, "bar baz"'
:param iterable: the iterable or dict of values to quote.
:param allow_token: if set to `False` tokens as values are disallowed.
See :func:`quote_header_value` for more details.
]
if call[name[isinstance], parameter[name[iterable], name[dict]]] begin[:]
variable[items] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18f8111e0>, <ast.Name object at 0x7da18f8133d0>]]] in starred[call[name[iteritems], parameter[name[iterable]]]] begin[:]
if compare[name[value] is constant[None]] begin[:]
call[name[items].append, parameter[name[key]]]
return[call[constant[, ].join, parameter[name[items]]]] | keyword[def] identifier[dump_header] ( identifier[iterable] , identifier[allow_token] = keyword[True] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[iterable] , identifier[dict] ):
identifier[items] =[]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[iteritems] ( identifier[iterable] ):
keyword[if] identifier[value] keyword[is] keyword[None] :
identifier[items] . identifier[append] ( identifier[key] )
keyword[else] :
identifier[items] . identifier[append] (
literal[string] %( identifier[key] , identifier[quote_header_value] ( identifier[value] , identifier[allow_token] = identifier[allow_token] ))
)
keyword[else] :
identifier[items] =[ identifier[quote_header_value] ( identifier[x] , identifier[allow_token] = identifier[allow_token] ) keyword[for] identifier[x] keyword[in] identifier[iterable] ]
keyword[return] literal[string] . identifier[join] ( identifier[items] ) | def dump_header(iterable, allow_token=True):
"""Dump an HTTP header again. This is the reversal of
:func:`parse_list_header`, :func:`parse_set_header` and
:func:`parse_dict_header`. This also quotes strings that include an
equals sign unless you pass it as dict of key, value pairs.
>>> dump_header({'foo': 'bar baz'})
'foo="bar baz"'
>>> dump_header(('foo', 'bar baz'))
'foo, "bar baz"'
:param iterable: the iterable or dict of values to quote.
:param allow_token: if set to `False` tokens as values are disallowed.
See :func:`quote_header_value` for more details.
"""
if isinstance(iterable, dict):
items = []
for (key, value) in iteritems(iterable):
if value is None:
items.append(key) # depends on [control=['if'], data=[]]
else:
items.append('%s=%s' % (key, quote_header_value(value, allow_token=allow_token))) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
items = [quote_header_value(x, allow_token=allow_token) for x in iterable]
return ', '.join(items) |
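This function ships in werkzeug.http, so its behavior (including the None-valued key case) can be tried directly:

from werkzeug.http import dump_header

print(dump_header({'max-age': '3600', 'no-cache': None}))  # -> max-age=3600, no-cache
print(dump_header(['gzip', 'x custom']))                   # -> gzip, "x custom"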
def zdivide(a, b, null=0):
'''
zdivide(a, b) returns the quotient a / b as a numpy array object. Unlike numpy's divide function
or a/b syntax, zdivide will thread over the earliest dimension possible; thus if a.shape is
    (4,2) and b.shape is 4, zdivide(a,b) is equivalent to [ai*zinv(bi) for (ai,bi) in zip(a,b)].
    The optional argument null (default: 0) may be given to specify that zeros in the array b should
instead be replaced with the given value in the result. Note that if this value is not equal to
0, then any sparse array passed as argument b must be reified.
The zdivide function never raises an error due to divide-by-zero; if you desire this behavior,
use the divide function instead.
Note that zdivide(a,b, null=z) is not quite equivalent to a*zinv(b, null=z) unless z is 0; if z
    is not zero, then the same elements that are set to z in zinv(b, null=z) are set to z in the
result of zdivide(a,b, null=z) rather than the equivalent element of a times z.
'''
(a,b) = unbroadcast(a,b)
return czdivide(a,b, null=null) | def function[zdivide, parameter[a, b, null]]:
constant[
zdivide(a, b) returns the quotient a / b as a numpy array object. Unlike numpy's divide function
or a/b syntax, zdivide will thread over the earliest dimension possible; thus if a.shape is
    (4,2) and b.shape is 4, zdivide(a,b) is equivalent to [ai*zinv(bi) for (ai,bi) in zip(a,b)].
    The optional argument null (default: 0) may be given to specify that zeros in the array b should
instead be replaced with the given value in the result. Note that if this value is not equal to
0, then any sparse array passed as argument b must be reified.
The zdivide function never raises an error due to divide-by-zero; if you desire this behavior,
use the divide function instead.
Note that zdivide(a,b, null=z) is not quite equivalent to a*zinv(b, null=z) unless z is 0; if z
    is not zero, then the same elements that are set to z in zinv(b, null=z) are set to z in the
result of zdivide(a,b, null=z) rather than the equivalent element of a times z.
]
<ast.Tuple object at 0x7da1b0e399c0> assign[=] call[name[unbroadcast], parameter[name[a], name[b]]]
return[call[name[czdivide], parameter[name[a], name[b]]]] | keyword[def] identifier[zdivide] ( identifier[a] , identifier[b] , identifier[null] = literal[int] ):
literal[string]
( identifier[a] , identifier[b] )= identifier[unbroadcast] ( identifier[a] , identifier[b] )
keyword[return] identifier[czdivide] ( identifier[a] , identifier[b] , identifier[null] = identifier[null] ) | def zdivide(a, b, null=0):
"""
zdivide(a, b) returns the quotient a / b as a numpy array object. Unlike numpy's divide function
or a/b syntax, zdivide will thread over the earliest dimension possible; thus if a.shape is
    (4,2) and b.shape is 4, zdivide(a,b) is equivalent to [ai*zinv(bi) for (ai,bi) in zip(a,b)].
    The optional argument null (default: 0) may be given to specify that zeros in the array b should
instead be replaced with the given value in the result. Note that if this value is not equal to
0, then any sparse array passed as argument b must be reified.
The zdivide function never raises an error due to divide-by-zero; if you desire this behavior,
use the divide function instead.
Note that zdivide(a,b, null=z) is not quite equivalent to a*zinv(b, null=z) unless z is 0; if z
    is not zero, then the same elements that are set to z in zinv(b, null=z) are set to z in the
result of zdivide(a,b, null=z) rather than the equivalent element of a times z.
"""
(a, b) = unbroadcast(a, b)
return czdivide(a, b, null=null) |
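A behaviorally equivalent sketch for dense NumPy inputs (the real implementation delegates to unbroadcast and czdivide, which also cover sparse arrays):

import numpy as np

def zdivide_dense(a, b, null=0):
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    out = np.full(np.broadcast(a, b).shape, float(null))  # pre-fill with null
    np.divide(a, b, out=out, where=(b != 0))              # skip zero divisors
    return out

print(zdivide_dense([1.0, 2.0, 3.0], [2.0, 0.0, 1.5], null=np.nan))
# -> [0.5 nan 2. ]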
def move_safe(origin, target):
"""
Move file, skip if exists
"""
if origin == target:
return origin
if file_exists(target):
return target
shutil.move(origin, target)
return target | def function[move_safe, parameter[origin, target]]:
constant[
Move file, skip if exists
]
if compare[name[origin] equal[==] name[target]] begin[:]
return[name[origin]]
if call[name[file_exists], parameter[name[target]]] begin[:]
return[name[target]]
call[name[shutil].move, parameter[name[origin], name[target]]]
return[name[target]] | keyword[def] identifier[move_safe] ( identifier[origin] , identifier[target] ):
literal[string]
keyword[if] identifier[origin] == identifier[target] :
keyword[return] identifier[origin]
keyword[if] identifier[file_exists] ( identifier[target] ):
keyword[return] identifier[target]
identifier[shutil] . identifier[move] ( identifier[origin] , identifier[target] )
keyword[return] identifier[target] | def move_safe(origin, target):
"""
Move file, skip if exists
"""
if origin == target:
return origin # depends on [control=['if'], data=['origin']]
if file_exists(target):
return target # depends on [control=['if'], data=[]]
shutil.move(origin, target)
return target |
async def get_version(self, timeout: int = 15) -> Optional[str]:
"""Execute FFmpeg process and parse the version information.
Return full FFmpeg version string. Such as 3.4.2-tessus
"""
command = ["-version"]
        # open the process just to read its version output
is_open = await self.open(cmd=command, input_source=None, output="")
# error after open?
if not is_open:
_LOGGER.warning("Error starting FFmpeg.")
return
# read output
try:
proc_func = functools.partial(self._proc.communicate, timeout=timeout)
output, _ = await self._loop.run_in_executor(None, proc_func)
result = re.search(r"ffmpeg version (\S*)", output.decode())
if result is not None:
return result.group(1)
except (subprocess.TimeoutExpired, ValueError):
_LOGGER.warning("Timeout reading stdout.")
self.kill()
return None | <ast.AsyncFunctionDef object at 0x7da1b04d5ed0> | keyword[async] keyword[def] identifier[get_version] ( identifier[self] , identifier[timeout] : identifier[int] = literal[int] )-> identifier[Optional] [ identifier[str] ]:
literal[string]
identifier[command] =[ literal[string] ]
identifier[is_open] = keyword[await] identifier[self] . identifier[open] ( identifier[cmd] = identifier[command] , identifier[input_source] = keyword[None] , identifier[output] = literal[string] )
keyword[if] keyword[not] identifier[is_open] :
identifier[_LOGGER] . identifier[warning] ( literal[string] )
keyword[return]
keyword[try] :
identifier[proc_func] = identifier[functools] . identifier[partial] ( identifier[self] . identifier[_proc] . identifier[communicate] , identifier[timeout] = identifier[timeout] )
identifier[output] , identifier[_] = keyword[await] identifier[self] . identifier[_loop] . identifier[run_in_executor] ( keyword[None] , identifier[proc_func] )
identifier[result] = identifier[re] . identifier[search] ( literal[string] , identifier[output] . identifier[decode] ())
keyword[if] identifier[result] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[result] . identifier[group] ( literal[int] )
keyword[except] ( identifier[subprocess] . identifier[TimeoutExpired] , identifier[ValueError] ):
identifier[_LOGGER] . identifier[warning] ( literal[string] )
identifier[self] . identifier[kill] ()
keyword[return] keyword[None] | async def get_version(self, timeout: int=15) -> Optional[str]:
"""Execute FFmpeg process and parse the version information.
Return full FFmpeg version string. Such as 3.4.2-tessus
"""
command = ['-version']
    # open the process just to read its version output
is_open = await self.open(cmd=command, input_source=None, output='')
# error after open?
if not is_open:
_LOGGER.warning('Error starting FFmpeg.')
return # depends on [control=['if'], data=[]]
# read output
try:
proc_func = functools.partial(self._proc.communicate, timeout=timeout)
(output, _) = await self._loop.run_in_executor(None, proc_func)
result = re.search('ffmpeg version (\\S*)', output.decode())
if result is not None:
return result.group(1) # depends on [control=['if'], data=['result']] # depends on [control=['try'], data=[]]
except (subprocess.TimeoutExpired, ValueError):
_LOGGER.warning('Timeout reading stdout.')
self.kill() # depends on [control=['except'], data=[]]
return None |
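The banner parsing can be exercised without spawning FFmpeg; the sample output below is invented but matches the documented shape.

import re

banner = b"ffmpeg version 3.4.2-tessus Copyright (c) 2000-2018 the FFmpeg developers"
match = re.search(r"ffmpeg version (\S*)", banner.decode())
print(match.group(1))  # -> 3.4.2-tessus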
def rfc3339(self):
"""Return an RFC 3339-compliant timestamp.
Returns:
(str): Timestamp string according to RFC 3339 spec.
"""
if self._nanosecond == 0:
return to_rfc3339(self)
nanos = str(self._nanosecond).rjust(9, '0').rstrip("0")
return "{}.{}Z".format(self.strftime(_RFC3339_NO_FRACTION), nanos) | def function[rfc3339, parameter[self]]:
constant[Return an RFC 3339-compliant timestamp.
Returns:
(str): Timestamp string according to RFC 3339 spec.
]
if compare[name[self]._nanosecond equal[==] constant[0]] begin[:]
return[call[name[to_rfc3339], parameter[name[self]]]]
variable[nanos] assign[=] call[call[call[name[str], parameter[name[self]._nanosecond]].rjust, parameter[constant[9], constant[0]]].rstrip, parameter[constant[0]]]
return[call[constant[{}.{}Z].format, parameter[call[name[self].strftime, parameter[name[_RFC3339_NO_FRACTION]]], name[nanos]]]] | keyword[def] identifier[rfc3339] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_nanosecond] == literal[int] :
keyword[return] identifier[to_rfc3339] ( identifier[self] )
identifier[nanos] = identifier[str] ( identifier[self] . identifier[_nanosecond] ). identifier[rjust] ( literal[int] , literal[string] ). identifier[rstrip] ( literal[string] )
keyword[return] literal[string] . identifier[format] ( identifier[self] . identifier[strftime] ( identifier[_RFC3339_NO_FRACTION] ), identifier[nanos] ) | def rfc3339(self):
"""Return an RFC 3339-compliant timestamp.
Returns:
(str): Timestamp string according to RFC 3339 spec.
"""
if self._nanosecond == 0:
return to_rfc3339(self) # depends on [control=['if'], data=[]]
nanos = str(self._nanosecond).rjust(9, '0').rstrip('0')
return '{}.{}Z'.format(self.strftime(_RFC3339_NO_FRACTION), nanos) |
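The fractional-second formatting above can be checked in isolation, e.g. for a _nanosecond value of 500:

nanos = str(500).rjust(9, '0').rstrip('0')
print(nanos)  # -> 0000005
print("2021-01-02T03:04:05.{}Z".format(nanos))  # hypothetical date-time prefix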