Python function–documentation pairs (10k rows; language: python; split: train). Columns per row: Unnamed: 0 (int64 row index), repository_name, func_path_in_repository, func_name, whole_func_string, language, func_code_string, func_code_tokens, func_documentation_string, func_documentation_tokens, split_name, func_code_url. Each entry below gives the row index, repository, file path, qualified function name, language and split on one line, the source URL on the next, and the function (with its docstring) once as a code block; func_code_string, func_code_tokens, func_documentation_string and func_documentation_tokens carry the same content in duplicated or tokenized form.

7,900 | pandemicsyn/statsdpy | statsdpy/statsd.py | StatsdServer.process_timer | python | train
https://github.com/pandemicsyn/statsdpy/blob/9cfccf89121fd6a12df20f17fa3eb8f618a36455/statsdpy/statsd.py#L236-L254

```python
def process_timer(self, key, fields):
    """
    Process a received timer event
    :param key: Key of timer
    :param fields: Received fields
    """
    try:
        if key not in self.timers:
            self.timers[key] = []
        self.timers[key].append(float(fields[0]))
        if self.stats_seen >= maxint:
            self.logger.info("hit maxint, reset seen counter")
            self.stats_seen = 0
        self.stats_seen += 1
    except Exception as err:
        self.logger.info("error decoding timer event: %s" % err)
        if self.debug:
            print "error decoding timer event: %s" % err
```

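For illustration only (not part of the dataset row): the sketch below mimics the accumulation step of `process_timer` — the first field of a timer sample is parsed as a float and appended to a per-key list. The sample key and the assumption that a packet such as `glork:320|ms` arrives already split into key and fields are mine.

```python
# Hypothetical, minimal re-creation of the timer-accumulation step.
timers = {}

def record_timer(key, fields):
    # fields[0] is the timer value in milliseconds (assumed input layout)
    timers.setdefault(key, []).append(float(fields[0]))

record_timer("glork", ["320", "ms"])
record_timer("glork", ["150", "ms"])
print(timers)  # {'glork': [320.0, 150.0]}
```
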
7,901 | crytic/slither | slither/printers/inheritance/inheritance_graph.py | PrinterInheritanceGraph._get_indirect_shadowing_information | python | train
https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/printers/inheritance/inheritance_graph.py#L82-L98

```python
def _get_indirect_shadowing_information(contract):
    """
    Obtain a string that describes variable shadowing for the given variable. None if no shadowing exists.
    :param var: The variable to collect shadowing information for.
    :param contract: The contract in which this variable is being analyzed.
    :return: Returns a string describing variable shadowing for the given variable. None if no shadowing exists.
    """
    # If this variable is an overshadowing variable, we'll want to return information describing it.
    result = []
    indirect_shadows = detect_c3_function_shadowing(contract)
    if indirect_shadows:
        for collision_set in sorted(indirect_shadows, key=lambda x: x[0][1].name):
            winner = collision_set[-1][1].contract.name
            collision_steps = [colliding_function.contract.name for _, colliding_function in collision_set]
            collision_steps = ', '.join(collision_steps)
            result.append(f"'{collision_set[0][1].full_name}' collides in inherited contracts {collision_steps} where {winner} is chosen.")
    return '\n'.join(result)
```

7,902 | peterwittek/ncpol2sdpa | ncpol2sdpa/physics_utils.py | bosonic_constraints | python | train
https://github.com/peterwittek/ncpol2sdpa/blob/bce75d524d0b9d0093f32e3a0a5611f8589351a7/ncpol2sdpa/physics_utils.py#L82-L99

```python
def bosonic_constraints(a):
    """Return a set of constraints that define fermionic ladder operators.
    :param a: The non-Hermitian variables.
    :type a: list of :class:`sympy.physics.quantum.operator.Operator`.
    :returns: a dict of substitutions.
    """
    substitutions = {}
    for i, ai in enumerate(a):
        substitutions[ai * Dagger(ai)] = 1.0 + Dagger(ai) * ai
        for aj in a[i+1:]:
            # substitutions[ai*Dagger(aj)] = -Dagger(ai)*aj
            substitutions[ai*Dagger(aj)] = Dagger(aj)*ai
            substitutions[Dagger(ai)*aj] = aj*Dagger(ai)
            substitutions[ai*aj] = aj*ai
            substitutions[Dagger(ai) * Dagger(aj)] = Dagger(aj) * Dagger(ai)
    return substitutions
```

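A brief usage sketch (mine, not from the dataset row): assuming `bosonic_constraints` is importable at the `ncpol2sdpa` package level, two SymPy ladder operators are enough to see the substitution rules it returns.

```python
from sympy.physics.quantum import Dagger
from sympy.physics.quantum.operator import Operator
from ncpol2sdpa import bosonic_constraints  # assumed package-level import

a = [Operator('a0'), Operator('a1')]
substitutions = bosonic_constraints(a)
for lhs, rhs in substitutions.items():
    # e.g. a0*Dagger(a0) -> 1.0 + Dagger(a0)*a0  (canonical commutation relation)
    print(lhs, '->', rhs)
```
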
7,903 | scottjbarr/bitfinex | bitfinex/client.py | Client._convert_to_floats | python | train
https://github.com/scottjbarr/bitfinex/blob/03f7c71615fe38c2e28be0ebb761d3106ef0a51a/bitfinex/client.py#L497-L504

```python
def _convert_to_floats(self, data):
    """
    Convert all values in a dict to floats
    """
    for key, value in data.items():
        data[key] = float(value)
    return data
```

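A tiny illustration (field names made up, not taken from the Bitfinex API): the helper simply walks the dict and replaces every value with `float(value)`, mutating and returning the same dict.

```python
# Hypothetical ticker payload of strings, converted in place as the helper does.
ticker = {'bid': '7191.0', 'ask': '7192.3', 'volume': '53723.08'}
for key, value in ticker.items():
    ticker[key] = float(value)
print(ticker)  # {'bid': 7191.0, 'ask': 7192.3, 'volume': 53723.08}
```
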
7,904 | hovren/crisp | crisp/camera.py | AtanCameraModel.invert | python | train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/camera.py#L160-L187

```python
def invert(self, points):
    """Invert the distortion
    Parameters
    ------------------
    points : ndarray
        Input image points
    Returns
    -----------------
    ndarray
        Undistorted points
    """
    X = points if not points.ndim == 1 else points.reshape((points.size, 1))
    wx, wy = self.wc
    # Switch to polar coordinates
    rn = np.sqrt((X[0,:] - wx)**2 + (X[1,:] - wy)**2)
    phi = np.arctan2(X[1,:] - wy, X[0,:]-wx)
    # 'atan' method
    r = np.tan(rn * self.lgamma) / self.lgamma;
    # Switch back to rectangular coordinates
    Y = np.ones(X.shape)
    Y[0,:] = wx + r * np.cos(phi)
    Y[1,:] = wy + r * np.sin(phi)
    return Y
```

7,905 | allenai/allennlp | allennlp/semparse/worlds/atis_world.py | AtisWorld._update_grammar | python | train
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/worlds/atis_world.py#L83-L183

```python
def _update_grammar(self):
    """
    We create a new ``Grammar`` object from the one in ``AtisSqlTableContext``, that also
    has the new entities that are extracted from the utterance. Stitching together the expressions
    to form the grammar is a little tedious here, but it is worth it because we don't have to create
    a new grammar from scratch. Creating a new grammar is expensive because we have many production
    rules that have all database values in the column on the right hand side. We update the expressions
    bottom up, since the higher level expressions may refer to the lower level ones. For example, the
    ternary expression will refer to the start and end times.
    """
    # This will give us a shallow copy. We have to be careful here because the ``Grammar`` object
    # contains ``Expression`` objects that have tuples containing the members of that expression.
    # We have to create new sub-expression objects so that original grammar is not mutated.
    new_grammar = copy(AtisWorld.sql_table_context.grammar)

    for numeric_nonterminal in NUMERIC_NONTERMINALS:
        self._add_numeric_nonterminal_to_grammar(numeric_nonterminal, new_grammar)
    self._update_expression_reference(new_grammar, 'pos_value', 'number')

    ternary_expressions = [self._get_sequence_with_spacing(new_grammar,
                                                           [new_grammar['col_ref'],
                                                            Literal('BETWEEN'),
                                                            new_grammar['time_range_start'],
                                                            Literal(f'AND'),
                                                            new_grammar['time_range_end']]),
                           self._get_sequence_with_spacing(new_grammar,
                                                           [new_grammar['col_ref'],
                                                            Literal('NOT'),
                                                            Literal('BETWEEN'),
                                                            new_grammar['time_range_start'],
                                                            Literal(f'AND'),
                                                            new_grammar['time_range_end']]),
                           self._get_sequence_with_spacing(new_grammar,
                                                           [new_grammar['col_ref'],
                                                            Literal('not'),
                                                            Literal('BETWEEN'),
                                                            new_grammar['time_range_start'],
                                                            Literal(f'AND'),
                                                            new_grammar['time_range_end']])]

    new_grammar['ternaryexpr'] = OneOf(*ternary_expressions, name='ternaryexpr')
    self._update_expression_reference(new_grammar, 'condition', 'ternaryexpr')

    new_binary_expressions = []
    fare_round_trip_cost_expression = \
        self._get_sequence_with_spacing(new_grammar,
                                        [Literal('fare'),
                                         Literal('.'),
                                         Literal('round_trip_cost'),
                                         new_grammar['binaryop'],
                                         new_grammar['fare_round_trip_cost']])
    new_binary_expressions.append(fare_round_trip_cost_expression)

    fare_one_direction_cost_expression = \
        self._get_sequence_with_spacing(new_grammar,
                                        [Literal('fare'),
                                         Literal('.'),
                                         Literal('one_direction_cost'),
                                         new_grammar['binaryop'],
                                         new_grammar['fare_one_direction_cost']])
    new_binary_expressions.append(fare_one_direction_cost_expression)

    flight_number_expression = \
        self._get_sequence_with_spacing(new_grammar,
                                        [Literal('flight'),
                                         Literal('.'),
                                         Literal('flight_number'),
                                         new_grammar['binaryop'],
                                         new_grammar['flight_number']])
    new_binary_expressions.append(flight_number_expression)

    if self.dates:
        year_binary_expression = self._get_sequence_with_spacing(new_grammar,
                                                                 [Literal('date_day'),
                                                                  Literal('.'),
                                                                  Literal('year'),
                                                                  new_grammar['binaryop'],
                                                                  new_grammar['year_number']])
        month_binary_expression = self._get_sequence_with_spacing(new_grammar,
                                                                  [Literal('date_day'),
                                                                   Literal('.'),
                                                                   Literal('month_number'),
                                                                   new_grammar['binaryop'],
                                                                   new_grammar['month_number']])
        day_binary_expression = self._get_sequence_with_spacing(new_grammar,
                                                                [Literal('date_day'),
                                                                 Literal('.'),
                                                                 Literal('day_number'),
                                                                 new_grammar['binaryop'],
                                                                 new_grammar['day_number']])
        new_binary_expressions.extend([year_binary_expression,
                                       month_binary_expression,
                                       day_binary_expression])

    new_binary_expressions = new_binary_expressions + list(new_grammar['biexpr'].members)
    new_grammar['biexpr'] = OneOf(*new_binary_expressions, name='biexpr')
    self._update_expression_reference(new_grammar, 'condition', 'biexpr')
    return new_grammar
```

7,906 | fracpete/python-weka-wrapper3 | python/weka/core/typeconv.py | string_array_to_list | python | train
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/typeconv.py#L25-L39

```python
def string_array_to_list(a):
    """
    Turns the Java string array into Python unicode string list.
    :param a: the string array to convert
    :type a: JB_Object
    :return: the string list
    :rtype: list
    """
    result = []
    length = javabridge.get_env().get_array_length(a)
    wrapped = javabridge.get_env().get_object_array_elements(a)
    for i in range(length):
        result.append(javabridge.get_env().get_string(wrapped[i]))
    return result
```

7,907 | matthew-brett/delocate | delocate/libsana.py | tree_libs | python | train
https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/libsana.py#L14-L65

```python
def tree_libs(start_path, filt_func=None):
    """ Return analysis of library dependencies within `start_path`
    Parameters
    ----------
    start_path : str
        root path of tree to search for libraries depending on other libraries.
    filt_func : None or callable, optional
        If None, inspect all files for library dependencies. If callable,
        accepts filename as argument, returns True if we should inspect the
        file, False otherwise.
    Returns
    -------
    lib_dict : dict
        dictionary with (key, value) pairs of (``libpath``,
        ``dependings_dict``).
        ``libpath`` is canonical (``os.path.realpath``) filename of library, or
        library name starting with {'@rpath', '@loader_path',
        '@executable_path'}.
        ``dependings_dict`` is a dict with (key, value) pairs of
        (``depending_libpath``, ``install_name``), where ``dependings_libpath``
        is the canonical (``os.path.realpath``) filename of the library
        depending on ``libpath``, and ``install_name`` is the "install_name" by
        which ``depending_libpath`` refers to ``libpath``.
    Notes
    -----
    See:
    * https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/dyld.1.html
    * http://matthew-brett.github.io/pydagogue/mac_runtime_link.html
    """
    lib_dict = {}
    for dirpath, dirnames, basenames in os.walk(start_path):
        for base in basenames:
            depending_libpath = realpath(pjoin(dirpath, base))
            if not filt_func is None and not filt_func(depending_libpath):
                continue
            rpaths = get_rpaths(depending_libpath)
            for install_name in get_install_names(depending_libpath):
                lib_path = (install_name if install_name.startswith('@')
                            else realpath(install_name))
                lib_path = resolve_rpath(lib_path, rpaths)
                if lib_path in lib_dict:
                    lib_dict[lib_path][depending_libpath] = install_name
                else:
                    lib_dict[lib_path] = {depending_libpath: install_name}
    return lib_dict
```

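A hypothetical illustration (paths and install names made up, not real output) of the nested dict shape described above, as returned by something like `tree_libs('/path/to/unpacked/wheel')`:

```python
lib_dict = {
    '/usr/lib/libSystem.B.dylib': {
        '/path/to/unpacked/wheel/pkg/module.so': '/usr/lib/libSystem.B.dylib',
    },
    '@rpath/libdep.dylib': {
        '/path/to/unpacked/wheel/pkg/module.so': '@rpath/libdep.dylib',
    },
}
```
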
7,908 | jorisroovers/gitlint | gitlint/cli.py | build_config | python | train
https://github.com/jorisroovers/gitlint/blob/6248bd6cbc20c1be3bb6d196a5ec0425af99733b/gitlint/cli.py#L57-L93

```python
def build_config(ctx, target, config_path, c, extra_path, ignore, verbose, silent, debug):
    """ Creates a LintConfig object based on a set of commandline parameters. """
    config_builder = LintConfigBuilder()
    try:
        # Config precedence:
        # First, load default config or config from configfile
        if config_path:
            config_builder.set_from_config_file(config_path)
        elif os.path.exists(DEFAULT_CONFIG_FILE):
            config_builder.set_from_config_file(DEFAULT_CONFIG_FILE)
        # Then process any commandline configuration flags
        config_builder.set_config_from_string_list(c)
        # Finally, overwrite with any convenience commandline flags
        if ignore:
            config_builder.set_option('general', 'ignore', ignore)
        if silent:
            config_builder.set_option('general', 'verbosity', 0)
        elif verbose > 0:
            config_builder.set_option('general', 'verbosity', verbose)
        if extra_path:
            config_builder.set_option('general', 'extra-path', extra_path)
        if target:
            config_builder.set_option('general', 'target', target)
        if debug:
            config_builder.set_option('general', 'debug', debug)
        config = config_builder.build()
        return config, config_builder
    except LintConfigError as e:
        click.echo(u"Config Error: {0}".format(ustr(e)))
        ctx.exit(CONFIG_ERROR_CODE)
```

7,909 | lord63/tldr.py | tldr/cli.py | parse_man_page | python | train
https://github.com/lord63/tldr.py/blob/73cf9f86254691b2476910ea6a743b6d8bd04963/tldr/cli.py#L22-L26

```python
def parse_man_page(command, platform):
    """Parse the man page and return the parsed lines."""
    page_path = find_page_location(command, platform)
    output_lines = parse_page(page_path)
    return output_lines
```

7,910 | jsommers/switchyard | switchyard/lib/topo/util.py | unhumanize_bandwidth | python | train
https://github.com/jsommers/switchyard/blob/fdcb3869c937dcedbd6ea7a7822ebd412bf1e2b0/switchyard/lib/topo/util.py#L40-L72

```python
def unhumanize_bandwidth(bitsstr):
    '''
    Take a string representing a link capacity, e.g., 10 Mb/s, and
    return an integer representing the number of bits/sec.
    Recognizes:
        - 'bits/sec' or 'b/s' are treated as plain bits per second
        - 'Kb' or 'kb' as thousand bits/sec
        - 'Mb' or 'mb' as million bits/sec
        - 'Gb' or 'gb' as billion bits/sec
        - 'Tb' or 'tb' as trillion bits/sec
        - if second character is 'B', quantity is interpreted as bytes/sec
        - any subsequent characters after the first two are ignored, so
          Kb/s Kb/sec Kbps are interpreted identically.
    Returns None if the string doesn't contain anything parseable.
    '''
    if isinstance(bitsstr, int):
        return bitsstr
    mobj = re.match('^\s*([\d\.]+)\s*(.*)\s*$', bitsstr)
    if not mobj:
        return None
    value, units = mobj.groups()
    value = float(value)
    multipliers = { 'b':1, 'k':1e3, 'm':1e6, 'g':1e9, 't':1e12 }
    if not units:
        units = 'bits'
    mult = multipliers.get(units[0].lower(), 0)
    bits = 1
    if len(units) > 1:
        if units[1] == 'B': bits = 8
    # print (bitsstr, value, mult, bits)
    return int(value * mult * bits)
```

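A quick usage sketch (my example values, with the import path inferred from the func_code_url above); the expected results follow directly from the parsing rules in the docstring:

```python
from switchyard.lib.topo.util import unhumanize_bandwidth  # assumed module path

print(unhumanize_bandwidth('10 Mb/s'))   # 10000000
print(unhumanize_bandwidth('1.5 Kbps'))  # 1500
print(unhumanize_bandwidth('10 MB/s'))   # 80000000  (bytes/sec -> bits/sec)
print(unhumanize_bandwidth('oops'))      # None (nothing parseable)
```
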
7,911 | drslump/pyshould | pyshould/expectation.py | Expectation._assertion | python | train
https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L102-L114

```python
def _assertion(self, matcher, value):
    """ Perform the actual assertion for the given matcher and value. Override
    this method to apply a special configuration when performing the assertion.
    If the assertion fails it should raise an AssertionError.
    """
    # To support the syntax `any_of(subject) | should ...` we check if the
    # value to check is an Expectation object and if it is we use the descriptor
    # protocol to bind the value's assertion logic to this expectation.
    if isinstance(value, Expectation):
        assertion = value._assertion.__get__(self, Expectation)
        assertion(matcher, value.value)
    else:
        hc.assert_that(value, matcher)
```

7,912 | jaredLunde/vital-tools | vital/security/__init__.py | aes_b64_encrypt | python | train
https://github.com/jaredLunde/vital-tools/blob/ea924c9bbb6ec22aa66f8095f018b1ee0099ac04/vital/security/__init__.py#L31-L52

```python
def aes_b64_encrypt(value, secret, block_size=AES.block_size):
    """ AES encrypt @value with @secret using the |CFB| mode of AES
        with a cryptographically secure initialization vector.

        -> (#str) AES encrypted @value
        ..
            from vital.security import aes_encrypt, aes_decrypt
            aes_encrypt("Hello, world",
                        "aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW")
            # -> 'zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw='
            aes_decrypt(
                "zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw=",
                "aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW")
            # -> 'Hello, world'
        ..
    """
    # iv = randstr(block_size * 2, rng=random)
    iv = randstr(block_size * 2)
    cipher = AES.new(secret[:32], AES.MODE_CFB, iv[:block_size].encode())
    return iv + b64encode(cipher.encrypt(
        uniorbytes(value, bytes))).decode('utf-8')
```

7,913 | lrq3000/pyFileFixity | pyFileFixity/lib/gooey/gui/components.py | Counter.GetValue | python | train
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/gooey/gui/components.py#L429-L446

```python
def GetValue(self):
    '''
    NOTE: Added on plane. Cannot remember exact implementation
    of counter objects. I believe that they count sequentail
    pairings of options
    e.g.
      -vvvvv
    But I'm not sure. That's what I'm going with for now.

    Returns
      str(action.options_string[0]) * DropDown Value
    '''
    dropdown_value = self._widget.GetValue()
    if not str(dropdown_value).isdigit():
        return None
    arg = str(self._action.option_strings[0]).replace('-', '')
    repeated_args = arg * int(dropdown_value)
    return '-' + repeated_args
```

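A tiny standalone sketch (mine) of the flag-repetition logic the method implements: strip the dashes from the option string, repeat it by the dropdown count, and re-prefix a single dash.

```python
def repeat_flag(option_string, count):
    # mirrors Counter.GetValue for a numeric dropdown selection
    arg = option_string.replace('-', '')
    return '-' + arg * count

print(repeat_flag('-v', 3))         # -vvv
print(repeat_flag('--verbose', 2))  # -verboseverbose
```
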
7,914 | pazz/alot | alot/db/utils.py | add_signature_headers | python | train
https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/db/utils.py#L38-L79

```python
def add_signature_headers(mail, sigs, error_msg):
    '''Add pseudo headers to the mail indicating whether the signature
    verification was successful.

    :param mail: :class:`email.message.Message` the message to entitle
    :param sigs: list of :class:`gpg.results.Signature`
    :param error_msg: An error message if there is one, or None
    :type error_msg: :class:`str` or `None`
    '''
    sig_from = ''
    sig_known = True
    uid_trusted = False

    assert error_msg is None or isinstance(error_msg, str)

    if not sigs:
        error_msg = error_msg or u'no signature found'
    elif not error_msg:
        try:
            key = crypto.get_key(sigs[0].fpr)
            for uid in key.uids:
                if crypto.check_uid_validity(key, uid.email):
                    sig_from = uid.uid
                    uid_trusted = True
                    break
            else:
                # No trusted uid found, since we did not break from the loop.
                sig_from = key.uids[0].uid
        except GPGProblem:
            sig_from = sigs[0].fpr
            sig_known = False

    if error_msg:
        msg = 'Invalid: {}'.format(error_msg)
    elif uid_trusted:
        msg = 'Valid: {}'.format(sig_from)
    else:
        msg = 'Untrusted: {}'.format(sig_from)

    mail.add_header(X_SIGNATURE_VALID_HEADER,
                    'False' if (error_msg or not sig_known) else 'True')
    mail.add_header(X_SIGNATURE_MESSAGE_HEADER, msg)
```

7,915 | vijaykatam/django-cache-manager | django_cache_manager/mixins.py | CacheInvalidateMixin.invalidate_model_cache | def invalidate_model_cache(self):
"""
Invalidate model cache by generating new key for the model.
"""
logger.info('Invalidating cache for table {0}'.format(self.model._meta.db_table))
if django.VERSION >= (1, 8):
related_tables = set(
[f.related_model._meta.db_table for f in self.model._meta.get_fields()
if ((f.one_to_many or f.one_to_one) and f.auto_created)
or f.many_to_one or (f.many_to_many and not f.auto_created)])
else:
related_tables = set([rel.model._meta.db_table for rel in self.model._meta.get_all_related_objects()])
# temporary fix for m2m relations with an intermediate model, goes away after better join caching
related_tables |= set([field.rel.to._meta.db_table for field in self.model._meta.fields if issubclass(type(field), RelatedField)])
logger.debug('Related tables of model {0} are {1}'.format(self.model, related_tables))
update_model_cache(self.model._meta.db_table)
for related_table in related_tables:
update_model_cache(related_table) | python | def invalidate_model_cache(self):
"""
Invalidate model cache by generating new key for the model.
"""
logger.info('Invalidating cache for table {0}'.format(self.model._meta.db_table))
if django.VERSION >= (1, 8):
related_tables = set(
[f.related_model._meta.db_table for f in self.model._meta.get_fields()
if ((f.one_to_many or f.one_to_one) and f.auto_created)
or f.many_to_one or (f.many_to_many and not f.auto_created)])
else:
related_tables = set([rel.model._meta.db_table for rel in self.model._meta.get_all_related_objects()])
# temporary fix for m2m relations with an intermediate model, goes away after better join caching
related_tables |= set([field.rel.to._meta.db_table for field in self.model._meta.fields if issubclass(type(field), RelatedField)])
logger.debug('Related tables of model {0} are {1}'.format(self.model, related_tables))
update_model_cache(self.model._meta.db_table)
for related_table in related_tables:
update_model_cache(related_table) | ['def', 'invalidate_model_cache', '(', 'self', ')', ':', 'logger', '.', 'info', '(', "'Invalidating cache for table {0}'", '.', 'format', '(', 'self', '.', 'model', '.', '_meta', '.', 'db_table', ')', ')', 'if', 'django', '.', 'VERSION', '>=', '(', '1', ',', '8', ')', ':', 'related_tables', '=', 'set', '(', '[', 'f', '.', 'related_model', '.', '_meta', '.', 'db_table', 'for', 'f', 'in', 'self', '.', 'model', '.', '_meta', '.', 'get_fields', '(', ')', 'if', '(', '(', 'f', '.', 'one_to_many', 'or', 'f', '.', 'one_to_one', ')', 'and', 'f', '.', 'auto_created', ')', 'or', 'f', '.', 'many_to_one', 'or', '(', 'f', '.', 'many_to_many', 'and', 'not', 'f', '.', 'auto_created', ')', ']', ')', 'else', ':', 'related_tables', '=', 'set', '(', '[', 'rel', '.', 'model', '.', '_meta', '.', 'db_table', 'for', 'rel', 'in', 'self', '.', 'model', '.', '_meta', '.', 'get_all_related_objects', '(', ')', ']', ')', '# temporary fix for m2m relations with an intermediate model, goes away after better join caching', 'related_tables', '|=', 'set', '(', '[', 'field', '.', 'rel', '.', 'to', '.', '_meta', '.', 'db_table', 'for', 'field', 'in', 'self', '.', 'model', '.', '_meta', '.', 'fields', 'if', 'issubclass', '(', 'type', '(', 'field', ')', ',', 'RelatedField', ')', ']', ')', 'logger', '.', 'debug', '(', "'Related tables of model {0} are {1}'", '.', 'format', '(', 'self', '.', 'model', ',', 'related_tables', ')', ')', 'update_model_cache', '(', 'self', '.', 'model', '.', '_meta', '.', 'db_table', ')', 'for', 'related_table', 'in', 'related_tables', ':', 'update_model_cache', '(', 'related_table', ')'] | Invalidate model cache by generating new key for the model. | ['Invalidate', 'model', 'cache', 'by', 'generating', 'new', 'key', 'for', 'the', 'model', '.'] | train | https://github.com/vijaykatam/django-cache-manager/blob/05142c44eb349d3f24f962592945888d9d367375/django_cache_manager/mixins.py#L66-L84 |
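A hedged sketch of wiring CacheInvalidateMixin into a Django manager so the method in the row above can be called; every class name other than the mixin itself is hypothetical.
from django.db import models
from django_cache_manager.mixins import CacheInvalidateMixin

class InvalidatingManager(CacheInvalidateMixin, models.Manager):  # hypothetical manager class
    pass

class Author(models.Model):  # hypothetical model for illustration only
    name = models.CharField(max_length=100)
    objects = InvalidatingManager()

# Author.objects.invalidate_model_cache()  # regenerates cache keys for Author and its related tables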
7,916 | SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Wrapper.py | Wrapper.clean_data | def clean_data(self, data, rename_col=None, drop_col=None,
resample=True, freq='h', resampler='mean',
interpolate=True, limit=1, method='linear',
remove_na=True, remove_na_how='any',
remove_outliers=True, sd_val=3,
remove_out_of_bounds=True, low_bound=0, high_bound=float('inf'),
save_file=True):
""" Cleans dataframe according to user specifications and stores result in self.cleaned_data.
Parameters
----------
data : pd.DataFrame()
Dataframe to be cleaned.
rename_col : list(str)
List of new column names.
drop_col : list(str)
Columns to be dropped.
resample : bool
Indicates whether to resample data or not.
freq : str
Resampling frequency i.e. d, h, 15T...
resampler : str
Resampling type i.e. mean, max.
interpolate : bool
Indicates whether to interpolate data or not.
limit : int
Interpolation limit.
method : str
Interpolation method.
remove_na : bool
Indicates whether to remove NAs or not.
remove_na_how : str
Specifies how to remove NA i.e. all, any...
remove_outliers : bool
Indicates whether to remove outliers or not.
sd_val : int
Standard Deviation Value (specifies how many SDs away is a point considered an outlier)
remove_out_of_bounds : bool
Indicates whether to remove out of bounds datapoints or not.
low_bound : int
Low bound of the data.
high_bound : int
High bound of the data.
save_file : bool
Specifies whether to save file or not. Defaults to True.
Returns
-------
pd.DataFrame()
Dataframe containing cleaned data.
"""
# Check to ensure data is a pandas dataframe
if not isinstance(data, pd.DataFrame):
raise TypeError('data has to be a pandas dataframe.')
# Create instance and clean the data
clean_data_obj = Clean_Data(data)
clean_data_obj.clean_data(resample=resample, freq=freq, resampler=resampler,
interpolate=interpolate, limit=limit, method=method,
remove_na=remove_na, remove_na_how=remove_na_how,
remove_outliers=remove_outliers, sd_val=sd_val,
remove_out_of_bounds=remove_out_of_bounds, low_bound=low_bound, high_bound=high_bound)
# Correlation plot
# fig = self.plot_data_obj.correlation_plot(clean_data_obj.cleaned_data)
# fig.savefig(self.results_folder_name + '/correlation_plot-' + str(Wrapper.global_count) + '.png')
if rename_col: # Rename columns of dataframe
clean_data_obj.rename_columns(rename_col)
if drop_col: # Drop columns of dataframe
clean_data_obj.drop_columns(drop_col)
# Store cleaned data in wrapper class
self.cleaned_data = clean_data_obj.cleaned_data
# Logging
self.result['Clean'] = {
'Rename Col': rename_col,
'Drop Col': drop_col,
'Resample': resample,
'Frequency': freq,
'Resampler': resampler,
'Interpolate': interpolate,
'Limit': limit,
'Method': method,
'Remove NA': remove_na,
'Remove NA How': remove_na_how,
'Remove Outliers': remove_outliers,
'SD Val': sd_val,
'Remove Out of Bounds': remove_out_of_bounds,
'Low Bound': low_bound,
'High Bound': str(high_bound) if high_bound == float('inf') else high_bound,
'Save File': save_file
}
if save_file:
f = self.results_folder_name + '/cleaned_data-' + str(self.get_global_count()) + '.csv'
self.cleaned_data.to_csv(f)
self.result['Clean']['Saved File'] = f
else:
self.result['Clean']['Saved File'] = ''
return self.cleaned_data | python | def clean_data(self, data, rename_col=None, drop_col=None,
resample=True, freq='h', resampler='mean',
interpolate=True, limit=1, method='linear',
remove_na=True, remove_na_how='any',
remove_outliers=True, sd_val=3,
remove_out_of_bounds=True, low_bound=0, high_bound=float('inf'),
save_file=True):
""" Cleans dataframe according to user specifications and stores result in self.cleaned_data.
Parameters
----------
data : pd.DataFrame()
Dataframe to be cleaned.
rename_col : list(str)
List of new column names.
drop_col : list(str)
Columns to be dropped.
resample : bool
Indicates whether to resample data or not.
freq : str
Resampling frequency i.e. d, h, 15T...
resampler : str
Resampling type i.e. mean, max.
interpolate : bool
Indicates whether to interpolate data or not.
limit : int
Interpolation limit.
method : str
Interpolation method.
remove_na : bool
Indicates whether to remove NAs or not.
remove_na_how : str
Specifies how to remove NA i.e. all, any...
remove_outliers : bool
Indicates whether to remove outliers or not.
sd_val : int
Standard Deviation Value (specifies how many SDs away is a point considered an outlier)
remove_out_of_bounds : bool
Indicates whether to remove out of bounds datapoints or not.
low_bound : int
Low bound of the data.
high_bound : int
High bound of the data.
save_file : bool
Specifies whether to save file or not. Defaults to True.
Returns
-------
pd.DataFrame()
Dataframe containing cleaned data.
"""
# Check to ensure data is a pandas dataframe
if not isinstance(data, pd.DataFrame):
raise TypeError('data has to be a pandas dataframe.')
# Create instance and clean the data
clean_data_obj = Clean_Data(data)
clean_data_obj.clean_data(resample=resample, freq=freq, resampler=resampler,
interpolate=interpolate, limit=limit, method=method,
remove_na=remove_na, remove_na_how=remove_na_how,
remove_outliers=remove_outliers, sd_val=sd_val,
remove_out_of_bounds=remove_out_of_bounds, low_bound=low_bound, high_bound=high_bound)
# Correlation plot
# fig = self.plot_data_obj.correlation_plot(clean_data_obj.cleaned_data)
# fig.savefig(self.results_folder_name + '/correlation_plot-' + str(Wrapper.global_count) + '.png')
if rename_col: # Rename columns of dataframe
clean_data_obj.rename_columns(rename_col)
if drop_col: # Drop columns of dataframe
clean_data_obj.drop_columns(drop_col)
# Store cleaned data in wrapper class
self.cleaned_data = clean_data_obj.cleaned_data
# Logging
self.result['Clean'] = {
'Rename Col': rename_col,
'Drop Col': drop_col,
'Resample': resample,
'Frequency': freq,
'Resampler': resampler,
'Interpolate': interpolate,
'Limit': limit,
'Method': method,
'Remove NA': remove_na,
'Remove NA How': remove_na_how,
'Remove Outliers': remove_outliers,
'SD Val': sd_val,
'Remove Out of Bounds': remove_out_of_bounds,
'Low Bound': low_bound,
'High Bound': str(high_bound) if high_bound == float('inf') else high_bound,
'Save File': save_file
}
if save_file:
f = self.results_folder_name + '/cleaned_data-' + str(self.get_global_count()) + '.csv'
self.cleaned_data.to_csv(f)
self.result['Clean']['Saved File'] = f
else:
self.result['Clean']['Saved File'] = ''
return self.cleaned_data | ['def', 'clean_data', '(', 'self', ',', 'data', ',', 'rename_col', '=', 'None', ',', 'drop_col', '=', 'None', ',', 'resample', '=', 'True', ',', 'freq', '=', "'h'", ',', 'resampler', '=', "'mean'", ',', 'interpolate', '=', 'True', ',', 'limit', '=', '1', ',', 'method', '=', "'linear'", ',', 'remove_na', '=', 'True', ',', 'remove_na_how', '=', "'any'", ',', 'remove_outliers', '=', 'True', ',', 'sd_val', '=', '3', ',', 'remove_out_of_bounds', '=', 'True', ',', 'low_bound', '=', '0', ',', 'high_bound', '=', 'float', '(', "'inf'", ')', ',', 'save_file', '=', 'True', ')', ':', '# Check to ensure data is a pandas dataframe', 'if', 'not', 'isinstance', '(', 'data', ',', 'pd', '.', 'DataFrame', ')', ':', 'raise', 'TypeError', '(', "'data has to be a pandas dataframe.'", ')', '# Create instance and clean the data', 'clean_data_obj', '=', 'Clean_Data', '(', 'data', ')', 'clean_data_obj', '.', 'clean_data', '(', 'resample', '=', 'resample', ',', 'freq', '=', 'freq', ',', 'resampler', '=', 'resampler', ',', 'interpolate', '=', 'interpolate', ',', 'limit', '=', 'limit', ',', 'method', '=', 'method', ',', 'remove_na', '=', 'remove_na', ',', 'remove_na_how', '=', 'remove_na_how', ',', 'remove_outliers', '=', 'remove_outliers', ',', 'sd_val', '=', 'sd_val', ',', 'remove_out_of_bounds', '=', 'remove_out_of_bounds', ',', 'low_bound', '=', 'low_bound', ',', 'high_bound', '=', 'high_bound', ')', '# Correlation plot', '# fig = self.plot_data_obj.correlation_plot(clean_data_obj.cleaned_data)', "# fig.savefig(self.results_folder_name + '/correlation_plot-' + str(Wrapper.global_count) + '.png')", 'if', 'rename_col', ':', '# Rename columns of dataframe', 'clean_data_obj', '.', 'rename_columns', '(', 'rename_col', ')', 'if', 'drop_col', ':', '# Drop columns of dataframe', 'clean_data_obj', '.', 'drop_columns', '(', 'drop_col', ')', '# Store cleaned data in wrapper class', 'self', '.', 'cleaned_data', '=', 'clean_data_obj', '.', 'cleaned_data', '# Logging', 'self', '.', 'result', '[', "'Clean'", ']', '=', '{', "'Rename Col'", ':', 'rename_col', ',', "'Drop Col'", ':', 'drop_col', ',', "'Resample'", ':', 'resample', ',', "'Frequency'", ':', 'freq', ',', "'Resampler'", ':', 'resampler', ',', "'Interpolate'", ':', 'interpolate', ',', "'Limit'", ':', 'limit', ',', "'Method'", ':', 'method', ',', "'Remove NA'", ':', 'remove_na', ',', "'Remove NA How'", ':', 'remove_na_how', ',', "'Remove Outliers'", ':', 'remove_outliers', ',', "'SD Val'", ':', 'sd_val', ',', "'Remove Out of Bounds'", ':', 'remove_out_of_bounds', ',', "'Low Bound'", ':', 'low_bound', ',', "'High Bound'", ':', 'str', '(', 'high_bound', ')', 'if', 'high_bound', '==', 'float', '(', "'inf'", ')', 'else', 'high_bound', ',', "'Save File'", ':', 'save_file', '}', 'if', 'save_file', ':', 'f', '=', 'self', '.', 'results_folder_name', '+', "'/cleaned_data-'", '+', 'str', '(', 'self', '.', 'get_global_count', '(', ')', ')', '+', "'.csv'", 'self', '.', 'cleaned_data', '.', 'to_csv', '(', 'f', ')', 'self', '.', 'result', '[', "'Clean'", ']', '[', "'Saved File'", ']', '=', 'f', 'else', ':', 'self', '.', 'result', '[', "'Clean'", ']', '[', "'Saved File'", ']', '=', "''", 'return', 'self', '.', 'cleaned_data'] | Cleans dataframe according to user specifications and stores result in self.cleaned_data.
Parameters
----------
data : pd.DataFrame()
Dataframe to be cleaned.
rename_col : list(str)
List of new column names.
drop_col : list(str)
Columns to be dropped.
resample : bool
Indicates whether to resample data or not.
freq : str
Resampling frequency i.e. d, h, 15T...
resampler : str
Resampling type i.e. mean, max.
interpolate : bool
Indicates whether to interpolate data or not.
limit : int
Interpolation limit.
method : str
Interpolation method.
remove_na : bool
Indicates whether to remove NAs or not.
remove_na_how : str
Specifies how to remove NA i.e. all, any...
remove_outliers : bool
Indicates whether to remove outliers or not.
sd_val : int
Standard Deviation Value (specifies how many SDs away is a point considered an outlier)
remove_out_of_bounds : bool
Indicates whether to remove out of bounds datapoints or not.
low_bound : int
Low bound of the data.
high_bound : int
High bound of the data.
save_file : bool
Specifies whether to save file or not. Defaults to True.
Returns
-------
pd.DataFrame()
Dataframe containing cleaned data. | ['Cleans', 'dataframe', 'according', 'to', 'user', 'specifications', 'and', 'stores', 'result', 'in', 'self', '.', 'cleaned_data', '.'] | train | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Wrapper.py#L439-L543 |
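An illustrative call to the clean_data method shown above; the Wrapper constructor arguments and the import path are assumptions (the file lives at apps/Data_quality_analysis/Wrapper.py), only the keyword arguments come from the signature in the row.
import pandas as pd
from Wrapper import Wrapper  # assumes the script runs from apps/Data_quality_analysis/

raw = pd.DataFrame({"power_kw": [1.2, None, 1.4, 900.0]},
                   index=pd.date_range("2018-01-01", periods=4, freq="15T"))
wrapper = Wrapper()  # constructor arguments assumed to take defaults
cleaned = wrapper.clean_data(raw, resample=True, freq="h",
                             remove_outliers=True, sd_val=3,
                             save_file=False)
print(cleaned.head())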
7,917 | hobson/pug-dj | pug/dj/miner/management/commands/modeldb.py | Command.normalize_col_name | def normalize_col_name(self, col_name, used_column_names, is_relation):
"""
Modify the column name to make it Python-compatible as a field name
"""
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find('__') >= 0:
while new_name.find('__') >= 0:
new_name = new_name.replace('__', '_')
if col_name.lower().find('__') >= 0:
# Only add the comment if the double underscore was in the original name
field_notes.append("Field renamed because it contained more than one '_' in a row.")
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append('Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append("Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes | python | def normalize_col_name(self, col_name, used_column_names, is_relation):
"""
Modify the column name to make it Python-compatible as a field name
"""
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find('__') >= 0:
while new_name.find('__') >= 0:
new_name = new_name.replace('__', '_')
if col_name.lower().find('__') >= 0:
# Only add the comment if the double underscore was in the original name
field_notes.append("Field renamed because it contained more than one '_' in a row.")
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append('Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append("Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes | ['def', 'normalize_col_name', '(', 'self', ',', 'col_name', ',', 'used_column_names', ',', 'is_relation', ')', ':', 'field_params', '=', '{', '}', 'field_notes', '=', '[', ']', 'new_name', '=', 'col_name', '.', 'lower', '(', ')', 'if', 'new_name', '!=', 'col_name', ':', 'field_notes', '.', 'append', '(', "'Field name made lowercase.'", ')', 'if', 'is_relation', ':', 'if', 'new_name', '.', 'endswith', '(', "'_id'", ')', ':', 'new_name', '=', 'new_name', '[', ':', '-', '3', ']', 'else', ':', 'field_params', '[', "'db_column'", ']', '=', 'col_name', 'new_name', ',', 'num_repl', '=', 're', '.', 'subn', '(', "r'\\W'", ',', "'_'", ',', 'new_name', ')', 'if', 'num_repl', '>', '0', ':', 'field_notes', '.', 'append', '(', "'Field renamed to remove unsuitable characters.'", ')', 'if', 'new_name', '.', 'find', '(', "'__'", ')', '>=', '0', ':', 'while', 'new_name', '.', 'find', '(', "'__'", ')', '>=', '0', ':', 'new_name', '=', 'new_name', '.', 'replace', '(', "'__'", ',', "'_'", ')', 'if', 'col_name', '.', 'lower', '(', ')', '.', 'find', '(', "'__'", ')', '>=', '0', ':', '# Only add the comment if the double underscore was in the original name', 'field_notes', '.', 'append', '(', '"Field renamed because it contained more than one \'_\' in a row."', ')', 'if', 'new_name', '.', 'startswith', '(', "'_'", ')', ':', 'new_name', '=', "'field%s'", '%', 'new_name', 'field_notes', '.', 'append', '(', '"Field renamed because it started with \'_\'."', ')', 'if', 'new_name', '.', 'endswith', '(', "'_'", ')', ':', 'new_name', '=', "'%sfield'", '%', 'new_name', 'field_notes', '.', 'append', '(', '"Field renamed because it ended with \'_\'."', ')', 'if', 'keyword', '.', 'iskeyword', '(', 'new_name', ')', ':', 'new_name', '+=', "'_field'", 'field_notes', '.', 'append', '(', "'Field renamed because it was a Python reserved word.'", ')', 'if', 'new_name', '[', '0', ']', '.', 'isdigit', '(', ')', ':', 'new_name', '=', "'number_%s'", '%', 'new_name', 'field_notes', '.', 'append', '(', '"Field renamed because it wasn\'t a valid Python identifier."', ')', 'if', 'new_name', 'in', 'used_column_names', ':', 'num', '=', '0', 'while', "'%s_%d'", '%', '(', 'new_name', ',', 'num', ')', 'in', 'used_column_names', ':', 'num', '+=', '1', 'new_name', '=', "'%s_%d'", '%', '(', 'new_name', ',', 'num', ')', 'field_notes', '.', 'append', '(', "'Field renamed because of name conflict.'", ')', 'if', 'col_name', '!=', 'new_name', 'and', 'field_notes', ':', 'field_params', '[', "'db_column'", ']', '=', 'col_name', 'return', 'new_name', ',', 'field_params', ',', 'field_notes'] | Modify the column name to make it Python-compatible as a field name | ['Modify', 'the', 'column', 'name', 'to', 'make', 'it', 'Python', '-', 'compatible', 'as', 'a', 'field', 'name'] | train | https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/miner/management/commands/modeldb.py#L129-L183 |
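A sketch of what the renaming rules above produce for a messy SQL column name; instantiating the management Command directly with no arguments is an assumption, the expected results are read off the code in the row.
from pug.dj.miner.management.commands.modeldb import Command

cmd = Command()  # assumes no constructor arguments are required
name, params, notes = cmd.normalize_col_name("User ID#", used_column_names=[], is_relation=False)
# name   -> 'user_id_field'   (lowercased, non-word chars replaced, trailing '_' padded with 'field')
# params -> {'db_column': 'User ID#'}
# notes  -> three explanatory messages about the renames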
7,918 | ucsb-cs/submit | submit/models.py | User.classes_can_admin | def classes_can_admin(self):
"""Return all the classes (sorted) that this user can admin."""
if self.is_admin:
return sorted(Session.query(Class).all())
else:
return sorted(self.admin_for) | python | def classes_can_admin(self):
"""Return all the classes (sorted) that this user can admin."""
if self.is_admin:
return sorted(Session.query(Class).all())
else:
return sorted(self.admin_for) | ['def', 'classes_can_admin', '(', 'self', ')', ':', 'if', 'self', '.', 'is_admin', ':', 'return', 'sorted', '(', 'Session', '.', 'query', '(', 'Class', ')', '.', 'all', '(', ')', ')', 'else', ':', 'return', 'sorted', '(', 'self', '.', 'admin_for', ')'] | Return all the classes (sorted) that this user can admin. | ['Return', 'all', 'the', 'classes', '(', 'sorted', ')', 'that', 'this', 'user', 'can', 'admin', '.'] | train | https://github.com/ucsb-cs/submit/blob/92810c81255a4fc6bbebac1ac8aae856fd576ffe/submit/models.py#L965-L970 |
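Trivial usage of the method above; user is assumed to be a User row already loaded through the application's Session.
for klass in user.classes_can_admin():
    print(klass)  # admins iterate every Class, other users only those in user.admin_for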
7,919 | PSPC-SPAC-buyandsell/von_agent | von_agent/agent/issuer.py | Issuer.send_cred_def | async def send_cred_def(self, s_id: str, revocation: bool = True, rr_size: int = None) -> str:
"""
Create a credential definition as Issuer, store it in its wallet, and send it to the ledger.
Raise CorruptWallet for wallet not pertaining to current ledger, BadLedgerTxn on failure
to send credential definition to ledger if need be, or IndyError for any other failure
to create and store credential definition in wallet.
:param s_id: schema identifier
:param revocation: whether to support revocation for cred def
:param rr_size: size of initial revocation registry (default as per _create_rev_reg()), if revocation supported
:return: json credential definition as it appears on ledger
"""
LOGGER.debug('Issuer.send_cred_def >>> s_id: %s, revocation: %s, rr_size: %s', s_id, revocation, rr_size)
rv_json = json.dumps({})
schema_json = await self.get_schema(schema_key(s_id))
schema = json.loads(schema_json)
cd_id = cred_def_id(self.did, schema['seqNo'])
private_key_ok = True
with CRED_DEF_CACHE.lock:
try:
rv_json = await self.get_cred_def(cd_id)
LOGGER.info(
'Cred def on schema %s version %s already exists on ledger; Issuer %s not sending another',
schema['name'],
schema['version'],
self.wallet.name)
except AbsentCredDef:
pass # OK - about to create, store, and send it
try:
(_, cred_def_json) = await anoncreds.issuer_create_and_store_credential_def(
self.wallet.handle,
self.did, # issuer DID
schema_json,
CD_ID_TAG, # expect only one cred def per schema and issuer
'CL',
json.dumps({'support_revocation': revocation}))
if json.loads(rv_json):
private_key_ok = False
LOGGER.warning(
'New cred def on %s in wallet shadows existing one on ledger: private key not usable', cd_id)
# carry on though, this agent may have other roles so public key may be good enough
except IndyError as x_indy:
if x_indy.error_code == ErrorCode.AnoncredsCredDefAlreadyExistsError:
if json.loads(rv_json):
LOGGER.info(
'Issuer wallet %s reusing existing cred def on schema %s version %s',
self.wallet.name,
schema['name'],
schema['version'])
else:
LOGGER.debug('Issuer.send_cred_def: <!< corrupt wallet %s', self.wallet.name)
raise CorruptWallet(
'Corrupt Issuer wallet {} has cred def on schema {} version {} not on ledger'.format(
self.wallet.name,
schema['name'],
schema['version']))
else:
LOGGER.debug(
'Issuer.send_cred_def: <!< cannot store cred def in wallet %s: indy error code %s',
self.wallet.name,
x_indy.error_code)
raise
if not json.loads(rv_json): # checking the ledger returned no cred def: send it
req_json = await ledger.build_cred_def_request(self.did, cred_def_json)
await self._sign_submit(req_json)
rv_json = await self.get_cred_def(cd_id) # pick up from ledger and parse; add to cache
if revocation:
await self._sync_revoc(rev_reg_id(cd_id, 0), rr_size) # create new rev reg, tails file for tag 0
if revocation and private_key_ok:
for tag in [str(t) for t in range(int(Tails.next_tag(self._dir_tails, cd_id)[0]))]: # '0' to str(next-1)
await self._sync_revoc(rev_reg_id(cd_id, tag), rr_size if tag == 0 else None)
dir_cred_def = join(self._dir_tails, cd_id)
if not isdir(dir_cred_def): # make sure a directory exists for box id collection when required, revo or not
makedirs(dir_cred_def, exist_ok=True)
LOGGER.debug('Issuer.send_cred_def <<< %s', rv_json)
return rv_json | python | async def send_cred_def(self, s_id: str, revocation: bool = True, rr_size: int = None) -> str:
"""
Create a credential definition as Issuer, store it in its wallet, and send it to the ledger.
Raise CorruptWallet for wallet not pertaining to current ledger, BadLedgerTxn on failure
to send credential definition to ledger if need be, or IndyError for any other failure
to create and store credential definition in wallet.
:param s_id: schema identifier
:param revocation: whether to support revocation for cred def
:param rr_size: size of initial revocation registry (default as per _create_rev_reg()), if revocation supported
:return: json credential definition as it appears on ledger
"""
LOGGER.debug('Issuer.send_cred_def >>> s_id: %s, revocation: %s, rr_size: %s', s_id, revocation, rr_size)
rv_json = json.dumps({})
schema_json = await self.get_schema(schema_key(s_id))
schema = json.loads(schema_json)
cd_id = cred_def_id(self.did, schema['seqNo'])
private_key_ok = True
with CRED_DEF_CACHE.lock:
try:
rv_json = await self.get_cred_def(cd_id)
LOGGER.info(
'Cred def on schema %s version %s already exists on ledger; Issuer %s not sending another',
schema['name'],
schema['version'],
self.wallet.name)
except AbsentCredDef:
pass # OK - about to create, store, and send it
try:
(_, cred_def_json) = await anoncreds.issuer_create_and_store_credential_def(
self.wallet.handle,
self.did, # issuer DID
schema_json,
CD_ID_TAG, # expect only one cred def per schema and issuer
'CL',
json.dumps({'support_revocation': revocation}))
if json.loads(rv_json):
private_key_ok = False
LOGGER.warning(
'New cred def on %s in wallet shadows existing one on ledger: private key not usable', cd_id)
# carry on though, this agent may have other roles so public key may be good enough
except IndyError as x_indy:
if x_indy.error_code == ErrorCode.AnoncredsCredDefAlreadyExistsError:
if json.loads(rv_json):
LOGGER.info(
'Issuer wallet %s reusing existing cred def on schema %s version %s',
self.wallet.name,
schema['name'],
schema['version'])
else:
LOGGER.debug('Issuer.send_cred_def: <!< corrupt wallet %s', self.wallet.name)
raise CorruptWallet(
'Corrupt Issuer wallet {} has cred def on schema {} version {} not on ledger'.format(
self.wallet.name,
schema['name'],
schema['version']))
else:
LOGGER.debug(
'Issuer.send_cred_def: <!< cannot store cred def in wallet %s: indy error code %s',
self.wallet.name,
x_indy.error_code)
raise
if not json.loads(rv_json): # checking the ledger returned no cred def: send it
req_json = await ledger.build_cred_def_request(self.did, cred_def_json)
await self._sign_submit(req_json)
rv_json = await self.get_cred_def(cd_id) # pick up from ledger and parse; add to cache
if revocation:
await self._sync_revoc(rev_reg_id(cd_id, 0), rr_size) # create new rev reg, tails file for tag 0
if revocation and private_key_ok:
for tag in [str(t) for t in range(int(Tails.next_tag(self._dir_tails, cd_id)[0]))]: # '0' to str(next-1)
await self._sync_revoc(rev_reg_id(cd_id, tag), rr_size if tag == 0 else None)
dir_cred_def = join(self._dir_tails, cd_id)
if not isdir(dir_cred_def): # make sure a directory exists for box id collection when required, revo or not
makedirs(dir_cred_def, exist_ok=True)
LOGGER.debug('Issuer.send_cred_def <<< %s', rv_json)
return rv_json | ['async', 'def', 'send_cred_def', '(', 'self', ',', 's_id', ':', 'str', ',', 'revocation', ':', 'bool', '=', 'True', ',', 'rr_size', ':', 'int', '=', 'None', ')', '->', 'str', ':', 'LOGGER', '.', 'debug', '(', "'Issuer.send_cred_def >>> s_id: %s, revocation: %s, rr_size: %s'", ',', 's_id', ',', 'revocation', ',', 'rr_size', ')', 'rv_json', '=', 'json', '.', 'dumps', '(', '{', '}', ')', 'schema_json', '=', 'await', 'self', '.', 'get_schema', '(', 'schema_key', '(', 's_id', ')', ')', 'schema', '=', 'json', '.', 'loads', '(', 'schema_json', ')', 'cd_id', '=', 'cred_def_id', '(', 'self', '.', 'did', ',', 'schema', '[', "'seqNo'", ']', ')', 'private_key_ok', '=', 'True', 'with', 'CRED_DEF_CACHE', '.', 'lock', ':', 'try', ':', 'rv_json', '=', 'await', 'self', '.', 'get_cred_def', '(', 'cd_id', ')', 'LOGGER', '.', 'info', '(', "'Cred def on schema %s version %s already exists on ledger; Issuer %s not sending another'", ',', 'schema', '[', "'name'", ']', ',', 'schema', '[', "'version'", ']', ',', 'self', '.', 'wallet', '.', 'name', ')', 'except', 'AbsentCredDef', ':', 'pass', '# OK - about to create, store, and send it', 'try', ':', '(', '_', ',', 'cred_def_json', ')', '=', 'await', 'anoncreds', '.', 'issuer_create_and_store_credential_def', '(', 'self', '.', 'wallet', '.', 'handle', ',', 'self', '.', 'did', ',', '# issuer DID', 'schema_json', ',', 'CD_ID_TAG', ',', '# expect only one cred def per schema and issuer', "'CL'", ',', 'json', '.', 'dumps', '(', '{', "'support_revocation'", ':', 'revocation', '}', ')', ')', 'if', 'json', '.', 'loads', '(', 'rv_json', ')', ':', 'private_key_ok', '=', 'False', 'LOGGER', '.', 'warning', '(', "'New cred def on %s in wallet shadows existing one on ledger: private key not usable'", ',', 'cd_id', ')', '# carry on though, this agent may have other roles so public key may be good enough', 'except', 'IndyError', 'as', 'x_indy', ':', 'if', 'x_indy', '.', 'error_code', '==', 'ErrorCode', '.', 'AnoncredsCredDefAlreadyExistsError', ':', 'if', 'json', '.', 'loads', '(', 'rv_json', ')', ':', 'LOGGER', '.', 'info', '(', "'Issuer wallet %s reusing existing cred def on schema %s version %s'", ',', 'self', '.', 'wallet', '.', 'name', ',', 'schema', '[', "'name'", ']', ',', 'schema', '[', "'version'", ']', ')', 'else', ':', 'LOGGER', '.', 'debug', '(', "'Issuer.send_cred_def: <!< corrupt wallet %s'", ',', 'self', '.', 'wallet', '.', 'name', ')', 'raise', 'CorruptWallet', '(', "'Corrupt Issuer wallet {} has cred def on schema {} version {} not on ledger'", '.', 'format', '(', 'self', '.', 'wallet', '.', 'name', ',', 'schema', '[', "'name'", ']', ',', 'schema', '[', "'version'", ']', ')', ')', 'else', ':', 'LOGGER', '.', 'debug', '(', "'Issuer.send_cred_def: <!< cannot store cred def in wallet %s: indy error code %s'", ',', 'self', '.', 'wallet', '.', 'name', ',', 'x_indy', '.', 'error_code', ')', 'raise', 'if', 'not', 'json', '.', 'loads', '(', 'rv_json', ')', ':', '# checking the ledger returned no cred def: send it', 'req_json', '=', 'await', 'ledger', '.', 'build_cred_def_request', '(', 'self', '.', 'did', ',', 'cred_def_json', ')', 'await', 'self', '.', '_sign_submit', '(', 'req_json', ')', 'rv_json', '=', 'await', 'self', '.', 'get_cred_def', '(', 'cd_id', ')', '# pick up from ledger and parse; add to cache', 'if', 'revocation', ':', 'await', 'self', '.', '_sync_revoc', '(', 'rev_reg_id', '(', 'cd_id', ',', '0', ')', ',', 'rr_size', ')', '# create new rev reg, tails file for tag 0', 'if', 'revocation', 'and', 'private_key_ok', ':', 'for', 'tag', 'in', 
'[', 'str', '(', 't', ')', 'for', 't', 'in', 'range', '(', 'int', '(', 'Tails', '.', 'next_tag', '(', 'self', '.', '_dir_tails', ',', 'cd_id', ')', '[', '0', ']', ')', ')', ']', ':', "# '0' to str(next-1)", 'await', 'self', '.', '_sync_revoc', '(', 'rev_reg_id', '(', 'cd_id', ',', 'tag', ')', ',', 'rr_size', 'if', 'tag', '==', '0', 'else', 'None', ')', 'dir_cred_def', '=', 'join', '(', 'self', '.', '_dir_tails', ',', 'cd_id', ')', 'if', 'not', 'isdir', '(', 'dir_cred_def', ')', ':', '# make sure a directory exists for box id collection when required, revo or not', 'makedirs', '(', 'dir_cred_def', ',', 'exist_ok', '=', 'True', ')', 'LOGGER', '.', 'debug', '(', "'Issuer.send_cred_def <<< %s'", ',', 'rv_json', ')', 'return', 'rv_json'] | Create a credential definition as Issuer, store it in its wallet, and send it to the ledger.
Raise CorruptWallet for wallet not pertaining to current ledger, BadLedgerTxn on failure
to send credential definition to ledger if need be, or IndyError for any other failure
to create and store credential definition in wallet.
:param s_id: schema identifier
:param revocation: whether to support revocation for cred def
:param rr_size: size of initial revocation registry (default as per _create_rev_reg()), if revocation supported
:return: json credential definition as it appears on ledger | ['Create', 'a', 'credential', 'definition', 'as', 'Issuer', 'store', 'it', 'in', 'its', 'wallet', 'and', 'send', 'it', 'to', 'the', 'ledger', '.'] | train | https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/issuer.py#L190-L275 |
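A sketch of calling the coroutine above from an event loop; opening the wallet/pool and constructing the Issuer are elided because they are not shown in this row, and the schema identifier is a placeholder.
import asyncio

async def publish(issuer, schema_id):
    # returns the credential definition JSON exactly as it appears on the ledger
    return await issuer.send_cred_def(schema_id, revocation=True, rr_size=64)

# cred_def_json = asyncio.get_event_loop().run_until_complete(
#     publish(issuer, 'WgWxqztrNooG92RXvxSTWv:2:prefs:1.0'))  # placeholder schema id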
7,920 | log2timeline/dfvfs | dfvfs/encryption/aes_decrypter.py | AESDecrypter.Decrypt | def Decrypt(self, encrypted_data):
"""Decrypts the encrypted data.
Args:
encrypted_data (bytes): encrypted data.
Returns:
tuple[bytes, bytes]: decrypted data and remaining encrypted data.
"""
index_split = -(len(encrypted_data) % AES.block_size)
if index_split:
remaining_encrypted_data = encrypted_data[index_split:]
encrypted_data = encrypted_data[:index_split]
else:
remaining_encrypted_data = b''
decrypted_data = self._aes_cipher.decrypt(encrypted_data)
return decrypted_data, remaining_encrypted_data | python | def Decrypt(self, encrypted_data):
"""Decrypts the encrypted data.
Args:
encrypted_data (bytes): encrypted data.
Returns:
tuple[bytes, bytes]: decrypted data and remaining encrypted data.
"""
index_split = -(len(encrypted_data) % AES.block_size)
if index_split:
remaining_encrypted_data = encrypted_data[index_split:]
encrypted_data = encrypted_data[:index_split]
else:
remaining_encrypted_data = b''
decrypted_data = self._aes_cipher.decrypt(encrypted_data)
return decrypted_data, remaining_encrypted_data | ['def', 'Decrypt', '(', 'self', ',', 'encrypted_data', ')', ':', 'index_split', '=', '-', '(', 'len', '(', 'encrypted_data', ')', '%', 'AES', '.', 'block_size', ')', 'if', 'index_split', ':', 'remaining_encrypted_data', '=', 'encrypted_data', '[', 'index_split', ':', ']', 'encrypted_data', '=', 'encrypted_data', '[', ':', 'index_split', ']', 'else', ':', 'remaining_encrypted_data', '=', "b''", 'decrypted_data', '=', 'self', '.', '_aes_cipher', '.', 'decrypt', '(', 'encrypted_data', ')', 'return', 'decrypted_data', ',', 'remaining_encrypted_data'] | Decrypts the encrypted data.
Args:
encrypted_data (bytes): encrypted data.
Returns:
tuple[bytes, bytes]: decrypted data and remaining encrypted data. | ['Decrypts', 'the', 'encrypted', 'data', '.'] | train | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/encryption/aes_decrypter.py#L57-L75 |
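The block-alignment arithmetic used in Decrypt, shown in isolation (AES.block_size is 16 bytes); constructing an actual AESDecrypter needs key, mode and IV arguments that this row does not show.
block_size = 16
data = b"x" * 37
index_split = -(len(data) % block_size)               # -5
whole_blocks, remainder = data[:index_split], data[index_split:]
assert len(whole_blocks) == 32 and len(remainder) == 5
# decrypted, remaining = decrypter.Decrypt(data)      # remaining would hold the 5 trailing bytes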
7,921 | twilio/twilio-python | twilio/rest/video/v1/composition_hook.py | CompositionHookList.list | def list(self, enabled=values.unset, date_created_after=values.unset,
date_created_before=values.unset, friendly_name=values.unset,
limit=None, page_size=None):
"""
Lists CompositionHookInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param bool enabled: Only show Composition Hooks enabled or disabled.
:param datetime date_created_after: Only show Composition Hooks created on or after this ISO8601 date-time with timezone.
:param datetime date_created_before: Only show Composition Hooks created before this ISO8601 date-time with timezone.
:param unicode friendly_name: Only show Composition Hooks with a friendly name that matches this name.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.video.v1.composition_hook.CompositionHookInstance]
"""
return list(self.stream(
enabled=enabled,
date_created_after=date_created_after,
date_created_before=date_created_before,
friendly_name=friendly_name,
limit=limit,
page_size=page_size,
)) | python | def list(self, enabled=values.unset, date_created_after=values.unset,
date_created_before=values.unset, friendly_name=values.unset,
limit=None, page_size=None):
"""
Lists CompositionHookInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param bool enabled: Only show Composition Hooks enabled or disabled.
:param datetime date_created_after: Only show Composition Hooks created on or after this ISO8601 date-time with timezone.
:param datetime date_created_before: Only show Composition Hooks created before this ISO8601 date-time with timezone.
:param unicode friendly_name: Only show Composition Hooks with a friendly name that matches this name.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.video.v1.composition_hook.CompositionHookInstance]
"""
return list(self.stream(
enabled=enabled,
date_created_after=date_created_after,
date_created_before=date_created_before,
friendly_name=friendly_name,
limit=limit,
page_size=page_size,
)) | ['def', 'list', '(', 'self', ',', 'enabled', '=', 'values', '.', 'unset', ',', 'date_created_after', '=', 'values', '.', 'unset', ',', 'date_created_before', '=', 'values', '.', 'unset', ',', 'friendly_name', '=', 'values', '.', 'unset', ',', 'limit', '=', 'None', ',', 'page_size', '=', 'None', ')', ':', 'return', 'list', '(', 'self', '.', 'stream', '(', 'enabled', '=', 'enabled', ',', 'date_created_after', '=', 'date_created_after', ',', 'date_created_before', '=', 'date_created_before', ',', 'friendly_name', '=', 'friendly_name', ',', 'limit', '=', 'limit', ',', 'page_size', '=', 'page_size', ',', ')', ')'] | Lists CompositionHookInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param bool enabled: Only show Composition Hooks enabled or disabled.
:param datetime date_created_after: Only show Composition Hooks created on or after this ISO8601 date-time with timezone.
:param datetime date_created_before: Only show Composition Hooks created before this ISO8601 date-time with timezone.
:param unicode friendly_name: Only show Composition Hooks with a friendly name that matches this name.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.video.v1.composition_hook.CompositionHookInstance] | ['Lists', 'CompositionHookInstance', 'records', 'from', 'the', 'API', 'as', 'a', 'list', '.', 'Unlike', 'stream', '()', 'this', 'operation', 'is', 'eager', 'and', 'will', 'load', 'limit', 'records', 'into', 'memory', 'before', 'returning', '.'] | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/video/v1/composition_hook.py#L73-L102 |
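A hedged call through the REST client; the client.video.composition_hooks accessor and the credentials are assumptions, only the keyword arguments come from the docstring above.
from twilio.rest import Client

client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")  # placeholder credentials
hooks = client.video.composition_hooks.list(enabled=True, limit=20)
for hook in hooks:
    print(hook.friendly_name)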
7,922 | mitsei/dlkit | dlkit/services/learning.py | ObjectiveBank.use_isolated_objective_bank_view | def use_isolated_objective_bank_view(self):
"""Pass through to provider ObjectiveLookupSession.use_isolated_objective_bank_view"""
self._objective_bank_view = ISOLATED
# self._get_provider_session('objective_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_isolated_objective_bank_view()
except AttributeError:
pass | python | def use_isolated_objective_bank_view(self):
"""Pass through to provider ObjectiveLookupSession.use_isolated_objective_bank_view"""
self._objective_bank_view = ISOLATED
# self._get_provider_session('objective_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_isolated_objective_bank_view()
except AttributeError:
pass | ['def', 'use_isolated_objective_bank_view', '(', 'self', ')', ':', 'self', '.', '_objective_bank_view', '=', 'ISOLATED', "# self._get_provider_session('objective_lookup_session') # To make sure the session is tracked", 'for', 'session', 'in', 'self', '.', '_get_provider_sessions', '(', ')', ':', 'try', ':', 'session', '.', 'use_isolated_objective_bank_view', '(', ')', 'except', 'AttributeError', ':', 'pass'] | Pass through to provider ObjectiveLookupSession.use_isolated_objective_bank_view | ['Pass', 'through', 'to', 'provider', 'ObjectiveLookupSession', '.', 'use_isolated_objective_bank_view'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/learning.py#L1575-L1583 |
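The call itself is a one-liner once an ObjectiveBank has been obtained from a dlkit learning manager; how that bank is obtained is not shown in this row.
objective_bank.use_isolated_objective_bank_view()  # subsequent lookups stay inside this bank only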
7,923 | spacetelescope/drizzlepac | drizzlepac/adrizzle.py | do_driz | def do_driz(insci, input_wcs, inwht,
output_wcs, outsci, outwht, outcon,
expin, in_units, wt_scl,
wcslin_pscale=1.0,uniqid=1, pixfrac=1.0, kernel='square',
fillval="INDEF", stepsize=10,wcsmap=None):
"""
Core routine for performing 'drizzle' operation on a single input image
All input values will be Python objects such as ndarrays, instead
of filenames.
File handling (input and output) will be performed by calling routine.
"""
# Insure that the fillval parameter gets properly interpreted for use with tdriz
if util.is_blank(fillval):
fillval = 'INDEF'
else:
fillval = str(fillval)
if in_units == 'cps':
expscale = 1.0
else:
expscale = expin
# Compute what plane of the context image this input would
# correspond to:
planeid = int((uniqid-1) / 32)
# Check if the context image has this many planes
if outcon.ndim == 3:
nplanes = outcon.shape[0]
elif outcon.ndim == 2:
nplanes = 1
else:
nplanes = 0
if nplanes <= planeid:
raise IndexError("Not enough planes in drizzle context image")
# Alias context image to the requested plane if 3d
if outcon.ndim == 2:
outctx = outcon
else:
outctx = outcon[planeid]
pix_ratio = output_wcs.pscale/wcslin_pscale
if wcsmap is None and cdriz is not None:
log.info('Using WCSLIB-based coordinate transformation...')
log.info('stepsize = %s' % stepsize)
mapping = cdriz.DefaultWCSMapping(
input_wcs, output_wcs,
input_wcs.pixel_shape[0], input_wcs.pixel_shape[1],
stepsize
)
else:
#
##Using the Python class for the WCS-based transformation
#
# Use user provided mapping function
log.info('Using coordinate transformation defined by user...')
if wcsmap is None:
wcsmap = wcs_functions.WCSMap
wmap = wcsmap(input_wcs,output_wcs)
mapping = wmap.forward
_shift_fr = 'output'
_shift_un = 'output'
ystart = 0
nmiss = 0
nskip = 0
#
# This call to 'cdriz.tdriz' uses the new C syntax
#
_dny = insci.shape[0]
# Call 'drizzle' to perform image combination
if insci.dtype > np.float32:
#WARNING: Input array recast as a float32 array
insci = insci.astype(np.float32)
_vers,nmiss,nskip = cdriz.tdriz(insci, inwht, outsci, outwht,
outctx, uniqid, ystart, 1, 1, _dny,
pix_ratio, 1.0, 1.0, 'center', pixfrac,
kernel, in_units, expscale, wt_scl,
fillval, nmiss, nskip, 1, mapping)
if nmiss > 0:
log.warning('! %s points were outside the output image.' % nmiss)
if nskip > 0:
log.debug('! Note, %s input lines were skipped completely.' % nskip)
return _vers | python | def do_driz(insci, input_wcs, inwht,
output_wcs, outsci, outwht, outcon,
expin, in_units, wt_scl,
wcslin_pscale=1.0,uniqid=1, pixfrac=1.0, kernel='square',
fillval="INDEF", stepsize=10,wcsmap=None):
"""
Core routine for performing 'drizzle' operation on a single input image
All input values will be Python objects such as ndarrays, instead
of filenames.
File handling (input and output) will be performed by calling routine.
"""
# Insure that the fillval parameter gets properly interpreted for use with tdriz
if util.is_blank(fillval):
fillval = 'INDEF'
else:
fillval = str(fillval)
if in_units == 'cps':
expscale = 1.0
else:
expscale = expin
# Compute what plane of the context image this input would
# correspond to:
planeid = int((uniqid-1) / 32)
# Check if the context image has this many planes
if outcon.ndim == 3:
nplanes = outcon.shape[0]
elif outcon.ndim == 2:
nplanes = 1
else:
nplanes = 0
if nplanes <= planeid:
raise IndexError("Not enough planes in drizzle context image")
# Alias context image to the requested plane if 3d
if outcon.ndim == 2:
outctx = outcon
else:
outctx = outcon[planeid]
pix_ratio = output_wcs.pscale/wcslin_pscale
if wcsmap is None and cdriz is not None:
log.info('Using WCSLIB-based coordinate transformation...')
log.info('stepsize = %s' % stepsize)
mapping = cdriz.DefaultWCSMapping(
input_wcs, output_wcs,
input_wcs.pixel_shape[0], input_wcs.pixel_shape[1],
stepsize
)
else:
#
##Using the Python class for the WCS-based transformation
#
# Use user provided mapping function
log.info('Using coordinate transformation defined by user...')
if wcsmap is None:
wcsmap = wcs_functions.WCSMap
wmap = wcsmap(input_wcs,output_wcs)
mapping = wmap.forward
_shift_fr = 'output'
_shift_un = 'output'
ystart = 0
nmiss = 0
nskip = 0
#
# This call to 'cdriz.tdriz' uses the new C syntax
#
_dny = insci.shape[0]
# Call 'drizzle' to perform image combination
if insci.dtype > np.float32:
#WARNING: Input array recast as a float32 array
insci = insci.astype(np.float32)
_vers,nmiss,nskip = cdriz.tdriz(insci, inwht, outsci, outwht,
outctx, uniqid, ystart, 1, 1, _dny,
pix_ratio, 1.0, 1.0, 'center', pixfrac,
kernel, in_units, expscale, wt_scl,
fillval, nmiss, nskip, 1, mapping)
if nmiss > 0:
log.warning('! %s points were outside the output image.' % nmiss)
if nskip > 0:
log.debug('! Note, %s input lines were skipped completely.' % nskip)
return _vers | ['def', 'do_driz', '(', 'insci', ',', 'input_wcs', ',', 'inwht', ',', 'output_wcs', ',', 'outsci', ',', 'outwht', ',', 'outcon', ',', 'expin', ',', 'in_units', ',', 'wt_scl', ',', 'wcslin_pscale', '=', '1.0', ',', 'uniqid', '=', '1', ',', 'pixfrac', '=', '1.0', ',', 'kernel', '=', "'square'", ',', 'fillval', '=', '"INDEF"', ',', 'stepsize', '=', '10', ',', 'wcsmap', '=', 'None', ')', ':', '# Insure that the fillval parameter gets properly interpreted for use with tdriz', 'if', 'util', '.', 'is_blank', '(', 'fillval', ')', ':', 'fillval', '=', "'INDEF'", 'else', ':', 'fillval', '=', 'str', '(', 'fillval', ')', 'if', 'in_units', '==', "'cps'", ':', 'expscale', '=', '1.0', 'else', ':', 'expscale', '=', 'expin', '# Compute what plane of the context image this input would', '# correspond to:', 'planeid', '=', 'int', '(', '(', 'uniqid', '-', '1', ')', '/', '32', ')', '# Check if the context image has this many planes', 'if', 'outcon', '.', 'ndim', '==', '3', ':', 'nplanes', '=', 'outcon', '.', 'shape', '[', '0', ']', 'elif', 'outcon', '.', 'ndim', '==', '2', ':', 'nplanes', '=', '1', 'else', ':', 'nplanes', '=', '0', 'if', 'nplanes', '<=', 'planeid', ':', 'raise', 'IndexError', '(', '"Not enough planes in drizzle context image"', ')', '# Alias context image to the requested plane if 3d', 'if', 'outcon', '.', 'ndim', '==', '2', ':', 'outctx', '=', 'outcon', 'else', ':', 'outctx', '=', 'outcon', '[', 'planeid', ']', 'pix_ratio', '=', 'output_wcs', '.', 'pscale', '/', 'wcslin_pscale', 'if', 'wcsmap', 'is', 'None', 'and', 'cdriz', 'is', 'not', 'None', ':', 'log', '.', 'info', '(', "'Using WCSLIB-based coordinate transformation...'", ')', 'log', '.', 'info', '(', "'stepsize = %s'", '%', 'stepsize', ')', 'mapping', '=', 'cdriz', '.', 'DefaultWCSMapping', '(', 'input_wcs', ',', 'output_wcs', ',', 'input_wcs', '.', 'pixel_shape', '[', '0', ']', ',', 'input_wcs', '.', 'pixel_shape', '[', '1', ']', ',', 'stepsize', ')', 'else', ':', '#', '##Using the Python class for the WCS-based transformation', '#', '# Use user provided mapping function', 'log', '.', 'info', '(', "'Using coordinate transformation defined by user...'", ')', 'if', 'wcsmap', 'is', 'None', ':', 'wcsmap', '=', 'wcs_functions', '.', 'WCSMap', 'wmap', '=', 'wcsmap', '(', 'input_wcs', ',', 'output_wcs', ')', 'mapping', '=', 'wmap', '.', 'forward', '_shift_fr', '=', "'output'", '_shift_un', '=', "'output'", 'ystart', '=', '0', 'nmiss', '=', '0', 'nskip', '=', '0', '#', "# This call to 'cdriz.tdriz' uses the new C syntax", '#', '_dny', '=', 'insci', '.', 'shape', '[', '0', ']', "# Call 'drizzle' to perform image combination", 'if', 'insci', '.', 'dtype', '>', 'np', '.', 'float32', ':', '#WARNING: Input array recast as a float32 array', 'insci', '=', 'insci', '.', 'astype', '(', 'np', '.', 'float32', ')', '_vers', ',', 'nmiss', ',', 'nskip', '=', 'cdriz', '.', 'tdriz', '(', 'insci', ',', 'inwht', ',', 'outsci', ',', 'outwht', ',', 'outctx', ',', 'uniqid', ',', 'ystart', ',', '1', ',', '1', ',', '_dny', ',', 'pix_ratio', ',', '1.0', ',', '1.0', ',', "'center'", ',', 'pixfrac', ',', 'kernel', ',', 'in_units', ',', 'expscale', ',', 'wt_scl', ',', 'fillval', ',', 'nmiss', ',', 'nskip', ',', '1', ',', 'mapping', ')', 'if', 'nmiss', '>', '0', ':', 'log', '.', 'warning', '(', "'! %s points were outside the output image.'", '%', 'nmiss', ')', 'if', 'nskip', '>', '0', ':', 'log', '.', 'debug', '(', "'! 
Note, %s input lines were skipped completely.'", '%', 'nskip', ')', 'return', '_vers'] | Core routine for performing 'drizzle' operation on a single input image
All input values will be Python objects such as ndarrays, instead
of filenames.
File handling (input and output) will be performed by calling routine. | ['Core', 'routine', 'for', 'performing', 'drizzle', 'operation', 'on', 'a', 'single', 'input', 'image', 'All', 'input', 'values', 'will', 'be', 'Python', 'objects', 'such', 'as', 'ndarrays', 'instead', 'of', 'filenames', '.', 'File', 'handling', '(', 'input', 'and', 'output', ')', 'will', 'be', 'performed', 'by', 'calling', 'routine', '.'] | train | https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/adrizzle.py#L1011-L1101 |
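A shape/dtype sketch of the arrays do_driz expects; the WCS objects (input_wcs, output_wcs) are elided because building them needs stwcs/astropy machinery not shown in this row, and the numeric values are placeholders.
import numpy as np

insci  = np.zeros((1024, 1024), dtype=np.float32)        # input science frame
inwht  = np.ones_like(insci)                             # input weight map
outsci = np.zeros((2048, 2048), dtype=np.float32)        # drizzled science output
outwht = np.zeros_like(outsci)
outcon = np.zeros((1,) + outsci.shape, dtype=np.int32)   # one 32-bit context plane (uniqid 1..32)

# vers = do_driz(insci, input_wcs, inwht, output_wcs, outsci, outwht, outcon,
#                expin=350.0, in_units='counts', wt_scl=1.0,
#                pixfrac=0.8, kernel='square', fillval='INDEF')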
7,924 | scot-dev/scot | scot/xvschema.py | singletrial | def singletrial(num_trials, skipstep=1):
""" Single-trial cross-validation schema
Use one trial for training, all others for testing.
Parameters
----------
num_trials : int
Total number of trials
skipstep : int
only use every `skipstep` trial for training
Returns
-------
gen : generator object
the generator returns tuples (trainset, testset)
"""
for t in range(0, num_trials, skipstep):
trainset = [t]
testset = [i for i in range(trainset[0])] + \
[i for i in range(trainset[-1] + 1, num_trials)]
testset = sort([t % num_trials for t in testset])
yield trainset, testset | python | def singletrial(num_trials, skipstep=1):
""" Single-trial cross-validation schema
Use one trial for training, all others for testing.
Parameters
----------
num_trials : int
Total number of trials
skipstep : int
only use every `skipstep` trial for training
Returns
-------
gen : generator object
the generator returns tuples (trainset, testset)
"""
for t in range(0, num_trials, skipstep):
trainset = [t]
testset = [i for i in range(trainset[0])] + \
[i for i in range(trainset[-1] + 1, num_trials)]
testset = sort([t % num_trials for t in testset])
yield trainset, testset | ['def', 'singletrial', '(', 'num_trials', ',', 'skipstep', '=', '1', ')', ':', 'for', 't', 'in', 'range', '(', '0', ',', 'num_trials', ',', 'skipstep', ')', ':', 'trainset', '=', '[', 't', ']', 'testset', '=', '[', 'i', 'for', 'i', 'in', 'range', '(', 'trainset', '[', '0', ']', ')', ']', '+', '[', 'i', 'for', 'i', 'in', 'range', '(', 'trainset', '[', '-', '1', ']', '+', '1', ',', 'num_trials', ')', ']', 'testset', '=', 'sort', '(', '[', 't', '%', 'num_trials', 'for', 't', 'in', 'testset', ']', ')', 'yield', 'trainset', ',', 'testset'] | Single-trial cross-validation schema
Use one trial for training, all others for testing.
Parameters
----------
num_trials : int
Total number of trials
skipstep : int
only use every `skipstep` trial for training
Returns
-------
gen : generator object
the generator returns tuples (trainset, testset) | ['Single', '-', 'trial', 'cross', '-', 'validation', 'schema'] | train | https://github.com/scot-dev/scot/blob/48598b79d4400dad893b134cd2194715511facda/scot/xvschema.py#L14-L36 |
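A concrete walk-through of the generator above; the import path follows scot/xvschema.py, and testset comes back as a sorted array.
from scot.xvschema import singletrial

for trainset, testset in singletrial(num_trials=4):
    print(trainset, testset)
# [0] [1 2 3]
# [1] [0 2 3]
# [2] [0 1 3]
# [3] [0 1 2]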
7,925 | lemieuxl/pyGenClean | pyGenClean/run_data_clean_up.py | run_duplicated_snps | def run_duplicated_snps(in_prefix, in_type, out_prefix, base_dir, options):
"""Runs step2 (duplicated snps).
:param in_prefix: the prefix of the input files.
:param in_type: the type of the input files.
:param out_prefix: the output prefix.
:param base_dir: the output directory.
:param options: the options needed.
:type in_prefix: str
:type in_type: str
:type out_prefix: str
:type base_dir: str
:type options: list
:returns: a tuple containing the prefix of the output files (the input
prefix for the next script) and the type of the output files
(``tfile``).
This function calls the :py:mod:`pyGenClean.DupSNPs.duplicated_snps`
module. The required file type for this module is ``tfile``, hence the need
to use the :py:func:`check_input_files` function to check if the input file
type is the correct one, or to create it if needed.
.. note::
This function creates a ``map`` file, needed for the
:py:mod:`pyGenClean.DupSNPs.duplicated_snps` module.
"""
# Creating the output directory
os.mkdir(out_prefix)
# We know we need a tfile
required_type = "tfile"
check_input_files(in_prefix, in_type, required_type)
# This step requires a map file (we now have a tfile)
if not os.path.isfile(in_prefix + ".map"):
outputFile = None
try:
outputFile = open(in_prefix + ".map", "w")
except IOError:
msg = "{}: can't write file".format(in_prefix + ".map")
raise ProgramError(msg)
try:
with open(in_prefix + ".tped", 'r') as inputFile:
for line in inputFile:
row = createRowFromPlinkSpacedOutput(line)
print >>outputFile, "\t".join(row[:4])
except IOError:
msg = "{}: no such file".format(in_prefix + ".tped")
raise ProgramError(msg)
outputFile.close()
# We need to inject the name of the input file and the name of the output
# prefix
script_prefix = os.path.join(out_prefix, "dup_snps")
options += ["--{}".format(required_type), in_prefix,
"--out", script_prefix]
# We run the script
try:
duplicated_snps.main(options)
except duplicated_snps.ProgramError as e:
msg = "duplicated_snps: {}".format(e)
raise ProgramError(msg)
# Reading the number of duplicated markers
duplicated_count = defaultdict(int)
if os.path.isfile(script_prefix + ".duplicated_snps.tped"):
with open(script_prefix + ".duplicated_snps.tped", "r") as i_file:
duplicated_count = Counter(
(i[0], i[3]) for i in [
tuple(createRowFromPlinkSpacedOutput(line)[:4])
for line in i_file
]
)
# Counting the number of zeroed out genotypes per duplicated markers
zeroed_out = defaultdict(int)
if os.path.isfile(script_prefix + ".zeroed_out"):
with open(script_prefix + ".zeroed_out", "r") as i_file:
zeroed_out = Counter([
tuple(line.rstrip("\r\n").split("\t")[:2])
for line in i_file.read().splitlines()[1:]
])
nb_zeroed_out = sum(zeroed_out.values())
# Checking the not good enough markers
not_good_enough = set()
if os.path.isfile(script_prefix + ".not_good_enough"):
with open(script_prefix + ".not_good_enough", "r") as i_file:
not_good_enough = {
line.rstrip("\r\n").split("\t")[0]
for line in i_file.read().splitlines()[1:]
}
# Checking which markers were chosen
chosen_markers = set()
if os.path.isfile(script_prefix + ".chosen_snps.info"):
with open(script_prefix + ".chosen_snps.info", "r") as i_file:
chosen_markers = set(i_file.read().splitlines())
# Finding if some 'not_good_enough' samples were chosen
not_good_still = chosen_markers & not_good_enough
# Adding the 'not chosen markers' to the list of excluded markers
removed_markers = set()
o_filename = os.path.join(base_dir, "excluded_markers.txt")
if os.path.isfile(script_prefix + ".removed_duplicates"):
with open(script_prefix + ".removed_duplicates", "r") as i_file:
removed_markers = set(i_file.read().splitlines())
with open(o_filename, "a") as o_file:
for marker_id in removed_markers:
print >>o_file, marker_id + "\t" + "removed duplicate"
# Writing the summary results
total_remaining = 0
with open(os.path.join(base_dir, "results_summary.txt"), "a") as o_file:
print >>o_file, "# {}".format(script_prefix)
rep_counter = Counter(duplicated_count.values()).most_common()
if rep_counter:
print >>o_file, "Number of replicated markers"
else:
print >>o_file, "Number of replicated markers\t0"
total_nb_removed_rep = 0
for rep_type, rep_count in rep_counter:
nb_removed_rep = (rep_count * rep_type) - rep_count
print >>o_file, " - x{}\t{:,d}\t-{:,d}".format(
rep_type,
rep_count,
nb_removed_rep,
)
total_nb_removed_rep += nb_removed_rep
total_remaining = total_nb_removed_rep - len(removed_markers)
print >>o_file, (
"Number of replicated markers kept\t{nb:,d}\t+{nb:,d}".format(
nb=total_remaining,
)
)
print >>o_file, ("Poorly chosen replicated markers\t"
"{nb:,d}".format(nb=len(not_good_still)))
print >>o_file, ("Final number of excluded markers\t"
"{nb:,d}".format(nb=len(removed_markers)))
print >>o_file, "---"
# We create a LaTeX summary
latex_file = os.path.join(script_prefix + ".summary.tex")
try:
with open(latex_file, "w") as o_file:
print >>o_file, latex_template.subsection(
duplicated_snps.pretty_name
)
text = (
"A total of {:,d} duplicated marker{} {} found.".format(
len(duplicated_count),
"s" if len(duplicated_count) > 1 else "",
"were" if len(duplicated_count) > 1 else "was",
)
)
print >>o_file, latex_template.wrap_lines(text)
if len(duplicated_count) > 0:
text = (
"While merging duplicates, a total of {:,d} genotype{} {} "
"zeroed out. A total of {:,d} marker{} {} found to be not "
"good enough for duplicate completion.".format(
nb_zeroed_out,
"s" if nb_zeroed_out > 1 else "",
"were" if nb_zeroed_out > 1 else "was",
len(not_good_enough),
"s" if len(not_good_enough) > 1 else "",
"were" if len(not_good_enough) > 1 else "was",
)
)
print >>o_file, latex_template.wrap_lines(text)
text = (
"A total of {:,d} marker{} {} excluded while creating the "
"final dataset.".format(
len(removed_markers),
"s" if len(removed_markers) > 1 else "",
"were" if len(removed_markers) > 1 else "was",
)
)
print >>o_file, latex_template.wrap_lines(text)
if total_remaining > 0:
text = latex_template.textbf(
"In total, {:,d} maker{} {} not merged for different "
"reasons (low completion rate, discordant allele, "
"discordant MAF, etc) and {} still present in the "
"dataset.".format(
total_remaining,
"s" if total_remaining > 1 else "",
"were" if total_remaining > 1 else "was",
"are" if total_remaining > 1 else "is",
)
)
print >>o_file, latex_template.wrap_lines(text)
if len(not_good_still) > 0:
start = "A total of"
end = " and {} still present in the final dataset.".format(
"are" if len(not_good_still) > 1 else "is",
)
if total_remaining > 0:
start = "Out of these,"
end = "."
text = latex_template.textbf(
start + " {:,d} marker{} {} not good enough for "
"completion, but {} still selected as the best "
"duplicate{}".format(
len(not_good_still),
"s" if len(not_good_still) > 1 else "",
"were" if len(not_good_still) > 1 else "was",
"were" if len(not_good_still) > 1 else "was",
end,
)
)
print >>o_file, latex_template.wrap_lines(text)
except IOError:
msg = "{}: cannot write LaTeX summary".format(latex_file)
raise ProgramError(msg)
# We know this step does produce a new data set (tfile), so we return it
return _StepResult(
next_file=os.path.join(out_prefix, "dup_snps.final"),
next_file_type="tfile",
latex_summary=latex_file,
description=duplicated_snps.desc,
long_description=duplicated_snps.long_desc,
graph_path=None,
) | python | def run_duplicated_snps(in_prefix, in_type, out_prefix, base_dir, options):
"""Runs step2 (duplicated snps).
:param in_prefix: the prefix of the input files.
:param in_type: the type of the input files.
:param out_prefix: the output prefix.
:param base_dir: the output directory.
:param options: the options needed.
:type in_prefix: str
:type in_type: str
:type out_prefix: str
:type base_dir: str
:type options: list
:returns: a tuple containing the prefix of the output files (the input
prefix for the next script) and the type of the output files
(``tfile``).
This function calls the :py:mod:`pyGenClean.DupSNPs.duplicated_snps`
module. The required file type for this module is ``tfile``, hence the need
to use the :py:func:`check_input_files` to check if the file input file
type is the good one, or to create it if needed.
.. note::
This function creates a ``map`` file, needed for the
:py:mod:`pyGenClean.DupSNPs.duplicated_snps` module.
"""
# Creating the output directory
os.mkdir(out_prefix)
# We know we need a tfile
required_type = "tfile"
check_input_files(in_prefix, in_type, required_type)
# This step require a map file (we now have a tfile)
if not os.path.isfile(in_prefix + ".map"):
outputFile = None
try:
outputFile = open(in_prefix + ".map", "w")
except IOError:
msg = "{}: can't write file".format(in_prefix + ".map")
raise ProgramError(msg)
try:
with open(in_prefix + ".tped", 'r') as inputFile:
for line in inputFile:
row = createRowFromPlinkSpacedOutput(line)
print >>outputFile, "\t".join(row[:4])
except IOError:
msg = "{}: no such file".format(in_prefix + ".tped")
raise ProgramError(msg)
outputFile.close()
# We need to inject the name of the input file and the name of the output
# prefix
script_prefix = os.path.join(out_prefix, "dup_snps")
options += ["--{}".format(required_type), in_prefix,
"--out", script_prefix]
# We run the script
try:
duplicated_snps.main(options)
except duplicated_snps.ProgramError as e:
msg = "duplicated_snps: {}".format(e)
raise ProgramError(msg)
# Reading the number of duplicated markers
duplicated_count = defaultdict(int)
if os.path.isfile(script_prefix + ".duplicated_snps.tped"):
with open(script_prefix + ".duplicated_snps.tped", "r") as i_file:
duplicated_count = Counter(
(i[0], i[3]) for i in [
tuple(createRowFromPlinkSpacedOutput(line)[:4])
for line in i_file
]
)
# Counting the number of zeroed out genotypes per duplicated markers
zeroed_out = defaultdict(int)
if os.path.isfile(script_prefix + ".zeroed_out"):
with open(script_prefix + ".zeroed_out", "r") as i_file:
zeroed_out = Counter([
tuple(line.rstrip("\r\n").split("\t")[:2])
for line in i_file.read().splitlines()[1:]
])
nb_zeroed_out = sum(zeroed_out.values())
# Checking the not good enough markers
not_good_enough = set()
if os.path.isfile(script_prefix + ".not_good_enough"):
with open(script_prefix + ".not_good_enough", "r") as i_file:
not_good_enough = {
line.rstrip("\r\n").split("\t")[0]
for line in i_file.read().splitlines()[1:]
}
# Checking which markers were chosen
chosen_markers = set()
if os.path.isfile(script_prefix + ".chosen_snps.info"):
with open(script_prefix + ".chosen_snps.info", "r") as i_file:
chosen_markers = set(i_file.read().splitlines())
# Finding if some 'not_good_enough' samples were chosen
not_good_still = chosen_markers & not_good_enough
# Adding the 'not chosen markers' to the list of excluded markers
removed_markers = set()
o_filename = os.path.join(base_dir, "excluded_markers.txt")
if os.path.isfile(script_prefix + ".removed_duplicates"):
with open(script_prefix + ".removed_duplicates", "r") as i_file:
removed_markers = set(i_file.read().splitlines())
with open(o_filename, "a") as o_file:
for marker_id in removed_markers:
print >>o_file, marker_id + "\t" + "removed duplicate"
# Writing the summary results
total_remaining = 0
with open(os.path.join(base_dir, "results_summary.txt"), "a") as o_file:
print >>o_file, "# {}".format(script_prefix)
rep_counter = Counter(duplicated_count.values()).most_common()
if rep_counter:
print >>o_file, "Number of replicated markers"
else:
print >>o_file, "Number of replicated markers\t0"
total_nb_removed_rep = 0
for rep_type, rep_count in rep_counter:
nb_removed_rep = (rep_count * rep_type) - rep_count
print >>o_file, " - x{}\t{:,d}\t-{:,d}".format(
rep_type,
rep_count,
nb_removed_rep,
)
total_nb_removed_rep += nb_removed_rep
total_remaining = total_nb_removed_rep - len(removed_markers)
print >>o_file, (
"Number of replicated markers kept\t{nb:,d}\t+{nb:,d}".format(
nb=total_remaining,
)
)
print >>o_file, ("Poorly chosen replicated markers\t"
"{nb:,d}".format(nb=len(not_good_still)))
print >>o_file, ("Final number of excluded markers\t"
"{nb:,d}".format(nb=len(removed_markers)))
print >>o_file, "---"
# We create a LaTeX summary
latex_file = os.path.join(script_prefix + ".summary.tex")
try:
with open(latex_file, "w") as o_file:
print >>o_file, latex_template.subsection(
duplicated_snps.pretty_name
)
text = (
"A total of {:,d} duplicated marker{} {} found.".format(
len(duplicated_count),
"s" if len(duplicated_count) > 1 else "",
"were" if len(duplicated_count) > 1 else "was",
)
)
print >>o_file, latex_template.wrap_lines(text)
if len(duplicated_count) > 0:
text = (
"While merging duplicates, a total of {:,d} genotype{} {} "
"zeroed out. A total of {:,d} marker{} {} found to be not "
"good enough for duplicate completion.".format(
nb_zeroed_out,
"s" if nb_zeroed_out > 1 else "",
"were" if nb_zeroed_out > 1 else "was",
len(not_good_enough),
"s" if len(not_good_enough) > 1 else "",
"were" if len(not_good_enough) > 1 else "was",
)
)
print >>o_file, latex_template.wrap_lines(text)
text = (
"A total of {:,d} marker{} {} excluded while creating the "
"final dataset.".format(
len(removed_markers),
"s" if len(removed_markers) > 1 else "",
"were" if len(removed_markers) > 1 else "was",
)
)
print >>o_file, latex_template.wrap_lines(text)
if total_remaining > 0:
text = latex_template.textbf(
"In total, {:,d} maker{} {} not merged for different "
"reasons (low completion rate, discordant allele, "
"discordant MAF, etc) and {} still present in the "
"dataset.".format(
total_remaining,
"s" if total_remaining > 1 else "",
"were" if total_remaining > 1 else "was",
"are" if total_remaining > 1 else "is",
)
)
print >>o_file, latex_template.wrap_lines(text)
if len(not_good_still) > 0:
start = "A total of"
end = " and {} still present in the final dataset.".format(
"are" if len(not_good_still) > 1 else "is",
)
if total_remaining > 0:
start = "Out of these,"
end = "."
text = latex_template.textbf(
start + " {:,d} marker{} {} not good enough for "
"completion, but {} still selected as the best "
"duplicate{}".format(
len(not_good_still),
"s" if len(not_good_still) > 1 else "",
"were" if len(not_good_still) > 1 else "was",
"were" if len(not_good_still) > 1 else "was",
end,
)
)
print >>o_file, latex_template.wrap_lines(text)
except IOError:
msg = "{}: cannot write LaTeX summary".format(latex_file)
raise ProgramError(msg)
# We know this step does produce a new data set (tfile), so we return it
return _StepResult(
next_file=os.path.join(out_prefix, "dup_snps.final"),
next_file_type="tfile",
latex_summary=latex_file,
description=duplicated_snps.desc,
long_description=duplicated_snps.long_desc,
graph_path=None,
) | ['def', 'run_duplicated_snps', '(', 'in_prefix', ',', 'in_type', ',', 'out_prefix', ',', 'base_dir', ',', 'options', ')', ':', '# Creating the output directory', 'os', '.', 'mkdir', '(', 'out_prefix', ')', '# We know we need a tfile', 'required_type', '=', '"tfile"', 'check_input_files', '(', 'in_prefix', ',', 'in_type', ',', 'required_type', ')', '# This step require a map file (we now have a tfile)', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'in_prefix', '+', '".map"', ')', ':', 'outputFile', '=', 'None', 'try', ':', 'outputFile', '=', 'open', '(', 'in_prefix', '+', '".map"', ',', '"w"', ')', 'except', 'IOError', ':', 'msg', '=', '"{}: can\'t write file"', '.', 'format', '(', 'in_prefix', '+', '".map"', ')', 'raise', 'ProgramError', '(', 'msg', ')', 'try', ':', 'with', 'open', '(', 'in_prefix', '+', '".tped"', ',', "'r'", ')', 'as', 'inputFile', ':', 'for', 'line', 'in', 'inputFile', ':', 'row', '=', 'createRowFromPlinkSpacedOutput', '(', 'line', ')', 'print', '>>', 'outputFile', ',', '"\\t"', '.', 'join', '(', 'row', '[', ':', '4', ']', ')', 'except', 'IOError', ':', 'msg', '=', '"{}: no such file"', '.', 'format', '(', 'in_prefix', '+', '".tped"', ')', 'raise', 'ProgramError', '(', 'msg', ')', 'outputFile', '.', 'close', '(', ')', '# We need to inject the name of the input file and the name of the output', '# prefix', 'script_prefix', '=', 'os', '.', 'path', '.', 'join', '(', 'out_prefix', ',', '"dup_snps"', ')', 'options', '+=', '[', '"--{}"', '.', 'format', '(', 'required_type', ')', ',', 'in_prefix', ',', '"--out"', ',', 'script_prefix', ']', '# We run the script', 'try', ':', 'duplicated_snps', '.', 'main', '(', 'options', ')', 'except', 'duplicated_snps', '.', 'ProgramError', 'as', 'e', ':', 'msg', '=', '"duplicated_snps: {}"', '.', 'format', '(', 'e', ')', 'raise', 'ProgramError', '(', 'msg', ')', '# Reading the number of duplicated markers', 'duplicated_count', '=', 'defaultdict', '(', 'int', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'script_prefix', '+', '".duplicated_snps.tped"', ')', ':', 'with', 'open', '(', 'script_prefix', '+', '".duplicated_snps.tped"', ',', '"r"', ')', 'as', 'i_file', ':', 'duplicated_count', '=', 'Counter', '(', '(', 'i', '[', '0', ']', ',', 'i', '[', '3', ']', ')', 'for', 'i', 'in', '[', 'tuple', '(', 'createRowFromPlinkSpacedOutput', '(', 'line', ')', '[', ':', '4', ']', ')', 'for', 'line', 'in', 'i_file', ']', ')', '# Counting the number of zeroed out genotypes per duplicated markers', 'zeroed_out', '=', 'defaultdict', '(', 'int', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'script_prefix', '+', '".zeroed_out"', ')', ':', 'with', 'open', '(', 'script_prefix', '+', '".zeroed_out"', ',', '"r"', ')', 'as', 'i_file', ':', 'zeroed_out', '=', 'Counter', '(', '[', 'tuple', '(', 'line', '.', 'rstrip', '(', '"\\r\\n"', ')', '.', 'split', '(', '"\\t"', ')', '[', ':', '2', ']', ')', 'for', 'line', 'in', 'i_file', '.', 'read', '(', ')', '.', 'splitlines', '(', ')', '[', '1', ':', ']', ']', ')', 'nb_zeroed_out', '=', 'sum', '(', 'zeroed_out', '.', 'values', '(', ')', ')', '# Checking the not good enough markers', 'not_good_enough', '=', 'set', '(', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'script_prefix', '+', '".not_good_enough"', ')', ':', 'with', 'open', '(', 'script_prefix', '+', '".not_good_enough"', ',', '"r"', ')', 'as', 'i_file', ':', 'not_good_enough', '=', '{', 'line', '.', 'rstrip', '(', '"\\r\\n"', ')', '.', 'split', '(', '"\\t"', ')', '[', '0', ']', 'for', 'line', 'in', 'i_file', '.', 'read', '(', ')', '.', 
'splitlines', '(', ')', '[', '1', ':', ']', '}', '# Checking which markers were chosen', 'chosen_markers', '=', 'set', '(', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'script_prefix', '+', '".chosen_snps.info"', ')', ':', 'with', 'open', '(', 'script_prefix', '+', '".chosen_snps.info"', ',', '"r"', ')', 'as', 'i_file', ':', 'chosen_markers', '=', 'set', '(', 'i_file', '.', 'read', '(', ')', '.', 'splitlines', '(', ')', ')', "# Finding if some 'not_good_enough' samples were chosen", 'not_good_still', '=', 'chosen_markers', '&', 'not_good_enough', "# Adding the 'not chosen markers' to the list of excluded markers", 'removed_markers', '=', 'set', '(', ')', 'o_filename', '=', 'os', '.', 'path', '.', 'join', '(', 'base_dir', ',', '"excluded_markers.txt"', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'script_prefix', '+', '".removed_duplicates"', ')', ':', 'with', 'open', '(', 'script_prefix', '+', '".removed_duplicates"', ',', '"r"', ')', 'as', 'i_file', ':', 'removed_markers', '=', 'set', '(', 'i_file', '.', 'read', '(', ')', '.', 'splitlines', '(', ')', ')', 'with', 'open', '(', 'o_filename', ',', '"a"', ')', 'as', 'o_file', ':', 'for', 'marker_id', 'in', 'removed_markers', ':', 'print', '>>', 'o_file', ',', 'marker_id', '+', '"\\t"', '+', '"removed duplicate"', '# Writing the summary results', 'total_remaining', '=', '0', 'with', 'open', '(', 'os', '.', 'path', '.', 'join', '(', 'base_dir', ',', '"results_summary.txt"', ')', ',', '"a"', ')', 'as', 'o_file', ':', 'print', '>>', 'o_file', ',', '"# {}"', '.', 'format', '(', 'script_prefix', ')', 'rep_counter', '=', 'Counter', '(', 'duplicated_count', '.', 'values', '(', ')', ')', '.', 'most_common', '(', ')', 'if', 'rep_counter', ':', 'print', '>>', 'o_file', ',', '"Number of replicated markers"', 'else', ':', 'print', '>>', 'o_file', ',', '"Number of replicated markers\\t0"', 'total_nb_removed_rep', '=', '0', 'for', 'rep_type', ',', 'rep_count', 'in', 'rep_counter', ':', 'nb_removed_rep', '=', '(', 'rep_count', '*', 'rep_type', ')', '-', 'rep_count', 'print', '>>', 'o_file', ',', '" - x{}\\t{:,d}\\t-{:,d}"', '.', 'format', '(', 'rep_type', ',', 'rep_count', ',', 'nb_removed_rep', ',', ')', 'total_nb_removed_rep', '+=', 'nb_removed_rep', 'total_remaining', '=', 'total_nb_removed_rep', '-', 'len', '(', 'removed_markers', ')', 'print', '>>', 'o_file', ',', '(', '"Number of replicated markers kept\\t{nb:,d}\\t+{nb:,d}"', '.', 'format', '(', 'nb', '=', 'total_remaining', ',', ')', ')', 'print', '>>', 'o_file', ',', '(', '"Poorly chosen replicated markers\\t"', '"{nb:,d}"', '.', 'format', '(', 'nb', '=', 'len', '(', 'not_good_still', ')', ')', ')', 'print', '>>', 'o_file', ',', '(', '"Final number of excluded markers\\t"', '"{nb:,d}"', '.', 'format', '(', 'nb', '=', 'len', '(', 'removed_markers', ')', ')', ')', 'print', '>>', 'o_file', ',', '"---"', '# We create a LaTeX summary', 'latex_file', '=', 'os', '.', 'path', '.', 'join', '(', 'script_prefix', '+', '".summary.tex"', ')', 'try', ':', 'with', 'open', '(', 'latex_file', ',', '"w"', ')', 'as', 'o_file', ':', 'print', '>>', 'o_file', ',', 'latex_template', '.', 'subsection', '(', 'duplicated_snps', '.', 'pretty_name', ')', 'text', '=', '(', '"A total of {:,d} duplicated marker{} {} found."', '.', 'format', '(', 'len', '(', 'duplicated_count', ')', ',', '"s"', 'if', 'len', '(', 'duplicated_count', ')', '>', '1', 'else', '""', ',', '"were"', 'if', 'len', '(', 'duplicated_count', ')', '>', '1', 'else', '"was"', ',', ')', ')', 'print', '>>', 'o_file', ',', 'latex_template', '.', 
'wrap_lines', '(', 'text', ')', 'if', 'len', '(', 'duplicated_count', ')', '>', '0', ':', 'text', '=', '(', '"While merging duplicates, a total of {:,d} genotype{} {} "', '"zeroed out. A total of {:,d} marker{} {} found to be not "', '"good enough for duplicate completion."', '.', 'format', '(', 'nb_zeroed_out', ',', '"s"', 'if', 'nb_zeroed_out', '>', '1', 'else', '""', ',', '"were"', 'if', 'nb_zeroed_out', '>', '1', 'else', '"was"', ',', 'len', '(', 'not_good_enough', ')', ',', '"s"', 'if', 'len', '(', 'not_good_enough', ')', '>', '1', 'else', '""', ',', '"were"', 'if', 'len', '(', 'not_good_enough', ')', '>', '1', 'else', '"was"', ',', ')', ')', 'print', '>>', 'o_file', ',', 'latex_template', '.', 'wrap_lines', '(', 'text', ')', 'text', '=', '(', '"A total of {:,d} marker{} {} excluded while creating the "', '"final dataset."', '.', 'format', '(', 'len', '(', 'removed_markers', ')', ',', '"s"', 'if', 'len', '(', 'removed_markers', ')', '>', '1', 'else', '""', ',', '"were"', 'if', 'len', '(', 'removed_markers', ')', '>', '1', 'else', '"was"', ',', ')', ')', 'print', '>>', 'o_file', ',', 'latex_template', '.', 'wrap_lines', '(', 'text', ')', 'if', 'total_remaining', '>', '0', ':', 'text', '=', 'latex_template', '.', 'textbf', '(', '"In total, {:,d} maker{} {} not merged for different "', '"reasons (low completion rate, discordant allele, "', '"discordant MAF, etc) and {} still present in the "', '"dataset."', '.', 'format', '(', 'total_remaining', ',', '"s"', 'if', 'total_remaining', '>', '1', 'else', '""', ',', '"were"', 'if', 'total_remaining', '>', '1', 'else', '"was"', ',', '"are"', 'if', 'total_remaining', '>', '1', 'else', '"is"', ',', ')', ')', 'print', '>>', 'o_file', ',', 'latex_template', '.', 'wrap_lines', '(', 'text', ')', 'if', 'len', '(', 'not_good_still', ')', '>', '0', ':', 'start', '=', '"A total of"', 'end', '=', '" and {} still present in the final dataset."', '.', 'format', '(', '"are"', 'if', 'len', '(', 'not_good_still', ')', '>', '1', 'else', '"is"', ',', ')', 'if', 'total_remaining', '>', '0', ':', 'start', '=', '"Out of these,"', 'end', '=', '"."', 'text', '=', 'latex_template', '.', 'textbf', '(', 'start', '+', '" {:,d} marker{} {} not good enough for "', '"completion, but {} still selected as the best "', '"duplicate{}"', '.', 'format', '(', 'len', '(', 'not_good_still', ')', ',', '"s"', 'if', 'len', '(', 'not_good_still', ')', '>', '1', 'else', '""', ',', '"were"', 'if', 'len', '(', 'not_good_still', ')', '>', '1', 'else', '"was"', ',', '"were"', 'if', 'len', '(', 'not_good_still', ')', '>', '1', 'else', '"was"', ',', 'end', ',', ')', ')', 'print', '>>', 'o_file', ',', 'latex_template', '.', 'wrap_lines', '(', 'text', ')', 'except', 'IOError', ':', 'msg', '=', '"{}: cannot write LaTeX summary"', '.', 'format', '(', 'latex_file', ')', 'raise', 'ProgramError', '(', 'msg', ')', '# We know this step does produce a new data set (tfile), so we return it', 'return', '_StepResult', '(', 'next_file', '=', 'os', '.', 'path', '.', 'join', '(', 'out_prefix', ',', '"dup_snps.final"', ')', ',', 'next_file_type', '=', '"tfile"', ',', 'latex_summary', '=', 'latex_file', ',', 'description', '=', 'duplicated_snps', '.', 'desc', ',', 'long_description', '=', 'duplicated_snps', '.', 'long_desc', ',', 'graph_path', '=', 'None', ',', ')'] | Runs step2 (duplicated snps).
:param in_prefix: the prefix of the input files.
:param in_type: the type of the input files.
:param out_prefix: the output prefix.
:param base_dir: the output directory.
:param options: the options needed.
:type in_prefix: str
:type in_type: str
:type out_prefix: str
:type base_dir: str
:type options: list
:returns: a tuple containing the prefix of the output files (the input
prefix for the next script) and the type of the output files
(``tfile``).
This function calls the :py:mod:`pyGenClean.DupSNPs.duplicated_snps`
module. The required file type for this module is ``tfile``, hence the need
to use the :py:func:`check_input_files` to check if the file input file
type is the good one, or to create it if needed.
.. note::
This function creates a ``map`` file, needed for the
:py:mod:`pyGenClean.DupSNPs.duplicated_snps` module. | ['Runs', 'step2', '(', 'duplicated', 'snps', ')', '.'] | train | https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/run_data_clean_up.py#L496-L731 |
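A hedged sketch of how this pipeline step might be driven; every path and option below is a placeholder, not a value taken from the real pipeline configuration:

result = run_duplicated_snps(
    in_prefix="output/step1/initial",        # assumed prefix of an existing tfile (.tped/.tfam)
    in_type="tfile",
    out_prefix="output/step2_duplicated_snps",
    base_dir="output",                       # where summary and exclusion files are appended
    options=[],                              # extra duplicated_snps options, if any
)
# The returned _StepResult points the pipeline at the cleaned data set:
# result.next_file == "output/step2_duplicated_snps/dup_snps.final"
# result.next_file_type == "tfile"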
7,926 | djgagne/hagelslag | hagelslag/processing/STObject.py | STObject.get_corner | def get_corner(self, time):
"""
Gets the corner array indices of the STObject at a given time that corresponds
to the upper left corner of the bounding box for the STObject.
Args:
time: time at which the corner is being extracted.
Returns:
corner index.
"""
if self.start_time <= time <= self.end_time:
diff = time - self.start_time
return self.i[diff][0, 0], self.j[diff][0, 0]
else:
return -1, -1 | python | def get_corner(self, time):
"""
Gets the corner array indices of the STObject at a given time that corresponds
to the upper left corner of the bounding box for the STObject.
Args:
time: time at which the corner is being extracted.
Returns:
corner index.
"""
if self.start_time <= time <= self.end_time:
diff = time - self.start_time
return self.i[diff][0, 0], self.j[diff][0, 0]
else:
return -1, -1 | ['def', 'get_corner', '(', 'self', ',', 'time', ')', ':', 'if', 'self', '.', 'start_time', '<=', 'time', '<=', 'self', '.', 'end_time', ':', 'diff', '=', 'time', '-', 'self', '.', 'start_time', 'return', 'self', '.', 'i', '[', 'diff', ']', '[', '0', ',', '0', ']', ',', 'self', '.', 'j', '[', 'diff', ']', '[', '0', ',', '0', ']', 'else', ':', 'return', '-', '1', ',', '-', '1'] | Gets the corner array indices of the STObject at a given time that corresponds
to the upper left corner of the bounding box for the STObject.
Args:
time: time at which the corner is being extracted.
Returns:
corner index. | ['Gets', 'the', 'corner', 'array', 'indices', 'of', 'the', 'STObject', 'at', 'a', 'given', 'time', 'that', 'corresponds', 'to', 'the', 'upper', 'left', 'corner', 'of', 'the', 'bounding', 'box', 'for', 'the', 'STObject', '.'] | train | https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L155-L170 |
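A short usage sketch of the sentinel behaviour above; st_obj stands in for an already-built STObject:

i_corner, j_corner = st_obj.get_corner(time=12)
if (i_corner, j_corner) == (-1, -1):
    # 12 falls outside the [start_time, end_time] window of the object
    print("requested time is outside the object's lifetime")
else:
    print("upper-left bounding-box corner:", i_corner, j_corner)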
7,927 | mitsei/dlkit | dlkit/aws_adapter/repository/sessions.py | AssetCompositionDesignSession.order_assets | def order_assets(self, asset_ids, composition_id):
"""Reorders a set of assets in a composition.
arg: asset_ids (osid.id.Id[]): ``Ids`` for a set of
``Assets``
arg: composition_id (osid.id.Id): ``Id`` of the
``Composition``
raise: NotFound - ``composition_id`` not found or, an
``asset_id`` not related to ``composition_id``
raise: NullArgument - ``instruction_ids`` or ``agenda_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
self._provider_session.order_assets(self, asset_ids, composition_id) | python | def order_assets(self, asset_ids, composition_id):
"""Reorders a set of assets in a composition.
arg: asset_ids (osid.id.Id[]): ``Ids`` for a set of
``Assets``
arg: composition_id (osid.id.Id): ``Id`` of the
``Composition``
raise: NotFound - ``composition_id`` not found or, an
``asset_id`` not related to ``composition_id``
raise: NullArgument - ``instruction_ids`` or ``agenda_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
self._provider_session.order_assets(self, asset_ids, composition_id) | ['def', 'order_assets', '(', 'self', ',', 'asset_ids', ',', 'composition_id', ')', ':', 'self', '.', '_provider_session', '.', 'order_assets', '(', 'self', ',', 'asset_ids', ',', 'composition_id', ')'] | Reorders a set of assets in a composition.
arg: asset_ids (osid.id.Id[]): ``Ids`` for a set of
``Assets``
arg: composition_id (osid.id.Id): ``Id`` of the
``Composition``
raise: NotFound - ``composition_id`` not found or, an
``asset_id`` not related to ``composition_id``
raise: NullArgument - ``instruction_ids`` or ``agenda_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | ['Reorders', 'a', 'set', 'of', 'assets', 'in', 'a', 'composition', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/aws_adapter/repository/sessions.py#L2097-L2113 |
7,928 | PyHDI/Pyverilog | pyverilog/vparser/parser.py | VerilogParser.p_ioport_head | def p_ioport_head(self, p):
'ioport_head : sigtypes portname'
p[0] = self.create_ioport(p[1], p[2], lineno=p.lineno(2))
p.set_lineno(0, p.lineno(1)) | python | def p_ioport_head(self, p):
'ioport_head : sigtypes portname'
p[0] = self.create_ioport(p[1], p[2], lineno=p.lineno(2))
p.set_lineno(0, p.lineno(1)) | ['def', 'p_ioport_head', '(', 'self', ',', 'p', ')', ':', 'p', '[', '0', ']', '=', 'self', '.', 'create_ioport', '(', 'p', '[', '1', ']', ',', 'p', '[', '2', ']', ',', 'lineno', '=', 'p', '.', 'lineno', '(', '2', ')', ')', 'p', '.', 'set_lineno', '(', '0', ',', 'p', '.', 'lineno', '(', '1', ')', ')'] | ioport_head : sigtypes portname | ['ioport_head', ':', 'sigtypes', 'portname'] | train | https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L418-L421 |
7,929 | openego/ding0 | ding0/core/network/__init__.py | RingDing0.lv_load_areas | def lv_load_areas(self):
""" #TODO: description
"""
for lv_load_area in self._grid._graph.nodes():
if isinstance(lv_load_area, LVLoadAreaDing0):
if lv_load_area.ring == self:
yield lv_load_area | python | def lv_load_areas(self):
""" #TODO: description
"""
for lv_load_area in self._grid._graph.nodes():
if isinstance(lv_load_area, LVLoadAreaDing0):
if lv_load_area.ring == self:
yield lv_load_area | ['def', 'lv_load_areas', '(', 'self', ')', ':', 'for', 'lv_load_area', 'in', 'self', '.', '_grid', '.', '_graph', '.', 'nodes', '(', ')', ':', 'if', 'isinstance', '(', 'lv_load_area', ',', 'LVLoadAreaDing0', ')', ':', 'if', 'lv_load_area', '.', 'ring', '==', 'self', ':', 'yield', 'lv_load_area'] | #TODO: description | ['#TODO', ':', 'description'] | train | https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/core/network/__init__.py#L534-L540 |
7,930 | siznax/wptools | wptools/wikidata.py | WPToolsWikidata._query | def _query(self, action, qobj):
"""
returns wikidata query string
"""
if action == 'labels':
return qobj.labels(self._pop_entities())
elif action == 'wikidata':
return qobj.wikidata(self.params.get('title'),
self.params.get('wikibase')) | python | def _query(self, action, qobj):
"""
returns wikidata query string
"""
if action == 'labels':
return qobj.labels(self._pop_entities())
elif action == 'wikidata':
return qobj.wikidata(self.params.get('title'),
self.params.get('wikibase')) | ['def', '_query', '(', 'self', ',', 'action', ',', 'qobj', ')', ':', 'if', 'action', '==', "'labels'", ':', 'return', 'qobj', '.', 'labels', '(', 'self', '.', '_pop_entities', '(', ')', ')', 'elif', 'action', '==', "'wikidata'", ':', 'return', 'qobj', '.', 'wikidata', '(', 'self', '.', 'params', '.', 'get', '(', "'title'", ')', ',', 'self', '.', 'params', '.', 'get', '(', "'wikibase'", ')', ')'] | returns wikidata query string | ['returns', 'wikidata', 'query', 'string'] | train | https://github.com/siznax/wptools/blob/100eaea585c34aa9ad87a9eda8982bb4898f6ec9/wptools/wikidata.py#L103-L111 |
7,931 | portantier/habu | habu/cli/cmd_shodan_open.py | cmd_shodan_open | def cmd_shodan_open(ip, no_cache, json_output, nmap_command, verbose, output):
"""Output the open ports for an IP against shodan (nmap format).
Example:
\b
$ habu.shodan.open 8.8.8.8
T:53,U:53
"""
habucfg = loadcfg()
if 'SHODAN_APIKEY' not in habucfg:
print('You must provide a shodan apikey. Use the ~/.habu.json file (variable SHODAN_APIKEY), or export the variable HABU_SHODAN_APIKEY')
print('Get your API key from https://www.shodan.io/')
sys.exit(1)
if verbose:
logging.basicConfig(level=logging.INFO, format='%(message)s')
data = shodan_get_result(ip, habucfg['SHODAN_APIKEY'], no_cache, verbose)
ports = []
if 'data' in data:
for service in data['data']:
ports.append('{}:{}'.format(
service['transport'][0].upper(),
service['port']
))
if nmap_command:
if ports:
output.write('nmap -A -v -p {} {}'.format(','.join(ports), ip))
else:
if json_output:
output.write(json.dumps(ports, indent=4))
output.write('\n')
else:
output.write(','.join(ports)) | python | def cmd_shodan_open(ip, no_cache, json_output, nmap_command, verbose, output):
"""Output the open ports for an IP against shodan (nmap format).
Example:
\b
$ habu.shodan.open 8.8.8.8
T:53,U:53
"""
habucfg = loadcfg()
if 'SHODAN_APIKEY' not in habucfg:
print('You must provide a shodan apikey. Use the ~/.habu.json file (variable SHODAN_APIKEY), or export the variable HABU_SHODAN_APIKEY')
print('Get your API key from https://www.shodan.io/')
sys.exit(1)
if verbose:
logging.basicConfig(level=logging.INFO, format='%(message)s')
data = shodan_get_result(ip, habucfg['SHODAN_APIKEY'], no_cache, verbose)
ports = []
if 'data' in data:
for service in data['data']:
ports.append('{}:{}'.format(
service['transport'][0].upper(),
service['port']
))
if nmap_command:
if ports:
output.write('nmap -A -v -p {} {}'.format(','.join(ports), ip))
else:
if json_output:
output.write(json.dumps(ports, indent=4))
output.write('\n')
else:
output.write(','.join(ports)) | ['def', 'cmd_shodan_open', '(', 'ip', ',', 'no_cache', ',', 'json_output', ',', 'nmap_command', ',', 'verbose', ',', 'output', ')', ':', 'habucfg', '=', 'loadcfg', '(', ')', 'if', "'SHODAN_APIKEY'", 'not', 'in', 'habucfg', ':', 'print', '(', "'You must provide a shodan apikey. Use the ~/.habu.json file (variable SHODAN_APIKEY), or export the variable HABU_SHODAN_APIKEY'", ')', 'print', '(', "'Get your API key from https://www.shodan.io/'", ')', 'sys', '.', 'exit', '(', '1', ')', 'if', 'verbose', ':', 'logging', '.', 'basicConfig', '(', 'level', '=', 'logging', '.', 'INFO', ',', 'format', '=', "'%(message)s'", ')', 'data', '=', 'shodan_get_result', '(', 'ip', ',', 'habucfg', '[', "'SHODAN_APIKEY'", ']', ',', 'no_cache', ',', 'verbose', ')', 'ports', '=', '[', ']', 'if', "'data'", 'in', 'data', ':', 'for', 'service', 'in', 'data', '[', "'data'", ']', ':', 'ports', '.', 'append', '(', "'{}:{}'", '.', 'format', '(', 'service', '[', "'transport'", ']', '[', '0', ']', '.', 'upper', '(', ')', ',', 'service', '[', "'port'", ']', ')', ')', 'if', 'nmap_command', ':', 'if', 'ports', ':', 'output', '.', 'write', '(', "'nmap -A -v -p {} {}'", '.', 'format', '(', "','", '.', 'join', '(', 'ports', ')', ',', 'ip', ')', ')', 'else', ':', 'if', 'json_output', ':', 'output', '.', 'write', '(', 'json', '.', 'dumps', '(', 'ports', ',', 'indent', '=', '4', ')', ')', 'output', '.', 'write', '(', "'\\n'", ')', 'else', ':', 'output', '.', 'write', '(', "','", '.', 'join', '(', 'ports', ')', ')'] | Output the open ports for an IP against shodan (nmap format).
Example:
\b
$ habu.shodan.open 8.8.8.8
T:53,U:53 | ['Output', 'the', 'open', 'ports', 'for', 'an', 'IP', 'against', 'shodan', '(', 'nmap', 'format', ')', '.'] | train | https://github.com/portantier/habu/blob/87091e389dc6332fe1b82830c22b2eefc55816f2/habu/cli/cmd_shodan_open.py#L22-L60 |
7,932 | noahbenson/neuropythy | neuropythy/optimize/core.py | to_potential | def to_potential(f):
'''
to_potential(f) yields f if f is a potential function; if f is not, but f can be converted to
a potential function, that conversion is performed then the result is yielded.
to_potential(Ellipsis) yields a potential function whose output is simply its input (i.e., the
identity function).
to_potential(None) is equivalent to to_potential(0).
The following can be converted into potential functions:
* Anything for which pimms.is_array(x, 'number') yields True (i.e., arrays of constants).
* Any tuple (g, h) where g(x) yields a potential value and h(x) yields a jacobian matrix for
the parameter vector x.
'''
if is_potential(f): return f
elif f is Ellipsis: return identity
elif pimms.is_array(f, 'number'): return const_potential(f)
elif isinstance(f, tuple) and len(f) == 2: return PotentialLambda(f[0], f[1])
else: raise ValueError('Could not convert object to potential function') | python | def to_potential(f):
'''
to_potential(f) yields f if f is a potential function; if f is not, but f can be converted to
a potential function, that conversion is performed then the result is yielded.
to_potential(Ellipsis) yields a potential function whose output is simply its input (i.e., the
identity function).
to_potential(None) is equivalent to to_potential(0).
The following can be converted into potential functions:
* Anything for which pimms.is_array(x, 'number') yields True (i.e., arrays of constants).
* Any tuple (g, h) where g(x) yields a potential value and h(x) yields a jacobian matrix for
the parameter vector x.
'''
if is_potential(f): return f
elif f is Ellipsis: return identity
elif pimms.is_array(f, 'number'): return const_potential(f)
elif isinstance(f, tuple) and len(f) == 2: return PotentialLambda(f[0], f[1])
else: raise ValueError('Could not convert object to potential function') | ['def', 'to_potential', '(', 'f', ')', ':', 'if', 'is_potential', '(', 'f', ')', ':', 'return', 'f', 'elif', 'f', 'is', 'Ellipsis', ':', 'return', 'identity', 'elif', 'pimms', '.', 'is_array', '(', 'f', ',', "'number'", ')', ':', 'return', 'const_potential', '(', 'f', ')', 'elif', 'isinstance', '(', 'f', ',', 'tuple', ')', 'and', 'len', '(', 'f', ')', '==', '2', ':', 'return', 'PotentialLambda', '(', 'f', '[', '0', ']', ',', 'f', '[', '1', ']', ')', 'else', ':', 'raise', 'ValueError', '(', "'Could not convert object to potential function'", ')'] | to_potential(f) yields f if f is a potential function; if f is not, but f can be converted to
a potential function, that conversion is performed then the result is yielded.
to_potential(Ellipsis) yields a potential function whose output is simply its input (i.e., the
identity function).
to_potential(None) is equivalent to to_potential(0).
The following can be converted into potential functions:
* Anything for which pimms.is_array(x, 'number') yields True (i.e., arrays of constants).
* Any tuple (g, h) where g(x) yields a potential value and h(x) yields a jacobian matrix for
the parameter vector x. | ['to_potential', '(', 'f', ')', 'yields', 'f', 'if', 'f', 'is', 'a', 'potential', 'function', ';', 'if', 'f', 'is', 'not', 'but', 'f', 'can', 'be', 'converted', 'to', 'a', 'potential', 'function', 'that', 'conversion', 'is', 'performed', 'then', 'the', 'result', 'is', 'yielded', '.', 'to_potential', '(', 'Ellipsis', ')', 'yields', 'a', 'potential', 'function', 'whose', 'output', 'is', 'simply', 'its', 'input', '(', 'i', '.', 'e', '.', 'the', 'identity', 'function', ')', '.', 'to_potential', '(', 'None', ')', 'is', 'equivalent', 'to', 'to_potential', '(', '0', ')', '.'] | train | https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/optimize/core.py#L293-L310 |
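The conversions accepted by to_potential, as a small sketch (import path from the source URL; how the resulting potential objects are evaluated is not shown here):

import numpy as np
from neuropythy.optimize.core import to_potential

f_const = to_potential(3.0)        # a numeric constant becomes a constant potential
f_ident = to_potential(Ellipsis)   # Ellipsis yields the identity potential
f_pair  = to_potential((lambda x: np.sum(np.asarray(x) ** 2),   # value function g(x)
                        lambda x: 2 * np.asarray(x)))           # jacobian function h(x)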
7,933 | lyft/python-kmsauth | kmsauth/__init__.py | KMSTokenValidator._get_key_alias_from_cache | def _get_key_alias_from_cache(self, key_arn):
'''
Find a key's alias by looking up its key_arn in the KEY_METADATA
cache. This function will only work after a key has been lookedup by
its alias and is meant as a convenience function for turning an ARN
that's already been looked up back into its alias.
'''
for alias in self.KEY_METADATA:
if self.KEY_METADATA[alias]['KeyMetadata']['Arn'] == key_arn:
return alias
return None | python | def _get_key_alias_from_cache(self, key_arn):
'''
Find a key's alias by looking up its key_arn in the KEY_METADATA
cache. This function will only work after a key has been lookedup by
its alias and is meant as a convenience function for turning an ARN
that's already been looked up back into its alias.
'''
for alias in self.KEY_METADATA:
if self.KEY_METADATA[alias]['KeyMetadata']['Arn'] == key_arn:
return alias
return None | ['def', '_get_key_alias_from_cache', '(', 'self', ',', 'key_arn', ')', ':', 'for', 'alias', 'in', 'self', '.', 'KEY_METADATA', ':', 'if', 'self', '.', 'KEY_METADATA', '[', 'alias', ']', '[', "'KeyMetadata'", ']', '[', "'Arn'", ']', '==', 'key_arn', ':', 'return', 'alias', 'return', 'None'] | Find a key's alias by looking up its key_arn in the KEY_METADATA
cache. This function will only work after a key has been lookedup by
its alias and is meant as a convenience function for turning an ARN
that's already been looked up back into its alias. | ['Find', 'a', 'key', 's', 'alias', 'by', 'looking', 'up', 'its', 'key_arn', 'in', 'the', 'KEY_METADATA', 'cache', '.', 'This', 'function', 'will', 'only', 'work', 'after', 'a', 'key', 'has', 'been', 'lookedup', 'by', 'its', 'alias', 'and', 'is', 'meant', 'as', 'a', 'convenience', 'function', 'for', 'turning', 'an', 'ARN', 'that', 's', 'already', 'been', 'looked', 'up', 'back', 'into', 'its', 'alias', '.'] | train | https://github.com/lyft/python-kmsauth/blob/aa2dd957a5d3e58c89fe51a55c6053ff81d9191e/kmsauth/__init__.py#L156-L166 |
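The method above is a plain reverse scan of the alias cache; the same pattern as a standalone sketch with made-up example values:

KEY_METADATA = {
    "alias/service-prod": {"KeyMetadata": {"Arn": "arn:aws:kms:us-east-1:111122223333:key/example"}},
}

def key_alias_from_cache(cache, key_arn):
    # Return the alias whose cached metadata carries the given key ARN.
    for alias, meta in cache.items():
        if meta["KeyMetadata"]["Arn"] == key_arn:
            return alias
    return None

print(key_alias_from_cache(KEY_METADATA, "arn:aws:kms:us-east-1:111122223333:key/example"))
# -> alias/service-prod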
7,934 | 4Kaylum/Brickfront | brickfront/client.py | Client.getSet | def getSet(self, setID):
'''
Gets the information of one specific build using its Brickset set ID.
:param str setID: The ID of the build from Brickset.
:returns: A single Build object.
:rtype: :class:`brickfront.build.Build`
:raises brickfront.errors.InvalidSetID: If no sets exist by that ID.
'''
params = {
'apiKey': self.apiKey,
'userHash': self.userHash,
'setID': setID
}
url = Client.ENDPOINT.format('getSet')
returned = get(url, params=params)
self.checkResponse(returned)
# Put it into a Build class
root = ET.fromstring(returned.text)
v = [Build(i, self) for i in root]
# Return to user
try:
return v[0]
except IndexError:
raise InvalidSetID('There is no set with the ID of `{}`.'.format(setID)) | python | def getSet(self, setID):
'''
Gets the information of one specific build using its Brickset set ID.
:param str setID: The ID of the build from Brickset.
:returns: A single Build object.
:rtype: :class:`brickfront.build.Build`
:raises brickfront.errors.InvalidSetID: If no sets exist by that ID.
'''
params = {
'apiKey': self.apiKey,
'userHash': self.userHash,
'setID': setID
}
url = Client.ENDPOINT.format('getSet')
returned = get(url, params=params)
self.checkResponse(returned)
# Put it into a Build class
root = ET.fromstring(returned.text)
v = [Build(i, self) for i in root]
# Return to user
try:
return v[0]
except IndexError:
raise InvalidSetID('There is no set with the ID of `{}`.'.format(setID)) | ['def', 'getSet', '(', 'self', ',', 'setID', ')', ':', 'params', '=', '{', "'apiKey'", ':', 'self', '.', 'apiKey', ',', "'userHash'", ':', 'self', '.', 'userHash', ',', "'setID'", ':', 'setID', '}', 'url', '=', 'Client', '.', 'ENDPOINT', '.', 'format', '(', "'getSet'", ')', 'returned', '=', 'get', '(', 'url', ',', 'params', '=', 'params', ')', 'self', '.', 'checkResponse', '(', 'returned', ')', '# Put it into a Build class', 'root', '=', 'ET', '.', 'fromstring', '(', 'returned', '.', 'text', ')', 'v', '=', '[', 'Build', '(', 'i', ',', 'self', ')', 'for', 'i', 'in', 'root', ']', '# Return to user', 'try', ':', 'return', 'v', '[', '0', ']', 'except', 'IndexError', ':', 'raise', 'InvalidSetID', '(', "'There is no set with the ID of `{}`.'", '.', 'format', '(', 'setID', ')', ')'] | Gets the information of one specific build using its Brickset set ID.
:param str setID: The ID of the build from Brickset.
:returns: A single Build object.
:rtype: :class:`brickfront.build.Build`
:raises brickfront.errors.InvalidSetID: If no sets exist by that ID. | ['Gets', 'the', 'information', 'of', 'one', 'specific', 'build', 'using', 'its', 'Brickset', 'set', 'ID', '.'] | train | https://github.com/4Kaylum/Brickfront/blob/9545f2183249862b077677d48fcfb9b4bfe1f87d/brickfront/client.py#L144-L171 |
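A hypothetical usage sketch; the Client constructor arguments are assumed (getSet itself only relies on self.apiKey and self.userHash) and the set ID is a placeholder:

from brickfront.client import Client

client = Client("MY_API_KEY")      # constructor signature assumed, not verified here
build = client.getSet("26725")     # raises InvalidSetID if no set has this ID
print(build)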
7,935 | quantopian/zipline | zipline/utils/paths.py | zipline_root | def zipline_root(environ=None):
"""
Get the root directory for all zipline-managed files.
For testing purposes, this accepts a dictionary to interpret as the os
environment.
Parameters
----------
environ : dict, optional
A dict to interpret as the os environment.
Returns
-------
root : string
Path to the zipline root dir.
"""
if environ is None:
environ = os.environ
root = environ.get('ZIPLINE_ROOT', None)
if root is None:
root = expanduser('~/.zipline')
return root | python | def zipline_root(environ=None):
"""
Get the root directory for all zipline-managed files.
For testing purposes, this accepts a dictionary to interpret as the os
environment.
Parameters
----------
environ : dict, optional
A dict to interpret as the os environment.
Returns
-------
root : string
Path to the zipline root dir.
"""
if environ is None:
environ = os.environ
root = environ.get('ZIPLINE_ROOT', None)
if root is None:
root = expanduser('~/.zipline')
return root | ['def', 'zipline_root', '(', 'environ', '=', 'None', ')', ':', 'if', 'environ', 'is', 'None', ':', 'environ', '=', 'os', '.', 'environ', 'root', '=', 'environ', '.', 'get', '(', "'ZIPLINE_ROOT'", ',', 'None', ')', 'if', 'root', 'is', 'None', ':', 'root', '=', 'expanduser', '(', "'~/.zipline'", ')', 'return', 'root'] | Get the root directory for all zipline-managed files.
For testing purposes, this accepts a dictionary to interpret as the os
environment.
Parameters
----------
environ : dict, optional
A dict to interpret as the os environment.
Returns
-------
root : string
Path to the zipline root dir. | ['Get', 'the', 'root', 'directory', 'for', 'all', 'zipline', '-', 'managed', 'files', '.'] | train | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/paths.py#L107-L131 |
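A short sketch of the environment override (import path from the source URL; the environ dicts are purely illustrative):

from zipline.utils.paths import zipline_root

print(zipline_root({"ZIPLINE_ROOT": "/tmp/zipline-test"}))  # -> /tmp/zipline-test
print(zipline_root({}))                                     # -> the expanded ~/.zipline default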
7,936 | iotile/coretools | iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/mslib.py | generate | def generate(env):
"""Add Builders and construction variables for lib to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
# Set-up ms tools paths
msvc_setup_env_once(env)
env['AR'] = 'lib'
env['ARFLAGS'] = SCons.Util.CLVar('/nologo')
env['ARCOM'] = "${TEMPFILE('$AR $ARFLAGS /OUT:$TARGET $SOURCES','$ARCOMSTR')}"
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib' | python | def generate(env):
"""Add Builders and construction variables for lib to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
# Set-up ms tools paths
msvc_setup_env_once(env)
env['AR'] = 'lib'
env['ARFLAGS'] = SCons.Util.CLVar('/nologo')
env['ARCOM'] = "${TEMPFILE('$AR $ARFLAGS /OUT:$TARGET $SOURCES','$ARCOMSTR')}"
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib' | ['def', 'generate', '(', 'env', ')', ':', 'SCons', '.', 'Tool', '.', 'createStaticLibBuilder', '(', 'env', ')', '# Set-up ms tools paths', 'msvc_setup_env_once', '(', 'env', ')', 'env', '[', "'AR'", ']', '=', "'lib'", 'env', '[', "'ARFLAGS'", ']', '=', 'SCons', '.', 'Util', '.', 'CLVar', '(', "'/nologo'", ')', 'env', '[', "'ARCOM'", ']', '=', '"${TEMPFILE(\'$AR $ARFLAGS /OUT:$TARGET $SOURCES\',\'$ARCOMSTR\')}"', 'env', '[', "'LIBPREFIX'", ']', '=', "''", 'env', '[', "'LIBSUFFIX'", ']', '=', "'.lib'"] | Add Builders and construction variables for lib to an Environment. | ['Add', 'Builders', 'and', 'construction', 'variables', 'for', 'lib', 'to', 'an', 'Environment', '.'] | train | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/mslib.py#L44-L55 |
7,937 | eventbrite/eventbrite-sdk-python | eventbrite/access_methods.py | AccessMethodsMixin.get_organizers_events | def get_organizers_events(self, id, **data):
"""
GET /organizers/:id/events/
Gets events of the :format:`organizer`.
"""
return self.get("/organizers/{0}/events/".format(id), data=data) | python | def get_organizers_events(self, id, **data):
"""
GET /organizers/:id/events/
Gets events of the :format:`organizer`.
"""
return self.get("/organizers/{0}/events/".format(id), data=data) | ['def', 'get_organizers_events', '(', 'self', ',', 'id', ',', '*', '*', 'data', ')', ':', 'return', 'self', '.', 'get', '(', '"/organizers/{0}/events/"', '.', 'format', '(', 'id', ')', ',', 'data', '=', 'data', ')'] | GET /organizers/:id/events/
Gets events of the :format:`organizer`. | ['GET', '/', 'organizers', '/', ':', 'id', '/', 'events', '/', 'Gets', 'events', 'of', 'the', ':', 'format', ':', 'organizer', '.'] | train | https://github.com/eventbrite/eventbrite-sdk-python/blob/f2e5dc5aa1aa3e45766de13f16fd65722163d91a/eventbrite/access_methods.py#L601-L607 |
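A hypothetical usage sketch; the token-based client construction and the organizer id are placeholders:

from eventbrite import Eventbrite

client = Eventbrite("MY_OAUTH_TOKEN")               # client construction assumed
events = client.get_organizers_events("1234567890")
# Extra keyword arguments would be forwarded as request data, e.g. status="live".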
7,938 | pyapi-gitlab/pyapi-gitlab | gitlab/__init__.py | Gitlab.getfilearchive | def getfilearchive(self, project_id, filepath=None):
"""
Get an archive of the repository
:param project_id: project id
:param filepath: path to save the file to
:return: True if the file was saved to the filepath
"""
if not filepath:
filepath = ''
request = requests.get(
'{0}/{1}/repository/archive'.format(self.projects_url, project_id),
verify=self.verify_ssl, auth=self.auth, headers=self.headers, timeout=self.timeout)
if request.status_code == 200:
if filepath == "":
filepath = request.headers['content-disposition'].split(';')[1].split('=')[1].strip('"')
with open(filepath, 'wb') as filesave:
filesave.write(request.content)
# TODO: Catch oserror exceptions as no permissions and such
# TODO: change the filepath to a path and keep always the filename?
return True
else:
msg = request.json()['message']
raise exceptions.HttpError(msg) | python | def getfilearchive(self, project_id, filepath=None):
"""
Get an archive of the repository
:param project_id: project id
:param filepath: path to save the file to
:return: True if the file was saved to the filepath
"""
if not filepath:
filepath = ''
request = requests.get(
'{0}/{1}/repository/archive'.format(self.projects_url, project_id),
verify=self.verify_ssl, auth=self.auth, headers=self.headers, timeout=self.timeout)
if request.status_code == 200:
if filepath == "":
filepath = request.headers['content-disposition'].split(';')[1].split('=')[1].strip('"')
with open(filepath, 'wb') as filesave:
filesave.write(request.content)
# TODO: Catch oserror exceptions as no permissions and such
# TODO: change the filepath to a path and keep always the filename?
return True
else:
msg = request.json()['message']
raise exceptions.HttpError(msg) | ['def', 'getfilearchive', '(', 'self', ',', 'project_id', ',', 'filepath', '=', 'None', ')', ':', 'if', 'not', 'filepath', ':', 'filepath', '=', "''", 'request', '=', 'requests', '.', 'get', '(', "'{0}/{1}/repository/archive'", '.', 'format', '(', 'self', '.', 'projects_url', ',', 'project_id', ')', ',', 'verify', '=', 'self', '.', 'verify_ssl', ',', 'auth', '=', 'self', '.', 'auth', ',', 'headers', '=', 'self', '.', 'headers', ',', 'timeout', '=', 'self', '.', 'timeout', ')', 'if', 'request', '.', 'status_code', '==', '200', ':', 'if', 'filepath', '==', '""', ':', 'filepath', '=', 'request', '.', 'headers', '[', "'content-disposition'", ']', '.', 'split', '(', "';'", ')', '[', '1', ']', '.', 'split', '(', "'='", ')', '[', '1', ']', '.', 'strip', '(', '\'"\'', ')', 'with', 'open', '(', 'filepath', ',', "'wb'", ')', 'as', 'filesave', ':', 'filesave', '.', 'write', '(', 'request', '.', 'content', ')', '# TODO: Catch oserror exceptions as no permissions and such', '# TODO: change the filepath to a path and keep always the filename?', 'return', 'True', 'else', ':', 'msg', '=', 'request', '.', 'json', '(', ')', '[', "'message'", ']', 'raise', 'exceptions', '.', 'HttpError', '(', 'msg', ')'] | Get an archive of the repository
:param project_id: project id
:param filepath: path to save the file to
:return: True if the file was saved to the filepath | ['Get', 'an', 'archive', 'of', 'the', 'repository'] | train | https://github.com/pyapi-gitlab/pyapi-gitlab/blob/f74b6fb5c13cecae9524997847e928905cc60acf/gitlab/__init__.py#L1655-L1680 |
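A hypothetical usage sketch; the connection details are placeholders and the Gitlab constructor arguments are assumed:

import gitlab  # pyapi-gitlab

gl = gitlab.Gitlab("https://gitlab.example.com", token="PRIVATE_TOKEN")  # assumed constructor
gl.getfilearchive(42, filepath="project42.tar.gz")
# With no filepath, the filename from the Content-Disposition header is used instead.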
7,939 | panzarino/mlbgame | mlbgame/game.py | players | def players(game_id):
"""Gets player/coach/umpire information for the game with matching id."""
# get data
data = mlbgame.data.get_players(game_id)
# parse data
parsed = etree.parse(data)
root = parsed.getroot()
output = {}
output['game_id'] = game_id
# get player/coach data
for team in root.findall('team'):
type = team.attrib['type'] + "_team"
# the type is either home_team or away_team
output[type] = {}
output[type]['players'] = []
output[type]['coaches'] = []
for p in team.findall('player'):
player = {}
for key in p.keys():
player[key] = p.get(key)
output[type]['players'].append(player)
for c in team.findall('coach'):
coach = {}
for key in c.keys():
coach[key] = c.get(key)
output[type]['coaches'].append(coach)
# get umpire data
output['umpires'] = []
for u in root.find('umpires').findall('umpire'):
umpire = {}
for key in u.keys():
umpire[key] = u.get(key)
output['umpires'].append(umpire)
return output | python | def players(game_id):
"""Gets player/coach/umpire information for the game with matching id."""
# get data
data = mlbgame.data.get_players(game_id)
# parse data
parsed = etree.parse(data)
root = parsed.getroot()
output = {}
output['game_id'] = game_id
# get player/coach data
for team in root.findall('team'):
type = team.attrib['type'] + "_team"
# the type is either home_team or away_team
output[type] = {}
output[type]['players'] = []
output[type]['coaches'] = []
for p in team.findall('player'):
player = {}
for key in p.keys():
player[key] = p.get(key)
output[type]['players'].append(player)
for c in team.findall('coach'):
coach = {}
for key in c.keys():
coach[key] = c.get(key)
output[type]['coaches'].append(coach)
# get umpire data
output['umpires'] = []
for u in root.find('umpires').findall('umpire'):
umpire = {}
for key in u.keys():
umpire[key] = u.get(key)
output['umpires'].append(umpire)
return output | ['def', 'players', '(', 'game_id', ')', ':', '# get data', 'data', '=', 'mlbgame', '.', 'data', '.', 'get_players', '(', 'game_id', ')', '# parse data', 'parsed', '=', 'etree', '.', 'parse', '(', 'data', ')', 'root', '=', 'parsed', '.', 'getroot', '(', ')', 'output', '=', '{', '}', 'output', '[', "'game_id'", ']', '=', 'game_id', '# get player/coach data', 'for', 'team', 'in', 'root', '.', 'findall', '(', "'team'", ')', ':', 'type', '=', 'team', '.', 'attrib', '[', "'type'", ']', '+', '"_team"', '# the type is either home_team or away_team', 'output', '[', 'type', ']', '=', '{', '}', 'output', '[', 'type', ']', '[', "'players'", ']', '=', '[', ']', 'output', '[', 'type', ']', '[', "'coaches'", ']', '=', '[', ']', 'for', 'p', 'in', 'team', '.', 'findall', '(', "'player'", ')', ':', 'player', '=', '{', '}', 'for', 'key', 'in', 'p', '.', 'keys', '(', ')', ':', 'player', '[', 'key', ']', '=', 'p', '.', 'get', '(', 'key', ')', 'output', '[', 'type', ']', '[', "'players'", ']', '.', 'append', '(', 'player', ')', 'for', 'c', 'in', 'team', '.', 'findall', '(', "'coach'", ')', ':', 'coach', '=', '{', '}', 'for', 'key', 'in', 'c', '.', 'keys', '(', ')', ':', 'coach', '[', 'key', ']', '=', 'c', '.', 'get', '(', 'key', ')', 'output', '[', 'type', ']', '[', "'coaches'", ']', '.', 'append', '(', 'coach', ')', '# get umpire data', 'output', '[', "'umpires'", ']', '=', '[', ']', 'for', 'u', 'in', 'root', '.', 'find', '(', "'umpires'", ')', '.', 'findall', '(', "'umpire'", ')', ':', 'umpire', '=', '{', '}', 'for', 'key', 'in', 'u', '.', 'keys', '(', ')', ':', 'umpire', '[', 'key', ']', '=', 'u', '.', 'get', '(', 'key', ')', 'output', '[', "'umpires'", ']', '.', 'append', '(', 'umpire', ')', 'return', 'output'] | Gets player/coach/umpire information for the game with matching id. | ['Gets', 'player', '/', 'coach', '/', 'umpire', 'information', 'for', 'the', 'game', 'with', 'matching', 'id', '.'] | train | https://github.com/panzarino/mlbgame/blob/0a2d10540de793fdc3b8476aa18f5cf3b53d0b54/mlbgame/game.py#L548-L587 |
7,940 | gwastro/pycbc | pycbc/inject/inject.py | set_sim_data | def set_sim_data(inj, field, data):
"""Sets data of a SimInspiral instance."""
try:
sim_field = sim_inspiral_map[field]
except KeyError:
sim_field = field
# for tc, map to geocentric times
if sim_field == 'tc':
inj.geocent_end_time = int(data)
inj.geocent_end_time_ns = int(1e9*(data % 1))
else:
setattr(inj, sim_field, data) | python | def set_sim_data(inj, field, data):
"""Sets data of a SimInspiral instance."""
try:
sim_field = sim_inspiral_map[field]
except KeyError:
sim_field = field
# for tc, map to geocentric times
if sim_field == 'tc':
inj.geocent_end_time = int(data)
inj.geocent_end_time_ns = int(1e9*(data % 1))
else:
setattr(inj, sim_field, data) | ['def', 'set_sim_data', '(', 'inj', ',', 'field', ',', 'data', ')', ':', 'try', ':', 'sim_field', '=', 'sim_inspiral_map', '[', 'field', ']', 'except', 'KeyError', ':', 'sim_field', '=', 'field', '# for tc, map to geocentric times', 'if', 'sim_field', '==', "'tc'", ':', 'inj', '.', 'geocent_end_time', '=', 'int', '(', 'data', ')', 'inj', '.', 'geocent_end_time_ns', '=', 'int', '(', '1e9', '*', '(', 'data', '%', '1', ')', ')', 'else', ':', 'setattr', '(', 'inj', ',', 'sim_field', ',', 'data', ')'] | Sets data of a SimInspiral instance. | ['Sets', 'data', 'of', 'a', 'SimInspiral', 'instance', '.'] | train | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inject/inject.py#L69-L80 |
7,941 | MacHu-GWU/rolex-project | rolex/generator.py | rnd_date_list_high_performance | def rnd_date_list_high_performance(size, start=date(1970, 1, 1), end=None, **kwargs):
"""
Generate mass random date.
:param size: int, number of
:param start: date similar object, int / str / date / datetime
:param end: date similar object, int / str / date / datetime, default today's date
:param kwargs: args placeholder
:return: list of datetime.date
"""
if end is None:
end = date.today()
start_days = to_ordinal(parser.parse_datetime(start))
end_days = to_ordinal(parser.parse_datetime(end))
_assert_correct_start_end(start_days, end_days)
if has_np: # pragma: no cover
return [
from_ordinal(days)
for days in np.random.randint(start_days, end_days, size)
]
else:
return [
from_ordinal(random.randint(start_days, end_days))
for _ in range(size)
] | python | def rnd_date_list_high_performance(size, start=date(1970, 1, 1), end=None, **kwargs):
"""
Generate mass random date.
:param size: int, number of dates to generate
:param start: date similar object, int / str / date / datetime
:param end: date similar object, int / str / date / datetime, default today's date
:param kwargs: args placeholder
:return: list of datetime.date
"""
if end is None:
end = date.today()
start_days = to_ordinal(parser.parse_datetime(start))
end_days = to_ordinal(parser.parse_datetime(end))
_assert_correct_start_end(start_days, end_days)
if has_np: # pragma: no cover
return [
from_ordinal(days)
for days in np.random.randint(start_days, end_days, size)
]
else:
return [
from_ordinal(random.randint(start_days, end_days))
for _ in range(size)
] | ['def', 'rnd_date_list_high_performance', '(', 'size', ',', 'start', '=', 'date', '(', '1970', ',', '1', ',', '1', ')', ',', 'end', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', 'end', 'is', 'None', ':', 'end', '=', 'date', '.', 'today', '(', ')', 'start_days', '=', 'to_ordinal', '(', 'parser', '.', 'parse_datetime', '(', 'start', ')', ')', 'end_days', '=', 'to_ordinal', '(', 'parser', '.', 'parse_datetime', '(', 'end', ')', ')', '_assert_correct_start_end', '(', 'start_days', ',', 'end_days', ')', 'if', 'has_np', ':', '# pragma: no cover', 'return', '[', 'from_ordinal', '(', 'days', ')', 'for', 'days', 'in', 'np', '.', 'random', '.', 'randint', '(', 'start_days', ',', 'end_days', ',', 'size', ')', ']', 'else', ':', 'return', '[', 'from_ordinal', '(', 'random', '.', 'randint', '(', 'start_days', ',', 'end_days', ')', ')', 'for', '_', 'in', 'range', '(', 'size', ')', ']'] | Generate mass random date.
:param size: int, number of dates to generate
:param start: date similar object, int / str / date / datetime
:param end: date similar object, int / str / date / datetime, default today's date
:param kwargs: args placeholder
:return: list of datetime.date | ['Generate', 'mass', 'random', 'date', '.'] | train | https://github.com/MacHu-GWU/rolex-project/blob/a1111b410ed04b4b6eddd81df110fa2dacfa6537/rolex/generator.py#L295-L319 |
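The ordinal trick behind rnd_date_list_high_performance can be reproduced with the standard library alone: convert both endpoints to proleptic ordinals, draw uniform integers, and convert back. This is an illustrative re-implementation, not the rolex API itself.

```python
import random
from datetime import date

def random_dates(size, start=date(1970, 1, 1), end=None):
    end = end or date.today()
    lo, hi = start.toordinal(), end.toordinal()
    return [date.fromordinal(random.randint(lo, hi)) for _ in range(size)]

print(random_dates(3))
```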
7,942 | BerkeleyAutomation/autolab_core | autolab_core/learning_analysis.py | BinaryClassificationResult.app_score | def app_score(self):
""" Computes the area under the app curve. """
# compute curve
precisions, pct_pred_pos, taus = self.precision_pct_pred_pos_curve(interval=False)
# compute area
app = 0
total = 0
for k in range(len(precisions)-1):
# read cur data
cur_prec = precisions[k]
cur_pp = pct_pred_pos[k]
cur_tau = taus[k]
# read next data
next_prec = precisions[k+1]
next_pp = pct_pred_pos[k+1]
next_tau = taus[k+1]
# approximate with rectangles
mid_prec = (cur_prec + next_prec) / 2.0
width_pp = np.abs(next_pp - cur_pp)
app += mid_prec * width_pp
total += width_pp
return app | python | def app_score(self):
""" Computes the area under the app curve. """
# compute curve
precisions, pct_pred_pos, taus = self.precision_pct_pred_pos_curve(interval=False)
# compute area
app = 0
total = 0
for k in range(len(precisions)-1):
# read cur data
cur_prec = precisions[k]
cur_pp = pct_pred_pos[k]
cur_tau = taus[k]
# read next data
next_prec = precisions[k+1]
next_pp = pct_pred_pos[k+1]
next_tau = taus[k+1]
# approximate with rectangles
mid_prec = (cur_prec + next_prec) / 2.0
width_pp = np.abs(next_pp - cur_pp)
app += mid_prec * width_pp
total += width_pp
return app | ['def', 'app_score', '(', 'self', ')', ':', '# compute curve', 'precisions', ',', 'pct_pred_pos', ',', 'taus', '=', 'self', '.', 'precision_pct_pred_pos_curve', '(', 'interval', '=', 'False', ')', '# compute area', 'app', '=', '0', 'total', '=', '0', 'for', 'k', 'in', 'range', '(', 'len', '(', 'precisions', ')', '-', '1', ')', ':', '# read cur data', 'cur_prec', '=', 'precisions', '[', 'k', ']', 'cur_pp', '=', 'pct_pred_pos', '[', 'k', ']', 'cur_tau', '=', 'taus', '[', 'k', ']', '# read next data', 'next_prec', '=', 'precisions', '[', 'k', '+', '1', ']', 'next_pp', '=', 'pct_pred_pos', '[', 'k', '+', '1', ']', 'next_tau', '=', 'taus', '[', 'k', '+', '1', ']', '# approximate with rectangles', 'mid_prec', '=', '(', 'cur_prec', '+', 'next_prec', ')', '/', '2.0', 'width_pp', '=', 'np', '.', 'abs', '(', 'next_pp', '-', 'cur_pp', ')', 'app', '+=', 'mid_prec', '*', 'width_pp', 'total', '+=', 'width_pp', 'return', 'app'] | Computes the area under the app curve. | ['Computes', 'the', 'area', 'under', 'the', 'app', 'curve', '.'] | train | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/learning_analysis.py#L467-L492 |
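The app_score loop above is a midpoint-rectangle Riemann sum over the precision versus percent-predicted-positive curve; the same integration in vectorized form, applied to made-up arrays:

```python
import numpy as np

def area_midpoint(y, x):
    y = np.asarray(y, dtype=float)
    x = np.asarray(x, dtype=float)
    mid = 0.5 * (y[:-1] + y[1:])   # midpoint heights
    width = np.abs(np.diff(x))     # rectangle widths
    return float(np.sum(mid * width))

precisions = [1.0, 0.9, 0.8, 0.7]
pct_pred_pos = [0.1, 0.4, 0.7, 1.0]
print(area_midpoint(precisions, pct_pred_pos))  # 0.765
```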
7,943 | mswart/pyopenmensa | feed.py | extractDate | def extractDate(text):
""" Tries to extract a date from a given :obj:`str`.
:param str text: Input date. A :obj:`datetime.date` object is passed
through without modification.
:rtype: :obj:`datetime.date`"""
if type(text) is datetime.date:
return text
match = date_format.search(text.lower())
if not match:
raise ValueError('unsupported date format: {0}'.format(text.lower()))
# convert DD.MM.YYYY into YYYY-MM-DD
if match.group('month'):
if not match.group('month') in month_names:
raise ValueError('unknown month names: "{0}"'
.format(match.group('month')))
year = int(match.group('year'))
return datetime.date(
year if year > 2000 else 2000 + year,
int(month_names[match.group('month')]),
int(match.group('day')))
else:
parts = list(map(lambda v: int(v), '-'.join(reversed(
match.group('datestr').split('.'))).split('-')))
if parts[0] < 2000:
parts[0] += 2000
return datetime.date(*parts) | python | def extractDate(text):
""" Tries to extract a date from a given :obj:`str`.
:param str text: Input date. A :obj:`datetime.date` object is passed
through without modification.
:rtype: :obj:`datetime.date`"""
if type(text) is datetime.date:
return text
match = date_format.search(text.lower())
if not match:
raise ValueError('unsupported date format: {0}'.format(text.lower()))
# convert DD.MM.YYYY into YYYY-MM-DD
if match.group('month'):
if not match.group('month') in month_names:
raise ValueError('unknown month names: "{0}"'
.format(match.group('month')))
year = int(match.group('year'))
return datetime.date(
year if year > 2000 else 2000 + year,
int(month_names[match.group('month')]),
int(match.group('day')))
else:
parts = list(map(lambda v: int(v), '-'.join(reversed(
match.group('datestr').split('.'))).split('-')))
if parts[0] < 2000:
parts[0] += 2000
return datetime.date(*parts) | ['def', 'extractDate', '(', 'text', ')', ':', 'if', 'type', '(', 'text', ')', 'is', 'datetime', '.', 'date', ':', 'return', 'text', 'match', '=', 'date_format', '.', 'search', '(', 'text', '.', 'lower', '(', ')', ')', 'if', 'not', 'match', ':', 'raise', 'ValueError', '(', "'unsupported date format: {0}'", '.', 'format', '(', 'text', '.', 'lower', '(', ')', ')', ')', '# convert DD.MM.YYYY into YYYY-MM-DD', 'if', 'match', '.', 'group', '(', "'month'", ')', ':', 'if', 'not', 'match', '.', 'group', '(', "'month'", ')', 'in', 'month_names', ':', 'raise', 'ValueError', '(', '\'unknown month names: "{0}"\'', '.', 'format', '(', 'match', '.', 'group', '(', "'month'", ')', ')', ')', 'year', '=', 'int', '(', 'match', '.', 'group', '(', "'year'", ')', ')', 'return', 'datetime', '.', 'date', '(', 'year', 'if', 'year', '>', '2000', 'else', '2000', '+', 'year', ',', 'int', '(', 'month_names', '[', 'match', '.', 'group', '(', "'month'", ')', ']', ')', ',', 'int', '(', 'match', '.', 'group', '(', "'day'", ')', ')', ')', 'else', ':', 'parts', '=', 'list', '(', 'map', '(', 'lambda', 'v', ':', 'int', '(', 'v', ')', ',', "'-'", '.', 'join', '(', 'reversed', '(', 'match', '.', 'group', '(', "'datestr'", ')', '.', 'split', '(', "'.'", ')', ')', ')', '.', 'split', '(', "'-'", ')', ')', ')', 'if', 'parts', '[', '0', ']', '<', '2000', ':', 'parts', '[', '0', ']', '+=', '2000', 'return', 'datetime', '.', 'date', '(', '*', 'parts', ')'] | Tries to extract a date from a given :obj:`str`.
:param str text: Input date. A :obj:`datetime.date` object is passed
through without modification.
:rtype: :obj:`datetime.date` | ['Tries', 'to', 'extract', 'a', 'date', 'from', 'a', 'given', ':', 'obj', ':', 'str', '.'] | train | https://github.com/mswart/pyopenmensa/blob/c651da6ace33e2278349636daaa709d043dee6ff/feed.py#L47-L73 |
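A trimmed-down sketch of the same behaviour as extractDate: parse numeric DD.MM.YY(YY) strings, pass datetime.date objects through untouched, and lift short years into the 2000s as the original does. The regex here is simpler than pyopenmensa's (no month names), so it is only an illustration.

```python
import datetime
import re

SIMPLE_DATE = re.compile(r'(?P<day>\d{1,2})\.(?P<month>\d{1,2})\.(?P<year>\d{2,4})')

def extract_date(text):
    if isinstance(text, datetime.date):
        return text
    match = SIMPLE_DATE.search(text)
    if not match:
        raise ValueError('unsupported date format: {0}'.format(text))
    day, month, year = (int(match.group(g)) for g in ('day', 'month', 'year'))
    if year < 2000:
        year += 2000
    return datetime.date(year, month, day)

print(extract_date('23.03.15'))  # 2015-03-23
```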
7,944 | google/grr | grr/client/grr_response_client/vfs_handlers/files.py | File.GetMountPoint | def GetMountPoint(self, path=None):
"""Walk back from the path to find the mount point.
Args:
path: a Unicode string containing the path or None. If path is None the
value in self.path is used.
Returns:
path string of the mount point
"""
path = os.path.abspath(
client_utils.CanonicalPathToLocalPath(path or self.path))
while not os.path.ismount(path):
path = os.path.dirname(path)
return path | python | def GetMountPoint(self, path=None):
"""Walk back from the path to find the mount point.
Args:
path: a Unicode string containing the path or None. If path is None the
value in self.path is used.
Returns:
path string of the mount point
"""
path = os.path.abspath(
client_utils.CanonicalPathToLocalPath(path or self.path))
while not os.path.ismount(path):
path = os.path.dirname(path)
return path | ['def', 'GetMountPoint', '(', 'self', ',', 'path', '=', 'None', ')', ':', 'path', '=', 'os', '.', 'path', '.', 'abspath', '(', 'client_utils', '.', 'CanonicalPathToLocalPath', '(', 'path', 'or', 'self', '.', 'path', ')', ')', 'while', 'not', 'os', '.', 'path', '.', 'ismount', '(', 'path', ')', ':', 'path', '=', 'os', '.', 'path', '.', 'dirname', '(', 'path', ')', 'return', 'path'] | Walk back from the path to find the mount point.
Args:
path: a Unicode string containing the path or None. If path is None the
value in self.path is used.
Returns:
path string of the mount point | ['Walk', 'back', 'from', 'the', 'path', 'to', 'find', 'the', 'mount', 'point', '.'] | train | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/vfs_handlers/files.py#L313-L329 |
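The walk-up loop in GetMountPoint in isolation: keep taking dirname() until os.path.ismount() reports a mount point (the filesystem root always qualifies), all with the standard library.

```python
import os

def find_mount_point(path):
    path = os.path.abspath(path)
    while not os.path.ismount(path):
        path = os.path.dirname(path)
    return path

print(find_mount_point(os.getcwd()))
```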
7,945 | jason-weirather/py-seq-tools | seqtools/errors.py | BaseError.set_unobserved_after | def set_unobserved_after(self,tlen,qlen,nt,p):
"""Set the unobservable sequence data after this base
:param tlen: target homopolymer length
:param qlen: query homopolymer length
:param nt: nucleotide
:param p: p is the probability of attributing this base to the unobserved error
:type tlen: int
:type qlen: int
:type nt: char
:type p: float
"""
self._unobservable.set_after(tlen,qlen,nt,p) | python | def set_unobserved_after(self,tlen,qlen,nt,p):
"""Set the unobservable sequence data after this base
:param tlen: target homopolymer length
:param qlen: query homopolymer length
:param nt: nucleotide
:param p: p is the probability of attributing this base to the unobserved error
:type tlen: int
:type qlen: int
:type nt: char
:type p: float
"""
self._unobservable.set_after(tlen,qlen,nt,p) | ['def', 'set_unobserved_after', '(', 'self', ',', 'tlen', ',', 'qlen', ',', 'nt', ',', 'p', ')', ':', 'self', '.', '_unobservable', '.', 'set_after', '(', 'tlen', ',', 'qlen', ',', 'nt', ',', 'p', ')'] | Set the unobservable sequence data after this base
:param tlen: target homopolymer length
:param qlen: query homopolymer length
:param nt: nucleotide
:param p: p is the probability of attributing this base to the unobserved error
:type tlen: int
:type qlen: int
:type nt: char
:type p: float | ['Set', 'the', 'unobservable', 'sequence', 'data', 'after', 'this', 'base'] | train | https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/errors.py#L375-L388 |
7,946 | xav/Grapefruit | grapefruit.py | Color.from_xyz | def from_xyz(x, y, z, alpha=1.0, wref=_DEFAULT_WREF):
"""Create a new instance based on the specifed CIE-XYZ values.
Parameters:
:x:
The Red component value [0...1]
:y:
The Green component value [0...1]
:z:
The Blue component value [0...1]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> Color.from_xyz(0.488941, 0.365682, 0.0448137)
Color(1.0, 0.5, 0.0, 1.0)
>>> Color.from_xyz(0.488941, 0.365682, 0.0448137, 0.5)
Color(1.0, 0.5, 0.0, 0.5)
"""
return Color(xyz_to_rgb(x, y, z), 'rgb', alpha, wref) | python | def from_xyz(x, y, z, alpha=1.0, wref=_DEFAULT_WREF):
"""Create a new instance based on the specifed CIE-XYZ values.
Parameters:
:x:
The Red component value [0...1]
:y:
The Green component value [0...1]
:z:
The Blue component value [0...1]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> Color.from_xyz(0.488941, 0.365682, 0.0448137)
Color(1.0, 0.5, 0.0, 1.0)
>>> Color.from_xyz(0.488941, 0.365682, 0.0448137, 0.5)
Color(1.0, 0.5, 0.0, 0.5)
"""
return Color(xyz_to_rgb(x, y, z), 'rgb', alpha, wref) | ['def', 'from_xyz', '(', 'x', ',', 'y', ',', 'z', ',', 'alpha', '=', '1.0', ',', 'wref', '=', '_DEFAULT_WREF', ')', ':', 'return', 'Color', '(', 'xyz_to_rgb', '(', 'x', ',', 'y', ',', 'z', ')', ',', "'rgb'", ',', 'alpha', ',', 'wref', ')'] | Create a new instance based on the specifed CIE-XYZ values.
Parameters:
:x:
The Red component value [0...1]
:y:
The Green component value [0...1]
:z:
The Blue component value [0...1]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> Color.from_xyz(0.488941, 0.365682, 0.0448137)
Color(1.0, 0.5, 0.0, 1.0)
>>> Color.from_xyz(0.488941, 0.365682, 0.0448137, 0.5)
Color(1.0, 0.5, 0.0, 0.5) | ['Create', 'a', 'new', 'instance', 'based', 'on', 'the', 'specifed', 'CIE', '-', 'XYZ', 'values', '.'] | train | https://github.com/xav/Grapefruit/blob/b3d88375be727a3a1ec5839fbc462e0e8e0836e4/grapefruit.py#L1259-L1283 |
7,947 | PythonCharmers/python-future | src/future/backports/urllib/request.py | FancyURLopener.http_error_307 | def http_error_307(self, url, fp, errcode, errmsg, headers, data=None):
"""Error 307 -- relocated, but turn POST into error."""
if data is None:
return self.http_error_302(url, fp, errcode, errmsg, headers, data)
else:
return self.http_error_default(url, fp, errcode, errmsg, headers) | python | def http_error_307(self, url, fp, errcode, errmsg, headers, data=None):
"""Error 307 -- relocated, but turn POST into error."""
if data is None:
return self.http_error_302(url, fp, errcode, errmsg, headers, data)
else:
return self.http_error_default(url, fp, errcode, errmsg, headers) | ['def', 'http_error_307', '(', 'self', ',', 'url', ',', 'fp', ',', 'errcode', ',', 'errmsg', ',', 'headers', ',', 'data', '=', 'None', ')', ':', 'if', 'data', 'is', 'None', ':', 'return', 'self', '.', 'http_error_302', '(', 'url', ',', 'fp', ',', 'errcode', ',', 'errmsg', ',', 'headers', ',', 'data', ')', 'else', ':', 'return', 'self', '.', 'http_error_default', '(', 'url', ',', 'fp', ',', 'errcode', ',', 'errmsg', ',', 'headers', ')'] | Error 307 -- relocated, but turn POST into error. | ['Error', '307', '--', 'relocated', 'but', 'turn', 'POST', 'into', 'error', '.'] | train | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/urllib/request.py#L2115-L2120 |
7,948 | cloudnull/cloudlib | cloudlib/package_installer.py | PackageInstaller.install | def install(self):
"""Install packages from the packages_dict."""
self.distro = distro_check()
package_list = self.packages_dict.get(self.distro)
self._installer(package_list=package_list.get('packages')) | python | def install(self):
"""Install packages from the packages_dict."""
self.distro = distro_check()
package_list = self.packages_dict.get(self.distro)
self._installer(package_list=package_list.get('packages')) | ['def', 'install', '(', 'self', ')', ':', 'self', '.', 'distro', '=', 'distro_check', '(', ')', 'package_list', '=', 'self', '.', 'packages_dict', '.', 'get', '(', 'self', '.', 'distro', ')', 'self', '.', '_installer', '(', 'package_list', '=', 'package_list', '.', 'get', '(', "'packages'", ')', ')'] | Install packages from the packages_dict. | ['Install', 'packages', 'from', 'the', 'packages_dict', '.'] | train | https://github.com/cloudnull/cloudlib/blob/5038111ce02521caa2558117e3bae9e1e806d315/cloudlib/package_installer.py#L101-L105 |
7,949 | PmagPy/PmagPy | pmagpy/builder2.py | ErMagicBuilder.validate_items | def validate_items(self, item_list, item_type):
"""
Go through a list Pmag_objects and check for:
parent errors,
children errors,
type errors.
Return a dictionary of exceptions in this format:
{sample1: {'parent': [warning1, warning2, warning3], 'child': [warning1, warning2]},
sample2: {'child': [warning1], 'type': [warning1, warning2]},
...}
"""
def append_or_create_dict_item(warning_type, dictionary, key, value):
"""
Add to dictionary with this format:
{key1: {warning_type1: [value1, value2], warning_type2: [value1]},
...}
"""
if not value:
return
try:
name = key.name
except AttributeError:
name = key
if not name in dictionary:
dictionary[name] = {}
if not warning_type in dictionary[name]:
dictionary[name][warning_type] = []
for v in value:
dictionary[name][warning_type].append(v)
def check_item_type(item, item_type):#, warnings=None):
"""
Make sure that item has appropriate type, and is in the data object.
"""
warnings = []
item_list, item_class, item_constructor = self.data_lists[item_type]
if not isinstance(item, item_class):
warnings.append(PmagException('wrong type'))
if item not in item_list:
warnings.append(PmagException('not in data object'))
return warnings
def check_item_for_parent(item, item_type, parent_type):
"""
Make sure that item has a parent of the correct type
"""
if not parent_type:
return []
if not isinstance(item, Pmag_object):
return []
warnings = []
parent = item.get_parent()
parent_list, parent_class, parent_constructor = self.data_lists[parent_type]
if not parent or not parent.name:
warnings.append(PmagException('missing parent'))
return warnings
if not isinstance(parent, parent_class):
warnings.append(PmagException('invalid parent type', parent))
if not parent in parent_list:
warnings.append(PmagException('parent not in data object', parent))
return warnings
def check_item_for_children(item, child_type):
"""
Make sure that any children are of the correct type,
and are in the data object
"""
if not child_type:
return []
warnings = []
children = item.children
child_list, child_class, child_constructor = self.data_lists[child_type]
for child in children:
if not isinstance(child, child_class):
warnings.append(PmagException('child has wrong type', child))
if not child in child_list:
warnings.append(PmagException('child not in data object', child))
return warnings
warnings = {}
type_ind = self.ancestry.index(item_type)
parent_type = self.ancestry[type_ind+1]
child_type = self.ancestry[type_ind-1]
for item in item_list:
#warnings[item] = []
type_warnings = check_item_type(item, item_type)
append_or_create_dict_item('type', warnings, item, type_warnings)
parent_warnings = check_item_for_parent(item, item_type, parent_type)
append_or_create_dict_item('parent', warnings, item, parent_warnings)
child_warnings = check_item_for_children(item, child_type)
append_or_create_dict_item('children', warnings, item, child_warnings)
return warnings | python | def validate_items(self, item_list, item_type):
"""
Go through a list Pmag_objects and check for:
parent errors,
children errors,
type errors.
Return a dictionary of exceptions in this format:
{sample1: {'parent': [warning1, warning2, warning3], 'child': [warning1, warning2]},
sample2: {'child': [warning1], 'type': [warning1, warning2]},
...}
"""
def append_or_create_dict_item(warning_type, dictionary, key, value):
"""
Add to dictionary with this format:
{key1: {warning_type1: [value1, value2], warning_type2: [value1]},
...}
"""
if not value:
return
try:
name = key.name
except AttributeError:
name = key
if not name in dictionary:
dictionary[name] = {}
if not warning_type in dictionary[name]:
dictionary[name][warning_type] = []
for v in value:
dictionary[name][warning_type].append(v)
def check_item_type(item, item_type):#, warnings=None):
"""
Make sure that item has appropriate type, and is in the data object.
"""
warnings = []
item_list, item_class, item_constructor = self.data_lists[item_type]
if not isinstance(item, item_class):
warnings.append(PmagException('wrong type'))
if item not in item_list:
warnings.append(PmagException('not in data object'))
return warnings
def check_item_for_parent(item, item_type, parent_type):
"""
Make sure that item has a parent of the correct type
"""
if not parent_type:
return []
if not isinstance(item, Pmag_object):
return []
warnings = []
parent = item.get_parent()
parent_list, parent_class, parent_constructor = self.data_lists[parent_type]
if not parent or not parent.name:
warnings.append(PmagException('missing parent'))
return warnings
if not isinstance(parent, parent_class):
warnings.append(PmagException('invalid parent type', parent))
if not parent in parent_list:
warnings.append(PmagException('parent not in data object', parent))
return warnings
def check_item_for_children(item, child_type):
"""
Make sure that any children are of the correct type,
and are in the data object
"""
if not child_type:
return []
warnings = []
children = item.children
child_list, child_class, child_constructor = self.data_lists[child_type]
for child in children:
if not isinstance(child, child_class):
warnings.append(PmagException('child has wrong type', child))
if not child in child_list:
warnings.append(PmagException('child not in data object', child))
return warnings
warnings = {}
type_ind = self.ancestry.index(item_type)
parent_type = self.ancestry[type_ind+1]
child_type = self.ancestry[type_ind-1]
for item in item_list:
#warnings[item] = []
type_warnings = check_item_type(item, item_type)
append_or_create_dict_item('type', warnings, item, type_warnings)
parent_warnings = check_item_for_parent(item, item_type, parent_type)
append_or_create_dict_item('parent', warnings, item, parent_warnings)
child_warnings = check_item_for_children(item, child_type)
append_or_create_dict_item('children', warnings, item, child_warnings)
return warnings | ['def', 'validate_items', '(', 'self', ',', 'item_list', ',', 'item_type', ')', ':', 'def', 'append_or_create_dict_item', '(', 'warning_type', ',', 'dictionary', ',', 'key', ',', 'value', ')', ':', '"""\n Add to dictionary with this format:\n {key1: {warning_type1: [value1, value2], warning_type2: [value1]},\n ...}\n """', 'if', 'not', 'value', ':', 'return', 'try', ':', 'name', '=', 'key', '.', 'name', 'except', 'AttributeError', ':', 'name', '=', 'key', 'if', 'not', 'name', 'in', 'dictionary', ':', 'dictionary', '[', 'name', ']', '=', '{', '}', 'if', 'not', 'warning_type', 'in', 'dictionary', '[', 'name', ']', ':', 'dictionary', '[', 'name', ']', '[', 'warning_type', ']', '=', '[', ']', 'for', 'v', 'in', 'value', ':', 'dictionary', '[', 'name', ']', '[', 'warning_type', ']', '.', 'append', '(', 'v', ')', 'def', 'check_item_type', '(', 'item', ',', 'item_type', ')', ':', '#, warnings=None):', '"""\n Make sure that item has appropriate type, and is in the data object.\n """', 'warnings', '=', '[', ']', 'item_list', ',', 'item_class', ',', 'item_constructor', '=', 'self', '.', 'data_lists', '[', 'item_type', ']', 'if', 'not', 'isinstance', '(', 'item', ',', 'item_class', ')', ':', 'warnings', '.', 'append', '(', 'PmagException', '(', "'wrong type'", ')', ')', 'if', 'item', 'not', 'in', 'item_list', ':', 'warnings', '.', 'append', '(', 'PmagException', '(', "'not in data object'", ')', ')', 'return', 'warnings', 'def', 'check_item_for_parent', '(', 'item', ',', 'item_type', ',', 'parent_type', ')', ':', '"""\n Make sure that item has a parent of the correct type\n """', 'if', 'not', 'parent_type', ':', 'return', '[', ']', 'if', 'not', 'isinstance', '(', 'item', ',', 'Pmag_object', ')', ':', 'return', '[', ']', 'warnings', '=', '[', ']', 'parent', '=', 'item', '.', 'get_parent', '(', ')', 'parent_list', ',', 'parent_class', ',', 'parent_constructor', '=', 'self', '.', 'data_lists', '[', 'parent_type', ']', 'if', 'not', 'parent', 'or', 'not', 'parent', '.', 'name', ':', 'warnings', '.', 'append', '(', 'PmagException', '(', "'missing parent'", ')', ')', 'return', 'warnings', 'if', 'not', 'isinstance', '(', 'parent', ',', 'parent_class', ')', ':', 'warnings', '.', 'append', '(', 'PmagException', '(', "'invalid parent type'", ',', 'parent', ')', ')', 'if', 'not', 'parent', 'in', 'parent_list', ':', 'warnings', '.', 'append', '(', 'PmagException', '(', "'parent not in data object'", ',', 'parent', ')', ')', 'return', 'warnings', 'def', 'check_item_for_children', '(', 'item', ',', 'child_type', ')', ':', '"""\n Make sure that any children are of the correct type,\n and are in the data object\n """', 'if', 'not', 'child_type', ':', 'return', '[', ']', 'warnings', '=', '[', ']', 'children', '=', 'item', '.', 'children', 'child_list', ',', 'child_class', ',', 'child_constructor', '=', 'self', '.', 'data_lists', '[', 'child_type', ']', 'for', 'child', 'in', 'children', ':', 'if', 'not', 'isinstance', '(', 'child', ',', 'child_class', ')', ':', 'warnings', '.', 'append', '(', 'PmagException', '(', "'child has wrong type'", ',', 'child', ')', ')', 'if', 'not', 'child', 'in', 'child_list', ':', 'warnings', '.', 'append', '(', 'PmagException', '(', "'child not in data object'", ',', 'child', ')', ')', 'return', 'warnings', 'warnings', '=', '{', '}', 'type_ind', '=', 'self', '.', 'ancestry', '.', 'index', '(', 'item_type', ')', 'parent_type', '=', 'self', '.', 'ancestry', '[', 'type_ind', '+', '1', ']', 'child_type', '=', 'self', '.', 'ancestry', '[', 'type_ind', '-', '1', ']', 'for', 
'item', 'in', 'item_list', ':', '#warnings[item] = []', 'type_warnings', '=', 'check_item_type', '(', 'item', ',', 'item_type', ')', 'append_or_create_dict_item', '(', "'type'", ',', 'warnings', ',', 'item', ',', 'type_warnings', ')', 'parent_warnings', '=', 'check_item_for_parent', '(', 'item', ',', 'item_type', ',', 'parent_type', ')', 'append_or_create_dict_item', '(', "'parent'", ',', 'warnings', ',', 'item', ',', 'parent_warnings', ')', 'child_warnings', '=', 'check_item_for_children', '(', 'item', ',', 'child_type', ')', 'append_or_create_dict_item', '(', "'children'", ',', 'warnings', ',', 'item', ',', 'child_warnings', ')', 'return', 'warnings'] | Go through a list Pmag_objects and check for:
parent errors,
children errors,
type errors.
Return a dictionary of exceptions in this format:
{sample1: {'parent': [warning1, warning2, warning3], 'child': [warning1, warning2]},
sample2: {'child': [warning1], 'type': [warning1, warning2]},
...} | ['Go', 'through', 'a', 'list', 'Pmag_objects', 'and', 'check', 'for', ':', 'parent', 'errors', 'children', 'errors', 'type', 'errors', '.', 'Return', 'a', 'dictionary', 'of', 'exceptions', 'in', 'this', 'format', ':', '{', 'sample1', ':', '{', 'parent', ':', '[', 'warning1', 'warning2', 'warning3', ']', 'child', ':', '[', 'warning1', 'warning2', ']', '}', 'sample2', ':', '{', 'child', ':', '[', 'warning1', ']', 'type', ':', '[', 'warning1', 'warning2', ']', '}', '...', '}'] | train | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/builder2.py#L1210-L1302 |
7,950 | foliant-docs/foliantcontrib.init | setup.py | get_templates | def get_templates(path: Path) -> List[str]:
'''List all files in ``templates`` directory, including all subdirectories.
The resulting list contains UNIX-like relative paths starting with ``templates``.
'''
result = []
for item in path.glob('**/*'):
if item.is_file() and not item.name.startswith('_'):
result.append(item.relative_to(path.parent).as_posix())
return result | python | def get_templates(path: Path) -> List[str]:
'''List all files in ``templates`` directory, including all subdirectories.
The resulting list contains UNIX-like relative paths starting with ``templates``.
'''
result = []
for item in path.glob('**/*'):
if item.is_file() and not item.name.startswith('_'):
result.append(item.relative_to(path.parent).as_posix())
return result | ['def', 'get_templates', '(', 'path', ':', 'Path', ')', '->', 'List', '[', 'str', ']', ':', 'result', '=', '[', ']', 'for', 'item', 'in', 'path', '.', 'glob', '(', "'**/*'", ')', ':', 'if', 'item', '.', 'is_file', '(', ')', 'and', 'not', 'item', '.', 'name', '.', 'startswith', '(', "'_'", ')', ':', 'result', '.', 'append', '(', 'item', '.', 'relative_to', '(', 'path', '.', 'parent', ')', '.', 'as_posix', '(', ')', ')', 'return', 'result'] | List all files in ``templates`` directory, including all subdirectories.
The resulting list contains UNIX-like relative paths starting with ``templates``. | ['List', 'all', 'files', 'in', 'templates', 'directory', 'including', 'all', 'subdirectories', '.'] | train | https://github.com/foliant-docs/foliantcontrib.init/blob/39aa38949b6270a750c800b79b4e71dd827f28d8/setup.py#L14-L26 |
7,951 | gebn/nibble | nibble/expression/parser.py | Parser.p_information_duration_speed | def p_information_duration_speed(self, p):
'information : duration AT speed'
logger.debug('information = duration %s at speed %s', p[1], p[3])
p[0] = p[3].for_duration(p[1]) | python | def p_information_duration_speed(self, p):
'information : duration AT speed'
logger.debug('information = duration %s at speed %s', p[1], p[3])
p[0] = p[3].for_duration(p[1]) | ['def', 'p_information_duration_speed', '(', 'self', ',', 'p', ')', ':', 'logger', '.', 'debug', '(', "'information = duration %s at speed %s'", ',', 'p', '[', '1', ']', ',', 'p', '[', '3', ']', ')', 'p', '[', '0', ']', '=', 'p', '[', '3', ']', '.', 'for_duration', '(', 'p', '[', '1', ']', ')'] | information : duration AT speed | ['information', ':', 'duration', 'AT', 'speed'] | train | https://github.com/gebn/nibble/blob/e82a2c43509ed38f3d039040591cc630fa676cb0/nibble/expression/parser.py#L67-L70 |
7,952 | log2timeline/dfvfs | dfvfs/vfs/bde_file_system.py | BDEFileSystem.GetRootFileEntry | def GetRootFileEntry(self):
"""Retrieves the root file entry.
Returns:
BDEFileEntry: file entry or None.
"""
path_spec = bde_path_spec.BDEPathSpec(parent=self._path_spec.parent)
return self.GetFileEntryByPathSpec(path_spec) | python | def GetRootFileEntry(self):
"""Retrieves the root file entry.
Returns:
BDEFileEntry: file entry or None.
"""
path_spec = bde_path_spec.BDEPathSpec(parent=self._path_spec.parent)
return self.GetFileEntryByPathSpec(path_spec) | ['def', 'GetRootFileEntry', '(', 'self', ')', ':', 'path_spec', '=', 'bde_path_spec', '.', 'BDEPathSpec', '(', 'parent', '=', 'self', '.', '_path_spec', '.', 'parent', ')', 'return', 'self', '.', 'GetFileEntryByPathSpec', '(', 'path_spec', ')'] | Retrieves the root file entry.
Returns:
BDEFileEntry: file entry or None. | ['Retrieves', 'the', 'root', 'file', 'entry', '.'] | train | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/bde_file_system.py#L98-L105 |
7,953 | evhub/coconut | coconut/compiler/util.py | disable_inside | def disable_inside(item, *elems, **kwargs):
"""Prevent elems from matching inside of item.
Returns (item with elem disabled, *new versions of elems).
"""
_invert = kwargs.get("_invert", False)
internal_assert(set(kwargs.keys()) <= set(("_invert",)), "excess keyword arguments passed to disable_inside")
level = [0] # number of wrapped items deep we are; in a list to allow modification
@contextmanager
def manage_item(self, instring, loc):
level[0] += 1
try:
yield
finally:
level[0] -= 1
yield Wrap(item, manage_item)
@contextmanager
def manage_elem(self, instring, loc):
if level[0] == 0 if not _invert else level[0] > 0:
yield
else:
raise ParseException(instring, loc, self.errmsg, self)
for elem in elems:
yield Wrap(elem, manage_elem) | python | def disable_inside(item, *elems, **kwargs):
"""Prevent elems from matching inside of item.
Returns (item with elem disabled, *new versions of elems).
"""
_invert = kwargs.get("_invert", False)
internal_assert(set(kwargs.keys()) <= set(("_invert",)), "excess keyword arguments passed to disable_inside")
level = [0] # number of wrapped items deep we are; in a list to allow modification
@contextmanager
def manage_item(self, instring, loc):
level[0] += 1
try:
yield
finally:
level[0] -= 1
yield Wrap(item, manage_item)
@contextmanager
def manage_elem(self, instring, loc):
if level[0] == 0 if not _invert else level[0] > 0:
yield
else:
raise ParseException(instring, loc, self.errmsg, self)
for elem in elems:
yield Wrap(elem, manage_elem) | ['def', 'disable_inside', '(', 'item', ',', '*', 'elems', ',', '*', '*', 'kwargs', ')', ':', '_invert', '=', 'kwargs', '.', 'get', '(', '"_invert"', ',', 'False', ')', 'internal_assert', '(', 'set', '(', 'kwargs', '.', 'keys', '(', ')', ')', '<=', 'set', '(', '(', '"_invert"', ',', ')', ')', ',', '"excess keyword arguments passed to disable_inside"', ')', 'level', '=', '[', '0', ']', '# number of wrapped items deep we are; in a list to allow modification', '@', 'contextmanager', 'def', 'manage_item', '(', 'self', ',', 'instring', ',', 'loc', ')', ':', 'level', '[', '0', ']', '+=', '1', 'try', ':', 'yield', 'finally', ':', 'level', '[', '0', ']', '-=', '1', 'yield', 'Wrap', '(', 'item', ',', 'manage_item', ')', '@', 'contextmanager', 'def', 'manage_elem', '(', 'self', ',', 'instring', ',', 'loc', ')', ':', 'if', 'level', '[', '0', ']', '==', '0', 'if', 'not', '_invert', 'else', 'level', '[', '0', ']', '>', '0', ':', 'yield', 'else', ':', 'raise', 'ParseException', '(', 'instring', ',', 'loc', ',', 'self', '.', 'errmsg', ',', 'self', ')', 'for', 'elem', 'in', 'elems', ':', 'yield', 'Wrap', '(', 'elem', ',', 'manage_elem', ')'] | Prevent elems from matching inside of item.
Returns (item with elem disabled, *new versions of elems). | ['Prevent', 'elems', 'from', 'matching', 'inside', 'of', 'item', '.'] | train | https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/util.py#L533-L561 |
7,954 | berkeley-cocosci/Wallace | wallace/command_line.py | setup | def setup():
"""Walk the user though the Wallace setup."""
# Create the Wallace config file if it does not already exist.
config_name = ".wallaceconfig"
config_path = os.path.join(os.path.expanduser("~"), config_name)
if os.path.isfile(config_path):
log("Wallace config file already exists.", chevrons=False)
else:
log("Creating Wallace config file at ~/.wallaceconfig...",
chevrons=False)
wallace_module_path = os.path.dirname(os.path.realpath(__file__))
src = os.path.join(wallace_module_path, "config", config_name)
shutil.copyfile(src, config_path) | python | def setup():
"""Walk the user though the Wallace setup."""
# Create the Wallace config file if it does not already exist.
config_name = ".wallaceconfig"
config_path = os.path.join(os.path.expanduser("~"), config_name)
if os.path.isfile(config_path):
log("Wallace config file already exists.", chevrons=False)
else:
log("Creating Wallace config file at ~/.wallaceconfig...",
chevrons=False)
wallace_module_path = os.path.dirname(os.path.realpath(__file__))
src = os.path.join(wallace_module_path, "config", config_name)
shutil.copyfile(src, config_path) | ['def', 'setup', '(', ')', ':', '# Create the Wallace config file if it does not already exist.', 'config_name', '=', '".wallaceconfig"', 'config_path', '=', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'path', '.', 'expanduser', '(', '"~"', ')', ',', 'config_name', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'config_path', ')', ':', 'log', '(', '"Wallace config file already exists."', ',', 'chevrons', '=', 'False', ')', 'else', ':', 'log', '(', '"Creating Wallace config file at ~/.wallaceconfig..."', ',', 'chevrons', '=', 'False', ')', 'wallace_module_path', '=', 'os', '.', 'path', '.', 'dirname', '(', 'os', '.', 'path', '.', 'realpath', '(', '__file__', ')', ')', 'src', '=', 'os', '.', 'path', '.', 'join', '(', 'wallace_module_path', ',', '"config"', ',', 'config_name', ')', 'shutil', '.', 'copyfile', '(', 'src', ',', 'config_path', ')'] | Walk the user though the Wallace setup. | ['Walk', 'the', 'user', 'though', 'the', 'Wallace', 'setup', '.'] | train | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/command_line.py#L66-L80 |
7,955 | user-cont/conu | conu/utils/http_client.py | get_url | def get_url(path, host, port, method="http"):
"""
make url from path, host and port
:param method: str
:param path: str, path within the request, e.g. "/api/version"
:param host: str
:param port: str or int
:return: str
"""
return urlunsplit(
(method, "%s:%s" % (host, port), path, "", "")
) | python | def get_url(path, host, port, method="http"):
"""
make url from path, host and port
:param method: str
:param path: str, path within the request, e.g. "/api/version"
:param host: str
:param port: str or int
:return: str
"""
return urlunsplit(
(method, "%s:%s" % (host, port), path, "", "")
) | ['def', 'get_url', '(', 'path', ',', 'host', ',', 'port', ',', 'method', '=', '"http"', ')', ':', 'return', 'urlunsplit', '(', '(', 'method', ',', '"%s:%s"', '%', '(', 'host', ',', 'port', ')', ',', 'path', ',', '""', ',', '""', ')', ')'] | make url from path, host and port
:param method: str
:param path: str, path within the request, e.g. "/api/version"
:param host: str
:param port: str or int
:return: str | ['make', 'url', 'from', 'path', 'host', 'and', 'port'] | train | https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/utils/http_client.py#L20-L32 |
7,956 | mushkevych/scheduler | synergy/system/process_helper.py | get_process_pid | def get_process_pid(process_name):
""" check for process' pid file and returns pid from there """
try:
pid_filename = get_pid_filename(process_name)
with open(pid_filename, mode='r') as pid_file:
pid = int(pid_file.read().strip())
except IOError:
pid = None
return pid | python | def get_process_pid(process_name):
""" check for process' pid file and returns pid from there """
try:
pid_filename = get_pid_filename(process_name)
with open(pid_filename, mode='r') as pid_file:
pid = int(pid_file.read().strip())
except IOError:
pid = None
return pid | ['def', 'get_process_pid', '(', 'process_name', ')', ':', 'try', ':', 'pid_filename', '=', 'get_pid_filename', '(', 'process_name', ')', 'with', 'open', '(', 'pid_filename', ',', 'mode', '=', "'r'", ')', 'as', 'pid_file', ':', 'pid', '=', 'int', '(', 'pid_file', '.', 'read', '(', ')', '.', 'strip', '(', ')', ')', 'except', 'IOError', ':', 'pid', '=', 'None', 'return', 'pid'] | check for process' pid file and returns pid from there | ['check', 'for', 'process', 'pid', 'file', 'and', 'returns', 'pid', 'from', 'there'] | train | https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/system/process_helper.py#L14-L22 |
7,957 | anayjoshi/platypus | platypus/cfg/ast_to_cfg.py | get_cfg | def get_cfg(ast_func):
"""
Traverses the AST and returns the corresponding CFG
:param ast_func: The AST representation of function
:type ast_func: ast.Function
:returns: The CFG representation of the function
:rtype: cfg.Function
"""
cfg_func = cfg.Function()
for ast_var in ast_func.input_variable_list:
cfg_var = cfg_func.get_variable(ast_var.name)
cfg_func.add_input_variable(cfg_var)
for ast_var in ast_func.output_variable_list:
cfg_var = cfg_func.get_variable(ast_var.name)
cfg_func.add_output_variable(cfg_var)
bb_start = cfg.BasicBlock()
cfg_func.add_basic_block(bb_start)
for stmt in ast_func.body:
bb_temp = bb_start
bb_temp = process_cfg(stmt, bb_temp, cfg_func)
cfg_func.clean_up()
cfg_func.add_summary(ast_func.summary)
return cfg_func | python | def get_cfg(ast_func):
"""
Traverses the AST and returns the corresponding CFG
:param ast_func: The AST representation of function
:type ast_func: ast.Function
:returns: The CFG representation of the function
:rtype: cfg.Function
"""
cfg_func = cfg.Function()
for ast_var in ast_func.input_variable_list:
cfg_var = cfg_func.get_variable(ast_var.name)
cfg_func.add_input_variable(cfg_var)
for ast_var in ast_func.output_variable_list:
cfg_var = cfg_func.get_variable(ast_var.name)
cfg_func.add_output_variable(cfg_var)
bb_start = cfg.BasicBlock()
cfg_func.add_basic_block(bb_start)
for stmt in ast_func.body:
bb_temp = bb_start
bb_temp = process_cfg(stmt, bb_temp, cfg_func)
cfg_func.clean_up()
cfg_func.add_summary(ast_func.summary)
return cfg_func | ['def', 'get_cfg', '(', 'ast_func', ')', ':', 'cfg_func', '=', 'cfg', '.', 'Function', '(', ')', 'for', 'ast_var', 'in', 'ast_func', '.', 'input_variable_list', ':', 'cfg_var', '=', 'cfg_func', '.', 'get_variable', '(', 'ast_var', '.', 'name', ')', 'cfg_func', '.', 'add_input_variable', '(', 'cfg_var', ')', 'for', 'ast_var', 'in', 'ast_func', '.', 'output_variable_list', ':', 'cfg_var', '=', 'cfg_func', '.', 'get_variable', '(', 'ast_var', '.', 'name', ')', 'cfg_func', '.', 'add_output_variable', '(', 'cfg_var', ')', 'bb_start', '=', 'cfg', '.', 'BasicBlock', '(', ')', 'cfg_func', '.', 'add_basic_block', '(', 'bb_start', ')', 'for', 'stmt', 'in', 'ast_func', '.', 'body', ':', 'bb_temp', '=', 'bb_start', 'bb_temp', '=', 'process_cfg', '(', 'stmt', ',', 'bb_temp', ',', 'cfg_func', ')', 'cfg_func', '.', 'clean_up', '(', ')', 'cfg_func', '.', 'add_summary', '(', 'ast_func', '.', 'summary', ')', 'return', 'cfg_func'] | Traverses the AST and returns the corresponding CFG
:param ast_func: The AST representation of function
:type ast_func: ast.Function
:returns: The CFG representation of the function
:rtype: cfg.Function | ['Traverses', 'the', 'AST', 'and', 'returns', 'the', 'corresponding', 'CFG'] | train | https://github.com/anayjoshi/platypus/blob/71712f58c99651efbd2e6dfd75a9b1228d42e9ef/platypus/cfg/ast_to_cfg.py#L4-L28 |
7,958 | quantumlib/Cirq | cirq/google/sim/mem_manager.py | SharedMemManager._create_array | def _create_array(self, arr: np.ndarray) -> int:
"""Returns the handle of a RawArray created from the given numpy array.
Args:
arr: A numpy ndarray.
Returns:
The handle (int) of the array.
Raises:
ValueError: if arr is not a ndarray or of an unsupported dtype. If
the array is of an unsupported type, using a view of the array to
another dtype and then converting on get is often a work around.
"""
if not isinstance(arr, np.ndarray):
raise ValueError('Array is not a numpy ndarray.')
try:
c_arr = np.ctypeslib.as_ctypes(arr)
except (KeyError, NotImplementedError):
raise ValueError(
'Array has unsupported dtype {}.'.format(arr.dtype))
# pylint: disable=protected-access
raw_arr = RawArray(c_arr._type_, c_arr)
with self._lock:
if self._count >= len(self._arrays):
self._arrays += len(self._arrays) * [None]
self._get_next_free()
# Note storing the shape is a workaround for an issue encountered
# when upgrading to numpy 1.15.
# See https://github.com/numpy/numpy/issues/11636
self._arrays[self._current] = (raw_arr, arr.shape)
self._count += 1
return self._current | python | def _create_array(self, arr: np.ndarray) -> int:
"""Returns the handle of a RawArray created from the given numpy array.
Args:
arr: A numpy ndarray.
Returns:
The handle (int) of the array.
Raises:
ValueError: if arr is not a ndarray or of an unsupported dtype. If
the array is of an unsupported type, using a view of the array to
another dtype and then converting on get is often a work around.
"""
if not isinstance(arr, np.ndarray):
raise ValueError('Array is not a numpy ndarray.')
try:
c_arr = np.ctypeslib.as_ctypes(arr)
except (KeyError, NotImplementedError):
raise ValueError(
'Array has unsupported dtype {}.'.format(arr.dtype))
# pylint: disable=protected-access
raw_arr = RawArray(c_arr._type_, c_arr)
with self._lock:
if self._count >= len(self._arrays):
self._arrays += len(self._arrays) * [None]
self._get_next_free()
# Note storing the shape is a workaround for an issue encountered
# when upgrading to numpy 1.15.
# See https://github.com/numpy/numpy/issues/11636
self._arrays[self._current] = (raw_arr, arr.shape)
self._count += 1
return self._current | ['def', '_create_array', '(', 'self', ',', 'arr', ':', 'np', '.', 'ndarray', ')', '->', 'int', ':', 'if', 'not', 'isinstance', '(', 'arr', ',', 'np', '.', 'ndarray', ')', ':', 'raise', 'ValueError', '(', "'Array is not a numpy ndarray.'", ')', 'try', ':', 'c_arr', '=', 'np', '.', 'ctypeslib', '.', 'as_ctypes', '(', 'arr', ')', 'except', '(', 'KeyError', ',', 'NotImplementedError', ')', ':', 'raise', 'ValueError', '(', "'Array has unsupported dtype {}.'", '.', 'format', '(', 'arr', '.', 'dtype', ')', ')', '# pylint: disable=protected-access', 'raw_arr', '=', 'RawArray', '(', 'c_arr', '.', '_type_', ',', 'c_arr', ')', 'with', 'self', '.', '_lock', ':', 'if', 'self', '.', '_count', '>=', 'len', '(', 'self', '.', '_arrays', ')', ':', 'self', '.', '_arrays', '+=', 'len', '(', 'self', '.', '_arrays', ')', '*', '[', 'None', ']', 'self', '.', '_get_next_free', '(', ')', '# Note storing the shape is a workaround for an issue encountered', '# when upgrading to numpy 1.15.', '# See https://github.com/numpy/numpy/issues/11636', 'self', '.', '_arrays', '[', 'self', '.', '_current', ']', '=', '(', 'raw_arr', ',', 'arr', '.', 'shape', ')', 'self', '.', '_count', '+=', '1', 'return', 'self', '.', '_current'] | Returns the handle of a RawArray created from the given numpy array.
Args:
arr: A numpy ndarray.
Returns:
The handle (int) of the array.
Raises:
ValueError: if arr is not a ndarray or of an unsupported dtype. If
the array is of an unsupported type, using a view of the array to
another dtype and then converting on get is often a work around. | ['Returns', 'the', 'handle', 'of', 'a', 'RawArray', 'created', 'from', 'the', 'given', 'numpy', 'array', '.'] | train | https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/google/sim/mem_manager.py#L53-L91 |
7,959 | vnmabus/dcor | dcor/_dcor.py | _u_distance_covariance_sqr_naive | def _u_distance_covariance_sqr_naive(x, y, exponent=1):
"""
Naive unbiased estimator for distance covariance.
Computes the unbiased estimator for distance covariance between two
matrices, using an :math:`O(N^2)` algorithm.
"""
a = _u_distance_matrix(x, exponent=exponent)
b = _u_distance_matrix(y, exponent=exponent)
return u_product(a, b) | python | def _u_distance_covariance_sqr_naive(x, y, exponent=1):
"""
Naive unbiased estimator for distance covariance.
Computes the unbiased estimator for distance covariance between two
matrices, using an :math:`O(N^2)` algorithm.
"""
a = _u_distance_matrix(x, exponent=exponent)
b = _u_distance_matrix(y, exponent=exponent)
return u_product(a, b) | ['def', '_u_distance_covariance_sqr_naive', '(', 'x', ',', 'y', ',', 'exponent', '=', '1', ')', ':', 'a', '=', '_u_distance_matrix', '(', 'x', ',', 'exponent', '=', 'exponent', ')', 'b', '=', '_u_distance_matrix', '(', 'y', ',', 'exponent', '=', 'exponent', ')', 'return', 'u_product', '(', 'a', ',', 'b', ')'] | Naive unbiased estimator for distance covariance.
Computes the unbiased estimator for distance covariance between two
matrices, using an :math:`O(N^2)` algorithm. | ['Naive', 'unbiased', 'estimator', 'for', 'distance', 'covariance', '.'] | train | https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor.py#L47-L57 |
7,960 | CalebBell/fluids | fluids/jet_pump.py | liquid_jet_pump_ancillary | def liquid_jet_pump_ancillary(rhop, rhos, Kp, Ks, d_nozzle=None, d_mixing=None,
Qp=None, Qs=None, P1=None, P2=None):
r'''Calculates the remaining variable in a liquid jet pump when solving for
one if the inlet variables only and the rest of them are known. The
equation comes from conservation of energy and momentum in the mixing
chamber.
The variable to be solved for must be one of `d_nozzle`, `d_mixing`,
`Qp`, `Qs`, `P1`, or `P2`.
.. math::
P_1 - P_2 = \frac{1}{2}\rho_pV_n^2(1+K_p)
- \frac{1}{2}\rho_s V_3^2(1+K_s)
Rearrange to express V3 in terms of Vn, and using the density ratio `C`,
the expression becomes:
.. math::
P_1 - P_2 = \frac{1}{2}\rho_p V_n^2\left[(1+K_p) - C(1+K_s)
\left(\frac{MR}{1-R}\right)^2\right]
Using the primary nozzle area and flow rate:
.. math::
P_1 - P_2 = \frac{1}{2}\rho_p \left(\frac{Q_p}{A_n}\right)^2
\left[(1+K_p) - C(1+K_s) \left(\frac{MR}{1-R}\right)^2\right]
For `P`, `P2`, `Qs`, and `Qp`, the equation can be rearranged explicitly
for them. For `d_mixing` and `d_nozzle`, a bounded solver is used searching
between 1E-9 m and 20 times the other diameter which was specified.
Parameters
----------
rhop : float
The density of the primary (motive) fluid, [kg/m^3]
rhos : float
The density of the secondary fluid (drawn from the vacuum chamber),
[kg/m^3]
Kp : float
The primary nozzle loss coefficient, [-]
Ks : float
The secondary inlet loss coefficient, [-]
d_nozzle : float, optional
The inside diameter of the primary fluid's nozle, [m]
d_mixing : float, optional
The diameter of the mixing chamber, [m]
Qp : float, optional
The volumetric flow rate of the primary fluid, [m^3/s]
Qs : float, optional
The volumetric flow rate of the secondary fluid, [m^3/s]
P1 : float, optional
The pressure of the primary fluid entering its nozzle, [Pa]
P2 : float, optional
The pressure of the secondary fluid at the entry of the ejector, [Pa]
Returns
-------
solution : float
The parameter not specified (one of `d_nozzle`, `d_mixing`,
`Qp`, `Qs`, `P1`, or `P2`), (units of `m`, `m`, `m^3/s`, `m^3/s`,
`Pa`, or `Pa` respectively)
Notes
-----
The following SymPy code was used to obtain the analytical formulas (
they are not shown here due to their length):
>>> from sympy import *
>>> A_nozzle, A_mixing, Qs, Qp, P1, P2, rhos, rhop, Ks, Kp = symbols('A_nozzle, A_mixing, Qs, Qp, P1, P2, rhos, rhop, Ks, Kp')
>>> R = A_nozzle/A_mixing
>>> M = Qs/Qp
>>> C = rhos/rhop
>>> rhs = rhop/2*(Qp/A_nozzle)**2*((1+Kp) - C*(1 + Ks)*((M*R)/(1-R))**2 )
>>> new = Eq(P1 - P2, rhs)
>>> #solve(new, Qp)
>>> #solve(new, Qs)
>>> #solve(new, P1)
>>> #solve(new, P2)
Examples
--------
Calculating primary fluid nozzle inlet pressure P1:
>>> liquid_jet_pump_ancillary(rhop=998., rhos=1098., Ks=0.11, Kp=.04,
... P2=133600, Qp=0.01, Qs=0.01, d_mixing=0.045, d_nozzle=0.02238)
426434.60314398084
References
----------
.. [1] Ejectors and Jet Pumps. Design and Performance for Incompressible
Liquid Flow. 85032. ESDU International PLC, 1985.
'''
unknowns = sum(i is None for i in (d_nozzle, d_mixing, Qs, Qp, P1, P2))
if unknowns > 1:
raise Exception('Too many unknowns')
elif unknowns < 1:
raise Exception('Overspecified')
C = rhos/rhop
if Qp is not None and Qs is not None:
M = Qs/Qp
if d_nozzle is not None:
A_nozzle = pi/4*d_nozzle*d_nozzle
if d_mixing is not None:
A_mixing = pi/4*d_mixing*d_mixing
R = A_nozzle/A_mixing
if P1 is None:
return rhop/2*(Qp/A_nozzle)**2*((1+Kp) - C*(1 + Ks)*((M*R)/(1-R))**2 ) + P2
elif P2 is None:
return -rhop/2*(Qp/A_nozzle)**2*((1+Kp) - C*(1 + Ks)*((M*R)/(1-R))**2 ) + P1
elif Qs is None:
try:
return ((-2*A_nozzle**2*P1 + 2*A_nozzle**2*P2 + Kp*Qp**2*rhop + Qp**2*rhop)/(C*rhop*(Ks + 1)))**0.5*(A_mixing - A_nozzle)/A_nozzle
except ValueError:
return -1j
elif Qp is None:
return A_nozzle*((2*A_mixing**2*P1 - 2*A_mixing**2*P2 - 4*A_mixing*A_nozzle*P1 + 4*A_mixing*A_nozzle*P2 + 2*A_nozzle**2*P1 - 2*A_nozzle**2*P2 + C*Ks*Qs**2*rhop + C*Qs**2*rhop)/(rhop*(Kp + 1)))**0.5/(A_mixing - A_nozzle)
elif d_nozzle is None:
def err(d_nozzle):
return P1 - liquid_jet_pump_ancillary(rhop=rhop, rhos=rhos, Kp=Kp, Ks=Ks, d_nozzle=d_nozzle, d_mixing=d_mixing, Qp=Qp, Qs=Qs,
P1=None, P2=P2)
return brenth(err, 1E-9, d_mixing*20)
elif d_mixing is None:
def err(d_mixing):
return P1 - liquid_jet_pump_ancillary(rhop=rhop, rhos=rhos, Kp=Kp, Ks=Ks, d_nozzle=d_nozzle, d_mixing=d_mixing, Qp=Qp, Qs=Qs,
P1=None, P2=P2)
try:
return brenth(err, 1E-9, d_nozzle*20)
except:
return newton(err, d_nozzle*2) | python | def liquid_jet_pump_ancillary(rhop, rhos, Kp, Ks, d_nozzle=None, d_mixing=None,
Qp=None, Qs=None, P1=None, P2=None):
r'''Calculates the remaining variable in a liquid jet pump when solving for
one if the inlet variables only and the rest of them are known. The
equation comes from conservation of energy and momentum in the mixing
chamber.
The variable to be solved for must be one of `d_nozzle`, `d_mixing`,
`Qp`, `Qs`, `P1`, or `P2`.
.. math::
P_1 - P_2 = \frac{1}{2}\rho_pV_n^2(1+K_p)
- \frac{1}{2}\rho_s V_3^2(1+K_s)
Rearrange to express V3 in terms of Vn, and using the density ratio `C`,
the expression becomes:
.. math::
P_1 - P_2 = \frac{1}{2}\rho_p V_n^2\left[(1+K_p) - C(1+K_s)
\left(\frac{MR}{1-R}\right)^2\right]
Using the primary nozzle area and flow rate:
.. math::
P_1 - P_2 = \frac{1}{2}\rho_p \left(\frac{Q_p}{A_n}\right)^2
\left[(1+K_p) - C(1+K_s) \left(\frac{MR}{1-R}\right)^2\right]
For `P`, `P2`, `Qs`, and `Qp`, the equation can be rearranged explicitly
for them. For `d_mixing` and `d_nozzle`, a bounded solver is used searching
between 1E-9 m and 20 times the other diameter which was specified.
Parameters
----------
rhop : float
The density of the primary (motive) fluid, [kg/m^3]
rhos : float
The density of the secondary fluid (drawn from the vacuum chamber),
[kg/m^3]
Kp : float
The primary nozzle loss coefficient, [-]
Ks : float
The secondary inlet loss coefficient, [-]
d_nozzle : float, optional
The inside diameter of the primary fluid's nozle, [m]
d_mixing : float, optional
The diameter of the mixing chamber, [m]
Qp : float, optional
The volumetric flow rate of the primary fluid, [m^3/s]
Qs : float, optional
The volumetric flow rate of the secondary fluid, [m^3/s]
P1 : float, optional
The pressure of the primary fluid entering its nozzle, [Pa]
P2 : float, optional
The pressure of the secondary fluid at the entry of the ejector, [Pa]
Returns
-------
solution : float
The parameter not specified (one of `d_nozzle`, `d_mixing`,
`Qp`, `Qs`, `P1`, or `P2`), (units of `m`, `m`, `m^3/s`, `m^3/s`,
`Pa`, or `Pa` respectively)
Notes
-----
The following SymPy code was used to obtain the analytical formulas (
they are not shown here due to their length):
>>> from sympy import *
>>> A_nozzle, A_mixing, Qs, Qp, P1, P2, rhos, rhop, Ks, Kp = symbols('A_nozzle, A_mixing, Qs, Qp, P1, P2, rhos, rhop, Ks, Kp')
>>> R = A_nozzle/A_mixing
>>> M = Qs/Qp
>>> C = rhos/rhop
>>> rhs = rhop/2*(Qp/A_nozzle)**2*((1+Kp) - C*(1 + Ks)*((M*R)/(1-R))**2 )
>>> new = Eq(P1 - P2, rhs)
>>> #solve(new, Qp)
>>> #solve(new, Qs)
>>> #solve(new, P1)
>>> #solve(new, P2)
Examples
--------
Calculating primary fluid nozzle inlet pressure P1:
>>> liquid_jet_pump_ancillary(rhop=998., rhos=1098., Ks=0.11, Kp=.04,
... P2=133600, Qp=0.01, Qs=0.01, d_mixing=0.045, d_nozzle=0.02238)
426434.60314398084
References
----------
.. [1] Ejectors and Jet Pumps. Design and Performance for Incompressible
Liquid Flow. 85032. ESDU International PLC, 1985.
'''
unknowns = sum(i is None for i in (d_nozzle, d_mixing, Qs, Qp, P1, P2))
if unknowns > 1:
raise Exception('Too many unknowns')
elif unknowns < 1:
raise Exception('Overspecified')
C = rhos/rhop
if Qp is not None and Qs is not None:
M = Qs/Qp
if d_nozzle is not None:
A_nozzle = pi/4*d_nozzle*d_nozzle
if d_mixing is not None:
A_mixing = pi/4*d_mixing*d_mixing
R = A_nozzle/A_mixing
if P1 is None:
return rhop/2*(Qp/A_nozzle)**2*((1+Kp) - C*(1 + Ks)*((M*R)/(1-R))**2 ) + P2
elif P2 is None:
return -rhop/2*(Qp/A_nozzle)**2*((1+Kp) - C*(1 + Ks)*((M*R)/(1-R))**2 ) + P1
elif Qs is None:
try:
return ((-2*A_nozzle**2*P1 + 2*A_nozzle**2*P2 + Kp*Qp**2*rhop + Qp**2*rhop)/(C*rhop*(Ks + 1)))**0.5*(A_mixing - A_nozzle)/A_nozzle
except ValueError:
return -1j
elif Qp is None:
return A_nozzle*((2*A_mixing**2*P1 - 2*A_mixing**2*P2 - 4*A_mixing*A_nozzle*P1 + 4*A_mixing*A_nozzle*P2 + 2*A_nozzle**2*P1 - 2*A_nozzle**2*P2 + C*Ks*Qs**2*rhop + C*Qs**2*rhop)/(rhop*(Kp + 1)))**0.5/(A_mixing - A_nozzle)
elif d_nozzle is None:
def err(d_nozzle):
return P1 - liquid_jet_pump_ancillary(rhop=rhop, rhos=rhos, Kp=Kp, Ks=Ks, d_nozzle=d_nozzle, d_mixing=d_mixing, Qp=Qp, Qs=Qs,
P1=None, P2=P2)
return brenth(err, 1E-9, d_mixing*20)
elif d_mixing is None:
def err(d_mixing):
return P1 - liquid_jet_pump_ancillary(rhop=rhop, rhos=rhos, Kp=Kp, Ks=Ks, d_nozzle=d_nozzle, d_mixing=d_mixing, Qp=Qp, Qs=Qs,
P1=None, P2=P2)
try:
return brenth(err, 1E-9, d_nozzle*20)
except:
return newton(err, d_nozzle*2) | ['def', 'liquid_jet_pump_ancillary', '(', 'rhop', ',', 'rhos', ',', 'Kp', ',', 'Ks', ',', 'd_nozzle', '=', 'None', ',', 'd_mixing', '=', 'None', ',', 'Qp', '=', 'None', ',', 'Qs', '=', 'None', ',', 'P1', '=', 'None', ',', 'P2', '=', 'None', ')', ':', 'unknowns', '=', 'sum', '(', 'i', 'is', 'None', 'for', 'i', 'in', '(', 'd_nozzle', ',', 'd_mixing', ',', 'Qs', ',', 'Qp', ',', 'P1', ',', 'P2', ')', ')', 'if', 'unknowns', '>', '1', ':', 'raise', 'Exception', '(', "'Too many unknowns'", ')', 'elif', 'unknowns', '<', '1', ':', 'raise', 'Exception', '(', "'Overspecified'", ')', 'C', '=', 'rhos', '/', 'rhop', 'if', 'Qp', 'is', 'not', 'None', 'and', 'Qs', 'is', 'not', 'None', ':', 'M', '=', 'Qs', '/', 'Qp', 'if', 'd_nozzle', 'is', 'not', 'None', ':', 'A_nozzle', '=', 'pi', '/', '4', '*', 'd_nozzle', '*', 'd_nozzle', 'if', 'd_mixing', 'is', 'not', 'None', ':', 'A_mixing', '=', 'pi', '/', '4', '*', 'd_mixing', '*', 'd_mixing', 'R', '=', 'A_nozzle', '/', 'A_mixing', 'if', 'P1', 'is', 'None', ':', 'return', 'rhop', '/', '2', '*', '(', 'Qp', '/', 'A_nozzle', ')', '**', '2', '*', '(', '(', '1', '+', 'Kp', ')', '-', 'C', '*', '(', '1', '+', 'Ks', ')', '*', '(', '(', 'M', '*', 'R', ')', '/', '(', '1', '-', 'R', ')', ')', '**', '2', ')', '+', 'P2', 'elif', 'P2', 'is', 'None', ':', 'return', '-', 'rhop', '/', '2', '*', '(', 'Qp', '/', 'A_nozzle', ')', '**', '2', '*', '(', '(', '1', '+', 'Kp', ')', '-', 'C', '*', '(', '1', '+', 'Ks', ')', '*', '(', '(', 'M', '*', 'R', ')', '/', '(', '1', '-', 'R', ')', ')', '**', '2', ')', '+', 'P1', 'elif', 'Qs', 'is', 'None', ':', 'try', ':', 'return', '(', '(', '-', '2', '*', 'A_nozzle', '**', '2', '*', 'P1', '+', '2', '*', 'A_nozzle', '**', '2', '*', 'P2', '+', 'Kp', '*', 'Qp', '**', '2', '*', 'rhop', '+', 'Qp', '**', '2', '*', 'rhop', ')', '/', '(', 'C', '*', 'rhop', '*', '(', 'Ks', '+', '1', ')', ')', ')', '**', '0.5', '*', '(', 'A_mixing', '-', 'A_nozzle', ')', '/', 'A_nozzle', 'except', 'ValueError', ':', 'return', '-', '1j', 'elif', 'Qp', 'is', 'None', ':', 'return', 'A_nozzle', '*', '(', '(', '2', '*', 'A_mixing', '**', '2', '*', 'P1', '-', '2', '*', 'A_mixing', '**', '2', '*', 'P2', '-', '4', '*', 'A_mixing', '*', 'A_nozzle', '*', 'P1', '+', '4', '*', 'A_mixing', '*', 'A_nozzle', '*', 'P2', '+', '2', '*', 'A_nozzle', '**', '2', '*', 'P1', '-', '2', '*', 'A_nozzle', '**', '2', '*', 'P2', '+', 'C', '*', 'Ks', '*', 'Qs', '**', '2', '*', 'rhop', '+', 'C', '*', 'Qs', '**', '2', '*', 'rhop', ')', '/', '(', 'rhop', '*', '(', 'Kp', '+', '1', ')', ')', ')', '**', '0.5', '/', '(', 'A_mixing', '-', 'A_nozzle', ')', 'elif', 'd_nozzle', 'is', 'None', ':', 'def', 'err', '(', 'd_nozzle', ')', ':', 'return', 'P1', '-', 'liquid_jet_pump_ancillary', '(', 'rhop', '=', 'rhop', ',', 'rhos', '=', 'rhos', ',', 'Kp', '=', 'Kp', ',', 'Ks', '=', 'Ks', ',', 'd_nozzle', '=', 'd_nozzle', ',', 'd_mixing', '=', 'd_mixing', ',', 'Qp', '=', 'Qp', ',', 'Qs', '=', 'Qs', ',', 'P1', '=', 'None', ',', 'P2', '=', 'P2', ')', 'return', 'brenth', '(', 'err', ',', '1E-9', ',', 'd_mixing', '*', '20', ')', 'elif', 'd_mixing', 'is', 'None', ':', 'def', 'err', '(', 'd_mixing', ')', ':', 'return', 'P1', '-', 'liquid_jet_pump_ancillary', '(', 'rhop', '=', 'rhop', ',', 'rhos', '=', 'rhos', ',', 'Kp', '=', 'Kp', ',', 'Ks', '=', 'Ks', ',', 'd_nozzle', '=', 'd_nozzle', ',', 'd_mixing', '=', 'd_mixing', ',', 'Qp', '=', 'Qp', ',', 'Qs', '=', 'Qs', ',', 'P1', '=', 'None', ',', 'P2', '=', 'P2', ')', 'try', ':', 'return', 'brenth', '(', 'err', ',', '1E-9', ',', 'd_nozzle', '*', '20', ')', 
'except', ':', 'return', 'newton', '(', 'err', ',', 'd_nozzle', '*', '2', ')'] | r'''Calculates the remaining variable in a liquid jet pump when solving for
one of the inlet variables when the rest of them are known. The
equation comes from conservation of energy and momentum in the mixing
chamber.
The variable to be solved for must be one of `d_nozzle`, `d_mixing`,
`Qp`, `Qs`, `P1`, or `P2`.
.. math::
P_1 - P_2 = \frac{1}{2}\rho_pV_n^2(1+K_p)
- \frac{1}{2}\rho_s V_3^2(1+K_s)
Rearrange to express V3 in terms of Vn, and using the density ratio `C`,
the expression becomes:
.. math::
P_1 - P_2 = \frac{1}{2}\rho_p V_n^2\left[(1+K_p) - C(1+K_s)
\left(\frac{MR}{1-R}\right)^2\right]
Using the primary nozzle area and flow rate:
.. math::
P_1 - P_2 = \frac{1}{2}\rho_p \left(\frac{Q_p}{A_n}\right)^2
\left[(1+K_p) - C(1+K_s) \left(\frac{MR}{1-R}\right)^2\right]
For `P1`, `P2`, `Qs`, and `Qp`, the equation can be rearranged explicitly
for them. For `d_mixing` and `d_nozzle`, a bounded solver is used searching
between 1E-9 m and 20 times the other diameter which was specified.
Parameters
----------
rhop : float
The density of the primary (motive) fluid, [kg/m^3]
rhos : float
The density of the secondary fluid (drawn from the vacuum chamber),
[kg/m^3]
Kp : float
The primary nozzle loss coefficient, [-]
Ks : float
The secondary inlet loss coefficient, [-]
d_nozzle : float, optional
The inside diameter of the primary fluid's nozzle, [m]
d_mixing : float, optional
The diameter of the mixing chamber, [m]
Qp : float, optional
The volumetric flow rate of the primary fluid, [m^3/s]
Qs : float, optional
The volumetric flow rate of the secondary fluid, [m^3/s]
P1 : float, optional
The pressure of the primary fluid entering its nozzle, [Pa]
P2 : float, optional
The pressure of the secondary fluid at the entry of the ejector, [Pa]
Returns
-------
solution : float
The parameter not specified (one of `d_nozzle`, `d_mixing`,
`Qp`, `Qs`, `P1`, or `P2`), (units of `m`, `m`, `m^3/s`, `m^3/s`,
`Pa`, or `Pa` respectively)
Notes
-----
The following SymPy code was used to obtain the analytical formulas (
they are not shown here due to their length):
>>> from sympy import *
>>> A_nozzle, A_mixing, Qs, Qp, P1, P2, rhos, rhop, Ks, Kp = symbols('A_nozzle, A_mixing, Qs, Qp, P1, P2, rhos, rhop, Ks, Kp')
>>> R = A_nozzle/A_mixing
>>> M = Qs/Qp
>>> C = rhos/rhop
>>> rhs = rhop/2*(Qp/A_nozzle)**2*((1+Kp) - C*(1 + Ks)*((M*R)/(1-R))**2 )
>>> new = Eq(P1 - P2, rhs)
>>> #solve(new, Qp)
>>> #solve(new, Qs)
>>> #solve(new, P1)
>>> #solve(new, P2)
Examples
--------
Calculating primary fluid nozzle inlet pressure P1:
>>> liquid_jet_pump_ancillary(rhop=998., rhos=1098., Ks=0.11, Kp=.04,
... P2=133600, Qp=0.01, Qs=0.01, d_mixing=0.045, d_nozzle=0.02238)
426434.60314398084
References
----------
.. [1] Ejectors and Jet Pumps. Design and Performance for Incompressible
Liquid Flow. 85032. ESDU International PLC, 1985. | ['r', 'Calculates', 'the', 'remaining', 'variable', 'in', 'a', 'liquid', 'jet', 'pump', 'when', 'solving', 'for', 'one', 'if', 'the', 'inlet', 'variables', 'only', 'and', 'the', 'rest', 'of', 'them', 'are', 'known', '.', 'The', 'equation', 'comes', 'from', 'conservation', 'of', 'energy', 'and', 'momentum', 'in', 'the', 'mixing', 'chamber', '.', 'The', 'variable', 'to', 'be', 'solved', 'for', 'must', 'be', 'one', 'of', 'd_nozzle', 'd_mixing', 'Qp', 'Qs', 'P1', 'or', 'P2', '.', '..', 'math', '::', 'P_1', '-', 'P_2', '=', '\\', 'frac', '{', '1', '}', '{', '2', '}', '\\', 'rho_pV_n^2', '(', '1', '+', 'K_p', ')', '-', '\\', 'frac', '{', '1', '}', '{', '2', '}', '\\', 'rho_s', 'V_3^2', '(', '1', '+', 'K_s', ')', 'Rearrange', 'to', 'express', 'V3', 'in', 'terms', 'of', 'Vn', 'and', 'using', 'the', 'density', 'ratio', 'C', 'the', 'expression', 'becomes', ':', '..', 'math', '::', 'P_1', '-', 'P_2', '=', '\\', 'frac', '{', '1', '}', '{', '2', '}', '\\', 'rho_p', 'V_n^2', '\\', 'left', '[', '(', '1', '+', 'K_p', ')', '-', 'C', '(', '1', '+', 'K_s', ')', '\\', 'left', '(', '\\', 'frac', '{', 'MR', '}', '{', '1', '-', 'R', '}', '\\', 'right', ')', '^2', '\\', 'right', ']'] | train | https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/jet_pump.py#L33-L163 |
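A short usage sketch for the fluids entry above, reusing the values from its Examples section; the import path follows the row's file path (fluids/jet_pump.py), and the second call is a consistency check that is not part of the original docstring.
# Solve for the primary inlet pressure P1 (matches the docstring value).
from fluids.jet_pump import liquid_jet_pump_ancillary
P1 = liquid_jet_pump_ancillary(rhop=998., rhos=1098., Ks=0.11, Kp=.04,
                               P2=133600, Qp=0.01, Qs=0.01,
                               d_mixing=0.045, d_nozzle=0.02238)
print(P1)  # ~426434.6 Pa
# Consistency check: leaving Qs unspecified and passing that P1 back in
# should recover approximately Qs = 0.01 m^3/s.
Qs = liquid_jet_pump_ancillary(rhop=998., rhos=1098., Ks=0.11, Kp=.04,
                               P1=P1, P2=133600, Qp=0.01,
                               d_mixing=0.045, d_nozzle=0.02238)
print(Qs)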
7,961 | thisfred/val | val/_val.py | And._validated | def _validated(self, data):
"""Validate data if all subschemas validate it."""
for sub in self.schemas:
data = sub(data)
return data | python | def _validated(self, data):
"""Validate data if all subschemas validate it."""
for sub in self.schemas:
data = sub(data)
return data | ['def', '_validated', '(', 'self', ',', 'data', ')', ':', 'for', 'sub', 'in', 'self', '.', 'schemas', ':', 'data', '=', 'sub', '(', 'data', ')', 'return', 'data'] | Validate data if all subschemas validate it. | ['Validate', 'data', 'if', 'all', 'subschemas', 'validate', 'it', '.'] | train | https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L339-L343 |
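A small stand-alone illustration of the chaining behaviour shown in And._validated above; plain callables stand in for val schemas here, so none of these names are the library's API.
def must_be_str(data):
    # Hypothetical sub-schema: raise on bad input, return the data otherwise.
    if not isinstance(data, str):
        raise ValueError("not a string")
    return data

def strip_whitespace(data):
    # Hypothetical sub-schema: validators may also transform the value.
    return data.strip()

def validate_all(schemas, data):
    # Same loop as And._validated: each sub-schema sees the previous output.
    for sub in schemas:
        data = sub(data)
    return data

print(validate_all([must_be_str, strip_whitespace], "  hello "))  # 'hello'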
7,962 | vertexproject/synapse | synapse/lib/hive.py | Hive.pop | async def pop(self, full):
'''
Remove and return the value for the given node.
'''
node = self.nodes.get(full)
if node is None:
return
valu = await self._popHiveNode(node)
return valu | python | async def pop(self, full):
'''
Remove and return the value for the given node.
'''
node = self.nodes.get(full)
if node is None:
return
valu = await self._popHiveNode(node)
return valu | ['async', 'def', 'pop', '(', 'self', ',', 'full', ')', ':', 'node', '=', 'self', '.', 'nodes', '.', 'get', '(', 'full', ')', 'if', 'node', 'is', 'None', ':', 'return', 'valu', '=', 'await', 'self', '.', '_popHiveNode', '(', 'node', ')', 'return', 'valu'] | Remove and return the value for the given node. | ['Remove', 'and', 'return', 'the', 'value', 'for', 'the', 'given', 'node', '.'] | train | https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/hive.py#L257-L267 |
7,963 | rigetti/grove | grove/alpha/fermion_transforms/bktransform.py | BKTransform._operator_generator | def _operator_generator(self, index, conj):
"""
Internal method to generate the appropriate ladder operator at fermion
orbital at 'index'
If conj == -1 --> creation
conj == +1 --> annihilation
:param int index: fermion orbital to generate ladder operator at
:param int conj: -1 for creation, +1 for annihilation
"""
if conj != -1 and conj != +1:
raise ValueError("Improper conjugate coefficient")
if index >= self.n_qubits or index < 0:
raise IndexError("Operator index outside number of qubits for "
"current Bravyi-Kitaev transform.")
# parity set P(j). apply Z to, for parity sign.
parity_set = [node.index for node in self.tree.get_parity_set(index)]
# update set U(j). apply X to, for updating purposes.
ancestors = [node.index for node in self.tree.get_update_set(index)]
# remainder set C(j) = P(j) \ F(j)
ancestor_children = [node.index for node in self.tree.get_remainder_set(index)]
# Under Majorana basis, creation/annihilation operators given by
# a^{\pm} = (c \mp id) / 2
# c_j = a_j + a_j^{\dagger} = X_{U(j)} X_j Z_{P(j)}
c_maj = PauliTerm('X', index)
for node_idx in parity_set:
c_maj *= PauliTerm('Z', node_idx)
for node_idx in ancestors:
c_maj *= PauliTerm('X', node_idx)
# d_j = i(a_j^{\dagger} - a_j) = X_{U(j)} Y_j Z_{C(j)}
d_maj = PauliTerm('Y', index)
for node_idx in ancestors:
d_maj *= PauliTerm('X', node_idx)
for node_idx in ancestor_children:
d_maj *= PauliTerm('Z', node_idx)
result = 0.5 * (c_maj + 1j * conj * d_maj)
return result.simplify() | python | def _operator_generator(self, index, conj):
"""
Internal method to generate the appropriate ladder operator at fermion
orbital at 'index'
If conj == -1 --> creation
conj == +1 --> annihilation
:param int index: fermion orbital to generate ladder operator at
:param int conj: -1 for creation, +1 for annihilation
"""
if conj != -1 and conj != +1:
raise ValueError("Improper conjugate coefficient")
if index >= self.n_qubits or index < 0:
raise IndexError("Operator index outside number of qubits for "
"current Bravyi-Kitaev transform.")
# parity set P(j). apply Z to, for parity sign.
parity_set = [node.index for node in self.tree.get_parity_set(index)]
# update set U(j). apply X to, for updating purposes.
ancestors = [node.index for node in self.tree.get_update_set(index)]
# remainder set C(j) = P(j) \ F(j)
ancestor_children = [node.index for node in self.tree.get_remainder_set(index)]
# Under Majorana basis, creation/annihilation operators given by
# a^{\pm} = (c \mp id) / 2
# c_j = a_j + a_j^{\dagger} = X_{U(j)} X_j Z_{P(j)}
c_maj = PauliTerm('X', index)
for node_idx in parity_set:
c_maj *= PauliTerm('Z', node_idx)
for node_idx in ancestors:
c_maj *= PauliTerm('X', node_idx)
# d_j = i(a_j^{\dagger} - a_j) = X_{U(j)} Y_j Z_{C(j)}
d_maj = PauliTerm('Y', index)
for node_idx in ancestors:
d_maj *= PauliTerm('X', node_idx)
for node_idx in ancestor_children:
d_maj *= PauliTerm('Z', node_idx)
result = 0.5 * (c_maj + 1j * conj * d_maj)
return result.simplify() | ['def', '_operator_generator', '(', 'self', ',', 'index', ',', 'conj', ')', ':', 'if', 'conj', '!=', '-', '1', 'and', 'conj', '!=', '+', '1', ':', 'raise', 'ValueError', '(', '"Improper conjugate coefficient"', ')', 'if', 'index', '>=', 'self', '.', 'n_qubits', 'or', 'index', '<', '0', ':', 'raise', 'IndexError', '(', '"Operator index outside number of qubits for "', '"current Bravyi-Kitaev transform."', ')', '# parity set P(j). apply Z to, for parity sign.', 'parity_set', '=', '[', 'node', '.', 'index', 'for', 'node', 'in', 'self', '.', 'tree', '.', 'get_parity_set', '(', 'index', ')', ']', '# update set U(j). apply X to, for updating purposes.', 'ancestors', '=', '[', 'node', '.', 'index', 'for', 'node', 'in', 'self', '.', 'tree', '.', 'get_update_set', '(', 'index', ')', ']', '# remainder set C(j) = P(j) \\ F(j)', 'ancestor_children', '=', '[', 'node', '.', 'index', 'for', 'node', 'in', 'self', '.', 'tree', '.', 'get_remainder_set', '(', 'index', ')', ']', '# Under Majorana basis, creation/annihilation operators given by', '# a^{\\pm} = (c \\mp id) / 2', '# c_j = a_j + a_j^{\\dagger} = X_{U(j)} X_j Z_{P(j)}', 'c_maj', '=', 'PauliTerm', '(', "'X'", ',', 'index', ')', 'for', 'node_idx', 'in', 'parity_set', ':', 'c_maj', '*=', 'PauliTerm', '(', "'Z'", ',', 'node_idx', ')', 'for', 'node_idx', 'in', 'ancestors', ':', 'c_maj', '*=', 'PauliTerm', '(', "'X'", ',', 'node_idx', ')', '# d_j = i(a_j^{\\dagger} - a_j) = X_{U(j)} Y_j Z_{C(j)}', 'd_maj', '=', 'PauliTerm', '(', "'Y'", ',', 'index', ')', 'for', 'node_idx', 'in', 'ancestors', ':', 'd_maj', '*=', 'PauliTerm', '(', "'X'", ',', 'node_idx', ')', 'for', 'node_idx', 'in', 'ancestor_children', ':', 'd_maj', '*=', 'PauliTerm', '(', "'Z'", ',', 'node_idx', ')', 'result', '=', '0.5', '*', '(', 'c_maj', '+', '1j', '*', 'conj', '*', 'd_maj', ')', 'return', 'result', '.', 'simplify', '(', ')'] | Internal method to generate the appropriate ladder operator at fermion
orbital at 'index'
If conj == -1 --> creation
conj == +1 --> annihilation
:param int index: fermion orbital to generate ladder operator at
:param int conj: -1 for creation, +1 for annihilation | ['Internal', 'method', 'to', 'generate', 'the', 'appropriate', 'ladder', 'operator', 'at', 'fermion', 'orbital', 'at', 'index', 'If', 'conj', '==', '-', '1', '--', '>', 'creation', 'conj', '==', '+', '1', '--', '>', 'annihilation'] | train | https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/alpha/fermion_transforms/bktransform.py#L89-L132 |
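A quick NumPy check of the Majorana relation used in the code above, a^± = (c ∓ i·d)/2, for a single qubit whose update/parity/remainder sets are all empty (so c = X and d = Y); this avoids any dependency on pyQuil.
import numpy as np

X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]], dtype=complex)

# conj = -1 -> creation, conj = +1 -> annihilation, as in _operator_generator
creation = 0.5 * (X + 1j * (-1) * Y)
annihilation = 0.5 * (X + 1j * (+1) * Y)

print(creation.real)      # [[0. 0.] [1. 0.]] -- maps |0> to |1>
print(annihilation.real)  # [[0. 1.] [0. 0.]] -- maps |1> to |0>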
7,964 | RI-imaging/nrefocus | nrefocus/metrics.py | spectral | def spectral(data, lambd, *kwargs):
""" Compute spectral contrast of image
Performs bandpass filtering in Fourier space according to optical
limit of detection system, approximated by twice the wavelength.
Parameters
----------
data : 2d ndarray
the image to compute the norm from
lambd : float
wavelength of the light in pixels
"""
# Set up fast fourier transform
# if not data.dtype == np.dtype(np.complex):
# data = np.array(data, dtype=np.complex)
# fftplan = fftw3.Plan(data.copy(), None, nthreads = _ncores,
# direction="forward", flags=_fftwflags)
# fftdata = np.zeros(data.shape, dtype=np.complex)
# fftplan.guru_execute_dft(data, fftdata)
# fftw.destroy_plan(fftplan)
fftdata = np.fft.fftn(data)
# Filter Fourier transform
fftdata[0, 0] = 0
kx = 2 * np.pi * np.fft.fftfreq(data.shape[0]).reshape(1, -1)
ky = 2 * np.pi * np.fft.fftfreq(data.shape[1]).reshape(-1, 1)
kmax = (2 * np.pi) / (2 * lambd)
fftdata[np.where(kx**2 + ky**2 > kmax**2)] = 0
spec = np.sum(np.log(1 + np.abs(fftdata))) / np.sqrt(np.prod(data.shape))
return spec | python | def spectral(data, lambd, *kwargs):
""" Compute spectral contrast of image
Performs bandpass filtering in Fourier space according to optical
limit of detection system, approximated by twice the wavelength.
Parameters
----------
data : 2d ndarray
the image to compute the norm from
lambd : float
wavelength of the light in pixels
"""
# Set up fast fourier transform
# if not data.dtype == np.dtype(np.complex):
# data = np.array(data, dtype=np.complex)
# fftplan = fftw3.Plan(data.copy(), None, nthreads = _ncores,
# direction="forward", flags=_fftwflags)
# fftdata = np.zeros(data.shape, dtype=np.complex)
# fftplan.guru_execute_dft(data, fftdata)
# fftw.destroy_plan(fftplan)
fftdata = np.fft.fftn(data)
# Filter Fourier transform
fftdata[0, 0] = 0
kx = 2 * np.pi * np.fft.fftfreq(data.shape[0]).reshape(1, -1)
ky = 2 * np.pi * np.fft.fftfreq(data.shape[1]).reshape(-1, 1)
kmax = (2 * np.pi) / (2 * lambd)
fftdata[np.where(kx**2 + ky**2 > kmax**2)] = 0
spec = np.sum(np.log(1 + np.abs(fftdata))) / np.sqrt(np.prod(data.shape))
return spec | ['def', 'spectral', '(', 'data', ',', 'lambd', ',', '*', 'kwargs', ')', ':', '# Set up fast fourier transform', '# if not data.dtype == np.dtype(np.complex):', '# data = np.array(data, dtype=np.complex)', '# fftplan = fftw3.Plan(data.copy(), None, nthreads = _ncores,', '# direction="forward", flags=_fftwflags)', '# fftdata = np.zeros(data.shape, dtype=np.complex)', '# fftplan.guru_execute_dft(data, fftdata)', '# fftw.destroy_plan(fftplan)', 'fftdata', '=', 'np', '.', 'fft', '.', 'fftn', '(', 'data', ')', '# Filter Fourier transform', 'fftdata', '[', '0', ',', '0', ']', '=', '0', 'kx', '=', '2', '*', 'np', '.', 'pi', '*', 'np', '.', 'fft', '.', 'fftfreq', '(', 'data', '.', 'shape', '[', '0', ']', ')', '.', 'reshape', '(', '1', ',', '-', '1', ')', 'ky', '=', '2', '*', 'np', '.', 'pi', '*', 'np', '.', 'fft', '.', 'fftfreq', '(', 'data', '.', 'shape', '[', '1', ']', ')', '.', 'reshape', '(', '-', '1', ',', '1', ')', 'kmax', '=', '(', '2', '*', 'np', '.', 'pi', ')', '/', '(', '2', '*', 'lambd', ')', 'fftdata', '[', 'np', '.', 'where', '(', 'kx', '**', '2', '+', 'ky', '**', '2', '>', 'kmax', '**', '2', ')', ']', '=', '0', 'spec', '=', 'np', '.', 'sum', '(', 'np', '.', 'log', '(', '1', '+', 'np', '.', 'abs', '(', 'fftdata', ')', ')', ')', '/', 'np', '.', 'sqrt', '(', 'np', '.', 'prod', '(', 'data', '.', 'shape', ')', ')', 'return', 'spec'] | Compute spectral contrast of image
Performs bandpass filtering in Fourier space according to optical
limit of detection system, approximated by twice the wavelength.
Parameters
----------
data : 2d ndarray
the image to compute the norm from
lambd : float
wavelength of the light in pixels | ['Compute', 'spectral', 'contrast', 'of', 'image'] | train | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/metrics.py#L18-L52 |
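A self-contained re-run of the metric above on synthetic data: the same FFT, k-space cutoff, and log-magnitude steps written out locally so no package import is needed. A blurred copy of an image is expected to score lower than the sharp original, which is what makes this usable as a focus metric.
import numpy as np

def spectral_contrast(data, lambd):
    # Same steps as the entry: FFT, zero the DC term, keep |k| <= 2*pi/(2*lambd),
    # then sum log(1 + |F|) normalised by sqrt of the pixel count.
    fftdata = np.fft.fftn(data)
    fftdata[0, 0] = 0
    kx = 2 * np.pi * np.fft.fftfreq(data.shape[0]).reshape(1, -1)
    ky = 2 * np.pi * np.fft.fftfreq(data.shape[1]).reshape(-1, 1)
    kmax = (2 * np.pi) / (2 * lambd)
    fftdata[kx ** 2 + ky ** 2 > kmax ** 2] = 0
    return np.sum(np.log(1 + np.abs(fftdata))) / np.sqrt(np.prod(data.shape))

rng = np.random.default_rng(0)
sharp = rng.normal(size=(64, 64))
# Crude 5x5 box blur built from shifted copies, just to reduce contrast.
blurred = sum(np.roll(np.roll(sharp, dx, 0), dy, 1)
              for dx in range(-2, 3) for dy in range(-2, 3)) / 25.0
print(spectral_contrast(sharp, lambd=4.0) > spectral_contrast(blurred, lambd=4.0))  # expected True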
7,965 | 72squared/redpipe | redpipe/fields.py | ListField.encode | def encode(cls, value):
"""
take a list and turn it into a utf-8 encoded byte-string for redis.
:param value: list
:return: bytes
"""
try:
coerced = list(value)
if coerced == value:
return json.dumps(coerced).encode(cls._encoding)
except TypeError:
pass
raise InvalidValue('not a list') | python | def encode(cls, value):
"""
take a list and turn it into a utf-8 encoded byte-string for redis.
:param value: list
:return: bytes
"""
try:
coerced = list(value)
if coerced == value:
return json.dumps(coerced).encode(cls._encoding)
except TypeError:
pass
raise InvalidValue('not a list') | ['def', 'encode', '(', 'cls', ',', 'value', ')', ':', 'try', ':', 'coerced', '=', 'list', '(', 'value', ')', 'if', 'coerced', '==', 'value', ':', 'return', 'json', '.', 'dumps', '(', 'coerced', ')', '.', 'encode', '(', 'cls', '.', '_encoding', ')', 'except', 'TypeError', ':', 'pass', 'raise', 'InvalidValue', '(', "'not a list'", ')'] | take a list and turn it into a utf-8 encoded byte-string for redis.
:param value: list
:return: bytes | ['take', 'a', 'list', 'and', 'turn', 'it', 'into', 'a', 'utf', '-', '8', 'encoded', 'byte', '-', 'string', 'for', 'redis', '.'] | train | https://github.com/72squared/redpipe/blob/e6ee518bc9f3e2fee323c8c53d08997799bd9b1b/redpipe/fields.py#L225-L239 |
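A standalone sketch of the same encode contract: lists are JSON-encoded to UTF-8 bytes, and anything whose list() coercion does not compare equal to the original (tuples, strings, dicts) is rejected; a local ValueError subclass stands in for redpipe's InvalidValue.
import json

class InvalidValue(ValueError):
    # Stand-in for redpipe's InvalidValue exception.
    pass

def encode_list(value, encoding="utf-8"):
    # Mirrors ListField.encode: accept only objects that equal their list() coercion.
    try:
        coerced = list(value)
        if coerced == value:
            return json.dumps(coerced).encode(encoding)
    except TypeError:
        pass
    raise InvalidValue("not a list")

print(encode_list([1, "a", None]))  # b'[1, "a", null]'
try:
    encode_list((1, 2))             # a tuple coerces, but (1, 2) != [1, 2]
except InvalidValue as exc:
    print(exc)                      # not a list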
7,966 | pywbem/pywbem | pywbem_mock/_wbemconnection_mock.py | FakedWBEMConnection._fake_modifyinstance | def _fake_modifyinstance(self, namespace, **params):
"""
Implements a server responder for
:meth:`~pywbem.WBEMConnection.ModifyInstance`
Modify a CIM instance in the local repository.
Raises:
CIMError: CIM_ERR_ALREADY_EXISTS, CIM_ERR_INVALID_CLASS
"""
if self._repo_lite:
raise CIMError(
CIM_ERR_NOT_SUPPORTED,
"ModifyInstance not supported when repo_lite set.")
# Validate namespace
instance_repo = self._get_instance_repo(namespace)
modified_instance = deepcopy(params['ModifiedInstance'])
property_list = params['PropertyList']
# Return if empty property list
if property_list is not None and not property_list:
return
if modified_instance is not None and not modified_instance:
return
if not isinstance(modified_instance, CIMInstance):
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("The ModifiedInstance parameter is not a valid "
"CIMInstance. Rcvd type={0}", type(modified_instance)))
# Classnames in instance and path must match
if modified_instance.classname != modified_instance.path.classname:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("ModifyInstance classname in path and instance do "
"not match. classname={0!A}, path.classname={1!A}",
modified_instance.classname,
modified_instance.path.classname))
# Get class including properties from superclasses from repo
try:
target_class = self.GetClass(modified_instance.classname,
namespace=namespace,
LocalOnly=False,
IncludeQualifiers=True,
IncludeClassOrigin=True)
except CIMError as ce:
if ce.status_code == CIM_ERR_NOT_FOUND:
raise CIMError(
CIM_ERR_INVALID_CLASS,
_format("Cannot modify instance because its creation "
"class {0!A} does not exist in namespace {1!A}.",
modified_instance.classname, namespace))
raise
# get key properties and all class props
cl_props = [p.name for p in six.itervalues(target_class.properties)]
key_props = [p.name for p in six.itervalues(target_class.properties)
if 'key' in p.qualifiers]
# Get original instance in repo. Does not copy the orig instance.
mod_inst_path = modified_instance.path.copy()
if modified_instance.path.namespace is None:
mod_inst_path.namespace = namespace
orig_instance_tup = self._find_instance(mod_inst_path, instance_repo)
if orig_instance_tup[0] is None:
raise CIMError(
CIM_ERR_NOT_FOUND,
_format("Original Instance {0!A} not found in namespace {1!A}",
modified_instance.path, namespace))
original_instance = orig_instance_tup[1]
# Remove duplicate properties from property_list
if property_list:
if len(property_list) != len(set(property_list)):
property_list = list(set(property_list))
# Test that all properties in modified instance and property list
# are in the class
if property_list:
for p in property_list:
if p not in cl_props:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Property {0!A} in PropertyList not in class "
"{1!A}", p, modified_instance.classname))
for p in modified_instance:
if p not in cl_props:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Property {0!A} in ModifiedInstance not in class "
"{1!A}", p, modified_instance.classname))
# Set the class value for properties in the property list but not
# in the modified_instance. This sets just the value component.
mod_inst_props = set(modified_instance.keys())
new_props = mod_inst_props.difference(set(cl_props))
if new_props:
for new_prop in new_props:
modified_instance[new_prop] = \
target_class.properties[new_prop].value
# Remove all properties that do not change value between original
# instance and modified instance
for p in list(modified_instance):
if original_instance[p] == modified_instance[p]:
del modified_instance[p]
# Confirm no key properties in remaining modified instance
for p in key_props:
if p in modified_instance:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("ModifyInstance cannot modify key property {0!A}",
p))
# Remove any properties from modified instance not in the property_list
if property_list:
for p in list(modified_instance):
if p not in property_list:
del modified_instance[p]
# Exception if property in instance but not class or types do not
# match
for pname in modified_instance:
if pname not in target_class.properties:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Property {0!A} specified in ModifiedInstance is "
"not exposed by class {1!A} in namespace {2!A}",
pname, target_class.classname, namespace))
cprop = target_class.properties[pname]
iprop = modified_instance.properties[pname]
if iprop.is_array != cprop.is_array \
or iprop.type != cprop.type \
or iprop.array_size != cprop.array_size:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Instance and class property name={0!A} type "
"or other attributes do not match: "
"instance={1!A}, class={2!A}",
pname, iprop, cprop))
# Modify the value of properties in the repo with those from
# modified instance
index = orig_instance_tup[0]
instance_repo[index].update(modified_instance.properties)
return | python | def _fake_modifyinstance(self, namespace, **params):
"""
Implements a server responder for
:meth:`~pywbem.WBEMConnection.ModifyInstance`
Modify a CIM instance in the local repository.
Raises:
CIMError: CIM_ERR_ALREADY_EXISTS, CIM_ERR_INVALID_CLASS
"""
if self._repo_lite:
raise CIMError(
CIM_ERR_NOT_SUPPORTED,
"ModifyInstance not supported when repo_lite set.")
# Validate namespace
instance_repo = self._get_instance_repo(namespace)
modified_instance = deepcopy(params['ModifiedInstance'])
property_list = params['PropertyList']
# Return if empty property list
if property_list is not None and not property_list:
return
if modified_instance is not None and not modified_instance:
return
if not isinstance(modified_instance, CIMInstance):
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("The ModifiedInstance parameter is not a valid "
"CIMInstance. Rcvd type={0}", type(modified_instance)))
# Classnames in instance and path must match
if modified_instance.classname != modified_instance.path.classname:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("ModifyInstance classname in path and instance do "
"not match. classname={0!A}, path.classname={1!A}",
modified_instance.classname,
modified_instance.path.classname))
# Get class including properties from superclasses from repo
try:
target_class = self.GetClass(modified_instance.classname,
namespace=namespace,
LocalOnly=False,
IncludeQualifiers=True,
IncludeClassOrigin=True)
except CIMError as ce:
if ce.status_code == CIM_ERR_NOT_FOUND:
raise CIMError(
CIM_ERR_INVALID_CLASS,
_format("Cannot modify instance because its creation "
"class {0!A} does not exist in namespace {1!A}.",
modified_instance.classname, namespace))
raise
# get key properties and all class props
cl_props = [p.name for p in six.itervalues(target_class.properties)]
key_props = [p.name for p in six.itervalues(target_class.properties)
if 'key' in p.qualifiers]
# Get original instance in repo. Does not copy the orig instance.
mod_inst_path = modified_instance.path.copy()
if modified_instance.path.namespace is None:
mod_inst_path.namespace = namespace
orig_instance_tup = self._find_instance(mod_inst_path, instance_repo)
if orig_instance_tup[0] is None:
raise CIMError(
CIM_ERR_NOT_FOUND,
_format("Original Instance {0!A} not found in namespace {1!A}",
modified_instance.path, namespace))
original_instance = orig_instance_tup[1]
# Remove duplicate properties from property_list
if property_list:
if len(property_list) != len(set(property_list)):
property_list = list(set(property_list))
# Test that all properties in modified instance and property list
# are in the class
if property_list:
for p in property_list:
if p not in cl_props:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Property {0!A} in PropertyList not in class "
"{1!A}", p, modified_instance.classname))
for p in modified_instance:
if p not in cl_props:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Property {0!A} in ModifiedInstance not in class "
"{1!A}", p, modified_instance.classname))
# Set the class value for properties in the property list but not
# in the modified_instance. This sets just the value component.
mod_inst_props = set(modified_instance.keys())
new_props = mod_inst_props.difference(set(cl_props))
if new_props:
for new_prop in new_props:
modified_instance[new_prop] = \
target_class.properties[new_prop].value
# Remove all properties that do not change value between original
# instance and modified instance
for p in list(modified_instance):
if original_instance[p] == modified_instance[p]:
del modified_instance[p]
# Confirm no key properties in remaining modified instance
for p in key_props:
if p in modified_instance:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("ModifyInstance cannot modify key property {0!A}",
p))
# Remove any properties from modified instance not in the property_list
if property_list:
for p in list(modified_instance):
if p not in property_list:
del modified_instance[p]
# Exception if property in instance but not class or types do not
# match
for pname in modified_instance:
if pname not in target_class.properties:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Property {0!A} specified in ModifiedInstance is "
"not exposed by class {1!A} in namespace {2!A}",
pname, target_class.classname, namespace))
cprop = target_class.properties[pname]
iprop = modified_instance.properties[pname]
if iprop.is_array != cprop.is_array \
or iprop.type != cprop.type \
or iprop.array_size != cprop.array_size:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Instance and class property name={0!A} type "
"or other attributes do not match: "
"instance={1!A}, class={2!A}",
pname, iprop, cprop))
# Modify the value of properties in the repo with those from
# modified instance
index = orig_instance_tup[0]
instance_repo[index].update(modified_instance.properties)
return | ['def', '_fake_modifyinstance', '(', 'self', ',', 'namespace', ',', '*', '*', 'params', ')', ':', 'if', 'self', '.', '_repo_lite', ':', 'raise', 'CIMError', '(', 'CIM_ERR_NOT_SUPPORTED', ',', '"ModifyInstance not supported when repo_lite set."', ')', '# Validate namespace', 'instance_repo', '=', 'self', '.', '_get_instance_repo', '(', 'namespace', ')', 'modified_instance', '=', 'deepcopy', '(', 'params', '[', "'ModifiedInstance'", ']', ')', 'property_list', '=', 'params', '[', "'PropertyList'", ']', '# Return if empty property list', 'if', 'property_list', 'is', 'not', 'None', 'and', 'not', 'property_list', ':', 'return', 'if', 'modified_instance', 'is', 'not', 'None', 'and', 'not', 'modified_instance', ':', 'return', 'if', 'not', 'isinstance', '(', 'modified_instance', ',', 'CIMInstance', ')', ':', 'raise', 'CIMError', '(', 'CIM_ERR_INVALID_PARAMETER', ',', '_format', '(', '"The ModifiedInstance parameter is not a valid "', '"CIMInstance. Rcvd type={0}"', ',', 'type', '(', 'modified_instance', ')', ')', ')', '# Classnames in instance and path must match', 'if', 'modified_instance', '.', 'classname', '!=', 'modified_instance', '.', 'path', '.', 'classname', ':', 'raise', 'CIMError', '(', 'CIM_ERR_INVALID_PARAMETER', ',', '_format', '(', '"ModifyInstance classname in path and instance do "', '"not match. classname={0!A}, path.classname={1!A}"', ',', 'modified_instance', '.', 'classname', ',', 'modified_instance', '.', 'path', '.', 'classname', ')', ')', '# Get class including properties from superclasses from repo', 'try', ':', 'target_class', '=', 'self', '.', 'GetClass', '(', 'modified_instance', '.', 'classname', ',', 'namespace', '=', 'namespace', ',', 'LocalOnly', '=', 'False', ',', 'IncludeQualifiers', '=', 'True', ',', 'IncludeClassOrigin', '=', 'True', ')', 'except', 'CIMError', 'as', 'ce', ':', 'if', 'ce', '.', 'status_code', '==', 'CIM_ERR_NOT_FOUND', ':', 'raise', 'CIMError', '(', 'CIM_ERR_INVALID_CLASS', ',', '_format', '(', '"Cannot modify instance because its creation "', '"class {0!A} does not exist in namespace {1!A}."', ',', 'modified_instance', '.', 'classname', ',', 'namespace', ')', ')', 'raise', '# get key properties and all class props', 'cl_props', '=', '[', 'p', '.', 'name', 'for', 'p', 'in', 'six', '.', 'itervalues', '(', 'target_class', '.', 'properties', ')', ']', 'key_props', '=', '[', 'p', '.', 'name', 'for', 'p', 'in', 'six', '.', 'itervalues', '(', 'target_class', '.', 'properties', ')', 'if', "'key'", 'in', 'p', '.', 'qualifiers', ']', '# Get original instance in repo. 
Does not copy the orig instance.', 'mod_inst_path', '=', 'modified_instance', '.', 'path', '.', 'copy', '(', ')', 'if', 'modified_instance', '.', 'path', '.', 'namespace', 'is', 'None', ':', 'mod_inst_path', '.', 'namespace', '=', 'namespace', 'orig_instance_tup', '=', 'self', '.', '_find_instance', '(', 'mod_inst_path', ',', 'instance_repo', ')', 'if', 'orig_instance_tup', '[', '0', ']', 'is', 'None', ':', 'raise', 'CIMError', '(', 'CIM_ERR_NOT_FOUND', ',', '_format', '(', '"Original Instance {0!A} not found in namespace {1!A}"', ',', 'modified_instance', '.', 'path', ',', 'namespace', ')', ')', 'original_instance', '=', 'orig_instance_tup', '[', '1', ']', '# Remove duplicate properties from property_list', 'if', 'property_list', ':', 'if', 'len', '(', 'property_list', ')', '!=', 'len', '(', 'set', '(', 'property_list', ')', ')', ':', 'property_list', '=', 'list', '(', 'set', '(', 'property_list', ')', ')', '# Test that all properties in modified instance and property list', '# are in the class', 'if', 'property_list', ':', 'for', 'p', 'in', 'property_list', ':', 'if', 'p', 'not', 'in', 'cl_props', ':', 'raise', 'CIMError', '(', 'CIM_ERR_INVALID_PARAMETER', ',', '_format', '(', '"Property {0!A} in PropertyList not in class "', '"{1!A}"', ',', 'p', ',', 'modified_instance', '.', 'classname', ')', ')', 'for', 'p', 'in', 'modified_instance', ':', 'if', 'p', 'not', 'in', 'cl_props', ':', 'raise', 'CIMError', '(', 'CIM_ERR_INVALID_PARAMETER', ',', '_format', '(', '"Property {0!A} in ModifiedInstance not in class "', '"{1!A}"', ',', 'p', ',', 'modified_instance', '.', 'classname', ')', ')', '# Set the class value for properties in the property list but not', '# in the modified_instance. This sets just the value component.', 'mod_inst_props', '=', 'set', '(', 'modified_instance', '.', 'keys', '(', ')', ')', 'new_props', '=', 'mod_inst_props', '.', 'difference', '(', 'set', '(', 'cl_props', ')', ')', 'if', 'new_props', ':', 'for', 'new_prop', 'in', 'new_props', ':', 'modified_instance', '[', 'new_prop', ']', '=', 'target_class', '.', 'properties', '[', 'new_prop', ']', '.', 'value', '# Remove all properties that do not change value between original', '# instance and modified instance', 'for', 'p', 'in', 'list', '(', 'modified_instance', ')', ':', 'if', 'original_instance', '[', 'p', ']', '==', 'modified_instance', '[', 'p', ']', ':', 'del', 'modified_instance', '[', 'p', ']', '# Confirm no key properties in remaining modified instance', 'for', 'p', 'in', 'key_props', ':', 'if', 'p', 'in', 'modified_instance', ':', 'raise', 'CIMError', '(', 'CIM_ERR_INVALID_PARAMETER', ',', '_format', '(', '"ModifyInstance cannot modify key property {0!A}"', ',', 'p', ')', ')', '# Remove any properties from modified instance not in the property_list', 'if', 'property_list', ':', 'for', 'p', 'in', 'list', '(', 'modified_instance', ')', ':', 'if', 'p', 'not', 'in', 'property_list', ':', 'del', 'modified_instance', '[', 'p', ']', '# Exception if property in instance but not class or types do not', '# match', 'for', 'pname', 'in', 'modified_instance', ':', 'if', 'pname', 'not', 'in', 'target_class', '.', 'properties', ':', 'raise', 'CIMError', '(', 'CIM_ERR_INVALID_PARAMETER', ',', '_format', '(', '"Property {0!A} specified in ModifiedInstance is "', '"not exposed by class {1!A} in namespace {2!A}"', ',', 'pname', ',', 'target_class', '.', 'classname', ',', 'namespace', ')', ')', 'cprop', '=', 'target_class', '.', 'properties', '[', 'pname', ']', 'iprop', '=', 'modified_instance', '.', 'properties', '[', 'pname', ']', 
'if', 'iprop', '.', 'is_array', '!=', 'cprop', '.', 'is_array', 'or', 'iprop', '.', 'type', '!=', 'cprop', '.', 'type', 'or', 'iprop', '.', 'array_size', '!=', 'cprop', '.', 'array_size', ':', 'raise', 'CIMError', '(', 'CIM_ERR_INVALID_PARAMETER', ',', '_format', '(', '"Instance and class property name={0!A} type "', '"or other attributes do not match: "', '"instance={1!A}, class={2!A}"', ',', 'pname', ',', 'iprop', ',', 'cprop', ')', ')', '# Modify the value of properties in the repo with those from', '# modified instance', 'index', '=', 'orig_instance_tup', '[', '0', ']', 'instance_repo', '[', 'index', ']', '.', 'update', '(', 'modified_instance', '.', 'properties', ')', 'return'] | Implements a server responder for
:meth:`~pywbem.WBEMConnection.ModifyInstance`
Modify a CIM instance in the local repository.
Raises:
CIMError: CIM_ERR_ALREADY_EXISTS, CIM_ERR_INVALID_CLASS | ['Implements', 'a', 'server', 'responder', 'for', ':', 'meth', ':', '~pywbem', '.', 'WBEMConnection', '.', 'CreateInstance'] | train | https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem_mock/_wbemconnection_mock.py#L2207-L2362 |
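The responder above is long; its property handling boils down to dropping unchanged values, refusing key-property changes, and honouring PropertyList. A toy sketch of just that core on plain dicts (deliberately not using pywbem objects):
def plan_modification(original, modified, key_props, property_list=None):
    # Keep only properties whose value actually changes.
    changes = {k: v for k, v in modified.items() if original.get(k) != v}
    # Key properties may never be modified.
    for prop in key_props:
        if prop in changes:
            raise ValueError("cannot modify key property %r" % prop)
    # PropertyList, when given, restricts which changes are applied.
    if property_list is not None:
        changes = {k: v for k, v in changes.items() if k in property_list}
    return changes

orig = {"InstanceID": "i1", "State": "on", "Owner": "alice"}
new = {"InstanceID": "i1", "State": "off", "Owner": "bob"}
print(plan_modification(orig, new, key_props=["InstanceID"],
                        property_list=["State"]))  # {'State': 'off'}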
7,967 | MartinThoma/hwrt | hwrt/serve.py | _get_part | def _get_part(pointlist, strokes):
"""Get some strokes of pointlist
Parameters
----------
pointlist : list of lists of dicts
strokes : list of integers
Returns
-------
list of lists of dicts
"""
result = []
strokes = sorted(strokes)
for stroke_index in strokes:
result.append(pointlist[stroke_index])
return result | python | def _get_part(pointlist, strokes):
"""Get some strokes of pointlist
Parameters
----------
pointlist : list of lists of dicts
strokes : list of integers
Returns
-------
list of lists of dicts
"""
result = []
strokes = sorted(strokes)
for stroke_index in strokes:
result.append(pointlist[stroke_index])
return result | ['def', '_get_part', '(', 'pointlist', ',', 'strokes', ')', ':', 'result', '=', '[', ']', 'strokes', '=', 'sorted', '(', 'strokes', ')', 'for', 'stroke_index', 'in', 'strokes', ':', 'result', '.', 'append', '(', 'pointlist', '[', 'stroke_index', ']', ')', 'return', 'result'] | Get some strokes of pointlist
Parameters
----------
pointlist : list of lists of dicts
strokes : list of integers
Returns
-------
list of lists of dicts | ['Get', 'some', 'strokes', 'of', 'pointlist'] | train | https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/serve.py#L164-L180 |
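A quick check of the stroke-selection helper above; because the indices are sorted first, the strokes come back in index order regardless of how the caller listed them.
def get_part(pointlist, strokes):
    # Same behaviour as _get_part above: indices are sorted before selection.
    return [pointlist[i] for i in sorted(strokes)]

pointlist = [
    [{"x": 0, "y": 0}],  # stroke 0
    [{"x": 1, "y": 1}],  # stroke 1
    [{"x": 2, "y": 2}],  # stroke 2
]
print(get_part(pointlist, [2, 0]))  # strokes 0 and 2, returned in index order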
7,968 | mikedh/trimesh | trimesh/transformations.py | quaternion_imag | def quaternion_imag(quaternion):
"""Return imaginary part of quaternion.
>>> quaternion_imag([3, 0, 1, 2])
array([0., 1., 2.])
"""
return np.array(quaternion[1:4], dtype=np.float64, copy=True) | python | def quaternion_imag(quaternion):
"""Return imaginary part of quaternion.
>>> quaternion_imag([3, 0, 1, 2])
array([0., 1., 2.])
"""
return np.array(quaternion[1:4], dtype=np.float64, copy=True) | ['def', 'quaternion_imag', '(', 'quaternion', ')', ':', 'return', 'np', '.', 'array', '(', 'quaternion', '[', '1', ':', '4', ']', ',', 'dtype', '=', 'np', '.', 'float64', ',', 'copy', '=', 'True', ')'] | Return imaginary part of quaternion.
>>> quaternion_imag([3, 0, 1, 2])
array([0., 1., 2.]) | ['Return', 'imaginary', 'part', 'of', 'quaternion', '.'] | train | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/transformations.py#L1458-L1465 |
7,969 | bcbio/bcbio-nextgen | bcbio/workflow/template.py | _retrieve_remote | def _retrieve_remote(fnames):
"""Retrieve remote inputs found in the same bucket as the template or metadata files.
"""
for fname in fnames:
if objectstore.is_remote(fname):
inputs = []
regions = []
remote_base = os.path.dirname(fname)
for rfname in objectstore.list(remote_base):
if rfname.endswith(tuple(KNOWN_EXTS.keys())):
inputs.append(rfname)
elif rfname.endswith((".bed", ".bed.gz")):
regions.append(rfname)
return {"base": remote_base,
"inputs": inputs,
"region": regions[0] if len(regions) == 1 else None}
return {} | python | def _retrieve_remote(fnames):
"""Retrieve remote inputs found in the same bucket as the template or metadata files.
"""
for fname in fnames:
if objectstore.is_remote(fname):
inputs = []
regions = []
remote_base = os.path.dirname(fname)
for rfname in objectstore.list(remote_base):
if rfname.endswith(tuple(KNOWN_EXTS.keys())):
inputs.append(rfname)
elif rfname.endswith((".bed", ".bed.gz")):
regions.append(rfname)
return {"base": remote_base,
"inputs": inputs,
"region": regions[0] if len(regions) == 1 else None}
return {} | ['def', '_retrieve_remote', '(', 'fnames', ')', ':', 'for', 'fname', 'in', 'fnames', ':', 'if', 'objectstore', '.', 'is_remote', '(', 'fname', ')', ':', 'inputs', '=', '[', ']', 'regions', '=', '[', ']', 'remote_base', '=', 'os', '.', 'path', '.', 'dirname', '(', 'fname', ')', 'for', 'rfname', 'in', 'objectstore', '.', 'list', '(', 'remote_base', ')', ':', 'if', 'rfname', '.', 'endswith', '(', 'tuple', '(', 'KNOWN_EXTS', '.', 'keys', '(', ')', ')', ')', ':', 'inputs', '.', 'append', '(', 'rfname', ')', 'elif', 'rfname', '.', 'endswith', '(', '(', '".bed"', ',', '".bed.gz"', ')', ')', ':', 'regions', '.', 'append', '(', 'rfname', ')', 'return', '{', '"base"', ':', 'remote_base', ',', '"inputs"', ':', 'inputs', ',', '"region"', ':', 'regions', '[', '0', ']', 'if', 'len', '(', 'regions', ')', '==', '1', 'else', 'None', '}', 'return', '{', '}'] | Retrieve remote inputs found in the same bucket as the template or metadata files. | ['Retrieve', 'remote', 'inputs', 'found', 'in', 'the', 'same', 'bucket', 'as', 'the', 'template', 'or', 'metadata', 'files', '.'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/template.py#L477-L493 |
7,970 | molmod/molmod | molmod/transformations.py | Translation.inv | def inv(self):
"""The inverse translation"""
result = Translation(-self.t)
result._cache_inv = self
return result | python | def inv(self):
"""The inverse translation"""
result = Translation(-self.t)
result._cache_inv = self
return result | ['def', 'inv', '(', 'self', ')', ':', 'result', '=', 'Translation', '(', '-', 'self', '.', 't', ')', 'result', '.', '_cache_inv', '=', 'self', 'return', 'result'] | The inverse translation | ['The', 'inverse', 'translation'] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/transformations.py#L94-L98 |
7,971 | RudolfCardinal/pythonlib | cardinal_pythonlib/dogpile_cache.py | fkg_allowing_type_hints | def fkg_allowing_type_hints(
namespace: Optional[str],
fn: Callable,
to_str: Callable[[Any], str] = repr) -> Callable[[Any], str]:
"""
Replacement for :func:`dogpile.cache.util.function_key_generator` that
handles type-hinted functions like
.. code-block:: python
def testfunc(param: str) -> str:
return param + "hello"
... at which :func:`inspect.getargspec` balks; plus
:func:`inspect.getargspec` is deprecated in Python 3.
Used as an argument to e.g. ``@cache_region_static.cache_on_arguments()``.
Also modified to make the cached function unique per INSTANCE for normal
methods of a class.
Args:
namespace: optional namespace, as per :func:`get_namespace`
fn: function to generate a key for (usually the function being
decorated)
to_str: function to apply to map arguments to a string (to make a
unique key for a particular call to the function); by default it
is :func:`repr`
Returns:
a function that generates a string key, based on a given function as
well as arguments to the returned function itself.
"""
namespace = get_namespace(fn, namespace)
sig = inspect.signature(fn)
argnames = [p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD]
has_self = bool(argnames and argnames[0] in ('self', 'cls'))
def generate_key(*args: Any, **kw: Any) -> str:
"""
Makes the actual key for a specific call to the decorated function,
with particular ``args``/``kwargs``.
"""
if kw:
raise ValueError("This dogpile.cache key function generator, "
"fkg_allowing_type_hints, "
"does not accept keyword arguments.")
if has_self:
# Unlike dogpile's default, make it instance- (or class-) specific
# by including a representation of the "self" or "cls" argument:
args = [hex(id(args[0]))] + list(args[1:])
key = namespace + "|" + " ".join(map(to_str, args))
if DEBUG_INTERNALS:
log.debug("fkg_allowing_type_hints.generate_key() -> {!r}", key)
return key
return generate_key | python | def fkg_allowing_type_hints(
namespace: Optional[str],
fn: Callable,
to_str: Callable[[Any], str] = repr) -> Callable[[Any], str]:
"""
Replacement for :func:`dogpile.cache.util.function_key_generator` that
handles type-hinted functions like
.. code-block:: python
def testfunc(param: str) -> str:
return param + "hello"
... at which :func:`inspect.getargspec` balks; plus
:func:`inspect.getargspec` is deprecated in Python 3.
Used as an argument to e.g. ``@cache_region_static.cache_on_arguments()``.
Also modified to make the cached function unique per INSTANCE for normal
methods of a class.
Args:
namespace: optional namespace, as per :func:`get_namespace`
fn: function to generate a key for (usually the function being
decorated)
to_str: function to apply to map arguments to a string (to make a
unique key for a particular call to the function); by default it
is :func:`repr`
Returns:
a function that generates a string key, based on a given function as
well as arguments to the returned function itself.
"""
namespace = get_namespace(fn, namespace)
sig = inspect.signature(fn)
argnames = [p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD]
has_self = bool(argnames and argnames[0] in ('self', 'cls'))
def generate_key(*args: Any, **kw: Any) -> str:
"""
Makes the actual key for a specific call to the decorated function,
with particular ``args``/``kwargs``.
"""
if kw:
raise ValueError("This dogpile.cache key function generator, "
"fkg_allowing_type_hints, "
"does not accept keyword arguments.")
if has_self:
# Unlike dogpile's default, make it instance- (or class-) specific
# by including a representation of the "self" or "cls" argument:
args = [hex(id(args[0]))] + list(args[1:])
key = namespace + "|" + " ".join(map(to_str, args))
if DEBUG_INTERNALS:
log.debug("fkg_allowing_type_hints.generate_key() -> {!r}", key)
return key
return generate_key | ['def', 'fkg_allowing_type_hints', '(', 'namespace', ':', 'Optional', '[', 'str', ']', ',', 'fn', ':', 'Callable', ',', 'to_str', ':', 'Callable', '[', '[', 'Any', ']', ',', 'str', ']', '=', 'repr', ')', '->', 'Callable', '[', '[', 'Any', ']', ',', 'str', ']', ':', 'namespace', '=', 'get_namespace', '(', 'fn', ',', 'namespace', ')', 'sig', '=', 'inspect', '.', 'signature', '(', 'fn', ')', 'argnames', '=', '[', 'p', '.', 'name', 'for', 'p', 'in', 'sig', '.', 'parameters', '.', 'values', '(', ')', 'if', 'p', '.', 'kind', '==', 'inspect', '.', 'Parameter', '.', 'POSITIONAL_OR_KEYWORD', ']', 'has_self', '=', 'bool', '(', 'argnames', 'and', 'argnames', '[', '0', ']', 'in', '(', "'self'", ',', "'cls'", ')', ')', 'def', 'generate_key', '(', '*', 'args', ':', 'Any', ',', '*', '*', 'kw', ':', 'Any', ')', '->', 'str', ':', '"""\n Makes the actual key for a specific call to the decorated function,\n with particular ``args``/``kwargs``.\n """', 'if', 'kw', ':', 'raise', 'ValueError', '(', '"This dogpile.cache key function generator, "', '"fkg_allowing_type_hints, "', '"does not accept keyword arguments."', ')', 'if', 'has_self', ':', "# Unlike dogpile's default, make it instance- (or class-) specific", '# by including a representation of the "self" or "cls" argument:', 'args', '=', '[', 'hex', '(', 'id', '(', 'args', '[', '0', ']', ')', ')', ']', '+', 'list', '(', 'args', '[', '1', ':', ']', ')', 'key', '=', 'namespace', '+', '"|"', '+', '" "', '.', 'join', '(', 'map', '(', 'to_str', ',', 'args', ')', ')', 'if', 'DEBUG_INTERNALS', ':', 'log', '.', 'debug', '(', '"fkg_allowing_type_hints.generate_key() -> {!r}"', ',', 'key', ')', 'return', 'key', 'return', 'generate_key'] | Replacement for :func:`dogpile.cache.util.function_key_generator` that
handles type-hinted functions like
.. code-block:: python
def testfunc(param: str) -> str:
return param + "hello"
... at which :func:`inspect.getargspec` balks; plus
:func:`inspect.getargspec` is deprecated in Python 3.
Used as an argument to e.g. ``@cache_region_static.cache_on_arguments()``.
Also modified to make the cached function unique per INSTANCE for normal
methods of a class.
Args:
namespace: optional namespace, as per :func:`get_namespace`
fn: function to generate a key for (usually the function being
decorated)
to_str: function to apply to map arguments to a string (to make a
unique key for a particular call to the function); by default it
is :func:`repr`
Returns:
a function that generates a string key, based on a given function as
well as arguments to the returned function itself. | ['Replacement', 'for', ':', 'func', ':', 'dogpile', '.', 'cache', '.', 'util', '.', 'function_key_generator', 'that', 'handles', 'type', '-', 'hinted', 'functions', 'like'] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/dogpile_cache.py#L151-L210 |
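Because the generator's behaviour is fully shown above, it can be exercised directly instead of through dogpile.cache; this sketch assumes cardinal_pythonlib (and its dogpile dependency) is installed and that the function is importable from the module in this row.
from cardinal_pythonlib.dogpile_cache import fkg_allowing_type_hints

class Greeter:
    def greet(self, name: str) -> str:
        return "hello " + name

keygen = fkg_allowing_type_hints(None, Greeter.greet)
a, b = Greeter(), Greeter()
# Keys embed hex(id(self)), so each instance gets its own cache entry.
print(keygen(a, "world"))
print(keygen(b, "world"))  # same namespace, different instance id -> different key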
7,972 | OLC-Bioinformatics/sipprverse | pointsippr/pointsippr.py | PointSippr.runner | def runner(self):
"""
Run the necessary methods in the correct order
"""
logging.info('Starting {} analysis pipeline'.format(self.analysistype))
# Initialise the GenObject
for sample in self.runmetadata.samples:
setattr(sample, self.analysistype, GenObject())
try:
sample[self.analysistype].pointfindergenus = self.pointfinder_org_dict[sample.general.referencegenus]
except KeyError:
sample[self.analysistype].pointfindergenus = 'ND'
# Run the raw read mapping
PointSipping(inputobject=self,
cutoff=self.cutoff)
# Create FASTA files from the raw read matches
self.fasta()
# Run PointFinder on the FASTA files
self.run_pointfinder()
# Create summary reports of the PointFinder outputs
self.parse_pointfinder() | python | def runner(self):
"""
Run the necessary methods in the correct order
"""
logging.info('Starting {} analysis pipeline'.format(self.analysistype))
# Initialise the GenObject
for sample in self.runmetadata.samples:
setattr(sample, self.analysistype, GenObject())
try:
sample[self.analysistype].pointfindergenus = self.pointfinder_org_dict[sample.general.referencegenus]
except KeyError:
sample[self.analysistype].pointfindergenus = 'ND'
# Run the raw read mapping
PointSipping(inputobject=self,
cutoff=self.cutoff)
# Create FASTA files from the raw read matches
self.fasta()
# Run PointFinder on the FASTA files
self.run_pointfinder()
# Create summary reports of the PointFinder outputs
self.parse_pointfinder() | ['def', 'runner', '(', 'self', ')', ':', 'logging', '.', 'info', '(', "'Starting {} analysis pipeline'", '.', 'format', '(', 'self', '.', 'analysistype', ')', ')', '# Initialise the GenObject', 'for', 'sample', 'in', 'self', '.', 'runmetadata', '.', 'samples', ':', 'setattr', '(', 'sample', ',', 'self', '.', 'analysistype', ',', 'GenObject', '(', ')', ')', 'try', ':', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'pointfindergenus', '=', 'self', '.', 'pointfinder_org_dict', '[', 'sample', '.', 'general', '.', 'referencegenus', ']', 'except', 'KeyError', ':', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'pointfindergenus', '=', "'ND'", '# Run the raw read mapping', 'PointSipping', '(', 'inputobject', '=', 'self', ',', 'cutoff', '=', 'self', '.', 'cutoff', ')', '# Create FASTA files from the raw read matcves', 'self', '.', 'fasta', '(', ')', '# Run PointFinder on the FASTA files', 'self', '.', 'run_pointfinder', '(', ')', '# Create summary reports of the PointFinder outputs', 'self', '.', 'parse_pointfinder', '(', ')'] | Run the necessary methods in the correct order | ['Run', 'the', 'necessary', 'methods', 'in', 'the', 'correct', 'order'] | train | https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/pointsippr/pointsippr.py#L19-L39 |
7,973 | cyberdelia/astrolabe | astrolabe/interval.py | Interval.stop | def stop(self):
"""Mark the stop of the interval.
Calling stop on an already stopped interval has no effect.
An interval can only be stopped once.
:returns: the duration if the interval is truly stopped otherwise ``False``.
"""
if self._start_instant is None:
raise IntervalException("Attempt to stop an interval that has not started.")
if self._stop_instant is None:
self._stop_instant = instant()
self._duration = int((self._stop_instant - self._start_instant) * 1000)
return self._duration
return False | python | def stop(self):
"""Mark the stop of the interval.
Calling stop on an already stopped interval has no effect.
An interval can only be stopped once.
:returns: the duration if the interval is truly stopped otherwise ``False``.
"""
if self._start_instant is None:
raise IntervalException("Attempt to stop an interval that has not started.")
if self._stop_instant is None:
self._stop_instant = instant()
self._duration = int((self._stop_instant - self._start_instant) * 1000)
return self._duration
return False | ['def', 'stop', '(', 'self', ')', ':', 'if', 'self', '.', '_start_instant', 'is', 'None', ':', 'raise', 'IntervalException', '(', '"Attempt to stop an interval that has not started."', ')', 'if', 'self', '.', '_stop_instant', 'is', 'None', ':', 'self', '.', '_stop_instant', '=', 'instant', '(', ')', 'self', '.', '_duration', '=', 'int', '(', '(', 'self', '.', '_stop_instant', '-', 'self', '.', '_start_instant', ')', '*', '1000', ')', 'return', 'self', '.', '_duration', 'return', 'False'] | Mark the stop of the interval.
Calling stop on an already stopped interval has no effect.
An interval can only be stopped once.
:returns: the duration if the interval is truely stopped otherwise ``False``. | ['Mark', 'the', 'stop', 'of', 'the', 'interval', '.'] | train | https://github.com/cyberdelia/astrolabe/blob/c8496d330fd6fd6c7bb8f9912b684519ccb5c84e/astrolabe/interval.py#L68-L82 |
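A usage sketch for the stop() contract documented above; it assumes the class also provides a matching start() that sets _start_instant (not shown in this row), and that both names are importable from astrolabe.interval.
import time
from astrolabe.interval import Interval, IntervalException  # assumed import

interval = Interval()
interval.start()           # assumed counterpart of stop()
time.sleep(0.05)
print(interval.stop())     # elapsed milliseconds, e.g. 50
print(interval.stop())     # False -- the interval is already stopped

try:
    Interval().stop()      # stopping before starting
except IntervalException as exc:
    print(exc)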
7,974 | dnanexus/dx-toolkit | doc/examples/dx-apps/report_example/src/report_example.py | main | def main(**kwargs):
"""
Draw a couple of simple graphs and optionally generate an HTML file to upload them
"""
draw_lines()
draw_histogram()
draw_bar_chart()
destination = "-r /report"
if use_html:
generate_html()
command = "dx-build-report-html {h} {d}".format(h=html_filename, d=destination)
else:
command = "dx-build-report-html {l} {b} {h} {d}".format(l=lines_filename, b=bars_filename, h=histogram_filename, d=destination)
sub_output = json.loads(subprocess.check_output(command, shell=True))
output = {}
output["report"] = dxpy.dxlink(sub_output["recordId"])
return output | python | def main(**kwargs):
"""
Draw a couple of simple graphs and optionally generate an HTML file to upload them
"""
draw_lines()
draw_histogram()
draw_bar_chart()
destination = "-r /report"
if use_html:
generate_html()
command = "dx-build-report-html {h} {d}".format(h=html_filename, d=destination)
else:
command = "dx-build-report-html {l} {b} {h} {d}".format(l=lines_filename, b=bars_filename, h=histogram_filename, d=destination)
sub_output = json.loads(subprocess.check_output(command, shell=True))
output = {}
output["report"] = dxpy.dxlink(sub_output["recordId"])
return output | ['def', 'main', '(', '*', '*', 'kwargs', ')', ':', 'draw_lines', '(', ')', 'draw_histogram', '(', ')', 'draw_bar_chart', '(', ')', 'destination', '=', '"-r /report"', 'if', 'use_html', ':', 'generate_html', '(', ')', 'command', '=', '"dx-build-report-html {h} {d}"', '.', 'format', '(', 'h', '=', 'html_filename', ',', 'd', '=', 'destination', ')', 'else', ':', 'command', '=', '"dx-build-report-html {l} {b} {h} {d}"', '.', 'format', '(', 'l', '=', 'lines_filename', ',', 'b', '=', 'bars_filename', ',', 'h', '=', 'histogram_filename', ',', 'd', '=', 'destination', ')', 'sub_output', '=', 'json', '.', 'loads', '(', 'subprocess', '.', 'check_output', '(', 'command', ',', 'shell', '=', 'True', ')', ')', 'output', '=', '{', '}', 'output', '[', '"report"', ']', '=', 'dxpy', '.', 'dxlink', '(', 'sub_output', '[', '"recordId"', ']', ')', 'return', 'output'] | Draw a couple of simple graphs and optionally generate an HTML file to upload them | ['Draw', 'a', 'couple', 'of', 'simple', 'graphs', 'and', 'optionally', 'generate', 'an', 'HTML', 'file', 'to', 'upload', 'them'] | train | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/doc/examples/dx-apps/report_example/src/report_example.py#L37-L53 |
7,975 | anuragkumarak95/wordnet | wordnet/word_net.py | generate_net | def generate_net(df,tf_idf,dump_path=None):
'''Generate WordNetwork dict of Word() instances, and dump as a file if asked to.
@Args:
--
df : IDF value generated by find_tf_idf()
tf_idf : TF-IDF value generated by find_tf_idf()
dump_path : file_path where to dump network entities, standard format is '.wrnt' (default=None)
@returns:
--
word_net : dict of Word() instances (creating a network of words)
'''
# error handling
if dump_path and dump_path[-4:] != __WRNT_FORMAT: raise Exception(__WRNG_FORMAT_MSG)
start_t = datetime.now()
print(TAG,'Network Generation initiated..')
word_net = {} # list of word entities.
#registering all word instances in a dict of network
for word in df.keys():
word_net[word] = Word(word)
print(TAG,'word-network instances created..',datetime.now()-start_t)
start_t = datetime.now()
#TODO: code for going through all the tf_idf elements and finding backward links and forward links of every word in word_net.
for docs in tf_idf:
for word in docs.keys():
word_net[word].addtofrwrd_links(set(docs.keys()))
print(TAG, 'word filled with their relative words(network generated)... ',datetime.now()-start_t)
# Dump the generated lists if dump_path is given.
if dump_path:
start_t = datetime.now()
__words = {}
__network = []
i=0
# creating word dict for reference in next stage.
for word in word_net:
__words[word] = i
i+=1
# creating final network list to be dumped. format=['word',1,2,3,4...(references from words dict)]
for word in word_net:
__temp_list = [word]
__temp_list.extend([__words[w] for w in word_net[word].frwrd_links])
__network.append(__temp_list)
del __temp_list
print(TAG, 'created final relative-words list.. return ready.',datetime.now()-start_t)
start_t = datetime.now()
# Dumping data using pickle
dump_file = open(dump_path,'wb')
pickle.dump(__network,dump_file,protocol=pickle.HIGHEST_PROTOCOL)
dump_file.close()
print(TAG,'word network dumped @',dump_path,datetime.now()-start_t)
#cleaning afterwards
del __words
del __network
return word_net | python | def generate_net(df,tf_idf,dump_path=None):
'''Generate WordNetwork dict of Word() instances, and dump as a file if asked to.
@Args:
--
df : IDF value generated by find_tf_idf()
tf_idf : TF-IDF value generated by find_tf_idf()
dump_path : file_path where to dump network entities, standard format is '.wrnt' (default=None)
@returns:
--
word_net : dict of Word() instances (creating a network of words)
'''
# error handling
if dump_path and dump_path[-4:] != __WRNT_FORMAT: raise Exception(__WRNG_FORMAT_MSG)
start_t = datetime.now()
print(TAG,'Network Generation initiated..')
word_net = {} # list of word entities.
#registering all word instances in a dict of network
for word in df.keys():
word_net[word] = Word(word)
print(TAG,'word-network instances created..',datetime.now()-start_t)
start_t = datetime.now()
#TODO: code for going through all the tf_idf elements and finding backward links and forward links of every word in word_net.
for docs in tf_idf:
for word in docs.keys():
word_net[word].addtofrwrd_links(set(docs.keys()))
print(TAG, 'word filled with their relative words(network generated)... ',datetime.now()-start_t)
# Dump the generated lists if dump_path is given.
if dump_path:
start_t = datetime.now()
__words = {}
__network = []
i=0
# creating word dict for reference in next stage.
for word in word_net:
__words[word] = i
i+=1
# creating final network list to be dumped. format=['word',1,2,3,4...(references from words dict)]
for word in word_net:
__temp_list = [word]
__temp_list.extend([__words[w] for w in word_net[word].frwrd_links])
__network.append(__temp_list)
del __temp_list
print(TAG, 'created final relative-words list.. return ready.',datetime.now()-start_t)
start_t = datetime.now()
# Dumping data using pickle
dump_file = open(dump_path,'wb')
pickle.dump(__network,dump_file,protocol=pickle.HIGHEST_PROTOCOL)
dump_file.close()
print(TAG,'word network dumped @',dump_path,datetime.now()-start_t)
#cleaning afterwards
del __words
del __network
return word_net | ['def', 'generate_net', '(', 'df', ',', 'tf_idf', ',', 'dump_path', '=', 'None', ')', ':', '# error handling', 'if', 'dump_path', 'and', 'dump_path', '[', '-', '4', ':', ']', '!=', '__WRNT_FORMAT', ':', 'raise', 'Exception', '(', '__WRNG_FORMAT_MSG', ')', 'start_t', '=', 'datetime', '.', 'now', '(', ')', 'print', '(', 'TAG', ',', "'Network Genertion initiated..'", ')', 'word_net', '=', '{', '}', '# list of word entities.', '#registering all word instances in a dict of network', 'for', 'word', 'in', 'df', '.', 'keys', '(', ')', ':', 'word_net', '[', 'word', ']', '=', 'Word', '(', 'word', ')', 'print', '(', 'TAG', ',', "'word-network instances created..'", ',', 'datetime', '.', 'now', '(', ')', '-', 'start_t', ')', 'start_t', '=', 'datetime', '.', 'now', '(', ')', '#TODO: code for going through all the tf_idf elements and finding backward links and forward links of every word in word_net.', 'for', 'docs', 'in', 'tf_idf', ':', 'for', 'word', 'in', 'docs', '.', 'keys', '(', ')', ':', 'word_net', '[', 'word', ']', '.', 'addtofrwrd_links', '(', 'set', '(', 'docs', '.', 'keys', '(', ')', ')', ')', 'print', '(', 'TAG', ',', "'word filled with their relative words(network generated)... '", ',', 'datetime', '.', 'now', '(', ')', '-', 'start_t', ')', '# Dump the generated lists if dump_path is given.', 'if', 'dump_path', ':', 'start_t', '=', 'datetime', '.', 'now', '(', ')', '__words', '=', '{', '}', '__network', '=', '[', ']', 'i', '=', '0', '# creating word dict for refrence in next stage.', 'for', 'word', 'in', 'word_net', ':', '__words', '[', 'word', ']', '=', 'i', 'i', '+=', '1', "# creating final network list to be dumped. format=['word',1,2,3,4...(refrences from words dict)]", 'for', 'word', 'in', 'word_net', ':', '__temp_list', '=', '[', 'word', ']', '__temp_list', '.', 'extend', '(', '[', '__words', '[', 'w', ']', 'for', 'w', 'in', 'word_net', '[', 'word', ']', '.', 'frwrd_links', ']', ')', '__network', '.', 'append', '(', '__temp_list', ')', 'del', '__temp_list', 'print', '(', 'TAG', ',', "'created final relative-words list.. return ready.'", ',', 'datetime', '.', 'now', '(', ')', '-', 'start_t', ')', 'start_t', '=', 'datetime', '.', 'now', '(', ')', '# Dumping data using pickle', 'dump_file', '=', 'open', '(', 'dump_path', ',', "'wb'", ')', 'pickle', '.', 'dump', '(', '__network', ',', 'dump_file', ',', 'protocol', '=', 'pickle', '.', 'HIGHEST_PROTOCOL', ')', 'dump_file', '.', 'close', '(', ')', 'print', '(', 'TAG', ',', "'word network dumped @'", ',', 'dump_path', ',', 'datetime', '.', 'now', '(', ')', '-', 'start_t', ')', '#cleaning afterwards', 'del', '__words', 'del', '__network', 'return', 'word_net'] | Generate WordNetwork dict of Word() instance, and dump as a file if asked to.
@Args:
--
df : IDF value generated by find_tf_idf()
tf_idf : TF-IDF value generated by find_tf_idf()
dump_path : file_path where to dump network entities, standard format is '.wrnt' (default=None)
@returns:
--
word_net : list if Word() instances.(creating a network of words) | ['Generate', 'WordNetwork', 'dict', 'of', 'Word', '()', 'instance', 'and', 'dump', 'as', 'a', 'file', 'if', 'asked', 'to', '.'] | train | https://github.com/anuragkumarak95/wordnet/blob/7aba239ddebb0971e9e76124890373b60a2573c8/wordnet/word_net.py#L14-L73 |
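A minimal usage sketch for the generate_net() entry above. The import path and the df/tf_idf literals are assumptions: they only mimic the shapes the docstring describes (df maps word to IDF, tf_idf is a list of per-document word to TF-IDF dicts, normally produced by find_tf_idf()).

from wordnet.word_net import generate_net  # assumed import path

df = {'cat': 0.3, 'dog': 0.3, 'fish': 0.7}                      # word -> IDF
tf_idf = [{'cat': 0.1, 'dog': 0.2}, {'dog': 0.1, 'fish': 0.4}]  # per-document word -> TF-IDF
word_net = generate_net(df, tf_idf)  # add dump_path ending in '.wrnt' to also pickle the network
print(sorted(word_net))              # dict keyed by word, values are Word() instances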
7,976 | mapnik/Cascadenik | cascadenik/parse.py | parse_rule | def parse_rule(tokens, variables, neighbors, parents, is_merc):
""" Parse a rule set, return a list of declarations.
Requires a dictionary of declared variables. Selectors in the neighbors
list are simply grouped, and are generated from comma-delimited lists
of selectors in the stylesheet. Selectors in the parents list should
be combined with those found by this functions, and are generated
from nested, Less-style rulesets.
A rule set is a combination of selectors and declarations:
http://www.w3.org/TR/CSS2/syndata.html#rule-sets
Nesting is described in the Less CSS spec:
http://lesscss.org/#-nested-rules
To handle groups of selectors, use recursion:
http://www.w3.org/TR/CSS2/selector.html#grouping
"""
#
# Local helper function
#
def validate_selector_elements(elements, line, col):
if len(elements) > 2:
raise ParseException('Only two-element selectors are supported for Mapnik styles', line, col)
if len(elements) == 0:
raise ParseException('At least one element must be present in selectors for Mapnik styles', line, col)
if elements[0].names[0] not in ('Map', 'Layer') and elements[0].names[0][0] not in ('.', '#', '*'):
raise ParseException('All non-ID, non-class first elements must be "Layer" Mapnik styles', line, col)
if set([name[:1] for name in elements[0].names[1:]]) - set('#.'):
raise ParseException('All names after the first must be IDs or classes', line, col)
if len(elements) == 2 and elements[1].countTests():
raise ParseException('Only the first element in a selector may have attributes in Mapnik styles', line, col)
if len(elements) == 2 and elements[1].countIDs():
raise ParseException('Only the first element in a selector may have an ID in Mapnik styles', line, col)
if len(elements) == 2 and elements[1].countClasses():
raise ParseException('Only the first element in a selector may have a class in Mapnik styles', line, col)
def parse_variable_definition(tokens):
""" Look for variable value tokens after an @keyword, return an array.
"""
while True:
tname, tvalue, line, col = tokens.next()
if (tname, tvalue) == ('CHAR', ':'):
vtokens = []
while True:
tname, tvalue, line, col = tokens.next()
if (tname, tvalue) in (('CHAR', ';'), ('S', '\n')):
return vtokens
elif tname not in ('S', 'COMMENT'):
vtokens.append((tname, tvalue, line, col))
elif tname not in ('S', 'COMMENT'):
raise ParseException('Unexpected token in variable definition: "%s"' % tvalue, line, col)
#
# The work.
#
ElementClass = SelectorElement
element = None
elements = []
while True:
tname, tvalue, line, col = tokens.next()
if tname == 'ATKEYWORD':
#
# Likely variable definition:
# http://lesscss.org/#-variables
#
variables[tvalue] = parse_variable_definition(tokens)
elif (tname, tvalue) == ('CHAR', '&'):
#
# Start of a nested block with a "&" combinator
# http://lesscss.org/#-nested-rules
#
ElementClass = ConcatenatedElement
elif tname == 'S':
#
# Definitely no longer in a "&" combinator.
#
ElementClass = SelectorElement
elif tname == 'IDENT':
#
# Identifier always starts a new element.
#
element = ElementClass()
elements.append(element)
element.addName(tvalue)
elif tname == 'HASH':
#
# Hash is an ID selector:
# http://www.w3.org/TR/CSS2/selector.html#id-selectors
#
if not element:
element = ElementClass()
elements.append(element)
element.addName(tvalue)
elif (tname, tvalue) == ('CHAR', '.'):
while True:
tname, tvalue, line, col = tokens.next()
if tname == 'IDENT':
#
# Identifier after a period is a class selector:
# http://www.w3.org/TR/CSS2/selector.html#class-html
#
if not element:
element = ElementClass()
elements.append(element)
element.addName('.'+tvalue)
break
else:
raise ParseException('Malformed class selector', line, col)
elif (tname, tvalue) == ('CHAR', '*'):
#
# Asterisk character is a universal selector:
# http://www.w3.org/TR/CSS2/selector.html#universal-selector
#
if not element:
element = ElementClass()
elements.append(element)
element.addName(tvalue)
elif (tname, tvalue) == ('CHAR', '['):
#
# Left-bracket is the start of an attribute selector:
# http://www.w3.org/TR/CSS2/selector.html#attribute-selectors
#
if not element:
element = ElementClass()
elements.append(element)
test = parse_attribute(tokens, is_merc)
element.addTest(test)
elif (tname, tvalue) == ('CHAR', ','):
#
# Comma delineates one of a group of selectors:
# http://www.w3.org/TR/CSS2/selector.html#grouping
#
# Recurse here.
#
neighbors.append(Selector(*elements))
return parse_rule(tokens, variables, neighbors, parents, is_merc)
elif (tname, tvalue) == ('CHAR', '{'):
#
# Left-brace is the start of a block:
# http://www.w3.org/TR/CSS2/syndata.html#block
#
# Return a full block here.
#
class DummySelector:
def __init__(self, *elements):
self.elements = elements[:]
neighbors.append(DummySelector(*elements))
selectors = []
#
# Combine lists of parents and neighbors into a single list of
# selectors, for passing off to parse_block(). There might not
# be any parents, but there will definitely be neighbors.
#
for parent in (parents or [DummySelector()]):
for neighbor in neighbors:
if len(neighbor.elements) == 0:
raise ParseException('At least one element must be present in selectors for Mapnik styles', line, col)
elements = chain(parent.elements + neighbor.elements)
selector = Selector(deepcopy(elements.next()))
for element in elements:
if element.__class__ is ConcatenatedElement:
for name in element.names: selector.elements[-1].addName(deepcopy(name))
for test in element.tests: selector.elements[-1].addTest(deepcopy(test))
else:
selector.addElement(deepcopy(element))
# selector should be fully valid at this point.
validate_selector_elements(selector.elements, line, col)
selector.convertZoomTests(is_merc)
selectors.append(selector)
return parse_block(tokens, variables, selectors, is_merc)
elif tname not in ('S', 'COMMENT'):
raise ParseException('Unexpected token in selector: "%s"' % tvalue, line, col) | python | def parse_rule(tokens, variables, neighbors, parents, is_merc):
""" Parse a rule set, return a list of declarations.
Requires a dictionary of declared variables. Selectors in the neighbors
list are simply grouped, and are generated from comma-delimited lists
of selectors in the stylesheet. Selectors in the parents list should
be combined with those found by this functions, and are generated
from nested, Less-style rulesets.
A rule set is a combination of selectors and declarations:
http://www.w3.org/TR/CSS2/syndata.html#rule-sets
Nesting is described in the Less CSS spec:
http://lesscss.org/#-nested-rules
To handle groups of selectors, use recursion:
http://www.w3.org/TR/CSS2/selector.html#grouping
"""
#
# Local helper function
#
def validate_selector_elements(elements, line, col):
if len(elements) > 2:
raise ParseException('Only two-element selectors are supported for Mapnik styles', line, col)
if len(elements) == 0:
raise ParseException('At least one element must be present in selectors for Mapnik styles', line, col)
if elements[0].names[0] not in ('Map', 'Layer') and elements[0].names[0][0] not in ('.', '#', '*'):
raise ParseException('All non-ID, non-class first elements must be "Layer" Mapnik styles', line, col)
if set([name[:1] for name in elements[0].names[1:]]) - set('#.'):
raise ParseException('All names after the first must be IDs or classes', line, col)
if len(elements) == 2 and elements[1].countTests():
raise ParseException('Only the first element in a selector may have attributes in Mapnik styles', line, col)
if len(elements) == 2 and elements[1].countIDs():
raise ParseException('Only the first element in a selector may have an ID in Mapnik styles', line, col)
if len(elements) == 2 and elements[1].countClasses():
raise ParseException('Only the first element in a selector may have a class in Mapnik styles', line, col)
def parse_variable_definition(tokens):
""" Look for variable value tokens after an @keyword, return an array.
"""
while True:
tname, tvalue, line, col = tokens.next()
if (tname, tvalue) == ('CHAR', ':'):
vtokens = []
while True:
tname, tvalue, line, col = tokens.next()
if (tname, tvalue) in (('CHAR', ';'), ('S', '\n')):
return vtokens
elif tname not in ('S', 'COMMENT'):
vtokens.append((tname, tvalue, line, col))
elif tname not in ('S', 'COMMENT'):
raise ParseException('Unexpected token in variable definition: "%s"' % tvalue, line, col)
#
# The work.
#
ElementClass = SelectorElement
element = None
elements = []
while True:
tname, tvalue, line, col = tokens.next()
if tname == 'ATKEYWORD':
#
# Likely variable definition:
# http://lesscss.org/#-variables
#
variables[tvalue] = parse_variable_definition(tokens)
elif (tname, tvalue) == ('CHAR', '&'):
#
# Start of a nested block with a "&" combinator
# http://lesscss.org/#-nested-rules
#
ElementClass = ConcatenatedElement
elif tname == 'S':
#
# Definitely no longer in a "&" combinator.
#
ElementClass = SelectorElement
elif tname == 'IDENT':
#
# Identifier always starts a new element.
#
element = ElementClass()
elements.append(element)
element.addName(tvalue)
elif tname == 'HASH':
#
# Hash is an ID selector:
# http://www.w3.org/TR/CSS2/selector.html#id-selectors
#
if not element:
element = ElementClass()
elements.append(element)
element.addName(tvalue)
elif (tname, tvalue) == ('CHAR', '.'):
while True:
tname, tvalue, line, col = tokens.next()
if tname == 'IDENT':
#
# Identifier after a period is a class selector:
# http://www.w3.org/TR/CSS2/selector.html#class-html
#
if not element:
element = ElementClass()
elements.append(element)
element.addName('.'+tvalue)
break
else:
raise ParseException('Malformed class selector', line, col)
elif (tname, tvalue) == ('CHAR', '*'):
#
# Asterisk character is a universal selector:
# http://www.w3.org/TR/CSS2/selector.html#universal-selector
#
if not element:
element = ElementClass()
elements.append(element)
element.addName(tvalue)
elif (tname, tvalue) == ('CHAR', '['):
#
# Left-bracket is the start of an attribute selector:
# http://www.w3.org/TR/CSS2/selector.html#attribute-selectors
#
if not element:
element = ElementClass()
elements.append(element)
test = parse_attribute(tokens, is_merc)
element.addTest(test)
elif (tname, tvalue) == ('CHAR', ','):
#
# Comma delineates one of a group of selectors:
# http://www.w3.org/TR/CSS2/selector.html#grouping
#
# Recurse here.
#
neighbors.append(Selector(*elements))
return parse_rule(tokens, variables, neighbors, parents, is_merc)
elif (tname, tvalue) == ('CHAR', '{'):
#
# Left-brace is the start of a block:
# http://www.w3.org/TR/CSS2/syndata.html#block
#
# Return a full block here.
#
class DummySelector:
def __init__(self, *elements):
self.elements = elements[:]
neighbors.append(DummySelector(*elements))
selectors = []
#
# Combine lists of parents and neighbors into a single list of
# selectors, for passing off to parse_block(). There might not
# be any parents, but there will definitely be neighbors.
#
for parent in (parents or [DummySelector()]):
for neighbor in neighbors:
if len(neighbor.elements) == 0:
raise ParseException('At least one element must be present in selectors for Mapnik styles', line, col)
elements = chain(parent.elements + neighbor.elements)
selector = Selector(deepcopy(elements.next()))
for element in elements:
if element.__class__ is ConcatenatedElement:
for name in element.names: selector.elements[-1].addName(deepcopy(name))
for test in element.tests: selector.elements[-1].addTest(deepcopy(test))
else:
selector.addElement(deepcopy(element))
# selector should be fully valid at this point.
validate_selector_elements(selector.elements, line, col)
selector.convertZoomTests(is_merc)
selectors.append(selector)
return parse_block(tokens, variables, selectors, is_merc)
elif tname not in ('S', 'COMMENT'):
raise ParseException('Unexpected token in selector: "%s"' % tvalue, line, col) | ['def', 'parse_rule', '(', 'tokens', ',', 'variables', ',', 'neighbors', ',', 'parents', ',', 'is_merc', ')', ':', '#', '# Local helper function', '#', 'def', 'validate_selector_elements', '(', 'elements', ',', 'line', ',', 'col', ')', ':', 'if', 'len', '(', 'elements', ')', '>', '2', ':', 'raise', 'ParseException', '(', "'Only two-element selectors are supported for Mapnik styles'", ',', 'line', ',', 'col', ')', 'if', 'len', '(', 'elements', ')', '==', '0', ':', 'raise', 'ParseException', '(', "'At least one element must be present in selectors for Mapnik styles'", ',', 'line', ',', 'col', ')', 'if', 'elements', '[', '0', ']', '.', 'names', '[', '0', ']', 'not', 'in', '(', "'Map'", ',', "'Layer'", ')', 'and', 'elements', '[', '0', ']', '.', 'names', '[', '0', ']', '[', '0', ']', 'not', 'in', '(', "'.'", ',', "'#'", ',', "'*'", ')', ':', 'raise', 'ParseException', '(', '\'All non-ID, non-class first elements must be "Layer" Mapnik styles\'', ',', 'line', ',', 'col', ')', 'if', 'set', '(', '[', 'name', '[', ':', '1', ']', 'for', 'name', 'in', 'elements', '[', '0', ']', '.', 'names', '[', '1', ':', ']', ']', ')', '-', 'set', '(', "'#.'", ')', ':', 'raise', 'ParseException', '(', "'All names after the first must be IDs or classes'", ',', 'line', ',', 'col', ')', 'if', 'len', '(', 'elements', ')', '==', '2', 'and', 'elements', '[', '1', ']', '.', 'countTests', '(', ')', ':', 'raise', 'ParseException', '(', "'Only the first element in a selector may have attributes in Mapnik styles'", ',', 'line', ',', 'col', ')', 'if', 'len', '(', 'elements', ')', '==', '2', 'and', 'elements', '[', '1', ']', '.', 'countIDs', '(', ')', ':', 'raise', 'ParseException', '(', "'Only the first element in a selector may have an ID in Mapnik styles'", ',', 'line', ',', 'col', ')', 'if', 'len', '(', 'elements', ')', '==', '2', 'and', 'elements', '[', '1', ']', '.', 'countClasses', '(', ')', ':', 'raise', 'ParseException', '(', "'Only the first element in a selector may have a class in Mapnik styles'", ',', 'line', ',', 'col', ')', 'def', 'parse_variable_definition', '(', 'tokens', ')', ':', '""" Look for variable value tokens after an @keyword, return an array.\n """', 'while', 'True', ':', 'tname', ',', 'tvalue', ',', 'line', ',', 'col', '=', 'tokens', '.', 'next', '(', ')', 'if', '(', 'tname', ',', 'tvalue', ')', '==', '(', "'CHAR'", ',', "':'", ')', ':', 'vtokens', '=', '[', ']', 'while', 'True', ':', 'tname', ',', 'tvalue', ',', 'line', ',', 'col', '=', 'tokens', '.', 'next', '(', ')', 'if', '(', 'tname', ',', 'tvalue', ')', 'in', '(', '(', "'CHAR'", ',', "';'", ')', ',', '(', "'S'", ',', "'\\n'", ')', ')', ':', 'return', 'vtokens', 'elif', 'tname', 'not', 'in', '(', "'S'", ',', "'COMMENT'", ')', ':', 'vtokens', '.', 'append', '(', '(', 'tname', ',', 'tvalue', ',', 'line', ',', 'col', ')', ')', 'elif', 'tname', 'not', 'in', '(', "'S'", ',', "'COMMENT'", ')', ':', 'raise', 'ParseException', '(', '\'Unexpected token in variable definition: "%s"\'', '%', 'tvalue', ',', 'line', ',', 'col', ')', '#', '# The work.', '#', 'ElementClass', '=', 'SelectorElement', 'element', '=', 'None', 'elements', '=', '[', ']', 'while', 'True', ':', 'tname', ',', 'tvalue', ',', 'line', ',', 'col', '=', 'tokens', '.', 'next', '(', ')', 'if', 'tname', '==', "'ATKEYWORD'", ':', '#', '# Likely variable definition:', '# http://lesscss.org/#-variables', '#', 'variables', '[', 'tvalue', ']', '=', 'parse_variable_definition', '(', 'tokens', ')', 'elif', '(', 'tname', 
',', 'tvalue', ')', '==', '(', "'CHAR'", ',', "'&'", ')', ':', '#', '# Start of a nested block with a "&" combinator', '# http://lesscss.org/#-nested-rules', '#', 'ElementClass', '=', 'ConcatenatedElement', 'elif', 'tname', '==', "'S'", ':', '#', '# Definitely no longer in a "&" combinator.', '#', 'ElementClass', '=', 'SelectorElement', 'elif', 'tname', '==', "'IDENT'", ':', '#', '# Identifier always starts a new element.', '#', 'element', '=', 'ElementClass', '(', ')', 'elements', '.', 'append', '(', 'element', ')', 'element', '.', 'addName', '(', 'tvalue', ')', 'elif', 'tname', '==', "'HASH'", ':', '#', '# Hash is an ID selector:', '# http://www.w3.org/TR/CSS2/selector.html#id-selectors', '#', 'if', 'not', 'element', ':', 'element', '=', 'ElementClass', '(', ')', 'elements', '.', 'append', '(', 'element', ')', 'element', '.', 'addName', '(', 'tvalue', ')', 'elif', '(', 'tname', ',', 'tvalue', ')', '==', '(', "'CHAR'", ',', "'.'", ')', ':', 'while', 'True', ':', 'tname', ',', 'tvalue', ',', 'line', ',', 'col', '=', 'tokens', '.', 'next', '(', ')', 'if', 'tname', '==', "'IDENT'", ':', '#', '# Identifier after a period is a class selector:', '# http://www.w3.org/TR/CSS2/selector.html#class-html', '#', 'if', 'not', 'element', ':', 'element', '=', 'ElementClass', '(', ')', 'elements', '.', 'append', '(', 'element', ')', 'element', '.', 'addName', '(', "'.'", '+', 'tvalue', ')', 'break', 'else', ':', 'raise', 'ParseException', '(', "'Malformed class selector'", ',', 'line', ',', 'col', ')', 'elif', '(', 'tname', ',', 'tvalue', ')', '==', '(', "'CHAR'", ',', "'*'", ')', ':', '#', '# Asterisk character is a universal selector:', '# http://www.w3.org/TR/CSS2/selector.html#universal-selector', '#', 'if', 'not', 'element', ':', 'element', '=', 'ElementClass', '(', ')', 'elements', '.', 'append', '(', 'element', ')', 'element', '.', 'addName', '(', 'tvalue', ')', 'elif', '(', 'tname', ',', 'tvalue', ')', '==', '(', "'CHAR'", ',', "'['", ')', ':', '#', '# Left-bracket is the start of an attribute selector:', '# http://www.w3.org/TR/CSS2/selector.html#attribute-selectors', '#', 'if', 'not', 'element', ':', 'element', '=', 'ElementClass', '(', ')', 'elements', '.', 'append', '(', 'element', ')', 'test', '=', 'parse_attribute', '(', 'tokens', ',', 'is_merc', ')', 'element', '.', 'addTest', '(', 'test', ')', 'elif', '(', 'tname', ',', 'tvalue', ')', '==', '(', "'CHAR'", ',', "','", ')', ':', '#', '# Comma delineates one of a group of selectors:', '# http://www.w3.org/TR/CSS2/selector.html#grouping', '#', '# Recurse here.', '#', 'neighbors', '.', 'append', '(', 'Selector', '(', '*', 'elements', ')', ')', 'return', 'parse_rule', '(', 'tokens', ',', 'variables', ',', 'neighbors', ',', 'parents', ',', 'is_merc', ')', 'elif', '(', 'tname', ',', 'tvalue', ')', '==', '(', "'CHAR'", ',', "'{'", ')', ':', '#', '# Left-brace is the start of a block:', '# http://www.w3.org/TR/CSS2/syndata.html#block', '#', '# Return a full block here.', '#', 'class', 'DummySelector', ':', 'def', '__init__', '(', 'self', ',', '*', 'elements', ')', ':', 'self', '.', 'elements', '=', 'elements', '[', ':', ']', 'neighbors', '.', 'append', '(', 'DummySelector', '(', '*', 'elements', ')', ')', 'selectors', '=', '[', ']', '#', '# Combine lists of parents and neighbors into a single list of', '# selectors, for passing off to parse_block(). 
There might not', '# be any parents, but there will definitely be neighbors.', '#', 'for', 'parent', 'in', '(', 'parents', 'or', '[', 'DummySelector', '(', ')', ']', ')', ':', 'for', 'neighbor', 'in', 'neighbors', ':', 'if', 'len', '(', 'neighbor', '.', 'elements', ')', '==', '0', ':', 'raise', 'ParseException', '(', "'At least one element must be present in selectors for Mapnik styles'", ',', 'line', ',', 'col', ')', 'elements', '=', 'chain', '(', 'parent', '.', 'elements', '+', 'neighbor', '.', 'elements', ')', 'selector', '=', 'Selector', '(', 'deepcopy', '(', 'elements', '.', 'next', '(', ')', ')', ')', 'for', 'element', 'in', 'elements', ':', 'if', 'element', '.', '__class__', 'is', 'ConcatenatedElement', ':', 'for', 'name', 'in', 'element', '.', 'names', ':', 'selector', '.', 'elements', '[', '-', '1', ']', '.', 'addName', '(', 'deepcopy', '(', 'name', ')', ')', 'for', 'test', 'in', 'element', '.', 'tests', ':', 'selector', '.', 'elements', '[', '-', '1', ']', '.', 'addTest', '(', 'deepcopy', '(', 'test', ')', ')', 'else', ':', 'selector', '.', 'addElement', '(', 'deepcopy', '(', 'element', ')', ')', '# selector should be fully valid at this point.', 'validate_selector_elements', '(', 'selector', '.', 'elements', ',', 'line', ',', 'col', ')', 'selector', '.', 'convertZoomTests', '(', 'is_merc', ')', 'selectors', '.', 'append', '(', 'selector', ')', 'return', 'parse_block', '(', 'tokens', ',', 'variables', ',', 'selectors', ',', 'is_merc', ')', 'elif', 'tname', 'not', 'in', '(', "'S'", ',', "'COMMENT'", ')', ':', 'raise', 'ParseException', '(', '\'Unexpected token in selector: "%s"\'', '%', 'tvalue', ',', 'line', ',', 'col', ')'] | Parse a rule set, return a list of declarations.
Requires a dictionary of declared variables. Selectors in the neighbors
list are simply grouped, and are generated from comma-delimited lists
of selectors in the stylesheet. Selectors in the parents list should
be combined with those found by this functions, and are generated
from nested, Less-style rulesets.
A rule set is a combination of selectors and declarations:
http://www.w3.org/TR/CSS2/syndata.html#rule-sets
Nesting is described in the Less CSS spec:
http://lesscss.org/#-nested-rules
To handle groups of selectors, use recursion:
http://www.w3.org/TR/CSS2/selector.html#grouping | ['Parse', 'a', 'rule', 'set', 'return', 'a', 'list', 'of', 'declarations', '.', 'Requires', 'a', 'dictionary', 'of', 'declared', 'variables', '.', 'Selectors', 'in', 'the', 'neighbors', 'list', 'are', 'simply', 'grouped', 'and', 'are', 'generated', 'from', 'comma', '-', 'delimited', 'lists', 'of', 'selectors', 'in', 'the', 'stylesheet', '.', 'Selectors', 'in', 'the', 'parents', 'list', 'should', 'be', 'combined', 'with', 'those', 'found', 'by', 'this', 'functions', 'and', 'are', 'generated', 'from', 'nested', 'Less', '-', 'style', 'rulesets', '.', 'A', 'rule', 'set', 'is', 'a', 'combination', 'of', 'selectors', 'and', 'declarations', ':', 'http', ':', '//', 'www', '.', 'w3', '.', 'org', '/', 'TR', '/', 'CSS2', '/', 'syndata', '.', 'html#rule', '-', 'sets', 'Nesting', 'is', 'described', 'in', 'the', 'Less', 'CSS', 'spec', ':', 'http', ':', '//', 'lesscss', '.', 'org', '/', '#', '-', 'nested', '-', 'rules', 'To', 'handle', 'groups', 'of', 'selectors', 'use', 'recursion', ':', 'http', ':', '//', 'www', '.', 'w3', '.', 'org', '/', 'TR', '/', 'CSS2', '/', 'selector', '.', 'html#grouping'] | train | https://github.com/mapnik/Cascadenik/blob/82f66859340a31dfcb24af127274f262d4f3ad85/cascadenik/parse.py#L498-L709 |
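Illustrative only: the comma grouping and Less-style "&" nesting that parse_rule() handles, per the docstring above. In practice such text reaches parse_rule() through the module's higher-level stylesheet parsing rather than a direct call; the property names and attribute tests here are placeholders.

style = """
Layer[zoom>=10], Layer[zoom<5]      /* comma-grouped selectors trigger the recursion */
{
    polygon-fill: #909090;

    &[landuse=park]                 /* "&" starts a nested, concatenated rule */
    {
        polygon-fill: #c8dfaf;
    }
}
"""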
7,977 | jjgomera/iapws | iapws/_iapws.py | _Tension | def _Tension(T):
"""Equation for the surface tension
Parameters
----------
T : float
Temperature, [K]
Returns
-------
σ : float
Surface tension, [N/m]
Notes
------
Raise :class:`NotImplementedError` if input isn't within limits:
* 248.15 ≤ T ≤ 647
* Extrapolate to -25ºC in supercooled liquid metastable state
Examples
--------
>>> _Tension(300)
0.0716859625
>>> _Tension(450)
0.0428914992
References
----------
IAPWS, Revised Release on Surface Tension of Ordinary Water Substance
June 2014, http://www.iapws.org/relguide/Surf-H2O.html
"""
if 248.15 <= T <= Tc:
Tr = T/Tc
return 1e-3*(235.8*(1-Tr)**1.256*(1-0.625*(1-Tr)))
else:
raise NotImplementedError("Incoming out of bound") | python | def _Tension(T):
"""Equation for the surface tension
Parameters
----------
T : float
Temperature, [K]
Returns
-------
σ : float
Surface tension, [N/m]
Notes
------
Raise :class:`NotImplementedError` if input isn't within limits:
* 248.15 ≤ T ≤ 647
* Extrapolate to -25ºC in supercooled liquid metastable state
Examples
--------
>>> _Tension(300)
0.0716859625
>>> _Tension(450)
0.0428914992
References
----------
IAPWS, Revised Release on Surface Tension of Ordinary Water Substance
June 2014, http://www.iapws.org/relguide/Surf-H2O.html
"""
if 248.15 <= T <= Tc:
Tr = T/Tc
return 1e-3*(235.8*(1-Tr)**1.256*(1-0.625*(1-Tr)))
else:
raise NotImplementedError("Incoming out of bound") | ['def', '_Tension', '(', 'T', ')', ':', 'if', '248.15', '<=', 'T', '<=', 'Tc', ':', 'Tr', '=', 'T', '/', 'Tc', 'return', '1e-3', '*', '(', '235.8', '*', '(', '1', '-', 'Tr', ')', '**', '1.256', '*', '(', '1', '-', '0.625', '*', '(', '1', '-', 'Tr', ')', ')', ')', 'else', ':', 'raise', 'NotImplementedError', '(', '"Incoming out of bound"', ')'] | Equation for the surface tension
Parameters
----------
T : float
Temperature, [K]
Returns
-------
σ : float
Surface tension, [N/m]
Notes
------
Raise :class:`NotImplementedError` if input isn't within limits:
* 248.15 ≤ T ≤ 647
* Extrapolate to -25ºC in supercooled liquid metastable state
Examples
--------
>>> _Tension(300)
0.0716859625
>>> _Tension(450)
0.0428914992
References
----------
IAPWS, Revised Release on Surface Tension of Ordinary Water Substance
June 2014, http://www.iapws.org/relguide/Surf-H2O.html | ['Equation', 'for', 'the', 'surface', 'tension'] | train | https://github.com/jjgomera/iapws/blob/1e5812aab38212fb8a63736f61cdcfa427d223b1/iapws/_iapws.py#L872-L908 |
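A standalone check of the correlation quoted above. Tc here is assumed to be the module's critical-temperature constant, 647.096 K.

Tc = 647.096  # K, assumed value of the module constant

def tension(T):
    if not 248.15 <= T <= Tc:
        raise NotImplementedError("Incoming out of bound")
    Tr = T / Tc
    return 1e-3 * (235.8 * (1 - Tr)**1.256 * (1 - 0.625 * (1 - Tr)))

print(tension(300))  # ~0.0716859625 N/m, matching the docstring example
print(tension(450))  # ~0.0428914992 N/m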
7,978 | Opentrons/opentrons | api/src/opentrons/legacy_api/instruments/pipette.py | Pipette.touch_tip | def touch_tip(self, location=None, radius=1.0, v_offset=-1.0, speed=60.0):
"""
Touch the :any:`Pipette` tip to the sides of a well,
with the intent of removing left-over droplets
Notes
-----
If no `location` is passed, the pipette will touch_tip
from its current position.
Parameters
----------
location : :any:`Placeable` or tuple(:any:`Placeable`, :any:`Vector`)
The :any:`Placeable` (:any:`Well`) to perform the touch_tip.
Can also be a tuple with first item :any:`Placeable`,
second item relative :any:`Vector`
radius : float
Radius is a floating point describing the percentage of a well's
radius. When radius=1.0, :any:`touch_tip()` will move to 100% of
the well's radius. When radius=0.5, :any:`touch_tip()` will move to
50% of the well's radius.
Default: 1.0 (100%)
speed: float
The speed for touch tip motion, in mm/s.
Default: 60.0 mm/s, Max: 80.0 mm/s, Min: 20.0 mm/s
v_offset: float
The offset in mm from the top of the well to touch tip.
Default: -1.0 mm
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '8') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
>>> p300.aspirate(50, plate[0]) # doctest: +SKIP
>>> p300.dispense(plate[1]).touch_tip() # doctest: +SKIP
"""
if not self.tip_attached:
log.warning("Cannot touch tip without a tip attached.")
if speed > 80.0:
log.warning("Touch tip speeds greater than 80mm/s not allowed")
speed = 80.0
if speed < 20.0:
log.warning("Touch tip speeds less than 20mm/s not allowed")
speed = 20.0
if helpers.is_number(location):
# Deprecated syntax
log.warning("Please use the `v_offset` named parameter")
v_offset = location
location = None
# if no location specified, use the previously
# associated placeable to get Well dimensions
if location is None:
location = self.previous_placeable
do_publish(self.broker, commands.touch_tip, self.touch_tip, 'before',
None, None, self, location, radius, v_offset, speed)
# move to location if we're not already there
if location != self.previous_placeable:
self.move_to(location)
v_offset = (0, 0, v_offset)
well_edges = [
location.from_center(x=radius, y=0, z=1), # right edge
location.from_center(x=radius * -1, y=0, z=1), # left edge
location.from_center(x=0, y=radius, z=1), # back edge
location.from_center(x=0, y=radius * -1, z=1) # front edge
]
# Apply vertical offset to well edges
well_edges = map(lambda x: x + v_offset, well_edges)
self.robot.gantry.push_speed()
self.robot.gantry.set_speed(speed)
[self.move_to((location, e), strategy='direct') for e in well_edges]
self.robot.gantry.pop_speed()
do_publish(self.broker, commands.touch_tip, self.touch_tip, 'after',
self, None, self, location, radius, v_offset, speed)
return self | python | def touch_tip(self, location=None, radius=1.0, v_offset=-1.0, speed=60.0):
"""
Touch the :any:`Pipette` tip to the sides of a well,
with the intent of removing left-over droplets
Notes
-----
If no `location` is passed, the pipette will touch_tip
from its current position.
Parameters
----------
location : :any:`Placeable` or tuple(:any:`Placeable`, :any:`Vector`)
The :any:`Placeable` (:any:`Well`) to perform the touch_tip.
Can also be a tuple with first item :any:`Placeable`,
second item relative :any:`Vector`
radius : float
Radius is a floating point describing the percentage of a well's
radius. When radius=1.0, :any:`touch_tip()` will move to 100% of
the well's radius. When radius=0.5, :any:`touch_tip()` will move to
50% of the well's radius.
Default: 1.0 (100%)
speed: float
The speed for touch tip motion, in mm/s.
Default: 60.0 mm/s, Max: 80.0 mm/s, Min: 20.0 mm/s
v_offset: float
The offset in mm from the top of the well to touch tip.
Default: -1.0 mm
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '8') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
>>> p300.aspirate(50, plate[0]) # doctest: +SKIP
>>> p300.dispense(plate[1]).touch_tip() # doctest: +SKIP
"""
if not self.tip_attached:
log.warning("Cannot touch tip without a tip attached.")
if speed > 80.0:
log.warning("Touch tip speeds greater than 80mm/s not allowed")
speed = 80.0
if speed < 20.0:
log.warning("Touch tip speeds less than 20mm/s not allowed")
speed = 20.0
if helpers.is_number(location):
# Deprecated syntax
log.warning("Please use the `v_offset` named parameter")
v_offset = location
location = None
# if no location specified, use the previously
# associated placeable to get Well dimensions
if location is None:
location = self.previous_placeable
do_publish(self.broker, commands.touch_tip, self.touch_tip, 'before',
None, None, self, location, radius, v_offset, speed)
# move to location if we're not already there
if location != self.previous_placeable:
self.move_to(location)
v_offset = (0, 0, v_offset)
well_edges = [
location.from_center(x=radius, y=0, z=1), # right edge
location.from_center(x=radius * -1, y=0, z=1), # left edge
location.from_center(x=0, y=radius, z=1), # back edge
location.from_center(x=0, y=radius * -1, z=1) # front edge
]
# Apply vertical offset to well edges
well_edges = map(lambda x: x + v_offset, well_edges)
self.robot.gantry.push_speed()
self.robot.gantry.set_speed(speed)
[self.move_to((location, e), strategy='direct') for e in well_edges]
self.robot.gantry.pop_speed()
do_publish(self.broker, commands.touch_tip, self.touch_tip, 'after',
self, None, self, location, radius, v_offset, speed)
return self | ['def', 'touch_tip', '(', 'self', ',', 'location', '=', 'None', ',', 'radius', '=', '1.0', ',', 'v_offset', '=', '-', '1.0', ',', 'speed', '=', '60.0', ')', ':', 'if', 'not', 'self', '.', 'tip_attached', ':', 'log', '.', 'warning', '(', '"Cannot touch tip without a tip attached."', ')', 'if', 'speed', '>', '80.0', ':', 'log', '.', 'warning', '(', '"Touch tip speeds greater than 80mm/s not allowed"', ')', 'speed', '=', '80.0', 'if', 'speed', '<', '20.0', ':', 'log', '.', 'warning', '(', '"Touch tip speeds greater than 80mm/s not allowed"', ')', 'speed', '=', '20.0', 'if', 'helpers', '.', 'is_number', '(', 'location', ')', ':', '# Deprecated syntax', 'log', '.', 'warning', '(', '"Please use the `v_offset` named parameter"', ')', 'v_offset', '=', 'location', 'location', '=', 'None', '# if no location specified, use the previously', '# associated placeable to get Well dimensions', 'if', 'location', 'is', 'None', ':', 'location', '=', 'self', '.', 'previous_placeable', 'do_publish', '(', 'self', '.', 'broker', ',', 'commands', '.', 'touch_tip', ',', 'self', '.', 'touch_tip', ',', "'before'", ',', 'None', ',', 'None', ',', 'self', ',', 'location', ',', 'radius', ',', 'v_offset', ',', 'speed', ')', "# move to location if we're not already there", 'if', 'location', '!=', 'self', '.', 'previous_placeable', ':', 'self', '.', 'move_to', '(', 'location', ')', 'v_offset', '=', '(', '0', ',', '0', ',', 'v_offset', ')', 'well_edges', '=', '[', 'location', '.', 'from_center', '(', 'x', '=', 'radius', ',', 'y', '=', '0', ',', 'z', '=', '1', ')', ',', '# right edge', 'location', '.', 'from_center', '(', 'x', '=', 'radius', '*', '-', '1', ',', 'y', '=', '0', ',', 'z', '=', '1', ')', ',', '# left edge', 'location', '.', 'from_center', '(', 'x', '=', '0', ',', 'y', '=', 'radius', ',', 'z', '=', '1', ')', ',', '# back edge', 'location', '.', 'from_center', '(', 'x', '=', '0', ',', 'y', '=', 'radius', '*', '-', '1', ',', 'z', '=', '1', ')', '# front edge', ']', '# Apply vertical offset to well edges', 'well_edges', '=', 'map', '(', 'lambda', 'x', ':', 'x', '+', 'v_offset', ',', 'well_edges', ')', 'self', '.', 'robot', '.', 'gantry', '.', 'push_speed', '(', ')', 'self', '.', 'robot', '.', 'gantry', '.', 'set_speed', '(', 'speed', ')', '[', 'self', '.', 'move_to', '(', '(', 'location', ',', 'e', ')', ',', 'strategy', '=', "'direct'", ')', 'for', 'e', 'in', 'well_edges', ']', 'self', '.', 'robot', '.', 'gantry', '.', 'pop_speed', '(', ')', 'do_publish', '(', 'self', '.', 'broker', ',', 'commands', '.', 'touch_tip', ',', 'self', '.', 'touch_tip', ',', "'after'", ',', 'self', ',', 'None', ',', 'self', ',', 'location', ',', 'radius', ',', 'v_offset', ',', 'speed', ')', 'return', 'self'] | Touch the :any:`Pipette` tip to the sides of a well,
with the intent of removing left-over droplets
Notes
-----
If no `location` is passed, the pipette will touch_tip
from its current position.
Parameters
----------
location : :any:`Placeable` or tuple(:any:`Placeable`, :any:`Vector`)
The :any:`Placeable` (:any:`Well`) to perform the touch_tip.
Can also be a tuple with first item :any:`Placeable`,
second item relative :any:`Vector`
radius : float
Radius is a floating point describing the percentage of a well's
radius. When radius=1.0, :any:`touch_tip()` will move to 100% of
the well's radius. When radius=0.5, :any:`touch_tip()` will move to
50% of the well's radius.
Default: 1.0 (100%)
speed: float
The speed for touch tip motion, in mm/s.
Default: 60.0 mm/s, Max: 80.0 mm/s, Min: 20.0 mm/s
v_offset: float
The offset in mm from the top of the well to touch tip.
Default: -1.0 mm
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '8') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
>>> p300.aspirate(50, plate[0]) # doctest: +SKIP
>>> p300.dispense(plate[1]).touch_tip() # doctest: +SKIP | ['Touch', 'the', ':', 'any', ':', 'Pipette', 'tip', 'to', 'the', 'sides', 'of', 'a', 'well', 'with', 'the', 'intent', 'of', 'removing', 'left', '-', 'over', 'droplets'] | train | https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/legacy_api/instruments/pipette.py#L723-L817 |
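A sketch extending the doctest above to exercise the documented keyword arguments; the labware and mount choices are illustrative, exactly as in the original example.

from opentrons import instruments, labware, robot  # legacy API, as in the docstring

robot.reset()
plate = labware.load('96-flat', '8')
p300 = instruments.P300_Single(mount='left')
p300.aspirate(50, plate[0])
p300.dispense(plate[1])
p300.touch_tip(plate[1], radius=0.75, v_offset=-2.0, speed=40.0)  # 75% of the well's radius, 2 mm below the top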
7,979 | ThreatConnect-Inc/tcex | app_init/playbook_utility/app.py | App.run | def run(self):
"""Run the App main logic.
This method should contain the core logic of the App.
"""
# read inputs
indent = int(self.tcex.playbook.read(self.args.indent))
json_data = self.tcex.playbook.read(self.args.json_data)
# get the playbook variable type
json_data_type = self.tcex.playbook.variable_type(self.args.json_data)
# convert string input to dict
if json_data_type in ['String']:
json_data = json.loads(json_data)
# generate the new "pretty" json (this will be used as an option variable)
try:
self.pretty_json = json.dumps(json_data, indent=indent, sort_keys=self.args.sort_keys)
except Exception:
self.tcex.exit(1, 'Failed parsing JSON data.')
# set the App exit message
self.exit_message = 'JSON prettified.' | python | def run(self):
"""Run the App main logic.
This method should contain the core logic of the App.
"""
# read inputs
indent = int(self.tcex.playbook.read(self.args.indent))
json_data = self.tcex.playbook.read(self.args.json_data)
# get the playbook variable type
json_data_type = self.tcex.playbook.variable_type(self.args.json_data)
# convert string input to dict
if json_data_type in ['String']:
json_data = json.loads(json_data)
# generate the new "pretty" json (this will be used as an option variable)
try:
self.pretty_json = json.dumps(json_data, indent=indent, sort_keys=self.args.sort_keys)
except Exception:
self.tcex.exit(1, 'Failed parsing JSON data.')
# set the App exit message
self.exit_message = 'JSON prettified.' | ['def', 'run', '(', 'self', ')', ':', '# read inputs', 'indent', '=', 'int', '(', 'self', '.', 'tcex', '.', 'playbook', '.', 'read', '(', 'self', '.', 'args', '.', 'indent', ')', ')', 'json_data', '=', 'self', '.', 'tcex', '.', 'playbook', '.', 'read', '(', 'self', '.', 'args', '.', 'json_data', ')', '# get the playbook variable type', 'json_data_type', '=', 'self', '.', 'tcex', '.', 'playbook', '.', 'variable_type', '(', 'self', '.', 'args', '.', 'json_data', ')', '# convert string input to dict', 'if', 'json_data_type', 'in', '[', "'String'", ']', ':', 'json_data', '=', 'json', '.', 'loads', '(', 'json_data', ')', '# generate the new "pretty" json (this will be used as an option variable)', 'try', ':', 'self', '.', 'pretty_json', '=', 'json', '.', 'dumps', '(', 'json_data', ',', 'indent', '=', 'indent', ',', 'sort_keys', '=', 'self', '.', 'args', '.', 'sort_keys', ')', 'except', 'Exception', ':', 'self', '.', 'tcex', '.', 'exit', '(', '1', ',', "'Failed parsing JSON data.'", ')', '# set the App exit message', 'self', '.', 'exit_message', '=', "'JSON prettified.'"] | Run the App main logic.
This method should contain the core logic of the App. | ['Run', 'the', 'App', 'main', 'logic', '.'] | train | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/app_init/playbook_utility/app.py#L25-L48 |
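The core prettify step above, reduced to a standalone sketch: parse the JSON string input and re-serialize it with the requested indent and key ordering.

import json

json_data = '{"b": 2, "a": {"y": 1, "x": 0}}'
pretty_json = json.dumps(json.loads(json_data), indent=4, sort_keys=True)
print(pretty_json)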
7,980 | podio/podio-py | pypodio2/areas.py | Files.create | def create(self, filename, filedata):
"""Create a file from raw data"""
attributes = {'filename': filename,
'source': filedata}
return self.transport.POST(url='/file/v2/', body=attributes, type='multipart/form-data') | python | def create(self, filename, filedata):
"""Create a file from raw data"""
attributes = {'filename': filename,
'source': filedata}
return self.transport.POST(url='/file/v2/', body=attributes, type='multipart/form-data') | ['def', 'create', '(', 'self', ',', 'filename', ',', 'filedata', ')', ':', 'attributes', '=', '{', "'filename'", ':', 'filename', ',', "'source'", ':', 'filedata', '}', 'return', 'self', '.', 'transport', '.', 'POST', '(', 'url', '=', "'/file/v2/'", ',', 'body', '=', 'attributes', ',', 'type', '=', "'multipart/form-data'", ')'] | Create a file from raw data | ['Create', 'a', 'file', 'from', 'raw', 'data'] | train | https://github.com/podio/podio-py/blob/5ce956034a06c98b0ef18fcd940b36da0908ad6c/pypodio2/areas.py#L522-L526 |
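A hypothetical call through an authenticated pypodio2 client; the credentials, the file name, and the assumption that the response carries a file_id field are placeholders.

from pypodio2 import api  # assumed client entry point

client = api.OAuthClient('client-id', 'client-secret', 'user@example.com', 'password')
with open('report.pdf', 'rb') as fh:
    uploaded = client.Files.create('report.pdf', fh.read())
print(uploaded['file_id'])  # assumed response field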
7,981 | Falkonry/falkonry-python-client | falkonryclient/service/falkonry.py | FalkonryService.get_assessment | def get_assessment(self, assessment):
"""
To get Assessment by id
"""
response = self.http.get('/Assessment/' + str(assessment))
assessment = Schemas.Assessment(assessment=response)
return assessment | python | def get_assessment(self, assessment):
"""
To get Assessment by id
"""
response = self.http.get('/Assessment/' + str(assessment))
assessment = Schemas.Assessment(assessment=response)
return assessment | ['def', 'get_assessment', '(', 'self', ',', 'assessment', ')', ':', 'response', '=', 'self', '.', 'http', '.', 'get', '(', "'/Assessment/'", '+', 'str', '(', 'assessment', ')', ')', 'assessment', '=', 'Schemas', '.', 'Assessment', '(', 'assessment', '=', 'response', ')', 'return', 'assessment'] | To get Assessment by id | ['To', 'get', 'Assessment', 'by', 'id'] | train | https://github.com/Falkonry/falkonry-python-client/blob/0aeb2b00293ee94944f1634e9667401b03da29c1/falkonryclient/service/falkonry.py#L82-L88 |
7,982 | nccgroup/Scout2 | AWSScout2/services/cloudwatch.py | CloudWatchRegionConfig.parse_alarm | def parse_alarm(self, global_params, region, alarm):
"""
Parse a single CloudWatch alarm
:param global_params: Parameters shared for all regions
:param region: Name of the AWS region
:param alarm: Alarm
"""
alarm['arn'] = alarm.pop('AlarmArn')
alarm['name'] = alarm.pop('AlarmName')
# Drop some data
for k in ['AlarmConfigurationUpdatedTimestamp', 'StateReason', 'StateReasonData', 'StateUpdatedTimestamp']:
foo = alarm.pop(k) if k in alarm else None
alarm_id = self.get_non_aws_id(alarm['arn'])
self.alarms[alarm_id] = alarm | python | def parse_alarm(self, global_params, region, alarm):
"""
Parse a single CloudWatch alarm
:param global_params: Parameters shared for all regions
:param region: Name of the AWS region
:param alarm: Alarm
"""
alarm['arn'] = alarm.pop('AlarmArn')
alarm['name'] = alarm.pop('AlarmName')
# Drop some data
for k in ['AlarmConfigurationUpdatedTimestamp', 'StateReason', 'StateReasonData', 'StateUpdatedTimestamp']:
foo = alarm.pop(k) if k in alarm else None
alarm_id = self.get_non_aws_id(alarm['arn'])
self.alarms[alarm_id] = alarm | ['def', 'parse_alarm', '(', 'self', ',', 'global_params', ',', 'region', ',', 'alarm', ')', ':', 'alarm', '[', "'arn'", ']', '=', 'alarm', '.', 'pop', '(', "'AlarmArn'", ')', 'alarm', '[', "'name'", ']', '=', 'alarm', '.', 'pop', '(', "'AlarmName'", ')', '# Drop some data', 'for', 'k', 'in', '[', "'AlarmConfigurationUpdatedTimestamp'", ',', "'StateReason'", ',', "'StateReasonData'", ',', "'StateUpdatedTimestamp'", ']', ':', 'foo', '=', 'alarm', '.', 'pop', '(', 'k', ')', 'if', 'k', 'in', 'alarm', 'else', 'None', 'alarm_id', '=', 'self', '.', 'get_non_aws_id', '(', 'alarm', '[', "'arn'", ']', ')', 'self', '.', 'alarms', '[', 'alarm_id', ']', '=', 'alarm'] | Parse a single CloudWatch trail
:param global_params: Parameters shared for all regions
:param region: Name of the AWS region
:param alarm: Alarm | ['Parse', 'a', 'single', 'CloudWatch', 'trail'] | train | https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/cloudwatch.py#L21-L35 |
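A standalone sketch of the transformation applied above to one DescribeAlarms record: rename the ARN/name keys and drop the noisy state and timestamp fields. The sample alarm dict is illustrative.

alarm = {
    'AlarmArn': 'arn:aws:cloudwatch:us-east-1:123456789012:alarm:HighCPU',
    'AlarmName': 'HighCPU',
    'StateReason': 'Threshold Crossed',
    'StateUpdatedTimestamp': '2019-01-01T00:00:00Z',
    'MetricName': 'CPUUtilization',
}
alarm['arn'] = alarm.pop('AlarmArn')
alarm['name'] = alarm.pop('AlarmName')
for k in ['AlarmConfigurationUpdatedTimestamp', 'StateReason', 'StateReasonData', 'StateUpdatedTimestamp']:
    alarm.pop(k, None)  # equivalent to the conditional pop above
print(sorted(alarm))    # ['MetricName', 'arn', 'name']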
7,983 | wavycloud/pyboto3 | pyboto3/elasticloadbalancingv2.py | modify_target_group | def modify_target_group(TargetGroupArn=None, HealthCheckProtocol=None, HealthCheckPort=None, HealthCheckPath=None, HealthCheckIntervalSeconds=None, HealthCheckTimeoutSeconds=None, HealthyThresholdCount=None, UnhealthyThresholdCount=None, Matcher=None):
"""
Modifies the health checks used when evaluating the health state of the targets in the specified target group.
To monitor the health of the targets, use DescribeTargetHealth .
See also: AWS API Documentation
Examples
This example changes the configuration of the health checks used to evaluate the health of the targets for the specified target group.
Expected Output:
:example: response = client.modify_target_group(
TargetGroupArn='string',
HealthCheckProtocol='HTTP'|'HTTPS',
HealthCheckPort='string',
HealthCheckPath='string',
HealthCheckIntervalSeconds=123,
HealthCheckTimeoutSeconds=123,
HealthyThresholdCount=123,
UnhealthyThresholdCount=123,
Matcher={
'HttpCode': 'string'
}
)
:type TargetGroupArn: string
:param TargetGroupArn: [REQUIRED]
The Amazon Resource Name (ARN) of the target group.
:type HealthCheckProtocol: string
:param HealthCheckProtocol: The protocol to use to connect with the target.
:type HealthCheckPort: string
:param HealthCheckPort: The port to use to connect with the target.
:type HealthCheckPath: string
:param HealthCheckPath: The ping path that is the destination for the health check request.
:type HealthCheckIntervalSeconds: integer
:param HealthCheckIntervalSeconds: The approximate amount of time, in seconds, between health checks of an individual target.
:type HealthCheckTimeoutSeconds: integer
:param HealthCheckTimeoutSeconds: The amount of time, in seconds, during which no response means a failed health check.
:type HealthyThresholdCount: integer
:param HealthyThresholdCount: The number of consecutive health check successes required before considering an unhealthy target healthy.
:type UnhealthyThresholdCount: integer
:param UnhealthyThresholdCount: The number of consecutive health check failures required before considering the target unhealthy.
:type Matcher: dict
:param Matcher: The HTTP codes to use when checking for a successful response from a target.
HttpCode (string) -- [REQUIRED]The HTTP codes. You can specify values between 200 and 499. The default value is 200. You can specify multiple values (for example, '200,202') or a range of values (for example, '200-299').
:rtype: dict
:return: {
'TargetGroups': [
{
'TargetGroupArn': 'string',
'TargetGroupName': 'string',
'Protocol': 'HTTP'|'HTTPS',
'Port': 123,
'VpcId': 'string',
'HealthCheckProtocol': 'HTTP'|'HTTPS',
'HealthCheckPort': 'string',
'HealthCheckIntervalSeconds': 123,
'HealthCheckTimeoutSeconds': 123,
'HealthyThresholdCount': 123,
'UnhealthyThresholdCount': 123,
'HealthCheckPath': 'string',
'Matcher': {
'HttpCode': 'string'
},
'LoadBalancerArns': [
'string',
]
},
]
}
:returns:
(string) --
"""
pass | python | def modify_target_group(TargetGroupArn=None, HealthCheckProtocol=None, HealthCheckPort=None, HealthCheckPath=None, HealthCheckIntervalSeconds=None, HealthCheckTimeoutSeconds=None, HealthyThresholdCount=None, UnhealthyThresholdCount=None, Matcher=None):
"""
Modifies the health checks used when evaluating the health state of the targets in the specified target group.
To monitor the health of the targets, use DescribeTargetHealth .
See also: AWS API Documentation
Examples
This example changes the configuration of the health checks used to evaluate the health of the targets for the specified target group.
Expected Output:
:example: response = client.modify_target_group(
TargetGroupArn='string',
HealthCheckProtocol='HTTP'|'HTTPS',
HealthCheckPort='string',
HealthCheckPath='string',
HealthCheckIntervalSeconds=123,
HealthCheckTimeoutSeconds=123,
HealthyThresholdCount=123,
UnhealthyThresholdCount=123,
Matcher={
'HttpCode': 'string'
}
)
:type TargetGroupArn: string
:param TargetGroupArn: [REQUIRED]
The Amazon Resource Name (ARN) of the target group.
:type HealthCheckProtocol: string
:param HealthCheckProtocol: The protocol to use to connect with the target.
:type HealthCheckPort: string
:param HealthCheckPort: The port to use to connect with the target.
:type HealthCheckPath: string
:param HealthCheckPath: The ping path that is the destination for the health check request.
:type HealthCheckIntervalSeconds: integer
:param HealthCheckIntervalSeconds: The approximate amount of time, in seconds, between health checks of an individual target.
:type HealthCheckTimeoutSeconds: integer
:param HealthCheckTimeoutSeconds: The amount of time, in seconds, during which no response means a failed health check.
:type HealthyThresholdCount: integer
:param HealthyThresholdCount: The number of consecutive health check successes required before considering an unhealthy target healthy.
:type UnhealthyThresholdCount: integer
:param UnhealthyThresholdCount: The number of consecutive health check failures required before considering the target unhealthy.
:type Matcher: dict
:param Matcher: The HTTP codes to use when checking for a successful response from a target.
HttpCode (string) -- [REQUIRED]The HTTP codes. You can specify values between 200 and 499. The default value is 200. You can specify multiple values (for example, '200,202') or a range of values (for example, '200-299').
:rtype: dict
:return: {
'TargetGroups': [
{
'TargetGroupArn': 'string',
'TargetGroupName': 'string',
'Protocol': 'HTTP'|'HTTPS',
'Port': 123,
'VpcId': 'string',
'HealthCheckProtocol': 'HTTP'|'HTTPS',
'HealthCheckPort': 'string',
'HealthCheckIntervalSeconds': 123,
'HealthCheckTimeoutSeconds': 123,
'HealthyThresholdCount': 123,
'UnhealthyThresholdCount': 123,
'HealthCheckPath': 'string',
'Matcher': {
'HttpCode': 'string'
},
'LoadBalancerArns': [
'string',
]
},
]
}
:returns:
(string) --
"""
pass | ['def', 'modify_target_group', '(', 'TargetGroupArn', '=', 'None', ',', 'HealthCheckProtocol', '=', 'None', ',', 'HealthCheckPort', '=', 'None', ',', 'HealthCheckPath', '=', 'None', ',', 'HealthCheckIntervalSeconds', '=', 'None', ',', 'HealthCheckTimeoutSeconds', '=', 'None', ',', 'HealthyThresholdCount', '=', 'None', ',', 'UnhealthyThresholdCount', '=', 'None', ',', 'Matcher', '=', 'None', ')', ':', 'pass'] | Modifies the health checks used when evaluating the health state of the targets in the specified target group.
To monitor the health of the targets, use DescribeTargetHealth .
See also: AWS API Documentation
Examples
This example changes the configuration of the health checks used to evaluate the health of the targets for the specified target group.
Expected Output:
:example: response = client.modify_target_group(
TargetGroupArn='string',
HealthCheckProtocol='HTTP'|'HTTPS',
HealthCheckPort='string',
HealthCheckPath='string',
HealthCheckIntervalSeconds=123,
HealthCheckTimeoutSeconds=123,
HealthyThresholdCount=123,
UnhealthyThresholdCount=123,
Matcher={
'HttpCode': 'string'
}
)
:type TargetGroupArn: string
:param TargetGroupArn: [REQUIRED]
The Amazon Resource Name (ARN) of the target group.
:type HealthCheckProtocol: string
:param HealthCheckProtocol: The protocol to use to connect with the target.
:type HealthCheckPort: string
:param HealthCheckPort: The port to use to connect with the target.
:type HealthCheckPath: string
:param HealthCheckPath: The ping path that is the destination for the health check request.
:type HealthCheckIntervalSeconds: integer
:param HealthCheckIntervalSeconds: The approximate amount of time, in seconds, between health checks of an individual target.
:type HealthCheckTimeoutSeconds: integer
:param HealthCheckTimeoutSeconds: The amount of time, in seconds, during which no response means a failed health check.
:type HealthyThresholdCount: integer
:param HealthyThresholdCount: The number of consecutive health check successes required before considering an unhealthy target healthy.
:type UnhealthyThresholdCount: integer
:param UnhealthyThresholdCount: The number of consecutive health check failures required before considering the target unhealthy.
:type Matcher: dict
:param Matcher: The HTTP codes to use when checking for a successful response from a target.
HttpCode (string) -- [REQUIRED]The HTTP codes. You can specify values between 200 and 499. The default value is 200. You can specify multiple values (for example, '200,202') or a range of values (for example, '200-299').
:rtype: dict
:return: {
'TargetGroups': [
{
'TargetGroupArn': 'string',
'TargetGroupName': 'string',
'Protocol': 'HTTP'|'HTTPS',
'Port': 123,
'VpcId': 'string',
'HealthCheckProtocol': 'HTTP'|'HTTPS',
'HealthCheckPort': 'string',
'HealthCheckIntervalSeconds': 123,
'HealthCheckTimeoutSeconds': 123,
'HealthyThresholdCount': 123,
'UnhealthyThresholdCount': 123,
'HealthCheckPath': 'string',
'Matcher': {
'HttpCode': 'string'
},
'LoadBalancerArns': [
'string',
]
},
]
}
:returns:
(string) -- | ['Modifies', 'the', 'health', 'checks', 'used', 'when', 'evaluating', 'the', 'health', 'state', 'of', 'the', 'targets', 'in', 'the', 'specified', 'target', 'group', '.', 'To', 'monitor', 'the', 'health', 'of', 'the', 'targets', 'use', 'DescribeTargetHealth', '.', 'See', 'also', ':', 'AWS', 'API', 'Documentation', 'Examples', 'This', 'example', 'changes', 'the', 'configuration', 'of', 'the', 'health', 'checks', 'used', 'to', 'evaluate', 'the', 'health', 'of', 'the', 'targets', 'for', 'the', 'specified', 'target', 'group', '.', 'Expected', 'Output', ':', ':', 'example', ':', 'response', '=', 'client', '.', 'modify_target_group', '(', 'TargetGroupArn', '=', 'string', 'HealthCheckProtocol', '=', 'HTTP', '|', 'HTTPS', 'HealthCheckPort', '=', 'string', 'HealthCheckPath', '=', 'string', 'HealthCheckIntervalSeconds', '=', '123', 'HealthCheckTimeoutSeconds', '=', '123', 'HealthyThresholdCount', '=', '123', 'UnhealthyThresholdCount', '=', '123', 'Matcher', '=', '{', 'HttpCode', ':', 'string', '}', ')', ':', 'type', 'TargetGroupArn', ':', 'string', ':', 'param', 'TargetGroupArn', ':', '[', 'REQUIRED', ']', 'The', 'Amazon', 'Resource', 'Name', '(', 'ARN', ')', 'of', 'the', 'target', 'group', '.'] | train | https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/elasticloadbalancingv2.py#L1532-L1619 |
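A sketch of the corresponding live call with boto3 (the stub above only documents the signature and returns nothing); the target group ARN and health-check values are placeholders.

import boto3

elbv2 = boto3.client('elbv2')
response = elbv2.modify_target_group(
    TargetGroupArn='arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-targets/0123456789abcdef',
    HealthCheckProtocol='HTTPS',
    HealthCheckPort='443',
    HealthCheckPath='/healthz',
    HealthCheckIntervalSeconds=30,
    HealthyThresholdCount=3,
    UnhealthyThresholdCount=2,
    Matcher={'HttpCode': '200-299'},
)
print(response['TargetGroups'][0]['HealthCheckPath'])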
7,984 | senaite/senaite.core | bika/lims/browser/partition_magic.py | PartitionMagicView.get_sampletype_data | def get_sampletype_data(self):
"""Returns a list of SampleType data
"""
for obj in self.get_sampletypes():
info = self.get_base_info(obj)
yield info | python | def get_sampletype_data(self):
"""Returns a list of SampleType data
"""
for obj in self.get_sampletypes():
info = self.get_base_info(obj)
yield info | ['def', 'get_sampletype_data', '(', 'self', ')', ':', 'for', 'obj', 'in', 'self', '.', 'get_sampletypes', '(', ')', ':', 'info', '=', 'self', '.', 'get_base_info', '(', 'obj', ')', 'yield', 'info'] | Returns a list of SampleType data | ['Returns', 'a', 'list', 'of', 'SampleType', 'data'] | train | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/partition_magic.py#L155-L160 |
7,985 | aouyar/PyMunin | pysysinfo/system.py | SystemInfo.getCPUuse | def getCPUuse(self):
"""Return cpu time utilization in seconds.
@return: Dictionary of stats.
"""
hz = os.sysconf('SC_CLK_TCK')
info_dict = {}
try:
fp = open(cpustatFile, 'r')
line = fp.readline()
fp.close()
except:
raise IOError('Failed reading stats from file: %s' % cpustatFile)
headers = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq', 'steal', 'guest']
arr = line.split()
if len(arr) > 1 and arr[0] == 'cpu':
return dict(zip(headers[0:len(arr)], [(float(t) / hz) for t in arr[1:]]))
return info_dict | python | def getCPUuse(self):
"""Return cpu time utilization in seconds.
@return: Dictionary of stats.
"""
hz = os.sysconf('SC_CLK_TCK')
info_dict = {}
try:
fp = open(cpustatFile, 'r')
line = fp.readline()
fp.close()
except:
raise IOError('Failed reading stats from file: %s' % cpustatFile)
headers = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq', 'steal', 'guest']
arr = line.split()
if len(arr) > 1 and arr[0] == 'cpu':
return dict(zip(headers[0:len(arr)], [(float(t) / hz) for t in arr[1:]]))
return info_dict | ['def', 'getCPUuse', '(', 'self', ')', ':', 'hz', '=', 'os', '.', 'sysconf', '(', "'SC_CLK_TCK'", ')', 'info_dict', '=', '{', '}', 'try', ':', 'fp', '=', 'open', '(', 'cpustatFile', ',', "'r'", ')', 'line', '=', 'fp', '.', 'readline', '(', ')', 'fp', '.', 'close', '(', ')', 'except', ':', 'raise', 'IOError', '(', "'Failed reading stats from file: %s'", '%', 'cpustatFile', ')', 'headers', '=', '[', "'user'", ',', "'nice'", ',', "'system'", ',', "'idle'", ',', "'iowait'", ',', "'irq'", ',', "'softirq'", ',', "'steal'", ',', "'guest'", ']', 'arr', '=', 'line', '.', 'split', '(', ')', 'if', 'len', '(', 'arr', ')', '>', '1', 'and', 'arr', '[', '0', ']', '==', "'cpu'", ':', 'return', 'dict', '(', 'zip', '(', 'headers', '[', '0', ':', 'len', '(', 'arr', ')', ']', ',', '[', '(', 'float', '(', 't', ')', '/', 'hz', ')', 'for', 't', 'in', 'arr', '[', '1', ':', ']', ']', ')', ')', 'return', 'info_dict'] | Return cpu time utilization in seconds.
@return: Dictionary of stats. | ['Return', 'cpu', 'time', 'utilization', 'in', 'seconds', '.'] | train | https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/system.py#L78-L96 |
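A usage sketch, assuming SystemInfo() needs no constructor arguments and that the module imports from the path shown in the record; since the returned values are cumulative seconds, utilization is normally taken from the delta of two samples.

import time
from pysysinfo.system import SystemInfo

info = SystemInfo()
first = info.getCPUuse()
time.sleep(1)
second = info.getCPUuse()

# treat everything except idle time as busy for a rough utilization figure
busy = sum(second[k] - first[k] for k in second if k != 'idle')
total = sum(second[k] - first[k] for k in second)
print("cpu busy: %.1f%%" % (100.0 * busy / total))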
7,986 | note35/sinon | sinon/lib/util/CollectionHandler.py | dict_partial_cmp | def dict_partial_cmp(target_dict, dict_list, ducktype):
"""
Whether partial dict are in dict_list or not
"""
for called_dict in dict_list:
# ignore invalid test case
if len(target_dict) > len(called_dict):
continue
# get the intersection of two dicts
intersection = {}
for item in target_dict:
dtype = ducktype(target_dict[item])
if hasattr(dtype, "mtest"):
if item in called_dict and dtype.mtest(called_dict[item]):
intersection[item] = target_dict[item]
else:
if item in called_dict and dtype == called_dict[item]:
intersection[item] = target_dict[item]
if intersection == target_dict:
return True
# if no any arguments matched to called_args, return False
return False | python | def dict_partial_cmp(target_dict, dict_list, ducktype):
"""
Whether partial dict are in dict_list or not
"""
for called_dict in dict_list:
# ignore invalid test case
if len(target_dict) > len(called_dict):
continue
# get the intersection of two dicts
intersection = {}
for item in target_dict:
dtype = ducktype(target_dict[item])
if hasattr(dtype, "mtest"):
if item in called_dict and dtype.mtest(called_dict[item]):
intersection[item] = target_dict[item]
else:
if item in called_dict and dtype == called_dict[item]:
intersection[item] = target_dict[item]
if intersection == target_dict:
return True
# if no any arguments matched to called_args, return False
return False | ['def', 'dict_partial_cmp', '(', 'target_dict', ',', 'dict_list', ',', 'ducktype', ')', ':', 'for', 'called_dict', 'in', 'dict_list', ':', '# ignore invalid test case', 'if', 'len', '(', 'target_dict', ')', '>', 'len', '(', 'called_dict', ')', ':', 'continue', '# get the intersection of two dicts', 'intersection', '=', '{', '}', 'for', 'item', 'in', 'target_dict', ':', 'dtype', '=', 'ducktype', '(', 'target_dict', '[', 'item', ']', ')', 'if', 'hasattr', '(', 'dtype', ',', '"mtest"', ')', ':', 'if', 'item', 'in', 'called_dict', 'and', 'dtype', '.', 'mtest', '(', 'called_dict', '[', 'item', ']', ')', ':', 'intersection', '[', 'item', ']', '=', 'target_dict', '[', 'item', ']', 'else', ':', 'if', 'item', 'in', 'called_dict', 'and', 'dtype', '==', 'called_dict', '[', 'item', ']', ':', 'intersection', '[', 'item', ']', '=', 'target_dict', '[', 'item', ']', 'if', 'intersection', '==', 'target_dict', ':', 'return', 'True', '# if no any arguments matched to called_args, return False', 'return', 'False'] | Whether partial dict are in dict_list or not | ['Whether', 'partial', 'dict', 'are', 'in', 'dict_list', 'or', 'not'] | train | https://github.com/note35/sinon/blob/f1d551b679b393d64d926a8a279320904c38d0f5/sinon/lib/util/CollectionHandler.py#L115-L136 |
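A worked sketch of the partial-match semantics, with the import path taken from the record; a pass-through ducktype has no mtest attribute, so the comparison falls back to plain equality.

from sinon.lib.util.CollectionHandler import dict_partial_cmp

called_args = [{"a": 1, "b": 2, "c": 3}, {"x": 9}]
identity = lambda value: value

print(dict_partial_cmp({"a": 1, "b": 2}, called_args, identity))   # True: a subset of the first dict
print(dict_partial_cmp({"a": 1, "z": 0}, called_args, identity))   # False: no call contains both pairs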
7,987 | NICTA/revrand | revrand/btypes.py | hstack | def hstack(tup):
"""
Horizontally stack a sequence of value bounds pairs.
Parameters
----------
tup: sequence
a sequence of value, ``Bound`` pairs
Returns
-------
value: ndarray
a horizontally concatenated array1d
bounds:
a list of Bounds
"""
vals, bounds = zip(*tup)
stackvalue = np.hstack(vals)
stackbounds = list(chain(*bounds))
return stackvalue, stackbounds | python | def hstack(tup):
"""
Horizontally stack a sequence of value bounds pairs.
Parameters
----------
tup: sequence
a sequence of value, ``Bound`` pairs
Returns
-------
value: ndarray
a horizontally concatenated array1d
bounds:
a list of Bounds
"""
vals, bounds = zip(*tup)
stackvalue = np.hstack(vals)
stackbounds = list(chain(*bounds))
return stackvalue, stackbounds | ['def', 'hstack', '(', 'tup', ')', ':', 'vals', ',', 'bounds', '=', 'zip', '(', '*', 'tup', ')', 'stackvalue', '=', 'np', '.', 'hstack', '(', 'vals', ')', 'stackbounds', '=', 'list', '(', 'chain', '(', '*', 'bounds', ')', ')', 'return', 'stackvalue', ',', 'stackbounds'] | Horizontally stack a sequence of value bounds pairs.
Parameters
----------
tup: sequence
a sequence of value, ``Bound`` pairs
Returns
-------
value: ndarray
a horizontally concatenated array1d
bounds:
a list of Bounds | ['Horizontally', 'stack', 'a', 'sequence', 'of', 'value', 'bounds', 'pairs', '.'] | train | https://github.com/NICTA/revrand/blob/4c1881b6c1772d2b988518e49dde954f165acfb6/revrand/btypes.py#L374-L394 |
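A small sketch with the import path taken from the record; the plain tuples stand in for revrand Bound objects, since hstack only needs each bounds entry to be iterable.

import numpy as np
from revrand.btypes import hstack

value, bounds = hstack([
    (np.array([1.0, 2.0]), [(0, 1), (0, 1)]),
    (np.array([3.0]), [(None, None)]),
])
print(value)    # [1. 2. 3.]
print(bounds)   # [(0, 1), (0, 1), (None, None)]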
7,988 | fastai/fastai | fastai/vision/data.py | ImageImageList.show_xyzs | def show_xyzs(self, xs, ys, zs, imgsize:int=4, figsize:Optional[Tuple[int,int]]=None, **kwargs):
"Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`."
title = 'Input / Prediction / Target'
axs = subplots(len(xs), 3, imgsize=imgsize, figsize=figsize, title=title, weight='bold', size=14)
for i,(x,y,z) in enumerate(zip(xs,ys,zs)):
x.show(ax=axs[i,0], **kwargs)
y.show(ax=axs[i,2], **kwargs)
z.show(ax=axs[i,1], **kwargs) | python | def show_xyzs(self, xs, ys, zs, imgsize:int=4, figsize:Optional[Tuple[int,int]]=None, **kwargs):
"Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`."
title = 'Input / Prediction / Target'
axs = subplots(len(xs), 3, imgsize=imgsize, figsize=figsize, title=title, weight='bold', size=14)
for i,(x,y,z) in enumerate(zip(xs,ys,zs)):
x.show(ax=axs[i,0], **kwargs)
y.show(ax=axs[i,2], **kwargs)
z.show(ax=axs[i,1], **kwargs) | ['def', 'show_xyzs', '(', 'self', ',', 'xs', ',', 'ys', ',', 'zs', ',', 'imgsize', ':', 'int', '=', '4', ',', 'figsize', ':', 'Optional', '[', 'Tuple', '[', 'int', ',', 'int', ']', ']', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'title', '=', "'Input / Prediction / Target'", 'axs', '=', 'subplots', '(', 'len', '(', 'xs', ')', ',', '3', ',', 'imgsize', '=', 'imgsize', ',', 'figsize', '=', 'figsize', ',', 'title', '=', 'title', ',', 'weight', '=', "'bold'", ',', 'size', '=', '14', ')', 'for', 'i', ',', '(', 'x', ',', 'y', ',', 'z', ')', 'in', 'enumerate', '(', 'zip', '(', 'xs', ',', 'ys', ',', 'zs', ')', ')', ':', 'x', '.', 'show', '(', 'ax', '=', 'axs', '[', 'i', ',', '0', ']', ',', '*', '*', 'kwargs', ')', 'y', '.', 'show', '(', 'ax', '=', 'axs', '[', 'i', ',', '2', ']', ',', '*', '*', 'kwargs', ')', 'z', '.', 'show', '(', 'ax', '=', 'axs', '[', 'i', ',', '1', ']', ',', '*', '*', 'kwargs', ')'] | Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`. | ['Show', 'xs', '(', 'inputs', ')', 'ys', '(', 'targets', ')', 'and', 'zs', '(', 'predictions', ')', 'on', 'a', 'figure', 'of', 'figsize', '.'] | train | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/data.py#L424-L431 |
7,989 | aiogram/aiogram | aiogram/contrib/middlewares/i18n.py | I18nMiddleware.find_locales | def find_locales(self) -> Dict[str, gettext.GNUTranslations]:
"""
Load all compiled locales from path
:return: dict with locales
"""
translations = {}
for name in os.listdir(self.path):
if not os.path.isdir(os.path.join(self.path, name)):
continue
mo_path = os.path.join(self.path, name, 'LC_MESSAGES', self.domain + '.mo')
if os.path.exists(mo_path):
with open(mo_path, 'rb') as fp:
translations[name] = gettext.GNUTranslations(fp)
elif os.path.exists(mo_path[:-2] + 'po'):
raise RuntimeError(f"Found locale '{name} but this language is not compiled!")
return translations | python | def find_locales(self) -> Dict[str, gettext.GNUTranslations]:
"""
Load all compiled locales from path
:return: dict with locales
"""
translations = {}
for name in os.listdir(self.path):
if not os.path.isdir(os.path.join(self.path, name)):
continue
mo_path = os.path.join(self.path, name, 'LC_MESSAGES', self.domain + '.mo')
if os.path.exists(mo_path):
with open(mo_path, 'rb') as fp:
translations[name] = gettext.GNUTranslations(fp)
elif os.path.exists(mo_path[:-2] + 'po'):
raise RuntimeError(f"Found locale '{name} but this language is not compiled!")
return translations | ['def', 'find_locales', '(', 'self', ')', '->', 'Dict', '[', 'str', ',', 'gettext', '.', 'GNUTranslations', ']', ':', 'translations', '=', '{', '}', 'for', 'name', 'in', 'os', '.', 'listdir', '(', 'self', '.', 'path', ')', ':', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'path', ',', 'name', ')', ')', ':', 'continue', 'mo_path', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'path', ',', 'name', ',', "'LC_MESSAGES'", ',', 'self', '.', 'domain', '+', "'.mo'", ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'mo_path', ')', ':', 'with', 'open', '(', 'mo_path', ',', "'rb'", ')', 'as', 'fp', ':', 'translations', '[', 'name', ']', '=', 'gettext', '.', 'GNUTranslations', '(', 'fp', ')', 'elif', 'os', '.', 'path', '.', 'exists', '(', 'mo_path', '[', ':', '-', '2', ']', '+', "'po'", ')', ':', 'raise', 'RuntimeError', '(', 'f"Found locale \'{name} but this language is not compiled!"', ')', 'return', 'translations'] | Load all compiled locales from path
:return: dict with locales | ['Load', 'all', 'compiled', 'locales', 'from', 'path'] | train | https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/contrib/middlewares/i18n.py#L45-L64 |
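A minimal sketch of the directory layout find_locales() expects; the constructor arguments are an assumption based on common aiogram usage, not something stated in the record.

# <path>/<locale>/LC_MESSAGES/<domain>.mo, e.g.
#   locales/en/LC_MESSAGES/mybot.mo
#   locales/uk/LC_MESSAGES/mybot.mo
from aiogram.contrib.middlewares.i18n import I18nMiddleware

i18n = I18nMiddleware('mybot', 'locales')   # domain, path (assumed signature)
translations = i18n.find_locales()          # e.g. {'en': <GNUTranslations>, 'uk': ...}
print(sorted(translations))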
7,990 | pecan/pecan | pecan/scaffolds/__init__.py | makedirs | def makedirs(directory):
""" Resursively create a named directory. """
parent = os.path.dirname(os.path.abspath(directory))
if not os.path.exists(parent):
makedirs(parent)
os.mkdir(directory) | python | def makedirs(directory):
""" Resursively create a named directory. """
parent = os.path.dirname(os.path.abspath(directory))
if not os.path.exists(parent):
makedirs(parent)
os.mkdir(directory) | ['def', 'makedirs', '(', 'directory', ')', ':', 'parent', '=', 'os', '.', 'path', '.', 'dirname', '(', 'os', '.', 'path', '.', 'abspath', '(', 'directory', ')', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'parent', ')', ':', 'makedirs', '(', 'parent', ')', 'os', '.', 'mkdir', '(', 'directory', ')'] | Resursively create a named directory. | ['Resursively', 'create', 'a', 'named', 'directory', '.'] | train | https://github.com/pecan/pecan/blob/833d0653fa0e6bbfb52545b091c30182105f4a82/pecan/scaffolds/__init__.py#L105-L110 |
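A tiny usage sketch, importing from the package path shown in the record; unlike os.makedirs the helper recurses itself, and os.mkdir still raises if the leaf directory already exists.

import os
import tempfile
from pecan.scaffolds import makedirs

target = os.path.join(tempfile.mkdtemp(), 'a', 'b', 'c')
makedirs(target)
assert os.path.isdir(target)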
7,991 | openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations.init_model_based_tags | def init_model_based_tags(self, model):
"""Initializing the model based memory and NIC information tags.
It should be called just after instantiating a RIBCL object.
ribcl = ribcl.RIBCLOperations(host, login, password, timeout,
port, cacert=cacert)
model = ribcl.get_product_name()
ribcl.init_model_based_tags(model)
Again, model attribute is also set here on the RIBCL object.
:param model: the model string
"""
self.model = model
if 'G7' in self.model:
self.MEMORY_SIZE_TAG = "MEMORY_SIZE"
self.MEMORY_SIZE_NOT_PRESENT_TAG = "Not Installed"
self.NIC_INFORMATION_TAG = "NIC_INFOMATION"
else:
self.MEMORY_SIZE_TAG = "TOTAL_MEMORY_SIZE"
self.MEMORY_SIZE_NOT_PRESENT_TAG = "N/A"
self.NIC_INFORMATION_TAG = "NIC_INFORMATION" | python | def init_model_based_tags(self, model):
"""Initializing the model based memory and NIC information tags.
It should be called just after instantiating a RIBCL object.
ribcl = ribcl.RIBCLOperations(host, login, password, timeout,
port, cacert=cacert)
model = ribcl.get_product_name()
ribcl.init_model_based_tags(model)
Again, model attribute is also set here on the RIBCL object.
:param model: the model string
"""
self.model = model
if 'G7' in self.model:
self.MEMORY_SIZE_TAG = "MEMORY_SIZE"
self.MEMORY_SIZE_NOT_PRESENT_TAG = "Not Installed"
self.NIC_INFORMATION_TAG = "NIC_INFOMATION"
else:
self.MEMORY_SIZE_TAG = "TOTAL_MEMORY_SIZE"
self.MEMORY_SIZE_NOT_PRESENT_TAG = "N/A"
self.NIC_INFORMATION_TAG = "NIC_INFORMATION" | ['def', 'init_model_based_tags', '(', 'self', ',', 'model', ')', ':', 'self', '.', 'model', '=', 'model', 'if', "'G7'", 'in', 'self', '.', 'model', ':', 'self', '.', 'MEMORY_SIZE_TAG', '=', '"MEMORY_SIZE"', 'self', '.', 'MEMORY_SIZE_NOT_PRESENT_TAG', '=', '"Not Installed"', 'self', '.', 'NIC_INFORMATION_TAG', '=', '"NIC_INFOMATION"', 'else', ':', 'self', '.', 'MEMORY_SIZE_TAG', '=', '"TOTAL_MEMORY_SIZE"', 'self', '.', 'MEMORY_SIZE_NOT_PRESENT_TAG', '=', '"N/A"', 'self', '.', 'NIC_INFORMATION_TAG', '=', '"NIC_INFORMATION"'] | Initializing the model based memory and NIC information tags.
It should be called just after instantiating a RIBCL object.
ribcl = ribcl.RIBCLOperations(host, login, password, timeout,
port, cacert=cacert)
model = ribcl.get_product_name()
ribcl.init_model_based_tags(model)
Again, model attribute is also set here on the RIBCL object.
:param model: the model string | ['Initializing', 'the', 'model', 'based', 'memory', 'and', 'NIC', 'information', 'tags', '.'] | train | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L96-L118 |
7,992 | DAI-Lab/Copulas | copulas/univariate/base.py | Univariate._replace_constant_methods | def _replace_constant_methods(self):
"""Replaces conventional distribution methods by its constant counterparts."""
self.cumulative_distribution = self._constant_cumulative_distribution
self.percent_point = self._constant_percent_point
self.probability_density = self._constant_probability_density
self.sample = self._constant_sample | python | def _replace_constant_methods(self):
"""Replaces conventional distribution methods by its constant counterparts."""
self.cumulative_distribution = self._constant_cumulative_distribution
self.percent_point = self._constant_percent_point
self.probability_density = self._constant_probability_density
self.sample = self._constant_sample | ['def', '_replace_constant_methods', '(', 'self', ')', ':', 'self', '.', 'cumulative_distribution', '=', 'self', '.', '_constant_cumulative_distribution', 'self', '.', 'percent_point', '=', 'self', '.', '_constant_percent_point', 'self', '.', 'probability_density', '=', 'self', '.', '_constant_probability_density', 'self', '.', 'sample', '=', 'self', '.', '_constant_sample'] | Replaces conventional distribution methods by its constant counterparts. | ['Replaces', 'conventional', 'distribution', 'methods', 'by', 'its', 'constant', 'counterparts', '.'] | train | https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/univariate/base.py#L192-L197 |
7,993 | maas/python-libmaas | maas/client/viscera/machines.py | Machine.deploy | async def deploy(
self, *, user_data: typing.Union[bytes, str] = None,
distro_series: str = None, hwe_kernel: str = None,
comment: str = None, wait: bool = False, wait_interval: int = 5):
"""Deploy this machine.
:param user_data: User-data to provide to the machine when booting. If
provided as a byte string, it will be base-64 encoded prior to
transmission. If provided as a Unicode string it will be assumed
to be already base-64 encoded.
:param distro_series: The OS to deploy.
:param hwe_kernel: The HWE kernel to deploy. Probably only relevant
when deploying Ubuntu.
:param comment: A comment for the event log.
:param wait: If specified, wait until the deploy is complete.
:param wait_interval: How often to poll, defaults to 5 seconds
"""
params = {"system_id": self.system_id}
if user_data is not None:
if isinstance(user_data, bytes):
params["user_data"] = base64.encodebytes(user_data)
else:
# Already base-64 encoded. Convert to a byte string in
# preparation for multipart assembly.
params["user_data"] = user_data.encode("ascii")
if distro_series is not None:
params["distro_series"] = distro_series
if hwe_kernel is not None:
params["hwe_kernel"] = hwe_kernel
if comment is not None:
params["comment"] = comment
self._data = await self._handler.deploy(**params)
if not wait:
return self
else:
# Wait for the machine to be fully deployed
while self.status == NodeStatus.DEPLOYING:
await asyncio.sleep(wait_interval)
self._data = await self._handler.read(system_id=self.system_id)
if self.status == NodeStatus.FAILED_DEPLOYMENT:
msg = "{hostname} failed to deploy.".format(
hostname=self.hostname
)
raise FailedDeployment(msg, self)
return self | python | async def deploy(
self, *, user_data: typing.Union[bytes, str] = None,
distro_series: str = None, hwe_kernel: str = None,
comment: str = None, wait: bool = False, wait_interval: int = 5):
"""Deploy this machine.
:param user_data: User-data to provide to the machine when booting. If
provided as a byte string, it will be base-64 encoded prior to
transmission. If provided as a Unicode string it will be assumed
to be already base-64 encoded.
:param distro_series: The OS to deploy.
:param hwe_kernel: The HWE kernel to deploy. Probably only relevant
when deploying Ubuntu.
:param comment: A comment for the event log.
:param wait: If specified, wait until the deploy is complete.
:param wait_interval: How often to poll, defaults to 5 seconds
"""
params = {"system_id": self.system_id}
if user_data is not None:
if isinstance(user_data, bytes):
params["user_data"] = base64.encodebytes(user_data)
else:
# Already base-64 encoded. Convert to a byte string in
# preparation for multipart assembly.
params["user_data"] = user_data.encode("ascii")
if distro_series is not None:
params["distro_series"] = distro_series
if hwe_kernel is not None:
params["hwe_kernel"] = hwe_kernel
if comment is not None:
params["comment"] = comment
self._data = await self._handler.deploy(**params)
if not wait:
return self
else:
# Wait for the machine to be fully deployed
while self.status == NodeStatus.DEPLOYING:
await asyncio.sleep(wait_interval)
self._data = await self._handler.read(system_id=self.system_id)
if self.status == NodeStatus.FAILED_DEPLOYMENT:
msg = "{hostname} failed to deploy.".format(
hostname=self.hostname
)
raise FailedDeployment(msg, self)
return self | ['async', 'def', 'deploy', '(', 'self', ',', '*', ',', 'user_data', ':', 'typing', '.', 'Union', '[', 'bytes', ',', 'str', ']', '=', 'None', ',', 'distro_series', ':', 'str', '=', 'None', ',', 'hwe_kernel', ':', 'str', '=', 'None', ',', 'comment', ':', 'str', '=', 'None', ',', 'wait', ':', 'bool', '=', 'False', ',', 'wait_interval', ':', 'int', '=', '5', ')', ':', 'params', '=', '{', '"system_id"', ':', 'self', '.', 'system_id', '}', 'if', 'user_data', 'is', 'not', 'None', ':', 'if', 'isinstance', '(', 'user_data', ',', 'bytes', ')', ':', 'params', '[', '"user_data"', ']', '=', 'base64', '.', 'encodebytes', '(', 'user_data', ')', 'else', ':', '# Already base-64 encoded. Convert to a byte string in', '# preparation for multipart assembly.', 'params', '[', '"user_data"', ']', '=', 'user_data', '.', 'encode', '(', '"ascii"', ')', 'if', 'distro_series', 'is', 'not', 'None', ':', 'params', '[', '"distro_series"', ']', '=', 'distro_series', 'if', 'hwe_kernel', 'is', 'not', 'None', ':', 'params', '[', '"hwe_kernel"', ']', '=', 'hwe_kernel', 'if', 'comment', 'is', 'not', 'None', ':', 'params', '[', '"comment"', ']', '=', 'comment', 'self', '.', '_data', '=', 'await', 'self', '.', '_handler', '.', 'deploy', '(', '*', '*', 'params', ')', 'if', 'not', 'wait', ':', 'return', 'self', 'else', ':', '# Wait for the machine to be fully deployed', 'while', 'self', '.', 'status', '==', 'NodeStatus', '.', 'DEPLOYING', ':', 'await', 'asyncio', '.', 'sleep', '(', 'wait_interval', ')', 'self', '.', '_data', '=', 'await', 'self', '.', '_handler', '.', 'read', '(', 'system_id', '=', 'self', '.', 'system_id', ')', 'if', 'self', '.', 'status', '==', 'NodeStatus', '.', 'FAILED_DEPLOYMENT', ':', 'msg', '=', '"{hostname} failed to deploy."', '.', 'format', '(', 'hostname', '=', 'self', '.', 'hostname', ')', 'raise', 'FailedDeployment', '(', 'msg', ',', 'self', ')', 'return', 'self'] | Deploy this machine.
:param user_data: User-data to provide to the machine when booting. If
provided as a byte string, it will be base-64 encoded prior to
transmission. If provided as a Unicode string it will be assumed
to be already base-64 encoded.
:param distro_series: The OS to deploy.
:param hwe_kernel: The HWE kernel to deploy. Probably only relevant
when deploying Ubuntu.
:param comment: A comment for the event log.
:param wait: If specified, wait until the deploy is complete.
:param wait_interval: How often to poll, defaults to 5 seconds | ['Deploy', 'this', 'machine', '.'] | train | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/machines.py#L484-L528 |
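An illustrative coroutine, assuming machine is a Machine object already fetched through a python-libmaas client; the cloud-init payload and distro series are placeholders. Per the code above, a byte-string user_data is base-64 encoded automatically, and wait=True polls until the deploy finishes or FailedDeployment is raised.

async def deploy_with_userdata(machine):
    return await machine.deploy(
        user_data=b"#cloud-config\npackages:\n  - htop\n",
        distro_series="bionic",
        wait=True,
    )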
7,994 | photo/openphoto-python | trovebox/objects/action.py | Action.view | def view(self, **kwds):
"""
Endpoint: /action/<id>/view.json
Requests the full contents of the action.
Updates the action object's fields with the response.
"""
result = self._client.action.view(self, **kwds)
self._replace_fields(result.get_fields())
self._update_fields_with_objects() | python | def view(self, **kwds):
"""
Endpoint: /action/<id>/view.json
Requests the full contents of the action.
Updates the action object's fields with the response.
"""
result = self._client.action.view(self, **kwds)
self._replace_fields(result.get_fields())
self._update_fields_with_objects() | ['def', 'view', '(', 'self', ',', '*', '*', 'kwds', ')', ':', 'result', '=', 'self', '.', '_client', '.', 'action', '.', 'view', '(', 'self', ',', '*', '*', 'kwds', ')', 'self', '.', '_replace_fields', '(', 'result', '.', 'get_fields', '(', ')', ')', 'self', '.', '_update_fields_with_objects', '(', ')'] | Endpoint: /action/<id>/view.json
Requests the full contents of the action.
Updates the action object's fields with the response. | ['Endpoint', ':', '/', 'action', '/', '<id', '>', '/', 'view', '.', 'json'] | train | https://github.com/photo/openphoto-python/blob/209a1da27c8d8c88dbcf4ea6c6f57031ea1bc44b/trovebox/objects/action.py#L39-L48 |
7,995 | thunder-project/thunder | thunder/series/series.py | Series.center | def center(self, axis=1):
"""
Subtract the mean either within or across records.
Parameters
----------
axis : int, optional, default = 1
Which axis to center along, within (1) or across (0) records.
"""
if axis == 1:
return self.map(lambda x: x - mean(x))
elif axis == 0:
meanval = self.mean().toarray()
return self.map(lambda x: x - meanval)
else:
raise Exception('Axis must be 0 or 1') | python | def center(self, axis=1):
"""
Subtract the mean either within or across records.
Parameters
----------
axis : int, optional, default = 1
Which axis to center along, within (1) or across (0) records.
"""
if axis == 1:
return self.map(lambda x: x - mean(x))
elif axis == 0:
meanval = self.mean().toarray()
return self.map(lambda x: x - meanval)
else:
raise Exception('Axis must be 0 or 1') | ['def', 'center', '(', 'self', ',', 'axis', '=', '1', ')', ':', 'if', 'axis', '==', '1', ':', 'return', 'self', '.', 'map', '(', 'lambda', 'x', ':', 'x', '-', 'mean', '(', 'x', ')', ')', 'elif', 'axis', '==', '0', ':', 'meanval', '=', 'self', '.', 'mean', '(', ')', '.', 'toarray', '(', ')', 'return', 'self', '.', 'map', '(', 'lambda', 'x', ':', 'x', '-', 'meanval', ')', 'else', ':', 'raise', 'Exception', '(', "'Axis must be 0 or 1'", ')'] | Subtract the mean either within or across records.
Parameters
----------
axis : int, optional, default = 1
Which axis to center along, within (1) or across (0) records. | ['Subtract', 'the', 'mean', 'either', 'within', 'or', 'across', 'records', '.'] | train | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L350-L365 |
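Not thunder itself, just plain numpy showing the arithmetic the two axis values imply for a 2x3 block of records.

import numpy as np

data = np.array([[1., 2., 3.],
                 [4., 5., 6.]])

within = data - data.mean(axis=1, keepdims=True)   # axis=1: subtract each record's own mean
across = data - data.mean(axis=0)                  # axis=0: subtract the mean across records
print(within)   # [[-1.  0.  1.] [-1.  0.  1.]]
print(across)   # [[-1.5 -1.5 -1.5] [ 1.5  1.5  1.5]]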
7,996 | planetarypy/planetaryimage | planetaryimage/image.py | PlanetaryImage.image | def image(self):
"""An Image like array of ``self.data`` convenient for image processing tasks
* 2D array for single band, grayscale image data
* 3D array for three band, RGB image data
Enables working with ``self.data`` as if it were a PIL image.
See https://planetaryimage.readthedocs.io/en/latest/usage.html to see
how to open images to view them and make manipulations.
"""
if self.bands == 1:
return self.data.squeeze()
elif self.bands == 3:
return numpy.dstack(self.data) | python | def image(self):
"""An Image like array of ``self.data`` convenient for image processing tasks
* 2D array for single band, grayscale image data
* 3D array for three band, RGB image data
Enables working with ``self.data`` as if it were a PIL image.
See https://planetaryimage.readthedocs.io/en/latest/usage.html to see
how to open images to view them and make manipulations.
"""
if self.bands == 1:
return self.data.squeeze()
elif self.bands == 3:
return numpy.dstack(self.data) | ['def', 'image', '(', 'self', ')', ':', 'if', 'self', '.', 'bands', '==', '1', ':', 'return', 'self', '.', 'data', '.', 'squeeze', '(', ')', 'elif', 'self', '.', 'bands', '==', '3', ':', 'return', 'numpy', '.', 'dstack', '(', 'self', '.', 'data', ')'] | An Image like array of ``self.data`` convenient for image processing tasks
* 2D array for single band, grayscale image data
* 3D array for three band, RGB image data
Enables working with ``self.data`` as if it were a PIL image.
See https://planetaryimage.readthedocs.io/en/latest/usage.html to see
how to open images to view them and make manipulations. | ['An', 'Image', 'like', 'array', 'of', 'self', '.', 'data', 'convenient', 'for', 'image', 'processing', 'tasks'] | train | https://github.com/planetarypy/planetaryimage/blob/ee9aef4746ff7a003b1457565acb13f5f1db0375/planetaryimage/image.py#L131-L146 |
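A display sketch along the lines of the linked usage docs; the filename is a placeholder and PDS3Image is assumed to be the package's public loader class.

import matplotlib.pyplot as plt
from planetaryimage import PDS3Image

image = PDS3Image.open('example.img')   # placeholder filename
plt.imshow(image.image, cmap='gray')    # 2D for one band, stacked RGB for three
plt.show()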
7,997 | alorence/pysvg-py3 | pysvg/builders.py | ShapeBuilder.createPolygon | def createPolygon(self, points, strokewidth=1, stroke='black', fill='none'):
"""
Creates a Polygon
@type points: string in the form "x1,y1 x2,y2 x3,y3"
@param points: all points relevant to the polygon
@type strokewidth: string or int
@param strokewidth: width of the pen used to draw
@type stroke: string (either css constants like "black" or numerical values like "#FFFFFF")
@param stroke: color with which to draw the outer limits
@type fill: string (either css constants like "black" or numerical values like "#FFFFFF")
@param fill: color with which to fill the element (default: no filling)
@return: a polygon object
"""
style_dict = {'fill':fill, 'stroke-width':strokewidth, 'stroke':stroke}
myStyle = StyleBuilder(style_dict)
p = Polygon(points=points)
p.set_style(myStyle.getStyle())
return p | python | def createPolygon(self, points, strokewidth=1, stroke='black', fill='none'):
"""
Creates a Polygon
@type points: string in the form "x1,y1 x2,y2 x3,y3"
@param points: all points relevant to the polygon
@type strokewidth: string or int
@param strokewidth: width of the pen used to draw
@type stroke: string (either css constants like "black" or numerical values like "#FFFFFF")
@param stroke: color with which to draw the outer limits
@type fill: string (either css constants like "black" or numerical values like "#FFFFFF")
@param fill: color with which to fill the element (default: no filling)
@return: a polygon object
"""
style_dict = {'fill':fill, 'stroke-width':strokewidth, 'stroke':stroke}
myStyle = StyleBuilder(style_dict)
p = Polygon(points=points)
p.set_style(myStyle.getStyle())
return p | ['def', 'createPolygon', '(', 'self', ',', 'points', ',', 'strokewidth', '=', '1', ',', 'stroke', '=', "'black'", ',', 'fill', '=', "'none'", ')', ':', 'style_dict', '=', '{', "'fill'", ':', 'fill', ',', "'stroke-width'", ':', 'strokewidth', ',', "'stroke'", ':', 'stroke', '}', 'myStyle', '=', 'StyleBuilder', '(', 'style_dict', ')', 'p', '=', 'Polygon', '(', 'points', '=', 'points', ')', 'p', '.', 'set_style', '(', 'myStyle', '.', 'getStyle', '(', ')', ')', 'return', 'p'] | Creates a Polygon
@type points: string in the form "x1,y1 x2,y2 x3,y3"
@param points: all points relevant to the polygon
@type strokewidth: string or int
@param strokewidth: width of the pen used to draw
@type stroke: string (either css constants like "black" or numerical values like "#FFFFFF")
@param stroke: color with which to draw the outer limits
@type fill: string (either css constants like "black" or numerical values like "#FFFFFF")
@param fill: color with which to fill the element (default: no filling)
@return: a polygon object | ['Creates', 'a', 'Polygon'] | train | https://github.com/alorence/pysvg-py3/blob/ce217a4da3ada44a71d3e2f391d37c67d95c724e/pysvg/builders.py#L100-L117 |
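A short usage sketch; the coordinates are arbitrary and the getXML() call is assumed from pysvg's element base class.

from pysvg.builders import ShapeBuilder

sb = ShapeBuilder()
triangle = sb.createPolygon(points="50,10 90,90 10,90",
                            strokewidth=2, stroke="navy", fill="#FFCC00")
print(triangle.getXML())   # an inline-styled <polygon ...> element ready to add to an svg document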
7,998 | django-danceschool/django-danceschool | danceschool/core/models.py | Event.soldOutForRole | def soldOutForRole(self,role,includeTemporaryRegs=False):
'''
Accepts a DanceRole object and responds if the number of registrations for that
role exceeds the capacity for that role at this event.
'''
return self.numRegisteredForRole(
role,includeTemporaryRegs=includeTemporaryRegs) >= (self.capacityForRole(role) or 0) | python | def soldOutForRole(self,role,includeTemporaryRegs=False):
'''
Accepts a DanceRole object and responds if the number of registrations for that
role exceeds the capacity for that role at this event.
'''
return self.numRegisteredForRole(
role,includeTemporaryRegs=includeTemporaryRegs) >= (self.capacityForRole(role) or 0) | ['def', 'soldOutForRole', '(', 'self', ',', 'role', ',', 'includeTemporaryRegs', '=', 'False', ')', ':', 'return', 'self', '.', 'numRegisteredForRole', '(', 'role', ',', 'includeTemporaryRegs', '=', 'includeTemporaryRegs', ')', '>=', '(', 'self', '.', 'capacityForRole', '(', 'role', ')', 'or', '0', ')'] | Accepts a DanceRole object and responds if the number of registrations for that
role exceeds the capacity for that role at this event. | ['Accepts', 'a', 'DanceRole', 'object', 'and', 'responds', 'if', 'the', 'number', 'of', 'registrations', 'for', 'that', 'role', 'exceeds', 'the', 'capacity', 'for', 'that', 'role', 'at', 'this', 'event', '.'] | train | https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/core/models.py#L1024-L1030 |
7,999 | openego/ding0 | ding0/core/network/grids.py | MVGridDing0.add_ring | def add_ring(self, ring):
"""Adds a ring to _rings if not already existing"""
if ring not in self._rings and isinstance(ring, RingDing0):
self._rings.append(ring) | python | def add_ring(self, ring):
"""Adds a ring to _rings if not already existing"""
if ring not in self._rings and isinstance(ring, RingDing0):
self._rings.append(ring) | ['def', 'add_ring', '(', 'self', ',', 'ring', ')', ':', 'if', 'ring', 'not', 'in', 'self', '.', '_rings', 'and', 'isinstance', '(', 'ring', ',', 'RingDing0', ')', ':', 'self', '.', '_rings', '.', 'append', '(', 'ring', ')'] | Adds a ring to _rings if not already existing | ['Adds', 'a', 'ring', 'to', '_rings', 'if', 'not', 'already', 'existing'] | train | https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/core/network/grids.py#L167-L170 |