Column | Type | Min | Max
---|---|---|---
Unnamed: 0 | int64 | 0 | 10k
repository_name | string (length) | 7 | 54
func_path_in_repository | string (length) | 5 | 223
func_name | string (length) | 1 | 134
whole_func_string | string (length) | 100 | 30.3k
language | string (1 class: python) | |
func_code_string | string (length) | 100 | 30.3k
func_code_tokens | string (length) | 138 | 33.2k
func_documentation_string | string (length) | 1 | 15k
func_documentation_tokens | string (length) | 5 | 5.14k
split_name | string (1 class: train) | |
func_code_url | string (length) | 91 | 315

Unnamed: 0 | repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_string | func_code_tokens | func_documentation_string | func_documentation_tokens | split_name | func_code_url |
---|---|---|---|---|---|---|---|---|---|---|---|
6,400 | cloudendpoints/endpoints-management-python | endpoints_management/control/report_request.py | Info._as_log_entry | def _as_log_entry(self, name, now):
"""Makes a `LogEntry` from this instance for the given log_name.
Args:
name (string): the log name to use for the generated ``LogEntry``
now (:class:`datetime.DateTime`): the current time
Return:
a ``LogEntry`` generated from this instance with the given name
and timestamp
Raises:
ValueError: if the fields in this instance are insufficient
to create a valid ``ServicecontrolServicesReportRequest``
"""
# initialize the struct with fields that are always present
d = {
u'http_response_code': self.response_code,
u'timestamp': time.mktime(now.timetuple())
}
# compute the severity
severity = _SEVERITY.INFO
if self.response_code >= 400:
severity = _SEVERITY.ERROR
d[u'error_cause'] = self.error_cause.name
# add 'optional' fields to the struct
if self.request_size > 0:
d[u'request_size'] = self.request_size
if self.response_size > 0:
d[u'response_size'] = self.response_size
if self.method:
d[u'http_method'] = self.method
if self.request_time:
d[u'request_latency_in_ms'] = self.request_time.total_seconds() * 1000
# add 'copyable' fields to the struct
for key in self.COPYABLE_LOG_FIELDS:
value = getattr(self, key, None)
if value:
d[key] = value
return sc_messages.LogEntry(
name=name,
timestamp=timestamp.to_rfc3339(now),
severity=severity,
structPayload=_struct_payload_from(d)) | python | def _as_log_entry(self, name, now):
"""Makes a `LogEntry` from this instance for the given log_name.
Args:
name (string): the log name to use for the generated ``LogEntry``
now (:class:`datetime.DateTime`): the current time
Return:
a ``LogEntry`` generated from this instance with the given name
and timestamp
Raises:
ValueError: if the fields in this instance are insufficient
to create a valid ``ServicecontrolServicesReportRequest``
"""
# initialize the struct with fields that are always present
d = {
u'http_response_code': self.response_code,
u'timestamp': time.mktime(now.timetuple())
}
# compute the severity
severity = _SEVERITY.INFO
if self.response_code >= 400:
severity = _SEVERITY.ERROR
d[u'error_cause'] = self.error_cause.name
# add 'optional' fields to the struct
if self.request_size > 0:
d[u'request_size'] = self.request_size
if self.response_size > 0:
d[u'response_size'] = self.response_size
if self.method:
d[u'http_method'] = self.method
if self.request_time:
d[u'request_latency_in_ms'] = self.request_time.total_seconds() * 1000
# add 'copyable' fields to the struct
for key in self.COPYABLE_LOG_FIELDS:
value = getattr(self, key, None)
if value:
d[key] = value
return sc_messages.LogEntry(
name=name,
timestamp=timestamp.to_rfc3339(now),
severity=severity,
structPayload=_struct_payload_from(d)) | ['def', '_as_log_entry', '(', 'self', ',', 'name', ',', 'now', ')', ':', '# initialize the struct with fields that are always present', 'd', '=', '{', "u'http_response_code'", ':', 'self', '.', 'response_code', ',', "u'timestamp'", ':', 'time', '.', 'mktime', '(', 'now', '.', 'timetuple', '(', ')', ')', '}', '# compute the severity', 'severity', '=', '_SEVERITY', '.', 'INFO', 'if', 'self', '.', 'response_code', '>=', '400', ':', 'severity', '=', '_SEVERITY', '.', 'ERROR', 'd', '[', "u'error_cause'", ']', '=', 'self', '.', 'error_cause', '.', 'name', "# add 'optional' fields to the struct", 'if', 'self', '.', 'request_size', '>', '0', ':', 'd', '[', "u'request_size'", ']', '=', 'self', '.', 'request_size', 'if', 'self', '.', 'response_size', '>', '0', ':', 'd', '[', "u'response_size'", ']', '=', 'self', '.', 'response_size', 'if', 'self', '.', 'method', ':', 'd', '[', "u'http_method'", ']', '=', 'self', '.', 'method', 'if', 'self', '.', 'request_time', ':', 'd', '[', "u'request_latency_in_ms'", ']', '=', 'self', '.', 'request_time', '.', 'total_seconds', '(', ')', '*', '1000', "# add 'copyable' fields to the struct", 'for', 'key', 'in', 'self', '.', 'COPYABLE_LOG_FIELDS', ':', 'value', '=', 'getattr', '(', 'self', ',', 'key', ',', 'None', ')', 'if', 'value', ':', 'd', '[', 'key', ']', '=', 'value', 'return', 'sc_messages', '.', 'LogEntry', '(', 'name', '=', 'name', ',', 'timestamp', '=', 'timestamp', '.', 'to_rfc3339', '(', 'now', ')', ',', 'severity', '=', 'severity', ',', 'structPayload', '=', '_struct_payload_from', '(', 'd', ')', ')'] | Makes a `LogEntry` from this instance for the given log_name.
Args:
name (string): the log name to use for the generated ``LogEntry``
now (:class:`datetime.DateTime`): the current time
Return:
a ``LogEntry`` generated from this instance with the given name
and timestamp
Raises:
ValueError: if the fields in this instance are insufficient
to create a valid ``ServicecontrolServicesReportRequest`` | ['Makes', 'a', 'LogEntry', 'from', 'this', 'instance', 'for', 'the', 'given', 'log_name', '.'] | train | https://github.com/cloudendpoints/endpoints-management-python/blob/ec3c4a330ae9d65738861ce6df4dd6c3cb9f7731/endpoints_management/control/report_request.py#L293-L342 |
6,401 | mongodb/mongo-python-driver | pymongo/message.py | _update | def _update(collection_name, upsert, multi, spec, doc, check_keys, opts):
"""Get an OP_UPDATE message."""
flags = 0
if upsert:
flags += 1
if multi:
flags += 2
encode = _dict_to_bson # Make local. Uses extensions.
encoded_update = encode(doc, check_keys, opts)
return b"".join([
_ZERO_32,
_make_c_string(collection_name),
_pack_int(flags),
encode(spec, False, opts),
encoded_update]), len(encoded_update) | python | def _update(collection_name, upsert, multi, spec, doc, check_keys, opts):
"""Get an OP_UPDATE message."""
flags = 0
if upsert:
flags += 1
if multi:
flags += 2
encode = _dict_to_bson # Make local. Uses extensions.
encoded_update = encode(doc, check_keys, opts)
return b"".join([
_ZERO_32,
_make_c_string(collection_name),
_pack_int(flags),
encode(spec, False, opts),
encoded_update]), len(encoded_update) | ['def', '_update', '(', 'collection_name', ',', 'upsert', ',', 'multi', ',', 'spec', ',', 'doc', ',', 'check_keys', ',', 'opts', ')', ':', 'flags', '=', '0', 'if', 'upsert', ':', 'flags', '+=', '1', 'if', 'multi', ':', 'flags', '+=', '2', 'encode', '=', '_dict_to_bson', '# Make local. Uses extensions.', 'encoded_update', '=', 'encode', '(', 'doc', ',', 'check_keys', ',', 'opts', ')', 'return', 'b""', '.', 'join', '(', '[', '_ZERO_32', ',', '_make_c_string', '(', 'collection_name', ')', ',', '_pack_int', '(', 'flags', ')', ',', 'encode', '(', 'spec', ',', 'False', ',', 'opts', ')', ',', 'encoded_update', ']', ')', ',', 'len', '(', 'encoded_update', ')'] | Get an OP_UPDATE message. | ['Get', 'an', 'OP_UPDATE', 'message', '.'] | train | https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/message.py#L568-L582 |
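The `flags` integer built in `_update` above is a two-bit field: upsert sets bit 0 and multi sets bit 1. A minimal standalone sketch of that mapping (no pymongo internals required), useful for sanity-checking the wire flags:

```python
def op_update_flags(upsert: bool, multi: bool) -> int:
    # Mirrors the flag computation in _update: upsert -> bit 0, multi -> bit 1.
    flags = 0
    if upsert:
        flags += 1
    if multi:
        flags += 2
    return flags

assert op_update_flags(False, False) == 0  # plain single-document update
assert op_update_flags(True, False) == 1   # upsert
assert op_update_flags(False, True) == 2   # multi-document update
assert op_update_flags(True, True) == 3    # upsert + multi
```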
6,402 | allenai/allennlp | allennlp/data/fields/text_field.py | TextField.get_padding_lengths | def get_padding_lengths(self) -> Dict[str, int]:
"""
The ``TextField`` has a list of ``Tokens``, and each ``Token`` gets converted into arrays by
(potentially) several ``TokenIndexers``. This method gets the max length (over tokens)
associated with each of these arrays.
"""
# Our basic outline: we will iterate over `TokenIndexers`, and aggregate lengths over tokens
# for each indexer separately. Then we will combine the results for each indexer into a single
# dictionary, resolving any (unlikely) key conflicts by taking a max.
lengths = []
if self._indexed_tokens is None:
raise ConfigurationError("You must call .index(vocabulary) on a "
"field before determining padding lengths.")
# Each indexer can return a different sequence length, and for indexers that return
# multiple arrays each can have a different length. We'll keep track of them here.
for indexer_name, indexer in self._token_indexers.items():
indexer_lengths = {}
for indexed_tokens_key in self._indexer_name_to_indexed_token[indexer_name]:
# This is a list of dicts, one for each token in the field.
token_lengths = [indexer.get_padding_lengths(token)
for token in self._indexed_tokens[indexed_tokens_key]]
if not token_lengths:
# This is a padding edge case and occurs when we want to pad a ListField of
# TextFields. In order to pad the list field, we need to be able to have an
# _empty_ TextField, but if this is the case, token_lengths will be an empty
# list, so we add the default empty padding dictionary to the list instead.
token_lengths = [{}]
# Iterate over the keys and find the maximum token length.
# It's fine to iterate over the keys of the first token since all tokens have the same keys.
for key in token_lengths[0]:
indexer_lengths[key] = max(x[key] if key in x else 0 for x in token_lengths)
lengths.append(indexer_lengths)
padding_lengths = {}
num_tokens = set()
for indexer_name, token_list in self._indexed_tokens.items():
padding_lengths[f"{indexer_name}_length"] = len(token_list)
num_tokens.add(len(token_list))
# We don't actually use this for padding anywhere, but we used to. We add this key back in
# so that older configs still work if they sorted by this key in a BucketIterator. Taking
# the max of all of these should be fine for that purpose.
padding_lengths['num_tokens'] = max(num_tokens)
# Get all keys which have been used for padding for each indexer and take the max if there are duplicates.
padding_keys = {key for d in lengths for key in d.keys()}
for padding_key in padding_keys:
padding_lengths[padding_key] = max(x[padding_key] if padding_key in x else 0 for x in lengths)
return padding_lengths | python | def get_padding_lengths(self) -> Dict[str, int]:
"""
The ``TextField`` has a list of ``Tokens``, and each ``Token`` gets converted into arrays by
(potentially) several ``TokenIndexers``. This method gets the max length (over tokens)
associated with each of these arrays.
"""
# Our basic outline: we will iterate over `TokenIndexers`, and aggregate lengths over tokens
# for each indexer separately. Then we will combine the results for each indexer into a single
# dictionary, resolving any (unlikely) key conflicts by taking a max.
lengths = []
if self._indexed_tokens is None:
raise ConfigurationError("You must call .index(vocabulary) on a "
"field before determining padding lengths.")
# Each indexer can return a different sequence length, and for indexers that return
# multiple arrays each can have a different length. We'll keep track of them here.
for indexer_name, indexer in self._token_indexers.items():
indexer_lengths = {}
for indexed_tokens_key in self._indexer_name_to_indexed_token[indexer_name]:
# This is a list of dicts, one for each token in the field.
token_lengths = [indexer.get_padding_lengths(token)
for token in self._indexed_tokens[indexed_tokens_key]]
if not token_lengths:
# This is a padding edge case and occurs when we want to pad a ListField of
# TextFields. In order to pad the list field, we need to be able to have an
# _empty_ TextField, but if this is the case, token_lengths will be an empty
# list, so we add the default empty padding dictionary to the list instead.
token_lengths = [{}]
# Iterate over the keys and find the maximum token length.
# It's fine to iterate over the keys of the first token since all tokens have the same keys.
for key in token_lengths[0]:
indexer_lengths[key] = max(x[key] if key in x else 0 for x in token_lengths)
lengths.append(indexer_lengths)
padding_lengths = {}
num_tokens = set()
for indexer_name, token_list in self._indexed_tokens.items():
padding_lengths[f"{indexer_name}_length"] = len(token_list)
num_tokens.add(len(token_list))
# We don't actually use this for padding anywhere, but we used to. We add this key back in
# so that older configs still work if they sorted by this key in a BucketIterator. Taking
# the max of all of these should be fine for that purpose.
padding_lengths['num_tokens'] = max(num_tokens)
# Get all keys which have been used for padding for each indexer and take the max if there are duplicates.
padding_keys = {key for d in lengths for key in d.keys()}
for padding_key in padding_keys:
padding_lengths[padding_key] = max(x[padding_key] if padding_key in x else 0 for x in lengths)
return padding_lengths | ['def', 'get_padding_lengths', '(', 'self', ')', '->', 'Dict', '[', 'str', ',', 'int', ']', ':', '# Our basic outline: we will iterate over `TokenIndexers`, and aggregate lengths over tokens', '# for each indexer separately. Then we will combine the results for each indexer into a single', '# dictionary, resolving any (unlikely) key conflicts by taking a max.', 'lengths', '=', '[', ']', 'if', 'self', '.', '_indexed_tokens', 'is', 'None', ':', 'raise', 'ConfigurationError', '(', '"You must call .index(vocabulary) on a "', '"field before determining padding lengths."', ')', '# Each indexer can return a different sequence length, and for indexers that return', "# multiple arrays each can have a different length. We'll keep track of them here.", 'for', 'indexer_name', ',', 'indexer', 'in', 'self', '.', '_token_indexers', '.', 'items', '(', ')', ':', 'indexer_lengths', '=', '{', '}', 'for', 'indexed_tokens_key', 'in', 'self', '.', '_indexer_name_to_indexed_token', '[', 'indexer_name', ']', ':', '# This is a list of dicts, one for each token in the field.', 'token_lengths', '=', '[', 'indexer', '.', 'get_padding_lengths', '(', 'token', ')', 'for', 'token', 'in', 'self', '.', '_indexed_tokens', '[', 'indexed_tokens_key', ']', ']', 'if', 'not', 'token_lengths', ':', '# This is a padding edge case and occurs when we want to pad a ListField of', '# TextFields. In order to pad the list field, we need to be able to have an', '# _empty_ TextField, but if this is the case, token_lengths will be an empty', '# list, so we add the default empty padding dictionary to the list instead.', 'token_lengths', '=', '[', '{', '}', ']', '# Iterate over the keys and find the maximum token length.', "# It's fine to iterate over the keys of the first token since all tokens have the same keys.", 'for', 'key', 'in', 'token_lengths', '[', '0', ']', ':', 'indexer_lengths', '[', 'key', ']', '=', 'max', '(', 'x', '[', 'key', ']', 'if', 'key', 'in', 'x', 'else', '0', 'for', 'x', 'in', 'token_lengths', ')', 'lengths', '.', 'append', '(', 'indexer_lengths', ')', 'padding_lengths', '=', '{', '}', 'num_tokens', '=', 'set', '(', ')', 'for', 'indexer_name', ',', 'token_list', 'in', 'self', '.', '_indexed_tokens', '.', 'items', '(', ')', ':', 'padding_lengths', '[', 'f"{indexer_name}_length"', ']', '=', 'len', '(', 'token_list', ')', 'num_tokens', '.', 'add', '(', 'len', '(', 'token_list', ')', ')', "# We don't actually use this for padding anywhere, but we used to. We add this key back in", '# so that older configs still work if they sorted by this key in a BucketIterator. Taking', '# the max of all of these should be fine for that purpose.', 'padding_lengths', '[', "'num_tokens'", ']', '=', 'max', '(', 'num_tokens', ')', '# Get all keys which have been used for padding for each indexer and take the max if there are duplicates.', 'padding_keys', '=', '{', 'key', 'for', 'd', 'in', 'lengths', 'for', 'key', 'in', 'd', '.', 'keys', '(', ')', '}', 'for', 'padding_key', 'in', 'padding_keys', ':', 'padding_lengths', '[', 'padding_key', ']', '=', 'max', '(', 'x', '[', 'padding_key', ']', 'if', 'padding_key', 'in', 'x', 'else', '0', 'for', 'x', 'in', 'lengths', ')', 'return', 'padding_lengths'] | The ``TextField`` has a list of ``Tokens``, and each ``Token`` gets converted into arrays by
(potentially) several ``TokenIndexers``. This method gets the max length (over tokens)
associated with each of these arrays. | ['The', 'TextField', 'has', 'a', 'list', 'of', 'Tokens', 'and', 'each', 'Token', 'gets', 'converted', 'into', 'arrays', 'by', '(', 'potentially', ')', 'several', 'TokenIndexers', '.', 'This', 'method', 'gets', 'the', 'max', 'length', '(', 'over', 'tokens', ')', 'associated', 'with', 'each', 'of', 'these', 'arrays', '.'] | train | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/fields/text_field.py#L75-L125 |
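The heart of `get_padding_lengths` above is a key-wise maximum over per-token padding dictionaries (and then again across indexers). A minimal standalone sketch of that merge with hypothetical keys, independent of AllenNLP:

```python
from typing import Dict, List

def max_merge(dicts: List[Dict[str, int]]) -> Dict[str, int]:
    # Key-wise maximum over padding-length dicts; missing keys count as 0,
    # matching the `x[key] if key in x else 0` pattern above.
    keys = {key for d in dicts for key in d}
    return {key: max(d.get(key, 0) for d in dicts) for key in keys}

token_lengths = [{"num_token_characters": 5}, {"num_token_characters": 9}, {}]
assert max_merge(token_lengths) == {"num_token_characters": 9}
```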
6,403 | ray-project/ray | python/ray/rllib/utils/filter.py | MeanStdFilter.apply_changes | def apply_changes(self, other, with_buffer=False):
"""Applies updates from the buffer of another filter.
Params:
other (MeanStdFilter): Other filter to apply info from
with_buffer (bool): Flag for specifying if the buffer should be
copied from other.
Examples:
>>> a = MeanStdFilter(())
>>> a(1)
>>> a(2)
>>> print([a.rs.n, a.rs.mean, a.buffer.n])
[2, 1.5, 2]
>>> b = MeanStdFilter(())
>>> b(10)
>>> a.apply_changes(b, with_buffer=False)
>>> print([a.rs.n, a.rs.mean, a.buffer.n])
[3, 4.333333333333333, 2]
>>> a.apply_changes(b, with_buffer=True)
>>> print([a.rs.n, a.rs.mean, a.buffer.n])
[4, 5.75, 1]
"""
self.rs.update(other.buffer)
if with_buffer:
self.buffer = other.buffer.copy() | python | def apply_changes(self, other, with_buffer=False):
"""Applies updates from the buffer of another filter.
Params:
other (MeanStdFilter): Other filter to apply info from
with_buffer (bool): Flag for specifying if the buffer should be
copied from other.
Examples:
>>> a = MeanStdFilter(())
>>> a(1)
>>> a(2)
>>> print([a.rs.n, a.rs.mean, a.buffer.n])
[2, 1.5, 2]
>>> b = MeanStdFilter(())
>>> b(10)
>>> a.apply_changes(b, with_buffer=False)
>>> print([a.rs.n, a.rs.mean, a.buffer.n])
[3, 4.333333333333333, 2]
>>> a.apply_changes(b, with_buffer=True)
>>> print([a.rs.n, a.rs.mean, a.buffer.n])
[4, 5.75, 1]
"""
self.rs.update(other.buffer)
if with_buffer:
self.buffer = other.buffer.copy() | ['def', 'apply_changes', '(', 'self', ',', 'other', ',', 'with_buffer', '=', 'False', ')', ':', 'self', '.', 'rs', '.', 'update', '(', 'other', '.', 'buffer', ')', 'if', 'with_buffer', ':', 'self', '.', 'buffer', '=', 'other', '.', 'buffer', '.', 'copy', '(', ')'] | Applies updates from the buffer of another filter.
Params:
other (MeanStdFilter): Other filter to apply info from
with_buffer (bool): Flag for specifying if the buffer should be
copied from other.
Examples:
>>> a = MeanStdFilter(())
>>> a(1)
>>> a(2)
>>> print([a.rs.n, a.rs.mean, a.buffer.n])
[2, 1.5, 2]
>>> b = MeanStdFilter(())
>>> b(10)
>>> a.apply_changes(b, with_buffer=False)
>>> print([a.rs.n, a.rs.mean, a.buffer.n])
[3, 4.333333333333333, 2]
>>> a.apply_changes(b, with_buffer=True)
>>> print([a.rs.n, a.rs.mean, a.buffer.n])
[4, 5.75, 1] | ['Applies', 'updates', 'from', 'the', 'buffer', 'of', 'another', 'filter', '.'] | train | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/utils/filter.py#L156-L181 |
6,404 | awacha/sastool | sastool/fitting/fitfunctions/sasbasic.py | GuinierPorod | def GuinierPorod(q, G, Rg, alpha):
"""Empirical Guinier-Porod scattering
Inputs:
-------
``q``: independent variable
``G``: factor of the Guinier-branch
``Rg``: radius of gyration
``alpha``: power-law exponent
Formula:
--------
``G * exp(-q^2*Rg^2/3)`` if ``q<q_sep`` and ``a*q^alpha`` otherwise.
``q_sep`` and ``a`` are determined from conditions of smoothness at
the cross-over.
Literature:
-----------
B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43,
716-719.
"""
return GuinierPorodMulti(q, G, Rg, alpha) | python | def GuinierPorod(q, G, Rg, alpha):
"""Empirical Guinier-Porod scattering
Inputs:
-------
``q``: independent variable
``G``: factor of the Guinier-branch
``Rg``: radius of gyration
``alpha``: power-law exponent
Formula:
--------
``G * exp(-q^2*Rg^2/3)`` if ``q<q_sep`` and ``a*q^alpha`` otherwise.
``q_sep`` and ``a`` are determined from conditions of smoothness at
the cross-over.
Literature:
-----------
B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43,
716-719.
"""
return GuinierPorodMulti(q, G, Rg, alpha) | ['def', 'GuinierPorod', '(', 'q', ',', 'G', ',', 'Rg', ',', 'alpha', ')', ':', 'return', 'GuinierPorodMulti', '(', 'q', ',', 'G', ',', 'Rg', ',', 'alpha', ')'] | Empirical Guinier-Porod scattering
Inputs:
-------
``q``: independent variable
``G``: factor of the Guinier-branch
``Rg``: radius of gyration
``alpha``: power-law exponent
Formula:
--------
``G * exp(-q^2*Rg^2/3)`` if ``q<q_sep`` and ``a*q^alpha`` otherwise.
``q_sep`` and ``a`` are determined from conditions of smoothness at
the cross-over.
Literature:
-----------
B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43,
716-719. | ['Empirical', 'Guinier', '-', 'Porod', 'scattering'] | train | https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/sasbasic.py#L86-L107 |
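The docstring above says the cross-over point `q_sep` and the power-law prefactor `a` follow from smoothness conditions but does not spell them out. Matching the two branches in value and slope gives `q_sep = sqrt(-3*alpha/2)/Rg` and `a = G*exp(alpha/2)*q_sep**(-alpha)`, assuming a decaying power law (`alpha < 0`). A hedged standalone sketch of that piecewise model (not the sastool implementation itself, which delegates to `GuinierPorodMulti`):

```python
import numpy as np

def guinier_porod_sketch(q, G, Rg, alpha):
    # Piecewise Guinier / power-law model with a value- and slope-matched
    # cross-over, assuming alpha < 0 (e.g. alpha = -4 for a Porod-like decay).
    q = np.asarray(q, dtype=float)
    q_sep = np.sqrt(-3.0 * alpha / 2.0) / Rg
    a = G * np.exp(alpha / 2.0) * q_sep ** (-alpha)
    guinier = G * np.exp(-q ** 2 * Rg ** 2 / 3.0)
    power_law = a * q ** alpha
    return np.where(q < q_sep, guinier, power_law)
```

At `q = q_sep` both branches evaluate to `G*exp(alpha/2)`, so intensity and slope are continuous across the join.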
6,405 | awslabs/sockeye | sockeye/loss.py | LengthRatioMSEMetric.update_dict | def update_dict(self, label: Dict, pred: Dict):
"""
If label is missing the right name, copy it from the prediction.
"""
if not set(self.label_names).issubset(set(label.keys())):
label.update({name:pred[name] for name in self.label_names})
super().update_dict(label, pred) | python | def update_dict(self, label: Dict, pred: Dict):
"""
If label is missing the right name, copy it from the prediction.
"""
if not set(self.label_names).issubset(set(label.keys())):
label.update({name:pred[name] for name in self.label_names})
super().update_dict(label, pred) | ['def', 'update_dict', '(', 'self', ',', 'label', ':', 'Dict', ',', 'pred', ':', 'Dict', ')', ':', 'if', 'not', 'set', '(', 'self', '.', 'label_names', ')', '.', 'issubset', '(', 'set', '(', 'label', '.', 'keys', '(', ')', ')', ')', ':', 'label', '.', 'update', '(', '{', 'name', ':', 'pred', '[', 'name', ']', 'for', 'name', 'in', 'self', '.', 'label_names', '}', ')', 'super', '(', ')', '.', 'update_dict', '(', 'label', ',', 'pred', ')'] | If label is missing the right name, copy it from the prediction. | ['If', 'label', 'is', 'missing', 'the', 'right', 'name', 'copy', 'it', 'from', 'the', 'prediction', '.'] | train | https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/loss.py#L367-L373 |
6,406 | acutesoftware/AIKIF | aikif/web_app/page_search.py | get_page | def get_page(search_text):
"""
formats the entire search result in a table output
"""
lst = search_aikif(search_text)
txt = '<table class="as-table as-table-zebra as-table-horizontal">'
for result in lst:
txt += '<TR><TD>' + result + '</TD></TR>'
txt += '</TABLE>\n\n'
return txt | python | def get_page(search_text):
"""
formats the entire search result in a table output
"""
lst = search_aikif(search_text)
txt = '<table class="as-table as-table-zebra as-table-horizontal">'
for result in lst:
txt += '<TR><TD>' + result + '</TD></TR>'
txt += '</TABLE>\n\n'
return txt | ['def', 'get_page', '(', 'search_text', ')', ':', 'lst', '=', 'search_aikif', '(', 'search_text', ')', 'txt', '=', '\'<table class="as-table as-table-zebra as-table-horizontal">\'', 'for', 'result', 'in', 'lst', ':', 'txt', '+=', "'<TR><TD>'", '+', 'result', '+', "'</TD></TR>'", 'txt', '+=', "'</TABLE>\\n\\n'", 'return', 'txt'] | formats the entire search result in a table output | ['formats', 'the', 'entire', 'search', 'result', 'in', 'a', 'table', 'output'] | train | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/page_search.py#L12-L21 |
6,407 | common-workflow-language/cwltool | cwltool/job.py | ContainerCommandLineJob.docker_monitor | def docker_monitor(self, cidfile, tmpdir_prefix, cleanup_cidfile, process):
# type: (Text, Text, bool, subprocess.Popen) -> None
"""Record memory usage of the running Docker container."""
# Todo: consider switching to `docker create` / `docker start`
# instead of `docker run` as `docker create` outputs the container ID
# to stdout, but the container is frozen, thus allowing us to start the
# monitoring process without dealing with the cidfile or too-fast
# container execution
cid = None
while cid is None:
time.sleep(1)
if process.returncode is not None:
if cleanup_cidfile:
os.remove(cidfile)
return
try:
with open(cidfile) as cidhandle:
cid = cidhandle.readline().strip()
except (OSError, IOError):
cid = None
max_mem = self.docker_get_memory(cid)
tmp_dir, tmp_prefix = os.path.split(tmpdir_prefix)
stats_file = tempfile.NamedTemporaryFile(prefix=tmp_prefix, dir=tmp_dir)
with open(stats_file.name, mode="w") as stats_file_handle:
stats_proc = subprocess.Popen(
['docker', 'stats', '--no-trunc', '--format', '{{.MemPerc}}',
cid], stdout=stats_file_handle, stderr=subprocess.DEVNULL)
process.wait()
stats_proc.kill()
max_mem_percent = 0
with open(stats_file.name, mode="r") as stats:
for line in stats:
try:
mem_percent = float(re.sub(
CONTROL_CODE_RE, '', line).replace('%', ''))
if mem_percent > max_mem_percent:
max_mem_percent = mem_percent
except ValueError:
break
_logger.info(u"[job %s] Max memory used: %iMiB", self.name,
int((max_mem_percent * max_mem) / (2 ** 20)))
if cleanup_cidfile:
os.remove(cidfile) | python | def docker_monitor(self, cidfile, tmpdir_prefix, cleanup_cidfile, process):
# type: (Text, Text, bool, subprocess.Popen) -> None
"""Record memory usage of the running Docker container."""
# Todo: consider switching to `docker create` / `docker start`
# instead of `docker run` as `docker create` outputs the container ID
# to stdout, but the container is frozen, thus allowing us to start the
# monitoring process without dealing with the cidfile or too-fast
# container execution
cid = None
while cid is None:
time.sleep(1)
if process.returncode is not None:
if cleanup_cidfile:
os.remove(cidfile)
return
try:
with open(cidfile) as cidhandle:
cid = cidhandle.readline().strip()
except (OSError, IOError):
cid = None
max_mem = self.docker_get_memory(cid)
tmp_dir, tmp_prefix = os.path.split(tmpdir_prefix)
stats_file = tempfile.NamedTemporaryFile(prefix=tmp_prefix, dir=tmp_dir)
with open(stats_file.name, mode="w") as stats_file_handle:
stats_proc = subprocess.Popen(
['docker', 'stats', '--no-trunc', '--format', '{{.MemPerc}}',
cid], stdout=stats_file_handle, stderr=subprocess.DEVNULL)
process.wait()
stats_proc.kill()
max_mem_percent = 0
with open(stats_file.name, mode="r") as stats:
for line in stats:
try:
mem_percent = float(re.sub(
CONTROL_CODE_RE, '', line).replace('%', ''))
if mem_percent > max_mem_percent:
max_mem_percent = mem_percent
except ValueError:
break
_logger.info(u"[job %s] Max memory used: %iMiB", self.name,
int((max_mem_percent * max_mem) / (2 ** 20)))
if cleanup_cidfile:
os.remove(cidfile) | ['def', 'docker_monitor', '(', 'self', ',', 'cidfile', ',', 'tmpdir_prefix', ',', 'cleanup_cidfile', ',', 'process', ')', ':', '# type: (Text, Text, bool, subprocess.Popen) -> None', '# Todo: consider switching to `docker create` / `docker start`', '# instead of `docker run` as `docker create` outputs the container ID', '# to stdout, but the container is frozen, thus allowing us to start the', '# monitoring process without dealing with the cidfile or too-fast', '# container execution', 'cid', '=', 'None', 'while', 'cid', 'is', 'None', ':', 'time', '.', 'sleep', '(', '1', ')', 'if', 'process', '.', 'returncode', 'is', 'not', 'None', ':', 'if', 'cleanup_cidfile', ':', 'os', '.', 'remove', '(', 'cidfile', ')', 'return', 'try', ':', 'with', 'open', '(', 'cidfile', ')', 'as', 'cidhandle', ':', 'cid', '=', 'cidhandle', '.', 'readline', '(', ')', '.', 'strip', '(', ')', 'except', '(', 'OSError', ',', 'IOError', ')', ':', 'cid', '=', 'None', 'max_mem', '=', 'self', '.', 'docker_get_memory', '(', 'cid', ')', 'tmp_dir', ',', 'tmp_prefix', '=', 'os', '.', 'path', '.', 'split', '(', 'tmpdir_prefix', ')', 'stats_file', '=', 'tempfile', '.', 'NamedTemporaryFile', '(', 'prefix', '=', 'tmp_prefix', ',', 'dir', '=', 'tmp_dir', ')', 'with', 'open', '(', 'stats_file', '.', 'name', ',', 'mode', '=', '"w"', ')', 'as', 'stats_file_handle', ':', 'stats_proc', '=', 'subprocess', '.', 'Popen', '(', '[', "'docker'", ',', "'stats'", ',', "'--no-trunc'", ',', "'--format'", ',', "'{{.MemPerc}}'", ',', 'cid', ']', ',', 'stdout', '=', 'stats_file_handle', ',', 'stderr', '=', 'subprocess', '.', 'DEVNULL', ')', 'process', '.', 'wait', '(', ')', 'stats_proc', '.', 'kill', '(', ')', 'max_mem_percent', '=', '0', 'with', 'open', '(', 'stats_file', '.', 'name', ',', 'mode', '=', '"r"', ')', 'as', 'stats', ':', 'for', 'line', 'in', 'stats', ':', 'try', ':', 'mem_percent', '=', 'float', '(', 're', '.', 'sub', '(', 'CONTROL_CODE_RE', ',', "''", ',', 'line', ')', '.', 'replace', '(', "'%'", ',', "''", ')', ')', 'if', 'mem_percent', '>', 'max_mem_percent', ':', 'max_mem_percent', '=', 'mem_percent', 'except', 'ValueError', ':', 'break', '_logger', '.', 'info', '(', 'u"[job %s] Max memory used: %iMiB"', ',', 'self', '.', 'name', ',', 'int', '(', '(', 'max_mem_percent', '*', 'max_mem', ')', '/', '(', '2', '**', '20', ')', ')', ')', 'if', 'cleanup_cidfile', ':', 'os', '.', 'remove', '(', 'cidfile', ')'] | Record memory usage of the running Docker container. | ['Record', 'memory', 'usage', 'of', 'the', 'running', 'Docker', 'container', '.'] | train | https://github.com/common-workflow-language/cwltool/blob/cb81b22abc52838823da9945f04d06739ab32fda/cwltool/job.py#L681-L723 |
6,408 | alixnovosi/drewtilities | drewtilities/drewtilities.py | generate_downloader | def generate_downloader(headers: Dict[str, str], args: Any, max_per_hour: int=30
) -> Callable[..., None]:
"""Create function to download with rate limiting and text progress."""
def _downloader(url: str, dest: str) -> None:
@rate_limited(max_per_hour, args)
def _rate_limited_download() -> None:
# Create parent directory of file, and its parents, if they don't exist.
parent = os.path.dirname(dest)
if not os.path.exists(parent):
os.makedirs(parent)
response = requests.get(url, headers=headers, stream=True)
LOG.info(f"Downloading from '{url}'.")
LOG.info(f"Trying to save to '{dest}'.")
length = response.headers.get("content-length")
if length is None:
total_length = 0
else:
total_length = int(length)
expected_size = (total_length / CHUNK_SIZE) + 1
chunks = response.iter_content(chunk_size=CHUNK_SIZE)
open(dest, "a", encoding=FORCED_ENCODING).close()
# per http://stackoverflow.com/a/20943461
with open(dest, "wb") as stream:
for chunk in tui.progress.bar(chunks, expected_size=expected_size):
if not chunk:
return
stream.write(chunk)
stream.flush()
_rate_limited_download()
return _downloader | python | def generate_downloader(headers: Dict[str, str], args: Any, max_per_hour: int=30
) -> Callable[..., None]:
"""Create function to download with rate limiting and text progress."""
def _downloader(url: str, dest: str) -> None:
@rate_limited(max_per_hour, args)
def _rate_limited_download() -> None:
# Create parent directory of file, and its parents, if they don't exist.
parent = os.path.dirname(dest)
if not os.path.exists(parent):
os.makedirs(parent)
response = requests.get(url, headers=headers, stream=True)
LOG.info(f"Downloading from '{url}'.")
LOG.info(f"Trying to save to '{dest}'.")
length = response.headers.get("content-length")
if length is None:
total_length = 0
else:
total_length = int(length)
expected_size = (total_length / CHUNK_SIZE) + 1
chunks = response.iter_content(chunk_size=CHUNK_SIZE)
open(dest, "a", encoding=FORCED_ENCODING).close()
# per http://stackoverflow.com/a/20943461
with open(dest, "wb") as stream:
for chunk in tui.progress.bar(chunks, expected_size=expected_size):
if not chunk:
return
stream.write(chunk)
stream.flush()
_rate_limited_download()
return _downloader | ['def', 'generate_downloader', '(', 'headers', ':', 'Dict', '[', 'str', ',', 'str', ']', ',', 'args', ':', 'Any', ',', 'max_per_hour', ':', 'int', '=', '30', ')', '->', 'Callable', '[', '...', ',', 'None', ']', ':', 'def', '_downloader', '(', 'url', ':', 'str', ',', 'dest', ':', 'str', ')', '->', 'None', ':', '@', 'rate_limited', '(', 'max_per_hour', ',', 'args', ')', 'def', '_rate_limited_download', '(', ')', '->', 'None', ':', "# Create parent directory of file, and its parents, if they don't exist.", 'parent', '=', 'os', '.', 'path', '.', 'dirname', '(', 'dest', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'parent', ')', ':', 'os', '.', 'makedirs', '(', 'parent', ')', 'response', '=', 'requests', '.', 'get', '(', 'url', ',', 'headers', '=', 'headers', ',', 'stream', '=', 'True', ')', 'LOG', '.', 'info', '(', 'f"Downloading from \'{url}\'."', ')', 'LOG', '.', 'info', '(', 'f"Trying to save to \'{dest}\'."', ')', 'length', '=', 'response', '.', 'headers', '.', 'get', '(', '"content-length"', ')', 'if', 'length', 'is', 'None', ':', 'total_length', '=', '0', 'else', ':', 'total_length', '=', 'int', '(', 'length', ')', 'expected_size', '=', '(', 'total_length', '/', 'CHUNK_SIZE', ')', '+', '1', 'chunks', '=', 'response', '.', 'iter_content', '(', 'chunk_size', '=', 'CHUNK_SIZE', ')', 'open', '(', 'dest', ',', '"a"', ',', 'encoding', '=', 'FORCED_ENCODING', ')', '.', 'close', '(', ')', '# per http://stackoverflow.com/a/20943461', 'with', 'open', '(', 'dest', ',', '"wb"', ')', 'as', 'stream', ':', 'for', 'chunk', 'in', 'tui', '.', 'progress', '.', 'bar', '(', 'chunks', ',', 'expected_size', '=', 'expected_size', ')', ':', 'if', 'not', 'chunk', ':', 'return', 'stream', '.', 'write', '(', 'chunk', ')', 'stream', '.', 'flush', '(', ')', '_rate_limited_download', '(', ')', 'return', '_downloader'] | Create function to download with rate limiting and text progress. | ['Create', 'function', 'to', 'download', 'with', 'rate', 'limiting', 'and', 'text', 'progress', '.'] | train | https://github.com/alixnovosi/drewtilities/blob/4e9b7f65f11195dc48347bf9c6ca4e56baca8b45/drewtilities/drewtilities.py#L33-L71 |
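A hedged usage sketch of the factory above. The header values, the `args` object (which is only forwarded to `rate_limited`), and the URL/paths are placeholders, not values from the library:

```python
# Hypothetical usage; generate_downloader returns a rate-limited download function.
headers = {"User-Agent": "drewtilities-example/0.1"}   # placeholder headers
download = generate_downloader(headers, args=None, max_per_hour=10)

# Downloads with a text progress bar, creating parent directories as needed.
download("https://example.com/episode.mp3", "downloads/episode.mp3")
```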
6,409 | umich-brcf-bioinf/Connor | connor/consam/readers.py | paired_reader_from_bamfile | def paired_reader_from_bamfile(args,
log,
usage_logger,
annotated_writer):
'''Given a BAM file, return a generator that yields filtered, paired reads'''
total_aligns = pysamwrapper.total_align_count(args.input_bam)
bamfile_generator = _bamfile_generator(args.input_bam)
return _paired_reader(args.umt_length,
bamfile_generator,
total_aligns,
log,
usage_logger,
annotated_writer) | python | def paired_reader_from_bamfile(args,
log,
usage_logger,
annotated_writer):
'''Given a BAM file, return a generator that yields filtered, paired reads'''
total_aligns = pysamwrapper.total_align_count(args.input_bam)
bamfile_generator = _bamfile_generator(args.input_bam)
return _paired_reader(args.umt_length,
bamfile_generator,
total_aligns,
log,
usage_logger,
annotated_writer) | ['def', 'paired_reader_from_bamfile', '(', 'args', ',', 'log', ',', 'usage_logger', ',', 'annotated_writer', ')', ':', 'total_aligns', '=', 'pysamwrapper', '.', 'total_align_count', '(', 'args', '.', 'input_bam', ')', 'bamfile_generator', '=', '_bamfile_generator', '(', 'args', '.', 'input_bam', ')', 'return', '_paired_reader', '(', 'args', '.', 'umt_length', ',', 'bamfile_generator', ',', 'total_aligns', ',', 'log', ',', 'usage_logger', ',', 'annotated_writer', ')'] | Given a BAM file, return a generator that yields filtered, paired reads | ['Given', 'a', 'BAM', 'file', 'return', 'a', 'generator', 'that', 'yields', 'filtered', 'paired', 'reads'] | train | https://github.com/umich-brcf-bioinf/Connor/blob/b20e9f36e9730c29eaa27ea5fa8b0151e58d2f13/connor/consam/readers.py#L112-L124 |
6,410 | Julius2342/pyvlx | old_api/pyvlx/scenes.py | Scenes.data_import | def data_import(self, json_response):
"""Import scenes from JSON response."""
if 'data' not in json_response:
raise PyVLXException('no element data found: {0}'.format(
json.dumps(json_response)))
data = json_response['data']
for item in data:
self.load_scene(item) | python | def data_import(self, json_response):
"""Import scenes from JSON response."""
if 'data' not in json_response:
raise PyVLXException('no element data found: {0}'.format(
json.dumps(json_response)))
data = json_response['data']
for item in data:
self.load_scene(item) | ['def', 'data_import', '(', 'self', ',', 'json_response', ')', ':', 'if', "'data'", 'not', 'in', 'json_response', ':', 'raise', 'PyVLXException', '(', "'no element data found: {0}'", '.', 'format', '(', 'json', '.', 'dumps', '(', 'json_response', ')', ')', ')', 'data', '=', 'json_response', '[', "'data'", ']', 'for', 'item', 'in', 'data', ':', 'self', '.', 'load_scene', '(', 'item', ')'] | Import scenes from JSON response. | ['Import', 'scenes', 'from', 'JSON', 'response', '.'] | train | https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/old_api/pyvlx/scenes.py#L44-L51 |
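`data_import` above only requires a top-level `data` list and hands each item to `load_scene`. A minimal sketch of the accepted shape; the per-scene fields shown are hypothetical, since `load_scene` is not part of this row:

```python
json_response = {
    "data": [
        {"name": "All Shutters Down", "id": 0},  # hypothetical scene fields
        {"name": "All Shutters Up", "id": 1},
    ]
}
scenes.data_import(json_response)   # a missing "data" key raises PyVLXException
```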
6,411 | tanghaibao/goatools | goatools/gosubdag/rpt/wr_xlsx.py | GoDepth1LettersWr.wr_tex | def wr_tex(self, fout_tex="gos_depth01.tex"):
"""write text table of depth-01 GO terms and their letter representation."""
data_nts = self.get_d1nts()
joinchr = " & "
#pylint: disable=anomalous-backslash-in-string
eol = " \\\\\n"
with open(fout_tex, 'w') as prt:
prt.write("\\begin{table}[!ht]\n")
prt.write("\\begin{tabular}{|p{.5cm} | p{.5cm} | >{\\raggedleft}p{.9cm} ")
prt.write("|p{.7cm} |p{1.8cm} |p{9cm}|}\n")
prt.write("\multicolumn{6}{c}{} \\\\\n")
prt.write("\hline\n")
prt.write("\\rowcolor{gray!10}\n")
prt.write("{HDRS}{EOL}".format(
HDRS=joinchr.join(next(iter(data_nts))._fields), EOL=eol))
prt.write("\hline\n")
for idx, line in enumerate(get_lines(data_nts, joinchr=joinchr, eol=eol)):
if idx%2 == 1:
prt.write("\\rowcolor{gray!7}\n")
line.replace('_', '\\_')
prt.write(line)
prt.write("\hline\n")
prt.write("\end{tabular}\n")
caption = ("The descendant counts of GO terms at depth-01 are highly skewed. The "
"root term, \textit{biological\_process} has over twenty GO children at "
"depth-01 shown in the table sorted by their number of descendants "
"(dcnt) with \textit{cellular process} at the top having 18k+ "
"descendants and \textit{cell killing} near the bottom having only "
"about 100 descendants. The first column (D1) contains a letter used as "
"an alias for each depth-01 GO term. The second column represents the "
"number of descendants from the specified GO term from down to the total "
"of its descendant leaf-level GO terms, which have no child GO terms.")
prt.write("\caption{{{TEXT}}}\n\n".format(TEXT=caption))
prt.write("\label{table:supptbl_d1}\n")
prt.write("\end{table}\n")
sys.stdout.write(" {N:>5} items WROTE: {TXT}\n".format(
N=len(data_nts), TXT=fout_tex)) | python | def wr_tex(self, fout_tex="gos_depth01.tex"):
"""write text table of depth-01 GO terms and their letter representation."""
data_nts = self.get_d1nts()
joinchr = " & "
#pylint: disable=anomalous-backslash-in-string
eol = " \\\\\n"
with open(fout_tex, 'w') as prt:
prt.write("\\begin{table}[!ht]\n")
prt.write("\\begin{tabular}{|p{.5cm} | p{.5cm} | >{\\raggedleft}p{.9cm} ")
prt.write("|p{.7cm} |p{1.8cm} |p{9cm}|}\n")
prt.write("\multicolumn{6}{c}{} \\\\\n")
prt.write("\hline\n")
prt.write("\\rowcolor{gray!10}\n")
prt.write("{HDRS}{EOL}".format(
HDRS=joinchr.join(next(iter(data_nts))._fields), EOL=eol))
prt.write("\hline\n")
for idx, line in enumerate(get_lines(data_nts, joinchr=joinchr, eol=eol)):
if idx%2 == 1:
prt.write("\\rowcolor{gray!7}\n")
line.replace('_', '\\_')
prt.write(line)
prt.write("\hline\n")
prt.write("\end{tabular}\n")
caption = ("The descendant counts of GO terms at depth-01 are highly skewed. The "
"root term, \textit{biological\_process} has over twenty GO children at "
"depth-01 shown in the table sorted by their number of descendants "
"(dcnt) with \textit{cellular process} at the top having 18k+ "
"descendants and \textit{cell killing} near the bottom having only "
"about 100 descendants. The first column (D1) contains a letter used as "
"an alias for each depth-01 GO term. The second column represents the "
"number of descendants from the specified GO term from down to the total "
"of its descendant leaf-level GO terms, which have no child GO terms.")
prt.write("\caption{{{TEXT}}}\n\n".format(TEXT=caption))
prt.write("\label{table:supptbl_d1}\n")
prt.write("\end{table}\n")
sys.stdout.write(" {N:>5} items WROTE: {TXT}\n".format(
N=len(data_nts), TXT=fout_tex)) | ['def', 'wr_tex', '(', 'self', ',', 'fout_tex', '=', '"gos_depth01.tex"', ')', ':', 'data_nts', '=', 'self', '.', 'get_d1nts', '(', ')', 'joinchr', '=', '" & "', '#pylint: disable=anomalous-backslash-in-string', 'eol', '=', '" \\\\\\\\\\n"', 'with', 'open', '(', 'fout_tex', ',', "'w'", ')', 'as', 'prt', ':', 'prt', '.', 'write', '(', '"\\\\begin{table}[!ht]\\n"', ')', 'prt', '.', 'write', '(', '"\\\\begin{tabular}{|p{.5cm} | p{.5cm} | >{\\\\raggedleft}p{.9cm} "', ')', 'prt', '.', 'write', '(', '"|p{.7cm} |p{1.8cm} |p{9cm}|}\\n"', ')', 'prt', '.', 'write', '(', '"\\multicolumn{6}{c}{} \\\\\\\\\\n"', ')', 'prt', '.', 'write', '(', '"\\hline\\n"', ')', 'prt', '.', 'write', '(', '"\\\\rowcolor{gray!10}\\n"', ')', 'prt', '.', 'write', '(', '"{HDRS}{EOL}"', '.', 'format', '(', 'HDRS', '=', 'joinchr', '.', 'join', '(', 'next', '(', 'iter', '(', 'data_nts', ')', ')', '.', '_fields', ')', ',', 'EOL', '=', 'eol', ')', ')', 'prt', '.', 'write', '(', '"\\hline\\n"', ')', 'for', 'idx', ',', 'line', 'in', 'enumerate', '(', 'get_lines', '(', 'data_nts', ',', 'joinchr', '=', 'joinchr', ',', 'eol', '=', 'eol', ')', ')', ':', 'if', 'idx', '%', '2', '==', '1', ':', 'prt', '.', 'write', '(', '"\\\\rowcolor{gray!7}\\n"', ')', 'line', '.', 'replace', '(', "'_'", ',', "'\\\\_'", ')', 'prt', '.', 'write', '(', 'line', ')', 'prt', '.', 'write', '(', '"\\hline\\n"', ')', 'prt', '.', 'write', '(', '"\\end{tabular}\\n"', ')', 'caption', '=', '(', '"The descendant counts of GO terms at depth-01 are highly skewed. The "', '"root term, \\textit{biological\\_process} has over twenty GO children at "', '"depth-01 shown in the table sorted by their number of descendants "', '"(dcnt) with \\textit{cellular process} at the top having 18k+ "', '"descendants and \\textit{cell killing} near the bottom having only "', '"about 100 descendants. The first column (D1) contains a letter used as "', '"an alias for each depth-01 GO term. The second column represents the "', '"number of descendants from the specified GO term from down to the total "', '"of its descendant leaf-level GO terms, which have no child GO terms."', ')', 'prt', '.', 'write', '(', '"\\caption{{{TEXT}}}\\n\\n"', '.', 'format', '(', 'TEXT', '=', 'caption', ')', ')', 'prt', '.', 'write', '(', '"\\label{table:supptbl_d1}\\n"', ')', 'prt', '.', 'write', '(', '"\\end{table}\\n"', ')', 'sys', '.', 'stdout', '.', 'write', '(', '" {N:>5} items WROTE: {TXT}\\n"', '.', 'format', '(', 'N', '=', 'len', '(', 'data_nts', ')', ',', 'TXT', '=', 'fout_tex', ')', ')'] | write text table of depth-01 GO terms and their letter representation. | ['write', 'text', 'table', 'of', 'depth', '-', '01', 'GO', 'terms', 'and', 'their', 'letter', 'representation', '.'] | train | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/rpt/wr_xlsx.py#L134-L170 |
6,412 | spacetelescope/stsci.tools | lib/stsci/tools/fileutil.py | copyFile | def copyFile(input, output, replace=None):
"""Copy a file whole from input to output."""
_found = findFile(output)
if not _found or (_found and replace):
shutil.copy2(input, output) | python | def copyFile(input, output, replace=None):
"""Copy a file whole from input to output."""
_found = findFile(output)
if not _found or (_found and replace):
shutil.copy2(input, output) | ['def', 'copyFile', '(', 'input', ',', 'output', ',', 'replace', '=', 'None', ')', ':', '_found', '=', 'findFile', '(', 'output', ')', 'if', 'not', '_found', 'or', '(', '_found', 'and', 'replace', ')', ':', 'shutil', '.', 'copy2', '(', 'input', ',', 'output', ')'] | Copy a file whole from input to output. | ['Copy', 'a', 'file', 'whole', 'from', 'input', 'to', 'output', '.'] | train | https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/fileutil.py#L1076-L1081 |
6,413 | fstab50/metal | metal/chkrootkit.py | compile_binary | def compile_binary(source):
"""
Prepare chkrootkit binary
$ tar xzvf chkrootkit.tar.gz
$ cd chkrootkit-0.52
$ make sense
sudo mv chkrootkit-0.52 /usr/local/chkrootkit
sudo ln -s
"""
cmd = 'make sense'
slink = '/usr/local/bin/chkrootkit'
target = '/usr/local/chkrootkit/chkrootkit'
# Tar Extraction
t = tarfile.open(source, 'r')
t.extractall(TMPDIR)
if isinstance(t.getnames(), list):
extract_dir = t.getnames()[0].split('/')[0]
os.chdir(TMPDIR + '/' + extract_dir)
logger.info('make output: \n%s' % subprocess.getoutput(cmd))
# move directory in place
os.rename(TMPDIR + '/' + extract_dir, 'usr/local/chkrootkit')
# create symlink to binary in directory
os.symlink(target, slink)
return True
return False | python | def compile_binary(source):
"""
Prepare chkrootkit binary
$ tar xzvf chkrootkit.tar.gz
$ cd chkrootkit-0.52
$ make sense
sudo mv chkrootkit-0.52 /usr/local/chkrootkit
sudo ln -s
"""
cmd = 'make sense'
slink = '/usr/local/bin/chkrootkit'
target = '/usr/local/chkrootkit/chkrootkit'
# Tar Extraction
t = tarfile.open(source, 'r')
t.extractall(TMPDIR)
if isinstance(t.getnames(), list):
extract_dir = t.getnames()[0].split('/')[0]
os.chdir(TMPDIR + '/' + extract_dir)
logger.info('make output: \n%s' % subprocess.getoutput(cmd))
# move directory in place
os.rename(TMPDIR + '/' + extract_dir, 'usr/local/chkrootkit')
# create symlink to binary in directory
os.symlink(target, slink)
return True
return False | ['def', 'compile_binary', '(', 'source', ')', ':', 'cmd', '=', "'make sense'", 'slink', '=', "'/usr/local/bin/chkrootkit'", 'target', '=', "'/usr/local/chkrootkit/chkrootkit'", '# Tar Extraction', 't', '=', 'tarfile', '.', 'open', '(', 'source', ',', "'r'", ')', 't', '.', 'extractall', '(', 'TMPDIR', ')', 'if', 'isinstance', '(', 't', '.', 'getnames', '(', ')', ',', 'list', ')', ':', 'extract_dir', '=', 't', '.', 'getnames', '(', ')', '[', '0', ']', '.', 'split', '(', "'/'", ')', '[', '0', ']', 'os', '.', 'chdir', '(', 'TMPDIR', '+', "'/'", '+', 'extract_dir', ')', 'logger', '.', 'info', '(', "'make output: \\n%s'", '%', 'subprocess', '.', 'getoutput', '(', 'cmd', ')', ')', '# move directory in place', 'os', '.', 'rename', '(', 'TMPDIR', '+', "'/'", '+', 'extract_dir', ',', "'usr/local/chkrootkit'", ')', '# create symlink to binary in directory', 'os', '.', 'symlink', '(', 'target', ',', 'slink', ')', 'return', 'True', 'return', 'False'] | Prepare chkrootkit binary
$ tar xzvf chkrootkit.tar.gz
$ cd chkrootkit-0.52
$ make sense
sudo mv chkrootkit-0.52 /usr/local/chkrootkit
sudo ln -s | ['Prepare', 'chkrootkit', 'binary', '$', 'tar', 'xzvf', 'chkrootkit', '.', 'tar', '.', 'gz', '$', 'cd', 'chkrootkit', '-', '0', '.', '52', '$', 'make', 'sense', 'sudo', 'mv', 'chkrootkit', '-', '0', '.', '52', '/', 'usr', '/', 'local', '/', 'chkrootkit', 'sudo', 'ln', '-', 's'] | train | https://github.com/fstab50/metal/blob/0488bbdd516a508909267cc44191f632e21156ba/metal/chkrootkit.py#L46-L70 |
6,414 | google/grr | grr/server/grr_response_server/databases/mysql_utils.py | ComponentsToPath | def ComponentsToPath(components):
"""Converts a list of path components to a canonical path representation.
Args:
components: A sequence of path components.
Returns:
A canonical MySQL path representation.
"""
precondition.AssertIterableType(components, Text)
for component in components:
if not component:
raise ValueError("Empty path component in: {}".format(components))
if "/" in component:
raise ValueError("Path component with '/' in: {}".format(components))
if components:
return "/" + "/".join(components)
else:
return "" | python | def ComponentsToPath(components):
"""Converts a list of path components to a canonical path representation.
Args:
components: A sequence of path components.
Returns:
A canonical MySQL path representation.
"""
precondition.AssertIterableType(components, Text)
for component in components:
if not component:
raise ValueError("Empty path component in: {}".format(components))
if "/" in component:
raise ValueError("Path component with '/' in: {}".format(components))
if components:
return "/" + "/".join(components)
else:
return "" | ['def', 'ComponentsToPath', '(', 'components', ')', ':', 'precondition', '.', 'AssertIterableType', '(', 'components', ',', 'Text', ')', 'for', 'component', 'in', 'components', ':', 'if', 'not', 'component', ':', 'raise', 'ValueError', '(', '"Empty path component in: {}"', '.', 'format', '(', 'components', ')', ')', 'if', '"/"', 'in', 'component', ':', 'raise', 'ValueError', '(', '"Path component with \'/\' in: {}"', '.', 'format', '(', 'components', ')', ')', 'if', 'components', ':', 'return', '"/"', '+', '"/"', '.', 'join', '(', 'components', ')', 'else', ':', 'return', '""'] | Converts a list of path components to a canonical path representation.
Args:
components: A sequence of path components.
Returns:
A canonical MySQL path representation. | ['Converts', 'a', 'list', 'of', 'path', 'components', 'to', 'a', 'canonical', 'path', 'representation', '.'] | train | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_utils.py#L141-L161 |
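A short usage sketch that follows directly from the checks in `ComponentsToPath` above: components are joined with a leading slash, an empty sequence maps to the empty string, and empty or slash-containing components are rejected:

```python
assert ComponentsToPath(["home", "user", "file.txt"]) == "/home/user/file.txt"
assert ComponentsToPath([]) == ""

# Both of the following raise ValueError before any joining happens:
#   ComponentsToPath(["home", ""])        # empty path component
#   ComponentsToPath(["home/user"])       # component containing '/'
```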
6,415 | juju/python-libjuju | juju/model.py | _Observer.cares_about | def cares_about(self, delta):
"""Return True if this observer "cares about" (i.e. wants to be
called) for this delta.
"""
if (self.entity_id and delta.get_id() and
not re.match(self.entity_id, str(delta.get_id()))):
return False
if self.entity_type and self.entity_type != delta.entity:
return False
if self.action and self.action != delta.type:
return False
if self.predicate and not self.predicate(delta):
return False
return True | python | def cares_about(self, delta):
"""Return True if this observer "cares about" (i.e. wants to be
called) for this delta.
"""
if (self.entity_id and delta.get_id() and
not re.match(self.entity_id, str(delta.get_id()))):
return False
if self.entity_type and self.entity_type != delta.entity:
return False
if self.action and self.action != delta.type:
return False
if self.predicate and not self.predicate(delta):
return False
return True | ['def', 'cares_about', '(', 'self', ',', 'delta', ')', ':', 'if', '(', 'self', '.', 'entity_id', 'and', 'delta', '.', 'get_id', '(', ')', 'and', 'not', 're', '.', 'match', '(', 'self', '.', 'entity_id', ',', 'str', '(', 'delta', '.', 'get_id', '(', ')', ')', ')', ')', ':', 'return', 'False', 'if', 'self', '.', 'entity_type', 'and', 'self', '.', 'entity_type', '!=', 'delta', '.', 'entity', ':', 'return', 'False', 'if', 'self', '.', 'action', 'and', 'self', '.', 'action', '!=', 'delta', '.', 'type', ':', 'return', 'False', 'if', 'self', '.', 'predicate', 'and', 'not', 'self', '.', 'predicate', '(', 'delta', ')', ':', 'return', 'False', 'return', 'True'] | Return True if this observer "cares about" (i.e. wants to be
called) for this delta. | ['Return', 'True', 'if', 'this', 'observer', 'cares', 'about', '(', 'i', '.', 'e', '.', 'wants', 'to', 'be', 'called', ')', 'for', 'a', 'this', 'delta', '.'] | train | https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/model.py#L62-L80 |
6,416 | thespacedoctor/rockAtlas | rockAtlas/positions/orbfitPositions.py | orbfitPositions.get | def get(self,
singleExposure=False):
"""
*get the orbfitPositions object*
**Key Arguments:**
- ``singleExposure`` -- only execute for a single exposure (useful for debugging)
**Return:**
- None
**Usage:**
See class docstring
"""
self.log.info('starting the ``get`` method')
if singleExposure:
batchSize = 1
else:
batchSize = int(self.settings["orbfit"]["batch size"])
exposureCount = 1
while exposureCount > 0:
expsoureObjects, astorbString, exposureCount = self._get_exposures_requiring_orbfit_positions(
batchSize=batchSize)
if exposureCount:
orbfitPositions = self._get_orbfit_positions(
expsoureObjects, astorbString)
self._add_orbfit_eph_to_database(
orbfitPositions, expsoureObjects)
if singleExposure:
exposureCount = 0
self.log.info('completed the ``get`` method')
return None | python | def get(self,
singleExposure=False):
"""
*get the orbfitPositions object*
**Key Arguments:**
- ``singleExposure`` -- only execute for a single exposure (useful for debugging)
**Return:**
- None
**Usage:**
See class docstring
"""
self.log.info('starting the ``get`` method')
if singleExposure:
batchSize = 1
else:
batchSize = int(self.settings["orbfit"]["batch size"])
exposureCount = 1
while exposureCount > 0:
expsoureObjects, astorbString, exposureCount = self._get_exposures_requiring_orbfit_positions(
batchSize=batchSize)
if exposureCount:
orbfitPositions = self._get_orbfit_positions(
expsoureObjects, astorbString)
self._add_orbfit_eph_to_database(
orbfitPositions, expsoureObjects)
if singleExposure:
exposureCount = 0
self.log.info('completed the ``get`` method')
return None | ['def', 'get', '(', 'self', ',', 'singleExposure', '=', 'False', ')', ':', 'self', '.', 'log', '.', 'info', '(', "'starting the ``get`` method'", ')', 'if', 'singleExposure', ':', 'batchSize', '=', '1', 'else', ':', 'batchSize', '=', 'int', '(', 'self', '.', 'settings', '[', '"orbfit"', ']', '[', '"batch size"', ']', ')', 'exposureCount', '=', '1', 'while', 'exposureCount', '>', '0', ':', 'expsoureObjects', ',', 'astorbString', ',', 'exposureCount', '=', 'self', '.', '_get_exposures_requiring_orbfit_positions', '(', 'batchSize', '=', 'batchSize', ')', 'if', 'exposureCount', ':', 'orbfitPositions', '=', 'self', '.', '_get_orbfit_positions', '(', 'expsoureObjects', ',', 'astorbString', ')', 'self', '.', '_add_orbfit_eph_to_database', '(', 'orbfitPositions', ',', 'expsoureObjects', ')', 'if', 'singleExposure', ':', 'exposureCount', '=', '0', 'self', '.', 'log', '.', 'info', '(', "'completed the ``get`` method'", ')', 'return', 'None'] | *get the orbfitPositions object*
**Key Arguments:**
- ``singleExposure`` -- only execute for a single exposure (useful for debugging)
**Return:**
- None
**Usage:**
See class docstring | ['*', 'get', 'the', 'orbfitPositions', 'object', '*'] | train | https://github.com/thespacedoctor/rockAtlas/blob/062ecaa95ab547efda535aa33165944f13c621de/rockAtlas/positions/orbfitPositions.py#L84-L119 |
6,417 | PMBio/limix-backup | limix/scripts/limix_runner.py | LIMIX_runner.run_experiment | def run_experiment(self):
"""
Run the job specified in experiment_script
"""
data=self.data
options=self.options
result=self.result
command = open(self.options.experiment_script).read()
result["experiment_script"]=command
t0=time.time()
exec(command) #creates variable result
t1=time.time()
print(("Elapsed time for running the experiment is %.2f seconds" % (t1-t0)))
self.result=result
return self.result | python | def run_experiment(self):
"""
Run the job specified in experiment_script
"""
data=self.data
options=self.options
result=self.result
command = open(self.options.experiment_script).read()
result["experiment_script"]=command
t0=time.time()
exec(command) #creates variable result
t1=time.time()
print(("Elapsed time for running the experiment is %.2f seconds" % (t1-t0)))
self.result=result
return self.result | ['def', 'run_experiment', '(', 'self', ')', ':', 'data', '=', 'self', '.', 'data', 'options', '=', 'self', '.', 'options', 'result', '=', 'self', '.', 'result', 'command', '=', 'open', '(', 'self', '.', 'options', '.', 'experiment_script', ')', '.', 'read', '(', ')', 'result', '[', '"experiment_script"', ']', '=', 'command', 't0', '=', 'time', '.', 'time', '(', ')', 'exec', '(', 'command', ')', '#creates variable result', 't1', '=', 'time', '.', 'time', '(', ')', 'print', '(', '(', '"Elapsed time for running the experiment is %.2f seconds"', '%', '(', 't1', '-', 't0', ')', ')', ')', 'self', '.', 'result', '=', 'result', 'return', 'self', '.', 'result'] | Run the job specified in experiment_script | ['Run', 'the', 'job', 'specified', 'in', 'experiment_script'] | train | https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/scripts/limix_runner.py#L66-L81 |
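Because `run_experiment` above `exec`s the script inside the method's local scope, the script can read the local names `data` and `options` and is expected to assign into `result`. A minimal, hypothetical `experiment_script` illustrating that contract (the attribute names and computation are placeholders):

```python
# Contents of a hypothetical experiment_script file.
# `data`, `options` and `result` are locals injected by LIMIX_runner.run_experiment.
result["n_samples"] = len(data)            # placeholder computation on the loaded data
result["notes"] = "ran with default options"
```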
6,418 | ramrod-project/database-brain | schema/brain/binary/filesystem.py | BrainStore.create | def create(self, path, mode): # pragma: no cover
"""
This is currently a read-only filesystem.
GetAttr will return a stat for everything
if getattr raises FuseOSError(ENOENT)
OS may call this function and the write function
"""
# print("create {}".format(path))
now_time = time()
with self.attr_lock:
base = NoStat()
base.staged = True
base.st_mode = stat.S_IFREG | OBJ_PERMISSION
base.st_nlink = 1
base.st_size = -1
self.attr[path] = {TIMESTAMP_KEY: now_time,
BASE_KEY: base,
STAGED_KEY: BytesIO()}
return mode | python | def create(self, path, mode): # pragma: no cover
"""
This is currently a read-only filesystem.
GetAttr will return a stat for everything
if getattr raises FuseOSError(ENOENT)
OS may call this function and the write function
"""
# print("create {}".format(path))
now_time = time()
with self.attr_lock:
base = NoStat()
base.staged = True
base.st_mode = stat.S_IFREG | OBJ_PERMISSION
base.st_nlink = 1
base.st_size = -1
self.attr[path] = {TIMESTAMP_KEY: now_time,
BASE_KEY: base,
STAGED_KEY: BytesIO()}
return mode | ['def', 'create', '(', 'self', ',', 'path', ',', 'mode', ')', ':', '# pragma: no cover', '# print("create {}".format(path))', 'now_time', '=', 'time', '(', ')', 'with', 'self', '.', 'attr_lock', ':', 'base', '=', 'NoStat', '(', ')', 'base', '.', 'staged', '=', 'True', 'base', '.', 'st_mode', '=', 'stat', '.', 'S_IFREG', '|', 'OBJ_PERMISSION', 'base', '.', 'st_nlink', '=', '1', 'base', '.', 'st_size', '=', '-', '1', 'self', '.', 'attr', '[', 'path', ']', '=', '{', 'TIMESTAMP_KEY', ':', 'now_time', ',', 'BASE_KEY', ':', 'base', ',', 'STAGED_KEY', ':', 'BytesIO', '(', ')', '}', 'return', 'mode'] | This is currently a read-only filessytem.
GetAttr will return a stat for everything
if getattr raises FuseOSError(ENOENT)
OS may call this function and the write function | ['This', 'is', 'currently', 'a', 'read', '-', 'only', 'filessytem', '.', 'GetAttr', 'will', 'return', 'a', 'stat', 'for', 'everything', 'if', 'getattr', 'raises', 'FuseOSError', '(', 'ENOENT', ')', 'OS', 'may', 'call', 'this', 'function', 'and', 'the', 'write', 'function'] | train | https://github.com/ramrod-project/database-brain/blob/b024cb44f34cabb9d80af38271ddb65c25767083/schema/brain/binary/filesystem.py#L149-L167 |
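# Editor's note: a standalone sketch of how create() above stages a new in-memory
# file entry; plain dicts stand in for NoStat and the fusepy machinery, and
# OBJ_PERMISSION is an assumed constant from the real module.
import stat
from io import BytesIO
from time import time

OBJ_PERMISSION = 0o644
attr = {}
path = "/staging/example.bin"   # hypothetical path
attr[path] = {
    "timestamp": time(),
    "base": {"st_mode": stat.S_IFREG | OBJ_PERMISSION, "st_nlink": 1, "st_size": -1},
    "staged": BytesIO(),        # writes accumulate here until the file is persisted
}
print(oct(attr[path]["base"]["st_mode"]))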
6,419 | jilljenn/tryalgo | tryalgo/partition_refinement.py | PartitionItem.remove | def remove(self):
"""remove item from its class
"""
DoubleLinkedListItem.remove(self) # remove from double linked list
if self.succ is self: # list was a singleton
self.theclass.items = None # class is empty
elif self.theclass.items is self: # oops, we removed the head
self.theclass.items = self.succ | python | def remove(self):
"""remove item from its class
"""
DoubleLinkedListItem.remove(self) # remove from double linked list
if self.succ is self: # list was a singleton
self.theclass.items = None # class is empty
elif self.theclass.items is self: # oops, we removed the head
self.theclass.items = self.succ | ['def', 'remove', '(', 'self', ')', ':', 'DoubleLinkedListItem', '.', 'remove', '(', 'self', ')', '# remove from double linked list', 'if', 'self', '.', 'succ', 'is', 'self', ':', '# list was a singleton', 'self', '.', 'theclass', '.', 'items', '=', 'None', '# class is empty', 'elif', 'self', '.', 'theclass', '.', 'items', 'is', 'self', ':', '# oups we removed the head', 'self', '.', 'theclass', '.', 'items', '=', 'self', '.', 'succ'] | remove item from its class | ['remove', 'item', 'from', 'its', 'class'] | train | https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/partition_refinement.py#L77-L84 |
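# Editor's note: a self-contained sketch of the two edge cases handled above
# (singleton list, and removal of the class head) on a minimal circular list;
# this is an illustration, not the real tryalgo classes.
class Item:
    def __init__(self, val):
        self.val = val
        self.prec = self.succ = self          # circular: alone at first

    def insert_after(self, other):
        other.prec, other.succ = self, self.succ
        self.succ.prec = other
        self.succ = other

    def unlink(self):                         # relink the neighbours only
        self.prec.succ = self.succ
        self.succ.prec = self.prec

class Group:
    def __init__(self, first):
        self.items = first

    def remove(self, item):
        item.unlink()
        if item.succ is item:                 # list was a singleton
            self.items = None
        elif self.items is item:              # we removed the head
            self.items = item.succ

a, b = Item("a"), Item("b")
a.insert_after(b)
g = Group(a)
g.remove(a)
print(g.items.val)                            # "b": the head moved to the survivor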
6,420 | obriencj/python-javatools | javatools/manifest.py | multi_path_generator | def multi_path_generator(pathnames):
"""
yields (name,chunkgen) for all of the files found under the list
of pathnames given. This is recursive, so directories will have
their contents emitted. chunkgen is a function that can be called and
iterated over to obtain the contents of the file in multiple
reads.
"""
for pathname in pathnames:
if isdir(pathname):
for entry in directory_generator(pathname):
yield entry
else:
yield pathname, file_chunk(pathname) | python | def multi_path_generator(pathnames):
"""
yields (name,chunkgen) for all of the files found under the list
of pathnames given. This is recursive, so directories will have
their contents emitted. chunkgen is a function that can be called and
iterated over to obtain the contents of the file in multiple
reads.
"""
for pathname in pathnames:
if isdir(pathname):
for entry in directory_generator(pathname):
yield entry
else:
yield pathname, file_chunk(pathname) | ['def', 'multi_path_generator', '(', 'pathnames', ')', ':', 'for', 'pathname', 'in', 'pathnames', ':', 'if', 'isdir', '(', 'pathname', ')', ':', 'for', 'entry', 'in', 'directory_generator', '(', 'pathname', ')', ':', 'yield', 'entry', 'else', ':', 'yield', 'pathname', ',', 'file_chunk', '(', 'pathname', ')'] | yields (name,chunkgen) for all of the files found under the list
of pathnames given. This is recursive, so directories will have
their contents emitted. chunkgen is a function that can be called and
iterated over to obtain the contents of the file in multiple
reads. | ['yields', '(', 'name', 'chunkgen', ')', 'for', 'all', 'of', 'the', 'files', 'found', 'under', 'the', 'list', 'of', 'pathnames', 'given', '.', 'This', 'is', 'recursive', 'so', 'directories', 'will', 'have', 'their', 'contents', 'emitted', '.', 'chunkgen', 'is', 'a', 'function', 'that', 'can', 'called', 'and', 'iterated', 'over', 'to', 'obtain', 'the', 'contents', 'of', 'the', 'file', 'in', 'multiple', 'reads', '.'] | train | https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/manifest.py#L860-L874 |
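# Editor's note: a self-contained sketch of the (name, chunk-generator) contract
# described above, using os.walk in place of the javatools helpers.
import os

def file_chunk(path, size=8192):
    def chunks():
        with open(path, "rb") as fd:
            while True:
                buf = fd.read(size)
                if not buf:
                    break
                yield buf
    return chunks

def walk_paths(pathnames):
    for pathname in pathnames:
        if os.path.isdir(pathname):
            for root, _dirs, files in os.walk(pathname):
                for fname in files:
                    full = os.path.join(root, fname)
                    yield full, file_chunk(full)
        else:
            yield pathname, file_chunk(pathname)

for name, chunkgen in walk_paths(["."]):
    total = sum(len(chunk) for chunk in chunkgen())   # chunkgen can be called and iterated
    print(name, total)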
6,421 | ph4r05/monero-serialize | monero_serialize/xmrrpc.py | Blobber.blobize | async def blobize(self, elem=None, elem_type=None, params=None):
"""
Main blobbing
:param elem:
:param elem_type:
:param params:
:return:
"""
if self.writing:
await self.field(elem=elem, elem_type=elem_type, params=params)
return bytes(self.iobj.buffer)
else:
return await self.field(elem=elem, elem_type=elem_type, params=params) | python | async def blobize(self, elem=None, elem_type=None, params=None):
"""
Main blobbing
:param elem:
:param elem_type:
:param params:
:return:
"""
if self.writing:
await self.field(elem=elem, elem_type=elem_type, params=params)
return bytes(self.iobj.buffer)
else:
return await self.field(elem=elem, elem_type=elem_type, params=params) | ['async', 'def', 'blobize', '(', 'self', ',', 'elem', '=', 'None', ',', 'elem_type', '=', 'None', ',', 'params', '=', 'None', ')', ':', 'if', 'self', '.', 'writing', ':', 'await', 'self', '.', 'field', '(', 'elem', '=', 'elem', ',', 'elem_type', '=', 'elem_type', ',', 'params', '=', 'params', ')', 'return', 'bytes', '(', 'self', '.', 'iobj', '.', 'buffer', ')', 'else', ':', 'return', 'await', 'self', '.', 'field', '(', 'elem', '=', 'elem', ',', 'elem_type', '=', 'elem_type', ',', 'params', '=', 'params', ')'] | Main blobbing
:param elem:
:param elem_type:
:param params:
:return: | ['Main', 'blobbing', ':', 'param', 'elem', ':', ':', 'param', 'elem_type', ':', ':', 'param', 'params', ':', ':', 'return', ':'] | train | https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrrpc.py#L914-L926 |
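# Editor's note: a toy stand-in illustrating the write/read duality of blobize()
# above; the real Blobber is constructed around a monero_serialize archive object.
import asyncio

class TinyBlobber:
    def __init__(self, writing, buffer=b""):
        self.writing = writing
        self.buffer = bytearray(buffer)

    async def field(self, elem=None):
        if self.writing:
            self.buffer += bytes([elem])
        else:
            return self.buffer[0]

    async def blobize(self, elem=None):
        if self.writing:
            await self.field(elem)
            return bytes(self.buffer)     # writing: return the serialized bytes
        return await self.field(elem)     # reading: return the parsed value

async def main():
    blob = await TinyBlobber(writing=True).blobize(7)
    value = await TinyBlobber(writing=False, buffer=blob).blobize()
    print(blob, value)

asyncio.run(main())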
6,422 | arne-cl/discoursegraphs | src/discoursegraphs/discoursegraph.py | create_token_mapping | def create_token_mapping(docgraph_with_old_names, docgraph_with_new_names,
verbose=False):
"""
given two document graphs which annotate the same text and which use the
same tokenization, creates a dictionary with a mapping from the token
IDs used in the first graph to the token IDs used in the second graph.
Parameters
----------
docgraph_with_old_names : DiscourseDocumentGraph
a document graph with token IDs that will be replaced later on
docgraph_with_new_names : DiscourseDocumentGraph
a document graph with token IDs that will replace the token IDs
used in ``docgraph_with_old_names`` later on
Returns
-------
old2new : dict
maps from a token ID used in ``docgraph_with_old_names`` to the token
ID used in ``docgraph_with_new_names`` to reference the same token
"""
def kwic_string(docgraph, keyword_index):
tokens = [tok for (tokid, tok) in list(docgraph.get_tokens())]
before, keyword, after = get_kwic(tokens, keyword_index)
return "{0} (Index: {1}): {2} [[{3}]] {4}\n".format(
docgraph.name, keyword_index, ' '.join(before), keyword,
' '.join(after))
# generators of (token ID, token) tuples
old_token_gen = docgraph_with_old_names.get_tokens()
new_token_gen = docgraph_with_new_names.get_tokens()
old2new = {}
for i, (new_tok_id, new_tok) in enumerate(new_token_gen):
old_tok_id, old_tok = old_token_gen.next()
if new_tok != old_tok: # token mismatch
if verbose:
raise ValueError(u"Tokenization mismatch:\n{0}{1}".format(
kwic_string(docgraph_with_old_names, i),
kwic_string(docgraph_with_new_names, i)))
raise ValueError(
u"Tokenization mismatch: {0} ({1}) vs. {2} ({3})\n"
"\t{4} != {5}".format(
docgraph_with_new_names.name, docgraph_with_new_names.ns,
docgraph_with_old_names.name, docgraph_with_old_names.ns,
new_tok, old_tok).encode('utf-8'))
else:
old2new[old_tok_id] = new_tok_id
return old2new | python | def create_token_mapping(docgraph_with_old_names, docgraph_with_new_names,
verbose=False):
"""
given two document graphs which annotate the same text and which use the
same tokenization, creates a dictionary with a mapping from the token
IDs used in the first graph to the token IDs used in the second graph.
Parameters
----------
docgraph_with_old_names : DiscourseDocumentGraph
a document graph with token IDs that will be replaced later on
docgraph_with_new_names : DiscourseDocumentGraph
a document graph with token IDs that will replace the token IDs
used in ``docgraph_with_old_names`` later on
Returns
-------
old2new : dict
maps from a token ID used in ``docgraph_with_old_names`` to the token
ID used in ``docgraph_with_new_names`` to reference the same token
"""
def kwic_string(docgraph, keyword_index):
tokens = [tok for (tokid, tok) in list(docgraph.get_tokens())]
before, keyword, after = get_kwic(tokens, keyword_index)
return "{0} (Index: {1}): {2} [[{3}]] {4}\n".format(
docgraph.name, keyword_index, ' '.join(before), keyword,
' '.join(after))
# generators of (token ID, token) tuples
old_token_gen = docgraph_with_old_names.get_tokens()
new_token_gen = docgraph_with_new_names.get_tokens()
old2new = {}
for i, (new_tok_id, new_tok) in enumerate(new_token_gen):
old_tok_id, old_tok = old_token_gen.next()
if new_tok != old_tok: # token mismatch
if verbose:
raise ValueError(u"Tokenization mismatch:\n{0}{1}".format(
kwic_string(docgraph_with_old_names, i),
kwic_string(docgraph_with_new_names, i)))
raise ValueError(
u"Tokenization mismatch: {0} ({1}) vs. {2} ({3})\n"
"\t{4} != {5}".format(
docgraph_with_new_names.name, docgraph_with_new_names.ns,
docgraph_with_old_names.name, docgraph_with_old_names.ns,
new_tok, old_tok).encode('utf-8'))
else:
old2new[old_tok_id] = new_tok_id
return old2new | ['def', 'create_token_mapping', '(', 'docgraph_with_old_names', ',', 'docgraph_with_new_names', ',', 'verbose', '=', 'False', ')', ':', 'def', 'kwic_string', '(', 'docgraph', ',', 'keyword_index', ')', ':', 'tokens', '=', '[', 'tok', 'for', '(', 'tokid', ',', 'tok', ')', 'in', 'list', '(', 'docgraph', '.', 'get_tokens', '(', ')', ')', ']', 'before', ',', 'keyword', ',', 'after', '=', 'get_kwic', '(', 'tokens', ',', 'keyword_index', ')', 'return', '"{0} (Index: {1}): {2} [[{3}]] {4}\\n"', '.', 'format', '(', 'docgraph', '.', 'name', ',', 'keyword_index', ',', "' '", '.', 'join', '(', 'before', ')', ',', 'keyword', ',', "' '", '.', 'join', '(', 'after', ')', ')', '# generators of (token ID, token) tuples', 'old_token_gen', '=', 'docgraph_with_old_names', '.', 'get_tokens', '(', ')', 'new_token_gen', '=', 'docgraph_with_new_names', '.', 'get_tokens', '(', ')', 'old2new', '=', '{', '}', 'for', 'i', ',', '(', 'new_tok_id', ',', 'new_tok', ')', 'in', 'enumerate', '(', 'new_token_gen', ')', ':', 'old_tok_id', ',', 'old_tok', '=', 'old_token_gen', '.', 'next', '(', ')', 'if', 'new_tok', '!=', 'old_tok', ':', '# token mismatch', 'if', 'verbose', ':', 'raise', 'ValueError', '(', 'u"Tokenization mismatch:\\n{0}{1}"', '.', 'format', '(', 'kwic_string', '(', 'docgraph_with_old_names', ',', 'i', ')', ',', 'kwic_string', '(', 'docgraph_with_new_names', ',', 'i', ')', ')', ')', 'raise', 'ValueError', '(', 'u"Tokenization mismatch: {0} ({1}) vs. {2} ({3})\\n"', '"\\t{4} != {5}"', '.', 'format', '(', 'docgraph_with_new_names', '.', 'name', ',', 'docgraph_with_new_names', '.', 'ns', ',', 'docgraph_with_old_names', '.', 'name', ',', 'docgraph_with_old_names', '.', 'ns', ',', 'new_tok', ',', 'old_tok', ')', '.', 'encode', '(', "'utf-8'", ')', ')', 'else', ':', 'old2new', '[', 'old_tok_id', ']', '=', 'new_tok_id', 'return', 'old2new'] | given two document graphs which annotate the same text and which use the
same tokenization, creates a dictionary with a mapping from the token
IDs used in the first graph to the token IDs used in the second graph.
Parameters
----------
docgraph_with_old_names : DiscourseDocumentGraph
a document graph with token IDs that will be replaced later on
docgraph_with_new_names : DiscourseDocumentGraph
a document graph with token IDs that will replace the token IDs
used in ``docgraph_with_old_names`` later on
Returns
-------
old2new : dict
maps from a token ID used in ``docgraph_with_old_names`` to the token
ID used in ``docgraph_with_new_names`` to reference the same token | ['given', 'two', 'document', 'graphs', 'which', 'annotate', 'the', 'same', 'text', 'and', 'which', 'use', 'the', 'same', 'tokenization', 'creates', 'a', 'dictionary', 'with', 'a', 'mapping', 'from', 'the', 'token', 'IDs', 'used', 'in', 'the', 'first', 'graph', 'to', 'the', 'token', 'IDs', 'used', 'in', 'the', 'second', 'graph', '.'] | train | https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/discoursegraph.py#L790-L838 |
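# Editor's note: a stripped-down sketch of the mapping logic above, using plain
# (token_id, token) lists in place of document graphs; the IDs are made up.
old = [("old_1", "Das"), ("old_2", "Haus")]
new = [("new_1", "Das"), ("new_2", "Haus")]

old2new = {}
for (old_id, old_tok), (new_id, new_tok) in zip(old, new):
    if old_tok != new_tok:
        raise ValueError("Tokenization mismatch: {0} != {1}".format(old_tok, new_tok))
    old2new[old_id] = new_id
print(old2new)   # {'old_1': 'new_1', 'old_2': 'new_2'}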
6,423 | merll/docker-map | dockermap/map/client.py | MappingDockerClient.stop | def stop(self, container, instances=None, map_name=None, **kwargs):
"""
Stops instances for a container configuration.
:param container: Container name.
:type container: unicode | str
:param instances: Instance names to stop. If not specified, will stop all instances as specified in the
configuration (or just one default instance).
:type instances: collections.Iterable[unicode | str | NoneType]
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param raise_on_error: Errors on stop and removal may result from Docker volume problems, that do not further
affect further actions. Such errors are always logged, but do not raise an exception unless this is set to
``True``. Please note that 404 errors (on non-existing containers) are always ignored on stop and removal.
:type raise_on_error: bool
:param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to
the main container stop.
:return: Return values of stopped containers.
:rtype: list[dockermap.map.runner.ActionOutput]
"""
return self.run_actions('stop', container, instances=instances, map_name=map_name, **kwargs) | python | def stop(self, container, instances=None, map_name=None, **kwargs):
"""
Stops instances for a container configuration.
:param container: Container name.
:type container: unicode | str
:param instances: Instance names to stop. If not specified, will stop all instances as specified in the
configuration (or just one default instance).
:type instances: collections.Iterable[unicode | str | NoneType]
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param raise_on_error: Errors on stop and removal may result from Docker volume problems, that do not further
affect further actions. Such errors are always logged, but do not raise an exception unless this is set to
``True``. Please note that 404 errors (on non-existing containers) are always ignored on stop and removal.
:type raise_on_error: bool
:param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to
the main container stop.
:return: Return values of stopped containers.
:rtype: list[dockermap.map.runner.ActionOutput]
"""
return self.run_actions('stop', container, instances=instances, map_name=map_name, **kwargs) | ['def', 'stop', '(', 'self', ',', 'container', ',', 'instances', '=', 'None', ',', 'map_name', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'return', 'self', '.', 'run_actions', '(', "'stop'", ',', 'container', ',', 'instances', '=', 'instances', ',', 'map_name', '=', 'map_name', ',', '*', '*', 'kwargs', ')'] | Stops instances for a container configuration.
:param container: Container name.
:type container: unicode | str
:param instances: Instance names to stop. If not specified, will stop all instances as specified in the
configuration (or just one default instance).
:type instances: collections.Iterable[unicode | str | NoneType]
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param raise_on_error: Errors on stop and removal may result from Docker volume problems, that do not further
affect further actions. Such errors are always logged, but do not raise an exception unless this is set to
``True``. Please note that 404 errors (on non-existing containers) are always ignored on stop and removal.
:type raise_on_error: bool
:param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to
the main container stop.
:return: Return values of stopped containers.
:rtype: list[dockermap.map.runner.ActionOutput] | ['Stops', 'instances', 'for', 'a', 'container', 'configuration', '.'] | train | https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/client.py#L294-L314 |
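# Editor's note: a hedged usage sketch; the map/config/instance names are
# hypothetical and `client` is assumed to be an already configured
# MappingDockerClient.
def stop_web_instances(client):
    results = client.stop('web', instances=['instance1'], map_name='example.map',
                          raise_on_error=False)
    for output in results:
        print(output)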
6,424 | mushkevych/scheduler | synergy/system/utils.py | copy_and_sum_families | def copy_and_sum_families(family_source, family_target):
""" methods iterates thru source family and copies its entries to target family
in case key already exists in both families - then the values are added"""
for every in family_source:
if every not in family_target:
family_target[every] = family_source[every]
else:
family_target[every] += family_source[every] | python | def copy_and_sum_families(family_source, family_target):
""" methods iterates thru source family and copies its entries to target family
in case key already exists in both families - then the values are added"""
for every in family_source:
if every not in family_target:
family_target[every] = family_source[every]
else:
family_target[every] += family_source[every] | ['def', 'copy_and_sum_families', '(', 'family_source', ',', 'family_target', ')', ':', 'for', 'every', 'in', 'family_source', ':', 'if', 'every', 'not', 'in', 'family_target', ':', 'family_target', '[', 'every', ']', '=', 'family_source', '[', 'every', ']', 'else', ':', 'family_target', '[', 'every', ']', '+=', 'family_source', '[', 'every', ']'] | methods iterates thru source family and copies its entries to target family
in case key already exists in both families - then the values are added | ['methods', 'iterates', 'thru', 'source', 'family', 'and', 'copies', 'its', 'entries', 'to', 'target', 'family', 'in', 'case', 'key', 'already', 'exists', 'in', 'both', 'families', '-', 'then', 'the', 'values', 'are', 'added'] | train | https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/system/utils.py#L65-L72 |
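# Editor's note: a quick, self-contained illustration of the summing merge above.
def copy_and_sum(source, target):
    for key in source:
        if key not in target:
            target[key] = source[key]
        else:
            target[key] += source[key]

target = {"a": 1, "b": 2}
copy_and_sum({"b": 3, "c": 4}, target)
print(target)   # {'a': 1, 'b': 5, 'c': 4}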
6,425 | RJT1990/pyflux | pyflux/tsm.py | TSM._bbvi_fit | def _bbvi_fit(self, posterior, optimizer='RMSProp', iterations=1000,
map_start=True, batch_size=12, mini_batch=None, learning_rate=0.001,
record_elbo=False, quiet_progress=False, **kwargs):
""" Performs Black Box Variational Inference
Parameters
----------
posterior : method
Hands bbvi_fit a posterior object
optimizer : string
Stochastic optimizer: one of RMSProp or ADAM.
iterations: int
How many iterations for BBVI
map_start : boolean
Whether to start values from a MAP estimate (if False, uses default starting values)
Returns
----------
BBVIResults object
"""
# Starting values
phi = self.latent_variables.get_z_starting_values()
phi = kwargs.get('start',phi).copy() # If user supplied
if self.model_type not in ['GPNARX','GPR','GP','GASRank'] and map_start is True and mini_batch is None:
p = optimize.minimize(posterior, phi, method='L-BFGS-B') # PML starting values
start_loc = 0.8*p.x + 0.2*phi
else:
start_loc = phi
start_ses = None
# Starting values for approximate distribution
for i in range(len(self.latent_variables.z_list)):
approx_dist = self.latent_variables.z_list[i].q
if isinstance(approx_dist, Normal):
if start_ses is None:
self.latent_variables.z_list[i].q.mu0 = start_loc[i]
self.latent_variables.z_list[i].q.sigma0 = np.exp(-3.0)
else:
self.latent_variables.z_list[i].q.mu0 = start_loc[i]
self.latent_variables.z_list[i].q.sigma0 = start_ses[i]
q_list = [k.q for k in self.latent_variables.z_list]
if mini_batch is None:
bbvi_obj = BBVI(posterior, q_list, batch_size, optimizer, iterations, learning_rate, record_elbo, quiet_progress)
else:
bbvi_obj = BBVIM(posterior, self.neg_logposterior, q_list, mini_batch, optimizer, iterations, learning_rate, mini_batch, record_elbo, quiet_progress)
q, q_z, q_ses, elbo_records = bbvi_obj.run()
self.latent_variables.set_z_values(q_z,'BBVI',np.exp(q_ses),None)
for k in range(len(self.latent_variables.z_list)):
self.latent_variables.z_list[k].q = q[k]
self.latent_variables.estimation_method = 'BBVI'
theta, Y, scores, states, states_var, X_names = self._categorize_model_output(q_z)
# Change this in future
try:
latent_variables_store = self.latent_variables.copy()
except:
latent_variables_store = self.latent_variables
return BBVIResults(data_name=self.data_name, X_names=X_names, model_name=self.model_name,
model_type=self.model_type, latent_variables=latent_variables_store, data=Y, index=self.index,
multivariate_model=self.multivariate_model, objective_object=self.neg_logposterior,
method='BBVI', ses=q_ses, signal=theta, scores=scores, elbo_records=elbo_records,
z_hide=self._z_hide, max_lag=self.max_lag, states=states, states_var=states_var) | python | def _bbvi_fit(self, posterior, optimizer='RMSProp', iterations=1000,
map_start=True, batch_size=12, mini_batch=None, learning_rate=0.001,
record_elbo=False, quiet_progress=False, **kwargs):
""" Performs Black Box Variational Inference
Parameters
----------
posterior : method
Hands bbvi_fit a posterior object
optimizer : string
Stochastic optimizer: one of RMSProp or ADAM.
iterations: int
How many iterations for BBVI
map_start : boolean
Whether to start values from a MAP estimate (if False, uses default starting values)
Returns
----------
BBVIResults object
"""
# Starting values
phi = self.latent_variables.get_z_starting_values()
phi = kwargs.get('start',phi).copy() # If user supplied
if self.model_type not in ['GPNARX','GPR','GP','GASRank'] and map_start is True and mini_batch is None:
p = optimize.minimize(posterior, phi, method='L-BFGS-B') # PML starting values
start_loc = 0.8*p.x + 0.2*phi
else:
start_loc = phi
start_ses = None
# Starting values for approximate distribution
for i in range(len(self.latent_variables.z_list)):
approx_dist = self.latent_variables.z_list[i].q
if isinstance(approx_dist, Normal):
if start_ses is None:
self.latent_variables.z_list[i].q.mu0 = start_loc[i]
self.latent_variables.z_list[i].q.sigma0 = np.exp(-3.0)
else:
self.latent_variables.z_list[i].q.mu0 = start_loc[i]
self.latent_variables.z_list[i].q.sigma0 = start_ses[i]
q_list = [k.q for k in self.latent_variables.z_list]
if mini_batch is None:
bbvi_obj = BBVI(posterior, q_list, batch_size, optimizer, iterations, learning_rate, record_elbo, quiet_progress)
else:
bbvi_obj = BBVIM(posterior, self.neg_logposterior, q_list, mini_batch, optimizer, iterations, learning_rate, mini_batch, record_elbo, quiet_progress)
q, q_z, q_ses, elbo_records = bbvi_obj.run()
self.latent_variables.set_z_values(q_z,'BBVI',np.exp(q_ses),None)
for k in range(len(self.latent_variables.z_list)):
self.latent_variables.z_list[k].q = q[k]
self.latent_variables.estimation_method = 'BBVI'
theta, Y, scores, states, states_var, X_names = self._categorize_model_output(q_z)
# Change this in future
try:
latent_variables_store = self.latent_variables.copy()
except:
latent_variables_store = self.latent_variables
return BBVIResults(data_name=self.data_name, X_names=X_names, model_name=self.model_name,
model_type=self.model_type, latent_variables=latent_variables_store, data=Y, index=self.index,
multivariate_model=self.multivariate_model, objective_object=self.neg_logposterior,
method='BBVI', ses=q_ses, signal=theta, scores=scores, elbo_records=elbo_records,
z_hide=self._z_hide, max_lag=self.max_lag, states=states, states_var=states_var) | ['def', '_bbvi_fit', '(', 'self', ',', 'posterior', ',', 'optimizer', '=', "'RMSProp'", ',', 'iterations', '=', '1000', ',', 'map_start', '=', 'True', ',', 'batch_size', '=', '12', ',', 'mini_batch', '=', 'None', ',', 'learning_rate', '=', '0.001', ',', 'record_elbo', '=', 'False', ',', 'quiet_progress', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', '# Starting values', 'phi', '=', 'self', '.', 'latent_variables', '.', 'get_z_starting_values', '(', ')', 'phi', '=', 'kwargs', '.', 'get', '(', "'start'", ',', 'phi', ')', '.', 'copy', '(', ')', '# If user supplied', 'if', 'self', '.', 'model_type', 'not', 'in', '[', "'GPNARX'", ',', "'GPR'", ',', "'GP'", ',', "'GASRank'", ']', 'and', 'map_start', 'is', 'True', 'and', 'mini_batch', 'is', 'None', ':', 'p', '=', 'optimize', '.', 'minimize', '(', 'posterior', ',', 'phi', ',', 'method', '=', "'L-BFGS-B'", ')', '# PML starting values', 'start_loc', '=', '0.8', '*', 'p', '.', 'x', '+', '0.2', '*', 'phi', 'else', ':', 'start_loc', '=', 'phi', 'start_ses', '=', 'None', '# Starting values for approximate distribution', 'for', 'i', 'in', 'range', '(', 'len', '(', 'self', '.', 'latent_variables', '.', 'z_list', ')', ')', ':', 'approx_dist', '=', 'self', '.', 'latent_variables', '.', 'z_list', '[', 'i', ']', '.', 'q', 'if', 'isinstance', '(', 'approx_dist', ',', 'Normal', ')', ':', 'if', 'start_ses', 'is', 'None', ':', 'self', '.', 'latent_variables', '.', 'z_list', '[', 'i', ']', '.', 'q', '.', 'mu0', '=', 'start_loc', '[', 'i', ']', 'self', '.', 'latent_variables', '.', 'z_list', '[', 'i', ']', '.', 'q', '.', 'sigma0', '=', 'np', '.', 'exp', '(', '-', '3.0', ')', 'else', ':', 'self', '.', 'latent_variables', '.', 'z_list', '[', 'i', ']', '.', 'q', '.', 'mu0', '=', 'start_loc', '[', 'i', ']', 'self', '.', 'latent_variables', '.', 'z_list', '[', 'i', ']', '.', 'q', '.', 'sigma0', '=', 'start_ses', '[', 'i', ']', 'q_list', '=', '[', 'k', '.', 'q', 'for', 'k', 'in', 'self', '.', 'latent_variables', '.', 'z_list', ']', 'if', 'mini_batch', 'is', 'None', ':', 'bbvi_obj', '=', 'BBVI', '(', 'posterior', ',', 'q_list', ',', 'batch_size', ',', 'optimizer', ',', 'iterations', ',', 'learning_rate', ',', 'record_elbo', ',', 'quiet_progress', ')', 'else', ':', 'bbvi_obj', '=', 'BBVIM', '(', 'posterior', ',', 'self', '.', 'neg_logposterior', ',', 'q_list', ',', 'mini_batch', ',', 'optimizer', ',', 'iterations', ',', 'learning_rate', ',', 'mini_batch', ',', 'record_elbo', ',', 'quiet_progress', ')', 'q', ',', 'q_z', ',', 'q_ses', ',', 'elbo_records', '=', 'bbvi_obj', '.', 'run', '(', ')', 'self', '.', 'latent_variables', '.', 'set_z_values', '(', 'q_z', ',', "'BBVI'", ',', 'np', '.', 'exp', '(', 'q_ses', ')', ',', 'None', ')', 'for', 'k', 'in', 'range', '(', 'len', '(', 'self', '.', 'latent_variables', '.', 'z_list', ')', ')', ':', 'self', '.', 'latent_variables', '.', 'z_list', '[', 'k', ']', '.', 'q', '=', 'q', '[', 'k', ']', 'self', '.', 'latent_variables', '.', 'estimation_method', '=', "'BBVI'", 'theta', ',', 'Y', ',', 'scores', ',', 'states', ',', 'states_var', ',', 'X_names', '=', 'self', '.', '_categorize_model_output', '(', 'q_z', ')', '# Change this in future', 'try', ':', 'latent_variables_store', '=', 'self', '.', 'latent_variables', '.', 'copy', '(', ')', 'except', ':', 'latent_variables_store', '=', 'self', '.', 'latent_variables', 'return', 'BBVIResults', '(', 'data_name', '=', 'self', '.', 'data_name', ',', 'X_names', '=', 'X_names', ',', 'model_name', '=', 'self', '.', 
'model_name', ',', 'model_type', '=', 'self', '.', 'model_type', ',', 'latent_variables', '=', 'latent_variables_store', ',', 'data', '=', 'Y', ',', 'index', '=', 'self', '.', 'index', ',', 'multivariate_model', '=', 'self', '.', 'multivariate_model', ',', 'objective_object', '=', 'self', '.', 'neg_logposterior', ',', 'method', '=', "'BBVI'", ',', 'ses', '=', 'q_ses', ',', 'signal', '=', 'theta', ',', 'scores', '=', 'scores', ',', 'elbo_records', '=', 'elbo_records', ',', 'z_hide', '=', 'self', '.', '_z_hide', ',', 'max_lag', '=', 'self', '.', 'max_lag', ',', 'states', '=', 'states', ',', 'states_var', '=', 'states_var', ')'] | Performs Black Box Variational Inference
Parameters
----------
posterior : method
Hands bbvi_fit a posterior object
optimizer : string
Stochastic optimizer: one of RMSProp or ADAM.
iterations: int
How many iterations for BBVI
map_start : boolean
Whether to start values from a MAP estimate (if False, uses default starting values)
Returns
----------
BBVIResults object | ['Performs', 'Black', 'Box', 'Variational', 'Inference'] | train | https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/tsm.py#L112-L185 |
6,426 | fabioz/PyDev.Debugger | _pydevd_bundle/pydevd_comm.py | InternalThreadCommand.can_be_executed_by | def can_be_executed_by(self, thread_id):
'''By default, it must be in the same thread to be executed
'''
return self.thread_id == thread_id or self.thread_id.endswith('|' + thread_id) | python | def can_be_executed_by(self, thread_id):
'''By default, it must be in the same thread to be executed
'''
return self.thread_id == thread_id or self.thread_id.endswith('|' + thread_id) | ['def', 'can_be_executed_by', '(', 'self', ',', 'thread_id', ')', ':', 'return', 'self', '.', 'thread_id', '==', 'thread_id', 'or', 'self', '.', 'thread_id', '.', 'endswith', '(', "'|'", '+', 'thread_id', ')'] | By default, it must be in the same thread to be executed | ['By', 'default', 'it', 'must', 'be', 'in', 'the', 'same', 'thread', 'to', 'be', 'executed'] | train | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydevd_bundle/pydevd_comm.py#L458-L461 |
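# Editor's note: a tiny illustration of the matching rule above; the thread ids
# are made up.
def can_be_executed_by(command_thread_id, thread_id):
    return command_thread_id == thread_id or command_thread_id.endswith('|' + thread_id)

print(can_be_executed_by('pid_1|thread_9', 'thread_9'))   # True (composite id suffix)
print(can_be_executed_by('thread_9', 'thread_7'))         # False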
6,427 | proycon/pynlpl | pynlpl/datatypes.py | PriorityQueue.randomprune | def randomprune(self,n):
"""prune down to n items at random, disregarding their score"""
self.data = random.sample(self.data, n) | python | def randomprune(self,n):
"""prune down to n items at random, disregarding their score"""
self.data = random.sample(self.data, n) | ['def', 'randomprune', '(', 'self', ',', 'n', ')', ':', 'self', '.', 'data', '=', 'random', '.', 'sample', '(', 'self', '.', 'data', ',', 'n', ')'] | prune down to n items at random, disregarding their score | ['prune', 'down', 'to', 'n', 'items', 'at', 'random', 'disregarding', 'their', 'score'] | train | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/datatypes.py#L196-L198 |
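# Editor's note: the pruning above is just random.sample, which keeps n items
# chosen without replacement and ignores their scores.
import random

data = [(-5, 'a'), (-2, 'b'), (-9, 'c'), (-1, 'd')]
data = random.sample(data, 2)
print(data)   # e.g. [(-9, 'c'), (-2, 'b')]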
6,428 | openego/ding0 | ding0/grid/mv_grid/models/models.py | Route.allocate | def allocate(self, nodes, append=True):
# TODO: check docstring
"""Allocates all nodes from `nodes` list in this route
Parameters
----------
nodes : type
Desc
append : bool, defaults to True
Desc
"""
nodes_demand = 0
for node in [node for node in nodes]:
if node._allocation:
node._allocation.deallocate([node])
node._allocation = self
nodes_demand = nodes_demand + node.demand()
if append:
self._nodes.append(node)
else:
self._nodes.insert(0, node)
self._demand = self._demand + nodes_demand | python | def allocate(self, nodes, append=True):
# TODO: check docstring
"""Allocates all nodes from `nodes` list in this route
Parameters
----------
nodes : type
Desc
append : bool, defaults to True
Desc
"""
nodes_demand = 0
for node in [node for node in nodes]:
if node._allocation:
node._allocation.deallocate([node])
node._allocation = self
nodes_demand = nodes_demand + node.demand()
if append:
self._nodes.append(node)
else:
self._nodes.insert(0, node)
self._demand = self._demand + nodes_demand | ['def', 'allocate', '(', 'self', ',', 'nodes', ',', 'append', '=', 'True', ')', ':', '# TODO: check docstring', 'nodes_demand', '=', '0', 'for', 'node', 'in', '[', 'node', 'for', 'node', 'in', 'nodes', ']', ':', 'if', 'node', '.', '_allocation', ':', 'node', '.', '_allocation', '.', 'deallocate', '(', '[', 'node', ']', ')', 'node', '.', '_allocation', '=', 'self', 'nodes_demand', '=', 'nodes_demand', '+', 'node', '.', 'demand', '(', ')', 'if', 'append', ':', 'self', '.', '_nodes', '.', 'append', '(', 'node', ')', 'else', ':', 'self', '.', '_nodes', '.', 'insert', '(', '0', ',', 'node', ')', 'self', '.', '_demand', '=', 'self', '.', '_demand', '+', 'nodes_demand'] | Allocates all nodes from `nodes` list in this route
Parameters
----------
nodes : type
Desc
append : bool, defaults to True
Desc | ['Allocates', 'all', 'nodes', 'from', 'nodes', 'list', 'in', 'this', 'route', 'Parameters', '----------', 'nodes', ':', 'type', 'Desc', 'append', ':', 'bool', 'defaults', 'to', 'True', 'Desc'] | train | https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/grid/mv_grid/models/models.py#L160-L185 |
6,429 | atlassian-api/atlassian-python-api | atlassian/jira.py | Jira.delete_agile_board | def delete_agile_board(self, board_id):
"""
Delete agile board by id
:param board_id:
:return:
"""
url = 'rest/agile/1.0/board/{}'.format(str(board_id))
return self.delete(url) | python | def delete_agile_board(self, board_id):
"""
Delete agile board by id
:param board_id:
:return:
"""
url = 'rest/agile/1.0/board/{}'.format(str(board_id))
return self.delete(url) | ['def', 'delete_agile_board', '(', 'self', ',', 'board_id', ')', ':', 'url', '=', "'rest/agile/1.0/board/{}'", '.', 'format', '(', 'str', '(', 'board_id', ')', ')', 'return', 'self', '.', 'delete', '(', 'url', ')'] | Delete agile board by id
:param board_id:
:return: | ['Delete', 'agile', 'board', 'by', 'id', ':', 'param', 'board_id', ':', ':', 'return', ':'] | train | https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/jira.py#L1167-L1174 |
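# Editor's note: a hedged usage sketch; the server URL, credentials and board id
# are placeholders.
from atlassian import Jira

jira = Jira(url='https://jira.example.com', username='bot', password='secret')
jira.delete_agile_board(42)   # sends DELETE rest/agile/1.0/board/42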
6,430 | vmware/pyvmomi | pyVmomi/DynamicTypeManagerHelper.py | DynamicTypeConstructor._ConvertParamType | def _ConvertParamType(self, paramType):
"""
Convert vmodl.reflect.DynamicTypeManager.ParamTypeInfo to pyVmomi param
definition
"""
if paramType:
name = paramType.name
version = paramType.version
aType = paramType.type
flags = self._ConvertAnnotations(paramType.annotation)
privId = paramType.privId
param = (name, aType, version, flags, privId)
else:
param = None
return param | python | def _ConvertParamType(self, paramType):
"""
Convert vmodl.reflect.DynamicTypeManager.ParamTypeInfo to pyVmomi param
definition
"""
if paramType:
name = paramType.name
version = paramType.version
aType = paramType.type
flags = self._ConvertAnnotations(paramType.annotation)
privId = paramType.privId
param = (name, aType, version, flags, privId)
else:
param = None
return param | ['def', '_ConvertParamType', '(', 'self', ',', 'paramType', ')', ':', 'if', 'paramType', ':', 'name', '=', 'paramType', '.', 'name', 'version', '=', 'paramType', '.', 'version', 'aType', '=', 'paramType', '.', 'type', 'flags', '=', 'self', '.', '_ConvertAnnotations', '(', 'paramType', '.', 'annotation', ')', 'privId', '=', 'paramType', '.', 'privId', 'param', '=', '(', 'name', ',', 'aType', ',', 'version', ',', 'flags', ',', 'privId', ')', 'else', ':', 'param', '=', 'None', 'return', 'param'] | Convert vmodl.reflect.DynamicTypeManager.ParamTypeInfo to pyVmomi param
definition | ['Convert', 'vmodl', '.', 'reflect', '.', 'DynamicTypeManager', '.', 'ParamTypeInfo', 'to', 'pyVmomi', 'param', 'definition'] | train | https://github.com/vmware/pyvmomi/blob/3ffcb23bf77d757175c0d5216ba9a25345d824cd/pyVmomi/DynamicTypeManagerHelper.py#L161-L175 |
6,431 | AustralianSynchrotron/lightflow | lightflow/models/task_signal.py | TaskSignal.start_dag | def start_dag(self, dag, *, data=None):
""" Schedule the execution of a dag by sending a signal to the workflow.
Args:
dag (Dag, str): The dag object or the name of the dag that should be started.
data (MultiTaskData): The data that should be passed on to the new dag.
Returns:
str: The name of the successfully started dag.
"""
return self._client.send(
Request(
action='start_dag',
payload={'name': dag.name if isinstance(dag, Dag) else dag,
'data': data if isinstance(data, MultiTaskData) else None}
)
).payload['dag_name'] | python | def start_dag(self, dag, *, data=None):
""" Schedule the execution of a dag by sending a signal to the workflow.
Args:
dag (Dag, str): The dag object or the name of the dag that should be started.
data (MultiTaskData): The data that should be passed on to the new dag.
Returns:
str: The name of the successfully started dag.
"""
return self._client.send(
Request(
action='start_dag',
payload={'name': dag.name if isinstance(dag, Dag) else dag,
'data': data if isinstance(data, MultiTaskData) else None}
)
).payload['dag_name'] | ['def', 'start_dag', '(', 'self', ',', 'dag', ',', '*', ',', 'data', '=', 'None', ')', ':', 'return', 'self', '.', '_client', '.', 'send', '(', 'Request', '(', 'action', '=', "'start_dag'", ',', 'payload', '=', '{', "'name'", ':', 'dag', '.', 'name', 'if', 'isinstance', '(', 'dag', ',', 'Dag', ')', 'else', 'dag', ',', "'data'", ':', 'data', 'if', 'isinstance', '(', 'data', ',', 'MultiTaskData', ')', 'else', 'None', '}', ')', ')', '.', 'payload', '[', "'dag_name'", ']'] | Schedule the execution of a dag by sending a signal to the workflow.
Args:
dag (Dag, str): The dag object or the name of the dag that should be started.
data (MultiTaskData): The data that should be passed on to the new dag.
Returns:
str: The name of the successfully started dag. | ['Schedule', 'the', 'execution', 'of', 'a', 'dag', 'by', 'sending', 'a', 'signal', 'to', 'the', 'workflow', '.'] | train | https://github.com/AustralianSynchrotron/lightflow/blob/dc53dbc1d961e20fb144273baca258060705c03e/lightflow/models/task_signal.py#L18-L34 |
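# Editor's note: a hedged usage sketch; `signal` stands for the TaskSignal a running
# task receives, and the dag name is hypothetical.
def spawn_followup(signal, data=None):
    dag_name = signal.start_dag('followup_dag', data=data)
    print('started dag:', dag_name)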
6,432 | ArchiveTeam/wpull | wpull/network/connection.py | BaseConnection.run_network_operation | def run_network_operation(self, task, wait_timeout=None,
close_timeout=None,
name='Network operation'):
'''Run the task and raise appropriate exceptions.
Coroutine.
'''
if wait_timeout is not None and close_timeout is not None:
raise Exception(
'Cannot use wait_timeout and close_timeout at the same time')
try:
if close_timeout is not None:
with self._close_timer.with_timeout():
data = yield from task
if self._close_timer.is_timeout():
raise NetworkTimedOut(
'{name} timed out.'.format(name=name))
else:
return data
elif wait_timeout is not None:
data = yield from asyncio.wait_for(task, wait_timeout)
return data
else:
return (yield from task)
except asyncio.TimeoutError as error:
self.close()
raise NetworkTimedOut(
'{name} timed out.'.format(name=name)) from error
except (tornado.netutil.SSLCertificateError, SSLVerificationError) \
as error:
self.close()
raise SSLVerificationError(
'{name} certificate error: {error}'
.format(name=name, error=error)) from error
except AttributeError as error:
self.close()
raise NetworkError(
'{name} network error: connection closed unexpectedly: {error}'
.format(name=name, error=error)) from error
except (socket.error, ssl.SSLError, OSError, IOError) as error:
self.close()
if isinstance(error, NetworkError):
raise
if error.errno == errno.ECONNREFUSED:
raise ConnectionRefused(
error.errno, os.strerror(error.errno)) from error
# XXX: This quality case brought to you by OpenSSL and Python.
# Example: _ssl.SSLError: [Errno 1] error:14094418:SSL
# routines:SSL3_READ_BYTES:tlsv1 alert unknown ca
error_string = str(error).lower()
if 'certificate' in error_string or 'unknown ca' in error_string:
raise SSLVerificationError(
'{name} certificate error: {error}'
.format(name=name, error=error)) from error
else:
if error.errno:
raise NetworkError(
error.errno, os.strerror(error.errno)) from error
else:
raise NetworkError(
'{name} network error: {error}'
.format(name=name, error=error)) from error | python | def run_network_operation(self, task, wait_timeout=None,
close_timeout=None,
name='Network operation'):
'''Run the task and raise appropriate exceptions.
Coroutine.
'''
if wait_timeout is not None and close_timeout is not None:
raise Exception(
'Cannot use wait_timeout and close_timeout at the same time')
try:
if close_timeout is not None:
with self._close_timer.with_timeout():
data = yield from task
if self._close_timer.is_timeout():
raise NetworkTimedOut(
'{name} timed out.'.format(name=name))
else:
return data
elif wait_timeout is not None:
data = yield from asyncio.wait_for(task, wait_timeout)
return data
else:
return (yield from task)
except asyncio.TimeoutError as error:
self.close()
raise NetworkTimedOut(
'{name} timed out.'.format(name=name)) from error
except (tornado.netutil.SSLCertificateError, SSLVerificationError) \
as error:
self.close()
raise SSLVerificationError(
'{name} certificate error: {error}'
.format(name=name, error=error)) from error
except AttributeError as error:
self.close()
raise NetworkError(
'{name} network error: connection closed unexpectedly: {error}'
.format(name=name, error=error)) from error
except (socket.error, ssl.SSLError, OSError, IOError) as error:
self.close()
if isinstance(error, NetworkError):
raise
if error.errno == errno.ECONNREFUSED:
raise ConnectionRefused(
error.errno, os.strerror(error.errno)) from error
# XXX: This quality case brought to you by OpenSSL and Python.
# Example: _ssl.SSLError: [Errno 1] error:14094418:SSL
# routines:SSL3_READ_BYTES:tlsv1 alert unknown ca
error_string = str(error).lower()
if 'certificate' in error_string or 'unknown ca' in error_string:
raise SSLVerificationError(
'{name} certificate error: {error}'
.format(name=name, error=error)) from error
else:
if error.errno:
raise NetworkError(
error.errno, os.strerror(error.errno)) from error
else:
raise NetworkError(
'{name} network error: {error}'
.format(name=name, error=error)) from error | ['def', 'run_network_operation', '(', 'self', ',', 'task', ',', 'wait_timeout', '=', 'None', ',', 'close_timeout', '=', 'None', ',', 'name', '=', "'Network operation'", ')', ':', 'if', 'wait_timeout', 'is', 'not', 'None', 'and', 'close_timeout', 'is', 'not', 'None', ':', 'raise', 'Exception', '(', "'Cannot use wait_timeout and close_timeout at the same time'", ')', 'try', ':', 'if', 'close_timeout', 'is', 'not', 'None', ':', 'with', 'self', '.', '_close_timer', '.', 'with_timeout', '(', ')', ':', 'data', '=', 'yield', 'from', 'task', 'if', 'self', '.', '_close_timer', '.', 'is_timeout', '(', ')', ':', 'raise', 'NetworkTimedOut', '(', "'{name} timed out.'", '.', 'format', '(', 'name', '=', 'name', ')', ')', 'else', ':', 'return', 'data', 'elif', 'wait_timeout', 'is', 'not', 'None', ':', 'data', '=', 'yield', 'from', 'asyncio', '.', 'wait_for', '(', 'task', ',', 'wait_timeout', ')', 'return', 'data', 'else', ':', 'return', '(', 'yield', 'from', 'task', ')', 'except', 'asyncio', '.', 'TimeoutError', 'as', 'error', ':', 'self', '.', 'close', '(', ')', 'raise', 'NetworkTimedOut', '(', "'{name} timed out.'", '.', 'format', '(', 'name', '=', 'name', ')', ')', 'from', 'error', 'except', '(', 'tornado', '.', 'netutil', '.', 'SSLCertificateError', ',', 'SSLVerificationError', ')', 'as', 'error', ':', 'self', '.', 'close', '(', ')', 'raise', 'SSLVerificationError', '(', "'{name} certificate error: {error}'", '.', 'format', '(', 'name', '=', 'name', ',', 'error', '=', 'error', ')', ')', 'from', 'error', 'except', 'AttributeError', 'as', 'error', ':', 'self', '.', 'close', '(', ')', 'raise', 'NetworkError', '(', "'{name} network error: connection closed unexpectedly: {error}'", '.', 'format', '(', 'name', '=', 'name', ',', 'error', '=', 'error', ')', ')', 'from', 'error', 'except', '(', 'socket', '.', 'error', ',', 'ssl', '.', 'SSLError', ',', 'OSError', ',', 'IOError', ')', 'as', 'error', ':', 'self', '.', 'close', '(', ')', 'if', 'isinstance', '(', 'error', ',', 'NetworkError', ')', ':', 'raise', 'if', 'error', '.', 'errno', '==', 'errno', '.', 'ECONNREFUSED', ':', 'raise', 'ConnectionRefused', '(', 'error', '.', 'errno', ',', 'os', '.', 'strerror', '(', 'error', '.', 'errno', ')', ')', 'from', 'error', '# XXX: This quality case brought to you by OpenSSL and Python.', '# Example: _ssl.SSLError: [Errno 1] error:14094418:SSL', '# routines:SSL3_READ_BYTES:tlsv1 alert unknown ca', 'error_string', '=', 'str', '(', 'error', ')', '.', 'lower', '(', ')', 'if', "'certificate'", 'in', 'error_string', 'or', "'unknown ca'", 'in', 'error_string', ':', 'raise', 'SSLVerificationError', '(', "'{name} certificate error: {error}'", '.', 'format', '(', 'name', '=', 'name', ',', 'error', '=', 'error', ')', ')', 'from', 'error', 'else', ':', 'if', 'error', '.', 'errno', ':', 'raise', 'NetworkError', '(', 'error', '.', 'errno', ',', 'os', '.', 'strerror', '(', 'error', '.', 'errno', ')', ')', 'from', 'error', 'else', ':', 'raise', 'NetworkError', '(', "'{name} network error: {error}'", '.', 'format', '(', 'name', '=', 'name', ',', 'error', '=', 'error', ')', ')', 'from', 'error'] | Run the task and raise appropriate exceptions.
Coroutine. | ['Run', 'the', 'task', 'and', 'raise', 'appropriate', 'exceptions', '.'] | train | https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/network/connection.py#L277-L345 |
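# Editor's note: a generic, runnable sketch of the wrap-and-translate pattern above
# (not wpull's actual class): await a coroutine under a timeout and convert the error.
import asyncio

class NetworkTimedOut(Exception):
    pass

async def run_with_timeout(task, wait_timeout, name='Network operation'):
    try:
        return await asyncio.wait_for(task, wait_timeout)
    except asyncio.TimeoutError as error:
        raise NetworkTimedOut('{name} timed out.'.format(name=name)) from error

async def slow():
    await asyncio.sleep(2)

try:
    asyncio.run(run_with_timeout(slow(), 0.1))
except NetworkTimedOut as exc:
    print(exc)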
6,433 | PlaidWeb/Publ | publ/image/local.py | fix_orientation | def fix_orientation(image):
""" adapted from https://stackoverflow.com/a/30462851/318857
Apply Image.transpose to ensure 0th row of pixels is at the visual
top of the image, and 0th column is the visual left-hand side.
Return the original image if unable to determine the orientation.
As per CIPA DC-008-2012, the orientation field contains an integer,
1 through 8. Other values are reserved.
"""
exif_orientation_tag = 0x0112
exif_transpose_sequences = [
[],
[],
[PIL.Image.FLIP_LEFT_RIGHT],
[PIL.Image.ROTATE_180],
[PIL.Image.FLIP_TOP_BOTTOM],
[PIL.Image.FLIP_LEFT_RIGHT, PIL.Image.ROTATE_90],
[PIL.Image.ROTATE_270],
[PIL.Image.FLIP_TOP_BOTTOM, PIL.Image.ROTATE_90],
[PIL.Image.ROTATE_90],
]
try:
# pylint:disable=protected-access
orientation = image._getexif()[exif_orientation_tag]
sequence = exif_transpose_sequences[orientation]
return functools.reduce(type(image).transpose, sequence, image)
except (TypeError, AttributeError, KeyError):
# either no EXIF tags or no orientation tag
pass
return image | python | def fix_orientation(image):
""" adapted from https://stackoverflow.com/a/30462851/318857
Apply Image.transpose to ensure 0th row of pixels is at the visual
top of the image, and 0th column is the visual left-hand side.
Return the original image if unable to determine the orientation.
As per CIPA DC-008-2012, the orientation field contains an integer,
1 through 8. Other values are reserved.
"""
exif_orientation_tag = 0x0112
exif_transpose_sequences = [
[],
[],
[PIL.Image.FLIP_LEFT_RIGHT],
[PIL.Image.ROTATE_180],
[PIL.Image.FLIP_TOP_BOTTOM],
[PIL.Image.FLIP_LEFT_RIGHT, PIL.Image.ROTATE_90],
[PIL.Image.ROTATE_270],
[PIL.Image.FLIP_TOP_BOTTOM, PIL.Image.ROTATE_90],
[PIL.Image.ROTATE_90],
]
try:
# pylint:disable=protected-access
orientation = image._getexif()[exif_orientation_tag]
sequence = exif_transpose_sequences[orientation]
return functools.reduce(type(image).transpose, sequence, image)
except (TypeError, AttributeError, KeyError):
# either no EXIF tags or no orientation tag
pass
return image | ['def', 'fix_orientation', '(', 'image', ')', ':', 'exif_orientation_tag', '=', '0x0112', 'exif_transpose_sequences', '=', '[', '[', ']', ',', '[', ']', ',', '[', 'PIL', '.', 'Image', '.', 'FLIP_LEFT_RIGHT', ']', ',', '[', 'PIL', '.', 'Image', '.', 'ROTATE_180', ']', ',', '[', 'PIL', '.', 'Image', '.', 'FLIP_TOP_BOTTOM', ']', ',', '[', 'PIL', '.', 'Image', '.', 'FLIP_LEFT_RIGHT', ',', 'PIL', '.', 'Image', '.', 'ROTATE_90', ']', ',', '[', 'PIL', '.', 'Image', '.', 'ROTATE_270', ']', ',', '[', 'PIL', '.', 'Image', '.', 'FLIP_TOP_BOTTOM', ',', 'PIL', '.', 'Image', '.', 'ROTATE_90', ']', ',', '[', 'PIL', '.', 'Image', '.', 'ROTATE_90', ']', ',', ']', 'try', ':', '# pylint:disable=protected-access', 'orientation', '=', 'image', '.', '_getexif', '(', ')', '[', 'exif_orientation_tag', ']', 'sequence', '=', 'exif_transpose_sequences', '[', 'orientation', ']', 'return', 'functools', '.', 'reduce', '(', 'type', '(', 'image', ')', '.', 'transpose', ',', 'sequence', ',', 'image', ')', 'except', '(', 'TypeError', ',', 'AttributeError', ',', 'KeyError', ')', ':', '# either no EXIF tags or no orientation tag', 'pass', 'return', 'image'] | adapted from https://stackoverflow.com/a/30462851/318857
Apply Image.transpose to ensure 0th row of pixels is at the visual
top of the image, and 0th column is the visual left-hand side.
Return the original image if unable to determine the orientation.
As per CIPA DC-008-2012, the orientation field contains an integer,
1 through 8. Other values are reserved. | ['adapted', 'from', 'https', ':', '//', 'stackoverflow', '.', 'com', '/', 'a', '/', '30462851', '/', '318857'] | train | https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/image/local.py#L23-L55 |
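# Editor's note: a hedged usage sketch of fix_orientation above; the file names are
# hypothetical and Pillow must be installed.
import PIL.Image

with PIL.Image.open('photo.jpg') as image:
    upright = fix_orientation(image)
    upright.save('photo-upright.jpg')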
6,434 | tanghaibao/goatools | goatools/gosubdag/plot/plot.py | plt_goids | def plt_goids(gosubdag, fout_img, goids, **kws_plt):
"""Plot GO IDs in a DAG (Directed Acyclic Graph)."""
gosubdag_plt = GoSubDag(goids, gosubdag.go2obj, rcntobj=gosubdag.rcntobj, **kws_plt)
godagplot = GoSubDagPlot(gosubdag_plt, **kws_plt)
godagplot.plt_dag(fout_img)
return godagplot | python | def plt_goids(gosubdag, fout_img, goids, **kws_plt):
"""Plot GO IDs in a DAG (Directed Acyclic Graph)."""
gosubdag_plt = GoSubDag(goids, gosubdag.go2obj, rcntobj=gosubdag.rcntobj, **kws_plt)
godagplot = GoSubDagPlot(gosubdag_plt, **kws_plt)
godagplot.plt_dag(fout_img)
return godagplot | ['def', 'plt_goids', '(', 'gosubdag', ',', 'fout_img', ',', 'goids', ',', '*', '*', 'kws_plt', ')', ':', 'gosubdag_plt', '=', 'GoSubDag', '(', 'goids', ',', 'gosubdag', '.', 'go2obj', ',', 'rcntobj', '=', 'gosubdag', '.', 'rcntobj', ',', '*', '*', 'kws_plt', ')', 'godagplot', '=', 'GoSubDagPlot', '(', 'gosubdag_plt', ',', '*', '*', 'kws_plt', ')', 'godagplot', '.', 'plt_dag', '(', 'fout_img', ')', 'return', 'godagplot'] | Plot GO IDs in a DAG (Directed Acyclic Graph). | ['Plot', 'GO', 'IDs', 'in', 'a', 'DAG', '(', 'Directed', 'Acyclic', 'Graph', ')', '.'] | train | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/plot/plot.py#L55-L60 |
6,435 | mozillazg/python-shanbay | shanbay/team.py | Team.forum_id | def forum_id(self):
"""小组发帖要用的 forum_id"""
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
return soup.find(id='forum_id').attrs['value'] | python | def forum_id(self):
"""小组发帖要用的 forum_id"""
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
return soup.find(id='forum_id').attrs['value'] | ['def', 'forum_id', '(', 'self', ')', ':', 'html', '=', 'self', '.', 'request', '(', 'self', '.', 'team_url', ')', '.', 'text', 'soup', '=', 'BeautifulSoup', '(', 'html', ')', 'return', 'soup', '.', 'find', '(', 'id', '=', "'forum_id'", ')', '.', 'attrs', '[', "'value'", ']'] | 小组发帖要用的 forum_id | ['小组发帖要用的', 'forum_id'] | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/team.py#L249-L253 |
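# Editor's note: a standalone sketch of the scrape above; the HTML snippet is made up
# and beautifulsoup4 must be installed.
from bs4 import BeautifulSoup

html = '<form><input id="forum_id" value="12345"/></form>'
soup = BeautifulSoup(html, 'html.parser')
print(soup.find(id='forum_id').attrs['value'])   # 12345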
6,436 | shexSpec/grammar | parsers/python/pyshexc/parser_impl/shex_node_expression_parser.py | ShexNodeExpressionParser.visitIriRange | def visitIriRange(self, ctx: ShExDocParser.IriRangeContext):
""" iriRange: iri (STEM_MARK iriExclusion*)? """
baseiri = self.context.iri_to_iriref(ctx.iri())
if not ctx.STEM_MARK():
vsvalue = baseiri # valueSetValue = objectValue / objectValue = IRI
else:
if ctx.iriExclusion(): # valueSetValue = IriStemRange / iriStemRange = stem + exclusions
vsvalue = IriStemRange(baseiri, exclusions=[])
self._iri_exclusions(vsvalue, ctx.iriExclusion())
else:
vsvalue = IriStem(baseiri) # valueSetValue = IriStem / IriStem: {stem:IRI}
self.nodeconstraint.values.append(vsvalue) | python | def visitIriRange(self, ctx: ShExDocParser.IriRangeContext):
""" iriRange: iri (STEM_MARK iriExclusion*)? """
baseiri = self.context.iri_to_iriref(ctx.iri())
if not ctx.STEM_MARK():
vsvalue = baseiri # valueSetValue = objectValue / objectValue = IRI
else:
if ctx.iriExclusion(): # valueSetValue = IriStemRange / iriStemRange = stem + exclusions
vsvalue = IriStemRange(baseiri, exclusions=[])
self._iri_exclusions(vsvalue, ctx.iriExclusion())
else:
vsvalue = IriStem(baseiri) # valueSetValue = IriStem / IriStem: {stem:IRI}
self.nodeconstraint.values.append(vsvalue) | ['def', 'visitIriRange', '(', 'self', ',', 'ctx', ':', 'ShExDocParser', '.', 'IriRangeContext', ')', ':', 'baseiri', '=', 'self', '.', 'context', '.', 'iri_to_iriref', '(', 'ctx', '.', 'iri', '(', ')', ')', 'if', 'not', 'ctx', '.', 'STEM_MARK', '(', ')', ':', 'vsvalue', '=', 'baseiri', '# valueSetValue = objectValue / objectValue = IRI', 'else', ':', 'if', 'ctx', '.', 'iriExclusion', '(', ')', ':', '# valueSetValue = IriStemRange / iriStemRange = stem + exclusions', 'vsvalue', '=', 'IriStemRange', '(', 'baseiri', ',', 'exclusions', '=', '[', ']', ')', 'self', '.', '_iri_exclusions', '(', 'vsvalue', ',', 'ctx', '.', 'iriExclusion', '(', ')', ')', 'else', ':', 'vsvalue', '=', 'IriStem', '(', 'baseiri', ')', '# valueSetValue = IriStem / IriStem: {stem:IRI}', 'self', '.', 'nodeconstraint', '.', 'values', '.', 'append', '(', 'vsvalue', ')'] | iriRange: iri (STEM_MARK iriExclusion*)? | ['iriRange', ':', 'iri', '(', 'STEM_MARK', 'iriExclusion', '*', ')', '?'] | train | https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/shex_node_expression_parser.py#L86-L97 |
6,437 | titusjan/argos | argos/repo/memoryrtis.py | ArrayRti.elementTypeName | def elementTypeName(self):
""" String representation of the element type.
"""
if self._array is None:
return super(ArrayRti, self).elementTypeName
else:
dtype = self._array.dtype
return '<structured>' if dtype.names else str(dtype) | python | def elementTypeName(self):
""" String representation of the element type.
"""
if self._array is None:
return super(ArrayRti, self).elementTypeName
else:
dtype = self._array.dtype
return '<structured>' if dtype.names else str(dtype) | ['def', 'elementTypeName', '(', 'self', ')', ':', 'if', 'self', '.', '_array', 'is', 'None', ':', 'return', 'super', '(', 'ArrayRti', ',', 'self', ')', '.', 'elementTypeName', 'else', ':', 'dtype', '=', 'self', '.', '_array', '.', 'dtype', 'return', "'<structured>'", 'if', 'dtype', '.', 'names', 'else', 'str', '(', 'dtype', ')'] | String representation of the element type. | ['String', 'representation', 'of', 'the', 'element', 'type', '.'] | train | https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/repo/memoryrtis.py#L328-L335 |
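# Editor's note: a small illustration of the dtype check above; dtype.names is only
# set for structured arrays.
import numpy as np

plain = np.zeros(3, dtype=np.float64)
structured = np.zeros(3, dtype=[('x', 'f4'), ('y', 'i2')])
for arr in (plain, structured):
    print('<structured>' if arr.dtype.names else str(arr.dtype))
# float64
# <structured>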
6,438 | openstax/cnx-archive | cnxarchive/database.py | upsert_users_from_legacy_publication_trigger | def upsert_users_from_legacy_publication_trigger(plpy, td):
"""A compatibility trigger to upsert users from legacy persons table."""
modified_state = "OK"
authors = td['new']['authors'] and td['new']['authors'] or []
maintainers = td['new']['maintainers'] and td['new']['maintainers'] or []
licensors = td['new']['licensors'] and td['new']['licensors'] or []
is_legacy_publication = td['new']['version'] is not None
if not is_legacy_publication:
return modified_state
# Upsert all roles into the users table.
users = []
users.extend(authors)
users.extend(maintainers)
users.extend(licensors)
users = list(set(users))
plan = plpy.prepare("""\
SELECT username FROM users WHERE username = any($1)""",
['text[]'])
existing_users = set([r['username'] for r in plpy.execute(plan, (users,))])
new_users = set(users).difference(existing_users)
for username in new_users:
plan = plpy.prepare("""\
INSERT INTO users (username, first_name, last_name, full_name, title)
SELECT personid, firstname, surname, fullname, honorific
FROM persons where personid = $1""", ['text'])
plpy.execute(plan, (username,))
return modified_state | python | def upsert_users_from_legacy_publication_trigger(plpy, td):
"""A compatibility trigger to upsert users from legacy persons table."""
modified_state = "OK"
authors = td['new']['authors'] and td['new']['authors'] or []
maintainers = td['new']['maintainers'] and td['new']['maintainers'] or []
licensors = td['new']['licensors'] and td['new']['licensors'] or []
is_legacy_publication = td['new']['version'] is not None
if not is_legacy_publication:
return modified_state
# Upsert all roles into the users table.
users = []
users.extend(authors)
users.extend(maintainers)
users.extend(licensors)
users = list(set(users))
plan = plpy.prepare("""\
SELECT username FROM users WHERE username = any($1)""",
['text[]'])
existing_users = set([r['username'] for r in plpy.execute(plan, (users,))])
new_users = set(users).difference(existing_users)
for username in new_users:
plan = plpy.prepare("""\
INSERT INTO users (username, first_name, last_name, full_name, title)
SELECT personid, firstname, surname, fullname, honorific
FROM persons where personid = $1""", ['text'])
plpy.execute(plan, (username,))
return modified_state | ['def', 'upsert_users_from_legacy_publication_trigger', '(', 'plpy', ',', 'td', ')', ':', 'modified_state', '=', '"OK"', 'authors', '=', 'td', '[', "'new'", ']', '[', "'authors'", ']', 'and', 'td', '[', "'new'", ']', '[', "'authors'", ']', 'or', '[', ']', 'maintainers', '=', 'td', '[', "'new'", ']', '[', "'maintainers'", ']', 'and', 'td', '[', "'new'", ']', '[', "'maintainers'", ']', 'or', '[', ']', 'licensors', '=', 'td', '[', "'new'", ']', '[', "'licensors'", ']', 'and', 'td', '[', "'new'", ']', '[', "'licensors'", ']', 'or', '[', ']', 'is_legacy_publication', '=', 'td', '[', "'new'", ']', '[', "'version'", ']', 'is', 'not', 'None', 'if', 'not', 'is_legacy_publication', ':', 'return', 'modified_state', '# Upsert all roles into the users table.', 'users', '=', '[', ']', 'users', '.', 'extend', '(', 'authors', ')', 'users', '.', 'extend', '(', 'maintainers', ')', 'users', '.', 'extend', '(', 'licensors', ')', 'users', '=', 'list', '(', 'set', '(', 'users', ')', ')', 'plan', '=', 'plpy', '.', 'prepare', '(', '"""\\\nSELECT username FROM users WHERE username = any($1)"""', ',', '[', "'text[]'", ']', ')', 'existing_users', '=', 'set', '(', '[', 'r', '[', "'username'", ']', 'for', 'r', 'in', 'plpy', '.', 'execute', '(', 'plan', ',', '(', 'users', ',', ')', ')', ']', ')', 'new_users', '=', 'set', '(', 'users', ')', '.', 'difference', '(', 'existing_users', ')', 'for', 'username', 'in', 'new_users', ':', 'plan', '=', 'plpy', '.', 'prepare', '(', '"""\\\nINSERT INTO users (username, first_name, last_name, full_name, title)\nSELECT personid, firstname, surname, fullname, honorific\nFROM persons where personid = $1"""', ',', '[', "'text'", ']', ')', 'plpy', '.', 'execute', '(', 'plan', ',', '(', 'username', ',', ')', ')', 'return', 'modified_state'] | A compatibility trigger to upsert users from legacy persons table. | ['A', 'compatibility', 'trigger', 'to', 'upsert', 'users', 'from', 'legacy', 'persons', 'table', '.'] | train | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/database.py#L639-L670 |
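A plain-Python sketch of the trigger's core pattern (merge the role lists, dedupe, insert only usernames not already present); the in-memory set stands in for the users table and the role lists are made-up values.

# Stand-in for: SELECT username FROM users WHERE username = any($1)
existing_users = {'alice', 'bob'}
authors, maintainers, licensors = ['alice', 'carol'], ['bob'], ['carol', 'dave']

users = list(set(authors + maintainers + licensors))
new_users = set(users).difference(existing_users)
for username in sorted(new_users):
    # the trigger runs an INSERT ... SELECT against the persons table here
    print('would insert user:', username)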
6,439 | rosenbrockc/fortpy | fortpy/interop/ftypes.py | _ctypes_out | def _ctypes_out(parameter):
"""Returns a parameter variable declaration for an output variable for the specified
parameter.
"""
if (parameter.dimension is not None and ":" in parameter.dimension
and "out" in parameter.direction and ("allocatable" in parameter.modifiers or
"pointer" in parameter.modifiers)):
if parameter.direction == "(inout)":
return ("type(C_PTR), intent(inout) :: {}_o".format(parameter.name), True)
else: #self.direction == "(out)" since that is the only other option.
return ("type(C_PTR), intent(inout) :: {}_c".format(parameter.name), True) | python | def _ctypes_out(parameter):
"""Returns a parameter variable declaration for an output variable for the specified
parameter.
"""
if (parameter.dimension is not None and ":" in parameter.dimension
and "out" in parameter.direction and ("allocatable" in parameter.modifiers or
"pointer" in parameter.modifiers)):
if parameter.direction == "(inout)":
return ("type(C_PTR), intent(inout) :: {}_o".format(parameter.name), True)
else: #self.direction == "(out)" since that is the only other option.
return ("type(C_PTR), intent(inout) :: {}_c".format(parameter.name), True) | ['def', '_ctypes_out', '(', 'parameter', ')', ':', 'if', '(', 'parameter', '.', 'dimension', 'is', 'not', 'None', 'and', '":"', 'in', 'parameter', '.', 'dimension', 'and', '"out"', 'in', 'parameter', '.', 'direction', 'and', '(', '"allocatable"', 'in', 'parameter', '.', 'modifiers', 'or', '"pointer"', 'in', 'parameter', '.', 'modifiers', ')', ')', ':', 'if', 'parameter', '.', 'direction', '==', '"(inout)"', ':', 'return', '(', '"type(C_PTR), intent(inout) :: {}_o"', '.', 'format', '(', 'parameter', '.', 'name', ')', ',', 'True', ')', 'else', ':', '#self.direction == "(out)" since that is the only other option.', 'return', '(', '"type(C_PTR), intent(inout) :: {}_c"', '.', 'format', '(', 'parameter', '.', 'name', ')', ',', 'True', ')'] | Returns a parameter variable declaration for an output variable for the specified
parameter. | ['Returns', 'a', 'parameter', 'variable', 'declaration', 'for', 'an', 'output', 'variable', 'for', 'the', 'specified', 'parameter', '.'] | train | https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/interop/ftypes.py#L798-L808 |
6,440 | secdev/scapy | scapy/layers/tls/extensions.py | TLS_Ext_PrettyPacketList._show_or_dump | def _show_or_dump(self, dump=False, indent=3,
lvl="", label_lvl="", first_call=True):
""" Reproduced from packet.py """
ct = AnsiColorTheme() if dump else conf.color_theme
s = "%s%s %s %s \n" % (label_lvl, ct.punct("###["),
ct.layer_name(self.name), ct.punct("]###"))
for f in self.fields_desc[:-1]:
ncol = ct.field_name
vcol = ct.field_value
fvalue = self.getfieldval(f.name)
begn = "%s %-10s%s " % (label_lvl + lvl, ncol(f.name),
ct.punct("="),)
reprval = f.i2repr(self, fvalue)
if isinstance(reprval, str):
reprval = reprval.replace("\n", "\n" + " " * (len(label_lvl) +
len(lvl) +
len(f.name) +
4))
s += "%s%s\n" % (begn, vcol(reprval))
f = self.fields_desc[-1]
ncol = ct.field_name
vcol = ct.field_value
fvalue = self.getfieldval(f.name)
begn = "%s %-10s%s " % (label_lvl + lvl, ncol(f.name), ct.punct("="),)
reprval = f.i2repr(self, fvalue)
if isinstance(reprval, str):
reprval = reprval.replace("\n", "\n" + " " * (len(label_lvl) +
len(lvl) +
len(f.name) +
4))
s += "%s%s\n" % (begn, vcol(reprval))
if self.payload:
s += self.payload._show_or_dump(dump=dump, indent=indent,
lvl=lvl + (" " * indent * self.show_indent), # noqa: E501
label_lvl=label_lvl, first_call=False) # noqa: E501
if first_call and not dump:
print(s)
else:
return s | python | def _show_or_dump(self, dump=False, indent=3,
lvl="", label_lvl="", first_call=True):
""" Reproduced from packet.py """
ct = AnsiColorTheme() if dump else conf.color_theme
s = "%s%s %s %s \n" % (label_lvl, ct.punct("###["),
ct.layer_name(self.name), ct.punct("]###"))
for f in self.fields_desc[:-1]:
ncol = ct.field_name
vcol = ct.field_value
fvalue = self.getfieldval(f.name)
begn = "%s %-10s%s " % (label_lvl + lvl, ncol(f.name),
ct.punct("="),)
reprval = f.i2repr(self, fvalue)
if isinstance(reprval, str):
reprval = reprval.replace("\n", "\n" + " " * (len(label_lvl) +
len(lvl) +
len(f.name) +
4))
s += "%s%s\n" % (begn, vcol(reprval))
f = self.fields_desc[-1]
ncol = ct.field_name
vcol = ct.field_value
fvalue = self.getfieldval(f.name)
begn = "%s %-10s%s " % (label_lvl + lvl, ncol(f.name), ct.punct("="),)
reprval = f.i2repr(self, fvalue)
if isinstance(reprval, str):
reprval = reprval.replace("\n", "\n" + " " * (len(label_lvl) +
len(lvl) +
len(f.name) +
4))
s += "%s%s\n" % (begn, vcol(reprval))
if self.payload:
s += self.payload._show_or_dump(dump=dump, indent=indent,
lvl=lvl + (" " * indent * self.show_indent), # noqa: E501
label_lvl=label_lvl, first_call=False) # noqa: E501
if first_call and not dump:
print(s)
else:
return s | ['def', '_show_or_dump', '(', 'self', ',', 'dump', '=', 'False', ',', 'indent', '=', '3', ',', 'lvl', '=', '""', ',', 'label_lvl', '=', '""', ',', 'first_call', '=', 'True', ')', ':', 'ct', '=', 'AnsiColorTheme', '(', ')', 'if', 'dump', 'else', 'conf', '.', 'color_theme', 's', '=', '"%s%s %s %s \\n"', '%', '(', 'label_lvl', ',', 'ct', '.', 'punct', '(', '"###["', ')', ',', 'ct', '.', 'layer_name', '(', 'self', '.', 'name', ')', ',', 'ct', '.', 'punct', '(', '"]###"', ')', ')', 'for', 'f', 'in', 'self', '.', 'fields_desc', '[', ':', '-', '1', ']', ':', 'ncol', '=', 'ct', '.', 'field_name', 'vcol', '=', 'ct', '.', 'field_value', 'fvalue', '=', 'self', '.', 'getfieldval', '(', 'f', '.', 'name', ')', 'begn', '=', '"%s %-10s%s "', '%', '(', 'label_lvl', '+', 'lvl', ',', 'ncol', '(', 'f', '.', 'name', ')', ',', 'ct', '.', 'punct', '(', '"="', ')', ',', ')', 'reprval', '=', 'f', '.', 'i2repr', '(', 'self', ',', 'fvalue', ')', 'if', 'isinstance', '(', 'reprval', ',', 'str', ')', ':', 'reprval', '=', 'reprval', '.', 'replace', '(', '"\\n"', ',', '"\\n"', '+', '" "', '*', '(', 'len', '(', 'label_lvl', ')', '+', 'len', '(', 'lvl', ')', '+', 'len', '(', 'f', '.', 'name', ')', '+', '4', ')', ')', 's', '+=', '"%s%s\\n"', '%', '(', 'begn', ',', 'vcol', '(', 'reprval', ')', ')', 'f', '=', 'self', '.', 'fields_desc', '[', '-', '1', ']', 'ncol', '=', 'ct', '.', 'field_name', 'vcol', '=', 'ct', '.', 'field_value', 'fvalue', '=', 'self', '.', 'getfieldval', '(', 'f', '.', 'name', ')', 'begn', '=', '"%s %-10s%s "', '%', '(', 'label_lvl', '+', 'lvl', ',', 'ncol', '(', 'f', '.', 'name', ')', ',', 'ct', '.', 'punct', '(', '"="', ')', ',', ')', 'reprval', '=', 'f', '.', 'i2repr', '(', 'self', ',', 'fvalue', ')', 'if', 'isinstance', '(', 'reprval', ',', 'str', ')', ':', 'reprval', '=', 'reprval', '.', 'replace', '(', '"\\n"', ',', '"\\n"', '+', '" "', '*', '(', 'len', '(', 'label_lvl', ')', '+', 'len', '(', 'lvl', ')', '+', 'len', '(', 'f', '.', 'name', ')', '+', '4', ')', ')', 's', '+=', '"%s%s\\n"', '%', '(', 'begn', ',', 'vcol', '(', 'reprval', ')', ')', 'if', 'self', '.', 'payload', ':', 's', '+=', 'self', '.', 'payload', '.', '_show_or_dump', '(', 'dump', '=', 'dump', ',', 'indent', '=', 'indent', ',', 'lvl', '=', 'lvl', '+', '(', '" "', '*', 'indent', '*', 'self', '.', 'show_indent', ')', ',', '# noqa: E501', 'label_lvl', '=', 'label_lvl', ',', 'first_call', '=', 'False', ')', '# noqa: E501', 'if', 'first_call', 'and', 'not', 'dump', ':', 'print', '(', 's', ')', 'else', ':', 'return', 's'] | Reproduced from packet.py | ['Reproduced', 'from', 'packet', '.', 'py'] | train | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/extensions.py#L96-L135 |
6,441 | SBRG/ssbio | ssbio/pipeline/atlas.py | ATLAS.get_orthology_matrix | def get_orthology_matrix(self, pid_cutoff=None, bitscore_cutoff=None, evalue_cutoff=None, filter_condition='OR',
remove_strains_with_no_orthology=True,
remove_strains_with_no_differences=False,
remove_genes_not_in_base_model=True):
"""Create the orthology matrix by finding best bidirectional BLAST hits. Genes = rows, strains = columns
Runs run_makeblastdb, run_bidirectional_blast, and calculate_bbh for protein sequences.
Args:
pid_cutoff (float): Minimum percent identity between BLAST hits to filter for in the range [0, 100]
bitscore_cutoff (float): Minimum bitscore allowed between BLAST hits
evalue_cutoff (float): Maximum E-value allowed between BLAST hits
filter_condition (str): 'OR' or 'AND', how to combine cutoff filters. 'OR' gives more results since it
is less stringent, as you will be filtering for hits with (>80% PID or >30 bitscore or <0.0001 evalue).
remove_strains_with_no_orthology (bool): Remove strains which have no orthologous genes found
remove_strains_with_no_differences (bool): Remove strains which have all the same genes as the base model.
Default is False because since orthology is found using a PID cutoff, all genes may be present but
differences may be on the sequence level.
remove_genes_not_in_base_model (bool): Remove genes from the orthology matrix which are not present in our
base model. This happens if we use a genome file for our model that has other genes in it.
Returns:
DataFrame: Orthology matrix calculated from best bidirectional BLAST hits.
"""
# TODO: document and test other cutoffs
# Get the path to the reference genome
r_file = self.reference_gempro.genome_path
bbh_files = {}
log.info('Running bidirectional BLAST and finding best bidirectional hits (BBH)...')
for strain_gempro in tqdm(self.strains):
g_file = strain_gempro.genome_path
# Run bidirectional BLAST
log.debug('{} vs {}: Running bidirectional BLAST'.format(self.reference_gempro.id, strain_gempro.id))
r_vs_g, g_vs_r = ssbio.protein.sequence.utils.blast.run_bidirectional_blast(reference=r_file,
other_genome=g_file,
dbtype='prot',
outdir=self.sequences_by_organism_dir)
# Using the BLAST files, find the BBH
log.debug('{} vs {}: Finding BBHs'.format(self.reference_gempro.id, strain_gempro.id))
bbh = ssbio.protein.sequence.utils.blast.calculate_bbh(blast_results_1=r_vs_g, blast_results_2=g_vs_r,
outdir=self.sequences_by_organism_dir)
bbh_files[strain_gempro.id] = bbh
# Make the orthologous genes matrix
log.info('Creating orthology matrix from BBHs...')
ortho_matrix = ssbio.protein.sequence.utils.blast.create_orthology_matrix(r_name=self.reference_gempro.id,
genome_to_bbh_files=bbh_files,
pid_cutoff=pid_cutoff,
bitscore_cutoff=bitscore_cutoff,
evalue_cutoff=evalue_cutoff,
filter_condition=filter_condition,
outname='{}_{}_orthology.csv'.format(self.reference_gempro.id, 'prot'),
outdir=self.data_dir)
log.info('Saved orthology matrix at {}. See the "df_orthology_matrix" attribute.'.format(ortho_matrix))
self.df_orthology_matrix = pd.read_csv(ortho_matrix, index_col=0)
# Filter the matrix to genes only in our analysis, and also check for strains with no differences or no orthologous genes
self._filter_orthology_matrix(remove_strains_with_no_orthology=remove_strains_with_no_orthology,
remove_strains_with_no_differences=remove_strains_with_no_differences,
remove_genes_not_in_base_model=remove_genes_not_in_base_model) | python | def get_orthology_matrix(self, pid_cutoff=None, bitscore_cutoff=None, evalue_cutoff=None, filter_condition='OR',
remove_strains_with_no_orthology=True,
remove_strains_with_no_differences=False,
remove_genes_not_in_base_model=True):
"""Create the orthology matrix by finding best bidirectional BLAST hits. Genes = rows, strains = columns
Runs run_makeblastdb, run_bidirectional_blast, and calculate_bbh for protein sequences.
Args:
pid_cutoff (float): Minimum percent identity between BLAST hits to filter for in the range [0, 100]
bitscore_cutoff (float): Minimum bitscore allowed between BLAST hits
evalue_cutoff (float): Maximum E-value allowed between BLAST hits
filter_condition (str): 'OR' or 'AND', how to combine cutoff filters. 'OR' gives more results since it
is less stringent, as you will be filtering for hits with (>80% PID or >30 bitscore or <0.0001 evalue).
remove_strains_with_no_orthology (bool): Remove strains which have no orthologous genes found
remove_strains_with_no_differences (bool): Remove strains which have all the same genes as the base model.
Default is False because, since orthology is found using a PID cutoff, all genes may be present but
differences may be on the sequence level.
remove_genes_not_in_base_model (bool): Remove genes from the orthology matrix which are not present in our
base model. This happens if we use a genome file for our model that has other genes in it.
Returns:
DataFrame: Orthology matrix calculated from best bidirectional BLAST hits.
"""
# TODO: document and test other cutoffs
# Get the path to the reference genome
r_file = self.reference_gempro.genome_path
bbh_files = {}
log.info('Running bidirectional BLAST and finding best bidirectional hits (BBH)...')
for strain_gempro in tqdm(self.strains):
g_file = strain_gempro.genome_path
# Run bidirectional BLAST
log.debug('{} vs {}: Running bidirectional BLAST'.format(self.reference_gempro.id, strain_gempro.id))
r_vs_g, g_vs_r = ssbio.protein.sequence.utils.blast.run_bidirectional_blast(reference=r_file,
other_genome=g_file,
dbtype='prot',
outdir=self.sequences_by_organism_dir)
# Using the BLAST files, find the BBH
log.debug('{} vs {}: Finding BBHs'.format(self.reference_gempro.id, strain_gempro.id))
bbh = ssbio.protein.sequence.utils.blast.calculate_bbh(blast_results_1=r_vs_g, blast_results_2=g_vs_r,
outdir=self.sequences_by_organism_dir)
bbh_files[strain_gempro.id] = bbh
# Make the orthologous genes matrix
log.info('Creating orthology matrix from BBHs...')
ortho_matrix = ssbio.protein.sequence.utils.blast.create_orthology_matrix(r_name=self.reference_gempro.id,
genome_to_bbh_files=bbh_files,
pid_cutoff=pid_cutoff,
bitscore_cutoff=bitscore_cutoff,
evalue_cutoff=evalue_cutoff,
filter_condition=filter_condition,
outname='{}_{}_orthology.csv'.format(self.reference_gempro.id, 'prot'),
outdir=self.data_dir)
log.info('Saved orthology matrix at {}. See the "df_orthology_matrix" attribute.'.format(ortho_matrix))
self.df_orthology_matrix = pd.read_csv(ortho_matrix, index_col=0)
# Filter the matrix to genes only in our analysis, and also check for strains with no differences or no orthologous genes
self._filter_orthology_matrix(remove_strains_with_no_orthology=remove_strains_with_no_orthology,
remove_strains_with_no_differences=remove_strains_with_no_differences,
remove_genes_not_in_base_model=remove_genes_not_in_base_model) | ['def', 'get_orthology_matrix', '(', 'self', ',', 'pid_cutoff', '=', 'None', ',', 'bitscore_cutoff', '=', 'None', ',', 'evalue_cutoff', '=', 'None', ',', 'filter_condition', '=', "'OR'", ',', 'remove_strains_with_no_orthology', '=', 'True', ',', 'remove_strains_with_no_differences', '=', 'False', ',', 'remove_genes_not_in_base_model', '=', 'True', ')', ':', '# TODO: document and test other cutoffs', '# Get the path to the reference genome', 'r_file', '=', 'self', '.', 'reference_gempro', '.', 'genome_path', 'bbh_files', '=', '{', '}', 'log', '.', 'info', '(', "'Running bidirectional BLAST and finding best bidirectional hits (BBH)...'", ')', 'for', 'strain_gempro', 'in', 'tqdm', '(', 'self', '.', 'strains', ')', ':', 'g_file', '=', 'strain_gempro', '.', 'genome_path', '# Run bidirectional BLAST', 'log', '.', 'debug', '(', "'{} vs {}: Running bidirectional BLAST'", '.', 'format', '(', 'self', '.', 'reference_gempro', '.', 'id', ',', 'strain_gempro', '.', 'id', ')', ')', 'r_vs_g', ',', 'g_vs_r', '=', 'ssbio', '.', 'protein', '.', 'sequence', '.', 'utils', '.', 'blast', '.', 'run_bidirectional_blast', '(', 'reference', '=', 'r_file', ',', 'other_genome', '=', 'g_file', ',', 'dbtype', '=', "'prot'", ',', 'outdir', '=', 'self', '.', 'sequences_by_organism_dir', ')', '# Using the BLAST files, find the BBH', 'log', '.', 'debug', '(', "'{} vs {}: Finding BBHs'", '.', 'format', '(', 'self', '.', 'reference_gempro', '.', 'id', ',', 'strain_gempro', '.', 'id', ')', ')', 'bbh', '=', 'ssbio', '.', 'protein', '.', 'sequence', '.', 'utils', '.', 'blast', '.', 'calculate_bbh', '(', 'blast_results_1', '=', 'r_vs_g', ',', 'blast_results_2', '=', 'g_vs_r', ',', 'outdir', '=', 'self', '.', 'sequences_by_organism_dir', ')', 'bbh_files', '[', 'strain_gempro', '.', 'id', ']', '=', 'bbh', '# Make the orthologous genes matrix', 'log', '.', 'info', '(', "'Creating orthology matrix from BBHs...'", ')', 'ortho_matrix', '=', 'ssbio', '.', 'protein', '.', 'sequence', '.', 'utils', '.', 'blast', '.', 'create_orthology_matrix', '(', 'r_name', '=', 'self', '.', 'reference_gempro', '.', 'id', ',', 'genome_to_bbh_files', '=', 'bbh_files', ',', 'pid_cutoff', '=', 'pid_cutoff', ',', 'bitscore_cutoff', '=', 'bitscore_cutoff', ',', 'evalue_cutoff', '=', 'evalue_cutoff', ',', 'filter_condition', '=', 'filter_condition', ',', 'outname', '=', "'{}_{}_orthology.csv'", '.', 'format', '(', 'self', '.', 'reference_gempro', '.', 'id', ',', "'prot'", ')', ',', 'outdir', '=', 'self', '.', 'data_dir', ')', 'log', '.', 'info', '(', '\'Saved orthology matrix at {}. See the "df_orthology_matrix" attribute.\'', '.', 'format', '(', 'ortho_matrix', ')', ')', 'self', '.', 'df_orthology_matrix', '=', 'pd', '.', 'read_csv', '(', 'ortho_matrix', ',', 'index_col', '=', '0', ')', '# Filter the matrix to genes only in our analysis, and also check for strains with no differences or no orthologous genes', 'self', '.', '_filter_orthology_matrix', '(', 'remove_strains_with_no_orthology', '=', 'remove_strains_with_no_orthology', ',', 'remove_strains_with_no_differences', '=', 'remove_strains_with_no_differences', ',', 'remove_genes_not_in_base_model', '=', 'remove_genes_not_in_base_model', ')'] | Create the orthology matrix by finding best bidirectional BLAST hits. Genes = rows, strains = columns
Runs run_makeblastdb, run_bidirectional_blast, and calculate_bbh for protein sequences.
Args:
pid_cutoff (float): Minimum percent identity between BLAST hits to filter for in the range [0, 100]
bitscore_cutoff (float): Minimum bitscore allowed between BLAST hits
evalue_cutoff (float): Maximum E-value allowed between BLAST hits
filter_condition (str): 'OR' or 'AND', how to combine cutoff filters. 'OR' gives more results since it
is less stringent, as you will be filtering for hits with (>80% PID or >30 bitscore or <0.0001 evalue).
remove_strains_with_no_orthology (bool): Remove strains which have no orthologous genes found
remove_strains_with_no_differences (bool): Remove strains which have all the same genes as the base model.
Default is False because, since orthology is found using a PID cutoff, all genes may be present but
differences may be on the sequence level.
remove_genes_not_in_base_model (bool): Remove genes from the orthology matrix which are not present in our
base model. This happens if we use a genome file for our model that has other genes in it.
Returns:
DataFrame: Orthology matrix calculated from best bidirectional BLAST hits. | ['Create', 'the', 'orthology', 'matrix', 'by', 'finding', 'best', 'bidirectional', 'BLAST', 'hits', '.', 'Genes', '=', 'rows', 'strains', '=', 'columns'] | train | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/atlas.py#L252-L318 |
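A hedged usage sketch of the method above; it assumes `atlas` is an already-constructed ssbio ATLAS object with the reference GEM-PRO and strain GEM-PROs loaded (that setup is not shown in this record), and the cutoff values are illustrative.

# Hypothetical call; `atlas` must already have reference_gempro and strains populated.
atlas.get_orthology_matrix(pid_cutoff=80,                    # keep hits with >= 80% identity
                           evalue_cutoff=1e-4,
                           filter_condition='OR',            # pass if either cutoff is met
                           remove_strains_with_no_orthology=True)
ortho_df = atlas.df_orthology_matrix                         # genes (rows) x strains (columns)
print(ortho_df.shape)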
6,442 | caseyjlaw/rtpipe | rtpipe/parsecal.py | casa_sol.set_selection | def set_selection(self, time, freqs, blarr, calname='', radec=(), dist=1, spwind=[], pols=['XX','YY']):
""" Set select parameter that defines time, spw, and pol solutions to apply.
time defines the time to find solutions near in mjd.
freqs defines frequencies to select bandpass solution
blarr is array of size 2xnbl that gives pairs of antennas in each baseline (a la tpipe.blarr).
radec (radian tuple) and dist (deg) define optional location of source for filtering solutions.
spwind is list of indices to be used (e.g., [0,2,4,10])
pols is from d['pols'] (e.g., ['RR']). single or dual parallel allowed.
calname not used. here for uniformity with telcal_sol.
"""
self.spwind = spwind
if calname:
self.logger.warn('calname option not used for casa_sol. Applied based on radec.')
# define pol index
if 'X' in ''.join(pols) or 'Y' in ''.join(pols):
polord = ['XX', 'YY']
elif 'R' in ''.join(pols) or 'L' in ''.join(pols):
polord = ['RR', 'LL']
self.polind = [polord.index(pol) for pol in pols]
self.ant1ind = [n.where(ant1 == n.unique(blarr))[0][0] for (ant1,ant2) in blarr]
self.ant2ind = [n.where(ant2 == n.unique(blarr))[0][0] for (ant1,ant2) in blarr]
# select by smallest time distance for source within some angular region of target
if radec:
ra, dec = radec
calra = n.array(self.radec)[:,0]
caldec = n.array(self.radec)[:,1]
fields = n.where( (n.abs(calra - ra) < n.radians(dist)) & (n.abs(caldec - dec) < n.radians(dist)) )[0]
if len(fields) == 0:
self.logger.warn('Warning: no close calibrator found. Removing radec restriction.')
fields = n.unique(self.uniquefield)
else:
fields = n.unique(self.uniquefield)
sel = []
for field in fields:
sel += list(n.where(field == self.uniquefield)[0])
mjddist = n.abs(time - self.uniquemjd[sel])
closestgain = n.where(mjddist == mjddist.min())[0][0]
self.logger.info('Using gain solution for field %d at MJD %.5f, separated by %d min ' % (self.uniquefield[n.where(self.uniquemjd == self.uniquemjd[sel][closestgain])], self.uniquemjd[closestgain], mjddist[closestgain]*24*60))
self.gain = self.gain.take(self.spwind, axis=2).take(self.polind, axis=3)[closestgain]
if hasattr(self, 'bandpass'):
bins = [n.where(n.min(n.abs(self.bpfreq-selfreq)) == n.abs(self.bpfreq-selfreq))[0][0] for selfreq in freqs]
self.bandpass = self.bandpass.take(bins, axis=1).take(self.polind, axis=2)
self.freqs = freqs
self.logger.debug('Using bandpass at BP bins (1000 bins per spw): %s', str(bins)) | python | def set_selection(self, time, freqs, blarr, calname='', radec=(), dist=1, spwind=[], pols=['XX','YY']):
""" Set select parameter that defines time, spw, and pol solutions to apply.
time defines the time to find solutions near in mjd.
freqs defines frequencies to select bandpass solution
blarr is array of size 2xnbl that gives pairs of antennas in each baseline (a la tpipe.blarr).
radec (radian tuple) and dist (deg) define optional location of source for filtering solutions.
spwind is list of indices to be used (e.g., [0,2,4,10])
pols is from d['pols'] (e.g., ['RR']). single or dual parallel allowed.
calname not used. here for uniformity with telcal_sol.
"""
self.spwind = spwind
if calname:
self.logger.warn('calname option not used for casa_sol. Applied based on radec.')
# define pol index
if 'X' in ''.join(pols) or 'Y' in ''.join(pols):
polord = ['XX', 'YY']
elif 'R' in ''.join(pols) or 'L' in ''.join(pols):
polord = ['RR', 'LL']
self.polind = [polord.index(pol) for pol in pols]
self.ant1ind = [n.where(ant1 == n.unique(blarr))[0][0] for (ant1,ant2) in blarr]
self.ant2ind = [n.where(ant2 == n.unique(blarr))[0][0] for (ant1,ant2) in blarr]
# select by smallest time distance for source within some angular region of target
if radec:
ra, dec = radec
calra = n.array(self.radec)[:,0]
caldec = n.array(self.radec)[:,1]
fields = n.where( (n.abs(calra - ra) < n.radians(dist)) & (n.abs(caldec - dec) < n.radians(dist)) )[0]
if len(fields) == 0:
self.logger.warn('Warning: no close calibrator found. Removing radec restriction.')
fields = n.unique(self.uniquefield)
else:
fields = n.unique(self.uniquefield)
sel = []
for field in fields:
sel += list(n.where(field == self.uniquefield)[0])
mjddist = n.abs(time - self.uniquemjd[sel])
closestgain = n.where(mjddist == mjddist.min())[0][0]
self.logger.info('Using gain solution for field %d at MJD %.5f, separated by %d min ' % (self.uniquefield[n.where(self.uniquemjd == self.uniquemjd[sel][closestgain])], self.uniquemjd[closestgain], mjddist[closestgain]*24*60))
self.gain = self.gain.take(self.spwind, axis=2).take(self.polind, axis=3)[closestgain]
if hasattr(self, 'bandpass'):
bins = [n.where(n.min(n.abs(self.bpfreq-selfreq)) == n.abs(self.bpfreq-selfreq))[0][0] for selfreq in freqs]
self.bandpass = self.bandpass.take(bins, axis=1).take(self.polind, axis=2)
self.freqs = freqs
self.logger.debug('Using bandpass at BP bins (1000 bins per spw): %s', str(bins)) | ['def', 'set_selection', '(', 'self', ',', 'time', ',', 'freqs', ',', 'blarr', ',', 'calname', '=', "''", ',', 'radec', '=', '(', ')', ',', 'dist', '=', '1', ',', 'spwind', '=', '[', ']', ',', 'pols', '=', '[', "'XX'", ',', "'YY'", ']', ')', ':', 'self', '.', 'spwind', '=', 'spwind', 'if', 'calname', ':', 'self', '.', 'logger', '.', 'warn', '(', "'calname option not used for casa_sol. Applied based on radec.'", ')', '# define pol index', 'if', "'X'", 'in', "''", '.', 'join', '(', 'pols', ')', 'or', "'Y'", 'in', "''", '.', 'join', '(', 'pols', ')', ':', 'polord', '=', '[', "'XX'", ',', "'YY'", ']', 'elif', "'R'", 'in', "''", '.', 'join', '(', 'pols', ')', 'or', "'L'", 'in', "''", '.', 'join', '(', 'pols', ')', ':', 'polord', '=', '[', "'RR'", ',', "'LL'", ']', 'self', '.', 'polind', '=', '[', 'polord', '.', 'index', '(', 'pol', ')', 'for', 'pol', 'in', 'pols', ']', 'self', '.', 'ant1ind', '=', '[', 'n', '.', 'where', '(', 'ant1', '==', 'n', '.', 'unique', '(', 'blarr', ')', ')', '[', '0', ']', '[', '0', ']', 'for', '(', 'ant1', ',', 'ant2', ')', 'in', 'blarr', ']', 'self', '.', 'ant2ind', '=', '[', 'n', '.', 'where', '(', 'ant2', '==', 'n', '.', 'unique', '(', 'blarr', ')', ')', '[', '0', ']', '[', '0', ']', 'for', '(', 'ant1', ',', 'ant2', ')', 'in', 'blarr', ']', '# select by smallest time distance for source within some angular region of target', 'if', 'radec', ':', 'ra', ',', 'dec', '=', 'radec', 'calra', '=', 'n', '.', 'array', '(', 'self', '.', 'radec', ')', '[', ':', ',', '0', ']', 'caldec', '=', 'n', '.', 'array', '(', 'self', '.', 'radec', ')', '[', ':', ',', '1', ']', 'fields', '=', 'n', '.', 'where', '(', '(', 'n', '.', 'abs', '(', 'calra', '-', 'ra', ')', '<', 'n', '.', 'radians', '(', 'dist', ')', ')', '&', '(', 'n', '.', 'abs', '(', 'caldec', '-', 'dec', ')', '<', 'n', '.', 'radians', '(', 'dist', ')', ')', ')', '[', '0', ']', 'if', 'len', '(', 'fields', ')', '==', '0', ':', 'self', '.', 'logger', '.', 'warn', '(', "'Warning: no close calibrator found. 
Removing radec restriction.'", ')', 'fields', '=', 'n', '.', 'unique', '(', 'self', '.', 'uniquefield', ')', 'else', ':', 'fields', '=', 'n', '.', 'unique', '(', 'self', '.', 'uniquefield', ')', 'sel', '=', '[', ']', 'for', 'field', 'in', 'fields', ':', 'sel', '+=', 'list', '(', 'n', '.', 'where', '(', 'field', '==', 'self', '.', 'uniquefield', ')', '[', '0', ']', ')', 'mjddist', '=', 'n', '.', 'abs', '(', 'time', '-', 'self', '.', 'uniquemjd', '[', 'sel', ']', ')', 'closestgain', '=', 'n', '.', 'where', '(', 'mjddist', '==', 'mjddist', '.', 'min', '(', ')', ')', '[', '0', ']', '[', '0', ']', 'self', '.', 'logger', '.', 'info', '(', "'Using gain solution for field %d at MJD %.5f, separated by %d min '", '%', '(', 'self', '.', 'uniquefield', '[', 'n', '.', 'where', '(', 'self', '.', 'uniquemjd', '==', 'self', '.', 'uniquemjd', '[', 'sel', ']', '[', 'closestgain', ']', ')', ']', ',', 'self', '.', 'uniquemjd', '[', 'closestgain', ']', ',', 'mjddist', '[', 'closestgain', ']', '*', '24', '*', '60', ')', ')', 'self', '.', 'gain', '=', 'self', '.', 'gain', '.', 'take', '(', 'self', '.', 'spwind', ',', 'axis', '=', '2', ')', '.', 'take', '(', 'self', '.', 'polind', ',', 'axis', '=', '3', ')', '[', 'closestgain', ']', 'if', 'hasattr', '(', 'self', ',', "'bandpass'", ')', ':', 'bins', '=', '[', 'n', '.', 'where', '(', 'n', '.', 'min', '(', 'n', '.', 'abs', '(', 'self', '.', 'bpfreq', '-', 'selfreq', ')', ')', '==', 'n', '.', 'abs', '(', 'self', '.', 'bpfreq', '-', 'selfreq', ')', ')', '[', '0', ']', '[', '0', ']', 'for', 'selfreq', 'in', 'freqs', ']', 'self', '.', 'bandpass', '=', 'self', '.', 'bandpass', '.', 'take', '(', 'bins', ',', 'axis', '=', '1', ')', '.', 'take', '(', 'self', '.', 'polind', ',', 'axis', '=', '2', ')', 'self', '.', 'freqs', '=', 'freqs', 'self', '.', 'logger', '.', 'debug', '(', "'Using bandpass at BP bins (1000 bins per spw): %s'", ',', 'str', '(', 'bins', ')', ')'] | Set select parameter that defines time, spw, and pol solutions to apply.
time defines the time to find solutions near in mjd.
freqs defines frequencies to select bandpass solution
blarr is array of size 2xnbl that gives pairs of antennas in each baseline (a la tpipe.blarr).
radec (radian tuple) and dist (deg) define optional location of source for filtering solutions.
spwind is list of indices to be used (e.g., [0,2,4,10])
pols is from d['pols'] (e.g., ['RR']). single or dual parallel allowed.
calname not used. here for uniformity with telcal_sol. | ['Set', 'select', 'parameter', 'that', 'defines', 'time', 'spw', 'and', 'pol', 'solutions', 'to', 'apply', '.', 'time', 'defines', 'the', 'time', 'to', 'find', 'solutions', 'near', 'in', 'mjd', '.', 'freqs', 'defines', 'frequencies', 'to', 'select', 'bandpass', 'solution', 'blarr', 'is', 'array', 'of', 'size', '2xnbl', 'that', 'gives', 'pairs', 'of', 'antennas', 'in', 'each', 'baseline', '(', 'a', 'la', 'tpipe', '.', 'blarr', ')', '.', 'radec', '(', 'radian', 'tuple', ')', 'and', 'dist', '(', 'deg', ')', 'define', 'optional', 'location', 'of', 'source', 'for', 'filtering', 'solutions', '.', 'spwind', 'is', 'list', 'of', 'indices', 'to', 'be', 'used', '(', 'e', '.', 'g', '.', '[', '0', '2', '4', '10', ']', ')', 'pols', 'is', 'from', 'd', '[', 'pols', ']', '(', 'e', '.', 'g', '.', '[', 'RR', ']', ')', '.', 'single', 'or', 'dual', 'parallel', 'allowed', '.', 'calname', 'not', 'used', '.', 'here', 'for', 'uniformity', 'with', 'telcal_sol', '.'] | train | https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/parsecal.py#L187-L237 |
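A hedged usage sketch of set_selection; it assumes `sols` is an existing casa_sol instance built from a calibration table, and the MJD, frequencies, baseline array, and pointing below are placeholders.

import numpy as n

# Hypothetical call; `sols` is an already-initialised rtpipe casa_sol object.
blarr = n.array([(a1, a2) for a1 in range(1, 5) for a2 in range(a1 + 1, 5)])  # antenna pairs
freqs = n.linspace(1.4e9, 1.5e9, 64)          # placeholder channel frequencies (Hz)
sols.set_selection(time=57000.5,              # MJD near the scan of interest
                   freqs=freqs, blarr=blarr,
                   radec=(0.5, 0.3), dist=1,  # pick a calibrator within 1 deg
                   spwind=[0, 1], pols=['XX', 'YY'])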
6,443 | yunojuno/elasticsearch-django | elasticsearch_django/models.py | SearchDocumentManagerMixin._raw_sql | def _raw_sql(self, values):
"""Prepare SQL statement consisting of a sequence of WHEN .. THEN statements."""
if isinstance(self.model._meta.pk, CharField):
when_clauses = " ".join(
[self._when("'{}'".format(x), y) for (x, y) in values]
)
else:
when_clauses = " ".join([self._when(x, y) for (x, y) in values])
table_name = self.model._meta.db_table
primary_key = self.model._meta.pk.column
return 'SELECT CASE {}."{}" {} ELSE 0 END'.format(
table_name, primary_key, when_clauses
) | python | def _raw_sql(self, values):
"""Prepare SQL statement consisting of a sequence of WHEN .. THEN statements."""
if isinstance(self.model._meta.pk, CharField):
when_clauses = " ".join(
[self._when("'{}'".format(x), y) for (x, y) in values]
)
else:
when_clauses = " ".join([self._when(x, y) for (x, y) in values])
table_name = self.model._meta.db_table
primary_key = self.model._meta.pk.column
return 'SELECT CASE {}."{}" {} ELSE 0 END'.format(
table_name, primary_key, when_clauses
) | ['def', '_raw_sql', '(', 'self', ',', 'values', ')', ':', 'if', 'isinstance', '(', 'self', '.', 'model', '.', '_meta', '.', 'pk', ',', 'CharField', ')', ':', 'when_clauses', '=', '" "', '.', 'join', '(', '[', 'self', '.', '_when', '(', '"\'{}\'"', '.', 'format', '(', 'x', ')', ',', 'y', ')', 'for', '(', 'x', ',', 'y', ')', 'in', 'values', ']', ')', 'else', ':', 'when_clauses', '=', '" "', '.', 'join', '(', '[', 'self', '.', '_when', '(', 'x', ',', 'y', ')', 'for', '(', 'x', ',', 'y', ')', 'in', 'values', ']', ')', 'table_name', '=', 'self', '.', 'model', '.', '_meta', '.', 'db_table', 'primary_key', '=', 'self', '.', 'model', '.', '_meta', '.', 'pk', '.', 'column', 'return', '\'SELECT CASE {}."{}" {} ELSE 0 END\'', '.', 'format', '(', 'table_name', ',', 'primary_key', ',', 'when_clauses', ')'] | Prepare SQL statement consisting of a sequence of WHEN .. THEN statements. | ['Prepare', 'SQL', 'statement', 'consisting', 'of', 'a', 'sequence', 'of', 'WHEN', '..', 'THEN', 'statements', '.'] | train | https://github.com/yunojuno/elasticsearch-django/blob/e8d98d32bcd77f1bedb8f1a22b6523ca44ffd489/elasticsearch_django/models.py#L138-L150 |
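A standalone sketch of the SQL this manager method builds, reproduced outside Django to show the output shape; the table name, primary key column, and (pk, score) pairs are made up, and the WHEN/THEN shape of the _when helper is assumed from context.

def _when(x, y):
    return 'WHEN {} THEN {}'.format(x, y)     # assumed shape of the _when helper

values = [(1, 0.9), (2, 0.5), (3, 0.1)]       # (primary key, rank) pairs
when_clauses = ' '.join(_when(x, y) for (x, y) in values)
sql = 'SELECT CASE {}."{}" {} ELSE 0 END'.format('myapp_article', 'id', when_clauses)
print(sql)
# SELECT CASE myapp_article."id" WHEN 1 THEN 0.9 WHEN 2 THEN 0.5 WHEN 3 THEN 0.1 ELSE 0 END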
6,444 | yymao/generic-catalog-reader | GCR/base.py | BaseGenericCatalog.get_input_kwargs | def get_input_kwargs(self, key=None, default=None):
"""
Deprecated. Use `get_catalog_info` instead.
Get information from the catalog config file.
If *key* is `None`, return the full dict.
"""
warnings.warn("`get_input_kwargs` is deprecated; use `get_catalog_info` instead.", DeprecationWarning)
return self.get_catalog_info(key, default) | python | def get_input_kwargs(self, key=None, default=None):
"""
Deprecated. Use `get_catalog_info` instead.
Get information from the catalog config file.
If *key* is `None`, return the full dict.
"""
warnings.warn("`get_input_kwargs` is deprecated; use `get_catalog_info` instead.", DeprecationWarning)
return self.get_catalog_info(key, default) | ['def', 'get_input_kwargs', '(', 'self', ',', 'key', '=', 'None', ',', 'default', '=', 'None', ')', ':', 'warnings', '.', 'warn', '(', '"`get_input_kwargs` is deprecated; use `get_catalog_info` instead."', ',', 'DeprecationWarning', ')', 'return', 'self', '.', 'get_catalog_info', '(', 'key', ',', 'default', ')'] | Deprecated. Use `get_catalog_info` instead.
Get information from the catalog config file.
If *key* is `None`, return the full dict. | ['Deprecated', '.', 'Use', 'get_catalog_info', 'instead', '.'] | train | https://github.com/yymao/generic-catalog-reader/blob/bc6267ac41b9f68106ed6065184469ac13fdc0b6/GCR/base.py#L162-L170 |
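A small hedged sketch contrasting the deprecated call with its replacement; `gc` is assumed to be an already-loaded GCR catalog instance (how it is loaded is not shown in this record), and 'cosmology' is an example key.

# Hypothetical usage on a loaded catalog object.
full_info = gc.get_catalog_info()                       # preferred: returns the full config dict
cosmology = gc.get_catalog_info('cosmology', default=None)
legacy = gc.get_input_kwargs('cosmology')               # still works, but emits a DeprecationWarning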
6,445 | sassoftware/saspy | saspy/sasdata.py | SASdata.assessModel | def assessModel(self, target: str, prediction: str, nominal: bool = True, event: str = '', **kwargs):
"""
This method will calculate assessment measures using the SAS AA_Model_Eval Macro used for SAS Enterprise Miner.
Not all datasets can be assessed. This is designed for scored data that includes a target and prediction columns
TODO: add code example of build, score, and then assess
:param target: string that represents the target variable in the data
:param prediction: string that represents the numeric prediction column in the data. For nominal targets this should be a probability between (0,1).
:param nominal: boolean to indicate if the Target Variable is nominal because the assessment measures are different.
:param event: string which indicates which value of the nominal target variable is the event vs non-event
:param kwargs:
:return: SAS result object
"""
# submit autocall macro
self.sas.submit("%aamodel;")
objtype = "datastep"
objname = '{s:{c}^{n}}'.format(s=self.table[:3], n=3,
c='_') + self.sas._objcnt() # translate to a libname so needs to be less than 8
code = "%macro proccall(d);\n"
# build parameters
score_table = str(self.libref + '.' + self.table)
binstats = str(objname + '.' + "ASSESSMENTSTATISTICS")
out = str(objname + '.' + "ASSESSMENTBINSTATISTICS")
level = 'interval'
# var = 'P_' + target
if nominal:
level = 'class'
# the user didn't specify the event for a nominal target. Give them the possible choices
try:
if len(event) < 1:
raise Exception(event)
except Exception:
print("No event was specified for a nominal target. Here are possible options:\n")
event_code = "proc hpdmdb data=%s.%s %s classout=work._DMDBCLASSTARGET(keep=name nraw craw level frequency nmisspercent);" % (
self.libref, self.table, self._dsopts())
event_code += "\nclass %s ; \nrun;" % target
event_code += "data _null_; set work._DMDBCLASSTARGET; where ^(NRAW eq . and CRAW eq '') and lowcase(name)=lowcase('%s');" % target
ec = self.sas._io.submit(event_code)
HTML(ec['LST'])
# TODO: Finish output of the list of nominals variables
if nominal:
code += "%%aa_model_eval(DATA=%s%s, TARGET=%s, VAR=%s, level=%s, BINSTATS=%s, bins=100, out=%s, EVENT=%s);" \
% (score_table, self._dsopts(), target, prediction, level, binstats, out, event)
else:
code += "%%aa_model_eval(DATA=%s%s, TARGET=%s, VAR=%s, level=%s, BINSTATS=%s, bins=100, out=%s);" \
% (score_table, self._dsopts(), target, prediction, level, binstats, out)
rename_char = """
data {0};
set {0};
if level in ("INTERVAL", "INT") then do;
rename _sse_ = SumSquaredError
_div_ = Divsor
_ASE_ = AverageSquaredError
_RASE_ = RootAverageSquaredError
_MEANP_ = MeanPredictionValue
_STDP_ = StandardDeviationPrediction
_CVP_ = CoefficientVariationPrediction;
end;
else do;
rename CR = MaxClassificationRate
KSCut = KSCutOff
CRDEPTH = MaxClassificationDepth
MDepth = MedianClassificationDepth
MCut = MedianEventDetectionCutOff
CCut = ClassificationCutOff
_misc_ = MisClassificationRate;
end;
run;
"""
code += rename_char.format(binstats)
if nominal:
# TODO: add graphics code here to return to the SAS results object
graphics ="""
ODS PROCLABEL='ERRORPLOT' ;
proc sgplot data={0};
title "Error and Correct rate by Depth";
series x=depth y=correct_rate;
series x=depth y=error_rate;
yaxis label="Percentage" grid;
run;
/* roc chart */
ODS PROCLABEL='ROCPLOT' ;
proc sgplot data={0};
title "ROC Curve";
series x=one_minus_specificity y=sensitivity;
yaxis grid;
run;
/* Lift and Cumulative Lift */
ODS PROCLABEL='LIFTPLOT' ;
proc sgplot data={0};
Title "Lift and Cumulative Lift";
series x=depth y=c_lift;
series x=depth y=lift;
yaxis grid;
run;
"""
code += graphics.format(out)
code += "run; quit; %mend;\n"
code += "%%mangobj(%s,%s,%s);" % (objname, objtype, self.table)
if self.sas.nosub:
print(code)
return
ll = self.sas.submit(code, 'text')
obj1 = sp2.SASProcCommons._objectmethods(self, objname)
return sp2.SASresults(obj1, self.sas, objname, self.sas.nosub, ll['LOG']) | python | def assessModel(self, target: str, prediction: str, nominal: bool = True, event: str = '', **kwargs):
"""
This method will calculate assessment measures using the SAS AA_Model_Eval Macro used for SAS Enterprise Miner.
Not all datasets can be assessed. This is designed for scored data that includes a target and prediction columns
TODO: add code example of build, score, and then assess
:param target: string that represents the target variable in the data
:param prediction: string that represents the numeric prediction column in the data. For nominal targets this should be a probability between (0,1).
:param nominal: boolean to indicate if the Target Variable is nominal because the assessment measures are different.
:param event: string which indicates which value of the nominal target variable is the event vs non-event
:param kwargs:
:return: SAS result object
"""
# submit autocall macro
self.sas.submit("%aamodel;")
objtype = "datastep"
objname = '{s:{c}^{n}}'.format(s=self.table[:3], n=3,
c='_') + self.sas._objcnt() # translate to a libname so needs to be less than 8
code = "%macro proccall(d);\n"
# build parameters
score_table = str(self.libref + '.' + self.table)
binstats = str(objname + '.' + "ASSESSMENTSTATISTICS")
out = str(objname + '.' + "ASSESSMENTBINSTATISTICS")
level = 'interval'
# var = 'P_' + target
if nominal:
level = 'class'
# the user didn't specify the event for a nominal target. Give them the possible choices
try:
if len(event) < 1:
raise Exception(event)
except Exception:
print("No event was specified for a nominal target. Here are possible options:\n")
event_code = "proc hpdmdb data=%s.%s %s classout=work._DMDBCLASSTARGET(keep=name nraw craw level frequency nmisspercent);" % (
self.libref, self.table, self._dsopts())
event_code += "\nclass %s ; \nrun;" % target
event_code += "data _null_; set work._DMDBCLASSTARGET; where ^(NRAW eq . and CRAW eq '') and lowcase(name)=lowcase('%s');" % target
ec = self.sas._io.submit(event_code)
HTML(ec['LST'])
# TODO: Finish output of the list of nominals variables
if nominal:
code += "%%aa_model_eval(DATA=%s%s, TARGET=%s, VAR=%s, level=%s, BINSTATS=%s, bins=100, out=%s, EVENT=%s);" \
% (score_table, self._dsopts(), target, prediction, level, binstats, out, event)
else:
code += "%%aa_model_eval(DATA=%s%s, TARGET=%s, VAR=%s, level=%s, BINSTATS=%s, bins=100, out=%s);" \
% (score_table, self._dsopts(), target, prediction, level, binstats, out)
rename_char = """
data {0};
set {0};
if level in ("INTERVAL", "INT") then do;
rename _sse_ = SumSquaredError
_div_ = Divsor
_ASE_ = AverageSquaredError
_RASE_ = RootAverageSquaredError
_MEANP_ = MeanPredictionValue
_STDP_ = StandardDeviationPrediction
_CVP_ = CoefficientVariationPrediction;
end;
else do;
rename CR = MaxClassificationRate
KSCut = KSCutOff
CRDEPTH = MaxClassificationDepth
MDepth = MedianClassificationDepth
MCut = MedianEventDetectionCutOff
CCut = ClassificationCutOff
_misc_ = MisClassificationRate;
end;
run;
"""
code += rename_char.format(binstats)
if nominal:
# TODO: add graphics code here to return to the SAS results object
graphics ="""
ODS PROCLABEL='ERRORPLOT' ;
proc sgplot data={0};
title "Error and Correct rate by Depth";
series x=depth y=correct_rate;
series x=depth y=error_rate;
yaxis label="Percentage" grid;
run;
/* roc chart */
ODS PROCLABEL='ROCPLOT' ;
proc sgplot data={0};
title "ROC Curve";
series x=one_minus_specificity y=sensitivity;
yaxis grid;
run;
/* Lift and Cumulative Lift */
ODS PROCLABEL='LIFTPLOT' ;
proc sgplot data={0};
Title "Lift and Cumulative Lift";
series x=depth y=c_lift;
series x=depth y=lift;
yaxis grid;
run;
"""
code += graphics.format(out)
code += "run; quit; %mend;\n"
code += "%%mangobj(%s,%s,%s);" % (objname, objtype, self.table)
if self.sas.nosub:
print(code)
return
ll = self.sas.submit(code, 'text')
obj1 = sp2.SASProcCommons._objectmethods(self, objname)
return sp2.SASresults(obj1, self.sas, objname, self.sas.nosub, ll['LOG']) | ['def', 'assessModel', '(', 'self', ',', 'target', ':', 'str', ',', 'prediction', ':', 'str', ',', 'nominal', ':', 'bool', '=', 'True', ',', 'event', ':', 'str', '=', "''", ',', '*', '*', 'kwargs', ')', ':', '# submit autocall macro', 'self', '.', 'sas', '.', 'submit', '(', '"%aamodel;"', ')', 'objtype', '=', '"datastep"', 'objname', '=', "'{s:{c}^{n}}'", '.', 'format', '(', 's', '=', 'self', '.', 'table', '[', ':', '3', ']', ',', 'n', '=', '3', ',', 'c', '=', "'_'", ')', '+', 'self', '.', 'sas', '.', '_objcnt', '(', ')', '# translate to a libname so needs to be less than 8', 'code', '=', '"%macro proccall(d);\\n"', '# build parameters', 'score_table', '=', 'str', '(', 'self', '.', 'libref', '+', "'.'", '+', 'self', '.', 'table', ')', 'binstats', '=', 'str', '(', 'objname', '+', "'.'", '+', '"ASSESSMENTSTATISTICS"', ')', 'out', '=', 'str', '(', 'objname', '+', "'.'", '+', '"ASSESSMENTBINSTATISTICS"', ')', 'level', '=', "'interval'", "# var = 'P_' + target", 'if', 'nominal', ':', 'level', '=', "'class'", "# the user didn't specify the event for a nominal Give them the possible choices", 'try', ':', 'if', 'len', '(', 'event', ')', '<', '1', ':', 'raise', 'Exception', '(', 'event', ')', 'except', 'Exception', ':', 'print', '(', '"No event was specified for a nominal target. Here are possible options:\\n"', ')', 'event_code', '=', '"proc hpdmdb data=%s.%s %s classout=work._DMDBCLASSTARGET(keep=name nraw craw level frequency nmisspercent);"', '%', '(', 'self', '.', 'libref', ',', 'self', '.', 'table', ',', 'self', '.', '_dsopts', '(', ')', ')', 'event_code', '+=', '"\\nclass %s ; \\nrun;"', '%', 'target', 'event_code', '+=', '"data _null_; set work._DMDBCLASSTARGET; where ^(NRAW eq . 
and CRAW eq \'\') and lowcase(name)=lowcase(\'%s\');"', '%', 'target', 'ec', '=', 'self', '.', 'sas', '.', '_io', '.', 'submit', '(', 'event_code', ')', 'HTML', '(', 'ec', '[', "'LST'", ']', ')', '# TODO: Finish output of the list of nominals variables', 'if', 'nominal', ':', 'code', '+=', '"%%aa_model_eval(DATA=%s%s, TARGET=%s, VAR=%s, level=%s, BINSTATS=%s, bins=100, out=%s, EVENT=%s);"', '%', '(', 'score_table', ',', 'self', '.', '_dsopts', '(', ')', ',', 'target', ',', 'prediction', ',', 'level', ',', 'binstats', ',', 'out', ',', 'event', ')', 'else', ':', 'code', '+=', '"%%aa_model_eval(DATA=%s%s, TARGET=%s, VAR=%s, level=%s, BINSTATS=%s, bins=100, out=%s);"', '%', '(', 'score_table', ',', 'self', '.', '_dsopts', '(', ')', ',', 'target', ',', 'prediction', ',', 'level', ',', 'binstats', ',', 'out', ')', 'rename_char', '=', '"""\n data {0};\n set {0};\n if level in ("INTERVAL", "INT") then do;\n rename _sse_ = SumSquaredError\n _div_ = Divsor\n _ASE_ = AverageSquaredError\n _RASE_ = RootAverageSquaredError\n _MEANP_ = MeanPredictionValue\n _STDP_ = StandardDeviationPrediction\n _CVP_ = CoefficientVariationPrediction;\n end;\n else do;\n rename CR = MaxClassificationRate\n KSCut = KSCutOff\n CRDEPTH = MaxClassificationDepth\n MDepth = MedianClassificationDepth\n MCut = MedianEventDetectionCutOff\n CCut = ClassificationCutOff\n _misc_ = MisClassificationRate;\n end;\n run;\n """', 'code', '+=', 'rename_char', '.', 'format', '(', 'binstats', ')', 'if', 'nominal', ':', '# TODO: add graphics code here to return to the SAS results object', 'graphics', '=', '"""\n ODS PROCLABEL=\'ERRORPLOT\' ;\n proc sgplot data={0};\n title "Error and Correct rate by Depth";\n series x=depth y=correct_rate;\n series x=depth y=error_rate;\n yaxis label="Percentage" grid;\n run;\n /* roc chart */\n ODS PROCLABEL=\'ROCPLOT\' ;\n\n proc sgplot data={0};\n title "ROC Curve";\n series x=one_minus_specificity y=sensitivity;\n yaxis grid;\n run;\n /* Lift and Cumulative Lift */\n ODS PROCLABEL=\'LIFTPLOT\' ;\n proc sgplot data={0};\n Title "Lift and Cumulative Lift";\n series x=depth y=c_lift;\n series x=depth y=lift;\n yaxis grid;\n run;\n """', 'code', '+=', 'graphics', '.', 'format', '(', 'out', ')', 'code', '+=', '"run; quit; %mend;\\n"', 'code', '+=', '"%%mangobj(%s,%s,%s);"', '%', '(', 'objname', ',', 'objtype', ',', 'self', '.', 'table', ')', 'if', 'self', '.', 'sas', '.', 'nosub', ':', 'print', '(', 'code', ')', 'return', 'll', '=', 'self', '.', 'sas', '.', 'submit', '(', 'code', ',', "'text'", ')', 'obj1', '=', 'sp2', '.', 'SASProcCommons', '.', '_objectmethods', '(', 'self', ',', 'objname', ')', 'return', 'sp2', '.', 'SASresults', '(', 'obj1', ',', 'self', '.', 'sas', ',', 'objname', ',', 'self', '.', 'sas', '.', 'nosub', ',', 'll', '[', "'LOG'", ']', ')'] | This method will calculate assessment measures using the SAS AA_Model_Eval Macro used for SAS Enterprise Miner.
Not all datasets can be assessed. This is designed for scored data that includes a target and prediction columns
TODO: add code example of build, score, and then assess
:param target: string that represents the target variable in the data
:param prediction: string that represents the numeric prediction column in the data. For nominal targets this should a probability between (0,1).
:param nominal: boolean to indicate if the Target Variable is nominal because the assessment measures are different.
:param event: string which indicates which value of the nominal target variable is the event vs non-event
:param kwargs:
:return: SAS result object | ['This', 'method', 'will', 'calculate', 'assessment', 'measures', 'using', 'the', 'SAS', 'AA_Model_Eval', 'Macro', 'used', 'for', 'SAS', 'Enterprise', 'Miner', '.', 'Not', 'all', 'datasets', 'can', 'be', 'assessed', '.', 'This', 'is', 'designed', 'for', 'scored', 'data', 'that', 'includes', 'a', 'target', 'and', 'prediction', 'columns', 'TODO', ':', 'add', 'code', 'example', 'of', 'build', 'score', 'and', 'then', 'assess'] | train | https://github.com/sassoftware/saspy/blob/e433f71990f249d3a6c3db323ceb11cb2d462cf9/saspy/sasdata.py#L801-L910 |
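The docstring's TODO asks for a build/score/assess example; the sketch below is a hedged stand-in for that, assuming a SAS session can start with the default saspy configuration and that WORK.SCORED already holds a target column 'bad' and a prediction column 'p_bad' (all three names are placeholders).

import saspy

sas = saspy.SASsession()                          # default configuration assumed
scored = sas.sasdata('scored', libref='work')     # table with target + prediction columns
res = scored.assessModel(target='bad', prediction='p_bad',
                         nominal=True, event='1')
print(res)                                        # SASresults object; assessment tables are
                                                  # exposed as attributes such as ASSESSMENTSTATISTICS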
6,446 | senaite/senaite.core | bika/lims/controlpanel/bika_analysisservices.py | AnalysisServicesView.format_price | def format_price(self, price):
"""Formats the price with the set decimal mark and correct currency
"""
return u"{} {}{}{:02d}".format(
self.currency_symbol,
price[0],
self.decimal_mark,
price[1],
) | python | def format_price(self, price):
"""Formats the price with the set decimal mark and correct currency
"""
return u"{} {}{}{:02d}".format(
self.currency_symbol,
price[0],
self.decimal_mark,
price[1],
) | ['def', 'format_price', '(', 'self', ',', 'price', ')', ':', 'return', 'u"{} {}{}{:02d}"', '.', 'format', '(', 'self', '.', 'currency_symbol', ',', 'price', '[', '0', ']', ',', 'self', '.', 'decimal_mark', ',', 'price', '[', '1', ']', ',', ')'] | Formats the price with the set decimal mark and correct currency | ['Formats', 'the', 'price', 'with', 'the', 'set', 'decimal', 'mark', 'and', 'correct', 'currency'] | train | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/controlpanel/bika_analysisservices.py#L308-L316 |
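A standalone sketch of the formatting itself: the price arrives as a (units, cents) tuple split elsewhere in the view, and the currency symbol and decimal mark below are example values.

currency_symbol, decimal_mark = 'EUR', ','
price = (12, 5)                                   # (units, cents) as produced upstream
formatted = u"{} {}{}{:02d}".format(currency_symbol, price[0], decimal_mark, price[1])
print(formatted)                                  # EUR 12,05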
6,447 | openid/JWTConnect-Python-CryptoJWT | src/cryptojwt/jws/dsa.py | ECDSASigner._cross_check | def _cross_check(self, pub_key):
"""
In Ecdsa, both the key and the algorithm define the curve.
Therefore, we must cross check them to make sure they're the same.
:param key:
:raises: ValueError is the curves are not the same
"""
if self.curve_name != pub_key.curve.name:
raise ValueError(
"The curve in private key {} and in algorithm {} don't "
"match".format(pub_key.curve.name, self.curve_name)) | python | def _cross_check(self, pub_key):
"""
In Ecdsa, both the key and the algorithm define the curve.
Therefore, we must cross check them to make sure they're the same.
:param key:
:raises: ValueError is the curves are not the same
"""
if self.curve_name != pub_key.curve.name:
raise ValueError(
"The curve in private key {} and in algorithm {} don't "
"match".format(pub_key.curve.name, self.curve_name)) | ['def', '_cross_check', '(', 'self', ',', 'pub_key', ')', ':', 'if', 'self', '.', 'curve_name', '!=', 'pub_key', '.', 'curve', '.', 'name', ':', 'raise', 'ValueError', '(', '"The curve in private key {} and in algorithm {} don\'t "', '"match"', '.', 'format', '(', 'pub_key', '.', 'curve', '.', 'name', ',', 'self', '.', 'curve_name', ')', ')'] | In Ecdsa, both the key and the algorithm define the curve.
Therefore, we must cross check them to make sure they're the same.
:param key:
:raises: ValueError is the curves are not the same | ['In', 'Ecdsa', 'both', 'the', 'key', 'and', 'the', 'algorithm', 'define', 'the', 'curve', '.', 'Therefore', 'we', 'must', 'cross', 'check', 'them', 'to', 'make', 'sure', 'they', 're', 'the', 'same', '.'] | train | https://github.com/openid/JWTConnect-Python-CryptoJWT/blob/8863cfbfe77ca885084870b234a66b55bd52930c/src/cryptojwt/jws/dsa.py#L87-L98 |
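A hedged standalone illustration of the same curve-name comparison using the cryptography package directly; the expected curve name is hard-coded here, whereas the signer derives it from the chosen algorithm.

from cryptography.hazmat.primitives.asymmetric import ec

expected_curve_name = 'secp256r1'                 # e.g. what an ES256 signer expects
pub_key = ec.generate_private_key(ec.SECP384R1()).public_key()   # deliberately the wrong curve
try:
    if expected_curve_name != pub_key.curve.name:
        raise ValueError("The curve in private key {} and in algorithm {} don't "
                         "match".format(pub_key.curve.name, expected_curve_name))
except ValueError as err:
    print(err)                                    # secp384r1 vs secp256r1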
6,448 | bitcraft/PyTMX | pytmx/pytmx.py | TiledMap.map_gid2 | def map_gid2(self, tiled_gid):
""" WIP. need to refactor the gid code
:param tiled_gid:
:return:
"""
tiled_gid = int(tiled_gid)
# gidmap is a default dict, so cannot trust to raise KeyError
if tiled_gid in self.gidmap:
return self.gidmap[tiled_gid]
else:
gid = self.register_gid(tiled_gid)
return [(gid, None)] | python | def map_gid2(self, tiled_gid):
""" WIP. need to refactor the gid code
:param tiled_gid:
:return:
"""
tiled_gid = int(tiled_gid)
# gidmap is a default dict, so cannot trust to raise KeyError
if tiled_gid in self.gidmap:
return self.gidmap[tiled_gid]
else:
gid = self.register_gid(tiled_gid)
return [(gid, None)] | ['def', 'map_gid2', '(', 'self', ',', 'tiled_gid', ')', ':', 'tiled_gid', '=', 'int', '(', 'tiled_gid', ')', '# gidmap is a default dict, so cannot trust to raise KeyError', 'if', 'tiled_gid', 'in', 'self', '.', 'gidmap', ':', 'return', 'self', '.', 'gidmap', '[', 'tiled_gid', ']', 'else', ':', 'gid', '=', 'self', '.', 'register_gid', '(', 'tiled_gid', ')', 'return', '[', '(', 'gid', ',', 'None', ')', ']'] | WIP. need to refactor the gid code
:param tiled_gid:
:return: | ['WIP', '.', 'need', 'to', 'refactor', 'the', 'gid', 'code'] | train | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L801-L814 |
6,449 | apache/incubator-mxnet | python/mxnet/symbol/symbol.py | eye | def eye(N, M=0, k=0, dtype=None, **kwargs):
"""Returns a new symbol of 2-D shpae, filled with ones on the diagonal and zeros elsewhere.
Parameters
----------
N: int
Number of rows in the output.
M: int, optional
Number of columns in the output. If 0, defaults to N.
k: int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal,
and a negative value to a lower diagonal.
dtype : str or numpy.dtype, optional
The value type of the inner value, default to ``np.float32``.
Returns
-------
out : Symbol
The created Symbol.
"""
if dtype is None:
dtype = _numpy.float32
return _internal._eye(N, M, k, dtype=dtype, **kwargs) | python | def eye(N, M=0, k=0, dtype=None, **kwargs):
"""Returns a new symbol of 2-D shpae, filled with ones on the diagonal and zeros elsewhere.
Parameters
----------
N: int
Number of rows in the output.
M: int, optional
Number of columns in the output. If 0, defaults to N.
k: int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal,
and a negative value to a lower diagonal.
dtype : str or numpy.dtype, optional
The value type of the inner value, default to ``np.float32``.
Returns
-------
out : Symbol
The created Symbol.
"""
if dtype is None:
dtype = _numpy.float32
return _internal._eye(N, M, k, dtype=dtype, **kwargs) | ['def', 'eye', '(', 'N', ',', 'M', '=', '0', ',', 'k', '=', '0', ',', 'dtype', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', 'dtype', 'is', 'None', ':', 'dtype', '=', '_numpy', '.', 'float32', 'return', '_internal', '.', '_eye', '(', 'N', ',', 'M', ',', 'k', ',', 'dtype', '=', 'dtype', ',', '*', '*', 'kwargs', ')'] | Returns a new symbol of 2-D shape, filled with ones on the diagonal and zeros elsewhere.
Parameters
----------
N: int
Number of rows in the output.
M: int, optional
Number of columns in the output. If 0, defaults to N.
k: int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal,
and a negative value to a lower diagonal.
dtype : str or numpy.dtype, optional
The value type of the inner value, default to ``np.float32``.
Returns
-------
out : Symbol
The created Symbol. | ['Returns', 'a', 'new', 'symbol', 'of', '2', '-', 'D', 'shpae', 'filled', 'with', 'ones', 'on', 'the', 'diagonal', 'and', 'zeros', 'elsewhere', '.'] | train | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/symbol/symbol.py#L2962-L2985 |
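A hedged usage sketch of the symbol above, using Symbol.eval to materialise it on CPU; the shape and diagonal offset are arbitrary example values.

import mxnet as mx

eye_sym = mx.sym.eye(3, M=4, k=1, dtype='float32')   # 3x4, ones on the first superdiagonal
out = eye_sym.eval(ctx=mx.cpu())[0]                  # eval returns a list of output NDArrays
print(out.asnumpy())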
6,450 | Kautenja/nes-py | nes_py/nes_env.py | NESEnv.render | def render(self, mode='human'):
"""
Render the environment.
Args:
mode (str): the mode to render with:
- human: render to the current display
- rgb_array: Return a numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image
Returns:
a numpy array if mode is 'rgb_array', None otherwise
"""
if mode == 'human':
# if the viewer isn't setup, import it and create one
if self.viewer is None:
from ._image_viewer import ImageViewer
# get the caption for the ImageViewer
if self.spec is None:
# if there is no spec, just use the .nes filename
caption = self._rom_path.split('/')[-1]
else:
# set the caption to the OpenAI Gym id
caption = self.spec.id
# create the ImageViewer to display frames
self.viewer = ImageViewer(
caption=caption,
height=SCREEN_HEIGHT,
width=SCREEN_WIDTH,
)
# show the screen on the image viewer
self.viewer.show(self.screen)
elif mode == 'rgb_array':
return self.screen
else:
# unpack the modes as comma delineated strings ('a', 'b', ...)
render_modes = [repr(x) for x in self.metadata['render.modes']]
msg = 'valid render modes are: {}'.format(', '.join(render_modes))
raise NotImplementedError(msg) | python | def render(self, mode='human'):
"""
Render the environment.
Args:
mode (str): the mode to render with:
- human: render to the current display
- rgb_array: Return a numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image
Returns:
a numpy array if mode is 'rgb_array', None otherwise
"""
if mode == 'human':
# if the viewer isn't setup, import it and create one
if self.viewer is None:
from ._image_viewer import ImageViewer
# get the caption for the ImageViewer
if self.spec is None:
# if there is no spec, just use the .nes filename
caption = self._rom_path.split('/')[-1]
else:
# set the caption to the OpenAI Gym id
caption = self.spec.id
# create the ImageViewer to display frames
self.viewer = ImageViewer(
caption=caption,
height=SCREEN_HEIGHT,
width=SCREEN_WIDTH,
)
# show the screen on the image viewer
self.viewer.show(self.screen)
elif mode == 'rgb_array':
return self.screen
else:
# unpack the modes as comma delineated strings ('a', 'b', ...)
render_modes = [repr(x) for x in self.metadata['render.modes']]
msg = 'valid render modes are: {}'.format(', '.join(render_modes))
raise NotImplementedError(msg) | ['def', 'render', '(', 'self', ',', 'mode', '=', "'human'", ')', ':', 'if', 'mode', '==', "'human'", ':', "# if the viewer isn't setup, import it and create one", 'if', 'self', '.', 'viewer', 'is', 'None', ':', 'from', '.', '_image_viewer', 'import', 'ImageViewer', '# get the caption for the ImageViewer', 'if', 'self', '.', 'spec', 'is', 'None', ':', '# if there is no spec, just use the .nes filename', 'caption', '=', 'self', '.', '_rom_path', '.', 'split', '(', "'/'", ')', '[', '-', '1', ']', 'else', ':', '# set the caption to the OpenAI Gym id', 'caption', '=', 'self', '.', 'spec', '.', 'id', '# create the ImageViewer to display frames', 'self', '.', 'viewer', '=', 'ImageViewer', '(', 'caption', '=', 'caption', ',', 'height', '=', 'SCREEN_HEIGHT', ',', 'width', '=', 'SCREEN_WIDTH', ',', ')', '# show the screen on the image viewer', 'self', '.', 'viewer', '.', 'show', '(', 'self', '.', 'screen', ')', 'elif', 'mode', '==', "'rgb_array'", ':', 'return', 'self', '.', 'screen', 'else', ':', "# unpack the modes as comma delineated strings ('a', 'b', ...)", 'render_modes', '=', '[', 'repr', '(', 'x', ')', 'for', 'x', 'in', 'self', '.', 'metadata', '[', "'render.modes'", ']', ']', 'msg', '=', "'valid render modes are: {}'", '.', 'format', '(', "', '", '.', 'join', '(', 'render_modes', ')', ')', 'raise', 'NotImplementedError', '(', 'msg', ')'] | Render the environment.
Args:
mode (str): the mode to render with:
- human: render to the current display
- rgb_array: Return a numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image
Returns:
a numpy array if mode is 'rgb_array', None otherwise | ['Render', 'the', 'environment', '.'] | train | https://github.com/Kautenja/nes-py/blob/a113885198d418f38fcf24b8f79ac508975788c2/nes_py/nes_env.py#L347-L386 |
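A hedged usage sketch for the two render modes documented above; the environment construction and the Gym-style step loop are assumptions (any concrete NESEnv subclass with a valid ROM would do), only the `render` calls come from the method itself:

# hypothetical setup: some concrete NESEnv subclass constructed elsewhere
env = make_nes_env()                      # assumption: factory supplied by the surrounding project
state = env.reset()
done = False
while not done:
    state, reward, done, info = env.step(env.action_space.sample())
    env.render(mode='human')              # draws the frame in the ImageViewer window
frame = env.render(mode='rgb_array')      # (SCREEN_HEIGHT, SCREEN_WIDTH, 3) numpy array
env.close()                               # standard Gym cleanup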
6,451 | hydpy-dev/hydpy | hydpy/cythons/modelutils.py | PyxWriter.calculate_single_terms | def calculate_single_terms(self):
"""Lines of model method with the same name."""
lines = self._call_methods('calculate_single_terms',
self.model.PART_ODE_METHODS)
if lines:
lines.insert(1, (' self.numvars.nmb_calls ='
'self.numvars.nmb_calls+1'))
return lines | python | def calculate_single_terms(self):
"""Lines of model method with the same name."""
lines = self._call_methods('calculate_single_terms',
self.model.PART_ODE_METHODS)
if lines:
lines.insert(1, (' self.numvars.nmb_calls ='
'self.numvars.nmb_calls+1'))
return lines | ['def', 'calculate_single_terms', '(', 'self', ')', ':', 'lines', '=', 'self', '.', '_call_methods', '(', "'calculate_single_terms'", ',', 'self', '.', 'model', '.', 'PART_ODE_METHODS', ')', 'if', 'lines', ':', 'lines', '.', 'insert', '(', '1', ',', '(', "' self.numvars.nmb_calls ='", "'self.numvars.nmb_calls+1'", ')', ')', 'return', 'lines'] | Lines of model method with the same name. | ['Lines', 'of', 'model', 'method', 'with', 'the', 'same', 'name', '.'] | train | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/cythons/modelutils.py#L818-L825 |
6,452 | vlukes/dicom2fem | dicom2fem/ioutils.py | remove_files | def remove_files(root_dir):
"""
Remove all files and directories in supplied root directory.
"""
for dirpath, dirnames, filenames in os.walk(os.path.abspath(root_dir)):
for filename in filenames:
os.remove(os.path.join(root_dir, filename))
for dirname in dirnames:
shutil.rmtree(os.path.join(root_dir, dirname)) | python | def remove_files(root_dir):
"""
Remove all files and directories in supplied root directory.
"""
for dirpath, dirnames, filenames in os.walk(os.path.abspath(root_dir)):
for filename in filenames:
os.remove(os.path.join(root_dir, filename))
for dirname in dirnames:
shutil.rmtree(os.path.join(root_dir, dirname)) | ['def', 'remove_files', '(', 'root_dir', ')', ':', 'for', 'dirpath', ',', 'dirnames', ',', 'filenames', 'in', 'os', '.', 'walk', '(', 'os', '.', 'path', '.', 'abspath', '(', 'root_dir', ')', ')', ':', 'for', 'filename', 'in', 'filenames', ':', 'os', '.', 'remove', '(', 'os', '.', 'path', '.', 'join', '(', 'root_dir', ',', 'filename', ')', ')', 'for', 'dirname', 'in', 'dirnames', ':', 'shutil', '.', 'rmtree', '(', 'os', '.', 'path', '.', 'join', '(', 'root_dir', ',', 'dirname', ')', ')'] | Remove all files and directories in supplied root directory. | ['Remove', 'all', 'files', 'and', 'directories', 'in', 'supplied', 'root', 'directory', '.'] | train | https://github.com/vlukes/dicom2fem/blob/3056c977ca7119e01984d3aa0c4448a1c6c2430f/dicom2fem/ioutils.py#L50-L59 |
6,453 | langloisjp/pysvcmetrics | statsdclient.py | StatsdClient.gauge | def gauge(self, stats, value):
"""
Log gauges
>>> client = StatsdClient()
>>> client.gauge('example.gauge', 47)
>>> client.gauge(('example.gauge41', 'example.gauge43'), 47)
"""
self.update_stats(stats, value, self.SC_GAUGE) | python | def gauge(self, stats, value):
"""
Log gauges
>>> client = StatsdClient()
>>> client.gauge('example.gauge', 47)
>>> client.gauge(('example.gauge41', 'example.gauge43'), 47)
"""
self.update_stats(stats, value, self.SC_GAUGE) | ['def', 'gauge', '(', 'self', ',', 'stats', ',', 'value', ')', ':', 'self', '.', 'update_stats', '(', 'stats', ',', 'value', ',', 'self', '.', 'SC_GAUGE', ')'] | Log gauges
>>> client = StatsdClient()
>>> client.gauge('example.gauge', 47)
>>> client.gauge(('example.gauge41', 'example.gauge43'), 47) | ['Log', 'gauges'] | train | https://github.com/langloisjp/pysvcmetrics/blob/a126fc029ab645d9db46c0f5712c416cdf80e370/statsdclient.py#L40-L48 |
6,454 | marshmallow-code/webargs | src/webargs/tornadoparser.py | TornadoParser.handle_error | def handle_error(self, error, req, schema, error_status_code, error_headers):
"""Handles errors during parsing. Raises a `tornado.web.HTTPError`
with a 400 error.
"""
status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS
if status_code == 422:
reason = "Unprocessable Entity"
else:
reason = None
raise HTTPError(
status_code,
log_message=str(error.messages),
reason=reason,
messages=error.messages,
headers=error_headers,
) | python | def handle_error(self, error, req, schema, error_status_code, error_headers):
"""Handles errors during parsing. Raises a `tornado.web.HTTPError`
with a 400 error.
"""
status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS
if status_code == 422:
reason = "Unprocessable Entity"
else:
reason = None
raise HTTPError(
status_code,
log_message=str(error.messages),
reason=reason,
messages=error.messages,
headers=error_headers,
) | ['def', 'handle_error', '(', 'self', ',', 'error', ',', 'req', ',', 'schema', ',', 'error_status_code', ',', 'error_headers', ')', ':', 'status_code', '=', 'error_status_code', 'or', 'self', '.', 'DEFAULT_VALIDATION_STATUS', 'if', 'status_code', '==', '422', ':', 'reason', '=', '"Unprocessable Entity"', 'else', ':', 'reason', '=', 'None', 'raise', 'HTTPError', '(', 'status_code', ',', 'log_message', '=', 'str', '(', 'error', '.', 'messages', ')', ',', 'reason', '=', 'reason', ',', 'messages', '=', 'error', '.', 'messages', ',', 'headers', '=', 'error_headers', ',', ')'] | Handles errors during parsing. Raises a `tornado.web.HTTPError`
with a 400 error. | ['Handles', 'errors', 'during', 'parsing', '.', 'Raises', 'a', 'tornado', '.', 'web', '.', 'HTTPError', 'with', 'a', '400', 'error', '.'] | train | https://github.com/marshmallow-code/webargs/blob/40cc2d25421d15d9630b1a819f1dcefbbf01ed95/src/webargs/tornadoparser.py#L122-L137 |
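A minimal sketch of how the status code and reason phrase travel on the raised error, using only `tornado.web.HTTPError` arguments that the method above relies on (the HTTPError used by the parser above additionally accepts `messages` and `headers`):

from tornado.web import HTTPError

status_code = 422
reason = "Unprocessable Entity" if status_code == 422 else None   # mirrors the branch above
try:
    raise HTTPError(status_code, log_message="{'age': ['Not a valid integer.']}", reason=reason)
except HTTPError as exc:
    print(exc.status_code, exc.reason)   # 422 Unprocessable Entity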
6,455 | programa-stic/barf-project | barf/core/reil/reil.py | ReilInstruction.operands | def operands(self, value):
"""Set instruction operands.
"""
if len(value) != 3:
raise Exception("Invalid instruction operands : %s" % str(value))
self._operands = value | python | def operands(self, value):
"""Set instruction operands.
"""
if len(value) != 3:
raise Exception("Invalid instruction operands : %s" % str(value))
self._operands = value | ['def', 'operands', '(', 'self', ',', 'value', ')', ':', 'if', 'len', '(', 'value', ')', '!=', '3', ':', 'raise', 'Exception', '(', '"Invalid instruction operands : %s"', '%', 'str', '(', 'value', ')', ')', 'self', '.', '_operands', '=', 'value'] | Set instruction operands. | ['Set', 'instruction', 'operands', '.'] | train | https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/core/reil/reil.py#L272-L278 |
6,456 | twilio/twilio-python | twilio/rest/autopilot/v1/assistant/__init__.py | AssistantContext.field_types | def field_types(self):
"""
Access the field_types
:returns: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeList
:rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeList
"""
if self._field_types is None:
self._field_types = FieldTypeList(self._version, assistant_sid=self._solution['sid'], )
return self._field_types | python | def field_types(self):
"""
Access the field_types
:returns: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeList
:rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeList
"""
if self._field_types is None:
self._field_types = FieldTypeList(self._version, assistant_sid=self._solution['sid'], )
return self._field_types | ['def', 'field_types', '(', 'self', ')', ':', 'if', 'self', '.', '_field_types', 'is', 'None', ':', 'self', '.', '_field_types', '=', 'FieldTypeList', '(', 'self', '.', '_version', ',', 'assistant_sid', '=', 'self', '.', '_solution', '[', "'sid'", ']', ',', ')', 'return', 'self', '.', '_field_types'] | Access the field_types
:returns: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeList
:rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeList | ['Access', 'the', 'field_types'] | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/autopilot/v1/assistant/__init__.py#L328-L337 |
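The property above is a lazy-initialization accessor: the FieldTypeList is only built on first access and cached afterwards; a dependency-free sketch of the same pattern (all names hypothetical):

class Assistant:
    def __init__(self, sid):
        self._sid = sid
        self._field_types = None              # nothing built yet

    @property
    def field_types(self):
        if self._field_types is None:         # first access: build and cache
            self._field_types = ['FieldTypeList for %s' % self._sid]
        return self._field_types

assistant = Assistant('UAxxxxxxxx')
print(assistant.field_types is assistant.field_types)   # True, the cached object is reused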
6,457 | pyamg/pyamg | pyamg/util/utils.py | profile_solver | def profile_solver(ml, accel=None, **kwargs):
"""Profile a particular multilevel object.
Parameters
----------
ml : multilevel
Fully constructed multilevel object
accel : function pointer
Pointer to a valid Krylov solver (e.g. gmres, cg)
Returns
-------
residuals : array
Array of residuals for each iteration
See Also
--------
multilevel.psolve, multilevel.solve
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags, csr_matrix
>>> from scipy.sparse.linalg import cg
>>> from pyamg.classical import ruge_stuben_solver
>>> from pyamg.util.utils import profile_solver
>>> n=100
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = csr_matrix(spdiags(data,[-1,0,1],n,n))
>>> b = A*np.ones(A.shape[0])
>>> ml = ruge_stuben_solver(A, max_coarse=10)
>>> res = profile_solver(ml,accel=cg)
"""
A = ml.levels[0].A
b = A * sp.rand(A.shape[0], 1)
residuals = []
if accel is None:
ml.solve(b, residuals=residuals, **kwargs)
else:
def callback(x):
residuals.append(norm(np.ravel(b) - np.ravel(A*x)))
M = ml.aspreconditioner(cycle=kwargs.get('cycle', 'V'))
accel(A, b, M=M, callback=callback, **kwargs)
return np.asarray(residuals) | python | def profile_solver(ml, accel=None, **kwargs):
"""Profile a particular multilevel object.
Parameters
----------
ml : multilevel
Fully constructed multilevel object
accel : function pointer
Pointer to a valid Krylov solver (e.g. gmres, cg)
Returns
-------
residuals : array
Array of residuals for each iteration
See Also
--------
multilevel.psolve, multilevel.solve
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags, csr_matrix
>>> from scipy.sparse.linalg import cg
>>> from pyamg.classical import ruge_stuben_solver
>>> from pyamg.util.utils import profile_solver
>>> n=100
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = csr_matrix(spdiags(data,[-1,0,1],n,n))
>>> b = A*np.ones(A.shape[0])
>>> ml = ruge_stuben_solver(A, max_coarse=10)
>>> res = profile_solver(ml,accel=cg)
"""
A = ml.levels[0].A
b = A * sp.rand(A.shape[0], 1)
residuals = []
if accel is None:
ml.solve(b, residuals=residuals, **kwargs)
else:
def callback(x):
residuals.append(norm(np.ravel(b) - np.ravel(A*x)))
M = ml.aspreconditioner(cycle=kwargs.get('cycle', 'V'))
accel(A, b, M=M, callback=callback, **kwargs)
return np.asarray(residuals) | ['def', 'profile_solver', '(', 'ml', ',', 'accel', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'A', '=', 'ml', '.', 'levels', '[', '0', ']', '.', 'A', 'b', '=', 'A', '*', 'sp', '.', 'rand', '(', 'A', '.', 'shape', '[', '0', ']', ',', '1', ')', 'residuals', '=', '[', ']', 'if', 'accel', 'is', 'None', ':', 'ml', '.', 'solve', '(', 'b', ',', 'residuals', '=', 'residuals', ',', '*', '*', 'kwargs', ')', 'else', ':', 'def', 'callback', '(', 'x', ')', ':', 'residuals', '.', 'append', '(', 'norm', '(', 'np', '.', 'ravel', '(', 'b', ')', '-', 'np', '.', 'ravel', '(', 'A', '*', 'x', ')', ')', ')', 'M', '=', 'ml', '.', 'aspreconditioner', '(', 'cycle', '=', 'kwargs', '.', 'get', '(', "'cycle'", ',', "'V'", ')', ')', 'accel', '(', 'A', ',', 'b', ',', 'M', '=', 'M', ',', 'callback', '=', 'callback', ',', '*', '*', 'kwargs', ')', 'return', 'np', '.', 'asarray', '(', 'residuals', ')'] | Profile a particular multilevel object.
Parameters
----------
ml : multilevel
Fully constructed multilevel object
accel : function pointer
Pointer to a valid Krylov solver (e.g. gmres, cg)
Returns
-------
residuals : array
Array of residuals for each iteration
See Also
--------
multilevel.psolve, multilevel.solve
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags, csr_matrix
>>> from scipy.sparse.linalg import cg
>>> from pyamg.classical import ruge_stuben_solver
>>> from pyamg.util.utils import profile_solver
>>> n=100
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = csr_matrix(spdiags(data,[-1,0,1],n,n))
>>> b = A*np.ones(A.shape[0])
>>> ml = ruge_stuben_solver(A, max_coarse=10)
>>> res = profile_solver(ml,accel=cg) | ['Profile', 'a', 'particular', 'multilevel', 'object', '.'] | train | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L42-L89 |
6,458 | tanghaibao/goatools | goatools/obo_parser.py | GOTerm.get_all_parents | def get_all_parents(self):
"""Return all parent GO IDs."""
all_parents = set()
for parent in self.parents:
all_parents.add(parent.item_id)
all_parents |= parent.get_all_parents()
return all_parents | python | def get_all_parents(self):
"""Return all parent GO IDs."""
all_parents = set()
for parent in self.parents:
all_parents.add(parent.item_id)
all_parents |= parent.get_all_parents()
return all_parents | ['def', 'get_all_parents', '(', 'self', ')', ':', 'all_parents', '=', 'set', '(', ')', 'for', 'parent', 'in', 'self', '.', 'parents', ':', 'all_parents', '.', 'add', '(', 'parent', '.', 'item_id', ')', 'all_parents', '|=', 'parent', '.', 'get_all_parents', '(', ')', 'return', 'all_parents'] | Return all parent GO IDs. | ['Return', 'all', 'parent', 'GO', 'IDs', '.'] | train | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/obo_parser.py#L205-L211 |
6,459 | OnroerendErfgoed/crabpy_pyramid | crabpy_pyramid/renderers/crab.py | item_land_adapter | def item_land_adapter(obj, request):
"""
Adapter for rendering an item of
:class: `pycountry.db.Data` to json.
"""
return {
'id': obj.alpha_2,
'alpha2': obj.alpha_2,
'alpha3': obj.alpha_3,
'naam': _(obj.name)
} | python | def item_land_adapter(obj, request):
"""
Adapter for rendering an item of
:class: `pycountry.db.Data` to json.
"""
return {
'id': obj.alpha_2,
'alpha2': obj.alpha_2,
'alpha3': obj.alpha_3,
'naam': _(obj.name)
} | ['def', 'item_land_adapter', '(', 'obj', ',', 'request', ')', ':', 'return', '{', "'id'", ':', 'obj', '.', 'alpha_2', ',', "'alpha2'", ':', 'obj', '.', 'alpha_2', ',', "'alpha3'", ':', 'obj', '.', 'alpha_3', ',', "'naam'", ':', '_', '(', 'obj', '.', 'name', ')', '}'] | Adapter for rendering an item of
:class: `pycountry.db.Data` to json. | ['Adapter', 'for', 'rendering', 'an', 'item', 'of', ':', 'class', ':', 'pycountry', '.', 'db', '.', 'Data', 'to', 'json', '.'] | train | https://github.com/OnroerendErfgoed/crabpy_pyramid/blob/b727ea55838d71575db96e987b536a0bac9f6a7a/crabpy_pyramid/renderers/crab.py#L504-L514 |
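Assuming `pycountry` is installed, the adapter above maps one of its country records onto the JSON fields shown; a rough standalone equivalent with the translation call replaced by the plain name:

import pycountry

def land_to_dict(country):
    # same field mapping as the adapter above, minus the Pyramid renderer and gettext
    return {
        'id': country.alpha_2,
        'alpha2': country.alpha_2,
        'alpha3': country.alpha_3,
        'naam': country.name,
    }

belgium = pycountry.countries.get(alpha_2='BE')
print(land_to_dict(belgium))
# {'id': 'BE', 'alpha2': 'BE', 'alpha3': 'BEL', 'naam': 'Belgium'}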
6,460 | Microsoft/nni | examples/trials/ga_squad/trial.py | generate_data | def generate_data(path, tokenizer, char_vcb, word_vcb, is_training=False):
'''
Generate data
'''
global root_path
qp_pairs = data.load_from_file(path=path, is_training=is_training)
tokenized_sent = 0
# qp_pairs = qp_pairs[:1000]1
for qp_pair in qp_pairs:
tokenized_sent += 1
data.tokenize(qp_pair, tokenizer, is_training)
for word in qp_pair['question_tokens']:
word_vcb.add(word['word'])
for char in word['word']:
char_vcb.add(char)
for word in qp_pair['passage_tokens']:
word_vcb.add(word['word'])
for char in word['word']:
char_vcb.add(char)
max_query_length = max(len(x['question_tokens']) for x in qp_pairs)
max_passage_length = max(len(x['passage_tokens']) for x in qp_pairs)
#min_passage_length = min(len(x['passage_tokens']) for x in qp_pairs)
cfg.max_query_length = max_query_length
cfg.max_passage_length = max_passage_length
return qp_pairs | python | def generate_data(path, tokenizer, char_vcb, word_vcb, is_training=False):
'''
Generate data
'''
global root_path
qp_pairs = data.load_from_file(path=path, is_training=is_training)
tokenized_sent = 0
# qp_pairs = qp_pairs[:1000]1
for qp_pair in qp_pairs:
tokenized_sent += 1
data.tokenize(qp_pair, tokenizer, is_training)
for word in qp_pair['question_tokens']:
word_vcb.add(word['word'])
for char in word['word']:
char_vcb.add(char)
for word in qp_pair['passage_tokens']:
word_vcb.add(word['word'])
for char in word['word']:
char_vcb.add(char)
max_query_length = max(len(x['question_tokens']) for x in qp_pairs)
max_passage_length = max(len(x['passage_tokens']) for x in qp_pairs)
#min_passage_length = min(len(x['passage_tokens']) for x in qp_pairs)
cfg.max_query_length = max_query_length
cfg.max_passage_length = max_passage_length
return qp_pairs | ['def', 'generate_data', '(', 'path', ',', 'tokenizer', ',', 'char_vcb', ',', 'word_vcb', ',', 'is_training', '=', 'False', ')', ':', 'global', 'root_path', 'qp_pairs', '=', 'data', '.', 'load_from_file', '(', 'path', '=', 'path', ',', 'is_training', '=', 'is_training', ')', 'tokenized_sent', '=', '0', '# qp_pairs = qp_pairs[:1000]1', 'for', 'qp_pair', 'in', 'qp_pairs', ':', 'tokenized_sent', '+=', '1', 'data', '.', 'tokenize', '(', 'qp_pair', ',', 'tokenizer', ',', 'is_training', ')', 'for', 'word', 'in', 'qp_pair', '[', "'question_tokens'", ']', ':', 'word_vcb', '.', 'add', '(', 'word', '[', "'word'", ']', ')', 'for', 'char', 'in', 'word', '[', "'word'", ']', ':', 'char_vcb', '.', 'add', '(', 'char', ')', 'for', 'word', 'in', 'qp_pair', '[', "'passage_tokens'", ']', ':', 'word_vcb', '.', 'add', '(', 'word', '[', "'word'", ']', ')', 'for', 'char', 'in', 'word', '[', "'word'", ']', ':', 'char_vcb', '.', 'add', '(', 'char', ')', 'max_query_length', '=', 'max', '(', 'len', '(', 'x', '[', "'question_tokens'", ']', ')', 'for', 'x', 'in', 'qp_pairs', ')', 'max_passage_length', '=', 'max', '(', 'len', '(', 'x', '[', "'passage_tokens'", ']', ')', 'for', 'x', 'in', 'qp_pairs', ')', "#min_passage_length = min(len(x['passage_tokens']) for x in qp_pairs)", 'cfg', '.', 'max_query_length', '=', 'max_query_length', 'cfg', '.', 'max_passage_length', '=', 'max_passage_length', 'return', 'qp_pairs'] | Generate data | ['Generate', 'data'] | train | https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/examples/trials/ga_squad/trial.py#L272-L299 |
6,461 | numenta/htmresearch | projects/combined_sequences/combined_sequences.py | runExperiment5A | def runExperiment5A(dirName):
"""
This runs the first experiment in the section "Simulations with Sensorimotor
Sequences", an example sensorimotor sequence.
"""
# Results are put into a pkl file which can be used to generate the plots.
# dirName is the absolute path where the pkl file will be placed.
resultsFilename = os.path.join(dirName, "sensorimotor_sequence_example.pkl")
results = runExperiment(
{
"numSequences": 0,
"seqLength": 10,
"numFeatures": 100,
"trialNum": 4,
"numObjects": 50,
"numLocations": 100,
}
)
# Pickle results for plotting and possible later debugging
with open(resultsFilename, "wb") as f:
cPickle.dump(results, f) | python | def runExperiment5A(dirName):
"""
This runs the first experiment in the section "Simulations with Sensorimotor
Sequences", an example sensorimotor sequence.
"""
# Results are put into a pkl file which can be used to generate the plots.
# dirName is the absolute path where the pkl file will be placed.
resultsFilename = os.path.join(dirName, "sensorimotor_sequence_example.pkl")
results = runExperiment(
{
"numSequences": 0,
"seqLength": 10,
"numFeatures": 100,
"trialNum": 4,
"numObjects": 50,
"numLocations": 100,
}
)
# Pickle results for plotting and possible later debugging
with open(resultsFilename, "wb") as f:
cPickle.dump(results, f) | ['def', 'runExperiment5A', '(', 'dirName', ')', ':', '# Results are put into a pkl file which can be used to generate the plots.', '# dirName is the absolute path where the pkl file will be placed.', 'resultsFilename', '=', 'os', '.', 'path', '.', 'join', '(', 'dirName', ',', '"sensorimotor_sequence_example.pkl"', ')', 'results', '=', 'runExperiment', '(', '{', '"numSequences"', ':', '0', ',', '"seqLength"', ':', '10', ',', '"numFeatures"', ':', '100', ',', '"trialNum"', ':', '4', ',', '"numObjects"', ':', '50', ',', '"numLocations"', ':', '100', ',', '}', ')', '# Pickle results for plotting and possible later debugging', 'with', 'open', '(', 'resultsFilename', ',', '"wb"', ')', 'as', 'f', ':', 'cPickle', '.', 'dump', '(', 'results', ',', 'f', ')'] | This runs the first experiment in the section "Simulations with Sensorimotor
Sequences", an example sensorimotor sequence. | ['This', 'runs', 'the', 'first', 'experiment', 'in', 'the', 'section', 'Simulations', 'with', 'Sensorimotor', 'Sequences', 'an', 'example', 'sensorimotor', 'sequence', '.'] | train | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/combined_sequences/combined_sequences.py#L759-L780 |
6,462 | gem/oq-engine | openquake/hazardlib/gsim/cauzzi_faccioli_2008.py | CauzziFaccioli2008._get_stddevs | def _get_stddevs(self, C, stddev_types, num_sites):
"""
Return total standard deviation.
"""
stddevs = []
for stddev_type in stddev_types:
assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
stddevs.append(np.log(10 ** C['sigma']) + np.zeros(num_sites))
return stddevs | python | def _get_stddevs(self, C, stddev_types, num_sites):
"""
Return total standard deviation.
"""
stddevs = []
for stddev_type in stddev_types:
assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
stddevs.append(np.log(10 ** C['sigma']) + np.zeros(num_sites))
return stddevs | ['def', '_get_stddevs', '(', 'self', ',', 'C', ',', 'stddev_types', ',', 'num_sites', ')', ':', 'stddevs', '=', '[', ']', 'for', 'stddev_type', 'in', 'stddev_types', ':', 'assert', 'stddev_type', 'in', 'self', '.', 'DEFINED_FOR_STANDARD_DEVIATION_TYPES', 'stddevs', '.', 'append', '(', 'np', '.', 'log', '(', '10', '**', 'C', '[', "'sigma'", ']', ')', '+', 'np', '.', 'zeros', '(', 'num_sites', ')', ')', 'return', 'stddevs'] | Return total standard deviation. | ['Return', 'total', 'standard', 'deviation', '.'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/cauzzi_faccioli_2008.py#L177-L186 |
6,463 | tensorpack/tensorpack | tensorpack/utils/utils.py | change_env | def change_env(name, val):
"""
Args:
name(str), val(str):
Returns:
a context where the environment variable ``name`` being set to
``val``. It will be set back after the context exits.
"""
oldval = os.environ.get(name, None)
os.environ[name] = val
yield
if oldval is None:
del os.environ[name]
else:
os.environ[name] = oldval | python | def change_env(name, val):
"""
Args:
name(str), val(str):
Returns:
a context where the environment variable ``name`` being set to
``val``. It will be set back after the context exits.
"""
oldval = os.environ.get(name, None)
os.environ[name] = val
yield
if oldval is None:
del os.environ[name]
else:
os.environ[name] = oldval | ['def', 'change_env', '(', 'name', ',', 'val', ')', ':', 'oldval', '=', 'os', '.', 'environ', '.', 'get', '(', 'name', ',', 'None', ')', 'os', '.', 'environ', '[', 'name', ']', '=', 'val', 'yield', 'if', 'oldval', 'is', 'None', ':', 'del', 'os', '.', 'environ', '[', 'name', ']', 'else', ':', 'os', '.', 'environ', '[', 'name', ']', '=', 'oldval'] | Args:
name(str), val(str):
Returns:
a context where the environment variable ``name`` being set to
``val``. It will be set back after the context exits. | ['Args', ':', 'name', '(', 'str', ')', 'val', '(', 'str', ')', ':'] | train | https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/utils/utils.py#L69-L84 |
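A usage sketch for `change_env`; the import path is an assumption based on the file location above, and the `with` form presumes the function is wrapped with `contextlib.contextmanager` in the source (the bare `yield` and the docstring both point that way):

import os
from tensorpack.utils.utils import change_env   # assumed import path

assert 'TP_DEMO_FLAG' not in os.environ
with change_env('TP_DEMO_FLAG', '1'):
    assert os.environ['TP_DEMO_FLAG'] == '1'     # temporary value visible inside the block
assert 'TP_DEMO_FLAG' not in os.environ          # removed again because it did not exist before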
6,464 | QUANTAXIS/QUANTAXIS | QUANTAXIS/QAData/base_datastruct.py | _quotation_base.reindex | def reindex(self, ind):
"""reindex
Arguments:
ind {[type]} -- [description]
Raises:
RuntimeError -- [description]
RuntimeError -- [description]
Returns:
[type] -- [description]
"""
if isinstance(ind, pd.MultiIndex):
try:
return self.new(self.data.reindex(ind))
except:
raise RuntimeError('QADATASTRUCT ERROR: CANNOT REINDEX')
else:
raise RuntimeError(
'QADATASTRUCT ERROR: ONLY ACCEPT MULTI-INDEX FORMAT'
) | python | def reindex(self, ind):
"""reindex
Arguments:
ind {[type]} -- [description]
Raises:
RuntimeError -- [description]
RuntimeError -- [description]
Returns:
[type] -- [description]
"""
if isinstance(ind, pd.MultiIndex):
try:
return self.new(self.data.reindex(ind))
except:
raise RuntimeError('QADATASTRUCT ERROR: CANNOT REINDEX')
else:
raise RuntimeError(
'QADATASTRUCT ERROR: ONLY ACCEPT MULTI-INDEX FORMAT'
) | ['def', 'reindex', '(', 'self', ',', 'ind', ')', ':', 'if', 'isinstance', '(', 'ind', ',', 'pd', '.', 'MultiIndex', ')', ':', 'try', ':', 'return', 'self', '.', 'new', '(', 'self', '.', 'data', '.', 'reindex', '(', 'ind', ')', ')', 'except', ':', 'raise', 'RuntimeError', '(', "'QADATASTRUCT ERROR: CANNOT REINDEX'", ')', 'else', ':', 'raise', 'RuntimeError', '(', "'QADATASTRUCT ERROR: ONLY ACCEPT MULTI-INDEX FORMAT'", ')'] | reindex
Arguments:
ind {[type]} -- [description]
Raises:
RuntimeError -- [description]
RuntimeError -- [description]
Returns:
[type] -- [description] | ['reindex'] | train | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAData/base_datastruct.py#L863-L885 |
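The method above only accepts a pandas MultiIndex and delegates to `DataFrame.reindex`; a pandas-only sketch of what such an index looks like and what reindexing does with it (hypothetical prices):

import pandas as pd

idx = pd.MultiIndex.from_product(
    [pd.to_datetime(['2021-01-04', '2021-01-05']), ['000001', '600000']],
    names=['date', 'code'])

# only two of the four (date, code) pairs carry data
df = pd.DataFrame({'close': [10.0, 20.0]}, index=idx[:2])

print(df.reindex(idx))   # the missing pairs come back as NaN rows, index order preserved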
6,465 | ucsb-cs-education/hairball | hairball/plugins/checks.py | SaySoundSync.analyze | def analyze(self, scratch, **kwargs):
"""Categorize instances of attempted say and sound synchronization."""
errors = Counter()
for script in self.iter_scripts(scratch):
prev_name, prev_depth, prev_block = '', 0, script.blocks[0]
gen = self.iter_blocks(script.blocks)
for name, depth, block in gen:
if prev_depth == depth:
if prev_name in self.SAY_THINK:
if name == 'play sound %s until done':
if not self.is_blank(prev_block.args[0]):
errors += self.check(gen)
# TODO: What about play sound?
elif prev_name in self.SAY_THINK_DURATION and \
'play sound %s' in name:
errors['1'] += 1
elif prev_name == 'play sound %s':
if name in self.SAY_THINK:
errors[self.INCORRECT] += 1
elif name in self.SAY_THINK_DURATION:
if self.is_blank(block.args[0]):
errors[self.ERROR] += 1
else:
errors[self.HACKISH] += 1
elif prev_name == 'play sound %s until done' and \
name in self.ALL_SAY_THINK:
if not self.is_blank(block.args[0]):
errors[self.INCORRECT] += 1
# TODO: Should there be an else clause here?
prev_name, prev_depth, prev_block = name, depth, block
return {'sound': errors} | python | def analyze(self, scratch, **kwargs):
"""Categorize instances of attempted say and sound synchronization."""
errors = Counter()
for script in self.iter_scripts(scratch):
prev_name, prev_depth, prev_block = '', 0, script.blocks[0]
gen = self.iter_blocks(script.blocks)
for name, depth, block in gen:
if prev_depth == depth:
if prev_name in self.SAY_THINK:
if name == 'play sound %s until done':
if not self.is_blank(prev_block.args[0]):
errors += self.check(gen)
# TODO: What about play sound?
elif prev_name in self.SAY_THINK_DURATION and \
'play sound %s' in name:
errors['1'] += 1
elif prev_name == 'play sound %s':
if name in self.SAY_THINK:
errors[self.INCORRECT] += 1
elif name in self.SAY_THINK_DURATION:
if self.is_blank(block.args[0]):
errors[self.ERROR] += 1
else:
errors[self.HACKISH] += 1
elif prev_name == 'play sound %s until done' and \
name in self.ALL_SAY_THINK:
if not self.is_blank(block.args[0]):
errors[self.INCORRECT] += 1
# TODO: Should there be an else clause here?
prev_name, prev_depth, prev_block = name, depth, block
return {'sound': errors} | ['def', 'analyze', '(', 'self', ',', 'scratch', ',', '*', '*', 'kwargs', ')', ':', 'errors', '=', 'Counter', '(', ')', 'for', 'script', 'in', 'self', '.', 'iter_scripts', '(', 'scratch', ')', ':', 'prev_name', ',', 'prev_depth', ',', 'prev_block', '=', "''", ',', '0', ',', 'script', '.', 'blocks', '[', '0', ']', 'gen', '=', 'self', '.', 'iter_blocks', '(', 'script', '.', 'blocks', ')', 'for', 'name', ',', 'depth', ',', 'block', 'in', 'gen', ':', 'if', 'prev_depth', '==', 'depth', ':', 'if', 'prev_name', 'in', 'self', '.', 'SAY_THINK', ':', 'if', 'name', '==', "'play sound %s until done'", ':', 'if', 'not', 'self', '.', 'is_blank', '(', 'prev_block', '.', 'args', '[', '0', ']', ')', ':', 'errors', '+=', 'self', '.', 'check', '(', 'gen', ')', '# TODO: What about play sound?', 'elif', 'prev_name', 'in', 'self', '.', 'SAY_THINK_DURATION', 'and', "'play sound %s'", 'in', 'name', ':', 'errors', '[', "'1'", ']', '+=', '1', 'elif', 'prev_name', '==', "'play sound %s'", ':', 'if', 'name', 'in', 'self', '.', 'SAY_THINK', ':', 'errors', '[', 'self', '.', 'INCORRECT', ']', '+=', '1', 'elif', 'name', 'in', 'self', '.', 'SAY_THINK_DURATION', ':', 'if', 'self', '.', 'is_blank', '(', 'block', '.', 'args', '[', '0', ']', ')', ':', 'errors', '[', 'self', '.', 'ERROR', ']', '+=', '1', 'else', ':', 'errors', '[', 'self', '.', 'HACKISH', ']', '+=', '1', 'elif', 'prev_name', '==', "'play sound %s until done'", 'and', 'name', 'in', 'self', '.', 'ALL_SAY_THINK', ':', 'if', 'not', 'self', '.', 'is_blank', '(', 'block', '.', 'args', '[', '0', ']', ')', ':', 'errors', '[', 'self', '.', 'INCORRECT', ']', '+=', '1', '# TODO: Should there be an else clause here?', 'prev_name', ',', 'prev_depth', ',', 'prev_block', '=', 'name', ',', 'depth', ',', 'block', 'return', '{', "'sound'", ':', 'errors', '}'] | Categorize instances of attempted say and sound synchronization. | ['Categorize', 'instances', 'of', 'attempted', 'say', 'and', 'sound', 'synchronization', '.'] | train | https://github.com/ucsb-cs-education/hairball/blob/c6da8971f8a34e88ce401d36b51431715e1dff5b/hairball/plugins/checks.py#L203-L233 |
6,466 | juju-solutions/charms.reactive | charms/reactive/decorators.py | _expand_endpoint_name | def _expand_endpoint_name(endpoint_name, flags):
"""
Populate any ``{endpoint_name}`` tags in the flag names for the given
handler, based on the handlers module / file name.
"""
return tuple(flag.format(endpoint_name=endpoint_name) for flag in flags) | python | def _expand_endpoint_name(endpoint_name, flags):
"""
Populate any ``{endpoint_name}`` tags in the flag names for the given
handler, based on the handlers module / file name.
"""
return tuple(flag.format(endpoint_name=endpoint_name) for flag in flags) | ['def', '_expand_endpoint_name', '(', 'endpoint_name', ',', 'flags', ')', ':', 'return', 'tuple', '(', 'flag', '.', 'format', '(', 'endpoint_name', '=', 'endpoint_name', ')', 'for', 'flag', 'in', 'flags', ')'] | Populate any ``{endpoint_name}`` tags in the flag names for the given
handler, based on the handlers module / file name. | ['Populate', 'any', '{', 'endpoint_name', '}', 'tags', 'in', 'the', 'flag', 'names', 'for', 'the', 'given', 'handler', 'based', 'on', 'the', 'handlers', 'module', '/', 'file', 'name', '.'] | train | https://github.com/juju-solutions/charms.reactive/blob/e37e781432e77c12b63d2c739bd6cd70d3230c3a/charms/reactive/decorators.py#L289-L294 |
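Behaviour sketch for the helper above, reproduced inline so it runs without charms.reactive installed (the flag templates are hypothetical):

def _expand_endpoint_name(endpoint_name, flags):
    # substitute the endpoint name into every '{endpoint_name}' placeholder
    return tuple(flag.format(endpoint_name=endpoint_name) for flag in flags)

print(_expand_endpoint_name('db', ('endpoint.{endpoint_name}.joined',
                                   'endpoint.{endpoint_name}.changed')))
# ('endpoint.db.joined', 'endpoint.db.changed')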
6,467 | johnbywater/eventsourcing | eventsourcing/utils/cipher/aes.py | AESCipher.encrypt | def encrypt(self, plaintext):
"""Return ciphertext for given plaintext."""
# String to bytes.
plainbytes = plaintext.encode('utf8')
# Compress plaintext bytes.
compressed = zlib.compress(plainbytes)
# Construct AES-GCM cipher, with 96-bit nonce.
cipher = AES.new(self.cipher_key, AES.MODE_GCM, nonce=random_bytes(12))
# Encrypt and digest.
encrypted, tag = cipher.encrypt_and_digest(compressed)
# Combine with nonce.
combined = cipher.nonce + tag + encrypted
# Encode as Base64.
cipherbytes = base64.b64encode(combined)
# Bytes to string.
ciphertext = cipherbytes.decode('utf8')
# Return ciphertext.
return ciphertext | python | def encrypt(self, plaintext):
"""Return ciphertext for given plaintext."""
# String to bytes.
plainbytes = plaintext.encode('utf8')
# Compress plaintext bytes.
compressed = zlib.compress(plainbytes)
# Construct AES-GCM cipher, with 96-bit nonce.
cipher = AES.new(self.cipher_key, AES.MODE_GCM, nonce=random_bytes(12))
# Encrypt and digest.
encrypted, tag = cipher.encrypt_and_digest(compressed)
# Combine with nonce.
combined = cipher.nonce + tag + encrypted
# Encode as Base64.
cipherbytes = base64.b64encode(combined)
# Bytes to string.
ciphertext = cipherbytes.decode('utf8')
# Return ciphertext.
return ciphertext | ['def', 'encrypt', '(', 'self', ',', 'plaintext', ')', ':', '# String to bytes.', 'plainbytes', '=', 'plaintext', '.', 'encode', '(', "'utf8'", ')', '# Compress plaintext bytes.', 'compressed', '=', 'zlib', '.', 'compress', '(', 'plainbytes', ')', '# Construct AES-GCM cipher, with 96-bit nonce.', 'cipher', '=', 'AES', '.', 'new', '(', 'self', '.', 'cipher_key', ',', 'AES', '.', 'MODE_GCM', ',', 'nonce', '=', 'random_bytes', '(', '12', ')', ')', '# Encrypt and digest.', 'encrypted', ',', 'tag', '=', 'cipher', '.', 'encrypt_and_digest', '(', 'compressed', ')', '# Combine with nonce.', 'combined', '=', 'cipher', '.', 'nonce', '+', 'tag', '+', 'encrypted', '# Encode as Base64.', 'cipherbytes', '=', 'base64', '.', 'b64encode', '(', 'combined', ')', '# Bytes to string.', 'ciphertext', '=', 'cipherbytes', '.', 'decode', '(', "'utf8'", ')', '# Return ciphertext.', 'return', 'ciphertext'] | Return ciphertext for given plaintext. | ['Return', 'ciphertext', 'for', 'given', 'plaintext', '.'] | train | https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/utils/cipher/aes.py#L24-L49 |
6,468 | saltstack/salt | salt/modules/neutron.py | _auth | def _auth(profile=None):
'''
Set up neutron credentials
'''
if profile:
credentials = __salt__['config.option'](profile)
user = credentials['keystone.user']
password = credentials['keystone.password']
tenant = credentials['keystone.tenant']
auth_url = credentials['keystone.auth_url']
region_name = credentials.get('keystone.region_name', None)
service_type = credentials.get('keystone.service_type', 'network')
os_auth_system = credentials.get('keystone.os_auth_system', None)
use_keystoneauth = credentials.get('keystone.use_keystoneauth', False)
verify = credentials.get('keystone.verify', True)
else:
user = __salt__['config.option']('keystone.user')
password = __salt__['config.option']('keystone.password')
tenant = __salt__['config.option']('keystone.tenant')
auth_url = __salt__['config.option']('keystone.auth_url')
region_name = __salt__['config.option']('keystone.region_name')
service_type = __salt__['config.option']('keystone.service_type')
os_auth_system = __salt__['config.option']('keystone.os_auth_system')
use_keystoneauth = __salt__['config.option']('keystone.use_keystoneauth')
verify = __salt__['config.option']('keystone.verify')
if use_keystoneauth is True:
project_domain_name = credentials['keystone.project_domain_name']
user_domain_name = credentials['keystone.user_domain_name']
kwargs = {
'username': user,
'password': password,
'tenant_name': tenant,
'auth_url': auth_url,
'region_name': region_name,
'service_type': service_type,
'os_auth_plugin': os_auth_system,
'use_keystoneauth': use_keystoneauth,
'verify': verify,
'project_domain_name': project_domain_name,
'user_domain_name': user_domain_name
}
else:
kwargs = {
'username': user,
'password': password,
'tenant_name': tenant,
'auth_url': auth_url,
'region_name': region_name,
'service_type': service_type,
'os_auth_plugin': os_auth_system
}
return suoneu.SaltNeutron(**kwargs) | python | def _auth(profile=None):
'''
Set up neutron credentials
'''
if profile:
credentials = __salt__['config.option'](profile)
user = credentials['keystone.user']
password = credentials['keystone.password']
tenant = credentials['keystone.tenant']
auth_url = credentials['keystone.auth_url']
region_name = credentials.get('keystone.region_name', None)
service_type = credentials.get('keystone.service_type', 'network')
os_auth_system = credentials.get('keystone.os_auth_system', None)
use_keystoneauth = credentials.get('keystone.use_keystoneauth', False)
verify = credentials.get('keystone.verify', True)
else:
user = __salt__['config.option']('keystone.user')
password = __salt__['config.option']('keystone.password')
tenant = __salt__['config.option']('keystone.tenant')
auth_url = __salt__['config.option']('keystone.auth_url')
region_name = __salt__['config.option']('keystone.region_name')
service_type = __salt__['config.option']('keystone.service_type')
os_auth_system = __salt__['config.option']('keystone.os_auth_system')
use_keystoneauth = __salt__['config.option']('keystone.use_keystoneauth')
verify = __salt__['config.option']('keystone.verify')
if use_keystoneauth is True:
project_domain_name = credentials['keystone.project_domain_name']
user_domain_name = credentials['keystone.user_domain_name']
kwargs = {
'username': user,
'password': password,
'tenant_name': tenant,
'auth_url': auth_url,
'region_name': region_name,
'service_type': service_type,
'os_auth_plugin': os_auth_system,
'use_keystoneauth': use_keystoneauth,
'verify': verify,
'project_domain_name': project_domain_name,
'user_domain_name': user_domain_name
}
else:
kwargs = {
'username': user,
'password': password,
'tenant_name': tenant,
'auth_url': auth_url,
'region_name': region_name,
'service_type': service_type,
'os_auth_plugin': os_auth_system
}
return suoneu.SaltNeutron(**kwargs) | ['def', '_auth', '(', 'profile', '=', 'None', ')', ':', 'if', 'profile', ':', 'credentials', '=', '__salt__', '[', "'config.option'", ']', '(', 'profile', ')', 'user', '=', 'credentials', '[', "'keystone.user'", ']', 'password', '=', 'credentials', '[', "'keystone.password'", ']', 'tenant', '=', 'credentials', '[', "'keystone.tenant'", ']', 'auth_url', '=', 'credentials', '[', "'keystone.auth_url'", ']', 'region_name', '=', 'credentials', '.', 'get', '(', "'keystone.region_name'", ',', 'None', ')', 'service_type', '=', 'credentials', '.', 'get', '(', "'keystone.service_type'", ',', "'network'", ')', 'os_auth_system', '=', 'credentials', '.', 'get', '(', "'keystone.os_auth_system'", ',', 'None', ')', 'use_keystoneauth', '=', 'credentials', '.', 'get', '(', "'keystone.use_keystoneauth'", ',', 'False', ')', 'verify', '=', 'credentials', '.', 'get', '(', "'keystone.verify'", ',', 'True', ')', 'else', ':', 'user', '=', '__salt__', '[', "'config.option'", ']', '(', "'keystone.user'", ')', 'password', '=', '__salt__', '[', "'config.option'", ']', '(', "'keystone.password'", ')', 'tenant', '=', '__salt__', '[', "'config.option'", ']', '(', "'keystone.tenant'", ')', 'auth_url', '=', '__salt__', '[', "'config.option'", ']', '(', "'keystone.auth_url'", ')', 'region_name', '=', '__salt__', '[', "'config.option'", ']', '(', "'keystone.region_name'", ')', 'service_type', '=', '__salt__', '[', "'config.option'", ']', '(', "'keystone.service_type'", ')', 'os_auth_system', '=', '__salt__', '[', "'config.option'", ']', '(', "'keystone.os_auth_system'", ')', 'use_keystoneauth', '=', '__salt__', '[', "'config.option'", ']', '(', "'keystone.use_keystoneauth'", ')', 'verify', '=', '__salt__', '[', "'config.option'", ']', '(', "'keystone.verify'", ')', 'if', 'use_keystoneauth', 'is', 'True', ':', 'project_domain_name', '=', 'credentials', '[', "'keystone.project_domain_name'", ']', 'user_domain_name', '=', 'credentials', '[', "'keystone.user_domain_name'", ']', 'kwargs', '=', '{', "'username'", ':', 'user', ',', "'password'", ':', 'password', ',', "'tenant_name'", ':', 'tenant', ',', "'auth_url'", ':', 'auth_url', ',', "'region_name'", ':', 'region_name', ',', "'service_type'", ':', 'service_type', ',', "'os_auth_plugin'", ':', 'os_auth_system', ',', "'use_keystoneauth'", ':', 'use_keystoneauth', ',', "'verify'", ':', 'verify', ',', "'project_domain_name'", ':', 'project_domain_name', ',', "'user_domain_name'", ':', 'user_domain_name', '}', 'else', ':', 'kwargs', '=', '{', "'username'", ':', 'user', ',', "'password'", ':', 'password', ',', "'tenant_name'", ':', 'tenant', ',', "'auth_url'", ':', 'auth_url', ',', "'region_name'", ':', 'region_name', ',', "'service_type'", ':', 'service_type', ',', "'os_auth_plugin'", ':', 'os_auth_system', '}', 'return', 'suoneu', '.', 'SaltNeutron', '(', '*', '*', 'kwargs', ')'] | Set up neutron credentials | ['Set', 'up', 'neutron', 'credentials'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/neutron.py#L99-L153 |
6,469 | apache/spark | python/pyspark/sql/functions.py | sequence | def sequence(start, stop, step=None):
"""
Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
If `step` is not set, incrementing by 1 if `start` is less than or equal to `stop`,
otherwise -1.
>>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
>>> df1.select(sequence('C1', 'C2').alias('r')).collect()
[Row(r=[-2, -1, 0, 1, 2])]
>>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
>>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
[Row(r=[4, 2, 0, -2, -4])]
"""
sc = SparkContext._active_spark_context
if step is None:
return Column(sc._jvm.functions.sequence(_to_java_column(start), _to_java_column(stop)))
else:
return Column(sc._jvm.functions.sequence(
_to_java_column(start), _to_java_column(stop), _to_java_column(step))) | python | def sequence(start, stop, step=None):
"""
Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
If `step` is not set, incrementing by 1 if `start` is less than or equal to `stop`,
otherwise -1.
>>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
>>> df1.select(sequence('C1', 'C2').alias('r')).collect()
[Row(r=[-2, -1, 0, 1, 2])]
>>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
>>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
[Row(r=[4, 2, 0, -2, -4])]
"""
sc = SparkContext._active_spark_context
if step is None:
return Column(sc._jvm.functions.sequence(_to_java_column(start), _to_java_column(stop)))
else:
return Column(sc._jvm.functions.sequence(
_to_java_column(start), _to_java_column(stop), _to_java_column(step))) | ['def', 'sequence', '(', 'start', ',', 'stop', ',', 'step', '=', 'None', ')', ':', 'sc', '=', 'SparkContext', '.', '_active_spark_context', 'if', 'step', 'is', 'None', ':', 'return', 'Column', '(', 'sc', '.', '_jvm', '.', 'functions', '.', 'sequence', '(', '_to_java_column', '(', 'start', ')', ',', '_to_java_column', '(', 'stop', ')', ')', ')', 'else', ':', 'return', 'Column', '(', 'sc', '.', '_jvm', '.', 'functions', '.', 'sequence', '(', '_to_java_column', '(', 'start', ')', ',', '_to_java_column', '(', 'stop', ')', ',', '_to_java_column', '(', 'step', ')', ')', ')'] | Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
If `step` is not set, incrementing by 1 if `start` is less than or equal to `stop`,
otherwise -1.
>>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
>>> df1.select(sequence('C1', 'C2').alias('r')).collect()
[Row(r=[-2, -1, 0, 1, 2])]
>>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
>>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
[Row(r=[4, 2, 0, -2, -4])] | ['Generate', 'a', 'sequence', 'of', 'integers', 'from', 'start', 'to', 'stop', 'incrementing', 'by', 'step', '.', 'If', 'step', 'is', 'not', 'set', 'incrementing', 'by', '1', 'if', 'start', 'is', 'less', 'than', 'or', 'equal', 'to', 'stop', 'otherwise', '-', '1', '.'] | train | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2739-L2757 |
6,470 | sobolevn/jinja2-git | jinja2_git.py | GitExtension.parse | def parse(self, parser):
"""Main method to render data into the template."""
lineno = next(parser.stream).lineno
if parser.stream.skip_if('name:short'):
parser.stream.skip(1)
short = parser.parse_expression()
else:
short = nodes.Const(False)
result = self.call_method('_commit_hash', [short], [], lineno=lineno)
return nodes.Output([result], lineno=lineno) | python | def parse(self, parser):
"""Main method to render data into the template."""
lineno = next(parser.stream).lineno
if parser.stream.skip_if('name:short'):
parser.stream.skip(1)
short = parser.parse_expression()
else:
short = nodes.Const(False)
result = self.call_method('_commit_hash', [short], [], lineno=lineno)
return nodes.Output([result], lineno=lineno) | ['def', 'parse', '(', 'self', ',', 'parser', ')', ':', 'lineno', '=', 'next', '(', 'parser', '.', 'stream', ')', '.', 'lineno', 'if', 'parser', '.', 'stream', '.', 'skip_if', '(', "'name:short'", ')', ':', 'parser', '.', 'stream', '.', 'skip', '(', '1', ')', 'short', '=', 'parser', '.', 'parse_expression', '(', ')', 'else', ':', 'short', '=', 'nodes', '.', 'Const', '(', 'False', ')', 'result', '=', 'self', '.', 'call_method', '(', "'_commit_hash'", ',', '[', 'short', ']', ',', '[', ']', ',', 'lineno', '=', 'lineno', ')', 'return', 'nodes', '.', 'Output', '(', '[', 'result', ']', ',', 'lineno', '=', 'lineno', ')'] | Main method to render data into the template. | ['Main', 'method', 'to', 'render', 'data', 'into', 'the', 'template', '.'] | train | https://github.com/sobolevn/jinja2-git/blob/2ef8ac30efc1d73db551aaae73b2fe214761f840/jinja2_git.py#L23-L34 |
6,471 | JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/__init__.py | MapModule.remove_rally | def remove_rally(self, key):
'''remove a rally point'''
a = key.split(' ')
if a[0] != 'Rally' or len(a) != 2:
print("Bad rally object %s" % key)
return
i = int(a[1])
self.mpstate.functions.process_stdin('rally remove %u' % i) | python | def remove_rally(self, key):
'''remove a rally point'''
a = key.split(' ')
if a[0] != 'Rally' or len(a) != 2:
print("Bad rally object %s" % key)
return
i = int(a[1])
self.mpstate.functions.process_stdin('rally remove %u' % i) | ['def', 'remove_rally', '(', 'self', ',', 'key', ')', ':', 'a', '=', 'key', '.', 'split', '(', "' '", ')', 'if', 'a', '[', '0', ']', '!=', "'Rally'", 'or', 'len', '(', 'a', ')', '!=', '2', ':', 'print', '(', '"Bad rally object %s"', '%', 'key', ')', 'return', 'i', '=', 'int', '(', 'a', '[', '1', ']', ')', 'self', '.', 'mpstate', '.', 'functions', '.', 'process_stdin', '(', "'rally remove %u'", '%', 'i', ')'] | remove a rally point | ['remove', 'a', 'rally', 'point'] | train | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/__init__.py#L182-L189 |
6,472 | blockstack/blockstack-core | blockstack/lib/c32.py | c32checkDecode | def c32checkDecode(c32data):
"""
>>> c32checkDecode('P2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNRV9EJ7')
(22, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
>>> c32checkDecode('02J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKPVKG2CE')
(0, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
>>> c32checkDecode('Z2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR')
(31, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
>>> c32checkDecode('B2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNGTQ5XV')
(11, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
>>> c32checkDecode('H2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKPZJKGHG')
(17, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
>>> c32checkDecode('22J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKMQMB2T9')
(2, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
>>> c32checkDecode('P37JJX3D')
(22, '')
>>> c32checkDecode('P000000000000000000002Q6VF78')
(22, '0000000000000000000000000000000000000000')
>>> c32checkDecode('P00000000000000000005JA84HQ')
(22, '0000000000000000000000000000000000000001')
>>> c32checkDecode('P80000000000000000000000000000004R0CMNV')
(22, '1000000000000000000000000000000000000001')
>>> c32checkDecode('P800000000000000000000000000000033H8YKK')
(22, '1000000000000000000000000000000000000000')
>>> c32checkDecode('04C407K6')
(0, '01')
>>> c32checkDecode('049Q1W6AP')
(0, '22')
>>> c32checkDecode('006NZP224')
(0, '0001')
>>> c32checkDecode('Z004720442')
(31, '000001')
>>> c32checkDecode('Z00073C2AR7')
(31, '00000001')
>>> c32checkDecode('B20QX4FW0')
(11, '10')
>>> c32checkDecode('B102PC6RCC')
(11, '0100')
>>> c32checkDecode('BG02G1QXCQ')
(11, '1000')
>>> c32checkDecode('H40003YJA8JD')
(17, '100000')
>>> c32checkDecode('H200001ZTRYYH')
(17, '01000000')
>>> c32checkDecode('H1000002QFX7E6')
(17, '10000000')
>>> c32checkDecode('2G000003FNKA3P')
(2, '0100000000')
"""
if not re.match(r'^[' + C32 + ']*$', c32data):
raise ValueError('Must be c32 data')
c32data = c32normalize(c32data)
data_hex = c32decode(c32data[1:])
if len(data_hex) < 8:
raise ValueError('Not a c32check string')
version_chr = c32data[0]
version = C32.index(version_chr)
version_hex = '{:02x}'.format(version)
checksum = data_hex[-8:]
if c32checksum('{}{}'.format(version_hex, data_hex[0:len(data_hex)-8])) != checksum:
raise ValueError('Invalid c32check string: checksum mismatch')
return (version, data_hex[0:len(data_hex)-8]) | python | def c32checkDecode(c32data):
"""
>>> c32checkDecode('P2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNRV9EJ7')
(22, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
>>> c32checkDecode('02J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKPVKG2CE')
(0, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
>>> c32checkDecode('Z2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR')
(31, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
>>> c32checkDecode('B2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNGTQ5XV')
(11, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
>>> c32checkDecode('H2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKPZJKGHG')
(17, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
>>> c32checkDecode('22J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKMQMB2T9')
(2, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
>>> c32checkDecode('P37JJX3D')
(22, '')
>>> c32checkDecode('P000000000000000000002Q6VF78')
(22, '0000000000000000000000000000000000000000')
>>> c32checkDecode('P00000000000000000005JA84HQ')
(22, '0000000000000000000000000000000000000001')
>>> c32checkDecode('P80000000000000000000000000000004R0CMNV')
(22, '1000000000000000000000000000000000000001')
>>> c32checkDecode('P800000000000000000000000000000033H8YKK')
(22, '1000000000000000000000000000000000000000')
>>> c32checkDecode('04C407K6')
(0, '01')
>>> c32checkDecode('049Q1W6AP')
(0, '22')
>>> c32checkDecode('006NZP224')
(0, '0001')
>>> c32checkDecode('Z004720442')
(31, '000001')
>>> c32checkDecode('Z00073C2AR7')
(31, '00000001')
>>> c32checkDecode('B20QX4FW0')
(11, '10')
>>> c32checkDecode('B102PC6RCC')
(11, '0100')
>>> c32checkDecode('BG02G1QXCQ')
(11, '1000')
>>> c32checkDecode('H40003YJA8JD')
(17, '100000')
>>> c32checkDecode('H200001ZTRYYH')
(17, '01000000')
>>> c32checkDecode('H1000002QFX7E6')
(17, '10000000')
>>> c32checkDecode('2G000003FNKA3P')
(2, '0100000000')
"""
if not re.match(r'^[' + C32 + ']*$', c32data):
raise ValueError('Must be c32 data')
c32data = c32normalize(c32data)
data_hex = c32decode(c32data[1:])
if len(data_hex) < 8:
raise ValueError('Not a c32check string')
version_chr = c32data[0]
version = C32.index(version_chr)
version_hex = '{:02x}'.format(version)
checksum = data_hex[-8:]
if c32checksum('{}{}'.format(version_hex, data_hex[0:len(data_hex)-8])) != checksum:
raise ValueError('Invalid c32check string: checksum mismatch')
return (version, data_hex[0:len(data_hex)-8]) | ['def', 'c32checkDecode', '(', 'c32data', ')', ':', 'if', 'not', 're', '.', 'match', '(', "r'^['", '+', 'C32', '+', "']*$'", ',', 'c32data', ')', ':', 'raise', 'ValueError', '(', "'Must be c32 data'", ')', 'c32data', '=', 'c32normalize', '(', 'c32data', ')', 'data_hex', '=', 'c32decode', '(', 'c32data', '[', '1', ':', ']', ')', 'if', 'len', '(', 'data_hex', ')', '<', '8', ':', 'raise', 'ValueError', '(', "'Not a c32check string'", ')', 'version_chr', '=', 'c32data', '[', '0', ']', 'version', '=', 'C32', '.', 'index', '(', 'version_chr', ')', 'version_hex', '=', "'{:02x}'", '.', 'format', '(', 'version', ')', 'checksum', '=', 'data_hex', '[', '-', '8', ':', ']', 'if', 'c32checksum', '(', "'{}{}'", '.', 'format', '(', 'version_hex', ',', 'data_hex', '[', '0', ':', 'len', '(', 'data_hex', ')', '-', '8', ']', ')', ')', '!=', 'checksum', ':', 'raise', 'ValueError', '(', "'Invalid c32check string: checksum mismatch'", ')', 'return', '(', 'version', ',', 'data_hex', '[', '0', ':', 'len', '(', 'data_hex', ')', '-', '8', ']', ')'] | >>> c32checkDecode('P2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNRV9EJ7')
(22, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
>>> c32checkDecode('02J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKPVKG2CE')
(0, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
>>> c32checkDecode('Z2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR')
(31, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
>>> c32checkDecode('B2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNGTQ5XV')
(11, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
>>> c32checkDecode('H2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKPZJKGHG')
(17, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
>>> c32checkDecode('22J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKMQMB2T9')
(2, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
>>> c32checkDecode('P37JJX3D')
(22, '')
>>> c32checkDecode('P000000000000000000002Q6VF78')
(22, '0000000000000000000000000000000000000000')
>>> c32checkDecode('P00000000000000000005JA84HQ')
(22, '0000000000000000000000000000000000000001')
>>> c32checkDecode('P80000000000000000000000000000004R0CMNV')
(22, '1000000000000000000000000000000000000001')
>>> c32checkDecode('P800000000000000000000000000000033H8YKK')
(22, '1000000000000000000000000000000000000000')
>>> c32checkDecode('04C407K6')
(0, '01')
>>> c32checkDecode('049Q1W6AP')
(0, '22')
>>> c32checkDecode('006NZP224')
(0, '0001')
>>> c32checkDecode('Z004720442')
(31, '000001')
>>> c32checkDecode('Z00073C2AR7')
(31, '00000001')
>>> c32checkDecode('B20QX4FW0')
(11, '10')
>>> c32checkDecode('B102PC6RCC')
(11, '0100')
>>> c32checkDecode('BG02G1QXCQ')
(11, '1000')
>>> c32checkDecode('H40003YJA8JD')
(17, '100000')
>>> c32checkDecode('H200001ZTRYYH')
(17, '01000000')
>>> c32checkDecode('H1000002QFX7E6')
(17, '10000000')
>>> c32checkDecode('2G000003FNKA3P')
(2, '0100000000') | ['>>>', 'c32checkDecode', '(', 'P2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNRV9EJ7', ')', '(', '22', 'a46ff88886c2ef9762d970b4d2c63678835bd39d', ')', '>>>', 'c32checkDecode', '(', '02J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKPVKG2CE', ')', '(', '0', 'a46ff88886c2ef9762d970b4d2c63678835bd39d', ')', '>>>', 'c32checkDecode', '(', 'Z2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR', ')', '(', '31', 'a46ff88886c2ef9762d970b4d2c63678835bd39d', ')', '>>>', 'c32checkDecode', '(', 'B2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNGTQ5XV', ')', '(', '11', 'a46ff88886c2ef9762d970b4d2c63678835bd39d', ')', '>>>', 'c32checkDecode', '(', 'H2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKPZJKGHG', ')', '(', '17', 'a46ff88886c2ef9762d970b4d2c63678835bd39d', ')', '>>>', 'c32checkDecode', '(', '22J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKMQMB2T9', ')', '(', '2', 'a46ff88886c2ef9762d970b4d2c63678835bd39d', ')', '>>>', 'c32checkDecode', '(', 'P37JJX3D', ')', '(', '22', ')', '>>>', 'c32checkDecode', '(', 'P000000000000000000002Q6VF78', ')', '(', '22', '0000000000000000000000000000000000000000', ')', '>>>', 'c32checkDecode', '(', 'P00000000000000000005JA84HQ', ')', '(', '22', '0000000000000000000000000000000000000001', ')', '>>>', 'c32checkDecode', '(', 'P80000000000000000000000000000004R0CMNV', ')', '(', '22', '1000000000000000000000000000000000000001', ')', '>>>', 'c32checkDecode', '(', 'P800000000000000000000000000000033H8YKK', ')', '(', '22', '1000000000000000000000000000000000000000', ')', '>>>', 'c32checkDecode', '(', '04C407K6', ')', '(', '0', '01', ')', '>>>', 'c32checkDecode', '(', '049Q1W6AP', ')', '(', '0', '22', ')', '>>>', 'c32checkDecode', '(', '006NZP224', ')', '(', '0', '0001', ')', '>>>', 'c32checkDecode', '(', 'Z004720442', ')', '(', '31', '000001', ')', '>>>', 'c32checkDecode', '(', 'Z00073C2AR7', ')', '(', '31', '00000001', ')', '>>>', 'c32checkDecode', '(', 'B20QX4FW0', ')', '(', '11', '10', ')', '>>>', 'c32checkDecode', '(', 'B102PC6RCC', ')', '(', '11', '0100', ')', '>>>', 'c32checkDecode', '(', 'BG02G1QXCQ', ')', '(', '11', '1000', ')', '>>>', 'c32checkDecode', '(', 'H40003YJA8JD', ')', '(', '17', '100000', ')', '>>>', 'c32checkDecode', '(', 'H200001ZTRYYH', ')', '(', '17', '01000000', ')', '>>>', 'c32checkDecode', '(', 'H1000002QFX7E6', ')', '(', '17', '10000000', ')', '>>>', 'c32checkDecode', '(', '2G000003FNKA3P', ')', '(', '2', '0100000000', ')'] | train | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/c32.py#L333-L399 |
6,473 | django-leonardo/django-leonardo | leonardo/module/media/admin/file/admin.py | FileAdmin.delete_view | def delete_view(self, request, object_id, extra_context=None):
"""
Overrides the default to enable redirecting to the directory view after
deletion of a image.
we need to fetch the object and find out who the parent is
before super, because super will delete the object and make it
impossible to find out the parent folder to redirect to.
"""
parent_folder = None
try:
obj = self.get_queryset(request).get(pk=unquote(object_id))
parent_folder = obj.folder
except self.model.DoesNotExist:
obj = None
r = super(FileAdmin, self).delete_view(
request=request, object_id=object_id,
extra_context=extra_context)
url = r.get("Location", None)
# Check against filer_file_changelist as file deletion is always made by
# the base class
if (url in ["../../../../", "../../"] or
url == reverse("admin:media_file_changelist") or
url == reverse("admin:media_image_changelist")):
if parent_folder:
url = reverse('admin:filer-directory_listing',
kwargs={'folder_id': parent_folder.id})
else:
url = reverse('admin:filer-directory_listing-unfiled_images')
url = "%s%s%s" % (url, popup_param(request),
selectfolder_param(request, "&"))
return HttpResponseRedirect(url)
return r | python | def delete_view(self, request, object_id, extra_context=None):
"""
Overrides the default to enable redirecting to the directory view after
deletion of a image.
we need to fetch the object and find out who the parent is
before super, because super will delete the object and make it
impossible to find out the parent folder to redirect to.
"""
parent_folder = None
try:
obj = self.get_queryset(request).get(pk=unquote(object_id))
parent_folder = obj.folder
except self.model.DoesNotExist:
obj = None
r = super(FileAdmin, self).delete_view(
request=request, object_id=object_id,
extra_context=extra_context)
url = r.get("Location", None)
# Check against filer_file_changelist as file deletion is always made by
# the base class
if (url in ["../../../../", "../../"] or
url == reverse("admin:media_file_changelist") or
url == reverse("admin:media_image_changelist")):
if parent_folder:
url = reverse('admin:filer-directory_listing',
kwargs={'folder_id': parent_folder.id})
else:
url = reverse('admin:filer-directory_listing-unfiled_images')
url = "%s%s%s" % (url, popup_param(request),
selectfolder_param(request, "&"))
return HttpResponseRedirect(url)
return r | ['def', 'delete_view', '(', 'self', ',', 'request', ',', 'object_id', ',', 'extra_context', '=', 'None', ')', ':', 'parent_folder', '=', 'None', 'try', ':', 'obj', '=', 'self', '.', 'get_queryset', '(', 'request', ')', '.', 'get', '(', 'pk', '=', 'unquote', '(', 'object_id', ')', ')', 'parent_folder', '=', 'obj', '.', 'folder', 'except', 'self', '.', 'model', '.', 'DoesNotExist', ':', 'obj', '=', 'None', 'r', '=', 'super', '(', 'FileAdmin', ',', 'self', ')', '.', 'delete_view', '(', 'request', '=', 'request', ',', 'object_id', '=', 'object_id', ',', 'extra_context', '=', 'extra_context', ')', 'url', '=', 'r', '.', 'get', '(', '"Location"', ',', 'None', ')', '# Check against filer_file_changelist as file deletion is always made by', '# the base class', 'if', '(', 'url', 'in', '[', '"../../../../"', ',', '"../../"', ']', 'or', 'url', '==', 'reverse', '(', '"admin:media_file_changelist"', ')', 'or', 'url', '==', 'reverse', '(', '"admin:media_image_changelist"', ')', ')', ':', 'if', 'parent_folder', ':', 'url', '=', 'reverse', '(', "'admin:filer-directory_listing'", ',', 'kwargs', '=', '{', "'folder_id'", ':', 'parent_folder', '.', 'id', '}', ')', 'else', ':', 'url', '=', 'reverse', '(', "'admin:filer-directory_listing-unfiled_images'", ')', 'url', '=', '"%s%s%s"', '%', '(', 'url', ',', 'popup_param', '(', 'request', ')', ',', 'selectfolder_param', '(', 'request', ',', '"&"', ')', ')', 'return', 'HttpResponseRedirect', '(', 'url', ')', 'return', 'r'] | Overrides the default to enable redirecting to the directory view after
deletion of a image.
we need to fetch the object and find out who the parent is
before super, because super will delete the object and make it
impossible to find out the parent folder to redirect to. | ['Overrides', 'the', 'default', 'to', 'enable', 'redirecting', 'to', 'the', 'directory', 'view', 'after', 'deletion', 'of', 'a', 'image', '.'] | train | https://github.com/django-leonardo/django-leonardo/blob/4b933e1792221a13b4028753d5f1d3499b0816d4/leonardo/module/media/admin/file/admin.py#L103-L138 |
6,474 | loisaidasam/pyslack | pyslack/__init__.py | SlackClient._make_request | def _make_request(self, method, params):
"""Make request to API endpoint
Note: Ignoring SSL cert validation due to intermittent failures
http://requests.readthedocs.org/en/latest/user/advanced/#ssl-cert-verification
"""
if self.blocked_until is not None and \
datetime.datetime.utcnow() < self.blocked_until:
raise SlackError("Too many requests - wait until {0}" \
.format(self.blocked_until))
url = "%s/%s" % (SlackClient.BASE_URL, method)
params['token'] = self.token
response = requests.post(url, data=params, verify=self.verify)
if response.status_code == 429:
# Too many requests
retry_after = int(response.headers.get('retry-after', '1'))
self.blocked_until = datetime.datetime.utcnow() + \
datetime.timedelta(seconds=retry_after)
raise SlackError("Too many requests - retry after {0} second(s)" \
.format(retry_after))
result = response.json()
if not result['ok']:
raise SlackError(result['error'])
return result | python | def _make_request(self, method, params):
"""Make request to API endpoint
Note: Ignoring SSL cert validation due to intermittent failures
http://requests.readthedocs.org/en/latest/user/advanced/#ssl-cert-verification
"""
if self.blocked_until is not None and \
datetime.datetime.utcnow() < self.blocked_until:
raise SlackError("Too many requests - wait until {0}" \
.format(self.blocked_until))
url = "%s/%s" % (SlackClient.BASE_URL, method)
params['token'] = self.token
response = requests.post(url, data=params, verify=self.verify)
if response.status_code == 429:
# Too many requests
retry_after = int(response.headers.get('retry-after', '1'))
self.blocked_until = datetime.datetime.utcnow() + \
datetime.timedelta(seconds=retry_after)
raise SlackError("Too many requests - retry after {0} second(s)" \
.format(retry_after))
result = response.json()
if not result['ok']:
raise SlackError(result['error'])
return result | ['def', '_make_request', '(', 'self', ',', 'method', ',', 'params', ')', ':', 'if', 'self', '.', 'blocked_until', 'is', 'not', 'None', 'and', 'datetime', '.', 'datetime', '.', 'utcnow', '(', ')', '<', 'self', '.', 'blocked_until', ':', 'raise', 'SlackError', '(', '"Too many requests - wait until {0}"', '.', 'format', '(', 'self', '.', 'blocked_until', ')', ')', 'url', '=', '"%s/%s"', '%', '(', 'SlackClient', '.', 'BASE_URL', ',', 'method', ')', 'params', '[', "'token'", ']', '=', 'self', '.', 'token', 'response', '=', 'requests', '.', 'post', '(', 'url', ',', 'data', '=', 'params', ',', 'verify', '=', 'self', '.', 'verify', ')', 'if', 'response', '.', 'status_code', '==', '429', ':', '# Too many requests', 'retry_after', '=', 'int', '(', 'response', '.', 'headers', '.', 'get', '(', "'retry-after'", ',', "'1'", ')', ')', 'self', '.', 'blocked_until', '=', 'datetime', '.', 'datetime', '.', 'utcnow', '(', ')', '+', 'datetime', '.', 'timedelta', '(', 'seconds', '=', 'retry_after', ')', 'raise', 'SlackError', '(', '"Too many requests - retry after {0} second(s)"', '.', 'format', '(', 'retry_after', ')', ')', 'result', '=', 'response', '.', 'json', '(', ')', 'if', 'not', 'result', '[', "'ok'", ']', ':', 'raise', 'SlackError', '(', 'result', '[', "'error'", ']', ')', 'return', 'result'] | Make request to API endpoint
Note: Ignoring SSL cert validation due to intermittent failures
http://requests.readthedocs.org/en/latest/user/advanced/#ssl-cert-verification | ['Make', 'request', 'to', 'API', 'endpoint'] | train | https://github.com/loisaidasam/pyslack/blob/bce0dcbe830b95ba548b58c7ceea07923589a8ec/pyslack/__init__.py#L23-L49 |
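Sketch of the HTTP 429 back-off pattern implemented by _make_request above; the URL is a placeholder rather than Slack's real endpoint, and the exception type and function name are illustrative.

import datetime
import requests

def post_with_backoff(url, data, blocked_until=None):
    # Refuse to call the API while an earlier 429 block is still active.
    now = datetime.datetime.utcnow()
    if blocked_until is not None and now < blocked_until:
        raise RuntimeError("blocked until %s" % blocked_until)
    response = requests.post(url, data=data)
    if response.status_code == 429:
        retry_after = int(response.headers.get('retry-after', '1'))
        blocked_until = now + datetime.timedelta(seconds=retry_after)
        raise RuntimeError("retry after %d second(s)" % retry_after)
    return response.json(), blocked_until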
6,475 | RI-imaging/qpformat | qpformat/file_formats/single_hdf5_qpimage.py | SingleHdf5Qpimage.get_qpimage | def get_qpimage(self, idx=0):
"""Return background-corrected QPImage"""
if self._bgdata:
# The user has explicitly chosen different background data
# using `get_qpimage_raw`.
qpi = super(SingleHdf5Qpimage, self).get_qpimage()
else:
# We can use the background data stored in the qpimage hdf5 file
qpi = qpimage.QPImage(h5file=self.path,
h5mode="r",
h5dtype=self.as_type,
).copy()
# Force meta data
for key in self.meta_data:
qpi[key] = self.meta_data[key]
# set identifier
qpi["identifier"] = self.get_identifier(idx)
return qpi | python | def get_qpimage(self, idx=0):
"""Return background-corrected QPImage"""
if self._bgdata:
# The user has explicitly chosen different background data
# using `get_qpimage_raw`.
qpi = super(SingleHdf5Qpimage, self).get_qpimage()
else:
# We can use the background data stored in the qpimage hdf5 file
qpi = qpimage.QPImage(h5file=self.path,
h5mode="r",
h5dtype=self.as_type,
).copy()
# Force meta data
for key in self.meta_data:
qpi[key] = self.meta_data[key]
# set identifier
qpi["identifier"] = self.get_identifier(idx)
return qpi | ['def', 'get_qpimage', '(', 'self', ',', 'idx', '=', '0', ')', ':', 'if', 'self', '.', '_bgdata', ':', '# The user has explicitly chosen different background data', '# using `get_qpimage_raw`.', 'qpi', '=', 'super', '(', 'SingleHdf5Qpimage', ',', 'self', ')', '.', 'get_qpimage', '(', ')', 'else', ':', '# We can use the background data stored in the qpimage hdf5 file', 'qpi', '=', 'qpimage', '.', 'QPImage', '(', 'h5file', '=', 'self', '.', 'path', ',', 'h5mode', '=', '"r"', ',', 'h5dtype', '=', 'self', '.', 'as_type', ',', ')', '.', 'copy', '(', ')', '# Force meta data', 'for', 'key', 'in', 'self', '.', 'meta_data', ':', 'qpi', '[', 'key', ']', '=', 'self', '.', 'meta_data', '[', 'key', ']', '# set identifier', 'qpi', '[', '"identifier"', ']', '=', 'self', '.', 'get_identifier', '(', 'idx', ')', 'return', 'qpi'] | Return background-corrected QPImage | ['Return', 'background', '-', 'corrected', 'QPImage'] | train | https://github.com/RI-imaging/qpformat/blob/364e29d7d9e8b9f1d7a4a25c753d1baf9d73d5eb/qpformat/file_formats/single_hdf5_qpimage.py#L25-L42 |
6,476 | rstoneback/pysat | pysat/_orbits.py | Orbits._equaBreaks | def _equaBreaks(self, orbit_index_period=24.):
"""Determine where breaks in an equatorial satellite orbit occur.
Looks for negative gradients in local time (or longitude) as well as
breaks in UT.
Parameters
----------
orbit_index_period : float
The change in value of supplied index parameter for a single orbit
"""
if self.orbit_index is None:
raise ValueError('Orbit properties must be defined at ' +
'pysat.Instrument object instantiation.' +
'See Instrument docs.')
else:
try:
self.sat[self.orbit_index]
except ValueError:
raise ValueError('Provided orbit index does not exist in ' +
'loaded data')
# get difference in orbit index around the orbit
lt_diff = self.sat[self.orbit_index].diff()
# universal time values, from datetime index
ut_vals = Series(self.sat.data.index)
# UT difference
ut_diff = ut_vals.diff()
# get locations where orbit index derivative is less than 0
# then do some basic checks on these locations
ind, = np.where((lt_diff < -0.1))
if len(ind) > 0:
ind = np.hstack((ind, np.array([len(self.sat[self.orbit_index])])))
# look at distance between breaks
dist = ind[1:] - ind[0:-1]
# only keep orbit breaks with a distance greater than 1
# done for robustness
if len(ind) > 1:
if min(dist) == 1:
print('There are orbit breaks right next to each other')
ind = ind[:-1][dist > 1]
# check for large positive gradients around the break that would
# suggest not a true orbit break, but rather bad orbit_index values
new_ind = []
for idx in ind:
tidx, = np.where(lt_diff[idx - 5:idx + 6] > 0.1)
if len(tidx) != 0:
# there are large changes, suggests a false alarm
# iterate over samples and check
for tidx in tidx:
# look at time change vs local time change
if(ut_diff[idx - 5:idx + 6].iloc[tidx] <
lt_diff[idx - 5:idx + 6].iloc[tidx] /
orbit_index_period * self.orbit_period):
# change in ut is small compared to the change in
# the orbit index this is flagged as a false alarm,
# or dropped from consideration
pass
else:
# change in UT is significant, keep orbit break
new_ind.append(idx)
break
else:
# no large positive gradients, current orbit break passes
# the first test
new_ind.append(idx)
# replace all breaks with those that are 'good'
ind = np.array(new_ind)
# now, assemble some orbit breaks that are not triggered by changes in
# the orbit index
# check if there is a UT break that is larger than orbital period, aka
# a time gap
ut_change_vs_period = ( ut_diff > self.orbit_period )
# characterize ut change using orbital period
norm_ut = ut_diff / self.orbit_period
# now, look for breaks because the length of time between samples is
# too large, thus there is no break in slt/mlt/etc, lt_diff is small
# but UT change is big
norm_ut_vs_norm_lt = norm_ut.gt(np.abs(lt_diff.values /
orbit_index_period))
# indices when one or other flag is true
ut_ind, = np.where(ut_change_vs_period | (norm_ut_vs_norm_lt &
(norm_ut > 0.95)))
# added the or and check after or on 10/20/2014
# & lt_diff.notnull() ))# & (lt_diff != 0) ) )
# combine these UT determined orbit breaks with the orbit index orbit
# breaks
if len(ut_ind) > 0:
ind = np.hstack((ind, ut_ind))
ind = np.sort(ind)
ind = np.unique(ind)
print('Time Gap')
# now that most problems in orbits should have been caught, look at
# the time difference between orbits (not individual orbits)
orbit_ut_diff = ut_vals[ind].diff()
orbit_lt_diff = self.sat[self.orbit_index][ind].diff()
# look for time gaps between partial orbits. The full orbital time
# period is not required between end of one orbit and begining of next
# if first orbit is partial. Also provides another general test of the
# orbital breaks determined.
idx, = np.where((orbit_ut_diff / self.orbit_period -
orbit_lt_diff.values / orbit_index_period) > 0.97)
# pull out breaks that pass the test, need to make sure the first one
# is always included it gets dropped via the nature of diff
if len(idx) > 0:
if idx[0] != 0:
idx = np.hstack((0, idx))
else:
idx = np.array([0])
# only keep the good indices
if len(ind) > 0:
ind = ind[idx]
# create orbitbreak index, ensure first element is always 0
if ind[0] != 0:
ind = np.hstack((np.array([0]), ind))
else:
ind = np.array([0])
# number of orbits
num_orbits = len(ind)
# set index of orbit breaks
self._orbit_breaks = ind
# set number of orbits for the day
self.num = num_orbits | python | def _equaBreaks(self, orbit_index_period=24.):
"""Determine where breaks in an equatorial satellite orbit occur.
Looks for negative gradients in local time (or longitude) as well as
breaks in UT.
Parameters
----------
orbit_index_period : float
The change in value of supplied index parameter for a single orbit
"""
if self.orbit_index is None:
raise ValueError('Orbit properties must be defined at ' +
'pysat.Instrument object instantiation.' +
'See Instrument docs.')
else:
try:
self.sat[self.orbit_index]
except ValueError:
raise ValueError('Provided orbit index does not exist in ' +
'loaded data')
# get difference in orbit index around the orbit
lt_diff = self.sat[self.orbit_index].diff()
# universal time values, from datetime index
ut_vals = Series(self.sat.data.index)
# UT difference
ut_diff = ut_vals.diff()
# get locations where orbit index derivative is less than 0
# then do some basic checks on these locations
ind, = np.where((lt_diff < -0.1))
if len(ind) > 0:
ind = np.hstack((ind, np.array([len(self.sat[self.orbit_index])])))
# look at distance between breaks
dist = ind[1:] - ind[0:-1]
# only keep orbit breaks with a distance greater than 1
# done for robustness
if len(ind) > 1:
if min(dist) == 1:
print('There are orbit breaks right next to each other')
ind = ind[:-1][dist > 1]
# check for large positive gradients around the break that would
# suggest not a true orbit break, but rather bad orbit_index values
new_ind = []
for idx in ind:
tidx, = np.where(lt_diff[idx - 5:idx + 6] > 0.1)
if len(tidx) != 0:
# there are large changes, suggests a false alarm
# iterate over samples and check
for tidx in tidx:
# look at time change vs local time change
if(ut_diff[idx - 5:idx + 6].iloc[tidx] <
lt_diff[idx - 5:idx + 6].iloc[tidx] /
orbit_index_period * self.orbit_period):
# change in ut is small compared to the change in
# the orbit index this is flagged as a false alarm,
# or dropped from consideration
pass
else:
# change in UT is significant, keep orbit break
new_ind.append(idx)
break
else:
# no large positive gradients, current orbit break passes
# the first test
new_ind.append(idx)
# replace all breaks with those that are 'good'
ind = np.array(new_ind)
# now, assemble some orbit breaks that are not triggered by changes in
# the orbit index
# check if there is a UT break that is larger than orbital period, aka
# a time gap
ut_change_vs_period = ( ut_diff > self.orbit_period )
# characterize ut change using orbital period
norm_ut = ut_diff / self.orbit_period
# now, look for breaks because the length of time between samples is
# too large, thus there is no break in slt/mlt/etc, lt_diff is small
# but UT change is big
norm_ut_vs_norm_lt = norm_ut.gt(np.abs(lt_diff.values /
orbit_index_period))
# indices when one or other flag is true
ut_ind, = np.where(ut_change_vs_period | (norm_ut_vs_norm_lt &
(norm_ut > 0.95)))
# added the or and check after or on 10/20/2014
# & lt_diff.notnull() ))# & (lt_diff != 0) ) )
# combine these UT determined orbit breaks with the orbit index orbit
# breaks
if len(ut_ind) > 0:
ind = np.hstack((ind, ut_ind))
ind = np.sort(ind)
ind = np.unique(ind)
print('Time Gap')
# now that most problems in orbits should have been caught, look at
# the time difference between orbits (not individual orbits)
orbit_ut_diff = ut_vals[ind].diff()
orbit_lt_diff = self.sat[self.orbit_index][ind].diff()
# look for time gaps between partial orbits. The full orbital time
# period is not required between end of one orbit and begining of next
# if first orbit is partial. Also provides another general test of the
# orbital breaks determined.
idx, = np.where((orbit_ut_diff / self.orbit_period -
orbit_lt_diff.values / orbit_index_period) > 0.97)
# pull out breaks that pass the test, need to make sure the first one
# is always included it gets dropped via the nature of diff
if len(idx) > 0:
if idx[0] != 0:
idx = np.hstack((0, idx))
else:
idx = np.array([0])
# only keep the good indices
if len(ind) > 0:
ind = ind[idx]
# create orbitbreak index, ensure first element is always 0
if ind[0] != 0:
ind = np.hstack((np.array([0]), ind))
else:
ind = np.array([0])
# number of orbits
num_orbits = len(ind)
# set index of orbit breaks
self._orbit_breaks = ind
# set number of orbits for the day
self.num = num_orbits | ['def', '_equaBreaks', '(', 'self', ',', 'orbit_index_period', '=', '24.', ')', ':', 'if', 'self', '.', 'orbit_index', 'is', 'None', ':', 'raise', 'ValueError', '(', "'Orbit properties must be defined at '", '+', "'pysat.Instrument object instantiation.'", '+', "'See Instrument docs.'", ')', 'else', ':', 'try', ':', 'self', '.', 'sat', '[', 'self', '.', 'orbit_index', ']', 'except', 'ValueError', ':', 'raise', 'ValueError', '(', "'Provided orbit index does not exist in '", '+', "'loaded data'", ')', '# get difference in orbit index around the orbit', 'lt_diff', '=', 'self', '.', 'sat', '[', 'self', '.', 'orbit_index', ']', '.', 'diff', '(', ')', '# universal time values, from datetime index', 'ut_vals', '=', 'Series', '(', 'self', '.', 'sat', '.', 'data', '.', 'index', ')', '# UT difference', 'ut_diff', '=', 'ut_vals', '.', 'diff', '(', ')', '# get locations where orbit index derivative is less than 0', '# then do some basic checks on these locations', 'ind', ',', '=', 'np', '.', 'where', '(', '(', 'lt_diff', '<', '-', '0.1', ')', ')', 'if', 'len', '(', 'ind', ')', '>', '0', ':', 'ind', '=', 'np', '.', 'hstack', '(', '(', 'ind', ',', 'np', '.', 'array', '(', '[', 'len', '(', 'self', '.', 'sat', '[', 'self', '.', 'orbit_index', ']', ')', ']', ')', ')', ')', '# look at distance between breaks', 'dist', '=', 'ind', '[', '1', ':', ']', '-', 'ind', '[', '0', ':', '-', '1', ']', '# only keep orbit breaks with a distance greater than 1', '# done for robustness', 'if', 'len', '(', 'ind', ')', '>', '1', ':', 'if', 'min', '(', 'dist', ')', '==', '1', ':', 'print', '(', "'There are orbit breaks right next to each other'", ')', 'ind', '=', 'ind', '[', ':', '-', '1', ']', '[', 'dist', '>', '1', ']', '# check for large positive gradients around the break that would', '# suggest not a true orbit break, but rather bad orbit_index values', 'new_ind', '=', '[', ']', 'for', 'idx', 'in', 'ind', ':', 'tidx', ',', '=', 'np', '.', 'where', '(', 'lt_diff', '[', 'idx', '-', '5', ':', 'idx', '+', '6', ']', '>', '0.1', ')', 'if', 'len', '(', 'tidx', ')', '!=', '0', ':', '# there are large changes, suggests a false alarm', '# iterate over samples and check', 'for', 'tidx', 'in', 'tidx', ':', '# look at time change vs local time change', 'if', '(', 'ut_diff', '[', 'idx', '-', '5', ':', 'idx', '+', '6', ']', '.', 'iloc', '[', 'tidx', ']', '<', 'lt_diff', '[', 'idx', '-', '5', ':', 'idx', '+', '6', ']', '.', 'iloc', '[', 'tidx', ']', '/', 'orbit_index_period', '*', 'self', '.', 'orbit_period', ')', ':', '# change in ut is small compared to the change in', '# the orbit index this is flagged as a false alarm,', '# or dropped from consideration', 'pass', 'else', ':', '# change in UT is significant, keep orbit break', 'new_ind', '.', 'append', '(', 'idx', ')', 'break', 'else', ':', '# no large positive gradients, current orbit break passes', '# the first test', 'new_ind', '.', 'append', '(', 'idx', ')', "# replace all breaks with those that are 'good'", 'ind', '=', 'np', '.', 'array', '(', 'new_ind', ')', '# now, assemble some orbit breaks that are not triggered by changes in', '# the orbit index', '# check if there is a UT break that is larger than orbital period, aka', '# a time gap', 'ut_change_vs_period', '=', '(', 'ut_diff', '>', 'self', '.', 'orbit_period', ')', '# characterize ut change using orbital period', 'norm_ut', '=', 'ut_diff', '/', 'self', '.', 'orbit_period', '# now, look for breaks because the length of time between samples is', '# too large, thus there is no break in slt/mlt/etc, 
lt_diff is small', '# but UT change is big', 'norm_ut_vs_norm_lt', '=', 'norm_ut', '.', 'gt', '(', 'np', '.', 'abs', '(', 'lt_diff', '.', 'values', '/', 'orbit_index_period', ')', ')', '# indices when one or other flag is true', 'ut_ind', ',', '=', 'np', '.', 'where', '(', 'ut_change_vs_period', '|', '(', 'norm_ut_vs_norm_lt', '&', '(', 'norm_ut', '>', '0.95', ')', ')', ')', '# added the or and check after or on 10/20/2014', '# & lt_diff.notnull() ))# & (lt_diff != 0) ) )', '# combine these UT determined orbit breaks with the orbit index orbit', '# breaks', 'if', 'len', '(', 'ut_ind', ')', '>', '0', ':', 'ind', '=', 'np', '.', 'hstack', '(', '(', 'ind', ',', 'ut_ind', ')', ')', 'ind', '=', 'np', '.', 'sort', '(', 'ind', ')', 'ind', '=', 'np', '.', 'unique', '(', 'ind', ')', 'print', '(', "'Time Gap'", ')', '# now that most problems in orbits should have been caught, look at', '# the time difference between orbits (not individual orbits)', 'orbit_ut_diff', '=', 'ut_vals', '[', 'ind', ']', '.', 'diff', '(', ')', 'orbit_lt_diff', '=', 'self', '.', 'sat', '[', 'self', '.', 'orbit_index', ']', '[', 'ind', ']', '.', 'diff', '(', ')', '# look for time gaps between partial orbits. The full orbital time', '# period is not required between end of one orbit and begining of next', '# if first orbit is partial. Also provides another general test of the', '# orbital breaks determined.', 'idx', ',', '=', 'np', '.', 'where', '(', '(', 'orbit_ut_diff', '/', 'self', '.', 'orbit_period', '-', 'orbit_lt_diff', '.', 'values', '/', 'orbit_index_period', ')', '>', '0.97', ')', '# pull out breaks that pass the test, need to make sure the first one', '# is always included it gets dropped via the nature of diff', 'if', 'len', '(', 'idx', ')', '>', '0', ':', 'if', 'idx', '[', '0', ']', '!=', '0', ':', 'idx', '=', 'np', '.', 'hstack', '(', '(', '0', ',', 'idx', ')', ')', 'else', ':', 'idx', '=', 'np', '.', 'array', '(', '[', '0', ']', ')', '# only keep the good indices', 'if', 'len', '(', 'ind', ')', '>', '0', ':', 'ind', '=', 'ind', '[', 'idx', ']', '# create orbitbreak index, ensure first element is always 0', 'if', 'ind', '[', '0', ']', '!=', '0', ':', 'ind', '=', 'np', '.', 'hstack', '(', '(', 'np', '.', 'array', '(', '[', '0', ']', ')', ',', 'ind', ')', ')', 'else', ':', 'ind', '=', 'np', '.', 'array', '(', '[', '0', ']', ')', '# number of orbits', 'num_orbits', '=', 'len', '(', 'ind', ')', '# set index of orbit breaks', 'self', '.', '_orbit_breaks', '=', 'ind', '# set number of orbits for the day', 'self', '.', 'num', '=', 'num_orbits'] | Determine where breaks in an equatorial satellite orbit occur.
Looks for negative gradients in local time (or longitude) as well as
breaks in UT.
Parameters
----------
orbit_index_period : float
The change in value of supplied index parameter for a single orbit | ['Determine', 'where', 'breaks', 'in', 'an', 'equatorial', 'satellite', 'orbit', 'occur', '.'] | train | https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/_orbits.py#L171-L300 |
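The heart of _equaBreaks is spotting a negative jump in the orbit index (local time wrapping from about 24 back to 0). A minimal numpy sketch with made-up sample values:

import numpy as np

slt = np.array([22.0, 23.0, 23.9, 0.1, 1.0, 2.1])  # hypothetical local times
lt_diff = np.diff(slt)
breaks, = np.where(lt_diff < -0.1)
print(breaks + 1)  # [3]: a new orbit starts at sample index 3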
6,477 | flowersteam/explauto | explauto/models/pydmps/dmp_discrete.py | DMPs_discrete.gen_psi | def gen_psi(self, x):
"""Generates the activity of the basis functions for a given
canonical system rollout.
x float, array: the canonical system state or path
"""
if isinstance(x, np.ndarray):
x = x[:,None]
return np.exp(-self.h * (x - self.c)**2) | python | def gen_psi(self, x):
"""Generates the activity of the basis functions for a given
canonical system rollout.
x float, array: the canonical system state or path
"""
if isinstance(x, np.ndarray):
x = x[:,None]
return np.exp(-self.h * (x - self.c)**2) | ['def', 'gen_psi', '(', 'self', ',', 'x', ')', ':', 'if', 'isinstance', '(', 'x', ',', 'np', '.', 'ndarray', ')', ':', 'x', '=', 'x', '[', ':', ',', 'None', ']', 'return', 'np', '.', 'exp', '(', '-', 'self', '.', 'h', '*', '(', 'x', '-', 'self', '.', 'c', ')', '**', '2', ')'] | Generates the activity of the basis functions for a given
canonical system rollout.
x float, array: the canonical system state or path | ['Generates', 'the', 'activity', 'of', 'the', 'basis', 'functions', 'for', 'a', 'given', 'canonical', 'system', 'rollout', '.', 'x', 'float', 'array', ':', 'the', 'canonical', 'system', 'state', 'or', 'path'] | train | https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/models/pydmps/dmp_discrete.py#L97-L106 |
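gen_psi evaluates Gaussian basis functions psi_i(x) = exp(-h_i * (x - c_i)**2) over the canonical state. A standalone sketch with assumed centres and widths:

import numpy as np

c = np.array([1.0, 0.5, 0.1])     # hypothetical basis-function centres
h = np.array([10.0, 10.0, 10.0])  # hypothetical widths
x = 0.5                           # canonical system state
psi = np.exp(-h * (x - c) ** 2)
print(psi)                        # strongest activation for the centre at 0.5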
6,478 | osrg/ryu | ryu/services/protocols/bgp/core_managers/table_manager.py | TableCoreManager.import_single_vpn_path_to_all_vrfs | def import_single_vpn_path_to_all_vrfs(self, vpn_path, path_rts=None):
"""Imports *vpn_path* to qualifying VRF tables.
Import RTs of VRF table is matched with RTs from *vpn4_path* and if we
have any common RTs we import the path into VRF.
"""
LOG.debug('Importing path %s to qualifying VRFs', vpn_path)
# If this path has no RTs we are done.
if not path_rts:
LOG.info('Encountered a path with no RTs: %s', vpn_path)
return
# We match path RTs with all VRFs that are interested in them.
interested_tables = set()
# Get route family of VRF to when this VPN Path can be imported to
if vpn_path.route_family == RF_IPv4_VPN:
route_family = RF_IPv4_UC
elif vpn_path.route_family == RF_IPv6_VPN:
route_family = RF_IPv6_UC
elif vpn_path.route_family == RF_L2_EVPN:
route_family = RF_L2_EVPN
elif vpn_path.route_family == RF_VPNv4_FLOWSPEC:
route_family = RF_IPv4_FLOWSPEC
elif vpn_path.route_family == RF_VPNv6_FLOWSPEC:
route_family = RF_IPv6_FLOWSPEC
elif vpn_path.route_family == RF_L2VPN_FLOWSPEC:
route_family = RF_L2VPN_FLOWSPEC
else:
raise ValueError('Unsupported route family for VRF: %s' %
vpn_path.route_family)
for rt in path_rts:
rt_rf_id = rt + ':' + str(route_family)
vrf_rt_tables = self._tables_for_rt.get(rt_rf_id)
if vrf_rt_tables:
interested_tables.update(vrf_rt_tables)
if interested_tables:
# We iterate over all VRF tables that are interested in the RT
# of the given path and import this path into them.
route_dist = vpn_path.nlri.route_dist
for vrf_table in interested_tables:
if (vpn_path.source is not None or
route_dist != vrf_table.vrf_conf.route_dist):
update_vrf_dest = vrf_table.import_vpn_path(vpn_path)
# Queue the destination for further processing.
if update_vrf_dest is not None:
self._signal_bus.\
dest_changed(update_vrf_dest)
else:
# If we do not have any VRF with import RT that match with path RT
LOG.debug('No VRF table found that imports RTs: %s', path_rts) | python | def import_single_vpn_path_to_all_vrfs(self, vpn_path, path_rts=None):
"""Imports *vpn_path* to qualifying VRF tables.
Import RTs of VRF table is matched with RTs from *vpn4_path* and if we
have any common RTs we import the path into VRF.
"""
LOG.debug('Importing path %s to qualifying VRFs', vpn_path)
# If this path has no RTs we are done.
if not path_rts:
LOG.info('Encountered a path with no RTs: %s', vpn_path)
return
# We match path RTs with all VRFs that are interested in them.
interested_tables = set()
# Get route family of VRF to when this VPN Path can be imported to
if vpn_path.route_family == RF_IPv4_VPN:
route_family = RF_IPv4_UC
elif vpn_path.route_family == RF_IPv6_VPN:
route_family = RF_IPv6_UC
elif vpn_path.route_family == RF_L2_EVPN:
route_family = RF_L2_EVPN
elif vpn_path.route_family == RF_VPNv4_FLOWSPEC:
route_family = RF_IPv4_FLOWSPEC
elif vpn_path.route_family == RF_VPNv6_FLOWSPEC:
route_family = RF_IPv6_FLOWSPEC
elif vpn_path.route_family == RF_L2VPN_FLOWSPEC:
route_family = RF_L2VPN_FLOWSPEC
else:
raise ValueError('Unsupported route family for VRF: %s' %
vpn_path.route_family)
for rt in path_rts:
rt_rf_id = rt + ':' + str(route_family)
vrf_rt_tables = self._tables_for_rt.get(rt_rf_id)
if vrf_rt_tables:
interested_tables.update(vrf_rt_tables)
if interested_tables:
# We iterate over all VRF tables that are interested in the RT
# of the given path and import this path into them.
route_dist = vpn_path.nlri.route_dist
for vrf_table in interested_tables:
if (vpn_path.source is not None or
route_dist != vrf_table.vrf_conf.route_dist):
update_vrf_dest = vrf_table.import_vpn_path(vpn_path)
# Queue the destination for further processing.
if update_vrf_dest is not None:
self._signal_bus.\
dest_changed(update_vrf_dest)
else:
# If we do not have any VRF with import RT that match with path RT
LOG.debug('No VRF table found that imports RTs: %s', path_rts) | ['def', 'import_single_vpn_path_to_all_vrfs', '(', 'self', ',', 'vpn_path', ',', 'path_rts', '=', 'None', ')', ':', 'LOG', '.', 'debug', '(', "'Importing path %s to qualifying VRFs'", ',', 'vpn_path', ')', '# If this path has no RTs we are done.', 'if', 'not', 'path_rts', ':', 'LOG', '.', 'info', '(', "'Encountered a path with no RTs: %s'", ',', 'vpn_path', ')', 'return', '# We match path RTs with all VRFs that are interested in them.', 'interested_tables', '=', 'set', '(', ')', '# Get route family of VRF to when this VPN Path can be imported to', 'if', 'vpn_path', '.', 'route_family', '==', 'RF_IPv4_VPN', ':', 'route_family', '=', 'RF_IPv4_UC', 'elif', 'vpn_path', '.', 'route_family', '==', 'RF_IPv6_VPN', ':', 'route_family', '=', 'RF_IPv6_UC', 'elif', 'vpn_path', '.', 'route_family', '==', 'RF_L2_EVPN', ':', 'route_family', '=', 'RF_L2_EVPN', 'elif', 'vpn_path', '.', 'route_family', '==', 'RF_VPNv4_FLOWSPEC', ':', 'route_family', '=', 'RF_IPv4_FLOWSPEC', 'elif', 'vpn_path', '.', 'route_family', '==', 'RF_VPNv6_FLOWSPEC', ':', 'route_family', '=', 'RF_IPv6_FLOWSPEC', 'elif', 'vpn_path', '.', 'route_family', '==', 'RF_L2VPN_FLOWSPEC', ':', 'route_family', '=', 'RF_L2VPN_FLOWSPEC', 'else', ':', 'raise', 'ValueError', '(', "'Unsupported route family for VRF: %s'", '%', 'vpn_path', '.', 'route_family', ')', 'for', 'rt', 'in', 'path_rts', ':', 'rt_rf_id', '=', 'rt', '+', "':'", '+', 'str', '(', 'route_family', ')', 'vrf_rt_tables', '=', 'self', '.', '_tables_for_rt', '.', 'get', '(', 'rt_rf_id', ')', 'if', 'vrf_rt_tables', ':', 'interested_tables', '.', 'update', '(', 'vrf_rt_tables', ')', 'if', 'interested_tables', ':', '# We iterate over all VRF tables that are interested in the RT', '# of the given path and import this path into them.', 'route_dist', '=', 'vpn_path', '.', 'nlri', '.', 'route_dist', 'for', 'vrf_table', 'in', 'interested_tables', ':', 'if', '(', 'vpn_path', '.', 'source', 'is', 'not', 'None', 'or', 'route_dist', '!=', 'vrf_table', '.', 'vrf_conf', '.', 'route_dist', ')', ':', 'update_vrf_dest', '=', 'vrf_table', '.', 'import_vpn_path', '(', 'vpn_path', ')', '# Queue the destination for further processing.', 'if', 'update_vrf_dest', 'is', 'not', 'None', ':', 'self', '.', '_signal_bus', '.', 'dest_changed', '(', 'update_vrf_dest', ')', 'else', ':', '# If we do not have any VRF with import RT that match with path RT', 'LOG', '.', 'debug', '(', "'No VRF table found that imports RTs: %s'", ',', 'path_rts', ')'] | Imports *vpn_path* to qualifying VRF tables.
Import RTs of VRF table is matched with RTs from *vpn4_path* and if we
have any common RTs we import the path into VRF. | ['Imports', '*', 'vpn_path', '*', 'to', 'qualifying', 'VRF', 'tables', '.'] | train | https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/core_managers/table_manager.py#L562-L615 |
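The import step above reduces to indexing VRF tables by route target and taking the union over the path's RTs. A toy sketch with invented table names:

tables_for_rt = {                       # hypothetical RT -> VRF-table index
    '65000:100': {'vrf_red'},
    '65000:200': {'vrf_blue', 'vrf_red'},
}
path_rts = ['65000:200', '65000:999']   # RTs carried by an incoming VPN path

interested = set()
for rt in path_rts:
    interested.update(tables_for_rt.get(rt, set()))
print(interested)  # {'vrf_blue', 'vrf_red'}: import the path into these VRFs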
6,479 | tamasgal/km3pipe | km3pipe/shell.py | qsub | def qsub(script, job_name, dryrun=False, *args, **kwargs):
"""Submit a job via qsub."""
print("Preparing job script...")
job_string = gen_job(script=script, job_name=job_name, *args, **kwargs)
env = os.environ.copy()
if dryrun:
print(
"This is a dry run! Here is the generated job file, which will "
"not be submitted:"
)
print(job_string)
else:
print("Calling qsub with the generated job script.")
p = subprocess.Popen(
'qsub -V', stdin=subprocess.PIPE, env=env, shell=True
)
p.communicate(input=bytes(job_string.encode('ascii'))) | python | def qsub(script, job_name, dryrun=False, *args, **kwargs):
"""Submit a job via qsub."""
print("Preparing job script...")
job_string = gen_job(script=script, job_name=job_name, *args, **kwargs)
env = os.environ.copy()
if dryrun:
print(
"This is a dry run! Here is the generated job file, which will "
"not be submitted:"
)
print(job_string)
else:
print("Calling qsub with the generated job script.")
p = subprocess.Popen(
'qsub -V', stdin=subprocess.PIPE, env=env, shell=True
)
p.communicate(input=bytes(job_string.encode('ascii'))) | ['def', 'qsub', '(', 'script', ',', 'job_name', ',', 'dryrun', '=', 'False', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'print', '(', '"Preparing job script..."', ')', 'job_string', '=', 'gen_job', '(', 'script', '=', 'script', ',', 'job_name', '=', 'job_name', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'env', '=', 'os', '.', 'environ', '.', 'copy', '(', ')', 'if', 'dryrun', ':', 'print', '(', '"This is a dry run! Here is the generated job file, which will "', '"not be submitted:"', ')', 'print', '(', 'job_string', ')', 'else', ':', 'print', '(', '"Calling qsub with the generated job script."', ')', 'p', '=', 'subprocess', '.', 'Popen', '(', "'qsub -V'", ',', 'stdin', '=', 'subprocess', '.', 'PIPE', ',', 'env', '=', 'env', ',', 'shell', '=', 'True', ')', 'p', '.', 'communicate', '(', 'input', '=', 'bytes', '(', 'job_string', '.', 'encode', '(', "'ascii'", ')', ')', ')'] | Submit a job via qsub. | ['Submit', 'a', 'job', 'via', 'qsub', '.'] | train | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/shell.py#L66-L82 |
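qsub above pipes the generated job script to the scheduler on stdin. A minimal stand-in that uses cat instead of a real batch system, so the pattern can be tried anywhere:

import subprocess

job_string = "#!/bin/bash\necho hello from a job\n"   # placeholder job script
p = subprocess.Popen('cat', stdin=subprocess.PIPE, shell=True)
p.communicate(input=job_string.encode('ascii'))       # 'cat' stands in for 'qsub -V'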
6,480 | fhcrc/taxtastic | taxtastic/taxtable.py | TaxNode.collapse | def collapse(self, remove=False):
"""
Move all ``sequence_ids`` in the subtree below this node to this node.
If ``remove`` is True, nodes below this one are deleted from the
taxonomy.
"""
descendants = iter(self)
# Skip this node
assert next(descendants) is self
for descendant in descendants:
self.sequence_ids.update(descendant.sequence_ids)
descendant.sequence_ids.clear()
if remove:
for node in self.children:
self.remove_child(node) | python | def collapse(self, remove=False):
"""
Move all ``sequence_ids`` in the subtree below this node to this node.
If ``remove`` is True, nodes below this one are deleted from the
taxonomy.
"""
descendants = iter(self)
# Skip this node
assert next(descendants) is self
for descendant in descendants:
self.sequence_ids.update(descendant.sequence_ids)
descendant.sequence_ids.clear()
if remove:
for node in self.children:
self.remove_child(node) | ['def', 'collapse', '(', 'self', ',', 'remove', '=', 'False', ')', ':', 'descendants', '=', 'iter', '(', 'self', ')', '# Skip this node', 'assert', 'next', '(', 'descendants', ')', 'is', 'self', 'for', 'descendant', 'in', 'descendants', ':', 'self', '.', 'sequence_ids', '.', 'update', '(', 'descendant', '.', 'sequence_ids', ')', 'descendant', '.', 'sequence_ids', '.', 'clear', '(', ')', 'if', 'remove', ':', 'for', 'node', 'in', 'self', '.', 'children', ':', 'self', '.', 'remove_child', '(', 'node', ')'] | Move all ``sequence_ids`` in the subtree below this node to this node.
If ``remove`` is True, nodes below this one are deleted from the
taxonomy. | ['Move', 'all', 'sequence_ids', 'in', 'the', 'subtree', 'below', 'this', 'node', 'to', 'this', 'node', '.'] | train | https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxtable.py#L235-L251 |
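collapse() pools every descendant's sequence_ids onto one node. A self-contained toy tree showing the same idea; the class and field names here are illustrative, not taxtastic's API:

class Node:
    def __init__(self, name, sequence_ids=()):
        self.name = name
        self.sequence_ids = set(sequence_ids)
        self.children = []

def collapse(node):
    for child in node.children:
        collapse(child)
        node.sequence_ids.update(child.sequence_ids)
        child.sequence_ids.clear()

root = Node('genus', {'s1'})
root.children.append(Node('species_a', {'s2', 's3'}))
collapse(root)
print(root.sequence_ids)  # {'s1', 's2', 's3'}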
6,481 | bcbio/bcbio-nextgen | bcbio/cwl/create.py | _cwl_workflow_template | def _cwl_workflow_template(inputs, top_level=False):
"""Retrieve CWL inputs shared amongst different workflows.
"""
ready_inputs = []
for inp in inputs:
cur_inp = copy.deepcopy(inp)
for attr in ["source", "valueFrom", "wf_duplicate"]:
cur_inp.pop(attr, None)
if top_level:
cur_inp = workflow._flatten_nested_input(cur_inp)
cur_inp = _clean_record(cur_inp)
ready_inputs.append(cur_inp)
return {"class": "Workflow",
"cwlVersion": "v1.0",
"hints": [],
"requirements": [{"class": "EnvVarRequirement",
"envDef": [{"envName": "MPLCONFIGDIR", "envValue": "."}]},
{"class": "ScatterFeatureRequirement"},
{"class": "SubworkflowFeatureRequirement"}],
"inputs": ready_inputs,
"outputs": [],
"steps": []} | python | def _cwl_workflow_template(inputs, top_level=False):
"""Retrieve CWL inputs shared amongst different workflows.
"""
ready_inputs = []
for inp in inputs:
cur_inp = copy.deepcopy(inp)
for attr in ["source", "valueFrom", "wf_duplicate"]:
cur_inp.pop(attr, None)
if top_level:
cur_inp = workflow._flatten_nested_input(cur_inp)
cur_inp = _clean_record(cur_inp)
ready_inputs.append(cur_inp)
return {"class": "Workflow",
"cwlVersion": "v1.0",
"hints": [],
"requirements": [{"class": "EnvVarRequirement",
"envDef": [{"envName": "MPLCONFIGDIR", "envValue": "."}]},
{"class": "ScatterFeatureRequirement"},
{"class": "SubworkflowFeatureRequirement"}],
"inputs": ready_inputs,
"outputs": [],
"steps": []} | ['def', '_cwl_workflow_template', '(', 'inputs', ',', 'top_level', '=', 'False', ')', ':', 'ready_inputs', '=', '[', ']', 'for', 'inp', 'in', 'inputs', ':', 'cur_inp', '=', 'copy', '.', 'deepcopy', '(', 'inp', ')', 'for', 'attr', 'in', '[', '"source"', ',', '"valueFrom"', ',', '"wf_duplicate"', ']', ':', 'cur_inp', '.', 'pop', '(', 'attr', ',', 'None', ')', 'if', 'top_level', ':', 'cur_inp', '=', 'workflow', '.', '_flatten_nested_input', '(', 'cur_inp', ')', 'cur_inp', '=', '_clean_record', '(', 'cur_inp', ')', 'ready_inputs', '.', 'append', '(', 'cur_inp', ')', 'return', '{', '"class"', ':', '"Workflow"', ',', '"cwlVersion"', ':', '"v1.0"', ',', '"hints"', ':', '[', ']', ',', '"requirements"', ':', '[', '{', '"class"', ':', '"EnvVarRequirement"', ',', '"envDef"', ':', '[', '{', '"envName"', ':', '"MPLCONFIGDIR"', ',', '"envValue"', ':', '"."', '}', ']', '}', ',', '{', '"class"', ':', '"ScatterFeatureRequirement"', '}', ',', '{', '"class"', ':', '"SubworkflowFeatureRequirement"', '}', ']', ',', '"inputs"', ':', 'ready_inputs', ',', '"outputs"', ':', '[', ']', ',', '"steps"', ':', '[', ']', '}'] | Retrieve CWL inputs shared amongst different workflows. | ['Retrieve', 'CWL', 'inputs', 'shared', 'amongst', 'different', 'workflows', '.'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L42-L63 |
6,482 | PythonCharmers/python-future | src/future/backports/email/message.py | Message.values | def values(self):
"""Return a list of all the message's header values.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [self.policy.header_fetch_parse(k, v)
for k, v in self._headers] | python | def values(self):
"""Return a list of all the message's header values.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [self.policy.header_fetch_parse(k, v)
for k, v in self._headers] | ['def', 'values', '(', 'self', ')', ':', 'return', '[', 'self', '.', 'policy', '.', 'header_fetch_parse', '(', 'k', ',', 'v', ')', 'for', 'k', ',', 'v', 'in', 'self', '.', '_headers', ']'] | Return a list of all the message's header values.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list. | ['Return', 'a', 'list', 'of', 'all', 'the', 'message', 's', 'header', 'values', '.'] | train | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/message.py#L395-L404 |
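A quick illustration of values() keeping order and duplicates, using the standard-library email package that this backport is derived from:

from email.message import Message

msg = Message()
msg['To'] = 'a@example.com'
msg['Received'] = 'from hop1'
msg['Received'] = 'from hop2'   # duplicate field is appended, not replaced
print(msg.values())             # ['a@example.com', 'from hop1', 'from hop2']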
6,483 | kobejohn/PQHelper | pqhelper/base.py | StateInvestigator._board_from_game_image | def _board_from_game_image(self, game_image):
"""Return a board object matching the board in the game image.
Return None if any tiles are not identified.
"""
# board image
board_rect = self._board_tools['board_region'].region_in(game_image)
t, l, b, r = board_rect
board_image = game_image[t:b, l:r]
# board grid and tiles --> fill in a Board object
board = Board()
grid = self._board_tools['grid']
tile_id = self._board_tools['tile_id']
for p, borders in grid.borders_by_grid_position(board_image):
t, l, b, r = borders
tile = board_image[t:b, l:r]
tile_character = tile_id.identify(tile)
if tile_character is None:
return None # soft failure
board[p] = Tile.singleton(tile_character)
return board | python | def _board_from_game_image(self, game_image):
"""Return a board object matching the board in the game image.
Return None if any tiles are not identified.
"""
# board image
board_rect = self._board_tools['board_region'].region_in(game_image)
t, l, b, r = board_rect
board_image = game_image[t:b, l:r]
# board grid and tiles --> fill in a Board object
board = Board()
grid = self._board_tools['grid']
tile_id = self._board_tools['tile_id']
for p, borders in grid.borders_by_grid_position(board_image):
t, l, b, r = borders
tile = board_image[t:b, l:r]
tile_character = tile_id.identify(tile)
if tile_character is None:
return None # soft failure
board[p] = Tile.singleton(tile_character)
return board | ['def', '_board_from_game_image', '(', 'self', ',', 'game_image', ')', ':', '# board image', 'board_rect', '=', 'self', '.', '_board_tools', '[', "'board_region'", ']', '.', 'region_in', '(', 'game_image', ')', 't', ',', 'l', ',', 'b', ',', 'r', '=', 'board_rect', 'board_image', '=', 'game_image', '[', 't', ':', 'b', ',', 'l', ':', 'r', ']', '# board grid and tiles --> fill in a Board object', 'board', '=', 'Board', '(', ')', 'grid', '=', 'self', '.', '_board_tools', '[', "'grid'", ']', 'tile_id', '=', 'self', '.', '_board_tools', '[', "'tile_id'", ']', 'for', 'p', ',', 'borders', 'in', 'grid', '.', 'borders_by_grid_position', '(', 'board_image', ')', ':', 't', ',', 'l', ',', 'b', ',', 'r', '=', 'borders', 'tile', '=', 'board_image', '[', 't', ':', 'b', ',', 'l', ':', 'r', ']', 'tile_character', '=', 'tile_id', '.', 'identify', '(', 'tile', ')', 'if', 'tile_character', 'is', 'None', ':', 'return', 'None', '# soft failure', 'board', '[', 'p', ']', '=', 'Tile', '.', 'singleton', '(', 'tile_character', ')', 'return', 'board'] | Return a board object matching the board in the game image.
Return None if any tiles are not identified. | ['Return', 'a', 'board', 'object', 'matching', 'the', 'board', 'in', 'the', 'game', 'image', '.', 'Return', 'None', 'if', 'any', 'tiles', 'are', 'not', 'identified', '.'] | train | https://github.com/kobejohn/PQHelper/blob/d2b78a22dcb631794295e6a159b06f39c3f10db6/pqhelper/base.py#L160-L179 |
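The board extraction above amounts to rectangle slicing of an image array followed by per-tile slicing. A minimal numpy sketch with an assumed 8x8 grid and made-up coordinates:

import numpy as np

game_image = np.zeros((600, 800), dtype=np.uint8)   # placeholder screenshot
t, l, b, r = 100, 250, 420, 570                     # assumed board rectangle
board_image = game_image[t:b, l:r]

rows = cols = 8
tile_h = board_image.shape[0] // rows
tile_w = board_image.shape[1] // cols
tile_0_0 = board_image[0:tile_h, 0:tile_w]          # top-left tile, ready for identification
print(board_image.shape, tile_0_0.shape)            # (320, 320) (40, 40)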
6,484 | bwohlberg/sporco | docs/source/docntbk.py | script_and_notebook_to_rst | def script_and_notebook_to_rst(spth, npth, rpth):
"""
Convert a script and the corresponding executed notebook to rst.
The script is converted to notebook format *without* replacement
of sphinx cross-references with links to online docs, and the
resulting markdown cells are inserted into the executed notebook,
which is then converted to rst.
"""
# Read entire text of script at spth
with open(spth) as f:
stxt = f.read()
# Process script text
stxt = preprocess_script_string(stxt)
# Convert script text to notebook object
nbs = script_string_to_notebook_object(stxt)
# Read notebook file npth
nbn = nbformat.read(npth, as_version=4)
# Overwrite markdown cells in nbn with those from nbs
try:
replace_markdown_cells(nbs, nbn)
except ValueError:
raise ValueError('mismatch between source script %s and notebook %s' %
(spth, npth))
# Convert notebook object to rst
notebook_object_to_rst(nbn, rpth) | python | def script_and_notebook_to_rst(spth, npth, rpth):
"""
Convert a script and the corresponding executed notebook to rst.
The script is converted to notebook format *without* replacement
of sphinx cross-references with links to online docs, and the
resulting markdown cells are inserted into the executed notebook,
which is then converted to rst.
"""
# Read entire text of script at spth
with open(spth) as f:
stxt = f.read()
# Process script text
stxt = preprocess_script_string(stxt)
# Convert script text to notebook object
nbs = script_string_to_notebook_object(stxt)
# Read notebook file npth
nbn = nbformat.read(npth, as_version=4)
# Overwrite markdown cells in nbn with those from nbs
try:
replace_markdown_cells(nbs, nbn)
except ValueError:
raise ValueError('mismatch between source script %s and notebook %s' %
(spth, npth))
# Convert notebook object to rst
notebook_object_to_rst(nbn, rpth) | ['def', 'script_and_notebook_to_rst', '(', 'spth', ',', 'npth', ',', 'rpth', ')', ':', '# Read entire text of script at spth', 'with', 'open', '(', 'spth', ')', 'as', 'f', ':', 'stxt', '=', 'f', '.', 'read', '(', ')', '# Process script text', 'stxt', '=', 'preprocess_script_string', '(', 'stxt', ')', '# Convert script text to notebook object', 'nbs', '=', 'script_string_to_notebook_object', '(', 'stxt', ')', '# Read notebook file npth', 'nbn', '=', 'nbformat', '.', 'read', '(', 'npth', ',', 'as_version', '=', '4', ')', '# Overwrite markdown cells in nbn with those from nbs', 'try', ':', 'replace_markdown_cells', '(', 'nbs', ',', 'nbn', ')', 'except', 'ValueError', ':', 'raise', 'ValueError', '(', "'mismatch between source script %s and notebook %s'", '%', '(', 'spth', ',', 'npth', ')', ')', '# Convert notebook object to rst', 'notebook_object_to_rst', '(', 'nbn', ',', 'rpth', ')'] | Convert a script and the corresponding executed notebook to rst.
The script is converted to notebook format *without* replacement
of sphinx cross-references with links to online docs, and the
resulting markdown cells are inserted into the executed notebook,
which is then converted to rst. | ['Convert', 'a', 'script', 'and', 'the', 'corresponding', 'executed', 'notebook', 'to', 'rst', '.', 'The', 'script', 'is', 'converted', 'to', 'notebook', 'format', '*', 'without', '*', 'replacement', 'of', 'sphinx', 'cross', '-', 'references', 'with', 'links', 'to', 'online', 'docs', 'and', 'the', 'resulting', 'markdown', 'cells', 'are', 'inserted', 'into', 'the', 'executed', 'notebook', 'which', 'is', 'then', 'converted', 'to', 'rst', '.'] | train | https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/docs/source/docntbk.py#L555-L583 |
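A sketch of the markdown-cell replacement step using nbformat directly; nbformat is assumed to be installed and the cell contents are invented:

from nbformat import v4

nbs = v4.new_notebook(cells=[v4.new_markdown_cell('Updated text'),
                             v4.new_code_cell('x = 1')])
nbn = v4.new_notebook(cells=[v4.new_markdown_cell('Old text'),
                             v4.new_code_cell('x = 1')])

src = [c for c in nbs.cells if c.cell_type == 'markdown']
dst = [c for c in nbn.cells if c.cell_type == 'markdown']
if len(src) != len(dst):
    raise ValueError('mismatch between source and executed notebooks')
for s, d in zip(src, dst):
    d.source = s.source   # refresh the prose, keep the executed outputs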
6,485 | inasafe/inasafe | safe/gis/generic_expressions.py | inasafe_analysis_summary_field_value | def inasafe_analysis_summary_field_value(field, feature, parent):
"""Retrieve a value from a field in the analysis summary layer.
e.g. inasafe_analysis_summary_field_value('total_not_exposed') -> 3
"""
_ = feature, parent # NOQA
project_context_scope = QgsExpressionContextUtils.projectScope(
QgsProject.instance())
registry = QgsProject.instance()
key = provenance_layer_analysis_impacted_id['provenance_key']
if not project_context_scope.hasVariable(key):
return None
layer = registry.mapLayer(project_context_scope.variable(key))
if not layer:
return None
index = layer.fields().lookupField(field)
if index < 0:
return None
feature = next(layer.getFeatures())
return feature[index] | python | def inasafe_analysis_summary_field_value(field, feature, parent):
"""Retrieve a value from a field in the analysis summary layer.
e.g. inasafe_analysis_summary_field_value('total_not_exposed') -> 3
"""
_ = feature, parent # NOQA
project_context_scope = QgsExpressionContextUtils.projectScope(
QgsProject.instance())
registry = QgsProject.instance()
key = provenance_layer_analysis_impacted_id['provenance_key']
if not project_context_scope.hasVariable(key):
return None
layer = registry.mapLayer(project_context_scope.variable(key))
if not layer:
return None
index = layer.fields().lookupField(field)
if index < 0:
return None
feature = next(layer.getFeatures())
return feature[index] | ['def', 'inasafe_analysis_summary_field_value', '(', 'field', ',', 'feature', ',', 'parent', ')', ':', '_', '=', 'feature', ',', 'parent', '# NOQA', 'project_context_scope', '=', 'QgsExpressionContextUtils', '.', 'projectScope', '(', 'QgsProject', '.', 'instance', '(', ')', ')', 'registry', '=', 'QgsProject', '.', 'instance', '(', ')', 'key', '=', 'provenance_layer_analysis_impacted_id', '[', "'provenance_key'", ']', 'if', 'not', 'project_context_scope', '.', 'hasVariable', '(', 'key', ')', ':', 'return', 'None', 'layer', '=', 'registry', '.', 'mapLayer', '(', 'project_context_scope', '.', 'variable', '(', 'key', ')', ')', 'if', 'not', 'layer', ':', 'return', 'None', 'index', '=', 'layer', '.', 'fields', '(', ')', '.', 'lookupField', '(', 'field', ')', 'if', 'index', '<', '0', ':', 'return', 'None', 'feature', '=', 'next', '(', 'layer', '.', 'getFeatures', '(', ')', ')', 'return', 'feature', '[', 'index', ']'] | Retrieve a value from a field in the analysis summary layer.
e.g. inasafe_analysis_summary_field_value('total_not_exposed') -> 3 | ['Retrieve', 'a', 'value', 'from', 'a', 'field', 'in', 'the', 'analysis', 'summary', 'layer', '.'] | train | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gis/generic_expressions.py#L50-L74 |
6,486 | stephantul/somber | somber/sequential.py | RecursiveMixin.forward | def forward(self, x, **kwargs):
"""
Perform a forward pass through the network.
The forward pass in recursive som is based on a combination between
the activation in the last time-step and the current time-step.
Parameters
----------
x : numpy array
The input data.
prev_activation : numpy array.
The activation of the network in the previous time-step.
Returns
-------
activations : tuple of activations and differences
A tuple containing the activation of each unit, the differences
between the weights and input and the differences between the
context input and context weights.
"""
prev = kwargs['prev_activation']
# Differences is the components of the weights subtracted from
# the weight vector.
distance_x, diff_x = self.distance_function(x, self.weights)
distance_y, diff_y = self.distance_function(prev, self.context_weights)
x_ = distance_x * self.alpha
y_ = distance_y * self.beta
activation = np.exp(-(x_ + y_))
return activation, diff_x, diff_y | python | def forward(self, x, **kwargs):
"""
Perform a forward pass through the network.
The forward pass in recursive som is based on a combination between
the activation in the last time-step and the current time-step.
Parameters
----------
x : numpy array
The input data.
prev_activation : numpy array.
The activation of the network in the previous time-step.
Returns
-------
activations : tuple of activations and differences
A tuple containing the activation of each unit, the differences
between the weights and input and the differences between the
context input and context weights.
"""
prev = kwargs['prev_activation']
# Differences is the components of the weights subtracted from
# the weight vector.
distance_x, diff_x = self.distance_function(x, self.weights)
distance_y, diff_y = self.distance_function(prev, self.context_weights)
x_ = distance_x * self.alpha
y_ = distance_y * self.beta
activation = np.exp(-(x_ + y_))
return activation, diff_x, diff_y | ['def', 'forward', '(', 'self', ',', 'x', ',', '*', '*', 'kwargs', ')', ':', 'prev', '=', 'kwargs', '[', "'prev_activation'", ']', '# Differences is the components of the weights subtracted from', '# the weight vector.', 'distance_x', ',', 'diff_x', '=', 'self', '.', 'distance_function', '(', 'x', ',', 'self', '.', 'weights', ')', 'distance_y', ',', 'diff_y', '=', 'self', '.', 'distance_function', '(', 'prev', ',', 'self', '.', 'context_weights', ')', 'x_', '=', 'distance_x', '*', 'self', '.', 'alpha', 'y_', '=', 'distance_y', '*', 'self', '.', 'beta', 'activation', '=', 'np', '.', 'exp', '(', '-', '(', 'x_', '+', 'y_', ')', ')', 'return', 'activation', ',', 'diff_x', ',', 'diff_y'] | Perform a forward pass through the network.
The forward pass in recursive som is based on a combination between
the activation in the last time-step and the current time-step.
Parameters
----------
x : numpy array
The input data.
prev_activation : numpy array.
The activation of the network in the previous time-step.
Returns
-------
activations : tuple of activations and differences
A tuple containing the activation of each unit, the differences
between the weights and input and the differences between the
context input and context weights. | ['Perform', 'a', 'forward', 'pass', 'through', 'the', 'network', '.'] | train | https://github.com/stephantul/somber/blob/b7a13e646239500cc393668c01a7169c3e50b7b5/somber/sequential.py#L167-L200 |
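The forward pass above blends the distance to the current input with the distance to the previous activation: activation = exp(-(alpha * d_x + beta * d_y)). A small numpy sketch with made-up per-unit distances:

import numpy as np

alpha, beta = 3.0, 1.0
d_x = np.array([0.2, 0.8, 0.5])   # hypothetical input-to-weight distances
d_y = np.array([0.1, 0.1, 0.9])   # hypothetical context-to-weight distances
activation = np.exp(-(alpha * d_x + beta * d_y))
print(activation.argmax())        # 0: the best matching unit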
6,487 | letuananh/chirptext | chirptext/deko.py | MeCabToken.pos3 | def pos3(self):
''' Use pos-sc1-sc2 as POS '''
parts = [self.pos]
if self.sc1 and self.sc1 != '*':
parts.append(self.sc1)
if self.sc2 and self.sc2 != '*':
parts.append(self.sc2)
return '-'.join(parts) | python | def pos3(self):
''' Use pos-sc1-sc2 as POS '''
parts = [self.pos]
if self.sc1 and self.sc1 != '*':
parts.append(self.sc1)
if self.sc2 and self.sc2 != '*':
parts.append(self.sc2)
return '-'.join(parts) | ['def', 'pos3', '(', 'self', ')', ':', 'parts', '=', '[', 'self', '.', 'pos', ']', 'if', 'self', '.', 'sc1', 'and', 'self', '.', 'sc1', '!=', "'*'", ':', 'parts', '.', 'append', '(', 'self', '.', 'sc1', ')', 'if', 'self', '.', 'sc2', 'and', 'self', '.', 'sc2', '!=', "'*'", ':', 'parts', '.', 'append', '(', 'self', '.', 'sc2', ')', 'return', "'-'", '.', 'join', '(', 'parts', ')'] | Use pos-sc1-sc2 as POS | ['Use', 'pos', '-', 'sc1', '-', 'sc2', 'as', 'POS'] | train | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/deko.py#L113-L120 |
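pos3 simply joins the non-placeholder POS fields with '-'. The same idea applied to one MeCab-style line (values invented):

pos, sc1, sc2 = '名詞', '一般', '*'   # POS and sub-categories from a MeCab line
parts = [pos]
for sc in (sc1, sc2):
    if sc and sc != '*':
        parts.append(sc)
print('-'.join(parts))               # 名詞-一般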
6,488 | ga4gh/ga4gh-client | ga4gh/client/client.py | AbstractClient.search_individuals | def search_individuals(self, dataset_id, name=None):
"""
Returns an iterator over the Individuals fulfilling the specified
conditions.
:param str dataset_id: The dataset to search within.
:param str name: Only Individuals matching the specified name will
be returned.
:return: An iterator over the :class:`ga4gh.protocol.Biosample`
objects defined by the query parameters.
"""
request = protocol.SearchIndividualsRequest()
request.dataset_id = dataset_id
request.name = pb.string(name)
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "individuals", protocol.SearchIndividualsResponse) | python | def search_individuals(self, dataset_id, name=None):
"""
Returns an iterator over the Individuals fulfilling the specified
conditions.
:param str dataset_id: The dataset to search within.
:param str name: Only Individuals matching the specified name will
be returned.
:return: An iterator over the :class:`ga4gh.protocol.Biosample`
objects defined by the query parameters.
"""
request = protocol.SearchIndividualsRequest()
request.dataset_id = dataset_id
request.name = pb.string(name)
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "individuals", protocol.SearchIndividualsResponse) | ['def', 'search_individuals', '(', 'self', ',', 'dataset_id', ',', 'name', '=', 'None', ')', ':', 'request', '=', 'protocol', '.', 'SearchIndividualsRequest', '(', ')', 'request', '.', 'dataset_id', '=', 'dataset_id', 'request', '.', 'name', '=', 'pb', '.', 'string', '(', 'name', ')', 'request', '.', 'page_size', '=', 'pb', '.', 'int', '(', 'self', '.', '_page_size', ')', 'return', 'self', '.', '_run_search_request', '(', 'request', ',', '"individuals"', ',', 'protocol', '.', 'SearchIndividualsResponse', ')'] | Returns an iterator over the Individuals fulfilling the specified
conditions.
:param str dataset_id: The dataset to search within.
:param str name: Only Individuals matching the specified name will
be returned.
:return: An iterator over the :class:`ga4gh.protocol.Biosample`
objects defined by the query parameters. | ['Returns', 'an', 'iterator', 'over', 'the', 'Individuals', 'fulfilling', 'the', 'specified', 'conditions', '.'] | train | https://github.com/ga4gh/ga4gh-client/blob/d23b00b89112ef0930d45ee75aa3c6de3db615c5/ga4gh/client/client.py#L676-L692 |
6,489 | StackStorm/pybind | pybind/slxos/v17s_1_02/qos_mpls/map_apply/__init__.py | map_apply._set_apply_exp_dscp_map_name | def _set_apply_exp_dscp_map_name(self, v, load=False):
"""
Setter method for apply_exp_dscp_map_name, mapped from YANG variable /qos_mpls/map_apply/apply_exp_dscp_map_name (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_apply_exp_dscp_map_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_apply_exp_dscp_map_name() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=apply_exp_dscp_map_name.apply_exp_dscp_map_name, is_container='container', presence=False, yang_name="apply-exp-dscp-map-name", rest_name="exp-dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply exp dscp map', u'cli-sequence-commands': None, u'alt-name': u'exp-dscp', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """apply_exp_dscp_map_name must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=apply_exp_dscp_map_name.apply_exp_dscp_map_name, is_container='container', presence=False, yang_name="apply-exp-dscp-map-name", rest_name="exp-dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply exp dscp map', u'cli-sequence-commands': None, u'alt-name': u'exp-dscp', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='container', is_config=True)""",
})
self.__apply_exp_dscp_map_name = t
if hasattr(self, '_set'):
self._set() | python | def _set_apply_exp_dscp_map_name(self, v, load=False):
"""
Setter method for apply_exp_dscp_map_name, mapped from YANG variable /qos_mpls/map_apply/apply_exp_dscp_map_name (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_apply_exp_dscp_map_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_apply_exp_dscp_map_name() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=apply_exp_dscp_map_name.apply_exp_dscp_map_name, is_container='container', presence=False, yang_name="apply-exp-dscp-map-name", rest_name="exp-dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply exp dscp map', u'cli-sequence-commands': None, u'alt-name': u'exp-dscp', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """apply_exp_dscp_map_name must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=apply_exp_dscp_map_name.apply_exp_dscp_map_name, is_container='container', presence=False, yang_name="apply-exp-dscp-map-name", rest_name="exp-dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply exp dscp map', u'cli-sequence-commands': None, u'alt-name': u'exp-dscp', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='container', is_config=True)""",
})
self.__apply_exp_dscp_map_name = t
if hasattr(self, '_set'):
self._set() | ['def', '_set_apply_exp_dscp_map_name', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'apply_exp_dscp_map_name', '.', 'apply_exp_dscp_map_name', ',', 'is_container', '=', "'container'", ',', 'presence', '=', 'False', ',', 'yang_name', '=', '"apply-exp-dscp-map-name"', ',', 'rest_name', '=', '"exp-dscp"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'Apply exp dscp map'", ',', "u'cli-sequence-commands'", ':', 'None', ',', "u'alt-name'", ':', "u'exp-dscp'", ',', "u'cli-incomplete-no'", ':', 'None', '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-apply-qos-mpls'", ',', 'defining_module', '=', "'brocade-apply-qos-mpls'", ',', 'yang_type', '=', "'container'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""apply_exp_dscp_map_name must be of a type compatible with container"""', ',', "'defined-type'", ':', '"container"', ',', "'generated-type'", ':', '"""YANGDynClass(base=apply_exp_dscp_map_name.apply_exp_dscp_map_name, is_container=\'container\', presence=False, yang_name="apply-exp-dscp-map-name", rest_name="exp-dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Apply exp dscp map\', u\'cli-sequence-commands\': None, u\'alt-name\': u\'exp-dscp\', u\'cli-incomplete-no\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-apply-qos-mpls\', defining_module=\'brocade-apply-qos-mpls\', yang_type=\'container\', is_config=True)"""', ',', '}', ')', 'self', '.', '__apply_exp_dscp_map_name', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')'] | Setter method for apply_exp_dscp_map_name, mapped from YANG variable /qos_mpls/map_apply/apply_exp_dscp_map_name (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_apply_exp_dscp_map_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_apply_exp_dscp_map_name() directly. | ['Setter', 'method', 'for', 'apply_exp_dscp_map_name', 'mapped', 'from', 'YANG', 'variable', '/', 'qos_mpls', '/', 'map_apply', '/', 'apply_exp_dscp_map_name', '(', 'container', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_apply_exp_dscp_map_name', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_apply_exp_dscp_map_name', '()', 'directly', '.'] | train | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/qos_mpls/map_apply/__init__.py#L199-L220 |
6,490 | pantsbuild/pants | src/python/pants/net/http/fetcher.py | Fetcher.fetch | def fetch(self, url, listener, chunk_size_bytes=None, timeout_secs=None):
"""Fetches data from the given URL notifying listener of all lifecycle events.
:param string url: the url to GET data from
:param listener: the listener to notify of all download lifecycle events
:param chunk_size_bytes: the chunk size to use for buffering data, 10 KB by default
:param timeout_secs: the maximum time to wait for data to be available, 1 second by default
:raises: Fetcher.Error if there was a problem fetching all data from the given url
"""
if not isinstance(listener, self.Listener):
raise ValueError('listener must be a Listener instance, given {}'.format(listener))
chunk_size_bytes = chunk_size_bytes or 10 * 1024
timeout_secs = timeout_secs or 1.0
with closing(self._fetch(url, timeout_secs=timeout_secs)) as resp:
if resp.status_code != requests.codes.ok:
listener.status(resp.status_code)
raise self.PermanentError('Fetch of {} failed with status code {}'
.format(url, resp.status_code),
response_code=resp.status_code)
listener.status(resp.status_code, content_length=resp.size)
read_bytes = 0
for data in resp.iter_content(chunk_size_bytes=chunk_size_bytes):
listener.recv_chunk(data)
read_bytes += len(data)
if resp.size and read_bytes != resp.size:
raise self.Error('Expected {} bytes, read {}'.format(resp.size, read_bytes))
listener.finished() | python | def fetch(self, url, listener, chunk_size_bytes=None, timeout_secs=None):
"""Fetches data from the given URL notifying listener of all lifecycle events.
:param string url: the url to GET data from
:param listener: the listener to notify of all download lifecycle events
:param chunk_size_bytes: the chunk size to use for buffering data, 10 KB by default
:param timeout_secs: the maximum time to wait for data to be available, 1 second by default
:raises: Fetcher.Error if there was a problem fetching all data from the given url
"""
if not isinstance(listener, self.Listener):
raise ValueError('listener must be a Listener instance, given {}'.format(listener))
chunk_size_bytes = chunk_size_bytes or 10 * 1024
timeout_secs = timeout_secs or 1.0
with closing(self._fetch(url, timeout_secs=timeout_secs)) as resp:
if resp.status_code != requests.codes.ok:
listener.status(resp.status_code)
raise self.PermanentError('Fetch of {} failed with status code {}'
.format(url, resp.status_code),
response_code=resp.status_code)
listener.status(resp.status_code, content_length=resp.size)
read_bytes = 0
for data in resp.iter_content(chunk_size_bytes=chunk_size_bytes):
listener.recv_chunk(data)
read_bytes += len(data)
if resp.size and read_bytes != resp.size:
raise self.Error('Expected {} bytes, read {}'.format(resp.size, read_bytes))
listener.finished() | ['def', 'fetch', '(', 'self', ',', 'url', ',', 'listener', ',', 'chunk_size_bytes', '=', 'None', ',', 'timeout_secs', '=', 'None', ')', ':', 'if', 'not', 'isinstance', '(', 'listener', ',', 'self', '.', 'Listener', ')', ':', 'raise', 'ValueError', '(', "'listener must be a Listener instance, given {}'", '.', 'format', '(', 'listener', ')', ')', 'chunk_size_bytes', '=', 'chunk_size_bytes', 'or', '10', '*', '1024', 'timeout_secs', '=', 'timeout_secs', 'or', '1.0', 'with', 'closing', '(', 'self', '.', '_fetch', '(', 'url', ',', 'timeout_secs', '=', 'timeout_secs', ')', ')', 'as', 'resp', ':', 'if', 'resp', '.', 'status_code', '!=', 'requests', '.', 'codes', '.', 'ok', ':', 'listener', '.', 'status', '(', 'resp', '.', 'status_code', ')', 'raise', 'self', '.', 'PermanentError', '(', "'Fetch of {} failed with status code {}'", '.', 'format', '(', 'url', ',', 'resp', '.', 'status_code', ')', ',', 'response_code', '=', 'resp', '.', 'status_code', ')', 'listener', '.', 'status', '(', 'resp', '.', 'status_code', ',', 'content_length', '=', 'resp', '.', 'size', ')', 'read_bytes', '=', '0', 'for', 'data', 'in', 'resp', '.', 'iter_content', '(', 'chunk_size_bytes', '=', 'chunk_size_bytes', ')', ':', 'listener', '.', 'recv_chunk', '(', 'data', ')', 'read_bytes', '+=', 'len', '(', 'data', ')', 'if', 'resp', '.', 'size', 'and', 'read_bytes', '!=', 'resp', '.', 'size', ':', 'raise', 'self', '.', 'Error', '(', "'Expected {} bytes, read {}'", '.', 'format', '(', 'resp', '.', 'size', ',', 'read_bytes', ')', ')', 'listener', '.', 'finished', '(', ')'] | Fetches data from the given URL notifying listener of all lifecycle events.
:param string url: the url to GET data from
:param listener: the listener to notify of all download lifecycle events
:param chunk_size_bytes: the chunk size to use for buffering data, 10 KB by default
:param timeout_secs: the maximum time to wait for data to be available, 1 second by default
:raises: Fetcher.Error if there was a problem fetching all data from the given url | ['Fetches', 'data', 'from', 'the', 'given', 'URL', 'notifying', 'listener', 'of', 'all', 'lifecycle', 'events', '.'] | train | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/net/http/fetcher.py#L327-L356 |
6,491 | hyperledger/sawtooth-core | validator/sawtooth_validator/gossip/gossip.py | Gossip.add_candidate_peer_endpoints | def add_candidate_peer_endpoints(self, peer_endpoints):
"""Adds candidate endpoints to the list of endpoints to
attempt to peer with.
Args:
peer_endpoints ([str]): A list of public uri's which the
validator can attempt to peer with.
"""
if self._topology:
self._topology.add_candidate_peer_endpoints(peer_endpoints)
else:
LOGGER.debug("Could not add peer endpoints to topology. "
"ConnectionManager does not exist.") | python | def add_candidate_peer_endpoints(self, peer_endpoints):
"""Adds candidate endpoints to the list of endpoints to
attempt to peer with.
Args:
peer_endpoints ([str]): A list of public uri's which the
validator can attempt to peer with.
"""
if self._topology:
self._topology.add_candidate_peer_endpoints(peer_endpoints)
else:
LOGGER.debug("Could not add peer endpoints to topology. "
"ConnectionManager does not exist.") | ['def', 'add_candidate_peer_endpoints', '(', 'self', ',', 'peer_endpoints', ')', ':', 'if', 'self', '.', '_topology', ':', 'self', '.', '_topology', '.', 'add_candidate_peer_endpoints', '(', 'peer_endpoints', ')', 'else', ':', 'LOGGER', '.', 'debug', '(', '"Could not add peer endpoints to topology. "', '"ConnectionManager does not exist."', ')'] | Adds candidate endpoints to the list of endpoints to
attempt to peer with.
Args:
peer_endpoints ([str]): A list of public uri's which the
validator can attempt to peer with. | ['Adds', 'candidate', 'endpoints', 'to', 'the', 'list', 'of', 'endpoints', 'to', 'attempt', 'to', 'peer', 'with', '.'] | train | https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/gossip/gossip.py#L179-L191 |
6,492 | phac-nml/sistr_cmd | sistr/src/writers.py | flatten_dict | def flatten_dict(x):
"""Flatten a dict
Flatten an arbitrarily nested dict as output by to_dict
.. note::
Keys in the flattened dict may get very long.
Args:
x (dict): Arbitrarily nested dict (maybe resembling a tree) with literal/scalar leaf values
Returns:
dict: flattened 1D dict
"""
out = {}
for k, v in x.items():
out = _recur_flatten(k, v, out)
return out | python | def flatten_dict(x):
"""Flatten a dict
Flatten an arbitrarily nested dict as output by to_dict
.. note::
Keys in the flattened dict may get very long.
Args:
x (dict): Arbitrarily nested dict (maybe resembling a tree) with literal/scalar leaf values
Returns:
dict: flattened 1D dict
"""
out = {}
for k, v in x.items():
out = _recur_flatten(k, v, out)
return out | ['def', 'flatten_dict', '(', 'x', ')', ':', 'out', '=', '{', '}', 'for', 'k', ',', 'v', 'in', 'x', '.', 'items', '(', ')', ':', 'out', '=', '_recur_flatten', '(', 'k', ',', 'v', ',', 'out', ')', 'return', 'out'] | Flatten a dict
Flatten an arbitrarily nested dict as output by to_dict
.. note::
Keys in the flattened dict may get very long.
Args:
x (dict): Arbitrarily nested dict (maybe resembling a tree) with literal/scalar leaf values
Returns:
dict: flattened 1D dict | ['Flatten', 'a', 'dict', 'Flatten', 'an', 'arbitrarily', 'nested', 'dict', 'as', 'output', 'by', 'to_dict', '..', 'note', '::', 'Keys', 'in', 'the', 'flattened', 'dict', 'may', 'get', 'very', 'long', '.', 'Args', ':', 'x', '(', 'dict', ')', ':', 'Arbitrarily', 'nested', 'dict', '(', 'maybe', 'resembling', 'a', 'tree', ')', 'with', 'literal', '/', 'scalar', 'leaf', 'values', 'Returns', ':', 'dict', ':', 'flattened', '1D', 'dict'] | train | https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/writers.py#L110-L128 |
6,493 | openthread/openthread | tools/harness-automation/autothreadharness/open_thread_controller.py | OpenThreadController.run | def run(self):
"""Threading callback"""
self.viewing = True
while self.viewing and self._lock.acquire():
try:
line = self._readline()
except:
pass
else:
logger.info(line)
self._lock.release()
time.sleep(0) | python | def run(self):
"""Threading callback"""
self.viewing = True
while self.viewing and self._lock.acquire():
try:
line = self._readline()
except:
pass
else:
logger.info(line)
self._lock.release()
time.sleep(0) | ['def', 'run', '(', 'self', ')', ':', 'self', '.', 'viewing', '=', 'True', 'while', 'self', '.', 'viewing', 'and', 'self', '.', '_lock', '.', 'acquire', '(', ')', ':', 'try', ':', 'line', '=', 'self', '.', '_readline', '(', ')', 'except', ':', 'pass', 'else', ':', 'logger', '.', 'info', '(', 'line', ')', 'self', '.', '_lock', '.', 'release', '(', ')', 'time', '.', 'sleep', '(', '0', ')'] | Threading callback | ['Threading', 'callback'] | train | https://github.com/openthread/openthread/blob/0208d10563aa21c518092985c78ecf9cd223ab74/tools/harness-automation/autothreadharness/open_thread_controller.py#L234-L246 |
6,494 | jepegit/cellpy | cellpy/utils/batch_tools/batch_core.py | Doer.info | def info(self):
"""Delivers some info to you about the class."""
print("Sorry, but I don't have much to share.")
print("This is me:")
print(self)
print("And these are the experiments assigned to me:")
print(self.experiments) | python | def info(self):
"""Delivers some info to you about the class."""
print("Sorry, but I don't have much to share.")
print("This is me:")
print(self)
print("And these are the experiments assigned to me:")
print(self.experiments) | ['def', 'info', '(', 'self', ')', ':', 'print', '(', '"Sorry, but I don\'t have much to share."', ')', 'print', '(', '"This is me:"', ')', 'print', '(', 'self', ')', 'print', '(', '"And these are the experiments assigned to me:"', ')', 'print', '(', 'self', '.', 'experiments', ')'] | Delivers some info to you about the class. | ['Delivers', 'some', 'info', 'to', 'you', 'about', 'the', 'class', '.'] | train | https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/utils/batch_tools/batch_core.py#L46-L53 |
6,495 | quantumlib/Cirq | cirq/optimizers/merge_interactions.py | MergeInteractions._op_to_matrix | def _op_to_matrix(self,
op: Optional[ops.Operation],
qubits: Tuple[ops.Qid, ...]
) -> Optional[np.ndarray]:
"""Determines the effect of an operation on the given qubits.
If the operation is a 1-qubit operation on one of the given qubits,
or a 2-qubit operation on both of the given qubits, and also the
operation has a known matrix, then a matrix is returned. Otherwise None
is returned.
Args:
op: The operation to understand.
qubits: The qubits we care about. Order determines matrix tensor
order.
Returns:
None, or else a matrix equivalent to the effect of the operation.
"""
q1, q2 = qubits
matrix = protocols.unitary(op, None)
if matrix is None:
return None
assert op is not None
if op.qubits == qubits:
return matrix
if op.qubits == (q2, q1):
return MergeInteractions._flip_kron_order(matrix)
if op.qubits == (q1,):
return np.kron(matrix, np.eye(2))
if op.qubits == (q2,):
return np.kron(np.eye(2), matrix)
return None | python | def _op_to_matrix(self,
op: Optional[ops.Operation],
qubits: Tuple[ops.Qid, ...]
) -> Optional[np.ndarray]:
"""Determines the effect of an operation on the given qubits.
If the operation is a 1-qubit operation on one of the given qubits,
or a 2-qubit operation on both of the given qubits, and also the
operation has a known matrix, then a matrix is returned. Otherwise None
is returned.
Args:
op: The operation to understand.
qubits: The qubits we care about. Order determines matrix tensor
order.
Returns:
None, or else a matrix equivalent to the effect of the operation.
"""
q1, q2 = qubits
matrix = protocols.unitary(op, None)
if matrix is None:
return None
assert op is not None
if op.qubits == qubits:
return matrix
if op.qubits == (q2, q1):
return MergeInteractions._flip_kron_order(matrix)
if op.qubits == (q1,):
return np.kron(matrix, np.eye(2))
if op.qubits == (q2,):
return np.kron(np.eye(2), matrix)
return None | ['def', '_op_to_matrix', '(', 'self', ',', 'op', ':', 'Optional', '[', 'ops', '.', 'Operation', ']', ',', 'qubits', ':', 'Tuple', '[', 'ops', '.', 'Qid', ',', '...', ']', ')', '->', 'Optional', '[', 'np', '.', 'ndarray', ']', ':', 'q1', ',', 'q2', '=', 'qubits', 'matrix', '=', 'protocols', '.', 'unitary', '(', 'op', ',', 'None', ')', 'if', 'matrix', 'is', 'None', ':', 'return', 'None', 'assert', 'op', 'is', 'not', 'None', 'if', 'op', '.', 'qubits', '==', 'qubits', ':', 'return', 'matrix', 'if', 'op', '.', 'qubits', '==', '(', 'q2', ',', 'q1', ')', ':', 'return', 'MergeInteractions', '.', '_flip_kron_order', '(', 'matrix', ')', 'if', 'op', '.', 'qubits', '==', '(', 'q1', ',', ')', ':', 'return', 'np', '.', 'kron', '(', 'matrix', ',', 'np', '.', 'eye', '(', '2', ')', ')', 'if', 'op', '.', 'qubits', '==', '(', 'q2', ',', ')', ':', 'return', 'np', '.', 'kron', '(', 'np', '.', 'eye', '(', '2', ')', ',', 'matrix', ')', 'return', 'None'] | Determines the effect of an operation on the given qubits.
If the operation is a 1-qubit operation on one of the given qubits,
or a 2-qubit operation on both of the given qubits, and also the
operation has a known matrix, then a matrix is returned. Otherwise None
is returned.
Args:
op: The operation to understand.
qubits: The qubits we care about. Order determines matrix tensor
order.
Returns:
None, or else a matrix equivalent to the effect of the operation. | ['Determines', 'the', 'effect', 'of', 'an', 'operation', 'on', 'the', 'given', 'qubits', '.'] | train | https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/optimizers/merge_interactions.py#L90-L125 |
6,496 | csparpa/pyowm | pyowm/weatherapi25/owm25.py | OWM25.weather_history_at_place | def weather_history_at_place(self, name, start=None, end=None):
"""
Queries the OWM Weather API for weather history for the specified location
(eg: "London,uk"). A list of *Weather* objects is returned. It is
possible to query for weather history in a closed time period, whose
boundaries can be passed as optional parameters.
:param name: the location's toponym
:type name: str or unicode
:param start: the object conveying the time value for the start query
boundary (defaults to ``None``)
:type start: int, ``datetime.datetime`` or ISO8601-formatted
string
:param end: the object conveying the time value for the end query
boundary (defaults to ``None``)
:type end: int, ``datetime.datetime`` or ISO8601-formatted string
:returns: a list of *Weather* instances or ``None`` if history data is
not available for the specified location
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* if the time boundaries are not in the correct
chronological order, if one of the time boundaries is not ``None``
and the other is or if one or both of the time boundaries are after
the current time
"""
assert isinstance(name, str), "Value must be a string"
encoded_name = name
params = {'q': encoded_name, 'lang': self._language}
if start is None and end is None:
pass
elif start is not None and end is not None:
unix_start = timeformatutils.to_UNIXtime(start)
unix_end = timeformatutils.to_UNIXtime(end)
if unix_start >= unix_end:
raise ValueError("Error: the start time boundary must " \
"precede the end time!")
current_time = time()
if unix_start > current_time:
raise ValueError("Error: the start time boundary must " \
"precede the current time!")
params['start'] = str(unix_start)
params['end'] = str(unix_end)
else:
raise ValueError("Error: one of the time boundaries is None, " \
"while the other is not!")
uri = http_client.HttpClient.to_url(CITY_WEATHER_HISTORY_URL,
self._API_key,
self._subscription_type,
self._use_ssl)
_, json_data = self._wapi.cacheable_get_json(uri, params=params)
return self._parsers['weather_history'].parse_JSON(json_data) | python | def weather_history_at_place(self, name, start=None, end=None):
"""
Queries the OWM Weather API for weather history for the specified location
(eg: "London,uk"). A list of *Weather* objects is returned. It is
possible to query for weather history in a closed time period, whose
boundaries can be passed as optional parameters.
:param name: the location's toponym
:type name: str or unicode
:param start: the object conveying the time value for the start query
boundary (defaults to ``None``)
:type start: int, ``datetime.datetime`` or ISO8601-formatted
string
:param end: the object conveying the time value for the end query
boundary (defaults to ``None``)
:type end: int, ``datetime.datetime`` or ISO8601-formatted string
:returns: a list of *Weather* instances or ``None`` if history data is
not available for the specified location
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* if the time boundaries are not in the correct
chronological order, if one of the time boundaries is not ``None``
and the other is or if one or both of the time boundaries are after
the current time
"""
assert isinstance(name, str), "Value must be a string"
encoded_name = name
params = {'q': encoded_name, 'lang': self._language}
if start is None and end is None:
pass
elif start is not None and end is not None:
unix_start = timeformatutils.to_UNIXtime(start)
unix_end = timeformatutils.to_UNIXtime(end)
if unix_start >= unix_end:
raise ValueError("Error: the start time boundary must " \
"precede the end time!")
current_time = time()
if unix_start > current_time:
raise ValueError("Error: the start time boundary must " \
"precede the current time!")
params['start'] = str(unix_start)
params['end'] = str(unix_end)
else:
raise ValueError("Error: one of the time boundaries is None, " \
"while the other is not!")
uri = http_client.HttpClient.to_url(CITY_WEATHER_HISTORY_URL,
self._API_key,
self._subscription_type,
self._use_ssl)
_, json_data = self._wapi.cacheable_get_json(uri, params=params)
return self._parsers['weather_history'].parse_JSON(json_data) | ['def', 'weather_history_at_place', '(', 'self', ',', 'name', ',', 'start', '=', 'None', ',', 'end', '=', 'None', ')', ':', 'assert', 'isinstance', '(', 'name', ',', 'str', ')', ',', '"Value must be a string"', 'encoded_name', '=', 'name', 'params', '=', '{', "'q'", ':', 'encoded_name', ',', "'lang'", ':', 'self', '.', '_language', '}', 'if', 'start', 'is', 'None', 'and', 'end', 'is', 'None', ':', 'pass', 'elif', 'start', 'is', 'not', 'None', 'and', 'end', 'is', 'not', 'None', ':', 'unix_start', '=', 'timeformatutils', '.', 'to_UNIXtime', '(', 'start', ')', 'unix_end', '=', 'timeformatutils', '.', 'to_UNIXtime', '(', 'end', ')', 'if', 'unix_start', '>=', 'unix_end', ':', 'raise', 'ValueError', '(', '"Error: the start time boundary must "', '"precede the end time!"', ')', 'current_time', '=', 'time', '(', ')', 'if', 'unix_start', '>', 'current_time', ':', 'raise', 'ValueError', '(', '"Error: the start time boundary must "', '"precede the current time!"', ')', 'params', '[', "'start'", ']', '=', 'str', '(', 'unix_start', ')', 'params', '[', "'end'", ']', '=', 'str', '(', 'unix_end', ')', 'else', ':', 'raise', 'ValueError', '(', '"Error: one of the time boundaries is None, "', '"while the other is not!"', ')', 'uri', '=', 'http_client', '.', 'HttpClient', '.', 'to_url', '(', 'CITY_WEATHER_HISTORY_URL', ',', 'self', '.', '_API_key', ',', 'self', '.', '_subscription_type', ',', 'self', '.', '_use_ssl', ')', '_', ',', 'json_data', '=', 'self', '.', '_wapi', '.', 'cacheable_get_json', '(', 'uri', ',', 'params', '=', 'params', ')', 'return', 'self', '.', '_parsers', '[', "'weather_history'", ']', '.', 'parse_JSON', '(', 'json_data', ')'] | Queries the OWM Weather API for weather history for the specified location
(eg: "London,uk"). A list of *Weather* objects is returned. It is
possible to query for weather history in a closed time period, whose
boundaries can be passed as optional parameters.
:param name: the location's toponym
:type name: str or unicode
:param start: the object conveying the time value for the start query
boundary (defaults to ``None``)
:type start: int, ``datetime.datetime`` or ISO8601-formatted
string
:param end: the object conveying the time value for the end query
boundary (defaults to ``None``)
:type end: int, ``datetime.datetime`` or ISO8601-formatted string
:returns: a list of *Weather* instances or ``None`` if history data is
not available for the specified location
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* if the time boundaries are not in the correct
chronological order, if one of the time boundaries is not ``None``
and the other is or if one or both of the time boundaries are after
the current time | ['Queries', 'the', 'OWM', 'Weather', 'API', 'for', 'weather', 'history', 'for', 'the', 'specified', 'location', '(', 'eg', ':', 'London', 'uk', ')', '.', 'A', 'list', 'of', '*', 'Weather', '*', 'objects', 'is', 'returned', '.', 'It', 'is', 'possible', 'to', 'query', 'for', 'weather', 'history', 'in', 'a', 'closed', 'time', 'period', 'whose', 'boundaries', 'can', 'be', 'passed', 'as', 'optional', 'parameters', '.'] | train | https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/owm25.py#L769-L820 |
6,497 | senaite/senaite.core | bika/lims/browser/widgets/reflexrulewidget.py | ReflexRuleWidget._get_sorted_actions_list | def _get_sorted_actions_list(self, raw_set):
"""
This returns a list of dictionaries with the actions got in the
raw_set.
:raw_set: is the dict representing a set of rules and conditions.
"""
keys_list = raw_set.keys()
# actions_dicts_l is the final list which will contain the the
# dictionaries with the actions.
# The dictionaries will be sorted by the index obtained in the
# template.
actions_dicts_l = []
# a_count is a counter which is incremented every time a new action is
# added to the list, so we can give it a index.
a_count = 0
# actions_list will contain the keys starting with 'action-' but sorted
# by their index
actions_list = self._get_sorted_action_keys(keys_list)
for key in actions_list:
# Getting the key for otherWS element
otherWS_key = 'otherWS-'+str(a_count)
# Getting the value for otherWS selector
otherWS = raw_set.get(otherWS_key, '')
# Getting the key for analyst element
analyst_key = 'analyst-'+str(a_count)
# Getting the value for analyst
analyst = raw_set.get(analyst_key, '')
# Getting which analysis should has its result set
setresulton_key = 'setresulton-'+str(a_count)
setresulton = raw_set.get(setresulton_key, '')
# Getting the discrete result to set
setresultdiscrete_key = 'setresultdiscrete-'+str(a_count)
setresultdiscrete = raw_set.get(setresultdiscrete_key, '')
# Getting the numeric result to set
setresultvalue_key = 'setresultvalue-'+str(a_count)
setresultvalue = raw_set.get(setresultvalue_key, '')
# Getting the local analysis id
local_id_key = 'an_result_id-'+str(a_count)
local_id = raw_set.get(local_id_key, '')
# Getting the local analysis id
worksheettemplate_key = 'worksheettemplate-'+str(a_count)
worksheettemplate = raw_set.get(worksheettemplate_key, '')
# Getting the visibility in report
showinreport_key = 'showinreport-'+str(a_count)
showinreport = raw_set.get(showinreport_key, '')
# Getting the analysis to show or hide in report
setvisibilityof_key = 'setvisibilityof-'+str(a_count)
setvisibilityof = raw_set.get(setvisibilityof_key, '')
# Building the action dict
action_dict = {
'action': raw_set[key],
'act_row_idx': a_count,
'otherWS': otherWS,
'worksheettemplate': worksheettemplate,
'analyst': analyst,
'setresulton': setresulton,
'setresultdiscrete': setresultdiscrete,
'setresultvalue': setresultvalue,
'an_result_id': local_id,
'showinreport': showinreport,
'setvisibilityof': setvisibilityof,
}
# Saves the action as a new dict inside the actions list
actions_dicts_l.append(action_dict)
a_count += 1
return actions_dicts_l | python | def _get_sorted_actions_list(self, raw_set):
"""
This returns a list of dictionaries with the actions got in the
raw_set.
:raw_set: is the dict representing a set of rules and conditions.
"""
keys_list = raw_set.keys()
# actions_dicts_l is the final list which will contain the the
# dictionaries with the actions.
# The dictionaries will be sorted by the index obtained in the
# template.
actions_dicts_l = []
# a_count is a counter which is incremented every time a new action is
# added to the list, so we can give it a index.
a_count = 0
# actions_list will contain the keys starting with 'action-' but sorted
# by their index
actions_list = self._get_sorted_action_keys(keys_list)
for key in actions_list:
# Getting the key for otherWS element
otherWS_key = 'otherWS-'+str(a_count)
# Getting the value for otherWS selector
otherWS = raw_set.get(otherWS_key, '')
# Getting the key for analyst element
analyst_key = 'analyst-'+str(a_count)
# Getting the value for analyst
analyst = raw_set.get(analyst_key, '')
# Getting which analysis should has its result set
setresulton_key = 'setresulton-'+str(a_count)
setresulton = raw_set.get(setresulton_key, '')
# Getting the discrete result to set
setresultdiscrete_key = 'setresultdiscrete-'+str(a_count)
setresultdiscrete = raw_set.get(setresultdiscrete_key, '')
# Getting the numeric result to set
setresultvalue_key = 'setresultvalue-'+str(a_count)
setresultvalue = raw_set.get(setresultvalue_key, '')
# Getting the local analysis id
local_id_key = 'an_result_id-'+str(a_count)
local_id = raw_set.get(local_id_key, '')
# Getting the local analysis id
worksheettemplate_key = 'worksheettemplate-'+str(a_count)
worksheettemplate = raw_set.get(worksheettemplate_key, '')
# Getting the visibility in report
showinreport_key = 'showinreport-'+str(a_count)
showinreport = raw_set.get(showinreport_key, '')
# Getting the analysis to show or hide in report
setvisibilityof_key = 'setvisibilityof-'+str(a_count)
setvisibilityof = raw_set.get(setvisibilityof_key, '')
# Building the action dict
action_dict = {
'action': raw_set[key],
'act_row_idx': a_count,
'otherWS': otherWS,
'worksheettemplate': worksheettemplate,
'analyst': analyst,
'setresulton': setresulton,
'setresultdiscrete': setresultdiscrete,
'setresultvalue': setresultvalue,
'an_result_id': local_id,
'showinreport': showinreport,
'setvisibilityof': setvisibilityof,
}
# Saves the action as a new dict inside the actions list
actions_dicts_l.append(action_dict)
a_count += 1
return actions_dicts_l | ['def', '_get_sorted_actions_list', '(', 'self', ',', 'raw_set', ')', ':', 'keys_list', '=', 'raw_set', '.', 'keys', '(', ')', '# actions_dicts_l is the final list which will contain the the', '# dictionaries with the actions.', '# The dictionaries will be sorted by the index obtained in the', '# template.', 'actions_dicts_l', '=', '[', ']', '# a_count is a counter which is incremented every time a new action is', '# added to the list, so we can give it a index.', 'a_count', '=', '0', "# actions_list will contain the keys starting with 'action-' but sorted", '# by their index', 'actions_list', '=', 'self', '.', '_get_sorted_action_keys', '(', 'keys_list', ')', 'for', 'key', 'in', 'actions_list', ':', '# Getting the key for otherWS element', 'otherWS_key', '=', "'otherWS-'", '+', 'str', '(', 'a_count', ')', '# Getting the value for otherWS selector', 'otherWS', '=', 'raw_set', '.', 'get', '(', 'otherWS_key', ',', "''", ')', '# Getting the key for analyst element', 'analyst_key', '=', "'analyst-'", '+', 'str', '(', 'a_count', ')', '# Getting the value for analyst', 'analyst', '=', 'raw_set', '.', 'get', '(', 'analyst_key', ',', "''", ')', '# Getting which analysis should has its result set', 'setresulton_key', '=', "'setresulton-'", '+', 'str', '(', 'a_count', ')', 'setresulton', '=', 'raw_set', '.', 'get', '(', 'setresulton_key', ',', "''", ')', '# Getting the discrete result to set', 'setresultdiscrete_key', '=', "'setresultdiscrete-'", '+', 'str', '(', 'a_count', ')', 'setresultdiscrete', '=', 'raw_set', '.', 'get', '(', 'setresultdiscrete_key', ',', "''", ')', '# Getting the numeric result to set', 'setresultvalue_key', '=', "'setresultvalue-'", '+', 'str', '(', 'a_count', ')', 'setresultvalue', '=', 'raw_set', '.', 'get', '(', 'setresultvalue_key', ',', "''", ')', '# Getting the local analysis id', 'local_id_key', '=', "'an_result_id-'", '+', 'str', '(', 'a_count', ')', 'local_id', '=', 'raw_set', '.', 'get', '(', 'local_id_key', ',', "''", ')', '# Getting the local analysis id', 'worksheettemplate_key', '=', "'worksheettemplate-'", '+', 'str', '(', 'a_count', ')', 'worksheettemplate', '=', 'raw_set', '.', 'get', '(', 'worksheettemplate_key', ',', "''", ')', '# Getting the visibility in report', 'showinreport_key', '=', "'showinreport-'", '+', 'str', '(', 'a_count', ')', 'showinreport', '=', 'raw_set', '.', 'get', '(', 'showinreport_key', ',', "''", ')', '# Getting the analysis to show or hide in report', 'setvisibilityof_key', '=', "'setvisibilityof-'", '+', 'str', '(', 'a_count', ')', 'setvisibilityof', '=', 'raw_set', '.', 'get', '(', 'setvisibilityof_key', ',', "''", ')', '# Building the action dict', 'action_dict', '=', '{', "'action'", ':', 'raw_set', '[', 'key', ']', ',', "'act_row_idx'", ':', 'a_count', ',', "'otherWS'", ':', 'otherWS', ',', "'worksheettemplate'", ':', 'worksheettemplate', ',', "'analyst'", ':', 'analyst', ',', "'setresulton'", ':', 'setresulton', ',', "'setresultdiscrete'", ':', 'setresultdiscrete', ',', "'setresultvalue'", ':', 'setresultvalue', ',', "'an_result_id'", ':', 'local_id', ',', "'showinreport'", ':', 'showinreport', ',', "'setvisibilityof'", ':', 'setvisibilityof', ',', '}', '# Saves the action as a new dict inside the actions list', 'actions_dicts_l', '.', 'append', '(', 'action_dict', ')', 'a_count', '+=', '1', 'return', 'actions_dicts_l'] | This returns a list of dictionaries with the actions got in the
raw_set.
:raw_set: is the dict representing a set of rules and conditions. | ['This', 'returns', 'a', 'list', 'of', 'dictionaries', 'with', 'the', 'actions', 'got', 'in', 'the', 'raw_set', '.', ':', 'raw_set', ':', 'is', 'the', 'dict', 'representing', 'a', 'set', 'of', 'rules', 'and', 'conditions', '.'] | train | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/widgets/reflexrulewidget.py#L234-L299 |
6,498 | jaraco/jaraco.windows | jaraco/windows/dpapi.py | CryptProtectData | def CryptProtectData(
data, description=None, optional_entropy=None,
prompt_struct=None, flags=0,
):
"""
Encrypt data
"""
data_in = DATA_BLOB(data)
entropy = DATA_BLOB(optional_entropy) if optional_entropy else None
data_out = DATA_BLOB()
res = _CryptProtectData(
data_in,
description,
entropy,
None, # reserved
prompt_struct,
flags,
data_out,
)
handle_nonzero_success(res)
res = data_out.get_data()
data_out.free()
return res | python | def CryptProtectData(
data, description=None, optional_entropy=None,
prompt_struct=None, flags=0,
):
"""
Encrypt data
"""
data_in = DATA_BLOB(data)
entropy = DATA_BLOB(optional_entropy) if optional_entropy else None
data_out = DATA_BLOB()
res = _CryptProtectData(
data_in,
description,
entropy,
None, # reserved
prompt_struct,
flags,
data_out,
)
handle_nonzero_success(res)
res = data_out.get_data()
data_out.free()
return res | ['def', 'CryptProtectData', '(', 'data', ',', 'description', '=', 'None', ',', 'optional_entropy', '=', 'None', ',', 'prompt_struct', '=', 'None', ',', 'flags', '=', '0', ',', ')', ':', 'data_in', '=', 'DATA_BLOB', '(', 'data', ')', 'entropy', '=', 'DATA_BLOB', '(', 'optional_entropy', ')', 'if', 'optional_entropy', 'else', 'None', 'data_out', '=', 'DATA_BLOB', '(', ')', 'res', '=', '_CryptProtectData', '(', 'data_in', ',', 'description', ',', 'entropy', ',', 'None', ',', '# reserved', 'prompt_struct', ',', 'flags', ',', 'data_out', ',', ')', 'handle_nonzero_success', '(', 'res', ')', 'res', '=', 'data_out', '.', 'get_data', '(', ')', 'data_out', '.', 'free', '(', ')', 'return', 'res'] | Encrypt data | ['Encrypt', 'data'] | train | https://github.com/jaraco/jaraco.windows/blob/51811efed50b46ad08daa25408a1cc806bc8d519/jaraco/windows/dpapi.py#L103-L126 |
6,499 | Alignak-monitoring/alignak | alignak/external_command.py | ExternalCommandManager.disable_host_flap_detection | def disable_host_flap_detection(self, host):
"""Disable flap detection for a host
Format of the line that triggers function call::
DISABLE_HOST_FLAP_DETECTION;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
"""
if host.flap_detection_enabled:
host.modified_attributes |= DICT_MODATTR["MODATTR_FLAP_DETECTION_ENABLED"].value
host.flap_detection_enabled = False
# Maybe the host was flapping, if so, stop flapping
if host.is_flapping:
host.is_flapping = False
host.flapping_changes = []
self.send_an_element(host.get_update_status_brok()) | python | def disable_host_flap_detection(self, host):
"""Disable flap detection for a host
Format of the line that triggers function call::
DISABLE_HOST_FLAP_DETECTION;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
"""
if host.flap_detection_enabled:
host.modified_attributes |= DICT_MODATTR["MODATTR_FLAP_DETECTION_ENABLED"].value
host.flap_detection_enabled = False
# Maybe the host was flapping, if so, stop flapping
if host.is_flapping:
host.is_flapping = False
host.flapping_changes = []
self.send_an_element(host.get_update_status_brok()) | ['def', 'disable_host_flap_detection', '(', 'self', ',', 'host', ')', ':', 'if', 'host', '.', 'flap_detection_enabled', ':', 'host', '.', 'modified_attributes', '|=', 'DICT_MODATTR', '[', '"MODATTR_FLAP_DETECTION_ENABLED"', ']', '.', 'value', 'host', '.', 'flap_detection_enabled', '=', 'False', '# Maybe the host was flapping, if so, stop flapping', 'if', 'host', '.', 'is_flapping', ':', 'host', '.', 'is_flapping', '=', 'False', 'host', '.', 'flapping_changes', '=', '[', ']', 'self', '.', 'send_an_element', '(', 'host', '.', 'get_update_status_brok', '(', ')', ')'] | Disable flap detection for a host
Format of the line that triggers function call::
DISABLE_HOST_FLAP_DETECTION;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None | ['Disable', 'flap', 'detection', 'for', 'a', 'host', 'Format', 'of', 'the', 'line', 'that', 'triggers', 'function', 'call', '::'] | train | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L2146-L2163 |