| column | type | values |
|---|---|---|
| Unnamed: 0 | int64 | 0 – 10k (row index) |
| repository_name | string | lengths 7 – 54 |
| func_path_in_repository | string | lengths 5 – 223 |
| func_name | string | lengths 1 – 134 |
| whole_func_string | string | lengths 100 – 30.3k |
| language | string | 1 class ("python") |
| func_code_string | string | lengths 100 – 30.3k |
| func_code_tokens | string | lengths 138 – 33.2k |
| func_documentation_string | string | lengths 1 – 15k |
| func_documentation_tokens | string | lengths 5 – 5.14k |
| split_name | string | 1 class ("train") |
| func_code_url | string | lengths 91 – 315 |
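The columns above follow the CodeSearchNet-style schema. A minimal sketch of loading such a split with the Hugging Face `datasets` library is shown below; the dataset name `code_search_net` and the `python` configuration are assumptions, not something this page confirms.

```python
# Hedged sketch: load a CodeSearchNet-style split and inspect one row.
# Assumption: the rows shown here correspond to the `code_search_net`
# dataset's `python` configuration on the Hugging Face Hub.
from datasets import load_dataset

ds = load_dataset("code_search_net", "python", split="train")
row = ds[4500]
print(row["repository_name"], row["func_name"])
print(row["func_documentation_string"])
```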
4,500 | mikicz/arca | arca/backend/current_environment.py | CurrentEnvironmentBackend.get_or_create_environment | python | train | https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/current_environment.py#L15-L18

def get_or_create_environment(self, repo: str, branch: str, git_repo: Repo, repo_path: Path) -> str:
    """Returns the path to the current Python executable."""
    return sys.executable

4,501 | ktbyers/netmiko | netmiko/flexvnf/flexvnf_ssh.py | FlexvnfSSH.commit | python | train | https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/flexvnf/flexvnf_ssh.py#L82-L171

def commit(
    self,
    confirm=False,
    confirm_delay=None,
    check=False,
    comment="",
    and_quit=False,
    delay_factor=1,
):
    """
    Commit the candidate configuration.

    Commit the entered configuration. Raise an error and return the failure
    if the commit fails.

    Automatically enters configuration mode

    default:
        command_string = commit
    check and (confirm or confirm_delay or comment):
        Exception
    confirm_delay and no confirm:
        Exception
    confirm:
        confirm_delay option
        comment option
        command_string = commit confirmed or commit confirmed <confirm_delay>
    check:
        command_string = commit check
    """
    delay_factor = self.select_delay_factor(delay_factor)
    if check and (confirm or confirm_delay or comment):
        raise ValueError("Invalid arguments supplied with commit check")
    if confirm_delay and not confirm:
        raise ValueError(
            "Invalid arguments supplied to commit method both confirm and check"
        )

    # Select proper command string based on arguments provided
    command_string = "commit"
    commit_marker = "Commit complete."
    if check:
        command_string = "commit check"
        commit_marker = "Validation complete"
    elif confirm:
        if confirm_delay:
            command_string = "commit confirmed " + str(confirm_delay)
        else:
            command_string = "commit confirmed"
        commit_marker = "commit confirmed will be automatically rolled back in"

    # wrap the comment in quotes
    if comment:
        if '"' in comment:
            raise ValueError("Invalid comment contains double quote")
        comment = '"{0}"'.format(comment)
        command_string += " comment " + comment

    if and_quit:
        command_string += " and-quit"

    # Enter config mode (if necessary)
    output = self.config_mode()
    # and_quit will get out of config mode on commit
    if and_quit:
        prompt = self.base_prompt
        output += self.send_command_expect(
            command_string,
            expect_string=prompt,
            strip_prompt=True,
            strip_command=True,
            delay_factor=delay_factor,
        )
    else:
        output += self.send_command_expect(
            command_string,
            strip_prompt=True,
            strip_command=True,
            delay_factor=delay_factor,
        )
    if commit_marker not in output:
        raise ValueError(
            "Commit failed with the following errors:\n\n{0}".format(output)
        )
    return output

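A hedged usage sketch for the `commit()` method above; the host, credentials, and the `flexvnf` device_type string are placeholders and assumptions rather than values confirmed by this row.

```python
# Hedged usage sketch for FlexvnfSSH.commit(); device_type "flexvnf" is
# assumed to dispatch to this class in netmiko's platform registry.
from netmiko import ConnectHandler

conn = ConnectHandler(
    device_type="flexvnf",      # assumption
    host="192.0.2.10",          # placeholder
    username="admin",
    password="secret",
)
conn.send_config_set(["set system host-name vnf-edge-1"])
conn.commit(check=True)                                   # "commit check"
output = conn.commit(confirm=True, confirm_delay=5,
                     comment="hostname change")           # auto-rollback window
print(output)
conn.disconnect()
```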
4,502 | marshmallow-code/marshmallow | src/marshmallow/schema.py | BaseSchema._do_load | python | train | https://github.com/marshmallow-code/marshmallow/blob/a6b6c4151f1fbf16f3774d4052ca2bddf6903750/src/marshmallow/schema.py#L776-L870

def _do_load(
    self, data, many=None, partial=None, unknown=None,
    postprocess=True,
):
    """Deserialize `data`, returning the deserialized result.

    :param data: The data to deserialize.
    :param bool many: Whether to deserialize `data` as a collection. If `None`, the
        value for `self.many` is used.
    :param bool|tuple partial: Whether to validate required fields. If its value is an iterable,
        only fields listed in that iterable will be allowed missing.
        If `True`, all fields will be allowed missing.
        If `None`, the value for `self.partial` is used.
    :param unknown: Whether to exclude, include, or raise an error for unknown
        fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
        If `None`, the value for `self.unknown` is used.
    :param bool postprocess: Whether to run post_load methods.
    :return: A dict of deserialized data
    :rtype: dict
    """
    error_store = ErrorStore()
    errors = {}
    many = self.many if many is None else bool(many)
    unknown = unknown or self.unknown
    if partial is None:
        partial = self.partial
    # Run preprocessors
    if self._has_processors(PRE_LOAD):
        try:
            processed_data = self._invoke_load_processors(
                PRE_LOAD,
                data,
                many,
                original_data=data,
            )
        except ValidationError as err:
            errors = err.normalized_messages()
            result = None
    else:
        processed_data = data
    if not errors:
        # Deserialize data
        result = self._deserialize(
            processed_data,
            self.fields,
            error_store,
            many=many,
            partial=partial,
            unknown=unknown,
            dict_class=self.dict_class,
            index_errors=self.opts.index_errors,
        )
        # Run field-level validation
        self._invoke_field_validators(error_store, data=result, many=many)
        # Run schema-level validation
        if self._has_processors(VALIDATES_SCHEMA):
            field_errors = bool(error_store.errors)
            self._invoke_schema_validators(
                error_store,
                pass_many=True,
                data=result,
                original_data=data,
                many=many,
                field_errors=field_errors,
            )
            self._invoke_schema_validators(
                error_store,
                pass_many=False,
                data=result,
                original_data=data,
                many=many,
                field_errors=field_errors,
            )
        errors = error_store.errors
    # Run post processors
    if not errors and postprocess and self._has_processors(POST_LOAD):
        try:
            result = self._invoke_load_processors(
                POST_LOAD,
                result,
                many,
                original_data=data,
            )
        except ValidationError as err:
            errors = err.normalized_messages()
    if errors:
        exc = ValidationError(
            errors,
            data=data,
            valid_data=result,
        )
        self.handle_error(exc, data)
        raise exc
    return result

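`_do_load()` is the private worker behind the public `Schema.load()`. A small sketch of that entry point, with illustrative field names and data:

```python
# Sketch of Schema.load(), which delegates to _do_load() above.
from marshmallow import Schema, fields, EXCLUDE, ValidationError

class UserSchema(Schema):
    name = fields.Str(required=True)
    age = fields.Int(required=True)

schema = UserSchema()
user = schema.load({"name": "Ada", "age": 36, "extra": 1}, unknown=EXCLUDE)
print(user)  # {'name': 'Ada', 'age': 36}

# partial=("age",) lets the listed field be missing without a "required" error.
schema.load({"name": "Ada"}, partial=("age",))

try:
    schema.load({"name": "Ada"})  # age missing and not partial -> error
except ValidationError as err:
    print(err.messages)  # {'age': ['Missing data for required field.']}
```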
4,503 | CodyKochmann/graphdb | graphdb/RamGraphDB.py | VList._where | python | train | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/RamGraphDB.py#L355-L358

def _where(self, filter_fn):
    ''' use this to filter VLists, simply provide a filter function to filter the current found objects '''
    assert callable(filter_fn), 'filter_fn needs to be callable'
    return VList(i for i in self if filter_fn(i()))

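A self-contained sketch of the pattern `_where()` implements: the list elements are zero-argument handles and the predicate sees their resolved values. `LazyList` below is a stand-in, not GraphDB's real `VList`.

```python
# Minimal mimic of the VList._where() pattern above.
class LazyList(list):
    def _where(self, filter_fn):
        assert callable(filter_fn), 'filter_fn needs to be callable'
        # call each handle, keep the ones whose value passes the predicate
        return LazyList(i for i in self if filter_fn(i()))

items = LazyList([lambda: 3, lambda: 10, lambda: 42])
big = items._where(lambda value: value > 5)
print([handle() for handle in big])  # [10, 42]
```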
4,504 | Microsoft/LightGBM | python-package/lightgbm/basic.py | Booster.dump_model | python | train | https://github.com/Microsoft/LightGBM/blob/8d2ec69f4f685b0ab1c4624d59ee2d3287bb3147/python-package/lightgbm/basic.py#L2194-L2239

def dump_model(self, num_iteration=None, start_iteration=0):
    """Dump Booster to JSON format.

    Parameters
    ----------
    num_iteration : int or None, optional (default=None)
        Index of the iteration that should be dumped.
        If None, if the best iteration exists, it is dumped; otherwise, all iterations are dumped.
        If <= 0, all iterations are dumped.
    start_iteration : int, optional (default=0)
        Start index of the iteration that should be dumped.

    Returns
    -------
    json_repr : dict
        JSON format of Booster.
    """
    if num_iteration is None:
        num_iteration = self.best_iteration
    buffer_len = 1 << 20
    tmp_out_len = ctypes.c_int64(0)
    string_buffer = ctypes.create_string_buffer(buffer_len)
    ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
    _safe_call(_LIB.LGBM_BoosterDumpModel(
        self.handle,
        ctypes.c_int(start_iteration),
        ctypes.c_int(num_iteration),
        ctypes.c_int64(buffer_len),
        ctypes.byref(tmp_out_len),
        ptr_string_buffer))
    actual_len = tmp_out_len.value
    # if buffer length is not long enough, reallocate a buffer
    if actual_len > buffer_len:
        string_buffer = ctypes.create_string_buffer(actual_len)
        ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
        _safe_call(_LIB.LGBM_BoosterDumpModel(
            self.handle,
            ctypes.c_int(start_iteration),
            ctypes.c_int(num_iteration),
            ctypes.c_int64(actual_len),
            ctypes.byref(tmp_out_len),
            ptr_string_buffer))
    ret = json.loads(string_buffer.value.decode())
    ret['pandas_categorical'] = json.loads(json.dumps(self.pandas_categorical,
                                                      default=json_default_with_numpy))
    return ret

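A hedged end-to-end sketch showing where `dump_model()` fits; the training data is synthetic and the printed keys are indicative only.

```python
# Hedged usage sketch for Booster.dump_model(): train a tiny model on
# synthetic data and dump it to a JSON-serializable dict.
import json
import numpy as np
import lightgbm as lgb

X = np.random.rand(200, 5)
y = (X[:, 0] + X[:, 1] > 1).astype(int)
train_set = lgb.Dataset(X, label=y)
booster = lgb.train({"objective": "binary", "verbose": -1},
                    train_set, num_boost_round=10)

model_dict = booster.dump_model()   # all iterations by default
print(sorted(model_dict))           # e.g. includes 'tree_info', 'pandas_categorical'
with open("model.json", "w") as f:
    json.dump(model_dict, f)
```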
4,505 | PatrikValkovic/grammpy | grammpy/transforms/Manipulations.py | Manipulations.replace | python | train | https://github.com/PatrikValkovic/grammpy/blob/879ce0ef794ac2823acc19314fcd7a8aba53e50f/grammpy/transforms/Manipulations.py#L58-L69

def replace(oldEl, newEl):
    # type: (Union[Rule, _RuleConnectable], Union[Rule, _RuleConnectable]) -> Union[Rule, _RuleConnectable]
    """
    Replace element in the parsed tree. Can be nonterminal, terminal or rule.
    :param oldEl: Element already in the tree.
    :param newEl: Element to replace with.
    :return: New element attached to the tree.
    """
    if isinstance(oldEl, Rule):
        return Manipulations.replaceRule(oldEl, newEl)
    if isinstance(oldEl, (Nonterminal, Terminal)):
        return Manipulations.replaceNode(oldEl, newEl)

4,506 | nicferrier/md | src/mdlib/api.py | MdMessage._set_flag | python | train | https://github.com/nicferrier/md/blob/302ca8882dae060fb15bd5ae470d8e661fb67ec4/src/mdlib/api.py#L143-L159

def _set_flag(self, flag):
    """Turns the specified flag on"""
    self.folder._invalidate_cache()
    # TODO::: turn the flag off when it's already on
    def replacer(m):
        return "%s/%s.%s%s" % (
            joinpath(self.folder.base, self.folder.folder, "cur"),
            m.group("key"),
            m.group("hostname"),
            ":2,%s" % (
                "%s%s" % (m.group("flags"), flag) if m.group("flags") \
                    else flag
                )
            )
    newfilename = self.msgpathre.sub(replacer, self.filename)
    self.filesystem.rename(self.filename, newfilename)
    self.filename = newfilename

4,507 | ssalentin/plip | plip/modules/supplemental.py | nucleotide_linkage | python | train | https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/supplemental.py#L242-L271

def nucleotide_linkage(residues):
    """Support for DNA/RNA ligands by finding missing covalent linkages to stitch DNA/RNA together."""
    nuc_covalent = []
    #######################################
    # Basic support for RNA/DNA as ligand #
    #######################################
    nucleotides = ['A', 'C', 'T', 'G', 'U', 'DA', 'DC', 'DT', 'DG', 'DU']
    dna_rna = {}  # Dictionary of DNA/RNA residues by chain
    covlinkage = namedtuple("covlinkage", "id1 chain1 pos1 conf1 id2 chain2 pos2 conf2")
    # Create missing covlinkage entries for DNA/RNA
    for ligand in residues:
        resname, chain, pos = ligand
        if resname in nucleotides:
            if chain not in dna_rna:
                dna_rna[chain] = [(resname, pos), ]
            else:
                dna_rna[chain].append((resname, pos))
    for chain in dna_rna:
        nuc_list = dna_rna[chain]
        for i, nucleotide in enumerate(nuc_list):
            if not i == len(nuc_list) - 1:
                name, pos = nucleotide
                nextnucleotide = nuc_list[i + 1]
                nextname, nextpos = nextnucleotide
                newlink = covlinkage(id1=name, chain1=chain, pos1=pos, conf1='',
                                     id2=nextname, chain2=chain, pos2=nextpos, conf2='')
                nuc_covalent.append(newlink)
    return nuc_covalent

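A small, hedged illustration of the stitching behaviour above: input tuples are (residue name, chain, position), residues on the same chain are linked pairwise in order, and residues on different chains are not linked. The import path follows the file shown in this row.

```python
# Hedged example for nucleotide_linkage(); the residue list is synthetic.
from plip.modules.supplemental import nucleotide_linkage

residues = [('DA', 'A', 1), ('DT', 'A', 2), ('DG', 'A', 3), ('DC', 'B', 7)]
links = nucleotide_linkage(residues)
for link in links:
    print(link.id1, link.pos1, '->', link.id2, link.pos2, 'chain', link.chain1)
# Expected: DA 1 -> DT 2 chain A
#           DT 2 -> DG 3 chain A   (chain B holds a single residue, so no link)
```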
4,508 | aio-libs/aioredis | aioredis/commands/streams.py | StreamCommandsMixin._xread | python | train | https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/streams.py#L234-L256

def _xread(self, streams, timeout=0, count=None, latest_ids=None):
    """Wraps up common functionality between ``xread()``
    and ``xread_group()``

    You should probably be using ``xread()`` or ``xread_group()`` directly.
    """
    if latest_ids is None:
        latest_ids = ['$'] * len(streams)
    if len(streams) != len(latest_ids):
        raise ValueError(
            'The streams and latest_ids parameters must be of the '
            'same length'
        )

    count_args = [b'COUNT', count] if count else []
    if timeout is None:
        block_args = []
    elif not isinstance(timeout, int):
        raise TypeError(
            "timeout argument must be int, not {!r}".format(timeout))
    else:
        block_args = [b'BLOCK', timeout]
    return block_args + count_args + [b'STREAMS'] + streams + latest_ids

4,509 | CityOfZion/neo-python-core | neocore/Cryptography/ECCurve.py | EllipticCurve.decompress_from_curve | python | train | https://github.com/CityOfZion/neo-python-core/blob/786c02cc2f41712d70b1f064ae3d67f86167107f/neocore/Cryptography/ECCurve.py#L648-L670

def decompress_from_curve(self, x, flag):
    """
    calculate the y coordinate given only the x value.
    there are 2 possible solutions, use 'flag' to select.
    """
    cq = self.field.p
    x = self.field.value(x)
    ysquare = x ** 3 + self.a * x + self.b
    ysquare_root = sqrtCQ(ysquare.value, cq)

    bit0 = 0
    if ysquare_root % 2 != 0:
        bit0 = 1

    if bit0 != flag:
        beta = (cq - ysquare_root) % cq
    else:
        beta = ysquare_root

    return self.point(x, beta)

4,510 | django-treebeard/django-treebeard | treebeard/numconv.py | int2str | python | train | https://github.com/django-treebeard/django-treebeard/blob/8042ee939cb45394909237da447f8925e3cc6aa3/treebeard/numconv.py#L108-L110

def int2str(num, radix=10, alphabet=BASE85):
    """helper function for quick base conversions from integers to strings"""
    return NumConv(radix, alphabet).int2str(num)

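A minimal, self-contained sketch of the int-to-string base conversion that `int2str()` exposes; the divmod loop, alphabet, and radix below are illustrative, not NumConv's actual implementation.

```python
# Standalone sketch of integer -> string base conversion.
def to_base(num, radix=16, alphabet="0123456789ABCDEF"):
    if num == 0:
        return alphabet[0]
    digits = []
    while num:
        num, rem = divmod(num, radix)
        digits.append(alphabet[rem])
    return ''.join(reversed(digits))

print(to_base(3735928559))                    # DEADBEEF
print(to_base(255, radix=2, alphabet="01"))   # 11111111
```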
4,511 | sorgerlab/indra | indra/assemblers/pysb/kappa_util.py | im_json_to_graph | python | train | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/kappa_util.py#L7-L57

def im_json_to_graph(im_json):
    """Return networkx graph from Kappy's influence map JSON.

    Parameters
    ----------
    im_json : dict
        A JSON dict which contains an influence map generated by Kappy.

    Returns
    -------
    graph : networkx.MultiDiGraph
        A graph representing the influence map.
    """
    imap_data = im_json['influence map']['map']

    # Initialize the graph
    graph = MultiDiGraph()

    id_node_dict = {}
    # Add each node to the graph
    for node_dict in imap_data['nodes']:
        # There is always just one entry here with the node type e.g. "rule"
        # as key, and all the node data as the value
        node_type, node = list(node_dict.items())[0]
        # Add the node to the graph with its label and type
        attrs = {'fillcolor': '#b7d2ff' if node_type == 'rule' else '#cdffc9',
                 'shape': 'box' if node_type == 'rule' else 'oval',
                 'style': 'filled'}
        graph.add_node(node['label'], node_type=node_type, **attrs)
        # Save the key of the node to refer to it later
        new_key = '%s%s' % (node_type, node['id'])
        id_node_dict[new_key] = node['label']

    def add_edges(link_list, edge_sign):
        attrs = {'sign': edge_sign,
                 'color': 'green' if edge_sign == 1 else 'red',
                 'arrowhead': 'normal' if edge_sign == 1 else 'tee'}
        for link_dict in link_list:
            source = link_dict['source']
            for target_dict in link_dict['target map']:
                target = target_dict['target']
                src_id = '%s%s' % list(source.items())[0]
                tgt_id = '%s%s' % list(target.items())[0]
                graph.add_edge(id_node_dict[src_id], id_node_dict[tgt_id],
                               **attrs)

    # Add all the edges from the positive and negative influences
    add_edges(imap_data['wake-up map'], 1)
    add_edges(imap_data['inhibition map'], -1)

    return graph

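A hedged toy input for `im_json_to_graph()`: the JSON skeleton below mirrors only the keys the function reads ('nodes', 'wake-up map', 'inhibition map'); real Kappy output carries more fields, and the 'variable' node-type key is an assumption.

```python
# Hedged toy example for im_json_to_graph(); structure is minimal/illustrative.
from indra.assemblers.pysb.kappa_util import im_json_to_graph

im_json = {'influence map': {'map': {
    'nodes': [
        {'rule': {'id': 1, 'label': 'A_activates_B'}},
        {'variable': {'id': 2, 'label': 'B_active'}},   # node type assumed
    ],
    'wake-up map': [
        {'source': {'rule': 1}, 'target map': [{'target': {'variable': 2}}]},
    ],
    'inhibition map': [],
}}}

graph = im_json_to_graph(im_json)
print(list(graph.nodes()))              # ['A_activates_B', 'B_active']
print(list(graph.edges(data='sign')))   # [('A_activates_B', 'B_active', 1)]
```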
4,512 | thekashifmalik/packer | packer/packer.py | Packer.install | python | train | https://github.com/thekashifmalik/packer/blob/736d052d2536ada7733f4b8459e32fb771af2e1c/packer/packer.py#L54-L56

def install(cls):
    """Create the required directories in the home directory"""
    [os.makedirs('{}/{}'.format(cls.home, cls.dirs[d])) for d in cls.dirs]

4,513 | spyder-ide/spyder | spyder/plugins/help/plugin.py | Help.toggle_wrap_mode | python | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/help/plugin.py#L341-L344

def toggle_wrap_mode(self, checked):
    """Toggle wrap mode"""
    self.plain_text.editor.toggle_wrap_mode(checked)
    self.set_option('wrap', checked)

4,514 | sorgerlab/indra | indra/tools/reading/pmid_reading/read_pmids.py | join_json_files | python | train | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/pmid_reading/read_pmids.py#L140-L170

def join_json_files(prefix):
    """Join different REACH output JSON files into a single JSON object.

    The output of REACH is broken into three files that need to be joined
    before processing. Specifically, there will be three files of the form:
    `<prefix>.uaz.<subcategory>.json`.

    Parameters
    ----------
    prefix : str
        The absolute path up to the extensions that reach will add.

    Returns
    -------
    json_obj : dict
        The result of joining the files, keyed by the three subcategories.
    """
    try:
        with open(prefix + '.uaz.entities.json', 'rt') as f:
            entities = json.load(f)
        with open(prefix + '.uaz.events.json', 'rt') as f:
            events = json.load(f)
        with open(prefix + '.uaz.sentences.json', 'rt') as f:
            sentences = json.load(f)
    except IOError as e:
        logger.error(
            'Failed to open JSON files for %s; REACH error?' % prefix
        )
        logger.exception(e)
        return None
    return {'events': events, 'entities': entities, 'sentences': sentences}

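A hedged sketch of the three-file layout `join_json_files()` expects; the directory and file contents below are synthetic stand-ins for real REACH output.

```python
# Hedged sketch: fabricate the <prefix>.uaz.*.json triple and join it.
import json
import os
import tempfile
from indra.tools.reading.pmid_reading.read_pmids import join_json_files

tmp = tempfile.mkdtemp()
prefix = os.path.join(tmp, 'PMID000001')          # placeholder prefix
for part in ('entities', 'events', 'sentences'):
    with open('{}.uaz.{}.json'.format(prefix, part), 'w') as f:
        json.dump({'frames': []}, f)              # synthetic content

combined = join_json_files(prefix)
print(sorted(combined))   # ['entities', 'events', 'sentences']
```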
4,515 | scott-griffiths/bitstring | bitstring.py | Bits._readsie | python | train | https://github.com/scott-griffiths/bitstring/blob/ab40ae7f0b43fe223a39b63cbc0529b09f3ef653/bitstring.py#L1784-L1802

def _readsie(self, pos):
    """Return interpretation of next bits as a signed interleaved exponential-Golomb code.

    Advances position to after the read code.

    Raises ReadError if the end of the bitstring is encountered while
    reading the code.
    """
    codenum, pos = self._readuie(pos)
    if not codenum:
        return 0, pos
    try:
        if self[pos]:
            return -codenum, pos + 1
        else:
            return codenum, pos + 1
    except IndexError:
        raise ReadError("Read off end of bitstring trying to read code.")

4,516 | eventifyio/eventify | eventify/drivers/crossbar.py | Component.onUserError | python | train | https://github.com/eventifyio/eventify/blob/0e519964a56bd07a879b266f21f177749c63aaed/eventify/drivers/crossbar.py#L86-L91

def onUserError(self, fail, message):
    """
    Handle user errors
    """
    self.log.error(fail)
    self.log.error(message)

4,517 | tensorflow/lucid | lucid/misc/gl/meshutil.py | load_obj | python | train | https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L99-L158

def load_obj(fn):
  """Load 3d mesh from '.obj' file.

  Args:
    fn: Input file name or file-like object.
  Returns:
    dictionary with the following keys (some of which may be missing):
      position: np.float32, (n, 3) array, vertex positions
      uv: np.float32, (n, 2) array, vertex uv coordinates
      normal: np.float32, (n, 3) array, vertex normals
      face: np.int32, (k*3,) triangular face indices
  """
  position = [np.zeros(3, dtype=np.float32)]
  normal = [np.zeros(3, dtype=np.float32)]
  uv = [np.zeros(2, dtype=np.float32)]
  tuple2idx = OrderedDict()
  trinagle_indices = []
  input_file = open(fn) if isinstance(fn, str) else fn
  for line in input_file:
    line = line.strip()
    if not line or line[0] == '#':
      continue
    line = line.split(' ', 1)
    tag = line[0]
    if len(line) > 1:
      line = line[1]
    else:
      line = ''
    if tag == 'v':
      position.append(np.fromstring(line, sep=' '))
    elif tag == 'vt':
      uv.append(np.fromstring(line, sep=' '))
    elif tag == 'vn':
      normal.append(np.fromstring(line, sep=' '))
    elif tag == 'f':
      output_face_indices = []
      for chunk in line.split():
        # tuple order: pos_idx, uv_idx, normal_idx
        vt = _parse_vertex_tuple(chunk)
        if vt not in tuple2idx:  # create a new output vertex?
          tuple2idx[vt] = len(tuple2idx)
        output_face_indices.append(tuple2idx[vt])
      # generate face triangles
      for i in range(1, len(output_face_indices)-1):
        for vi in [0, i, i+1]:
          trinagle_indices.append(output_face_indices[vi])

  outputs = {}
  outputs['face'] = np.int32(trinagle_indices)
  pos_idx, uv_idx, normal_idx = np.int32(list(tuple2idx)).T
  if np.any(pos_idx):
    outputs['position'] = _unify_rows(position)[pos_idx]
  if np.any(uv_idx):
    outputs['uv'] = _unify_rows(uv)[uv_idx]
  if np.any(normal_idx):
    outputs['normal'] = _unify_rows(normal)[normal_idx]
  return outputs

Args:
fn: Input file name or file-like object.
Returns:
dictionary with the following keys (some of which may be missing):
position: np.float32, (n, 3) array, vertex positions
uv: np.float32, (n, 2) array, vertex uv coordinates
normal: np.float32, (n, 3) array, vertex uv normals
face: np.int32, (k*3,) traingular face indices
"""
position = [np.zeros(3, dtype=np.float32)]
normal = [np.zeros(3, dtype=np.float32)]
uv = [np.zeros(2, dtype=np.float32)]
tuple2idx = OrderedDict()
trinagle_indices = []
input_file = open(fn) if isinstance(fn, str) else fn
for line in input_file:
line = line.strip()
if not line or line[0] == '#':
continue
line = line.split(' ', 1)
tag = line[0]
if len(line) > 1:
line = line[1]
else:
line = ''
if tag == 'v':
position.append(np.fromstring(line, sep=' '))
elif tag == 'vt':
uv.append(np.fromstring(line, sep=' '))
elif tag == 'vn':
normal.append(np.fromstring(line, sep=' '))
elif tag == 'f':
output_face_indices = []
for chunk in line.split():
# tuple order: pos_idx, uv_idx, normal_idx
vt = _parse_vertex_tuple(chunk)
if vt not in tuple2idx: # create a new output vertex?
tuple2idx[vt] = len(tuple2idx)
output_face_indices.append(tuple2idx[vt])
# generate face triangles
for i in range(1, len(output_face_indices)-1):
for vi in [0, i, i+1]:
trinagle_indices.append(output_face_indices[vi])
outputs = {}
outputs['face'] = np.int32(trinagle_indices)
pos_idx, uv_idx, normal_idx = np.int32(list(tuple2idx)).T
if np.any(pos_idx):
outputs['position'] = _unify_rows(position)[pos_idx]
if np.any(uv_idx):
outputs['uv'] = _unify_rows(uv)[uv_idx]
if np.any(normal_idx):
outputs['normal'] = _unify_rows(normal)[normal_idx]
return outputs | ['def', 'load_obj', '(', 'fn', ')', ':', 'position', '=', '[', 'np', '.', 'zeros', '(', '3', ',', 'dtype', '=', 'np', '.', 'float32', ')', ']', 'normal', '=', '[', 'np', '.', 'zeros', '(', '3', ',', 'dtype', '=', 'np', '.', 'float32', ')', ']', 'uv', '=', '[', 'np', '.', 'zeros', '(', '2', ',', 'dtype', '=', 'np', '.', 'float32', ')', ']', 'tuple2idx', '=', 'OrderedDict', '(', ')', 'trinagle_indices', '=', '[', ']', 'input_file', '=', 'open', '(', 'fn', ')', 'if', 'isinstance', '(', 'fn', ',', 'str', ')', 'else', 'fn', 'for', 'line', 'in', 'input_file', ':', 'line', '=', 'line', '.', 'strip', '(', ')', 'if', 'not', 'line', 'or', 'line', '[', '0', ']', '==', "'#'", ':', 'continue', 'line', '=', 'line', '.', 'split', '(', "' '", ',', '1', ')', 'tag', '=', 'line', '[', '0', ']', 'if', 'len', '(', 'line', ')', '>', '1', ':', 'line', '=', 'line', '[', '1', ']', 'else', ':', 'line', '=', "''", 'if', 'tag', '==', "'v'", ':', 'position', '.', 'append', '(', 'np', '.', 'fromstring', '(', 'line', ',', 'sep', '=', "' '", ')', ')', 'elif', 'tag', '==', "'vt'", ':', 'uv', '.', 'append', '(', 'np', '.', 'fromstring', '(', 'line', ',', 'sep', '=', "' '", ')', ')', 'elif', 'tag', '==', "'vn'", ':', 'normal', '.', 'append', '(', 'np', '.', 'fromstring', '(', 'line', ',', 'sep', '=', "' '", ')', ')', 'elif', 'tag', '==', "'f'", ':', 'output_face_indices', '=', '[', ']', 'for', 'chunk', 'in', 'line', '.', 'split', '(', ')', ':', '# tuple order: pos_idx, uv_idx, normal_idx', 'vt', '=', '_parse_vertex_tuple', '(', 'chunk', ')', 'if', 'vt', 'not', 'in', 'tuple2idx', ':', '# create a new output vertex?', 'tuple2idx', '[', 'vt', ']', '=', 'len', '(', 'tuple2idx', ')', 'output_face_indices', '.', 'append', '(', 'tuple2idx', '[', 'vt', ']', ')', '# generate face triangles', 'for', 'i', 'in', 'range', '(', '1', ',', 'len', '(', 'output_face_indices', ')', '-', '1', ')', ':', 'for', 'vi', 'in', '[', '0', ',', 'i', ',', 'i', '+', '1', ']', ':', 'trinagle_indices', '.', 'append', '(', 'output_face_indices', '[', 'vi', ']', ')', 'outputs', '=', '{', '}', 'outputs', '[', "'face'", ']', '=', 'np', '.', 'int32', '(', 'trinagle_indices', ')', 'pos_idx', ',', 'uv_idx', ',', 'normal_idx', '=', 'np', '.', 'int32', '(', 'list', '(', 'tuple2idx', ')', ')', '.', 'T', 'if', 'np', '.', 'any', '(', 'pos_idx', ')', ':', 'outputs', '[', "'position'", ']', '=', '_unify_rows', '(', 'position', ')', '[', 'pos_idx', ']', 'if', 'np', '.', 'any', '(', 'uv_idx', ')', ':', 'outputs', '[', "'uv'", ']', '=', '_unify_rows', '(', 'uv', ')', '[', 'uv_idx', ']', 'if', 'np', '.', 'any', '(', 'normal_idx', ')', ':', 'outputs', '[', "'normal'", ']', '=', '_unify_rows', '(', 'normal', ')', '[', 'normal_idx', ']', 'return', 'outputs'] | Load 3d mesh form .obj' file.
Args:
fn: Input file name or file-like object.
Returns:
dictionary with the following keys (some of which may be missing):
position: np.float32, (n, 3) array, vertex positions
uv: np.float32, (n, 2) array, vertex uv coordinates
normal: np.float32, (n, 3) array, vertex uv normals
face: np.int32, (k*3,) traingular face indices | ['Load', '3d', 'mesh', 'form', '.', 'obj', 'file', '.', 'Args', ':', 'fn', ':', 'Input', 'file', 'name', 'or', 'file', '-', 'like', 'object', '.', 'Returns', ':', 'dictionary', 'with', 'the', 'following', 'keys', '(', 'some', 'of', 'which', 'may', 'be', 'missing', ')', ':', 'position', ':', 'np', '.', 'float32', '(', 'n', '3', ')', 'array', 'vertex', 'positions', 'uv', ':', 'np', '.', 'float32', '(', 'n', '2', ')', 'array', 'vertex', 'uv', 'coordinates', 'normal', ':', 'np', '.', 'float32', '(', 'n', '3', ')', 'array', 'vertex', 'uv', 'normals', 'face', ':', 'np', '.', 'int32', '(', 'k', '*', '3', ')', 'traingular', 'face', 'indices'] | train | https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L99-L158 |
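To make the `load_obj` entry above concrete, here is a minimal usage sketch. It assumes the `lucid` package is installed, that the import path mirrors the file path recorded in this row (`lucid/misc/gl/meshutil.py`), and it relies on the docstring's note that `fn` may be a file-like object; the tiny OBJ text is invented for illustration.

```python
import io

# Hypothetical import path inferred from this row's file path;
# assumes the lucid package is installed.
from lucid.misc.gl.meshutil import load_obj

# A one-quad mesh in Wavefront OBJ text form (positions, uvs, one normal).
obj_text = """
v 0 0 0
v 1 0 0
v 1 1 0
v 0 1 0
vt 0 0
vt 1 0
vt 1 1
vt 0 1
vn 0 0 1
f 1/1/1 2/2/1 3/3/1 4/4/1
"""

mesh = load_obj(io.StringIO(obj_text))
print(mesh['face'])            # 6 indices: the quad fanned into 2 triangles
print(mesh['position'].shape)  # (4, 3) vertex positions
print(mesh['uv'].shape)        # (4, 2) texture coordinates
```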
4,518 | limodou/uliweb | uliweb/contrib/model_config/__init__.py | get_model | def get_model(sender, model_name, model_inst, model_info, model_config):
"""
#todo Add objcache support
"""
MC = get_mc()
if MC:
model = MC.get((MC.c.model_name==model_name) & (MC.c.uuid!=''))
if model:
cached_inst = __cache__.get(model_name)
if not cached_inst or (cached_inst and cached_inst[1]!=model.uuid):
model_inst = model.get_instance()
M = orm.create_model(model_name, fields=eval(model_inst.fields or '[]'),
indexes=eval(model_inst.indexes or '[]'),
basemodel=model_inst.basemodel,
__replace__=True)
__cache__[model_name] = (M, model.uuid)
#process extension model
if model_inst.has_extension:
ext_model_name = model_name + '_extension'
fields = eval(model_inst.extension_fields or '[]')
fields.insert(0, {'name':'_parent', 'type':'OneToOne', 'reference_class':model_name, 'collection_name':'ext'})
ME = orm.create_model(ext_model_name, fields=fields,
indexes=eval(model_inst.extension_indexes or '[]'),
basemodel=model_inst.extension_model,
__replace__=True)
else:
M = cached_inst[0]
return M | python | def get_model(sender, model_name, model_inst, model_info, model_config):
"""
#todo Add objcache support
"""
MC = get_mc()
if MC:
model = MC.get((MC.c.model_name==model_name) & (MC.c.uuid!=''))
if model:
cached_inst = __cache__.get(model_name)
if not cached_inst or (cached_inst and cached_inst[1]!=model.uuid):
model_inst = model.get_instance()
M = orm.create_model(model_name, fields=eval(model_inst.fields or '[]'),
indexes=eval(model_inst.indexes or '[]'),
basemodel=model_inst.basemodel,
__replace__=True)
__cache__[model_name] = (M, model.uuid)
#process extension model
if model_inst.has_extension:
ext_model_name = model_name + '_extension'
fields = eval(model_inst.extension_fields or '[]')
fields.insert(0, {'name':'_parent', 'type':'OneToOne', 'reference_class':model_name, 'collection_name':'ext'})
ME = orm.create_model(ext_model_name, fields=fields,
indexes=eval(model_inst.extension_indexes or '[]'),
basemodel=model_inst.extension_model,
__replace__=True)
else:
M = cached_inst[0]
return M | ['def', 'get_model', '(', 'sender', ',', 'model_name', ',', 'model_inst', ',', 'model_info', ',', 'model_config', ')', ':', 'MC', '=', 'get_mc', '(', ')', 'if', 'MC', ':', 'model', '=', 'MC', '.', 'get', '(', '(', 'MC', '.', 'c', '.', 'model_name', '==', 'model_name', ')', '&', '(', 'MC', '.', 'c', '.', 'uuid', '!=', "''", ')', ')', 'if', 'model', ':', 'cached_inst', '=', '__cache__', '.', 'get', '(', 'model_name', ')', 'if', 'not', 'cached_inst', 'or', '(', 'cached_inst', 'and', 'cached_inst', '[', '1', ']', '!=', 'model', '.', 'uuid', ')', ':', 'model_inst', '=', 'model', '.', 'get_instance', '(', ')', 'M', '=', 'orm', '.', 'create_model', '(', 'model_name', ',', 'fields', '=', 'eval', '(', 'model_inst', '.', 'fields', 'or', "'[]'", ')', ',', 'indexes', '=', 'eval', '(', 'model_inst', '.', 'indexes', 'or', "'[]'", ')', ',', 'basemodel', '=', 'model_inst', '.', 'basemodel', ',', '__replace__', '=', 'True', ')', '__cache__', '[', 'model_name', ']', '=', '(', 'M', ',', 'model', '.', 'uuid', ')', '#process extension model', 'if', 'model_inst', '.', 'has_extension', ':', 'ext_model_name', '=', 'model_name', '+', "'_extension'", 'fields', '=', 'eval', '(', 'model_inst', '.', 'extension_fields', 'or', "'[]'", ')', 'fields', '.', 'insert', '(', '0', ',', '{', "'name'", ':', "'_parent'", ',', "'type'", ':', "'OneToOne'", ',', "'reference_class'", ':', 'model_name', ',', "'collection_name'", ':', "'ext'", '}', ')', 'ME', '=', 'orm', '.', 'create_model', '(', 'ext_model_name', ',', 'fields', '=', 'fields', ',', 'indexes', '=', 'eval', '(', 'model_inst', '.', 'extension_indexes', 'or', "'[]'", ')', ',', 'basemodel', '=', 'model_inst', '.', 'extension_model', ',', '__replace__', '=', 'True', ')', 'else', ':', 'M', '=', 'cached_inst', '[', '0', ']', 'return', 'M'] | #todo Add objcache support | ['#todo', 'Add', 'objcache', 'support'] | train | https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/model_config/__init__.py#L37-L67 |
4,519 | prompt-toolkit/pymux | pymux/commands/commands.py | bind_key | def bind_key(pymux, variables):
"""
Bind a key sequence.
-n: Not necessary to use the prefix.
"""
key = variables['<key>']
command = variables['<command>']
arguments = variables['<arguments>']
needs_prefix = not variables['-n']
try:
pymux.key_bindings_manager.add_custom_binding(
key, command, arguments, needs_prefix=needs_prefix)
except ValueError:
raise CommandException('Invalid key: %r' % (key, )) | python | def bind_key(pymux, variables):
"""
Bind a key sequence.
-n: Not necessary to use the prefix.
"""
key = variables['<key>']
command = variables['<command>']
arguments = variables['<arguments>']
needs_prefix = not variables['-n']
try:
pymux.key_bindings_manager.add_custom_binding(
key, command, arguments, needs_prefix=needs_prefix)
except ValueError:
raise CommandException('Invalid key: %r' % (key, )) | ['def', 'bind_key', '(', 'pymux', ',', 'variables', ')', ':', 'key', '=', 'variables', '[', "'<key>'", ']', 'command', '=', 'variables', '[', "'<command>'", ']', 'arguments', '=', 'variables', '[', "'<arguments>'", ']', 'needs_prefix', '=', 'not', 'variables', '[', "'-n'", ']', 'try', ':', 'pymux', '.', 'key_bindings_manager', '.', 'add_custom_binding', '(', 'key', ',', 'command', ',', 'arguments', ',', 'needs_prefix', '=', 'needs_prefix', ')', 'except', 'ValueError', ':', 'raise', 'CommandException', '(', "'Invalid key: %r'", '%', '(', 'key', ',', ')', ')'] | Bind a key sequence.
-n: Not necessary to use the prefix. | ['Bind', 'a', 'key', 'sequence', '.', '-', 'n', ':', 'Not', 'necessary', 'to', 'use', 'the', 'prefix', '.'] | train | https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/commands/commands.py#L478-L492 |
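The `bind_key` handler above only reads four entries from its `variables` mapping. A hypothetical mapping, reconstructed from those accesses (in pymux the dict is produced by the command grammar, e.g. from `bind-key -n C-t new-window`):

```python
# Hypothetical argument mapping for bind_key(pymux, variables); the keys are
# exactly the ones the handler reads, the values are illustrative.
variables = {
    '<key>': 'C-t',             # key sequence to bind
    '<command>': 'new-window',  # pymux command to run when the key is pressed
    '<arguments>': [],          # extra arguments forwarded to that command
    '-n': True,                 # True => usable without the prefix key
}

# bind_key(pymux, variables) would then call
# pymux.key_bindings_manager.add_custom_binding('C-t', 'new-window', [],
#                                               needs_prefix=False)
# and raise CommandException if the key string cannot be parsed.
```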
4,520 | karel-brinda/rnftools | rnftools/lavender/Bam.py | Bam.create_es | def create_es(self):
"""Create an ES (intermediate) file for this BAM file.
This is the function which assesses if an alignment is correct
"""
with (gzip.open(self._es_fn, "tw+") if self.compress_intermediate_files else open(self._es_fn, "w+")) as es_fo:
self.bam2es(
bam_fn=self._bam_fn,
es_fo=es_fo,
allowed_delta=self.report.allowed_delta,
) | python | def create_es(self):
"""Create an ES (intermediate) file for this BAM file.
This is the function which assesses if an alignment is correct
"""
with (gzip.open(self._es_fn, "tw+") if self.compress_intermediate_files else open(self._es_fn, "w+")) as es_fo:
self.bam2es(
bam_fn=self._bam_fn,
es_fo=es_fo,
allowed_delta=self.report.allowed_delta,
) | ['def', 'create_es', '(', 'self', ')', ':', 'with', '(', 'gzip', '.', 'open', '(', 'self', '.', '_es_fn', ',', '"tw+"', ')', 'if', 'self', '.', 'compress_intermediate_files', 'else', 'open', '(', 'self', '.', '_es_fn', ',', '"w+"', ')', ')', 'as', 'es_fo', ':', 'self', '.', 'bam2es', '(', 'bam_fn', '=', 'self', '.', '_bam_fn', ',', 'es_fo', '=', 'es_fo', ',', 'allowed_delta', '=', 'self', '.', 'report', '.', 'allowed_delta', ',', ')'] | Create an ES (intermediate) file for this BAM file.
This is the function which asses if an alignment is correct | ['Create', 'an', 'ES', '(', 'intermediate', ')', 'file', 'for', 'this', 'BAM', 'file', '.', 'This', 'is', 'the', 'function', 'which', 'asses', 'if', 'an', 'alignment', 'is', 'correct'] | train | https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/lavender/Bam.py#L239-L249 |
4,521 | johnwmillr/LyricsGenius | lyricsgenius/api.py | API.get_artist | def get_artist(self, id_):
"""Data for a specific artist."""
endpoint = "artists/{id}".format(id=id_)
return self._make_request(endpoint) | python | def get_artist(self, id_):
"""Data for a specific artist."""
endpoint = "artists/{id}".format(id=id_)
return self._make_request(endpoint) | ['def', 'get_artist', '(', 'self', ',', 'id_', ')', ':', 'endpoint', '=', '"artists/{id}"', '.', 'format', '(', 'id', '=', 'id_', ')', 'return', 'self', '.', '_make_request', '(', 'endpoint', ')'] | Data for a specific artist. | ['Data', 'for', 'a', 'specific', 'artist', '.'] | train | https://github.com/johnwmillr/LyricsGenius/blob/e36482f7c42235037f3b9b7013edcd54141124e3/lyricsgenius/api.py#L76-L79 |
4,522 | CivicSpleen/ambry | ambry/bundle/files.py | BuildSourceFileAccessor.objects_to_record | def objects_to_record(self, preference=None):
"""Create file records from objects. """
from ambry.orm.file import File
raise NotImplementedError("Still uses obsolete file_info_map")
for file_const, (file_name, clz) in iteritems(file_info_map):
f = self.file(file_const)
pref = preference if preference else f.record.preference
if pref in (File.PREFERENCE.MERGE, File.PREFERENCE.OBJECT):
self._bundle.logger.debug(' otr {}'.format(file_const))
f.objects_to_record() | python | def objects_to_record(self, preference=None):
"""Create file records from objects. """
from ambry.orm.file import File
raise NotImplementedError("Still uses obsolete file_info_map")
for file_const, (file_name, clz) in iteritems(file_info_map):
f = self.file(file_const)
pref = preference if preference else f.record.preference
if pref in (File.PREFERENCE.MERGE, File.PREFERENCE.OBJECT):
self._bundle.logger.debug(' otr {}'.format(file_const))
f.objects_to_record() | ['def', 'objects_to_record', '(', 'self', ',', 'preference', '=', 'None', ')', ':', 'from', 'ambry', '.', 'orm', '.', 'file', 'import', 'File', 'raise', 'NotImplementedError', '(', '"Still uses obsolete file_info_map"', ')', 'for', 'file_const', ',', '(', 'file_name', ',', 'clz', ')', 'in', 'iteritems', '(', 'file_info_map', ')', ':', 'f', '=', 'self', '.', 'file', '(', 'file_const', ')', 'pref', '=', 'preference', 'if', 'preference', 'else', 'f', '.', 'record', '.', 'preference', 'if', 'pref', 'in', '(', 'File', '.', 'PREFERENCE', '.', 'MERGE', ',', 'File', '.', 'PREFERENCE', '.', 'OBJECT', ')', ':', 'self', '.', '_bundle', '.', 'logger', '.', 'debug', '(', "' otr {}'", '.', 'format', '(', 'file_const', ')', ')', 'f', '.', 'objects_to_record', '(', ')'] | Create file records from objects. | ['Create', 'file', 'records', 'from', 'objects', '.'] | train | https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/files.py#L1354-L1366 |
4,523 | inasafe/inasafe | safe/gui/tools/options_dialog.py | OptionsDialog.restore_defaults_ratio | def restore_defaults_ratio(self):
"""Restore InaSAFE default ratio."""
# Set the flag to true because user ask to.
self.is_restore_default = True
# remove current default ratio
for i in reversed(list(range(self.container_layout.count()))):
widget = self.container_layout.itemAt(i).widget()
if widget is not None:
widget.setParent(None)
# reload default ratio
self.restore_default_values_page() | python | def restore_defaults_ratio(self):
"""Restore InaSAFE default ratio."""
# Set the flag to true because user ask to.
self.is_restore_default = True
# remove current default ratio
for i in reversed(list(range(self.container_layout.count()))):
widget = self.container_layout.itemAt(i).widget()
if widget is not None:
widget.setParent(None)
# reload default ratio
self.restore_default_values_page() | ['def', 'restore_defaults_ratio', '(', 'self', ')', ':', '# Set the flag to true because user ask to.', 'self', '.', 'is_restore_default', '=', 'True', '# remove current default ratio', 'for', 'i', 'in', 'reversed', '(', 'list', '(', 'range', '(', 'self', '.', 'container_layout', '.', 'count', '(', ')', ')', ')', ')', ':', 'widget', '=', 'self', '.', 'container_layout', '.', 'itemAt', '(', 'i', ')', '.', 'widget', '(', ')', 'if', 'widget', 'is', 'not', 'None', ':', 'widget', '.', 'setParent', '(', 'None', ')', '# reload default ratio', 'self', '.', 'restore_default_values_page', '(', ')'] | Restore InaSAFE default ratio. | ['Restore', 'InaSAFE', 'default', 'ratio', '.'] | train | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/options_dialog.py#L845-L856 |
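The loop in `restore_defaults_ratio` uses a common Qt idiom: walk the layout backwards and re-parent each widget to `None` so it drops out of the layout. A stand-alone sketch of that idiom (PyQt5 is an assumption here; InaSAFE's actual Qt binding is not stated in this row):

```python
from PyQt5.QtWidgets import QApplication, QLabel, QVBoxLayout, QWidget

app = QApplication([])
container = QWidget()
layout = QVBoxLayout(container)
for text in ('a', 'b', 'c'):
    layout.addWidget(QLabel(text))

# Iterate in reverse so removing an item does not shift the indexes
# that are still to be visited.
for i in reversed(range(layout.count())):
    widget = layout.itemAt(i).widget()
    if widget is not None:
        widget.setParent(None)  # detaching the parent drops it from the layout
```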
4,524 | edeposit/edeposit.amqp.ltp | src/edeposit/amqp/ltp/fn_composers.py | _get_suffix | def _get_suffix(path):
"""
Return suffix from `path`.
``/home/xex/somefile.txt`` --> ``txt``.
Args:
path (str): Full file path.
Returns:
str: Suffix.
Raises:
UserWarning: When ``/`` is detected in suffix.
"""
suffix = os.path.basename(path).split(".")[-1]
if "/" in suffix:
raise UserWarning("Filename can't contain '/' in suffix (%s)!" % path)
return suffix | python | def _get_suffix(path):
"""
Return suffix from `path`.
``/home/xex/somefile.txt`` --> ``txt``.
Args:
path (str): Full file path.
Returns:
str: Suffix.
Raises:
UserWarning: When ``/`` is detected in suffix.
"""
suffix = os.path.basename(path).split(".")[-1]
if "/" in suffix:
raise UserWarning("Filename can't contain '/' in suffix (%s)!" % path)
return suffix | ['def', '_get_suffix', '(', 'path', ')', ':', 'suffix', '=', 'os', '.', 'path', '.', 'basename', '(', 'path', ')', '.', 'split', '(', '"."', ')', '[', '-', '1', ']', 'if', '"/"', 'in', 'suffix', ':', 'raise', 'UserWarning', '(', '"Filename can\'t contain \'/\' in suffix (%s)!"', '%', 'path', ')', 'return', 'suffix'] | Return suffix from `path`.
``/home/xex/somefile.txt`` --> ``txt``.
Args:
path (str): Full file path.
Returns:
str: Suffix.
Raises:
UserWarning: When ``/`` is detected in suffix. | ['Return', 'suffix', 'from', 'path', '.'] | train | https://github.com/edeposit/edeposit.amqp.ltp/blob/df9ac7ec6cbdbeaaeed438ca66df75ea967b6d8e/src/edeposit/amqp/ltp/fn_composers.py#L14-L34 |
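A quick demonstration of `_get_suffix`, assuming the function defined in the row above is in scope. Note that a basename without a dot comes back unchanged, because `split('.')[-1]` falls back to the whole string:

```python
print(_get_suffix('/home/xex/somefile.txt'))  # 'txt'
print(_get_suffix('archive.tar.gz'))          # 'gz'    (only the last suffix)
print(_get_suffix('/tmp/noext'))              # 'noext' (no dot: whole basename)
```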
4,525 | ff0000/scarlet | scarlet/cms/base_views.py | ModelCMSMixin.log_action | def log_action(self, instance, action, action_date=None, url="",
update_parent=True):
"""
Store an action in the database using the CMSLog model.
The following attributes are calculated and set on the log entry:
* **model_repr** - A unicode representation of the instance.
* **object_repr** - The verbose_name of the instance model class.
* **section** - The name of ancestor bundle that is directly \
attached to the admin site.
:param instance: The instance that this action was performed \
on.
:param action: The action type. Must be one of the options \
in CMSLog.ACTIONS.
:param action_date: The datetime the action occurred.
:param url: The url that the log entry should point to, \
Defaults to an empty string.
:param update_parent: If true this will update the last saved time \
on the object pointed to by this bundle's object_view. \
Defaults to True.
"""
section = None
if self.bundle:
bundle = self.bundle
while bundle.parent:
bundle = bundle.parent
section = bundle.name
# if we have a object view that comes from somewhere else
# save it too to update it.
changed_object = instance
bundle = self.bundle
while bundle.object_view == bundle.parent_attr:
bundle = bundle.parent
if update_parent and changed_object.__class__ != bundle._meta.model:
object_view, name = bundle.get_initialized_view_and_name(
bundle.object_view, kwargs=self.kwargs)
changed_object = object_view.get_object()
changed_object.save()
if not section:
section = ""
if url:
url = urlparse.urlparse(url).path
rep = unicode(instance)
if rep:
rep = rep[:255]
log = CMSLog(action=action, url=url, section=section,
model_repr=instance._meta.verbose_name,
object_repr=rep,
user_name=self.request.user.username,
action_date=action_date)
log.save() | python | def log_action(self, instance, action, action_date=None, url="",
update_parent=True):
"""
Store an action in the database using the CMSLog model.
The following attributes are calculated and set on the log entry:
* **model_repr** - A unicode representation of the instance.
* **object_repr** - The verbose_name of the instance model class.
* **section** - The name of ancestor bundle that is directly \
attached to the admin site.
:param instance: The instance that this action was performed \
on.
:param action: The action type. Must be one of the options \
in CMSLog.ACTIONS.
:param action_date: The datetime the action occurred.
:param url: The url that the log entry should point to, \
Defaults to an empty string.
:param update_parent: If true this will update the last saved time \
on the object pointed to by this bundle's object_view. \
Defaults to True.
"""
section = None
if self.bundle:
bundle = self.bundle
while bundle.parent:
bundle = bundle.parent
section = bundle.name
# if we have a object view that comes from somewhere else
# save it too to update it.
changed_object = instance
bundle = self.bundle
while bundle.object_view == bundle.parent_attr:
bundle = bundle.parent
if update_parent and changed_object.__class__ != bundle._meta.model:
object_view, name = bundle.get_initialized_view_and_name(
bundle.object_view, kwargs=self.kwargs)
changed_object = object_view.get_object()
changed_object.save()
if not section:
section = ""
if url:
url = urlparse.urlparse(url).path
rep = unicode(instance)
if rep:
rep = rep[:255]
log = CMSLog(action=action, url=url, section=section,
model_repr=instance._meta.verbose_name,
object_repr=rep,
user_name=self.request.user.username,
action_date=action_date)
log.save() | ['def', 'log_action', '(', 'self', ',', 'instance', ',', 'action', ',', 'action_date', '=', 'None', ',', 'url', '=', '""', ',', 'update_parent', '=', 'True', ')', ':', 'section', '=', 'None', 'if', 'self', '.', 'bundle', ':', 'bundle', '=', 'self', '.', 'bundle', 'while', 'bundle', '.', 'parent', ':', 'bundle', '=', 'bundle', '.', 'parent', 'section', '=', 'bundle', '.', 'name', '# if we have a object view that comes from somewhere else', '# save it too to update it.', 'changed_object', '=', 'instance', 'bundle', '=', 'self', '.', 'bundle', 'while', 'bundle', '.', 'object_view', '==', 'bundle', '.', 'parent_attr', ':', 'bundle', '=', 'bundle', '.', 'parent', 'if', 'update_parent', 'and', 'changed_object', '.', '__class__', '!=', 'bundle', '.', '_meta', '.', 'model', ':', 'object_view', ',', 'name', '=', 'bundle', '.', 'get_initialized_view_and_name', '(', 'bundle', '.', 'object_view', ',', 'kwargs', '=', 'self', '.', 'kwargs', ')', 'changed_object', '=', 'object_view', '.', 'get_object', '(', ')', 'changed_object', '.', 'save', '(', ')', 'if', 'not', 'section', ':', 'section', '=', '""', 'if', 'url', ':', 'url', '=', 'urlparse', '.', 'urlparse', '(', 'url', ')', '.', 'path', 'rep', '=', 'unicode', '(', 'instance', ')', 'if', 'rep', ':', 'rep', '=', 'rep', '[', ':', '255', ']', 'log', '=', 'CMSLog', '(', 'action', '=', 'action', ',', 'url', '=', 'url', ',', 'section', '=', 'section', ',', 'model_repr', '=', 'instance', '.', '_meta', '.', 'verbose_name', ',', 'object_repr', '=', 'rep', ',', 'user_name', '=', 'self', '.', 'request', '.', 'user', '.', 'username', ',', 'action_date', '=', 'action_date', ')', 'log', '.', 'save', '(', ')'] | Store an action in the database using the CMSLog model.
The following attributes are calculated and set on the log entry:
* **model_repr** - A unicode representation of the instance.
* **object_repr** - The verbose_name of the instance model class.
* **section** - The name of ancestor bundle that is directly \
attached to the admin site.
:param instance: The instance that this action was performed \
on.
:param action: The action type. Must be one of the options \
in CMSLog.ACTIONS.
:param action_date: The datetime the action occurred.
:param url: The url that the log entry should point to, \
Defaults to an empty string.
:param update_parent: If true this will update the last saved time \
on the object pointed to by this bundle's object_view. \
Defaults to True. | ['Store', 'an', 'action', 'in', 'the', 'database', 'using', 'the', 'CMSLog', 'model', '.', 'The', 'following', 'attributes', 'are', 'calculated', 'and', 'set', 'on', 'the', 'log', 'entry', ':'] | train | https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/cms/base_views.py#L469-L528 |
4,526 | chibisov/drf-extensions | docs/backdoc.py | _xml_escape_attr | def _xml_escape_attr(attr, skip_single_quote=True):
"""Escape the given string for use in an HTML/XML tag attribute.
By default this doesn't bother with escaping `'` to `'`, presuming that
the tag attribute is surrounded by double quotes.
"""
escaped = (attr
.replace('&', '&')
.replace('"', '"')
.replace('<', '<')
.replace('>', '>'))
if not skip_single_quote:
escaped = escaped.replace("'", "'")
return escaped | python | def _xml_escape_attr(attr, skip_single_quote=True):
"""Escape the given string for use in an HTML/XML tag attribute.
By default this doesn't bother with escaping `'` to `'`, presuming that
the tag attribute is surrounded by double quotes.
"""
escaped = (attr
.replace('&', '&')
.replace('"', '"')
.replace('<', '<')
.replace('>', '>'))
if not skip_single_quote:
escaped = escaped.replace("'", "'")
return escaped | ['def', '_xml_escape_attr', '(', 'attr', ',', 'skip_single_quote', '=', 'True', ')', ':', 'escaped', '=', '(', 'attr', '.', 'replace', '(', "'&'", ',', "'&'", ')', '.', 'replace', '(', '\'"\'', ',', "'"'", ')', '.', 'replace', '(', "'<'", ',', "'<'", ')', '.', 'replace', '(', "'>'", ',', "'>'", ')', ')', 'if', 'not', 'skip_single_quote', ':', 'escaped', '=', 'escaped', '.', 'replace', '(', '"\'"', ',', '"'"', ')', 'return', 'escaped'] | Escape the given string for use in an HTML/XML tag attribute.
By default this doesn't bother with escaping `'` to `'`, presuming that
the tag attribute is surrounded by double quotes. | ['Escape', 'the', 'given', 'string', 'for', 'use', 'in', 'an', 'HTML', '/', 'XML', 'tag', 'attribute', '.'] | train | https://github.com/chibisov/drf-extensions/blob/1d28a4b28890eab5cd19e93e042f8590c8c2fb8b/docs/backdoc.py#L2164-L2177 |
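Assuming `_xml_escape_attr` from the row above is in scope, its behaviour with and without the `skip_single_quote` default looks like this:

```python
print(_xml_escape_attr('a "quoted" <tag> & more'))
# a &quot;quoted&quot; &lt;tag&gt; &amp; more

# Single quotes are only escaped when explicitly requested:
print(_xml_escape_attr("it's", skip_single_quote=False))
# it&#39;s
```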
4,527 | spyder-ide/spyder | spyder/plugins/editor/widgets/editor.py | TabSwitcherWidget.focusOutEvent | def focusOutEvent(self, event):
"""Reimplement Qt method to close the widget when loosing focus."""
event.ignore()
# Inspired from CompletionWidget.focusOutEvent() in file
# widgets/sourcecode/base.py line 212
if sys.platform == "darwin":
if event.reason() != Qt.ActiveWindowFocusReason:
self.close()
else:
self.close() | python | def focusOutEvent(self, event):
"""Reimplement Qt method to close the widget when loosing focus."""
event.ignore()
# Inspired from CompletionWidget.focusOutEvent() in file
# widgets/sourcecode/base.py line 212
if sys.platform == "darwin":
if event.reason() != Qt.ActiveWindowFocusReason:
self.close()
else:
self.close() | ['def', 'focusOutEvent', '(', 'self', ',', 'event', ')', ':', 'event', '.', 'ignore', '(', ')', '# Inspired from CompletionWidget.focusOutEvent() in file\r', '# widgets/sourcecode/base.py line 212\r', 'if', 'sys', '.', 'platform', '==', '"darwin"', ':', 'if', 'event', '.', 'reason', '(', ')', '!=', 'Qt', '.', 'ActiveWindowFocusReason', ':', 'self', '.', 'close', '(', ')', 'else', ':', 'self', '.', 'close', '(', ')'] | Reimplement Qt method to close the widget when loosing focus. | ['Reimplement', 'Qt', 'method', 'to', 'close', 'the', 'widget', 'when', 'loosing', 'focus', '.'] | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/editor.py#L391-L400 |
4,528 | gwpy/gwpy | gwpy/io/cache.py | sieve | def sieve(cache, segment=None):
"""Filter the cache to find those entries that overlap ``segment``
Parameters
----------
cache : `list`
Input list of file paths
segment : `~gwpy.segments.Segment`
The ``[start, stop)`` interval to match against.
"""
return type(cache)(e for e in cache if segment.intersects(file_segment(e))) | python | def sieve(cache, segment=None):
"""Filter the cache to find those entries that overlap ``segment``
Parameters
----------
cache : `list`
Input list of file paths
segment : `~gwpy.segments.Segment`
The ``[start, stop)`` interval to match against.
"""
return type(cache)(e for e in cache if segment.intersects(file_segment(e))) | ['def', 'sieve', '(', 'cache', ',', 'segment', '=', 'None', ')', ':', 'return', 'type', '(', 'cache', ')', '(', 'e', 'for', 'e', 'in', 'cache', 'if', 'segment', '.', 'intersects', '(', 'file_segment', '(', 'e', ')', ')', ')'] | Filter the cache to find those entries that overlap ``segment``
Parameters
----------
cache : `list`
Input list of file paths
segment : `~gwpy.segments.Segment`
The ``[start, stop)`` interval to match against. | ['Filter', 'the', 'cache', 'to', 'find', 'those', 'entries', 'that', 'overlap', 'segment'] | train | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/cache.py#L464-L475 |
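A usage sketch for `sieve`, assuming `gwpy` is installed and that `file_segment` (not shown in this row) derives the GPS span from conventional `OBS-TAG-GPSSTART-DURATION.ext` file names; the cache entries below are invented:

```python
from gwpy.segments import Segment

# Hypothetical cache of 100-second frame files.
cache = [
    'H-H1_TEST-1000000000-100.gwf',
    'H-H1_TEST-1000000100-100.gwf',
    'H-H1_TEST-1000000200-100.gwf',
]

# Keep only files overlapping GPS [1000000050, 1000000150).
print(sieve(cache, segment=Segment(1000000050, 1000000150)))
# expected: the first two entries
```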
4,529 | fchorney/rpI2C | rpI2C.py | I2C.write_quick | def write_quick(self):
"""
Send only the read / write bit
"""
self.bus.write_quick(self.address)
self.log.debug("write_quick: Sent the read / write bit") | python | def write_quick(self):
"""
Send only the read / write bit
"""
self.bus.write_quick(self.address)
self.log.debug("write_quick: Sent the read / write bit") | ['def', 'write_quick', '(', 'self', ')', ':', 'self', '.', 'bus', '.', 'write_quick', '(', 'self', '.', 'address', ')', 'self', '.', 'log', '.', 'debug', '(', '"write_quick: Sent the read / write bit"', ')'] | Send only the read / write bit | ['Send', 'only', 'the', 'read', '/', 'write', 'bit'] | train | https://github.com/fchorney/rpI2C/blob/7c60f82fa8c91496a74182373da0b95a13919d6e/rpI2C.py#L39-L44 |
4,530 | saltstack/salt | salt/states/pagerduty_schedule.py | present | def present(profile='pagerduty', subdomain=None, api_key=None, **kwargs):
'''
Ensure that a pagerduty schedule exists.
This method accepts as args everything defined in
https://developer.pagerduty.com/documentation/rest/schedules/create.
This means that most arguments are in a dict called "schedule."
User id's can be pagerduty id, or name, or email address.
'''
# for convenience, we accept id, name, or email as the user id.
kwargs['schedule']['name'] = kwargs['name'] # match PD API structure
for schedule_layer in kwargs['schedule']['schedule_layers']:
for user in schedule_layer['users']:
u = __salt__['pagerduty_util.get_resource']('users',
user['user']['id'],
['email', 'name', 'id'],
profile=profile,
subdomain=subdomain,
api_key=api_key)
if u is None:
raise Exception('unknown user: {0}'.format(user))
user['user']['id'] = u['id']
r = __salt__['pagerduty_util.resource_present']('schedules',
['name', 'id'],
_diff,
profile,
subdomain,
api_key,
**kwargs)
return r | python | def present(profile='pagerduty', subdomain=None, api_key=None, **kwargs):
'''
Ensure that a pagerduty schedule exists.
This method accepts as args everything defined in
https://developer.pagerduty.com/documentation/rest/schedules/create.
This means that most arguments are in a dict called "schedule."
User id's can be pagerduty id, or name, or email address.
'''
# for convenience, we accept id, name, or email as the user id.
kwargs['schedule']['name'] = kwargs['name'] # match PD API structure
for schedule_layer in kwargs['schedule']['schedule_layers']:
for user in schedule_layer['users']:
u = __salt__['pagerduty_util.get_resource']('users',
user['user']['id'],
['email', 'name', 'id'],
profile=profile,
subdomain=subdomain,
api_key=api_key)
if u is None:
raise Exception('unknown user: {0}'.format(user))
user['user']['id'] = u['id']
r = __salt__['pagerduty_util.resource_present']('schedules',
['name', 'id'],
_diff,
profile,
subdomain,
api_key,
**kwargs)
return r | ['def', 'present', '(', 'profile', '=', "'pagerduty'", ',', 'subdomain', '=', 'None', ',', 'api_key', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', '# for convenience, we accept id, name, or email as the user id.', 'kwargs', '[', "'schedule'", ']', '[', "'name'", ']', '=', 'kwargs', '[', "'name'", ']', '# match PD API structure', 'for', 'schedule_layer', 'in', 'kwargs', '[', "'schedule'", ']', '[', "'schedule_layers'", ']', ':', 'for', 'user', 'in', 'schedule_layer', '[', "'users'", ']', ':', 'u', '=', '__salt__', '[', "'pagerduty_util.get_resource'", ']', '(', "'users'", ',', 'user', '[', "'user'", ']', '[', "'id'", ']', ',', '[', "'email'", ',', "'name'", ',', "'id'", ']', ',', 'profile', '=', 'profile', ',', 'subdomain', '=', 'subdomain', ',', 'api_key', '=', 'api_key', ')', 'if', 'u', 'is', 'None', ':', 'raise', 'Exception', '(', "'unknown user: {0}'", '.', 'format', '(', 'user', ')', ')', 'user', '[', "'user'", ']', '[', "'id'", ']', '=', 'u', '[', "'id'", ']', 'r', '=', '__salt__', '[', "'pagerduty_util.resource_present'", ']', '(', "'schedules'", ',', '[', "'name'", ',', "'id'", ']', ',', '_diff', ',', 'profile', ',', 'subdomain', ',', 'api_key', ',', '*', '*', 'kwargs', ')', 'return', 'r'] | Ensure that a pagerduty schedule exists.
This method accepts as args everything defined in
https://developer.pagerduty.com/documentation/rest/schedules/create.
This means that most arguments are in a dict called "schedule."
User id's can be pagerduty id, or name, or email address. | ['Ensure', 'that', 'a', 'pagerduty', 'schedule', 'exists', '.', 'This', 'method', 'accepts', 'as', 'args', 'everything', 'defined', 'in', 'https', ':', '//', 'developer', '.', 'pagerduty', '.', 'com', '/', 'documentation', '/', 'rest', '/', 'schedules', '/', 'create', '.', 'This', 'means', 'that', 'most', 'arguments', 'are', 'in', 'a', 'dict', 'called', 'schedule', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pagerduty_schedule.py#L48-L77 |
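A sketch of the keyword arguments this `present` state expects. Only `name`, `schedule`, `schedule_layers`, and the `users[...]['user']['id']` path are taken from the code above; the remaining layer fields are assumptions about the PagerDuty schedule-creation payload the docstring links to:

```python
# Hypothetical state arguments; user ids may be a PagerDuty id, name or email.
kwargs = {
    'name': 'primary-oncall',
    'schedule': {
        'time_zone': 'UTC',
        'schedule_layers': [
            {
                'name': 'Layer 1',
                'start': '2019-01-01T00:00:00',
                'rotation_virtual_start': '2019-01-01T00:00:00',
                'rotation_turn_length_seconds': 604800,   # weekly rotation
                'users': [
                    {'user': {'id': 'ops@example.com'}},  # resolved to a PD id
                ],
            },
        ],
    },
}

# present(profile='pagerduty', **kwargs) resolves each user, then delegates to
# pagerduty_util.resource_present('schedules', ['name', 'id'], ...).
```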
4,531 | jobovy/galpy | galpy/util/bovy_ars.py | bovy_ars | def bovy_ars(domain,isDomainFinite,abcissae,hx,hpx,nsamples=1,
hxparams=(),maxn=100):
"""bovy_ars: Implementation of the Adaptive-Rejection Sampling
algorithm by Gilks & Wild (1992): Adaptive Rejection Sampling
for Gibbs Sampling, Applied Statistics, 41, 337
Based on Wild & Gilks (1993), Algorithm AS 287: Adaptive Rejection
Sampling from Log-concave Density Functions, Applied Statistics, 42, 701
Input:
domain - [.,.] upper and lower limit to the domain
isDomainFinite - [.,.] is there a lower/upper limit to the domain?
abcissae - initial list of abcissae (must lie on either side of the peak in hx if the domain is unbounded
hx - function that evaluates h(x) = ln g(x)
hpx - function that evaluates hp(x) = d h(x) / d x
nsamples - (optional) number of desired samples (default=1)
hxparams - (optional) a tuple of parameters for h(x) and h'(x)
maxn - (optional) maximum number of updates to the hull (default=100)
Output:
list with nsamples of samples from exp(h(x))
External dependencies:
math
scipy
scipy.stats
History:
2009-05-21 - Written - Bovy (NYU)
"""
#First set-up the upper and lower hulls
hull=setup_hull(domain,isDomainFinite,abcissae,hx,hpx,hxparams)
#Then start sampling: call sampleone repeatedly
out= []
nupdates= 0
for ii in range(int(nsamples)):
thissample, hull, nupdates= sampleone(hull,hx,hpx,domain,isDomainFinite,maxn,nupdates,hxparams)
out.append(thissample)
return out | python | def bovy_ars(domain,isDomainFinite,abcissae,hx,hpx,nsamples=1,
hxparams=(),maxn=100):
"""bovy_ars: Implementation of the Adaptive-Rejection Sampling
algorithm by Gilks & Wild (1992): Adaptive Rejection Sampling
for Gibbs Sampling, Applied Statistics, 41, 337
Based on Wild & Gilks (1993), Algorithm AS 287: Adaptive Rejection
Sampling from Log-concave Density Functions, Applied Statistics, 42, 701
Input:
domain - [.,.] upper and lower limit to the domain
isDomainFinite - [.,.] is there a lower/upper limit to the domain?
abcissae - initial list of abcissae (must lie on either side of the peak in hx if the domain is unbounded
hx - function that evaluates h(x) = ln g(x)
hpx - function that evaluates hp(x) = d h(x) / d x
nsamples - (optional) number of desired samples (default=1)
hxparams - (optional) a tuple of parameters for h(x) and h'(x)
maxn - (optional) maximum number of updates to the hull (default=100)
Output:
list with nsamples of samples from exp(h(x))
External dependencies:
math
scipy
scipy.stats
History:
2009-05-21 - Written - Bovy (NYU)
"""
#First set-up the upper and lower hulls
hull=setup_hull(domain,isDomainFinite,abcissae,hx,hpx,hxparams)
#Then start sampling: call sampleone repeatedly
out= []
nupdates= 0
for ii in range(int(nsamples)):
thissample, hull, nupdates= sampleone(hull,hx,hpx,domain,isDomainFinite,maxn,nupdates,hxparams)
out.append(thissample)
return out | ['def', 'bovy_ars', '(', 'domain', ',', 'isDomainFinite', ',', 'abcissae', ',', 'hx', ',', 'hpx', ',', 'nsamples', '=', '1', ',', 'hxparams', '=', '(', ')', ',', 'maxn', '=', '100', ')', ':', '#First set-up the upper and lower hulls', 'hull', '=', 'setup_hull', '(', 'domain', ',', 'isDomainFinite', ',', 'abcissae', ',', 'hx', ',', 'hpx', ',', 'hxparams', ')', '#Then start sampling: call sampleone repeatedly', 'out', '=', '[', ']', 'nupdates', '=', '0', 'for', 'ii', 'in', 'range', '(', 'int', '(', 'nsamples', ')', ')', ':', 'thissample', ',', 'hull', ',', 'nupdates', '=', 'sampleone', '(', 'hull', ',', 'hx', ',', 'hpx', ',', 'domain', ',', 'isDomainFinite', ',', 'maxn', ',', 'nupdates', ',', 'hxparams', ')', 'out', '.', 'append', '(', 'thissample', ')', 'return', 'out'] | bovy_ars: Implementation of the Adaptive-Rejection Sampling
algorithm by Gilks & Wild (1992): Adaptive Rejection Sampling
for Gibbs Sampling, Applied Statistics, 41, 337
Based on Wild & Gilks (1993), Algorithm AS 287: Adaptive Rejection
Sampling from Log-concave Density Functions, Applied Statistics, 42, 701
Input:
domain - [.,.] upper and lower limit to the domain
isDomainFinite - [.,.] is there a lower/upper limit to the domain?
abcissae - initial list of abcissae (must lie on either side of the peak in hx if the domain is unbounded
hx - function that evaluates h(x) = ln g(x)
hpx - function that evaluates hp(x) = d h(x) / d x
nsamples - (optional) number of desired samples (default=1)
hxparams - (optional) a tuple of parameters for h(x) and h'(x)
maxn - (optional) maximum number of updates to the hull (default=100)
Output:
list with nsamples of samples from exp(h(x))
External dependencies:
math
scipy
scipy.stats
History:
2009-05-21 - Written - Bovy (NYU) | ['bovy_ars', ':', 'Implementation', 'of', 'the', 'Adaptive', '-', 'Rejection', 'Sampling', 'algorithm', 'by', 'Gilks', '&', 'Wild', '(', '1992', ')', ':', 'Adaptive', 'Rejection', 'Sampling', 'for', 'Gibbs', 'Sampling', 'Applied', 'Statistics', '41', '337', 'Based', 'on', 'Wild', '&', 'Gilks', '(', '1993', ')', 'Algorithm', 'AS', '287', ':', 'Adaptive', 'Rejection', 'Sampling', 'from', 'Log', '-', 'concave', 'Density', 'Functions', 'Applied', 'Statistics', '42', '701'] | train | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/util/bovy_ars.py#L36-L83 |
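A minimal sampling sketch for `bovy_ars`: drawing from a standard normal, whose log-density h(x) = -x**2/2 is log-concave as ARS requires. The `*args` signatures below are a defensive assumption about how `hxparams` is forwarded, which this row does not show:

```python
import numpy

def hx(x, *args):    # ln g(x) for a standard normal, up to a constant
    return -0.5 * x ** 2

def hpx(x, *args):   # d h(x) / d x
    return -x

samples = bovy_ars(
    domain=[-10., 10.],             # nominal; the domain is flagged unbounded
    isDomainFinite=[False, False],  # no lower or upper bound
    abcissae=[-1., 0.5, 1.],        # points on both sides of the peak at x=0
    hx=hx,
    hpx=hpx,
    nsamples=1000,
)
print(numpy.mean(samples), numpy.std(samples))  # roughly 0 and 1
```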
4,532 | DEIB-GECO/PyGMQL | gmql/dataset/GMQLDataset.py | GMQLDataset.meta_group | def meta_group(self, meta, meta_aggregates=None):
"""
*Wrapper of* ``GROUP``
Group operation only for metadata. For further information check :meth:`~.group`
"""
return self.group(meta=meta, meta_aggregates=meta_aggregates) | python | def meta_group(self, meta, meta_aggregates=None):
"""
*Wrapper of* ``GROUP``
Group operation only for metadata. For further information check :meth:`~.group`
"""
return self.group(meta=meta, meta_aggregates=meta_aggregates) | ['def', 'meta_group', '(', 'self', ',', 'meta', ',', 'meta_aggregates', '=', 'None', ')', ':', 'return', 'self', '.', 'group', '(', 'meta', '=', 'meta', ',', 'meta_aggregates', '=', 'meta_aggregates', ')'] | *Wrapper of* ``GROUP``
Group operation only for metadata. For further information check :meth:`~.group` | ['*', 'Wrapper', 'of', '*', 'GROUP'] | train | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/GMQLDataset.py#L1345-L1351 |
4,533 | jasonrollins/shareplum | shareplum/shareplum.py | _List.GetAttachmentCollection | def GetAttachmentCollection(self, _id):
"""Get Attachments for given List Item ID"""
# Build Request
soap_request = soap('GetAttachmentCollection')
soap_request.add_parameter('listName', self.listName)
soap_request.add_parameter('listItemID', _id)
self.last_request = str(soap_request)
# Send Request
response = self._session.post(url=self._url('Lists'),
headers=self._headers('GetAttachmentCollection'),
data=str(soap_request),
verify=False,
timeout=self.timeout)
# Parse Request
if response.status_code == 200:
envelope = etree.fromstring(response.text.encode('utf-8'), parser=etree.XMLParser(huge_tree=self.huge_tree))
attaches = envelope[0][0][0][0]
attachments = []
for attachment in attaches.getchildren():
attachments.append(attachment.text)
return attachments
else:
return response | python | def GetAttachmentCollection(self, _id):
"""Get Attachments for given List Item ID"""
# Build Request
soap_request = soap('GetAttachmentCollection')
soap_request.add_parameter('listName', self.listName)
soap_request.add_parameter('listItemID', _id)
self.last_request = str(soap_request)
# Send Request
response = self._session.post(url=self._url('Lists'),
headers=self._headers('GetAttachmentCollection'),
data=str(soap_request),
verify=False,
timeout=self.timeout)
# Parse Request
if response.status_code == 200:
envelope = etree.fromstring(response.text.encode('utf-8'), parser=etree.XMLParser(huge_tree=self.huge_tree))
attaches = envelope[0][0][0][0]
attachments = []
for attachment in attaches.getchildren():
attachments.append(attachment.text)
return attachments
else:
return response | ['def', 'GetAttachmentCollection', '(', 'self', ',', '_id', ')', ':', '# Build Request', 'soap_request', '=', 'soap', '(', "'GetAttachmentCollection'", ')', 'soap_request', '.', 'add_parameter', '(', "'listName'", ',', 'self', '.', 'listName', ')', 'soap_request', '.', 'add_parameter', '(', "'listItemID'", ',', '_id', ')', 'self', '.', 'last_request', '=', 'str', '(', 'soap_request', ')', '# Send Request', 'response', '=', 'self', '.', '_session', '.', 'post', '(', 'url', '=', 'self', '.', '_url', '(', "'Lists'", ')', ',', 'headers', '=', 'self', '.', '_headers', '(', "'GetAttachmentCollection'", ')', ',', 'data', '=', 'str', '(', 'soap_request', ')', ',', 'verify', '=', 'False', ',', 'timeout', '=', 'self', '.', 'timeout', ')', '# Parse Request', 'if', 'response', '.', 'status_code', '==', '200', ':', 'envelope', '=', 'etree', '.', 'fromstring', '(', 'response', '.', 'text', '.', 'encode', '(', "'utf-8'", ')', ',', 'parser', '=', 'etree', '.', 'XMLParser', '(', 'huge_tree', '=', 'self', '.', 'huge_tree', ')', ')', 'attaches', '=', 'envelope', '[', '0', ']', '[', '0', ']', '[', '0', ']', '[', '0', ']', 'attachments', '=', '[', ']', 'for', 'attachment', 'in', 'attaches', '.', 'getchildren', '(', ')', ':', 'attachments', '.', 'append', '(', 'attachment', '.', 'text', ')', 'return', 'attachments', 'else', ':', 'return', 'response'] | Get Attachments for given List Item ID | ['Get', 'Attachments', 'for', 'given', 'List', 'Item', 'ID'] | train | https://github.com/jasonrollins/shareplum/blob/404f320808912619920e2d787f2c4387225a14e0/shareplum/shareplum.py#L706-L731 |
4,534 | dfm/celerite | celerite/celerite.py | GP.log_likelihood | def log_likelihood(self, y, _const=math.log(2.0*math.pi), quiet=False):
"""
Compute the marginalized likelihood of the GP model
The factorized matrix from the previous call to :func:`GP.compute` is
used so ``compute`` must be called first.
Args:
y (array[n]): The observations at coordinates ``x`` from
:func:`GP.compute`.
quiet (bool): If true, return ``-numpy.inf`` for non-positive
definite matrices instead of throwing an error.
Returns:
float: The marginalized likelihood of the GP model.
Raises:
ValueError: For mismatched dimensions.
solver.LinAlgError: For non-positive definite matrices.
"""
y = self._process_input(y)
resid = y - self.mean.get_value(self._t)
try:
self._recompute()
except solver.LinAlgError:
if quiet:
return -np.inf
raise
if len(y.shape) > 1:
raise ValueError("dimension mismatch")
logdet = self.solver.log_determinant()
if not np.isfinite(logdet):
return -np.inf
loglike = -0.5*(self.solver.dot_solve(resid)+logdet+len(y)*_const)
if not np.isfinite(loglike):
return -np.inf
return loglike | python | def log_likelihood(self, y, _const=math.log(2.0*math.pi), quiet=False):
"""
Compute the marginalized likelihood of the GP model
The factorized matrix from the previous call to :func:`GP.compute` is
used so ``compute`` must be called first.
Args:
y (array[n]): The observations at coordinates ``x`` from
:func:`GP.compute`.
quiet (bool): If true, return ``-numpy.inf`` for non-positive
definite matrices instead of throwing an error.
Returns:
float: The marginalized likelihood of the GP model.
Raises:
ValueError: For mismatched dimensions.
solver.LinAlgError: For non-positive definite matrices.
"""
y = self._process_input(y)
resid = y - self.mean.get_value(self._t)
try:
self._recompute()
except solver.LinAlgError:
if quiet:
return -np.inf
raise
if len(y.shape) > 1:
raise ValueError("dimension mismatch")
logdet = self.solver.log_determinant()
if not np.isfinite(logdet):
return -np.inf
loglike = -0.5*(self.solver.dot_solve(resid)+logdet+len(y)*_const)
if not np.isfinite(loglike):
return -np.inf
return loglike | ['def', 'log_likelihood', '(', 'self', ',', 'y', ',', '_const', '=', 'math', '.', 'log', '(', '2.0', '*', 'math', '.', 'pi', ')', ',', 'quiet', '=', 'False', ')', ':', 'y', '=', 'self', '.', '_process_input', '(', 'y', ')', 'resid', '=', 'y', '-', 'self', '.', 'mean', '.', 'get_value', '(', 'self', '.', '_t', ')', 'try', ':', 'self', '.', '_recompute', '(', ')', 'except', 'solver', '.', 'LinAlgError', ':', 'if', 'quiet', ':', 'return', '-', 'np', '.', 'inf', 'raise', 'if', 'len', '(', 'y', '.', 'shape', ')', '>', '1', ':', 'raise', 'ValueError', '(', '"dimension mismatch"', ')', 'logdet', '=', 'self', '.', 'solver', '.', 'log_determinant', '(', ')', 'if', 'not', 'np', '.', 'isfinite', '(', 'logdet', ')', ':', 'return', '-', 'np', '.', 'inf', 'loglike', '=', '-', '0.5', '*', '(', 'self', '.', 'solver', '.', 'dot_solve', '(', 'resid', ')', '+', 'logdet', '+', 'len', '(', 'y', ')', '*', '_const', ')', 'if', 'not', 'np', '.', 'isfinite', '(', 'loglike', ')', ':', 'return', '-', 'np', '.', 'inf', 'return', 'loglike'] | Compute the marginalized likelihood of the GP model
The factorized matrix from the previous call to :func:`GP.compute` is
used so ``compute`` must be called first.
Args:
y (array[n]): The observations at coordinates ``x`` from
:func:`GP.compute`.
quiet (bool): If true, return ``-numpy.inf`` for non-positive
definite matrices instead of throwing an error.
Returns:
float: The marginalized likelihood of the GP model.
Raises:
ValueError: For mismatched dimensions.
solver.LinAlgError: For non-positive definite matrices. | ['Compute', 'the', 'marginalized', 'likelihood', 'of', 'the', 'GP', 'model'] | train | https://github.com/dfm/celerite/blob/ad3f471f06b18d233f3dab71bb1c20a316173cae/celerite/celerite.py#L155-L192 |
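Typical use of `GP.log_likelihood` follows the docstring's requirement that `compute` runs first. A short sketch assuming `celerite` is installed; the kernel choice and parameter values are illustrative only:

```python
import numpy as np
import celerite
from celerite import terms

# Simulated, irregularly sampled data with known white-noise error bars.
np.random.seed(42)
t = np.sort(np.random.uniform(0, 10, 200))
yerr = 0.1 * np.ones_like(t)
y = np.sin(t) + yerr * np.random.randn(len(t))

kernel = terms.RealTerm(log_a=0.0, log_c=-1.0)  # illustrative hyperparameters
gp = celerite.GP(kernel, mean=0.0)

gp.compute(t, yerr)            # factorize the covariance matrix first
print(gp.log_likelihood(y))    # marginalized log likelihood of the data
```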
4,535 | bitcraze/crazyflie-lib-python | lpslib/lopoanchor.py | LoPoAnchor.set_mode | def set_mode(self, anchor_id, mode):
"""
Send a packet to set the anchor mode. If the anchor receives the packet,
it will change mode and reset.
"""
data = struct.pack('<BB', LoPoAnchor.LPP_TYPE_MODE, mode)
self.crazyflie.loc.send_short_lpp_packet(anchor_id, data) | python | def set_mode(self, anchor_id, mode):
"""
Send a packet to set the anchor mode. If the anchor receives the packet,
it will change mode and reset.
"""
data = struct.pack('<BB', LoPoAnchor.LPP_TYPE_MODE, mode)
self.crazyflie.loc.send_short_lpp_packet(anchor_id, data) | ['def', 'set_mode', '(', 'self', ',', 'anchor_id', ',', 'mode', ')', ':', 'data', '=', 'struct', '.', 'pack', '(', "'<BB'", ',', 'LoPoAnchor', '.', 'LPP_TYPE_MODE', ',', 'mode', ')', 'self', '.', 'crazyflie', '.', 'loc', '.', 'send_short_lpp_packet', '(', 'anchor_id', ',', 'data', ')'] | Send a packet to set the anchor mode. If the anchor receive the packet,
it will change mode and resets. | ['Send', 'a', 'packet', 'to', 'set', 'the', 'anchor', 'mode', '.', 'If', 'the', 'anchor', 'receive', 'the', 'packet', 'it', 'will', 'change', 'mode', 'and', 'resets', '.'] | train | https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/lpslib/lopoanchor.py#L66-L72 |
4,536 | faucamp/python-gsmmodem | gsmmodem/modem.py | GsmModem.sendUssd | def sendUssd(self, ussdString, responseTimeout=15):
""" Starts a USSD session by dialing the the specified USSD string, or \
sends the specified string in the existing USSD session (if any)
:param ussdString: The USSD access number to dial
:param responseTimeout: Maximum time to wait a response, in seconds
:raise TimeoutException: if no response is received in time
:return: The USSD response message/session (as a Ussd object)
:rtype: gsmmodem.modem.Ussd
"""
self._ussdSessionEvent = threading.Event()
try:
cusdResponse = self.write('AT+CUSD=1,"{0}",15'.format(ussdString), timeout=responseTimeout) # Should respond with "OK"
except Exception:
self._ussdSessionEvent = None # Cancel the thread sync lock
raise
# Some modems issue the +CUSD response before the acknowledgment "OK" - check for that
if len(cusdResponse) > 1:
cusdResponseFound = lineStartingWith('+CUSD', cusdResponse) != None
if cusdResponseFound:
self._ussdSessionEvent = None # Cancel thread sync lock
return self._parseCusdResponse(cusdResponse)
# Wait for the +CUSD notification message
if self._ussdSessionEvent.wait(responseTimeout):
self._ussdSessionEvent = None
return self._ussdResponse
else: # Response timed out
self._ussdSessionEvent = None
raise TimeoutException() | python | def sendUssd(self, ussdString, responseTimeout=15):
""" Starts a USSD session by dialing the the specified USSD string, or \
sends the specified string in the existing USSD session (if any)
:param ussdString: The USSD access number to dial
:param responseTimeout: Maximum time to wait a response, in seconds
:raise TimeoutException: if no response is received in time
:return: The USSD response message/session (as a Ussd object)
:rtype: gsmmodem.modem.Ussd
"""
self._ussdSessionEvent = threading.Event()
try:
cusdResponse = self.write('AT+CUSD=1,"{0}",15'.format(ussdString), timeout=responseTimeout) # Should respond with "OK"
except Exception:
self._ussdSessionEvent = None # Cancel the thread sync lock
raise
# Some modems issue the +CUSD response before the acknowledgment "OK" - check for that
if len(cusdResponse) > 1:
cusdResponseFound = lineStartingWith('+CUSD', cusdResponse) != None
if cusdResponseFound:
self._ussdSessionEvent = None # Cancel thread sync lock
return self._parseCusdResponse(cusdResponse)
# Wait for the +CUSD notification message
if self._ussdSessionEvent.wait(responseTimeout):
self._ussdSessionEvent = None
return self._ussdResponse
else: # Response timed out
self._ussdSessionEvent = None
raise TimeoutException() | ['def', 'sendUssd', '(', 'self', ',', 'ussdString', ',', 'responseTimeout', '=', '15', ')', ':', 'self', '.', '_ussdSessionEvent', '=', 'threading', '.', 'Event', '(', ')', 'try', ':', 'cusdResponse', '=', 'self', '.', 'write', '(', '\'AT+CUSD=1,"{0}",15\'', '.', 'format', '(', 'ussdString', ')', ',', 'timeout', '=', 'responseTimeout', ')', '# Should respond with "OK"', 'except', 'Exception', ':', 'self', '.', '_ussdSessionEvent', '=', 'None', '# Cancel the thread sync lock', 'raise', '# Some modems issue the +CUSD response before the acknowledgment "OK" - check for that', 'if', 'len', '(', 'cusdResponse', ')', '>', '1', ':', 'cusdResponseFound', '=', 'lineStartingWith', '(', "'+CUSD'", ',', 'cusdResponse', ')', '!=', 'None', 'if', 'cusdResponseFound', ':', 'self', '.', '_ussdSessionEvent', '=', 'None', '# Cancel thread sync lock', 'return', 'self', '.', '_parseCusdResponse', '(', 'cusdResponse', ')', '# Wait for the +CUSD notification message', 'if', 'self', '.', '_ussdSessionEvent', '.', 'wait', '(', 'responseTimeout', ')', ':', 'self', '.', '_ussdSessionEvent', '=', 'None', 'return', 'self', '.', '_ussdResponse', 'else', ':', '# Response timed out', 'self', '.', '_ussdSessionEvent', '=', 'None', 'raise', 'TimeoutException', '(', ')'] | Starts a USSD session by dialing the the specified USSD string, or \
sends the specified string in the existing USSD session (if any)
:param ussdString: The USSD access number to dial
:param responseTimeout: Maximum time to wait a response, in seconds
:raise TimeoutException: if no response is received in time
:return: The USSD response message/session (as a Ussd object)
:rtype: gsmmodem.modem.Ussd | ['Starts', 'a', 'USSD', 'session', 'by', 'dialing', 'the', 'the', 'specified', 'USSD', 'string', 'or', '\\', 'sends', 'the', 'specified', 'string', 'in', 'the', 'existing', 'USSD', 'session', '(', 'if', 'any', ')', ':', 'param', 'ussdString', ':', 'The', 'USSD', 'access', 'number', 'to', 'dial', ':', 'param', 'responseTimeout', ':', 'Maximum', 'time', 'to', 'wait', 'a', 'response', 'in', 'seconds', ':', 'raise', 'TimeoutException', ':', 'if', 'no', 'response', 'is', 'received', 'in', 'time', ':', 'return', ':', 'The', 'USSD', 'response', 'message', '/', 'session', '(', 'as', 'a', 'Ussd', 'object', ')', ':', 'rtype', ':', 'gsmmodem', '.', 'modem', '.', 'Ussd'] | train | https://github.com/faucamp/python-gsmmodem/blob/834c68b1387ca2c91e2210faa8f75526b39723b5/gsmmodem/modem.py#L668-L699 |
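A hedged usage sketch for `sendUssd`, assuming a physical modem is attached and that the surrounding `GsmModem`/`Ussd` API behaves as in the python-gsmmodem examples; port, baud rate and the USSD code are placeholders:

```python
from gsmmodem.modem import GsmModem

modem = GsmModem('/dev/ttyUSB0', 115200)  # illustrative port and baud rate
modem.connect()                           # pass a SIM PIN here if one is set
response = modem.sendUssd('*101#')        # e.g. a balance-check code
print(response.message)                   # text returned by the network
if response.sessionActive:
    response.cancel()                     # end an interactive USSD session
modem.close()
```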
4,537 | saltstack/salt | salt/modules/mac_desktop.py | screensaver | def screensaver():
'''
Launch the screensaver.
CLI Example:
.. code-block:: bash
salt '*' desktop.screensaver
'''
cmd = 'open /System/Library/Frameworks/ScreenSaver.framework/Versions/A/Resources/ScreenSaverEngine.app'
call = __salt__['cmd.run_all'](
cmd,
output_loglevel='debug',
python_shell=False
)
_check_cmd(call)
return True | python | def screensaver():
'''
Launch the screensaver.
CLI Example:
.. code-block:: bash
salt '*' desktop.screensaver
'''
cmd = 'open /System/Library/Frameworks/ScreenSaver.framework/Versions/A/Resources/ScreenSaverEngine.app'
call = __salt__['cmd.run_all'](
cmd,
output_loglevel='debug',
python_shell=False
)
_check_cmd(call)
return True | ['def', 'screensaver', '(', ')', ':', 'cmd', '=', "'open /System/Library/Frameworks/ScreenSaver.framework/Versions/A/Resources/ScreenSaverEngine.app'", 'call', '=', '__salt__', '[', "'cmd.run_all'", ']', '(', 'cmd', ',', 'output_loglevel', '=', "'debug'", ',', 'python_shell', '=', 'False', ')', '_check_cmd', '(', 'call', ')', 'return', 'True'] | Launch the screensaver.
CLI Example:
.. code-block:: bash
salt '*' desktop.screensaver | ['Launch', 'the', 'screensaver', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_desktop.py#L69-L87 |
4,538 | gwastro/pycbc | pycbc/inference/models/marginalized_gaussian_noise.py | MarginalizedGaussianNoise._margtimephase_loglr | def _margtimephase_loglr(self, mf_snr, opt_snr):
"""Returns the log likelihood ratio marginalized over time and phase.
"""
return special.logsumexp(numpy.log(special.i0(mf_snr)),
b=self._deltat) - 0.5*opt_snr | python | def _margtimephase_loglr(self, mf_snr, opt_snr):
"""Returns the log likelihood ratio marginalized over time and phase.
"""
return special.logsumexp(numpy.log(special.i0(mf_snr)),
b=self._deltat) - 0.5*opt_snr | ['def', '_margtimephase_loglr', '(', 'self', ',', 'mf_snr', ',', 'opt_snr', ')', ':', 'return', 'special', '.', 'logsumexp', '(', 'numpy', '.', 'log', '(', 'special', '.', 'i0', '(', 'mf_snr', ')', ')', ',', 'b', '=', 'self', '.', '_deltat', ')', '-', '0.5', '*', 'opt_snr'] | Returns the log likelihood ratio marginalized over time and phase. | ['Returns', 'the', 'log', 'likelihood', 'ratio', 'marginalized', 'over', 'time', 'and', 'phase', '.'] | train | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/models/marginalized_gaussian_noise.py#L549-L553 |
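The single expression above is the log of the time integral of the Bessel-function phase marginalization, minus half of `opt_snr`. A self-contained numpy/scipy sketch of the same computation with invented SNR values:

```python
import numpy as np
from scipy import special

# Toy matched-filter SNR time series; the values are illustrative only.
deltat = 1.0 / 4096.0                     # sample spacing of the SNR series
times = (np.arange(4096) - 2048) * deltat
mf_snr = 4.0 * np.exp(-0.5 * (times / 0.01) ** 2)
opt_snr = 16.0

# Same expression as _margtimephase_loglr above.
loglr = special.logsumexp(np.log(special.i0(mf_snr)), b=deltat) - 0.5 * opt_snr
print(loglr)
```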
4,539 | go-macaroon-bakery/py-macaroon-bakery | macaroonbakery/httpbakery/_client.py | _prepare_discharge_hook | def _prepare_discharge_hook(req, client):
''' Return the hook function (called when the response is received.)
This allows us to intercept the response and do any necessary
macaroon discharge before returning.
'''
class Retry:
# Define a local class so that we can use its class variable as
# mutable state accessed by the closures below.
count = 0
def hook(response, *args, **kwargs):
''' Requests hooks system, this is the hook for the response.
'''
status_code = response.status_code
if status_code != 407 and status_code != 401:
return response
if (status_code == 401 and response.headers.get('WWW-Authenticate') !=
'Macaroon'):
return response
if response.headers.get('Content-Type') != 'application/json':
return response
errorJSON = response.json()
if errorJSON.get('Code') != ERR_DISCHARGE_REQUIRED:
return response
error = Error.from_dict(errorJSON)
Retry.count += 1
if Retry.count >= MAX_DISCHARGE_RETRIES:
raise BakeryException('too many ({}) discharge requests'.format(
Retry.count)
)
client.handle_error(error, req.url)
req.headers.pop('Cookie', None)
req.prepare_cookies(client.cookies)
req.headers[BAKERY_PROTOCOL_HEADER] = \
str(bakery.LATEST_VERSION)
with requests.Session() as s:
return s.send(req)
return hook | python | def _prepare_discharge_hook(req, client):
''' Return the hook function (called when the response is received.)
This allows us to intercept the response and do any necessary
macaroon discharge before returning.
'''
class Retry:
# Define a local class so that we can use its class variable as
# mutable state accessed by the closures below.
count = 0
def hook(response, *args, **kwargs):
''' Requests hooks system, this is the hook for the response.
'''
status_code = response.status_code
if status_code != 407 and status_code != 401:
return response
if (status_code == 401 and response.headers.get('WWW-Authenticate') !=
'Macaroon'):
return response
if response.headers.get('Content-Type') != 'application/json':
return response
errorJSON = response.json()
if errorJSON.get('Code') != ERR_DISCHARGE_REQUIRED:
return response
error = Error.from_dict(errorJSON)
Retry.count += 1
if Retry.count >= MAX_DISCHARGE_RETRIES:
raise BakeryException('too many ({}) discharge requests'.format(
Retry.count)
)
client.handle_error(error, req.url)
req.headers.pop('Cookie', None)
req.prepare_cookies(client.cookies)
req.headers[BAKERY_PROTOCOL_HEADER] = \
str(bakery.LATEST_VERSION)
with requests.Session() as s:
return s.send(req)
return hook | ['def', '_prepare_discharge_hook', '(', 'req', ',', 'client', ')', ':', 'class', 'Retry', ':', '# Define a local class so that we can use its class variable as', '# mutable state accessed by the closures below.', 'count', '=', '0', 'def', 'hook', '(', 'response', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', "''' Requests hooks system, this is the hook for the response.\n '''", 'status_code', '=', 'response', '.', 'status_code', 'if', 'status_code', '!=', '407', 'and', 'status_code', '!=', '401', ':', 'return', 'response', 'if', '(', 'status_code', '==', '401', 'and', 'response', '.', 'headers', '.', 'get', '(', "'WWW-Authenticate'", ')', '!=', "'Macaroon'", ')', ':', 'return', 'response', 'if', 'response', '.', 'headers', '.', 'get', '(', "'Content-Type'", ')', '!=', "'application/json'", ':', 'return', 'response', 'errorJSON', '=', 'response', '.', 'json', '(', ')', 'if', 'errorJSON', '.', 'get', '(', "'Code'", ')', '!=', 'ERR_DISCHARGE_REQUIRED', ':', 'return', 'response', 'error', '=', 'Error', '.', 'from_dict', '(', 'errorJSON', ')', 'Retry', '.', 'count', '+=', '1', 'if', 'Retry', '.', 'count', '>=', 'MAX_DISCHARGE_RETRIES', ':', 'raise', 'BakeryException', '(', "'too many ({}) discharge requests'", '.', 'format', '(', 'Retry', '.', 'count', ')', ')', 'client', '.', 'handle_error', '(', 'error', ',', 'req', '.', 'url', ')', 'req', '.', 'headers', '.', 'pop', '(', "'Cookie'", ',', 'None', ')', 'req', '.', 'prepare_cookies', '(', 'client', '.', 'cookies', ')', 'req', '.', 'headers', '[', 'BAKERY_PROTOCOL_HEADER', ']', '=', 'str', '(', 'bakery', '.', 'LATEST_VERSION', ')', 'with', 'requests', '.', 'Session', '(', ')', 'as', 's', ':', 'return', 's', '.', 'send', '(', 'req', ')', 'return', 'hook'] | Return the hook function (called when the response is received.)
This allows us to intercept the response and do any necessary
macaroon discharge before returning. | ['Return', 'the', 'hook', 'function', '(', 'called', 'when', 'the', 'response', 'is', 'received', '.', ')'] | train | https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/httpbakery/_client.py#L259-L299 |
4,540 | saltstack/salt | salt/modules/rabbitmq.py | _safe_output | def _safe_output(line):
'''
Looks for rabbitmqctl warning, or general formatting, strings that aren't
intended to be parsed as output.
Returns a boolean whether the line can be parsed as rabbitmqctl output.
'''
return not any([
line.startswith('Listing') and line.endswith('...'),
line.startswith('Listing') and '\t' not in line,
'...done' in line,
line.startswith('WARNING:')
]) | python | def _safe_output(line):
'''
Looks for rabbitmqctl warning, or general formatting, strings that aren't
intended to be parsed as output.
Returns a boolean whether the line can be parsed as rabbitmqctl output.
'''
return not any([
line.startswith('Listing') and line.endswith('...'),
line.startswith('Listing') and '\t' not in line,
'...done' in line,
line.startswith('WARNING:')
]) | ['def', '_safe_output', '(', 'line', ')', ':', 'return', 'not', 'any', '(', '[', 'line', '.', 'startswith', '(', "'Listing'", ')', 'and', 'line', '.', 'endswith', '(', "'...'", ')', ',', 'line', '.', 'startswith', '(', "'Listing'", ')', 'and', "'\\t'", 'not', 'in', 'line', ',', "'...done'", 'in', 'line', ',', 'line', '.', 'startswith', '(', "'WARNING:'", ')', ']', ')'] | Looks for rabbitmqctl warning, or general formatting, strings that aren't
intended to be parsed as output.
Returns a boolean whether the line can be parsed as rabbitmqctl output. | ['Looks', 'for', 'rabbitmqctl', 'warning', 'or', 'general', 'formatting', 'strings', 'that', 'aren', 't', 'intended', 'to', 'be', 'parsed', 'as', 'output', '.', 'Returns', 'a', 'boolean', 'whether', 'the', 'line', 'can', 'be', 'parsed', 'as', 'rabbitmqctl', 'output', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rabbitmq.py#L132-L143 |
4,541 | theislab/scanpy | scanpy/tools/_sim.py | GRNsim.set_coupl_old | def set_coupl_old(self):
""" Using the adjacency matrix, sample a coupling matrix.
"""
if self.model == 'krumsiek11' or self.model == 'var':
# we already built the coupling matrix in set_coupl20()
return
self.Coupl = np.zeros((self.dim,self.dim))
for i in range(self.Adj.shape[0]):
for j,a in enumerate(self.Adj[i]):
# if there is a 1 in Adj, specify co and antiregulation
# and strength of regulation
if a != 0:
co_anti = np.random.randint(2)
# set a lower bound for the coupling parameters
# they ought not to be smaller than 0.1
# and not be larger than 0.4
self.Coupl[i,j] = 0.0*np.random.rand() + 0.1
# set sign for coupling
if co_anti == 1:
self.Coupl[i,j] *= -1
# enforce certain requirements on models
if self.model == 1:
self.coupl_model1()
elif self.model == 5:
self.coupl_model5()
elif self.model in [6,7]:
self.coupl_model6()
elif self.model in [8,9,10]:
self.coupl_model8()
# output
if self.verbosity > 1:
settings.m(0,self.Coupl) | python | def set_coupl_old(self):
""" Using the adjacency matrix, sample a coupling matrix.
"""
if self.model == 'krumsiek11' or self.model == 'var':
# we already built the coupling matrix in set_coupl20()
return
self.Coupl = np.zeros((self.dim,self.dim))
for i in range(self.Adj.shape[0]):
for j,a in enumerate(self.Adj[i]):
# if there is a 1 in Adj, specify co and antiregulation
# and strength of regulation
if a != 0:
co_anti = np.random.randint(2)
# set a lower bound for the coupling parameters
# they ought not to be smaller than 0.1
# and not be larger than 0.4
self.Coupl[i,j] = 0.0*np.random.rand() + 0.1
# set sign for coupling
if co_anti == 1:
self.Coupl[i,j] *= -1
# enforce certain requirements on models
if self.model == 1:
self.coupl_model1()
elif self.model == 5:
self.coupl_model5()
elif self.model in [6,7]:
self.coupl_model6()
elif self.model in [8,9,10]:
self.coupl_model8()
# output
if self.verbosity > 1:
settings.m(0,self.Coupl) | ['def', 'set_coupl_old', '(', 'self', ')', ':', 'if', 'self', '.', 'model', '==', "'krumsiek11'", 'or', 'self', '.', 'model', '==', "'var'", ':', '# we already built the coupling matrix in set_coupl20()', 'return', 'self', '.', 'Coupl', '=', 'np', '.', 'zeros', '(', '(', 'self', '.', 'dim', ',', 'self', '.', 'dim', ')', ')', 'for', 'i', 'in', 'range', '(', 'self', '.', 'Adj', '.', 'shape', '[', '0', ']', ')', ':', 'for', 'j', ',', 'a', 'in', 'enumerate', '(', 'self', '.', 'Adj', '[', 'i', ']', ')', ':', '# if there is a 1 in Adj, specify co and antiregulation', '# and strength of regulation', 'if', 'a', '!=', '0', ':', 'co_anti', '=', 'np', '.', 'random', '.', 'randint', '(', '2', ')', '# set a lower bound for the coupling parameters', '# they ought not to be smaller than 0.1', '# and not be larger than 0.4', 'self', '.', 'Coupl', '[', 'i', ',', 'j', ']', '=', '0.0', '*', 'np', '.', 'random', '.', 'rand', '(', ')', '+', '0.1', '# set sign for coupling', 'if', 'co_anti', '==', '1', ':', 'self', '.', 'Coupl', '[', 'i', ',', 'j', ']', '*=', '-', '1', '# enforce certain requirements on models', 'if', 'self', '.', 'model', '==', '1', ':', 'self', '.', 'coupl_model1', '(', ')', 'elif', 'self', '.', 'model', '==', '5', ':', 'self', '.', 'coupl_model5', '(', ')', 'elif', 'self', '.', 'model', 'in', '[', '6', ',', '7', ']', ':', 'self', '.', 'coupl_model6', '(', ')', 'elif', 'self', '.', 'model', 'in', '[', '8', ',', '9', ',', '10', ']', ':', 'self', '.', 'coupl_model8', '(', ')', '# output', 'if', 'self', '.', 'verbosity', '>', '1', ':', 'settings', '.', 'm', '(', '0', ',', 'self', '.', 'Coupl', ')'] | Using the adjacency matrix, sample a coupling matrix. | ['Using', 'the', 'adjacency', 'matrix', 'sample', 'a', 'coupling', 'matrix', '.'] | train | https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/tools/_sim.py#L619-L650 |
4,542 | internetarchive/doublethink | doublethink/orm.py | Document.table_ensure | def table_ensure(cls, rr):
'''
Creates the table if it doesn't exist.
'''
dbs = rr.db_list().run()
if not rr.dbname in dbs:
logging.info('creating rethinkdb database %s', repr(rr.dbname))
rr.db_create(rr.dbname).run()
tables = rr.table_list().run()
if not cls.table in tables:
logging.info(
'creating rethinkdb table %s in database %s',
repr(cls.table), repr(rr.dbname))
cls.table_create(rr) | python | def table_ensure(cls, rr):
'''
Creates the table if it doesn't exist.
'''
dbs = rr.db_list().run()
if not rr.dbname in dbs:
logging.info('creating rethinkdb database %s', repr(rr.dbname))
rr.db_create(rr.dbname).run()
tables = rr.table_list().run()
if not cls.table in tables:
logging.info(
'creating rethinkdb table %s in database %s',
repr(cls.table), repr(rr.dbname))
cls.table_create(rr) | ['def', 'table_ensure', '(', 'cls', ',', 'rr', ')', ':', 'dbs', '=', 'rr', '.', 'db_list', '(', ')', '.', 'run', '(', ')', 'if', 'not', 'rr', '.', 'dbname', 'in', 'dbs', ':', 'logging', '.', 'info', '(', "'creating rethinkdb database %s'", ',', 'repr', '(', 'rr', '.', 'dbname', ')', ')', 'rr', '.', 'db_create', '(', 'rr', '.', 'dbname', ')', '.', 'run', '(', ')', 'tables', '=', 'rr', '.', 'table_list', '(', ')', '.', 'run', '(', ')', 'if', 'not', 'cls', '.', 'table', 'in', 'tables', ':', 'logging', '.', 'info', '(', "'creating rethinkdb table %s in database %s'", ',', 'repr', '(', 'cls', '.', 'table', ')', ',', 'repr', '(', 'rr', '.', 'dbname', ')', ')', 'cls', '.', 'table_create', '(', 'rr', ')'] | Creates the table if it doesn't exist. | ['Creates', 'the', 'table', 'if', 'it', 'doesn', 't', 'exist', '.'] | train | https://github.com/internetarchive/doublethink/blob/f7fc7da725c9b572d473c717b3dad9af98a7a2b4/doublethink/orm.py#L182-L195 |
4,543 | project-ncl/pnc-cli | pnc_cli/projects.py | get_project | def get_project(id=None, name=None):
"""
Get a specific Project by ID or name
"""
content = get_project_raw(id, name)
if content:
return utils.format_json(content) | python | def get_project(id=None, name=None):
"""
Get a specific Project by ID or name
"""
content = get_project_raw(id, name)
if content:
return utils.format_json(content) | ['def', 'get_project', '(', 'id', '=', 'None', ',', 'name', '=', 'None', ')', ':', 'content', '=', 'get_project_raw', '(', 'id', ',', 'name', ')', 'if', 'content', ':', 'return', 'utils', '.', 'format_json', '(', 'content', ')'] | Get a specific Project by ID or name | ['Get', 'a', 'specific', 'Project', 'by', 'ID', 'or', 'name'] | train | https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/projects.py#L69-L75 |
4,544 | google/grr | grr/server/grr_response_server/databases/mysql_users.py | MySQLDBUsersMixin.ReadGRRUser | def ReadGRRUser(self, username, cursor=None):
"""Reads a user object corresponding to a given name."""
cursor.execute(
"SELECT username, password, ui_mode, canary_mode, user_type "
"FROM grr_users WHERE username_hash = %s", [mysql_utils.Hash(username)])
row = cursor.fetchone()
if row is None:
raise db.UnknownGRRUserError(username)
return self._RowToGRRUser(row) | python | def ReadGRRUser(self, username, cursor=None):
"""Reads a user object corresponding to a given name."""
cursor.execute(
"SELECT username, password, ui_mode, canary_mode, user_type "
"FROM grr_users WHERE username_hash = %s", [mysql_utils.Hash(username)])
row = cursor.fetchone()
if row is None:
raise db.UnknownGRRUserError(username)
return self._RowToGRRUser(row) | ['def', 'ReadGRRUser', '(', 'self', ',', 'username', ',', 'cursor', '=', 'None', ')', ':', 'cursor', '.', 'execute', '(', '"SELECT username, password, ui_mode, canary_mode, user_type "', '"FROM grr_users WHERE username_hash = %s"', ',', '[', 'mysql_utils', '.', 'Hash', '(', 'username', ')', ']', ')', 'row', '=', 'cursor', '.', 'fetchone', '(', ')', 'if', 'row', 'is', 'None', ':', 'raise', 'db', '.', 'UnknownGRRUserError', '(', 'username', ')', 'return', 'self', '.', '_RowToGRRUser', '(', 'row', ')'] | Reads a user object corresponding to a given name. | ['Reads', 'a', 'user', 'object', 'corresponding', 'to', 'a', 'given', 'name', '.'] | train | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_users.py#L106-L116 |
4,545 | fastai/fastai | fastai/vision/image.py | Image.refresh | def refresh(self)->None:
"Apply any logit, flow, or affine transfers that have been sent to the `Image`."
if self._logit_px is not None:
self._px = self._logit_px.sigmoid_()
self._logit_px = None
if self._affine_mat is not None or self._flow is not None:
self._px = _grid_sample(self._px, self.flow, **self.sample_kwargs)
self.sample_kwargs = {}
self._flow = None
return self | python | def refresh(self)->None:
"Apply any logit, flow, or affine transfers that have been sent to the `Image`."
if self._logit_px is not None:
self._px = self._logit_px.sigmoid_()
self._logit_px = None
if self._affine_mat is not None or self._flow is not None:
self._px = _grid_sample(self._px, self.flow, **self.sample_kwargs)
self.sample_kwargs = {}
self._flow = None
return self | ['def', 'refresh', '(', 'self', ')', '->', 'None', ':', 'if', 'self', '.', '_logit_px', 'is', 'not', 'None', ':', 'self', '.', '_px', '=', 'self', '.', '_logit_px', '.', 'sigmoid_', '(', ')', 'self', '.', '_logit_px', '=', 'None', 'if', 'self', '.', '_affine_mat', 'is', 'not', 'None', 'or', 'self', '.', '_flow', 'is', 'not', 'None', ':', 'self', '.', '_px', '=', '_grid_sample', '(', 'self', '.', '_px', ',', 'self', '.', 'flow', ',', '*', '*', 'self', '.', 'sample_kwargs', ')', 'self', '.', 'sample_kwargs', '=', '{', '}', 'self', '.', '_flow', '=', 'None', 'return', 'self'] | Apply any logit, flow, or affine transfers that have been sent to the `Image`. | ['Apply', 'any', 'logit', 'flow', 'or', 'affine', 'transfers', 'that', 'have', 'been', 'sent', 'to', 'the', 'Image', '.'] | train | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/image.py#L126-L135 |
4,546 | Alignak-monitoring/alignak | alignak/external_command.py | ExternalCommandManager.enable_host_svc_notifications | def enable_host_svc_notifications(self, host):
"""Enable services notifications for a host
Format of the line that triggers function call::
ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
"""
for service_id in host.services:
if service_id in self.daemon.services:
service = self.daemon.services[service_id]
self.enable_svc_notifications(service)
self.send_an_element(service.get_update_status_brok()) | python | def enable_host_svc_notifications(self, host):
"""Enable services notifications for a host
Format of the line that triggers function call::
ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
"""
for service_id in host.services:
if service_id in self.daemon.services:
service = self.daemon.services[service_id]
self.enable_svc_notifications(service)
self.send_an_element(service.get_update_status_brok()) | ['def', 'enable_host_svc_notifications', '(', 'self', ',', 'host', ')', ':', 'for', 'service_id', 'in', 'host', '.', 'services', ':', 'if', 'service_id', 'in', 'self', '.', 'daemon', '.', 'services', ':', 'service', '=', 'self', '.', 'daemon', '.', 'services', '[', 'service_id', ']', 'self', '.', 'enable_svc_notifications', '(', 'service', ')', 'self', '.', 'send_an_element', '(', 'service', '.', 'get_update_status_brok', '(', ')', ')'] | Enable services notifications for a host
Format of the line that triggers function call::
ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None | ['Enable', 'services', 'notifications', 'for', 'a', 'host', 'Format', 'of', 'the', 'line', 'that', 'triggers', 'function', 'call', '::'] | train | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L2821-L2835 |
4,547 | thiagopbueno/rddl2tf | rddl2tf/compiler.py | Compiler.compile_action_bound_constraints | def compile_action_bound_constraints(self,
state: Sequence[tf.Tensor]) -> Dict[str, Bounds]:
'''Compiles all actions bounds for the given `state`.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
Returns:
A mapping from action names to a pair of
:obj:`rddl2tf.fluent.TensorFluent` representing
its lower and upper bounds.
'''
scope = self.action_precondition_scope(state)
lower_bounds = self.rddl.domain.action_lower_bound_constraints
upper_bounds = self.rddl.domain.action_upper_bound_constraints
with self.graph.as_default():
with tf.name_scope('action_bound_constraints'):
bounds = {}
for name in self.rddl.domain.action_fluent_ordering:
lower_expr = lower_bounds.get(name)
lower = None
if lower_expr is not None:
with tf.name_scope('lower_bound'):
lower = self._compile_expression(lower_expr, scope)
upper_expr = upper_bounds.get(name)
upper = None
if upper_expr is not None:
with tf.name_scope('upper_bound'):
upper = self._compile_expression(upper_expr, scope)
bounds[name] = (lower, upper)
return bounds | python | def compile_action_bound_constraints(self,
state: Sequence[tf.Tensor]) -> Dict[str, Bounds]:
'''Compiles all actions bounds for the given `state`.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
Returns:
A mapping from action names to a pair of
:obj:`rddl2tf.fluent.TensorFluent` representing
its lower and upper bounds.
'''
scope = self.action_precondition_scope(state)
lower_bounds = self.rddl.domain.action_lower_bound_constraints
upper_bounds = self.rddl.domain.action_upper_bound_constraints
with self.graph.as_default():
with tf.name_scope('action_bound_constraints'):
bounds = {}
for name in self.rddl.domain.action_fluent_ordering:
lower_expr = lower_bounds.get(name)
lower = None
if lower_expr is not None:
with tf.name_scope('lower_bound'):
lower = self._compile_expression(lower_expr, scope)
upper_expr = upper_bounds.get(name)
upper = None
if upper_expr is not None:
with tf.name_scope('upper_bound'):
upper = self._compile_expression(upper_expr, scope)
bounds[name] = (lower, upper)
return bounds | ['def', 'compile_action_bound_constraints', '(', 'self', ',', 'state', ':', 'Sequence', '[', 'tf', '.', 'Tensor', ']', ')', '->', 'Dict', '[', 'str', ',', 'Bounds', ']', ':', 'scope', '=', 'self', '.', 'action_precondition_scope', '(', 'state', ')', 'lower_bounds', '=', 'self', '.', 'rddl', '.', 'domain', '.', 'action_lower_bound_constraints', 'upper_bounds', '=', 'self', '.', 'rddl', '.', 'domain', '.', 'action_upper_bound_constraints', 'with', 'self', '.', 'graph', '.', 'as_default', '(', ')', ':', 'with', 'tf', '.', 'name_scope', '(', "'action_bound_constraints'", ')', ':', 'bounds', '=', '{', '}', 'for', 'name', 'in', 'self', '.', 'rddl', '.', 'domain', '.', 'action_fluent_ordering', ':', 'lower_expr', '=', 'lower_bounds', '.', 'get', '(', 'name', ')', 'lower', '=', 'None', 'if', 'lower_expr', 'is', 'not', 'None', ':', 'with', 'tf', '.', 'name_scope', '(', "'lower_bound'", ')', ':', 'lower', '=', 'self', '.', '_compile_expression', '(', 'lower_expr', ',', 'scope', ')', 'upper_expr', '=', 'upper_bounds', '.', 'get', '(', 'name', ')', 'upper', '=', 'None', 'if', 'upper_expr', 'is', 'not', 'None', ':', 'with', 'tf', '.', 'name_scope', '(', "'upper_bound'", ')', ':', 'upper', '=', 'self', '.', '_compile_expression', '(', 'upper_expr', ',', 'scope', ')', 'bounds', '[', 'name', ']', '=', '(', 'lower', ',', 'upper', ')', 'return', 'bounds'] | Compiles all actions bounds for the given `state`.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
Returns:
A mapping from action names to a pair of
:obj:`rddl2tf.fluent.TensorFluent` representing
its lower and upper bounds. | ['Compiles', 'all', 'actions', 'bounds', 'for', 'the', 'given', 'state', '.'] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L340-L377 |
4,548 | MonashBI/arcana | arcana/utils/base.py | iscontainer | def iscontainer(*items):
"""
Checks whether all the provided items are containers (i.e of class list,
dict, tuple, etc...)
"""
return all(isinstance(i, Iterable) and not isinstance(i, basestring)
for i in items) | python | def iscontainer(*items):
"""
Checks whether all the provided items are containers (i.e of class list,
dict, tuple, etc...)
"""
return all(isinstance(i, Iterable) and not isinstance(i, basestring)
for i in items) | ['def', 'iscontainer', '(', '*', 'items', ')', ':', 'return', 'all', '(', 'isinstance', '(', 'i', ',', 'Iterable', ')', 'and', 'not', 'isinstance', '(', 'i', ',', 'basestring', ')', 'for', 'i', 'in', 'items', ')'] | Checks whether all the provided items are containers (i.e of class list,
dict, tuple, etc...) | ['Checks', 'whether', 'all', 'the', 'provided', 'items', 'are', 'containers', '(', 'i', '.', 'e', 'of', 'class', 'list', 'dict', 'tuple', 'etc', '...', ')'] | train | https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/utils/base.py#L161-L167 |
4,549 | materialsproject/pymatgen | pymatgen/analysis/chemenv/coordination_environments/structure_environments.py | ChemicalEnvironments.minimum_geometries | def minimum_geometries(self, n=None, symmetry_measure_type=None, max_csm=None):
"""
Returns a list of geometries with increasing continuous symmetry measure in this ChemicalEnvironments object
:param n: Number of geometries to be included in the list
:return: list of geometries with increasing continuous symmetry measure in this ChemicalEnvironments object
:raise: ValueError if no coordination geometry is found in this ChemicalEnvironments object
"""
cglist = [cg for cg in self.coord_geoms]
if symmetry_measure_type is None:
csms = np.array([self.coord_geoms[cg]['other_symmetry_measures']['csm_wcs_ctwcc'] for cg in cglist])
else:
csms = np.array([self.coord_geoms[cg]['other_symmetry_measures'][symmetry_measure_type] for cg in cglist])
csmlist = [self.coord_geoms[cg] for cg in cglist]
isorted = np.argsort(csms)
if max_csm is not None:
if n is None:
return [(cglist[ii], csmlist[ii]) for ii in isorted if csms[ii] <= max_csm]
else:
return [(cglist[ii], csmlist[ii]) for ii in isorted[:n] if csms[ii] <= max_csm]
else:
if n is None:
return [(cglist[ii], csmlist[ii]) for ii in isorted]
else:
return [(cglist[ii], csmlist[ii]) for ii in isorted[:n]] | python | def minimum_geometries(self, n=None, symmetry_measure_type=None, max_csm=None):
"""
Returns a list of geometries with increasing continuous symmetry measure in this ChemicalEnvironments object
:param n: Number of geometries to be included in the list
:return: list of geometries with increasing continuous symmetry measure in this ChemicalEnvironments object
:raise: ValueError if no coordination geometry is found in this ChemicalEnvironments object
"""
cglist = [cg for cg in self.coord_geoms]
if symmetry_measure_type is None:
csms = np.array([self.coord_geoms[cg]['other_symmetry_measures']['csm_wcs_ctwcc'] for cg in cglist])
else:
csms = np.array([self.coord_geoms[cg]['other_symmetry_measures'][symmetry_measure_type] for cg in cglist])
csmlist = [self.coord_geoms[cg] for cg in cglist]
isorted = np.argsort(csms)
if max_csm is not None:
if n is None:
return [(cglist[ii], csmlist[ii]) for ii in isorted if csms[ii] <= max_csm]
else:
return [(cglist[ii], csmlist[ii]) for ii in isorted[:n] if csms[ii] <= max_csm]
else:
if n is None:
return [(cglist[ii], csmlist[ii]) for ii in isorted]
else:
return [(cglist[ii], csmlist[ii]) for ii in isorted[:n]] | ['def', 'minimum_geometries', '(', 'self', ',', 'n', '=', 'None', ',', 'symmetry_measure_type', '=', 'None', ',', 'max_csm', '=', 'None', ')', ':', 'cglist', '=', '[', 'cg', 'for', 'cg', 'in', 'self', '.', 'coord_geoms', ']', 'if', 'symmetry_measure_type', 'is', 'None', ':', 'csms', '=', 'np', '.', 'array', '(', '[', 'self', '.', 'coord_geoms', '[', 'cg', ']', '[', "'other_symmetry_measures'", ']', '[', "'csm_wcs_ctwcc'", ']', 'for', 'cg', 'in', 'cglist', ']', ')', 'else', ':', 'csms', '=', 'np', '.', 'array', '(', '[', 'self', '.', 'coord_geoms', '[', 'cg', ']', '[', "'other_symmetry_measures'", ']', '[', 'symmetry_measure_type', ']', 'for', 'cg', 'in', 'cglist', ']', ')', 'csmlist', '=', '[', 'self', '.', 'coord_geoms', '[', 'cg', ']', 'for', 'cg', 'in', 'cglist', ']', 'isorted', '=', 'np', '.', 'argsort', '(', 'csms', ')', 'if', 'max_csm', 'is', 'not', 'None', ':', 'if', 'n', 'is', 'None', ':', 'return', '[', '(', 'cglist', '[', 'ii', ']', ',', 'csmlist', '[', 'ii', ']', ')', 'for', 'ii', 'in', 'isorted', 'if', 'csms', '[', 'ii', ']', '<=', 'max_csm', ']', 'else', ':', 'return', '[', '(', 'cglist', '[', 'ii', ']', ',', 'csmlist', '[', 'ii', ']', ')', 'for', 'ii', 'in', 'isorted', '[', ':', 'n', ']', 'if', 'csms', '[', 'ii', ']', '<=', 'max_csm', ']', 'else', ':', 'if', 'n', 'is', 'None', ':', 'return', '[', '(', 'cglist', '[', 'ii', ']', ',', 'csmlist', '[', 'ii', ']', ')', 'for', 'ii', 'in', 'isorted', ']', 'else', ':', 'return', '[', '(', 'cglist', '[', 'ii', ']', ',', 'csmlist', '[', 'ii', ']', ')', 'for', 'ii', 'in', 'isorted', '[', ':', 'n', ']', ']'] | Returns a list of geometries with increasing continuous symmetry measure in this ChemicalEnvironments object
:param n: Number of geometries to be included in the list
:return: list of geometries with increasing continuous symmetry measure in this ChemicalEnvironments object
:raise: ValueError if no coordination geometry is found in this ChemicalEnvironments object | ['Returns', 'a', 'list', 'of', 'geometries', 'with', 'increasing', 'continuous', 'symmetry', 'measure', 'in', 'this', 'ChemicalEnvironments', 'object', ':', 'param', 'n', ':', 'Number', 'of', 'geometries', 'to', 'be', 'included', 'in', 'the', 'list', ':', 'return', ':', 'list', 'of', 'geometries', 'with', 'increasing', 'continuous', 'symmetry', 'measure', 'in', 'this', 'ChemicalEnvironments', 'object', ':', 'raise', ':', 'ValueError', 'if', 'no', 'coordination', 'geometry', 'is', 'found', 'in', 'this', 'ChemicalEnvironments', 'object'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/chemenv/coordination_environments/structure_environments.py#L1571-L1594 |
4,550 | horejsek/python-webdriverwrapper | webdriverwrapper/wrapper.py | _WebdriverWrapper.close_window | def close_window(self, window_name=None, title=None, url=None):
"""
WebDriver implements only closing current window. If you want to close
some window without having to switch to it, use this method.
"""
main_window_handle = self.current_window_handle
self.switch_to_window(window_name, title, url)
self.close()
self.switch_to_window(main_window_handle) | python | def close_window(self, window_name=None, title=None, url=None):
"""
WebDriver implements only closing current window. If you want to close
some window without having to switch to it, use this method.
"""
main_window_handle = self.current_window_handle
self.switch_to_window(window_name, title, url)
self.close()
self.switch_to_window(main_window_handle) | ['def', 'close_window', '(', 'self', ',', 'window_name', '=', 'None', ',', 'title', '=', 'None', ',', 'url', '=', 'None', ')', ':', 'main_window_handle', '=', 'self', '.', 'current_window_handle', 'self', '.', 'switch_to_window', '(', 'window_name', ',', 'title', ',', 'url', ')', 'self', '.', 'close', '(', ')', 'self', '.', 'switch_to_window', '(', 'main_window_handle', ')'] | WebDriver implements only closing current window. If you want to close
some window without having to switch to it, use this method. | ['WebDriver', 'implements', 'only', 'closing', 'current', 'window', '.', 'If', 'you', 'want', 'to', 'close', 'some', 'window', 'without', 'having', 'to', 'switch', 'to', 'it', 'use', 'this', 'method', '.'] | train | https://github.com/horejsek/python-webdriverwrapper/blob/a492f79ab60ed83d860dd817b6a0961500d7e3f5/webdriverwrapper/wrapper.py#L516-L524 |
4,551 | teepark/greenhouse | greenhouse/backdoor.py | backdoor_handler | def backdoor_handler(clientsock, namespace=None):
"""start an interactive python interpreter on an existing connection
.. note::
this function will block for as long as the connection remains alive.
:param clientsock: the socket on which to serve the interpreter
:type clientsock: :class:`Socket<greenhouse.io.sockets.Socket>`
:param namespace:
the local namespace dict for the interpreter, or None to have the
function create its own empty namespace
:type namespace: dict or None
"""
namespace = {} if namespace is None else namespace.copy()
console = code.InteractiveConsole(namespace)
multiline_statement = []
stdout, stderr = StringIO(), StringIO()
clientsock.sendall(PREAMBLE + "\n" + PS1)
for input_line in _produce_lines(clientsock):
input_line = input_line.rstrip()
if input_line:
input_line = '\n' + input_line
source = '\n'.join(multiline_statement) + input_line
response = ''
with _wrap_stdio(stdout, stderr):
result = console.runsource(source)
response += stdout.getvalue()
err = stderr.getvalue()
if err:
response += err
if err or not result:
multiline_statement = []
response += PS1
else:
multiline_statement.append(input_line)
response += PS2
clientsock.sendall(response) | python | def backdoor_handler(clientsock, namespace=None):
"""start an interactive python interpreter on an existing connection
.. note::
this function will block for as long as the connection remains alive.
:param clientsock: the socket on which to serve the interpreter
:type clientsock: :class:`Socket<greenhouse.io.sockets.Socket>`
:param namespace:
the local namespace dict for the interpreter, or None to have the
function create its own empty namespace
:type namespace: dict or None
"""
namespace = {} if namespace is None else namespace.copy()
console = code.InteractiveConsole(namespace)
multiline_statement = []
stdout, stderr = StringIO(), StringIO()
clientsock.sendall(PREAMBLE + "\n" + PS1)
for input_line in _produce_lines(clientsock):
input_line = input_line.rstrip()
if input_line:
input_line = '\n' + input_line
source = '\n'.join(multiline_statement) + input_line
response = ''
with _wrap_stdio(stdout, stderr):
result = console.runsource(source)
response += stdout.getvalue()
err = stderr.getvalue()
if err:
response += err
if err or not result:
multiline_statement = []
response += PS1
else:
multiline_statement.append(input_line)
response += PS2
clientsock.sendall(response) | ['def', 'backdoor_handler', '(', 'clientsock', ',', 'namespace', '=', 'None', ')', ':', 'namespace', '=', '{', '}', 'if', 'namespace', 'is', 'None', 'else', 'namespace', '.', 'copy', '(', ')', 'console', '=', 'code', '.', 'InteractiveConsole', '(', 'namespace', ')', 'multiline_statement', '=', '[', ']', 'stdout', ',', 'stderr', '=', 'StringIO', '(', ')', ',', 'StringIO', '(', ')', 'clientsock', '.', 'sendall', '(', 'PREAMBLE', '+', '"\\n"', '+', 'PS1', ')', 'for', 'input_line', 'in', '_produce_lines', '(', 'clientsock', ')', ':', 'input_line', '=', 'input_line', '.', 'rstrip', '(', ')', 'if', 'input_line', ':', 'input_line', '=', "'\\n'", '+', 'input_line', 'source', '=', "'\\n'", '.', 'join', '(', 'multiline_statement', ')', '+', 'input_line', 'response', '=', "''", 'with', '_wrap_stdio', '(', 'stdout', ',', 'stderr', ')', ':', 'result', '=', 'console', '.', 'runsource', '(', 'source', ')', 'response', '+=', 'stdout', '.', 'getvalue', '(', ')', 'err', '=', 'stderr', '.', 'getvalue', '(', ')', 'if', 'err', ':', 'response', '+=', 'err', 'if', 'err', 'or', 'not', 'result', ':', 'multiline_statement', '=', '[', ']', 'response', '+=', 'PS1', 'else', ':', 'multiline_statement', '.', 'append', '(', 'input_line', ')', 'response', '+=', 'PS2', 'clientsock', '.', 'sendall', '(', 'response', ')'] | start an interactive python interpreter on an existing connection
.. note::
this function will block for as long as the connection remains alive.
:param sock: the socket on which to serve the interpreter
:type sock: :class:`Socket<greenhouse.io.sockets.Socket>`
:param namespace:
the local namespace dict for the interpreter, or None to have the
function create its own empty namespace
:type namespace: dict or None | ['start', 'an', 'interactive', 'python', 'interpreter', 'on', 'an', 'existing', 'connection'] | train | https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/backdoor.py#L70-L112 |
4,552 | linuxsoftware/ls.joyous | ls/joyous/models/events.py | EventBase._removeContentPanels | def _removeContentPanels(cls, remove):
"""
Remove the panels and so hide the fields named.
"""
if type(remove) is str:
remove = [remove]
cls.content_panels = [panel for panel in cls.content_panels
if getattr(panel, "field_name", None) not in remove] | python | def _removeContentPanels(cls, remove):
"""
Remove the panels and so hide the fields named.
"""
if type(remove) is str:
remove = [remove]
cls.content_panels = [panel for panel in cls.content_panels
if getattr(panel, "field_name", None) not in remove] | ['def', '_removeContentPanels', '(', 'cls', ',', 'remove', ')', ':', 'if', 'type', '(', 'remove', ')', 'is', 'str', ':', 'remove', '=', '[', 'remove', ']', 'cls', '.', 'content_panels', '=', '[', 'panel', 'for', 'panel', 'in', 'cls', '.', 'content_panels', 'if', 'getattr', '(', 'panel', ',', '"field_name"', ',', 'None', ')', 'not', 'in', 'remove', ']'] | Remove the panels and so hide the fields named. | ['Remove', 'the', 'panels', 'and', 'so', 'hide', 'the', 'fields', 'named', '.'] | train | https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/models/events.py#L553-L560 |
4,553 | saltstack/salt | salt/modules/boto_ec2.py | disassociate_eip_address | def disassociate_eip_address(public_ip=None, association_id=None, region=None,
key=None, keyid=None, profile=None):
'''
Disassociate an Elastic IP address from a currently running instance. This
requires exactly one of either 'association_id' or 'public_ip', depending
on whether you’re dealing with a VPC or EC2 Classic address.
public_ip
(string) – Public IP address, for EC2 Classic allocations.
association_id
(string) – Association ID for a VPC-bound EIP.
returns
(bool) - True on success, False on failure.
CLI Example:
.. code-block:: bash
salt myminion boto_ec2.disassociate_eip_address association_id=eipassoc-e3ba2d16
.. versionadded:: 2016.3.0
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
return conn.disassociate_address(public_ip, association_id)
except boto.exception.BotoServerError as e:
log.error(e)
return False | python | def disassociate_eip_address(public_ip=None, association_id=None, region=None,
key=None, keyid=None, profile=None):
'''
Disassociate an Elastic IP address from a currently running instance. This
requires exactly one of either 'association_id' or 'public_ip', depending
on whether you’re dealing with a VPC or EC2 Classic address.
public_ip
(string) – Public IP address, for EC2 Classic allocations.
association_id
(string) – Association ID for a VPC-bound EIP.
returns
(bool) - True on success, False on failure.
CLI Example:
.. code-block:: bash
salt myminion boto_ec2.disassociate_eip_address association_id=eipassoc-e3ba2d16
.. versionadded:: 2016.3.0
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
return conn.disassociate_address(public_ip, association_id)
except boto.exception.BotoServerError as e:
log.error(e)
return False | ['def', 'disassociate_eip_address', '(', 'public_ip', '=', 'None', ',', 'association_id', '=', 'None', ',', 'region', '=', 'None', ',', 'key', '=', 'None', ',', 'keyid', '=', 'None', ',', 'profile', '=', 'None', ')', ':', 'conn', '=', '_get_conn', '(', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', 'try', ':', 'return', 'conn', '.', 'disassociate_address', '(', 'public_ip', ',', 'association_id', ')', 'except', 'boto', '.', 'exception', '.', 'BotoServerError', 'as', 'e', ':', 'log', '.', 'error', '(', 'e', ')', 'return', 'False'] | Disassociate an Elastic IP address from a currently running instance. This
requires exactly one of either 'association_id' or 'public_ip', depending
on whether you’re dealing with a VPC or EC2 Classic address.
public_ip
(string) – Public IP address, for EC2 Classic allocations.
association_id
(string) – Association ID for a VPC-bound EIP.
returns
(bool) - True on success, False on failure.
CLI Example:
.. code-block:: bash
salt myminion boto_ec2.disassociate_eip_address association_id=eipassoc-e3ba2d16
.. versionadded:: 2016.3.0 | ['Disassociate', 'an', 'Elastic', 'IP', 'address', 'from', 'a', 'currently', 'running', 'instance', '.', 'This', 'requires', 'exactly', 'one', 'of', 'either', 'association_id', 'or', 'public_ip', 'depending', 'on', 'whether', 'you’re', 'dealing', 'with', 'a', 'VPC', 'or', 'EC2', 'Classic', 'address', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_ec2.py#L396-L425 |
4,554 | sosreport/sos | sos/plugins/__init__.py | SCLPlugin.add_copy_spec_scl | def add_copy_spec_scl(self, scl, copyspecs):
"""Same as add_copy_spec, except that it prepends path to SCL root
to "copyspecs".
"""
if isinstance(copyspecs, six.string_types):
copyspecs = [copyspecs]
scl_copyspecs = []
for copyspec in copyspecs:
scl_copyspecs.append(self.convert_copyspec_scl(scl, copyspec))
self.add_copy_spec(scl_copyspecs) | python | def add_copy_spec_scl(self, scl, copyspecs):
"""Same as add_copy_spec, except that it prepends path to SCL root
to "copyspecs".
"""
if isinstance(copyspecs, six.string_types):
copyspecs = [copyspecs]
scl_copyspecs = []
for copyspec in copyspecs:
scl_copyspecs.append(self.convert_copyspec_scl(scl, copyspec))
self.add_copy_spec(scl_copyspecs) | ['def', 'add_copy_spec_scl', '(', 'self', ',', 'scl', ',', 'copyspecs', ')', ':', 'if', 'isinstance', '(', 'copyspecs', ',', 'six', '.', 'string_types', ')', ':', 'copyspecs', '=', '[', 'copyspecs', ']', 'scl_copyspecs', '=', '[', ']', 'for', 'copyspec', 'in', 'copyspecs', ':', 'scl_copyspecs', '.', 'append', '(', 'self', '.', 'convert_copyspec_scl', '(', 'scl', ',', 'copyspec', ')', ')', 'self', '.', 'add_copy_spec', '(', 'scl_copyspecs', ')'] | Same as add_copy_spec, except that it prepends path to SCL root
to "copyspecs". | ['Same', 'as', 'add_copy_spec', 'except', 'that', 'it', 'prepends', 'path', 'to', 'SCL', 'root', 'to', 'copyspecs', '.'] | train | https://github.com/sosreport/sos/blob/2ebc04da53dc871c8dd5243567afa4f8592dca29/sos/plugins/__init__.py#L1492-L1501 |
4,555 | CamDavidsonPilon/lifelines | lifelines/fitters/__init__.py | ParametericUnivariateFitter.confidence_interval_hazard_ | def confidence_interval_hazard_(self):
"""
The confidence interval of the hazard.
"""
return self._compute_confidence_bounds_of_transform(self._hazard, self.alpha, self._ci_labels) | python | def confidence_interval_hazard_(self):
"""
The confidence interval of the hazard.
"""
return self._compute_confidence_bounds_of_transform(self._hazard, self.alpha, self._ci_labels) | ['def', 'confidence_interval_hazard_', '(', 'self', ')', ':', 'return', 'self', '.', '_compute_confidence_bounds_of_transform', '(', 'self', '.', '_hazard', ',', 'self', '.', 'alpha', ',', 'self', '.', '_ci_labels', ')'] | The confidence interval of the hazard. | ['The', 'confidence', 'interval', 'of', 'the', 'hazard', '.'] | train | https://github.com/CamDavidsonPilon/lifelines/blob/bdf6be6f1d10eea4c46365ee0ee6a47d8c30edf8/lifelines/fitters/__init__.py#L1032-L1036 |
4,556 | vinci1it2000/schedula | schedula/utils/alg.py | remove_edge_fun | def remove_edge_fun(graph):
"""
Returns a function that removes an edge from the `graph`.
..note:: The out node is removed if it is isolated.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:return:
A function that removes an edge from the `graph`.
:rtype: callable
"""
# Namespace shortcut for speed.
rm_edge, rm_node = graph.remove_edge, graph.remove_node
from networkx import is_isolate
def remove_edge(u, v):
rm_edge(u, v) # Remove the edge.
if is_isolate(graph, v): # Check if v is isolate.
rm_node(v) # Remove the isolate out node.
return remove_edge | python | def remove_edge_fun(graph):
"""
Returns a function that removes an edge from the `graph`.
..note:: The out node is removed if it is isolated.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:return:
A function that removes an edge from the `graph`.
:rtype: callable
"""
# Namespace shortcut for speed.
rm_edge, rm_node = graph.remove_edge, graph.remove_node
from networkx import is_isolate
def remove_edge(u, v):
rm_edge(u, v) # Remove the edge.
if is_isolate(graph, v): # Check if v is isolate.
rm_node(v) # Remove the isolate out node.
return remove_edge | ['def', 'remove_edge_fun', '(', 'graph', ')', ':', '# Namespace shortcut for speed.', 'rm_edge', ',', 'rm_node', '=', 'graph', '.', 'remove_edge', ',', 'graph', '.', 'remove_node', 'from', 'networkx', 'import', 'is_isolate', 'def', 'remove_edge', '(', 'u', ',', 'v', ')', ':', 'rm_edge', '(', 'u', ',', 'v', ')', '# Remove the edge.', 'if', 'is_isolate', '(', 'graph', ',', 'v', ')', ':', '# Check if v is isolate.', 'rm_node', '(', 'v', ')', '# Remove the isolate out node.', 'return', 'remove_edge'] | Returns a function that removes an edge from the `graph`.
..note:: The out node is removed if it is isolated.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:return:
A function that removes an edge from the `graph`.
:rtype: callable | ['Returns', 'a', 'function', 'that', 'removes', 'an', 'edge', 'from', 'the', 'graph', '.'] | train | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/alg.py#L48-L72 |
4,557 | mlperf/training | rnn_translator/pytorch/seq2seq/models/decoder.py | ResidualRecurrentDecoder.package_hidden | def package_hidden(self):
"""
Flattens the hidden state from all LSTM layers into one tensor (for
the sequence generator).
"""
if self.inference:
hidden = torch.cat(tuple(itertools.chain(*self.next_hidden)))
else:
hidden = None
return hidden | python | def package_hidden(self):
"""
Flattens the hidden state from all LSTM layers into one tensor (for
the sequence generator).
"""
if self.inference:
hidden = torch.cat(tuple(itertools.chain(*self.next_hidden)))
else:
hidden = None
return hidden | ['def', 'package_hidden', '(', 'self', ')', ':', 'if', 'self', '.', 'inference', ':', 'hidden', '=', 'torch', '.', 'cat', '(', 'tuple', '(', 'itertools', '.', 'chain', '(', '*', 'self', '.', 'next_hidden', ')', ')', ')', 'else', ':', 'hidden', '=', 'None', 'return', 'hidden'] | Flattens the hidden state from all LSTM layers into one tensor (for
the sequence generator). | ['Flattens', 'the', 'hidden', 'state', 'from', 'all', 'LSTM', 'layers', 'into', 'one', 'tensor', '(', 'for', 'the', 'sequence', 'generator', ')', '.'] | train | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/rnn_translator/pytorch/seq2seq/models/decoder.py#L176-L185 |
4,558 | RomelTorres/alpha_vantage | alpha_vantage/alphavantage.py | AlphaVantage.map_to_matype | def map_to_matype(self, matype):
""" Convert to the alpha vantage math type integer. It returns an
integer corresponding to the type of math to apply to a function. It
raises ValueError if an integer greater than the supported math types
is given.
Keyword Arguments:
matype: The math type of the alpha vantage api. It accepts
integers or a string representing the math type.
* 0 = Simple Moving Average (SMA),
* 1 = Exponential Moving Average (EMA),
* 2 = Weighted Moving Average (WMA),
* 3 = Double Exponential Moving Average (DEMA),
* 4 = Triple Exponential Moving Average (TEMA),
* 5 = Triangular Moving Average (TRIMA),
* 6 = T3 Moving Average,
* 7 = Kaufman Adaptive Moving Average (KAMA),
* 8 = MESA Adaptive Moving Average (MAMA)
"""
# Check if it is an integer or a string
try:
value = int(matype)
if abs(value) > len(AlphaVantage._ALPHA_VANTAGE_MATH_MAP):
raise ValueError("The value {} is not supported".format(value))
except ValueError:
value = AlphaVantage._ALPHA_VANTAGE_MATH_MAP.index(matype)
return value | python | def map_to_matype(self, matype):
""" Convert to the alpha vantage math type integer. It returns an
integer corresponding to the type of math to apply to a function. It
raises ValueError if an integer greater than the supported math types
is given.
Keyword Arguments:
matype: The math type of the alpha vantage api. It accepts
integers or a string representing the math type.
* 0 = Simple Moving Average (SMA),
* 1 = Exponential Moving Average (EMA),
* 2 = Weighted Moving Average (WMA),
* 3 = Double Exponential Moving Average (DEMA),
* 4 = Triple Exponential Moving Average (TEMA),
* 5 = Triangular Moving Average (TRIMA),
* 6 = T3 Moving Average,
* 7 = Kaufman Adaptive Moving Average (KAMA),
* 8 = MESA Adaptive Moving Average (MAMA)
"""
# Check if it is an integer or a string
try:
value = int(matype)
if abs(value) > len(AlphaVantage._ALPHA_VANTAGE_MATH_MAP):
raise ValueError("The value {} is not supported".format(value))
except ValueError:
value = AlphaVantage._ALPHA_VANTAGE_MATH_MAP.index(matype)
return value | ['def', 'map_to_matype', '(', 'self', ',', 'matype', ')', ':', '# Check if it is an integer or a string', 'try', ':', 'value', '=', 'int', '(', 'matype', ')', 'if', 'abs', '(', 'value', ')', '>', 'len', '(', 'AlphaVantage', '.', '_ALPHA_VANTAGE_MATH_MAP', ')', ':', 'raise', 'ValueError', '(', '"The value {} is not supported"', '.', 'format', '(', 'value', ')', ')', 'except', 'ValueError', ':', 'value', '=', 'AlphaVantage', '.', '_ALPHA_VANTAGE_MATH_MAP', '.', 'index', '(', 'matype', ')', 'return', 'value'] | Convert to the alpha vantage math type integer. It returns an
integer corresponding to the type of math to apply to a function. It
raises ValueError if an integer greater than the supported math types
is given.
Keyword Arguments:
matype: The math type of the alpha vantage api. It accepts
integers or a string representing the math type.
* 0 = Simple Moving Average (SMA),
* 1 = Exponential Moving Average (EMA),
* 2 = Weighted Moving Average (WMA),
* 3 = Double Exponential Moving Average (DEMA),
* 4 = Triple Exponential Moving Average (TEMA),
* 5 = Triangular Moving Average (TRIMA),
* 6 = T3 Moving Average,
* 7 = Kaufman Adaptive Moving Average (KAMA),
* 8 = MESA Adaptive Moving Average (MAMA) | ['Convert', 'to', 'the', 'alpha', 'vantage', 'math', 'type', 'integer', '.', 'It', 'returns', 'an', 'integer', 'correspondent', 'to', 'the', 'type', 'of', 'math', 'to', 'apply', 'to', 'a', 'function', '.', 'It', 'raises', 'ValueError', 'if', 'an', 'integer', 'greater', 'than', 'the', 'supported', 'math', 'types', 'is', 'given', '.'] | train | https://github.com/RomelTorres/alpha_vantage/blob/4e0b5057e520e3e3de69cf947301765817290121/alpha_vantage/alphavantage.py#L214-L241 |
4,559 | zarr-developers/zarr | zarr/creation.py | open_like | def open_like(a, path, **kwargs):
"""Open a persistent array like `a`."""
_like_args(a, kwargs)
if isinstance(a, Array):
kwargs.setdefault('fill_value', a.fill_value)
return open_array(path, **kwargs) | python | def open_like(a, path, **kwargs):
"""Open a persistent array like `a`."""
_like_args(a, kwargs)
if isinstance(a, Array):
kwargs.setdefault('fill_value', a.fill_value)
return open_array(path, **kwargs) | ['def', 'open_like', '(', 'a', ',', 'path', ',', '*', '*', 'kwargs', ')', ':', '_like_args', '(', 'a', ',', 'kwargs', ')', 'if', 'isinstance', '(', 'a', ',', 'Array', ')', ':', 'kwargs', '.', 'setdefault', '(', "'fill_value'", ',', 'a', '.', 'fill_value', ')', 'return', 'open_array', '(', 'path', ',', '*', '*', 'kwargs', ')'] | Open a persistent array like `a`. | ['Open', 'a', 'persistent', 'array', 'like', 'a', '.'] | train | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/creation.py#L538-L543 |
4,560 | Godley/MuseParse | MuseParse/classes/ObjectHierarchy/TreeClasses/PartNode.py | PartNode.toLily | def toLily(self):
'''
Method which converts the object instance, its attributes and children to a string of lilypond code
:return: str of lilypond code
'''
self.CheckDivisions()
self.CheckTotals()
staves = self.GetChildrenIndexes()
name = ""
shortname = ""
if hasattr(self.item, "name"):
name = self.item.name
name = helpers.SplitString(name)
if hasattr(self.item, "shortname"):
shortname = helpers.SplitString(self.item.shortname)
variables = self.CalculateVariable(str(self.index), staves)
first_part = ""
for staff, variable in zip(staves, variables):
staffstring = variable
if hasattr(
self.GetChild(staff),
"tab") and self.GetChild(staff).tab:
staffstring += " = \\new TabStaff"
elif hasattr(self.GetChild(staff), "drum") and self.GetChild(staff).drum:
staffstring += " = \\drums"
else:
staffstring += " = \\new Staff"
if len(staves) == 1:
if name != "":
staffstring += " \with {\n"
staffstring += "instrumentName = " + name + " \n"
if shortname != "":
staffstring += "shortInstrumentName = " + \
shortname + " \n"
staffstring += " }"
staffstring += "{" + self.GetChild(staff).toLily() + " }\n\n"
first_part += staffstring
second_part = ""
if len(variables) > 1:
second_part += "\\new StaffGroup "
if name != "":
second_part += "\with {\n"
second_part += "instrumentName = " + name + " \n"
second_part += " }"
second_part += "<<"
second_part += "\n".join(["\\" + var for var in variables])
if len(variables) > 1:
second_part += ">>"
return [first_part, second_part] | python | def toLily(self):
'''
Method which converts the object instance, its attributes and children to a string of lilypond code
:return: str of lilypond code
'''
self.CheckDivisions()
self.CheckTotals()
staves = self.GetChildrenIndexes()
name = ""
shortname = ""
if hasattr(self.item, "name"):
name = self.item.name
name = helpers.SplitString(name)
if hasattr(self.item, "shortname"):
shortname = helpers.SplitString(self.item.shortname)
variables = self.CalculateVariable(str(self.index), staves)
first_part = ""
for staff, variable in zip(staves, variables):
staffstring = variable
if hasattr(
self.GetChild(staff),
"tab") and self.GetChild(staff).tab:
staffstring += " = \\new TabStaff"
elif hasattr(self.GetChild(staff), "drum") and self.GetChild(staff).drum:
staffstring += " = \\drums"
else:
staffstring += " = \\new Staff"
if len(staves) == 1:
if name != "":
staffstring += " \with {\n"
staffstring += "instrumentName = " + name + " \n"
if shortname != "":
staffstring += "shortInstrumentName = " + \
shortname + " \n"
staffstring += " }"
staffstring += "{" + self.GetChild(staff).toLily() + " }\n\n"
first_part += staffstring
second_part = ""
if len(variables) > 1:
second_part += "\\new StaffGroup "
if name != "":
second_part += "\with {\n"
second_part += "instrumentName = " + name + " \n"
second_part += " }"
second_part += "<<"
second_part += "\n".join(["\\" + var for var in variables])
if len(variables) > 1:
second_part += ">>"
return [first_part, second_part] | ['def', 'toLily', '(', 'self', ')', ':', 'self', '.', 'CheckDivisions', '(', ')', 'self', '.', 'CheckTotals', '(', ')', 'staves', '=', 'self', '.', 'GetChildrenIndexes', '(', ')', 'name', '=', '""', 'shortname', '=', '""', 'if', 'hasattr', '(', 'self', '.', 'item', ',', '"name"', ')', ':', 'name', '=', 'self', '.', 'item', '.', 'name', 'name', '=', 'helpers', '.', 'SplitString', '(', 'name', ')', 'if', 'hasattr', '(', 'self', '.', 'item', ',', '"shortname"', ')', ':', 'shortname', '=', 'helpers', '.', 'SplitString', '(', 'self', '.', 'item', '.', 'shortname', ')', 'variables', '=', 'self', '.', 'CalculateVariable', '(', 'str', '(', 'self', '.', 'index', ')', ',', 'staves', ')', 'first_part', '=', '""', 'for', 'staff', ',', 'variable', 'in', 'zip', '(', 'staves', ',', 'variables', ')', ':', 'staffstring', '=', 'variable', 'if', 'hasattr', '(', 'self', '.', 'GetChild', '(', 'staff', ')', ',', '"tab"', ')', 'and', 'self', '.', 'GetChild', '(', 'staff', ')', '.', 'tab', ':', 'staffstring', '+=', '" = \\\\new TabStaff"', 'elif', 'hasattr', '(', 'self', '.', 'GetChild', '(', 'staff', ')', ',', '"drum"', ')', 'and', 'self', '.', 'GetChild', '(', 'staff', ')', '.', 'drum', ':', 'staffstring', '+=', '" = \\\\drums"', 'else', ':', 'staffstring', '+=', '" = \\\\new Staff"', 'if', 'len', '(', 'staves', ')', '==', '1', ':', 'if', 'name', '!=', '""', ':', 'staffstring', '+=', '" \\with {\\n"', 'staffstring', '+=', '"instrumentName = "', '+', 'name', '+', '" \\n"', 'if', 'shortname', '!=', '""', ':', 'staffstring', '+=', '"shortInstrumentName = "', '+', 'shortname', '+', '" \\n"', 'staffstring', '+=', '" }"', 'staffstring', '+=', '"{"', '+', 'self', '.', 'GetChild', '(', 'staff', ')', '.', 'toLily', '(', ')', '+', '" }\\n\\n"', 'first_part', '+=', 'staffstring', 'second_part', '=', '""', 'if', 'len', '(', 'variables', ')', '>', '1', ':', 'second_part', '+=', '"\\\\new StaffGroup "', 'if', 'name', '!=', '""', ':', 'second_part', '+=', '"\\with {\\n"', 'second_part', '+=', '"instrumentName = "', '+', 'name', '+', '" \\n"', 'second_part', '+=', '" }"', 'second_part', '+=', '"<<"', 'second_part', '+=', '"\\n"', '.', 'join', '(', '[', '"\\\\"', '+', 'var', 'for', 'var', 'in', 'variables', ']', ')', 'if', 'len', '(', 'variables', ')', '>', '1', ':', 'second_part', '+=', '">>"', 'return', '[', 'first_part', ',', 'second_part', ']'] | Method which converts the object instance, its attributes and children to a string of lilypond code
:return: str of lilypond code | ['Method', 'which', 'converts', 'the', 'object', 'instance', 'its', 'attributes', 'and', 'children', 'to', 'a', 'string', 'of', 'lilypond', 'code'] | train | https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/ObjectHierarchy/TreeClasses/PartNode.py#L200-L250 |
4,561 | nicolargo/glances | glances/stats.py | GlancesStats._load_plugin | def _load_plugin(self, plugin_script, args=None, config=None):
"""Load the plugin (script), init it and add to the _plugin dict."""
# The key is the plugin name
# for example, the file glances_xxx.py
# generate self._plugins_list["xxx"] = ...
name = plugin_script[len(self.header):-3].lower()
try:
# Import the plugin
plugin = __import__(plugin_script[:-3])
# Init and add the plugin to the dictionary
if name in ('help', 'amps', 'ports', 'folders'):
self._plugins[name] = plugin.Plugin(args=args, config=config)
else:
self._plugins[name] = plugin.Plugin(args=args)
# Set the disable_<name> to False by default
if self.args is not None:
setattr(self.args,
'disable_' + name,
getattr(self.args, 'disable_' + name, False))
except Exception as e:
# If a plugin can not be loaded, display a critical message
# on the console but do not crash
logger.critical("Error while initializing the {} plugin ({})".format(name, e))
logger.error(traceback.format_exc()) | python | def _load_plugin(self, plugin_script, args=None, config=None):
"""Load the plugin (script), init it and add to the _plugin dict."""
# The key is the plugin name
# for example, the file glances_xxx.py
# generate self._plugins_list["xxx"] = ...
name = plugin_script[len(self.header):-3].lower()
try:
# Import the plugin
plugin = __import__(plugin_script[:-3])
# Init and add the plugin to the dictionary
if name in ('help', 'amps', 'ports', 'folders'):
self._plugins[name] = plugin.Plugin(args=args, config=config)
else:
self._plugins[name] = plugin.Plugin(args=args)
# Set the disable_<name> to False by default
if self.args is not None:
setattr(self.args,
'disable_' + name,
getattr(self.args, 'disable_' + name, False))
except Exception as e:
# If a plugin can not be loaded, display a critical message
# on the console but do not crash
logger.critical("Error while initializing the {} plugin ({})".format(name, e))
logger.error(traceback.format_exc()) | ['def', '_load_plugin', '(', 'self', ',', 'plugin_script', ',', 'args', '=', 'None', ',', 'config', '=', 'None', ')', ':', '# The key is the plugin name', '# for example, the file glances_xxx.py', '# generate self._plugins_list["xxx"] = ...', 'name', '=', 'plugin_script', '[', 'len', '(', 'self', '.', 'header', ')', ':', '-', '3', ']', '.', 'lower', '(', ')', 'try', ':', '# Import the plugin', 'plugin', '=', '__import__', '(', 'plugin_script', '[', ':', '-', '3', ']', ')', '# Init and add the plugin to the dictionary', 'if', 'name', 'in', '(', "'help'", ',', "'amps'", ',', "'ports'", ',', "'folders'", ')', ':', 'self', '.', '_plugins', '[', 'name', ']', '=', 'plugin', '.', 'Plugin', '(', 'args', '=', 'args', ',', 'config', '=', 'config', ')', 'else', ':', 'self', '.', '_plugins', '[', 'name', ']', '=', 'plugin', '.', 'Plugin', '(', 'args', '=', 'args', ')', '# Set the disable_<name> to False by default', 'if', 'self', '.', 'args', 'is', 'not', 'None', ':', 'setattr', '(', 'self', '.', 'args', ',', "'disable_'", '+', 'name', ',', 'getattr', '(', 'self', '.', 'args', ',', "'disable_'", '+', 'name', ',', 'False', ')', ')', 'except', 'Exception', 'as', 'e', ':', '# If a plugin can not be log, display a critical message', '# on the console but do not crash', 'logger', '.', 'critical', '(', '"Error while initializing the {} plugin ({})"', '.', 'format', '(', 'name', ',', 'e', ')', ')', 'logger', '.', 'error', '(', 'traceback', '.', 'format_exc', '(', ')', ')'] | Load the plugin (script), init it and add to the _plugin dict. | ['Load', 'the', 'plugin', '(', 'script', ')', 'init', 'it', 'and', 'add', 'to', 'the', '_plugin', 'dict', '.'] | train | https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/stats.py#L106-L129 |
4,562 | amzn/ion-python | amazon/ion/reader_managed.py | _ManagedContext.resolve | def resolve(self, token):
"""Attempts to resolve the :class:`SymbolToken` against the current table.
If the ``text`` is not None, the token is returned, otherwise, a token
in the table is attempted to be retrieved. If no token is found, then
this method will raise.
"""
if token.text is not None:
return token
resolved_token = self.symbol_table.get(token.sid, None)
if resolved_token is None:
raise IonException('Out of range SID: %d' % token.sid)
return resolved_token | python | def resolve(self, token):
"""Attempts to resolve the :class:`SymbolToken` against the current table.
If the ``text`` is not None, the token is returned, otherwise, a token
in the table is attempted to be retrieved. If no token is found, then
this method will raise.
"""
if token.text is not None:
return token
resolved_token = self.symbol_table.get(token.sid, None)
if resolved_token is None:
raise IonException('Out of range SID: %d' % token.sid)
return resolved_token | ['def', 'resolve', '(', 'self', ',', 'token', ')', ':', 'if', 'token', '.', 'text', 'is', 'not', 'None', ':', 'return', 'token', 'resolved_token', '=', 'self', '.', 'symbol_table', '.', 'get', '(', 'token', '.', 'sid', ',', 'None', ')', 'if', 'resolved_token', 'is', 'None', ':', 'raise', 'IonException', '(', "'Out of range SID: %d'", '%', 'token', '.', 'sid', ')', 'return', 'resolved_token'] | Attempts to resolve the :class:`SymbolToken` against the current table.
If the ``text`` is not None, the token is returned, otherwise, a token
in the table is attempted to be retrieved. If no token is found, then
this method will raise. | ['Attempts', 'to', 'resolve', 'the', ':', 'class', ':', 'SymbolToken', 'against', 'the', 'current', 'table', '.'] | train | https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/reader_managed.py#L40-L52 |
4,563 | biolink/biolink-model | metamodel/utils/loadschema.py | DupCheckYamlLoader.map_constructor | def map_constructor(self, loader, node, deep=False):
""" Walk the mapping, recording any duplicate keys.
"""
mapping = {}
for key_node, value_node in node.value:
key = loader.construct_object(key_node, deep=deep)
value = loader.construct_object(value_node, deep=deep)
if key in mapping:
raise ValueError(f"Duplicate key: \"{key}\"")
mapping[key] = value
return mapping | python | def map_constructor(self, loader, node, deep=False):
""" Walk the mapping, recording any duplicate keys.
"""
mapping = {}
for key_node, value_node in node.value:
key = loader.construct_object(key_node, deep=deep)
value = loader.construct_object(value_node, deep=deep)
if key in mapping:
raise ValueError(f"Duplicate key: \"{key}\"")
mapping[key] = value
return mapping | ['def', 'map_constructor', '(', 'self', ',', 'loader', ',', 'node', ',', 'deep', '=', 'False', ')', ':', 'mapping', '=', '{', '}', 'for', 'key_node', ',', 'value_node', 'in', 'node', '.', 'value', ':', 'key', '=', 'loader', '.', 'construct_object', '(', 'key_node', ',', 'deep', '=', 'deep', ')', 'value', '=', 'loader', '.', 'construct_object', '(', 'value_node', ',', 'deep', '=', 'deep', ')', 'if', 'key', 'in', 'mapping', ':', 'raise', 'ValueError', '(', 'f"Duplicate key: \\"{key}\\""', ')', 'mapping', '[', 'key', ']', '=', 'value', 'return', 'mapping'] | Walk the mapping, recording any duplicate keys. | ['Walk', 'the', 'mapping', 'recording', 'any', 'duplicate', 'keys', '.'] | train | https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/utils/loadschema.py#L69-L81 |
4,564 | pybel/pybel | src/pybel/canonicalize.py | group_citation_edges | def group_citation_edges(edges: Iterable[EdgeTuple]) -> Iterable[Tuple[str, Iterable[EdgeTuple]]]:
"""Return an iterator over pairs of citation values and their corresponding edge iterators."""
return itt.groupby(edges, key=_citation_sort_key) | python | def group_citation_edges(edges: Iterable[EdgeTuple]) -> Iterable[Tuple[str, Iterable[EdgeTuple]]]:
"""Return an iterator over pairs of citation values and their corresponding edge iterators."""
return itt.groupby(edges, key=_citation_sort_key) | ['def', 'group_citation_edges', '(', 'edges', ':', 'Iterable', '[', 'EdgeTuple', ']', ')', '->', 'Iterable', '[', 'Tuple', '[', 'str', ',', 'Iterable', '[', 'EdgeTuple', ']', ']', ']', ':', 'return', 'itt', '.', 'groupby', '(', 'edges', ',', 'key', '=', '_citation_sort_key', ')'] | Return an iterator over pairs of citation values and their corresponding edge iterators. | ['Return', 'an', 'iterator', 'over', 'pairs', 'of', 'citation', 'values', 'and', 'their', 'corresponding', 'edge', 'iterators', '.'] | train | https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/canonicalize.py#L204-L206 |
4,565 | datamachine/twx.botapi | twx/botapi/botapi.py | TelegramBot.set_webhook | def set_webhook(self, *args, **kwargs):
"""See :func:`set_webhook`"""
return set_webhook(*args, **self._merge_overrides(**kwargs)).run() | python | def set_webhook(self, *args, **kwargs):
"""See :func:`set_webhook`"""
return set_webhook(*args, **self._merge_overrides(**kwargs)).run() | ['def', 'set_webhook', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'return', 'set_webhook', '(', '*', 'args', ',', '*', '*', 'self', '.', '_merge_overrides', '(', '*', '*', 'kwargs', ')', ')', '.', 'run', '(', ')'] | See :func:`set_webhook` | ['See', ':', 'func', ':', 'set_webhook'] | train | https://github.com/datamachine/twx.botapi/blob/c85184da738169e8f9d6d8e62970540f427c486e/twx/botapi/botapi.py#L4422-L4424 |
4,566 | horejsek/python-webdriverwrapper | webdriverwrapper/wrapper.py | _WebdriverWrapper.html | def html(self):
"""
Returns ``innerHTML`` of the whole page. The page must contain a ``body`` tag.
.. versionadded:: 2.2
"""
try:
body = self.get_elm(tag_name='body')
except selenium_exc.NoSuchElementException:
return None
else:
return body.get_attribute('innerHTML') | python | def html(self):
"""
Returns ``innerHTML`` of the whole page. The page must contain a ``body`` tag.
.. versionadded:: 2.2
"""
try:
body = self.get_elm(tag_name='body')
except selenium_exc.NoSuchElementException:
return None
else:
return body.get_attribute('innerHTML') | ['def', 'html', '(', 'self', ')', ':', 'try', ':', 'body', '=', 'self', '.', 'get_elm', '(', 'tag_name', '=', "'body'", ')', 'except', 'selenium_exc', '.', 'NoSuchElementException', ':', 'return', 'None', 'else', ':', 'return', 'body', '.', 'get_attribute', '(', "'innerHTML'", ')'] | Returns ``innerHTML`` of whole page. On page have to be tag ``body``.
.. versionadded:: 2.2 | ['Returns', 'innerHTML', 'of', 'whole', 'page', '.', 'On', 'page', 'have', 'to', 'be', 'tag', 'body', '.'] | train | https://github.com/horejsek/python-webdriverwrapper/blob/a492f79ab60ed83d860dd817b6a0961500d7e3f5/webdriverwrapper/wrapper.py#L408-L419 |
4,567 | astraw38/lint | lint/validators/pylint_validator.py | no_new_errors | def no_new_errors(new_data, old_data, strict=False):
"""
Pylint Validator that will fail any review if there are
new Pylint errors in it (Pylint message starts with 'E:')
:param new_data:
:param old_data:
:return:
"""
success = True
score = 0
message = ''
if new_data['errors'] > old_data['errors']:
success = False
message = "Failed, More errors than prior runs!({} > {})\n" \
"Average Score: {}".format(new_data['errors'],
old_data['errors'],
new_data['average'])
score = -1
return success, score, message | python | def no_new_errors(new_data, old_data, strict=False):
"""
Pylint Validator that will fail any review if there are
new Pylint errors in it (Pylint message starts with 'E:')
:param new_data:
:param old_data:
:return:
"""
success = True
score = 0
message = ''
if new_data['errors'] > old_data['errors']:
success = False
message = "Failed, More errors than prior runs!({} > {})\n" \
"Average Score: {}".format(new_data['errors'],
old_data['errors'],
new_data['average'])
score = -1
return success, score, message | ['def', 'no_new_errors', '(', 'new_data', ',', 'old_data', ',', 'strict', '=', 'False', ')', ':', 'success', '=', 'True', 'score', '=', '0', 'message', '=', "''", 'if', 'new_data', '[', "'errors'", ']', '>', 'old_data', '[', "'errors'", ']', ':', 'success', '=', 'False', 'message', '=', '"Failed, More errors than prior runs!({} > {})\\n"', '"Average Score: {}"', '.', 'format', '(', 'new_data', '[', "'errors'", ']', ',', 'old_data', '[', "'errors'", ']', ',', 'new_data', '[', "'average'", ']', ')', 'score', '=', '-', '1', 'return', 'success', ',', 'score', ',', 'message'] | Pylint Validator that will fail any review if there are
new Pylint errors in it (Pylint message starts with 'E:')
:param new_data:
:param old_data:
:return: | ['Pylint', 'Validator', 'that', 'will', 'fail', 'any', 'review', 'if', 'there', 'are', 'new', 'Pylint', 'errors', 'in', 'it', '(', 'Pylint', 'message', 'starts', 'with', 'E', ':', ')'] | train | https://github.com/astraw38/lint/blob/162ceefcb812f07d18544aaa887b9ec4f102cfb1/lint/validators/pylint_validator.py#L49-L69 |
4,568 | StackStorm/pybind | pybind/nos/v7_2_0/rbridge_id/ipv6/static_ag_ipv6_config/__init__.py | static_ag_ipv6_config._set_anycast_gateway_mac | def _set_anycast_gateway_mac(self, v, load=False):
"""
Setter method for anycast_gateway_mac, mapped from YANG variable /rbridge_id/ipv6/static_ag_ipv6_config/anycast_gateway_mac (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_anycast_gateway_mac is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_anycast_gateway_mac() directly.
YANG Description: Anycast gateway MAC address.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=anycast_gateway_mac.anycast_gateway_mac, is_container='container', presence=False, yang_name="anycast-gateway-mac", rest_name="anycast-gateway-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Anycast gateway MAC address.'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """anycast_gateway_mac must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=anycast_gateway_mac.anycast_gateway_mac, is_container='container', presence=False, yang_name="anycast-gateway-mac", rest_name="anycast-gateway-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Anycast gateway MAC address.'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)""",
})
self.__anycast_gateway_mac = t
if hasattr(self, '_set'):
self._set() | python | def _set_anycast_gateway_mac(self, v, load=False):
"""
Setter method for anycast_gateway_mac, mapped from YANG variable /rbridge_id/ipv6/static_ag_ipv6_config/anycast_gateway_mac (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_anycast_gateway_mac is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_anycast_gateway_mac() directly.
YANG Description: Anycast gateway MAC address.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=anycast_gateway_mac.anycast_gateway_mac, is_container='container', presence=False, yang_name="anycast-gateway-mac", rest_name="anycast-gateway-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Anycast gateway MAC address.'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """anycast_gateway_mac must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=anycast_gateway_mac.anycast_gateway_mac, is_container='container', presence=False, yang_name="anycast-gateway-mac", rest_name="anycast-gateway-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Anycast gateway MAC address.'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)""",
})
self.__anycast_gateway_mac = t
if hasattr(self, '_set'):
self._set() | ['def', '_set_anycast_gateway_mac', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'anycast_gateway_mac', '.', 'anycast_gateway_mac', ',', 'is_container', '=', "'container'", ',', 'presence', '=', 'False', ',', 'yang_name', '=', '"anycast-gateway-mac"', ',', 'rest_name', '=', '"anycast-gateway-mac"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'Anycast gateway MAC address.'", '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-vrrp'", ',', 'defining_module', '=', "'brocade-vrrp'", ',', 'yang_type', '=', "'container'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""anycast_gateway_mac must be of a type compatible with container"""', ',', "'defined-type'", ':', '"container"', ',', "'generated-type'", ':', '"""YANGDynClass(base=anycast_gateway_mac.anycast_gateway_mac, is_container=\'container\', presence=False, yang_name="anycast-gateway-mac", rest_name="anycast-gateway-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Anycast gateway MAC address.\'}}, namespace=\'urn:brocade.com:mgmt:brocade-vrrp\', defining_module=\'brocade-vrrp\', yang_type=\'container\', is_config=True)"""', ',', '}', ')', 'self', '.', '__anycast_gateway_mac', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')'] | Setter method for anycast_gateway_mac, mapped from YANG variable /rbridge_id/ipv6/static_ag_ipv6_config/anycast_gateway_mac (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_anycast_gateway_mac is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_anycast_gateway_mac() directly.
YANG Description: Anycast gateway MAC address. | ['Setter', 'method', 'for', 'anycast_gateway_mac', 'mapped', 'from', 'YANG', 'variable', '/', 'rbridge_id', '/', 'ipv6', '/', 'static_ag_ipv6_config', '/', 'anycast_gateway_mac', '(', 'container', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_anycast_gateway_mac', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_anycast_gateway_mac', '()', 'directly', '.'] | train | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v7_2_0/rbridge_id/ipv6/static_ag_ipv6_config/__init__.py#L94-L117 |
4,569 | kubernetes-client/python | kubernetes/client/api_client.py | ApiClient.update_params_for_auth | def update_params_for_auth(self, headers, querys, auth_settings):
"""
Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
"""
if not auth_settings:
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
if not auth_setting['value']:
continue
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
querys.append((auth_setting['key'], auth_setting['value']))
else:
raise ValueError(
'Authentication token must be in `query` or `header`'
) | python | def update_params_for_auth(self, headers, querys, auth_settings):
"""
Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
"""
if not auth_settings:
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
if not auth_setting['value']:
continue
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
querys.append((auth_setting['key'], auth_setting['value']))
else:
raise ValueError(
'Authentication token must be in `query` or `header`'
) | ['def', 'update_params_for_auth', '(', 'self', ',', 'headers', ',', 'querys', ',', 'auth_settings', ')', ':', 'if', 'not', 'auth_settings', ':', 'return', 'for', 'auth', 'in', 'auth_settings', ':', 'auth_setting', '=', 'self', '.', 'configuration', '.', 'auth_settings', '(', ')', '.', 'get', '(', 'auth', ')', 'if', 'auth_setting', ':', 'if', 'not', 'auth_setting', '[', "'value'", ']', ':', 'continue', 'elif', 'auth_setting', '[', "'in'", ']', '==', "'header'", ':', 'headers', '[', 'auth_setting', '[', "'key'", ']', ']', '=', 'auth_setting', '[', "'value'", ']', 'elif', 'auth_setting', '[', "'in'", ']', '==', "'query'", ':', 'querys', '.', 'append', '(', '(', 'auth_setting', '[', "'key'", ']', ',', 'auth_setting', '[', "'value'", ']', ')', ')', 'else', ':', 'raise', 'ValueError', '(', "'Authentication token must be in `query` or `header`'", ')'] | Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list. | ['Updates', 'header', 'and', 'query', 'params', 'based', 'on', 'authentication', 'setting', '.'] | train | https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/api_client.py#L500-L523 |
4,570 | fermiPy/fermipy | fermipy/diffuse/residual_cr.py | ResidualCR._compute_counts_from_intensity | def _compute_counts_from_intensity(intensity, bexpcube):
""" Make the counts map from the intensity
"""
data = intensity.data * np.sqrt(bexpcube.data[1:] * bexpcube.data[0:-1])
return HpxMap(data, intensity.hpx) | python | def _compute_counts_from_intensity(intensity, bexpcube):
""" Make the counts map from the intensity
"""
data = intensity.data * np.sqrt(bexpcube.data[1:] * bexpcube.data[0:-1])
return HpxMap(data, intensity.hpx) | ['def', '_compute_counts_from_intensity', '(', 'intensity', ',', 'bexpcube', ')', ':', 'data', '=', 'intensity', '.', 'data', '*', 'np', '.', 'sqrt', '(', 'bexpcube', '.', 'data', '[', '1', ':', ']', '*', 'bexpcube', '.', 'data', '[', '0', ':', '-', '1', ']', ')', 'return', 'HpxMap', '(', 'data', ',', 'intensity', '.', 'hpx', ')'] | Make the counts map from the intensity | ['Make', 'the', 'counts', 'map', 'from', 'the', 'intensity'] | train | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/residual_cr.py#L142-L146 |
4,571 | saltstack/salt | salt/utils/process.py | get_pidfile | def get_pidfile(pidfile):
'''
Return the pid from a pidfile as an integer
'''
try:
with salt.utils.files.fopen(pidfile) as pdf:
pid = pdf.read().strip()
return int(pid)
except (OSError, IOError, TypeError, ValueError):
return -1 | python | def get_pidfile(pidfile):
'''
Return the pid from a pidfile as an integer
'''
try:
with salt.utils.files.fopen(pidfile) as pdf:
pid = pdf.read().strip()
return int(pid)
except (OSError, IOError, TypeError, ValueError):
return -1 | ['def', 'get_pidfile', '(', 'pidfile', ')', ':', 'try', ':', 'with', 'salt', '.', 'utils', '.', 'files', '.', 'fopen', '(', 'pidfile', ')', 'as', 'pdf', ':', 'pid', '=', 'pdf', '.', 'read', '(', ')', '.', 'strip', '(', ')', 'return', 'int', '(', 'pid', ')', 'except', '(', 'OSError', ',', 'IOError', ',', 'TypeError', ',', 'ValueError', ')', ':', 'return', '-', '1'] | Return the pid from a pidfile as an integer | ['Return', 'the', 'pid', 'from', 'a', 'pidfile', 'as', 'an', 'integer'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/process.py#L225-L234 |
4,572 | mikeywaites/flask-arrested | arrested/mixins.py | ObjectMixin.obj | def obj(self):
"""Returns the value of :meth:`ObjectMixin.get_object` and sets a private
property called _obj. This property ensures the logic around allow_none
is enforced across Endpoints using the Object interface.
:raises: :class:`werkzeug.exceptions.BadRequest`
:returns: The result of :meth:`ObjectMixin.get_object`
"""
if not getattr(self, '_obj', None):
self._obj = self.get_object()
if self._obj is None and not self.allow_none:
self.return_error(404)
return self._obj | python | def obj(self):
"""Returns the value of :meth:`ObjectMixin.get_object` and sets a private
property called _obj. This property ensures the logic around allow_none
is enforced across Endpoints using the Object interface.
:raises: :class:`werkzeug.exceptions.BadRequest`
:returns: The result of :meth:`ObjectMixin.get_object`
"""
if not getattr(self, '_obj', None):
self._obj = self.get_object()
if self._obj is None and not self.allow_none:
self.return_error(404)
return self._obj | ['def', 'obj', '(', 'self', ')', ':', 'if', 'not', 'getattr', '(', 'self', ',', "'_obj'", ',', 'None', ')', ':', 'self', '.', '_obj', '=', 'self', '.', 'get_object', '(', ')', 'if', 'self', '.', '_obj', 'is', 'None', 'and', 'not', 'self', '.', 'allow_none', ':', 'self', '.', 'return_error', '(', '404', ')', 'return', 'self', '.', '_obj'] | Returns the value of :meth:`ObjectMixin.get_object` and sets a private
property called _obj. This property ensures the logic around allow_none
is enforced across Endpoints using the Object interface.
:raises: :class:`werkzeug.exceptions.BadRequest`
:returns: The result of :meth:ObjectMixin.get_object` | ['Returns', 'the', 'value', 'of', ':', 'meth', ':', 'ObjectMixin', '.', 'get_object', 'and', 'sets', 'a', 'private', 'property', 'called', '_obj', '.', 'This', 'property', 'ensures', 'the', 'logic', 'around', 'allow_none', 'is', 'enforced', 'across', 'Endpoints', 'using', 'the', 'Object', 'interface', '.'] | train | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/mixins.py#L122-L135 |
4,573 | lambdamusic/Ontospy | ontospy/core/utils.py | try_sort_fmt_opts | def try_sort_fmt_opts(rdf_format_opts_list, uri):
"""reorder fmt options based on uri file type suffix - if available - so to test most likely serialization first when parsing some RDF
NOTE this is not very nice as it is hardcoded and assumes the origin serializations to be this: ['turtle', 'xml', 'n3', 'nt', 'json-ld', 'rdfa']
"""
filename, file_extension = os.path.splitext(uri)
# print(filename, file_extension)
if file_extension == ".ttl" or file_extension == ".turtle":
return ['turtle', 'n3', 'nt', 'json-ld', 'rdfa', 'xml']
elif file_extension == ".xml" or file_extension == ".rdf":
return ['xml', 'turtle', 'n3', 'nt', 'json-ld', 'rdfa']
elif file_extension == ".nt" or file_extension == ".n3":
return ['n3', 'nt', 'turtle', 'xml', 'json-ld', 'rdfa']
elif file_extension == ".json" or file_extension == ".jsonld":
return [
'json-ld',
'rdfa',
'n3',
'nt',
'turtle',
'xml',
]
elif file_extension == ".rdfa":
return [
'rdfa',
'json-ld',
'n3',
'nt',
'turtle',
'xml',
]
else:
return rdf_format_opts_list | python | def try_sort_fmt_opts(rdf_format_opts_list, uri):
"""reorder fmt options based on uri file type suffix - if available - so to test most likely serialization first when parsing some RDF
NOTE this is not very nice as it is hardcoded and assumes the origin serializations to be this: ['turtle', 'xml', 'n3', 'nt', 'json-ld', 'rdfa']
"""
filename, file_extension = os.path.splitext(uri)
# print(filename, file_extension)
if file_extension == ".ttl" or file_extension == ".turtle":
return ['turtle', 'n3', 'nt', 'json-ld', 'rdfa', 'xml']
elif file_extension == ".xml" or file_extension == ".rdf":
return ['xml', 'turtle', 'n3', 'nt', 'json-ld', 'rdfa']
elif file_extension == ".nt" or file_extension == ".n3":
return ['n3', 'nt', 'turtle', 'xml', 'json-ld', 'rdfa']
elif file_extension == ".json" or file_extension == ".jsonld":
return [
'json-ld',
'rdfa',
'n3',
'nt',
'turtle',
'xml',
]
elif file_extension == ".rdfa":
return [
'rdfa',
'json-ld',
'n3',
'nt',
'turtle',
'xml',
]
else:
return rdf_format_opts_list | ['def', 'try_sort_fmt_opts', '(', 'rdf_format_opts_list', ',', 'uri', ')', ':', 'filename', ',', 'file_extension', '=', 'os', '.', 'path', '.', 'splitext', '(', 'uri', ')', '# print(filename, file_extension)', 'if', 'file_extension', '==', '".ttl"', 'or', 'file_extension', '==', '".turtle"', ':', 'return', '[', "'turtle'", ',', "'n3'", ',', "'nt'", ',', "'json-ld'", ',', "'rdfa'", ',', "'xml'", ']', 'elif', 'file_extension', '==', '".xml"', 'or', 'file_extension', '==', '".rdf"', ':', 'return', '[', "'xml'", ',', "'turtle'", ',', "'n3'", ',', "'nt'", ',', "'json-ld'", ',', "'rdfa'", ']', 'elif', 'file_extension', '==', '".nt"', 'or', 'file_extension', '==', '".n3"', ':', 'return', '[', "'n3'", ',', "'nt'", ',', "'turtle'", ',', "'xml'", ',', "'json-ld'", ',', "'rdfa'", ']', 'elif', 'file_extension', '==', '".json"', 'or', 'file_extension', '==', '".jsonld"', ':', 'return', '[', "'json-ld'", ',', "'rdfa'", ',', "'n3'", ',', "'nt'", ',', "'turtle'", ',', "'xml'", ',', ']', 'elif', 'file_extension', '==', '".rdfa"', ':', 'return', '[', "'rdfa'", ',', "'json-ld'", ',', "'n3'", ',', "'nt'", ',', "'turtle'", ',', "'xml'", ',', ']', 'else', ':', 'return', 'rdf_format_opts_list'] | reorder fmt options based on uri file type suffix - if available - so to test most likely serialization first when parsing some RDF
NOTE this is not very nice as it is hardcoded and assumes the origin serializations to be this: ['turtle', 'xml', 'n3', 'nt', 'json-ld', 'rdfa'] | ['reorder', 'fmt', 'options', 'based', 'on', 'uri', 'file', 'type', 'suffix', '-', 'if', 'available', '-', 'so', 'to', 'test', 'most', 'likely', 'serialization', 'first', 'when', 'parsing', 'some', 'RDF'] | train | https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/utils.py#L893-L926 |
4,574 | hollenstein/maspy | maspy/reader.py | applySiiRanking | def applySiiRanking(siiContainer, specfile):
"""Iterates over all Sii entries of a specfile in siiContainer and sorts Sii
elements of the same spectrum according to the score attribute specified in
``siiContainer.info[specfile]['rankAttr']``. Sorted Sii elements are then
ranked according to their sorted position; if multiple Sii have the same
score, they all get the same rank and the next entry's rank is its list position.
:param siiContainer: instance of :class:`maspy.core.SiiContainer`
:param specfile: unambiguous identifier of a ms-run file. Is also used as
a reference to other MasPy file containers.
"""
attr = siiContainer.info[specfile]['rankAttr']
reverse = siiContainer.info[specfile]['rankLargerBetter']
for itemList in listvalues(siiContainer.container[specfile]):
sortList = [(getattr(sii, attr), sii) for sii in itemList]
itemList = [sii for score, sii in sorted(sortList, reverse=reverse)]
#Rank Sii according to their position
lastValue = None
for itemPosition, item in enumerate(itemList, 1):
if getattr(item, attr) != lastValue:
rank = itemPosition
item.rank = rank
lastValue = getattr(item, attr) | python | def applySiiRanking(siiContainer, specfile):
"""Iterates over all Sii entries of a specfile in siiContainer and sorts Sii
elements of the same spectrum according to the score attribute specified in
``siiContainer.info[specfile]['rankAttr']``. Sorted Sii elements are then
ranked according to their sorted position; if multiple Sii have the same
score, they all get the same rank and the next entry's rank is its list position.
:param siiContainer: instance of :class:`maspy.core.SiiContainer`
:param specfile: unambiguous identifier of a ms-run file. Is also used as
a reference to other MasPy file containers.
"""
attr = siiContainer.info[specfile]['rankAttr']
reverse = siiContainer.info[specfile]['rankLargerBetter']
for itemList in listvalues(siiContainer.container[specfile]):
sortList = [(getattr(sii, attr), sii) for sii in itemList]
itemList = [sii for score, sii in sorted(sortList, reverse=reverse)]
#Rank Sii according to their position
lastValue = None
for itemPosition, item in enumerate(itemList, 1):
if getattr(item, attr) != lastValue:
rank = itemPosition
item.rank = rank
lastValue = getattr(item, attr) | ['def', 'applySiiRanking', '(', 'siiContainer', ',', 'specfile', ')', ':', 'attr', '=', 'siiContainer', '.', 'info', '[', 'specfile', ']', '[', "'rankAttr'", ']', 'reverse', '=', 'siiContainer', '.', 'info', '[', 'specfile', ']', '[', "'rankLargerBetter'", ']', 'for', 'itemList', 'in', 'listvalues', '(', 'siiContainer', '.', 'container', '[', 'specfile', ']', ')', ':', 'sortList', '=', '[', '(', 'getattr', '(', 'sii', ',', 'attr', ')', ',', 'sii', ')', 'for', 'sii', 'in', 'itemList', ']', 'itemList', '=', '[', 'sii', 'for', 'score', ',', 'sii', 'in', 'sorted', '(', 'sortList', ',', 'reverse', '=', 'reverse', ')', ']', '#Rank Sii according to their position', 'lastValue', '=', 'None', 'for', 'itemPosition', ',', 'item', 'in', 'enumerate', '(', 'itemList', ',', '1', ')', ':', 'if', 'getattr', '(', 'item', ',', 'attr', ')', '!=', 'lastValue', ':', 'rank', '=', 'itemPosition', 'item', '.', 'rank', '=', 'rank', 'lastValue', '=', 'getattr', '(', 'item', ',', 'attr', ')'] | Iterates over all Sii entries of a specfile in siiContainer and sorts Sii
elements of the same spectrum according to the score attribute specified in
``siiContainer.info[specfile]['rankAttr']``. Sorted Sii elements are then
ranked according to their sorted position; if multiple Sii have the same
score, they all get the same rank and the next entry's rank is its list position.
:param siiContainer: instance of :class:`maspy.core.SiiContainer`
:param specfile: unambiguous identifier of a ms-run file. Is also used as
a reference to other MasPy file containers. | ['Iterates', 'over', 'all', 'Sii', 'entries', 'of', 'a', 'specfile', 'in', 'siiContainer', 'and', 'sorts', 'Sii', 'elements', 'of', 'the', 'same', 'spectrum', 'according', 'to', 'the', 'score', 'attribute', 'specified', 'in', 'siiContainer', '.', 'info', '[', 'specfile', ']', '[', 'rankAttr', ']', '.', 'Sorted', 'Sii', 'elements', 'are', 'then', 'ranked', 'according', 'to', 'their', 'sorted', 'position', 'if', 'multiple', 'Sii', 'have', 'the', 'same', 'score', 'all', 'get', 'the', 'same', 'rank', 'and', 'the', 'next', 'entries', 'rank', 'is', 'its', 'list', 'position', '.'] | train | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/reader.py#L336-L359 |
4,575 | RPi-Distro/python-sense-hat | sense_hat/sense_hat.py | SenseHat._unpack_bin | def _unpack_bin(self, packed):
"""
Internal. Decodes 16 bit RGB565 into python list [R,G,B]
"""
output = struct.unpack('H', packed)
bits16 = output[0]
r = (bits16 & 0xF800) >> 11
g = (bits16 & 0x7E0) >> 5
b = (bits16 & 0x1F)
return [int(r << 3), int(g << 2), int(b << 3)] | python | def _unpack_bin(self, packed):
"""
Internal. Decodes 16 bit RGB565 into python list [R,G,B]
"""
output = struct.unpack('H', packed)
bits16 = output[0]
r = (bits16 & 0xF800) >> 11
g = (bits16 & 0x7E0) >> 5
b = (bits16 & 0x1F)
return [int(r << 3), int(g << 2), int(b << 3)] | ['def', '_unpack_bin', '(', 'self', ',', 'packed', ')', ':', 'output', '=', 'struct', '.', 'unpack', '(', "'H'", ',', 'packed', ')', 'bits16', '=', 'output', '[', '0', ']', 'r', '=', '(', 'bits16', '&', '0xF800', ')', '>>', '11', 'g', '=', '(', 'bits16', '&', '0x7E0', ')', '>>', '5', 'b', '=', '(', 'bits16', '&', '0x1F', ')', 'return', '[', 'int', '(', 'r', '<<', '3', ')', ',', 'int', '(', 'g', '<<', '2', ')', ',', 'int', '(', 'b', '<<', '3', ')', ']'] | Internal. Decodes 16 bit RGB565 into python list [R,G,B] | ['Internal', '.', 'Decodes', '16', 'bit', 'RGB565', 'into', 'python', 'list', '[', 'R', 'G', 'B', ']'] | train | https://github.com/RPi-Distro/python-sense-hat/blob/9a37f0923ce8dbde69514c3b8d58d30de01c9ee7/sense_hat/sense_hat.py#L232-L242 |
4,576 | Fizzadar/pyinfra | pyinfra/modules/postgresql.py | database | def database(
state, host, name,
present=True, owner=None,
template=None, encoding=None,
lc_collate=None, lc_ctype=None, tablespace=None,
connection_limit=None,
# Details for speaking to PostgreSQL via `psql` CLI
postgresql_user=None, postgresql_password=None,
postgresql_host=None, postgresql_port=None,
):
'''
Add/remove PostgreSQL databases.
+ name: name of the database
+ present: whether the database should exist or not
+ owner: the PostgreSQL role that owns the database
+ template: name of the PostgreSQL template to use
+ encoding: encoding of the database
+ lc_collate: lc_collate of the database
+ lc_ctype: lc_ctype of the database
+ tablespace: the tablespace to use for the template
+ connection_limit: the connection limit to apply to the database
+ postgresql_*: global module arguments, see above
Updates:
pyinfra will not attempt to change existing databases - it will either
create or drop databases, but not alter them (if the db exists this
operation will make no changes).
'''
current_databases = host.fact.postgresql_databases(
postgresql_user, postgresql_password,
postgresql_host, postgresql_port,
)
is_present = name in current_databases
if not present:
if is_present:
yield make_execute_psql_command(
'DROP DATABASE {0}'.format(name),
user=postgresql_user,
password=postgresql_password,
host=postgresql_host,
port=postgresql_port,
)
return
# We want the database but it doesn't exist
if present and not is_present:
sql_bits = ['CREATE DATABASE {0}'.format(name)]
for key, value in (
('OWNER', owner),
('TEMPLATE', template),
('ENCODING', encoding),
('LC_COLLATE', lc_collate),
('LC_CTYPE', lc_ctype),
('TABLESPACE', tablespace),
('CONNECTION LIMIT', connection_limit),
):
if value:
sql_bits.append('{0} {1}'.format(key, value))
yield make_execute_psql_command(
' '.join(sql_bits),
user=postgresql_user,
password=postgresql_password,
host=postgresql_host,
port=postgresql_port,
) | python | def database(
state, host, name,
present=True, owner=None,
template=None, encoding=None,
lc_collate=None, lc_ctype=None, tablespace=None,
connection_limit=None,
# Details for speaking to PostgreSQL via `psql` CLI
postgresql_user=None, postgresql_password=None,
postgresql_host=None, postgresql_port=None,
):
'''
Add/remove PostgreSQL databases.
+ name: name of the database
+ present: whether the database should exist or not
+ owner: the PostgreSQL role that owns the database
+ template: name of the PostgreSQL template to use
+ encoding: encoding of the database
+ lc_collate: lc_collate of the database
+ lc_ctype: lc_ctype of the database
+ tablespace: the tablespace to use for the template
+ connection_limit: the connection limit to apply to the database
+ postgresql_*: global module arguments, see above
Updates:
pyinfra will not attempt to change existing databases - it will either
create or drop databases, but not alter them (if the db exists this
operation will make no changes).
'''
current_databases = host.fact.postgresql_databases(
postgresql_user, postgresql_password,
postgresql_host, postgresql_port,
)
is_present = name in current_databases
if not present:
if is_present:
yield make_execute_psql_command(
'DROP DATABASE {0}'.format(name),
user=postgresql_user,
password=postgresql_password,
host=postgresql_host,
port=postgresql_port,
)
return
# We want the database but it doesn't exist
if present and not is_present:
sql_bits = ['CREATE DATABASE {0}'.format(name)]
for key, value in (
('OWNER', owner),
('TEMPLATE', template),
('ENCODING', encoding),
('LC_COLLATE', lc_collate),
('LC_CTYPE', lc_ctype),
('TABLESPACE', tablespace),
('CONNECTION LIMIT', connection_limit),
):
if value:
sql_bits.append('{0} {1}'.format(key, value))
yield make_execute_psql_command(
' '.join(sql_bits),
user=postgresql_user,
password=postgresql_password,
host=postgresql_host,
port=postgresql_port,
) | ['def', 'database', '(', 'state', ',', 'host', ',', 'name', ',', 'present', '=', 'True', ',', 'owner', '=', 'None', ',', 'template', '=', 'None', ',', 'encoding', '=', 'None', ',', 'lc_collate', '=', 'None', ',', 'lc_ctype', '=', 'None', ',', 'tablespace', '=', 'None', ',', 'connection_limit', '=', 'None', ',', '# Details for speaking to PostgreSQL via `psql` CLI', 'postgresql_user', '=', 'None', ',', 'postgresql_password', '=', 'None', ',', 'postgresql_host', '=', 'None', ',', 'postgresql_port', '=', 'None', ',', ')', ':', 'current_databases', '=', 'host', '.', 'fact', '.', 'postgresql_databases', '(', 'postgresql_user', ',', 'postgresql_password', ',', 'postgresql_host', ',', 'postgresql_port', ',', ')', 'is_present', '=', 'name', 'in', 'current_databases', 'if', 'not', 'present', ':', 'if', 'is_present', ':', 'yield', 'make_execute_psql_command', '(', "'DROP DATABASE {0}'", '.', 'format', '(', 'name', ')', ',', 'user', '=', 'postgresql_user', ',', 'password', '=', 'postgresql_password', ',', 'host', '=', 'postgresql_host', ',', 'port', '=', 'postgresql_port', ',', ')', 'return', "# We want the database but it doesn't exist", 'if', 'present', 'and', 'not', 'is_present', ':', 'sql_bits', '=', '[', "'CREATE DATABASE {0}'", '.', 'format', '(', 'name', ')', ']', 'for', 'key', ',', 'value', 'in', '(', '(', "'OWNER'", ',', 'owner', ')', ',', '(', "'TEMPLATE'", ',', 'template', ')', ',', '(', "'ENCODING'", ',', 'encoding', ')', ',', '(', "'LC_COLLATE'", ',', 'lc_collate', ')', ',', '(', "'LC_CTYPE'", ',', 'lc_ctype', ')', ',', '(', "'TABLESPACE'", ',', 'tablespace', ')', ',', '(', "'CONNECTION LIMIT'", ',', 'connection_limit', ')', ',', ')', ':', 'if', 'value', ':', 'sql_bits', '.', 'append', '(', "'{0} {1}'", '.', 'format', '(', 'key', ',', 'value', ')', ')', 'yield', 'make_execute_psql_command', '(', "' '", '.', 'join', '(', 'sql_bits', ')', ',', 'user', '=', 'postgresql_user', ',', 'password', '=', 'postgresql_password', ',', 'host', '=', 'postgresql_host', ',', 'port', '=', 'postgresql_port', ',', ')'] | Add/remove PostgreSQL databases.
+ name: name of the database
+ present: whether the database should exist or not
+ owner: the PostgreSQL role that owns the database
+ template: name of the PostgreSQL template to use
+ encoding: encoding of the database
+ lc_collate: lc_collate of the database
+ lc_ctype: lc_ctype of the database
+ tablespace: the tablespace to use for the template
+ connection_limit: the connection limit to apply to the database
+ postgresql_*: global module arguments, see above
Updates:
pyinfra will not attempt to change existing databases - it will either
create or drop databases, but not alter them (if the db exists this
operation will make no changes). | ['Add', '/', 'remove', 'PostgreSQL', 'databases', '.'] | train | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/postgresql.py#L127-L197 |
4,577 | skyfielders/python-skyfield | skyfield/units.py | Distance.to | def to(self, unit):
"""Convert this distance to the given AstroPy unit."""
from astropy.units import au
return (self.au * au).to(unit) | python | def to(self, unit):
"""Convert this distance to the given AstroPy unit."""
from astropy.units import au
return (self.au * au).to(unit) | ['def', 'to', '(', 'self', ',', 'unit', ')', ':', 'from', 'astropy', '.', 'units', 'import', 'au', 'return', '(', 'self', '.', 'au', '*', 'au', ')', '.', 'to', '(', 'unit', ')'] | Convert this distance to the given AstroPy unit. | ['Convert', 'this', 'distance', 'to', 'the', 'given', 'AstroPy', 'unit', '.'] | train | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/units.py#L76-L79 |
4,578 | yangl1996/libpagure | libpagure/libpagure.py | Pagure.project_branches | def project_branches(self):
"""
List all branches associated with a repository.
:return:
"""
request_url = "{}git/branches".format(self.create_basic_url())
return_value = self._call_api(request_url)
return return_value['branches'] | python | def project_branches(self):
"""
List all branches associated with a repository.
:return:
"""
request_url = "{}git/branches".format(self.create_basic_url())
return_value = self._call_api(request_url)
return return_value['branches'] | ['def', 'project_branches', '(', 'self', ')', ':', 'request_url', '=', '"{}git/branches"', '.', 'format', '(', 'self', '.', 'create_basic_url', '(', ')', ')', 'return_value', '=', 'self', '.', '_call_api', '(', 'request_url', ')', 'return', 'return_value', '[', "'branches'", ']'] | List all branches associated with a repository.
:return: | ['List', 'all', 'branches', 'associated', 'with', 'a', 'repository', '.', ':', 'return', ':'] | train | https://github.com/yangl1996/libpagure/blob/dd96ed29142407463790c66ed321984a6ea7465a/libpagure/libpagure.py#L661-L670 |
4,579 | amoffat/sh | sh.py | ob_is_tty | def ob_is_tty(ob):
""" checks if an object (like a file-like object) is a tty. """
fileno = get_fileno(ob)
is_tty = False
if fileno:
is_tty = os.isatty(fileno)
return is_tty | python | def ob_is_tty(ob):
""" checks if an object (like a file-like object) is a tty. """
fileno = get_fileno(ob)
is_tty = False
if fileno:
is_tty = os.isatty(fileno)
return is_tty | ['def', 'ob_is_tty', '(', 'ob', ')', ':', 'fileno', '=', 'get_fileno', '(', 'ob', ')', 'is_tty', '=', 'False', 'if', 'fileno', ':', 'is_tty', '=', 'os', '.', 'isatty', '(', 'fileno', ')', 'return', 'is_tty'] | checks if an object (like a file-like object) is a tty. | ['checks', 'if', 'an', 'object', '(', 'like', 'a', 'file', '-', 'like', 'object', ')', 'is', 'a', 'tty', '.'] | train | https://github.com/amoffat/sh/blob/858adf0c682af4c40e41f34d6926696b7a5d3b12/sh.py#L997-L1003 |
4,580 | rackerlabs/simpl | simpl/config.py | Option.add_argument | def add_argument(self, parser, permissive=False, **override_kwargs):
"""Add an option to a an argparse parser.
:keyword permissive: when true, build a parser that does not validate
required arguments.
"""
kwargs = {}
required = None
if self.kwargs:
kwargs = copy.copy(self.kwargs)
if 'env' in kwargs and 'help' in kwargs:
kwargs['help'] = "%s (or set %s)" % (kwargs['help'],
kwargs['env'])
if permissive:
required = kwargs.pop('required', None)
try:
del kwargs['env']
except KeyError:
pass
try:
del kwargs['ini_section']
except KeyError:
pass
# allow custom and/or exclusive argument groups
if kwargs.get('group') or kwargs.get('mutually_exclusive'):
groupname = kwargs.pop('group', None) or kwargs.get('dest')
mutually_exclusive = kwargs.pop('mutually_exclusive', None)
if not groupname:
raise NoGroupForOption(
"%s requires either 'group' or 'dest'." % self)
description = kwargs.pop('group_description', None)
exists = [grp for grp in parser._action_groups
if grp.title == groupname]
if exists:
group = exists[0]
if description and not group.description:
group.description = description
else:
group = parser.add_argument_group(
title=groupname, description=description)
if mutually_exclusive:
if not required:
required = kwargs.pop('required', None)
mutexg_title = '%s mutually-exclusive-group' % groupname
exists = [grp for grp in group._mutually_exclusive_groups
if grp.title == mutexg_title]
if exists:
group = exists[0]
else:
# extend parent group
group = group.add_mutually_exclusive_group(
required=required)
group.title = mutexg_title
# if any in the same group are required, then the
# mutually exclusive group should be set to required
if required and not group.required:
group.required = required
self._mutexgroup = group
self._action = group.add_argument(*self.args, **kwargs)
return
kwargs.update(override_kwargs)
self._action = parser.add_argument(*self.args, **kwargs) | python | def add_argument(self, parser, permissive=False, **override_kwargs):
"""Add an option to a an argparse parser.
:keyword permissive: when true, build a parser that does not validate
required arguments.
"""
kwargs = {}
required = None
if self.kwargs:
kwargs = copy.copy(self.kwargs)
if 'env' in kwargs and 'help' in kwargs:
kwargs['help'] = "%s (or set %s)" % (kwargs['help'],
kwargs['env'])
if permissive:
required = kwargs.pop('required', None)
try:
del kwargs['env']
except KeyError:
pass
try:
del kwargs['ini_section']
except KeyError:
pass
# allow custom and/or exclusive argument groups
if kwargs.get('group') or kwargs.get('mutually_exclusive'):
groupname = kwargs.pop('group', None) or kwargs.get('dest')
mutually_exclusive = kwargs.pop('mutually_exclusive', None)
if not groupname:
raise NoGroupForOption(
"%s requires either 'group' or 'dest'." % self)
description = kwargs.pop('group_description', None)
exists = [grp for grp in parser._action_groups
if grp.title == groupname]
if exists:
group = exists[0]
if description and not group.description:
group.description = description
else:
group = parser.add_argument_group(
title=groupname, description=description)
if mutually_exclusive:
if not required:
required = kwargs.pop('required', None)
mutexg_title = '%s mutually-exclusive-group' % groupname
exists = [grp for grp in group._mutually_exclusive_groups
if grp.title == mutexg_title]
if exists:
group = exists[0]
else:
# extend parent group
group = group.add_mutually_exclusive_group(
required=required)
group.title = mutexg_title
# if any in the same group are required, then the
# mutually exclusive group should be set to required
if required and not group.required:
group.required = required
self._mutexgroup = group
self._action = group.add_argument(*self.args, **kwargs)
return
kwargs.update(override_kwargs)
self._action = parser.add_argument(*self.args, **kwargs) | ['def', 'add_argument', '(', 'self', ',', 'parser', ',', 'permissive', '=', 'False', ',', '*', '*', 'override_kwargs', ')', ':', 'kwargs', '=', '{', '}', 'required', '=', 'None', 'if', 'self', '.', 'kwargs', ':', 'kwargs', '=', 'copy', '.', 'copy', '(', 'self', '.', 'kwargs', ')', 'if', "'env'", 'in', 'kwargs', 'and', "'help'", 'in', 'kwargs', ':', 'kwargs', '[', "'help'", ']', '=', '"%s (or set %s)"', '%', '(', 'kwargs', '[', "'help'", ']', ',', 'kwargs', '[', "'env'", ']', ')', 'if', 'permissive', ':', 'required', '=', 'kwargs', '.', 'pop', '(', "'required'", ',', 'None', ')', 'try', ':', 'del', 'kwargs', '[', "'env'", ']', 'except', 'KeyError', ':', 'pass', 'try', ':', 'del', 'kwargs', '[', "'ini_section'", ']', 'except', 'KeyError', ':', 'pass', '# allow custom and/or exclusive argument groups', 'if', 'kwargs', '.', 'get', '(', "'group'", ')', 'or', 'kwargs', '.', 'get', '(', "'mutually_exclusive'", ')', ':', 'groupname', '=', 'kwargs', '.', 'pop', '(', "'group'", ',', 'None', ')', 'or', 'kwargs', '.', 'get', '(', "'dest'", ')', 'mutually_exclusive', '=', 'kwargs', '.', 'pop', '(', "'mutually_exclusive'", ',', 'None', ')', 'if', 'not', 'groupname', ':', 'raise', 'NoGroupForOption', '(', '"%s requires either \'group\' or \'dest\'."', '%', 'self', ')', 'description', '=', 'kwargs', '.', 'pop', '(', "'group_description'", ',', 'None', ')', 'exists', '=', '[', 'grp', 'for', 'grp', 'in', 'parser', '.', '_action_groups', 'if', 'grp', '.', 'title', '==', 'groupname', ']', 'if', 'exists', ':', 'group', '=', 'exists', '[', '0', ']', 'if', 'description', 'and', 'not', 'group', '.', 'description', ':', 'group', '.', 'description', '=', 'description', 'else', ':', 'group', '=', 'parser', '.', 'add_argument_group', '(', 'title', '=', 'groupname', ',', 'description', '=', 'description', ')', 'if', 'mutually_exclusive', ':', 'if', 'not', 'required', ':', 'required', '=', 'kwargs', '.', 'pop', '(', "'required'", ',', 'None', ')', 'mutexg_title', '=', "'%s mutually-exclusive-group'", '%', 'groupname', 'exists', '=', '[', 'grp', 'for', 'grp', 'in', 'group', '.', '_mutually_exclusive_groups', 'if', 'grp', '.', 'title', '==', 'mutexg_title', ']', 'if', 'exists', ':', 'group', '=', 'exists', '[', '0', ']', 'else', ':', '# extend parent group', 'group', '=', 'group', '.', 'add_mutually_exclusive_group', '(', 'required', '=', 'required', ')', 'group', '.', 'title', '=', 'mutexg_title', '# if any in the same group are required, then the', '# mutually exclusive group should be set to required', 'if', 'required', 'and', 'not', 'group', '.', 'required', ':', 'group', '.', 'required', '=', 'required', 'self', '.', '_mutexgroup', '=', 'group', 'self', '.', '_action', '=', 'group', '.', 'add_argument', '(', '*', 'self', '.', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'kwargs', '.', 'update', '(', 'override_kwargs', ')', 'self', '.', '_action', '=', 'parser', '.', 'add_argument', '(', '*', 'self', '.', 'args', ',', '*', '*', 'kwargs', ')'] | Add an option to a an argparse parser.
:keyword permissive: when true, build a parser that does not validate
required arguments. | ['Add', 'an', 'option', 'to', 'a', 'an', 'argparse', 'parser', '.'] | train | https://github.com/rackerlabs/simpl/blob/60ed3336a931cd6a7a7246e60f26165d9dc7c99c/simpl/config.py#L248-L311 |
4,581 | jonhadfield/python-hosts | python_hosts/hosts.py | HostsEntry.get_entry_type | def get_entry_type(hosts_entry=None):
"""
Return the type of entry for the given hosts file line
:param hosts_entry: A line from the hosts file
:return: 'comment' | 'blank' | 'ipv4' | 'ipv6'
"""
if hosts_entry and isinstance(hosts_entry, str):
entry = hosts_entry.strip()
if not entry or not entry[0] or entry[0] == "\n":
return 'blank'
if entry[0] == "#":
return 'comment'
entry_chunks = entry.split()
if is_ipv6(entry_chunks[0]):
return 'ipv6'
if is_ipv4(entry_chunks[0]):
return 'ipv4' | python | def get_entry_type(hosts_entry=None):
"""
Return the type of entry for the given hosts file line
:param hosts_entry: A line from the hosts file
:return: 'comment' | 'blank' | 'ipv4' | 'ipv6'
"""
if hosts_entry and isinstance(hosts_entry, str):
entry = hosts_entry.strip()
if not entry or not entry[0] or entry[0] == "\n":
return 'blank'
if entry[0] == "#":
return 'comment'
entry_chunks = entry.split()
if is_ipv6(entry_chunks[0]):
return 'ipv6'
if is_ipv4(entry_chunks[0]):
return 'ipv4' | ['def', 'get_entry_type', '(', 'hosts_entry', '=', 'None', ')', ':', 'if', 'hosts_entry', 'and', 'isinstance', '(', 'hosts_entry', ',', 'str', ')', ':', 'entry', '=', 'hosts_entry', '.', 'strip', '(', ')', 'if', 'not', 'entry', 'or', 'not', 'entry', '[', '0', ']', 'or', 'entry', '[', '0', ']', '==', '"\\n"', ':', 'return', "'blank'", 'if', 'entry', '[', '0', ']', '==', '"#"', ':', 'return', "'comment'", 'entry_chunks', '=', 'entry', '.', 'split', '(', ')', 'if', 'is_ipv6', '(', 'entry_chunks', '[', '0', ']', ')', ':', 'return', "'ipv6'", 'if', 'is_ipv4', '(', 'entry_chunks', '[', '0', ']', ')', ':', 'return', "'ipv4'"] | Return the type of entry for the line of hosts file passed
:param hosts_entry: A line from the hosts file
:return: 'comment' | 'blank' | 'ipv4' | 'ipv6' | ['Return', 'the', 'type', 'of', 'entry', 'for', 'the', 'line', 'of', 'hosts', 'file', 'passed', ':', 'param', 'hosts_entry', ':', 'A', 'line', 'from', 'the', 'hosts', 'file', ':', 'return', ':', 'comment', '|', 'blank', '|', 'ipv4', '|', 'ipv6'] | train | https://github.com/jonhadfield/python-hosts/blob/9ccaa8edc63418a91f10bf732b26070f21dd2ad0/python_hosts/hosts.py#L90-L106 |
4,582 | readbeyond/aeneas | aeneas/tree.py | Tree.level_at_index | def level_at_index(self, index):
"""
Return the list of nodes at level ``index``,
in DFS order.
:param int index: the index
:rtype: list of :class:`~aeneas.tree.Tree`
:raises: ValueError if the given ``index`` is not valid
"""
if not isinstance(index, int):
self.log_exc(u"Index is not an integer", None, True, TypeError)
levels = self.levels
if (index < 0) or (index >= len(levels)):
self.log_exc(u"The given level index '%d' is not valid" % (index), None, True, ValueError)
return self.levels[index] | python | def level_at_index(self, index):
"""
Return the list of nodes at level ``index``,
in DFS order.
:param int index: the index
:rtype: list of :class:`~aeneas.tree.Tree`
:raises: ValueError if the given ``index`` is not valid
"""
if not isinstance(index, int):
self.log_exc(u"Index is not an integer", None, True, TypeError)
levels = self.levels
if (index < 0) or (index >= len(levels)):
self.log_exc(u"The given level index '%d' is not valid" % (index), None, True, ValueError)
return self.levels[index] | ['def', 'level_at_index', '(', 'self', ',', 'index', ')', ':', 'if', 'not', 'isinstance', '(', 'index', ',', 'int', ')', ':', 'self', '.', 'log_exc', '(', 'u"Index is not an integer"', ',', 'None', ',', 'True', ',', 'TypeError', ')', 'levels', '=', 'self', '.', 'levels', 'if', '(', 'index', '<', '0', ')', 'or', '(', 'index', '>=', 'len', '(', 'levels', ')', ')', ':', 'self', '.', 'log_exc', '(', 'u"The given level index \'%d\' is not valid"', '%', '(', 'index', ')', ',', 'None', ',', 'True', ',', 'ValueError', ')', 'return', 'self', '.', 'levels', '[', 'index', ']'] | Return the list of nodes at level ``index``,
in DFS order.
:param int index: the index
:rtype: list of :class:`~aeneas.tree.Tree`
:raises: ValueError if the given ``index`` is not valid | ['Return', 'the', 'list', 'of', 'nodes', 'at', 'level', 'index', 'in', 'DFS', 'order', '.'] | train | https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/tree.py#L425-L440 |
4,583 | glut23/webvtt-py | webvtt/parsers.py | TextBasedParser._parse_timeframe_line | def _parse_timeframe_line(self, line):
"""Parse timeframe line and return start and end timestamps."""
tf = self._validate_timeframe_line(line)
if not tf:
raise MalformedCaptionError('Invalid time format')
return tf.group(1), tf.group(2) | python | def _parse_timeframe_line(self, line):
"""Parse timeframe line and return start and end timestamps."""
tf = self._validate_timeframe_line(line)
if not tf:
raise MalformedCaptionError('Invalid time format')
return tf.group(1), tf.group(2) | ['def', '_parse_timeframe_line', '(', 'self', ',', 'line', ')', ':', 'tf', '=', 'self', '.', '_validate_timeframe_line', '(', 'line', ')', 'if', 'not', 'tf', ':', 'raise', 'MalformedCaptionError', '(', "'Invalid time format'", ')', 'return', 'tf', '.', 'group', '(', '1', ')', ',', 'tf', '.', 'group', '(', '2', ')'] | Parse timeframe line and return start and end timestamps. | ['Parse', 'timeframe', 'line', 'and', 'return', 'start', 'and', 'end', 'timestamps', '.'] | train | https://github.com/glut23/webvtt-py/blob/7b4da0123c2e2afaf31402107528721eb1d3d481/webvtt/parsers.py#L49-L55 |
4,584 | linuxsoftware/ls.joyous | ls/joyous/models/events.py | RecurringEventPage.status | def status(self):
"""
The current status of the event (started, finished or pending).
"""
myNow = timezone.localtime(timezone=self.tz)
daysDelta = dt.timedelta(days=self.num_days - 1)
# NB: postponements can be created after the until date
# so ignore that
todayStart = getAwareDatetime(myNow.date(), dt.time.min, self.tz)
eventStart, event = self.__afterOrPostponedTo(todayStart - daysDelta)
if eventStart is None:
return "finished"
eventFinish = getAwareDatetime(eventStart.date() + daysDelta,
event.time_to, self.tz)
if event.time_from is None:
eventStart += _1day
if eventStart < myNow < eventFinish:
# if there are two occurrences on the same day then we may miss
# that one of them has started
return "started"
if (self.repeat.until and eventFinish < myNow and
self.__afterOrPostponedTo(myNow)[0] is None):
# only just wound up, the last occurrence was earlier today
return "finished" | python | def status(self):
"""
The current status of the event (started, finished or pending).
"""
myNow = timezone.localtime(timezone=self.tz)
daysDelta = dt.timedelta(days=self.num_days - 1)
# NB: postponements can be created after the until date
# so ignore that
todayStart = getAwareDatetime(myNow.date(), dt.time.min, self.tz)
eventStart, event = self.__afterOrPostponedTo(todayStart - daysDelta)
if eventStart is None:
return "finished"
eventFinish = getAwareDatetime(eventStart.date() + daysDelta,
event.time_to, self.tz)
if event.time_from is None:
eventStart += _1day
if eventStart < myNow < eventFinish:
# if there are two occurrences on the same day then we may miss
# that one of them has started
return "started"
if (self.repeat.until and eventFinish < myNow and
self.__afterOrPostponedTo(myNow)[0] is None):
# only just wound up, the last occurrence was earlier today
return "finished" | ['def', 'status', '(', 'self', ')', ':', 'myNow', '=', 'timezone', '.', 'localtime', '(', 'timezone', '=', 'self', '.', 'tz', ')', 'daysDelta', '=', 'dt', '.', 'timedelta', '(', 'days', '=', 'self', '.', 'num_days', '-', '1', ')', '# NB: postponements can be created after the until date', '# so ignore that', 'todayStart', '=', 'getAwareDatetime', '(', 'myNow', '.', 'date', '(', ')', ',', 'dt', '.', 'time', '.', 'min', ',', 'self', '.', 'tz', ')', 'eventStart', ',', 'event', '=', 'self', '.', '__afterOrPostponedTo', '(', 'todayStart', '-', 'daysDelta', ')', 'if', 'eventStart', 'is', 'None', ':', 'return', '"finished"', 'eventFinish', '=', 'getAwareDatetime', '(', 'eventStart', '.', 'date', '(', ')', '+', 'daysDelta', ',', 'event', '.', 'time_to', ',', 'self', '.', 'tz', ')', 'if', 'event', '.', 'time_from', 'is', 'None', ':', 'eventStart', '+=', '_1day', 'if', 'eventStart', '<', 'myNow', '<', 'eventFinish', ':', '# if there are two occurences on the same day then we may miss', '# that one of them has started', 'return', '"started"', 'if', '(', 'self', '.', 'repeat', '.', 'until', 'and', 'eventFinish', '<', 'myNow', 'and', 'self', '.', '__afterOrPostponedTo', '(', 'myNow', ')', '[', '0', ']', 'is', 'None', ')', ':', '# only just wound up, the last occurence was earlier today', 'return', '"finished"'] | The current status of the event (started, finished or pending). | ['The', 'current', 'status', 'of', 'the', 'event', '(', 'started', 'finished', 'or', 'pending', ')', '.'] | train | https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/models/events.py#L1003-L1026 |
4,585 | benoitkugler/abstractDataLibrary | pyDLib/Core/formats.py | abstractSearch.in_date | def in_date(objet, pattern):
""" abstractSearch dans une date datetime.date"""
if objet:
pattern = re.sub(" ", '', pattern)
objet_str = abstractRender.date(objet)
return bool(re.search(pattern, objet_str))
return False | python | def in_date(objet, pattern):
""" abstractSearch dans une date datetime.date"""
if objet:
pattern = re.sub(" ", '', pattern)
objet_str = abstractRender.date(objet)
return bool(re.search(pattern, objet_str))
return False | ['def', 'in_date', '(', 'objet', ',', 'pattern', ')', ':', 'if', 'objet', ':', 'pattern', '=', 're', '.', 'sub', '(', '" "', ',', "''", ',', 'pattern', ')', 'objet_str', '=', 'abstractRender', '.', 'date', '(', 'objet', ')', 'return', 'bool', '(', 're', '.', 'search', '(', 'pattern', ',', 'objet_str', ')', ')', 'return', 'False'] | abstractSearch dans une date datetime.date | ['abstractSearch', 'dans', 'une', 'date', 'datetime', '.', 'date'] | train | https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/formats.py#L204-L210 |
4,586 | libChEBI/libChEBIpy | libchebipy/_parsers.py | get_mol_filename | def get_mol_filename(chebi_id):
'''Returns mol file'''
mol = get_mol(chebi_id)
if mol is None:
return None
file_descriptor, mol_filename = tempfile.mkstemp(str(chebi_id) +
'_', '.mol')
mol_file = open(mol_filename, 'w')
mol_file.write(mol.get_structure())
mol_file.close()
os.close(file_descriptor)
return mol_filename | python | def get_mol_filename(chebi_id):
'''Returns mol file'''
mol = get_mol(chebi_id)
if mol is None:
return None
file_descriptor, mol_filename = tempfile.mkstemp(str(chebi_id) +
'_', '.mol')
mol_file = open(mol_filename, 'w')
mol_file.write(mol.get_structure())
mol_file.close()
os.close(file_descriptor)
return mol_filename | ['def', 'get_mol_filename', '(', 'chebi_id', ')', ':', 'mol', '=', 'get_mol', '(', 'chebi_id', ')', 'if', 'mol', 'is', 'None', ':', 'return', 'None', 'file_descriptor', ',', 'mol_filename', '=', 'tempfile', '.', 'mkstemp', '(', 'str', '(', 'chebi_id', ')', '+', "'_'", ',', "'.mol'", ')', 'mol_file', '=', 'open', '(', 'mol_filename', ',', "'w'", ')', 'mol_file', '.', 'write', '(', 'mol', '.', 'get_structure', '(', ')', ')', 'mol_file', '.', 'close', '(', ')', 'os', '.', 'close', '(', 'file_descriptor', ')', 'return', 'mol_filename'] | Returns mol file | ['Returns', 'mol', 'file'] | train | https://github.com/libChEBI/libChEBIpy/blob/89f223a91f518619d5e3910070d283adcac1626e/libchebipy/_parsers.py#L570-L584 |
4,587 | pyeve/cerberus | cerberus/validator.py | BareValidator.__validate_definitions | def __validate_definitions(self, definitions, field):
""" Validate a field's value against its defined rules. """
def validate_rule(rule):
validator = self.__get_rule_handler('validate', rule)
return validator(definitions.get(rule, None), field, value)
definitions = self._resolve_rules_set(definitions)
value = self.document[field]
rules_queue = [
x
for x in self.priority_validations
if x in definitions or x in self.mandatory_validations
]
rules_queue.extend(
x for x in self.mandatory_validations if x not in rules_queue
)
rules_queue.extend(
x
for x in definitions
if x not in rules_queue
and x not in self.normalization_rules
and x not in ('allow_unknown', 'require_all', 'meta', 'required')
)
self._remaining_rules = rules_queue
while self._remaining_rules:
rule = self._remaining_rules.pop(0)
try:
result = validate_rule(rule)
# TODO remove on next breaking release
if result:
break
except _SchemaRuleTypeError:
break
self._drop_remaining_rules() | python | def __validate_definitions(self, definitions, field):
""" Validate a field's value against its defined rules. """
def validate_rule(rule):
validator = self.__get_rule_handler('validate', rule)
return validator(definitions.get(rule, None), field, value)
definitions = self._resolve_rules_set(definitions)
value = self.document[field]
rules_queue = [
x
for x in self.priority_validations
if x in definitions or x in self.mandatory_validations
]
rules_queue.extend(
x for x in self.mandatory_validations if x not in rules_queue
)
rules_queue.extend(
x
for x in definitions
if x not in rules_queue
and x not in self.normalization_rules
and x not in ('allow_unknown', 'require_all', 'meta', 'required')
)
self._remaining_rules = rules_queue
while self._remaining_rules:
rule = self._remaining_rules.pop(0)
try:
result = validate_rule(rule)
# TODO remove on next breaking release
if result:
break
except _SchemaRuleTypeError:
break
self._drop_remaining_rules() | ['def', '__validate_definitions', '(', 'self', ',', 'definitions', ',', 'field', ')', ':', 'def', 'validate_rule', '(', 'rule', ')', ':', 'validator', '=', 'self', '.', '__get_rule_handler', '(', "'validate'", ',', 'rule', ')', 'return', 'validator', '(', 'definitions', '.', 'get', '(', 'rule', ',', 'None', ')', ',', 'field', ',', 'value', ')', 'definitions', '=', 'self', '.', '_resolve_rules_set', '(', 'definitions', ')', 'value', '=', 'self', '.', 'document', '[', 'field', ']', 'rules_queue', '=', '[', 'x', 'for', 'x', 'in', 'self', '.', 'priority_validations', 'if', 'x', 'in', 'definitions', 'or', 'x', 'in', 'self', '.', 'mandatory_validations', ']', 'rules_queue', '.', 'extend', '(', 'x', 'for', 'x', 'in', 'self', '.', 'mandatory_validations', 'if', 'x', 'not', 'in', 'rules_queue', ')', 'rules_queue', '.', 'extend', '(', 'x', 'for', 'x', 'in', 'definitions', 'if', 'x', 'not', 'in', 'rules_queue', 'and', 'x', 'not', 'in', 'self', '.', 'normalization_rules', 'and', 'x', 'not', 'in', '(', "'allow_unknown'", ',', "'require_all'", ',', "'meta'", ',', "'required'", ')', ')', 'self', '.', '_remaining_rules', '=', 'rules_queue', 'while', 'self', '.', '_remaining_rules', ':', 'rule', '=', 'self', '.', '_remaining_rules', '.', 'pop', '(', '0', ')', 'try', ':', 'result', '=', 'validate_rule', '(', 'rule', ')', '# TODO remove on next breaking release', 'if', 'result', ':', 'break', 'except', '_SchemaRuleTypeError', ':', 'break', 'self', '.', '_drop_remaining_rules', '(', ')'] | Validate a field's value against its defined rules. | ['Validate', 'a', 'field', 's', 'value', 'against', 'its', 'defined', 'rules', '.'] | train | https://github.com/pyeve/cerberus/blob/688a67a4069e88042ed424bda7be0f4fa5fc3910/cerberus/validator.py#L1036-L1073 |
4,588 | iotile/coretools | iotileship/iotile/ship/autobuild/ship_file.py | autobuild_shiparchive | def autobuild_shiparchive(src_file):
"""Create a ship file archive containing a yaml_file and its dependencies.
If yaml_file depends on any build products as external files, it must
be a jinja2 template that references the file using the find_product
filter so that we can figure out where those build products are going
and create the right dependency graph.
Args:
src_file (str): The path to the input yaml file template. This
file path must end .yaml.tpl and is rendered into a .yaml
file and then packaged into a .ship file along with any
products that are referenced in it.
"""
if not src_file.endswith('.tpl'):
raise BuildError("You must pass a .tpl file to autobuild_shiparchive", src_file=src_file)
env = Environment(tools=[])
family = ArchitectureGroup('module_settings.json')
target = family.platform_independent_target()
resolver = ProductResolver.Create()
#Parse through build_step products to see what needs to imported
custom_steps = []
for build_step in family.tile.find_products('build_step'):
full_file_name = build_step.split(":")[0]
basename = os.path.splitext(os.path.basename(full_file_name))[0]
folder = os.path.dirname(full_file_name)
fileobj, pathname, description = imp.find_module(basename, [folder])
mod = imp.load_module(basename, fileobj, pathname, description)
full_file_name, class_name = build_step.split(":")
custom_steps.append((class_name, getattr(mod, class_name)))
env['CUSTOM_STEPS'] = custom_steps
env["RESOLVER"] = resolver
base_name, tpl_name = _find_basename(src_file)
yaml_name = tpl_name[:-4]
ship_name = yaml_name[:-5] + ".ship"
output_dir = target.build_dirs()['output']
build_dir = os.path.join(target.build_dirs()['build'], base_name)
tpl_path = os.path.join(build_dir, tpl_name)
yaml_path = os.path.join(build_dir, yaml_name)
ship_path = os.path.join(build_dir, ship_name)
output_path = os.path.join(output_dir, ship_name)
# We want to build up all related files in
# <build_dir>/<ship archive_folder>/
# - First copy the template yaml over
# - Then render the template yaml
# - Then find all products referenced in the template yaml and copy them
# - over
# - Then build a .ship archive
# - Then copy that archive into output_dir
ship_deps = [yaml_path]
env.Command([tpl_path], [src_file], Copy("$TARGET", "$SOURCE"))
prod_deps = _find_product_dependencies(src_file, resolver)
env.Command([yaml_path], [tpl_path], action=Action(template_shipfile_action, "Rendering $TARGET"))
for prod in prod_deps:
dest_file = os.path.join(build_dir, prod.short_name)
ship_deps.append(dest_file)
env.Command([dest_file], [prod.full_path], Copy("$TARGET", "$SOURCE"))
env.Command([ship_path], [ship_deps], action=Action(create_shipfile, "Archiving Ship Recipe $TARGET"))
env.Command([output_path], [ship_path], Copy("$TARGET", "$SOURCE")) | python | def autobuild_shiparchive(src_file):
"""Create a ship file archive containing a yaml_file and its dependencies.
If yaml_file depends on any build products as external files, it must
be a jinja2 template that references the file using the find_product
filter so that we can figure out where those build products are going
and create the right dependency graph.
Args:
src_file (str): The path to the input yaml file template. This
file path must end .yaml.tpl and is rendered into a .yaml
file and then packaged into a .ship file along with any
products that are referenced in it.
"""
if not src_file.endswith('.tpl'):
raise BuildError("You must pass a .tpl file to autobuild_shiparchive", src_file=src_file)
env = Environment(tools=[])
family = ArchitectureGroup('module_settings.json')
target = family.platform_independent_target()
resolver = ProductResolver.Create()
#Parse through build_step products to see what needs to imported
custom_steps = []
for build_step in family.tile.find_products('build_step'):
full_file_name = build_step.split(":")[0]
basename = os.path.splitext(os.path.basename(full_file_name))[0]
folder = os.path.dirname(full_file_name)
fileobj, pathname, description = imp.find_module(basename, [folder])
mod = imp.load_module(basename, fileobj, pathname, description)
full_file_name, class_name = build_step.split(":")
custom_steps.append((class_name, getattr(mod, class_name)))
env['CUSTOM_STEPS'] = custom_steps
env["RESOLVER"] = resolver
base_name, tpl_name = _find_basename(src_file)
yaml_name = tpl_name[:-4]
ship_name = yaml_name[:-5] + ".ship"
output_dir = target.build_dirs()['output']
build_dir = os.path.join(target.build_dirs()['build'], base_name)
tpl_path = os.path.join(build_dir, tpl_name)
yaml_path = os.path.join(build_dir, yaml_name)
ship_path = os.path.join(build_dir, ship_name)
output_path = os.path.join(output_dir, ship_name)
# We want to build up all related files in
# <build_dir>/<ship archive_folder>/
# - First copy the template yaml over
# - Then render the template yaml
# - Then find all products referenced in the template yaml and copy them
# - over
# - Then build a .ship archive
# - Then copy that archive into output_dir
ship_deps = [yaml_path]
env.Command([tpl_path], [src_file], Copy("$TARGET", "$SOURCE"))
prod_deps = _find_product_dependencies(src_file, resolver)
env.Command([yaml_path], [tpl_path], action=Action(template_shipfile_action, "Rendering $TARGET"))
for prod in prod_deps:
dest_file = os.path.join(build_dir, prod.short_name)
ship_deps.append(dest_file)
env.Command([dest_file], [prod.full_path], Copy("$TARGET", "$SOURCE"))
env.Command([ship_path], [ship_deps], action=Action(create_shipfile, "Archiving Ship Recipe $TARGET"))
env.Command([output_path], [ship_path], Copy("$TARGET", "$SOURCE")) | ['def', 'autobuild_shiparchive', '(', 'src_file', ')', ':', 'if', 'not', 'src_file', '.', 'endswith', '(', "'.tpl'", ')', ':', 'raise', 'BuildError', '(', '"You must pass a .tpl file to autobuild_shiparchive"', ',', 'src_file', '=', 'src_file', ')', 'env', '=', 'Environment', '(', 'tools', '=', '[', ']', ')', 'family', '=', 'ArchitectureGroup', '(', "'module_settings.json'", ')', 'target', '=', 'family', '.', 'platform_independent_target', '(', ')', 'resolver', '=', 'ProductResolver', '.', 'Create', '(', ')', '#Parse through build_step products to see what needs to imported', 'custom_steps', '=', '[', ']', 'for', 'build_step', 'in', 'family', '.', 'tile', '.', 'find_products', '(', "'build_step'", ')', ':', 'full_file_name', '=', 'build_step', '.', 'split', '(', '":"', ')', '[', '0', ']', 'basename', '=', 'os', '.', 'path', '.', 'splitext', '(', 'os', '.', 'path', '.', 'basename', '(', 'full_file_name', ')', ')', '[', '0', ']', 'folder', '=', 'os', '.', 'path', '.', 'dirname', '(', 'full_file_name', ')', 'fileobj', ',', 'pathname', ',', 'description', '=', 'imp', '.', 'find_module', '(', 'basename', ',', '[', 'folder', ']', ')', 'mod', '=', 'imp', '.', 'load_module', '(', 'basename', ',', 'fileobj', ',', 'pathname', ',', 'description', ')', 'full_file_name', ',', 'class_name', '=', 'build_step', '.', 'split', '(', '":"', ')', 'custom_steps', '.', 'append', '(', '(', 'class_name', ',', 'getattr', '(', 'mod', ',', 'class_name', ')', ')', ')', 'env', '[', "'CUSTOM_STEPS'", ']', '=', 'custom_steps', 'env', '[', '"RESOLVER"', ']', '=', 'resolver', 'base_name', ',', 'tpl_name', '=', '_find_basename', '(', 'src_file', ')', 'yaml_name', '=', 'tpl_name', '[', ':', '-', '4', ']', 'ship_name', '=', 'yaml_name', '[', ':', '-', '5', ']', '+', '".ship"', 'output_dir', '=', 'target', '.', 'build_dirs', '(', ')', '[', "'output'", ']', 'build_dir', '=', 'os', '.', 'path', '.', 'join', '(', 'target', '.', 'build_dirs', '(', ')', '[', "'build'", ']', ',', 'base_name', ')', 'tpl_path', '=', 'os', '.', 'path', '.', 'join', '(', 'build_dir', ',', 'tpl_name', ')', 'yaml_path', '=', 'os', '.', 'path', '.', 'join', '(', 'build_dir', ',', 'yaml_name', ')', 'ship_path', '=', 'os', '.', 'path', '.', 'join', '(', 'build_dir', ',', 'ship_name', ')', 'output_path', '=', 'os', '.', 'path', '.', 'join', '(', 'output_dir', ',', 'ship_name', ')', '# We want to build up all related files in', '# <build_dir>/<ship archive_folder>/', '# - First copy the template yaml over', '# - Then render the template yaml', '# - Then find all products referenced in the template yaml and copy them', '# - over', '# - Then build a .ship archive', '# - Then copy that archive into output_dir', 'ship_deps', '=', '[', 'yaml_path', ']', 'env', '.', 'Command', '(', '[', 'tpl_path', ']', ',', '[', 'src_file', ']', ',', 'Copy', '(', '"$TARGET"', ',', '"$SOURCE"', ')', ')', 'prod_deps', '=', '_find_product_dependencies', '(', 'src_file', ',', 'resolver', ')', 'env', '.', 'Command', '(', '[', 'yaml_path', ']', ',', '[', 'tpl_path', ']', ',', 'action', '=', 'Action', '(', 'template_shipfile_action', ',', '"Rendering $TARGET"', ')', ')', 'for', 'prod', 'in', 'prod_deps', ':', 'dest_file', '=', 'os', '.', 'path', '.', 'join', '(', 'build_dir', ',', 'prod', '.', 'short_name', ')', 'ship_deps', '.', 'append', '(', 'dest_file', ')', 'env', '.', 'Command', '(', '[', 'dest_file', ']', ',', '[', 'prod', '.', 'full_path', ']', ',', 'Copy', '(', '"$TARGET"', ',', '"$SOURCE"', ')', ')', 'env', 
'.', 'Command', '(', '[', 'ship_path', ']', ',', '[', 'ship_deps', ']', ',', 'action', '=', 'Action', '(', 'create_shipfile', ',', '"Archiving Ship Recipe $TARGET"', ')', ')', 'env', '.', 'Command', '(', '[', 'output_path', ']', ',', '[', 'ship_path', ']', ',', 'Copy', '(', '"$TARGET"', ',', '"$SOURCE"', ')', ')'] | Create a ship file archive containing a yaml_file and its dependencies.
If yaml_file depends on any build products as external files, it must
be a jinja2 template that references the file using the find_product
filter so that we can figure out where those build products are going
and create the right dependency graph.
Args:
src_file (str): The path to the input yaml file template. This
file path must end .yaml.tpl and is rendered into a .yaml
file and then packaged into a .ship file along with any
products that are referenced in it. | ['Create', 'a', 'ship', 'file', 'archive', 'containing', 'a', 'yaml_file', 'and', 'its', 'dependencies', '.'] | train | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileship/iotile/ship/autobuild/ship_file.py#L15-L88 |
4,589 | richardkiss/pycoin | pycoin/crack/bip32.py | ascend_bip32 | def ascend_bip32(bip32_pub_node, secret_exponent, child):
"""
Given a BIP32Node with public derivation child "child" with a known private key,
return the secret exponent for the bip32_pub_node.
"""
i_as_bytes = struct.pack(">l", child)
sec = public_pair_to_sec(bip32_pub_node.public_pair(), compressed=True)
data = sec + i_as_bytes
I64 = hmac.HMAC(key=bip32_pub_node._chain_code, msg=data, digestmod=hashlib.sha512).digest()
I_left_as_exponent = from_bytes_32(I64[:32])
return (secret_exponent - I_left_as_exponent) % bip32_pub_node._generator.order() | python | def ascend_bip32(bip32_pub_node, secret_exponent, child):
"""
Given a BIP32Node with public derivation child "child" with a known private key,
return the secret exponent for the bip32_pub_node.
"""
i_as_bytes = struct.pack(">l", child)
sec = public_pair_to_sec(bip32_pub_node.public_pair(), compressed=True)
data = sec + i_as_bytes
I64 = hmac.HMAC(key=bip32_pub_node._chain_code, msg=data, digestmod=hashlib.sha512).digest()
I_left_as_exponent = from_bytes_32(I64[:32])
return (secret_exponent - I_left_as_exponent) % bip32_pub_node._generator.order() | ['def', 'ascend_bip32', '(', 'bip32_pub_node', ',', 'secret_exponent', ',', 'child', ')', ':', 'i_as_bytes', '=', 'struct', '.', 'pack', '(', '">l"', ',', 'child', ')', 'sec', '=', 'public_pair_to_sec', '(', 'bip32_pub_node', '.', 'public_pair', '(', ')', ',', 'compressed', '=', 'True', ')', 'data', '=', 'sec', '+', 'i_as_bytes', 'I64', '=', 'hmac', '.', 'HMAC', '(', 'key', '=', 'bip32_pub_node', '.', '_chain_code', ',', 'msg', '=', 'data', ',', 'digestmod', '=', 'hashlib', '.', 'sha512', ')', '.', 'digest', '(', ')', 'I_left_as_exponent', '=', 'from_bytes_32', '(', 'I64', '[', ':', '32', ']', ')', 'return', '(', 'secret_exponent', '-', 'I_left_as_exponent', ')', '%', 'bip32_pub_node', '.', '_generator', '.', 'order', '(', ')'] | Given a BIP32Node with public derivation child "child" with a known private key,
return the secret exponent for the bip32_pub_node. | ['Given', 'a', 'BIP32Node', 'with', 'public', 'derivation', 'child', 'child', 'with', 'a', 'known', 'private', 'key', 'return', 'the', 'secret', 'exponent', 'for', 'the', 'bip32_pub_node', '.'] | train | https://github.com/richardkiss/pycoin/blob/1e8d0d9fe20ce0347b97847bb529cd1bd84c7442/pycoin/crack/bip32.py#L9-L19 |
4,590 | DataONEorg/d1_python | lib_common/src/d1_common/types/scripts/pyxbgen_all.py | GenerateBindings.run_pyxbgen | def run_pyxbgen(self, args):
"""Args:
args:
"""
cmd = 'pyxbgen {}'.format(' '.join(args))
print(cmd)
os.system(cmd) | python | def run_pyxbgen(self, args):
"""Args:
args:
"""
cmd = 'pyxbgen {}'.format(' '.join(args))
print(cmd)
os.system(cmd) | ['def', 'run_pyxbgen', '(', 'self', ',', 'args', ')', ':', 'cmd', '=', "'pyxbgen {}'", '.', 'format', '(', "' '", '.', 'join', '(', 'args', ')', ')', 'print', '(', 'cmd', ')', 'os', '.', 'system', '(', 'cmd', ')'] | Args:
args: | ['Args', ':'] | train | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/types/scripts/pyxbgen_all.py#L149-L157 |
4,591 | cuihantao/andes | andes/models/base.py | ModelBase.link_bus | def link_bus(self, bus_idx):
"""
Return the indices of elements linking the given buses
:param bus_idx:
:return:
"""
ret = []
if not self._config['is_series']:
self.log(
'link_bus function is not valid for non-series model <{}>'.
format(self.name))
return []
if isinstance(bus_idx, (int, float, str)):
bus_idx = [bus_idx]
fkey = list(self._ac.keys())
if 'bus' in fkey:
fkey.remove('bus')
nfkey = len(fkey)
fkey_val = [self.__dict__[i] for i in fkey]
for item in bus_idx:
idx = []
key = []
for i in range(self.n):
for j in range(nfkey):
if fkey_val[j][i] == item:
idx.append(self.idx[i])
key.append(fkey[j])
# <= 1 terminal should connect to the same bus
break
if len(idx) == 0:
idx = None
if len(key) == 0:
key = None
ret.append((idx, key))
return ret | python | def link_bus(self, bus_idx):
"""
Return the indices of elements linking the given buses
:param bus_idx:
:return:
"""
ret = []
if not self._config['is_series']:
self.log(
'link_bus function is not valid for non-series model <{}>'.
format(self.name))
return []
if isinstance(bus_idx, (int, float, str)):
bus_idx = [bus_idx]
fkey = list(self._ac.keys())
if 'bus' in fkey:
fkey.remove('bus')
nfkey = len(fkey)
fkey_val = [self.__dict__[i] for i in fkey]
for item in bus_idx:
idx = []
key = []
for i in range(self.n):
for j in range(nfkey):
if fkey_val[j][i] == item:
idx.append(self.idx[i])
key.append(fkey[j])
# <= 1 terminal should connect to the same bus
break
if len(idx) == 0:
idx = None
if len(key) == 0:
key = None
ret.append((idx, key))
return ret | ['def', 'link_bus', '(', 'self', ',', 'bus_idx', ')', ':', 'ret', '=', '[', ']', 'if', 'not', 'self', '.', '_config', '[', "'is_series'", ']', ':', 'self', '.', 'log', '(', "'link_bus function is not valid for non-series model <{}>'", '.', 'format', '(', 'self', '.', 'name', ')', ')', 'return', '[', ']', 'if', 'isinstance', '(', 'bus_idx', ',', '(', 'int', ',', 'float', ',', 'str', ')', ')', ':', 'bus_idx', '=', '[', 'bus_idx', ']', 'fkey', '=', 'list', '(', 'self', '.', '_ac', '.', 'keys', '(', ')', ')', 'if', "'bus'", 'in', 'fkey', ':', 'fkey', '.', 'remove', '(', "'bus'", ')', 'nfkey', '=', 'len', '(', 'fkey', ')', 'fkey_val', '=', '[', 'self', '.', '__dict__', '[', 'i', ']', 'for', 'i', 'in', 'fkey', ']', 'for', 'item', 'in', 'bus_idx', ':', 'idx', '=', '[', ']', 'key', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'self', '.', 'n', ')', ':', 'for', 'j', 'in', 'range', '(', 'nfkey', ')', ':', 'if', 'fkey_val', '[', 'j', ']', '[', 'i', ']', '==', 'item', ':', 'idx', '.', 'append', '(', 'self', '.', 'idx', '[', 'i', ']', ')', 'key', '.', 'append', '(', 'fkey', '[', 'j', ']', ')', '# <= 1 terminal should connect to the same bus', 'break', 'if', 'len', '(', 'idx', ')', '==', '0', ':', 'idx', '=', 'None', 'if', 'len', '(', 'key', ')', '==', '0', ':', 'key', '=', 'None', 'ret', '.', 'append', '(', '(', 'idx', ',', 'key', ')', ')', 'return', 'ret'] | Return the indices of elements linking the given buses
:param bus_idx:
:return: | ['Return', 'the', 'indices', 'of', 'elements', 'linking', 'the', 'given', 'buses'] | train | https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/models/base.py#L1398-L1441 |
4,592 | saltstack/salt | salt/states/azurearm_compute.py | availability_set_present | def availability_set_present(name, resource_group, tags=None, platform_update_domain_count=None,
platform_fault_domain_count=None, virtual_machines=None, sku=None, connection_auth=None,
**kwargs):
'''
.. versionadded:: 2019.2.0
Ensure an availability set exists.
:param name:
Name of the availability set.
:param resource_group:
The resource group assigned to the availability set.
:param tags:
A dictionary of strings can be passed as tag metadata to the availability set object.
:param platform_update_domain_count:
An optional parameter which indicates groups of virtual machines and underlying physical hardware that can be
rebooted at the same time.
:param platform_fault_domain_count:
An optional parameter which defines the group of virtual machines that share a common power source and network
switch.
:param virtual_machines:
A list of names of existing virtual machines to be included in the availability set.
:param sku:
The availability set SKU, which specifies whether the availability set is managed or not. Possible values are
'Aligned' or 'Classic'. An 'Aligned' availability set is managed, 'Classic' is not.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure availability set exists:
azurearm_compute.availability_set_present:
- name: aset1
- resource_group: group1
- platform_update_domain_count: 5
- platform_fault_domain_count: 3
- sku: aligned
- tags:
contact_name: Elmer Fudd Gantry
- connection_auth: {{ profile }}
- require:
- azurearm_resource: Ensure resource group exists
'''
ret = {
'name': name,
'result': False,
'comment': '',
'changes': {}
}
if not isinstance(connection_auth, dict):
ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
return ret
if sku:
sku = {'name': sku.capitalize()}
aset = __salt__['azurearm_compute.availability_set_get'](
name,
resource_group,
azurearm_log_level='info',
**connection_auth
)
if 'error' not in aset:
tag_changes = __utils__['dictdiffer.deep_diff'](aset.get('tags', {}), tags or {})
if tag_changes:
ret['changes']['tags'] = tag_changes
if platform_update_domain_count and (int(platform_update_domain_count) != aset.get('platform_update_domain_count')):
ret['changes']['platform_update_domain_count'] = {
'old': aset.get('platform_update_domain_count'),
'new': platform_update_domain_count
}
if platform_fault_domain_count and (int(platform_fault_domain_count) != aset.get('platform_fault_domain_count')):
ret['changes']['platform_fault_domain_count'] = {
'old': aset.get('platform_fault_domain_count'),
'new': platform_fault_domain_count
}
if sku and (sku['name'] != aset.get('sku', {}).get('name')):
ret['changes']['sku'] = {
'old': aset.get('sku'),
'new': sku
}
if virtual_machines:
if not isinstance(virtual_machines, list):
ret['comment'] = 'Virtual machines must be supplied as a list!'
return ret
aset_vms = aset.get('virtual_machines', [])
remote_vms = sorted([vm['id'].split('/')[-1].lower() for vm in aset_vms if 'id' in aset_vms])
local_vms = sorted([vm.lower() for vm in virtual_machines or []])
if local_vms != remote_vms:
ret['changes']['virtual_machines'] = {
'old': aset_vms,
'new': virtual_machines
}
if not ret['changes']:
ret['result'] = True
ret['comment'] = 'Availability set {0} is already present.'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Availability set {0} would be updated.'.format(name)
return ret
else:
ret['changes'] = {
'old': {},
'new': {
'name': name,
'virtual_machines': virtual_machines,
'platform_update_domain_count': platform_update_domain_count,
'platform_fault_domain_count': platform_fault_domain_count,
'sku': sku,
'tags': tags
}
}
if __opts__['test']:
ret['comment'] = 'Availability set {0} would be created.'.format(name)
ret['result'] = None
return ret
aset_kwargs = kwargs.copy()
aset_kwargs.update(connection_auth)
aset = __salt__['azurearm_compute.availability_set_create_or_update'](
name=name,
resource_group=resource_group,
virtual_machines=virtual_machines,
platform_update_domain_count=platform_update_domain_count,
platform_fault_domain_count=platform_fault_domain_count,
sku=sku,
tags=tags,
**aset_kwargs
)
if 'error' not in aset:
ret['result'] = True
ret['comment'] = 'Availability set {0} has been created.'.format(name)
return ret
ret['comment'] = 'Failed to create availability set {0}! ({1})'.format(name, aset.get('error'))
return ret | python | def availability_set_present(name, resource_group, tags=None, platform_update_domain_count=None,
platform_fault_domain_count=None, virtual_machines=None, sku=None, connection_auth=None,
**kwargs):
'''
.. versionadded:: 2019.2.0
Ensure an availability set exists.
:param name:
Name of the availability set.
:param resource_group:
The resource group assigned to the availability set.
:param tags:
A dictionary of strings can be passed as tag metadata to the availability set object.
:param platform_update_domain_count:
An optional parameter which indicates groups of virtual machines and underlying physical hardware that can be
rebooted at the same time.
:param platform_fault_domain_count:
An optional parameter which defines the group of virtual machines that share a common power source and network
switch.
:param virtual_machines:
A list of names of existing virtual machines to be included in the availability set.
:param sku:
The availability set SKU, which specifies whether the availability set is managed or not. Possible values are
'Aligned' or 'Classic'. An 'Aligned' availability set is managed, 'Classic' is not.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure availability set exists:
azurearm_compute.availability_set_present:
- name: aset1
- resource_group: group1
- platform_update_domain_count: 5
- platform_fault_domain_count: 3
- sku: aligned
- tags:
contact_name: Elmer Fudd Gantry
- connection_auth: {{ profile }}
- require:
- azurearm_resource: Ensure resource group exists
'''
ret = {
'name': name,
'result': False,
'comment': '',
'changes': {}
}
if not isinstance(connection_auth, dict):
ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
return ret
if sku:
sku = {'name': sku.capitalize()}
aset = __salt__['azurearm_compute.availability_set_get'](
name,
resource_group,
azurearm_log_level='info',
**connection_auth
)
if 'error' not in aset:
tag_changes = __utils__['dictdiffer.deep_diff'](aset.get('tags', {}), tags or {})
if tag_changes:
ret['changes']['tags'] = tag_changes
if platform_update_domain_count and (int(platform_update_domain_count) != aset.get('platform_update_domain_count')):
ret['changes']['platform_update_domain_count'] = {
'old': aset.get('platform_update_domain_count'),
'new': platform_update_domain_count
}
if platform_fault_domain_count and (int(platform_fault_domain_count) != aset.get('platform_fault_domain_count')):
ret['changes']['platform_fault_domain_count'] = {
'old': aset.get('platform_fault_domain_count'),
'new': platform_fault_domain_count
}
if sku and (sku['name'] != aset.get('sku', {}).get('name')):
ret['changes']['sku'] = {
'old': aset.get('sku'),
'new': sku
}
if virtual_machines:
if not isinstance(virtual_machines, list):
ret['comment'] = 'Virtual machines must be supplied as a list!'
return ret
aset_vms = aset.get('virtual_machines', [])
remote_vms = sorted([vm['id'].split('/')[-1].lower() for vm in aset_vms if 'id' in aset_vms])
local_vms = sorted([vm.lower() for vm in virtual_machines or []])
if local_vms != remote_vms:
ret['changes']['virtual_machines'] = {
'old': aset_vms,
'new': virtual_machines
}
if not ret['changes']:
ret['result'] = True
ret['comment'] = 'Availability set {0} is already present.'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Availability set {0} would be updated.'.format(name)
return ret
else:
ret['changes'] = {
'old': {},
'new': {
'name': name,
'virtual_machines': virtual_machines,
'platform_update_domain_count': platform_update_domain_count,
'platform_fault_domain_count': platform_fault_domain_count,
'sku': sku,
'tags': tags
}
}
if __opts__['test']:
ret['comment'] = 'Availability set {0} would be created.'.format(name)
ret['result'] = None
return ret
aset_kwargs = kwargs.copy()
aset_kwargs.update(connection_auth)
aset = __salt__['azurearm_compute.availability_set_create_or_update'](
name=name,
resource_group=resource_group,
virtual_machines=virtual_machines,
platform_update_domain_count=platform_update_domain_count,
platform_fault_domain_count=platform_fault_domain_count,
sku=sku,
tags=tags,
**aset_kwargs
)
if 'error' not in aset:
ret['result'] = True
ret['comment'] = 'Availability set {0} has been created.'.format(name)
return ret
ret['comment'] = 'Failed to create availability set {0}! ({1})'.format(name, aset.get('error'))
return ret | ['def', 'availability_set_present', '(', 'name', ',', 'resource_group', ',', 'tags', '=', 'None', ',', 'platform_update_domain_count', '=', 'None', ',', 'platform_fault_domain_count', '=', 'None', ',', 'virtual_machines', '=', 'None', ',', 'sku', '=', 'None', ',', 'connection_auth', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'ret', '=', '{', "'name'", ':', 'name', ',', "'result'", ':', 'False', ',', "'comment'", ':', "''", ',', "'changes'", ':', '{', '}', '}', 'if', 'not', 'isinstance', '(', 'connection_auth', ',', 'dict', ')', ':', 'ret', '[', "'comment'", ']', '=', "'Connection information must be specified via connection_auth dictionary!'", 'return', 'ret', 'if', 'sku', ':', 'sku', '=', '{', "'name'", ':', 'sku', '.', 'capitalize', '(', ')', '}', 'aset', '=', '__salt__', '[', "'azurearm_compute.availability_set_get'", ']', '(', 'name', ',', 'resource_group', ',', 'azurearm_log_level', '=', "'info'", ',', '*', '*', 'connection_auth', ')', 'if', "'error'", 'not', 'in', 'aset', ':', 'tag_changes', '=', '__utils__', '[', "'dictdiffer.deep_diff'", ']', '(', 'aset', '.', 'get', '(', "'tags'", ',', '{', '}', ')', ',', 'tags', 'or', '{', '}', ')', 'if', 'tag_changes', ':', 'ret', '[', "'changes'", ']', '[', "'tags'", ']', '=', 'tag_changes', 'if', 'platform_update_domain_count', 'and', '(', 'int', '(', 'platform_update_domain_count', ')', '!=', 'aset', '.', 'get', '(', "'platform_update_domain_count'", ')', ')', ':', 'ret', '[', "'changes'", ']', '[', "'platform_update_domain_count'", ']', '=', '{', "'old'", ':', 'aset', '.', 'get', '(', "'platform_update_domain_count'", ')', ',', "'new'", ':', 'platform_update_domain_count', '}', 'if', 'platform_fault_domain_count', 'and', '(', 'int', '(', 'platform_fault_domain_count', ')', '!=', 'aset', '.', 'get', '(', "'platform_fault_domain_count'", ')', ')', ':', 'ret', '[', "'changes'", ']', '[', "'platform_fault_domain_count'", ']', '=', '{', "'old'", ':', 'aset', '.', 'get', '(', "'platform_fault_domain_count'", ')', ',', "'new'", ':', 'platform_fault_domain_count', '}', 'if', 'sku', 'and', '(', 'sku', '[', "'name'", ']', '!=', 'aset', '.', 'get', '(', "'sku'", ',', '{', '}', ')', '.', 'get', '(', "'name'", ')', ')', ':', 'ret', '[', "'changes'", ']', '[', "'sku'", ']', '=', '{', "'old'", ':', 'aset', '.', 'get', '(', "'sku'", ')', ',', "'new'", ':', 'sku', '}', 'if', 'virtual_machines', ':', 'if', 'not', 'isinstance', '(', 'virtual_machines', ',', 'list', ')', ':', 'ret', '[', "'comment'", ']', '=', "'Virtual machines must be supplied as a list!'", 'return', 'ret', 'aset_vms', '=', 'aset', '.', 'get', '(', "'virtual_machines'", ',', '[', ']', ')', 'remote_vms', '=', 'sorted', '(', '[', 'vm', '[', "'id'", ']', '.', 'split', '(', "'/'", ')', '[', '-', '1', ']', '.', 'lower', '(', ')', 'for', 'vm', 'in', 'aset_vms', 'if', "'id'", 'in', 'aset_vms', ']', ')', 'local_vms', '=', 'sorted', '(', '[', 'vm', '.', 'lower', '(', ')', 'for', 'vm', 'in', 'virtual_machines', 'or', '[', ']', ']', ')', 'if', 'local_vms', '!=', 'remote_vms', ':', 'ret', '[', "'changes'", ']', '[', "'virtual_machines'", ']', '=', '{', "'old'", ':', 'aset_vms', ',', "'new'", ':', 'virtual_machines', '}', 'if', 'not', 'ret', '[', "'changes'", ']', ':', 'ret', '[', "'result'", ']', '=', 'True', 'ret', '[', "'comment'", ']', '=', "'Availability set {0} is already present.'", '.', 'format', '(', 'name', ')', 'return', 'ret', 'if', '__opts__', '[', "'test'", ']', ':', 'ret', '[', "'result'", ']', '=', 'None', 'ret', '[', "'comment'", ']', '=', "'Availability set {0} would be 
updated.'", '.', 'format', '(', 'name', ')', 'return', 'ret', 'else', ':', 'ret', '[', "'changes'", ']', '=', '{', "'old'", ':', '{', '}', ',', "'new'", ':', '{', "'name'", ':', 'name', ',', "'virtual_machines'", ':', 'virtual_machines', ',', "'platform_update_domain_count'", ':', 'platform_update_domain_count', ',', "'platform_fault_domain_count'", ':', 'platform_fault_domain_count', ',', "'sku'", ':', 'sku', ',', "'tags'", ':', 'tags', '}', '}', 'if', '__opts__', '[', "'test'", ']', ':', 'ret', '[', "'comment'", ']', '=', "'Availability set {0} would be created.'", '.', 'format', '(', 'name', ')', 'ret', '[', "'result'", ']', '=', 'None', 'return', 'ret', 'aset_kwargs', '=', 'kwargs', '.', 'copy', '(', ')', 'aset_kwargs', '.', 'update', '(', 'connection_auth', ')', 'aset', '=', '__salt__', '[', "'azurearm_compute.availability_set_create_or_update'", ']', '(', 'name', '=', 'name', ',', 'resource_group', '=', 'resource_group', ',', 'virtual_machines', '=', 'virtual_machines', ',', 'platform_update_domain_count', '=', 'platform_update_domain_count', ',', 'platform_fault_domain_count', '=', 'platform_fault_domain_count', ',', 'sku', '=', 'sku', ',', 'tags', '=', 'tags', ',', '*', '*', 'aset_kwargs', ')', 'if', "'error'", 'not', 'in', 'aset', ':', 'ret', '[', "'result'", ']', '=', 'True', 'ret', '[', "'comment'", ']', '=', "'Availability set {0} has been created.'", '.', 'format', '(', 'name', ')', 'return', 'ret', 'ret', '[', "'comment'", ']', '=', "'Failed to create availability set {0}! ({1})'", '.', 'format', '(', 'name', ',', 'aset', '.', 'get', '(', "'error'", ')', ')', 'return', 'ret'] | .. versionadded:: 2019.2.0
Ensure an availability set exists.
:param name:
Name of the availability set.
:param resource_group:
The resource group assigned to the availability set.
:param tags:
A dictionary of strings can be passed as tag metadata to the availability set object.
:param platform_update_domain_count:
An optional parameter which indicates groups of virtual machines and underlying physical hardware that can be
rebooted at the same time.
:param platform_fault_domain_count:
An optional parameter which defines the group of virtual machines that share a common power source and network
switch.
:param virtual_machines:
A list of names of existing virtual machines to be included in the availability set.
:param sku:
The availability set SKU, which specifies whether the availability set is managed or not. Possible values are
'Aligned' or 'Classic'. An 'Aligned' availability set is managed, 'Classic' is not.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure availability set exists:
azurearm_compute.availability_set_present:
- name: aset1
- resource_group: group1
- platform_update_domain_count: 5
- platform_fault_domain_count: 3
- sku: aligned
- tags:
contact_name: Elmer Fudd Gantry
- connection_auth: {{ profile }}
- require:
- azurearm_resource: Ensure resource group exists | ['..', 'versionadded', '::', '2019', '.', '2', '.', '0'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/azurearm_compute.py#L104-L263 |
4,593 | iotile/coretools | iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/FortranCommon.py | add_f90_to_env | def add_f90_to_env(env):
"""Add Builders and construction variables for f90 to an Environment."""
try:
F90Suffixes = env['F90FILESUFFIXES']
except KeyError:
F90Suffixes = ['.f90']
#print("Adding %s to f90 suffixes" % F90Suffixes)
try:
F90PPSuffixes = env['F90PPFILESUFFIXES']
except KeyError:
F90PPSuffixes = []
DialectAddToEnv(env, "F90", F90Suffixes, F90PPSuffixes,
support_module = 1) | python | def add_f90_to_env(env):
"""Add Builders and construction variables for f90 to an Environment."""
try:
F90Suffixes = env['F90FILESUFFIXES']
except KeyError:
F90Suffixes = ['.f90']
#print("Adding %s to f90 suffixes" % F90Suffixes)
try:
F90PPSuffixes = env['F90PPFILESUFFIXES']
except KeyError:
F90PPSuffixes = []
DialectAddToEnv(env, "F90", F90Suffixes, F90PPSuffixes,
support_module = 1) | ['def', 'add_f90_to_env', '(', 'env', ')', ':', 'try', ':', 'F90Suffixes', '=', 'env', '[', "'F90FILESUFFIXES'", ']', 'except', 'KeyError', ':', 'F90Suffixes', '=', '[', "'.f90'", ']', '#print("Adding %s to f90 suffixes" % F90Suffixes)', 'try', ':', 'F90PPSuffixes', '=', 'env', '[', "'F90PPFILESUFFIXES'", ']', 'except', 'KeyError', ':', 'F90PPSuffixes', '=', '[', ']', 'DialectAddToEnv', '(', 'env', ',', '"F90"', ',', 'F90Suffixes', ',', 'F90PPSuffixes', ',', 'support_module', '=', '1', ')'] | Add Builders and construction variables for f90 to an Environment. | ['Add', 'Builders', 'and', 'construction', 'variables', 'for', 'f90', 'to', 'an', 'Environment', '.'] | train | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/FortranCommon.py#L202-L216 |
4,594 | gwastro/pycbc | pycbc/dq.py | query_flag | def query_flag(ifo, name, start_time, end_time,
source='any', server="segments.ligo.org",
veto_definer=None, cache=False):
"""Return the times where the flag is active
Parameters
----------
ifo: string
The interferometer to query (H1, L1).
name: string
The status flag to query from LOSC.
start_time: int
The starting gps time to begin querying from LOSC
end_time: int
The end gps time of the query
source: str, Optional
Choice between "GWOSC" or "dqsegdb". If dqsegdb, the server option may
also be given. The default is to try GWOSC first then try dqsegdb.
server: str, Optional
The server path. Only used with dqsegdb atm.
veto_definer: str, Optional
The path to a veto definer to define groups of flags which
themselves define a set of segments.
cache: bool
If true cache the query. Default is not to cache
Returns
---------
segments: glue.segments.segmentlist
List of segments
"""
info = name.split(':')
if len(info) == 2:
segment_name, version = info
elif len(info) == 1:
segment_name = info[0]
version = 1
flag_segments = segmentlist([])
if source in ['GWOSC', 'any']:
# Special cases as the LOSC convention is backwards from normal
# LIGO / Virgo operation!!!!
if (('_HW_INJ' in segment_name and 'NO' not in segment_name) or
'VETO' in segment_name):
data = query_flag(ifo, 'DATA', start_time, end_time)
if '_HW_INJ' in segment_name:
name = 'NO_' + segment_name
else:
name = segment_name.replace('_VETO', '')
negate = query_flag(ifo, name, start_time, end_time, cache=cache)
return (data - negate).coalesce()
duration = end_time - start_time
url = GWOSC_URL.format(get_run(start_time + duration/2),
ifo, segment_name,
int(start_time), int(duration))
try:
fname = download_file(url, cache=cache)
data = json.load(open(fname, 'r'))
if 'segments' in data:
flag_segments = data['segments']
except Exception as e:
msg = "Unable to find segments in GWOSC, check flag name or times"
print(e)
if source != 'any':
raise ValueError(msg)
else:
print("Tried and failed GWOSC {}, trying dqsegdb", name)
return query_flag(ifo, segment_name, start_time, end_time,
source='dqsegdb', server=server,
veto_definer=veto_definer)
elif source == 'dqsegdb':
# Let's not hard require dqsegdb to be installed if we never get here.
try:
from dqsegdb.apicalls import dqsegdbQueryTimes as query
except ImportError:
raise ValueError("Could not query flag. Install dqsegdb"
":'pip install dqsegdb'")
# The veto definer will allow the use of MACRO names
# These directly correspond the name defined in the veto definer file.
if veto_definer is not None:
veto_def = parse_veto_definer(veto_definer)
# We treat the veto definer name as if it were its own flag and
# a process the flags in the veto definer
if veto_definer is not None and segment_name in veto_def[ifo]:
for flag in veto_def[ifo][segment_name]:
segs = query("https", server, ifo, flag['name'],
flag['version'], 'active',
int(start_time), int(end_time))[0]['active']
# Apply padding to each segment
for rseg in segs:
seg_start = rseg[0] + flag['start_pad']
seg_end = rseg[1] + flag['end_pad']
flag_segments.append(segment(seg_start, seg_end))
# Apply start / end of the veto definer segment
send = segmentlist([segment([veto_def['start'], veto_def['end']])])
flag_segments = (flag_segments.coalesce() & send)
else: # Standard case just query directly.
try:
segs = query("https", server, ifo, name, version,
'active', int(start_time),
int(end_time))[0]['active']
for rseg in segs:
flag_segments.append(segment(rseg[0], rseg[1]))
except Exception as e:
print("Could not query flag, check name "
" (%s) or times" % segment_name)
raise e
else:
raise ValueError("Source must be dqsegdb or GWOSC."
" Got {}".format(source))
return segmentlist(flag_segments).coalesce() | python | def query_flag(ifo, name, start_time, end_time,
source='any', server="segments.ligo.org",
veto_definer=None, cache=False):
"""Return the times where the flag is active
Parameters
----------
ifo: string
The interferometer to query (H1, L1).
name: string
The status flag to query from LOSC.
start_time: int
The starting gps time to begin querying from LOSC
end_time: int
The end gps time of the query
source: str, Optional
Choice between "GWOSC" or "dqsegdb". If dqsegdb, the server option may
also be given. The default is to try GWOSC first then try dqsegdb.
server: str, Optional
The server path. Only used with dqsegdb atm.
veto_definer: str, Optional
The path to a veto definer to define groups of flags which
themselves define a set of segments.
cache: bool
If true cache the query. Default is not to cache
Returns
---------
segments: glue.segments.segmentlist
List of segments
"""
info = name.split(':')
if len(info) == 2:
segment_name, version = info
elif len(info) == 1:
segment_name = info[0]
version = 1
flag_segments = segmentlist([])
if source in ['GWOSC', 'any']:
# Special cases as the LOSC convention is backwards from normal
# LIGO / Virgo operation!!!!
if (('_HW_INJ' in segment_name and 'NO' not in segment_name) or
'VETO' in segment_name):
data = query_flag(ifo, 'DATA', start_time, end_time)
if '_HW_INJ' in segment_name:
name = 'NO_' + segment_name
else:
name = segment_name.replace('_VETO', '')
negate = query_flag(ifo, name, start_time, end_time, cache=cache)
return (data - negate).coalesce()
duration = end_time - start_time
url = GWOSC_URL.format(get_run(start_time + duration/2),
ifo, segment_name,
int(start_time), int(duration))
try:
fname = download_file(url, cache=cache)
data = json.load(open(fname, 'r'))
if 'segments' in data:
flag_segments = data['segments']
except Exception as e:
msg = "Unable to find segments in GWOSC, check flag name or times"
print(e)
if source != 'any':
raise ValueError(msg)
else:
print("Tried and failed GWOSC {}, trying dqsegdb", name)
return query_flag(ifo, segment_name, start_time, end_time,
source='dqsegdb', server=server,
veto_definer=veto_definer)
elif source == 'dqsegdb':
# Let's not hard require dqsegdb to be installed if we never get here.
try:
from dqsegdb.apicalls import dqsegdbQueryTimes as query
except ImportError:
raise ValueError("Could not query flag. Install dqsegdb"
":'pip install dqsegdb'")
# The veto definer will allow the use of MACRO names
# These directly correspond the name defined in the veto definer file.
if veto_definer is not None:
veto_def = parse_veto_definer(veto_definer)
# We treat the veto definer name as if it were its own flag and
# a process the flags in the veto definer
if veto_definer is not None and segment_name in veto_def[ifo]:
for flag in veto_def[ifo][segment_name]:
segs = query("https", server, ifo, flag['name'],
flag['version'], 'active',
int(start_time), int(end_time))[0]['active']
# Apply padding to each segment
for rseg in segs:
seg_start = rseg[0] + flag['start_pad']
seg_end = rseg[1] + flag['end_pad']
flag_segments.append(segment(seg_start, seg_end))
# Apply start / end of the veto definer segment
send = segmentlist([segment([veto_def['start'], veto_def['end']])])
flag_segments = (flag_segments.coalesce() & send)
else: # Standard case just query directly.
try:
segs = query("https", server, ifo, name, version,
'active', int(start_time),
int(end_time))[0]['active']
for rseg in segs:
flag_segments.append(segment(rseg[0], rseg[1]))
except Exception as e:
print("Could not query flag, check name "
" (%s) or times" % segment_name)
raise e
else:
raise ValueError("Source must be dqsegdb or GWOSC."
" Got {}".format(source))
return segmentlist(flag_segments).coalesce() | ['def', 'query_flag', '(', 'ifo', ',', 'name', ',', 'start_time', ',', 'end_time', ',', 'source', '=', "'any'", ',', 'server', '=', '"segments.ligo.org"', ',', 'veto_definer', '=', 'None', ',', 'cache', '=', 'False', ')', ':', 'info', '=', 'name', '.', 'split', '(', "':'", ')', 'if', 'len', '(', 'info', ')', '==', '2', ':', 'segment_name', ',', 'version', '=', 'info', 'elif', 'len', '(', 'info', ')', '==', '1', ':', 'segment_name', '=', 'info', '[', '0', ']', 'version', '=', '1', 'flag_segments', '=', 'segmentlist', '(', '[', ']', ')', 'if', 'source', 'in', '[', "'GWOSC'", ',', "'any'", ']', ':', '# Special cases as the LOSC convention is backwards from normal', '# LIGO / Virgo operation!!!!', 'if', '(', '(', "'_HW_INJ'", 'in', 'segment_name', 'and', "'NO'", 'not', 'in', 'segment_name', ')', 'or', "'VETO'", 'in', 'segment_name', ')', ':', 'data', '=', 'query_flag', '(', 'ifo', ',', "'DATA'", ',', 'start_time', ',', 'end_time', ')', 'if', "'_HW_INJ'", 'in', 'segment_name', ':', 'name', '=', "'NO_'", '+', 'segment_name', 'else', ':', 'name', '=', 'segment_name', '.', 'replace', '(', "'_VETO'", ',', "''", ')', 'negate', '=', 'query_flag', '(', 'ifo', ',', 'name', ',', 'start_time', ',', 'end_time', ',', 'cache', '=', 'cache', ')', 'return', '(', 'data', '-', 'negate', ')', '.', 'coalesce', '(', ')', 'duration', '=', 'end_time', '-', 'start_time', 'url', '=', 'GWOSC_URL', '.', 'format', '(', 'get_run', '(', 'start_time', '+', 'duration', '/', '2', ')', ',', 'ifo', ',', 'segment_name', ',', 'int', '(', 'start_time', ')', ',', 'int', '(', 'duration', ')', ')', 'try', ':', 'fname', '=', 'download_file', '(', 'url', ',', 'cache', '=', 'cache', ')', 'data', '=', 'json', '.', 'load', '(', 'open', '(', 'fname', ',', "'r'", ')', ')', 'if', "'segments'", 'in', 'data', ':', 'flag_segments', '=', 'data', '[', "'segments'", ']', 'except', 'Exception', 'as', 'e', ':', 'msg', '=', '"Unable to find segments in GWOSC, check flag name or times"', 'print', '(', 'e', ')', 'if', 'source', '!=', "'any'", ':', 'raise', 'ValueError', '(', 'msg', ')', 'else', ':', 'print', '(', '"Tried and failed GWOSC {}, trying dqsegdb"', ',', 'name', ')', 'return', 'query_flag', '(', 'ifo', ',', 'segment_name', ',', 'start_time', ',', 'end_time', ',', 'source', '=', "'dqsegdb'", ',', 'server', '=', 'server', ',', 'veto_definer', '=', 'veto_definer', ')', 'elif', 'source', '==', "'dqsegdb'", ':', "# Let's not hard require dqsegdb to be installed if we never get here.", 'try', ':', 'from', 'dqsegdb', '.', 'apicalls', 'import', 'dqsegdbQueryTimes', 'as', 'query', 'except', 'ImportError', ':', 'raise', 'ValueError', '(', '"Could not query flag. 
Install dqsegdb"', '":\'pip install dqsegdb\'"', ')', '# The veto definer will allow the use of MACRO names', '# These directly correspond the name defined in the veto definer file.', 'if', 'veto_definer', 'is', 'not', 'None', ':', 'veto_def', '=', 'parse_veto_definer', '(', 'veto_definer', ')', '# We treat the veto definer name as if it were its own flag and', '# a process the flags in the veto definer', 'if', 'veto_definer', 'is', 'not', 'None', 'and', 'segment_name', 'in', 'veto_def', '[', 'ifo', ']', ':', 'for', 'flag', 'in', 'veto_def', '[', 'ifo', ']', '[', 'segment_name', ']', ':', 'segs', '=', 'query', '(', '"https"', ',', 'server', ',', 'ifo', ',', 'flag', '[', "'name'", ']', ',', 'flag', '[', "'version'", ']', ',', "'active'", ',', 'int', '(', 'start_time', ')', ',', 'int', '(', 'end_time', ')', ')', '[', '0', ']', '[', "'active'", ']', '# Apply padding to each segment', 'for', 'rseg', 'in', 'segs', ':', 'seg_start', '=', 'rseg', '[', '0', ']', '+', 'flag', '[', "'start_pad'", ']', 'seg_end', '=', 'rseg', '[', '1', ']', '+', 'flag', '[', "'end_pad'", ']', 'flag_segments', '.', 'append', '(', 'segment', '(', 'seg_start', ',', 'seg_end', ')', ')', '# Apply start / end of the veto definer segment', 'send', '=', 'segmentlist', '(', '[', 'segment', '(', '[', 'veto_def', '[', "'start'", ']', ',', 'veto_def', '[', "'end'", ']', ']', ')', ']', ')', 'flag_segments', '=', '(', 'flag_segments', '.', 'coalesce', '(', ')', '&', 'send', ')', 'else', ':', '# Standard case just query directly.', 'try', ':', 'segs', '=', 'query', '(', '"https"', ',', 'server', ',', 'ifo', ',', 'name', ',', 'version', ',', "'active'", ',', 'int', '(', 'start_time', ')', ',', 'int', '(', 'end_time', ')', ')', '[', '0', ']', '[', "'active'", ']', 'for', 'rseg', 'in', 'segs', ':', 'flag_segments', '.', 'append', '(', 'segment', '(', 'rseg', '[', '0', ']', ',', 'rseg', '[', '1', ']', ')', ')', 'except', 'Exception', 'as', 'e', ':', 'print', '(', '"Could not query flag, check name "', '" (%s) or times"', '%', 'segment_name', ')', 'raise', 'e', 'else', ':', 'raise', 'ValueError', '(', '"Source must be dqsegdb or GWOSC."', '" Got {}"', '.', 'format', '(', 'source', ')', ')', 'return', 'segmentlist', '(', 'flag_segments', ')', '.', 'coalesce', '(', ')'] | Return the times where the flag is active
Parameters
----------
ifo: string
The interferometer to query (H1, L1).
name: string
The status flag to query from LOSC.
start_time: int
The starting gps time to begin querying from LOSC
end_time: int
The end gps time of the query
source: str, Optional
Choice between "GWOSC" or "dqsegdb". If dqsegdb, the server option may
also be given. The default is to try GWOSC first then try dqsegdb.
server: str, Optional
The server path. Only used with dqsegdb atm.
veto_definer: str, Optional
The path to a veto definer to define groups of flags which
themselves define a set of segments.
cache: bool
If true cache the query. Default is not to cache
Returns
---------
segments: glue.segments.segmentlist
List of segments | ['Return', 'the', 'times', 'where', 'the', 'flag', 'is', 'active'] | train | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/dq.py#L98-L224 |
4,595 | samuel-phan/mssh-copy-id | tasks.py | build_src | def build_src(ctx, dest=None):
"""
build source archive
"""
if dest:
if not dest.startswith('/'):
# Relative
dest = os.path.join(os.getcwd(), dest)
os.chdir(PROJECT_DIR)
ctx.run('python setup.py sdist --dist-dir {0}'.format(dest))
else:
os.chdir(PROJECT_DIR)
ctx.run('python setup.py sdist') | python | def build_src(ctx, dest=None):
"""
build source archive
"""
if dest:
if not dest.startswith('/'):
# Relative
dest = os.path.join(os.getcwd(), dest)
os.chdir(PROJECT_DIR)
ctx.run('python setup.py sdist --dist-dir {0}'.format(dest))
else:
os.chdir(PROJECT_DIR)
ctx.run('python setup.py sdist') | ['def', 'build_src', '(', 'ctx', ',', 'dest', '=', 'None', ')', ':', 'if', 'dest', ':', 'if', 'not', 'dest', '.', 'startswith', '(', "'/'", ')', ':', '# Relative', 'dest', '=', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'getcwd', '(', ')', ',', 'dest', ')', 'os', '.', 'chdir', '(', 'PROJECT_DIR', ')', 'ctx', '.', 'run', '(', "'python setup.py sdist --dist-dir {0}'", '.', 'format', '(', 'dest', ')', ')', 'else', ':', 'os', '.', 'chdir', '(', 'PROJECT_DIR', ')', 'ctx', '.', 'run', '(', "'python setup.py sdist'", ')'] | build source archive | ['build', 'source', 'archive'] | train | https://github.com/samuel-phan/mssh-copy-id/blob/59c50eabb74c4e0eeb729266df57c285e6661b0b/tasks.py#L142-L155 |
4,596 | radjkarl/imgProcessor | DUMP/interpolationMethods.py | polynomial | def polynomial(img, mask, inplace=False, replace_all=False,
max_dev=1e-5, max_iter=20, order=2):
'''
replace all masked values
calculate flatField from 2d-polynomal fit filling
all high gradient areas within averaged fit-image
returns flatField, average background level, fitted image, valid indices mask
'''
if inplace:
out = img
else:
out = img.copy()
lastm = 0
for _ in range(max_iter):
out2 = polyfit2dGrid(out, mask, order=order, copy=not inplace,
replace_all=replace_all)
if replace_all:
out = out2
break
res = (np.abs(out2 - out)).mean()
print('residuum: ', res)
if res < max_dev:
out = out2
break
out = out2
mask = _highGrad(out)
m = mask.sum()
if m == lastm or m == img.size:
break
lastm = m
out = np.clip(out, 0, 1, out=out) # if inplace else None)
return out | python | def polynomial(img, mask, inplace=False, replace_all=False,
max_dev=1e-5, max_iter=20, order=2):
'''
replace all masked values
calculate flatField from 2d-polynomal fit filling
all high gradient areas within averaged fit-image
returns flatField, average background level, fitted image, valid indices mask
'''
if inplace:
out = img
else:
out = img.copy()
lastm = 0
for _ in range(max_iter):
out2 = polyfit2dGrid(out, mask, order=order, copy=not inplace,
replace_all=replace_all)
if replace_all:
out = out2
break
res = (np.abs(out2 - out)).mean()
print('residuum: ', res)
if res < max_dev:
out = out2
break
out = out2
mask = _highGrad(out)
m = mask.sum()
if m == lastm or m == img.size:
break
lastm = m
out = np.clip(out, 0, 1, out=out) # if inplace else None)
return out | ['def', 'polynomial', '(', 'img', ',', 'mask', ',', 'inplace', '=', 'False', ',', 'replace_all', '=', 'False', ',', 'max_dev', '=', '1e-5', ',', 'max_iter', '=', '20', ',', 'order', '=', '2', ')', ':', 'if', 'inplace', ':', 'out', '=', 'img', 'else', ':', 'out', '=', 'img', '.', 'copy', '(', ')', 'lastm', '=', '0', 'for', '_', 'in', 'range', '(', 'max_iter', ')', ':', 'out2', '=', 'polyfit2dGrid', '(', 'out', ',', 'mask', ',', 'order', '=', 'order', ',', 'copy', '=', 'not', 'inplace', ',', 'replace_all', '=', 'replace_all', ')', 'if', 'replace_all', ':', 'out', '=', 'out2', 'break', 'res', '=', '(', 'np', '.', 'abs', '(', 'out2', '-', 'out', ')', ')', '.', 'mean', '(', ')', 'print', '(', "'residuum: '", ',', 'res', ')', 'if', 'res', '<', 'max_dev', ':', 'out', '=', 'out2', 'break', 'out', '=', 'out2', 'mask', '=', '_highGrad', '(', 'out', ')', 'm', '=', 'mask', '.', 'sum', '(', ')', 'if', 'm', '==', 'lastm', 'or', 'm', '==', 'img', '.', 'size', ':', 'break', 'lastm', '=', 'm', 'out', '=', 'np', '.', 'clip', '(', 'out', ',', '0', ',', '1', ',', 'out', '=', 'out', ')', '# if inplace else None)\r', 'return', 'out'] | replace all masked values
calculate flatField from 2d-polynomal fit filling
all high gradient areas within averaged fit-image
returns flatField, average background level, fitted image, valid indices mask | ['replace', 'all', 'masked', 'values', 'calculate', 'flatField', 'from', '2d', '-', 'polynomal', 'fit', 'filling', 'all', 'high', 'gradient', 'areas', 'within', 'averaged', 'fit', '-', 'image', 'returns', 'flatField', 'average', 'background', 'level', 'fitted', 'image', 'valid', 'indices', 'mask'] | train | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/DUMP/interpolationMethods.py#L55-L88 |
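An illustrative call sketch for the masked-value in-painting routine above, assuming the surrounding imgProcessor module (polyfit2dGrid, _highGrad) is importable; the image should be roughly normalised, since the result is clipped to [0, 1].

import numpy as np

# synthetic, smoothly varying image in [0, 1] with a damaged block
y, x = np.mgrid[0:100, 0:100]
img = 0.4 + 0.002 * x + 0.001 * y
img[40:60, 40:60] = 0.0

mask = np.zeros(img.shape, bool)
mask[40:60, 40:60] = True                 # True where values must be replaced

filled = polynomial(img, mask, order=2)   # iterates until the fit stabilises
print(filled[50, 50])                     # in-painted value inside the damaged block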
4,597 | AguaClara/aguaclara | aguaclara/core/head_loss.py | _k_value_tapered_reduction | def _k_value_tapered_reduction(ent_pipe_id, exit_pipe_id, fitting_angle, re, f):
"""Returns the minor loss coefficient for a tapered reducer.
Parameters:
ent_pipe_id: Entrance pipe's inner diameter.
exit_pipe_id: Exit pipe's inner diameter.
fitting_angle: Fitting angle between entrance and exit pipes.
re: Reynold's number.
f: Darcy friction factor.
"""
k_value_square_reduction = _k_value_square_reduction(ent_pipe_id, exit_pipe_id,
re, f)
if 45 < fitting_angle <= 180:
return k_value_square_reduction * np.sqrt(np.sin(fitting_angle / 2))
elif 0 < fitting_angle <= 45:
return k_value_square_reduction * 1.6 * np.sin(fitting_angle / 2)
else:
raise ValueError('k_value_tapered_reduction: The reducer angle ('
+ fitting_angle + ') cannot be outside of [0,180].') | python | def _k_value_tapered_reduction(ent_pipe_id, exit_pipe_id, fitting_angle, re, f):
"""Returns the minor loss coefficient for a tapered reducer.
Parameters:
ent_pipe_id: Entrance pipe's inner diameter.
exit_pipe_id: Exit pipe's inner diameter.
fitting_angle: Fitting angle between entrance and exit pipes.
re: Reynold's number.
f: Darcy friction factor.
"""
k_value_square_reduction = _k_value_square_reduction(ent_pipe_id, exit_pipe_id,
re, f)
if 45 < fitting_angle <= 180:
return k_value_square_reduction * np.sqrt(np.sin(fitting_angle / 2))
elif 0 < fitting_angle <= 45:
return k_value_square_reduction * 1.6 * np.sin(fitting_angle / 2)
else:
raise ValueError('k_value_tapered_reduction: The reducer angle ('
+ fitting_angle + ') cannot be outside of [0,180].') | ['def', '_k_value_tapered_reduction', '(', 'ent_pipe_id', ',', 'exit_pipe_id', ',', 'fitting_angle', ',', 're', ',', 'f', ')', ':', 'k_value_square_reduction', '=', '_k_value_square_reduction', '(', 'ent_pipe_id', ',', 'exit_pipe_id', ',', 're', ',', 'f', ')', 'if', '45', '<', 'fitting_angle', '<=', '180', ':', 'return', 'k_value_square_reduction', '*', 'np', '.', 'sqrt', '(', 'np', '.', 'sin', '(', 'fitting_angle', '/', '2', ')', ')', 'elif', '0', '<', 'fitting_angle', '<=', '45', ':', 'return', 'k_value_square_reduction', '*', '1.6', '*', 'np', '.', 'sin', '(', 'fitting_angle', '/', '2', ')', 'else', ':', 'raise', 'ValueError', '(', "'k_value_tapered_reduction: The reducer angle ('", '+', 'fitting_angle', '+', "') cannot be outside of [0,180].'", ')'] | Returns the minor loss coefficient for a tapered reducer.
Parameters:
ent_pipe_id: Entrance pipe's inner diameter.
exit_pipe_id: Exit pipe's inner diameter.
fitting_angle: Fitting angle between entrance and exit pipes.
re: Reynold's number.
f: Darcy friction factor. | ['Returns', 'the', 'minor', 'loss', 'coefficient', 'for', 'a', 'tapered', 'reducer', '.'] | train | https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/core/head_loss.py#L175-L195 |
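The coefficient returned above is normally applied through the standard minor-loss relation h_e = K * v**2 / (2 * g). A small illustrative sketch of that step (not part of the aguaclara API):

GRAVITY = 9.80665   # m/s^2

def minor_head_loss(k_value, velocity):
    """Head loss in metres across a fitting, given its minor-loss coefficient."""
    return k_value * velocity ** 2 / (2 * GRAVITY)

# e.g. a reducer with K = 0.35 and a mean exit velocity of 1.2 m/s
print(round(minor_head_loss(0.35, 1.2), 4))   # ~0.0257 m of head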
4,598 | SheffieldML/GPy | GPy/models/state_space_main.py | DescreteStateSpace._kalman_prediction_step_SVD | def _kalman_prediction_step_SVD(k, p_m , p_P, p_dyn_model_callable, calc_grad_log_likelihood=False,
p_dm = None, p_dP = None):
"""
Desctrete prediction function
Input:
k:int
Iteration No. Starts at 0. Total number of iterations equal to the
number of measurements.
p_m: matrix of size (state_dim, time_series_no)
Mean value from the previous step. For "multiple time series mode"
it is matrix, second dimension of which correspond to different
time series.
p_P: tuple (Prev_cov, S, V)
Covariance matrix from the previous step and its SVD decomposition.
Prev_cov = V * S * V.T The tuple is (Prev_cov, S, V)
p_dyn_model_callable: object
calc_grad_log_likelihood: boolean
Whether to calculate gradient of the marginal likelihood
of the state-space model. If true then the next parameter must
provide the extra parameters for gradient calculation.
p_dm: 3D array (state_dim, time_series_no, parameters_no)
Mean derivatives from the previous step. For "multiple time series mode"
it is 3D array, second dimension of which correspond to different
time series.
p_dP: 3D array (state_dim, state_dim, parameters_no)
Mean derivatives from the previous step
Output:
----------------------------
m_pred, P_pred, dm_pred, dP_pred: metrices, 3D objects
Results of the prediction steps.
"""
# covariance from the previous step and its SVD decomposition
# p_prev_cov = v * S * V.T
Prev_cov, S_old, V_old = p_P
#p_prev_cov_tst = np.dot(p_V, (p_S * p_V).T) # reconstructed covariance from the previous step
# index correspond to values from previous iteration.
A = p_dyn_model_callable.Ak(k,p_m,Prev_cov) # state transition matrix (or Jacobian)
Q = p_dyn_model_callable.Qk(k) # state noise matrx. This is necessary for the square root calculation (next step)
Q_sr = p_dyn_model_callable.Q_srk(k)
# Prediction step ->
m_pred = p_dyn_model_callable.f_a(k, p_m, A) # predicted mean
# coavariance prediction have changed:
svd_1_matr = np.vstack( ( (np.sqrt(S_old)* np.dot(A,V_old)).T , Q_sr.T) )
(U,S,Vh) = sp.linalg.svd( svd_1_matr,full_matrices=False, compute_uv=True,
overwrite_a=False,check_finite=True)
# predicted variance computed by the regular method. For testing
#P_pred_tst = A.dot(Prev_cov).dot(A.T) + Q
V_new = Vh.T
S_new = S**2
P_pred = np.dot(V_new * S_new, V_new.T) # prediction covariance
P_pred = (P_pred, S_new, Vh.T)
# Prediction step <-
# derivatives
if calc_grad_log_likelihood:
dA_all_params = p_dyn_model_callable.dAk(k) # derivatives of A wrt parameters
dQ_all_params = p_dyn_model_callable.dQk(k) # derivatives of Q wrt parameters
param_number = p_dP.shape[2]
# p_dm, p_dP - derivatives form the previoius step
dm_pred = np.empty(p_dm.shape)
dP_pred = np.empty(p_dP.shape)
for j in range(param_number):
dA = dA_all_params[:,:,j]
dQ = dQ_all_params[:,:,j]
#dP = p_dP[:,:,j]
#dm = p_dm[:,:,j]
dm_pred[:,:,j] = np.dot(dA, p_m) + np.dot(A, p_dm[:,:,j])
# prediction step derivatives for current parameter:
dP_pred[:,:,j] = np.dot( dA ,np.dot(Prev_cov, A.T))
dP_pred[:,:,j] += dP_pred[:,:,j].T
dP_pred[:,:,j] += np.dot( A ,np.dot(p_dP[:,:,j], A.T)) + dQ
dP_pred[:,:,j] = 0.5*(dP_pred[:,:,j] + dP_pred[:,:,j].T) #symmetrize
else:
dm_pred = None
dP_pred = None
return m_pred, P_pred, dm_pred, dP_pred | python | def _kalman_prediction_step_SVD(k, p_m , p_P, p_dyn_model_callable, calc_grad_log_likelihood=False,
p_dm = None, p_dP = None):
"""
Desctrete prediction function
Input:
k:int
Iteration No. Starts at 0. Total number of iterations equal to the
number of measurements.
p_m: matrix of size (state_dim, time_series_no)
Mean value from the previous step. For "multiple time series mode"
it is matrix, second dimension of which correspond to different
time series.
p_P: tuple (Prev_cov, S, V)
Covariance matrix from the previous step and its SVD decomposition.
Prev_cov = V * S * V.T The tuple is (Prev_cov, S, V)
p_dyn_model_callable: object
calc_grad_log_likelihood: boolean
Whether to calculate gradient of the marginal likelihood
of the state-space model. If true then the next parameter must
provide the extra parameters for gradient calculation.
p_dm: 3D array (state_dim, time_series_no, parameters_no)
Mean derivatives from the previous step. For "multiple time series mode"
it is 3D array, second dimension of which correspond to different
time series.
p_dP: 3D array (state_dim, state_dim, parameters_no)
Mean derivatives from the previous step
Output:
----------------------------
m_pred, P_pred, dm_pred, dP_pred: metrices, 3D objects
Results of the prediction steps.
"""
# covariance from the previous step and its SVD decomposition
# p_prev_cov = v * S * V.T
Prev_cov, S_old, V_old = p_P
#p_prev_cov_tst = np.dot(p_V, (p_S * p_V).T) # reconstructed covariance from the previous step
# index correspond to values from previous iteration.
A = p_dyn_model_callable.Ak(k,p_m,Prev_cov) # state transition matrix (or Jacobian)
Q = p_dyn_model_callable.Qk(k) # state noise matrx. This is necessary for the square root calculation (next step)
Q_sr = p_dyn_model_callable.Q_srk(k)
# Prediction step ->
m_pred = p_dyn_model_callable.f_a(k, p_m, A) # predicted mean
# coavariance prediction have changed:
svd_1_matr = np.vstack( ( (np.sqrt(S_old)* np.dot(A,V_old)).T , Q_sr.T) )
(U,S,Vh) = sp.linalg.svd( svd_1_matr,full_matrices=False, compute_uv=True,
overwrite_a=False,check_finite=True)
# predicted variance computed by the regular method. For testing
#P_pred_tst = A.dot(Prev_cov).dot(A.T) + Q
V_new = Vh.T
S_new = S**2
P_pred = np.dot(V_new * S_new, V_new.T) # prediction covariance
P_pred = (P_pred, S_new, Vh.T)
# Prediction step <-
# derivatives
if calc_grad_log_likelihood:
dA_all_params = p_dyn_model_callable.dAk(k) # derivatives of A wrt parameters
dQ_all_params = p_dyn_model_callable.dQk(k) # derivatives of Q wrt parameters
param_number = p_dP.shape[2]
# p_dm, p_dP - derivatives form the previoius step
dm_pred = np.empty(p_dm.shape)
dP_pred = np.empty(p_dP.shape)
for j in range(param_number):
dA = dA_all_params[:,:,j]
dQ = dQ_all_params[:,:,j]
#dP = p_dP[:,:,j]
#dm = p_dm[:,:,j]
dm_pred[:,:,j] = np.dot(dA, p_m) + np.dot(A, p_dm[:,:,j])
# prediction step derivatives for current parameter:
dP_pred[:,:,j] = np.dot( dA ,np.dot(Prev_cov, A.T))
dP_pred[:,:,j] += dP_pred[:,:,j].T
dP_pred[:,:,j] += np.dot( A ,np.dot(p_dP[:,:,j], A.T)) + dQ
dP_pred[:,:,j] = 0.5*(dP_pred[:,:,j] + dP_pred[:,:,j].T) #symmetrize
else:
dm_pred = None
dP_pred = None
return m_pred, P_pred, dm_pred, dP_pred | ['def', '_kalman_prediction_step_SVD', '(', 'k', ',', 'p_m', ',', 'p_P', ',', 'p_dyn_model_callable', ',', 'calc_grad_log_likelihood', '=', 'False', ',', 'p_dm', '=', 'None', ',', 'p_dP', '=', 'None', ')', ':', '# covariance from the previous step and its SVD decomposition', '# p_prev_cov = v * S * V.T', 'Prev_cov', ',', 'S_old', ',', 'V_old', '=', 'p_P', '#p_prev_cov_tst = np.dot(p_V, (p_S * p_V).T) # reconstructed covariance from the previous step', '# index correspond to values from previous iteration.', 'A', '=', 'p_dyn_model_callable', '.', 'Ak', '(', 'k', ',', 'p_m', ',', 'Prev_cov', ')', '# state transition matrix (or Jacobian)', 'Q', '=', 'p_dyn_model_callable', '.', 'Qk', '(', 'k', ')', '# state noise matrx. This is necessary for the square root calculation (next step)', 'Q_sr', '=', 'p_dyn_model_callable', '.', 'Q_srk', '(', 'k', ')', '# Prediction step ->', 'm_pred', '=', 'p_dyn_model_callable', '.', 'f_a', '(', 'k', ',', 'p_m', ',', 'A', ')', '# predicted mean', '# coavariance prediction have changed:', 'svd_1_matr', '=', 'np', '.', 'vstack', '(', '(', '(', 'np', '.', 'sqrt', '(', 'S_old', ')', '*', 'np', '.', 'dot', '(', 'A', ',', 'V_old', ')', ')', '.', 'T', ',', 'Q_sr', '.', 'T', ')', ')', '(', 'U', ',', 'S', ',', 'Vh', ')', '=', 'sp', '.', 'linalg', '.', 'svd', '(', 'svd_1_matr', ',', 'full_matrices', '=', 'False', ',', 'compute_uv', '=', 'True', ',', 'overwrite_a', '=', 'False', ',', 'check_finite', '=', 'True', ')', '# predicted variance computed by the regular method. For testing', '#P_pred_tst = A.dot(Prev_cov).dot(A.T) + Q', 'V_new', '=', 'Vh', '.', 'T', 'S_new', '=', 'S', '**', '2', 'P_pred', '=', 'np', '.', 'dot', '(', 'V_new', '*', 'S_new', ',', 'V_new', '.', 'T', ')', '# prediction covariance', 'P_pred', '=', '(', 'P_pred', ',', 'S_new', ',', 'Vh', '.', 'T', ')', '# Prediction step <-', '# derivatives', 'if', 'calc_grad_log_likelihood', ':', 'dA_all_params', '=', 'p_dyn_model_callable', '.', 'dAk', '(', 'k', ')', '# derivatives of A wrt parameters', 'dQ_all_params', '=', 'p_dyn_model_callable', '.', 'dQk', '(', 'k', ')', '# derivatives of Q wrt parameters', 'param_number', '=', 'p_dP', '.', 'shape', '[', '2', ']', '# p_dm, p_dP - derivatives form the previoius step', 'dm_pred', '=', 'np', '.', 'empty', '(', 'p_dm', '.', 'shape', ')', 'dP_pred', '=', 'np', '.', 'empty', '(', 'p_dP', '.', 'shape', ')', 'for', 'j', 'in', 'range', '(', 'param_number', ')', ':', 'dA', '=', 'dA_all_params', '[', ':', ',', ':', ',', 'j', ']', 'dQ', '=', 'dQ_all_params', '[', ':', ',', ':', ',', 'j', ']', '#dP = p_dP[:,:,j]', '#dm = p_dm[:,:,j]', 'dm_pred', '[', ':', ',', ':', ',', 'j', ']', '=', 'np', '.', 'dot', '(', 'dA', ',', 'p_m', ')', '+', 'np', '.', 'dot', '(', 'A', ',', 'p_dm', '[', ':', ',', ':', ',', 'j', ']', ')', '# prediction step derivatives for current parameter:', 'dP_pred', '[', ':', ',', ':', ',', 'j', ']', '=', 'np', '.', 'dot', '(', 'dA', ',', 'np', '.', 'dot', '(', 'Prev_cov', ',', 'A', '.', 'T', ')', ')', 'dP_pred', '[', ':', ',', ':', ',', 'j', ']', '+=', 'dP_pred', '[', ':', ',', ':', ',', 'j', ']', '.', 'T', 'dP_pred', '[', ':', ',', ':', ',', 'j', ']', '+=', 'np', '.', 'dot', '(', 'A', ',', 'np', '.', 'dot', '(', 'p_dP', '[', ':', ',', ':', ',', 'j', ']', ',', 'A', '.', 'T', ')', ')', '+', 'dQ', 'dP_pred', '[', ':', ',', ':', ',', 'j', ']', '=', '0.5', '*', '(', 'dP_pred', '[', ':', ',', ':', ',', 'j', ']', '+', 'dP_pred', '[', ':', ',', ':', ',', 'j', ']', '.', 'T', ')', '#symmetrize', 'else', ':', 'dm_pred', '=', 
'None', 'dP_pred', '=', 'None', 'return', 'm_pred', ',', 'P_pred', ',', 'dm_pred', ',', 'dP_pred'] | Desctrete prediction function
Input:
k:int
Iteration No. Starts at 0. Total number of iterations equal to the
number of measurements.
p_m: matrix of size (state_dim, time_series_no)
Mean value from the previous step. For "multiple time series mode"
it is matrix, second dimension of which correspond to different
time series.
p_P: tuple (Prev_cov, S, V)
Covariance matrix from the previous step and its SVD decomposition.
Prev_cov = V * S * V.T The tuple is (Prev_cov, S, V)
p_dyn_model_callable: object
calc_grad_log_likelihood: boolean
Whether to calculate gradient of the marginal likelihood
of the state-space model. If true then the next parameter must
provide the extra parameters for gradient calculation.
p_dm: 3D array (state_dim, time_series_no, parameters_no)
Mean derivatives from the previous step. For "multiple time series mode"
it is 3D array, second dimension of which correspond to different
time series.
p_dP: 3D array (state_dim, state_dim, parameters_no)
Mean derivatives from the previous step
Output:
----------------------------
m_pred, P_pred, dm_pred, dP_pred: metrices, 3D objects
Results of the prediction steps. | ['Desctrete', 'prediction', 'function'] | train | https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/models/state_space_main.py#L1357-L1454 |
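The covariance propagation above avoids forming A P A^T + Q directly: stacking diag(sqrt(S)) V^T A^T on top of Q_sr^T and taking the SVD of the stack yields factors whose squared singular values reproduce the predicted covariance. A self-contained numpy check of that identity (illustrative only, not GPy API):

import numpy as np
import scipy.linalg as sla

rng = np.random.default_rng(0)
n = 4
A = rng.normal(size=(n, n))                   # state transition matrix
L = rng.normal(size=(n, n))
P_prev = L @ L.T                              # previous covariance (SPD)
Q_sr = 0.1 * np.eye(n)                        # square root of the process noise Q
Q = Q_sr @ Q_sr.T

# eigendecomposition plays the role of the stored SVD: P_prev = V_old * diag(S_old) * V_old.T
S_old, V_old = np.linalg.eigh(P_prev)

stack = np.vstack(((np.sqrt(S_old) * (A @ V_old)).T, Q_sr.T))
_, S, Vh = sla.svd(stack, full_matrices=False)
P_pred_svd = (Vh.T * S**2) @ Vh               # V_new * diag(S_new) * V_new.T

print(np.allclose(P_pred_svd, A @ P_prev @ A.T + Q))   # True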
4,599 | KelSolaar/Foundations | foundations/trace.py | evaluate_trace_request | def evaluate_trace_request(data, tracer=tracer):
"""
Evaluate given string trace request.
Usage::
Umbra -t "{'umbra.engine' : ('.*', 0), 'umbra.preferences' : (r'.*', 0)}"
Umbra -t "['umbra.engine', 'umbra.preferences']"
Umbra -t "'umbra.engine, umbra.preferences"
:param data: Trace request.
:type data: unicode
:param tracer: Tracer.
:type tracer: object
:return: Definition success.
:rtype: bool
"""
data = ast.literal_eval(data)
if isinstance(data, str):
modules = dict.fromkeys(map(lambda x: x.strip(), data.split(",")), (None, None))
elif isinstance(data, list):
modules = dict.fromkeys(data, (None, None))
elif isinstance(data, dict):
modules = data
for module, (pattern, flags) in modules.iteritems():
__import__(module)
pattern = pattern if pattern is not None else r".*"
flags = flags if flags is not None else re.IGNORECASE
trace_module(sys.modules[module], tracer, pattern, flags)
return True | python | def evaluate_trace_request(data, tracer=tracer):
"""
Evaluate given string trace request.
Usage::
Umbra -t "{'umbra.engine' : ('.*', 0), 'umbra.preferences' : (r'.*', 0)}"
Umbra -t "['umbra.engine', 'umbra.preferences']"
Umbra -t "'umbra.engine, umbra.preferences"
:param data: Trace request.
:type data: unicode
:param tracer: Tracer.
:type tracer: object
:return: Definition success.
:rtype: bool
"""
data = ast.literal_eval(data)
if isinstance(data, str):
modules = dict.fromkeys(map(lambda x: x.strip(), data.split(",")), (None, None))
elif isinstance(data, list):
modules = dict.fromkeys(data, (None, None))
elif isinstance(data, dict):
modules = data
for module, (pattern, flags) in modules.iteritems():
__import__(module)
pattern = pattern if pattern is not None else r".*"
flags = flags if flags is not None else re.IGNORECASE
trace_module(sys.modules[module], tracer, pattern, flags)
return True | ['def', 'evaluate_trace_request', '(', 'data', ',', 'tracer', '=', 'tracer', ')', ':', 'data', '=', 'ast', '.', 'literal_eval', '(', 'data', ')', 'if', 'isinstance', '(', 'data', ',', 'str', ')', ':', 'modules', '=', 'dict', '.', 'fromkeys', '(', 'map', '(', 'lambda', 'x', ':', 'x', '.', 'strip', '(', ')', ',', 'data', '.', 'split', '(', '","', ')', ')', ',', '(', 'None', ',', 'None', ')', ')', 'elif', 'isinstance', '(', 'data', ',', 'list', ')', ':', 'modules', '=', 'dict', '.', 'fromkeys', '(', 'data', ',', '(', 'None', ',', 'None', ')', ')', 'elif', 'isinstance', '(', 'data', ',', 'dict', ')', ':', 'modules', '=', 'data', 'for', 'module', ',', '(', 'pattern', ',', 'flags', ')', 'in', 'modules', '.', 'iteritems', '(', ')', ':', '__import__', '(', 'module', ')', 'pattern', '=', 'pattern', 'if', 'pattern', 'is', 'not', 'None', 'else', 'r".*"', 'flags', '=', 'flags', 'if', 'flags', 'is', 'not', 'None', 'else', 're', '.', 'IGNORECASE', 'trace_module', '(', 'sys', '.', 'modules', '[', 'module', ']', ',', 'tracer', ',', 'pattern', ',', 'flags', ')', 'return', 'True'] | Evaluate given string trace request.
Usage::
Umbra -t "{'umbra.engine' : ('.*', 0), 'umbra.preferences' : (r'.*', 0)}"
Umbra -t "['umbra.engine', 'umbra.preferences']"
Umbra -t "'umbra.engine, umbra.preferences"
:param data: Trace request.
:type data: unicode
:param tracer: Tracer.
:type tracer: object
:return: Definition success.
:rtype: bool | ['Evaluate', 'given', 'string', 'trace', 'request', '.'] | train | https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/trace.py#L825-L857 |
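All three request formats accepted above pass through ast.literal_eval before being normalised into a module -> (pattern, flags) mapping. A standalone sketch of that parsing step (no Foundations import needed), mirroring the defaulting logic:

import ast
import re

def parse_trace_request(data):
    data = ast.literal_eval(data)
    if isinstance(data, str):
        modules = dict.fromkeys((x.strip() for x in data.split(",")), (None, None))
    elif isinstance(data, list):
        modules = dict.fromkeys(data, (None, None))
    elif isinstance(data, dict):
        modules = data
    return {module: (pattern if pattern is not None else r".*",
                     flags if flags is not None else re.IGNORECASE)
            for module, (pattern, flags) in modules.items()}

print(parse_trace_request("{'umbra.engine': ('.*', 0), 'umbra.preferences': (None, None)}"))
print(parse_trace_request("['umbra.engine', 'umbra.preferences']"))
print(parse_trace_request("'umbra.engine, umbra.preferences'"))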