Unnamed: 0 (int64, values 0-10k) | repository_name (string, 7-54 chars) | func_path_in_repository (string, 5-223 chars) | func_name (string, 1-134 chars) | whole_func_string (string, 100-30.3k chars) | language (string, 1 distinct value) | func_code_string (string, 100-30.3k chars) | func_code_tokens (string, 138-33.2k chars) | func_documentation_string (string, 1-15k chars) | func_documentation_tokens (string, 5-5.14k chars) | split_name (string, 1 distinct value) | func_code_url (string, 91-315 chars)
---|---|---|---|---|---|---|---|---|---|---|---
5,100 | ynop/audiomate | audiomate/utils/textfile.py | read_key_value_lines | def read_key_value_lines(path, separator=' ', default_value=''):
"""
Reads lines of a text file with two columns as key/value dictionary.
Parameters:
path (str): Path to the file.
separator (str): Separator that is used to split key and value.
default_value (str): If no value is given this value is used.
Returns:
dict: A dictionary with first column as key and second as value.
"""
gen = read_separated_lines_generator(path, separator, 2)
dic = {}
for record in gen:
if len(record) > 1:
dic[record[0]] = record[1]
elif len(record) > 0:
dic[record[0]] = default_value
return dic | python | def read_key_value_lines(path, separator=' ', default_value=''):
"""
Reads lines of a text file with two columns as key/value dictionary.
Parameters:
path (str): Path to the file.
separator (str): Separator that is used to split key and value.
default_value (str): If no value is given this value is used.
Returns:
dict: A dictionary with first column as key and second as value.
"""
gen = read_separated_lines_generator(path, separator, 2)
dic = {}
for record in gen:
if len(record) > 1:
dic[record[0]] = record[1]
elif len(record) > 0:
dic[record[0]] = default_value
return dic | ['def', 'read_key_value_lines', '(', 'path', ',', 'separator', '=', "' '", ',', 'default_value', '=', "''", ')', ':', 'gen', '=', 'read_separated_lines_generator', '(', 'path', ',', 'separator', ',', '2', ')', 'dic', '=', '{', '}', 'for', 'record', 'in', 'gen', ':', 'if', 'len', '(', 'record', ')', '>', '1', ':', 'dic', '[', 'record', '[', '0', ']', ']', '=', 'record', '[', '1', ']', 'elif', 'len', '(', 'record', ')', '>', '0', ':', 'dic', '[', 'record', '[', '0', ']', ']', '=', 'default_value', 'return', 'dic'] | Reads lines of a text file with two columns as key/value dictionary.
Parameters:
path (str): Path to the file.
separator (str): Separator that is used to split key and value.
default_value (str): If no value is given this value is used.
Returns:
dict: A dictionary with first column as key and second as value. | ['Reads', 'lines', 'of', 'a', 'text', 'file', 'with', 'two', 'columns', 'as', 'key', '/', 'value', 'dictionary', '.'] | train | https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/utils/textfile.py#L54-L76 |
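A minimal usage sketch of the function above. The import path follows the file path shown in the row (audiomate/utils/textfile.py); the file name and its contents are hypothetical, and the expected result is read off the code above.

```python
from audiomate.utils import textfile

# Hypothetical input file "utt2spk.txt":
#   utt-1 speaker-a
#   utt-2 speaker-b
#   utt-3
mapping = textfile.read_key_value_lines(
    'utt2spk.txt', separator=' ', default_value='unknown')
# Expected result, per the code above:
# {'utt-1': 'speaker-a', 'utt-2': 'speaker-b', 'utt-3': 'unknown'}
```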
5,101 | ff0000/scarlet | scarlet/versioning/view_mixins.py | PreviewableObject.get_object | def get_object(self, queryset=None):
"""
Returns the object the view is displaying.
Copied from SingleObjectMixin except that this allows
us to lookup preview objects.
"""
schema = manager.get_schema()
vid = None
if self.request.GET.get('vid') and self.request.user.is_staff and \
self.request.user.is_active:
try:
schema = 'public'
vid = int(self.request.GET.get('vid'))
queryset = self.model.normal.filter(vid=vid)
except ValueError:
pass
with manager.SwitchSchema(schema):
# Use a custom queryset if provided
if queryset is None:
queryset = self.get_queryset()
# Next, try looking up by primary key.
pk = self.kwargs.get(self.pk_url_kwarg, None)
slug = self.kwargs.get(self.slug_url_kwarg, None)
if pk is not None:
if vid:
queryset = queryset.filter(vid=vid)
else:
queryset = queryset.filter(object_id=pk)
# Next, try looking up by slug.
elif slug is not None:
slug_field = self.get_slug_field()
queryset = queryset.filter(**{slug_field: slug})
# If none of those are defined, it's an error.
else:
raise AttributeError(u"View %s must be called with "
u"either an object pk or a slug."
% self.__class__.__name__)
try:
obj = queryset.get()
except queryset.model.DoesNotExist:
raise http.Http404(
u"No %(verbose_name)s found matching the query" %
{'verbose_name': queryset.model._meta.verbose_name})
return obj | python | def get_object(self, queryset=None):
"""
Returns the object the view is displaying.
Copied from SingleObjectMixin except that this allows
us to lookup preview objects.
"""
schema = manager.get_schema()
vid = None
if self.request.GET.get('vid') and self.request.user.is_staff and \
self.request.user.is_active:
try:
schema = 'public'
vid = int(self.request.GET.get('vid'))
queryset = self.model.normal.filter(vid=vid)
except ValueError:
pass
with manager.SwitchSchema(schema):
# Use a custom queryset if provided
if queryset is None:
queryset = self.get_queryset()
# Next, try looking up by primary key.
pk = self.kwargs.get(self.pk_url_kwarg, None)
slug = self.kwargs.get(self.slug_url_kwarg, None)
if pk is not None:
if vid:
queryset = queryset.filter(vid=vid)
else:
queryset = queryset.filter(object_id=pk)
# Next, try looking up by slug.
elif slug is not None:
slug_field = self.get_slug_field()
queryset = queryset.filter(**{slug_field: slug})
# If none of those are defined, it's an error.
else:
raise AttributeError(u"View %s must be called with "
u"either an object pk or a slug."
% self.__class__.__name__)
try:
obj = queryset.get()
except queryset.model.DoesNotExist:
raise http.Http404(
u"No %(verbose_name)s found matching the query" %
{'verbose_name': queryset.model._meta.verbose_name})
return obj | ['def', 'get_object', '(', 'self', ',', 'queryset', '=', 'None', ')', ':', 'schema', '=', 'manager', '.', 'get_schema', '(', ')', 'vid', '=', 'None', 'if', 'self', '.', 'request', '.', 'GET', '.', 'get', '(', "'vid'", ')', 'and', 'self', '.', 'request', '.', 'user', '.', 'is_staff', 'and', 'self', '.', 'request', '.', 'user', '.', 'is_active', ':', 'try', ':', 'schema', '=', "'public'", 'vid', '=', 'int', '(', 'self', '.', 'request', '.', 'GET', '.', 'get', '(', "'vid'", ')', ')', 'queryset', '=', 'self', '.', 'model', '.', 'normal', '.', 'filter', '(', 'vid', '=', 'vid', ')', 'except', 'ValueError', ':', 'pass', 'with', 'manager', '.', 'SwitchSchema', '(', 'schema', ')', ':', '# Use a custom queryset if provided', 'if', 'queryset', 'is', 'None', ':', 'queryset', '=', 'self', '.', 'get_queryset', '(', ')', '# Next, try looking up by primary key.', 'pk', '=', 'self', '.', 'kwargs', '.', 'get', '(', 'self', '.', 'pk_url_kwarg', ',', 'None', ')', 'slug', '=', 'self', '.', 'kwargs', '.', 'get', '(', 'self', '.', 'slug_url_kwarg', ',', 'None', ')', 'if', 'pk', 'is', 'not', 'None', ':', 'if', 'vid', ':', 'queryset', '=', 'queryset', '.', 'filter', '(', 'vid', '=', 'vid', ')', 'else', ':', 'queryset', '=', 'queryset', '.', 'filter', '(', 'object_id', '=', 'pk', ')', '# Next, try looking up by slug.', 'elif', 'slug', 'is', 'not', 'None', ':', 'slug_field', '=', 'self', '.', 'get_slug_field', '(', ')', 'queryset', '=', 'queryset', '.', 'filter', '(', '*', '*', '{', 'slug_field', ':', 'slug', '}', ')', "# If none of those are defined, it's an error.", 'else', ':', 'raise', 'AttributeError', '(', 'u"View %s must be called with "', 'u"either an object pk or a slug."', '%', 'self', '.', '__class__', '.', '__name__', ')', 'try', ':', 'obj', '=', 'queryset', '.', 'get', '(', ')', 'except', 'queryset', '.', 'model', '.', 'DoesNotExist', ':', 'raise', 'http', '.', 'Http404', '(', 'u"No %(verbose_name)s found matching the query"', '%', '{', "'verbose_name'", ':', 'queryset', '.', 'model', '.', '_meta', '.', 'verbose_name', '}', ')', 'return', 'obj'] | Returns the object the view is displaying.
Copied from SingleObjectMixin except that this allows
us to lookup preview objects. | ['Returns', 'the', 'object', 'the', 'view', 'is', 'displaying', '.'] | train | https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/versioning/view_mixins.py#L13-L64 |
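A sketch of how this mixin is typically combined with a Django detail view. Only the mixin import path comes from the row above; `Article` and its app module are hypothetical stand-ins for a scarlet-versioned model.

```python
from django.views.generic import DetailView

from scarlet.versioning.view_mixins import PreviewableObject
from myapp.models import Article  # hypothetical versioned model

class ArticleDetailView(PreviewableObject, DetailView):
    model = Article
    slug_url_kwarg = 'slug'

# Active staff users can append ?vid=<version id> to the URL to preview a
# specific (unpublished) version; everyone else gets the normal schema lookup.
```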
5,102 | miccoli/pyownet | src/pyownet/protocol.py | _Proxy.present | def present(self, path, timeout=0):
"""returns True if there is an entity at path"""
ret, data = self.sendmess(MSG_PRESENCE, str2bytez(path),
timeout=timeout)
assert ret <= 0 and not data, (ret, data)
if ret < 0:
return False
else:
return True | python | def present(self, path, timeout=0):
"""returns True if there is an entity at path"""
ret, data = self.sendmess(MSG_PRESENCE, str2bytez(path),
timeout=timeout)
assert ret <= 0 and not data, (ret, data)
if ret < 0:
return False
else:
return True | ['def', 'present', '(', 'self', ',', 'path', ',', 'timeout', '=', '0', ')', ':', 'ret', ',', 'data', '=', 'self', '.', 'sendmess', '(', 'MSG_PRESENCE', ',', 'str2bytez', '(', 'path', ')', ',', 'timeout', '=', 'timeout', ')', 'assert', 'ret', '<=', '0', 'and', 'not', 'data', ',', '(', 'ret', ',', 'data', ')', 'if', 'ret', '<', '0', ':', 'return', 'False', 'else', ':', 'return', 'True'] | returns True if there is an entity at path | ['returns', 'True', 'if', 'there', 'is', 'an', 'entity', 'at', 'path'] | train | https://github.com/miccoli/pyownet/blob/190afea6a72705772b942d7929bc0aa6561043e0/src/pyownet/protocol.py#L586-L595 |
5,103 | Stewori/pytypes | pytypes/type_util.py | is_Type | def is_Type(tp):
"""Python version independent check if an object is a type.
For Python 3.7 onwards(?) this is not equivalent to
``isinstance(tp, type)`` any more, as that call would return
``False`` for PEP 484 types.
Tested with CPython 2.7, 3.5, 3.6, 3.7 and Jython 2.7.1.
"""
if isinstance(tp, type):
return True
try:
typing._type_check(tp, '')
return True
except TypeError:
return False | python | def is_Type(tp):
"""Python version independent check if an object is a type.
For Python 3.7 onwards(?) this is not equivalent to
``isinstance(tp, type)`` any more, as that call would return
``False`` for PEP 484 types.
Tested with CPython 2.7, 3.5, 3.6, 3.7 and Jython 2.7.1.
"""
if isinstance(tp, type):
return True
try:
typing._type_check(tp, '')
return True
except TypeError:
return False | ['def', 'is_Type', '(', 'tp', ')', ':', 'if', 'isinstance', '(', 'tp', ',', 'type', ')', ':', 'return', 'True', 'try', ':', 'typing', '.', '_type_check', '(', 'tp', ',', "''", ')', 'return', 'True', 'except', 'TypeError', ':', 'return', 'False'] | Python version independent check if an object is a type.
For Python 3.7 onwards(?) this is not equivalent to
``isinstance(tp, type)`` any more, as that call would return
``False`` for PEP 484 types.
Tested with CPython 2.7, 3.5, 3.6, 3.7 and Jython 2.7.1. | ['Python', 'version', 'independent', 'check', 'if', 'an', 'object', 'is', 'a', 'type', '.', 'For', 'Python', '3', '.', '7', 'onwards', '(', '?', ')', 'this', 'is', 'not', 'equivalent', 'to', 'isinstance', '(', 'tp', 'type', ')', 'any', 'more', 'as', 'that', 'call', 'would', 'return', 'False', 'for', 'PEP', '484', 'types', '.', 'Tested', 'with', 'CPython', '2', '.', '7', '3', '.', '5', '3', '.', '6', '3', '.', '7', 'and', 'Jython', '2', '.', '7', '.', '1', '.'] | train | https://github.com/Stewori/pytypes/blob/b814d38709e84c0e0825caf8b721c20eb5a8ab3b/pytypes/type_util.py#L320-L333 |
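A short illustration of the behaviour described in the docstring; the import is taken from the file path in the row above.

```python
import typing

from pytypes.type_util import is_Type

print(is_Type(int))                    # True: a plain class
print(is_Type(typing.List[int]))       # True: a PEP 484 typing construct
print(is_Type(typing.Optional[str]))   # True: Union types pass _type_check
print(is_Type(42))                     # False: not a type at all
```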
5,104 | angr/angr | angr/analyses/calling_convention.py | CallingConventionAnalysis._analyze_function | def _analyze_function(self):
"""
Go over the variable information in variable manager for this function, and return all uninitialized
register/stack variables.
:return:
"""
if not self._function.is_simprocedure \
and not self._function.is_plt \
and not self._variable_manager.has_function_manager(self._function.addr):
l.warning("Please run variable recovery on %s before analyzing its calling conventions.",
repr(self._function))
return None
vm = self._variable_manager[self._function.addr]
input_variables = vm.input_variables()
input_args = self._args_from_vars(input_variables)
# TODO: properly decide sp_delta
sp_delta = self.project.arch.bytes if self.project.arch.call_pushes_ret else 0
cc = SimCC.find_cc(self.project.arch, list(input_args), sp_delta)
if cc is None:
l.warning('_analyze_function(): Cannot find a calling convention that fits the given arguments.')
return cc | python | def _analyze_function(self):
"""
Go over the variable information in variable manager for this function, and return all uninitialized
register/stack variables.
:return:
"""
if not self._function.is_simprocedure \
and not self._function.is_plt \
and not self._variable_manager.has_function_manager(self._function.addr):
l.warning("Please run variable recovery on %s before analyzing its calling conventions.",
repr(self._function))
return None
vm = self._variable_manager[self._function.addr]
input_variables = vm.input_variables()
input_args = self._args_from_vars(input_variables)
# TODO: properly decide sp_delta
sp_delta = self.project.arch.bytes if self.project.arch.call_pushes_ret else 0
cc = SimCC.find_cc(self.project.arch, list(input_args), sp_delta)
if cc is None:
l.warning('_analyze_function(): Cannot find a calling convention that fits the given arguments.')
return cc | ['def', '_analyze_function', '(', 'self', ')', ':', 'if', 'not', 'self', '.', '_function', '.', 'is_simprocedure', 'and', 'not', 'self', '.', '_function', '.', 'is_plt', 'and', 'not', 'self', '.', '_variable_manager', '.', 'has_function_manager', '(', 'self', '.', '_function', '.', 'addr', ')', ':', 'l', '.', 'warning', '(', '"Please run variable recovery on %s before analyzing its calling conventions."', ',', 'repr', '(', 'self', '.', '_function', ')', ')', 'return', 'None', 'vm', '=', 'self', '.', '_variable_manager', '[', 'self', '.', '_function', '.', 'addr', ']', 'input_variables', '=', 'vm', '.', 'input_variables', '(', ')', 'input_args', '=', 'self', '.', '_args_from_vars', '(', 'input_variables', ')', '# TODO: properly decide sp_delta', 'sp_delta', '=', 'self', '.', 'project', '.', 'arch', '.', 'bytes', 'if', 'self', '.', 'project', '.', 'arch', '.', 'call_pushes_ret', 'else', '0', 'cc', '=', 'SimCC', '.', 'find_cc', '(', 'self', '.', 'project', '.', 'arch', ',', 'list', '(', 'input_args', ')', ',', 'sp_delta', ')', 'if', 'cc', 'is', 'None', ':', 'l', '.', 'warning', '(', "'_analyze_function(): Cannot find a calling convention that fits the given arguments.'", ')', 'return', 'cc'] | Go over the variable information in variable manager for this function, and return all uninitialized
register/stack variables.
:return: | ['Go', 'over', 'the', 'variable', 'information', 'in', 'variable', 'manager', 'for', 'this', 'function', 'and', 'return', 'all', 'uninitialized', 'register', '/', 'stack', 'variables', '.'] | train | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/calling_convention.py#L48-L77 |
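A rough driver sketch for the analysis above; the analysis names and attributes ('CFGFast', 'VariableRecoveryFast', 'CallingConvention', '.cc') reflect how angr registers them, and the binary path and function name are stand-ins.

```python
import angr

proj = angr.Project('/bin/true', auto_load_libs=False)  # stand-in binary
proj.analyses.CFGFast()
func = proj.kb.functions.function(name='main')

# Per the warning above, variable recovery has to run before the CC analysis.
proj.analyses.VariableRecoveryFast(func)
cc = proj.analyses.CallingConvention(func).cc
print(cc)  # a SimCC subclass fitting the recovered input arguments, or None
```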
5,105 | IdentityPython/pysaml2 | src/saml2/cache.py | Cache.entities | def entities(self, name_id):
""" Returns all the entities of assertions for a subject, disregarding
whether the assertion still is valid or not.
:param name_id: The subject identifier, a NameID instance
:return: A possibly empty list of entity identifiers
"""
cni = code(name_id)
return list(self._db[cni].keys()) | python | def entities(self, name_id):
""" Returns all the entities of assertions for a subject, disregarding
whether the assertion still is valid or not.
:param name_id: The subject identifier, a NameID instance
:return: A possibly empty list of entity identifiers
"""
cni = code(name_id)
return list(self._db[cni].keys()) | ['def', 'entities', '(', 'self', ',', 'name_id', ')', ':', 'cni', '=', 'code', '(', 'name_id', ')', 'return', 'list', '(', 'self', '.', '_db', '[', 'cni', ']', '.', 'keys', '(', ')', ')'] | Returns all the entities of assertions for a subject, disregarding
whether the assertion still is valid or not.
:param name_id: The subject identifier, a NameID instance
:return: A possibly empty list of entity identifiers | ['Returns', 'all', 'the', 'entities', 'of', 'assertions', 'for', 'a', 'subject', 'disregarding', 'whether', 'the', 'assertion', 'still', 'is', 'valid', 'or', 'not', '.'] | train | https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/cache.py#L143-L151 |
5,106 | JukeboxPipeline/jukebox-core | src/jukeboxcore/gui/widgetdelegate.py | WD_TreeView.index_at_event | def index_at_event(self, event):
"""Get the index under the position of the given MouseEvent
This implementation takes the indentation into account.
:param event: the mouse event
:type event: :class:`QtGui.QMouseEvent`
:returns: the index
:rtype: :class:`QtCore.QModelIndex`
:raises: None
"""
# find index at mouse position
globalpos = event.globalPos()
viewport = self.viewport()
pos = viewport.mapFromGlobal(globalpos)
i = self.indexAt(pos)
n = self.get_total_indentation(i)
if pos.x() > n:
return i
else:
return QtCore.QModelIndex() | python | def index_at_event(self, event):
"""Get the index under the position of the given MouseEvent
This implementation takes the indentation into account.
:param event: the mouse event
:type event: :class:`QtGui.QMouseEvent`
:returns: the index
:rtype: :class:`QtCore.QModelIndex`
:raises: None
"""
# find index at mouse position
globalpos = event.globalPos()
viewport = self.viewport()
pos = viewport.mapFromGlobal(globalpos)
i = self.indexAt(pos)
n = self.get_total_indentation(i)
if pos.x() > n:
return i
else:
return QtCore.QModelIndex() | ['def', 'index_at_event', '(', 'self', ',', 'event', ')', ':', '# find index at mouse position', 'globalpos', '=', 'event', '.', 'globalPos', '(', ')', 'viewport', '=', 'self', '.', 'viewport', '(', ')', 'pos', '=', 'viewport', '.', 'mapFromGlobal', '(', 'globalpos', ')', 'i', '=', 'self', '.', 'indexAt', '(', 'pos', ')', 'n', '=', 'self', '.', 'get_total_indentation', '(', 'i', ')', 'if', 'pos', '.', 'x', '(', ')', '>', 'n', ':', 'return', 'i', 'else', ':', 'return', 'QtCore', '.', 'QModelIndex', '(', ')'] | Get the index under the position of the given MouseEvent
This implementation takes the indentation into account.
:param event: the mouse event
:type event: :class:`QtGui.QMouseEvent`
:returns: the index
:rtype: :class:`QtCore.QModelIndex`
:raises: None | ['Get', 'the', 'index', 'under', 'the', 'position', 'of', 'the', 'given', 'MouseEvent'] | train | https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgetdelegate.py#L549-L569 |
5,107 | TeamHG-Memex/eli5 | eli5/sklearn/explain_prediction.py | explain_prediction_sklearn | def explain_prediction_sklearn(estimator, doc,
vec=None,
top=None,
top_targets=None,
target_names=None,
targets=None,
feature_names=None,
feature_re=None,
feature_filter=None,
vectorized=False):
""" Return an explanation of a scikit-learn estimator """
return explain_prediction_sklearn_not_supported(estimator, doc) | python | def explain_prediction_sklearn(estimator, doc,
vec=None,
top=None,
top_targets=None,
target_names=None,
targets=None,
feature_names=None,
feature_re=None,
feature_filter=None,
vectorized=False):
""" Return an explanation of a scikit-learn estimator """
return explain_prediction_sklearn_not_supported(estimator, doc) | ['def', 'explain_prediction_sklearn', '(', 'estimator', ',', 'doc', ',', 'vec', '=', 'None', ',', 'top', '=', 'None', ',', 'top_targets', '=', 'None', ',', 'target_names', '=', 'None', ',', 'targets', '=', 'None', ',', 'feature_names', '=', 'None', ',', 'feature_re', '=', 'None', ',', 'feature_filter', '=', 'None', ',', 'vectorized', '=', 'False', ')', ':', 'return', 'explain_prediction_sklearn_not_supported', '(', 'estimator', ',', 'doc', ')'] | Return an explanation of a scikit-learn estimator | ['Return', 'an', 'explanation', 'of', 'a', 'scikit', '-', 'learn', 'estimator'] | train | https://github.com/TeamHG-Memex/eli5/blob/371b402a0676295c05e582a2dd591f7af476b86b/eli5/sklearn/explain_prediction.py#L77-L88 |
5,108 | polysquare/cmake-ast | cmakeast/ast.py | _is_really_comment | def _is_really_comment(tokens, index):
"""Return true if the token at index is really a comment."""
if tokens[index].type == TokenType.Comment:
return True
# Really a comment in disguise!
try:
if tokens[index].content.lstrip()[0] == "#":
return True
except IndexError:
return False | python | def _is_really_comment(tokens, index):
"""Return true if the token at index is really a comment."""
if tokens[index].type == TokenType.Comment:
return True
# Really a comment in disguise!
try:
if tokens[index].content.lstrip()[0] == "#":
return True
except IndexError:
return False | ['def', '_is_really_comment', '(', 'tokens', ',', 'index', ')', ':', 'if', 'tokens', '[', 'index', ']', '.', 'type', '==', 'TokenType', '.', 'Comment', ':', 'return', 'True', '# Really a comment in disguise!', 'try', ':', 'if', 'tokens', '[', 'index', ']', '.', 'content', '.', 'lstrip', '(', ')', '[', '0', ']', '==', '"#"', ':', 'return', 'True', 'except', 'IndexError', ':', 'return', 'False'] | Return true if the token at index is really a comment. | ['Return', 'true', 'if', 'the', 'token', 'at', 'index', 'is', 'really', 'a', 'comment', '.'] | train | https://github.com/polysquare/cmake-ast/blob/431a32d595d76f1f8f993eb6ddcc79effbadff9d/cmakeast/ast.py#L522-L532 |
5,109 | PMBio/limix-backup | limix/deprecated/utils/preprocess.py | variance_K | def variance_K(K, verbose=False):
"""estimate the variance explained by K"""
c = SP.sum((SP.eye(len(K)) - (1.0 / len(K)) * SP.ones(K.shape)) * SP.array(K))
scalar = (len(K) - 1) / c
return 1.0/scalar | python | def variance_K(K, verbose=False):
"""estimate the variance explained by K"""
c = SP.sum((SP.eye(len(K)) - (1.0 / len(K)) * SP.ones(K.shape)) * SP.array(K))
scalar = (len(K) - 1) / c
return 1.0/scalar | ['def', 'variance_K', '(', 'K', ',', 'verbose', '=', 'False', ')', ':', 'c', '=', 'SP', '.', 'sum', '(', '(', 'SP', '.', 'eye', '(', 'len', '(', 'K', ')', ')', '-', '(', '1.0', '/', 'len', '(', 'K', ')', ')', '*', 'SP', '.', 'ones', '(', 'K', '.', 'shape', ')', ')', '*', 'SP', '.', 'array', '(', 'K', ')', ')', 'scalar', '=', '(', 'len', '(', 'K', ')', '-', '1', ')', '/', 'c', 'return', '1.0', '/', 'scalar'] | estimate the variance explained by K | ['estimate', 'the', 'variance', 'explained', 'by', 'K'] | train | https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/deprecated/utils/preprocess.py#L22-L26 |
5,110 | scottrice/pysteam | pysteam/legacy/game.py | Game.set_image | def set_image(self, user, image_path):
"""Sets a custom image for the game. `image_path` should refer to
an image file on disk"""
_, ext = os.path.splitext(image_path)
shutil.copy(image_path, self._custom_image_path(user, ext)) | python | def set_image(self, user, image_path):
"""Sets a custom image for the game. `image_path` should refer to
an image file on disk"""
_, ext = os.path.splitext(image_path)
shutil.copy(image_path, self._custom_image_path(user, ext)) | ['def', 'set_image', '(', 'self', ',', 'user', ',', 'image_path', ')', ':', '_', ',', 'ext', '=', 'os', '.', 'path', '.', 'splitext', '(', 'image_path', ')', 'shutil', '.', 'copy', '(', 'image_path', ',', 'self', '.', '_custom_image_path', '(', 'user', ',', 'ext', ')', ')'] | Sets a custom image for the game. `image_path` should refer to
an image file on disk | ['Sets', 'a', 'custom', 'image', 'for', 'the', 'game', '.', 'image_path', 'should', 'refer', 'to', 'an', 'image', 'file', 'on', 'disk'] | train | https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/legacy/game.py#L50-L54 |
5,111 | etcher-be/elib_config | elib_config/_file/_config_example.py | _aggregate_config_values | def _aggregate_config_values(config_values: typing.List[ConfigValue]) -> dict:
"""
Returns a (sorted)
:param config_values:
:type config_values:
:return:
:rtype:
"""
_keys: defaultdict = _nested_default_dict()
_sorted_values = sorted(config_values, key=lambda x: x.name)
for value in _sorted_values:
value_keys = value.path.split(ELIBConfig.config_sep_str)
this_config_key = _keys
for sub_key in value_keys[:-1]:
this_config_key = this_config_key[sub_key]
this_config_key[value_keys[-1]] = value
return _default_dict_to_dict(_keys) | python | def _aggregate_config_values(config_values: typing.List[ConfigValue]) -> dict:
"""
Returns a (sorted)
:param config_values:
:type config_values:
:return:
:rtype:
"""
_keys: defaultdict = _nested_default_dict()
_sorted_values = sorted(config_values, key=lambda x: x.name)
for value in _sorted_values:
value_keys = value.path.split(ELIBConfig.config_sep_str)
this_config_key = _keys
for sub_key in value_keys[:-1]:
this_config_key = this_config_key[sub_key]
this_config_key[value_keys[-1]] = value
return _default_dict_to_dict(_keys) | ['def', '_aggregate_config_values', '(', 'config_values', ':', 'typing', '.', 'List', '[', 'ConfigValue', ']', ')', '->', 'dict', ':', '_keys', ':', 'defaultdict', '=', '_nested_default_dict', '(', ')', '_sorted_values', '=', 'sorted', '(', 'config_values', ',', 'key', '=', 'lambda', 'x', ':', 'x', '.', 'name', ')', 'for', 'value', 'in', '_sorted_values', ':', 'value_keys', '=', 'value', '.', 'path', '.', 'split', '(', 'ELIBConfig', '.', 'config_sep_str', ')', 'this_config_key', '=', '_keys', 'for', 'sub_key', 'in', 'value_keys', '[', ':', '-', '1', ']', ':', 'this_config_key', '=', 'this_config_key', '[', 'sub_key', ']', 'this_config_key', '[', 'value_keys', '[', '-', '1', ']', ']', '=', 'value', 'return', '_default_dict_to_dict', '(', '_keys', ')'] | Returns a (sorted)
:param config_values:
:type config_values:
:return:
:rtype: | ['Returns', 'a', '(', 'sorted', ')'] | train | https://github.com/etcher-be/elib_config/blob/5d8c839e84d70126620ab0186dc1f717e5868bd0/elib_config/_file/_config_example.py#L41-L58 |
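A standalone sketch of the same nesting idea (not the library's API): separated value paths are folded into nested dicts, with the leaf holding the value object. The '__' separator is illustrative; the real code uses ELIBConfig.config_sep_str and stores ConfigValue objects at the leaves.

```python
from collections import defaultdict

def nested():
    return defaultdict(nested)

def aggregate(paths, sep='__'):
    root = nested()
    for path in sorted(paths):          # sorted, as in the function above
        *parents, leaf = path.split(sep)
        node = root
        for key in parents:
            node = node[key]
        node[leaf] = path                # the real code stores the ConfigValue here
    return root

print(aggregate(['app__debug', 'app__log__level', 'db__url']))
# -> {'app': {'debug': ..., 'log': {'level': ...}}, 'db': {'url': ...}} (as defaultdicts)
```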
5,112 | eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | match_filter | def match_filter(template_names, template_list, st, threshold,
threshold_type, trig_int, plotvar, plotdir='.',
xcorr_func=None, concurrency=None, cores=None,
debug=0, plot_format='png', output_cat=False,
output_event=True, extract_detections=False,
arg_check=True, full_peaks=False, peak_cores=None, **kwargs):
"""
Main matched-filter detection function.
Over-arching code to run the correlations of given templates with a
day of seismic data and output the detections based on a given threshold.
For a functional example see the tutorials.
:type template_names: list
:param template_names:
List of template names in the same order as template_list
:type template_list: list
:param template_list:
A list of templates of which each template is a
:class:`obspy.core.stream.Stream` of obspy traces containing seismic
data and header information.
:type st: :class:`obspy.core.stream.Stream`
:param st:
A Stream object containing all the data available and
required for the correlations with templates given. For efficiency
this should contain no excess traces which are not in one or more of
the templates. This will now remove excess traces internally, but
will copy the stream and work on the copy, leaving your input stream
untouched.
:type threshold: float
:param threshold: A threshold value set based on the threshold_type
:type threshold_type: str
:param threshold_type:
The type of threshold to be used, can be MAD, absolute or av_chan_corr.
See Note on thresholding below.
:type trig_int: float
:param trig_int: Minimum gap between detections in seconds.
:type plotvar: bool
:param plotvar: Turn plotting on or off
:type plotdir: str
:param plotdir:
Path to plotting folder, plots will be output here, defaults to run
location.
:type xcorr_func: str or callable
:param xcorr_func:
A str of a registered xcorr function or a callable for implementing
a custom xcorr function. For more information see:
:func:`eqcorrscan.utils.correlate.register_array_xcorr`
:type concurrency: str
:param concurrency:
The type of concurrency to apply to the xcorr function. Options are
'multithread', 'multiprocess', 'concurrent'. For more details see
:func:`eqcorrscan.utils.correlate.get_stream_xcorr`
:type cores: int
:param cores: Number of cores to use
:type debug: int
:param debug:
Debug output level, the bigger the number, the more the output.
:type plot_format: str
:param plot_format: Specify format of output plots if saved
:type output_cat: bool
:param output_cat:
Specifies if matched_filter will output an obspy.Catalog class
containing events for each detection. Default is False, in which case
matched_filter will output a list of detection classes, as normal.
:type output_event: bool
:param output_event:
Whether to include events in the Detection objects, defaults to True,
but for large cases you may want to turn this off as Event objects
can be quite memory intensive.
:type extract_detections: bool
:param extract_detections:
Specifies whether or not to return a list of streams, one stream per
detection.
:type arg_check: bool
:param arg_check:
Check arguments, defaults to True, but if running in bulk, and you are
certain of your arguments, then set to False.
:type full_peaks: bool
:param full_peaks: See `eqcorrscan.core.findpeaks.find_peaks2_short`.
:type peak_cores: int
:param peak_cores:
Number of processes to use for parallel peak-finding (if different to
`cores`).
.. note::
**Returns:**
If neither `output_cat` nor `extract_detections` is set to `True`,
then only the list of :class:`eqcorrscan.core.match_filter.Detection`'s
will be output:
:return:
:class:`eqcorrscan.core.match_filter.Detection` detections for each
detection made.
:rtype: list
If `output_cat` is set to `True`, then the
:class:`obspy.core.event.Catalog` will also be output:
:return: Catalog containing events for each detection, see above.
:rtype: :class:`obspy.core.event.Catalog`
If `extract_detections` is set to `True` then the list of
:class:`obspy.core.stream.Stream`'s will also be output.
:return:
list of :class:`obspy.core.stream.Stream`'s for each detection, see
above.
:rtype: list
.. note::
If your data contain gaps these must be padded with zeros before
using this function. The `eqcorrscan.utils.pre_processing` functions
will provide gap-filled data in the appropriate format. Note that if
you pad your data with zeros before filtering or resampling the gaps
will not be all zeros after filtering. This will result in the
calculation of spurious correlations in the gaps.
.. Note::
Detections are not corrected for `pre-pick`, the
detection.detect_time corresponds to the beginning of the earliest
template channel at detection.
.. note::
**Data overlap:**
Internally this routine shifts and trims the data according to the
offsets in the template (e.g. if trace 2 starts 2 seconds after trace 1
in the template then the continuous data will be shifted by 2 seconds
to align peak correlations prior to summing). Because of this,
detections at the start and end of continuous data streams
**may be missed**. The maximum time-period that might be missing
detections is the maximum offset in the template.
To work around this, if you are conducting matched-filter detections
through long-duration continuous data, we suggest using some overlap
(a few seconds, on the order of the maximum offset in the templates)
in the continuous data. You will then need to post-process the
detections (which should be done anyway to remove duplicates).
.. note::
**Thresholding:**
**MAD** threshold is calculated as the:
.. math::
threshold {\\times} (median(abs(cccsum)))
where :math:`cccsum` is the cross-correlation sum for a given template.
**absolute** threshold is a true absolute threshold based on the
cccsum value.
**av_chan_corr** is based on the mean values of single-channel
cross-correlations assuming all data are present as required for the
template, e.g:
.. math::
av\_chan\_corr\_thresh=threshold \\times (cccsum\ /\ len(template))
where :math:`template` is a single template from the input and the
length is the number of channels within this template.
.. note::
The output_cat flag will create an :class:`obspy.core.event.Catalog`
containing one event for each
:class:`eqcorrscan.core.match_filter.Detection`'s generated by
match_filter. Each event will contain a number of comments dealing
with correlation values and channels used for the detection. Each
channel used for the detection will have a corresponding
:class:`obspy.core.event.Pick` which will contain time and
waveform information. **HOWEVER**, the user should note that
the pick times do not account for the prepick times inherent in
each template. For example, if a template trace starts 0.1 seconds
before the actual arrival of that phase, then the pick time generated
by match_filter for that phase will be 0.1 seconds early.
.. Note::
xcorr_func can be used as follows:
.. rubric::xcorr_func argument example
>>> import obspy
>>> import numpy as np
>>> from eqcorrscan.core.match_filter import match_filter
>>> from eqcorrscan.utils.correlate import time_multi_normxcorr
>>> # define a custom xcorr function
>>> def custom_normxcorr(templates, stream, pads, *args, **kwargs):
... # Just to keep example short call other xcorr function
... # in practice you would define your own function here
... print('calling custom xcorr function')
... return time_multi_normxcorr(templates, stream, pads)
>>> # generate some toy templates and stream
>>> random = np.random.RandomState(42)
>>> template = obspy.read()
>>> stream = obspy.read()
>>> for num, tr in enumerate(stream): # iter st and embed templates
... data = tr.data
... tr.data = random.randn(6000) * 5
... tr.data[100: 100 + len(data)] = data
>>> # call match_filter and ensure the custom function is used
>>> detections = match_filter(
... template_names=['1'], template_list=[template], st=stream,
... threshold=.5, threshold_type='absolute', trig_int=1,
... plotvar=False,
... xcorr_func=custom_normxcorr) # doctest:+ELLIPSIS
calling custom xcorr function...
"""
from eqcorrscan.utils.plotting import _match_filter_plot
if arg_check:
# Check the arguments to be nice - if arguments wrong type the parallel
# output for the error won't be useful
if not isinstance(template_names, list):
raise MatchFilterError('template_names must be of type: list')
if not isinstance(template_list, list):
raise MatchFilterError('templates must be of type: list')
if not len(template_list) == len(template_names):
raise MatchFilterError('Not the same number of templates as names')
for template in template_list:
if not isinstance(template, Stream):
msg = 'template in template_list must be of type: ' + \
'obspy.core.stream.Stream'
raise MatchFilterError(msg)
if not isinstance(st, Stream):
msg = 'st must be of type: obspy.core.stream.Stream'
raise MatchFilterError(msg)
if str(threshold_type) not in [str('MAD'), str('absolute'),
str('av_chan_corr')]:
msg = 'threshold_type must be one of: MAD, absolute, av_chan_corr'
raise MatchFilterError(msg)
for tr in st:
if not tr.stats.sampling_rate == st[0].stats.sampling_rate:
raise MatchFilterError('Sampling rates are not equal %f: %f' %
(tr.stats.sampling_rate,
st[0].stats.sampling_rate))
for template in template_list:
for tr in template:
if not tr.stats.sampling_rate == st[0].stats.sampling_rate:
raise MatchFilterError(
'Template sampling rate does not '
'match continuous data')
_spike_test(st)
if cores is not None:
parallel = True
else:
parallel = False
# Copy the stream here because we will muck about with it
stream = st.copy()
templates = copy.deepcopy(template_list)
_template_names = copy.deepcopy(template_names)
# Debug option to confirm that the channel names match those in the
# templates
if debug >= 2:
template_stachan = []
data_stachan = []
for template in templates:
for tr in template:
if isinstance(tr.data, np.ma.core.MaskedArray):
raise MatchFilterError('Template contains masked array,'
' split first')
template_stachan.append(tr.stats.station + '.' +
tr.stats.channel)
for tr in stream:
data_stachan.append(tr.stats.station + '.' + tr.stats.channel)
template_stachan = list(set(template_stachan))
data_stachan = list(set(data_stachan))
debug_print('I have template info for these stations:\n' +
template_stachan.__str__() +
'\nI have daylong data for these stations:\n' +
data_stachan.__str__(), 3, debug)
# Perform a check that the continuous data are all the same length
min_start_time = min([tr.stats.starttime for tr in stream])
max_end_time = max([tr.stats.endtime for tr in stream])
longest_trace_length = stream[0].stats.sampling_rate * (max_end_time -
min_start_time)
longest_trace_length += 1
for tr in stream:
if not tr.stats.npts == longest_trace_length:
msg = 'Data are not equal length, padding short traces'
warnings.warn(msg)
start_pad = np.zeros(int(tr.stats.sampling_rate *
(tr.stats.starttime - min_start_time)))
end_pad = np.zeros(int(tr.stats.sampling_rate *
(max_end_time - tr.stats.endtime)))
# In some cases there will be one sample missing when sampling
# time-stamps are not set consistently between channels, this
# results in start_pad and end_pad being len==0
if len(start_pad) == 0 and len(end_pad) == 0:
debug_print("start and end pad are both zero, padding at one "
"end", 2, debug)
if (tr.stats.starttime - min_start_time) > (
max_end_time - tr.stats.endtime):
start_pad = np.zeros(
int(longest_trace_length - tr.stats.npts))
else:
end_pad = np.zeros(
int(longest_trace_length - tr.stats.npts))
tr.data = np.concatenate([start_pad, tr.data, end_pad])
# Perform check that all template lengths are internally consistent
for i, temp in enumerate(template_list):
if len(set([tr.stats.npts for tr in temp])) > 1:
msg = ('Template %s contains traces of differing length, this is '
'not currently supported' % _template_names[i])
raise MatchFilterError(msg)
outtic = time.clock()
debug_print('Ensuring all template channels have matches in'
' continuous data', 2, debug)
template_stachan = {}
# Work out what station-channel pairs are in the templates, including
# duplicate station-channel pairs. We will use this information to fill
# all templates with the same station-channel pairs as required by
# _template_loop.
for template in templates:
stachans_in_template = []
for tr in template:
stachans_in_template.append((tr.stats.network, tr.stats.station,
tr.stats.location, tr.stats.channel))
stachans_in_template = dict(Counter(stachans_in_template))
for stachan in stachans_in_template.keys():
stachans = stachans_in_template[stachan]
if stachan not in template_stachan.keys():
template_stachan.update({stachan: stachans})
elif stachans_in_template[stachan] > template_stachan[stachan]:
template_stachan.update({stachan: stachans})
# Remove un-matched channels from templates.
_template_stachan = copy.deepcopy(template_stachan)
for stachan in template_stachan.keys():
if not stream.select(network=stachan[0], station=stachan[1],
location=stachan[2], channel=stachan[3]):
# Remove stachan from list of dictionary of template_stachans
_template_stachan.pop(stachan)
# Remove template traces rather than adding NaN data
for template in templates:
if template.select(network=stachan[0], station=stachan[1],
location=stachan[2], channel=stachan[3]):
for tr in template.select(
network=stachan[0], station=stachan[1],
location=stachan[2], channel=stachan[3]):
template.remove(tr)
print('Removing template channel %s.%s.%s.%s due to'
' no matches in continuous data' %
(stachan[0], stachan[1], stachan[2], stachan[3]))
template_stachan = _template_stachan
# Remove un-needed channels from continuous data.
for tr in stream:
if not (tr.stats.network, tr.stats.station,
tr.stats.location, tr.stats.channel) in \
template_stachan.keys():
print('Removing channel in continuous data for %s.%s.%s.%s:'
' no match in template' %
(tr.stats.network, tr.stats.station, tr.stats.location,
tr.stats.channel))
stream.remove(tr)
# Check for duplicate channels
stachans = [(tr.stats.network, tr.stats.station,
tr.stats.location, tr.stats.channel) for tr in stream]
c_stachans = Counter(stachans)
for key in c_stachans.keys():
if c_stachans[key] > 1:
msg = ('Multiple channels for %s.%s.%s.%s, likely a data issue'
% (key[0], key[1], key[2], key[3]))
raise MatchFilterError(msg)
# Pad out templates to have all channels
_templates = []
used_template_names = []
for template, template_name in zip(templates, _template_names):
if len(template) == 0:
msg = ('No channels matching in continuous data for ' +
'template' + template_name)
warnings.warn(msg)
continue
for stachan in template_stachan.keys():
number_of_channels = len(template.select(
network=stachan[0], station=stachan[1], location=stachan[2],
channel=stachan[3]))
if number_of_channels < template_stachan[stachan]:
missed_channels = template_stachan[stachan] - \
number_of_channels
nulltrace = Trace()
nulltrace.stats.update(
{'network': stachan[0], 'station': stachan[1],
'location': stachan[2], 'channel': stachan[3],
'sampling_rate': template[0].stats.sampling_rate,
'starttime': template[0].stats.starttime,
'not_in_original': True})
nulltrace.data = np.array([np.NaN] * len(template[0].data),
dtype=np.float32)
for dummy in range(missed_channels):
template += nulltrace
template.sort()
_templates.append(template)
used_template_names.append(template_name)
# Quick check that this has all worked
if len(template) != max([len(t) for t in templates]):
raise MatchFilterError('Internal error forcing same template '
'lengths, report this error.')
templates = _templates
_template_names = used_template_names
debug_print('Starting the correlation run for these data', 2, debug)
for template in templates:
debug_print(template.__str__(), 3, debug)
debug_print(stream.__str__(), 3, debug)
multichannel_normxcorr = get_stream_xcorr(xcorr_func, concurrency)
[cccsums, no_chans, chans] = multichannel_normxcorr(
templates=templates, stream=stream, cores=cores, **kwargs)
if len(cccsums[0]) == 0:
raise MatchFilterError('Correlation has not run, zero length cccsum')
outtoc = time.clock()
debug_print(' '.join(['Looping over templates and streams took:',
str(outtoc - outtic), 's']), 0, debug)
debug_print('The shape of the returned cccsums is: %s\n'
'This is from %i templates\nCorrelated with %i channels of '
'data' % (cccsums.shape, len(templates), len(stream)), 2,
debug)
detections = []
if output_cat:
det_cat = Catalog()
if str(threshold_type) == str("absolute"):
thresholds = [threshold for _ in range(len(cccsums))]
elif str(threshold_type) == str('MAD'):
thresholds = [threshold * np.median(np.abs(cccsum))
for cccsum in cccsums]
else:
thresholds = [threshold * no_chans[i] for i in range(len(cccsums))]
if peak_cores is None:
peak_cores = cores
all_peaks = multi_find_peaks(
arr=cccsums, thresh=thresholds, debug=debug, parallel=parallel,
trig_int=int(trig_int * stream[0].stats.sampling_rate),
full_peaks=full_peaks, cores=peak_cores)
for i, cccsum in enumerate(cccsums):
if np.abs(np.mean(cccsum)) > 0.05:
warnings.warn('Mean is not zero! Check this!')
# Set up a trace object for the cccsum as this is easier to plot and
# maintains timing
if plotvar:
_match_filter_plot(
stream=stream, cccsum=cccsum, template_names=_template_names,
rawthresh=thresholds[i], plotdir=plotdir,
plot_format=plot_format, i=i)
if debug >= 4:
np.save(_template_names[i] +
stream[0].stats.starttime.datetime.strftime('%Y%j'),
cccsum)
debug_print(
' '.join(['Saved the cccsum to:', _template_names[i],
stream[0].stats.starttime.datetime.strftime('%Y%j')]),
4, debug)
if all_peaks[i]:
for peak in all_peaks[i]:
detecttime = (
stream[0].stats.starttime +
peak[1] / stream[0].stats.sampling_rate)
detection = Detection(
template_name=_template_names[i], detect_time=detecttime,
no_chans=no_chans[i], detect_val=peak[0],
threshold=thresholds[i], typeofdet='corr', chans=chans[i],
threshold_type=threshold_type, threshold_input=threshold)
if output_cat or output_event:
detection._calculate_event(template_st=templates[i])
detections.append(detection)
if output_cat:
det_cat.append(detection.event)
if extract_detections:
detection_streams = extract_from_stream(stream, detections)
del stream, templates
if output_cat and not extract_detections:
return detections, det_cat
elif not extract_detections:
return detections
elif extract_detections and not output_cat:
return detections, detection_streams
else:
return detections, det_cat, detection_streams | python | def match_filter(template_names, template_list, st, threshold,
threshold_type, trig_int, plotvar, plotdir='.',
xcorr_func=None, concurrency=None, cores=None,
debug=0, plot_format='png', output_cat=False,
output_event=True, extract_detections=False,
arg_check=True, full_peaks=False, peak_cores=None, **kwargs):
"""
Main matched-filter detection function.
Over-arching code to run the correlations of given templates with a
day of seismic data and output the detections based on a given threshold.
For a functional example see the tutorials.
:type template_names: list
:param template_names:
List of template names in the same order as template_list
:type template_list: list
:param template_list:
A list of templates of which each template is a
:class:`obspy.core.stream.Stream` of obspy traces containing seismic
data and header information.
:type st: :class:`obspy.core.stream.Stream`
:param st:
A Stream object containing all the data available and
required for the correlations with templates given. For efficiency
this should contain no excess traces which are not in one or more of
the templates. This will now remove excess traces internally, but
will copy the stream and work on the copy, leaving your input stream
untouched.
:type threshold: float
:param threshold: A threshold value set based on the threshold_type
:type threshold_type: str
:param threshold_type:
The type of threshold to be used, can be MAD, absolute or av_chan_corr.
See Note on thresholding below.
:type trig_int: float
:param trig_int: Minimum gap between detections in seconds.
:type plotvar: bool
:param plotvar: Turn plotting on or off
:type plotdir: str
:param plotdir:
Path to plotting folder, plots will be output here, defaults to run
location.
:type xcorr_func: str or callable
:param xcorr_func:
A str of a registered xcorr function or a callable for implementing
a custom xcorr function. For more information see:
:func:`eqcorrscan.utils.correlate.register_array_xcorr`
:type concurrency: str
:param concurrency:
The type of concurrency to apply to the xcorr function. Options are
'multithread', 'multiprocess', 'concurrent'. For more details see
:func:`eqcorrscan.utils.correlate.get_stream_xcorr`
:type cores: int
:param cores: Number of cores to use
:type debug: int
:param debug:
Debug output level, the bigger the number, the more the output.
:type plot_format: str
:param plot_format: Specify format of output plots if saved
:type output_cat: bool
:param output_cat:
Specifies if matched_filter will output an obspy.Catalog class
containing events for each detection. Default is False, in which case
matched_filter will output a list of detection classes, as normal.
:type output_event: bool
:param output_event:
Whether to include events in the Detection objects, defaults to True,
but for large cases you may want to turn this off as Event objects
can be quite memory intensive.
:type extract_detections: bool
:param extract_detections:
Specifies whether or not to return a list of streams, one stream per
detection.
:type arg_check: bool
:param arg_check:
Check arguments, defaults to True, but if running in bulk, and you are
certain of your arguments, then set to False.
:type full_peaks: bool
:param full_peaks: See `eqcorrscan.core.findpeaks.find_peaks2_short`.
:type peak_cores: int
:param peak_cores:
Number of processes to use for parallel peak-finding (if different to
`cores`).
.. note::
**Returns:**
If neither `output_cat` nor `extract_detections` is set to `True`,
then only the list of :class:`eqcorrscan.core.match_filter.Detection`'s
will be output:
:return:
:class:`eqcorrscan.core.match_filter.Detection` detections for each
detection made.
:rtype: list
If `output_cat` is set to `True`, then the
:class:`obspy.core.event.Catalog` will also be output:
:return: Catalog containing events for each detection, see above.
:rtype: :class:`obspy.core.event.Catalog`
If `extract_detections` is set to `True` then the list of
:class:`obspy.core.stream.Stream`'s will also be output.
:return:
list of :class:`obspy.core.stream.Stream`'s for each detection, see
above.
:rtype: list
.. note::
If your data contain gaps these must be padded with zeros before
using this function. The `eqcorrscan.utils.pre_processing` functions
will provide gap-filled data in the appropriate format. Note that if
you pad your data with zeros before filtering or resampling the gaps
will not be all zeros after filtering. This will result in the
calculation of spurious correlations in the gaps.
.. Note::
Detections are not corrected for `pre-pick`, the
detection.detect_time corresponds to the beginning of the earliest
template channel at detection.
.. note::
**Data overlap:**
Internally this routine shifts and trims the data according to the
offsets in the template (e.g. if trace 2 starts 2 seconds after trace 1
in the template then the continuous data will be shifted by 2 seconds
to align peak correlations prior to summing). Because of this,
detections at the start and end of continuous data streams
**may be missed**. The maximum time-period that might be missing
detections is the maximum offset in the template.
To work around this, if you are conducting matched-filter detections
through long-duration continuous data, we suggest using some overlap
(a few seconds, on the order of the maximum offset in the templates)
in the continuous data. You will then need to post-process the
detections (which should be done anyway to remove duplicates).
.. note::
**Thresholding:**
**MAD** threshold is calculated as the:
.. math::
threshold {\\times} (median(abs(cccsum)))
where :math:`cccsum` is the cross-correlation sum for a given template.
**absolute** threshold is a true absolute threshold based on the
cccsum value.
**av_chan_corr** is based on the mean values of single-channel
cross-correlations assuming all data are present as required for the
template, e.g:
.. math::
av\_chan\_corr\_thresh=threshold \\times (cccsum\ /\ len(template))
where :math:`template` is a single template from the input and the
length is the number of channels within this template.
.. note::
The output_cat flag will create an :class:`obspy.core.event.Catalog`
containing one event for each
:class:`eqcorrscan.core.match_filter.Detection`'s generated by
match_filter. Each event will contain a number of comments dealing
with correlation values and channels used for the detection. Each
channel used for the detection will have a corresponding
:class:`obspy.core.event.Pick` which will contain time and
waveform information. **HOWEVER**, the user should note that
the pick times do not account for the prepick times inherent in
each template. For example, if a template trace starts 0.1 seconds
before the actual arrival of that phase, then the pick time generated
by match_filter for that phase will be 0.1 seconds early.
.. Note::
xcorr_func can be used as follows:
.. rubric::xcorr_func argument example
>>> import obspy
>>> import numpy as np
>>> from eqcorrscan.core.match_filter import match_filter
>>> from eqcorrscan.utils.correlate import time_multi_normxcorr
>>> # define a custom xcorr function
>>> def custom_normxcorr(templates, stream, pads, *args, **kwargs):
... # Just to keep example short call other xcorr function
... # in practice you would define your own function here
... print('calling custom xcorr function')
... return time_multi_normxcorr(templates, stream, pads)
>>> # generate some toy templates and stream
>>> random = np.random.RandomState(42)
>>> template = obspy.read()
>>> stream = obspy.read()
>>> for num, tr in enumerate(stream): # iter st and embed templates
... data = tr.data
... tr.data = random.randn(6000) * 5
... tr.data[100: 100 + len(data)] = data
>>> # call match_filter and ensure the custom function is used
>>> detections = match_filter(
... template_names=['1'], template_list=[template], st=stream,
... threshold=.5, threshold_type='absolute', trig_int=1,
... plotvar=False,
... xcorr_func=custom_normxcorr) # doctest:+ELLIPSIS
calling custom xcorr function...
"""
from eqcorrscan.utils.plotting import _match_filter_plot
if arg_check:
# Check the arguments to be nice - if arguments wrong type the parallel
# output for the error won't be useful
if not isinstance(template_names, list):
raise MatchFilterError('template_names must be of type: list')
if not isinstance(template_list, list):
raise MatchFilterError('templates must be of type: list')
if not len(template_list) == len(template_names):
raise MatchFilterError('Not the same number of templates as names')
for template in template_list:
if not isinstance(template, Stream):
msg = 'template in template_list must be of type: ' + \
'obspy.core.stream.Stream'
raise MatchFilterError(msg)
if not isinstance(st, Stream):
msg = 'st must be of type: obspy.core.stream.Stream'
raise MatchFilterError(msg)
if str(threshold_type) not in [str('MAD'), str('absolute'),
str('av_chan_corr')]:
msg = 'threshold_type must be one of: MAD, absolute, av_chan_corr'
raise MatchFilterError(msg)
for tr in st:
if not tr.stats.sampling_rate == st[0].stats.sampling_rate:
raise MatchFilterError('Sampling rates are not equal %f: %f' %
(tr.stats.sampling_rate,
st[0].stats.sampling_rate))
for template in template_list:
for tr in template:
if not tr.stats.sampling_rate == st[0].stats.sampling_rate:
raise MatchFilterError(
'Template sampling rate does not '
'match continuous data')
_spike_test(st)
if cores is not None:
parallel = True
else:
parallel = False
# Copy the stream here because we will muck about with it
stream = st.copy()
templates = copy.deepcopy(template_list)
_template_names = copy.deepcopy(template_names)
# Debug option to confirm that the channel names match those in the
# templates
if debug >= 2:
template_stachan = []
data_stachan = []
for template in templates:
for tr in template:
if isinstance(tr.data, np.ma.core.MaskedArray):
raise MatchFilterError('Template contains masked array,'
' split first')
template_stachan.append(tr.stats.station + '.' +
tr.stats.channel)
for tr in stream:
data_stachan.append(tr.stats.station + '.' + tr.stats.channel)
template_stachan = list(set(template_stachan))
data_stachan = list(set(data_stachan))
debug_print('I have template info for these stations:\n' +
template_stachan.__str__() +
'\nI have daylong data for these stations:\n' +
data_stachan.__str__(), 3, debug)
# Perform a check that the continuous data are all the same length
min_start_time = min([tr.stats.starttime for tr in stream])
max_end_time = max([tr.stats.endtime for tr in stream])
longest_trace_length = stream[0].stats.sampling_rate * (max_end_time -
min_start_time)
longest_trace_length += 1
for tr in stream:
if not tr.stats.npts == longest_trace_length:
msg = 'Data are not equal length, padding short traces'
warnings.warn(msg)
start_pad = np.zeros(int(tr.stats.sampling_rate *
(tr.stats.starttime - min_start_time)))
end_pad = np.zeros(int(tr.stats.sampling_rate *
(max_end_time - tr.stats.endtime)))
# In some cases there will be one sample missing when sampling
# time-stamps are not set consistently between channels, this
# results in start_pad and end_pad being len==0
if len(start_pad) == 0 and len(end_pad) == 0:
debug_print("start and end pad are both zero, padding at one "
"end", 2, debug)
if (tr.stats.starttime - min_start_time) > (
max_end_time - tr.stats.endtime):
start_pad = np.zeros(
int(longest_trace_length - tr.stats.npts))
else:
end_pad = np.zeros(
int(longest_trace_length - tr.stats.npts))
tr.data = np.concatenate([start_pad, tr.data, end_pad])
# Perform check that all template lengths are internally consistent
for i, temp in enumerate(template_list):
if len(set([tr.stats.npts for tr in temp])) > 1:
msg = ('Template %s contains traces of differing length, this is '
'not currently supported' % _template_names[i])
raise MatchFilterError(msg)
outtic = time.clock()
debug_print('Ensuring all template channels have matches in'
' continuous data', 2, debug)
template_stachan = {}
# Work out what station-channel pairs are in the templates, including
# duplicate station-channel pairs. We will use this information to fill
# all templates with the same station-channel pairs as required by
# _template_loop.
for template in templates:
stachans_in_template = []
for tr in template:
stachans_in_template.append((tr.stats.network, tr.stats.station,
tr.stats.location, tr.stats.channel))
stachans_in_template = dict(Counter(stachans_in_template))
for stachan in stachans_in_template.keys():
stachans = stachans_in_template[stachan]
if stachan not in template_stachan.keys():
template_stachan.update({stachan: stachans})
elif stachans_in_template[stachan] > template_stachan[stachan]:
template_stachan.update({stachan: stachans})
# Remove un-matched channels from templates.
_template_stachan = copy.deepcopy(template_stachan)
for stachan in template_stachan.keys():
if not stream.select(network=stachan[0], station=stachan[1],
location=stachan[2], channel=stachan[3]):
# Remove stachan from list of dictionary of template_stachans
_template_stachan.pop(stachan)
# Remove template traces rather than adding NaN data
for template in templates:
if template.select(network=stachan[0], station=stachan[1],
location=stachan[2], channel=stachan[3]):
for tr in template.select(
network=stachan[0], station=stachan[1],
location=stachan[2], channel=stachan[3]):
template.remove(tr)
print('Removing template channel %s.%s.%s.%s due to'
' no matches in continuous data' %
(stachan[0], stachan[1], stachan[2], stachan[3]))
template_stachan = _template_stachan
# Remove un-needed channels from continuous data.
for tr in stream:
if not (tr.stats.network, tr.stats.station,
tr.stats.location, tr.stats.channel) in \
template_stachan.keys():
print('Removing channel in continuous data for %s.%s.%s.%s:'
' no match in template' %
(tr.stats.network, tr.stats.station, tr.stats.location,
tr.stats.channel))
stream.remove(tr)
# Check for duplicate channels
stachans = [(tr.stats.network, tr.stats.station,
tr.stats.location, tr.stats.channel) for tr in stream]
c_stachans = Counter(stachans)
for key in c_stachans.keys():
if c_stachans[key] > 1:
msg = ('Multiple channels for %s.%s.%s.%s, likely a data issue'
% (key[0], key[1], key[2], key[3]))
raise MatchFilterError(msg)
# Pad out templates to have all channels
_templates = []
used_template_names = []
for template, template_name in zip(templates, _template_names):
if len(template) == 0:
msg = ('No channels matching in continuous data for ' +
'template ' + template_name)
warnings.warn(msg)
continue
for stachan in template_stachan.keys():
number_of_channels = len(template.select(
network=stachan[0], station=stachan[1], location=stachan[2],
channel=stachan[3]))
if number_of_channels < template_stachan[stachan]:
missed_channels = template_stachan[stachan] - \
number_of_channels
nulltrace = Trace()
nulltrace.stats.update(
{'network': stachan[0], 'station': stachan[1],
'location': stachan[2], 'channel': stachan[3],
'sampling_rate': template[0].stats.sampling_rate,
'starttime': template[0].stats.starttime,
'not_in_original': True})
nulltrace.data = np.array([np.NaN] * len(template[0].data),
dtype=np.float32)
for dummy in range(missed_channels):
template += nulltrace
template.sort()
_templates.append(template)
used_template_names.append(template_name)
# Quick check that this has all worked
if len(template) != max([len(t) for t in templates]):
raise MatchFilterError('Internal error forcing same template '
'lengths, report this error.')
templates = _templates
_template_names = used_template_names
debug_print('Starting the correlation run for these data', 2, debug)
for template in templates:
debug_print(template.__str__(), 3, debug)
debug_print(stream.__str__(), 3, debug)
multichannel_normxcorr = get_stream_xcorr(xcorr_func, concurrency)
[cccsums, no_chans, chans] = multichannel_normxcorr(
templates=templates, stream=stream, cores=cores, **kwargs)
if len(cccsums[0]) == 0:
raise MatchFilterError('Correlation has not run, zero length cccsum')
outtoc = time.clock()
debug_print(' '.join(['Looping over templates and streams took:',
str(outtoc - outtic), 's']), 0, debug)
debug_print('The shape of the returned cccsums is: %s\n'
'This is from %i templates\nCorrelated with %i channels of '
'data' % (cccsums.shape, len(templates), len(stream)), 2,
debug)
detections = []
if output_cat:
det_cat = Catalog()
if str(threshold_type) == str("absolute"):
thresholds = [threshold for _ in range(len(cccsums))]
elif str(threshold_type) == str('MAD'):
thresholds = [threshold * np.median(np.abs(cccsum))
for cccsum in cccsums]
else:
thresholds = [threshold * no_chans[i] for i in range(len(cccsums))]
if peak_cores is None:
peak_cores = cores
all_peaks = multi_find_peaks(
arr=cccsums, thresh=thresholds, debug=debug, parallel=parallel,
trig_int=int(trig_int * stream[0].stats.sampling_rate),
full_peaks=full_peaks, cores=peak_cores)
for i, cccsum in enumerate(cccsums):
if np.abs(np.mean(cccsum)) > 0.05:
warnings.warn('Mean is not zero! Check this!')
# Set up a trace object for the cccsum as this is easier to plot and
# maintains timing
if plotvar:
_match_filter_plot(
stream=stream, cccsum=cccsum, template_names=_template_names,
rawthresh=thresholds[i], plotdir=plotdir,
plot_format=plot_format, i=i)
if debug >= 4:
np.save(_template_names[i] +
stream[0].stats.starttime.datetime.strftime('%Y%j'),
cccsum)
debug_print(
' '.join(['Saved the cccsum to:', _template_names[i],
stream[0].stats.starttime.datetime.strftime('%Y%j')]),
4, debug)
if all_peaks[i]:
for peak in all_peaks[i]:
detecttime = (
stream[0].stats.starttime +
peak[1] / stream[0].stats.sampling_rate)
detection = Detection(
template_name=_template_names[i], detect_time=detecttime,
no_chans=no_chans[i], detect_val=peak[0],
threshold=thresholds[i], typeofdet='corr', chans=chans[i],
threshold_type=threshold_type, threshold_input=threshold)
if output_cat or output_event:
detection._calculate_event(template_st=templates[i])
detections.append(detection)
if output_cat:
det_cat.append(detection.event)
if extract_detections:
detection_streams = extract_from_stream(stream, detections)
del stream, templates
if output_cat and not extract_detections:
return detections, det_cat
elif not extract_detections:
return detections
elif extract_detections and not output_cat:
return detections, detection_streams
else:
return detections, det_cat, detection_streams | ['def', 'match_filter', '(', 'template_names', ',', 'template_list', ',', 'st', ',', 'threshold', ',', 'threshold_type', ',', 'trig_int', ',', 'plotvar', ',', 'plotdir', '=', "'.'", ',', 'xcorr_func', '=', 'None', ',', 'concurrency', '=', 'None', ',', 'cores', '=', 'None', ',', 'debug', '=', '0', ',', 'plot_format', '=', "'png'", ',', 'output_cat', '=', 'False', ',', 'output_event', '=', 'True', ',', 'extract_detections', '=', 'False', ',', 'arg_check', '=', 'True', ',', 'full_peaks', '=', 'False', ',', 'peak_cores', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'from', 'eqcorrscan', '.', 'utils', '.', 'plotting', 'import', '_match_filter_plot', 'if', 'arg_check', ':', '# Check the arguments to be nice - if arguments wrong type the parallel', "# output for the error won't be useful", 'if', 'not', 'isinstance', '(', 'template_names', ',', 'list', ')', ':', 'raise', 'MatchFilterError', '(', "'template_names must be of type: list'", ')', 'if', 'not', 'isinstance', '(', 'template_list', ',', 'list', ')', ':', 'raise', 'MatchFilterError', '(', "'templates must be of type: list'", ')', 'if', 'not', 'len', '(', 'template_list', ')', '==', 'len', '(', 'template_names', ')', ':', 'raise', 'MatchFilterError', '(', "'Not the same number of templates as names'", ')', 'for', 'template', 'in', 'template_list', ':', 'if', 'not', 'isinstance', '(', 'template', ',', 'Stream', ')', ':', 'msg', '=', "'template in template_list must be of type: '", '+', "'obspy.core.stream.Stream'", 'raise', 'MatchFilterError', '(', 'msg', ')', 'if', 'not', 'isinstance', '(', 'st', ',', 'Stream', ')', ':', 'msg', '=', "'st must be of type: obspy.core.stream.Stream'", 'raise', 'MatchFilterError', '(', 'msg', ')', 'if', 'str', '(', 'threshold_type', ')', 'not', 'in', '[', 'str', '(', "'MAD'", ')', ',', 'str', '(', "'absolute'", ')', ',', 'str', '(', "'av_chan_corr'", ')', ']', ':', 'msg', '=', "'threshold_type must be one of: MAD, absolute, av_chan_corr'", 'raise', 'MatchFilterError', '(', 'msg', ')', 'for', 'tr', 'in', 'st', ':', 'if', 'not', 'tr', '.', 'stats', '.', 'sampling_rate', '==', 'st', '[', '0', ']', '.', 'stats', '.', 'sampling_rate', ':', 'raise', 'MatchFilterError', '(', "'Sampling rates are not equal %f: %f'", '%', '(', 'tr', '.', 'stats', '.', 'sampling_rate', ',', 'st', '[', '0', ']', '.', 'stats', '.', 'sampling_rate', ')', ')', 'for', 'template', 'in', 'template_list', ':', 'for', 'tr', 'in', 'template', ':', 'if', 'not', 'tr', '.', 'stats', '.', 'sampling_rate', '==', 'st', '[', '0', ']', '.', 'stats', '.', 'sampling_rate', ':', 'raise', 'MatchFilterError', '(', "'Template sampling rate does not '", "'match continuous data'", ')', '_spike_test', '(', 'st', ')', 'if', 'cores', 'is', 'not', 'None', ':', 'parallel', '=', 'True', 'else', ':', 'parallel', '=', 'False', '# Copy the stream here because we will muck about with it', 'stream', '=', 'st', '.', 'copy', '(', ')', 'templates', '=', 'copy', '.', 'deepcopy', '(', 'template_list', ')', '_template_names', '=', 'copy', '.', 'deepcopy', '(', 'template_names', ')', '# Debug option to confirm that the channel names match those in the', '# templates', 'if', 'debug', '>=', '2', ':', 'template_stachan', '=', '[', ']', 'data_stachan', '=', '[', ']', 'for', 'template', 'in', 'templates', ':', 'for', 'tr', 'in', 'template', ':', 'if', 'isinstance', '(', 'tr', '.', 'data', ',', 'np', '.', 'ma', '.', 'core', '.', 'MaskedArray', ')', ':', 'raise', 'MatchFilterError', '(', "'Template contains masked array,'", "' split 
first'", ')', 'template_stachan', '.', 'append', '(', 'tr', '.', 'stats', '.', 'station', '+', "'.'", '+', 'tr', '.', 'stats', '.', 'channel', ')', 'for', 'tr', 'in', 'stream', ':', 'data_stachan', '.', 'append', '(', 'tr', '.', 'stats', '.', 'station', '+', "'.'", '+', 'tr', '.', 'stats', '.', 'channel', ')', 'template_stachan', '=', 'list', '(', 'set', '(', 'template_stachan', ')', ')', 'data_stachan', '=', 'list', '(', 'set', '(', 'data_stachan', ')', ')', 'debug_print', '(', "'I have template info for these stations:\\n'", '+', 'template_stachan', '.', '__str__', '(', ')', '+', "'\\nI have daylong data for these stations:\\n'", '+', 'data_stachan', '.', '__str__', '(', ')', ',', '3', ',', 'debug', ')', '# Perform a check that the continuous data are all the same length', 'min_start_time', '=', 'min', '(', '[', 'tr', '.', 'stats', '.', 'starttime', 'for', 'tr', 'in', 'stream', ']', ')', 'max_end_time', '=', 'max', '(', '[', 'tr', '.', 'stats', '.', 'endtime', 'for', 'tr', 'in', 'stream', ']', ')', 'longest_trace_length', '=', 'stream', '[', '0', ']', '.', 'stats', '.', 'sampling_rate', '*', '(', 'max_end_time', '-', 'min_start_time', ')', 'longest_trace_length', '+=', '1', 'for', 'tr', 'in', 'stream', ':', 'if', 'not', 'tr', '.', 'stats', '.', 'npts', '==', 'longest_trace_length', ':', 'msg', '=', "'Data are not equal length, padding short traces'", 'warnings', '.', 'warn', '(', 'msg', ')', 'start_pad', '=', 'np', '.', 'zeros', '(', 'int', '(', 'tr', '.', 'stats', '.', 'sampling_rate', '*', '(', 'tr', '.', 'stats', '.', 'starttime', '-', 'min_start_time', ')', ')', ')', 'end_pad', '=', 'np', '.', 'zeros', '(', 'int', '(', 'tr', '.', 'stats', '.', 'sampling_rate', '*', '(', 'max_end_time', '-', 'tr', '.', 'stats', '.', 'endtime', ')', ')', ')', '# In some cases there will be one sample missing when sampling', '# time-stamps are not set consistently between channels, this', '# results in start_pad and end_pad being len==0', 'if', 'len', '(', 'start_pad', ')', '==', '0', 'and', 'len', '(', 'end_pad', ')', '==', '0', ':', 'debug_print', '(', '"start and end pad are both zero, padding at one "', '"end"', ',', '2', ',', 'debug', ')', 'if', '(', 'tr', '.', 'stats', '.', 'starttime', '-', 'min_start_time', ')', '>', '(', 'max_end_time', '-', 'tr', '.', 'stats', '.', 'endtime', ')', ':', 'start_pad', '=', 'np', '.', 'zeros', '(', 'int', '(', 'longest_trace_length', '-', 'tr', '.', 'stats', '.', 'npts', ')', ')', 'else', ':', 'end_pad', '=', 'np', '.', 'zeros', '(', 'int', '(', 'longest_trace_length', '-', 'tr', '.', 'stats', '.', 'npts', ')', ')', 'tr', '.', 'data', '=', 'np', '.', 'concatenate', '(', '[', 'start_pad', ',', 'tr', '.', 'data', ',', 'end_pad', ']', ')', '# Perform check that all template lengths are internally consistent', 'for', 'i', ',', 'temp', 'in', 'enumerate', '(', 'template_list', ')', ':', 'if', 'len', '(', 'set', '(', '[', 'tr', '.', 'stats', '.', 'npts', 'for', 'tr', 'in', 'temp', ']', ')', ')', '>', '1', ':', 'msg', '=', '(', "'Template %s contains traces of differing length, this is '", "'not currently supported'", '%', '_template_names', '[', 'i', ']', ')', 'raise', 'MatchFilterError', '(', 'msg', ')', 'outtic', '=', 'time', '.', 'clock', '(', ')', 'debug_print', '(', "'Ensuring all template channels have matches in'", "' continuous data'", ',', '2', ',', 'debug', ')', 'template_stachan', '=', '{', '}', '# Work out what station-channel pairs are in the templates, including', '# duplicate station-channel pairs. 
We will use this information to fill', '# all templates with the same station-channel pairs as required by', '# _template_loop.', 'for', 'template', 'in', 'templates', ':', 'stachans_in_template', '=', '[', ']', 'for', 'tr', 'in', 'template', ':', 'stachans_in_template', '.', 'append', '(', '(', 'tr', '.', 'stats', '.', 'network', ',', 'tr', '.', 'stats', '.', 'station', ',', 'tr', '.', 'stats', '.', 'location', ',', 'tr', '.', 'stats', '.', 'channel', ')', ')', 'stachans_in_template', '=', 'dict', '(', 'Counter', '(', 'stachans_in_template', ')', ')', 'for', 'stachan', 'in', 'stachans_in_template', '.', 'keys', '(', ')', ':', 'stachans', '=', 'stachans_in_template', '[', 'stachan', ']', 'if', 'stachan', 'not', 'in', 'template_stachan', '.', 'keys', '(', ')', ':', 'template_stachan', '.', 'update', '(', '{', 'stachan', ':', 'stachans', '}', ')', 'elif', 'stachans_in_template', '[', 'stachan', ']', '>', 'template_stachan', '[', 'stachan', ']', ':', 'template_stachan', '.', 'update', '(', '{', 'stachan', ':', 'stachans', '}', ')', '# Remove un-matched channels from templates.', '_template_stachan', '=', 'copy', '.', 'deepcopy', '(', 'template_stachan', ')', 'for', 'stachan', 'in', 'template_stachan', '.', 'keys', '(', ')', ':', 'if', 'not', 'stream', '.', 'select', '(', 'network', '=', 'stachan', '[', '0', ']', ',', 'station', '=', 'stachan', '[', '1', ']', ',', 'location', '=', 'stachan', '[', '2', ']', ',', 'channel', '=', 'stachan', '[', '3', ']', ')', ':', '# Remove stachan from list of dictionary of template_stachans', '_template_stachan', '.', 'pop', '(', 'stachan', ')', '# Remove template traces rather than adding NaN data', 'for', 'template', 'in', 'templates', ':', 'if', 'template', '.', 'select', '(', 'network', '=', 'stachan', '[', '0', ']', ',', 'station', '=', 'stachan', '[', '1', ']', ',', 'location', '=', 'stachan', '[', '2', ']', ',', 'channel', '=', 'stachan', '[', '3', ']', ')', ':', 'for', 'tr', 'in', 'template', '.', 'select', '(', 'network', '=', 'stachan', '[', '0', ']', ',', 'station', '=', 'stachan', '[', '1', ']', ',', 'location', '=', 'stachan', '[', '2', ']', ',', 'channel', '=', 'stachan', '[', '3', ']', ')', ':', 'template', '.', 'remove', '(', 'tr', ')', 'print', '(', "'Removing template channel %s.%s.%s.%s due to'", "' no matches in continuous data'", '%', '(', 'stachan', '[', '0', ']', ',', 'stachan', '[', '1', ']', ',', 'stachan', '[', '2', ']', ',', 'stachan', '[', '3', ']', ')', ')', 'template_stachan', '=', '_template_stachan', '# Remove un-needed channels from continuous data.', 'for', 'tr', 'in', 'stream', ':', 'if', 'not', '(', 'tr', '.', 'stats', '.', 'network', ',', 'tr', '.', 'stats', '.', 'station', ',', 'tr', '.', 'stats', '.', 'location', ',', 'tr', '.', 'stats', '.', 'channel', ')', 'in', 'template_stachan', '.', 'keys', '(', ')', ':', 'print', '(', "'Removing channel in continuous data for %s.%s.%s.%s:'", "' no match in template'", '%', '(', 'tr', '.', 'stats', '.', 'network', ',', 'tr', '.', 'stats', '.', 'station', ',', 'tr', '.', 'stats', '.', 'location', ',', 'tr', '.', 'stats', '.', 'channel', ')', ')', 'stream', '.', 'remove', '(', 'tr', ')', '# Check for duplicate channels', 'stachans', '=', '[', '(', 'tr', '.', 'stats', '.', 'network', ',', 'tr', '.', 'stats', '.', 'station', ',', 'tr', '.', 'stats', '.', 'location', ',', 'tr', '.', 'stats', '.', 'channel', ')', 'for', 'tr', 'in', 'stream', ']', 'c_stachans', '=', 'Counter', '(', 'stachans', ')', 'for', 'key', 'in', 'c_stachans', '.', 'keys', '(', ')', ':', 'if', 'c_stachans', '[', 
'key', ']', '>', '1', ':', 'msg', '=', '(', "'Multiple channels for %s.%s.%s.%s, likely a data issue'", '%', '(', 'key', '[', '0', ']', ',', 'key', '[', '1', ']', ',', 'key', '[', '2', ']', ',', 'key', '[', '3', ']', ')', ')', 'raise', 'MatchFilterError', '(', 'msg', ')', '# Pad out templates to have all channels', '_templates', '=', '[', ']', 'used_template_names', '=', '[', ']', 'for', 'template', ',', 'template_name', 'in', 'zip', '(', 'templates', ',', '_template_names', ')', ':', 'if', 'len', '(', 'template', ')', '==', '0', ':', 'msg', '=', '(', "'No channels matching in continuous data for '", '+', "'template'", '+', 'template_name', ')', 'warnings', '.', 'warn', '(', 'msg', ')', 'continue', 'for', 'stachan', 'in', 'template_stachan', '.', 'keys', '(', ')', ':', 'number_of_channels', '=', 'len', '(', 'template', '.', 'select', '(', 'network', '=', 'stachan', '[', '0', ']', ',', 'station', '=', 'stachan', '[', '1', ']', ',', 'location', '=', 'stachan', '[', '2', ']', ',', 'channel', '=', 'stachan', '[', '3', ']', ')', ')', 'if', 'number_of_channels', '<', 'template_stachan', '[', 'stachan', ']', ':', 'missed_channels', '=', 'template_stachan', '[', 'stachan', ']', '-', 'number_of_channels', 'nulltrace', '=', 'Trace', '(', ')', 'nulltrace', '.', 'stats', '.', 'update', '(', '{', "'network'", ':', 'stachan', '[', '0', ']', ',', "'station'", ':', 'stachan', '[', '1', ']', ',', "'location'", ':', 'stachan', '[', '2', ']', ',', "'channel'", ':', 'stachan', '[', '3', ']', ',', "'sampling_rate'", ':', 'template', '[', '0', ']', '.', 'stats', '.', 'sampling_rate', ',', "'starttime'", ':', 'template', '[', '0', ']', '.', 'stats', '.', 'starttime', ',', "'not_in_original'", ':', 'True', '}', ')', 'nulltrace', '.', 'data', '=', 'np', '.', 'array', '(', '[', 'np', '.', 'NaN', ']', '*', 'len', '(', 'template', '[', '0', ']', '.', 'data', ')', ',', 'dtype', '=', 'np', '.', 'float32', ')', 'for', 'dummy', 'in', 'range', '(', 'missed_channels', ')', ':', 'template', '+=', 'nulltrace', 'template', '.', 'sort', '(', ')', '_templates', '.', 'append', '(', 'template', ')', 'used_template_names', '.', 'append', '(', 'template_name', ')', '# Quick check that this has all worked', 'if', 'len', '(', 'template', ')', '!=', 'max', '(', '[', 'len', '(', 't', ')', 'for', 't', 'in', 'templates', ']', ')', ':', 'raise', 'MatchFilterError', '(', "'Internal error forcing same template '", "'lengths, report this error.'", ')', 'templates', '=', '_templates', '_template_names', '=', 'used_template_names', 'debug_print', '(', "'Starting the correlation run for these data'", ',', '2', ',', 'debug', ')', 'for', 'template', 'in', 'templates', ':', 'debug_print', '(', 'template', '.', '__str__', '(', ')', ',', '3', ',', 'debug', ')', 'debug_print', '(', 'stream', '.', '__str__', '(', ')', ',', '3', ',', 'debug', ')', 'multichannel_normxcorr', '=', 'get_stream_xcorr', '(', 'xcorr_func', ',', 'concurrency', ')', '[', 'cccsums', ',', 'no_chans', ',', 'chans', ']', '=', 'multichannel_normxcorr', '(', 'templates', '=', 'templates', ',', 'stream', '=', 'stream', ',', 'cores', '=', 'cores', ',', '*', '*', 'kwargs', ')', 'if', 'len', '(', 'cccsums', '[', '0', ']', ')', '==', '0', ':', 'raise', 'MatchFilterError', '(', "'Correlation has not run, zero length cccsum'", ')', 'outtoc', '=', 'time', '.', 'clock', '(', ')', 'debug_print', '(', "' '", '.', 'join', '(', '[', "'Looping over templates and streams took:'", ',', 'str', '(', 'outtoc', '-', 'outtic', ')', ',', "'s'", ']', ')', ',', '0', ',', 'debug', ')', 'debug_print', '(', 
"'The shape of the returned cccsums is: %s\\n'", "'This is from %i templates\\nCorrelated with %i channels of '", "'data'", '%', '(', 'cccsums', '.', 'shape', ',', 'len', '(', 'templates', ')', ',', 'len', '(', 'stream', ')', ')', ',', '2', ',', 'debug', ')', 'detections', '=', '[', ']', 'if', 'output_cat', ':', 'det_cat', '=', 'Catalog', '(', ')', 'if', 'str', '(', 'threshold_type', ')', '==', 'str', '(', '"absolute"', ')', ':', 'thresholds', '=', '[', 'threshold', 'for', '_', 'in', 'range', '(', 'len', '(', 'cccsums', ')', ')', ']', 'elif', 'str', '(', 'threshold_type', ')', '==', 'str', '(', "'MAD'", ')', ':', 'thresholds', '=', '[', 'threshold', '*', 'np', '.', 'median', '(', 'np', '.', 'abs', '(', 'cccsum', ')', ')', 'for', 'cccsum', 'in', 'cccsums', ']', 'else', ':', 'thresholds', '=', '[', 'threshold', '*', 'no_chans', '[', 'i', ']', 'for', 'i', 'in', 'range', '(', 'len', '(', 'cccsums', ')', ')', ']', 'if', 'peak_cores', 'is', 'None', ':', 'peak_cores', '=', 'cores', 'all_peaks', '=', 'multi_find_peaks', '(', 'arr', '=', 'cccsums', ',', 'thresh', '=', 'thresholds', ',', 'debug', '=', 'debug', ',', 'parallel', '=', 'parallel', ',', 'trig_int', '=', 'int', '(', 'trig_int', '*', 'stream', '[', '0', ']', '.', 'stats', '.', 'sampling_rate', ')', ',', 'full_peaks', '=', 'full_peaks', ',', 'cores', '=', 'peak_cores', ')', 'for', 'i', ',', 'cccsum', 'in', 'enumerate', '(', 'cccsums', ')', ':', 'if', 'np', '.', 'abs', '(', 'np', '.', 'mean', '(', 'cccsum', ')', ')', '>', '0.05', ':', 'warnings', '.', 'warn', '(', "'Mean is not zero! Check this!'", ')', '# Set up a trace object for the cccsum as this is easier to plot and', '# maintains timing', 'if', 'plotvar', ':', '_match_filter_plot', '(', 'stream', '=', 'stream', ',', 'cccsum', '=', 'cccsum', ',', 'template_names', '=', '_template_names', ',', 'rawthresh', '=', 'thresholds', '[', 'i', ']', ',', 'plotdir', '=', 'plotdir', ',', 'plot_format', '=', 'plot_format', ',', 'i', '=', 'i', ')', 'if', 'debug', '>=', '4', ':', 'np', '.', 'save', '(', '_template_names', '[', 'i', ']', '+', 'stream', '[', '0', ']', '.', 'stats', '.', 'starttime', '.', 'datetime', '.', 'strftime', '(', "'%Y%j'", ')', ',', 'cccsum', ')', 'debug_print', '(', "' '", '.', 'join', '(', '[', "'Saved the cccsum to:'", ',', '_template_names', '[', 'i', ']', ',', 'stream', '[', '0', ']', '.', 'stats', '.', 'starttime', '.', 'datetime', '.', 'strftime', '(', "'%Y%j'", ')', ']', ')', ',', '4', ',', 'debug', ')', 'if', 'all_peaks', '[', 'i', ']', ':', 'for', 'peak', 'in', 'all_peaks', '[', 'i', ']', ':', 'detecttime', '=', '(', 'stream', '[', '0', ']', '.', 'stats', '.', 'starttime', '+', 'peak', '[', '1', ']', '/', 'stream', '[', '0', ']', '.', 'stats', '.', 'sampling_rate', ')', 'detection', '=', 'Detection', '(', 'template_name', '=', '_template_names', '[', 'i', ']', ',', 'detect_time', '=', 'detecttime', ',', 'no_chans', '=', 'no_chans', '[', 'i', ']', ',', 'detect_val', '=', 'peak', '[', '0', ']', ',', 'threshold', '=', 'thresholds', '[', 'i', ']', ',', 'typeofdet', '=', "'corr'", ',', 'chans', '=', 'chans', '[', 'i', ']', ',', 'threshold_type', '=', 'threshold_type', ',', 'threshold_input', '=', 'threshold', ')', 'if', 'output_cat', 'or', 'output_event', ':', 'detection', '.', '_calculate_event', '(', 'template_st', '=', 'templates', '[', 'i', ']', ')', 'detections', '.', 'append', '(', 'detection', ')', 'if', 'output_cat', ':', 'det_cat', '.', 'append', '(', 'detection', '.', 'event', ')', 'if', 'extract_detections', ':', 'detection_streams', '=', 'extract_from_stream', 
'(', 'stream', ',', 'detections', ')', 'del', 'stream', ',', 'templates', 'if', 'output_cat', 'and', 'not', 'extract_detections', ':', 'return', 'detections', ',', 'det_cat', 'elif', 'not', 'extract_detections', ':', 'return', 'detections', 'elif', 'extract_detections', 'and', 'not', 'output_cat', ':', 'return', 'detections', ',', 'detection_streams', 'else', ':', 'return', 'detections', ',', 'det_cat', ',', 'detection_streams'] | Main matched-filter detection function.
Over-arching code to run the correlations of given templates with a
day of seismic data and output the detections based on a given threshold.
For a functional example see the tutorials.
:type template_names: list
:param template_names:
List of template names in the same order as template_list
:type template_list: list
:param template_list:
A list of templates of which each template is a
:class:`obspy.core.stream.Stream` of obspy traces containing seismic
data and header information.
:type st: :class:`obspy.core.stream.Stream`
:param st:
A Stream object containing all the data available and
required for the correlations with templates given. For efficiency
this should contain no excess traces which are not in one or more of
the templates. This will now remove excess traces internally, but
will copy the stream and work on the copy, leaving your input stream
untouched.
:type threshold: float
:param threshold: A threshold value set based on the threshold_type
:type threshold_type: str
:param threshold_type:
The type of threshold to be used, can be MAD, absolute or av_chan_corr.
See Note on thresholding below.
:type trig_int: float
:param trig_int: Minimum gap between detections in seconds.
:type plotvar: bool
:param plotvar: Turn plotting on or off
:type plotdir: str
:param plotdir:
Path to plotting folder, plots will be output here, defaults to run
location.
:type xcorr_func: str or callable
:param xcorr_func:
A str of a registered xcorr function or a callable for implementing
a custom xcorr function. For more information see:
:func:`eqcorrscan.utils.correlate.register_array_xcorr`
:type concurrency: str
:param concurrency:
The type of concurrency to apply to the xcorr function. Options are
'multithread', 'multiprocess', 'concurrent'. For more details see
:func:`eqcorrscan.utils.correlate.get_stream_xcorr`
:type cores: int
:param cores: Number of cores to use
:type debug: int
:param debug:
Debug output level, the bigger the number, the more the output.
:type plot_format: str
:param plot_format: Specify format of output plots if saved
:type output_cat: bool
:param output_cat:
Specifies if matched_filter will output an obspy.Catalog class
containing events for each detection. Default is False, in which case
matched_filter will output a list of detection classes, as normal.
:type output_event: bool
:param output_event:
Whether to include events in the Detection objects, defaults to True,
but for large cases you may want to turn this off as Event objects
can be quite memory intensive.
:type extract_detections: bool
:param extract_detections:
Specifies whether or not to return a list of streams, one stream per
detection.
:type arg_check: bool
:param arg_check:
Check arguments, defaults to True, but if running in bulk, and you are
certain of your arguments, then set to False.
:type full_peaks: bool
:param full_peaks: See `eqcorrscan.core.findpeaks.find_peaks2_short`.
:type peak_cores: int
:param peak_cores:
Number of processes to use for parallel peak-finding (if different to
`cores`).
.. note::
**Returns:**
If neither `output_cat` or `extract_detections` are set to `True`,
then only the list of :class:`eqcorrscan.core.match_filter.Detection`'s
will be output:
:return:
:class:`eqcorrscan.core.match_filter.Detection` detections for each
detection made.
:rtype: list
If `output_cat` is set to `True`, then the
:class:`obspy.core.event.Catalog` will also be output:
:return: Catalog containing events for each detection, see above.
:rtype: :class:`obspy.core.event.Catalog`
If `extract_detections` is set to `True` then the list of
:class:`obspy.core.stream.Stream`'s will also be output.
:return:
list of :class:`obspy.core.stream.Stream`'s for each detection, see
above.
:rtype: list
.. note::
If your data contain gaps these must be padded with zeros before
using this function. The `eqcorrscan.utils.pre_processing` functions
will provide gap-filled data in the appropriate format. Note that if
you pad your data with zeros before filtering or resampling the gaps
will not be all zeros after filtering. This will result in the
calculation of spurious correlations in the gaps.
.. Note::
Detections are not corrected for `pre-pick`, the
detection.detect_time corresponds to the beginning of the earliest
template channel at detection.
.. note::
**Data overlap:**
Internally this routine shifts and trims the data according to the
offsets in the template (e.g. if trace 2 starts 2 seconds after trace 1
in the template then the continuous data will be shifted by 2 seconds
to align peak correlations prior to summing). Because of this,
detections at the start and end of continuous data streams
**may be missed**. The maximum time-period that might be missing
detections is the maximum offset in the template.
To work around this, if you are conducting matched-filter detections
through long-duration continuous data, we suggest using some overlap
(a few seconds, on the order of the maximum offset in the templates)
in the continuous data. You will then need to post-process the
detections (which should be done anyway to remove duplicates).
.. note::
**Thresholding:**
**MAD** threshold is calculated as the:
.. math::
threshold \times (median(abs(cccsum)))
where :math:`cccsum` is the cross-correlation sum for a given template.
**absolute** threshold is a true absolute threshold based on the
cccsum value.
**av_chan_corr** is based on the mean values of single-channel
cross-correlations assuming all data are present as required for the
template, e.g:
.. math::
av\_chan\_corr\_thresh = threshold \times (cccsum\ /\ len(template))
where :math:`template` is a single template from the input and the
length is the number of channels within this template.
.. note::
The output_cat flag will create an :class:`obspy.core.event.Catalog`
containing one event for each
:class:`eqcorrscan.core.match_filter.Detection`'s generated by
match_filter. Each event will contain a number of comments dealing
with correlation values and channels used for the detection. Each
channel used for the detection will have a corresponding
:class:`obspy.core.event.Pick` which will contain time and
waveform information. **HOWEVER**, the user should note that
the pick times do not account for the prepick times inherent in
each template. For example, if a template trace starts 0.1 seconds
before the actual arrival of that phase, then the pick time generated
by match_filter for that phase will be 0.1 seconds early.
.. Note::
xcorr_func can be used as follows:
.. rubric:: xcorr_func argument example
>>> import obspy
>>> import numpy as np
>>> from eqcorrscan.core.match_filter import match_filter
>>> from eqcorrscan.utils.correlate import time_multi_normxcorr
>>> # define a custom xcorr function
>>> def custom_normxcorr(templates, stream, pads, *args, **kwargs):
... # Just to keep example short call other xcorr function
... # in practice you would define your own function here
... print('calling custom xcorr function')
... return time_multi_normxcorr(templates, stream, pads)
>>> # generate some toy templates and stream
>>> random = np.random.RandomState(42)
>>> template = obspy.read()
>>> stream = obspy.read()
>>> for num, tr in enumerate(stream): # iter st and embed templates
... data = tr.data
... tr.data = random.randn(6000) * 5
... tr.data[100: 100 + len(data)] = data
>>> # call match_filter and ensure the custom function is used
>>> detections = match_filter(
... template_names=['1'], template_list=[template], st=stream,
... threshold=.5, threshold_type='absolute', trig_int=1,
... plotvar=False,
... xcorr_func=custom_normxcorr) # doctest:+ELLIPSIS
calling custom xcorr function... | ['Main', 'matched', '-', 'filter', 'detection', 'function', '.'] | train | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L4000-L4476 |
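The thresholding note in the match_filter entry above maps directly onto a few lines of NumPy. The sketch below is illustrative only and is not part of the EQcorrscan sources; the names cccsums (one cross-correlation sum per template) and no_chans (channels contributing to each sum) are assumed to follow the variables used in the function body::

    import numpy as np

    def compute_thresholds(cccsums, no_chans, threshold, threshold_type):
        # One threshold per template, mirroring the three documented modes.
        if threshold_type == 'absolute':
            return [threshold for _ in cccsums]
        if threshold_type == 'MAD':
            # threshold * median(|cccsum|) for each template
            return [threshold * np.median(np.abs(cccsum)) for cccsum in cccsums]
        if threshold_type == 'av_chan_corr':
            # threshold scaled by the number of contributing channels
            return [threshold * n for n in no_chans]
        raise ValueError('Unknown threshold_type: %s' % threshold_type)

A MAD threshold ties the trigger level to the noise level of each cross-correlation sum, whereas an absolute threshold ignores it.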
5,113 | webrecorder/pywb | pywb/apps/frontendapp.py | FrontEndApp.is_valid_coll | def is_valid_coll(self, coll):
"""Determines if the collection name for a request is valid (exists)
:param str coll: The name of the collection to check
:return: True if the collection is valid, False otherwise
:rtype: bool
"""
#if coll == self.all_coll:
# return True
return (coll in self.warcserver.list_fixed_routes() or
coll in self.warcserver.list_dynamic_routes()) | python | def is_valid_coll(self, coll):
"""Determines if the collection name for a request is valid (exists)
:param str coll: The name of the collection to check
:return: True if the collection is valid, False otherwise
:rtype: bool
"""
#if coll == self.all_coll:
# return True
return (coll in self.warcserver.list_fixed_routes() or
coll in self.warcserver.list_dynamic_routes()) | ['def', 'is_valid_coll', '(', 'self', ',', 'coll', ')', ':', '#if coll == self.all_coll:', '# return True', 'return', '(', 'coll', 'in', 'self', '.', 'warcserver', '.', 'list_fixed_routes', '(', ')', 'or', 'coll', 'in', 'self', '.', 'warcserver', '.', 'list_dynamic_routes', '(', ')', ')'] | Determines if the collection name for a request is valid (exists)
:param str coll: The name of the collection to check
:return: True if the collection is valid, False otherwise
:rtype: bool | ['Determines', 'if', 'the', 'collection', 'name', 'for', 'a', 'request', 'is', 'valid', '(', 'exists', ')'] | train | https://github.com/webrecorder/pywb/blob/77f8bb647639dd66f6b92b7a9174c28810e4b1d9/pywb/apps/frontendapp.py#L426-L437 |
5,114 | pantsbuild/pants | src/python/pants/backend/python/subsystems/pex_build_util.py | PexBuilderWrapper.extract_single_dist_for_current_platform | def extract_single_dist_for_current_platform(self, reqs, dist_key):
"""Resolve a specific distribution from a set of requirements matching the current platform.
:param list reqs: A list of :class:`PythonRequirement` to resolve.
:param str dist_key: The value of `distribution.key` to match for a `distribution` from the
resolved requirements.
:return: The single :class:`pkg_resources.Distribution` matching `dist_key`.
:raises: :class:`self.SingleDistExtractionError` if no dists or multiple dists matched the given
`dist_key`.
"""
distributions = self._resolve_distributions_by_platform(reqs, platforms=['current'])
try:
matched_dist = assert_single_element(list(
dist
for _, dists in distributions.items()
for dist in dists
if dist.key == dist_key
))
except (StopIteration, ValueError) as e:
raise self.SingleDistExtractionError(
"Exactly one dist was expected to match name {} in requirements {}: {}"
.format(dist_key, reqs, e))
return matched_dist | python | def extract_single_dist_for_current_platform(self, reqs, dist_key):
"""Resolve a specific distribution from a set of requirements matching the current platform.
:param list reqs: A list of :class:`PythonRequirement` to resolve.
:param str dist_key: The value of `distribution.key` to match for a `distribution` from the
resolved requirements.
:return: The single :class:`pkg_resources.Distribution` matching `dist_key`.
:raises: :class:`self.SingleDistExtractionError` if no dists or multiple dists matched the given
`dist_key`.
"""
distributions = self._resolve_distributions_by_platform(reqs, platforms=['current'])
try:
matched_dist = assert_single_element(list(
dist
for _, dists in distributions.items()
for dist in dists
if dist.key == dist_key
))
except (StopIteration, ValueError) as e:
raise self.SingleDistExtractionError(
"Exactly one dist was expected to match name {} in requirements {}: {}"
.format(dist_key, reqs, e))
return matched_dist | ['def', 'extract_single_dist_for_current_platform', '(', 'self', ',', 'reqs', ',', 'dist_key', ')', ':', 'distributions', '=', 'self', '.', '_resolve_distributions_by_platform', '(', 'reqs', ',', 'platforms', '=', '[', "'current'", ']', ')', 'try', ':', 'matched_dist', '=', 'assert_single_element', '(', 'list', '(', 'dist', 'for', '_', ',', 'dists', 'in', 'distributions', '.', 'items', '(', ')', 'for', 'dist', 'in', 'dists', 'if', 'dist', '.', 'key', '==', 'dist_key', ')', ')', 'except', '(', 'StopIteration', ',', 'ValueError', ')', 'as', 'e', ':', 'raise', 'self', '.', 'SingleDistExtractionError', '(', '"Exactly one dist was expected to match name {} in requirements {}: {}"', '.', 'format', '(', 'dist_key', ',', 'reqs', ',', 'e', ')', ')', 'return', 'matched_dist'] | Resolve a specific distribution from a set of requirements matching the current platform.
:param list reqs: A list of :class:`PythonRequirement` to resolve.
:param str dist_key: The value of `distribution.key` to match for a `distribution` from the
resolved requirements.
:return: The single :class:`pkg_resources.Distribution` matching `dist_key`.
:raises: :class:`self.SingleDistExtractionError` if no dists or multiple dists matched the given
`dist_key`. | ['Resolve', 'a', 'specific', 'distribution', 'from', 'a', 'set', 'of', 'requirements', 'matching', 'the', 'current', 'platform', '.'] | train | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/python/subsystems/pex_build_util.py#L189-L211 |
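The "exactly one dist" contract described in the entry above reduces to a small filter-and-check. This is a hedged sketch with made-up names, not the Pants implementation (which goes through assert_single_element and its own exception type)::

    def extract_single(dists, dist_key):
        # Keep only distributions whose key matches, then insist on exactly one.
        matches = [d for d in dists if d.key == dist_key]
        if len(matches) != 1:
            raise ValueError(
                'Exactly one dist was expected to match name %s, got %d'
                % (dist_key, len(matches)))
        return matches[0]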
5,115 | graphistry/pygraphistry | graphistry/plotter.py | Plotter.bind | def bind(self, source=None, destination=None, node=None,
edge_title=None, edge_label=None, edge_color=None, edge_weight=None,
point_title=None, point_label=None, point_color=None, point_size=None):
"""Relate data attributes to graph structure and visual representation.
To facilitate reuse and replayable notebooks, the binding call is chainable. Invocation does not affect the old binding: it instead returns a new Plotter instance with the new bindings added to the existing ones. Both the old and new bindings can then be used for different graphs.
:param source: Attribute containing an edge's source ID
:type source: String.
:param destination: Attribute containing an edge's destination ID
:type destination: String.
:param node: Attribute containing a node's ID
:type node: String.
:param edge_title: Attribute overriding edge's minimized label text. By default, the edge source and destination is used.
:type edge_title: HtmlString.
:param edge_label: Attribute overriding edge's expanded label text. By default, scrollable list of attribute/value mappings.
:type edge_label: HtmlString.
:param edge_color: Attribute overriding edge's color. `See palette definitions <https://graphistry.github.io/docs/legacy/api/0.9.2/api.html#extendedpalette>`_ for values. Based on Color Brewer.
:type edge_color: String.
:param edge_weight: Attribute overriding edge weight. Default is 1. Advanced layout controls will relayout edges based on this value.
:type edge_weight: String.
:param point_title: Attribute overriding node's minimized label text. By default, the node ID is used.
:type point_title: HtmlString.
:param point_label: Attribute overriding node's expanded label text. By default, scrollable list of attribute/value mappings.
:type point_label: HtmlString.
:param point_color: Attribute overriding node's color. `See palette definitions <https://graphistry.github.io/docs/legacy/api/0.9.2/api.html#extendedpalette>`_ for values. Based on Color Brewer.
:type point_color: Integer.
:param point_size: Attribute overriding node's size. By default, uses the node degree. The visualization will normalize point sizes and adjust dynamically using semantic zoom.
:type point_size: HtmlString.
:returns: Plotter.
:rtype: Plotter.
**Example: Minimal**
::
import graphistry
g = graphistry.bind()
g = g.bind(source='src', destination='dst')
**Example: Node colors**
::
import graphistry
g = graphistry.bind()
g = g.bind(source='src', destination='dst',
node='id', point_color='color')
**Example: Chaining**
::
import graphistry
g = graphistry.bind(source='src', destination='dst', node='id')
g1 = g.bind(point_color='color1', point_size='size1')
g.bind(point_color='color1b')
g2a = g1.bind(point_color='color2a')
g2b = g1.bind(point_color='color2b', point_size='size2b')
g3a = g2a.bind(point_size='size3a')
g3b = g2b.bind(point_size='size3b')
In the above **Chaining** example, all bindings use src/dst/id. Colors and sizes bind to:
::
g: default/default
g1: color1/size1
g2a: color2a/size1
g2b: color2b/size2b
g3a: color2a/size3a
g3b: color2b/size3b
"""
res = copy.copy(self)
res._source = source or self._source
res._destination = destination or self._destination
res._node = node or self._node
res._edge_title = edge_title or self._edge_title
res._edge_label = edge_label or self._edge_label
res._edge_color = edge_color or self._edge_color
res._edge_weight = edge_weight or self._edge_weight
res._point_title = point_title or self._point_title
res._point_label = point_label or self._point_label
res._point_color = point_color or self._point_color
res._point_size = point_size or self._point_size
return res | python | def bind(self, source=None, destination=None, node=None,
edge_title=None, edge_label=None, edge_color=None, edge_weight=None,
point_title=None, point_label=None, point_color=None, point_size=None):
"""Relate data attributes to graph structure and visual representation.
To facilitate reuse and replayable notebooks, the binding call is chainable. Invocation does not affect the old binding: it instead returns a new Plotter instance with the new bindings added to the existing ones. Both the old and new bindings can then be used for different graphs.
:param source: Attribute containing an edge's source ID
:type source: String.
:param destination: Attribute containing an edge's destination ID
:type destination: String.
:param node: Attribute containing a node's ID
:type node: String.
:param edge_title: Attribute overriding edge's minimized label text. By default, the edge source and destination is used.
:type edge_title: HtmlString.
:param edge_label: Attribute overriding edge's expanded label text. By default, scrollable list of attribute/value mappings.
:type edge_label: HtmlString.
:param edge_color: Attribute overriding edge's color. `See palette definitions <https://graphistry.github.io/docs/legacy/api/0.9.2/api.html#extendedpalette>`_ for values. Based on Color Brewer.
:type edge_color: String.
:param edge_weight: Attribute overriding edge weight. Default is 1. Advanced layout controls will relayout edges based on this value.
:type edge_weight: String.
:param point_title: Attribute overriding node's minimized label text. By default, the node ID is used.
:type point_title: HtmlString.
:param point_label: Attribute overriding node's expanded label text. By default, scrollable list of attribute/value mappings.
:type point_label: HtmlString.
:param point_color: Attribute overriding node's color. `See palette definitions <https://graphistry.github.io/docs/legacy/api/0.9.2/api.html#extendedpalette>`_ for values. Based on Color Brewer.
:type point_color: Integer.
:param point_size: Attribute overriding node's size. By default, uses the node degree. The visualization will normalize point sizes and adjust dynamically using semantic zoom.
:type point_size: HtmlString.
:returns: Plotter.
:rtype: Plotter.
**Example: Minimal**
::
import graphistry
g = graphistry.bind()
g = g.bind(source='src', destination='dst')
**Example: Node colors**
::
import graphistry
g = graphistry.bind()
g = g.bind(source='src', destination='dst',
node='id', point_color='color')
**Example: Chaining**
::
import graphistry
g = graphistry.bind(source='src', destination='dst', node='id')
g1 = g.bind(point_color='color1', point_size='size1')
g.bind(point_color='color1b')
g2a = g1.bind(point_color='color2a')
g2b = g1.bind(point_color='color2b', point_size='size2b')
g3a = g2a.bind(point_size='size3a')
g3b = g2b.bind(point_size='size3b')
In the above **Chaining** example, all bindings use src/dst/id. Colors and sizes bind to:
::
g: default/default
g1: color1/size1
g2a: color2a/size1
g2b: color2b/size2b
g3a: color2a/size3a
g3b: color2b/size3b
"""
res = copy.copy(self)
res._source = source or self._source
res._destination = destination or self._destination
res._node = node or self._node
res._edge_title = edge_title or self._edge_title
res._edge_label = edge_label or self._edge_label
res._edge_color = edge_color or self._edge_color
res._edge_weight = edge_weight or self._edge_weight
res._point_title = point_title or self._point_title
res._point_label = point_label or self._point_label
res._point_color = point_color or self._point_color
res._point_size = point_size or self._point_size
return res | ['def', 'bind', '(', 'self', ',', 'source', '=', 'None', ',', 'destination', '=', 'None', ',', 'node', '=', 'None', ',', 'edge_title', '=', 'None', ',', 'edge_label', '=', 'None', ',', 'edge_color', '=', 'None', ',', 'edge_weight', '=', 'None', ',', 'point_title', '=', 'None', ',', 'point_label', '=', 'None', ',', 'point_color', '=', 'None', ',', 'point_size', '=', 'None', ')', ':', 'res', '=', 'copy', '.', 'copy', '(', 'self', ')', 'res', '.', '_source', '=', 'source', 'or', 'self', '.', '_source', 'res', '.', '_destination', '=', 'destination', 'or', 'self', '.', '_destination', 'res', '.', '_node', '=', 'node', 'or', 'self', '.', '_node', 'res', '.', '_edge_title', '=', 'edge_title', 'or', 'self', '.', '_edge_title', 'res', '.', '_edge_label', '=', 'edge_label', 'or', 'self', '.', '_edge_label', 'res', '.', '_edge_color', '=', 'edge_color', 'or', 'self', '.', '_edge_color', 'res', '.', '_edge_weight', '=', 'edge_weight', 'or', 'self', '.', '_edge_weight', 'res', '.', '_point_title', '=', 'point_title', 'or', 'self', '.', '_point_title', 'res', '.', '_point_label', '=', 'point_label', 'or', 'self', '.', '_point_label', 'res', '.', '_point_color', '=', 'point_color', 'or', 'self', '.', '_point_color', 'res', '.', '_point_size', '=', 'point_size', 'or', 'self', '.', '_point_size', 'return', 'res'] | Relate data attributes to graph structure and visual representation.
To facilitate reuse and replayable notebooks, the binding call is chainable. Invocation does not affect the old binding: it instead returns a new Plotter instance with the new bindings added to the existing ones. Both the old and new bindings can then be used for different graphs.
:param source: Attribute containing an edge's source ID
:type source: String.
:param destination: Attribute containing an edge's destination ID
:type destination: String.
:param node: Attribute containing a node's ID
:type node: String.
:param edge_title: Attribute overriding edge's minimized label text. By default, the edge source and destination is used.
:type edge_title: HtmlString.
:param edge_label: Attribute overriding edge's expanded label text. By default, scrollable list of attribute/value mappings.
:type edge_label: HtmlString.
:param edge_color: Attribute overriding edge's color. `See palette definitions <https://graphistry.github.io/docs/legacy/api/0.9.2/api.html#extendedpalette>`_ for values. Based on Color Brewer.
:type edge_color: String.
:param edge_weight: Attribute overriding edge weight. Default is 1. Advanced layout controls will relayout edges based on this value.
:type edge_weight: String.
:param point_title: Attribute overriding node's minimized label text. By default, the node ID is used.
:type point_title: HtmlString.
:param point_label: Attribute overriding node's expanded label text. By default, scrollable list of attribute/value mappings.
:type point_label: HtmlString.
:param point_color: Attribute overriding node's color. `See palette definitions <https://graphistry.github.io/docs/legacy/api/0.9.2/api.html#extendedpalette>`_ for values. Based on Color Brewer.
:type point_color: Integer.
:param point_size: Attribute overriding node's size. By default, uses the node degree. The visualization will normalize point sizes and adjust dynamically using semantic zoom.
:type point_size: HtmlString.
:returns: Plotter.
:rtype: Plotter.
**Example: Minimal**
::
import graphistry
g = graphistry.bind()
g = g.bind(source='src', destination='dst')
**Example: Node colors**
::
import graphistry
g = graphistry.bind()
g = g.bind(source='src', destination='dst',
node='id', point_color='color')
**Example: Chaining**
::
import graphistry
g = graphistry.bind(source='src', destination='dst', node='id')
g1 = g.bind(point_color='color1', point_size='size1')
g.bind(point_color='color1b')
g2a = g1.bind(point_color='color2a')
g2b = g1.bind(point_color='color2b', point_size='size2b')
g3a = g2a.bind(point_size='size3a')
g3b = g2b.bind(point_size='size3b')
In the above **Chaining** example, all bindings use src/dst/id. Colors and sizes bind to:
::
g: default/default
g1: color1/size1
g2a: color2a/size1
g2b: color2b/size2b
g3a: color2a/size3a
g3b: color2b/size3b | ['Relate', 'data', 'attributes', 'to', 'graph', 'structure', 'and', 'visual', 'representation', '.'] | train | https://github.com/graphistry/pygraphistry/blob/3dfc50e60232c6f5fedd6e5fa9d3048b606944b8/graphistry/plotter.py#L68-L170 |
5,116 | pkgw/pwkit | pwkit/lmmin.py | _qrd_solve_full | def _qrd_solve_full(a, b, ddiag, dtype=np.float):
"""Solve the equation A^T x = B, D x = 0.
Parameters:
a - an n-by-m array, m >= n
b - an m-vector
ddiag - an n-vector giving the diagonal of D. (The rest of D is 0.)
Returns:
x - n-vector solving the equation.
s - the n-by-n supplementary matrix s.
pmut - n-element permutation vector defining the permutation matrix P.
The equations are solved in a least-squares sense if the system is
rank-deficient. D is a diagonal matrix and hence only its diagonal is
in fact supplied as an argument. The matrix s is full lower triangular
and solves the equation
P^T (A A^T + D D) P = S^T S (needs transposition?)
where P is the permutation matrix defined by the vector pmut; it puts
the rows of 'a' in order of nonincreasing rank, so that a[pmut]
has its rows sorted that way.
"""
a = np.asarray(a, dtype)
b = np.asarray(b, dtype)
ddiag = np.asarray(ddiag, dtype)
n, m = a.shape
assert m >= n
assert b.shape == (m, )
assert ddiag.shape == (n, )
# The computation is straightforward.
q, r, pmut = _qr_factor_full(a)
bqt = np.dot(b, q.T)
x, s = _manual_qrd_solve(r[:,:n], pmut, ddiag, bqt,
dtype=dtype, build_s=True)
return x, s, pmut | python | def _qrd_solve_full(a, b, ddiag, dtype=np.float):
"""Solve the equation A^T x = B, D x = 0.
Parameters:
a - an n-by-m array, m >= n
b - an m-vector
ddiag - an n-vector giving the diagonal of D. (The rest of D is 0.)
Returns:
x - n-vector solving the equation.
s - the n-by-n supplementary matrix s.
pmut - n-element permutation vector defining the permutation matrix P.
The equations are solved in a least-squares sense if the system is
rank-deficient. D is a diagonal matrix and hence only its diagonal is
in fact supplied as an argument. The matrix s is full lower triangular
and solves the equation
P^T (A A^T + D D) P = S^T S (needs transposition?)
where P is the permutation matrix defined by the vector pmut; it puts
the rows of 'a' in order of nonincreasing rank, so that a[pmut]
has its rows sorted that way.
"""
a = np.asarray(a, dtype)
b = np.asarray(b, dtype)
ddiag = np.asarray(ddiag, dtype)
n, m = a.shape
assert m >= n
assert b.shape == (m, )
assert ddiag.shape == (n, )
# The computation is straightforward.
q, r, pmut = _qr_factor_full(a)
bqt = np.dot(b, q.T)
x, s = _manual_qrd_solve(r[:,:n], pmut, ddiag, bqt,
dtype=dtype, build_s=True)
return x, s, pmut | ['def', '_qrd_solve_full', '(', 'a', ',', 'b', ',', 'ddiag', ',', 'dtype', '=', 'np', '.', 'float', ')', ':', 'a', '=', 'np', '.', 'asarray', '(', 'a', ',', 'dtype', ')', 'b', '=', 'np', '.', 'asarray', '(', 'b', ',', 'dtype', ')', 'ddiag', '=', 'np', '.', 'asarray', '(', 'ddiag', ',', 'dtype', ')', 'n', ',', 'm', '=', 'a', '.', 'shape', 'assert', 'm', '>=', 'n', 'assert', 'b', '.', 'shape', '==', '(', 'm', ',', ')', 'assert', 'ddiag', '.', 'shape', '==', '(', 'n', ',', ')', '# The computation is straightforward.', 'q', ',', 'r', ',', 'pmut', '=', '_qr_factor_full', '(', 'a', ')', 'bqt', '=', 'np', '.', 'dot', '(', 'b', ',', 'q', '.', 'T', ')', 'x', ',', 's', '=', '_manual_qrd_solve', '(', 'r', '[', ':', ',', ':', 'n', ']', ',', 'pmut', ',', 'ddiag', ',', 'bqt', ',', 'dtype', '=', 'dtype', ',', 'build_s', '=', 'True', ')', 'return', 'x', ',', 's', ',', 'pmut'] | Solve the equation A^T x = B, D x = 0.
Parameters:
a - an n-by-m array, m >= n
b - an m-vector
ddiag - an n-vector giving the diagonal of D. (The rest of D is 0.)
Returns:
x - n-vector solving the equation.
s - the n-by-n supplementary matrix s.
pmut - n-element permutation vector defining the permutation matrix P.
The equations are solved in a least-squares sense if the system is
rank-deficient. D is a diagonal matrix and hence only its diagonal is
in fact supplied as an argument. The matrix s is full lower triangular
and solves the equation
P^T (A A^T + D D) P = S^T S (needs transposition?)
where P is the permutation matrix defined by the vector pmut; it puts
the rows of 'a' in order of nonincreasing rank, so that a[pmut]
has its rows sorted that way. | ['Solve', 'the', 'equation', 'A^T', 'x', '=', 'B', 'D', 'x', '=', '0', '.'] | train | https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/lmmin.py#L774-L815 |
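The least-squares problem described in the entry above (A^T x = B together with D x = 0) can be cross-checked with a dense solve by stacking the two systems. This sketch is for intuition only and does not reproduce the pivoted QR path the routine actually takes::

    import numpy as np

    def qrd_solve_reference(a, b, ddiag):
        # a is n-by-m with m >= n, b has length m, ddiag has length n.
        # Minimise |A^T x - b|^2 + |D x|^2 over x.
        n, m = a.shape
        lhs = np.vstack([a.T, np.diag(ddiag)])    # (m + n) x n
        rhs = np.concatenate([b, np.zeros(n)])    # length m + n
        x = np.linalg.lstsq(lhs, rhs, rcond=None)[0]
        return x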
5,117 | pypa/pipenv | pipenv/vendor/jinja2/filters.py | do_unique | def do_unique(environment, value, case_sensitive=False, attribute=None):
"""Returns a list of unique items from the the given iterable.
.. sourcecode:: jinja
{{ ['foo', 'bar', 'foobar', 'FooBar']|unique }}
-> ['foo', 'bar', 'foobar']
The unique items are yielded in the same order as their first occurrence in
the iterable passed to the filter.
:param case_sensitive: Treat upper and lower case strings as distinct.
:param attribute: Filter objects with unique values for this attribute.
"""
getter = make_attrgetter(
environment, attribute,
postprocess=ignore_case if not case_sensitive else None
)
seen = set()
for item in value:
key = getter(item)
if key not in seen:
seen.add(key)
yield item | python | def do_unique(environment, value, case_sensitive=False, attribute=None):
"""Returns a list of unique items from the the given iterable.
.. sourcecode:: jinja
{{ ['foo', 'bar', 'foobar', 'FooBar']|unique }}
-> ['foo', 'bar', 'foobar']
The unique items are yielded in the same order as their first occurrence in
the iterable passed to the filter.
:param case_sensitive: Treat upper and lower case strings as distinct.
:param attribute: Filter objects with unique values for this attribute.
"""
getter = make_attrgetter(
environment, attribute,
postprocess=ignore_case if not case_sensitive else None
)
seen = set()
for item in value:
key = getter(item)
if key not in seen:
seen.add(key)
yield item | ['def', 'do_unique', '(', 'environment', ',', 'value', ',', 'case_sensitive', '=', 'False', ',', 'attribute', '=', 'None', ')', ':', 'getter', '=', 'make_attrgetter', '(', 'environment', ',', 'attribute', ',', 'postprocess', '=', 'ignore_case', 'if', 'not', 'case_sensitive', 'else', 'None', ')', 'seen', '=', 'set', '(', ')', 'for', 'item', 'in', 'value', ':', 'key', '=', 'getter', '(', 'item', ')', 'if', 'key', 'not', 'in', 'seen', ':', 'seen', '.', 'add', '(', 'key', ')', 'yield', 'item'] | Returns a list of unique items from the the given iterable.
.. sourcecode:: jinja
{{ ['foo', 'bar', 'foobar', 'FooBar']|unique }}
-> ['foo', 'bar', 'foobar']
The unique items are yielded in the same order as their first occurrence in
the iterable passed to the filter.
:param case_sensitive: Treat upper and lower case strings as distinct.
:param attribute: Filter objects with unique values for this attribute. | ['Returns', 'a', 'list', 'of', 'unique', 'items', 'from', 'the', 'the', 'given', 'iterable', '.'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/filters.py#L282-L307 |
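A short usage sketch of the unique filter with the attribute argument; the template and data are invented for illustration and assume a Jinja2 release that ships this filter (2.10 or later)::

    from jinja2 import Environment

    env = Environment()
    tmpl = env.from_string(
        "{{ users|unique(attribute='name')|map(attribute='name')|join(', ') }}")
    users = [{'name': 'Ada'}, {'name': 'ada'}, {'name': 'Bob'}, {'name': 'Ada'}]
    # case_sensitive defaults to False, so 'ada' collapses into the first 'Ada'.
    print(tmpl.render(users=users))  # expected: Ada, Bob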
5,118 | CivicSpleen/ambry | ambry/bundle/bundle.py | Bundle._resolve_sources | def _resolve_sources(self, sources, tables, stage=None, predicate=None):
"""
Determine what sources to run from an input of sources and tables
:param sources: A collection of source objects, source names, or source vids
:param tables: A collection of table names
:param stage: If not None, select only sources from this stage
:param predicate: If not None, a callable that selects a source to return when True
:return:
"""
assert sources is None or tables is None
if not sources:
if tables:
sources = list(s for s in self.sources if s.dest_table_name in tables)
else:
sources = self.sources
elif not isinstance(sources, (list, tuple)):
sources = [sources]
def objectify(source):
if isinstance(source, basestring):
source_name = source
return self.source(source_name)
else:
return source
sources = [objectify(s) for s in sources]
if predicate:
sources = [s for s in sources if predicate(s)]
if stage:
sources = [s for s in sources if str(s.stage) == str(stage)]
return sources | python | def _resolve_sources(self, sources, tables, stage=None, predicate=None):
"""
Determine what sources to run from an input of sources and tables
:param sources: A collection of source objects, source names, or source vids
:param tables: A collection of table names
:param stage: If not None, select only sources from this stage
:param predicate: If not None, a callable that selects a source to return when True
:return:
"""
assert sources is None or tables is None
if not sources:
if tables:
sources = list(s for s in self.sources if s.dest_table_name in tables)
else:
sources = self.sources
elif not isinstance(sources, (list, tuple)):
sources = [sources]
def objectify(source):
if isinstance(source, basestring):
source_name = source
return self.source(source_name)
else:
return source
sources = [objectify(s) for s in sources]
if predicate:
sources = [s for s in sources if predicate(s)]
if stage:
sources = [s for s in sources if str(s.stage) == str(stage)]
return sources | ['def', '_resolve_sources', '(', 'self', ',', 'sources', ',', 'tables', ',', 'stage', '=', 'None', ',', 'predicate', '=', 'None', ')', ':', 'assert', 'sources', 'is', 'None', 'or', 'tables', 'is', 'None', 'if', 'not', 'sources', ':', 'if', 'tables', ':', 'sources', '=', 'list', '(', 's', 'for', 's', 'in', 'self', '.', 'sources', 'if', 's', '.', 'dest_table_name', 'in', 'tables', ')', 'else', ':', 'sources', '=', 'self', '.', 'sources', 'elif', 'not', 'isinstance', '(', 'sources', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'sources', '=', '[', 'sources', ']', 'def', 'objectify', '(', 'source', ')', ':', 'if', 'isinstance', '(', 'source', ',', 'basestring', ')', ':', 'source_name', '=', 'source', 'return', 'self', '.', 'source', '(', 'source_name', ')', 'else', ':', 'return', 'source', 'sources', '=', '[', 'objectify', '(', 's', ')', 'for', 's', 'in', 'sources', ']', 'if', 'predicate', ':', 'sources', '=', '[', 's', 'for', 's', 'in', 'sources', 'if', 'predicate', '(', 's', ')', ']', 'if', 'stage', ':', 'sources', '=', '[', 's', 'for', 's', 'in', 'sources', 'if', 'str', '(', 's', '.', 'stage', ')', '==', 'str', '(', 'stage', ')', ']', 'return', 'sources'] | Determine what sources to run from an input of sources and tables
:param sources: A collection of source objects, source names, or source vids
:param tables: A collection of table names
:param stage: If not None, select only sources from this stage
:param predicate: If not none, a callable that selects a source to return when True
:return: | ['Determine', 'what', 'sources', 'to', 'run', 'from', 'an', 'input', 'of', 'sources', 'and', 'tables'] | train | https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/bundle.py#L507-L544 |
5,119 | fabioz/PyDev.Debugger | _pydev_imps/_pydev_SimpleXMLRPCServer.py | SimpleXMLRPCDispatcher.system_listMethods | def system_listMethods(self):
"""system.listMethods() => ['add', 'subtract', 'multiple']
Returns a list of the methods supported by the server."""
methods = self.funcs.keys()
if self.instance is not None:
# Instance can implement _listMethod to return a list of
# methods
if hasattr(self.instance, '_listMethods'):
methods = remove_duplicates(
methods + self.instance._listMethods()
)
# if the instance has a _dispatch method then we
# don't have enough information to provide a list
# of methods
elif not hasattr(self.instance, '_dispatch'):
methods = remove_duplicates(
methods + list_public_methods(self.instance)
)
methods.sort()
return methods | python | def system_listMethods(self):
"""system.listMethods() => ['add', 'subtract', 'multiple']
Returns a list of the methods supported by the server."""
methods = self.funcs.keys()
if self.instance is not None:
# Instance can implement _listMethod to return a list of
# methods
if hasattr(self.instance, '_listMethods'):
methods = remove_duplicates(
methods + self.instance._listMethods()
)
# if the instance has a _dispatch method then we
# don't have enough information to provide a list
# of methods
elif not hasattr(self.instance, '_dispatch'):
methods = remove_duplicates(
methods + list_public_methods(self.instance)
)
methods.sort()
return methods | ['def', 'system_listMethods', '(', 'self', ')', ':', 'methods', '=', 'self', '.', 'funcs', '.', 'keys', '(', ')', 'if', 'self', '.', 'instance', 'is', 'not', 'None', ':', '# Instance can implement _listMethod to return a list of', '# methods', 'if', 'hasattr', '(', 'self', '.', 'instance', ',', "'_listMethods'", ')', ':', 'methods', '=', 'remove_duplicates', '(', 'methods', '+', 'self', '.', 'instance', '.', '_listMethods', '(', ')', ')', '# if the instance has a _dispatch method then we', "# don't have enough information to provide a list", '# of methods', 'elif', 'not', 'hasattr', '(', 'self', '.', 'instance', ',', "'_dispatch'", ')', ':', 'methods', '=', 'remove_duplicates', '(', 'methods', '+', 'list_public_methods', '(', 'self', '.', 'instance', ')', ')', 'methods', '.', 'sort', '(', ')', 'return', 'methods'] | system.listMethods() => ['add', 'subtract', 'multiple']
Returns a list of the methods supported by the server. | ['system', '.', 'listMethods', '()', '=', '>', '[', 'add', 'subtract', 'multiple', ']'] | train | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydev_imps/_pydev_SimpleXMLRPCServer.py#L275-L296 |
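
The row above is PyDev's vendored copy of the standard-library XML-RPC dispatcher; a sketch of the same introspection call using the stock Python 3 module, where `register_introspection_functions()` is what exposes `system.listMethods` to clients:

from xmlrpc.server import SimpleXMLRPCServer

server = SimpleXMLRPCServer(("localhost", 8000), allow_none=True)
server.register_function(pow)
server.register_function(lambda a, b: a + b, "add")
server.register_introspection_functions()   # wires up system.listMethods()

# A client can then introspect the running server:
#   import xmlrpc.client
#   proxy = xmlrpc.client.ServerProxy("http://localhost:8000")
#   proxy.system.listMethods()   # -> ['add', 'pow', 'system.listMethods', ...]
server.serve_forever()
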
5,120 | mitsei/dlkit | dlkit/handcar/relationship/managers.py | RelationshipManager.get_family_admin_session | def get_family_admin_session(self):
"""Gets the ``OsidSession`` associated with the family administrative service.
return: (osid.relationship.FamilyAdminSession) - a
``FamilyAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_family_admin()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_family_admin()`` is ``true``.*
"""
if not self.supports_family_admin():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
try:
session = sessions.FamilyAdminSession(proxy=self._proxy,
runtime=self._runtime)
except AttributeError:
raise OperationFailed()
return session | python | def get_family_admin_session(self):
"""Gets the ``OsidSession`` associated with the family administrative service.
return: (osid.relationship.FamilyAdminSession) - a
``FamilyAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_family_admin()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_family_admin()`` is ``true``.*
"""
if not self.supports_family_admin():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
try:
session = sessions.FamilyAdminSession(proxy=self._proxy,
runtime=self._runtime)
except AttributeError:
raise OperationFailed()
return session | ['def', 'get_family_admin_session', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'supports_family_admin', '(', ')', ':', 'raise', 'Unimplemented', '(', ')', 'try', ':', 'from', '.', 'import', 'sessions', 'except', 'ImportError', ':', 'raise', 'OperationFailed', '(', ')', 'try', ':', 'session', '=', 'sessions', '.', 'FamilyAdminSession', '(', 'proxy', '=', 'self', '.', '_proxy', ',', 'runtime', '=', 'self', '.', '_runtime', ')', 'except', 'AttributeError', ':', 'raise', 'OperationFailed', '(', ')', 'return', 'session'] | Gets the ``OsidSession`` associated with the family administrative service.
return: (osid.relationship.FamilyAdminSession) - a
``FamilyAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_family_admin()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_family_admin()`` is ``true``.* | ['Gets', 'the', 'OsidSession', 'associated', 'with', 'the', 'family', 'administrative', 'service', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/relationship/managers.py#L743-L764 |
5,121 | mikemaccana/python-docx | docx.py | table | def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
twunit='auto', borders={}, celstyle=None):
"""
Return a table element based on specified parameters
@param list contents: A list of lists describing contents. Every item in
the list can be a string or a valid XML element
itself. It can also be a list. In that case all the
listed elements will be merged into the cell.
@param bool heading: Tells whether first line should be treated as
heading or not
@param list colw: list of integer column widths specified in wunitS.
@param str cwunit: Unit used for column width:
'pct' : fiftieths of a percent
'dxa' : twentieths of a point
'nil' : no width
'auto' : automagically determined
@param int tblw: Table width
@param str twunit: Unit used for table width. Same possible values as
cwunit.
@param dict borders: Dictionary defining table border. Supported keys
are: 'top', 'left', 'bottom', 'right',
'insideH', 'insideV', 'all'.
When specified, the 'all' key has precedence over
others. Each key must define a dict of border
attributes:
color : The color of the border, in hex or
'auto'
space : The space, measured in points
sz : The size of the border, in eighths of
a point
val : The style of the border, see
http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
@param list celstyle: Specify the style for each colum, list of dicts.
supported keys:
'align' : specify the alignment, see paragraph
documentation.
@return lxml.etree: Generated XML etree element
"""
table = makeelement('tbl')
columns = len(contents[0])
# Table properties
tableprops = makeelement('tblPr')
tablestyle = makeelement('tblStyle', attributes={'val': ''})
tableprops.append(tablestyle)
tablewidth = makeelement(
'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
tableprops.append(tablewidth)
if len(borders.keys()):
tableborders = makeelement('tblBorders')
for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
if b in borders.keys() or 'all' in borders.keys():
k = 'all' if 'all' in borders.keys() else b
attrs = {}
for a in borders[k].keys():
attrs[a] = unicode(borders[k][a])
borderelem = makeelement(b, attributes=attrs)
tableborders.append(borderelem)
tableprops.append(tableborders)
tablelook = makeelement('tblLook', attributes={'val': '0400'})
tableprops.append(tablelook)
table.append(tableprops)
# Table Grid
tablegrid = makeelement('tblGrid')
for i in range(columns):
attrs = {'w': str(colw[i]) if colw else '2390'}
tablegrid.append(makeelement('gridCol', attributes=attrs))
table.append(tablegrid)
# Heading Row
row = makeelement('tr')
rowprops = makeelement('trPr')
cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
rowprops.append(cnfStyle)
row.append(rowprops)
if heading:
i = 0
for heading in contents[0]:
cell = makeelement('tc')
# Cell properties
cellprops = makeelement('tcPr')
if colw:
wattr = {'w': str(colw[i]), 'type': cwunit}
else:
wattr = {'w': '0', 'type': 'auto'}
cellwidth = makeelement('tcW', attributes=wattr)
cellstyle = makeelement('shd', attributes={'val': 'clear',
'color': 'auto',
'fill': 'FFFFFF',
'themeFill': 'text2',
'themeFillTint': '99'})
cellprops.append(cellwidth)
cellprops.append(cellstyle)
cell.append(cellprops)
# Paragraph (Content)
if not isinstance(heading, (list, tuple)):
heading = [heading]
for h in heading:
if isinstance(h, etree._Element):
cell.append(h)
else:
cell.append(paragraph(h, jc='center'))
row.append(cell)
i += 1
table.append(row)
# Contents Rows
for contentrow in contents[1 if heading else 0:]:
row = makeelement('tr')
i = 0
for content in contentrow:
cell = makeelement('tc')
# Properties
cellprops = makeelement('tcPr')
if colw:
wattr = {'w': str(colw[i]), 'type': cwunit}
else:
wattr = {'w': '0', 'type': 'auto'}
cellwidth = makeelement('tcW', attributes=wattr)
cellprops.append(cellwidth)
cell.append(cellprops)
# Paragraph (Content)
if not isinstance(content, (list, tuple)):
content = [content]
for c in content:
if isinstance(c, etree._Element):
cell.append(c)
else:
if celstyle and 'align' in celstyle[i].keys():
align = celstyle[i]['align']
else:
align = 'left'
cell.append(paragraph(c, jc=align))
row.append(cell)
i += 1
table.append(row)
return table | python | def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
twunit='auto', borders={}, celstyle=None):
"""
Return a table element based on specified parameters
@param list contents: A list of lists describing contents. Every item in
the list can be a string or a valid XML element
itself. It can also be a list. In that case all the
listed elements will be merged into the cell.
@param bool heading: Tells whether first line should be treated as
heading or not
@param list colw: list of integer column widths specified in wunitS.
@param str cwunit: Unit used for column width:
'pct' : fiftieths of a percent
'dxa' : twentieths of a point
'nil' : no width
'auto' : automagically determined
@param int tblw: Table width
@param str twunit: Unit used for table width. Same possible values as
cwunit.
@param dict borders: Dictionary defining table border. Supported keys
are: 'top', 'left', 'bottom', 'right',
'insideH', 'insideV', 'all'.
When specified, the 'all' key has precedence over
others. Each key must define a dict of border
attributes:
color : The color of the border, in hex or
'auto'
space : The space, measured in points
sz : The size of the border, in eighths of
a point
val : The style of the border, see
http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
@param list celstyle: Specify the style for each colum, list of dicts.
supported keys:
'align' : specify the alignment, see paragraph
documentation.
@return lxml.etree: Generated XML etree element
"""
table = makeelement('tbl')
columns = len(contents[0])
# Table properties
tableprops = makeelement('tblPr')
tablestyle = makeelement('tblStyle', attributes={'val': ''})
tableprops.append(tablestyle)
tablewidth = makeelement(
'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
tableprops.append(tablewidth)
if len(borders.keys()):
tableborders = makeelement('tblBorders')
for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
if b in borders.keys() or 'all' in borders.keys():
k = 'all' if 'all' in borders.keys() else b
attrs = {}
for a in borders[k].keys():
attrs[a] = unicode(borders[k][a])
borderelem = makeelement(b, attributes=attrs)
tableborders.append(borderelem)
tableprops.append(tableborders)
tablelook = makeelement('tblLook', attributes={'val': '0400'})
tableprops.append(tablelook)
table.append(tableprops)
# Table Grid
tablegrid = makeelement('tblGrid')
for i in range(columns):
attrs = {'w': str(colw[i]) if colw else '2390'}
tablegrid.append(makeelement('gridCol', attributes=attrs))
table.append(tablegrid)
# Heading Row
row = makeelement('tr')
rowprops = makeelement('trPr')
cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
rowprops.append(cnfStyle)
row.append(rowprops)
if heading:
i = 0
for heading in contents[0]:
cell = makeelement('tc')
# Cell properties
cellprops = makeelement('tcPr')
if colw:
wattr = {'w': str(colw[i]), 'type': cwunit}
else:
wattr = {'w': '0', 'type': 'auto'}
cellwidth = makeelement('tcW', attributes=wattr)
cellstyle = makeelement('shd', attributes={'val': 'clear',
'color': 'auto',
'fill': 'FFFFFF',
'themeFill': 'text2',
'themeFillTint': '99'})
cellprops.append(cellwidth)
cellprops.append(cellstyle)
cell.append(cellprops)
# Paragraph (Content)
if not isinstance(heading, (list, tuple)):
heading = [heading]
for h in heading:
if isinstance(h, etree._Element):
cell.append(h)
else:
cell.append(paragraph(h, jc='center'))
row.append(cell)
i += 1
table.append(row)
# Contents Rows
for contentrow in contents[1 if heading else 0:]:
row = makeelement('tr')
i = 0
for content in contentrow:
cell = makeelement('tc')
# Properties
cellprops = makeelement('tcPr')
if colw:
wattr = {'w': str(colw[i]), 'type': cwunit}
else:
wattr = {'w': '0', 'type': 'auto'}
cellwidth = makeelement('tcW', attributes=wattr)
cellprops.append(cellwidth)
cell.append(cellprops)
# Paragraph (Content)
if not isinstance(content, (list, tuple)):
content = [content]
for c in content:
if isinstance(c, etree._Element):
cell.append(c)
else:
if celstyle and 'align' in celstyle[i].keys():
align = celstyle[i]['align']
else:
align = 'left'
cell.append(paragraph(c, jc=align))
row.append(cell)
i += 1
table.append(row)
return table | ['def', 'table', '(', 'contents', ',', 'heading', '=', 'True', ',', 'colw', '=', 'None', ',', 'cwunit', '=', "'dxa'", ',', 'tblw', '=', '0', ',', 'twunit', '=', "'auto'", ',', 'borders', '=', '{', '}', ',', 'celstyle', '=', 'None', ')', ':', 'table', '=', 'makeelement', '(', "'tbl'", ')', 'columns', '=', 'len', '(', 'contents', '[', '0', ']', ')', '# Table properties', 'tableprops', '=', 'makeelement', '(', "'tblPr'", ')', 'tablestyle', '=', 'makeelement', '(', "'tblStyle'", ',', 'attributes', '=', '{', "'val'", ':', "''", '}', ')', 'tableprops', '.', 'append', '(', 'tablestyle', ')', 'tablewidth', '=', 'makeelement', '(', "'tblW'", ',', 'attributes', '=', '{', "'w'", ':', 'str', '(', 'tblw', ')', ',', "'type'", ':', 'str', '(', 'twunit', ')', '}', ')', 'tableprops', '.', 'append', '(', 'tablewidth', ')', 'if', 'len', '(', 'borders', '.', 'keys', '(', ')', ')', ':', 'tableborders', '=', 'makeelement', '(', "'tblBorders'", ')', 'for', 'b', 'in', '[', "'top'", ',', "'left'", ',', "'bottom'", ',', "'right'", ',', "'insideH'", ',', "'insideV'", ']', ':', 'if', 'b', 'in', 'borders', '.', 'keys', '(', ')', 'or', "'all'", 'in', 'borders', '.', 'keys', '(', ')', ':', 'k', '=', "'all'", 'if', "'all'", 'in', 'borders', '.', 'keys', '(', ')', 'else', 'b', 'attrs', '=', '{', '}', 'for', 'a', 'in', 'borders', '[', 'k', ']', '.', 'keys', '(', ')', ':', 'attrs', '[', 'a', ']', '=', 'unicode', '(', 'borders', '[', 'k', ']', '[', 'a', ']', ')', 'borderelem', '=', 'makeelement', '(', 'b', ',', 'attributes', '=', 'attrs', ')', 'tableborders', '.', 'append', '(', 'borderelem', ')', 'tableprops', '.', 'append', '(', 'tableborders', ')', 'tablelook', '=', 'makeelement', '(', "'tblLook'", ',', 'attributes', '=', '{', "'val'", ':', "'0400'", '}', ')', 'tableprops', '.', 'append', '(', 'tablelook', ')', 'table', '.', 'append', '(', 'tableprops', ')', '# Table Grid', 'tablegrid', '=', 'makeelement', '(', "'tblGrid'", ')', 'for', 'i', 'in', 'range', '(', 'columns', ')', ':', 'attrs', '=', '{', "'w'", ':', 'str', '(', 'colw', '[', 'i', ']', ')', 'if', 'colw', 'else', "'2390'", '}', 'tablegrid', '.', 'append', '(', 'makeelement', '(', "'gridCol'", ',', 'attributes', '=', 'attrs', ')', ')', 'table', '.', 'append', '(', 'tablegrid', ')', '# Heading Row', 'row', '=', 'makeelement', '(', "'tr'", ')', 'rowprops', '=', 'makeelement', '(', "'trPr'", ')', 'cnfStyle', '=', 'makeelement', '(', "'cnfStyle'", ',', 'attributes', '=', '{', "'val'", ':', "'000000100000'", '}', ')', 'rowprops', '.', 'append', '(', 'cnfStyle', ')', 'row', '.', 'append', '(', 'rowprops', ')', 'if', 'heading', ':', 'i', '=', '0', 'for', 'heading', 'in', 'contents', '[', '0', ']', ':', 'cell', '=', 'makeelement', '(', "'tc'", ')', '# Cell properties', 'cellprops', '=', 'makeelement', '(', "'tcPr'", ')', 'if', 'colw', ':', 'wattr', '=', '{', "'w'", ':', 'str', '(', 'colw', '[', 'i', ']', ')', ',', "'type'", ':', 'cwunit', '}', 'else', ':', 'wattr', '=', '{', "'w'", ':', "'0'", ',', "'type'", ':', "'auto'", '}', 'cellwidth', '=', 'makeelement', '(', "'tcW'", ',', 'attributes', '=', 'wattr', ')', 'cellstyle', '=', 'makeelement', '(', "'shd'", ',', 'attributes', '=', '{', "'val'", ':', "'clear'", ',', "'color'", ':', "'auto'", ',', "'fill'", ':', "'FFFFFF'", ',', "'themeFill'", ':', "'text2'", ',', "'themeFillTint'", ':', "'99'", '}', ')', 'cellprops', '.', 'append', '(', 'cellwidth', ')', 'cellprops', '.', 'append', '(', 'cellstyle', ')', 'cell', '.', 'append', '(', 'cellprops', ')', '# Paragraph (Content)', 'if', 'not', 'isinstance', '(', 
'heading', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'heading', '=', '[', 'heading', ']', 'for', 'h', 'in', 'heading', ':', 'if', 'isinstance', '(', 'h', ',', 'etree', '.', '_Element', ')', ':', 'cell', '.', 'append', '(', 'h', ')', 'else', ':', 'cell', '.', 'append', '(', 'paragraph', '(', 'h', ',', 'jc', '=', "'center'", ')', ')', 'row', '.', 'append', '(', 'cell', ')', 'i', '+=', '1', 'table', '.', 'append', '(', 'row', ')', '# Contents Rows', 'for', 'contentrow', 'in', 'contents', '[', '1', 'if', 'heading', 'else', '0', ':', ']', ':', 'row', '=', 'makeelement', '(', "'tr'", ')', 'i', '=', '0', 'for', 'content', 'in', 'contentrow', ':', 'cell', '=', 'makeelement', '(', "'tc'", ')', '# Properties', 'cellprops', '=', 'makeelement', '(', "'tcPr'", ')', 'if', 'colw', ':', 'wattr', '=', '{', "'w'", ':', 'str', '(', 'colw', '[', 'i', ']', ')', ',', "'type'", ':', 'cwunit', '}', 'else', ':', 'wattr', '=', '{', "'w'", ':', "'0'", ',', "'type'", ':', "'auto'", '}', 'cellwidth', '=', 'makeelement', '(', "'tcW'", ',', 'attributes', '=', 'wattr', ')', 'cellprops', '.', 'append', '(', 'cellwidth', ')', 'cell', '.', 'append', '(', 'cellprops', ')', '# Paragraph (Content)', 'if', 'not', 'isinstance', '(', 'content', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'content', '=', '[', 'content', ']', 'for', 'c', 'in', 'content', ':', 'if', 'isinstance', '(', 'c', ',', 'etree', '.', '_Element', ')', ':', 'cell', '.', 'append', '(', 'c', ')', 'else', ':', 'if', 'celstyle', 'and', "'align'", 'in', 'celstyle', '[', 'i', ']', '.', 'keys', '(', ')', ':', 'align', '=', 'celstyle', '[', 'i', ']', '[', "'align'", ']', 'else', ':', 'align', '=', "'left'", 'cell', '.', 'append', '(', 'paragraph', '(', 'c', ',', 'jc', '=', 'align', ')', ')', 'row', '.', 'append', '(', 'cell', ')', 'i', '+=', '1', 'table', '.', 'append', '(', 'row', ')', 'return', 'table'] | Return a table element based on specified parameters
@param list contents: A list of lists describing contents. Every item in
the list can be a string or a valid XML element
itself. It can also be a list. In that case all the
listed elements will be merged into the cell.
@param bool heading: Tells whether first line should be treated as
heading or not
@param list colw: list of integer column widths specified in wunitS.
@param str cwunit: Unit used for column width:
'pct' : fiftieths of a percent
'dxa' : twentieths of a point
'nil' : no width
'auto' : automagically determined
@param int tblw: Table width
@param str twunit: Unit used for table width. Same possible values as
cwunit.
@param dict borders: Dictionary defining table border. Supported keys
are: 'top', 'left', 'bottom', 'right',
'insideH', 'insideV', 'all'.
When specified, the 'all' key has precedence over
others. Each key must define a dict of border
attributes:
color : The color of the border, in hex or
'auto'
space : The space, measured in points
sz : The size of the border, in eighths of
a point
val : The style of the border, see
http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
@param list celstyle: Specify the style for each colum, list of dicts.
supported keys:
'align' : specify the alignment, see paragraph
documentation.
@return lxml.etree: Generated XML etree element | ['Return', 'a', 'table', 'element', 'based', 'on', 'specified', 'parameters'] | train | https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L297-L431 |
5,122 | FujiMakoto/IPS-Vagrant | ips_vagrant/common/__init__.py | cookiejar | def cookiejar(name='session'):
"""
Ready the CookieJar, loading a saved session if available
@rtype: cookielib.LWPCookieJar
"""
log = logging.getLogger('ipsv.common.cookiejar')
spath = os.path.join(config().get('Paths', 'Data'), '{n}.txt'.format(n=name))
cj = cookielib.LWPCookieJar(spath)
log.debug('Attempting to load session file: %s', spath)
if os.path.exists(spath):
try:
cj.load()
log.info('Successfully loaded a saved session / cookie file')
except cookielib.LoadError as e:
log.warn('Session / cookie file exists, but could not be loaded', exc_info=e)
return cj | python | def cookiejar(name='session'):
"""
Ready the CookieJar, loading a saved session if available
@rtype: cookielib.LWPCookieJar
"""
log = logging.getLogger('ipsv.common.cookiejar')
spath = os.path.join(config().get('Paths', 'Data'), '{n}.txt'.format(n=name))
cj = cookielib.LWPCookieJar(spath)
log.debug('Attempting to load session file: %s', spath)
if os.path.exists(spath):
try:
cj.load()
log.info('Successfully loaded a saved session / cookie file')
except cookielib.LoadError as e:
log.warn('Session / cookie file exists, but could not be loaded', exc_info=e)
return cj | ['def', 'cookiejar', '(', 'name', '=', "'session'", ')', ':', 'log', '=', 'logging', '.', 'getLogger', '(', "'ipsv.common.cookiejar'", ')', 'spath', '=', 'os', '.', 'path', '.', 'join', '(', 'config', '(', ')', '.', 'get', '(', "'Paths'", ',', "'Data'", ')', ',', "'{n}.txt'", '.', 'format', '(', 'n', '=', 'name', ')', ')', 'cj', '=', 'cookielib', '.', 'LWPCookieJar', '(', 'spath', ')', 'log', '.', 'debug', '(', "'Attempting to load session file: %s'", ',', 'spath', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'spath', ')', ':', 'try', ':', 'cj', '.', 'load', '(', ')', 'log', '.', 'info', '(', "'Successfully loaded a saved session / cookie file'", ')', 'except', 'cookielib', '.', 'LoadError', 'as', 'e', ':', 'log', '.', 'warn', '(', "'Session / cookie file exists, but could not be loaded'", ',', 'exc_info', '=', 'e', ')', 'return', 'cj'] | Ready the CookieJar, loading a saved session if available
@rtype: cookielib.LWPCookieJar | ['Ready', 'the', 'CookieJar', 'loading', 'a', 'saved', 'session', 'if', 'available'] | train | https://github.com/FujiMakoto/IPS-Vagrant/blob/7b1d6d095034dd8befb026d9315ecc6494d52269/ips_vagrant/common/__init__.py#L88-L104 |
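
A usage sketch for `cookiejar()`, assuming the `ips_vagrant` package is installed and configured; the code targets Python 2 (hence `urllib2`/`cookielib`), and the URL below is a placeholder:

import urllib2
from ips_vagrant.common import cookiejar

cj = cookiejar('session')    # re-uses <data path>/session.txt when it exists
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
opener.open('https://example.com/')   # cookies from the response land in cj
cj.save()                             # persist the session for the next run
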
5,123 | saltstack/salt | salt/modules/cp.py | recv | def recv(files, dest):
'''
Used with salt-cp, pass the files dict, and the destination.
This function receives small fast copy files from the master via salt-cp.
It does not work via the CLI.
'''
ret = {}
for path, data in six.iteritems(files):
if os.path.basename(path) == os.path.basename(dest) \
and not os.path.isdir(dest):
final = dest
elif os.path.isdir(dest):
final = os.path.join(dest, os.path.basename(path))
elif os.path.isdir(os.path.dirname(dest)):
final = dest
else:
return 'Destination unavailable'
try:
with salt.utils.files.fopen(final, 'w+') as fp_:
fp_.write(data)
ret[final] = True
except IOError:
ret[final] = False
return ret | python | def recv(files, dest):
'''
Used with salt-cp, pass the files dict, and the destination.
This function receives small fast copy files from the master via salt-cp.
It does not work via the CLI.
'''
ret = {}
for path, data in six.iteritems(files):
if os.path.basename(path) == os.path.basename(dest) \
and not os.path.isdir(dest):
final = dest
elif os.path.isdir(dest):
final = os.path.join(dest, os.path.basename(path))
elif os.path.isdir(os.path.dirname(dest)):
final = dest
else:
return 'Destination unavailable'
try:
with salt.utils.files.fopen(final, 'w+') as fp_:
fp_.write(data)
ret[final] = True
except IOError:
ret[final] = False
return ret | ['def', 'recv', '(', 'files', ',', 'dest', ')', ':', 'ret', '=', '{', '}', 'for', 'path', ',', 'data', 'in', 'six', '.', 'iteritems', '(', 'files', ')', ':', 'if', 'os', '.', 'path', '.', 'basename', '(', 'path', ')', '==', 'os', '.', 'path', '.', 'basename', '(', 'dest', ')', 'and', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'dest', ')', ':', 'final', '=', 'dest', 'elif', 'os', '.', 'path', '.', 'isdir', '(', 'dest', ')', ':', 'final', '=', 'os', '.', 'path', '.', 'join', '(', 'dest', ',', 'os', '.', 'path', '.', 'basename', '(', 'path', ')', ')', 'elif', 'os', '.', 'path', '.', 'isdir', '(', 'os', '.', 'path', '.', 'dirname', '(', 'dest', ')', ')', ':', 'final', '=', 'dest', 'else', ':', 'return', "'Destination unavailable'", 'try', ':', 'with', 'salt', '.', 'utils', '.', 'files', '.', 'fopen', '(', 'final', ',', "'w+'", ')', 'as', 'fp_', ':', 'fp_', '.', 'write', '(', 'data', ')', 'ret', '[', 'final', ']', '=', 'True', 'except', 'IOError', ':', 'ret', '[', 'final', ']', '=', 'False', 'return', 'ret'] | Used with salt-cp, pass the files dict, and the destination.
This function receives small fast copy files from the master via salt-cp.
It does not work via the CLI. | ['Used', 'with', 'salt', '-', 'cp', 'pass', 'the', 'files', 'dict', 'and', 'the', 'destination', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cp.py#L63-L89 |
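
As the docstring notes, `recv` is normally driven by `salt-cp` rather than the CLI; a direct-call sketch to illustrate its behaviour, assuming a SaltStack installation on the Python path and placeholder file paths:

from salt.modules.cp import recv

# `files` maps the source path on the master to the raw file contents.
result = recv({'/srv/salt/motd': 'Welcome to this minion\n'}, '/tmp/motd')
print(result)   # e.g. {'/tmp/motd': True}, or a False value for a failed write
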
5,124 | alvarogzp/telegram-bot-framework | bot/action/util/format.py | UserFormatter.default_format | def default_format(self):
"""
Returns full name (first and last) if name is available.
If not, returns username if available.
If not available too, returns the user id as a string.
"""
user = self.user
if user.first_name is not None:
return self.full_name
elif user.username is not None:
return user.username
else:
return str(user.id) | python | def default_format(self):
"""
Returns full name (first and last) if name is available.
If not, returns username if available.
If not available too, returns the user id as a string.
"""
user = self.user
if user.first_name is not None:
return self.full_name
elif user.username is not None:
return user.username
else:
return str(user.id) | ['def', 'default_format', '(', 'self', ')', ':', 'user', '=', 'self', '.', 'user', 'if', 'user', '.', 'first_name', 'is', 'not', 'None', ':', 'return', 'self', '.', 'full_name', 'elif', 'user', '.', 'username', 'is', 'not', 'None', ':', 'return', 'user', '.', 'username', 'else', ':', 'return', 'str', '(', 'user', '.', 'id', ')'] | Returns full name (first and last) if name is available.
If not, returns username if available.
If not available too, returns the user id as a string. | ['Returns', 'full', 'name', '(', 'first', 'and', 'last', ')', 'if', 'name', 'is', 'available', '.', 'If', 'not', 'returns', 'username', 'if', 'available', '.', 'If', 'not', 'available', 'too', 'returns', 'the', 'user', 'id', 'as', 'a', 'string', '.'] | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/util/format.py#L31-L43 |
5,125 | tanghaibao/jcvi | jcvi/apps/uclust.py | cons | def cons(f, mindepth):
"""
Makes a list of lists of reads at each site
"""
C = ClustFile(f)
for data in C:
names, seqs, nreps = zip(*data)
total_nreps = sum(nreps)
# Depth filter
if total_nreps < mindepth:
continue
S = []
for name, seq, nrep in data:
# Append sequence * number of dereps
S.append([seq, nrep])
# Make list for each site in sequences
res = stack(S)
yield [x[:4] for x in res if sum(x[:4]) >= mindepth] | python | def cons(f, mindepth):
"""
Makes a list of lists of reads at each site
"""
C = ClustFile(f)
for data in C:
names, seqs, nreps = zip(*data)
total_nreps = sum(nreps)
# Depth filter
if total_nreps < mindepth:
continue
S = []
for name, seq, nrep in data:
# Append sequence * number of dereps
S.append([seq, nrep])
# Make list for each site in sequences
res = stack(S)
yield [x[:4] for x in res if sum(x[:4]) >= mindepth] | ['def', 'cons', '(', 'f', ',', 'mindepth', ')', ':', 'C', '=', 'ClustFile', '(', 'f', ')', 'for', 'data', 'in', 'C', ':', 'names', ',', 'seqs', ',', 'nreps', '=', 'zip', '(', '*', 'data', ')', 'total_nreps', '=', 'sum', '(', 'nreps', ')', '# Depth filter', 'if', 'total_nreps', '<', 'mindepth', ':', 'continue', 'S', '=', '[', ']', 'for', 'name', ',', 'seq', ',', 'nrep', 'in', 'data', ':', '# Append sequence * number of dereps', 'S', '.', 'append', '(', '[', 'seq', ',', 'nrep', ']', ')', '# Make list for each site in sequences', 'res', '=', 'stack', '(', 'S', ')', 'yield', '[', 'x', '[', ':', '4', ']', 'for', 'x', 'in', 'res', 'if', 'sum', '(', 'x', '[', ':', '4', ']', ')', '>=', 'mindepth', ']'] | Makes a list of lists of reads at each site | ['Makes', 'a', 'list', 'of', 'lists', 'of', 'reads', 'at', 'each', 'site'] | train | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/uclust.py#L639-L658 |
5,126 | cloud-custodian/cloud-custodian | c7n/utils.py | camelResource | def camelResource(obj):
"""Some sources from apis return lowerCased where as describe calls
always return TitleCase, this function turns the former to the later
"""
if not isinstance(obj, dict):
return obj
for k in list(obj.keys()):
v = obj.pop(k)
obj["%s%s" % (k[0].upper(), k[1:])] = v
if isinstance(v, dict):
camelResource(v)
elif isinstance(v, list):
list(map(camelResource, v))
return obj | python | def camelResource(obj):
"""Some sources from apis return lowerCased where as describe calls
always return TitleCase, this function turns the former to the later
"""
if not isinstance(obj, dict):
return obj
for k in list(obj.keys()):
v = obj.pop(k)
obj["%s%s" % (k[0].upper(), k[1:])] = v
if isinstance(v, dict):
camelResource(v)
elif isinstance(v, list):
list(map(camelResource, v))
return obj | ['def', 'camelResource', '(', 'obj', ')', ':', 'if', 'not', 'isinstance', '(', 'obj', ',', 'dict', ')', ':', 'return', 'obj', 'for', 'k', 'in', 'list', '(', 'obj', '.', 'keys', '(', ')', ')', ':', 'v', '=', 'obj', '.', 'pop', '(', 'k', ')', 'obj', '[', '"%s%s"', '%', '(', 'k', '[', '0', ']', '.', 'upper', '(', ')', ',', 'k', '[', '1', ':', ']', ')', ']', '=', 'v', 'if', 'isinstance', '(', 'v', ',', 'dict', ')', ':', 'camelResource', '(', 'v', ')', 'elif', 'isinstance', '(', 'v', ',', 'list', ')', ':', 'list', '(', 'map', '(', 'camelResource', ',', 'v', ')', ')', 'return', 'obj'] | Some sources from apis return lowerCased where as describe calls
always return TitleCase, this function turns the former to the later | ['Some', 'sources', 'from', 'apis', 'return', 'lowerCased', 'where', 'as', 'describe', 'calls'] | train | https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/utils.py#L214-L228 |
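
A small sketch of `camelResource()`; note from the code above that it mutates the dictionary in place (recursing into nested dicts and lists) and also returns it:

from c7n.utils import camelResource

resource = {
    'instanceId': 'i-0abc123',
    'state': {'name': 'running'},
    'tags': [{'key': 'Env', 'value': 'prod'}],
}
print(camelResource(resource))
# -> {'InstanceId': 'i-0abc123', 'State': {'Name': 'running'},
#     'Tags': [{'Key': 'Env', 'Value': 'prod'}]}
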
5,127 | JoseAntFer/pyny3d | pyny3d/geoms.py | Space.explode_map | def explode_map(self, map_):
"""
Much faster version of ``pyny.Space.explode()`` method for
previously locked ``pyny.Space``.
:param map_: the points, and the same order, that appear at
``pyny.Space.get_map()``. There is no need for the index if
locked.
:type map_: ndarray (shape=(N, 3))
:returns: The polygons, the holes and the points.
:rtype: list
.. seealso::
* :func:`get_seed`
* :func:`get_map`
* :func:`map2pyny`
* :func:`map2seed`
"""
if self.explode_map_schedule is None:
index = map_[0]
points = map_[1]
# points
k = index[:, 1] == -1
sop = points[k] # Set of points
index = index[np.logical_not(k)]
points = points[np.logical_not(k)]
# new index
index_bool = np.diff(index[:, 2]*1e12
+index[:, 1]*1e8
+index[:, 2]*1e4).astype(bool)
# Dissemination loop
polygons = []
holes = []
dif = np.arange(index_bool.shape[0], dtype=int)[index_bool]+1
dif = np.append(dif, index_bool.shape[0]+1)
i = 0
for j in dif:
if index[i, 2] < 0: # hole
holes.append(points[i:j, :])
if index[i, 2] >= 0: # polygon
polygons.append(points[i:j, :])
i = j
return [polygons, holes, sop]
else:
# Only points (without index) allowed
if type(map_) == list:
points = map_[1]
else:
points = map_
ex = self.explode_map_schedule
polygons = [ points[p ,:] for p in ex[0] ]
holes = [ points[p ,:] for p in ex[1] ]
sop = points[ex[2] ,:]
return [polygons, holes, sop] | python | def explode_map(self, map_):
"""
Much faster version of ``pyny.Space.explode()`` method for
previously locked ``pyny.Space``.
:param map_: the points, and the same order, that appear at
``pyny.Space.get_map()``. There is no need for the index if
locked.
:type map_: ndarray (shape=(N, 3))
:returns: The polygons, the holes and the points.
:rtype: list
.. seealso::
* :func:`get_seed`
* :func:`get_map`
* :func:`map2pyny`
* :func:`map2seed`
"""
if self.explode_map_schedule is None:
index = map_[0]
points = map_[1]
# points
k = index[:, 1] == -1
sop = points[k] # Set of points
index = index[np.logical_not(k)]
points = points[np.logical_not(k)]
# new index
index_bool = np.diff(index[:, 2]*1e12
+index[:, 1]*1e8
+index[:, 2]*1e4).astype(bool)
# Dissemination loop
polygons = []
holes = []
dif = np.arange(index_bool.shape[0], dtype=int)[index_bool]+1
dif = np.append(dif, index_bool.shape[0]+1)
i = 0
for j in dif:
if index[i, 2] < 0: # hole
holes.append(points[i:j, :])
if index[i, 2] >= 0: # polygon
polygons.append(points[i:j, :])
i = j
return [polygons, holes, sop]
else:
# Only points (without index) allowed
if type(map_) == list:
points = map_[1]
else:
points = map_
ex = self.explode_map_schedule
polygons = [ points[p ,:] for p in ex[0] ]
holes = [ points[p ,:] for p in ex[1] ]
sop = points[ex[2] ,:]
return [polygons, holes, sop] | ['def', 'explode_map', '(', 'self', ',', 'map_', ')', ':', 'if', 'self', '.', 'explode_map_schedule', 'is', 'None', ':', 'index', '=', 'map_', '[', '0', ']', 'points', '=', 'map_', '[', '1', ']', '# points\r', 'k', '=', 'index', '[', ':', ',', '1', ']', '==', '-', '1', 'sop', '=', 'points', '[', 'k', ']', '# Set of points\r', 'index', '=', 'index', '[', 'np', '.', 'logical_not', '(', 'k', ')', ']', 'points', '=', 'points', '[', 'np', '.', 'logical_not', '(', 'k', ')', ']', '# new index\r', 'index_bool', '=', 'np', '.', 'diff', '(', 'index', '[', ':', ',', '2', ']', '*', '1e12', '+', 'index', '[', ':', ',', '1', ']', '*', '1e8', '+', 'index', '[', ':', ',', '2', ']', '*', '1e4', ')', '.', 'astype', '(', 'bool', ')', '# Dissemination loop\r', 'polygons', '=', '[', ']', 'holes', '=', '[', ']', 'dif', '=', 'np', '.', 'arange', '(', 'index_bool', '.', 'shape', '[', '0', ']', ',', 'dtype', '=', 'int', ')', '[', 'index_bool', ']', '+', '1', 'dif', '=', 'np', '.', 'append', '(', 'dif', ',', 'index_bool', '.', 'shape', '[', '0', ']', '+', '1', ')', 'i', '=', '0', 'for', 'j', 'in', 'dif', ':', 'if', 'index', '[', 'i', ',', '2', ']', '<', '0', ':', '# hole\r', 'holes', '.', 'append', '(', 'points', '[', 'i', ':', 'j', ',', ':', ']', ')', 'if', 'index', '[', 'i', ',', '2', ']', '>=', '0', ':', '# polygon\r', 'polygons', '.', 'append', '(', 'points', '[', 'i', ':', 'j', ',', ':', ']', ')', 'i', '=', 'j', 'return', '[', 'polygons', ',', 'holes', ',', 'sop', ']', 'else', ':', '# Only points (without index) allowed\r', 'if', 'type', '(', 'map_', ')', '==', 'list', ':', 'points', '=', 'map_', '[', '1', ']', 'else', ':', 'points', '=', 'map_', 'ex', '=', 'self', '.', 'explode_map_schedule', 'polygons', '=', '[', 'points', '[', 'p', ',', ':', ']', 'for', 'p', 'in', 'ex', '[', '0', ']', ']', 'holes', '=', '[', 'points', '[', 'p', ',', ':', ']', 'for', 'p', 'in', 'ex', '[', '1', ']', ']', 'sop', '=', 'points', '[', 'ex', '[', '2', ']', ',', ':', ']', 'return', '[', 'polygons', ',', 'holes', ',', 'sop', ']'] | Much faster version of ``pyny.Space.explode()`` method for
previously locked ``pyny.Space``.
:param map_: the points, and the same order, that appear at
``pyny.Space.get_map()``. There is no need for the index if
locked.
:type map_: ndarray (shape=(N, 3))
:returns: The polygons, the holes and the points.
:rtype: list
.. seealso::
* :func:`get_seed`
* :func:`get_map`
* :func:`map2pyny`
* :func:`map2seed` | ['Much', 'faster', 'version', 'of', 'pyny', '.', 'Space', '.', 'explode', '()', 'method', 'for', 'previously', 'locked', 'pyny', '.', 'Space', '.', ':', 'param', 'map_', ':', 'the', 'points', 'and', 'the', 'same', 'order', 'that', 'appear', 'at', 'pyny', '.', 'Space', '.', 'get_map', '()', '.', 'There', 'is', 'no', 'need', 'for', 'the', 'index', 'if', 'locked', '.', ':', 'type', 'map_', ':', 'ndarray', '(', 'shape', '=', '(', 'N', '3', '))', ':', 'returns', ':', 'The', 'polygons', 'the', 'holes', 'and', 'the', 'points', '.', ':', 'rtype', ':', 'list', '..', 'seealso', '::', '*', ':', 'func', ':', 'get_seed', '*', ':', 'func', ':', 'get_map', '*', ':', 'func', ':', 'map2pyny', '*', ':', 'func', ':', 'map2seed'] | train | https://github.com/JoseAntFer/pyny3d/blob/fb81684935a24f7e50c975cb4383c81a63ab56df/pyny3d/geoms.py#L2076-L2134 |
5,128 | awslabs/serverless-application-model | samtranslator/model/eventsources/push.py | S3.to_cloudformation | def to_cloudformation(self, **kwargs):
"""Returns the Lambda Permission resource allowing S3 to invoke the function this event source triggers.
:param dict kwargs: S3 bucket resource
:returns: a list of vanilla CloudFormation Resources, to which this S3 event expands
:rtype: list
"""
function = kwargs.get('function')
if not function:
raise TypeError("Missing required keyword argument: function")
if 'bucket' not in kwargs or kwargs['bucket'] is None:
raise TypeError("Missing required keyword argument: bucket")
if 'bucket_id' not in kwargs or kwargs['bucket_id'] is None:
raise TypeError("Missing required keyword argument: bucket_id")
bucket = kwargs['bucket']
bucket_id = kwargs['bucket_id']
resources = []
source_account = ref('AWS::AccountId')
permission = self._construct_permission(function, source_account=source_account)
if CONDITION in permission.resource_attributes:
self._depend_on_lambda_permissions_using_tag(bucket, permission)
else:
self._depend_on_lambda_permissions(bucket, permission)
resources.append(permission)
# NOTE: `bucket` here is a dictionary representing the S3 Bucket resource in your SAM template. If there are
# multiple S3 Events attached to the same bucket, we will update the Bucket resource with notification
# configuration for each event. This is the reason why we continue to use existing bucket dict and append onto
# it.
#
# NOTE: There is some fragile logic here where we will append multiple resources to output
# SAM template but de-dupe them when merging into output CFN template. This is scary because the order of
# merging is literally "last one wins", which works fine because we linearly loop through the template once.
# The de-dupe happens inside `samtranslator.translator.Translator.translate` method when merging results of
# to_cloudformation() to output template.
self._inject_notification_configuration(function, bucket)
resources.append(S3Bucket.from_dict(bucket_id, bucket))
return resources | python | def to_cloudformation(self, **kwargs):
"""Returns the Lambda Permission resource allowing S3 to invoke the function this event source triggers.
:param dict kwargs: S3 bucket resource
:returns: a list of vanilla CloudFormation Resources, to which this S3 event expands
:rtype: list
"""
function = kwargs.get('function')
if not function:
raise TypeError("Missing required keyword argument: function")
if 'bucket' not in kwargs or kwargs['bucket'] is None:
raise TypeError("Missing required keyword argument: bucket")
if 'bucket_id' not in kwargs or kwargs['bucket_id'] is None:
raise TypeError("Missing required keyword argument: bucket_id")
bucket = kwargs['bucket']
bucket_id = kwargs['bucket_id']
resources = []
source_account = ref('AWS::AccountId')
permission = self._construct_permission(function, source_account=source_account)
if CONDITION in permission.resource_attributes:
self._depend_on_lambda_permissions_using_tag(bucket, permission)
else:
self._depend_on_lambda_permissions(bucket, permission)
resources.append(permission)
# NOTE: `bucket` here is a dictionary representing the S3 Bucket resource in your SAM template. If there are
# multiple S3 Events attached to the same bucket, we will update the Bucket resource with notification
# configuration for each event. This is the reason why we continue to use existing bucket dict and append onto
# it.
#
# NOTE: There is some fragile logic here where we will append multiple resources to output
# SAM template but de-dupe them when merging into output CFN template. This is scary because the order of
# merging is literally "last one wins", which works fine because we linearly loop through the template once.
# The de-dupe happens inside `samtranslator.translator.Translator.translate` method when merging results of
# to_cloudformation() to output template.
self._inject_notification_configuration(function, bucket)
resources.append(S3Bucket.from_dict(bucket_id, bucket))
return resources | ['def', 'to_cloudformation', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'function', '=', 'kwargs', '.', 'get', '(', "'function'", ')', 'if', 'not', 'function', ':', 'raise', 'TypeError', '(', '"Missing required keyword argument: function"', ')', 'if', "'bucket'", 'not', 'in', 'kwargs', 'or', 'kwargs', '[', "'bucket'", ']', 'is', 'None', ':', 'raise', 'TypeError', '(', '"Missing required keyword argument: bucket"', ')', 'if', "'bucket_id'", 'not', 'in', 'kwargs', 'or', 'kwargs', '[', "'bucket_id'", ']', 'is', 'None', ':', 'raise', 'TypeError', '(', '"Missing required keyword argument: bucket_id"', ')', 'bucket', '=', 'kwargs', '[', "'bucket'", ']', 'bucket_id', '=', 'kwargs', '[', "'bucket_id'", ']', 'resources', '=', '[', ']', 'source_account', '=', 'ref', '(', "'AWS::AccountId'", ')', 'permission', '=', 'self', '.', '_construct_permission', '(', 'function', ',', 'source_account', '=', 'source_account', ')', 'if', 'CONDITION', 'in', 'permission', '.', 'resource_attributes', ':', 'self', '.', '_depend_on_lambda_permissions_using_tag', '(', 'bucket', ',', 'permission', ')', 'else', ':', 'self', '.', '_depend_on_lambda_permissions', '(', 'bucket', ',', 'permission', ')', 'resources', '.', 'append', '(', 'permission', ')', '# NOTE: `bucket` here is a dictionary representing the S3 Bucket resource in your SAM template. If there are', '# multiple S3 Events attached to the same bucket, we will update the Bucket resource with notification', '# configuration for each event. This is the reason why we continue to use existing bucket dict and append onto', '# it.', '#', '# NOTE: There is some fragile logic here where we will append multiple resources to output', '# SAM template but de-dupe them when merging into output CFN template. This is scary because the order of', '# merging is literally "last one wins", which works fine because we linearly loop through the template once.', '# The de-dupe happens inside `samtranslator.translator.Translator.translate` method when merging results of', '# to_cloudformation() to output template.', 'self', '.', '_inject_notification_configuration', '(', 'function', ',', 'bucket', ')', 'resources', '.', 'append', '(', 'S3Bucket', '.', 'from_dict', '(', 'bucket_id', ',', 'bucket', ')', ')', 'return', 'resources'] | Returns the Lambda Permission resource allowing S3 to invoke the function this event source triggers.
:param dict kwargs: S3 bucket resource
:returns: a list of vanilla CloudFormation Resources, to which this S3 event expands
:rtype: list | ['Returns', 'the', 'Lambda', 'Permission', 'resource', 'allowing', 'S3', 'to', 'invoke', 'the', 'function', 'this', 'event', 'source', 'triggers', '.'] | train | https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/model/eventsources/push.py#L197-L241 |
5,129 | lowandrew/OLCTools | databasesetup/database_setup.py | DatabaseSetup.clark | def clark(self, databasepath):
"""
Download and set-up the CLARK database using the set_targets.sh script. Use defaults of bacteria for database
type, and species for taxonomic level
:param databasepath: path to use to save the database
"""
if self.clarkpath:
logging.info('Downloading CLARK database')
# Create the folder in which the database is to be stored
databasepath = self.create_database_folder(databasepath, 'clark')
# Set the call to create the database - use the --light option, as we don't require the full database
targetcall = 'cd {clarkpath} && ../opt/clark/set_targets.sh {dbpath} bacteria --species --light'\
.format(clarkpath=self.clarkpath,
dbpath=databasepath)
# Download the database
self.database_clone(targetcall, databasepath)
else:
logging.warning('No CLARK scripts detected in $PATH. Cannot download database.') | python | def clark(self, databasepath):
"""
Download and set-up the CLARK database using the set_targets.sh script. Use defaults of bacteria for database
type, and species for taxonomic level
:param databasepath: path to use to save the database
"""
if self.clarkpath:
logging.info('Downloading CLARK database')
# Create the folder in which the database is to be stored
databasepath = self.create_database_folder(databasepath, 'clark')
# Set the call to create the database - use the --light option, as we don't require the full database
targetcall = 'cd {clarkpath} && ../opt/clark/set_targets.sh {dbpath} bacteria --species --light'\
.format(clarkpath=self.clarkpath,
dbpath=databasepath)
# Download the database
self.database_clone(targetcall, databasepath)
else:
logging.warning('No CLARK scripts detected in $PATH. Cannot download database.') | ['def', 'clark', '(', 'self', ',', 'databasepath', ')', ':', 'if', 'self', '.', 'clarkpath', ':', 'logging', '.', 'info', '(', "'Downloading CLARK database'", ')', '# Create the folder in which the database is to be stored', 'databasepath', '=', 'self', '.', 'create_database_folder', '(', 'databasepath', ',', "'clark'", ')', "# Set the call to create the database - use the --light option, as we don't require the full database", 'targetcall', '=', "'cd {clarkpath} && ../opt/clark/set_targets.sh {dbpath} bacteria --species --light'", '.', 'format', '(', 'clarkpath', '=', 'self', '.', 'clarkpath', ',', 'dbpath', '=', 'databasepath', ')', '# Download the database', 'self', '.', 'database_clone', '(', 'targetcall', ',', 'databasepath', ')', 'else', ':', 'logging', '.', 'warning', '(', "'No CLARK scripts detected in $PATH. Cannot download database.'", ')'] | Download and set-up the CLARK database using the set_targets.sh script. Use defaults of bacteria for database
type, and species for taxonomic level
:param databasepath: path to use to save the database | ['Download', 'and', 'set', '-', 'up', 'the', 'CLARK', 'database', 'using', 'the', 'set_targets', '.', 'sh', 'script', '.', 'Use', 'defaults', 'of', 'bacteria', 'for', 'database', 'type', 'and', 'species', 'for', 'taxonomic', 'level', ':', 'param', 'databasepath', ':', 'path', 'to', 'use', 'to', 'save', 'the', 'database'] | train | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/databasesetup/database_setup.py#L197-L214 |
5,130 | AltSchool/dynamic-rest | dynamic_rest/fields/fields.py | DynamicRelationField.get_serializer | def get_serializer(self, *args, **kwargs):
"""Get an instance of the child serializer."""
init_args = {
k: v for k, v in six.iteritems(self.kwargs)
if k in self.SERIALIZER_KWARGS
}
kwargs = self._inherit_parent_kwargs(kwargs)
init_args.update(kwargs)
if self.embed and self._is_dynamic:
init_args['embed'] = True
return self._get_cached_serializer(args, init_args) | python | def get_serializer(self, *args, **kwargs):
"""Get an instance of the child serializer."""
init_args = {
k: v for k, v in six.iteritems(self.kwargs)
if k in self.SERIALIZER_KWARGS
}
kwargs = self._inherit_parent_kwargs(kwargs)
init_args.update(kwargs)
if self.embed and self._is_dynamic:
init_args['embed'] = True
return self._get_cached_serializer(args, init_args) | ['def', 'get_serializer', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'init_args', '=', '{', 'k', ':', 'v', 'for', 'k', ',', 'v', 'in', 'six', '.', 'iteritems', '(', 'self', '.', 'kwargs', ')', 'if', 'k', 'in', 'self', '.', 'SERIALIZER_KWARGS', '}', 'kwargs', '=', 'self', '.', '_inherit_parent_kwargs', '(', 'kwargs', ')', 'init_args', '.', 'update', '(', 'kwargs', ')', 'if', 'self', '.', 'embed', 'and', 'self', '.', '_is_dynamic', ':', 'init_args', '[', "'embed'", ']', '=', 'True', 'return', 'self', '.', '_get_cached_serializer', '(', 'args', ',', 'init_args', ')'] | Get an instance of the child serializer. | ['Get', 'an', 'instance', 'of', 'the', 'child', 'serializer', '.'] | train | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/fields/fields.py#L241-L254 |
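
`get_serializer()` above is normally exercised by declaring the field on a dynamic-rest serializer; a sketch with hypothetical `Group`/`User` models and serializer names (not taken from the source), where `embed=True` triggers the embedded branch shown in the code:

from dynamic_rest.serializers import DynamicModelSerializer
from dynamic_rest.fields import DynamicRelationField
from myapp.models import Group   # hypothetical Django model

class GroupSerializer(DynamicModelSerializer):
    class Meta:
        model = Group
        name = 'group'
        fields = ('id', 'name', 'members')

    # The child serializer is resolved lazily and instantiated via
    # get_serializer(); embed=True requests the embedded representation.
    members = DynamicRelationField('UserSerializer', many=True, embed=True)
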
5,131 | aguinane/nem-reader | print_examples.py | print_meter_record | def print_meter_record(file_path, rows=5):
""" Output readings for specified number of rows to console """
m = nr.read_nem_file(file_path)
print('Header:', m.header)
print('Transactions:', m.transactions)
for nmi in m.readings:
for channel in m.readings[nmi]:
print(nmi, 'Channel', channel)
for reading in m.readings[nmi][channel][-rows:]:
print('', reading) | python | def print_meter_record(file_path, rows=5):
""" Output readings for specified number of rows to console """
m = nr.read_nem_file(file_path)
print('Header:', m.header)
print('Transactions:', m.transactions)
for nmi in m.readings:
for channel in m.readings[nmi]:
print(nmi, 'Channel', channel)
for reading in m.readings[nmi][channel][-rows:]:
print('', reading) | ['def', 'print_meter_record', '(', 'file_path', ',', 'rows', '=', '5', ')', ':', 'm', '=', 'nr', '.', 'read_nem_file', '(', 'file_path', ')', 'print', '(', "'Header:'", ',', 'm', '.', 'header', ')', 'print', '(', "'Transactions:'", ',', 'm', '.', 'transactions', ')', 'for', 'nmi', 'in', 'm', '.', 'readings', ':', 'for', 'channel', 'in', 'm', '.', 'readings', '[', 'nmi', ']', ':', 'print', '(', 'nmi', ',', "'Channel'", ',', 'channel', ')', 'for', 'reading', 'in', 'm', '.', 'readings', '[', 'nmi', ']', '[', 'channel', ']', '[', '-', 'rows', ':', ']', ':', 'print', '(', "''", ',', 'reading', ')'] | Output readings for specified number of rows to console | ['Output', 'readings', 'for', 'specified', 'number', 'of', 'rows', 'to', 'console'] | train | https://github.com/aguinane/nem-reader/blob/5405a5cba4bb8ebdad05c28455d12bb34a6d3ce5/print_examples.py#L4-L13 |
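
A sketch of calling the helper above from the repository root; the file path is a placeholder for any NEM12/NEM13 metering-data file:

from print_examples import print_meter_record

# Prints the header, transactions, and the last three readings per channel.
print_meter_record('examples/Example_NEM12.csv', rows=3)
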
5,132 | taskcluster/taskcluster-client.py | taskcluster/queueevents.py | QueueEvents.taskRunning | def taskRunning(self, *args, **kwargs):
"""
Task Running Messages
Whenever a task is claimed by a worker, a run is started on the worker,
and a message is posted on this exchange.
This exchange outputs: ``v1/task-running-message.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
* runId: `runId` of latest run for the task, `_` if no run is exists for the task. (required)
* workerGroup: `workerGroup` of latest run for the task, `_` if no run is exists for the task. (required)
* workerId: `workerId` of latest run for the task, `_` if no run is exists for the task. (required)
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'task-running',
'name': 'taskRunning',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-running-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs) | python | def taskRunning(self, *args, **kwargs):
"""
Task Running Messages
Whenever a task is claimed by a worker, a run is started on the worker,
and a message is posted on this exchange.
This exchange outputs: ``v1/task-running-message.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
* runId: `runId` of latest run for the task, `_` if no run is exists for the task. (required)
* workerGroup: `workerGroup` of latest run for the task, `_` if no run is exists for the task. (required)
* workerId: `workerId` of latest run for the task, `_` if no run is exists for the task. (required)
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'task-running',
'name': 'taskRunning',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-running-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs) | ['def', 'taskRunning', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'ref', '=', '{', "'exchange'", ':', "'task-running'", ',', "'name'", ':', "'taskRunning'", ',', "'routingKey'", ':', '[', '{', "'constant'", ':', "'primary'", ',', "'multipleWords'", ':', 'False', ',', "'name'", ':', "'routingKeyKind'", ',', '}', ',', '{', "'multipleWords'", ':', 'False', ',', "'name'", ':', "'taskId'", ',', '}', ',', '{', "'multipleWords'", ':', 'False', ',', "'name'", ':', "'runId'", ',', '}', ',', '{', "'multipleWords'", ':', 'False', ',', "'name'", ':', "'workerGroup'", ',', '}', ',', '{', "'multipleWords'", ':', 'False', ',', "'name'", ':', "'workerId'", ',', '}', ',', '{', "'multipleWords'", ':', 'False', ',', "'name'", ':', "'provisionerId'", ',', '}', ',', '{', "'multipleWords'", ':', 'False', ',', "'name'", ':', "'workerType'", ',', '}', ',', '{', "'multipleWords'", ':', 'False', ',', "'name'", ':', "'schedulerId'", ',', '}', ',', '{', "'multipleWords'", ':', 'False', ',', "'name'", ':', "'taskGroupId'", ',', '}', ',', '{', "'multipleWords'", ':', 'True', ',', "'name'", ':', "'reserved'", ',', '}', ',', ']', ',', "'schema'", ':', "'v1/task-running-message.json#'", ',', '}', 'return', 'self', '.', '_makeTopicExchange', '(', 'ref', ',', '*', 'args', ',', '*', '*', 'kwargs', ')'] | Task Running Messages
Whenever a task is claimed by a worker, a run is started on the worker,
and a message is posted on this exchange.
This exchange outputs: ``v1/task-running-message.json#``. This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
 * runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
 * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required)
 * workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required)
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified. | ['Task', 'Running', 'Messages'] | train | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queueevents.py#L242-L320 |
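The routing-key list above maps one-to-one onto AMQP topic-exchange segments. Below is a hedged, plain-Python sketch of turning that spec into a binding pattern; the helper name and the provisioner/workerType values are invented for illustration and are not part of taskcluster-client.py.

# Build a topic-exchange binding pattern from the routing-key spec shown above.
# Unset single-word keys become '*'; multi-word keys such as 'reserved' become '#'.
def binding_pattern(routing_key_spec, **values):
    parts = []
    for entry in routing_key_spec:
        if entry['name'] in values:
            parts.append(str(values[entry['name']]))
        elif 'constant' in entry:
            parts.append(entry['constant'])
        elif entry['multipleWords']:
            parts.append('#')
        else:
            parts.append('*')
    return '.'.join(parts)

spec = [
    {'constant': 'primary', 'multipleWords': False, 'name': 'routingKeyKind'},
    {'multipleWords': False, 'name': 'taskId'},
    {'multipleWords': False, 'name': 'runId'},
    {'multipleWords': False, 'name': 'workerGroup'},
    {'multipleWords': False, 'name': 'workerId'},
    {'multipleWords': False, 'name': 'provisionerId'},
    {'multipleWords': False, 'name': 'workerType'},
    {'multipleWords': False, 'name': 'schedulerId'},
    {'multipleWords': False, 'name': 'taskGroupId'},
    {'multipleWords': True, 'name': 'reserved'},
]

# Listen only for task-running messages of one provisioner/workerType pair.
print(binding_pattern(spec, provisionerId='aws-provisioner-v1', workerType='gecko-t-linux'))
# -> primary.*.*.*.*.aws-provisioner-v1.gecko-t-linux.*.*.#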
5,133 | scanny/python-pptx | pptx/api.py | Presentation | def Presentation(pptx=None):
"""
Return a |Presentation| object loaded from *pptx*, where *pptx* can be
either a path to a ``.pptx`` file (a string) or a file-like object. If
*pptx* is missing or ``None``, the built-in default presentation
"template" is loaded.
"""
if pptx is None:
pptx = _default_pptx_path()
presentation_part = Package.open(pptx).main_document_part
if not _is_pptx_package(presentation_part):
tmpl = "file '%s' is not a PowerPoint file, content type is '%s'"
raise ValueError(tmpl % (pptx, presentation_part.content_type))
return presentation_part.presentation | python | def Presentation(pptx=None):
"""
Return a |Presentation| object loaded from *pptx*, where *pptx* can be
either a path to a ``.pptx`` file (a string) or a file-like object. If
*pptx* is missing or ``None``, the built-in default presentation
"template" is loaded.
"""
if pptx is None:
pptx = _default_pptx_path()
presentation_part = Package.open(pptx).main_document_part
if not _is_pptx_package(presentation_part):
tmpl = "file '%s' is not a PowerPoint file, content type is '%s'"
raise ValueError(tmpl % (pptx, presentation_part.content_type))
return presentation_part.presentation | ['def', 'Presentation', '(', 'pptx', '=', 'None', ')', ':', 'if', 'pptx', 'is', 'None', ':', 'pptx', '=', '_default_pptx_path', '(', ')', 'presentation_part', '=', 'Package', '.', 'open', '(', 'pptx', ')', '.', 'main_document_part', 'if', 'not', '_is_pptx_package', '(', 'presentation_part', ')', ':', 'tmpl', '=', '"file \'%s\' is not a PowerPoint file, content type is \'%s\'"', 'raise', 'ValueError', '(', 'tmpl', '%', '(', 'pptx', ',', 'presentation_part', '.', 'content_type', ')', ')', 'return', 'presentation_part', '.', 'presentation'] | Return a |Presentation| object loaded from *pptx*, where *pptx* can be
either a path to a ``.pptx`` file (a string) or a file-like object. If
*pptx* is missing or ``None``, the built-in default presentation
"template" is loaded. | ['Return', 'a', '|Presentation|', 'object', 'loaded', 'from', '*', 'pptx', '*', 'where', '*', 'pptx', '*', 'can', 'be', 'either', 'a', 'path', 'to', 'a', '.', 'pptx', 'file', '(', 'a', 'string', ')', 'or', 'a', 'file', '-', 'like', 'object', '.', 'If', '*', 'pptx', '*', 'is', 'missing', 'or', 'None', 'the', 'built', '-', 'in', 'default', 'presentation', 'template', 'is', 'loaded', '.'] | train | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/api.py#L20-L36 |
5,134 | wangsix/vmo | vmo/generate.py | generate | def generate(oracle, seq_len, p=0.5, k=1, LRS=0, weight=None):
""" Generate a sequence based on traversing an oracle.
    :param oracle: an indexed vmo object
:param seq_len: the length of the returned improvisation sequence
    :param p: a float between (0,1) representing the probability of using the forward links.
:param k: the starting improvisation time step in oracle
:param LRS: the length of minimum longest repeated suffixes allowed to jump
:param weight:
None: choose uniformly among all the possible sfx/rsfx given
current state.
"max": always choose the sfx/rsfx having the longest LRS.
"weight": choose sfx/rsfx in a way that favors longer ones than
shorter ones.
:return:
s: a list containing the sequence generated, each element represents a
state.
kend: the ending state.
ktrace:
"""
trn = oracle.trn[:]
sfx = oracle.sfx[:]
lrs = oracle.lrs[:]
rsfx = oracle.rsfx[:]
s = []
ktrace = [1]
for _i in range(seq_len):
# generate each state
if sfx[k] != 0 and sfx[k] is not None:
if (random.random() < p):
# copy forward according to transitions
I = trn[k]
if len(I) == 0:
# if last state, choose a suffix
k = sfx[k]
ktrace.append(k)
I = trn[k]
sym = I[int(np.floor(random.random() * len(I)))]
s.append(sym) # Why (sym-1) before?
k = sym
ktrace.append(k)
else:
# copy any of the next symbols
ktrace.append(k)
_k = k
k_vec = []
k_vec = _find_links(k_vec, sfx, rsfx, _k)
k_vec = [_i for _i in k_vec if lrs[_i] >= LRS]
lrs_vec = [lrs[_i] for _i in k_vec]
if len(k_vec) > 0: # if a possibility found, len(I)
if weight == 'weight':
max_lrs = np.amax(lrs_vec)
query_lrs = max_lrs - np.floor(random.expovariate(1))
if query_lrs in lrs_vec:
_tmp = np.where(lrs_vec == query_lrs)[0]
_tmp = _tmp[int(
np.floor(random.random() * len(_tmp)))]
sym = k_vec[_tmp]
else:
_tmp = np.argmin(abs(
np.subtract(lrs_vec, query_lrs)))
sym = k_vec[_tmp]
elif weight == 'max':
sym = k_vec[np.argmax([lrs[_i] for _i in k_vec])]
else:
sym = k_vec[int(np.floor(random.random() * len(k_vec)))]
if sym == len(sfx) - 1:
sym = sfx[sym] + 1
else:
s.append(sym + 1)
k = sym + 1
ktrace.append(k)
else: # otherwise continue
if k < len(sfx) - 1:
sym = k + 1
else:
sym = sfx[k] + 1
s.append(sym)
k = sym
ktrace.append(k)
else:
if k < len(sfx) - 1:
s.append(k + 1)
k += 1
ktrace.append(k)
else:
sym = sfx[k] + 1
s.append(sym)
k = sym
ktrace.append(k)
if k >= len(sfx) - 1:
k = 0
kend = k
return s, kend, ktrace | python | def generate(oracle, seq_len, p=0.5, k=1, LRS=0, weight=None):
""" Generate a sequence based on traversing an oracle.
    :param oracle: an indexed vmo object
:param seq_len: the length of the returned improvisation sequence
    :param p: a float between (0,1) representing the probability of using the forward links.
:param k: the starting improvisation time step in oracle
:param LRS: the length of minimum longest repeated suffixes allowed to jump
:param weight:
None: choose uniformly among all the possible sfx/rsfx given
current state.
"max": always choose the sfx/rsfx having the longest LRS.
"weight": choose sfx/rsfx in a way that favors longer ones than
shorter ones.
:return:
s: a list containing the sequence generated, each element represents a
state.
kend: the ending state.
ktrace:
"""
trn = oracle.trn[:]
sfx = oracle.sfx[:]
lrs = oracle.lrs[:]
rsfx = oracle.rsfx[:]
s = []
ktrace = [1]
for _i in range(seq_len):
# generate each state
if sfx[k] != 0 and sfx[k] is not None:
if (random.random() < p):
# copy forward according to transitions
I = trn[k]
if len(I) == 0:
# if last state, choose a suffix
k = sfx[k]
ktrace.append(k)
I = trn[k]
sym = I[int(np.floor(random.random() * len(I)))]
s.append(sym) # Why (sym-1) before?
k = sym
ktrace.append(k)
else:
# copy any of the next symbols
ktrace.append(k)
_k = k
k_vec = []
k_vec = _find_links(k_vec, sfx, rsfx, _k)
k_vec = [_i for _i in k_vec if lrs[_i] >= LRS]
lrs_vec = [lrs[_i] for _i in k_vec]
if len(k_vec) > 0: # if a possibility found, len(I)
if weight == 'weight':
max_lrs = np.amax(lrs_vec)
query_lrs = max_lrs - np.floor(random.expovariate(1))
if query_lrs in lrs_vec:
_tmp = np.where(lrs_vec == query_lrs)[0]
_tmp = _tmp[int(
np.floor(random.random() * len(_tmp)))]
sym = k_vec[_tmp]
else:
_tmp = np.argmin(abs(
np.subtract(lrs_vec, query_lrs)))
sym = k_vec[_tmp]
elif weight == 'max':
sym = k_vec[np.argmax([lrs[_i] for _i in k_vec])]
else:
sym = k_vec[int(np.floor(random.random() * len(k_vec)))]
if sym == len(sfx) - 1:
sym = sfx[sym] + 1
else:
s.append(sym + 1)
k = sym + 1
ktrace.append(k)
else: # otherwise continue
if k < len(sfx) - 1:
sym = k + 1
else:
sym = sfx[k] + 1
s.append(sym)
k = sym
ktrace.append(k)
else:
if k < len(sfx) - 1:
s.append(k + 1)
k += 1
ktrace.append(k)
else:
sym = sfx[k] + 1
s.append(sym)
k = sym
ktrace.append(k)
if k >= len(sfx) - 1:
k = 0
kend = k
return s, kend, ktrace | ['def', 'generate', '(', 'oracle', ',', 'seq_len', ',', 'p', '=', '0.5', ',', 'k', '=', '1', ',', 'LRS', '=', '0', ',', 'weight', '=', 'None', ')', ':', 'trn', '=', 'oracle', '.', 'trn', '[', ':', ']', 'sfx', '=', 'oracle', '.', 'sfx', '[', ':', ']', 'lrs', '=', 'oracle', '.', 'lrs', '[', ':', ']', 'rsfx', '=', 'oracle', '.', 'rsfx', '[', ':', ']', 's', '=', '[', ']', 'ktrace', '=', '[', '1', ']', 'for', '_i', 'in', 'range', '(', 'seq_len', ')', ':', '# generate each state', 'if', 'sfx', '[', 'k', ']', '!=', '0', 'and', 'sfx', '[', 'k', ']', 'is', 'not', 'None', ':', 'if', '(', 'random', '.', 'random', '(', ')', '<', 'p', ')', ':', '# copy forward according to transitions', 'I', '=', 'trn', '[', 'k', ']', 'if', 'len', '(', 'I', ')', '==', '0', ':', '# if last state, choose a suffix', 'k', '=', 'sfx', '[', 'k', ']', 'ktrace', '.', 'append', '(', 'k', ')', 'I', '=', 'trn', '[', 'k', ']', 'sym', '=', 'I', '[', 'int', '(', 'np', '.', 'floor', '(', 'random', '.', 'random', '(', ')', '*', 'len', '(', 'I', ')', ')', ')', ']', 's', '.', 'append', '(', 'sym', ')', '# Why (sym-1) before?', 'k', '=', 'sym', 'ktrace', '.', 'append', '(', 'k', ')', 'else', ':', '# copy any of the next symbols', 'ktrace', '.', 'append', '(', 'k', ')', '_k', '=', 'k', 'k_vec', '=', '[', ']', 'k_vec', '=', '_find_links', '(', 'k_vec', ',', 'sfx', ',', 'rsfx', ',', '_k', ')', 'k_vec', '=', '[', '_i', 'for', '_i', 'in', 'k_vec', 'if', 'lrs', '[', '_i', ']', '>=', 'LRS', ']', 'lrs_vec', '=', '[', 'lrs', '[', '_i', ']', 'for', '_i', 'in', 'k_vec', ']', 'if', 'len', '(', 'k_vec', ')', '>', '0', ':', '# if a possibility found, len(I)', 'if', 'weight', '==', "'weight'", ':', 'max_lrs', '=', 'np', '.', 'amax', '(', 'lrs_vec', ')', 'query_lrs', '=', 'max_lrs', '-', 'np', '.', 'floor', '(', 'random', '.', 'expovariate', '(', '1', ')', ')', 'if', 'query_lrs', 'in', 'lrs_vec', ':', '_tmp', '=', 'np', '.', 'where', '(', 'lrs_vec', '==', 'query_lrs', ')', '[', '0', ']', '_tmp', '=', '_tmp', '[', 'int', '(', 'np', '.', 'floor', '(', 'random', '.', 'random', '(', ')', '*', 'len', '(', '_tmp', ')', ')', ')', ']', 'sym', '=', 'k_vec', '[', '_tmp', ']', 'else', ':', '_tmp', '=', 'np', '.', 'argmin', '(', 'abs', '(', 'np', '.', 'subtract', '(', 'lrs_vec', ',', 'query_lrs', ')', ')', ')', 'sym', '=', 'k_vec', '[', '_tmp', ']', 'elif', 'weight', '==', "'max'", ':', 'sym', '=', 'k_vec', '[', 'np', '.', 'argmax', '(', '[', 'lrs', '[', '_i', ']', 'for', '_i', 'in', 'k_vec', ']', ')', ']', 'else', ':', 'sym', '=', 'k_vec', '[', 'int', '(', 'np', '.', 'floor', '(', 'random', '.', 'random', '(', ')', '*', 'len', '(', 'k_vec', ')', ')', ')', ']', 'if', 'sym', '==', 'len', '(', 'sfx', ')', '-', '1', ':', 'sym', '=', 'sfx', '[', 'sym', ']', '+', '1', 'else', ':', 's', '.', 'append', '(', 'sym', '+', '1', ')', 'k', '=', 'sym', '+', '1', 'ktrace', '.', 'append', '(', 'k', ')', 'else', ':', '# otherwise continue', 'if', 'k', '<', 'len', '(', 'sfx', ')', '-', '1', ':', 'sym', '=', 'k', '+', '1', 'else', ':', 'sym', '=', 'sfx', '[', 'k', ']', '+', '1', 's', '.', 'append', '(', 'sym', ')', 'k', '=', 'sym', 'ktrace', '.', 'append', '(', 'k', ')', 'else', ':', 'if', 'k', '<', 'len', '(', 'sfx', ')', '-', '1', ':', 's', '.', 'append', '(', 'k', '+', '1', ')', 'k', '+=', '1', 'ktrace', '.', 'append', '(', 'k', ')', 'else', ':', 'sym', '=', 'sfx', '[', 'k', ']', '+', '1', 's', '.', 'append', '(', 'sym', ')', 'k', '=', 'sym', 'ktrace', '.', 'append', '(', 'k', ')', 'if', 'k', '>=', 'len', '(', 'sfx', ')', '-', '1', ':', 'k', '=', '0', 
'kend', '=', 'k', 'return', 's', ',', 'kend', ',', 'ktrace'] | Generate a sequence based on traversing an oracle.
:param oracle: an indexed vmo object
:param seq_len: the length of the returned improvisation sequence
:param p: a float between (0,1) representing the probability of using the forward links.
:param k: the starting improvisation time step in oracle
:param LRS: the length of minimum longest repeated suffixes allowed to jump
:param weight:
None: choose uniformly among all the possible sfx/rsfx given
current state.
"max": always choose the sfx/rsfx having the longest LRS.
"weight": choose sfx/rsfx in a way that favors longer ones than
shorter ones.
:return:
s: a list containing the sequence generated, each element represents a
state.
kend: the ending state.
ktrace: | ['Generate', 'a', 'sequence', 'based', 'on', 'traversing', 'an', 'oracle', '.'] | train | https://github.com/wangsix/vmo/blob/bb1cc4cf1f33f0bb49e38c91126c1be1a0cdd09d/vmo/generate.py#L106-L204 |
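A hedged sketch of calling `generate()` as documented above. Building and indexing the oracle is outside this row, so the `oracle` argument below is assumed to be an indexed VMO object created elsewhere with the vmo library.

from vmo import generate as vmo_generate  # module shown above (vmo/generate.py)

def improvise(oracle, length=100):
    # `oracle` is assumed to be an indexed oracle built elsewhere; generate()
    # only touches its .trn, .sfx, .rsfx and .lrs attributes.
    seq, kend, ktrace = vmo_generate.generate(
        oracle,
        seq_len=length,
        p=0.7,          # probability of following a forward link
        LRS=2,          # only jump via suffix links with LRS >= 2
        weight='max',   # always pick the suffix with the longest LRS
    )
    return seq, kend, ktrace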
5,135 | resonai/ybt | yabt/extend.py | Plugin.remove_builder | def remove_builder(cls, builder_name: str):
"""Remove a registered builder `builder_name`.
No reason to use this except for tests.
"""
cls.builders.pop(builder_name, None)
for hook_spec in cls.hooks.values():
hook_spec.pop(builder_name, None) | python | def remove_builder(cls, builder_name: str):
"""Remove a registered builder `builder_name`.
No reason to use this except for tests.
"""
cls.builders.pop(builder_name, None)
for hook_spec in cls.hooks.values():
hook_spec.pop(builder_name, None) | ['def', 'remove_builder', '(', 'cls', ',', 'builder_name', ':', 'str', ')', ':', 'cls', '.', 'builders', '.', 'pop', '(', 'builder_name', ',', 'None', ')', 'for', 'hook_spec', 'in', 'cls', '.', 'hooks', '.', 'values', '(', ')', ':', 'hook_spec', '.', 'pop', '(', 'builder_name', ',', 'None', ')'] | Remove a registered builder `builder_name`.
No reason to use this except for tests. | ['Remove', 'a', 'registered', 'builder', 'builder_name', '.'] | train | https://github.com/resonai/ybt/blob/5b40df0922ef3383eb85f2b04a26a2db4b81b3fd/yabt/extend.py#L198-L205 |
5,136 | quadrismegistus/prosodic | prosodic/Text.py | Text.validlines | def validlines(self):
"""Return all lines within which Prosodic understood all words."""
return [ln for ln in self.lines() if (not ln.isBroken() and not ln.ignoreMe)] | python | def validlines(self):
"""Return all lines within which Prosodic understood all words."""
return [ln for ln in self.lines() if (not ln.isBroken() and not ln.ignoreMe)] | ['def', 'validlines', '(', 'self', ')', ':', 'return', '[', 'ln', 'for', 'ln', 'in', 'self', '.', 'lines', '(', ')', 'if', '(', 'not', 'ln', '.', 'isBroken', '(', ')', 'and', 'not', 'ln', '.', 'ignoreMe', ')', ']'] | Return all lines within which Prosodic understood all words. | ['Return', 'all', 'lines', 'within', 'which', 'Prosodic', 'understood', 'all', 'words', '.'] | train | https://github.com/quadrismegistus/prosodic/blob/8af66ed9be40c922d03a0b09bc11c87d2061b618/prosodic/Text.py#L843-L846 |
5,137 | Autodesk/pyccc | pyccc/engines/subproc.py | Subprocess._check_file_is_under_workingdir | def _check_file_is_under_workingdir(filename, wdir):
""" Raise error if input is being staged to a location not underneath the working dir
"""
p = filename
if not os.path.isabs(p):
p = os.path.join(wdir, p)
targetpath = os.path.realpath(p)
wdir = os.path.realpath(wdir)
common = os.path.commonprefix([wdir, targetpath])
if len(common) < len(wdir):
raise exceptions.PathError(
"The subprocess engine does not support input files with absolute paths")
return p | python | def _check_file_is_under_workingdir(filename, wdir):
""" Raise error if input is being staged to a location not underneath the working dir
"""
p = filename
if not os.path.isabs(p):
p = os.path.join(wdir, p)
targetpath = os.path.realpath(p)
wdir = os.path.realpath(wdir)
common = os.path.commonprefix([wdir, targetpath])
if len(common) < len(wdir):
raise exceptions.PathError(
"The subprocess engine does not support input files with absolute paths")
return p | ['def', '_check_file_is_under_workingdir', '(', 'filename', ',', 'wdir', ')', ':', 'p', '=', 'filename', 'if', 'not', 'os', '.', 'path', '.', 'isabs', '(', 'p', ')', ':', 'p', '=', 'os', '.', 'path', '.', 'join', '(', 'wdir', ',', 'p', ')', 'targetpath', '=', 'os', '.', 'path', '.', 'realpath', '(', 'p', ')', 'wdir', '=', 'os', '.', 'path', '.', 'realpath', '(', 'wdir', ')', 'common', '=', 'os', '.', 'path', '.', 'commonprefix', '(', '[', 'wdir', ',', 'targetpath', ']', ')', 'if', 'len', '(', 'common', ')', '<', 'len', '(', 'wdir', ')', ':', 'raise', 'exceptions', '.', 'PathError', '(', '"The subprocess engine does not support input files with absolute paths"', ')', 'return', 'p'] | Raise error if input is being staged to a location not underneath the working dir | ['Raise', 'error', 'if', 'input', 'is', 'being', 'staged', 'to', 'a', 'location', 'not', 'underneath', 'the', 'working', 'dir'] | train | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/engines/subproc.py#L89-L101 |
5,138 | samjabrahams/anchorhub | anchorhub/messages.py | print_duplicate_anchor_information | def print_duplicate_anchor_information(duplicate_tags):
"""
Prints information about duplicate AnchorHub tags found during collection.
:param duplicate_tags: Dictionary mapping string file path keys to a list of
tuples. The tuples contain the following information, in order:
1. The string AnchorHub tag that was repeated
2. The line in the file that the duplicate was found, as a number
3. The string generated anchor that first used the repeated tag
"""
print("Duplicate anchors specified within file(s)")
print("Please modify your code to remove duplicates.\r\n")
for file_path in duplicate_tags:
print("File: " + file_path)
for line_info in duplicate_tags[file_path]:
print("\tLine " + str(line_info[1]) + # Line number
"\t#" + line_info[0] + # Repeated AnchorHub tag
" :\t" + line_info[2]) | python | def print_duplicate_anchor_information(duplicate_tags):
"""
Prints information about duplicate AnchorHub tags found during collection.
:param duplicate_tags: Dictionary mapping string file path keys to a list of
tuples. The tuples contain the following information, in order:
1. The string AnchorHub tag that was repeated
2. The line in the file that the duplicate was found, as a number
3. The string generated anchor that first used the repeated tag
"""
print("Duplicate anchors specified within file(s)")
print("Please modify your code to remove duplicates.\r\n")
for file_path in duplicate_tags:
print("File: " + file_path)
for line_info in duplicate_tags[file_path]:
print("\tLine " + str(line_info[1]) + # Line number
"\t#" + line_info[0] + # Repeated AnchorHub tag
" :\t" + line_info[2]) | ['def', 'print_duplicate_anchor_information', '(', 'duplicate_tags', ')', ':', 'print', '(', '"Duplicate anchors specified within file(s)"', ')', 'print', '(', '"Please modify your code to remove duplicates.\\r\\n"', ')', 'for', 'file_path', 'in', 'duplicate_tags', ':', 'print', '(', '"File: "', '+', 'file_path', ')', 'for', 'line_info', 'in', 'duplicate_tags', '[', 'file_path', ']', ':', 'print', '(', '"\\tLine "', '+', 'str', '(', 'line_info', '[', '1', ']', ')', '+', '# Line number', '"\\t#"', '+', 'line_info', '[', '0', ']', '+', '# Repeated AnchorHub tag', '" :\\t"', '+', 'line_info', '[', '2', ']', ')'] | Prints information about duplicate AnchorHub tags found during collection.
:param duplicate_tags: Dictionary mapping string file path keys to a list of
tuples. The tuples contain the following information, in order:
1. The string AnchorHub tag that was repeated
2. The line in the file that the duplicate was found, as a number
3. The string generated anchor that first used the repeated tag | ['Prints', 'information', 'about', 'duplicate', 'AnchorHub', 'tags', 'found', 'during', 'collection', '.'] | train | https://github.com/samjabrahams/anchorhub/blob/5ade359b08297d4003a5f477389c01de9e634b54/anchorhub/messages.py#L50-L68 |
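A small sketch of the data shape the printer above expects: file path keys mapping to (tag, line number, first generated anchor) tuples. The file name and tags are made up for illustration, and the import path is inferred from this row's file layout.

from anchorhub.messages import print_duplicate_anchor_information

duplicate_tags = {
    'docs/README.md': [
        ('setup', 42, 'setup-1'),        # '#setup' reused on line 42
        ('usage', 77, 'usage-basics'),   # '#usage' reused on line 77
    ],
}

print_duplicate_anchor_information(duplicate_tags)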
5,139 | scanny/python-pptx | pptx/oxml/chart/datalabel.py | CT_DLbl.remove_tx_rich | def remove_tx_rich(self):
"""
Remove any `c:tx[c:rich]` child, or do nothing if not present.
"""
matches = self.xpath('c:tx[c:rich]')
if not matches:
return
tx = matches[0]
self.remove(tx) | python | def remove_tx_rich(self):
"""
Remove any `c:tx[c:rich]` child, or do nothing if not present.
"""
matches = self.xpath('c:tx[c:rich]')
if not matches:
return
tx = matches[0]
self.remove(tx) | ['def', 'remove_tx_rich', '(', 'self', ')', ':', 'matches', '=', 'self', '.', 'xpath', '(', "'c:tx[c:rich]'", ')', 'if', 'not', 'matches', ':', 'return', 'tx', '=', 'matches', '[', '0', ']', 'self', '.', 'remove', '(', 'tx', ')'] | Remove any `c:tx[c:rich]` child, or do nothing if not present. | ['Remove', 'any', 'c', ':', 'tx', '[', 'c', ':', 'rich', ']', 'child', 'or', 'do', 'nothing', 'if', 'not', 'present', '.'] | train | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/oxml/chart/datalabel.py#L95-L103 |
5,140 | nickmckay/LiPD-utilities | Python/lipd/bag.py | create_bag | def create_bag(dir_bag):
"""
Create a Bag out of given files.
:param str dir_bag: Directory that contains csv, jsonld, and changelog files.
:return obj: Bag
"""
logger_bagit.info("enter create_bag")
# if not dir_bag:
# dir_bag = os.getcwd()
try:
bag = bagit.make_bag(dir_bag, {'Name': 'LiPD Project', 'Reference': 'www.lipds.net', 'DOI-Resolved': 'True'})
logger_bagit.info("created bag")
return bag
except FileNotFoundError as e:
print("Error: directory not found to create bagit")
logger_bagit.debug("create_bag: FileNotFoundError: failed to create bagit, {}".format(e))
except Exception as e:
print("Error: failed to create bagit bag")
logger_bagit.debug("create_bag: Exception: failed to create bag, {}".format(e))
return None | python | def create_bag(dir_bag):
"""
Create a Bag out of given files.
:param str dir_bag: Directory that contains csv, jsonld, and changelog files.
:return obj: Bag
"""
logger_bagit.info("enter create_bag")
# if not dir_bag:
# dir_bag = os.getcwd()
try:
bag = bagit.make_bag(dir_bag, {'Name': 'LiPD Project', 'Reference': 'www.lipds.net', 'DOI-Resolved': 'True'})
logger_bagit.info("created bag")
return bag
except FileNotFoundError as e:
print("Error: directory not found to create bagit")
logger_bagit.debug("create_bag: FileNotFoundError: failed to create bagit, {}".format(e))
except Exception as e:
print("Error: failed to create bagit bag")
logger_bagit.debug("create_bag: Exception: failed to create bag, {}".format(e))
return None | ['def', 'create_bag', '(', 'dir_bag', ')', ':', 'logger_bagit', '.', 'info', '(', '"enter create_bag"', ')', '# if not dir_bag:', '# dir_bag = os.getcwd()', 'try', ':', 'bag', '=', 'bagit', '.', 'make_bag', '(', 'dir_bag', ',', '{', "'Name'", ':', "'LiPD Project'", ',', "'Reference'", ':', "'www.lipds.net'", ',', "'DOI-Resolved'", ':', "'True'", '}', ')', 'logger_bagit', '.', 'info', '(', '"created bag"', ')', 'return', 'bag', 'except', 'FileNotFoundError', 'as', 'e', ':', 'print', '(', '"Error: directory not found to create bagit"', ')', 'logger_bagit', '.', 'debug', '(', '"create_bag: FileNotFoundError: failed to create bagit, {}"', '.', 'format', '(', 'e', ')', ')', 'except', 'Exception', 'as', 'e', ':', 'print', '(', '"Error: failed to create bagit bag"', ')', 'logger_bagit', '.', 'debug', '(', '"create_bag: Exception: failed to create bag, {}"', '.', 'format', '(', 'e', ')', ')', 'return', 'None'] | Create a Bag out of given files.
:param str dir_bag: Directory that contains csv, jsonld, and changelog files.
:return obj: Bag | ['Create', 'a', 'Bag', 'out', 'of', 'given', 'files', '.', ':', 'param', 'str', 'dir_bag', ':', 'Directory', 'that', 'contains', 'csv', 'jsonld', 'and', 'changelog', 'files', '.', ':', 'return', 'obj', ':', 'Bag'] | train | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/bag.py#L8-L27 |
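A short, hedged usage sketch for `create_bag()` above. The directory path is hypothetical and is expected to already contain the csv, jsonld and changelog files mentioned in the docstring; the import path is inferred from this row's file layout (Python/lipd/bag.py).

from lipd.bag import create_bag

bag = create_bag('/tmp/my_lipd_record')   # directory holding csv + jsonld files
if bag is None:
    print('bagging failed, see the log for details')
else:
    print('bag valid:', bag.is_valid())    # bagit.Bag exposes is_valid()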
5,141 | lorien/grab | grab/spider/task.py | Task.clone | def clone(self, **kwargs):
"""
Clone Task instance.
Reset network_try_count, increase task_try_count.
Reset priority attribute if it was not set explicitly.
"""
# First, create exact copy of the current Task object
attr_copy = self.__dict__.copy()
if attr_copy.get('grab_config') is not None:
del attr_copy['url']
if not attr_copy['priority_set_explicitly']:
attr_copy['priority'] = None
task = Task(**attr_copy)
# Reset some task properties if they have not
# been set explicitly in kwargs
if 'network_try_count' not in kwargs:
task.network_try_count = 0
if 'task_try_count' not in kwargs:
task.task_try_count = self.task_try_count + 1
if 'refresh_cache' not in kwargs:
task.refresh_cache = False
if 'disable_cache' not in kwargs:
task.disable_cache = False
if kwargs.get('url') is not None and kwargs.get('grab') is not None:
raise SpiderMisuseError('Options url and grab could not be '
'used together')
if (kwargs.get('url') is not None and
kwargs.get('grab_config') is not None):
raise SpiderMisuseError('Options url and grab_config could not '
'be used together')
if (kwargs.get('grab') is not None and
kwargs.get('grab_config') is not None):
raise SpiderMisuseError('Options grab and grab_config could not '
'be used together')
if kwargs.get('grab'):
task.setup_grab_config(kwargs['grab'].dump_config())
del kwargs['grab']
elif kwargs.get('grab_config'):
task.setup_grab_config(kwargs['grab_config'])
del kwargs['grab_config']
elif kwargs.get('url'):
task.url = kwargs['url']
if task.grab_config:
task.grab_config['url'] = kwargs['url']
del kwargs['url']
for key, value in kwargs.items():
setattr(task, key, value)
task.process_delay_option(None)
return task | python | def clone(self, **kwargs):
"""
Clone Task instance.
Reset network_try_count, increase task_try_count.
Reset priority attribute if it was not set explicitly.
"""
# First, create exact copy of the current Task object
attr_copy = self.__dict__.copy()
if attr_copy.get('grab_config') is not None:
del attr_copy['url']
if not attr_copy['priority_set_explicitly']:
attr_copy['priority'] = None
task = Task(**attr_copy)
# Reset some task properties if they have not
# been set explicitly in kwargs
if 'network_try_count' not in kwargs:
task.network_try_count = 0
if 'task_try_count' not in kwargs:
task.task_try_count = self.task_try_count + 1
if 'refresh_cache' not in kwargs:
task.refresh_cache = False
if 'disable_cache' not in kwargs:
task.disable_cache = False
if kwargs.get('url') is not None and kwargs.get('grab') is not None:
raise SpiderMisuseError('Options url and grab could not be '
'used together')
if (kwargs.get('url') is not None and
kwargs.get('grab_config') is not None):
raise SpiderMisuseError('Options url and grab_config could not '
'be used together')
if (kwargs.get('grab') is not None and
kwargs.get('grab_config') is not None):
raise SpiderMisuseError('Options grab and grab_config could not '
'be used together')
if kwargs.get('grab'):
task.setup_grab_config(kwargs['grab'].dump_config())
del kwargs['grab']
elif kwargs.get('grab_config'):
task.setup_grab_config(kwargs['grab_config'])
del kwargs['grab_config']
elif kwargs.get('url'):
task.url = kwargs['url']
if task.grab_config:
task.grab_config['url'] = kwargs['url']
del kwargs['url']
for key, value in kwargs.items():
setattr(task, key, value)
task.process_delay_option(None)
return task | ['def', 'clone', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', '# First, create exact copy of the current Task object', 'attr_copy', '=', 'self', '.', '__dict__', '.', 'copy', '(', ')', 'if', 'attr_copy', '.', 'get', '(', "'grab_config'", ')', 'is', 'not', 'None', ':', 'del', 'attr_copy', '[', "'url'", ']', 'if', 'not', 'attr_copy', '[', "'priority_set_explicitly'", ']', ':', 'attr_copy', '[', "'priority'", ']', '=', 'None', 'task', '=', 'Task', '(', '*', '*', 'attr_copy', ')', '# Reset some task properties if they have not', '# been set explicitly in kwargs', 'if', "'network_try_count'", 'not', 'in', 'kwargs', ':', 'task', '.', 'network_try_count', '=', '0', 'if', "'task_try_count'", 'not', 'in', 'kwargs', ':', 'task', '.', 'task_try_count', '=', 'self', '.', 'task_try_count', '+', '1', 'if', "'refresh_cache'", 'not', 'in', 'kwargs', ':', 'task', '.', 'refresh_cache', '=', 'False', 'if', "'disable_cache'", 'not', 'in', 'kwargs', ':', 'task', '.', 'disable_cache', '=', 'False', 'if', 'kwargs', '.', 'get', '(', "'url'", ')', 'is', 'not', 'None', 'and', 'kwargs', '.', 'get', '(', "'grab'", ')', 'is', 'not', 'None', ':', 'raise', 'SpiderMisuseError', '(', "'Options url and grab could not be '", "'used together'", ')', 'if', '(', 'kwargs', '.', 'get', '(', "'url'", ')', 'is', 'not', 'None', 'and', 'kwargs', '.', 'get', '(', "'grab_config'", ')', 'is', 'not', 'None', ')', ':', 'raise', 'SpiderMisuseError', '(', "'Options url and grab_config could not '", "'be used together'", ')', 'if', '(', 'kwargs', '.', 'get', '(', "'grab'", ')', 'is', 'not', 'None', 'and', 'kwargs', '.', 'get', '(', "'grab_config'", ')', 'is', 'not', 'None', ')', ':', 'raise', 'SpiderMisuseError', '(', "'Options grab and grab_config could not '", "'be used together'", ')', 'if', 'kwargs', '.', 'get', '(', "'grab'", ')', ':', 'task', '.', 'setup_grab_config', '(', 'kwargs', '[', "'grab'", ']', '.', 'dump_config', '(', ')', ')', 'del', 'kwargs', '[', "'grab'", ']', 'elif', 'kwargs', '.', 'get', '(', "'grab_config'", ')', ':', 'task', '.', 'setup_grab_config', '(', 'kwargs', '[', "'grab_config'", ']', ')', 'del', 'kwargs', '[', "'grab_config'", ']', 'elif', 'kwargs', '.', 'get', '(', "'url'", ')', ':', 'task', '.', 'url', '=', 'kwargs', '[', "'url'", ']', 'if', 'task', '.', 'grab_config', ':', 'task', '.', 'grab_config', '[', "'url'", ']', '=', 'kwargs', '[', "'url'", ']', 'del', 'kwargs', '[', "'url'", ']', 'for', 'key', ',', 'value', 'in', 'kwargs', '.', 'items', '(', ')', ':', 'setattr', '(', 'task', ',', 'key', ',', 'value', ')', 'task', '.', 'process_delay_option', '(', 'None', ')', 'return', 'task'] | Clone Task instance.
Reset network_try_count, increase task_try_count.
Reset priority attribute if it was not set explicitly. | ['Clone', 'Task', 'instance', '.'] | train | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/spider/task.py#L170-L228 |
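A hedged sketch of the cloning behaviour described above, as it might be used inside a Grab spider handler. The task name, URLs and priority are illustrative, and `Task('listing', url=...)` is assumed to follow Grab's usual name-plus-url constructor.

from grab.spider import Task

# Original task for page 1 of some listing (names and URLs are made up).
task = Task('listing', url='http://example.com/items?page=1', priority=10)

# clone() keeps the handler name, increases task_try_count, resets
# network_try_count, and here points the copy at the next page.
next_task = task.clone(url='http://example.com/items?page=2')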
5,142 | eandersson/amqpstorm | amqpstorm/channel.py | Channel.write_frames | def write_frames(self, frames_out):
"""Write multiple pamqp frames from the current channel.
:param list frames_out: A list of pamqp frames.
:return:
"""
self.check_for_errors()
self._connection.write_frames(self.channel_id, frames_out) | python | def write_frames(self, frames_out):
"""Write multiple pamqp frames from the current channel.
:param list frames_out: A list of pamqp frames.
:return:
"""
self.check_for_errors()
self._connection.write_frames(self.channel_id, frames_out) | ['def', 'write_frames', '(', 'self', ',', 'frames_out', ')', ':', 'self', '.', 'check_for_errors', '(', ')', 'self', '.', '_connection', '.', 'write_frames', '(', 'self', '.', 'channel_id', ',', 'frames_out', ')'] | Write multiple pamqp frames from the current channel.
:param list frames_out: A list of pamqp frames.
:return: | ['Write', 'multiple', 'pamqp', 'frames', 'from', 'the', 'current', 'channel', '.'] | train | https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L346-L354 |
5,143 | gnullByte/dotcolors | dotcolors/getdots.py | get_urls | def get_urls(htmlDoc, limit=200):
'''takes in html document as string, returns links to dots'''
soup = BeautifulSoup( htmlDoc )
anchors = soup.findAll( 'a' )
urls = {}
counter = 0
for i,v in enumerate( anchors ):
href = anchors[i].get( 'href' )
if ('dots' in href and counter < limit):
href = href.split('/')[2]
text = anchors[i].text.split(' ')[0].replace('/', '_')
urls[ text ] = href
counter += 1
return urls | python | def get_urls(htmlDoc, limit=200):
'''takes in html document as string, returns links to dots'''
soup = BeautifulSoup( htmlDoc )
anchors = soup.findAll( 'a' )
urls = {}
counter = 0
for i,v in enumerate( anchors ):
href = anchors[i].get( 'href' )
if ('dots' in href and counter < limit):
href = href.split('/')[2]
text = anchors[i].text.split(' ')[0].replace('/', '_')
urls[ text ] = href
counter += 1
return urls | ['def', 'get_urls', '(', 'htmlDoc', ',', 'limit', '=', '200', ')', ':', 'soup', '=', 'BeautifulSoup', '(', 'htmlDoc', ')', 'anchors', '=', 'soup', '.', 'findAll', '(', "'a'", ')', 'urls', '=', '{', '}', 'counter', '=', '0', 'for', 'i', ',', 'v', 'in', 'enumerate', '(', 'anchors', ')', ':', 'href', '=', 'anchors', '[', 'i', ']', '.', 'get', '(', "'href'", ')', 'if', '(', "'dots'", 'in', 'href', 'and', 'counter', '<', 'limit', ')', ':', 'href', '=', 'href', '.', 'split', '(', "'/'", ')', '[', '2', ']', 'text', '=', 'anchors', '[', 'i', ']', '.', 'text', '.', 'split', '(', "' '", ')', '[', '0', ']', '.', 'replace', '(', "'/'", ',', "'_'", ')', 'urls', '[', 'text', ']', '=', 'href', 'counter', '+=', '1', 'return', 'urls'] | takes in html document as string, returns links to dots | ['takes', 'in', 'html', 'document', 'as', 'string', 'returns', 'links', 'to', 'dots'] | train | https://github.com/gnullByte/dotcolors/blob/4b09ff9862b88b3125fe9cd86aa054694ed3e46e/dotcolors/getdots.py#L42-L59 |
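A tiny, self-contained example for the `get_urls()` parser above. The HTML snippet is invented, but it matches what the code looks for: anchors whose href contains 'dots', e.g. '/dots/<id>'. The import path is inferred from this row's file layout.

from dotcolors.getdots import get_urls

html = '''
<html><body>
  <a href="/dots/1234">solarized dark</a>
  <a href="/dots/5678">gruvbox light</a>
  <a href="/about">about</a>
</body></html>
'''

urls = get_urls(html, limit=10)
print(urls)   # expected: {'solarized': '1234', 'gruvbox': '5678'}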
5,144 | kylejusticemagnuson/pyti | pyti/exponential_moving_average.py | exponential_moving_average | def exponential_moving_average(data, period):
"""
Exponential Moving Average.
Formula:
    p0 + (1 - w) * p1 + (1 - w)^2 * p2 + (1 - w)^3 * p3 +...
/ 1 + (1 - w) + (1 - w)^2 + (1 - w)^3 +...
where: w = 2 / (N + 1)
"""
catch_errors.check_for_period_error(data, period)
emas = [exponential_moving_average_helper(
data[idx - period + 1:idx + 1], period) for idx in range(period - 1, len(data))]
emas = fill_for_noncomputable_vals(data, emas)
return emas | python | def exponential_moving_average(data, period):
"""
Exponential Moving Average.
Formula:
    p0 + (1 - w) * p1 + (1 - w)^2 * p2 + (1 - w)^3 * p3 +...
/ 1 + (1 - w) + (1 - w)^2 + (1 - w)^3 +...
where: w = 2 / (N + 1)
"""
catch_errors.check_for_period_error(data, period)
emas = [exponential_moving_average_helper(
data[idx - period + 1:idx + 1], period) for idx in range(period - 1, len(data))]
emas = fill_for_noncomputable_vals(data, emas)
return emas | ['def', 'exponential_moving_average', '(', 'data', ',', 'period', ')', ':', 'catch_errors', '.', 'check_for_period_error', '(', 'data', ',', 'period', ')', 'emas', '=', '[', 'exponential_moving_average_helper', '(', 'data', '[', 'idx', '-', 'period', '+', '1', ':', 'idx', '+', '1', ']', ',', 'period', ')', 'for', 'idx', 'in', 'range', '(', 'period', '-', '1', ',', 'len', '(', 'data', ')', ')', ']', 'emas', '=', 'fill_for_noncomputable_vals', '(', 'data', ',', 'emas', ')', 'return', 'emas'] | Exponential Moving Average.
Formula:
p0 + (1 - w) * p1 + (1 - w)^2 * p2 + (1 - w)^3 * p3 +...
/ 1 + (1 - w) + (1 - w)^2 + (1 - w)^3 +...
where: w = 2 / (N + 1) | ['Exponential', 'Moving', 'Average', '.'] | train | https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/exponential_moving_average.py#L7-L21 |
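A short usage sketch for the function above, together with the smoothing weight from the docstring formula. The price series is invented, and the import path follows this row's file layout (pyti/exponential_moving_average.py).

from pyti.exponential_moving_average import exponential_moving_average as ema

period = 5
weight = 2.0 / (period + 1)          # w = 2 / (N + 1), as in the docstring above
prices = [10, 11, 12, 13, 14, 15, 16, 15, 14, 13]

values = ema(prices, period)
# The first period-1 positions are non-computable and get padded by
# fill_for_noncomputable_vals(); the remaining entries are the smoothed prices.
print(weight, values)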
5,145 | exxeleron/qPython | qpython/qconnection.py | QConnection.sendSync | def sendSync(self, query, *parameters, **options):
'''Performs a synchronous query against a q service and returns parsed
data.
        In a typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Executes a q expression:
>>> print(q.sendSync('til 10'))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with a single parameter:
>>> print(q.sendSync('{til x}', 10))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with two parameters:
>>> print(q.sendSync('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
>>> print(q.sendSync('{y + til x}', *[10, 1]))
[ 1 2 3 4 5 6 7 8 9 10]
The :func:`.sendSync` is called from the overloaded :func:`.__call__`
function. This allows :class:`.QConnection` instance to be called as
a function:
>>> print(q('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:returns: query result parsed to Python data structures
:raises: :class:`.QConnectionException`, :class:`.QWriterException`,
:class:`.QReaderException`
'''
self.query(MessageType.SYNC, query, *parameters, **options)
response = self.receive(data_only = False, **options)
if response.type == MessageType.RESPONSE:
return response.data
else:
self._writer.write(QException('nyi: qPython expected response message'), MessageType.ASYNC if response.type == MessageType.ASYNC else MessageType.RESPONSE)
raise QReaderException('Received message of type: %s where response was expected') | python | def sendSync(self, query, *parameters, **options):
'''Performs a synchronous query against a q service and returns parsed
data.
        In a typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Executes a q expression:
>>> print(q.sendSync('til 10'))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with a single parameter:
>>> print(q.sendSync('{til x}', 10))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with two parameters:
>>> print(q.sendSync('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
>>> print(q.sendSync('{y + til x}', *[10, 1]))
[ 1 2 3 4 5 6 7 8 9 10]
The :func:`.sendSync` is called from the overloaded :func:`.__call__`
function. This allows :class:`.QConnection` instance to be called as
a function:
>>> print(q('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:returns: query result parsed to Python data structures
:raises: :class:`.QConnectionException`, :class:`.QWriterException`,
:class:`.QReaderException`
'''
self.query(MessageType.SYNC, query, *parameters, **options)
response = self.receive(data_only = False, **options)
if response.type == MessageType.RESPONSE:
return response.data
else:
self._writer.write(QException('nyi: qPython expected response message'), MessageType.ASYNC if response.type == MessageType.ASYNC else MessageType.RESPONSE)
raise QReaderException('Received message of type: %s where response was expected') | ['def', 'sendSync', '(', 'self', ',', 'query', ',', '*', 'parameters', ',', '*', '*', 'options', ')', ':', 'self', '.', 'query', '(', 'MessageType', '.', 'SYNC', ',', 'query', ',', '*', 'parameters', ',', '*', '*', 'options', ')', 'response', '=', 'self', '.', 'receive', '(', 'data_only', '=', 'False', ',', '*', '*', 'options', ')', 'if', 'response', '.', 'type', '==', 'MessageType', '.', 'RESPONSE', ':', 'return', 'response', '.', 'data', 'else', ':', 'self', '.', '_writer', '.', 'write', '(', 'QException', '(', "'nyi: qPython expected response message'", ')', ',', 'MessageType', '.', 'ASYNC', 'if', 'response', '.', 'type', '==', 'MessageType', '.', 'ASYNC', 'else', 'MessageType', '.', 'RESPONSE', ')', 'raise', 'QReaderException', '(', "'Received message of type: %s where response was expected'", ')'] | Performs a synchronous query against a q service and returns parsed
data.
In a typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Executes a q expression:
>>> print(q.sendSync('til 10'))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with a single parameter:
>>> print(q.sendSync('{til x}', 10))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with two parameters:
>>> print(q.sendSync('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
>>> print(q.sendSync('{y + til x}', *[10, 1]))
[ 1 2 3 4 5 6 7 8 9 10]
The :func:`.sendSync` is called from the overloaded :func:`.__call__`
function. This allows :class:`.QConnection` instance to be called as
a function:
>>> print(q('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:returns: query result parsed to Python data structures
:raises: :class:`.QConnectionException`, :class:`.QWriterException`,
:class:`.QReaderException` | ['Performs', 'a', 'synchronous', 'query', 'against', 'a', 'q', 'service', 'and', 'returns', 'parsed', 'data', '.', 'In', 'typical', 'use', 'case', 'query', 'is', 'the', 'name', 'of', 'the', 'function', 'to', 'call', 'and', 'parameters', 'are', 'its', 'parameters', '.', 'When', 'parameters', 'list', 'is', 'empty', 'the', 'query', 'can', 'be', 'an', 'arbitrary', 'q', 'expression', '(', 'e', '.', 'g', '.', '0', '+', '/', 'til', '100', ')', '.', 'Executes', 'a', 'q', 'expression', ':', '>>>', 'print', '(', 'q', '.', 'sendSync', '(', 'til', '10', '))', '[', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ']', 'Executes', 'an', 'anonymous', 'q', 'function', 'with', 'a', 'single', 'parameter', ':', '>>>', 'print', '(', 'q', '.', 'sendSync', '(', '{', 'til', 'x', '}', '10', '))', '[', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ']', 'Executes', 'an', 'anonymous', 'q', 'function', 'with', 'two', 'parameters', ':', '>>>', 'print', '(', 'q', '.', 'sendSync', '(', '{', 'y', '+', 'til', 'x', '}', '10', '1', '))', '[', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', ']', '>>>', 'print', '(', 'q', '.', 'sendSync', '(', '{', 'y', '+', 'til', 'x', '}', '*', '[', '10', '1', ']', '))', '[', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', ']', 'The', ':', 'func', ':', '.', 'sendSync', 'is', 'called', 'from', 'the', 'overloaded', ':', 'func', ':', '.', '__call__', 'function', '.', 'This', 'allows', ':', 'class', ':', '.', 'QConnection', 'instance', 'to', 'be', 'called', 'as', 'a', 'function', ':', '>>>', 'print', '(', 'q', '(', '{', 'y', '+', 'til', 'x', '}', '10', '1', '))', '[', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', ']', ':', 'Parameters', ':', '-', 'query', '(', 'string', ')', '-', 'query', 'to', 'be', 'executed', '-', 'parameters', '(', 'list', 'or', 'None', ')', '-', 'parameters', 'for', 'the', 'query', ':', 'Options', ':', '-', 'raw', '(', 'boolean', ')', '-', 'if', 'True', 'returns', 'raw', 'data', 'chunk', 'instead', 'of', 'parsed', 'data', '**', 'Default', '**', ':', 'False', '-', 'numpy_temporals', '(', 'boolean', ')', '-', 'if', 'False', 'temporal', 'vectors', 'are', 'backed', 'by', 'raw', 'q', 'representation', '(', ':', 'class', ':', '.', 'QTemporalList', ':', 'class', ':', '.', 'QTemporal', ')', 'instances', 'otherwise', 'are', 'represented', 'as', 'numpy', 'datetime64', '/', 'timedelta64', 'arrays', 'and', 'atoms', '**', 'Default', '**', ':', 'False', '-', 'single_char_strings', '(', 'boolean', ')', '-', 'if', 'True', 'single', 'char', 'Python', 'strings', 'are', 'encoded', 'as', 'q', 'strings', 'instead', 'of', 'chars', '**', 'Default', '**', ':', 'False'] | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qconnection.py#L248-L309 |
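The doctest-style examples above assume an already-open handle `q`. A hedged sketch of creating one with qPython follows; the host and port are placeholders, and the `QConnection` constructor arguments follow common qPython usage rather than anything stated in this row.

from qpython import qconnection

q = qconnection.QConnection(host='localhost', port=5000)
q.open()
try:
    print(q.sendSync('til 10'))              # plain q expression
    print(q.sendSync('{y + til x}', 10, 1))  # anonymous function with parameters
    print(q('{y + til x}', 10, 1))           # __call__ forwards to sendSync
finally:
    q.close()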
5,146 | dw/mitogen | mitogen/parent.py | Stream.construct | def construct(self, max_message_size, remote_name=None, python_path=None,
debug=False, connect_timeout=None, profiling=False,
unidirectional=False, old_router=None, **kwargs):
"""Get the named context running on the local machine, creating it if
it does not exist."""
super(Stream, self).construct(**kwargs)
self.max_message_size = max_message_size
if python_path:
self.python_path = python_path
if connect_timeout:
self.connect_timeout = connect_timeout
if remote_name is None:
remote_name = get_default_remote_name()
if '/' in remote_name or '\\' in remote_name:
raise ValueError('remote_name= cannot contain slashes')
self.remote_name = remote_name
self.debug = debug
self.profiling = profiling
self.unidirectional = unidirectional
self.max_message_size = max_message_size
self.connect_deadline = time.time() + self.connect_timeout | python | def construct(self, max_message_size, remote_name=None, python_path=None,
debug=False, connect_timeout=None, profiling=False,
unidirectional=False, old_router=None, **kwargs):
"""Get the named context running on the local machine, creating it if
it does not exist."""
super(Stream, self).construct(**kwargs)
self.max_message_size = max_message_size
if python_path:
self.python_path = python_path
if connect_timeout:
self.connect_timeout = connect_timeout
if remote_name is None:
remote_name = get_default_remote_name()
if '/' in remote_name or '\\' in remote_name:
raise ValueError('remote_name= cannot contain slashes')
self.remote_name = remote_name
self.debug = debug
self.profiling = profiling
self.unidirectional = unidirectional
self.max_message_size = max_message_size
self.connect_deadline = time.time() + self.connect_timeout | ['def', 'construct', '(', 'self', ',', 'max_message_size', ',', 'remote_name', '=', 'None', ',', 'python_path', '=', 'None', ',', 'debug', '=', 'False', ',', 'connect_timeout', '=', 'None', ',', 'profiling', '=', 'False', ',', 'unidirectional', '=', 'False', ',', 'old_router', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'super', '(', 'Stream', ',', 'self', ')', '.', 'construct', '(', '*', '*', 'kwargs', ')', 'self', '.', 'max_message_size', '=', 'max_message_size', 'if', 'python_path', ':', 'self', '.', 'python_path', '=', 'python_path', 'if', 'connect_timeout', ':', 'self', '.', 'connect_timeout', '=', 'connect_timeout', 'if', 'remote_name', 'is', 'None', ':', 'remote_name', '=', 'get_default_remote_name', '(', ')', 'if', "'/'", 'in', 'remote_name', 'or', "'\\\\'", 'in', 'remote_name', ':', 'raise', 'ValueError', '(', "'remote_name= cannot contain slashes'", ')', 'self', '.', 'remote_name', '=', 'remote_name', 'self', '.', 'debug', '=', 'debug', 'self', '.', 'profiling', '=', 'profiling', 'self', '.', 'unidirectional', '=', 'unidirectional', 'self', '.', 'max_message_size', '=', 'max_message_size', 'self', '.', 'connect_deadline', '=', 'time', '.', 'time', '(', ')', '+', 'self', '.', 'connect_timeout'] | Get the named context running on the local machine, creating it if
it does not exist. | ['Get', 'the', 'named', 'context', 'running', 'on', 'the', 'local', 'machine', 'creating', 'it', 'if', 'it', 'does', 'not', 'exist', '.'] | train | https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/parent.py#L1210-L1230 |
5,147 | ludwiktrammer/applamp | applamp/rgb.py | RgbLight.fade_out | def fade_out(self, duration=3):
"""Turns off the light by gradually fading it out.
The optional `duration` parameter allows for control
of the fade out duration (in seconds)"""
super(RgbLight, self).fade_out(duration)
self.off() | python | def fade_out(self, duration=3):
"""Turns off the light by gradually fading it out.
The optional `duration` parameter allows for control
of the fade out duration (in seconds)"""
super(RgbLight, self).fade_out(duration)
self.off() | ['def', 'fade_out', '(', 'self', ',', 'duration', '=', '3', ')', ':', 'super', '(', 'RgbLight', ',', 'self', ')', '.', 'fade_out', '(', 'duration', ')', 'self', '.', 'off', '(', ')'] | Turns off the light by gradually fading it out.
The optional `duration` parameter allows for control
of the fade out duration (in seconds) | ['Turns', 'off', 'the', 'light', 'by', 'gradually', 'fading', 'it', 'out', '.', 'The', 'optional', 'duration', 'parameter', 'allows', 'for', 'control', 'of', 'the', 'fade', 'out', 'duration', '(', 'in', 'seconds', ')'] | train | https://github.com/ludwiktrammer/applamp/blob/90d7d463826f0c8dcd33dfdbc5efc9fa44b0b484/applamp/rgb.py#L51-L56 |
5,148 | pgjones/quart | quart/app.py | Quart.make_default_options_response | async def make_default_options_response(self) -> Response:
"""This is the default route function for OPTIONS requests."""
methods = _request_ctx_stack.top.url_adapter.allowed_methods()
return self.response_class('', headers={'Allow': ', '.join(methods)}) | python | async def make_default_options_response(self) -> Response:
"""This is the default route function for OPTIONS requests."""
methods = _request_ctx_stack.top.url_adapter.allowed_methods()
return self.response_class('', headers={'Allow': ', '.join(methods)}) | ['async', 'def', 'make_default_options_response', '(', 'self', ')', '->', 'Response', ':', 'methods', '=', '_request_ctx_stack', '.', 'top', '.', 'url_adapter', '.', 'allowed_methods', '(', ')', 'return', 'self', '.', 'response_class', '(', "''", ',', 'headers', '=', '{', "'Allow'", ':', "', '", '.', 'join', '(', 'methods', ')', '}', ')'] | This is the default route function for OPTIONS requests. | ['This', 'is', 'the', 'default', 'route', 'function', 'for', 'OPTIONS', 'requests', '.'] | train | https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/app.py#L1456-L1459 |
5,149 | qubell/contrib-python-qubell-client | qubell/monitor/monitor.py | Monitor.launch | def launch(self, timeout=2):
"""
Hierapp instance, with environment dependencies:
- can be launched within short timeout
- auto-destroys shortly
"""
self.start_time = time.time()
self.end_time = time.time()
instance = self.app.launch(environment=self.env)
time.sleep(2) # Instance need time to appear in ui
assert instance.running(timeout=timeout), "Monitor didn't get Active state"
launched = instance.status == 'Active'
instance.reschedule_workflow(workflow_name='destroy', timestamp=self.destroy_interval)
assert instance.destroyed(timeout=timeout), "Monitor didn't get Destroyed after short time"
stopped = instance.status == 'Destroyed'
instance.force_remove()
self.end_time = time.time()
self.status = launched and stopped | python | def launch(self, timeout=2):
"""
Hierapp instance, with environment dependencies:
- can be launched within short timeout
- auto-destroys shortly
"""
self.start_time = time.time()
self.end_time = time.time()
instance = self.app.launch(environment=self.env)
time.sleep(2) # Instance need time to appear in ui
assert instance.running(timeout=timeout), "Monitor didn't get Active state"
launched = instance.status == 'Active'
instance.reschedule_workflow(workflow_name='destroy', timestamp=self.destroy_interval)
assert instance.destroyed(timeout=timeout), "Monitor didn't get Destroyed after short time"
stopped = instance.status == 'Destroyed'
instance.force_remove()
self.end_time = time.time()
self.status = launched and stopped | ['def', 'launch', '(', 'self', ',', 'timeout', '=', '2', ')', ':', 'self', '.', 'start_time', '=', 'time', '.', 'time', '(', ')', 'self', '.', 'end_time', '=', 'time', '.', 'time', '(', ')', 'instance', '=', 'self', '.', 'app', '.', 'launch', '(', 'environment', '=', 'self', '.', 'env', ')', 'time', '.', 'sleep', '(', '2', ')', '# Instance need time to appear in ui', 'assert', 'instance', '.', 'running', '(', 'timeout', '=', 'timeout', ')', ',', '"Monitor didn\'t get Active state"', 'launched', '=', 'instance', '.', 'status', '==', "'Active'", 'instance', '.', 'reschedule_workflow', '(', 'workflow_name', '=', "'destroy'", ',', 'timestamp', '=', 'self', '.', 'destroy_interval', ')', 'assert', 'instance', '.', 'destroyed', '(', 'timeout', '=', 'timeout', ')', ',', '"Monitor didn\'t get Destroyed after short time"', 'stopped', '=', 'instance', '.', 'status', '==', "'Destroyed'", 'instance', '.', 'force_remove', '(', ')', 'self', '.', 'end_time', '=', 'time', '.', 'time', '(', ')', 'self', '.', 'status', '=', 'launched', 'and', 'stopped'] | Hierapp instance, with environment dependencies:
- can be launched within short timeout
- auto-destroys shortly | ['Hierapp', 'instance', 'with', 'environment', 'dependencies', ':', '-', 'can', 'be', 'launched', 'within', 'short', 'timeout', '-', 'auto', '-', 'destroys', 'shortly'] | train | https://github.com/qubell/contrib-python-qubell-client/blob/4586ea11d5103c2ff9607d3ed922b5a0991b8845/qubell/monitor/monitor.py#L138-L156 |
5,150 | gccxml/pygccxml | pygccxml/declarations/traits_impl_details.py | impl_details.find_value_type | def find_value_type(global_ns, value_type_str):
"""implementation details"""
if not value_type_str.startswith('::'):
value_type_str = '::' + value_type_str
found = global_ns.decls(
name=value_type_str,
function=lambda decl: not isinstance(decl, calldef.calldef_t),
allow_empty=True)
if not found:
no_global_ns_value_type_str = value_type_str[2:]
if no_global_ns_value_type_str in cpptypes.FUNDAMENTAL_TYPES:
return cpptypes.FUNDAMENTAL_TYPES[no_global_ns_value_type_str]
elif type_traits.is_std_string(value_type_str):
string_ = global_ns.typedef('::std::string')
return type_traits.remove_declarated(string_)
elif type_traits.is_std_wstring(value_type_str):
string_ = global_ns.typedef('::std::wstring')
return type_traits.remove_declarated(string_)
else:
value_type_str = no_global_ns_value_type_str
has_const = value_type_str.startswith('const ')
if has_const:
value_type_str = value_type_str[len('const '):]
has_pointer = value_type_str.endswith('*')
if has_pointer:
value_type_str = value_type_str[:-1]
found = None
if has_const or has_pointer:
found = impl_details.find_value_type(
global_ns,
value_type_str)
if not found:
return None
else:
if isinstance(found, class_declaration.class_types):
return cpptypes.declarated_t(found)
if has_const:
return cpptypes.const_t(found)
if has_pointer:
return cpptypes.pointer_t(found)
if len(found) == 1:
return found[0]
return None | python | def find_value_type(global_ns, value_type_str):
"""implementation details"""
if not value_type_str.startswith('::'):
value_type_str = '::' + value_type_str
found = global_ns.decls(
name=value_type_str,
function=lambda decl: not isinstance(decl, calldef.calldef_t),
allow_empty=True)
if not found:
no_global_ns_value_type_str = value_type_str[2:]
if no_global_ns_value_type_str in cpptypes.FUNDAMENTAL_TYPES:
return cpptypes.FUNDAMENTAL_TYPES[no_global_ns_value_type_str]
elif type_traits.is_std_string(value_type_str):
string_ = global_ns.typedef('::std::string')
return type_traits.remove_declarated(string_)
elif type_traits.is_std_wstring(value_type_str):
string_ = global_ns.typedef('::std::wstring')
return type_traits.remove_declarated(string_)
else:
value_type_str = no_global_ns_value_type_str
has_const = value_type_str.startswith('const ')
if has_const:
value_type_str = value_type_str[len('const '):]
has_pointer = value_type_str.endswith('*')
if has_pointer:
value_type_str = value_type_str[:-1]
found = None
if has_const or has_pointer:
found = impl_details.find_value_type(
global_ns,
value_type_str)
if not found:
return None
else:
if isinstance(found, class_declaration.class_types):
return cpptypes.declarated_t(found)
if has_const:
return cpptypes.const_t(found)
if has_pointer:
return cpptypes.pointer_t(found)
if len(found) == 1:
return found[0]
return None | ['def', 'find_value_type', '(', 'global_ns', ',', 'value_type_str', ')', ':', 'if', 'not', 'value_type_str', '.', 'startswith', '(', "'::'", ')', ':', 'value_type_str', '=', "'::'", '+', 'value_type_str', 'found', '=', 'global_ns', '.', 'decls', '(', 'name', '=', 'value_type_str', ',', 'function', '=', 'lambda', 'decl', ':', 'not', 'isinstance', '(', 'decl', ',', 'calldef', '.', 'calldef_t', ')', ',', 'allow_empty', '=', 'True', ')', 'if', 'not', 'found', ':', 'no_global_ns_value_type_str', '=', 'value_type_str', '[', '2', ':', ']', 'if', 'no_global_ns_value_type_str', 'in', 'cpptypes', '.', 'FUNDAMENTAL_TYPES', ':', 'return', 'cpptypes', '.', 'FUNDAMENTAL_TYPES', '[', 'no_global_ns_value_type_str', ']', 'elif', 'type_traits', '.', 'is_std_string', '(', 'value_type_str', ')', ':', 'string_', '=', 'global_ns', '.', 'typedef', '(', "'::std::string'", ')', 'return', 'type_traits', '.', 'remove_declarated', '(', 'string_', ')', 'elif', 'type_traits', '.', 'is_std_wstring', '(', 'value_type_str', ')', ':', 'string_', '=', 'global_ns', '.', 'typedef', '(', "'::std::wstring'", ')', 'return', 'type_traits', '.', 'remove_declarated', '(', 'string_', ')', 'else', ':', 'value_type_str', '=', 'no_global_ns_value_type_str', 'has_const', '=', 'value_type_str', '.', 'startswith', '(', "'const '", ')', 'if', 'has_const', ':', 'value_type_str', '=', 'value_type_str', '[', 'len', '(', "'const '", ')', ':', ']', 'has_pointer', '=', 'value_type_str', '.', 'endswith', '(', "'*'", ')', 'if', 'has_pointer', ':', 'value_type_str', '=', 'value_type_str', '[', ':', '-', '1', ']', 'found', '=', 'None', 'if', 'has_const', 'or', 'has_pointer', ':', 'found', '=', 'impl_details', '.', 'find_value_type', '(', 'global_ns', ',', 'value_type_str', ')', 'if', 'not', 'found', ':', 'return', 'None', 'else', ':', 'if', 'isinstance', '(', 'found', ',', 'class_declaration', '.', 'class_types', ')', ':', 'return', 'cpptypes', '.', 'declarated_t', '(', 'found', ')', 'if', 'has_const', ':', 'return', 'cpptypes', '.', 'const_t', '(', 'found', ')', 'if', 'has_pointer', ':', 'return', 'cpptypes', '.', 'pointer_t', '(', 'found', ')', 'if', 'len', '(', 'found', ')', '==', '1', ':', 'return', 'found', '[', '0', ']', 'return', 'None'] | implementation details | ['implementation', 'details'] | train | https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/declarations/traits_impl_details.py#L45-L88 |
5,151 | iwanbk/nyamuk | nyamuk/nyamuk.py | Nyamuk.unsubscribe | def unsubscribe(self, topic):
"""Unsubscribe to some topic."""
if self.sock == NC.INVALID_SOCKET:
return NC.ERR_NO_CONN
self.logger.info("UNSUBSCRIBE: %s", topic)
return self.send_unsubscribe(False, [utf8encode(topic)]) | python | def unsubscribe(self, topic):
"""Unsubscribe to some topic."""
if self.sock == NC.INVALID_SOCKET:
return NC.ERR_NO_CONN
self.logger.info("UNSUBSCRIBE: %s", topic)
return self.send_unsubscribe(False, [utf8encode(topic)]) | ['def', 'unsubscribe', '(', 'self', ',', 'topic', ')', ':', 'if', 'self', '.', 'sock', '==', 'NC', '.', 'INVALID_SOCKET', ':', 'return', 'NC', '.', 'ERR_NO_CONN', 'self', '.', 'logger', '.', 'info', '(', '"UNSUBSCRIBE: %s"', ',', 'topic', ')', 'return', 'self', '.', 'send_unsubscribe', '(', 'False', ',', '[', 'utf8encode', '(', 'topic', ')', ']', ')'] | Unsubscribe to some topic. | ['Unsubscribe', 'to', 'some', 'topic', '.'] | train | https://github.com/iwanbk/nyamuk/blob/ac4c6028de288a4c8e0b332ae16eae889deb643d/nyamuk/nyamuk.py#L212-L218 |
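A hedged usage sketch for Nyamuk.unsubscribe; the client constructor, the connect step, and the NC constants module path are assumptions, only unsubscribe() itself comes from the entry above.
from nyamuk.nyamuk import Nyamuk
import nyamuk.nyamuk_const as NC                         # assumed location of the NC constants

client = Nyamuk('example-client', server='localhost')    # assumed constructor arguments
client.connect()                                         # assumed connect helper
rc = client.unsubscribe('sensors/temperature')
if rc == NC.ERR_NO_CONN:
    print('socket not connected yet')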
5,152 | ynop/audiomate | audiomate/containers/audio.py | AudioContainer.set | def set(self, key, samples, sampling_rate):
"""
Set the samples and sampling-rate for the given key.
Existing data will be overwritten.
The samples have to have ``np.float32`` datatype and values in
the range of -1.0 and 1.0.
Args:
key (str): A key to store the data for.
samples (numpy.ndarray): 1-D array of audio samples (np.float32).
sampling_rate (int): The sampling-rate of the audio samples.
Note:
The container has to be opened in advance.
"""
if not np.issubdtype(samples.dtype, np.floating):
raise ValueError('Samples are required as np.float32!')
if len(samples.shape) > 1:
raise ValueError('Only single channel supported!')
self.raise_error_if_not_open()
if key in self._file:
del self._file[key]
samples = (samples * MAX_INT16_VALUE).astype(np.int16)
dset = self._file.create_dataset(key, data=samples)
dset.attrs[SAMPLING_RATE_ATTR] = sampling_rate | python | def set(self, key, samples, sampling_rate):
"""
Set the samples and sampling-rate for the given key.
Existing data will be overwritten.
The samples have to have ``np.float32`` datatype and values in
the range of -1.0 and 1.0.
Args:
key (str): A key to store the data for.
samples (numpy.ndarray): 1-D array of audio samples (np.float32).
sampling_rate (int): The sampling-rate of the audio samples.
Note:
The container has to be opened in advance.
"""
if not np.issubdtype(samples.dtype, np.floating):
raise ValueError('Samples are required as np.float32!')
if len(samples.shape) > 1:
raise ValueError('Only single channel supported!')
self.raise_error_if_not_open()
if key in self._file:
del self._file[key]
samples = (samples * MAX_INT16_VALUE).astype(np.int16)
dset = self._file.create_dataset(key, data=samples)
dset.attrs[SAMPLING_RATE_ATTR] = sampling_rate | ['def', 'set', '(', 'self', ',', 'key', ',', 'samples', ',', 'sampling_rate', ')', ':', 'if', 'not', 'np', '.', 'issubdtype', '(', 'samples', '.', 'dtype', ',', 'np', '.', 'floating', ')', ':', 'raise', 'ValueError', '(', "'Samples are required as np.float32!'", ')', 'if', 'len', '(', 'samples', '.', 'shape', ')', '>', '1', ':', 'raise', 'ValueError', '(', "'Only single channel supported!'", ')', 'self', '.', 'raise_error_if_not_open', '(', ')', 'if', 'key', 'in', 'self', '.', '_file', ':', 'del', 'self', '.', '_file', '[', 'key', ']', 'samples', '=', '(', 'samples', '*', 'MAX_INT16_VALUE', ')', '.', 'astype', '(', 'np', '.', 'int16', ')', 'dset', '=', 'self', '.', '_file', '.', 'create_dataset', '(', 'key', ',', 'data', '=', 'samples', ')', 'dset', '.', 'attrs', '[', 'SAMPLING_RATE_ATTR', ']', '=', 'sampling_rate'] | Set the samples and sampling-rate for the given key.
Existing data will be overwritten.
The samples have to have ``np.float32`` datatype and values in
the range of -1.0 and 1.0.
Args:
key (str): A key to store the data for.
samples (numpy.ndarray): 1-D array of audio samples (np.float32).
sampling_rate (int): The sampling-rate of the audio samples.
Note:
The container has to be opened in advance. | ['Set', 'the', 'samples', 'and', 'sampling', '-', 'rate', 'for', 'the', 'given', 'key', '.', 'Existing', 'data', 'will', 'be', 'overwritten', '.', 'The', 'samples', 'have', 'to', 'have', 'np', '.', 'float32', 'datatype', 'and', 'values', 'in', 'the', 'range', 'of', '-', '1', '.', '0', 'and', '1', '.', '0', '.'] | train | https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/containers/audio.py#L48-L77 |
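A hedged sketch of writing one utterance with AudioContainer.set; the import path, constructor argument and open()/close() calls are assumptions, while the dtype and value-range requirements follow the entry above.
import numpy as np
from audiomate.containers import AudioContainer    # assumed import path

samples = np.random.uniform(-1.0, 1.0, 16000).astype(np.float32)   # 1 s of noise in [-1, 1]
container = AudioContainer('audio.hdf5')                            # placeholder file
container.open()                                                    # container must be open
container.set('utt-001', samples, sampling_rate=16000)
container.close()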
5,153 | python-wink/python-wink | src/pywink/devices/siren.py | WinkSiren.set_chime_volume | def set_chime_volume(self, volume):
"""
:param volume: one of [low, medium, high]
"""
values = {
"desired_state": {
"chime_volume": volume
}
}
response = self.api_interface.set_device_state(self, values)
self._update_state_from_response(response) | python | def set_chime_volume(self, volume):
"""
:param volume: one of [low, medium, high]
"""
values = {
"desired_state": {
"chime_volume": volume
}
}
response = self.api_interface.set_device_state(self, values)
self._update_state_from_response(response) | ['def', 'set_chime_volume', '(', 'self', ',', 'volume', ')', ':', 'values', '=', '{', '"desired_state"', ':', '{', '"chime_volume"', ':', 'volume', '}', '}', 'response', '=', 'self', '.', 'api_interface', '.', 'set_device_state', '(', 'self', ',', 'values', ')', 'self', '.', '_update_state_from_response', '(', 'response', ')'] | :param volume: one of [low, medium, high] | [':', 'param', 'volume', ':', 'one', 'of', '[', 'low', 'medium', 'high', ']'] | train | https://github.com/python-wink/python-wink/blob/cf8bdce8c6518f30b91b23aa7aa32e89c2ce48da/src/pywink/devices/siren.py#L51-L61 |
5,154 | mikedh/trimesh | trimesh/path/path.py | Path.merge_vertices | def merge_vertices(self, digits=None):
"""
Merges vertices which are identical and replace references.
Parameters
--------------
digits : None, or int
How many digits to consider when merging vertices
Alters
-----------
self.entities : entity.points re- referenced
self.vertices : duplicates removed
"""
if len(self.vertices) == 0:
return
if digits is None:
digits = util.decimal_to_digits(tol.merge * self.scale,
min_digits=1)
unique, inverse = grouping.unique_rows(self.vertices,
digits=digits)
self.vertices = self.vertices[unique]
entities_ok = np.ones(len(self.entities), dtype=np.bool)
for index, entity in enumerate(self.entities):
# what kind of entity are we dealing with
kind = type(entity).__name__
# entities that don't need runs merged
# don't screw up control- point- knot relationship
if kind in 'BSpline Bezier Text':
entity.points = inverse[entity.points]
continue
# if we merged duplicate vertices, the entity may
# have multiple references to the same vertex
points = grouping.merge_runs(inverse[entity.points])
# if there are three points and two are identical fix it
if kind == 'Line':
if len(points) == 3 and points[0] == points[-1]:
points = points[:2]
elif len(points) < 2:
# lines need two or more vertices
entities_ok[index] = False
elif kind == 'Arc' and len(points) != 3:
# three point arcs need three points
entities_ok[index] = False
# store points in entity
entity.points = points
# remove degenerate entities
self.entities = self.entities[entities_ok] | python | def merge_vertices(self, digits=None):
"""
Merges vertices which are identical and replace references.
Parameters
--------------
digits : None, or int
How many digits to consider when merging vertices
Alters
-----------
self.entities : entity.points re- referenced
self.vertices : duplicates removed
"""
if len(self.vertices) == 0:
return
if digits is None:
digits = util.decimal_to_digits(tol.merge * self.scale,
min_digits=1)
unique, inverse = grouping.unique_rows(self.vertices,
digits=digits)
self.vertices = self.vertices[unique]
entities_ok = np.ones(len(self.entities), dtype=np.bool)
for index, entity in enumerate(self.entities):
# what kind of entity are we dealing with
kind = type(entity).__name__
# entities that don't need runs merged
# don't screw up control- point- knot relationship
if kind in 'BSpline Bezier Text':
entity.points = inverse[entity.points]
continue
# if we merged duplicate vertices, the entity may
# have multiple references to the same vertex
points = grouping.merge_runs(inverse[entity.points])
# if there are three points and two are identical fix it
if kind == 'Line':
if len(points) == 3 and points[0] == points[-1]:
points = points[:2]
elif len(points) < 2:
# lines need two or more vertices
entities_ok[index] = False
elif kind == 'Arc' and len(points) != 3:
# three point arcs need three points
entities_ok[index] = False
# store points in entity
entity.points = points
# remove degenerate entities
self.entities = self.entities[entities_ok] | ['def', 'merge_vertices', '(', 'self', ',', 'digits', '=', 'None', ')', ':', 'if', 'len', '(', 'self', '.', 'vertices', ')', '==', '0', ':', 'return', 'if', 'digits', 'is', 'None', ':', 'digits', '=', 'util', '.', 'decimal_to_digits', '(', 'tol', '.', 'merge', '*', 'self', '.', 'scale', ',', 'min_digits', '=', '1', ')', 'unique', ',', 'inverse', '=', 'grouping', '.', 'unique_rows', '(', 'self', '.', 'vertices', ',', 'digits', '=', 'digits', ')', 'self', '.', 'vertices', '=', 'self', '.', 'vertices', '[', 'unique', ']', 'entities_ok', '=', 'np', '.', 'ones', '(', 'len', '(', 'self', '.', 'entities', ')', ',', 'dtype', '=', 'np', '.', 'bool', ')', 'for', 'index', ',', 'entity', 'in', 'enumerate', '(', 'self', '.', 'entities', ')', ':', '# what kind of entity are we dealing with', 'kind', '=', 'type', '(', 'entity', ')', '.', '__name__', "# entities that don't need runs merged", "# don't screw up control- point- knot relationship", 'if', 'kind', 'in', "'BSpline Bezier Text'", ':', 'entity', '.', 'points', '=', 'inverse', '[', 'entity', '.', 'points', ']', 'continue', '# if we merged duplicate vertices, the entity may', '# have multiple references to the same vertex', 'points', '=', 'grouping', '.', 'merge_runs', '(', 'inverse', '[', 'entity', '.', 'points', ']', ')', '# if there are three points and two are identical fix it', 'if', 'kind', '==', "'Line'", ':', 'if', 'len', '(', 'points', ')', '==', '3', 'and', 'points', '[', '0', ']', '==', 'points', '[', '-', '1', ']', ':', 'points', '=', 'points', '[', ':', '2', ']', 'elif', 'len', '(', 'points', ')', '<', '2', ':', '# lines need two or more vertices', 'entities_ok', '[', 'index', ']', '=', 'False', 'elif', 'kind', '==', "'Arc'", 'and', 'len', '(', 'points', ')', '!=', '3', ':', '# three point arcs need three points', 'entities_ok', '[', 'index', ']', '=', 'False', '# store points in entity', 'entity', '.', 'points', '=', 'points', '# remove degenerate entities', 'self', '.', 'entities', '=', 'self', '.', 'entities', '[', 'entities_ok', ']'] | Merges vertices which are identical and replace references.
Parameters
--------------
digits : None, or int
How many digits to consider when merging vertices
Alters
-----------
self.entities : entity.points re- referenced
self.vertices : duplicates removed | ['Merges', 'vertices', 'which', 'are', 'identical', 'and', 'replace', 'references', '.'] | train | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/path.py#L485-L538 |
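A hedged sketch for merge_vertices on a loaded 2D path; the DXF file name is a placeholder and trimesh.load_path is the assumed way to obtain a Path object.
import trimesh

path = trimesh.load_path('drawing.dxf')    # placeholder vector drawing
path.merge_vertices(digits=5)              # merge vertices equal to 5 decimal places
print(len(path.vertices), len(path.entities))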
5,155 | cggh/scikit-allel | allel/stats/window.py | windowed_statistic | def windowed_statistic(pos, values, statistic, size=None, start=None,
stop=None, step=None, windows=None, fill=np.nan):
"""Calculate a statistic from items in windows over a single
chromosome/contig.
Parameters
----------
pos : array_like, int, shape (n_items,)
The item positions in ascending order, using 1-based coordinates..
values : array_like, int, shape (n_items,)
The values to summarise. May also be a tuple of values arrays,
in which case each array will be sliced and passed through to the
statistic function as separate arguments.
statistic : function
The statistic to compute.
size : int, optional
The window size (number of bases).
start : int, optional
The position at which to start (1-based).
stop : int, optional
The position at which to stop (1-based).
step : int, optional
The distance between start positions of windows. If not given,
defaults to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
Manually specify the windows to use as a sequence of (window_start,
window_stop) positions, using 1-based coordinates. Overrides the
size/start/stop/step parameters.
fill : object, optional
The value to use where a window is empty, i.e., contains no items.
Returns
-------
out : ndarray, shape (n_windows,)
The value of the statistic for each window.
windows : ndarray, int, shape (n_windows, 2)
The windows used, as an array of (window_start, window_stop) positions,
using 1-based coordinates.
counts : ndarray, int, shape (n_windows,)
The number of items in each window.
Notes
-----
The window stop positions are included within a window.
The final window will be truncated to the specified stop position,
and so may be smaller than the other windows.
Examples
--------
Count non-zero (i.e., True) items in non-overlapping windows::
>>> import allel
>>> pos = [1, 7, 12, 15, 28]
>>> values = [True, False, True, False, False]
>>> nnz, windows, counts = allel.windowed_statistic(
... pos, values, statistic=np.count_nonzero, size=10
... )
>>> nnz
array([1, 1, 0])
>>> windows
array([[ 1, 10],
[11, 20],
[21, 28]])
>>> counts
array([2, 2, 1])
Compute a sum over items in half-overlapping windows::
>>> values = [3, 4, 2, 6, 9]
>>> x, windows, counts = allel.windowed_statistic(
... pos, values, statistic=np.sum, size=10, step=5, fill=0
... )
>>> x
array([ 7, 12, 8, 0, 9])
>>> windows
array([[ 1, 10],
[ 6, 15],
[11, 20],
[16, 25],
[21, 28]])
>>> counts
array([2, 3, 2, 0, 1])
"""
# assume sorted positions
if not isinstance(pos, SortedIndex):
pos = SortedIndex(pos, copy=False)
# check lengths are equal
if isinstance(values, tuple):
# assume multiple values arrays
check_equal_length(pos, *values)
else:
# assume a single values array
check_equal_length(pos, values)
# setup windows
if windows is None:
windows = position_windows(pos, size, start, stop, step)
else:
windows = asarray_ndim(windows, 2)
# find window locations
locs = window_locations(pos, windows)
# setup outputs
out = []
counts = []
# iterate over windows
for start_idx, stop_idx in locs:
# calculate number of values in window
n = stop_idx - start_idx
if n == 0:
# window is empty
s = fill
else:
if isinstance(values, tuple):
# assume multiple values arrays
wv = [v[start_idx:stop_idx] for v in values]
s = statistic(*wv)
else:
# assume a single values array
wv = values[start_idx:stop_idx]
s = statistic(wv)
# store outputs
out.append(s)
counts.append(n)
# convert to arrays for output
return np.asarray(out), windows, np.asarray(counts) | python | def windowed_statistic(pos, values, statistic, size=None, start=None,
stop=None, step=None, windows=None, fill=np.nan):
"""Calculate a statistic from items in windows over a single
chromosome/contig.
Parameters
----------
pos : array_like, int, shape (n_items,)
The item positions in ascending order, using 1-based coordinates..
values : array_like, int, shape (n_items,)
The values to summarise. May also be a tuple of values arrays,
in which case each array will be sliced and passed through to the
statistic function as separate arguments.
statistic : function
The statistic to compute.
size : int, optional
The window size (number of bases).
start : int, optional
The position at which to start (1-based).
stop : int, optional
The position at which to stop (1-based).
step : int, optional
The distance between start positions of windows. If not given,
defaults to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
Manually specify the windows to use as a sequence of (window_start,
window_stop) positions, using 1-based coordinates. Overrides the
size/start/stop/step parameters.
fill : object, optional
The value to use where a window is empty, i.e., contains no items.
Returns
-------
out : ndarray, shape (n_windows,)
The value of the statistic for each window.
windows : ndarray, int, shape (n_windows, 2)
The windows used, as an array of (window_start, window_stop) positions,
using 1-based coordinates.
counts : ndarray, int, shape (n_windows,)
The number of items in each window.
Notes
-----
The window stop positions are included within a window.
The final window will be truncated to the specified stop position,
and so may be smaller than the other windows.
Examples
--------
Count non-zero (i.e., True) items in non-overlapping windows::
>>> import allel
>>> pos = [1, 7, 12, 15, 28]
>>> values = [True, False, True, False, False]
>>> nnz, windows, counts = allel.windowed_statistic(
... pos, values, statistic=np.count_nonzero, size=10
... )
>>> nnz
array([1, 1, 0])
>>> windows
array([[ 1, 10],
[11, 20],
[21, 28]])
>>> counts
array([2, 2, 1])
Compute a sum over items in half-overlapping windows::
>>> values = [3, 4, 2, 6, 9]
>>> x, windows, counts = allel.windowed_statistic(
... pos, values, statistic=np.sum, size=10, step=5, fill=0
... )
>>> x
array([ 7, 12, 8, 0, 9])
>>> windows
array([[ 1, 10],
[ 6, 15],
[11, 20],
[16, 25],
[21, 28]])
>>> counts
array([2, 3, 2, 0, 1])
"""
# assume sorted positions
if not isinstance(pos, SortedIndex):
pos = SortedIndex(pos, copy=False)
# check lengths are equal
if isinstance(values, tuple):
# assume multiple values arrays
check_equal_length(pos, *values)
else:
# assume a single values array
check_equal_length(pos, values)
# setup windows
if windows is None:
windows = position_windows(pos, size, start, stop, step)
else:
windows = asarray_ndim(windows, 2)
# find window locations
locs = window_locations(pos, windows)
# setup outputs
out = []
counts = []
# iterate over windows
for start_idx, stop_idx in locs:
# calculate number of values in window
n = stop_idx - start_idx
if n == 0:
# window is empty
s = fill
else:
if isinstance(values, tuple):
# assume multiple values arrays
wv = [v[start_idx:stop_idx] for v in values]
s = statistic(*wv)
else:
# assume a single values array
wv = values[start_idx:stop_idx]
s = statistic(wv)
# store outputs
out.append(s)
counts.append(n)
# convert to arrays for output
return np.asarray(out), windows, np.asarray(counts) | ['def', 'windowed_statistic', '(', 'pos', ',', 'values', ',', 'statistic', ',', 'size', '=', 'None', ',', 'start', '=', 'None', ',', 'stop', '=', 'None', ',', 'step', '=', 'None', ',', 'windows', '=', 'None', ',', 'fill', '=', 'np', '.', 'nan', ')', ':', '# assume sorted positions', 'if', 'not', 'isinstance', '(', 'pos', ',', 'SortedIndex', ')', ':', 'pos', '=', 'SortedIndex', '(', 'pos', ',', 'copy', '=', 'False', ')', '# check lengths are equal', 'if', 'isinstance', '(', 'values', ',', 'tuple', ')', ':', '# assume multiple values arrays', 'check_equal_length', '(', 'pos', ',', '*', 'values', ')', 'else', ':', '# assume a single values array', 'check_equal_length', '(', 'pos', ',', 'values', ')', '# setup windows', 'if', 'windows', 'is', 'None', ':', 'windows', '=', 'position_windows', '(', 'pos', ',', 'size', ',', 'start', ',', 'stop', ',', 'step', ')', 'else', ':', 'windows', '=', 'asarray_ndim', '(', 'windows', ',', '2', ')', '# find window locations', 'locs', '=', 'window_locations', '(', 'pos', ',', 'windows', ')', '# setup outputs', 'out', '=', '[', ']', 'counts', '=', '[', ']', '# iterate over windows', 'for', 'start_idx', ',', 'stop_idx', 'in', 'locs', ':', '# calculate number of values in window', 'n', '=', 'stop_idx', '-', 'start_idx', 'if', 'n', '==', '0', ':', '# window is empty', 's', '=', 'fill', 'else', ':', 'if', 'isinstance', '(', 'values', ',', 'tuple', ')', ':', '# assume multiple values arrays', 'wv', '=', '[', 'v', '[', 'start_idx', ':', 'stop_idx', ']', 'for', 'v', 'in', 'values', ']', 's', '=', 'statistic', '(', '*', 'wv', ')', 'else', ':', '# assume a single values array', 'wv', '=', 'values', '[', 'start_idx', ':', 'stop_idx', ']', 's', '=', 'statistic', '(', 'wv', ')', '# store outputs', 'out', '.', 'append', '(', 's', ')', 'counts', '.', 'append', '(', 'n', ')', '# convert to arrays for output', 'return', 'np', '.', 'asarray', '(', 'out', ')', ',', 'windows', ',', 'np', '.', 'asarray', '(', 'counts', ')'] | Calculate a statistic from items in windows over a single
chromosome/contig.
Parameters
----------
pos : array_like, int, shape (n_items,)
The item positions in ascending order, using 1-based coordinates..
values : array_like, int, shape (n_items,)
The values to summarise. May also be a tuple of values arrays,
in which case each array will be sliced and passed through to the
statistic function as separate arguments.
statistic : function
The statistic to compute.
size : int, optional
The window size (number of bases).
start : int, optional
The position at which to start (1-based).
stop : int, optional
The position at which to stop (1-based).
step : int, optional
The distance between start positions of windows. If not given,
defaults to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
Manually specify the windows to use as a sequence of (window_start,
window_stop) positions, using 1-based coordinates. Overrides the
size/start/stop/step parameters.
fill : object, optional
The value to use where a window is empty, i.e., contains no items.
Returns
-------
out : ndarray, shape (n_windows,)
The value of the statistic for each window.
windows : ndarray, int, shape (n_windows, 2)
The windows used, as an array of (window_start, window_stop) positions,
using 1-based coordinates.
counts : ndarray, int, shape (n_windows,)
The number of items in each window.
Notes
-----
The window stop positions are included within a window.
The final window will be truncated to the specified stop position,
and so may be smaller than the other windows.
Examples
--------
Count non-zero (i.e., True) items in non-overlapping windows::
>>> import allel
>>> pos = [1, 7, 12, 15, 28]
>>> values = [True, False, True, False, False]
>>> nnz, windows, counts = allel.windowed_statistic(
... pos, values, statistic=np.count_nonzero, size=10
... )
>>> nnz
array([1, 1, 0])
>>> windows
array([[ 1, 10],
[11, 20],
[21, 28]])
>>> counts
array([2, 2, 1])
Compute a sum over items in half-overlapping windows::
>>> values = [3, 4, 2, 6, 9]
>>> x, windows, counts = allel.windowed_statistic(
... pos, values, statistic=np.sum, size=10, step=5, fill=0
... )
>>> x
array([ 7, 12, 8, 0, 9])
>>> windows
array([[ 1, 10],
[ 6, 15],
[11, 20],
[16, 25],
[21, 28]])
>>> counts
array([2, 3, 2, 0, 1]) | ['Calculate', 'a', 'statistic', 'from', 'items', 'in', 'windows', 'over', 'a', 'single', 'chromosome', '/', 'contig', '.'] | train | https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/window.py#L234-L376 |
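A small additional sketch (not from the dataset row) showing the tuple-of-values form, which the function slices window by window and passes to the statistic as separate arguments; the data values are made up.
>>> import numpy as np
>>> import allel
>>> pos = [1, 7, 12, 15, 28]
>>> x = np.array([3.0, 4.0, 2.0, 6.0, 9.0])
>>> y = np.array([1.0, 0.5, 2.0, 3.0, 1.5])
>>> ratio, windows, counts = allel.windowed_statistic(
...     pos, (x, y), statistic=lambda a, b: np.sum(a) / np.sum(b), size=10)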
5,156 | galactics/beyond | beyond/orbits/orbit.py | Orbit.iter | def iter(self, **kwargs):
"""see :py:meth:`Propagator.iter() <beyond.propagators.base.Propagator.iter>`
"""
if self.propagator.orbit is not self:
self.propagator.orbit = self
return self.propagator.iter(**kwargs) | python | def iter(self, **kwargs):
"""see :py:meth:`Propagator.iter() <beyond.propagators.base.Propagator.iter>`
"""
if self.propagator.orbit is not self:
self.propagator.orbit = self
return self.propagator.iter(**kwargs) | ['def', 'iter', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'if', 'self', '.', 'propagator', '.', 'orbit', 'is', 'not', 'self', ':', 'self', '.', 'propagator', '.', 'orbit', '=', 'self', 'return', 'self', '.', 'propagator', '.', 'iter', '(', '*', '*', 'kwargs', ')'] | see :py:meth:`Propagator.iter() <beyond.propagators.base.Propagator.iter>` | ['see', ':', 'py', ':', 'meth', ':', 'Propagator', '.', 'iter', '()', '<beyond', '.', 'propagators', '.', 'base', '.', 'Propagator', '.', 'iter', '>'] | train | https://github.com/galactics/beyond/blob/7a7590ff0fd4c0bac3e8e383ecca03caa98e5742/beyond/orbits/orbit.py#L322-L328 |
5,157 | restran/mountains | mountains/http/__init__.py | query_str_2_dict | def query_str_2_dict(query_str):
"""
Convert a query string into a dictionary
a=123&b=456
{'a': '123', 'b': '456'}
:param query_str:
:return:
"""
if query_str:
query_list = query_str.split('&')
query_dict = {}
for t in query_list:
x = t.split('=')
query_dict[x[0]] = x[1]
else:
query_dict = {}
return query_dict | python | def query_str_2_dict(query_str):
"""
Convert a query string into a dictionary
a=123&b=456
{'a': '123', 'b': '456'}
:param query_str:
:return:
"""
if query_str:
query_list = query_str.split('&')
query_dict = {}
for t in query_list:
x = t.split('=')
query_dict[x[0]] = x[1]
else:
query_dict = {}
return query_dict | ['def', 'query_str_2_dict', '(', 'query_str', ')', ':', 'if', 'query_str', ':', 'query_list', '=', 'query_str', '.', 'split', '(', "'&'", ')', 'query_dict', '=', '{', '}', 'for', 't', 'in', 'query_list', ':', 'x', '=', 't', '.', 'split', '(', "'='", ')', 'query_dict', '[', 'x', '[', '0', ']', ']', '=', 'x', '[', '1', ']', 'else', ':', 'query_dict', '=', '{', '}', 'return', 'query_dict'] | Convert a query string into a dictionary
a=123&b=456
{'a': '123', 'b': '456'}
:param query_str:
:return: | ['Convert', 'a', 'query', 'string', 'into', 'a', 'dictionary', 'a', '=', '123&b', '=', '456', '{', 'a', ':', '123', 'b', ':', '456', '}', ':', 'param', 'query_str', ':', ':', 'return', ':'] | train | https://github.com/restran/mountains/blob/a97fee568b112f4e10d878f815d0db3dd0a98d74/mountains/http/__init__.py#L122-L138 |
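A doctest-style usage sketch derived directly from the entry above (the import path mirrors mountains/http/__init__.py):
>>> from mountains.http import query_str_2_dict
>>> query_str_2_dict('a=123&b=456')
{'a': '123', 'b': '456'}
>>> query_str_2_dict('')
{}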
5,158 | StanfordVL/robosuite | robosuite/models/base.py | MujocoXML.resolve_asset_dependency | def resolve_asset_dependency(self):
"""
Converts every file dependency into absolute path so when we merge we don't break things.
"""
for node in self.asset.findall("./*[@file]"):
file = node.get("file")
abs_path = os.path.abspath(self.folder)
abs_path = os.path.join(abs_path, file)
node.set("file", abs_path) | python | def resolve_asset_dependency(self):
"""
Converts every file dependency into absolute path so when we merge we don't break things.
"""
for node in self.asset.findall("./*[@file]"):
file = node.get("file")
abs_path = os.path.abspath(self.folder)
abs_path = os.path.join(abs_path, file)
node.set("file", abs_path) | ['def', 'resolve_asset_dependency', '(', 'self', ')', ':', 'for', 'node', 'in', 'self', '.', 'asset', '.', 'findall', '(', '"./*[@file]"', ')', ':', 'file', '=', 'node', '.', 'get', '(', '"file"', ')', 'abs_path', '=', 'os', '.', 'path', '.', 'abspath', '(', 'self', '.', 'folder', ')', 'abs_path', '=', 'os', '.', 'path', '.', 'join', '(', 'abs_path', ',', 'file', ')', 'node', '.', 'set', '(', '"file"', ',', 'abs_path', ')'] | Converts every file dependency into absolute path so when we merge we don't break things. | ['Converts', 'every', 'file', 'dependency', 'into', 'absolute', 'path', 'so', 'when', 'we', 'merge', 'we', 'don', 't', 'break', 'things', '.'] | train | https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/models/base.py#L37-L46 |
5,159 | gmr/rejected | rejected/controller.py | Controller.setup | def setup(self):
"""Continue the run process blocking on MasterControlProgram.run"""
# If the app was invoked to specified to prepend the path, do so now
if self.args.prepend_path:
self._prepend_python_path(self.args.prepend_path) | python | def setup(self):
"""Continue the run process blocking on MasterControlProgram.run"""
# If the app was invoked to specified to prepend the path, do so now
if self.args.prepend_path:
self._prepend_python_path(self.args.prepend_path) | ['def', 'setup', '(', 'self', ')', ':', '# If the app was invoked to specified to prepend the path, do so now', 'if', 'self', '.', 'args', '.', 'prepend_path', ':', 'self', '.', '_prepend_python_path', '(', 'self', '.', 'args', '.', 'prepend_path', ')'] | Continue the run process blocking on MasterControlProgram.run | ['Continue', 'the', 'run', 'process', 'blocking', 'on', 'MasterControlProgram', '.', 'run'] | train | https://github.com/gmr/rejected/blob/610a3e1401122ecb98d891b6795cca0255e5b044/rejected/controller.py#L63-L67 |
5,160 | raymondEhlers/pachyderm | pachyderm/projectors.py | HistProjector.output_key_name | def output_key_name(self, input_key: str, output_hist: Hist, projection_name: str, **kwargs) -> str:
""" Returns the key under which the output object should be stored.
Note:
This function is just a basic placeholder which returns the projection name
and likely should be overridden.
Args:
input_key: Key of the input hist in the input dict
output_hist: The output histogram
projection_name: Projection name for the output histogram
kwargs: Projection information dict combined with additional arguments passed to
the projection function.
Returns:
Key under which the output object should be stored. By default, it returns the
projection name.
"""
return projection_name | python | def output_key_name(self, input_key: str, output_hist: Hist, projection_name: str, **kwargs) -> str:
""" Returns the key under which the output object should be stored.
Note:
This function is just a basic placeholder which returns the projection name
and likely should be overridden.
Args:
input_key: Key of the input hist in the input dict
output_hist: The output histogram
projection_name: Projection name for the output histogram
kwargs: Projection information dict combined with additional arguments passed to
the projection function.
Returns:
Key under which the output object should be stored. By default, it returns the
projection name.
"""
return projection_name | ['def', 'output_key_name', '(', 'self', ',', 'input_key', ':', 'str', ',', 'output_hist', ':', 'Hist', ',', 'projection_name', ':', 'str', ',', '*', '*', 'kwargs', ')', '->', 'str', ':', 'return', 'projection_name'] | Returns the key under which the output object should be stored.
Note:
This function is just a basic placeholder which returns the projection name
and likely should be overridden.
Args:
input_key: Key of the input hist in the input dict
output_hist: The output histogram
projection_name: Projection name for the output histogram
kwargs: Projection information dict combined with additional arguments passed to
the projection function.
Returns:
Key under which the output object should be stored. By default, it returns the
projection name. | ['Returns', 'the', 'key', 'under', 'which', 'the', 'output', 'object', 'should', 'be', 'stored', '.'] | train | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/projectors.py#L707-L724 |
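A hedged sketch of the override pattern the docstring suggests; the subclass name and key format are hypothetical, only the method signature comes from the entry above.
from pachyderm import projectors

class MyProjector(projectors.HistProjector):
    def output_key_name(self, input_key, output_hist, projection_name, **kwargs):
        # keep projections from different input histograms separate
        return f"{input_key}_{projection_name}"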
5,161 | timster/peewee-validates | peewee_validates.py | Validator.validate | def validate(self, data=None, only=None, exclude=None):
"""
Validate the data for all fields and return whether the validation was successful.
This method also retains the validated data in ``self.data`` so that it can be accessed later.
This is usually the method you want to call after creating the validator instance.
:param data: Dictionary of data to validate.
:param only: List or tuple of fields to validate.
:param exclude: List or tuple of fields to exclude from validation.
:return: True if validation was successful. Otherwise False.
"""
only = only or []
exclude = exclude or []
data = data or {}
self.errors = {}
self.data = {}
# Validate individual fields.
for name, field in self._meta.fields.items():
if name in exclude or (only and name not in only):
continue
try:
field.validate(name, data)
except ValidationError as err:
self.add_error(name, err)
continue
self.data[name] = field.value
# Clean individual fields.
if not self.errors:
self.clean_fields(self.data)
# Then finally clean the whole data dict.
if not self.errors:
try:
self.data = self.clean(self.data)
except ValidationError as err:
self.add_error('__base__', err)
return (not self.errors) | python | def validate(self, data=None, only=None, exclude=None):
"""
Validate the data for all fields and return whether the validation was successful.
This method also retains the validated data in ``self.data`` so that it can be accessed later.
This is usually the method you want to call after creating the validator instance.
:param data: Dictionary of data to validate.
:param only: List or tuple of fields to validate.
:param exclude: List or tuple of fields to exclude from validation.
:return: True if validation was successful. Otherwise False.
"""
only = only or []
exclude = exclude or []
data = data or {}
self.errors = {}
self.data = {}
# Validate individual fields.
for name, field in self._meta.fields.items():
if name in exclude or (only and name not in only):
continue
try:
field.validate(name, data)
except ValidationError as err:
self.add_error(name, err)
continue
self.data[name] = field.value
# Clean individual fields.
if not self.errors:
self.clean_fields(self.data)
# Then finally clean the whole data dict.
if not self.errors:
try:
self.data = self.clean(self.data)
except ValidationError as err:
self.add_error('__base__', err)
return (not self.errors) | ['def', 'validate', '(', 'self', ',', 'data', '=', 'None', ',', 'only', '=', 'None', ',', 'exclude', '=', 'None', ')', ':', 'only', '=', 'only', 'or', '[', ']', 'exclude', '=', 'exclude', 'or', '[', ']', 'data', '=', 'data', 'or', '{', '}', 'self', '.', 'errors', '=', '{', '}', 'self', '.', 'data', '=', '{', '}', '# Validate individual fields.', 'for', 'name', ',', 'field', 'in', 'self', '.', '_meta', '.', 'fields', '.', 'items', '(', ')', ':', 'if', 'name', 'in', 'exclude', 'or', '(', 'only', 'and', 'name', 'not', 'in', 'only', ')', ':', 'continue', 'try', ':', 'field', '.', 'validate', '(', 'name', ',', 'data', ')', 'except', 'ValidationError', 'as', 'err', ':', 'self', '.', 'add_error', '(', 'name', ',', 'err', ')', 'continue', 'self', '.', 'data', '[', 'name', ']', '=', 'field', '.', 'value', '# Clean individual fields.', 'if', 'not', 'self', '.', 'errors', ':', 'self', '.', 'clean_fields', '(', 'self', '.', 'data', ')', '# Then finally clean the whole data dict.', 'if', 'not', 'self', '.', 'errors', ':', 'try', ':', 'self', '.', 'data', '=', 'self', '.', 'clean', '(', 'self', '.', 'data', ')', 'except', 'ValidationError', 'as', 'err', ':', 'self', '.', 'add_error', '(', "'__base__'", ',', 'err', ')', 'return', '(', 'not', 'self', '.', 'errors', ')'] | Validate the data for all fields and return whether the validation was successful.
This method also retains the validated data in ``self.data`` so that it can be accessed later.
This is usually the method you want to call after creating the validator instance.
:param data: Dictionary of data to validate.
:param only: List or tuple of fields to validate.
:param exclude: List or tuple of fields to exclude from validation.
:return: True if validation was successful. Otherwise False. | ['Validate', 'the', 'data', 'for', 'all', 'fields', 'and', 'return', 'whether', 'the', 'validation', 'was', 'successful', '.', 'This', 'method', 'also', 'retains', 'the', 'validated', 'data', 'in', 'self', '.', 'data', 'so', 'that', 'it', 'can', 'be', 'accessed', 'later', '.'] | train | https://github.com/timster/peewee-validates/blob/417f0fafb87fe9209439d65bc279d86a3d9e8028/peewee_validates.py#L755-L795 |
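A hedged sketch of defining and running a validator; StringField/IntegerField and the required keyword are assumed parts of the peewee-validates API, while validate(), data and errors follow the entry above.
from peewee_validates import Validator, StringField, IntegerField   # assumed field classes

class SignupValidator(Validator):
    name = StringField(required=True)
    age = IntegerField(required=False)

v = SignupValidator()
if v.validate({'name': 'Ada', 'age': '36'}):
    print(v.data)      # cleaned, validated data
else:
    print(v.errors)    # per-field error messages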
5,162 | assamite/creamas | creamas/mp.py | spawn_container | def spawn_container(addr, env_cls=Environment,
mgr_cls=EnvManager, set_seed=True, *args, **kwargs):
"""Spawn a new environment in a given address as a coroutine.
Arguments and keyword arguments are passed down to the created environment
at initialization time.
If `setproctitle <https://pypi.python.org/pypi/setproctitle>`_ is
installed, this function renames the title of the process to start with
'creamas' so that the process is easily identifiable, e.g. with
``ps -x | grep creamas``.
"""
# Try setting the process name to easily recognize the spawned
# environments with 'ps -x' or 'top'
try:
import setproctitle as spt
title = 'creamas: {}({})'.format(env_cls.__class__.__name__,
_get_base_url(addr))
spt.setproctitle(title)
except:
pass
if set_seed:
_set_random_seeds()
# kwargs['codec'] = aiomas.MsgPack
task = start(addr, env_cls, mgr_cls, *args, **kwargs)
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(task) | python | def spawn_container(addr, env_cls=Environment,
mgr_cls=EnvManager, set_seed=True, *args, **kwargs):
"""Spawn a new environment in a given address as a coroutine.
Arguments and keyword arguments are passed down to the created environment
at initialization time.
If `setproctitle <https://pypi.python.org/pypi/setproctitle>`_ is
installed, this function renames the title of the process to start with
'creamas' so that the process is easily identifiable, e.g. with
``ps -x | grep creamas``.
"""
# Try setting the process name to easily recognize the spawned
# environments with 'ps -x' or 'top'
try:
import setproctitle as spt
title = 'creamas: {}({})'.format(env_cls.__class__.__name__,
_get_base_url(addr))
spt.setproctitle(title)
except:
pass
if set_seed:
_set_random_seeds()
# kwargs['codec'] = aiomas.MsgPack
task = start(addr, env_cls, mgr_cls, *args, **kwargs)
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(task) | ['def', 'spawn_container', '(', 'addr', ',', 'env_cls', '=', 'Environment', ',', 'mgr_cls', '=', 'EnvManager', ',', 'set_seed', '=', 'True', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '# Try setting the process name to easily recognize the spawned', "# environments with 'ps -x' or 'top'", 'try', ':', 'import', 'setproctitle', 'as', 'spt', 'title', '=', "'creamas: {}({})'", '.', 'format', '(', 'env_cls', '.', '__class__', '.', '__name__', ',', '_get_base_url', '(', 'addr', ')', ')', 'spt', '.', 'setproctitle', '(', 'title', ')', 'except', ':', 'pass', 'if', 'set_seed', ':', '_set_random_seeds', '(', ')', "# kwargs['codec'] = aiomas.MsgPack", 'task', '=', 'start', '(', 'addr', ',', 'env_cls', ',', 'mgr_cls', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'loop', '=', 'asyncio', '.', 'new_event_loop', '(', ')', 'asyncio', '.', 'set_event_loop', '(', 'loop', ')', 'loop', '.', 'run_until_complete', '(', 'task', ')'] | Spawn a new environment in a given address as a coroutine.
Arguments and keyword arguments are passed down to the created environment
at initialization time.
If `setproctitle <https://pypi.python.org/pypi/setproctitle>`_ is
installed, this function renames the title of the process to start with
'creamas' so that the process is easily identifiable, e.g. with
``ps -x | grep creamas``. | ['Spawn', 'a', 'new', 'environment', 'in', 'a', 'given', 'address', 'as', 'a', 'coroutine', '.'] | train | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/mp.py#L886-L915 |
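A hedged sketch: spawn_container blocks on its own event loop, so it is typically launched in a separate process; the (host, port) address form is an assumption.
import multiprocessing
from creamas.mp import spawn_container

p = multiprocessing.Process(
    target=spawn_container,
    kwargs={'addr': ('localhost', 5555)})    # assumed address format
p.start()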
5,163 | PSPC-SPAC-buyandsell/von_anchor | von_anchor/wallet/wallet.py | Wallet.create_link_secret | async def create_link_secret(self, label: str) -> None:
"""
Create link secret (a.k.a. master secret) used in proofs by HolderProver, if the
current link secret does not already correspond to the input link secret label.
Raise WalletState if wallet is closed, or any other IndyError causing failure
to set link secret in wallet.
:param label: label for link secret; indy-sdk uses label to generate link secret
"""
LOGGER.debug('Wallet.create_link_secret >>> label: %s', label)
if not self.handle:
LOGGER.debug('Wallet.create_link_secret <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
try:
await anoncreds.prover_create_master_secret(self.handle, label)
await self._write_link_secret_label(label)
except IndyError as x_indy:
if x_indy.error_code == ErrorCode.AnoncredsMasterSecretDuplicateNameError:
LOGGER.warning(
'Wallet %s link secret already current: abstaining from updating label record', self.name)
await self._write_link_secret_label(label)
else:
LOGGER.debug(
'Wallet.create_link_secret <!< cannot create link secret for wallet %s, indy error code %s',
self.name,
x_indy.error_code)
raise
LOGGER.debug('Wallet.create_link_secret <<<') | python | async def create_link_secret(self, label: str) -> None:
"""
Create link secret (a.k.a. master secret) used in proofs by HolderProver, if the
current link secret does not already correspond to the input link secret label.
Raise WalletState if wallet is closed, or any other IndyError causing failure
to set link secret in wallet.
:param label: label for link secret; indy-sdk uses label to generate link secret
"""
LOGGER.debug('Wallet.create_link_secret >>> label: %s', label)
if not self.handle:
LOGGER.debug('Wallet.create_link_secret <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
try:
await anoncreds.prover_create_master_secret(self.handle, label)
await self._write_link_secret_label(label)
except IndyError as x_indy:
if x_indy.error_code == ErrorCode.AnoncredsMasterSecretDuplicateNameError:
LOGGER.warning(
'Wallet %s link secret already current: abstaining from updating label record', self.name)
await self._write_link_secret_label(label)
else:
LOGGER.debug(
'Wallet.create_link_secret <!< cannot create link secret for wallet %s, indy error code %s',
self.name,
x_indy.error_code)
raise
LOGGER.debug('Wallet.create_link_secret <<<') | ['async', 'def', 'create_link_secret', '(', 'self', ',', 'label', ':', 'str', ')', '->', 'None', ':', 'LOGGER', '.', 'debug', '(', "'Wallet.create_link_secret >>> label: %s'", ',', 'label', ')', 'if', 'not', 'self', '.', 'handle', ':', 'LOGGER', '.', 'debug', '(', "'Wallet.create_link_secret <!< Wallet %s is closed'", ',', 'self', '.', 'name', ')', 'raise', 'WalletState', '(', "'Wallet {} is closed'", '.', 'format', '(', 'self', '.', 'name', ')', ')', 'try', ':', 'await', 'anoncreds', '.', 'prover_create_master_secret', '(', 'self', '.', 'handle', ',', 'label', ')', 'await', 'self', '.', '_write_link_secret_label', '(', 'label', ')', 'except', 'IndyError', 'as', 'x_indy', ':', 'if', 'x_indy', '.', 'error_code', '==', 'ErrorCode', '.', 'AnoncredsMasterSecretDuplicateNameError', ':', 'LOGGER', '.', 'warning', '(', "'Wallet %s link secret already current: abstaining from updating label record'", ',', 'self', '.', 'name', ')', 'await', 'self', '.', '_write_link_secret_label', '(', 'label', ')', 'else', ':', 'LOGGER', '.', 'debug', '(', "'Wallet.create_link_secret <!< cannot create link secret for wallet %s, indy error code %s'", ',', 'self', '.', 'name', ',', 'x_indy', '.', 'error_code', ')', 'raise', 'LOGGER', '.', 'debug', '(', "'Wallet.create_link_secret <<<'", ')'] | Create link secret (a.k.a. master secret) used in proofs by HolderProver, if the
current link secret does not already correspond to the input link secret label.
Raise WalletState if wallet is closed, or any other IndyError causing failure
to set link secret in wallet.
:param label: label for link secret; indy-sdk uses label to generate link secret | ['Create', 'link', 'secret', '(', 'a', '.', 'k', '.', 'a', '.', 'master', 'secret', ')', 'used', 'in', 'proofs', 'by', 'HolderProver', 'if', 'the', 'current', 'link', 'secret', 'does', 'not', 'already', 'correspond', 'to', 'the', 'input', 'link', 'secret', 'label', '.'] | train | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/wallet/wallet.py#L492-L524 |
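A hedged sketch: obtaining an open Wallet is assumed and omitted here; the coroutine call itself follows the entry above.
import asyncio

async def set_label(wallet):                           # 'wallet' is an open von_anchor Wallet (assumed)
    await wallet.create_link_secret('link-secret-1')   # label update is skipped if already current

# asyncio.get_event_loop().run_until_complete(set_label(wallet))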
5,164 | gem/oq-engine | openquake/calculators/export/risk.py | export_agg_losses | def export_agg_losses(ekey, dstore):
"""
:param ekey: export key, i.e. a pair (datastore key, fmt)
:param dstore: datastore object
"""
dskey = ekey[0]
oq = dstore['oqparam']
dt = oq.loss_dt()
name, value, tags = _get_data(dstore, dskey, oq.hazard_stats().items())
writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
expvalue = dstore['exposed_value'].value # shape (T1, T2, ..., L)
tagcol = dstore['assetcol/tagcol']
tagnames = tuple(dstore['oqparam'].aggregate_by)
header = ('loss_type',) + tagnames + (
'loss_value', 'exposed_value', 'loss_ratio')
for r, tag in enumerate(tags):
rows = []
for multi_idx, loss in numpy.ndenumerate(value[:, r]):
l, *tagidxs = multi_idx
evalue = expvalue[tuple(tagidxs) + (l,)]
row = tagcol.get_tagvalues(tagnames, tagidxs) + (
loss, evalue, loss / evalue)
rows.append((dt.names[l],) + row)
dest = dstore.build_fname(name, tag, 'csv')
writer.save(rows, dest, header)
return writer.getsaved() | python | def export_agg_losses(ekey, dstore):
"""
:param ekey: export key, i.e. a pair (datastore key, fmt)
:param dstore: datastore object
"""
dskey = ekey[0]
oq = dstore['oqparam']
dt = oq.loss_dt()
name, value, tags = _get_data(dstore, dskey, oq.hazard_stats().items())
writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
expvalue = dstore['exposed_value'].value # shape (T1, T2, ..., L)
tagcol = dstore['assetcol/tagcol']
tagnames = tuple(dstore['oqparam'].aggregate_by)
header = ('loss_type',) + tagnames + (
'loss_value', 'exposed_value', 'loss_ratio')
for r, tag in enumerate(tags):
rows = []
for multi_idx, loss in numpy.ndenumerate(value[:, r]):
l, *tagidxs = multi_idx
evalue = expvalue[tuple(tagidxs) + (l,)]
row = tagcol.get_tagvalues(tagnames, tagidxs) + (
loss, evalue, loss / evalue)
rows.append((dt.names[l],) + row)
dest = dstore.build_fname(name, tag, 'csv')
writer.save(rows, dest, header)
return writer.getsaved() | ['def', 'export_agg_losses', '(', 'ekey', ',', 'dstore', ')', ':', 'dskey', '=', 'ekey', '[', '0', ']', 'oq', '=', 'dstore', '[', "'oqparam'", ']', 'dt', '=', 'oq', '.', 'loss_dt', '(', ')', 'name', ',', 'value', ',', 'tags', '=', '_get_data', '(', 'dstore', ',', 'dskey', ',', 'oq', '.', 'hazard_stats', '(', ')', '.', 'items', '(', ')', ')', 'writer', '=', 'writers', '.', 'CsvWriter', '(', 'fmt', '=', 'writers', '.', 'FIVEDIGITS', ')', 'expvalue', '=', 'dstore', '[', "'exposed_value'", ']', '.', 'value', '# shape (T1, T2, ..., L)', 'tagcol', '=', 'dstore', '[', "'assetcol/tagcol'", ']', 'tagnames', '=', 'tuple', '(', 'dstore', '[', "'oqparam'", ']', '.', 'aggregate_by', ')', 'header', '=', '(', "'loss_type'", ',', ')', '+', 'tagnames', '+', '(', "'loss_value'", ',', "'exposed_value'", ',', "'loss_ratio'", ')', 'for', 'r', ',', 'tag', 'in', 'enumerate', '(', 'tags', ')', ':', 'rows', '=', '[', ']', 'for', 'multi_idx', ',', 'loss', 'in', 'numpy', '.', 'ndenumerate', '(', 'value', '[', ':', ',', 'r', ']', ')', ':', 'l', ',', '', '*', 'tagidxs', '=', 'multi_idx', 'evalue', '=', 'expvalue', '[', 'tuple', '(', 'tagidxs', ')', '+', '(', 'l', ',', ')', ']', 'row', '=', 'tagcol', '.', 'get_tagvalues', '(', 'tagnames', ',', 'tagidxs', ')', '+', '(', 'loss', ',', 'evalue', ',', 'loss', '/', 'evalue', ')', 'rows', '.', 'append', '(', '(', 'dt', '.', 'names', '[', 'l', ']', ',', ')', '+', 'row', ')', 'dest', '=', 'dstore', '.', 'build_fname', '(', 'name', ',', 'tag', ',', "'csv'", ')', 'writer', '.', 'save', '(', 'rows', ',', 'dest', ',', 'header', ')', 'return', 'writer', '.', 'getsaved', '(', ')'] | :param ekey: export key, i.e. a pair (datastore key, fmt)
:param dstore: datastore object | [':', 'param', 'ekey', ':', 'export', 'key', 'i', '.', 'e', '.', 'a', 'pair', '(', 'datastore', 'key', 'fmt', ')', ':', 'param', 'dstore', ':', 'datastore', 'object'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/export/risk.py#L153-L178 |
5,165 | elifesciences/proofreader-python | proofreader/runner.py | _run_command | def _run_command(command, targets, options):
# type: (str, List[str], List[str]) -> bool
"""Runs `command` + `targets` + `options` in a
subprocess and returns a boolean determined by the
process return code.
>>> result = run_command('pylint', ['foo.py', 'some_module'], ['-E'])
>>> result
True
:param command: str
:param targets: List[str]
:param options: List[str]
:return: bool
"""
print('{0}: targets={1} options={2}'.format(command, targets, options))
cmd = [command] + targets + options
process = Popen(cmd)
process.wait()
return bool(process.returncode) | python | def _run_command(command, targets, options):
# type: (str, List[str], List[str]) -> bool
"""Runs `command` + `targets` + `options` in a
subprocess and returns a boolean determined by the
process return code.
>>> result = run_command('pylint', ['foo.py', 'some_module'], ['-E'])
>>> result
True
:param command: str
:param targets: List[str]
:param options: List[str]
:return: bool
"""
print('{0}: targets={1} options={2}'.format(command, targets, options))
cmd = [command] + targets + options
process = Popen(cmd)
process.wait()
return bool(process.returncode) | ['def', '_run_command', '(', 'command', ',', 'targets', ',', 'options', ')', ':', '# type: (str, List[str], List[str]) -> bool', 'print', '(', "'{0}: targets={1} options={2}'", '.', 'format', '(', 'command', ',', 'targets', ',', 'options', ')', ')', 'cmd', '=', '[', 'command', ']', '+', 'targets', '+', 'options', 'process', '=', 'Popen', '(', 'cmd', ')', 'process', '.', 'wait', '(', ')', 'return', 'bool', '(', 'process', '.', 'returncode', ')'] | Runs `command` + `targets` + `options` in a
subprocess and returns a boolean determined by the
process return code.
>>> result = run_command('pylint', ['foo.py', 'some_module'], ['-E'])
>>> result
True
:param command: str
:param targets: List[str]
:param options: List[str]
:return: bool | ['Runs', 'command', '+', 'targets', '+', 'options', 'in', 'a', 'subprocess', 'and', 'returns', 'a', 'boolean', 'determined', 'by', 'the', 'process', 'return', 'code', '.'] | train | https://github.com/elifesciences/proofreader-python/blob/387b3c65ee7777e26b3a7340179dc4ed68f24f58/proofreader/runner.py#L55-L75 |
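A hedged sketch for the module-private helper; note that, as written, the function returns bool(process.returncode), i.e. a truthy value when the subprocess exits non-zero, so the result reads more naturally as a failure flag. The target and option names below are placeholders.
from proofreader.runner import _run_command

failed = _run_command('pylint', ['mypackage'], ['--errors-only'])
if failed:
    print('pylint reported problems (non-zero exit code)')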
5,166 | hydraplatform/hydra-base | hydra_base/lib/template.py | validate_attr | def validate_attr(resource_attr_id, scenario_id, template_id=None):
"""
Check that a resource attribute satisfies the requirements of all the types of the
resource.
"""
rs = db.DBSession.query(ResourceScenario).\
filter(ResourceScenario.resource_attr_id==resource_attr_id,
ResourceScenario.scenario_id==scenario_id).options(
joinedload_all("resourceattr")).options(
joinedload_all("dataset")
).one()
error = None
try:
_do_validate_resourcescenario(rs, template_id)
except HydraError as e:
error = JSONObject(dict(
ref_key = rs.resourceattr.ref_key,
ref_id = rs.resourceattr.get_resource_id(),
ref_name = rs.resourceattr.get_resource().get_name(),
resource_attr_id = rs.resource_attr_id,
attr_id = rs.resourceattr.attr.id,
attr_name = rs.resourceattr.attr.name,
dataset_id = rs.dataset_id,
scenario_id=scenario_id,
template_id=template_id,
error_text=e.args[0]))
return error | python | def validate_attr(resource_attr_id, scenario_id, template_id=None):
"""
Check that a resource attribute satisfies the requirements of all the types of the
resource.
"""
rs = db.DBSession.query(ResourceScenario).\
filter(ResourceScenario.resource_attr_id==resource_attr_id,
ResourceScenario.scenario_id==scenario_id).options(
joinedload_all("resourceattr")).options(
joinedload_all("dataset")
).one()
error = None
try:
_do_validate_resourcescenario(rs, template_id)
except HydraError as e:
error = JSONObject(dict(
ref_key = rs.resourceattr.ref_key,
ref_id = rs.resourceattr.get_resource_id(),
ref_name = rs.resourceattr.get_resource().get_name(),
resource_attr_id = rs.resource_attr_id,
attr_id = rs.resourceattr.attr.id,
attr_name = rs.resourceattr.attr.name,
dataset_id = rs.dataset_id,
scenario_id=scenario_id,
template_id=template_id,
error_text=e.args[0]))
return error | ['def', 'validate_attr', '(', 'resource_attr_id', ',', 'scenario_id', ',', 'template_id', '=', 'None', ')', ':', 'rs', '=', 'db', '.', 'DBSession', '.', 'query', '(', 'ResourceScenario', ')', '.', 'filter', '(', 'ResourceScenario', '.', 'resource_attr_id', '==', 'resource_attr_id', ',', 'ResourceScenario', '.', 'scenario_id', '==', 'scenario_id', ')', '.', 'options', '(', 'joinedload_all', '(', '"resourceattr"', ')', ')', '.', 'options', '(', 'joinedload_all', '(', '"dataset"', ')', ')', '.', 'one', '(', ')', 'error', '=', 'None', 'try', ':', '_do_validate_resourcescenario', '(', 'rs', ',', 'template_id', ')', 'except', 'HydraError', 'as', 'e', ':', 'error', '=', 'JSONObject', '(', 'dict', '(', 'ref_key', '=', 'rs', '.', 'resourceattr', '.', 'ref_key', ',', 'ref_id', '=', 'rs', '.', 'resourceattr', '.', 'get_resource_id', '(', ')', ',', 'ref_name', '=', 'rs', '.', 'resourceattr', '.', 'get_resource', '(', ')', '.', 'get_name', '(', ')', ',', 'resource_attr_id', '=', 'rs', '.', 'resource_attr_id', ',', 'attr_id', '=', 'rs', '.', 'resourceattr', '.', 'attr', '.', 'id', ',', 'attr_name', '=', 'rs', '.', 'resourceattr', '.', 'attr', '.', 'name', ',', 'dataset_id', '=', 'rs', '.', 'dataset_id', ',', 'scenario_id', '=', 'scenario_id', ',', 'template_id', '=', 'template_id', ',', 'error_text', '=', 'e', '.', 'args', '[', '0', ']', ')', ')', 'return', 'error'] | Check that a resource attribute satisfies the requirements of all the types of the
resource. | ['Check', 'that', 'a', 'resource', 'attribute', 'satisfies', 'the', 'requirements', 'of', 'all', 'the', 'types', 'of', 'the', 'resource', '.'] | train | https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/template.py#L1724-L1753 |
5,167 | pricingassistant/mrq | mrq/processes.py | ProcessPool.stop_watch | def stop_watch(self):
""" Stops the periodic watch greenlet, thus the pool itself """
if self.greenlet_watch:
self.greenlet_watch.kill(block=False)
self.greenlet_watch = None | python | def stop_watch(self):
""" Stops the periodic watch greenlet, thus the pool itself """
if self.greenlet_watch:
self.greenlet_watch.kill(block=False)
self.greenlet_watch = None | ['def', 'stop_watch', '(', 'self', ')', ':', 'if', 'self', '.', 'greenlet_watch', ':', 'self', '.', 'greenlet_watch', '.', 'kill', '(', 'block', '=', 'False', ')', 'self', '.', 'greenlet_watch', '=', 'None'] | Stops the periodic watch greenlet, thus the pool itself | ['Stops', 'the', 'periodic', 'watch', 'greenlet', 'thus', 'the', 'pool', 'itself'] | train | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/processes.py#L204-L209 |
5,168 | bcbio/bcbio-nextgen | bcbio/utils.py | partition_all | def partition_all(n, iterable):
"""Partition a list into equally sized pieces, including last smaller parts
http://stackoverflow.com/questions/5129102/python-equivalent-to-clojures-partition-all
"""
it = iter(iterable)
while True:
chunk = list(itertools.islice(it, n))
if not chunk:
break
yield chunk | python | def partition_all(n, iterable):
"""Partition a list into equally sized pieces, including last smaller parts
http://stackoverflow.com/questions/5129102/python-equivalent-to-clojures-partition-all
"""
it = iter(iterable)
while True:
chunk = list(itertools.islice(it, n))
if not chunk:
break
yield chunk | ['def', 'partition_all', '(', 'n', ',', 'iterable', ')', ':', 'it', '=', 'iter', '(', 'iterable', ')', 'while', 'True', ':', 'chunk', '=', 'list', '(', 'itertools', '.', 'islice', '(', 'it', ',', 'n', ')', ')', 'if', 'not', 'chunk', ':', 'break', 'yield', 'chunk'] | Partition a list into equally sized pieces, including last smaller parts
http://stackoverflow.com/questions/5129102/python-equivalent-to-clojures-partition-all | ['Partition', 'a', 'list', 'into', 'equally', 'sized', 'pieces', 'including', 'last', 'smaller', 'parts', 'http', ':', '//', 'stackoverflow', '.', 'com', '/', 'questions', '/', '5129102', '/', 'python', '-', 'equivalent', '-', 'to', '-', 'clojures', '-', 'partition', '-', 'all'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L433-L442 |
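A doctest-style sketch derived directly from the entry above:
>>> from bcbio.utils import partition_all
>>> list(partition_all(3, range(8)))
[[0, 1, 2], [3, 4, 5], [6, 7]]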
5,169 | SheffieldML/GPyOpt | GPyOpt/models/gpmodel.py | GPModel.updateModel | def updateModel(self, X_all, Y_all, X_new, Y_new):
"""
Updates the model with new observations.
"""
if self.model is None:
self._create_model(X_all, Y_all)
else:
self.model.set_XY(X_all, Y_all)
# WARNING: Even if self.max_iters=0, the hyperparameters are bit modified...
if self.max_iters > 0:
# --- update the model maximizing the marginal likelihood.
if self.optimize_restarts==1:
self.model.optimize(optimizer=self.optimizer, max_iters = self.max_iters, messages=False, ipython_notebook=False)
else:
self.model.optimize_restarts(num_restarts=self.optimize_restarts, optimizer=self.optimizer, max_iters = self.max_iters, verbose=self.verbose) | python | def updateModel(self, X_all, Y_all, X_new, Y_new):
"""
Updates the model with new observations.
"""
if self.model is None:
self._create_model(X_all, Y_all)
else:
self.model.set_XY(X_all, Y_all)
# WARNING: Even if self.max_iters=0, the hyperparameters are bit modified...
if self.max_iters > 0:
# --- update the model maximizing the marginal likelihood.
if self.optimize_restarts==1:
self.model.optimize(optimizer=self.optimizer, max_iters = self.max_iters, messages=False, ipython_notebook=False)
else:
self.model.optimize_restarts(num_restarts=self.optimize_restarts, optimizer=self.optimizer, max_iters = self.max_iters, verbose=self.verbose) | ['def', 'updateModel', '(', 'self', ',', 'X_all', ',', 'Y_all', ',', 'X_new', ',', 'Y_new', ')', ':', 'if', 'self', '.', 'model', 'is', 'None', ':', 'self', '.', '_create_model', '(', 'X_all', ',', 'Y_all', ')', 'else', ':', 'self', '.', 'model', '.', 'set_XY', '(', 'X_all', ',', 'Y_all', ')', '# WARNING: Even if self.max_iters=0, the hyperparameters are bit modified...', 'if', 'self', '.', 'max_iters', '>', '0', ':', '# --- update the model maximizing the marginal likelihood.', 'if', 'self', '.', 'optimize_restarts', '==', '1', ':', 'self', '.', 'model', '.', 'optimize', '(', 'optimizer', '=', 'self', '.', 'optimizer', ',', 'max_iters', '=', 'self', '.', 'max_iters', ',', 'messages', '=', 'False', ',', 'ipython_notebook', '=', 'False', ')', 'else', ':', 'self', '.', 'model', '.', 'optimize_restarts', '(', 'num_restarts', '=', 'self', '.', 'optimize_restarts', ',', 'optimizer', '=', 'self', '.', 'optimizer', ',', 'max_iters', '=', 'self', '.', 'max_iters', ',', 'verbose', '=', 'self', '.', 'verbose', ')'] | Updates the model with new observations. | ['Updates', 'the', 'model', 'with', 'new', 'observations', '.'] | train | https://github.com/SheffieldML/GPyOpt/blob/255539dc5927819ca701e44fe3d76cd4864222fa/GPyOpt/models/gpmodel.py#L76-L91 |
5,170 | timothydmorton/isochrones | isochrones/starmodel_old.py | TripleStarModel.triangle | def triangle(self, params=None, **kwargs):
"""
Makes a nifty corner plot.
"""
if params is None:
params = ['mass_A', 'mass_B', 'mass_C',
'age', 'feh', 'distance', 'AV']
super(TripleStarModel, self).triangle(params=params, **kwargs) | python | def triangle(self, params=None, **kwargs):
"""
Makes a nifty corner plot.
"""
if params is None:
params = ['mass_A', 'mass_B', 'mass_C',
'age', 'feh', 'distance', 'AV']
super(TripleStarModel, self).triangle(params=params, **kwargs) | ['def', 'triangle', '(', 'self', ',', 'params', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', 'params', 'is', 'None', ':', 'params', '=', '[', "'mass_A'", ',', "'mass_B'", ',', "'mass_C'", ',', "'age'", ',', "'feh'", ',', "'distance'", ',', "'AV'", ']', 'super', '(', 'TripleStarModel', ',', 'self', ')', '.', 'triangle', '(', 'params', '=', 'params', ',', '*', '*', 'kwargs', ')'] | Makes a nifty corner plot. | ['Makes', 'a', 'nifty', 'corner', 'plot', '.'] | train | https://github.com/timothydmorton/isochrones/blob/d84495573044c66db2fd6b959fe69e370757ea14/isochrones/starmodel_old.py#L1818-L1827 |
5,171 | mmp2/megaman | megaman/utils/spectral_clustering.py | spectral_clustering | def spectral_clustering(geom, K, eigen_solver = 'dense', random_state = None, solver_kwds = None,
renormalize = True, stabalize = True, additional_vectors = 0):
"""
Spectral clustering for find K clusters by using the eigenvectors of a
matrix which is derived from a set of similarities S.
Parameters
-----------
S: array-like,shape(n_sample,n_sample)
similarity matrix
K: integer
number of K clusters
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.RandomState
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
renormalize : (bool) whether or not to set the rows of the eigenvectors to have norm 1
this can improve label quality
stabalize : (bool) whether or not to compute the (more stable) eigenvectors of L = D^-1/2*S*D^-1/2
instead of P = D^-1*S
additional_vectors : (int) compute additional eigen vectors when computing eigen decomposition.
When eigen_solver = 'amg' or 'lopcg' often if a small number of eigen values is sought the
largest eigenvalue returned is *not* equal to 1 (it should be). This can usually be fixed
by requesting more than K eigenvalues until the first eigenvalue is close to 1 and then
omitted. The remaining K-1 eigenvectors should be informative.
Returns
-------
labels: array-like, shape (1,n_samples)
"""
# Step 1: get similarity matrix
if geom.affinity_matrix is None:
S = geom.compute_affinity_matrix()
else:
S = geom.affinity_matrix
# Check for stability method, symmetric solvers require this
if eigen_solver in ['lobpcg', 'amg']:
stabalize = True
if stabalize:
geom.laplacian_type = 'symmetricnormalized'
return_lapsym = True
else:
geom.laplacian_type = 'randomwalk'
return_lapsym = False
# Step 2: get the Laplacian matrix
P = geom.compute_laplacian_matrix(return_lapsym = return_lapsym)
# by default the Laplacian is subtracted from the Identify matrix (this step may not be needed)
P += identity(P.shape[0])
# Step 3: Compute the top K eigenvectors and drop the first
if eigen_solver in ['auto', 'amg', 'lobpcg']:
n_components = 2*int(np.log(P.shape[0]))*K + 1
n_components += int(additional_vectors)
else:
n_components = K
n_components = min(n_components, P.shape[0])
(lambdas, eigen_vectors) = eigen_decomposition(P, n_components=n_components, eigen_solver=eigen_solver,
random_state=random_state, drop_first = True,
solver_kwds=solver_kwds)
# the first vector is usually uninformative
if eigen_solver in ['auto', 'lobpcg', 'amg']:
if np.abs(lambdas[0] - 1) > 1e-4:
warnings.warn("largest eigenvalue not equal to 1. Results may be poor. Try increasing additional_vectors parameter")
eigen_vectors = eigen_vectors[:, 1:K]
lambdas = lambdas[1:K]
# If stability method chosen, adjust eigenvectors
if stabalize:
w = np.array(geom.laplacian_weights)
eigen_vectors /= np.sqrt(w[:,np.newaxis])
eigen_vectors /= np.linalg.norm(eigen_vectors, axis = 0)
# If renormalize: set each data point to unit length
if renormalize:
norms = np.linalg.norm(eigen_vectors, axis=1)
eigen_vectors /= norms[:,np.newaxis]
# Step 4: run k-means clustering
labels = k_means_clustering(eigen_vectors,K)
return labels, eigen_vectors, P | python | def spectral_clustering(geom, K, eigen_solver = 'dense', random_state = None, solver_kwds = None,
renormalize = True, stabalize = True, additional_vectors = 0):
"""
Spectral clustering for find K clusters by using the eigenvectors of a
matrix which is derived from a set of similarities S.
Parameters
-----------
S: array-like,shape(n_sample,n_sample)
similarity matrix
K: integer
number of K clusters
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.RandomState
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
renormalize : (bool) whether or not to set the rows of the eigenvectors to have norm 1
this can improve label quality
stabalize : (bool) whether or not to compute the (more stable) eigenvectors of L = D^-1/2*S*D^-1/2
instead of P = D^-1*S
additional_vectors : (int) compute additional eigen vectors when computing eigen decomposition.
When eigen_solver = 'amg' or 'lopcg' often if a small number of eigen values is sought the
largest eigenvalue returned is *not* equal to 1 (it should be). This can usually be fixed
by requesting more than K eigenvalues until the first eigenvalue is close to 1 and then
omitted. The remaining K-1 eigenvectors should be informative.
Returns
-------
labels: array-like, shape (1,n_samples)
"""
# Step 1: get similarity matrix
if geom.affinity_matrix is None:
S = geom.compute_affinity_matrix()
else:
S = geom.affinity_matrix
# Check for stability method, symmetric solvers require this
if eigen_solver in ['lobpcg', 'amg']:
stabalize = True
if stabalize:
geom.laplacian_type = 'symmetricnormalized'
return_lapsym = True
else:
geom.laplacian_type = 'randomwalk'
return_lapsym = False
# Step 2: get the Laplacian matrix
P = geom.compute_laplacian_matrix(return_lapsym = return_lapsym)
# by default the Laplacian is subtracted from the Identify matrix (this step may not be needed)
P += identity(P.shape[0])
# Step 3: Compute the top K eigenvectors and drop the first
if eigen_solver in ['auto', 'amg', 'lobpcg']:
n_components = 2*int(np.log(P.shape[0]))*K + 1
n_components += int(additional_vectors)
else:
n_components = K
n_components = min(n_components, P.shape[0])
(lambdas, eigen_vectors) = eigen_decomposition(P, n_components=n_components, eigen_solver=eigen_solver,
random_state=random_state, drop_first = True,
solver_kwds=solver_kwds)
# the first vector is usually uninformative
if eigen_solver in ['auto', 'lobpcg', 'amg']:
if np.abs(lambdas[0] - 1) > 1e-4:
warnings.warn("largest eigenvalue not equal to 1. Results may be poor. Try increasing additional_vectors parameter")
eigen_vectors = eigen_vectors[:, 1:K]
lambdas = lambdas[1:K]
# If stability method chosen, adjust eigenvectors
if stabalize:
w = np.array(geom.laplacian_weights)
eigen_vectors /= np.sqrt(w[:,np.newaxis])
eigen_vectors /= np.linalg.norm(eigen_vectors, axis = 0)
# If renormalize: set each data point to unit length
if renormalize:
norms = np.linalg.norm(eigen_vectors, axis=1)
eigen_vectors /= norms[:,np.newaxis]
# Step 4: run k-means clustering
labels = k_means_clustering(eigen_vectors,K)
return labels, eigen_vectors, P | ['def', 'spectral_clustering', '(', 'geom', ',', 'K', ',', 'eigen_solver', '=', "'dense'", ',', 'random_state', '=', 'None', ',', 'solver_kwds', '=', 'None', ',', 'renormalize', '=', 'True', ',', 'stabalize', '=', 'True', ',', 'additional_vectors', '=', '0', ')', ':', '# Step 1: get similarity matrix', 'if', 'geom', '.', 'affinity_matrix', 'is', 'None', ':', 'S', '=', 'geom', '.', 'compute_affinity_matrix', '(', ')', 'else', ':', 'S', '=', 'geom', '.', 'affinity_matrix', '# Check for stability method, symmetric solvers require this', 'if', 'eigen_solver', 'in', '[', "'lobpcg'", ',', "'amg'", ']', ':', 'stabalize', '=', 'True', 'if', 'stabalize', ':', 'geom', '.', 'laplacian_type', '=', "'symmetricnormalized'", 'return_lapsym', '=', 'True', 'else', ':', 'geom', '.', 'laplacian_type', '=', "'randomwalk'", 'return_lapsym', '=', 'False', '# Step 2: get the Laplacian matrix', 'P', '=', 'geom', '.', 'compute_laplacian_matrix', '(', 'return_lapsym', '=', 'return_lapsym', ')', '# by default the Laplacian is subtracted from the Identify matrix (this step may not be needed)', 'P', '+=', 'identity', '(', 'P', '.', 'shape', '[', '0', ']', ')', '# Step 3: Compute the top K eigenvectors and drop the first ', 'if', 'eigen_solver', 'in', '[', "'auto'", ',', "'amg'", ',', "'lobpcg'", ']', ':', 'n_components', '=', '2', '*', 'int', '(', 'np', '.', 'log', '(', 'P', '.', 'shape', '[', '0', ']', ')', ')', '*', 'K', '+', '1', 'n_components', '+=', 'int', '(', 'additional_vectors', ')', 'else', ':', 'n_components', '=', 'K', 'n_components', '=', 'min', '(', 'n_components', ',', 'P', '.', 'shape', '[', '0', ']', ')', '(', 'lambdas', ',', 'eigen_vectors', ')', '=', 'eigen_decomposition', '(', 'P', ',', 'n_components', '=', 'n_components', ',', 'eigen_solver', '=', 'eigen_solver', ',', 'random_state', '=', 'random_state', ',', 'drop_first', '=', 'True', ',', 'solver_kwds', '=', 'solver_kwds', ')', '# the first vector is usually uninformative ', 'if', 'eigen_solver', 'in', '[', "'auto'", ',', "'lobpcg'", ',', "'amg'", ']', ':', 'if', 'np', '.', 'abs', '(', 'lambdas', '[', '0', ']', '-', '1', ')', '>', '1e-4', ':', 'warnings', '.', 'warn', '(', '"largest eigenvalue not equal to 1. Results may be poor. Try increasing additional_vectors parameter"', ')', 'eigen_vectors', '=', 'eigen_vectors', '[', ':', ',', '1', ':', 'K', ']', 'lambdas', '=', 'lambdas', '[', '1', ':', 'K', ']', '# If stability method chosen, adjust eigenvectors', 'if', 'stabalize', ':', 'w', '=', 'np', '.', 'array', '(', 'geom', '.', 'laplacian_weights', ')', 'eigen_vectors', '/=', 'np', '.', 'sqrt', '(', 'w', '[', ':', ',', 'np', '.', 'newaxis', ']', ')', 'eigen_vectors', '/=', 'np', '.', 'linalg', '.', 'norm', '(', 'eigen_vectors', ',', 'axis', '=', '0', ')', '# If renormalize: set each data point to unit length', 'if', 'renormalize', ':', 'norms', '=', 'np', '.', 'linalg', '.', 'norm', '(', 'eigen_vectors', ',', 'axis', '=', '1', ')', 'eigen_vectors', '/=', 'norms', '[', ':', ',', 'np', '.', 'newaxis', ']', '# Step 4: run k-means clustering', 'labels', '=', 'k_means_clustering', '(', 'eigen_vectors', ',', 'K', ')', 'return', 'labels', ',', 'eigen_vectors', ',', 'P'] | Spectral clustering for find K clusters by using the eigenvectors of a
matrix which is derived from a set of similarities S.
Parameters
-----------
S: array-like,shape(n_sample,n_sample)
similarity matrix
K: integer
number of K clusters
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.RandomState
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
renormalize : (bool) whether or not to set the rows of the eigenvectors to have norm 1
this can improve label quality
stabalize : (bool) whether or not to compute the (more stable) eigenvectors of L = D^-1/2*S*D^-1/2
instead of P = D^-1*S
additional_vectors : (int) compute additional eigen vectors when computing eigen decomposition.
When eigen_solver = 'amg' or 'lopcg' often if a small number of eigen values is sought the
largest eigenvalue returned is *not* equal to 1 (it should be). This can usually be fixed
by requesting more than K eigenvalues until the first eigenvalue is close to 1 and then
omitted. The remaining K-1 eigenvectors should be informative.
Returns
-------
labels: array-like, shape (1,n_samples) | ['Spectral', 'clustering', 'for', 'find', 'K', 'clusters', 'by', 'using', 'the', 'eigenvectors', 'of', 'a', 'matrix', 'which', 'is', 'derived', 'from', 'a', 'set', 'of', 'similarities', 'S', '.'] | train | https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/utils/spectral_clustering.py#L94-L193 |
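The closing steps of the spectral_clustering function above (renormalize each row of the eigenvector embedding, then cluster) can be illustrated standalone; this sketch uses a made-up random embedding and scikit-learn's KMeans as a stand-in for megaman's own k_means_clustering helper:

import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
eigen_vectors = rng.normal(size=(100, 2))  # hypothetical K-1 informative eigenvectors

# "renormalize" step: scale each data point (row) to unit length.
norms = np.linalg.norm(eigen_vectors, axis=1)
eigen_vectors = eigen_vectors / norms[:, np.newaxis]

# final step: k-means on the renormalized spectral embedding.
labels = KMeans(n_clusters=3, n_init=10).fit_predict(eigen_vectors)
print(labels[:10])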
5,172 | ronaldguillen/wave | wave/utils/html.py | parse_html_list | def parse_html_list(dictionary, prefix=''):
"""
Used to suport list values in HTML forms.
Supports lists of primitives and/or dictionaries.
* List of primitives.
{
'[0]': 'abc',
'[1]': 'def',
'[2]': 'hij'
}
-->
[
'abc',
'def',
'hij'
]
* List of dictionaries.
{
'[0]foo': 'abc',
'[0]bar': 'def',
'[1]foo': 'hij',
'[1]bar': 'klm',
}
-->
[
{'foo': 'abc', 'bar': 'def'},
{'foo': 'hij', 'bar': 'klm'}
]
"""
ret = {}
regex = re.compile(r'^%s\[([0-9]+)\](.*)$' % re.escape(prefix))
for field, value in dictionary.items():
match = regex.match(field)
if not match:
continue
index, key = match.groups()
index = int(index)
if not key:
ret[index] = value
elif isinstance(ret.get(index), dict):
ret[index][key] = value
else:
ret[index] = MultiValueDict({key: [value]})
return [ret[item] for item in sorted(ret.keys())] | python | def parse_html_list(dictionary, prefix=''):
"""
Used to suport list values in HTML forms.
Supports lists of primitives and/or dictionaries.
* List of primitives.
{
'[0]': 'abc',
'[1]': 'def',
'[2]': 'hij'
}
-->
[
'abc',
'def',
'hij'
]
* List of dictionaries.
{
'[0]foo': 'abc',
'[0]bar': 'def',
'[1]foo': 'hij',
'[1]bar': 'klm',
}
-->
[
{'foo': 'abc', 'bar': 'def'},
{'foo': 'hij', 'bar': 'klm'}
]
"""
ret = {}
regex = re.compile(r'^%s\[([0-9]+)\](.*)$' % re.escape(prefix))
for field, value in dictionary.items():
match = regex.match(field)
if not match:
continue
index, key = match.groups()
index = int(index)
if not key:
ret[index] = value
elif isinstance(ret.get(index), dict):
ret[index][key] = value
else:
ret[index] = MultiValueDict({key: [value]})
return [ret[item] for item in sorted(ret.keys())] | ['def', 'parse_html_list', '(', 'dictionary', ',', 'prefix', '=', "''", ')', ':', 'ret', '=', '{', '}', 'regex', '=', 're', '.', 'compile', '(', "r'^%s\\[([0-9]+)\\](.*)$'", '%', 're', '.', 'escape', '(', 'prefix', ')', ')', 'for', 'field', ',', 'value', 'in', 'dictionary', '.', 'items', '(', ')', ':', 'match', '=', 'regex', '.', 'match', '(', 'field', ')', 'if', 'not', 'match', ':', 'continue', 'index', ',', 'key', '=', 'match', '.', 'groups', '(', ')', 'index', '=', 'int', '(', 'index', ')', 'if', 'not', 'key', ':', 'ret', '[', 'index', ']', '=', 'value', 'elif', 'isinstance', '(', 'ret', '.', 'get', '(', 'index', ')', ',', 'dict', ')', ':', 'ret', '[', 'index', ']', '[', 'key', ']', '=', 'value', 'else', ':', 'ret', '[', 'index', ']', '=', 'MultiValueDict', '(', '{', 'key', ':', '[', 'value', ']', '}', ')', 'return', '[', 'ret', '[', 'item', ']', 'for', 'item', 'in', 'sorted', '(', 'ret', '.', 'keys', '(', ')', ')', ']'] | Used to suport list values in HTML forms.
Supports lists of primitives and/or dictionaries.
* List of primitives.
{
'[0]': 'abc',
'[1]': 'def',
'[2]': 'hij'
}
-->
[
'abc',
'def',
'hij'
]
* List of dictionaries.
{
'[0]foo': 'abc',
'[0]bar': 'def',
'[1]foo': 'hij',
'[1]bar': 'klm',
}
-->
[
{'foo': 'abc', 'bar': 'def'},
{'foo': 'hij', 'bar': 'klm'}
] | ['Used', 'to', 'suport', 'list', 'values', 'in', 'HTML', 'forms', '.', 'Supports', 'lists', 'of', 'primitives', 'and', '/', 'or', 'dictionaries', '.'] | train | https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/utils/html.py#L15-L62 |
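A simplified, self-contained sketch of the parse_html_list logic above (plain dicts stand in for Django's MultiValueDict, so nested values behave slightly differently than in the original):

import re

def parse_html_list(dictionary, prefix=''):
    ret = {}
    regex = re.compile(r'^%s\[([0-9]+)\](.*)$' % re.escape(prefix))
    for field, value in dictionary.items():
        match = regex.match(field)
        if not match:
            continue
        index, key = match.groups()
        index = int(index)
        if not key:
            ret[index] = value
        elif isinstance(ret.get(index), dict):
            ret[index][key] = value
        else:
            ret[index] = {key: value}
    return [ret[item] for item in sorted(ret.keys())]

print(parse_html_list({'[0]': 'abc', '[1]': 'def'}))        # ['abc', 'def']
print(parse_html_list({'[0]foo': 'abc', '[0]bar': 'def'}))  # [{'foo': 'abc', 'bar': 'def'}]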
5,173 | ornlneutronimaging/ImagingReso | ImagingReso/_utilities.py | set_distance_units | def set_distance_units(value=np.NaN, from_units='mm', to_units='cm'):
"""convert distance into new units
Parameters:
===========
value: float. value to convert
from_units: string. Must be 'mm', 'cm' or 'm'
to_units: string. must be 'mm','cm' or 'm'
Returns:
========
converted value
Raises:
=======
ValueError if from_units is not a valid unit (see above)
ValueError if to_units is not a valid unit
"""
if from_units == to_units:
return value
if from_units == 'cm':
if to_units == 'mm':
coeff = 10
elif to_units == 'm':
coeff = 0.01
else:
raise ValueError("to_units not supported ['cm','m','mm']!")
elif from_units == 'mm':
if to_units == 'cm':
coeff = 0.1
elif to_units == 'm':
coeff = 0.001
else:
raise ValueError("to_units not supported ['cm','m','mm']!")
elif from_units == 'm':
if to_units == 'mm':
coeff = 1000
elif to_units == 'cm':
coeff = 100
else:
raise ValueError("to_units not supported ['cm','m','mm']!")
else:
raise ValueError("to_units not supported ['cm','m','mm']!")
return coeff * value | python | def set_distance_units(value=np.NaN, from_units='mm', to_units='cm'):
"""convert distance into new units
Parameters:
===========
value: float. value to convert
from_units: string. Must be 'mm', 'cm' or 'm'
to_units: string. must be 'mm','cm' or 'm'
Returns:
========
converted value
Raises:
=======
ValueError if from_units is not a valid unit (see above)
ValueError if to_units is not a valid unit
"""
if from_units == to_units:
return value
if from_units == 'cm':
if to_units == 'mm':
coeff = 10
elif to_units == 'm':
coeff = 0.01
else:
raise ValueError("to_units not supported ['cm','m','mm']!")
elif from_units == 'mm':
if to_units == 'cm':
coeff = 0.1
elif to_units == 'm':
coeff = 0.001
else:
raise ValueError("to_units not supported ['cm','m','mm']!")
elif from_units == 'm':
if to_units == 'mm':
coeff = 1000
elif to_units == 'cm':
coeff = 100
else:
raise ValueError("to_units not supported ['cm','m','mm']!")
else:
raise ValueError("to_units not supported ['cm','m','mm']!")
return coeff * value | ['def', 'set_distance_units', '(', 'value', '=', 'np', '.', 'NaN', ',', 'from_units', '=', "'mm'", ',', 'to_units', '=', "'cm'", ')', ':', 'if', 'from_units', '==', 'to_units', ':', 'return', 'value', 'if', 'from_units', '==', "'cm'", ':', 'if', 'to_units', '==', "'mm'", ':', 'coeff', '=', '10', 'elif', 'to_units', '==', "'m'", ':', 'coeff', '=', '0.01', 'else', ':', 'raise', 'ValueError', '(', '"to_units not supported [\'cm\',\'m\',\'mm\']!"', ')', 'elif', 'from_units', '==', "'mm'", ':', 'if', 'to_units', '==', "'cm'", ':', 'coeff', '=', '0.1', 'elif', 'to_units', '==', "'m'", ':', 'coeff', '=', '0.001', 'else', ':', 'raise', 'ValueError', '(', '"to_units not supported [\'cm\',\'m\',\'mm\']!"', ')', 'elif', 'from_units', '==', "'m'", ':', 'if', 'to_units', '==', "'mm'", ':', 'coeff', '=', '1000', 'elif', 'to_units', '==', "'cm'", ':', 'coeff', '=', '100', 'else', ':', 'raise', 'ValueError', '(', '"to_units not supported [\'cm\',\'m\',\'mm\']!"', ')', 'else', ':', 'raise', 'ValueError', '(', '"to_units not supported [\'cm\',\'m\',\'mm\']!"', ')', 'return', 'coeff', '*', 'value'] | convert distance into new units
Parameters:
===========
value: float. value to convert
from_units: string. Must be 'mm', 'cm' or 'm'
to_units: string. must be 'mm','cm' or 'm'
Returns:
========
converted value
Raises:
=======
ValueError if from_units is not a valid unit (see above)
ValueError if to_units is not a valid unit | ['convert', 'distance', 'into', 'new', 'units', 'Parameters', ':', '===========', 'value', ':', 'float', '.', 'value', 'to', 'convert', 'from_units', ':', 'string', '.', 'Must', 'be', 'mm', 'cm', 'or', 'm', 'to_units', ':', 'string', '.', 'must', 'be', 'mm', 'cm', 'or', 'm', 'Returns', ':', '========', 'converted', 'value', 'Raises', ':', '=======', 'ValueError', 'if', 'from_units', 'is', 'not', 'a', 'valid', 'unit', '(', 'see', 'above', ')', 'ValueError', 'if', 'to_units', 'is', 'not', 'a', 'valid', 'unit'] | train | https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/_utilities.py#L623-L668 |
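The conversion table encoded in set_distance_units above, restated as a quick numeric check (simplified sketch; unsupported unit pairs raise KeyError here rather than the original's ValueError):

coeffs = {
    ('mm', 'cm'): 0.1, ('mm', 'm'): 0.001,
    ('cm', 'mm'): 10,  ('cm', 'm'): 0.01,
    ('m', 'cm'): 100,  ('m', 'mm'): 1000,
}

def convert(value, from_units, to_units):
    if from_units == to_units:
        return value
    return value * coeffs[(from_units, to_units)]

print(convert(25.0, 'mm', 'cm'))  # 2.5
print(convert(1.5, 'm', 'mm'))    # 1500.0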
5,174 | bykof/billomapy | billomapy/billomapy.py | Billomapy.get_email_receivers_of_recurring_per_page | def get_email_receivers_of_recurring_per_page(self, recurring_id, per_page=1000, page=1):
"""
Get email receivers of recurring per page
:param recurring_id: the recurring id
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list
"""
return self._get_resource_per_page(
resource=RECURRING_EMAIL_RECEIVERS,
per_page=per_page,
page=page,
params={'recurring_id': recurring_id},
) | python | def get_email_receivers_of_recurring_per_page(self, recurring_id, per_page=1000, page=1):
"""
Get email receivers of recurring per page
:param recurring_id: the recurring id
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list
"""
return self._get_resource_per_page(
resource=RECURRING_EMAIL_RECEIVERS,
per_page=per_page,
page=page,
params={'recurring_id': recurring_id},
) | ['def', 'get_email_receivers_of_recurring_per_page', '(', 'self', ',', 'recurring_id', ',', 'per_page', '=', '1000', ',', 'page', '=', '1', ')', ':', 'return', 'self', '.', '_get_resource_per_page', '(', 'resource', '=', 'RECURRING_EMAIL_RECEIVERS', ',', 'per_page', '=', 'per_page', ',', 'page', '=', 'page', ',', 'params', '=', '{', "'recurring_id'", ':', 'recurring_id', '}', ',', ')'] | Get email receivers of recurring per page
:param recurring_id: the recurring id
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list | ['Get', 'email', 'receivers', 'of', 'recurring', 'per', 'page'] | train | https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L1606-L1620 |
5,175 | rauenzi/discordbot.py | discordbot/cogs/reactions.py | Reactions.viewreaction | async def viewreaction(self, ctx, *, reactor : str):
"""Views a specific reaction"""
data = self.config.get(ctx.message.server.id, {})
keyword = data.get(reactor, {})
if not keyword:
await self.bot.responses.failure(message="Reaction '{}' was not found.".format(reactor))
return
response = data.get(reactor, {}).get("response", "")
reacts = data.get(reactor, {}).get("reaction", [])
for i, r in enumerate(reacts):
if ":" in r:
reacts[i] = "<:" + r + ">"
reacts = " ".join(reacts) if reacts else "-"
response = response if response else "-"
string = "Here's what I say to '{reactor}': {response}\n"\
"I'll react to this message how I react to '{reactor}'.".format(reactor=reactor,response=response)
await self.bot.responses.full(sections=[{"name": "Response", "value": response},
{"name": "Reactions", "value": reacts, "inline": False}]) | python | async def viewreaction(self, ctx, *, reactor : str):
"""Views a specific reaction"""
data = self.config.get(ctx.message.server.id, {})
keyword = data.get(reactor, {})
if not keyword:
await self.bot.responses.failure(message="Reaction '{}' was not found.".format(reactor))
return
response = data.get(reactor, {}).get("response", "")
reacts = data.get(reactor, {}).get("reaction", [])
for i, r in enumerate(reacts):
if ":" in r:
reacts[i] = "<:" + r + ">"
reacts = " ".join(reacts) if reacts else "-"
response = response if response else "-"
string = "Here's what I say to '{reactor}': {response}\n"\
"I'll react to this message how I react to '{reactor}'.".format(reactor=reactor,response=response)
await self.bot.responses.full(sections=[{"name": "Response", "value": response},
{"name": "Reactions", "value": reacts, "inline": False}]) | ['async', 'def', 'viewreaction', '(', 'self', ',', 'ctx', ',', '*', ',', 'reactor', ':', 'str', ')', ':', 'data', '=', 'self', '.', 'config', '.', 'get', '(', 'ctx', '.', 'message', '.', 'server', '.', 'id', ',', '{', '}', ')', 'keyword', '=', 'data', '.', 'get', '(', 'reactor', ',', '{', '}', ')', 'if', 'not', 'keyword', ':', 'await', 'self', '.', 'bot', '.', 'responses', '.', 'failure', '(', 'message', '=', '"Reaction \'{}\' was not found."', '.', 'format', '(', 'reactor', ')', ')', 'return', 'response', '=', 'data', '.', 'get', '(', 'reactor', ',', '{', '}', ')', '.', 'get', '(', '"response"', ',', '""', ')', 'reacts', '=', 'data', '.', 'get', '(', 'reactor', ',', '{', '}', ')', '.', 'get', '(', '"reaction"', ',', '[', ']', ')', 'for', 'i', ',', 'r', 'in', 'enumerate', '(', 'reacts', ')', ':', 'if', '":"', 'in', 'r', ':', 'reacts', '[', 'i', ']', '=', '"<:"', '+', 'r', '+', '">"', 'reacts', '=', '" "', '.', 'join', '(', 'reacts', ')', 'if', 'reacts', 'else', '"-"', 'response', '=', 'response', 'if', 'response', 'else', '"-"', 'string', '=', '"Here\'s what I say to \'{reactor}\': {response}\\n"', '"I\'ll react to this message how I react to \'{reactor}\'."', '.', 'format', '(', 'reactor', '=', 'reactor', ',', 'response', '=', 'response', ')', 'await', 'self', '.', 'bot', '.', 'responses', '.', 'full', '(', 'sections', '=', '[', '{', '"name"', ':', '"Response"', ',', '"value"', ':', 'response', '}', ',', '{', '"name"', ':', '"Reactions"', ',', '"value"', ':', 'reacts', ',', '"inline"', ':', 'False', '}', ']', ')'] | Views a specific reaction | ['Views', 'a', 'specific', 'reaction'] | train | https://github.com/rauenzi/discordbot.py/blob/39bb98dae4e49487e6c6c597f85fc41c74b62bb8/discordbot/cogs/reactions.py#L112-L135 |
5,176 | monarch-initiative/dipper | dipper/sources/FlyBase.py | FlyBase._process_disease_models | def _process_disease_models(self, limit):
"""
Here we make associations between a disease and the supplied "model".
In this case it's an allele.
FIXME consider changing this... are alleles really models?
Perhaps map these alleles into actual animals/strains or genotypes?
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
raw = '/'.join((self.rawdir, self.files['disease_models']['file']))
LOG.info("processing disease models")
line_counter = 0
geno = Genotype(graph)
fly_taxon = self.globaltt["Drosophila melanogaster"]
with gzip.open(raw, 'rb') as f:
filereader = csv.reader(
io.TextIOWrapper(f, newline=""),
delimiter='\t', quotechar='\"')
for line in filereader:
# skip comments
if re.match(r'#', ''.join(line)) or ''.join(line) == '':
continue
(allele_id, allele_symbol, qualifier, doid_label, doid_id,
evidence_or_interacting_allele, pub_id) = line
line_counter += 1
if self.test_mode and self.test_ids['disease'] is not None \
and doid_id not in self.test_ids['disease']:
continue
rel = None
allele_id = 'FlyBase:' + allele_id
if qualifier == 'model of':
rel = self.globaltt['is model of']
else:
# TODO amelorates, exacerbates, and DOES NOT *
continue
animal_id = geno.make_experimental_model_with_genotype(
allele_id, allele_symbol, fly_taxon, 'fly')
assoc = G2PAssoc(graph, self.name, animal_id, doid_id, rel)
if pub_id != '':
pub_id = 'FlyBase:'+pub_id
assoc.add_source(pub_id)
if evidence_or_interacting_allele == 'inferred from mutant phenotype':
evidence_id = self.globaltt['mutant phenotype evidence']
assoc.add_evidence(evidence_id)
else:
assoc.set_description(evidence_or_interacting_allele)
assoc.add_association_to_graph()
if not self.test_mode and limit is not None and line_counter > limit:
break
return | python | def _process_disease_models(self, limit):
"""
Here we make associations between a disease and the supplied "model".
In this case it's an allele.
FIXME consider changing this... are alleles really models?
Perhaps map these alleles into actual animals/strains or genotypes?
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
raw = '/'.join((self.rawdir, self.files['disease_models']['file']))
LOG.info("processing disease models")
line_counter = 0
geno = Genotype(graph)
fly_taxon = self.globaltt["Drosophila melanogaster"]
with gzip.open(raw, 'rb') as f:
filereader = csv.reader(
io.TextIOWrapper(f, newline=""),
delimiter='\t', quotechar='\"')
for line in filereader:
# skip comments
if re.match(r'#', ''.join(line)) or ''.join(line) == '':
continue
(allele_id, allele_symbol, qualifier, doid_label, doid_id,
evidence_or_interacting_allele, pub_id) = line
line_counter += 1
if self.test_mode and self.test_ids['disease'] is not None \
and doid_id not in self.test_ids['disease']:
continue
rel = None
allele_id = 'FlyBase:' + allele_id
if qualifier == 'model of':
rel = self.globaltt['is model of']
else:
# TODO amelorates, exacerbates, and DOES NOT *
continue
animal_id = geno.make_experimental_model_with_genotype(
allele_id, allele_symbol, fly_taxon, 'fly')
assoc = G2PAssoc(graph, self.name, animal_id, doid_id, rel)
if pub_id != '':
pub_id = 'FlyBase:'+pub_id
assoc.add_source(pub_id)
if evidence_or_interacting_allele == 'inferred from mutant phenotype':
evidence_id = self.globaltt['mutant phenotype evidence']
assoc.add_evidence(evidence_id)
else:
assoc.set_description(evidence_or_interacting_allele)
assoc.add_association_to_graph()
if not self.test_mode and limit is not None and line_counter > limit:
break
return | ['def', '_process_disease_models', '(', 'self', ',', 'limit', ')', ':', 'if', 'self', '.', 'test_mode', ':', 'graph', '=', 'self', '.', 'testgraph', 'else', ':', 'graph', '=', 'self', '.', 'graph', 'raw', '=', "'/'", '.', 'join', '(', '(', 'self', '.', 'rawdir', ',', 'self', '.', 'files', '[', "'disease_models'", ']', '[', "'file'", ']', ')', ')', 'LOG', '.', 'info', '(', '"processing disease models"', ')', 'line_counter', '=', '0', 'geno', '=', 'Genotype', '(', 'graph', ')', 'fly_taxon', '=', 'self', '.', 'globaltt', '[', '"Drosophila melanogaster"', ']', 'with', 'gzip', '.', 'open', '(', 'raw', ',', "'rb'", ')', 'as', 'f', ':', 'filereader', '=', 'csv', '.', 'reader', '(', 'io', '.', 'TextIOWrapper', '(', 'f', ',', 'newline', '=', '""', ')', ',', 'delimiter', '=', "'\\t'", ',', 'quotechar', '=', '\'\\"\'', ')', 'for', 'line', 'in', 'filereader', ':', '# skip comments', 'if', 're', '.', 'match', '(', "r'#'", ',', "''", '.', 'join', '(', 'line', ')', ')', 'or', "''", '.', 'join', '(', 'line', ')', '==', "''", ':', 'continue', '(', 'allele_id', ',', 'allele_symbol', ',', 'qualifier', ',', 'doid_label', ',', 'doid_id', ',', 'evidence_or_interacting_allele', ',', 'pub_id', ')', '=', 'line', 'line_counter', '+=', '1', 'if', 'self', '.', 'test_mode', 'and', 'self', '.', 'test_ids', '[', "'disease'", ']', 'is', 'not', 'None', 'and', 'doid_id', 'not', 'in', 'self', '.', 'test_ids', '[', "'disease'", ']', ':', 'continue', 'rel', '=', 'None', 'allele_id', '=', "'FlyBase:'", '+', 'allele_id', 'if', 'qualifier', '==', "'model of'", ':', 'rel', '=', 'self', '.', 'globaltt', '[', "'is model of'", ']', 'else', ':', '# TODO amelorates, exacerbates, and DOES NOT *', 'continue', 'animal_id', '=', 'geno', '.', 'make_experimental_model_with_genotype', '(', 'allele_id', ',', 'allele_symbol', ',', 'fly_taxon', ',', "'fly'", ')', 'assoc', '=', 'G2PAssoc', '(', 'graph', ',', 'self', '.', 'name', ',', 'animal_id', ',', 'doid_id', ',', 'rel', ')', 'if', 'pub_id', '!=', "''", ':', 'pub_id', '=', "'FlyBase:'", '+', 'pub_id', 'assoc', '.', 'add_source', '(', 'pub_id', ')', 'if', 'evidence_or_interacting_allele', '==', "'inferred from mutant phenotype'", ':', 'evidence_id', '=', 'self', '.', 'globaltt', '[', "'mutant phenotype evidence'", ']', 'assoc', '.', 'add_evidence', '(', 'evidence_id', ')', 'else', ':', 'assoc', '.', 'set_description', '(', 'evidence_or_interacting_allele', ')', 'assoc', '.', 'add_association_to_graph', '(', ')', 'if', 'not', 'self', '.', 'test_mode', 'and', 'limit', 'is', 'not', 'None', 'and', 'line_counter', '>', 'limit', ':', 'break', 'return'] | Here we make associations between a disease and the supplied "model".
In this case it's an allele.
FIXME consider changing this... are alleles really models?
Perhaps map these alleles into actual animals/strains or genotypes?
:param limit:
:return: | ['Here', 'we', 'make', 'associations', 'between', 'a', 'disease', 'and', 'the', 'supplied', 'model', '.', 'In', 'this', 'case', 'it', 's', 'an', 'allele', '.', 'FIXME', 'consider', 'changing', 'this', '...', 'are', 'alleles', 'really', 'models?', 'Perhaps', 'map', 'these', 'alleles', 'into', 'actual', 'animals', '/', 'strains', 'or', 'genotypes?', ':', 'param', 'limit', ':', ':', 'return', ':'] | train | https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L1923-L1987 |
5,177 | spyder-ide/spyder | spyder/plugins/projects/widgets/projectdialog.py | is_writable | def is_writable(path):
"""Check if path has write access"""
try:
testfile = tempfile.TemporaryFile(dir=path)
testfile.close()
except OSError as e:
if e.errno == errno.EACCES: # 13
return False
return True | python | def is_writable(path):
"""Check if path has write access"""
try:
testfile = tempfile.TemporaryFile(dir=path)
testfile.close()
except OSError as e:
if e.errno == errno.EACCES: # 13
return False
return True | ['def', 'is_writable', '(', 'path', ')', ':', 'try', ':', 'testfile', '=', 'tempfile', '.', 'TemporaryFile', '(', 'dir', '=', 'path', ')', 'testfile', '.', 'close', '(', ')', 'except', 'OSError', 'as', 'e', ':', 'if', 'e', '.', 'errno', '==', 'errno', '.', 'EACCES', ':', '# 13\r', 'return', 'False', 'return', 'True'] | Check if path has write access | ['Check', 'if', 'path', 'has', 'write', 'access'] | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/projects/widgets/projectdialog.py#L35-L43 |
5,178 | blockstack/blockstack-core | blockstack/lib/nameset/namedb.py | BlockstackDB.is_name_owner | def is_name_owner( self, name, sender_script_pubkey ):
"""
Given the fully-qualified name and a sender's script pubkey,
determine if the sender owns the name.
The name must exist and not be revoked or expired at the
current block.
"""
if not self.is_name_registered( name ):
# no one owns it
return False
owner = self.get_name_owner( name )
if owner != sender_script_pubkey:
return False
else:
return True | python | def is_name_owner( self, name, sender_script_pubkey ):
"""
Given the fully-qualified name and a sender's script pubkey,
determine if the sender owns the name.
The name must exist and not be revoked or expired at the
current block.
"""
if not self.is_name_registered( name ):
# no one owns it
return False
owner = self.get_name_owner( name )
if owner != sender_script_pubkey:
return False
else:
return True | ['def', 'is_name_owner', '(', 'self', ',', 'name', ',', 'sender_script_pubkey', ')', ':', 'if', 'not', 'self', '.', 'is_name_registered', '(', 'name', ')', ':', '# no one owns it ', 'return', 'False', 'owner', '=', 'self', '.', 'get_name_owner', '(', 'name', ')', 'if', 'owner', '!=', 'sender_script_pubkey', ':', 'return', 'False', 'else', ':', 'return', 'True'] | Given the fully-qualified name and a sender's script pubkey,
determine if the sender owns the name.
The name must exist and not be revoked or expired at the
current block. | ['Given', 'the', 'fully', '-', 'qualified', 'name', 'and', 'a', 'sender', 's', 'script', 'pubkey', 'determine', 'if', 'the', 'sender', 'owns', 'the', 'name', '.'] | train | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/namedb.py#L1443-L1459 |
5,179 | rlabbe/filterpy | filterpy/hinfinity/hinfinity_filter.py | HInfinityFilter.predict | def predict(self, u=0):
"""
Predict next position.
Parameters
----------
u : ndarray
Optional control vector. If non-zero, it is multiplied by `B`
to create the control input into the system.
"""
# x = Fx + Bu
self.x = dot(self.F, self.x) + dot(self.B, u) | python | def predict(self, u=0):
"""
Predict next position.
Parameters
----------
u : ndarray
Optional control vector. If non-zero, it is multiplied by `B`
to create the control input into the system.
"""
# x = Fx + Bu
self.x = dot(self.F, self.x) + dot(self.B, u) | ['def', 'predict', '(', 'self', ',', 'u', '=', '0', ')', ':', '# x = Fx + Bu', 'self', '.', 'x', '=', 'dot', '(', 'self', '.', 'F', ',', 'self', '.', 'x', ')', '+', 'dot', '(', 'self', '.', 'B', ',', 'u', ')'] | Predict next position.
Parameters
----------
u : ndarray
Optional control vector. If non-zero, it is multiplied by `B`
to create the control input into the system. | ['Predict', 'next', 'position', '.'] | train | https://github.com/rlabbe/filterpy/blob/8123214de798ffb63db968bb0b9492ee74e77950/filterpy/hinfinity/hinfinity_filter.py#L145-L157 |
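One prediction step of the filter above, x <- F x + B u, with made-up matrices (plain numpy arrays stand in for the filter's attributes):

import numpy as np

F = np.array([[1.0, 1.0],
              [0.0, 1.0]])  # constant-velocity state transition
B = np.array([[0.5],
              [1.0]])       # control input model
x = np.array([[0.0],
              [1.0]])       # state: position, velocity
u = np.array([[0.2]])       # control input

x = F @ x + B @ u
print(x)  # [[1.1], [1.2]]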
5,180 | pepkit/peppy | peppy/utils.py | check_bam | def check_bam(bam, o):
"""
Check reads in BAM file for read type and lengths.
:param str bam: BAM file path.
:param int o: Number of reads to look at for estimation.
"""
try:
p = sp.Popen(['samtools', 'view', bam], stdout=sp.PIPE)
# Count paired alignments
paired = 0
read_lengths = defaultdict(int)
while o > 0: # Count down number of lines
line = p.stdout.readline().decode().split("\t")
flag = int(line[1])
read_lengths[len(line[9])] += 1
if 1 & flag: # check decimal flag contains 1 (paired)
paired += 1
o -= 1
p.kill()
except OSError:
reason = "Note (samtools not in path): For NGS inputs, " \
"pep needs samtools to auto-populate " \
"'read_length' and 'read_type' attributes; " \
"these attributes were not populated."
raise OSError(reason)
_LOGGER.debug("Read lengths: {}".format(read_lengths))
_LOGGER.debug("paired: {}".format(paired))
return read_lengths, paired | python | def check_bam(bam, o):
"""
Check reads in BAM file for read type and lengths.
:param str bam: BAM file path.
:param int o: Number of reads to look at for estimation.
"""
try:
p = sp.Popen(['samtools', 'view', bam], stdout=sp.PIPE)
# Count paired alignments
paired = 0
read_lengths = defaultdict(int)
while o > 0: # Count down number of lines
line = p.stdout.readline().decode().split("\t")
flag = int(line[1])
read_lengths[len(line[9])] += 1
if 1 & flag: # check decimal flag contains 1 (paired)
paired += 1
o -= 1
p.kill()
except OSError:
reason = "Note (samtools not in path): For NGS inputs, " \
"pep needs samtools to auto-populate " \
"'read_length' and 'read_type' attributes; " \
"these attributes were not populated."
raise OSError(reason)
_LOGGER.debug("Read lengths: {}".format(read_lengths))
_LOGGER.debug("paired: {}".format(paired))
return read_lengths, paired | ['def', 'check_bam', '(', 'bam', ',', 'o', ')', ':', 'try', ':', 'p', '=', 'sp', '.', 'Popen', '(', '[', "'samtools'", ',', "'view'", ',', 'bam', ']', ',', 'stdout', '=', 'sp', '.', 'PIPE', ')', '# Count paired alignments', 'paired', '=', '0', 'read_lengths', '=', 'defaultdict', '(', 'int', ')', 'while', 'o', '>', '0', ':', '# Count down number of lines', 'line', '=', 'p', '.', 'stdout', '.', 'readline', '(', ')', '.', 'decode', '(', ')', '.', 'split', '(', '"\\t"', ')', 'flag', '=', 'int', '(', 'line', '[', '1', ']', ')', 'read_lengths', '[', 'len', '(', 'line', '[', '9', ']', ')', ']', '+=', '1', 'if', '1', '&', 'flag', ':', '# check decimal flag contains 1 (paired)', 'paired', '+=', '1', 'o', '-=', '1', 'p', '.', 'kill', '(', ')', 'except', 'OSError', ':', 'reason', '=', '"Note (samtools not in path): For NGS inputs, "', '"pep needs samtools to auto-populate "', '"\'read_length\' and \'read_type\' attributes; "', '"these attributes were not populated."', 'raise', 'OSError', '(', 'reason', ')', '_LOGGER', '.', 'debug', '(', '"Read lengths: {}"', '.', 'format', '(', 'read_lengths', ')', ')', '_LOGGER', '.', 'debug', '(', '"paired: {}"', '.', 'format', '(', 'paired', ')', ')', 'return', 'read_lengths', ',', 'paired'] | Check reads in BAM file for read type and lengths.
:param str bam: BAM file path.
:param int o: Number of reads to look at for estimation. | ['Check', 'reads', 'in', 'BAM', 'file', 'for', 'read', 'type', 'and', 'lengths', '.'] | train | https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L62-L91 |
5,181 | log2timeline/dfwinreg | dfwinreg/virtual.py | VirtualWinRegistryKey.offset | def offset(self):
"""int: offset of the key within the Windows Registry file or None."""
if not self._registry_key and self._registry:
self._GetKeyFromRegistry()
if not self._registry_key:
return None
return self._registry_key.offset | python | def offset(self):
"""int: offset of the key within the Windows Registry file or None."""
if not self._registry_key and self._registry:
self._GetKeyFromRegistry()
if not self._registry_key:
return None
return self._registry_key.offset | ['def', 'offset', '(', 'self', ')', ':', 'if', 'not', 'self', '.', '_registry_key', 'and', 'self', '.', '_registry', ':', 'self', '.', '_GetKeyFromRegistry', '(', ')', 'if', 'not', 'self', '.', '_registry_key', ':', 'return', 'None', 'return', 'self', '.', '_registry_key', '.', 'offset'] | int: offset of the key within the Windows Registry file or None. | ['int', ':', 'offset', 'of', 'the', 'key', 'within', 'the', 'Windows', 'Registry', 'file', 'or', 'None', '.'] | train | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/virtual.py#L84-L92 |
5,182 | pudo/dataset | dataset/table.py | Table.drop | def drop(self):
"""Drop the table from the database.
Deletes both the schema and all the contents within it.
"""
with self.db.lock:
if self.exists:
self._threading_warn()
self.table.drop(self.db.executable, checkfirst=True)
self._table = None | python | def drop(self):
"""Drop the table from the database.
Deletes both the schema and all the contents within it.
"""
with self.db.lock:
if self.exists:
self._threading_warn()
self.table.drop(self.db.executable, checkfirst=True)
self._table = None | ['def', 'drop', '(', 'self', ')', ':', 'with', 'self', '.', 'db', '.', 'lock', ':', 'if', 'self', '.', 'exists', ':', 'self', '.', '_threading_warn', '(', ')', 'self', '.', 'table', '.', 'drop', '(', 'self', '.', 'db', '.', 'executable', ',', 'checkfirst', '=', 'True', ')', 'self', '.', '_table', '=', 'None'] | Drop the table from the database.
Deletes both the schema and all the contents within it. | ['Drop', 'the', 'table', 'from', 'the', 'database', '.'] | train | https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/table.py#L390-L399 |
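Usage sketch for Table.drop above, assuming the dataset package is installed (table and column names are made up):

import dataset

db = dataset.connect('sqlite:///:memory:')
table = db['people']
table.insert({'name': 'Ada'})
print('people' in db.tables)  # True

table.drop()                  # deletes both the schema and its contents
print('people' in db.tables)  # False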
5,183 | ev3dev/ev3dev-lang-python | ev3dev2/motor.py | MoveTank.on | def on(self, left_speed, right_speed):
"""
Start rotating the motors according to ``left_speed`` and ``right_speed`` forever.
Speeds can be percentages or any SpeedValue implementation.
"""
(left_speed_native_units, right_speed_native_units) = self._unpack_speeds_to_native_units(left_speed, right_speed)
# Set all parameters
self.left_motor.speed_sp = int(round(left_speed_native_units))
self.right_motor.speed_sp = int(round(right_speed_native_units))
# This debug involves disk I/O to pull speed_sp so only uncomment
# if you need to troubleshoot in more detail.
# log.debug("%s: on at left-speed %s, right-speed %s" %
# (self, self.left_motor.speed_sp, self.right_motor.speed_sp))
# Start the motors
self.left_motor.run_forever()
self.right_motor.run_forever() | python | def on(self, left_speed, right_speed):
"""
Start rotating the motors according to ``left_speed`` and ``right_speed`` forever.
Speeds can be percentages or any SpeedValue implementation.
"""
(left_speed_native_units, right_speed_native_units) = self._unpack_speeds_to_native_units(left_speed, right_speed)
# Set all parameters
self.left_motor.speed_sp = int(round(left_speed_native_units))
self.right_motor.speed_sp = int(round(right_speed_native_units))
# This debug involves disk I/O to pull speed_sp so only uncomment
# if you need to troubleshoot in more detail.
# log.debug("%s: on at left-speed %s, right-speed %s" %
# (self, self.left_motor.speed_sp, self.right_motor.speed_sp))
# Start the motors
self.left_motor.run_forever()
self.right_motor.run_forever() | ['def', 'on', '(', 'self', ',', 'left_speed', ',', 'right_speed', ')', ':', '(', 'left_speed_native_units', ',', 'right_speed_native_units', ')', '=', 'self', '.', '_unpack_speeds_to_native_units', '(', 'left_speed', ',', 'right_speed', ')', '# Set all parameters', 'self', '.', 'left_motor', '.', 'speed_sp', '=', 'int', '(', 'round', '(', 'left_speed_native_units', ')', ')', 'self', '.', 'right_motor', '.', 'speed_sp', '=', 'int', '(', 'round', '(', 'right_speed_native_units', ')', ')', '# This debug involves disk I/O to pull speed_sp so only uncomment', '# if you need to troubleshoot in more detail.', '# log.debug("%s: on at left-speed %s, right-speed %s" %', '# (self, self.left_motor.speed_sp, self.right_motor.speed_sp))', '# Start the motors', 'self', '.', 'left_motor', '.', 'run_forever', '(', ')', 'self', '.', 'right_motor', '.', 'run_forever', '(', ')'] | Start rotating the motors according to ``left_speed`` and ``right_speed`` forever.
Speeds can be percentages or any SpeedValue implementation. | ['Start', 'rotating', 'the', 'motors', 'according', 'to', 'left_speed', 'and', 'right_speed', 'forever', '.', 'Speeds', 'can', 'be', 'percentages', 'or', 'any', 'SpeedValue', 'implementation', '.'] | train | https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L1904-L1922 |
5,184 | dhermes/bezier | src/bezier/surface.py | _make_intersection | def _make_intersection(edge_info, all_edge_nodes):
"""Convert a description of edges into a curved polygon.
.. note::
This is a helper used only by :meth:`.Surface.intersect`.
Args:
edge_info (Tuple[Tuple[int, float, float], ...]): Information
describing each edge in the curved polygon by indicating which
surface / edge on the surface and then start and end parameters
along that edge. (See :func:`.ends_to_curve`.)
all_edge_nodes (Tuple[numpy.ndarray, ...]): The nodes of three edges
of the first surface being intersected followed by the nodes of
the three edges of the second.
Returns:
.CurvedPolygon: The intersection corresponding to ``edge_info``.
"""
edges = []
for index, start, end in edge_info:
nodes = all_edge_nodes[index]
new_nodes = _curve_helpers.specialize_curve(nodes, start, end)
degree = new_nodes.shape[1] - 1
edge = _curve_mod.Curve(new_nodes, degree, _copy=False)
edges.append(edge)
return curved_polygon.CurvedPolygon(
*edges, metadata=edge_info, _verify=False
) | python | def _make_intersection(edge_info, all_edge_nodes):
"""Convert a description of edges into a curved polygon.
.. note::
This is a helper used only by :meth:`.Surface.intersect`.
Args:
edge_info (Tuple[Tuple[int, float, float], ...]): Information
describing each edge in the curved polygon by indicating which
surface / edge on the surface and then start and end parameters
along that edge. (See :func:`.ends_to_curve`.)
all_edge_nodes (Tuple[numpy.ndarray, ...]): The nodes of three edges
of the first surface being intersected followed by the nodes of
the three edges of the second.
Returns:
.CurvedPolygon: The intersection corresponding to ``edge_info``.
"""
edges = []
for index, start, end in edge_info:
nodes = all_edge_nodes[index]
new_nodes = _curve_helpers.specialize_curve(nodes, start, end)
degree = new_nodes.shape[1] - 1
edge = _curve_mod.Curve(new_nodes, degree, _copy=False)
edges.append(edge)
return curved_polygon.CurvedPolygon(
*edges, metadata=edge_info, _verify=False
) | ['def', '_make_intersection', '(', 'edge_info', ',', 'all_edge_nodes', ')', ':', 'edges', '=', '[', ']', 'for', 'index', ',', 'start', ',', 'end', 'in', 'edge_info', ':', 'nodes', '=', 'all_edge_nodes', '[', 'index', ']', 'new_nodes', '=', '_curve_helpers', '.', 'specialize_curve', '(', 'nodes', ',', 'start', ',', 'end', ')', 'degree', '=', 'new_nodes', '.', 'shape', '[', '1', ']', '-', '1', 'edge', '=', '_curve_mod', '.', 'Curve', '(', 'new_nodes', ',', 'degree', ',', '_copy', '=', 'False', ')', 'edges', '.', 'append', '(', 'edge', ')', 'return', 'curved_polygon', '.', 'CurvedPolygon', '(', '*', 'edges', ',', 'metadata', '=', 'edge_info', ',', '_verify', '=', 'False', ')'] | Convert a description of edges into a curved polygon.
.. note::
This is a helper used only by :meth:`.Surface.intersect`.
Args:
edge_info (Tuple[Tuple[int, float, float], ...]): Information
describing each edge in the curved polygon by indicating which
surface / edge on the surface and then start and end parameters
along that edge. (See :func:`.ends_to_curve`.)
all_edge_nodes (Tuple[numpy.ndarray, ...]): The nodes of three edges
of the first surface being intersected followed by the nodes of
the three edges of the second.
Returns:
.CurvedPolygon: The intersection corresponding to ``edge_info``. | ['Convert', 'a', 'description', 'of', 'edges', 'into', 'a', 'curved', 'polygon', '.'] | train | https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/surface.py#L1100-L1128 |
5,185 | tcalmant/ipopo | pelix/framework.py | BundleContext.install_package | def install_package(self, path, recursive=False):
# type: (str, bool) -> tuple
"""
Installs all the modules found in the given package (directory).
It is a utility method working like
:meth:`~pelix.framework.BundleContext.install_visiting`, with a visitor
accepting every module found.
:param path: Path of the package (folder)
:param recursive: If True, installs the modules found in sub-directories
:return: A 2-tuple, with the list of installed bundles
(:class:`~pelix.framework.Bundle`) and the list of the names
of the modules which import failed.
:raise ValueError: The given path is invalid
"""
return self.__framework.install_package(path, recursive) | python | def install_package(self, path, recursive=False):
# type: (str, bool) -> tuple
"""
Installs all the modules found in the given package (directory).
It is a utility method working like
:meth:`~pelix.framework.BundleContext.install_visiting`, with a visitor
accepting every module found.
:param path: Path of the package (folder)
:param recursive: If True, installs the modules found in sub-directories
:return: A 2-tuple, with the list of installed bundles
(:class:`~pelix.framework.Bundle`) and the list of the names
of the modules which import failed.
:raise ValueError: The given path is invalid
"""
return self.__framework.install_package(path, recursive) | ['def', 'install_package', '(', 'self', ',', 'path', ',', 'recursive', '=', 'False', ')', ':', '# type: (str, bool) -> tuple', 'return', 'self', '.', '__framework', '.', 'install_package', '(', 'path', ',', 'recursive', ')'] | Installs all the modules found in the given package (directory).
It is a utility method working like
:meth:`~pelix.framework.BundleContext.install_visiting`, with a visitor
accepting every module found.
:param path: Path of the package (folder)
:param recursive: If True, installs the modules found in sub-directories
:return: A 2-tuple, with the list of installed bundles
(:class:`~pelix.framework.Bundle`) and the list of the names
of the modules which import failed.
:raise ValueError: The given path is invalid | ['Installs', 'all', 'the', 'modules', 'found', 'in', 'the', 'given', 'package', '(', 'directory', ')', '.', 'It', 'is', 'a', 'utility', 'method', 'working', 'like', ':', 'meth', ':', '~pelix', '.', 'framework', '.', 'BundleContext', '.', 'install_visiting', 'with', 'a', 'visitor', 'accepting', 'every', 'module', 'found', '.'] | train | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/framework.py#L1686-L1701 |
5,186 | SYNHAK/spiff | spiff/payment/models.py | LineDiscountItem.value | def value(self):
"""Returns the positive value to subtract from the total."""
originalPrice = self.lineItem.totalPrice
if self.flatRate == 0:
return originalPrice * self.percent
return self.flatRate | python | def value(self):
"""Returns the positive value to subtract from the total."""
originalPrice = self.lineItem.totalPrice
if self.flatRate == 0:
return originalPrice * self.percent
return self.flatRate | ['def', 'value', '(', 'self', ')', ':', 'originalPrice', '=', 'self', '.', 'lineItem', '.', 'totalPrice', 'if', 'self', '.', 'flatRate', '==', '0', ':', 'return', 'originalPrice', '*', 'self', '.', 'percent', 'return', 'self', '.', 'flatRate'] | Returns the positive value to subtract from the total. | ['Returns', 'the', 'positive', 'value', 'to', 'subtract', 'from', 'the', 'total', '.'] | train | https://github.com/SYNHAK/spiff/blob/5e5c731f67954ddc11d2fb75371cfcfd0fef49b7/spiff/payment/models.py#L142-L147 |
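The discount rule above restated with plain numbers (function and argument names here are illustrative, not part of the spiff model):

def discount_value(total_price, flat_rate, percent):
    # flat_rate == 0 means a percentage discount; otherwise the flat amount is used.
    if flat_rate == 0:
        return total_price * percent
    return flat_rate

print(discount_value(100.0, 0, 0.15))     # 15.0
print(discount_value(100.0, 20.0, 0.15))  # 20.0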
5,187 | noisyboiler/wampy | wampy/mixins.py | ParseUrlMixin.parse_url | def parse_url(self):
""" Parses a URL of the form:
- ws://host[:port][path]
- wss://host[:port][path]
- ws+unix:///path/to/my.socket
"""
self.scheme = None
self.resource = None
self.host = None
self.port = None
if self.url is None:
return
scheme, url = self.url.split(":", 1)
parsed = urlsplit(url, scheme="http")
if parsed.hostname:
self.host = parsed.hostname
elif '+unix' in scheme:
self.host = 'localhost'
else:
raise ValueError("Invalid hostname from: %s", self.url)
if parsed.port:
self.port = parsed.port
if scheme == "ws":
if not self.port:
self.port = 8080
elif scheme == "wss":
if not self.port:
self.port = 443
elif scheme in ('ws+unix', 'wss+unix'):
pass
else:
raise ValueError("Invalid scheme: %s" % scheme)
if parsed.path:
resource = parsed.path
else:
resource = "/"
if '+unix' in scheme:
self.unix_socket_path = resource
resource = '/'
if parsed.query:
resource += "?" + parsed.query
self.scheme = scheme
self.resource = resource | python | def parse_url(self):
""" Parses a URL of the form:
- ws://host[:port][path]
- wss://host[:port][path]
- ws+unix:///path/to/my.socket
"""
self.scheme = None
self.resource = None
self.host = None
self.port = None
if self.url is None:
return
scheme, url = self.url.split(":", 1)
parsed = urlsplit(url, scheme="http")
if parsed.hostname:
self.host = parsed.hostname
elif '+unix' in scheme:
self.host = 'localhost'
else:
raise ValueError("Invalid hostname from: %s", self.url)
if parsed.port:
self.port = parsed.port
if scheme == "ws":
if not self.port:
self.port = 8080
elif scheme == "wss":
if not self.port:
self.port = 443
elif scheme in ('ws+unix', 'wss+unix'):
pass
else:
raise ValueError("Invalid scheme: %s" % scheme)
if parsed.path:
resource = parsed.path
else:
resource = "/"
if '+unix' in scheme:
self.unix_socket_path = resource
resource = '/'
if parsed.query:
resource += "?" + parsed.query
self.scheme = scheme
self.resource = resource | ['def', 'parse_url', '(', 'self', ')', ':', 'self', '.', 'scheme', '=', 'None', 'self', '.', 'resource', '=', 'None', 'self', '.', 'host', '=', 'None', 'self', '.', 'port', '=', 'None', 'if', 'self', '.', 'url', 'is', 'None', ':', 'return', 'scheme', ',', 'url', '=', 'self', '.', 'url', '.', 'split', '(', '":"', ',', '1', ')', 'parsed', '=', 'urlsplit', '(', 'url', ',', 'scheme', '=', '"http"', ')', 'if', 'parsed', '.', 'hostname', ':', 'self', '.', 'host', '=', 'parsed', '.', 'hostname', 'elif', "'+unix'", 'in', 'scheme', ':', 'self', '.', 'host', '=', "'localhost'", 'else', ':', 'raise', 'ValueError', '(', '"Invalid hostname from: %s"', ',', 'self', '.', 'url', ')', 'if', 'parsed', '.', 'port', ':', 'self', '.', 'port', '=', 'parsed', '.', 'port', 'if', 'scheme', '==', '"ws"', ':', 'if', 'not', 'self', '.', 'port', ':', 'self', '.', 'port', '=', '8080', 'elif', 'scheme', '==', '"wss"', ':', 'if', 'not', 'self', '.', 'port', ':', 'self', '.', 'port', '=', '443', 'elif', 'scheme', 'in', '(', "'ws+unix'", ',', "'wss+unix'", ')', ':', 'pass', 'else', ':', 'raise', 'ValueError', '(', '"Invalid scheme: %s"', '%', 'scheme', ')', 'if', 'parsed', '.', 'path', ':', 'resource', '=', 'parsed', '.', 'path', 'else', ':', 'resource', '=', '"/"', 'if', "'+unix'", 'in', 'scheme', ':', 'self', '.', 'unix_socket_path', '=', 'resource', 'resource', '=', "'/'", 'if', 'parsed', '.', 'query', ':', 'resource', '+=', '"?"', '+', 'parsed', '.', 'query', 'self', '.', 'scheme', '=', 'scheme', 'self', '.', 'resource', '=', 'resource'] | Parses a URL of the form:
- ws://host[:port][path]
- wss://host[:port][path]
- ws+unix:///path/to/my.socket | ['Parses', 'a', 'URL', 'of', 'the', 'form', ':'] | train | https://github.com/noisyboiler/wampy/blob/7c7ef246fec1b2bf3ec3a0e24c85c42fdd99d4bf/wampy/mixins.py#L12-L64 |
5,188 | blue-yonder/tsfresh | tsfresh/feature_extraction/feature_calculators.py | _get_length_sequences_where | def _get_length_sequences_where(x):
"""
This method calculates the length of all sub-sequences where the array x is either True or 1.
Examples
--------
>>> x = [0,1,0,0,1,1,1,0,0,1,0,1,1]
>>> _get_length_sequences_where(x)
>>> [1, 3, 1, 2]
>>> x = [0,True,0,0,True,True,True,0,0,True,0,True,True]
>>> _get_length_sequences_where(x)
>>> [1, 3, 1, 2]
>>> x = [0,True,0,0,1,True,1,0,0,True,0,1,True]
>>> _get_length_sequences_where(x)
>>> [1, 3, 1, 2]
:param x: An iterable containing only 1, True, 0 and False values
:return: A list with the length of all sub-sequences where the array is either True or False. If no ones or Trues
contained, the list [0] is returned.
"""
if len(x) == 0:
return [0]
else:
res = [len(list(group)) for value, group in itertools.groupby(x) if value == 1]
return res if len(res) > 0 else [0] | python | def _get_length_sequences_where(x):
"""
This method calculates the length of all sub-sequences where the array x is either True or 1.
Examples
--------
>>> x = [0,1,0,0,1,1,1,0,0,1,0,1,1]
>>> _get_length_sequences_where(x)
>>> [1, 3, 1, 2]
>>> x = [0,True,0,0,True,True,True,0,0,True,0,True,True]
>>> _get_length_sequences_where(x)
>>> [1, 3, 1, 2]
>>> x = [0,True,0,0,1,True,1,0,0,True,0,1,True]
>>> _get_length_sequences_where(x)
>>> [1, 3, 1, 2]
:param x: An iterable containing only 1, True, 0 and False values
:return: A list with the length of all sub-sequences where the array is either True or False. If no ones or Trues
contained, the list [0] is returned.
"""
if len(x) == 0:
return [0]
else:
res = [len(list(group)) for value, group in itertools.groupby(x) if value == 1]
return res if len(res) > 0 else [0] | ['def', '_get_length_sequences_where', '(', 'x', ')', ':', 'if', 'len', '(', 'x', ')', '==', '0', ':', 'return', '[', '0', ']', 'else', ':', 'res', '=', '[', 'len', '(', 'list', '(', 'group', ')', ')', 'for', 'value', ',', 'group', 'in', 'itertools', '.', 'groupby', '(', 'x', ')', 'if', 'value', '==', '1', ']', 'return', 'res', 'if', 'len', '(', 'res', ')', '>', '0', 'else', '[', '0', ']'] | This method calculates the length of all sub-sequences where the array x is either True or 1.
Examples
--------
>>> x = [0,1,0,0,1,1,1,0,0,1,0,1,1]
>>> _get_length_sequences_where(x)
>>> [1, 3, 1, 2]
>>> x = [0,True,0,0,True,True,True,0,0,True,0,True,True]
>>> _get_length_sequences_where(x)
>>> [1, 3, 1, 2]
>>> x = [0,True,0,0,1,True,1,0,0,True,0,1,True]
>>> _get_length_sequences_where(x)
>>> [1, 3, 1, 2]
:param x: An iterable containing only 1, True, 0 and False values
:return: A list with the length of all sub-sequences where the array is either True or False. If no ones or Trues
contained, the list [0] is returned. | ['This', 'method', 'calculates', 'the', 'length', 'of', 'all', 'sub', '-', 'sequences', 'where', 'the', 'array', 'x', 'is', 'either', 'True', 'or', '1', '.'] | train | https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_extraction/feature_calculators.py#L81-L107 |
5,189 | openstack/networking-cisco | networking_cisco/plugins/cisco/db/l3/ha_db.py | HA_db_mixin._teardown_redundancy_router_gw_connectivity | def _teardown_redundancy_router_gw_connectivity(self, context, router,
router_db,
plugging_driver):
"""To be called in update_router() if the router gateway is to change
BEFORE router has been updated in DB .
"""
if not router[ha.ENABLED]:
# No HA currently enabled so we're done
return
e_context = context.elevated()
# since gateway is about to change the ha group for the current gateway
# is removed, a new one will be created later
self._delete_ha_group(e_context, router_db.gw_port_id)
# teardown connectivity for the gw ports on the redundancy routers
# and remove those ports as new ones will be created later
rr_ids = []
for r_b_db in router_db.redundancy_bindings:
if plugging_driver is not None:
plugging_driver.teardown_logical_port_connectivity(
e_context, r_b_db.redundancy_router.gw_port,
r_b_db.redundancy_router.hosting_info.hosting_device_id)
self._update_router_no_notify(
e_context, r_b_db.redundancy_router_id,
{'router': {EXTERNAL_GW_INFO: None, ha.ENABLED: False}})
rr_ids.append(r_b_db.redundancy_router_id)
self.notify_routers_updated(e_context, rr_ids) | python | def _teardown_redundancy_router_gw_connectivity(self, context, router,
router_db,
plugging_driver):
"""To be called in update_router() if the router gateway is to change
BEFORE router has been updated in DB .
"""
if not router[ha.ENABLED]:
# No HA currently enabled so we're done
return
e_context = context.elevated()
# since gateway is about to change the ha group for the current gateway
# is removed, a new one will be created later
self._delete_ha_group(e_context, router_db.gw_port_id)
# teardown connectivity for the gw ports on the redundancy routers
# and remove those ports as new ones will be created later
rr_ids = []
for r_b_db in router_db.redundancy_bindings:
if plugging_driver is not None:
plugging_driver.teardown_logical_port_connectivity(
e_context, r_b_db.redundancy_router.gw_port,
r_b_db.redundancy_router.hosting_info.hosting_device_id)
self._update_router_no_notify(
e_context, r_b_db.redundancy_router_id,
{'router': {EXTERNAL_GW_INFO: None, ha.ENABLED: False}})
rr_ids.append(r_b_db.redundancy_router_id)
self.notify_routers_updated(e_context, rr_ids) | ['def', '_teardown_redundancy_router_gw_connectivity', '(', 'self', ',', 'context', ',', 'router', ',', 'router_db', ',', 'plugging_driver', ')', ':', 'if', 'not', 'router', '[', 'ha', '.', 'ENABLED', ']', ':', "# No HA currently enabled so we're done", 'return', 'e_context', '=', 'context', '.', 'elevated', '(', ')', '# since gateway is about to change the ha group for the current gateway', '# is removed, a new one will be created later', 'self', '.', '_delete_ha_group', '(', 'e_context', ',', 'router_db', '.', 'gw_port_id', ')', '# teardown connectivity for the gw ports on the redundancy routers', '# and remove those ports as new ones will be created later', 'rr_ids', '=', '[', ']', 'for', 'r_b_db', 'in', 'router_db', '.', 'redundancy_bindings', ':', 'if', 'plugging_driver', 'is', 'not', 'None', ':', 'plugging_driver', '.', 'teardown_logical_port_connectivity', '(', 'e_context', ',', 'r_b_db', '.', 'redundancy_router', '.', 'gw_port', ',', 'r_b_db', '.', 'redundancy_router', '.', 'hosting_info', '.', 'hosting_device_id', ')', 'self', '.', '_update_router_no_notify', '(', 'e_context', ',', 'r_b_db', '.', 'redundancy_router_id', ',', '{', "'router'", ':', '{', 'EXTERNAL_GW_INFO', ':', 'None', ',', 'ha', '.', 'ENABLED', ':', 'False', '}', '}', ')', 'rr_ids', '.', 'append', '(', 'r_b_db', '.', 'redundancy_router_id', ')', 'self', '.', 'notify_routers_updated', '(', 'e_context', ',', 'rr_ids', ')'] | To be called in update_router() if the router gateway is to change
BEFORE router has been updated in DB . | ['To', 'be', 'called', 'in', 'update_router', '()', 'if', 'the', 'router', 'gateway', 'is', 'to', 'change', 'BEFORE', 'router', 'has', 'been', 'updated', 'in', 'DB', '.'] | train | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/db/l3/ha_db.py#L309-L334 |
5,190 | PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | cred_def_id | def cred_def_id(issuer_did: str, schema_seq_no: int, protocol: Protocol = None) -> str:
"""
Return credential definition identifier for input issuer DID and schema sequence number.
Implementation passes to NodePool Protocol.
:param issuer_did: DID of credential definition issuer
:param schema_seq_no: schema sequence number
:param protocol: indy protocol version
:return: credential definition identifier
"""
return (protocol or Protocol.DEFAULT).cred_def_id(issuer_did, schema_seq_no) | python | def cred_def_id(issuer_did: str, schema_seq_no: int, protocol: Protocol = None) -> str:
"""
Return credential definition identifier for input issuer DID and schema sequence number.
Implementation passes to NodePool Protocol.
:param issuer_did: DID of credential definition issuer
:param schema_seq_no: schema sequence number
:param protocol: indy protocol version
:return: credential definition identifier
"""
return (protocol or Protocol.DEFAULT).cred_def_id(issuer_did, schema_seq_no) | ['def', 'cred_def_id', '(', 'issuer_did', ':', 'str', ',', 'schema_seq_no', ':', 'int', ',', 'protocol', ':', 'Protocol', '=', 'None', ')', '->', 'str', ':', 'return', '(', 'protocol', 'or', 'Protocol', '.', 'DEFAULT', ')', '.', 'cred_def_id', '(', 'issuer_did', ',', 'schema_seq_no', ')'] | Return credential definition identifier for input issuer DID and schema sequence number.
Implementation passes to NodePool Protocol.
:param issuer_did: DID of credential definition issuer
:param schema_seq_no: schema sequence number
:param protocol: indy protocol version
:return: credential definition identifier | ['Return', 'credential', 'definition', 'identifier', 'for', 'input', 'issuer', 'DID', 'and', 'schema', 'sequence', 'number', '.'] | train | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L120-L132 |
5,191 | saltstack/salt | salt/states/junos.py | install_os | def install_os(name, **kwargs):
'''
Installs the given image on the device. After the installation is complete
the device is rebooted, if reboot=True is given as a keyworded argument.
.. code-block:: yaml
salt://images/junos_image.tgz:
junos:
- install_os
- timeout: 100
- reboot: True
Parameters:
Required
* path:
Path where the image file is present on the pro\
xy minion.
Optional
* kwargs: keyworded arguments to be given such as timeout, reboot etc
* timeout:
Set NETCONF RPC timeout. Can be used to RPCs which
take a while to execute. (default = 30 seconds)
* reboot:
Whether to reboot after installation (default = False)
* no_copy:
When True the software package will not be SCP’d to the device. \
(default = False)
'''
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
ret['changes'] = __salt__['junos.install_os'](name, **kwargs)
return ret | python | def install_os(name, **kwargs):
'''
Installs the given image on the device. After the installation is complete
the device is rebooted, if reboot=True is given as a keyworded argument.
.. code-block:: yaml
salt://images/junos_image.tgz:
junos:
- install_os
- timeout: 100
- reboot: True
Parameters:
Required
* path:
Path where the image file is present on the pro\
xy minion.
Optional
* kwargs: keyworded arguments to be given such as timeout, reboot etc
* timeout:
Set NETCONF RPC timeout. Can be used to RPCs which
take a while to execute. (default = 30 seconds)
* reboot:
Whether to reboot after installation (default = False)
* no_copy:
When True the software package will not be SCP’d to the device. \
(default = False)
'''
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
ret['changes'] = __salt__['junos.install_os'](name, **kwargs)
return ret | ['def', 'install_os', '(', 'name', ',', '*', '*', 'kwargs', ')', ':', 'ret', '=', '{', "'name'", ':', 'name', ',', "'changes'", ':', '{', '}', ',', "'result'", ':', 'True', ',', "'comment'", ':', "''", '}', 'ret', '[', "'changes'", ']', '=', '__salt__', '[', "'junos.install_os'", ']', '(', 'name', ',', '*', '*', 'kwargs', ')', 'return', 'ret'] | Installs the given image on the device. After the installation is complete
the device is rebooted, if reboot=True is given as a keyworded argument.
.. code-block:: yaml
salt://images/junos_image.tgz:
junos:
- install_os
- timeout: 100
- reboot: True
Parameters:
Required
* path:
Path where the image file is present on the pro\
xy minion.
Optional
* kwargs: keyworded arguments to be given such as timeout, reboot etc
* timeout:
Set NETCONF RPC timeout. Can be used to RPCs which
take a while to execute. (default = 30 seconds)
* reboot:
Whether to reboot after installation (default = False)
* no_copy:
When True the software package will not be SCP’d to the device. \
(default = False) | ['Installs', 'the', 'given', 'image', 'on', 'the', 'device', '.', 'After', 'the', 'installation', 'is', 'complete', 'the', 'device', 'is', 'rebooted', 'if', 'reboot', '=', 'True', 'is', 'given', 'as', 'a', 'keyworded', 'argument', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/junos.py#L363-L395 |
5,192 | mwouts/jupytext | jupytext/cell_reader.py | DoublePercentScriptCellReader.metadata_and_language_from_option_line | def metadata_and_language_from_option_line(self, line):
"""Parse code options on the given line. When a start of a code cell
is found, self.metadata is set to a dictionary."""
if self.start_code_re.match(line):
self.language, self.metadata = self.options_to_metadata(line[line.find('%%') + 2:])
elif self.alternative_start_code_re.match(line):
self.metadata = {} | python | def metadata_and_language_from_option_line(self, line):
"""Parse code options on the given line. When a start of a code cell
is found, self.metadata is set to a dictionary."""
if self.start_code_re.match(line):
self.language, self.metadata = self.options_to_metadata(line[line.find('%%') + 2:])
elif self.alternative_start_code_re.match(line):
self.metadata = {} | ['def', 'metadata_and_language_from_option_line', '(', 'self', ',', 'line', ')', ':', 'if', 'self', '.', 'start_code_re', '.', 'match', '(', 'line', ')', ':', 'self', '.', 'language', ',', 'self', '.', 'metadata', '=', 'self', '.', 'options_to_metadata', '(', 'line', '[', 'line', '.', 'find', '(', "'%%'", ')', '+', '2', ':', ']', ')', 'elif', 'self', '.', 'alternative_start_code_re', '.', 'match', '(', 'line', ')', ':', 'self', '.', 'metadata', '=', '{', '}'] | Parse code options on the given line. When a start of a code cell
is found, self.metadata is set to a dictionary. | ['Parse', 'code', 'options', 'on', 'the', 'given', 'line', '.', 'When', 'a', 'start', 'of', 'a', 'code', 'cell', 'is', 'found', 'self', '.', 'metadata', 'is', 'set', 'to', 'a', 'dictionary', '.'] | train | https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/cell_reader.py#L572-L578 |
5,193 | cltk/cltk | cltk/phonology/syllabify.py | get_onsets | def get_onsets(text, vowels="aeiou", threshold=0.0002):
"""
Source: Resonances in Middle High German: New Methodologies in Prosody,
2017, C. L. Hench
:param text: str list: text to be analysed
:param vowels: str: valid vowels constituting the syllable
:param threshold: minimum frequency count for valid onset, C. Hench noted
that the algorithm produces the best result for an untagged wordset of MHG,
when retaining onsets which appear in at least 0.02% of the words
Example:
Let's test it on the opening lines of Nibelungenlied
>>> text = ['uns', 'ist', 'in', 'alten', 'mæren', 'wunders', 'vil', 'geseit', 'von', 'helden', 'lobebæren', 'von', 'grôzer', 'arebeit', 'von', 'fröuden', 'hôchgezîten', 'von', 'weinen', 'und', 'von', 'klagen', 'von', 'küener', 'recken', 'strîten', 'muget', 'ir', 'nu', 'wunder', 'hœren', 'sagen']
>>> vowels = "aeiouæœôîöü"
>>> get_onsets(text, vowels=vowels)
['lt', 'm', 'r', 'w', 'nd', 'v', 'g', 's', 'h', 'ld', 'l', 'b', 'gr', 'z', 'fr', 'd', 'chg', 't', 'n', 'kl', 'k', 'ck', 'str']
Of course, this is an insignificant sample, but we could try and see
how modifying the threshold affects the returned onset:
>>> get_onsets(text, threshold = 0.05, vowels=vowels)
['m', 'r', 'w', 'nd', 'v', 'g', 's', 'h', 'b', 'z', 't', 'n']
"""
onset_dict = defaultdict(lambda: 0)
n = len(text)
for word in text:
onset = ''
candidates = []
for l in word:
if l not in vowels:
onset += l
else:
if onset != '':
candidates.append(onset)
onset = ''
for c in candidates:
onset_dict[c] += 1
return [onset for onset, i in onset_dict.items() if i/n > threshold] | python | def get_onsets(text, vowels="aeiou", threshold=0.0002):
"""
Source: Resonances in Middle High German: New Methodologies in Prosody,
2017, C. L. Hench
:param text: str list: text to be analysed
:param vowels: str: valid vowels constituting the syllable
:param threshold: minimum frequency count for valid onset, C. Hench noted
that the algorithm produces the best result for an untagged wordset of MHG,
when retaining onsets which appear in at least 0.02% of the words
Example:
Let's test it on the opening lines of Nibelungenlied
>>> text = ['uns', 'ist', 'in', 'alten', 'mæren', 'wunders', 'vil', 'geseit', 'von', 'helden', 'lobebæren', 'von', 'grôzer', 'arebeit', 'von', 'fröuden', 'hôchgezîten', 'von', 'weinen', 'und', 'von', 'klagen', 'von', 'küener', 'recken', 'strîten', 'muget', 'ir', 'nu', 'wunder', 'hœren', 'sagen']
>>> vowels = "aeiouæœôîöü"
>>> get_onsets(text, vowels=vowels)
['lt', 'm', 'r', 'w', 'nd', 'v', 'g', 's', 'h', 'ld', 'l', 'b', 'gr', 'z', 'fr', 'd', 'chg', 't', 'n', 'kl', 'k', 'ck', 'str']
Of course, this is an insignificant sample, but we could try and see
how modifying the threshold affects the returned onset:
>>> get_onsets(text, threshold = 0.05, vowels=vowels)
['m', 'r', 'w', 'nd', 'v', 'g', 's', 'h', 'b', 'z', 't', 'n']
"""
onset_dict = defaultdict(lambda: 0)
n = len(text)
for word in text:
onset = ''
candidates = []
for l in word:
if l not in vowels:
onset += l
else:
if onset != '':
candidates.append(onset)
onset = ''
for c in candidates:
onset_dict[c] += 1
return [onset for onset, i in onset_dict.items() if i/n > threshold] | ['def', 'get_onsets', '(', 'text', ',', 'vowels', '=', '"aeiou"', ',', 'threshold', '=', '0.0002', ')', ':', 'onset_dict', '=', 'defaultdict', '(', 'lambda', ':', '0', ')', 'n', '=', 'len', '(', 'text', ')', 'for', 'word', 'in', 'text', ':', 'onset', '=', "''", 'candidates', '=', '[', ']', 'for', 'l', 'in', 'word', ':', 'if', 'l', 'not', 'in', 'vowels', ':', 'onset', '+=', 'l', 'else', ':', 'if', 'onset', '!=', "''", ':', 'candidates', '.', 'append', '(', 'onset', ')', 'onset', '=', "''", 'for', 'c', 'in', 'candidates', ':', 'onset_dict', '[', 'c', ']', '+=', '1', 'return', '[', 'onset', 'for', 'onset', ',', 'i', 'in', 'onset_dict', '.', 'items', '(', ')', 'if', 'i', '/', 'n', '>', 'threshold', ']'] | Source: Resonances in Middle High German: New Methodologies in Prosody,
2017, C. L. Hench
:param text: str list: text to be analysed
:param vowels: str: valid vowels constituting the syllable
:param threshold: minimum frequency count for valid onset, C. Hench noted
that the algorithm produces the best result for an untagged wordset of MHG,
when retaining onsets which appear in at least 0.02% of the words
Example:
Let's test it on the opening lines of Nibelungenlied
>>> text = ['uns', 'ist', 'in', 'alten', 'mæren', 'wunders', 'vil', 'geseit', 'von', 'helden', 'lobebæren', 'von', 'grôzer', 'arebeit', 'von', 'fröuden', 'hôchgezîten', 'von', 'weinen', 'und', 'von', 'klagen', 'von', 'küener', 'recken', 'strîten', 'muget', 'ir', 'nu', 'wunder', 'hœren', 'sagen']
>>> vowels = "aeiouæœôîöü"
>>> get_onsets(text, vowels=vowels)
['lt', 'm', 'r', 'w', 'nd', 'v', 'g', 's', 'h', 'ld', 'l', 'b', 'gr', 'z', 'fr', 'd', 'chg', 't', 'n', 'kl', 'k', 'ck', 'str']
Of course, this is an insignificant sample, but we could try and see
how modifying the threshold affects the returned onset:
>>> get_onsets(text, threshold = 0.05, vowels=vowels)
['m', 'r', 'w', 'nd', 'v', 'g', 's', 'h', 'b', 'z', 't', 'n'] | ['Source', ':', 'Resonances', 'in', 'Middle', 'High', 'German', ':', 'New', 'Methodologies', 'in', 'Prosody', '2017', 'C', '.', 'L', '.', 'Hench'] | train | https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/phonology/syllabify.py#L23-L72 |
5,194 | CamDavidsonPilon/lifelines | lifelines/utils/concordance.py | concordance_index | def concordance_index(event_times, predicted_scores, event_observed=None):
"""
Calculates the concordance index (C-index) between two series
of event times. The first is the real survival times from
the experimental data, and the other is the predicted survival
times from a model of some kind.
The c-index is the average of how often a model says X is greater than Y when, in the observed
data, X is indeed greater than Y. The c-index also handles how to handle censored values
(obviously, if Y is censored, it's hard to know if X is truly greater than Y).
The concordance index is a value between 0 and 1 where:
- 0.5 is the expected result from random predictions,
- 1.0 is perfect concordance and,
- 0.0 is perfect anti-concordance (multiply predictions with -1 to get 1.0)
Parameters
----------
event_times: iterable
a length-n iterable of observed survival times.
predicted_scores: iterable
a length-n iterable of predicted scores - these could be survival times, or hazards, etc. See https://stats.stackexchange.com/questions/352183/use-median-survival-time-to-calculate-cph-c-statistic/352435#352435
event_observed: iterable, optional
a length-n iterable censorship flags, 1 if observed, 0 if not. Default None assumes all observed.
Returns
-------
c-index: float
a value between 0 and 1.
References
-----------
Harrell FE, Lee KL, Mark DB. Multivariable prognostic models: issues in
developing models, evaluating assumptions and adequacy, and measuring and
reducing errors. Statistics in Medicine 1996;15(4):361-87.
Examples
--------
>>> from lifelines.utils import concordance_index
>>> cph = CoxPHFitter().fit(df, 'T', 'E')
>>> concordance_index(df['T'], -cph.predict_partial_hazard(df), df['E'])
"""
event_times = np.asarray(event_times, dtype=float)
predicted_scores = np.asarray(predicted_scores, dtype=float)
# Allow for (n, 1) or (1, n) arrays
if event_times.ndim == 2 and (event_times.shape[0] == 1 or event_times.shape[1] == 1):
# Flatten array
event_times = event_times.ravel()
# Allow for (n, 1) or (1, n) arrays
if predicted_scores.ndim == 2 and (predicted_scores.shape[0] == 1 or predicted_scores.shape[1] == 1):
# Flatten array
predicted_scores = predicted_scores.ravel()
if event_times.shape != predicted_scores.shape:
raise ValueError("Event times and predictions must have the same shape")
if event_times.ndim != 1:
raise ValueError("Event times can only be 1-dimensional: (n,)")
if event_observed is None:
event_observed = np.ones(event_times.shape[0], dtype=float)
else:
event_observed = np.asarray(event_observed, dtype=float).ravel()
if event_observed.shape != event_times.shape:
raise ValueError("Observed events must be 1-dimensional of same length as event times")
num_correct, num_tied, num_pairs = _concordance_summary_statistics(event_times, predicted_scores, event_observed)
return _concordance_ratio(num_correct, num_tied, num_pairs) | python | def concordance_index(event_times, predicted_scores, event_observed=None):
"""
Calculates the concordance index (C-index) between two series
of event times. The first is the real survival times from
the experimental data, and the other is the predicted survival
times from a model of some kind.
The c-index is the average of how often a model says X is greater than Y when, in the observed
data, X is indeed greater than Y. The c-index also handles how to handle censored values
(obviously, if Y is censored, it's hard to know if X is truly greater than Y).
The concordance index is a value between 0 and 1 where:
- 0.5 is the expected result from random predictions,
- 1.0 is perfect concordance and,
- 0.0 is perfect anti-concordance (multiply predictions with -1 to get 1.0)
Parameters
----------
event_times: iterable
a length-n iterable of observed survival times.
predicted_scores: iterable
a length-n iterable of predicted scores - these could be survival times, or hazards, etc. See https://stats.stackexchange.com/questions/352183/use-median-survival-time-to-calculate-cph-c-statistic/352435#352435
event_observed: iterable, optional
a length-n iterable censorship flags, 1 if observed, 0 if not. Default None assumes all observed.
Returns
-------
c-index: float
a value between 0 and 1.
References
-----------
Harrell FE, Lee KL, Mark DB. Multivariable prognostic models: issues in
developing models, evaluating assumptions and adequacy, and measuring and
reducing errors. Statistics in Medicine 1996;15(4):361-87.
Examples
--------
>>> from lifelines.utils import concordance_index
>>> cph = CoxPHFitter().fit(df, 'T', 'E')
>>> concordance_index(df['T'], -cph.predict_partial_hazard(df), df['E'])
"""
event_times = np.asarray(event_times, dtype=float)
predicted_scores = np.asarray(predicted_scores, dtype=float)
# Allow for (n, 1) or (1, n) arrays
if event_times.ndim == 2 and (event_times.shape[0] == 1 or event_times.shape[1] == 1):
# Flatten array
event_times = event_times.ravel()
# Allow for (n, 1) or (1, n) arrays
if predicted_scores.ndim == 2 and (predicted_scores.shape[0] == 1 or predicted_scores.shape[1] == 1):
# Flatten array
predicted_scores = predicted_scores.ravel()
if event_times.shape != predicted_scores.shape:
raise ValueError("Event times and predictions must have the same shape")
if event_times.ndim != 1:
raise ValueError("Event times can only be 1-dimensional: (n,)")
if event_observed is None:
event_observed = np.ones(event_times.shape[0], dtype=float)
else:
event_observed = np.asarray(event_observed, dtype=float).ravel()
if event_observed.shape != event_times.shape:
raise ValueError("Observed events must be 1-dimensional of same length as event times")
num_correct, num_tied, num_pairs = _concordance_summary_statistics(event_times, predicted_scores, event_observed)
return _concordance_ratio(num_correct, num_tied, num_pairs) | ['def', 'concordance_index', '(', 'event_times', ',', 'predicted_scores', ',', 'event_observed', '=', 'None', ')', ':', 'event_times', '=', 'np', '.', 'asarray', '(', 'event_times', ',', 'dtype', '=', 'float', ')', 'predicted_scores', '=', 'np', '.', 'asarray', '(', 'predicted_scores', ',', 'dtype', '=', 'float', ')', '# Allow for (n, 1) or (1, n) arrays', 'if', 'event_times', '.', 'ndim', '==', '2', 'and', '(', 'event_times', '.', 'shape', '[', '0', ']', '==', '1', 'or', 'event_times', '.', 'shape', '[', '1', ']', '==', '1', ')', ':', '# Flatten array', 'event_times', '=', 'event_times', '.', 'ravel', '(', ')', '# Allow for (n, 1) or (1, n) arrays', 'if', 'predicted_scores', '.', 'ndim', '==', '2', 'and', '(', 'predicted_scores', '.', 'shape', '[', '0', ']', '==', '1', 'or', 'predicted_scores', '.', 'shape', '[', '1', ']', '==', '1', ')', ':', '# Flatten array', 'predicted_scores', '=', 'predicted_scores', '.', 'ravel', '(', ')', 'if', 'event_times', '.', 'shape', '!=', 'predicted_scores', '.', 'shape', ':', 'raise', 'ValueError', '(', '"Event times and predictions must have the same shape"', ')', 'if', 'event_times', '.', 'ndim', '!=', '1', ':', 'raise', 'ValueError', '(', '"Event times can only be 1-dimensional: (n,)"', ')', 'if', 'event_observed', 'is', 'None', ':', 'event_observed', '=', 'np', '.', 'ones', '(', 'event_times', '.', 'shape', '[', '0', ']', ',', 'dtype', '=', 'float', ')', 'else', ':', 'event_observed', '=', 'np', '.', 'asarray', '(', 'event_observed', ',', 'dtype', '=', 'float', ')', '.', 'ravel', '(', ')', 'if', 'event_observed', '.', 'shape', '!=', 'event_times', '.', 'shape', ':', 'raise', 'ValueError', '(', '"Observed events must be 1-dimensional of same length as event times"', ')', 'num_correct', ',', 'num_tied', ',', 'num_pairs', '=', '_concordance_summary_statistics', '(', 'event_times', ',', 'predicted_scores', ',', 'event_observed', ')', 'return', '_concordance_ratio', '(', 'num_correct', ',', 'num_tied', ',', 'num_pairs', ')'] | Calculates the concordance index (C-index) between two series
of event times. The first is the real survival times from
the experimental data, and the other is the predicted survival
times from a model of some kind.
The c-index is the average of how often a model says X is greater than Y when, in the observed
data, X is indeed greater than Y. The c-index also handles how to handle censored values
(obviously, if Y is censored, it's hard to know if X is truly greater than Y).
The concordance index is a value between 0 and 1 where:
- 0.5 is the expected result from random predictions,
- 1.0 is perfect concordance and,
- 0.0 is perfect anti-concordance (multiply predictions with -1 to get 1.0)
Parameters
----------
event_times: iterable
a length-n iterable of observed survival times.
predicted_scores: iterable
a length-n iterable of predicted scores - these could be survival times, or hazards, etc. See https://stats.stackexchange.com/questions/352183/use-median-survival-time-to-calculate-cph-c-statistic/352435#352435
event_observed: iterable, optional
a length-n iterable censorship flags, 1 if observed, 0 if not. Default None assumes all observed.
Returns
-------
c-index: float
a value between 0 and 1.
References
-----------
Harrell FE, Lee KL, Mark DB. Multivariable prognostic models: issues in
developing models, evaluating assumptions and adequacy, and measuring and
reducing errors. Statistics in Medicine 1996;15(4):361-87.
Examples
--------
>>> from lifelines.utils import concordance_index
>>> cph = CoxPHFitter().fit(df, 'T', 'E')
>>> concordance_index(df['T'], -cph.predict_partial_hazard(df), df['E']) | ['Calculates', 'the', 'concordance', 'index', '(', 'C', '-', 'index', ')', 'between', 'two', 'series', 'of', 'event', 'times', '.', 'The', 'first', 'is', 'the', 'real', 'survival', 'times', 'from', 'the', 'experimental', 'data', 'and', 'the', 'other', 'is', 'the', 'predicted', 'survival', 'times', 'from', 'a', 'model', 'of', 'some', 'kind', '.'] | train | https://github.com/CamDavidsonPilon/lifelines/blob/bdf6be6f1d10eea4c46365ee0ee6a47d8c30edf8/lifelines/utils/concordance.py#L7-L79 |
5,195 | bcbio/bcbio-nextgen | bcbio/pipeline/sample.py | _merge_out_from_infiles | def _merge_out_from_infiles(in_files):
"""Generate output merged file name from set of input files.
Handles non-shared filesystems where we don't know output path when setting
up split parts.
"""
fname = os.path.commonprefix([os.path.basename(f) for f in in_files])
while fname.endswith(("-", "_", ".")):
fname = fname[:-1]
ext = os.path.splitext(in_files[0])[-1]
dirname = os.path.dirname(in_files[0])
while dirname.endswith(("split", "merge")):
dirname = os.path.dirname(dirname)
return os.path.join(dirname, "%s%s" % (fname, ext)) | python | def _merge_out_from_infiles(in_files):
"""Generate output merged file name from set of input files.
Handles non-shared filesystems where we don't know output path when setting
up split parts.
"""
fname = os.path.commonprefix([os.path.basename(f) for f in in_files])
while fname.endswith(("-", "_", ".")):
fname = fname[:-1]
ext = os.path.splitext(in_files[0])[-1]
dirname = os.path.dirname(in_files[0])
while dirname.endswith(("split", "merge")):
dirname = os.path.dirname(dirname)
return os.path.join(dirname, "%s%s" % (fname, ext)) | ['def', '_merge_out_from_infiles', '(', 'in_files', ')', ':', 'fname', '=', 'os', '.', 'path', '.', 'commonprefix', '(', '[', 'os', '.', 'path', '.', 'basename', '(', 'f', ')', 'for', 'f', 'in', 'in_files', ']', ')', 'while', 'fname', '.', 'endswith', '(', '(', '"-"', ',', '"_"', ',', '"."', ')', ')', ':', 'fname', '=', 'fname', '[', ':', '-', '1', ']', 'ext', '=', 'os', '.', 'path', '.', 'splitext', '(', 'in_files', '[', '0', ']', ')', '[', '-', '1', ']', 'dirname', '=', 'os', '.', 'path', '.', 'dirname', '(', 'in_files', '[', '0', ']', ')', 'while', 'dirname', '.', 'endswith', '(', '(', '"split"', ',', '"merge"', ')', ')', ':', 'dirname', '=', 'os', '.', 'path', '.', 'dirname', '(', 'dirname', ')', 'return', 'os', '.', 'path', '.', 'join', '(', 'dirname', ',', '"%s%s"', '%', '(', 'fname', ',', 'ext', ')', ')'] | Generate output merged file name from set of input files.
Handles non-shared filesystems where we don't know output path when setting
up split parts. | ['Generate', 'output', 'merged', 'file', 'name', 'from', 'set', 'of', 'input', 'files', '.'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/sample.py#L272-L285 |
5,196 | GoogleCloudPlatform/appengine-pipelines | python/src/pipeline/pipeline.py | Pipeline.complete | def complete(self, default_output=None):
"""Marks this asynchronous Pipeline as complete.
Args:
default_output: What value the 'default' output slot should be assigned.
Raises:
UnexpectedPipelineError if the slot no longer exists or this method was
called for a pipeline that is not async.
"""
# TODO: Enforce that all outputs expected by this async pipeline were
# filled before this complete() function was called. May required all
# async functions to declare their outputs upfront.
if not self.async:
raise UnexpectedPipelineError(
'May only call complete() method for asynchronous pipelines.')
self._context.fill_slot(
self._pipeline_key, self.outputs.default, default_output) | python | def complete(self, default_output=None):
"""Marks this asynchronous Pipeline as complete.
Args:
default_output: What value the 'default' output slot should be assigned.
Raises:
UnexpectedPipelineError if the slot no longer exists or this method was
called for a pipeline that is not async.
"""
# TODO: Enforce that all outputs expected by this async pipeline were
# filled before this complete() function was called. May required all
# async functions to declare their outputs upfront.
if not self.async:
raise UnexpectedPipelineError(
'May only call complete() method for asynchronous pipelines.')
self._context.fill_slot(
self._pipeline_key, self.outputs.default, default_output) | ['def', 'complete', '(', 'self', ',', 'default_output', '=', 'None', ')', ':', '# TODO: Enforce that all outputs expected by this async pipeline were', '# filled before this complete() function was called. May required all', '# async functions to declare their outputs upfront.', 'if', 'not', 'self', '.', 'async', ':', 'raise', 'UnexpectedPipelineError', '(', "'May only call complete() method for asynchronous pipelines.'", ')', 'self', '.', '_context', '.', 'fill_slot', '(', 'self', '.', '_pipeline_key', ',', 'self', '.', 'outputs', '.', 'default', ',', 'default_output', ')'] | Marks this asynchronous Pipeline as complete.
Args:
default_output: What value the 'default' output slot should be assigned.
Raises:
UnexpectedPipelineError if the slot no longer exists or this method was
called for a pipeline that is not async. | ['Marks', 'this', 'asynchronous', 'Pipeline', 'as', 'complete', '.'] | train | https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L817-L834 |
5,197 | fastai/fastai | fastai/vision/data.py | ImageDataBunch.from_csv | def from_csv(cls, path:PathOrStr, folder:PathOrStr=None, label_delim:str=None, csv_labels:PathOrStr='labels.csv',
valid_pct:float=0.2, fn_col:int=0, label_col:int=1, suffix:str='', delimiter:str=None,
header:Optional[Union[int,str]]='infer', **kwargs:Any)->'ImageDataBunch':
"Create from a csv file in `path/csv_labels`."
path = Path(path)
df = pd.read_csv(path/csv_labels, header=header, delimiter=delimiter)
return cls.from_df(path, df, folder=folder, label_delim=label_delim, valid_pct=valid_pct,
fn_col=fn_col, label_col=label_col, suffix=suffix, **kwargs) | python | def from_csv(cls, path:PathOrStr, folder:PathOrStr=None, label_delim:str=None, csv_labels:PathOrStr='labels.csv',
valid_pct:float=0.2, fn_col:int=0, label_col:int=1, suffix:str='', delimiter:str=None,
header:Optional[Union[int,str]]='infer', **kwargs:Any)->'ImageDataBunch':
"Create from a csv file in `path/csv_labels`."
path = Path(path)
df = pd.read_csv(path/csv_labels, header=header, delimiter=delimiter)
return cls.from_df(path, df, folder=folder, label_delim=label_delim, valid_pct=valid_pct,
fn_col=fn_col, label_col=label_col, suffix=suffix, **kwargs) | ['def', 'from_csv', '(', 'cls', ',', 'path', ':', 'PathOrStr', ',', 'folder', ':', 'PathOrStr', '=', 'None', ',', 'label_delim', ':', 'str', '=', 'None', ',', 'csv_labels', ':', 'PathOrStr', '=', "'labels.csv'", ',', 'valid_pct', ':', 'float', '=', '0.2', ',', 'fn_col', ':', 'int', '=', '0', ',', 'label_col', ':', 'int', '=', '1', ',', 'suffix', ':', 'str', '=', "''", ',', 'delimiter', ':', 'str', '=', 'None', ',', 'header', ':', 'Optional', '[', 'Union', '[', 'int', ',', 'str', ']', ']', '=', "'infer'", ',', '*', '*', 'kwargs', ':', 'Any', ')', '->', "'ImageDataBunch'", ':', 'path', '=', 'Path', '(', 'path', ')', 'df', '=', 'pd', '.', 'read_csv', '(', 'path', '/', 'csv_labels', ',', 'header', '=', 'header', ',', 'delimiter', '=', 'delimiter', ')', 'return', 'cls', '.', 'from_df', '(', 'path', ',', 'df', ',', 'folder', '=', 'folder', ',', 'label_delim', '=', 'label_delim', ',', 'valid_pct', '=', 'valid_pct', ',', 'fn_col', '=', 'fn_col', ',', 'label_col', '=', 'label_col', ',', 'suffix', '=', 'suffix', ',', '*', '*', 'kwargs', ')'] | Create from a csv file in `path/csv_labels`. | ['Create', 'from', 'a', 'csv', 'file', 'in', 'path', '/', 'csv_labels', '.'] | train | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/data.py#L123-L130 |
5,198 | cnelson/python-fleet | fleet/v1/client.py | Client._get_proxy_info | def _get_proxy_info(self, _=None):
"""Generate a ProxyInfo class from a connected SSH transport
Args:
_ (None): Ignored. This is just here as the ProxyInfo spec requires it.
Returns:
SSHTunnelProxyInfo: A ProxyInfo with an active socket tunneled through SSH
"""
# parse the fleet endpoint url, to establish a tunnel to that host
(target_host, target_port, target_path) = self._endpoint_to_target(self._endpoint)
# implement the proxy_info interface from httplib which requires
# that we accept a scheme, and return a ProxyInfo object
# we do :P
# This is called once per request, so we keep this here
# so that we can keep one ssh connection open, and allocate
# new channels as needed per-request
sock = None
if target_path:
sock = self._ssh_tunnel.forward_unix(path=target_path)
else:
sock = self._ssh_tunnel.forward_tcp(target_host, port=target_port)
# Return a ProxyInfo class with this socket
return SSHTunnelProxyInfo(sock=sock) | python | def _get_proxy_info(self, _=None):
"""Generate a ProxyInfo class from a connected SSH transport
Args:
_ (None): Ignored. This is just here as the ProxyInfo spec requires it.
Returns:
SSHTunnelProxyInfo: A ProxyInfo with an active socket tunneled through SSH
"""
# parse the fleet endpoint url, to establish a tunnel to that host
(target_host, target_port, target_path) = self._endpoint_to_target(self._endpoint)
# implement the proxy_info interface from httplib which requires
# that we accept a scheme, and return a ProxyInfo object
# we do :P
# This is called once per request, so we keep this here
# so that we can keep one ssh connection open, and allocate
# new channels as needed per-request
sock = None
if target_path:
sock = self._ssh_tunnel.forward_unix(path=target_path)
else:
sock = self._ssh_tunnel.forward_tcp(target_host, port=target_port)
# Return a ProxyInfo class with this socket
return SSHTunnelProxyInfo(sock=sock) | ['def', '_get_proxy_info', '(', 'self', ',', '_', '=', 'None', ')', ':', '# parse the fleet endpoint url, to establish a tunnel to that host', '(', 'target_host', ',', 'target_port', ',', 'target_path', ')', '=', 'self', '.', '_endpoint_to_target', '(', 'self', '.', '_endpoint', ')', '# implement the proxy_info interface from httplib which requires', '# that we accept a scheme, and return a ProxyInfo object', '# we do :P', '# This is called once per request, so we keep this here', '# so that we can keep one ssh connection open, and allocate', '# new channels as needed per-request', 'sock', '=', 'None', 'if', 'target_path', ':', 'sock', '=', 'self', '.', '_ssh_tunnel', '.', 'forward_unix', '(', 'path', '=', 'target_path', ')', 'else', ':', 'sock', '=', 'self', '.', '_ssh_tunnel', '.', 'forward_tcp', '(', 'target_host', ',', 'port', '=', 'target_port', ')', '# Return a ProxyInfo class with this socket', 'return', 'SSHTunnelProxyInfo', '(', 'sock', '=', 'sock', ')'] | Generate a ProxyInfo class from a connected SSH transport
Args:
_ (None): Ignored. This is just here as the ProxyInfo spec requires it.
Returns:
SSHTunnelProxyInfo: A ProxyInfo with an active socket tunneled through SSH | ['Generate', 'a', 'ProxyInfo', 'class', 'from', 'a', 'connected', 'SSH', 'transport'] | train | https://github.com/cnelson/python-fleet/blob/a11dcd8bb3986d1d8f0af90d2da7399c9cc54b4d/fleet/v1/client.py#L357-L385 |
5,199 | elehcimd/pynb | pynb/notebook.py | CachedExecutePreprocessor.run_cell | def run_cell(self, cell, cell_index=0):
"""
Run cell with caching
:param cell: cell to run
:param cell_index: cell index (optional)
:return:
"""
hash = self.cell_hash(cell, cell_index)
fname_session = '/tmp/pynb-cache-{}-session.dill'.format(hash)
fname_value = '/tmp/pynb-cache-{}-value.dill'.format(hash)
cell_snippet = str(" ".join(cell.source.split())).strip()[:40]
if self.disable_cache:
logging.info('Cell {}: Running: "{}.."'.format(hash, cell_snippet))
return super().run_cell(cell, cell_index)
if not self.ignore_cache:
if self.cache_valid and os.path.isfile(fname_session) and os.path.isfile(fname_value):
logging.info('Cell {}: Loading: "{}.."'.format(hash, cell_snippet))
self.prev_fname_session = fname_session
with open(fname_value, 'rb') as f:
value = dill.load(f)
return value
# If cache does not exist or not valid:
#
# 1) Invalidate subsequent cell caches
# 2) Load session from previous cached cell (if existing)
# 3) Run cell
# 4) Cache cell session
# 5) Cache cell value
logging.info('Cell {}: Running: "{}.."'.format(hash, cell_snippet))
# 1) Invalidate subsequent cell caches
self.cache_valid = False
# 2) Load session from previous cached cell (if existing and required)
if self.prev_fname_session:
if self.prev_fname_session_loaded != self.prev_fname_session:
self.session_load(hash, self.prev_fname_session)
# 2) Run cell
value = super().run_cell(cell, cell_index)
# We make sure that injected cells do not interfere with the cell index...
# value[0]['content']['execution_count'] = cell_index
# 3) Cache cell session
cached = self.session_dump(cell, hash, fname_session)
# 4) Cache cell value, if no errors while dumping the cell session in 3).
if cached:
self.prev_fname_session_loaded = fname_session
self.prev_fname_session = fname_session
logging.debug('Cell {}: dumping value to {}'.format(hash, fname_value))
with open(fname_value, 'wb') as f:
dill.dump(value, f)
logging.debug('Cell {}: cached'.format(hash))
return value | python | def run_cell(self, cell, cell_index=0):
"""
Run cell with caching
:param cell: cell to run
:param cell_index: cell index (optional)
:return:
"""
hash = self.cell_hash(cell, cell_index)
fname_session = '/tmp/pynb-cache-{}-session.dill'.format(hash)
fname_value = '/tmp/pynb-cache-{}-value.dill'.format(hash)
cell_snippet = str(" ".join(cell.source.split())).strip()[:40]
if self.disable_cache:
logging.info('Cell {}: Running: "{}.."'.format(hash, cell_snippet))
return super().run_cell(cell, cell_index)
if not self.ignore_cache:
if self.cache_valid and os.path.isfile(fname_session) and os.path.isfile(fname_value):
logging.info('Cell {}: Loading: "{}.."'.format(hash, cell_snippet))
self.prev_fname_session = fname_session
with open(fname_value, 'rb') as f:
value = dill.load(f)
return value
# If cache does not exist or not valid:
#
# 1) Invalidate subsequent cell caches
# 2) Load session from previous cached cell (if existing)
# 3) Run cell
# 4) Cache cell session
# 5) Cache cell value
logging.info('Cell {}: Running: "{}.."'.format(hash, cell_snippet))
# 1) Invalidate subsequent cell caches
self.cache_valid = False
# 2) Load session from previous cached cell (if existing and required)
if self.prev_fname_session:
if self.prev_fname_session_loaded != self.prev_fname_session:
self.session_load(hash, self.prev_fname_session)
# 2) Run cell
value = super().run_cell(cell, cell_index)
# We make sure that injected cells do not interfere with the cell index...
# value[0]['content']['execution_count'] = cell_index
# 3) Cache cell session
cached = self.session_dump(cell, hash, fname_session)
# 4) Cache cell value, if no errors while dumping the cell session in 3).
if cached:
self.prev_fname_session_loaded = fname_session
self.prev_fname_session = fname_session
logging.debug('Cell {}: dumping value to {}'.format(hash, fname_value))
with open(fname_value, 'wb') as f:
dill.dump(value, f)
logging.debug('Cell {}: cached'.format(hash))
return value | ['def', 'run_cell', '(', 'self', ',', 'cell', ',', 'cell_index', '=', '0', ')', ':', 'hash', '=', 'self', '.', 'cell_hash', '(', 'cell', ',', 'cell_index', ')', 'fname_session', '=', "'/tmp/pynb-cache-{}-session.dill'", '.', 'format', '(', 'hash', ')', 'fname_value', '=', "'/tmp/pynb-cache-{}-value.dill'", '.', 'format', '(', 'hash', ')', 'cell_snippet', '=', 'str', '(', '" "', '.', 'join', '(', 'cell', '.', 'source', '.', 'split', '(', ')', ')', ')', '.', 'strip', '(', ')', '[', ':', '40', ']', 'if', 'self', '.', 'disable_cache', ':', 'logging', '.', 'info', '(', '\'Cell {}: Running: "{}.."\'', '.', 'format', '(', 'hash', ',', 'cell_snippet', ')', ')', 'return', 'super', '(', ')', '.', 'run_cell', '(', 'cell', ',', 'cell_index', ')', 'if', 'not', 'self', '.', 'ignore_cache', ':', 'if', 'self', '.', 'cache_valid', 'and', 'os', '.', 'path', '.', 'isfile', '(', 'fname_session', ')', 'and', 'os', '.', 'path', '.', 'isfile', '(', 'fname_value', ')', ':', 'logging', '.', 'info', '(', '\'Cell {}: Loading: "{}.."\'', '.', 'format', '(', 'hash', ',', 'cell_snippet', ')', ')', 'self', '.', 'prev_fname_session', '=', 'fname_session', 'with', 'open', '(', 'fname_value', ',', "'rb'", ')', 'as', 'f', ':', 'value', '=', 'dill', '.', 'load', '(', 'f', ')', 'return', 'value', '# If cache does not exist or not valid:', '#', '# 1) Invalidate subsequent cell caches', '# 2) Load session from previous cached cell (if existing)', '# 3) Run cell', '# 4) Cache cell session', '# 5) Cache cell value', 'logging', '.', 'info', '(', '\'Cell {}: Running: "{}.."\'', '.', 'format', '(', 'hash', ',', 'cell_snippet', ')', ')', '# 1) Invalidate subsequent cell caches', 'self', '.', 'cache_valid', '=', 'False', '# 2) Load session from previous cached cell (if existing and required)', 'if', 'self', '.', 'prev_fname_session', ':', 'if', 'self', '.', 'prev_fname_session_loaded', '!=', 'self', '.', 'prev_fname_session', ':', 'self', '.', 'session_load', '(', 'hash', ',', 'self', '.', 'prev_fname_session', ')', '# 2) Run cell', 'value', '=', 'super', '(', ')', '.', 'run_cell', '(', 'cell', ',', 'cell_index', ')', '# We make sure that injected cells do not interfere with the cell index...', "# value[0]['content']['execution_count'] = cell_index", '# 3) Cache cell session', 'cached', '=', 'self', '.', 'session_dump', '(', 'cell', ',', 'hash', ',', 'fname_session', ')', '# 4) Cache cell value, if no errors while dumping the cell session in 3).', 'if', 'cached', ':', 'self', '.', 'prev_fname_session_loaded', '=', 'fname_session', 'self', '.', 'prev_fname_session', '=', 'fname_session', 'logging', '.', 'debug', '(', "'Cell {}: dumping value to {}'", '.', 'format', '(', 'hash', ',', 'fname_value', ')', ')', 'with', 'open', '(', 'fname_value', ',', "'wb'", ')', 'as', 'f', ':', 'dill', '.', 'dump', '(', 'value', ',', 'f', ')', 'logging', '.', 'debug', '(', "'Cell {}: cached'", '.', 'format', '(', 'hash', ')', ')', 'return', 'value'] | Run cell with caching
:param cell: cell to run
:param cell_index: cell index (optional)
:return: | ['Run', 'cell', 'with', 'caching', ':', 'param', 'cell', ':', 'cell', 'to', 'run', ':', 'param', 'cell_index', ':', 'cell', 'index', '(', 'optional', ')', ':', 'return', ':'] | train | https://github.com/elehcimd/pynb/blob/a32af1f0e574f880eccda4a46aede6d65151f8c9/pynb/notebook.py#L54-L119 |