Dataset columns:

| column | stats |
|---|---|
| Unnamed: 0 | int64, 0 to 10k |
| repository_name | stringlengths 7 to 54 |
| func_path_in_repository | stringlengths 5 to 223 |
| func_name | stringlengths 1 to 134 |
| whole_func_string | stringlengths 100 to 30.3k |
| language | stringclasses 1 value |
| func_code_string | stringlengths 100 to 30.3k |
| func_code_tokens | stringlengths 138 to 33.2k |
| func_documentation_string | stringlengths 1 to 15k |
| func_documentation_tokens | stringlengths 5 to 5.14k |
| split_name | stringclasses 1 value |
| func_code_url | stringlengths 91 to 315 |

| Unnamed: 0 | repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_string | func_code_tokens | func_documentation_string | func_documentation_tokens | split_name | func_code_url |
---|---|---|---|---|---|---|---|---|---|---|---|
4,900 | ddorn/GUI | GUI/vracabulous.py | FocusSelector.on_select | def on_select(self, item, action):
"""
Add an action to make when an object is selected.
Only one action can be stored this way.
"""
if not isinstance(item, int):
item = self.items.index(item)
self._on_select[item] = action | python | def on_select(self, item, action):
"""
Add an action to make when an object is selected.
Only one action can be stored this way.
"""
if not isinstance(item, int):
item = self.items.index(item)
self._on_select[item] = action | ['def', 'on_select', '(', 'self', ',', 'item', ',', 'action', ')', ':', 'if', 'not', 'isinstance', '(', 'item', ',', 'int', ')', ':', 'item', '=', 'self', '.', 'items', '.', 'index', '(', 'item', ')', 'self', '.', '_on_select', '[', 'item', ']', '=', 'action'] | Add an action to make when an object is selected.
Only one action can be stored this way. | ['Add', 'an', 'action', 'to', 'make', 'when', 'an', 'object', 'is', 'selected', '.', 'Only', 'one', 'action', 'can', 'be', 'stored', 'this', 'way', '.'] | train | https://github.com/ddorn/GUI/blob/e1fcb5286d24e0995f280d5180222e51895c368c/GUI/vracabulous.py#L96-L105 |
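
The registration pattern in `on_select` above, normalising the item reference to an integer index and then keeping a single callback per slot, can be sketched without the rest of the GUI toolkit. `Selector` below is a hypothetical stand-in, not a class from ddorn/GUI:

```python
class Selector:
    """Minimal sketch: one callback per selectable item, keyed by index."""

    def __init__(self, items):
        self.items = list(items)
        self._on_select = {}

    def on_select(self, item, action):
        # Accept either an index or the item itself, as the original does.
        if not isinstance(item, int):
            item = self.items.index(item)
        self._on_select[item] = action

    def select(self, index):
        # Fire the registered callback for that slot, if there is one.
        callback = self._on_select.get(index)
        if callback is not None:
            callback()


menu = Selector(["new game", "options", "quit"])
menu.on_select("quit", lambda: print("bye"))
menu.select(2)  # prints "bye"
```
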
4,901 | ktbyers/netmiko | netmiko/_textfsm/_texttable.py | TextTable.Remove | def Remove(self, row):
"""Removes a row from the table.
Args:
row: int, the row number to delete. Must be >= 1, as the header
cannot be removed.
Raises:
TableError: Attempt to remove nonexistent or header row.
"""
if row == 0 or row > self.size:
raise TableError("Attempt to remove header row")
new_table = []
# pylint: disable=E1103
for t_row in self._table:
if t_row.row != row:
new_table.append(t_row)
if t_row.row > row:
t_row.row -= 1
self._table = new_table | python | def Remove(self, row):
"""Removes a row from the table.
Args:
row: int, the row number to delete. Must be >= 1, as the header
cannot be removed.
Raises:
TableError: Attempt to remove nonexistent or header row.
"""
if row == 0 or row > self.size:
raise TableError("Attempt to remove header row")
new_table = []
# pylint: disable=E1103
for t_row in self._table:
if t_row.row != row:
new_table.append(t_row)
if t_row.row > row:
t_row.row -= 1
self._table = new_table | ['def', 'Remove', '(', 'self', ',', 'row', ')', ':', 'if', 'row', '==', '0', 'or', 'row', '>', 'self', '.', 'size', ':', 'raise', 'TableError', '(', '"Attempt to remove header row"', ')', 'new_table', '=', '[', ']', '# pylint: disable=E1103', 'for', 't_row', 'in', 'self', '.', '_table', ':', 'if', 't_row', '.', 'row', '!=', 'row', ':', 'new_table', '.', 'append', '(', 't_row', ')', 'if', 't_row', '.', 'row', '>', 'row', ':', 't_row', '.', 'row', '-=', '1', 'self', '.', '_table', '=', 'new_table'] | Removes a row from the table.
Args:
row: int, the row number to delete. Must be >= 1, as the header
cannot be removed.
Raises:
TableError: Attempt to remove nonexistent or header row. | ['Removes', 'a', 'row', 'from', 'the', 'table', '.'] | train | https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/_textfsm/_texttable.py#L528-L547 |
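
The subtle part of `Remove` is the renumbering: every surviving row with a higher index is shifted up by one so row numbers stay contiguous. A list-of-dicts sketch of the same idea (this is not the netmiko `TextTable` API, just the renumbering logic in isolation):

```python
def remove_row(table, row):
    """Drop 1-based data row `row`; row 0 is the header and cannot be removed."""
    if row == 0 or row > len(table) - 1:
        raise ValueError("Attempt to remove nonexistent or header row")
    new_table = []
    for entry in table:
        if entry["row"] == row:
            continue           # drop the requested row
        if entry["row"] > row:
            entry["row"] -= 1  # shift later rows up so numbering stays dense
        new_table.append(entry)
    return new_table


rows = [{"row": i, "value": v} for i, v in enumerate(["header", "a", "b", "c"])]
print(remove_row(rows, 2))  # 'b' is gone and 'c' has become row 2
```
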
4,902 | denniskempin/safetynet | safetynet.py | _TypecheckFunction | def _TypecheckFunction(function, parent_type_check_dict, stack_location,
self_name):
"""Decorator function to collect and execute type checks."""
type_check_dict = _CollectTypeChecks(function, parent_type_check_dict,
stack_location + 1, self_name)
if not type_check_dict:
return function
def TypecheckWrapper(*args, **kwargs):
arg_dict = _CollectArguments(function, args, kwargs)
errors = _ValidateArguments(arg_dict, type_check_dict)
if errors:
raise TypeError("\n".join(errors))
return_value = function(*args, **kwargs)
errors = _ValidateReturnValue(return_value, type_check_dict)
if errors:
raise TypeError("\n".join(errors))
return return_value
TypecheckWrapper.__doc__ = function.__doc__
TypecheckWrapper.__name__ = function.__name__
TypecheckWrapper.type_check_dict = type_check_dict
TypecheckWrapper.wrapped_function = function
return TypecheckWrapper | python | def _TypecheckFunction(function, parent_type_check_dict, stack_location,
self_name):
"""Decorator function to collect and execute type checks."""
type_check_dict = _CollectTypeChecks(function, parent_type_check_dict,
stack_location + 1, self_name)
if not type_check_dict:
return function
def TypecheckWrapper(*args, **kwargs):
arg_dict = _CollectArguments(function, args, kwargs)
errors = _ValidateArguments(arg_dict, type_check_dict)
if errors:
raise TypeError("\n".join(errors))
return_value = function(*args, **kwargs)
errors = _ValidateReturnValue(return_value, type_check_dict)
if errors:
raise TypeError("\n".join(errors))
return return_value
TypecheckWrapper.__doc__ = function.__doc__
TypecheckWrapper.__name__ = function.__name__
TypecheckWrapper.type_check_dict = type_check_dict
TypecheckWrapper.wrapped_function = function
return TypecheckWrapper | ['def', '_TypecheckFunction', '(', 'function', ',', 'parent_type_check_dict', ',', 'stack_location', ',', 'self_name', ')', ':', 'type_check_dict', '=', '_CollectTypeChecks', '(', 'function', ',', 'parent_type_check_dict', ',', 'stack_location', '+', '1', ',', 'self_name', ')', 'if', 'not', 'type_check_dict', ':', 'return', 'function', 'def', 'TypecheckWrapper', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'arg_dict', '=', '_CollectArguments', '(', 'function', ',', 'args', ',', 'kwargs', ')', 'errors', '=', '_ValidateArguments', '(', 'arg_dict', ',', 'type_check_dict', ')', 'if', 'errors', ':', 'raise', 'TypeError', '(', '"\\n"', '.', 'join', '(', 'errors', ')', ')', 'return_value', '=', 'function', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'errors', '=', '_ValidateReturnValue', '(', 'return_value', ',', 'type_check_dict', ')', 'if', 'errors', ':', 'raise', 'TypeError', '(', '"\\n"', '.', 'join', '(', 'errors', ')', ')', 'return', 'return_value', 'TypecheckWrapper', '.', '__doc__', '=', 'function', '.', '__doc__', 'TypecheckWrapper', '.', '__name__', '=', 'function', '.', '__name__', 'TypecheckWrapper', '.', 'type_check_dict', '=', 'type_check_dict', 'TypecheckWrapper', '.', 'wrapped_function', '=', 'function', 'return', 'TypecheckWrapper'] | Decorator function to collect and execute type checks. | ['Decorator', 'function', 'to', 'collect', 'and', 'execute', 'type', 'checks', '.'] | train | https://github.com/denniskempin/safetynet/blob/fbcc4a112370fc20696f003d901114b4fe26d984/safetynet.py#L425-L451 |
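
The wrapper above follows the standard decorator recipe: gather the call's arguments, validate them, call the wrapped function, validate the return value, and copy metadata back onto the wrapper. A self-contained approximation using ordinary annotations; safetynet's own helpers (`_CollectArguments`, `_ValidateArguments`) are not reproduced here:

```python
import inspect
from functools import wraps


def typechecked(func):
    """Validate annotated arguments and the return value at call time."""
    sig = inspect.signature(func)

    @wraps(func)
    def wrapper(*args, **kwargs):
        bound = sig.bind(*args, **kwargs)
        errors = []
        for name, value in bound.arguments.items():
            expected = sig.parameters[name].annotation
            if expected is not inspect.Parameter.empty and not isinstance(value, expected):
                errors.append("%s: expected %s, got %s"
                              % (name, expected.__name__, type(value).__name__))
        if errors:
            raise TypeError("\n".join(errors))
        result = func(*args, **kwargs)
        expected = sig.return_annotation
        if expected is not inspect.Signature.empty and not isinstance(result, expected):
            raise TypeError("return value: expected %s" % expected.__name__)
        return result

    return wrapper


@typechecked
def scale(x: int, factor: float) -> float:
    return x * factor


scale(2, 1.5)      # passes both checks
# scale("2", 1.5)  # would raise TypeError for argument x
```
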
4,903 | apache/incubator-mxnet | python/mxnet/recordio.py | MXRecordIO.close | def close(self):
"""Closes the record file."""
if not self.is_open:
return
if self.writable:
check_call(_LIB.MXRecordIOWriterFree(self.handle))
else:
check_call(_LIB.MXRecordIOReaderFree(self.handle))
self.is_open = False
self.pid = None | python | def close(self):
"""Closes the record file."""
if not self.is_open:
return
if self.writable:
check_call(_LIB.MXRecordIOWriterFree(self.handle))
else:
check_call(_LIB.MXRecordIOReaderFree(self.handle))
self.is_open = False
self.pid = None | ['def', 'close', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'is_open', ':', 'return', 'if', 'self', '.', 'writable', ':', 'check_call', '(', '_LIB', '.', 'MXRecordIOWriterFree', '(', 'self', '.', 'handle', ')', ')', 'else', ':', 'check_call', '(', '_LIB', '.', 'MXRecordIOReaderFree', '(', 'self', '.', 'handle', ')', ')', 'self', '.', 'is_open', '=', 'False', 'self', '.', 'pid', '=', 'None'] | Closes the record file. | ['Closes', 'the', 'record', 'file', '.'] | train | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/recordio.py#L123-L132 |
4,904 | loli/medpy | doc/numpydoc/numpydoc/docscrape_sphinx.py | SphinxDocString._str_member_list | def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (isinstance(param_obj, collections.Callable)
or isinstance(param_obj, property)
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
if param_obj and (pydoc.getdoc(param_obj) or not desc):
# Referenced object has a docstring
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::']
if self.class_members_toctree:
out += [' :toctree:']
out += [''] + autosum
if others:
maxlen_0 = max(3, max([len(x[0]) for x in others]))
hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10
fmt = sixu('%%%ds %%s ') % (maxlen_0,)
out += ['', hdr]
for param, param_type, desc in others:
desc = sixu(" ").join(x.strip() for x in desc).strip()
if param_type:
desc = "(%s) %s" % (param_type, desc)
out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out | python | def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (isinstance(param_obj, collections.Callable)
or isinstance(param_obj, property)
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
if param_obj and (pydoc.getdoc(param_obj) or not desc):
# Referenced object has a docstring
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::']
if self.class_members_toctree:
out += [' :toctree:']
out += [''] + autosum
if others:
maxlen_0 = max(3, max([len(x[0]) for x in others]))
hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10
fmt = sixu('%%%ds %%s ') % (maxlen_0,)
out += ['', hdr]
for param, param_type, desc in others:
desc = sixu(" ").join(x.strip() for x in desc).strip()
if param_type:
desc = "(%s) %s" % (param_type, desc)
out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out | ['def', '_str_member_list', '(', 'self', ',', 'name', ')', ':', 'out', '=', '[', ']', 'if', 'self', '[', 'name', ']', ':', 'out', '+=', '[', "'.. rubric:: %s'", '%', 'name', ',', "''", ']', 'prefix', '=', 'getattr', '(', 'self', ',', "'_name'", ',', "''", ')', 'if', 'prefix', ':', 'prefix', '=', "'~%s.'", '%', 'prefix', 'autosum', '=', '[', ']', 'others', '=', '[', ']', 'for', 'param', ',', 'param_type', ',', 'desc', 'in', 'self', '[', 'name', ']', ':', 'param', '=', 'param', '.', 'strip', '(', ')', '# Check if the referenced member can have a docstring or not', 'param_obj', '=', 'getattr', '(', 'self', '.', '_obj', ',', 'param', ',', 'None', ')', 'if', 'not', '(', 'isinstance', '(', 'param_obj', ',', 'collections', '.', 'Callable', ')', 'or', 'isinstance', '(', 'param_obj', ',', 'property', ')', 'or', 'inspect', '.', 'isgetsetdescriptor', '(', 'param_obj', ')', ')', ':', 'param_obj', '=', 'None', 'if', 'param_obj', 'and', '(', 'pydoc', '.', 'getdoc', '(', 'param_obj', ')', 'or', 'not', 'desc', ')', ':', '# Referenced object has a docstring', 'autosum', '+=', '[', '" %s%s"', '%', '(', 'prefix', ',', 'param', ')', ']', 'else', ':', 'others', '.', 'append', '(', '(', 'param', ',', 'param_type', ',', 'desc', ')', ')', 'if', 'autosum', ':', 'out', '+=', '[', "'.. autosummary::'", ']', 'if', 'self', '.', 'class_members_toctree', ':', 'out', '+=', '[', "' :toctree:'", ']', 'out', '+=', '[', "''", ']', '+', 'autosum', 'if', 'others', ':', 'maxlen_0', '=', 'max', '(', '3', ',', 'max', '(', '[', 'len', '(', 'x', '[', '0', ']', ')', 'for', 'x', 'in', 'others', ']', ')', ')', 'hdr', '=', 'sixu', '(', '"="', ')', '*', 'maxlen_0', '+', 'sixu', '(', '" "', ')', '+', 'sixu', '(', '"="', ')', '*', '10', 'fmt', '=', 'sixu', '(', "'%%%ds %%s '", ')', '%', '(', 'maxlen_0', ',', ')', 'out', '+=', '[', "''", ',', 'hdr', ']', 'for', 'param', ',', 'param_type', ',', 'desc', 'in', 'others', ':', 'desc', '=', 'sixu', '(', '" "', ')', '.', 'join', '(', 'x', '.', 'strip', '(', ')', 'for', 'x', 'in', 'desc', ')', '.', 'strip', '(', ')', 'if', 'param_type', ':', 'desc', '=', '"(%s) %s"', '%', '(', 'param_type', ',', 'desc', ')', 'out', '+=', '[', 'fmt', '%', '(', 'param', '.', 'strip', '(', ')', ',', 'desc', ')', ']', 'out', '+=', '[', 'hdr', ']', 'out', '+=', '[', "''", ']', 'return', 'out'] | Generate a member listing, autosummary:: table where possible,
and a table where not. | ['Generate', 'a', 'member', 'listing', 'autosummary', '::', 'table', 'where', 'possible', 'and', 'a', 'table', 'where', 'not', '.'] | train | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/doc/numpydoc/numpydoc/docscrape_sphinx.py#L91-L141 |
4,905 | HewlettPackard/python-hpOneView | hpOneView/oneview_client.py | OneViewClient.managed_sans | def managed_sans(self):
"""
Gets the Managed SANs API client.
Returns:
ManagedSANs:
"""
if not self.__managed_sans:
self.__managed_sans = ManagedSANs(self.__connection)
return self.__managed_sans | python | def managed_sans(self):
"""
Gets the Managed SANs API client.
Returns:
ManagedSANs:
"""
if not self.__managed_sans:
self.__managed_sans = ManagedSANs(self.__connection)
return self.__managed_sans | ['def', 'managed_sans', '(', 'self', ')', ':', 'if', 'not', 'self', '.', '__managed_sans', ':', 'self', '.', '__managed_sans', '=', 'ManagedSANs', '(', 'self', '.', '__connection', ')', 'return', 'self', '.', '__managed_sans'] | Gets the Managed SANs API client.
Returns:
ManagedSANs: | ['Gets', 'the', 'Managed', 'SANs', 'API', 'client', '.'] | train | https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/oneview_client.py#L939-L948 |
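
The property above builds the `ManagedSANs` client on first access and caches it on the instance, so later lookups reuse the same object. The same lazy-initialisation pattern in isolation (class names here are illustrative, not the HPE OneView SDK):

```python
class ExpensiveClient:
    def __init__(self, connection):
        print("building client for", connection)
        self.connection = connection


class LazyOwner:
    def __init__(self, connection):
        self._connection = connection
        self._client = None

    @property
    def client(self):
        # Construct on first use only, then hand back the cached instance.
        if self._client is None:
            self._client = ExpensiveClient(self._connection)
        return self._client


owner = LazyOwner("https://oneview.example")
a = owner.client   # prints once
b = owner.client   # cached; no second construction
assert a is b
```
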
4,906 | PyPSA/PyPSA | pypsa/components.py | Network.set_snapshots | def set_snapshots(self,snapshots):
"""
Set the snapshots and reindex all time-dependent data.
This will reindex all pandas.Panels of time-dependent data; NaNs are filled
with the default value for that quantity.
Parameters
----------
snapshots : list or pandas.Index
All time steps.
Returns
-------
None
"""
self.snapshots = pd.Index(snapshots)
self.snapshot_weightings = self.snapshot_weightings.reindex(self.snapshots,fill_value=1.)
if isinstance(snapshots, pd.DatetimeIndex) and _pd_version < '0.18.0':
snapshots = pd.Index(snapshots.values)
for component in self.all_components:
pnl = self.pnl(component)
attrs = self.components[component]["attrs"]
for k,default in attrs.default[attrs.varying].iteritems():
pnl[k] = pnl[k].reindex(self.snapshots).fillna(default) | python | def set_snapshots(self,snapshots):
"""
Set the snapshots and reindex all time-dependent data.
This will reindex all pandas.Panels of time-dependent data; NaNs are filled
with the default value for that quantity.
Parameters
----------
snapshots : list or pandas.Index
All time steps.
Returns
-------
None
"""
self.snapshots = pd.Index(snapshots)
self.snapshot_weightings = self.snapshot_weightings.reindex(self.snapshots,fill_value=1.)
if isinstance(snapshots, pd.DatetimeIndex) and _pd_version < '0.18.0':
snapshots = pd.Index(snapshots.values)
for component in self.all_components:
pnl = self.pnl(component)
attrs = self.components[component]["attrs"]
for k,default in attrs.default[attrs.varying].iteritems():
pnl[k] = pnl[k].reindex(self.snapshots).fillna(default) | ['def', 'set_snapshots', '(', 'self', ',', 'snapshots', ')', ':', 'self', '.', 'snapshots', '=', 'pd', '.', 'Index', '(', 'snapshots', ')', 'self', '.', 'snapshot_weightings', '=', 'self', '.', 'snapshot_weightings', '.', 'reindex', '(', 'self', '.', 'snapshots', ',', 'fill_value', '=', '1.', ')', 'if', 'isinstance', '(', 'snapshots', ',', 'pd', '.', 'DatetimeIndex', ')', 'and', '_pd_version', '<', "'0.18.0'", ':', 'snapshots', '=', 'pd', '.', 'Index', '(', 'snapshots', '.', 'values', ')', 'for', 'component', 'in', 'self', '.', 'all_components', ':', 'pnl', '=', 'self', '.', 'pnl', '(', 'component', ')', 'attrs', '=', 'self', '.', 'components', '[', 'component', ']', '[', '"attrs"', ']', 'for', 'k', ',', 'default', 'in', 'attrs', '.', 'default', '[', 'attrs', '.', 'varying', ']', '.', 'iteritems', '(', ')', ':', 'pnl', '[', 'k', ']', '=', 'pnl', '[', 'k', ']', '.', 'reindex', '(', 'self', '.', 'snapshots', ')', '.', 'fillna', '(', 'default', ')'] | Set the snapshots and reindex all time-dependent data.
This will reindex all pandas.Panels of time-dependent data; NaNs are filled
with the default value for that quantity.
Parameters
----------
snapshots : list or pandas.Index
All time steps.
Returns
-------
None | ['Set', 'the', 'snapshots', 'and', 'reindex', 'all', 'time', '-', 'dependent', 'data', '.'] | train | https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/components.py#L378-L406 |
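
The key call in `set_snapshots` is `reindex(...).fillna(default)`: each time-dependent frame is aligned to the new snapshot index and rows that did not exist before receive that quantity's default value (weightings use `fill_value=1.`). A pandas-only sketch with made-up data, independent of PyPSA's component machinery:

```python
import pandas as pd

old_snapshots = pd.Index(["t0", "t1"])
new_snapshots = pd.Index(["t0", "t1", "t2", "t3"])

# Snapshot weightings default to 1.0 for newly added snapshots.
weightings = pd.Series([0.5, 2.0], index=old_snapshots)
weightings = weightings.reindex(new_snapshots, fill_value=1.0)

# A time-varying quantity (say, a load profile) gets its own default, here 0.0.
p_set = pd.DataFrame({"load1": [10.0, 12.0]}, index=old_snapshots)
p_set = p_set.reindex(new_snapshots).fillna(0.0)

print(weightings.tolist())      # [0.5, 2.0, 1.0, 1.0]
print(p_set["load1"].tolist())  # [10.0, 12.0, 0.0, 0.0]
```
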
4,907 | ddorn/GUI | GUI/text.py | InLineTextBox.update | def update(self, event_or_list):
"""Update the text and position of cursor according to the event passed."""
event_or_list = super().update(event_or_list)
for e in event_or_list:
if e.type == KEYDOWN:
if e.key == K_RIGHT:
if e.mod & KMOD_CTRL:
self.move_cursor_one_word(self.RIGHT)
else:
self.move_cursor_one_letter(self.RIGHT)
elif e.key == K_LEFT:
if e.mod & KMOD_CTRL:
self.move_cursor_one_word(self.LEFT)
else:
self.move_cursor_one_letter(self.LEFT)
elif e.key == K_BACKSPACE:
if self.cursor == 0:
continue
if e.mod & KMOD_CTRL:
self.delete_one_word(self.LEFT)
else:
self.delete_one_letter(self.LEFT)
elif e.key == K_DELETE:
if e.mod & KMOD_CTRL:
self.delete_one_word(self.RIGHT)
else:
self.delete_one_letter(self.RIGHT)
elif e.unicode != '' and e.unicode.isprintable():
self.add_letter(e.unicode) | python | def update(self, event_or_list):
"""Update the text and position of cursor according to the event passed."""
event_or_list = super().update(event_or_list)
for e in event_or_list:
if e.type == KEYDOWN:
if e.key == K_RIGHT:
if e.mod & KMOD_CTRL:
self.move_cursor_one_word(self.RIGHT)
else:
self.move_cursor_one_letter(self.RIGHT)
elif e.key == K_LEFT:
if e.mod & KMOD_CTRL:
self.move_cursor_one_word(self.LEFT)
else:
self.move_cursor_one_letter(self.LEFT)
elif e.key == K_BACKSPACE:
if self.cursor == 0:
continue
if e.mod & KMOD_CTRL:
self.delete_one_word(self.LEFT)
else:
self.delete_one_letter(self.LEFT)
elif e.key == K_DELETE:
if e.mod & KMOD_CTRL:
self.delete_one_word(self.RIGHT)
else:
self.delete_one_letter(self.RIGHT)
elif e.unicode != '' and e.unicode.isprintable():
self.add_letter(e.unicode) | ['def', 'update', '(', 'self', ',', 'event_or_list', ')', ':', 'event_or_list', '=', 'super', '(', ')', '.', 'update', '(', 'event_or_list', ')', 'for', 'e', 'in', 'event_or_list', ':', 'if', 'e', '.', 'type', '==', 'KEYDOWN', ':', 'if', 'e', '.', 'key', '==', 'K_RIGHT', ':', 'if', 'e', '.', 'mod', '*', 'KMOD_CTRL', ':', 'self', '.', 'move_cursor_one_word', '(', 'self', '.', 'RIGHT', ')', 'else', ':', 'self', '.', 'move_cursor_one_letter', '(', 'self', '.', 'RIGHT', ')', 'elif', 'e', '.', 'key', '==', 'K_LEFT', ':', 'if', 'e', '.', 'mod', '*', 'KMOD_CTRL', ':', 'self', '.', 'move_cursor_one_word', '(', 'self', '.', 'LEFT', ')', 'else', ':', 'self', '.', 'move_cursor_one_letter', '(', 'self', '.', 'LEFT', ')', 'elif', 'e', '.', 'key', '==', 'K_BACKSPACE', ':', 'if', 'self', '.', 'cursor', '==', '0', ':', 'continue', 'if', 'e', '.', 'mod', '&', 'KMOD_CTRL', ':', 'self', '.', 'delete_one_word', '(', 'self', '.', 'LEFT', ')', 'else', ':', 'self', '.', 'delete_one_letter', '(', 'self', '.', 'LEFT', ')', 'elif', 'e', '.', 'key', '==', 'K_DELETE', ':', 'if', 'e', '.', 'mod', '&', 'KMOD_CTRL', ':', 'self', '.', 'delete_one_word', '(', 'self', '.', 'RIGHT', ')', 'else', ':', 'self', '.', 'delete_one_letter', '(', 'self', '.', 'RIGHT', ')', 'elif', 'e', '.', 'unicode', '!=', "''", 'and', 'e', '.', 'unicode', '.', 'isprintable', '(', ')', ':', 'self', '.', 'add_letter', '(', 'e', '.', 'unicode', ')'] | Update the text and position of cursor according to the event passed. | ['Update', 'the', 'text', 'and', 'position', 'of', 'cursor', 'according', 'to', 'the', 'event', 'passed', '.'] | train | https://github.com/ddorn/GUI/blob/e1fcb5286d24e0995f280d5180222e51895c368c/GUI/text.py#L256-L291 |
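
`event.mod` is a bitmask, so modifier tests need a bitwise AND (`e.mod & KMOD_CTRL`), as in the backspace/delete branches: a bare truthiness test on the whole mask would also fire when only Shift or Alt is held. A standalone sketch of the dispatch without pygame; the constant values below are stand-ins, not the real pygame ones:

```python
# Stand-in constants; in pygame these come from pygame.locals.
KMOD_NONE, KMOD_SHIFT, KMOD_CTRL = 0x0, 0x1, 0x40
K_RIGHT, K_LEFT = 275, 276


def classify(key, mod):
    """Map a key event to the cursor action it should trigger."""
    if key == K_RIGHT:
        # Bitwise AND against the modifier bitmask: jump a word when Ctrl is down.
        return "word-right" if mod & KMOD_CTRL else "letter-right"
    if key == K_LEFT:
        return "word-left" if mod & KMOD_CTRL else "letter-left"
    return "ignored"


print(classify(K_RIGHT, KMOD_NONE))   # letter-right
print(classify(K_RIGHT, KMOD_CTRL))   # word-right
print(classify(K_LEFT, KMOD_SHIFT))   # letter-left (Shift alone is not Ctrl)
```
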
4,908 | apple/turicreate | deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py | plot_temp_diagrams | def plot_temp_diagrams(config, results, temp_dir):
"""Plot temporary diagrams"""
display_name = {
'time': 'Compilation time (s)',
'memory': 'Compiler memory usage (MB)',
}
files = config['files']
img_files = []
if any('slt' in result for result in results) and 'bmp' in files.values()[0]:
config['modes']['slt'] = 'Using BOOST_METAPARSE_STRING with string literal templates'
for f in files.values():
f['slt'] = f['bmp'].replace('bmp', 'slt')
for measured in ['time', 'memory']:
mpts = sorted(int(k) for k in files.keys())
img_files.append(os.path.join(temp_dir, '_{0}.png'.format(measured)))
plot(
{
m: [(x, results[files[str(x)][m]][measured]) for x in mpts]
for m in config['modes'].keys()
},
config['modes'],
display_name[measured],
(config['x_axis_label'], display_name[measured]),
img_files[-1]
)
return img_files | python | def plot_temp_diagrams(config, results, temp_dir):
"""Plot temporary diagrams"""
display_name = {
'time': 'Compilation time (s)',
'memory': 'Compiler memory usage (MB)',
}
files = config['files']
img_files = []
if any('slt' in result for result in results) and 'bmp' in files.values()[0]:
config['modes']['slt'] = 'Using BOOST_METAPARSE_STRING with string literal templates'
for f in files.values():
f['slt'] = f['bmp'].replace('bmp', 'slt')
for measured in ['time', 'memory']:
mpts = sorted(int(k) for k in files.keys())
img_files.append(os.path.join(temp_dir, '_{0}.png'.format(measured)))
plot(
{
m: [(x, results[files[str(x)][m]][measured]) for x in mpts]
for m in config['modes'].keys()
},
config['modes'],
display_name[measured],
(config['x_axis_label'], display_name[measured]),
img_files[-1]
)
return img_files | ['def', 'plot_temp_diagrams', '(', 'config', ',', 'results', ',', 'temp_dir', ')', ':', 'display_name', '=', '{', "'time'", ':', "'Compilation time (s)'", ',', "'memory'", ':', "'Compiler memory usage (MB)'", ',', '}', 'files', '=', 'config', '[', "'files'", ']', 'img_files', '=', '[', ']', 'if', 'any', '(', "'slt'", 'in', 'result', 'for', 'result', 'in', 'results', ')', 'and', "'bmp'", 'in', 'files', '.', 'values', '(', ')', '[', '0', ']', ':', 'config', '[', "'modes'", ']', '[', "'slt'", ']', '=', "'Using BOOST_METAPARSE_STRING with string literal templates'", 'for', 'f', 'in', 'files', '.', 'values', '(', ')', ':', 'f', '[', "'slt'", ']', '=', 'f', '[', "'bmp'", ']', '.', 'replace', '(', "'bmp'", ',', "'slt'", ')', 'for', 'measured', 'in', '[', "'time'", ',', "'memory'", ']', ':', 'mpts', '=', 'sorted', '(', 'int', '(', 'k', ')', 'for', 'k', 'in', 'files', '.', 'keys', '(', ')', ')', 'img_files', '.', 'append', '(', 'os', '.', 'path', '.', 'join', '(', 'temp_dir', ',', "'_{0}.png'", '.', 'format', '(', 'measured', ')', ')', ')', 'plot', '(', '{', 'm', ':', '[', '(', 'x', ',', 'results', '[', 'files', '[', 'str', '(', 'x', ')', ']', '[', 'm', ']', ']', '[', 'measured', ']', ')', 'for', 'x', 'in', 'mpts', ']', 'for', 'm', 'in', 'config', '[', "'modes'", ']', '.', 'keys', '(', ')', '}', ',', 'config', '[', "'modes'", ']', ',', 'display_name', '[', 'measured', ']', ',', '(', 'config', '[', "'x_axis_label'", ']', ',', 'display_name', '[', 'measured', ']', ')', ',', 'img_files', '[', '-', '1', ']', ')', 'return', 'img_files'] | Plot temporary diagrams | ['Plot', 'temporary', 'diagrams'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py#L229-L257 |
4,909 | alexflint/process-isolation | process_isolation.py | Client.terminate | def terminate(self):
'''Stop the server process and change our state to TERMINATING. Only valid if state=READY.'''
logger.debug('client.terminate() called (state=%s)', self.strstate)
if self.state == ClientState.WAITING_FOR_RESULT:
raise ClientStateError('terminate() called while state='+self.strstate)
if self.state == ClientState.TERMINATING:
raise ClientStateError('terminate() called while state='+self.strstate)
elif self.state in ClientState.TerminatedSet:
assert not self._server_process.is_alive()
return
elif self.state == ClientState.READY:
# Check that the process itself is still alive
self._assert_alive()
# Make sure the SIGCHLD signal handler doesn't throw any exceptions
self.state = ClientState.TERMINATING
# Do not call execute() because that function will check
# whether the process is alive and throw an exception if not
# TODO: can the queue itself throw exceptions?
self._delegate_channel.put(FunctionCallDelegate(_raise_terminate))
# Wait for acknowledgement
try:
self._read_result(num_retries=5)
except ProcessTerminationError as ex:
pass
except ChannelError as ex:
# Was interrupted five times in a row! Ignore for now
logger.debug('client failed to read sentinel from channel after 5 retries - will terminate anyway')
self.state = ClientState.TERMINATED_CLEANLY | python | def terminate(self):
'''Stop the server process and change our state to TERMINATING. Only valid if state=READY.'''
logger.debug('client.terminate() called (state=%s)', self.strstate)
if self.state == ClientState.WAITING_FOR_RESULT:
raise ClientStateError('terminate() called while state='+self.strstate)
if self.state == ClientState.TERMINATING:
raise ClientStateError('terminate() called while state='+self.strstate)
elif self.state in ClientState.TerminatedSet:
assert not self._server_process.is_alive()
return
elif self.state == ClientState.READY:
# Check that the process itself is still alive
self._assert_alive()
# Make sure the SIGCHLD signal handler doesn't throw any exceptions
self.state = ClientState.TERMINATING
# Do not call execute() because that function will check
# whether the process is alive and throw an exception if not
# TODO: can the queue itself throw exceptions?
self._delegate_channel.put(FunctionCallDelegate(_raise_terminate))
# Wait for acknowledgement
try:
self._read_result(num_retries=5)
except ProcessTerminationError as ex:
pass
except ChannelError as ex:
# Was interrupted five times in a row! Ignore for now
logger.debug('client failed to read sentinel from channel after 5 retries - will terminate anyway')
self.state = ClientState.TERMINATED_CLEANLY | ['def', 'terminate', '(', 'self', ')', ':', 'logger', '.', 'debug', '(', "'client.terminate() called (state=%s)'", ',', 'self', '.', 'strstate', ')', 'if', 'self', '.', 'state', '==', 'ClientState', '.', 'WAITING_FOR_RESULT', ':', 'raise', 'ClientStateError', '(', "'terimate() called while state='", '+', 'self', '.', 'strstate', ')', 'if', 'self', '.', 'state', '==', 'ClientState', '.', 'TERMINATING', ':', 'raise', 'ClientStateError', '(', "'terimate() called while state='", '+', 'self', '.', 'strstate', ')', 'elif', 'self', '.', 'state', 'in', 'ClientState', '.', 'TerminatedSet', ':', 'assert', 'not', 'self', '.', '_server_process', '.', 'is_alive', '(', ')', 'return', 'elif', 'self', '.', 'state', '==', 'ClientState', '.', 'READY', ':', '# Check that the process itself is still alive', 'self', '.', '_assert_alive', '(', ')', "# Make sure the SIGCHLD signal handler doesn't throw any exceptions", 'self', '.', 'state', '=', 'ClientState', '.', 'TERMINATING', '# Do not call execute() because that function will check', '# whether the process is alive and throw an exception if not', '# TODO: can the queue itself throw exceptions?', 'self', '.', '_delegate_channel', '.', 'put', '(', 'FunctionCallDelegate', '(', '_raise_terminate', ')', ')', '# Wait for acknowledgement', 'try', ':', 'self', '.', '_read_result', '(', 'num_retries', '=', '5', ')', 'except', 'ProcessTerminationError', 'as', 'ex', ':', 'pass', 'except', 'ChannelError', 'as', 'ex', ':', '# Was interrupted five times in a row! Ignore for now', 'logger', '.', 'debug', '(', "'client failed to read sentinel from channel after 5 retries - will terminate anyway'", ')', 'self', '.', 'state', '=', 'ClientState', '.', 'TERMINATED_CLEANLY'] | Stop the server process and change our state to TERMINATING. Only valid if state=READY. | ['Stop', 'the', 'server', 'process', 'and', 'change', 'our', 'state', 'to', 'TERMINATING', '.', 'Only', 'valid', 'if', 'state', '=', 'READY', '.'] | train | https://github.com/alexflint/process-isolation/blob/1b09862a5ed63be71049dfa8ad22f7c5fc75745c/process_isolation.py#L808-L839 |
4,910 | atztogo/phonopy | phonopy/interface/__init__.py | get_default_physical_units | def get_default_physical_units(interface_mode=None):
"""Return physical units used for calculators
Physical units: energy, distance, atomic mass, force
vasp : eV, Angstrom, AMU, eV/Angstrom
wien2k : Ry, au(=bohr), AMU, mRy/au
abinit : hartree, au, AMU, eV/Angstrom
elk : hartree, au, AMU, hartree/au
qe : Ry, au, AMU, Ry/au
siesta : eV, au, AMU, eV/Angstroem
CRYSTAL : eV, Angstrom, AMU, eV/Angstroem
DFTB+ : hartree, au, AMU hartree/au
TURBOMOLE : hartree, au, AMU, hartree/au
"""
from phonopy.units import (Wien2kToTHz, AbinitToTHz, PwscfToTHz, ElkToTHz,
SiestaToTHz, VaspToTHz, CP2KToTHz, CrystalToTHz,
DftbpToTHz, TurbomoleToTHz, Hartree, Bohr)
units = {'factor': None,
'nac_factor': None,
'distance_to_A': None,
'force_constants_unit': None,
'length_unit': None}
if interface_mode is None or interface_mode == 'vasp':
units['factor'] = VaspToTHz
units['nac_factor'] = Hartree * Bohr
units['distance_to_A'] = 1.0
units['force_constants_unit'] = 'eV/Angstrom^2'
units['length_unit'] = 'Angstrom'
elif interface_mode == 'abinit':
units['factor'] = AbinitToTHz
units['nac_factor'] = Hartree / Bohr
units['distance_to_A'] = Bohr
units['force_constants_unit'] = 'eV/Angstrom.au'
units['length_unit'] = 'au'
elif interface_mode == 'qe':
units['factor'] = PwscfToTHz
units['nac_factor'] = 2.0
units['distance_to_A'] = Bohr
units['force_constants_unit'] = 'Ry/au^2'
units['length_unit'] = 'au'
elif interface_mode == 'wien2k':
units['factor'] = Wien2kToTHz
units['nac_factor'] = 2000.0
units['distance_to_A'] = Bohr
units['force_constants_unit'] = 'mRy/au^2'
units['length_unit'] = 'au'
elif interface_mode == 'elk':
units['factor'] = ElkToTHz
units['nac_factor'] = 1.0
units['distance_to_A'] = Bohr
units['force_constants_unit'] = 'hartree/au^2'
units['length_unit'] = 'au'
elif interface_mode == 'siesta':
units['factor'] = SiestaToTHz
units['nac_factor'] = Hartree / Bohr
units['distance_to_A'] = Bohr
units['force_constants_unit'] = 'eV/Angstrom.au'
units['length_unit'] = 'au'
elif interface_mode == 'cp2k':
units['factor'] = CP2KToTHz
units['nac_factor'] = Hartree / Bohr # in a.u.
units['distance_to_A'] = Bohr
units['force_constants_unit'] = 'hartree/au^2'
units['length_unit'] = 'Angstrom'
elif interface_mode == 'crystal':
units['factor'] = CrystalToTHz
units['nac_factor'] = Hartree * Bohr
units['distance_to_A'] = 1.0
units['force_constants_unit'] = 'eV/Angstrom^2'
units['length_unit'] = 'Angstrom'
elif interface_mode == 'dftbp':
units['factor'] = DftbpToTHz
units['nac_factor'] = Hartree * Bohr
units['distance_to_A'] = Bohr
units['force_constants_unit'] = 'hartree/au^2'
units['length_unit'] = 'au'
elif interface_mode == 'turbomole':
units['factor'] = TurbomoleToTHz
units['nac_factor'] = 1.0
units['distance_to_A'] = Bohr
units['force_constants_unit'] = 'hartree/au^2'
units['length_unit'] = 'au'
return units | python | def get_default_physical_units(interface_mode=None):
"""Return physical units used for calculators
Physical units: energy, distance, atomic mass, force
vasp : eV, Angstrom, AMU, eV/Angstrom
wien2k : Ry, au(=bohr), AMU, mRy/au
abinit : hartree, au, AMU, eV/Angstrom
elk : hartree, au, AMU, hartree/au
qe : Ry, au, AMU, Ry/au
siesta : eV, au, AMU, eV/Angstroem
CRYSTAL : eV, Angstrom, AMU, eV/Angstroem
DFTB+ : hartree, au, AMU hartree/au
TURBOMOLE : hartree, au, AMU, hartree/au
"""
from phonopy.units import (Wien2kToTHz, AbinitToTHz, PwscfToTHz, ElkToTHz,
SiestaToTHz, VaspToTHz, CP2KToTHz, CrystalToTHz,
DftbpToTHz, TurbomoleToTHz, Hartree, Bohr)
units = {'factor': None,
'nac_factor': None,
'distance_to_A': None,
'force_constants_unit': None,
'length_unit': None}
if interface_mode is None or interface_mode == 'vasp':
units['factor'] = VaspToTHz
units['nac_factor'] = Hartree * Bohr
units['distance_to_A'] = 1.0
units['force_constants_unit'] = 'eV/Angstrom^2'
units['length_unit'] = 'Angstrom'
elif interface_mode == 'abinit':
units['factor'] = AbinitToTHz
units['nac_factor'] = Hartree / Bohr
units['distance_to_A'] = Bohr
units['force_constants_unit'] = 'eV/Angstrom.au'
units['length_unit'] = 'au'
elif interface_mode == 'qe':
units['factor'] = PwscfToTHz
units['nac_factor'] = 2.0
units['distance_to_A'] = Bohr
units['force_constants_unit'] = 'Ry/au^2'
units['length_unit'] = 'au'
elif interface_mode == 'wien2k':
units['factor'] = Wien2kToTHz
units['nac_factor'] = 2000.0
units['distance_to_A'] = Bohr
units['force_constants_unit'] = 'mRy/au^2'
units['length_unit'] = 'au'
elif interface_mode == 'elk':
units['factor'] = ElkToTHz
units['nac_factor'] = 1.0
units['distance_to_A'] = Bohr
units['force_constants_unit'] = 'hartree/au^2'
units['length_unit'] = 'au'
elif interface_mode == 'siesta':
units['factor'] = SiestaToTHz
units['nac_factor'] = Hartree / Bohr
units['distance_to_A'] = Bohr
units['force_constants_unit'] = 'eV/Angstrom.au'
units['length_unit'] = 'au'
elif interface_mode == 'cp2k':
units['factor'] = CP2KToTHz
units['nac_factor'] = Hartree / Bohr # in a.u.
units['distance_to_A'] = Bohr
units['force_constants_unit'] = 'hartree/au^2'
units['length_unit'] = 'Angstrom'
elif interface_mode == 'crystal':
units['factor'] = CrystalToTHz
units['nac_factor'] = Hartree * Bohr
units['distance_to_A'] = 1.0
units['force_constants_unit'] = 'eV/Angstrom^2'
units['length_unit'] = 'Angstrom'
elif interface_mode == 'dftbp':
units['factor'] = DftbpToTHz
units['nac_factor'] = Hartree * Bohr
units['distance_to_A'] = Bohr
units['force_constants_unit'] = 'hartree/au^2'
units['length_unit'] = 'au'
elif interface_mode == 'turbomole':
units['factor'] = TurbomoleToTHz
units['nac_factor'] = 1.0
units['distance_to_A'] = Bohr
units['force_constants_unit'] = 'hartree/au^2'
units['length_unit'] = 'au'
return units | ['def', 'get_default_physical_units', '(', 'interface_mode', '=', 'None', ')', ':', 'from', 'phonopy', '.', 'units', 'import', '(', 'Wien2kToTHz', ',', 'AbinitToTHz', ',', 'PwscfToTHz', ',', 'ElkToTHz', ',', 'SiestaToTHz', ',', 'VaspToTHz', ',', 'CP2KToTHz', ',', 'CrystalToTHz', ',', 'DftbpToTHz', ',', 'TurbomoleToTHz', ',', 'Hartree', ',', 'Bohr', ')', 'units', '=', '{', "'factor'", ':', 'None', ',', "'nac_factor'", ':', 'None', ',', "'distance_to_A'", ':', 'None', ',', "'force_constants_unit'", ':', 'None', ',', "'length_unit'", ':', 'None', '}', 'if', 'interface_mode', 'is', 'None', 'or', 'interface_mode', '==', "'vasp'", ':', 'units', '[', "'factor'", ']', '=', 'VaspToTHz', 'units', '[', "'nac_factor'", ']', '=', 'Hartree', '*', 'Bohr', 'units', '[', "'distance_to_A'", ']', '=', '1.0', 'units', '[', "'force_constants_unit'", ']', '=', "'eV/Angstrom^2'", 'units', '[', "'length_unit'", ']', '=', "'Angstrom'", 'elif', 'interface_mode', '==', "'abinit'", ':', 'units', '[', "'factor'", ']', '=', 'AbinitToTHz', 'units', '[', "'nac_factor'", ']', '=', 'Hartree', '/', 'Bohr', 'units', '[', "'distance_to_A'", ']', '=', 'Bohr', 'units', '[', "'force_constants_unit'", ']', '=', "'eV/Angstrom.au'", 'units', '[', "'length_unit'", ']', '=', "'au'", 'elif', 'interface_mode', '==', "'qe'", ':', 'units', '[', "'factor'", ']', '=', 'PwscfToTHz', 'units', '[', "'nac_factor'", ']', '=', '2.0', 'units', '[', "'distance_to_A'", ']', '=', 'Bohr', 'units', '[', "'force_constants_unit'", ']', '=', "'Ry/au^2'", 'units', '[', "'length_unit'", ']', '=', "'au'", 'elif', 'interface_mode', '==', "'wien2k'", ':', 'units', '[', "'factor'", ']', '=', 'Wien2kToTHz', 'units', '[', "'nac_factor'", ']', '=', '2000.0', 'units', '[', "'distance_to_A'", ']', '=', 'Bohr', 'units', '[', "'force_constants_unit'", ']', '=', "'mRy/au^2'", 'units', '[', "'length_unit'", ']', '=', "'au'", 'elif', 'interface_mode', '==', "'elk'", ':', 'units', '[', "'factor'", ']', '=', 'ElkToTHz', 'units', '[', "'nac_factor'", ']', '=', '1.0', 'units', '[', "'distance_to_A'", ']', '=', 'Bohr', 'units', '[', "'force_constants_unit'", ']', '=', "'hartree/au^2'", 'units', '[', "'length_unit'", ']', '=', "'au'", 'elif', 'interface_mode', '==', "'siesta'", ':', 'units', '[', "'factor'", ']', '=', 'SiestaToTHz', 'units', '[', "'nac_factor'", ']', '=', 'Hartree', '/', 'Bohr', 'units', '[', "'distance_to_A'", ']', '=', 'Bohr', 'units', '[', "'force_constants_unit'", ']', '=', "'eV/Angstrom.au'", 'units', '[', "'length_unit'", ']', '=', "'au'", 'elif', 'interface_mode', '==', "'cp2k'", ':', 'units', '[', "'factor'", ']', '=', 'CP2KToTHz', 'units', '[', "'nac_factor'", ']', '=', 'Hartree', '/', 'Bohr', '# in a.u.', 'units', '[', "'distance_to_A'", ']', '=', 'Bohr', 'units', '[', "'force_constants_unit'", ']', '=', "'hartree/au^2'", 'units', '[', "'length_unit'", ']', '=', "'Angstrom'", 'elif', 'interface_mode', '==', "'crystal'", ':', 'units', '[', "'factor'", ']', '=', 'CrystalToTHz', 'units', '[', "'nac_factor'", ']', '=', 'Hartree', '*', 'Bohr', 'units', '[', "'distance_to_A'", ']', '=', '1.0', 'units', '[', "'force_constants_unit'", ']', '=', "'eV/Angstrom^2'", 'units', '[', "'length_unit'", ']', '=', "'Angstrom'", 'elif', 'interface_mode', '==', "'dftbp'", ':', 'units', '[', "'factor'", ']', '=', 'DftbpToTHz', 'units', '[', "'nac_factor'", ']', '=', 'Hartree', '*', 'Bohr', 'units', '[', "'distance_to_A'", ']', '=', 'Bohr', 'units', '[', "'force_constants_unit'", ']', '=', "'hartree/au^2'", 'units', '[', "'length_unit'", ']', '=', "'au'", 
'elif', 'interface_mode', '==', "'turbomole'", ':', 'units', '[', "'factor'", ']', '=', 'TurbomoleToTHz', 'units', '[', "'nac_factor'", ']', '=', '1.0', 'units', '[', "'distance_to_A'", ']', '=', 'Bohr', 'units', '[', "'force_constants_unit'", ']', '=', "'hartree/au^2'", 'units', '[', "'length_unit'", ']', '=', "'au'", 'return', 'units'] | Return physical units used for calculators
Physical units: energy, distance, atomic mass, force
vasp : eV, Angstrom, AMU, eV/Angstrom
wien2k : Ry, au(=bohr), AMU, mRy/au
abinit : hartree, au, AMU, eV/Angstrom
elk : hartree, au, AMU, hartree/au
qe : Ry, au, AMU, Ry/au
siesta : eV, au, AMU, eV/Angstroem
CRYSTAL : eV, Angstrom, AMU, eV/Angstroem
DFTB+ : hartree, au, AMU hartree/au
TURBOMOLE : hartree, au, AMU, hartree/au | ['Return', 'physical', 'units', 'used', 'for', 'calculators'] | train | https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/interface/__init__.py#L231-L318 |
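
Callers pick the dict for their calculator and read conversion factors out of it. A small usage sketch, assuming the module is importable under the path shown in the record (phonopy/interface/__init__.py):

```python
from phonopy.interface import get_default_physical_units

units = get_default_physical_units("qe")
print(units["distance_to_A"])         # Bohr -> Angstrom factor for QE input
print(units["force_constants_unit"])  # 'Ry/au^2'
print(units["length_unit"])           # 'au'
```
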
4,911 | econ-ark/HARK | HARK/core.py | Market.store | def store(self):
'''
Record the current value of each variable X named in track_vars in an
attribute named X_hist.
Parameters
----------
none
Returns
-------
none
'''
for var_name in self.track_vars:
value_now = getattr(self,var_name)
getattr(self,var_name + '_hist').append(value_now) | python | def store(self):
'''
Record the current value of each variable X named in track_vars in an
attribute named X_hist.
Parameters
----------
none
Returns
-------
none
'''
for var_name in self.track_vars:
value_now = getattr(self,var_name)
getattr(self,var_name + '_hist').append(value_now) | ['def', 'store', '(', 'self', ')', ':', 'for', 'var_name', 'in', 'self', '.', 'track_vars', ':', 'value_now', '=', 'getattr', '(', 'self', ',', 'var_name', ')', 'getattr', '(', 'self', ',', 'var_name', '+', "'_hist'", ')', '.', 'append', '(', 'value_now', ')'] | Record the current value of each variable X named in track_vars in an
attribute named X_hist.
Parameters
----------
none
Returns
-------
none | ['Record', 'the', 'current', 'value', 'of', 'each', 'variable', 'X', 'named', 'in', 'track_vars', 'in', 'an', 'attribute', 'named', 'X_hist', '.'] | train | https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/core.py#L1115-L1130 |
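
`store` leans on a naming convention: for every tracked variable `X` the object also carries a list attribute `X_hist`, and each call appends the current value. A compact sketch of that convention outside HARK (class and variable names are made up):

```python
class TrackedMarket:
    def __init__(self, track_vars):
        self.track_vars = track_vars
        self.price = 0.0
        self.demand = 0.0
        # One history list per tracked variable, named "<var>_hist".
        for var_name in track_vars:
            setattr(self, var_name + "_hist", [])

    def store(self):
        for var_name in self.track_vars:
            getattr(self, var_name + "_hist").append(getattr(self, var_name))


market = TrackedMarket(["price", "demand"])
market.price, market.demand = 1.5, 10.0
market.store()
market.price, market.demand = 1.7, 9.0
market.store()
print(market.price_hist)   # [1.5, 1.7]
print(market.demand_hist)  # [10.0, 9.0]
```
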
4,912 | iamaziz/PyDataset | pydataset/__init__.py | data | def data(item=None, show_doc=False):
"""loads a datasaet (from in-modules datasets) in a dataframe data structure.
Args:
item (str) : name of the dataset to load.
show_doc (bool) : to show the dataset's documentation.
Examples:
>>> iris = data('iris')
>>> data('titanic', show_doc=True)
: returns the dataset's documentation.
>>> data()
: like help(), returns a dataframe [Item, Title]
for a list of the available datasets.
"""
if item:
try:
if show_doc:
__print_item_docs(item)
return
df = __read_csv(item)
return df
except KeyError:
find_similar(item)
else:
return __datasets_desc() | python | def data(item=None, show_doc=False):
"""loads a datasaet (from in-modules datasets) in a dataframe data structure.
Args:
item (str) : name of the dataset to load.
show_doc (bool) : to show the dataset's documentation.
Examples:
>>> iris = data('iris')
>>> data('titanic', show_doc=True)
: returns the dataset's documentation.
>>> data()
: like help(), returns a dataframe [Item, Title]
for a list of the available datasets.
"""
if item:
try:
if show_doc:
__print_item_docs(item)
return
df = __read_csv(item)
return df
except KeyError:
find_similar(item)
else:
return __datasets_desc() | ['def', 'data', '(', 'item', '=', 'None', ',', 'show_doc', '=', 'False', ')', ':', 'if', 'item', ':', 'try', ':', 'if', 'show_doc', ':', '__print_item_docs', '(', 'item', ')', 'return', 'df', '=', '__read_csv', '(', 'item', ')', 'return', 'df', 'except', 'KeyError', ':', 'find_similar', '(', 'item', ')', 'else', ':', 'return', '__datasets_desc', '(', ')'] | loads a datasaet (from in-modules datasets) in a dataframe data structure.
Args:
item (str) : name of the dataset to load.
show_doc (bool) : to show the dataset's documentation.
Examples:
>>> iris = data('iris')
>>> data('titanic', show_doc=True)
: returns the dataset's documentation.
>>> data()
: like help(), returns a dataframe [Item, Title]
for a list of the available datasets. | ['loads', 'a', 'datasaet', '(', 'from', 'in', '-', 'modules', 'datasets', ')', 'in', 'a', 'dataframe', 'data', 'structure', '.'] | train | https://github.com/iamaziz/PyDataset/blob/789c0ca7587b86343f636b132dcf1f475ee6b90b/pydataset/__init__.py#L8-L39 |
4,913 | allenai/allennlp | allennlp/semparse/domain_languages/wikitables_language.py | WikiTablesLanguage.previous | def previous(self, rows: List[Row]) -> List[Row]:
"""
Takes an expression that evaluates to a single row, and returns the row that occurs before
the input row in the original set of rows. If the input row happens to be the top row, we
will return an empty list.
"""
if not rows:
return []
input_row_index = self._get_row_index(rows[0])
if input_row_index > 0:
return [self.table_data[input_row_index - 1]]
return [] | python | def previous(self, rows: List[Row]) -> List[Row]:
"""
Takes an expression that evaluates to a single row, and returns the row that occurs before
the input row in the original set of rows. If the input row happens to be the top row, we
will return an empty list.
"""
if not rows:
return []
input_row_index = self._get_row_index(rows[0])
if input_row_index > 0:
return [self.table_data[input_row_index - 1]]
return [] | ['def', 'previous', '(', 'self', ',', 'rows', ':', 'List', '[', 'Row', ']', ')', '->', 'List', '[', 'Row', ']', ':', 'if', 'not', 'rows', ':', 'return', '[', ']', 'input_row_index', '=', 'self', '.', '_get_row_index', '(', 'rows', '[', '0', ']', ')', 'if', 'input_row_index', '>', '0', ':', 'return', '[', 'self', '.', 'table_data', '[', 'input_row_index', '-', '1', ']', ']', 'return', '[', ']'] | Takes an expression that evaluates to a single row, and returns the row that occurs before
the input row in the original set of rows. If the input row happens to be the top row, we
will return an empty list. | ['Takes', 'an', 'expression', 'that', 'evaluates', 'to', 'a', 'single', 'row', 'and', 'returns', 'the', 'row', 'that', 'occurs', 'before', 'the', 'input', 'row', 'in', 'the', 'original', 'set', 'of', 'rows', '.', 'If', 'the', 'input', 'row', 'happens', 'to', 'be', 'the', 'top', 'row', 'we', 'will', 'return', 'an', 'empty', 'list', '.'] | train | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/wikitables_language.py#L432-L443 |
4,914 | mikeboers/sitetools | sitetools/utils.py | expand_user | def expand_user(path, user=None):
"""Roughly the same as os.path.expanduser, but you can pass a default user."""
def _replace(m):
m_user = m.group(1) or user
return pwd.getpwnam(m_user).pw_dir if m_user else pwd.getpwuid(os.getuid()).pw_dir
return re.sub(r'~(\w*)', _replace, path) | python | def expand_user(path, user=None):
"""Roughly the same as os.path.expanduser, but you can pass a default user."""
def _replace(m):
m_user = m.group(1) or user
return pwd.getpwnam(m_user).pw_dir if m_user else pwd.getpwuid(os.getuid()).pw_dir
return re.sub(r'~(\w*)', _replace, path) | ['def', 'expand_user', '(', 'path', ',', 'user', '=', 'None', ')', ':', 'def', '_replace', '(', 'm', ')', ':', 'm_user', '=', 'm', '.', 'group', '(', '1', ')', 'or', 'user', 'return', 'pwd', '.', 'getpwnam', '(', 'm_user', ')', '.', 'pw_dir', 'if', 'm_user', 'else', 'pwd', '.', 'getpwuid', '(', 'os', '.', 'getuid', '(', ')', ')', '.', 'pw_dir', 'return', 're', '.', 'sub', '(', "r'~(\\w*)'", ',', '_replace', ',', 'path', ')'] | Roughly the same as os.path.expanduser, but you can pass a default user. | ['Roughly', 'the', 'same', 'as', 'os', '.', 'path', '.', 'expanduser', 'but', 'you', 'can', 'pass', 'a', 'default', 'user', '.'] | train | https://github.com/mikeboers/sitetools/blob/1ec4eea6902b4a276f868a711b783dd965c123b7/sitetools/utils.py#L23-L30 |
4,915 | wummel/linkchecker | linkcheck/checker/httpurl.py | HttpUrl.read_content | def read_content(self):
"""Return data and data size for this URL.
Can be overridden in subclasses."""
maxbytes = self.aggregate.config["maxfilesizedownload"]
buf = StringIO()
for data in self.url_connection.iter_content(chunk_size=self.ReadChunkBytes):
if buf.tell() + len(data) > maxbytes:
raise LinkCheckerError(_("File size too large"))
buf.write(data)
return buf.getvalue() | python | def read_content(self):
"""Return data and data size for this URL.
Can be overridden in subclasses."""
maxbytes = self.aggregate.config["maxfilesizedownload"]
buf = StringIO()
for data in self.url_connection.iter_content(chunk_size=self.ReadChunkBytes):
if buf.tell() + len(data) > maxbytes:
raise LinkCheckerError(_("File size too large"))
buf.write(data)
return buf.getvalue() | ['def', 'read_content', '(', 'self', ')', ':', 'maxbytes', '=', 'self', '.', 'aggregate', '.', 'config', '[', '"maxfilesizedownload"', ']', 'buf', '=', 'StringIO', '(', ')', 'for', 'data', 'in', 'self', '.', 'url_connection', '.', 'iter_content', '(', 'chunk_size', '=', 'self', '.', 'ReadChunkBytes', ')', ':', 'if', 'buf', '.', 'tell', '(', ')', '+', 'len', '(', 'data', ')', '>', 'maxbytes', ':', 'raise', 'LinkCheckerError', '(', '_', '(', '"File size too large"', ')', ')', 'buf', '.', 'write', '(', 'data', ')', 'return', 'buf', '.', 'getvalue', '(', ')'] | Return data and data size for this URL.
Can be overridden in subclasses. | ['Return', 'data', 'and', 'data', 'size', 'for', 'this', 'URL', '.', 'Can', 'be', 'overridden', 'in', 'subclasses', '.'] | train | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/httpurl.py#L308-L317 |
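
The size guard checks `buf.tell() + len(data)` before writing, so the download is abandoned as soon as the next chunk would exceed the limit rather than after the whole body has been buffered. The same idea with `requests` alone; the URL and limit below are placeholders:

```python
import io
import requests


def read_capped(url, maxbytes=1024 * 1024, chunk_size=8192):
    """Download a response body, refusing to buffer more than maxbytes."""
    buf = io.BytesIO()
    with requests.get(url, stream=True) as resp:
        for chunk in resp.iter_content(chunk_size=chunk_size):
            if buf.tell() + len(chunk) > maxbytes:
                raise ValueError("File size too large")
            buf.write(chunk)
    return buf.getvalue()


# data = read_capped("https://example.com/somefile", maxbytes=50_000)
```
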
4,916 | aliyun/aliyun-odps-python-sdk | odps/df/backends/formatter.py | _pprint_dict | def _pprint_dict(seq, _nest_lvl=0, max_seq_items=None, **kwds):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
rather than calling this directly.
"""
fmt = u("{%s}")
pairs = []
pfmt = u("%s: %s")
if max_seq_items is False:
nitems = len(seq)
else:
nitems = max_seq_items or options.display.max_seq_items or len(seq)
for k, v in list(seq.items())[:nitems]:
pairs.append(pfmt % (pprint_thing(k, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds),
pprint_thing(v, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds)))
if nitems < len(seq):
return fmt % (", ".join(pairs) + ", ...")
else:
return fmt % ", ".join(pairs) | python | def _pprint_dict(seq, _nest_lvl=0, max_seq_items=None, **kwds):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
rather than calling this directly.
"""
fmt = u("{%s}")
pairs = []
pfmt = u("%s: %s")
if max_seq_items is False:
nitems = len(seq)
else:
nitems = max_seq_items or options.display.max_seq_items or len(seq)
for k, v in list(seq.items())[:nitems]:
pairs.append(pfmt % (pprint_thing(k, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds),
pprint_thing(v, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds)))
if nitems < len(seq):
return fmt % (", ".join(pairs) + ", ...")
else:
return fmt % ", ".join(pairs) | ['def', '_pprint_dict', '(', 'seq', ',', '_nest_lvl', '=', '0', ',', 'max_seq_items', '=', 'None', ',', '*', '*', 'kwds', ')', ':', 'fmt', '=', 'u', '(', '"{%s}"', ')', 'pairs', '=', '[', ']', 'pfmt', '=', 'u', '(', '"%s: %s"', ')', 'if', 'max_seq_items', 'is', 'False', ':', 'nitems', '=', 'len', '(', 'seq', ')', 'else', ':', 'nitems', '=', 'max_seq_items', 'or', 'options', '.', 'display', '.', 'max_seq_items', 'or', 'len', '(', 'seq', ')', 'for', 'k', ',', 'v', 'in', 'list', '(', 'seq', '.', 'items', '(', ')', ')', '[', ':', 'nitems', ']', ':', 'pairs', '.', 'append', '(', 'pfmt', '%', '(', 'pprint_thing', '(', 'k', ',', '_nest_lvl', '+', '1', ',', 'max_seq_items', '=', 'max_seq_items', ',', '*', '*', 'kwds', ')', ',', 'pprint_thing', '(', 'v', ',', '_nest_lvl', '+', '1', ',', 'max_seq_items', '=', 'max_seq_items', ',', '*', '*', 'kwds', ')', ')', ')', 'if', 'nitems', '<', 'len', '(', 'seq', ')', ':', 'return', 'fmt', '%', '(', '", "', '.', 'join', '(', 'pairs', ')', '+', '", ..."', ')', 'else', ':', 'return', 'fmt', '%', '", "', '.', 'join', '(', 'pairs', ')'] | internal. pprinter for iterables. you should probably use pprint_thing()
rather then calling this directly. | ['internal', '.', 'pprinter', 'for', 'iterables', '.', 'you', 'should', 'probably', 'use', 'pprint_thing', '()', 'rather', 'then', 'calling', 'this', 'directly', '.'] | train | https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/df/backends/formatter.py#L78-L100 |
4,917 | pandas-dev/pandas | pandas/tseries/offsets.py | Week._end_apply_index | def _end_apply_index(self, dtindex):
"""
Add self to the given DatetimeIndex, specialized for case where
self.weekday is non-null.
Parameters
----------
dtindex : DatetimeIndex
Returns
-------
result : DatetimeIndex
"""
off = dtindex.to_perioddelta('D')
base, mult = libfrequencies.get_freq_code(self.freqstr)
base_period = dtindex.to_period(base)
if not isinstance(base_period._data, np.ndarray):
# unwrap PeriodIndex --> PeriodArray
base_period = base_period._data
if self.n > 0:
# when adding, dates on end roll to next
normed = dtindex - off + Timedelta(1, 'D') - Timedelta(1, 'ns')
roll = np.where(base_period.to_timestamp(how='end') == normed,
self.n, self.n - 1)
# integer-array addition on PeriodIndex is deprecated,
# so we use _addsub_int_array directly
shifted = base_period._addsub_int_array(roll, operator.add)
base = shifted.to_timestamp(how='end')
else:
# integer addition on PeriodIndex is deprecated,
# so we use _time_shift directly
roll = self.n
base = base_period._time_shift(roll).to_timestamp(how='end')
return base + off + Timedelta(1, 'ns') - Timedelta(1, 'D') | python | def _end_apply_index(self, dtindex):
"""
Add self to the given DatetimeIndex, specialized for case where
self.weekday is non-null.
Parameters
----------
dtindex : DatetimeIndex
Returns
-------
result : DatetimeIndex
"""
off = dtindex.to_perioddelta('D')
base, mult = libfrequencies.get_freq_code(self.freqstr)
base_period = dtindex.to_period(base)
if not isinstance(base_period._data, np.ndarray):
# unwrap PeriodIndex --> PeriodArray
base_period = base_period._data
if self.n > 0:
# when adding, dates on end roll to next
normed = dtindex - off + Timedelta(1, 'D') - Timedelta(1, 'ns')
roll = np.where(base_period.to_timestamp(how='end') == normed,
self.n, self.n - 1)
# integer-array addition on PeriodIndex is deprecated,
# so we use _addsub_int_array directly
shifted = base_period._addsub_int_array(roll, operator.add)
base = shifted.to_timestamp(how='end')
else:
# integer addition on PeriodIndex is deprecated,
# so we use _time_shift directly
roll = self.n
base = base_period._time_shift(roll).to_timestamp(how='end')
return base + off + Timedelta(1, 'ns') - Timedelta(1, 'D') | ['def', '_end_apply_index', '(', 'self', ',', 'dtindex', ')', ':', 'off', '=', 'dtindex', '.', 'to_perioddelta', '(', "'D'", ')', 'base', ',', 'mult', '=', 'libfrequencies', '.', 'get_freq_code', '(', 'self', '.', 'freqstr', ')', 'base_period', '=', 'dtindex', '.', 'to_period', '(', 'base', ')', 'if', 'not', 'isinstance', '(', 'base_period', '.', '_data', ',', 'np', '.', 'ndarray', ')', ':', '# unwrap PeriodIndex --> PeriodArray', 'base_period', '=', 'base_period', '.', '_data', 'if', 'self', '.', 'n', '>', '0', ':', '# when adding, dates on end roll to next', 'normed', '=', 'dtindex', '-', 'off', '+', 'Timedelta', '(', '1', ',', "'D'", ')', '-', 'Timedelta', '(', '1', ',', "'ns'", ')', 'roll', '=', 'np', '.', 'where', '(', 'base_period', '.', 'to_timestamp', '(', 'how', '=', "'end'", ')', '==', 'normed', ',', 'self', '.', 'n', ',', 'self', '.', 'n', '-', '1', ')', '# integer-array addition on PeriodIndex is deprecated,', '# so we use _addsub_int_array directly', 'shifted', '=', 'base_period', '.', '_addsub_int_array', '(', 'roll', ',', 'operator', '.', 'add', ')', 'base', '=', 'shifted', '.', 'to_timestamp', '(', 'how', '=', "'end'", ')', 'else', ':', '# integer addition on PeriodIndex is deprecated,', '# so we use _time_shift directly', 'roll', '=', 'self', '.', 'n', 'base', '=', 'base_period', '.', '_time_shift', '(', 'roll', ')', '.', 'to_timestamp', '(', 'how', '=', "'end'", ')', 'return', 'base', '+', 'off', '+', 'Timedelta', '(', '1', ',', "'ns'", ')', '-', 'Timedelta', '(', '1', ',', "'D'", ')'] | Add self to the given DatetimeIndex, specialized for case where
self.weekday is non-null.
Parameters
----------
dtindex : DatetimeIndex
Returns
-------
result : DatetimeIndex | ['Add', 'self', 'to', 'the', 'given', 'DatetimeIndex', 'specialized', 'for', 'case', 'where', 'self', '.', 'weekday', 'is', 'non', '-', 'null', '.'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L1354-L1390 |
4,918 | StanfordBioinformatics/loom | server/loomengine_server/api/models/base.py | BaseModel.save | def save(self, *args, **kwargs):
"""
This save method protects against two processes concurrently modifying
the same object. Normally the second save would silently overwrite the
changes from the first. Instead we raise a ConcurrentModificationError.
"""
cls = self.__class__
if self.pk:
rows = cls.objects.filter(
pk=self.pk, _change=self._change).update(
_change=self._change + 1)
if not rows:
raise ConcurrentModificationError(cls.__name__, self.pk)
self._change += 1
count = 0
max_retries=3
while True:
try:
return super(BaseModel, self).save(*args, **kwargs)
except django.db.utils.OperationalError:
if count >= max_retries:
raise
count += 1 | python | def save(self, *args, **kwargs):
"""
This save method protects against two processes concurrently modifying
the same object. Normally the second save would silently overwrite the
changes from the first. Instead we raise a ConcurrentModificationError.
"""
cls = self.__class__
if self.pk:
rows = cls.objects.filter(
pk=self.pk, _change=self._change).update(
_change=self._change + 1)
if not rows:
raise ConcurrentModificationError(cls.__name__, self.pk)
self._change += 1
count = 0
max_retries=3
while True:
try:
return super(BaseModel, self).save(*args, **kwargs)
except django.db.utils.OperationalError:
if count >= max_retries:
raise
count += 1 | ['def', 'save', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'cls', '=', 'self', '.', '__class__', 'if', 'self', '.', 'pk', ':', 'rows', '=', 'cls', '.', 'objects', '.', 'filter', '(', 'pk', '=', 'self', '.', 'pk', ',', '_change', '=', 'self', '.', '_change', ')', '.', 'update', '(', '_change', '=', 'self', '.', '_change', '+', '1', ')', 'if', 'not', 'rows', ':', 'raise', 'ConcurrentModificationError', '(', 'cls', '.', '__name__', ',', 'self', '.', 'pk', ')', 'self', '.', '_change', '+=', '1', 'count', '=', '0', 'max_retries', '=', '3', 'while', 'True', ':', 'try', ':', 'return', 'super', '(', 'BaseModel', ',', 'self', ')', '.', 'save', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'except', 'django', '.', 'db', '.', 'utils', '.', 'OperationalError', ':', 'if', 'count', '>=', 'max_retries', ':', 'raise', 'count', '+=', '1'] | This save method protects against two processesses concurrently modifying
the same object. Normally the second save would silently overwrite the
changes from the first. Instead we raise a ConcurrentModificationError. | ['This', 'save', 'method', 'protects', 'against', 'two', 'processesses', 'concurrently', 'modifying', 'the', 'same', 'object', '.', 'Normally', 'the', 'second', 'save', 'would', 'silently', 'overwrite', 'the', 'changes', 'from', 'the', 'first', '.', 'Instead', 'we', 'raise', 'a', 'ConcurrentModificationError', '.'] | train | https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/models/base.py#L225-L248 |
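The save() above implements optimistic locking: the UPDATE is filtered on both pk and the current _change counter, so a stale copy matches zero rows and raises instead of silently overwriting. Below is a minimal, self-contained sketch of that pattern with a dict standing in for the database table; the class and table names are invented for illustration and are not part of loom.

```python
class ConcurrentModificationError(Exception):
    pass

_TABLE = {}  # pk -> {"name": ..., "_change": ...}; stands in for the DB table


class Record:
    def __init__(self, pk, name, change=0):
        self.pk, self.name, self._change = pk, name, change

    def save(self):
        row = _TABLE.get(self.pk)
        if row is not None:
            # emulates UPDATE ... WHERE pk=%s AND _change=%s; zero rows means we are stale
            if row["_change"] != self._change:
                raise ConcurrentModificationError("Record", self.pk)
            self._change += 1
        _TABLE[self.pk] = {"name": self.name, "_change": self._change}


a = Record(1, "first")
a.save()
b = Record(1, "second")            # loaded before a's next save, so it becomes stale
a.name = "first-updated"
a.save()                           # bumps _change past b's copy
try:
    b.save()
except ConcurrentModificationError:
    print("stale write rejected")  # expected
```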
4,919 | hyperledger/sawtooth-core | validator/sawtooth_validator/config/validator.py | load_toml_validator_config | def load_toml_validator_config(filename):
"""Returns a ValidatorConfig created by loading a TOML file from the
filesystem.
"""
if not os.path.exists(filename):
LOGGER.info(
"Skipping validator config loading from non-existent config file:"
" %s", filename)
return ValidatorConfig()
LOGGER.info("Loading validator information from config: %s", filename)
try:
with open(filename) as fd:
raw_config = fd.read()
except IOError as e:
raise LocalConfigurationError(
"Unable to load validator configuration file: {}".format(str(e)))
toml_config = toml.loads(raw_config)
invalid_keys = set(toml_config.keys()).difference(
['bind', 'endpoint', 'peering', 'seeds', 'peers', 'network_public_key',
'network_private_key', 'scheduler', 'permissions', 'roles',
'opentsdb_url', 'opentsdb_db', 'opentsdb_username',
'opentsdb_password', 'minimum_peer_connectivity',
'maximum_peer_connectivity', 'state_pruning_block_depth',
'fork_cache_keep_time',
'component_thread_pool_workers', 'network_thread_pool_workers',
'signature_thread_pool_workers'])
if invalid_keys:
raise LocalConfigurationError(
"Invalid keys in validator config: "
"{}".format(", ".join(sorted(list(invalid_keys)))))
bind_network = None
bind_component = None
bind_consensus = None
for bind in toml_config.get("bind", []):
if "network" in bind:
bind_network = bind[bind.find(":") + 1:]
if "component" in bind:
bind_component = bind[bind.find(":") + 1:]
if "consensus" in bind:
bind_consensus = bind[bind.find(":") + 1:]
network_public_key = None
network_private_key = None
if toml_config.get("network_public_key") is not None:
network_public_key = toml_config.get("network_public_key").encode()
if toml_config.get("network_private_key") is not None:
network_private_key = toml_config.get("network_private_key").encode()
config = ValidatorConfig(
bind_network=bind_network,
bind_component=bind_component,
bind_consensus=bind_consensus,
endpoint=toml_config.get("endpoint", None),
peering=toml_config.get("peering", None),
seeds=toml_config.get("seeds", None),
peers=toml_config.get("peers", None),
network_public_key=network_public_key,
network_private_key=network_private_key,
scheduler=toml_config.get("scheduler", None),
permissions=parse_permissions(toml_config.get("permissions", None)),
roles=toml_config.get("roles", None),
opentsdb_url=toml_config.get("opentsdb_url", None),
opentsdb_db=toml_config.get("opentsdb_db", None),
opentsdb_username=toml_config.get("opentsdb_username", None),
opentsdb_password=toml_config.get("opentsdb_password", None),
minimum_peer_connectivity=toml_config.get(
"minimum_peer_connectivity", None),
maximum_peer_connectivity=toml_config.get(
"maximum_peer_connectivity", None),
state_pruning_block_depth=toml_config.get(
"state_pruning_block_depth", None),
fork_cache_keep_time=toml_config.get(
"fork_cache_keep_time", None),
component_thread_pool_workers=toml_config.get(
"component_thread_pool_workers", None),
network_thread_pool_workers=toml_config.get(
"network_thread_pool_workers", None),
signature_thread_pool_workers=toml_config.get(
"signature_thread_pool_workers", None)
)
return config | python | def load_toml_validator_config(filename):
"""Returns a ValidatorConfig created by loading a TOML file from the
filesystem.
"""
if not os.path.exists(filename):
LOGGER.info(
"Skipping validator config loading from non-existent config file:"
" %s", filename)
return ValidatorConfig()
LOGGER.info("Loading validator information from config: %s", filename)
try:
with open(filename) as fd:
raw_config = fd.read()
except IOError as e:
raise LocalConfigurationError(
"Unable to load validator configuration file: {}".format(str(e)))
toml_config = toml.loads(raw_config)
invalid_keys = set(toml_config.keys()).difference(
['bind', 'endpoint', 'peering', 'seeds', 'peers', 'network_public_key',
'network_private_key', 'scheduler', 'permissions', 'roles',
'opentsdb_url', 'opentsdb_db', 'opentsdb_username',
'opentsdb_password', 'minimum_peer_connectivity',
'maximum_peer_connectivity', 'state_pruning_block_depth',
'fork_cache_keep_time',
'component_thread_pool_workers', 'network_thread_pool_workers',
'signature_thread_pool_workers'])
if invalid_keys:
raise LocalConfigurationError(
"Invalid keys in validator config: "
"{}".format(", ".join(sorted(list(invalid_keys)))))
bind_network = None
bind_component = None
bind_consensus = None
for bind in toml_config.get("bind", []):
if "network" in bind:
bind_network = bind[bind.find(":") + 1:]
if "component" in bind:
bind_component = bind[bind.find(":") + 1:]
if "consensus" in bind:
bind_consensus = bind[bind.find(":") + 1:]
network_public_key = None
network_private_key = None
if toml_config.get("network_public_key") is not None:
network_public_key = toml_config.get("network_public_key").encode()
if toml_config.get("network_private_key") is not None:
network_private_key = toml_config.get("network_private_key").encode()
config = ValidatorConfig(
bind_network=bind_network,
bind_component=bind_component,
bind_consensus=bind_consensus,
endpoint=toml_config.get("endpoint", None),
peering=toml_config.get("peering", None),
seeds=toml_config.get("seeds", None),
peers=toml_config.get("peers", None),
network_public_key=network_public_key,
network_private_key=network_private_key,
scheduler=toml_config.get("scheduler", None),
permissions=parse_permissions(toml_config.get("permissions", None)),
roles=toml_config.get("roles", None),
opentsdb_url=toml_config.get("opentsdb_url", None),
opentsdb_db=toml_config.get("opentsdb_db", None),
opentsdb_username=toml_config.get("opentsdb_username", None),
opentsdb_password=toml_config.get("opentsdb_password", None),
minimum_peer_connectivity=toml_config.get(
"minimum_peer_connectivity", None),
maximum_peer_connectivity=toml_config.get(
"maximum_peer_connectivity", None),
state_pruning_block_depth=toml_config.get(
"state_pruning_block_depth", None),
fork_cache_keep_time=toml_config.get(
"fork_cache_keep_time", None),
component_thread_pool_workers=toml_config.get(
"component_thread_pool_workers", None),
network_thread_pool_workers=toml_config.get(
"network_thread_pool_workers", None),
signature_thread_pool_workers=toml_config.get(
"signature_thread_pool_workers", None)
)
return config | ['def', 'load_toml_validator_config', '(', 'filename', ')', ':', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'filename', ')', ':', 'LOGGER', '.', 'info', '(', '"Skipping validator config loading from non-existent config file:"', '" %s"', ',', 'filename', ')', 'return', 'ValidatorConfig', '(', ')', 'LOGGER', '.', 'info', '(', '"Loading validator information from config: %s"', ',', 'filename', ')', 'try', ':', 'with', 'open', '(', 'filename', ')', 'as', 'fd', ':', 'raw_config', '=', 'fd', '.', 'read', '(', ')', 'except', 'IOError', 'as', 'e', ':', 'raise', 'LocalConfigurationError', '(', '"Unable to load validator configuration file: {}"', '.', 'format', '(', 'str', '(', 'e', ')', ')', ')', 'toml_config', '=', 'toml', '.', 'loads', '(', 'raw_config', ')', 'invalid_keys', '=', 'set', '(', 'toml_config', '.', 'keys', '(', ')', ')', '.', 'difference', '(', '[', "'bind'", ',', "'endpoint'", ',', "'peering'", ',', "'seeds'", ',', "'peers'", ',', "'network_public_key'", ',', "'network_private_key'", ',', "'scheduler'", ',', "'permissions'", ',', "'roles'", ',', "'opentsdb_url'", ',', "'opentsdb_db'", ',', "'opentsdb_username'", ',', "'opentsdb_password'", ',', "'minimum_peer_connectivity'", ',', "'maximum_peer_connectivity'", ',', "'state_pruning_block_depth'", ',', "'fork_cache_keep_time'", ',', "'component_thread_pool_workers'", ',', "'network_thread_pool_workers'", ',', "'signature_thread_pool_workers'", ']', ')', 'if', 'invalid_keys', ':', 'raise', 'LocalConfigurationError', '(', '"Invalid keys in validator config: "', '"{}"', '.', 'format', '(', '", "', '.', 'join', '(', 'sorted', '(', 'list', '(', 'invalid_keys', ')', ')', ')', ')', ')', 'bind_network', '=', 'None', 'bind_component', '=', 'None', 'bind_consensus', '=', 'None', 'for', 'bind', 'in', 'toml_config', '.', 'get', '(', '"bind"', ',', '[', ']', ')', ':', 'if', '"network"', 'in', 'bind', ':', 'bind_network', '=', 'bind', '[', 'bind', '.', 'find', '(', '":"', ')', '+', '1', ':', ']', 'if', '"component"', 'in', 'bind', ':', 'bind_component', '=', 'bind', '[', 'bind', '.', 'find', '(', '":"', ')', '+', '1', ':', ']', 'if', '"consensus"', 'in', 'bind', ':', 'bind_consensus', '=', 'bind', '[', 'bind', '.', 'find', '(', '":"', ')', '+', '1', ':', ']', 'network_public_key', '=', 'None', 'network_private_key', '=', 'None', 'if', 'toml_config', '.', 'get', '(', '"network_public_key"', ')', 'is', 'not', 'None', ':', 'network_public_key', '=', 'toml_config', '.', 'get', '(', '"network_public_key"', ')', '.', 'encode', '(', ')', 'if', 'toml_config', '.', 'get', '(', '"network_private_key"', ')', 'is', 'not', 'None', ':', 'network_private_key', '=', 'toml_config', '.', 'get', '(', '"network_private_key"', ')', '.', 'encode', '(', ')', 'config', '=', 'ValidatorConfig', '(', 'bind_network', '=', 'bind_network', ',', 'bind_component', '=', 'bind_component', ',', 'bind_consensus', '=', 'bind_consensus', ',', 'endpoint', '=', 'toml_config', '.', 'get', '(', '"endpoint"', ',', 'None', ')', ',', 'peering', '=', 'toml_config', '.', 'get', '(', '"peering"', ',', 'None', ')', ',', 'seeds', '=', 'toml_config', '.', 'get', '(', '"seeds"', ',', 'None', ')', ',', 'peers', '=', 'toml_config', '.', 'get', '(', '"peers"', ',', 'None', ')', ',', 'network_public_key', '=', 'network_public_key', ',', 'network_private_key', '=', 'network_private_key', ',', 'scheduler', '=', 'toml_config', '.', 'get', '(', '"scheduler"', ',', 'None', ')', ',', 'permissions', '=', 'parse_permissions', '(', 'toml_config', '.', 'get', '(', '"permissions"', ',', 
'None', ')', ')', ',', 'roles', '=', 'toml_config', '.', 'get', '(', '"roles"', ',', 'None', ')', ',', 'opentsdb_url', '=', 'toml_config', '.', 'get', '(', '"opentsdb_url"', ',', 'None', ')', ',', 'opentsdb_db', '=', 'toml_config', '.', 'get', '(', '"opentsdb_db"', ',', 'None', ')', ',', 'opentsdb_username', '=', 'toml_config', '.', 'get', '(', '"opentsdb_username"', ',', 'None', ')', ',', 'opentsdb_password', '=', 'toml_config', '.', 'get', '(', '"opentsdb_password"', ',', 'None', ')', ',', 'minimum_peer_connectivity', '=', 'toml_config', '.', 'get', '(', '"minimum_peer_connectivity"', ',', 'None', ')', ',', 'maximum_peer_connectivity', '=', 'toml_config', '.', 'get', '(', '"maximum_peer_connectivity"', ',', 'None', ')', ',', 'state_pruning_block_depth', '=', 'toml_config', '.', 'get', '(', '"state_pruning_block_depth"', ',', 'None', ')', ',', 'fork_cache_keep_time', '=', 'toml_config', '.', 'get', '(', '"fork_cache_keep_time"', ',', 'None', ')', ',', 'component_thread_pool_workers', '=', 'toml_config', '.', 'get', '(', '"component_thread_pool_workers"', ',', 'None', ')', ',', 'network_thread_pool_workers', '=', 'toml_config', '.', 'get', '(', '"network_thread_pool_workers"', ',', 'None', ')', ',', 'signature_thread_pool_workers', '=', 'toml_config', '.', 'get', '(', '"signature_thread_pool_workers"', ',', 'None', ')', ')', 'return', 'config'] | Returns a ValidatorConfig created by loading a TOML file from the
filesystem. | ['Returns', 'a', 'ValidatorConfig', 'created', 'by', 'loading', 'a', 'TOML', 'file', 'from', 'the', 'filesystem', '.'] | train | https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/config/validator.py#L48-L134 |
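The loader above reads the whole file, rejects unknown top-level keys, and splits each 'bind' entry on its first colon. A stand-alone sketch of those three steps with the toml package; the config text and the truncated allowed-key set here are invented for illustration, not a real validator.toml.

```python
import toml

raw = """
bind = ["network:tcp://127.0.0.1:8800", "component:tcp://127.0.0.1:4004"]
endpoint = "tcp://203.0.113.5:8800"
peering = "static"
peers = ["tcp://192.0.2.10:8800"]
"""

allowed = {"bind", "endpoint", "peering", "seeds", "peers", "scheduler"}  # truncated list
cfg = toml.loads(raw)

invalid = set(cfg) - allowed
if invalid:
    raise ValueError("Invalid keys in validator config: " + ", ".join(sorted(invalid)))

bind_network = None
for bind in cfg.get("bind", []):
    if "network" in bind:
        bind_network = bind[bind.find(":") + 1:]

print(bind_network)  # tcp://127.0.0.1:8800
```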
4,920 | aio-libs/aiohttp | aiohttp/helpers.py | BasicAuth.decode | def decode(cls, auth_header: str, encoding: str='latin1') -> 'BasicAuth':
"""Create a BasicAuth object from an Authorization HTTP header."""
try:
auth_type, encoded_credentials = auth_header.split(' ', 1)
except ValueError:
raise ValueError('Could not parse authorization header.')
if auth_type.lower() != 'basic':
raise ValueError('Unknown authorization method %s' % auth_type)
try:
decoded = base64.b64decode(
encoded_credentials.encode('ascii'), validate=True
).decode(encoding)
except binascii.Error:
raise ValueError('Invalid base64 encoding.')
try:
# RFC 2617 HTTP Authentication
# https://www.ietf.org/rfc/rfc2617.txt
# the colon must be present, but the username and password may be
# otherwise blank.
username, password = decoded.split(':', 1)
except ValueError:
raise ValueError('Invalid credentials.')
return cls(username, password, encoding=encoding) | python | def decode(cls, auth_header: str, encoding: str='latin1') -> 'BasicAuth':
"""Create a BasicAuth object from an Authorization HTTP header."""
try:
auth_type, encoded_credentials = auth_header.split(' ', 1)
except ValueError:
raise ValueError('Could not parse authorization header.')
if auth_type.lower() != 'basic':
raise ValueError('Unknown authorization method %s' % auth_type)
try:
decoded = base64.b64decode(
encoded_credentials.encode('ascii'), validate=True
).decode(encoding)
except binascii.Error:
raise ValueError('Invalid base64 encoding.')
try:
# RFC 2617 HTTP Authentication
# https://www.ietf.org/rfc/rfc2617.txt
# the colon must be present, but the username and password may be
# otherwise blank.
username, password = decoded.split(':', 1)
except ValueError:
raise ValueError('Invalid credentials.')
return cls(username, password, encoding=encoding) | ['def', 'decode', '(', 'cls', ',', 'auth_header', ':', 'str', ',', 'encoding', ':', 'str', '=', "'latin1'", ')', '->', "'BasicAuth'", ':', 'try', ':', 'auth_type', ',', 'encoded_credentials', '=', 'auth_header', '.', 'split', '(', "' '", ',', '1', ')', 'except', 'ValueError', ':', 'raise', 'ValueError', '(', "'Could not parse authorization header.'", ')', 'if', 'auth_type', '.', 'lower', '(', ')', '!=', "'basic'", ':', 'raise', 'ValueError', '(', "'Unknown authorization method %s'", '%', 'auth_type', ')', 'try', ':', 'decoded', '=', 'base64', '.', 'b64decode', '(', 'encoded_credentials', '.', 'encode', '(', "'ascii'", ')', ',', 'validate', '=', 'True', ')', '.', 'decode', '(', 'encoding', ')', 'except', 'binascii', '.', 'Error', ':', 'raise', 'ValueError', '(', "'Invalid base64 encoding.'", ')', 'try', ':', '# RFC 2617 HTTP Authentication', '# https://www.ietf.org/rfc/rfc2617.txt', '# the colon must be present, but the username and password may be', '# otherwise blank.', 'username', ',', 'password', '=', 'decoded', '.', 'split', '(', "':'", ',', '1', ')', 'except', 'ValueError', ':', 'raise', 'ValueError', '(', "'Invalid credentials.'", ')', 'return', 'cls', '(', 'username', ',', 'password', ',', 'encoding', '=', 'encoding', ')'] | Create a BasicAuth object from an Authorization HTTP header. | ['Create', 'a', 'BasicAuth', 'object', 'from', 'an', 'Authorization', 'HTTP', 'header', '.'] | train | https://github.com/aio-libs/aiohttp/blob/9504fe2affaaff673fa4f3754c1c44221f8ba47d/aiohttp/helpers.py#L134-L160 |
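Round-trip usage of the decoder above (aiohttp's public BasicAuth type); the credentials are placeholders.

```python
import base64
from aiohttp import BasicAuth

header = "Basic " + base64.b64encode(b"alice:s3cret").decode("ascii")
auth = BasicAuth.decode(header)
print(auth.login, auth.password)  # alice s3cret
print(auth.encode() == header)    # True -- encode() rebuilds the header value
```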
4,921 | ioos/pyoos | pyoos/parsers/ioos/one/describe_sensor.py | DescribeSensor.get_ioos_def | def get_ioos_def(self, ident, elem_type, ont):
"""Gets a definition given an identifier and where to search for it"""
if elem_type == "identifier":
getter_fn = self.system.get_identifiers_by_name
elif elem_type == "classifier":
getter_fn = self.system.get_classifiers_by_name
else:
raise ValueError("Unknown element type '{}'".format(elem_type))
return DescribeSensor.get_named_by_definition(
getter_fn(ident), urljoin(ont, ident)
) | python | def get_ioos_def(self, ident, elem_type, ont):
"""Gets a definition given an identifier and where to search for it"""
if elem_type == "identifier":
getter_fn = self.system.get_identifiers_by_name
elif elem_type == "classifier":
getter_fn = self.system.get_classifiers_by_name
else:
raise ValueError("Unknown element type '{}'".format(elem_type))
return DescribeSensor.get_named_by_definition(
getter_fn(ident), urljoin(ont, ident)
) | ['def', 'get_ioos_def', '(', 'self', ',', 'ident', ',', 'elem_type', ',', 'ont', ')', ':', 'if', 'elem_type', '==', '"identifier"', ':', 'getter_fn', '=', 'self', '.', 'system', '.', 'get_identifiers_by_name', 'elif', 'elem_type', '==', '"classifier"', ':', 'getter_fn', '=', 'self', '.', 'system', '.', 'get_classifiers_by_name', 'else', ':', 'raise', 'ValueError', '(', '"Unknown element type \'{}\'"', '.', 'format', '(', 'elem_type', ')', ')', 'return', 'DescribeSensor', '.', 'get_named_by_definition', '(', 'getter_fn', '(', 'ident', ')', ',', 'urljoin', '(', 'ont', ',', 'ident', ')', ')'] | Gets a definition given an identifier and where to search for it | ['Gets', 'a', 'definition', 'given', 'an', 'identifier', 'and', 'where', 'to', 'search', 'for', 'it'] | train | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/ioos/one/describe_sensor.py#L50-L60 |
4,922 | toastdriven/alligator | alligator/backends/locmem_backend.py | Client.drop_all | def drop_all(self, queue_name):
"""
Drops all the tasks in the queue.
:param queue_name: The name of the queue. Usually handled by the
``Gator`` instance.
:type queue_name: string
"""
cls = self.__class__
for task_id in cls.queues.get(queue_name, []):
cls.task_data.pop(task_id, None)
cls.queues[queue_name] = [] | python | def drop_all(self, queue_name):
"""
Drops all the tasks in the queue.
:param queue_name: The name of the queue. Usually handled by the
``Gator`` instance.
:type queue_name: string
"""
cls = self.__class__
for task_id in cls.queues.get(queue_name, []):
cls.task_data.pop(task_id, None)
cls.queues[queue_name] = [] | ['def', 'drop_all', '(', 'self', ',', 'queue_name', ')', ':', 'cls', '=', 'self', '.', '__class__', 'for', 'task_id', 'in', 'cls', '.', 'queues', '.', 'get', '(', 'queue_name', ',', '[', ']', ')', ':', 'cls', '.', 'task_data', '.', 'pop', '(', 'task_id', ',', 'None', ')', 'cls', '.', 'queues', '[', 'queue_name', ']', '=', '[', ']'] | Drops all the task in the queue.
:param queue_name: The name of the queue. Usually handled by the
``Gator`` instance.
:type queue_name: string | ['Drops', 'all', 'the', 'task', 'in', 'the', 'queue', '.'] | train | https://github.com/toastdriven/alligator/blob/f18bcb35b350fc6b0886393f5246d69c892b36c7/alligator/backends/locmem_backend.py#L30-L43 |
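drop_all above clears both the per-queue list of task ids and the shared task-data mapping, which live at class level and are therefore shared by all instances. A minimal self-contained sketch of that storage layout; the push method and its signature are invented here to make the example runnable, not copied from alligator.

```python
class LocmemClient:
    queues = {}     # queue_name -> [task_id, ...]   (class-level, shared by instances)
    task_data = {}  # task_id -> serialized task payload

    def push(self, queue_name, task_id, data):
        cls = self.__class__
        cls.queues.setdefault(queue_name, []).append(task_id)
        cls.task_data[task_id] = data

    def drop_all(self, queue_name):
        cls = self.__class__
        for task_id in cls.queues.get(queue_name, []):
            cls.task_data.pop(task_id, None)
        cls.queues[queue_name] = []


client = LocmemClient()
client.push("all", "task-1", b"payload")
client.drop_all("all")
print(LocmemClient.queues["all"], "task-1" in LocmemClient.task_data)  # [] False
```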
4,923 | weld-project/weld | python/numpy/weldnumpy/weldarray.py | weldarray._gen_weldobj | def _gen_weldobj(self, arr):
'''
Generating a new weldarray from a given arr for self.
@arr: weldarray or ndarray.
- weldarray: Just update the weldobject with the context from the
weldarray.
- ndarray: Add the given array to the context of the weldobject.
Sets self.name and self.weldobj.
'''
self.weldobj = WeldObject(NumpyArrayEncoder(), NumpyArrayDecoder())
if isinstance(arr, weldarray):
self.weldobj.update(arr.weldobj)
self.weldobj.weld_code = arr.weldobj.weld_code
self.name = arr.name
else:
# general case for arr being numpy scalar or ndarray
# weldobj returns the name bound to the given array. That is also
# the array that future ops will act on, so set weld_code to it.
self.name = self.weldobj.weld_code = self.weldobj.update(arr,
SUPPORTED_DTYPES[str(arr.dtype)]) | python | def _gen_weldobj(self, arr):
'''
Generating a new weldarray from a given arr for self.
@arr: weldarray or ndarray.
- weldarray: Just update the weldobject with the context from the
weldarray.
- ndarray: Add the given array to the context of the weldobject.
Sets self.name and self.weldobj.
'''
self.weldobj = WeldObject(NumpyArrayEncoder(), NumpyArrayDecoder())
if isinstance(arr, weldarray):
self.weldobj.update(arr.weldobj)
self.weldobj.weld_code = arr.weldobj.weld_code
self.name = arr.name
else:
# general case for arr being numpy scalar or ndarray
# weldobj returns the name bound to the given array. That is also
# the array that future ops will act on, so set weld_code to it.
self.name = self.weldobj.weld_code = self.weldobj.update(arr,
SUPPORTED_DTYPES[str(arr.dtype)]) | ['def', '_gen_weldobj', '(', 'self', ',', 'arr', ')', ':', 'self', '.', 'weldobj', '=', 'WeldObject', '(', 'NumpyArrayEncoder', '(', ')', ',', 'NumpyArrayDecoder', '(', ')', ')', 'if', 'isinstance', '(', 'arr', ',', 'weldarray', ')', ':', 'self', '.', 'weldobj', '.', 'update', '(', 'arr', '.', 'weldobj', ')', 'self', '.', 'weldobj', '.', 'weld_code', '=', 'arr', '.', 'weldobj', '.', 'weld_code', 'self', '.', 'name', '=', 'arr', '.', 'name', 'else', ':', '# general case for arr being numpy scalar or ndarray', '# weldobj returns the name bound to the given array. That is also', '# the array that future ops will act on, so set weld_code to it.', 'self', '.', 'name', '=', 'self', '.', 'weldobj', '.', 'weld_code', '=', 'self', '.', 'weldobj', '.', 'update', '(', 'arr', ',', 'SUPPORTED_DTYPES', '[', 'str', '(', 'arr', '.', 'dtype', ')', ']', ')'] | Generating a new weldarray from a given arr for self.
@arr: weldarray or ndarray.
- weldarray: Just update the weldobject with the context from the
weldarray.
- ndarray: Add the given array to the context of the weldobject.
Sets self.name and self.weldobj. | ['Generating', 'a', 'new', 'weldarray', 'from', 'a', 'given', 'arr', 'for', 'self', '.'] | train | https://github.com/weld-project/weld/blob/8ddd6db6b28878bef0892da44b1d2002b564389c/python/numpy/weldnumpy/weldarray.py#L192-L211 |
4,924 | ponty/confduino | confduino/util.py | clean_dir | def clean_dir(root):
'''remove .* and _* files and directories under root'''
for x in root.walkdirs('.*', errors='ignore'):
x.rmtree()
for x in root.walkdirs('_*', errors='ignore'):
x.rmtree()
for x in root.walkfiles('.*', errors='ignore'):
x.remove()
for x in root.walkfiles('_*', errors='ignore'):
x.remove() | python | def clean_dir(root):
'''remove .* and _* files and directories under root'''
for x in root.walkdirs('.*', errors='ignore'):
x.rmtree()
for x in root.walkdirs('_*', errors='ignore'):
x.rmtree()
for x in root.walkfiles('.*', errors='ignore'):
x.remove()
for x in root.walkfiles('_*', errors='ignore'):
x.remove() | ['def', 'clean_dir', '(', 'root', ')', ':', 'for', 'x', 'in', 'root', '.', 'walkdirs', '(', "'.*'", ',', 'errors', '=', "'ignore'", ')', ':', 'x', '.', 'rmtree', '(', ')', 'for', 'x', 'in', 'root', '.', 'walkdirs', '(', "'_*'", ',', 'errors', '=', "'ignore'", ')', ':', 'x', '.', 'rmtree', '(', ')', 'for', 'x', 'in', 'root', '.', 'walkfiles', '(', "'.*'", ',', 'errors', '=', "'ignore'", ')', ':', 'x', '.', 'remove', '(', ')', 'for', 'x', 'in', 'root', '.', 'walkfiles', '(', "'_*'", ',', 'errors', '=', "'ignore'", ')', ':', 'x', '.', 'remove', '(', ')'] | remove .* and _* files and directories under root | ['remove', '.', '*', 'and', '_', '*', 'files', 'and', 'directories', 'under', 'root'] | train | https://github.com/ponty/confduino/blob/f4c261e5e84997f145a8bdd001f471db74c9054b/confduino/util.py#L90-L100 |
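clean_dir relies on path.py's walkdirs/walkfiles API. The sketch below performs the same cleanup, removing every file or directory whose name starts with '.' or '_', using only the standard library; the directory name is a placeholder.

```python
import shutil
from pathlib import Path

def clean_dir_stdlib(root: Path) -> None:
    for p in list(root.rglob("*")):
        if not p.exists():                    # parent already removed by rmtree below
            continue
        if p.name.startswith((".", "_")):
            if p.is_dir():
                shutil.rmtree(p, ignore_errors=True)
            else:
                p.unlink()

clean_dir_stdlib(Path("arduino_tree"))        # placeholder directory
```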
4,925 | rfk/django-supervisor | djsupervisor/management/commands/supervisor.py | Command._handle_getconfig | def _handle_getconfig(self,cfg_file,*args,**options):
"""Command 'supervisor getconfig' prints merged config to stdout."""
if args:
raise CommandError("supervisor getconfig takes no arguments")
print cfg_file.read()
return 0 | python | def _handle_getconfig(self,cfg_file,*args,**options):
"""Command 'supervisor getconfig' prints merged config to stdout."""
if args:
raise CommandError("supervisor getconfig takes no arguments")
print cfg_file.read()
return 0 | ['def', '_handle_getconfig', '(', 'self', ',', 'cfg_file', ',', '*', 'args', ',', '*', '*', 'options', ')', ':', 'if', 'args', ':', 'raise', 'CommandError', '(', '"supervisor getconfig takes no arguments"', ')', 'print', 'cfg_file', '.', 'read', '(', ')', 'return', '0'] | Command 'supervisor getconfig' prints merged config to stdout. | ['Command', 'supervisor', 'getconfig', 'prints', 'merged', 'config', 'to', 'stdout', '.'] | train | https://github.com/rfk/django-supervisor/blob/545a379d4a73ed2ae21c4aee6b8009ded8aeedc6/djsupervisor/management/commands/supervisor.py#L215-L220 |
4,926 | spyder-ide/spyder | spyder/app/mainwindow.py | MainWindow.add_to_fileswitcher | def add_to_fileswitcher(self, plugin, tabs, data, icon):
"""Add a plugin to the File Switcher."""
if self.fileswitcher is None:
from spyder.widgets.fileswitcher import FileSwitcher
self.fileswitcher = FileSwitcher(self, plugin, tabs, data, icon)
else:
self.fileswitcher.add_plugin(plugin, tabs, data, icon)
self.fileswitcher.sig_goto_file.connect(
plugin.get_current_tab_manager().set_stack_index) | python | def add_to_fileswitcher(self, plugin, tabs, data, icon):
"""Add a plugin to the File Switcher."""
if self.fileswitcher is None:
from spyder.widgets.fileswitcher import FileSwitcher
self.fileswitcher = FileSwitcher(self, plugin, tabs, data, icon)
else:
self.fileswitcher.add_plugin(plugin, tabs, data, icon)
self.fileswitcher.sig_goto_file.connect(
plugin.get_current_tab_manager().set_stack_index) | ['def', 'add_to_fileswitcher', '(', 'self', ',', 'plugin', ',', 'tabs', ',', 'data', ',', 'icon', ')', ':', 'if', 'self', '.', 'fileswitcher', 'is', 'None', ':', 'from', 'spyder', '.', 'widgets', '.', 'fileswitcher', 'import', 'FileSwitcher', 'self', '.', 'fileswitcher', '=', 'FileSwitcher', '(', 'self', ',', 'plugin', ',', 'tabs', ',', 'data', ',', 'icon', ')', 'else', ':', 'self', '.', 'fileswitcher', '.', 'add_plugin', '(', 'plugin', ',', 'tabs', ',', 'data', ',', 'icon', ')', 'self', '.', 'fileswitcher', '.', 'sig_goto_file', '.', 'connect', '(', 'plugin', '.', 'get_current_tab_manager', '(', ')', '.', 'set_stack_index', ')'] | Add a plugin to the File Switcher. | ['Add', 'a', 'plugin', 'to', 'the', 'File', 'Switcher', '.'] | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/app/mainwindow.py#L3085-L3094 |
4,927 | ratcave/wavefront_reader | wavefront_reader/reading.py | read_wavefront | def read_wavefront(fname_obj):
"""Returns mesh dictionary along with their material dictionary from a wavefront (.obj and/or .mtl) file."""
fname_mtl = ''
geoms = read_objfile(fname_obj)
for line in open(fname_obj):
if line:
split_line = line.strip().split(' ', 1)
if len(split_line) < 2:
continue
prefix, data = split_line[0], split_line[1]
if 'mtllib' in prefix:
fname_mtl = data
break
if fname_mtl:
materials = read_mtlfile(path.join(path.dirname(fname_obj), fname_mtl))
for geom in geoms.values():
geom['material'] = materials[geom['usemtl']]
return geoms | python | def read_wavefront(fname_obj):
"""Returns mesh dictionary along with their material dictionary from a wavefront (.obj and/or .mtl) file."""
fname_mtl = ''
geoms = read_objfile(fname_obj)
for line in open(fname_obj):
if line:
split_line = line.strip().split(' ', 1)
if len(split_line) < 2:
continue
prefix, data = split_line[0], split_line[1]
if 'mtllib' in prefix:
fname_mtl = data
break
if fname_mtl:
materials = read_mtlfile(path.join(path.dirname(fname_obj), fname_mtl))
for geom in geoms.values():
geom['material'] = materials[geom['usemtl']]
return geoms | ['def', 'read_wavefront', '(', 'fname_obj', ')', ':', 'fname_mtl', '=', "''", 'geoms', '=', 'read_objfile', '(', 'fname_obj', ')', 'for', 'line', 'in', 'open', '(', 'fname_obj', ')', ':', 'if', 'line', ':', 'split_line', '=', 'line', '.', 'strip', '(', ')', '.', 'split', '(', "' '", ',', '1', ')', 'if', 'len', '(', 'split_line', ')', '<', '2', ':', 'continue', 'prefix', ',', 'data', '=', 'split_line', '[', '0', ']', ',', 'split_line', '[', '1', ']', 'if', "'mtllib'", 'in', 'prefix', ':', 'fname_mtl', '=', 'data', 'break', 'if', 'fname_mtl', ':', 'materials', '=', 'read_mtlfile', '(', 'path', '.', 'join', '(', 'path', '.', 'dirname', '(', 'fname_obj', ')', ',', 'fname_mtl', ')', ')', 'for', 'geom', 'in', 'geoms', '.', 'values', '(', ')', ':', 'geom', '[', "'material'", ']', '=', 'materials', '[', 'geom', '[', "'usemtl'", ']', ']', 'return', 'geoms'] | Returns mesh dictionary along with their material dictionary from a wavefront (.obj and/or .mtl) file. | ['Returns', 'mesh', 'dictionary', 'along', 'with', 'their', 'material', 'dictionary', 'from', 'a', 'wavefront', '(', '.', 'obj', 'and', '/', 'or', '.', 'mtl', ')', 'file', '.'] | train | https://github.com/ratcave/wavefront_reader/blob/c515164a3952d6b85f8044f429406fddd862bfd0/wavefront_reader/reading.py#L98-L119 |
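Typical use of the reader above; 'cube.obj' is a placeholder filename and must sit next to the .mtl file named by its mtllib line. The 'usemtl' and 'material' keys are guaranteed by the code shown; the remaining geometry keys depend on what the .obj contains.

```python
from wavefront_reader import read_wavefront

geoms = read_wavefront("cube.obj")           # placeholder path
for name, geom in geoms.items():
    print(name, sorted(geom))                # inspect which arrays were parsed
    print(geom["usemtl"], geom["material"])  # material name and its merged .mtl entry
```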
4,928 | alexras/pylsdj | pylsdj/filepack.py | merge | def merge(blocks):
"""Merge the given blocks into a contiguous block of compressed data.
:param blocks: the list of blocks
:rtype: a list of compressed bytes
"""
current_block = blocks[sorted(blocks.keys())[0]]
compressed_data = []
eof = False
while not eof:
data_size_to_append = None
next_block = None
i = 0
while i < len(current_block.data) - 1:
current_byte = current_block.data[i]
next_byte = current_block.data[i + 1]
if current_byte == RLE_BYTE:
if next_byte == RLE_BYTE:
i += 2
else:
i += 3
elif current_byte == SPECIAL_BYTE:
if next_byte in SPECIAL_DEFAULTS:
i += 3
elif next_byte == SPECIAL_BYTE:
i += 2
else:
data_size_to_append = i
# hit end of file
if next_byte == EOF_BYTE:
eof = True
else:
next_block = blocks[next_byte]
break
else:
i += 1
assert data_size_to_append is not None, "Ran off the end of a "\
"block without encountering a block switch or EOF"
compressed_data.extend(current_block.data[0:data_size_to_append])
if not eof:
assert next_block is not None, "Switched blocks, but did " \
"not provide the next block to switch to"
current_block = next_block
return compressed_data | python | def merge(blocks):
"""Merge the given blocks into a contiguous block of compressed data.
:param blocks: the list of blocks
:rtype: a list of compressed bytes
"""
current_block = blocks[sorted(blocks.keys())[0]]
compressed_data = []
eof = False
while not eof:
data_size_to_append = None
next_block = None
i = 0
while i < len(current_block.data) - 1:
current_byte = current_block.data[i]
next_byte = current_block.data[i + 1]
if current_byte == RLE_BYTE:
if next_byte == RLE_BYTE:
i += 2
else:
i += 3
elif current_byte == SPECIAL_BYTE:
if next_byte in SPECIAL_DEFAULTS:
i += 3
elif next_byte == SPECIAL_BYTE:
i += 2
else:
data_size_to_append = i
# hit end of file
if next_byte == EOF_BYTE:
eof = True
else:
next_block = blocks[next_byte]
break
else:
i += 1
assert data_size_to_append is not None, "Ran off the end of a "\
"block without encountering a block switch or EOF"
compressed_data.extend(current_block.data[0:data_size_to_append])
if not eof:
assert next_block is not None, "Switched blocks, but did " \
"not provide the next block to switch to"
current_block = next_block
return compressed_data | ['def', 'merge', '(', 'blocks', ')', ':', 'current_block', '=', 'blocks', '[', 'sorted', '(', 'blocks', '.', 'keys', '(', ')', ')', '[', '0', ']', ']', 'compressed_data', '=', '[', ']', 'eof', '=', 'False', 'while', 'not', 'eof', ':', 'data_size_to_append', '=', 'None', 'next_block', '=', 'None', 'i', '=', '0', 'while', 'i', '<', 'len', '(', 'current_block', '.', 'data', ')', '-', '1', ':', 'current_byte', '=', 'current_block', '.', 'data', '[', 'i', ']', 'next_byte', '=', 'current_block', '.', 'data', '[', 'i', '+', '1', ']', 'if', 'current_byte', '==', 'RLE_BYTE', ':', 'if', 'next_byte', '==', 'RLE_BYTE', ':', 'i', '+=', '2', 'else', ':', 'i', '+=', '3', 'elif', 'current_byte', '==', 'SPECIAL_BYTE', ':', 'if', 'next_byte', 'in', 'SPECIAL_DEFAULTS', ':', 'i', '+=', '3', 'elif', 'next_byte', '==', 'SPECIAL_BYTE', ':', 'i', '+=', '2', 'else', ':', 'data_size_to_append', '=', 'i', '# hit end of file', 'if', 'next_byte', '==', 'EOF_BYTE', ':', 'eof', '=', 'True', 'else', ':', 'next_block', '=', 'blocks', '[', 'next_byte', ']', 'break', 'else', ':', 'i', '+=', '1', 'assert', 'data_size_to_append', 'is', 'not', 'None', ',', '"Ran off the end of a "', '"block without encountering a block switch or EOF"', 'compressed_data', '.', 'extend', '(', 'current_block', '.', 'data', '[', '0', ':', 'data_size_to_append', ']', ')', 'if', 'not', 'eof', ':', 'assert', 'next_block', 'is', 'not', 'None', ',', '"Switched blocks, but did "', '"not provide the next block to switch to"', 'current_block', '=', 'next_block', 'return', 'compressed_data'] | Merge the given blocks into a contiguous block of compressed data.
:param blocks: the list of blocks
:rtype: a list of compressed bytes | ['Merge', 'the', 'given', 'blocks', 'into', 'a', 'contiguous', 'block', 'of', 'compressed', 'data', '.'] | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/filepack.py#L204-L259 |
4,929 | zarr-developers/zarr | zarr/creation.py | open_array | def open_array(store=None, mode='a', shape=None, chunks=True, dtype=None,
compressor='default', fill_value=0, order='C', synchronizer=None,
filters=None, cache_metadata=True, cache_attrs=True, path=None,
object_codec=None, chunk_store=None, **kwargs):
"""Open an array using file-mode-like semantics.
Parameters
----------
store : MutableMapping or string, optional
Store or path to directory in file system or name of zip file.
mode : {'r', 'r+', 'a', 'w', 'w-'}, optional
Persistence mode: 'r' means read only (must exist); 'r+' means
read/write (must exist); 'a' means read/write (create if doesn't
exist); 'w' means create (overwrite if exists); 'w-' means create
(fail if exists).
shape : int or tuple of ints, optional
Array shape.
chunks : int or tuple of ints, optional
Chunk shape. If True, will be guessed from `shape` and `dtype`. If
False, will be set to `shape`, i.e., single chunk for the whole array.
dtype : string or dtype, optional
NumPy dtype.
compressor : Codec, optional
Primary compressor.
fill_value : object, optional
Default value to use for uninitialized portions of the array.
order : {'C', 'F'}, optional
Memory layout to be used within each chunk.
synchronizer : object, optional
Array synchronizer.
filters : sequence, optional
Sequence of filters to use to encode chunk data prior to compression.
cache_metadata : bool, optional
If True, array configuration metadata will be cached for the
lifetime of the object. If False, array metadata will be reloaded
prior to all data access and modification operations (may incur
overhead depending on storage and data access pattern).
cache_attrs : bool, optional
If True (default), user attributes will be cached for attribute read
operations. If False, user attributes are reloaded from the store prior
to all attribute read operations.
path : string, optional
Array path within store.
object_codec : Codec, optional
A codec to encode object arrays, only needed if dtype=object.
chunk_store : MutableMapping or string, optional
Store or path to directory in file system or name of zip file.
Returns
-------
z : zarr.core.Array
Examples
--------
>>> import numpy as np
>>> import zarr
>>> z1 = zarr.open_array('data/example.zarr', mode='w', shape=(10000, 10000),
... chunks=(1000, 1000), fill_value=0)
>>> z1[:] = np.arange(100000000).reshape(10000, 10000)
>>> z1
<zarr.core.Array (10000, 10000) float64>
>>> z2 = zarr.open_array('data/example.zarr', mode='r')
>>> z2
<zarr.core.Array (10000, 10000) float64 read-only>
>>> np.all(z1[:] == z2[:])
True
Notes
-----
There is no need to close an array. Data are automatically flushed to the
file system.
"""
# use same mode semantics as h5py
# r : read only, must exist
# r+ : read/write, must exist
# w : create, delete if exists
# w- or x : create, fail if exists
# a : read/write if exists, create otherwise (default)
# handle polymorphic store arg
clobber = mode == 'w'
store = normalize_store_arg(store, clobber=clobber)
if chunk_store is not None:
chunk_store = normalize_store_arg(chunk_store, clobber=clobber)
path = normalize_storage_path(path)
# API compatibility with h5py
compressor, fill_value = _kwargs_compat(compressor, fill_value, kwargs)
# ensure fill_value of correct type
if fill_value is not None:
fill_value = np.array(fill_value, dtype=dtype)[()]
# ensure store is initialized
if mode in ['r', 'r+']:
if contains_group(store, path=path):
err_contains_group(path)
elif not contains_array(store, path=path):
err_array_not_found(path)
elif mode == 'w':
init_array(store, shape=shape, chunks=chunks, dtype=dtype,
compressor=compressor, fill_value=fill_value,
order=order, filters=filters, overwrite=True, path=path,
object_codec=object_codec, chunk_store=chunk_store)
elif mode == 'a':
if contains_group(store, path=path):
err_contains_group(path)
elif not contains_array(store, path=path):
init_array(store, shape=shape, chunks=chunks, dtype=dtype,
compressor=compressor, fill_value=fill_value,
order=order, filters=filters, path=path,
object_codec=object_codec, chunk_store=chunk_store)
elif mode in ['w-', 'x']:
if contains_group(store, path=path):
err_contains_group(path)
elif contains_array(store, path=path):
err_contains_array(path)
else:
init_array(store, shape=shape, chunks=chunks, dtype=dtype,
compressor=compressor, fill_value=fill_value,
order=order, filters=filters, path=path,
object_codec=object_codec, chunk_store=chunk_store)
# determine read only status
read_only = mode == 'r'
# instantiate array
z = Array(store, read_only=read_only, synchronizer=synchronizer,
cache_metadata=cache_metadata, cache_attrs=cache_attrs, path=path,
chunk_store=chunk_store)
return z | python | def open_array(store=None, mode='a', shape=None, chunks=True, dtype=None,
compressor='default', fill_value=0, order='C', synchronizer=None,
filters=None, cache_metadata=True, cache_attrs=True, path=None,
object_codec=None, chunk_store=None, **kwargs):
"""Open an array using file-mode-like semantics.
Parameters
----------
store : MutableMapping or string, optional
Store or path to directory in file system or name of zip file.
mode : {'r', 'r+', 'a', 'w', 'w-'}, optional
Persistence mode: 'r' means read only (must exist); 'r+' means
read/write (must exist); 'a' means read/write (create if doesn't
exist); 'w' means create (overwrite if exists); 'w-' means create
(fail if exists).
shape : int or tuple of ints, optional
Array shape.
chunks : int or tuple of ints, optional
Chunk shape. If True, will be guessed from `shape` and `dtype`. If
False, will be set to `shape`, i.e., single chunk for the whole array.
dtype : string or dtype, optional
NumPy dtype.
compressor : Codec, optional
Primary compressor.
fill_value : object, optional
Default value to use for uninitialized portions of the array.
order : {'C', 'F'}, optional
Memory layout to be used within each chunk.
synchronizer : object, optional
Array synchronizer.
filters : sequence, optional
Sequence of filters to use to encode chunk data prior to compression.
cache_metadata : bool, optional
If True, array configuration metadata will be cached for the
lifetime of the object. If False, array metadata will be reloaded
prior to all data access and modification operations (may incur
overhead depending on storage and data access pattern).
cache_attrs : bool, optional
If True (default), user attributes will be cached for attribute read
operations. If False, user attributes are reloaded from the store prior
to all attribute read operations.
path : string, optional
Array path within store.
object_codec : Codec, optional
A codec to encode object arrays, only needed if dtype=object.
chunk_store : MutableMapping or string, optional
Store or path to directory in file system or name of zip file.
Returns
-------
z : zarr.core.Array
Examples
--------
>>> import numpy as np
>>> import zarr
>>> z1 = zarr.open_array('data/example.zarr', mode='w', shape=(10000, 10000),
... chunks=(1000, 1000), fill_value=0)
>>> z1[:] = np.arange(100000000).reshape(10000, 10000)
>>> z1
<zarr.core.Array (10000, 10000) float64>
>>> z2 = zarr.open_array('data/example.zarr', mode='r')
>>> z2
<zarr.core.Array (10000, 10000) float64 read-only>
>>> np.all(z1[:] == z2[:])
True
Notes
-----
There is no need to close an array. Data are automatically flushed to the
file system.
"""
# use same mode semantics as h5py
# r : read only, must exist
# r+ : read/write, must exist
# w : create, delete if exists
# w- or x : create, fail if exists
# a : read/write if exists, create otherwise (default)
# handle polymorphic store arg
clobber = mode == 'w'
store = normalize_store_arg(store, clobber=clobber)
if chunk_store is not None:
chunk_store = normalize_store_arg(chunk_store, clobber=clobber)
path = normalize_storage_path(path)
# API compatibility with h5py
compressor, fill_value = _kwargs_compat(compressor, fill_value, kwargs)
# ensure fill_value of correct type
if fill_value is not None:
fill_value = np.array(fill_value, dtype=dtype)[()]
# ensure store is initialized
if mode in ['r', 'r+']:
if contains_group(store, path=path):
err_contains_group(path)
elif not contains_array(store, path=path):
err_array_not_found(path)
elif mode == 'w':
init_array(store, shape=shape, chunks=chunks, dtype=dtype,
compressor=compressor, fill_value=fill_value,
order=order, filters=filters, overwrite=True, path=path,
object_codec=object_codec, chunk_store=chunk_store)
elif mode == 'a':
if contains_group(store, path=path):
err_contains_group(path)
elif not contains_array(store, path=path):
init_array(store, shape=shape, chunks=chunks, dtype=dtype,
compressor=compressor, fill_value=fill_value,
order=order, filters=filters, path=path,
object_codec=object_codec, chunk_store=chunk_store)
elif mode in ['w-', 'x']:
if contains_group(store, path=path):
err_contains_group(path)
elif contains_array(store, path=path):
err_contains_array(path)
else:
init_array(store, shape=shape, chunks=chunks, dtype=dtype,
compressor=compressor, fill_value=fill_value,
order=order, filters=filters, path=path,
object_codec=object_codec, chunk_store=chunk_store)
# determine read only status
read_only = mode == 'r'
# instantiate array
z = Array(store, read_only=read_only, synchronizer=synchronizer,
cache_metadata=cache_metadata, cache_attrs=cache_attrs, path=path,
chunk_store=chunk_store)
return z | ['def', 'open_array', '(', 'store', '=', 'None', ',', 'mode', '=', "'a'", ',', 'shape', '=', 'None', ',', 'chunks', '=', 'True', ',', 'dtype', '=', 'None', ',', 'compressor', '=', "'default'", ',', 'fill_value', '=', '0', ',', 'order', '=', "'C'", ',', 'synchronizer', '=', 'None', ',', 'filters', '=', 'None', ',', 'cache_metadata', '=', 'True', ',', 'cache_attrs', '=', 'True', ',', 'path', '=', 'None', ',', 'object_codec', '=', 'None', ',', 'chunk_store', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', '# use same mode semantics as h5py', '# r : read only, must exist', '# r+ : read/write, must exist', '# w : create, delete if exists', '# w- or x : create, fail if exists', '# a : read/write if exists, create otherwise (default)', '# handle polymorphic store arg', 'clobber', '=', 'mode', '==', "'w'", 'store', '=', 'normalize_store_arg', '(', 'store', ',', 'clobber', '=', 'clobber', ')', 'if', 'chunk_store', 'is', 'not', 'None', ':', 'chunk_store', '=', 'normalize_store_arg', '(', 'chunk_store', ',', 'clobber', '=', 'clobber', ')', 'path', '=', 'normalize_storage_path', '(', 'path', ')', '# API compatibility with h5py', 'compressor', ',', 'fill_value', '=', '_kwargs_compat', '(', 'compressor', ',', 'fill_value', ',', 'kwargs', ')', '# ensure fill_value of correct type', 'if', 'fill_value', 'is', 'not', 'None', ':', 'fill_value', '=', 'np', '.', 'array', '(', 'fill_value', ',', 'dtype', '=', 'dtype', ')', '[', '(', ')', ']', '# ensure store is initialized', 'if', 'mode', 'in', '[', "'r'", ',', "'r+'", ']', ':', 'if', 'contains_group', '(', 'store', ',', 'path', '=', 'path', ')', ':', 'err_contains_group', '(', 'path', ')', 'elif', 'not', 'contains_array', '(', 'store', ',', 'path', '=', 'path', ')', ':', 'err_array_not_found', '(', 'path', ')', 'elif', 'mode', '==', "'w'", ':', 'init_array', '(', 'store', ',', 'shape', '=', 'shape', ',', 'chunks', '=', 'chunks', ',', 'dtype', '=', 'dtype', ',', 'compressor', '=', 'compressor', ',', 'fill_value', '=', 'fill_value', ',', 'order', '=', 'order', ',', 'filters', '=', 'filters', ',', 'overwrite', '=', 'True', ',', 'path', '=', 'path', ',', 'object_codec', '=', 'object_codec', ',', 'chunk_store', '=', 'chunk_store', ')', 'elif', 'mode', '==', "'a'", ':', 'if', 'contains_group', '(', 'store', ',', 'path', '=', 'path', ')', ':', 'err_contains_group', '(', 'path', ')', 'elif', 'not', 'contains_array', '(', 'store', ',', 'path', '=', 'path', ')', ':', 'init_array', '(', 'store', ',', 'shape', '=', 'shape', ',', 'chunks', '=', 'chunks', ',', 'dtype', '=', 'dtype', ',', 'compressor', '=', 'compressor', ',', 'fill_value', '=', 'fill_value', ',', 'order', '=', 'order', ',', 'filters', '=', 'filters', ',', 'path', '=', 'path', ',', 'object_codec', '=', 'object_codec', ',', 'chunk_store', '=', 'chunk_store', ')', 'elif', 'mode', 'in', '[', "'w-'", ',', "'x'", ']', ':', 'if', 'contains_group', '(', 'store', ',', 'path', '=', 'path', ')', ':', 'err_contains_group', '(', 'path', ')', 'elif', 'contains_array', '(', 'store', ',', 'path', '=', 'path', ')', ':', 'err_contains_array', '(', 'path', ')', 'else', ':', 'init_array', '(', 'store', ',', 'shape', '=', 'shape', ',', 'chunks', '=', 'chunks', ',', 'dtype', '=', 'dtype', ',', 'compressor', '=', 'compressor', ',', 'fill_value', '=', 'fill_value', ',', 'order', '=', 'order', ',', 'filters', '=', 'filters', ',', 'path', '=', 'path', ',', 'object_codec', '=', 'object_codec', ',', 'chunk_store', '=', 'chunk_store', ')', '# determine read only status', 'read_only', '=', 'mode', '==', "'r'", '# instantiate 
array', 'z', '=', 'Array', '(', 'store', ',', 'read_only', '=', 'read_only', ',', 'synchronizer', '=', 'synchronizer', ',', 'cache_metadata', '=', 'cache_metadata', ',', 'cache_attrs', '=', 'cache_attrs', ',', 'path', '=', 'path', ',', 'chunk_store', '=', 'chunk_store', ')', 'return', 'z'] | Open an array using file-mode-like semantics.
Parameters
----------
store : MutableMapping or string, optional
Store or path to directory in file system or name of zip file.
mode : {'r', 'r+', 'a', 'w', 'w-'}, optional
Persistence mode: 'r' means read only (must exist); 'r+' means
read/write (must exist); 'a' means read/write (create if doesn't
exist); 'w' means create (overwrite if exists); 'w-' means create
(fail if exists).
shape : int or tuple of ints, optional
Array shape.
chunks : int or tuple of ints, optional
Chunk shape. If True, will be guessed from `shape` and `dtype`. If
False, will be set to `shape`, i.e., single chunk for the whole array.
dtype : string or dtype, optional
NumPy dtype.
compressor : Codec, optional
Primary compressor.
fill_value : object, optional
Default value to use for uninitialized portions of the array.
order : {'C', 'F'}, optional
Memory layout to be used within each chunk.
synchronizer : object, optional
Array synchronizer.
filters : sequence, optional
Sequence of filters to use to encode chunk data prior to compression.
cache_metadata : bool, optional
If True, array configuration metadata will be cached for the
lifetime of the object. If False, array metadata will be reloaded
prior to all data access and modification operations (may incur
overhead depending on storage and data access pattern).
cache_attrs : bool, optional
If True (default), user attributes will be cached for attribute read
operations. If False, user attributes are reloaded from the store prior
to all attribute read operations.
path : string, optional
Array path within store.
object_codec : Codec, optional
A codec to encode object arrays, only needed if dtype=object.
chunk_store : MutableMapping or string, optional
Store or path to directory in file system or name of zip file.
Returns
-------
z : zarr.core.Array
Examples
--------
>>> import numpy as np
>>> import zarr
>>> z1 = zarr.open_array('data/example.zarr', mode='w', shape=(10000, 10000),
... chunks=(1000, 1000), fill_value=0)
>>> z1[:] = np.arange(100000000).reshape(10000, 10000)
>>> z1
<zarr.core.Array (10000, 10000) float64>
>>> z2 = zarr.open_array('data/example.zarr', mode='r')
>>> z2
<zarr.core.Array (10000, 10000) float64 read-only>
>>> np.all(z1[:] == z2[:])
True
Notes
-----
There is no need to close an array. Data are automatically flushed to the
file system. | ['Open', 'an', 'array', 'using', 'file', '-', 'mode', '-', 'like', 'semantics', '.'] | train | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/creation.py#L352-L489 |
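One more usage note on the persistence modes: with mode='a' the shape/chunks/dtype arguments only matter on first creation, and a later read-only open needs none of them because the metadata comes from the store. Paths below are placeholders.

```python
import numpy as np
import zarr

# creates the array the first time, reuses it afterwards
z = zarr.open_array("data/scratch.zarr", mode="a", shape=(100,), chunks=(10,), dtype="i4")
z[:] = np.arange(100)

z_ro = zarr.open_array("data/scratch.zarr", mode="r")   # metadata read from the store
print(z_ro.shape, int(z_ro[42]))                         # (100,) 42
```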
4,930 | pantsbuild/pants | src/python/pants/base/specs.py | Spec.address_families_for_dir | def address_families_for_dir(cls, address_families_dict, spec_dir_path):
"""Implementation of `matching_address_families()` for specs matching at most one directory."""
maybe_af = address_families_dict.get(spec_dir_path, None)
if maybe_af is None:
raise cls.AddressFamilyResolutionError(
'Path "{}" does not contain any BUILD files.'
.format(spec_dir_path))
return [maybe_af] | python | def address_families_for_dir(cls, address_families_dict, spec_dir_path):
"""Implementation of `matching_address_families()` for specs matching at most one directory."""
maybe_af = address_families_dict.get(spec_dir_path, None)
if maybe_af is None:
raise cls.AddressFamilyResolutionError(
'Path "{}" does not contain any BUILD files.'
.format(spec_dir_path))
return [maybe_af] | ['def', 'address_families_for_dir', '(', 'cls', ',', 'address_families_dict', ',', 'spec_dir_path', ')', ':', 'maybe_af', '=', 'address_families_dict', '.', 'get', '(', 'spec_dir_path', ',', 'None', ')', 'if', 'maybe_af', 'is', 'None', ':', 'raise', 'cls', '.', 'AddressFamilyResolutionError', '(', '\'Path "{}" does not contain any BUILD files.\'', '.', 'format', '(', 'spec_dir_path', ')', ')', 'return', '[', 'maybe_af', ']'] | Implementation of `matching_address_families()` for specs matching at most one directory. | ['Implementation', 'of', 'matching_address_families', '()', 'for', 'specs', 'matching', 'at', 'most', 'one', 'directory', '.'] | train | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/base/specs.py#L45-L52 |
4,931 | usc-isi-i2/etk | etk/tokenizer.py | Tokenizer.tokenize | def tokenize(self, text: str, customize=True, disable=[]) -> List[Token]:
"""
Tokenize the given text, returning a list of tokens. Type token: class spacy.tokens.Token
Args:
text (string):
Returns: [tokens]
"""
"""Tokenize text"""
if not self.keep_multi_space:
text = re.sub(' +', ' ', text)
# disable spacy parsing, tagging etc as it takes a long time if the text is short
tokens = self.nlp(text, disable=disable)
if customize:
tokens = [self.custom_token(a_token) for a_token in tokens]
return tokens | python | def tokenize(self, text: str, customize=True, disable=[]) -> List[Token]:
"""
Tokenize the given text, returning a list of tokens. Type token: class spacy.tokens.Token
Args:
text (string):
Returns: [tokens]
"""
"""Tokenize text"""
if not self.keep_multi_space:
text = re.sub(' +', ' ', text)
# disable spacy parsing, tagging etc as it takes a long time if the text is short
tokens = self.nlp(text, disable=disable)
if customize:
tokens = [self.custom_token(a_token) for a_token in tokens]
return tokens | ['def', 'tokenize', '(', 'self', ',', 'text', ':', 'str', ',', 'customize', '=', 'True', ',', 'disable', '=', '[', ']', ')', '->', 'List', '[', 'Token', ']', ':', '"""Tokenize text"""', 'if', 'not', 'self', '.', 'keep_multi_space', ':', 'text', '=', 're', '.', 'sub', '(', "' +'", ',', "' '", ',', 'text', ')', '# disable spacy parsing, tagging etc as it takes a long time if the text is short', 'tokens', '=', 'self', '.', 'nlp', '(', 'text', ',', 'disable', '=', 'disable', ')', 'if', 'customize', ':', 'tokens', '=', '[', 'self', '.', 'custom_token', '(', 'a_token', ')', 'for', 'a_token', 'in', 'tokens', ']', 'return', 'tokens'] | Tokenize the given text, returning a list of tokens. Type token: class spacy.tokens.Token
Args:
text (string):
Returns: [tokens] | ['Tokenize', 'the', 'given', 'text', 'returning', 'a', 'list', 'of', 'tokens', '.', 'Type', 'token', ':', 'class', 'spacy', '.', 'tokens', '.', 'Token'] | train | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/tokenizer.py#L41-L60 |
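The two behaviours documented above, collapsing runs of spaces and passing a disable list so spaCy skips heavy pipeline components, can be reproduced with spaCy directly. This sketch assumes the en_core_web_sm model is installed and disables components at load time, which works across spaCy versions.

```python
import re
import spacy

nlp = spacy.load("en_core_web_sm", disable=["tagger", "parser", "ner"])

def tokenize(text, keep_multi_space=False):
    if not keep_multi_space:
        text = re.sub(" +", " ", text)       # same multi-space collapse as above
    return [t.text for t in nlp(text)]

print(tokenize("hello   world,  again"))     # ['hello', 'world', ',', 'again']
```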
4,932 | EVEprosper/ProsperCommon | prosper/common/prosper_config.py | check_value | def check_value(
config,
section,
option,
jinja_pattern=JINJA_PATTERN,
):
"""try to figure out if value is valid or jinja2 template value
Args:
config (:obj:`configparser.ConfigParser`): config object to read key from
section (str): name of section in configparser
option (str): name of option in configparser
jinja_pattern (:obj:`_sre.SRE_Pattern`): a `re.compile()` pattern to match on
Returns:
str: value if value, else None
Raises:
KeyError:
configparser.NoOptionError:
configparser.NoSectionError:
"""
value = config[section][option]
if re.match(jinja_pattern, value):
return None
return value | python | def check_value(
config,
section,
option,
jinja_pattern=JINJA_PATTERN,
):
"""try to figure out if value is valid or jinja2 template value
Args:
config (:obj:`configparser.ConfigParser`): config object to read key from
section (str): name of section in configparser
option (str): name of option in configparser
jinja_pattern (:obj:`_sre.SRE_Pattern`): a `re.compile()` pattern to match on
Returns:
str: value if value, else None
Raises:
KeyError:
configparser.NoOptionError:
configparser.NoSectionError:
"""
value = config[section][option]
if re.match(jinja_pattern, value):
return None
return value | ['def', 'check_value', '(', 'config', ',', 'section', ',', 'option', ',', 'jinja_pattern', '=', 'JINJA_PATTERN', ',', ')', ':', 'value', '=', 'config', '[', 'section', ']', '[', 'option', ']', 'if', 're', '.', 'match', '(', 'jinja_pattern', ',', 'value', ')', ':', 'return', 'None', 'return', 'value'] | try to figure out if value is valid or jinja2 template value
Args:
config (:obj:`configparser.ConfigParser`): config object to read key from
section (str): name of section in configparser
option (str): name of option in configparser
jinja_pattern (:obj:`_sre.SRE_Pattern`): a `re.compile()` pattern to match on
Returns:
str: value if value, else None
Raises:
KeyError:
configparser.NoOptionError:
configparser.NoSectionError: | ['try', 'to', 'figure', 'out', 'if', 'value', 'is', 'valid', 'or', 'jinja2', 'template', 'value'] | train | https://github.com/EVEprosper/ProsperCommon/blob/bcada3b25420099e1f204db8d55eb268e7b4dc27/prosper/common/prosper_config.py#L48-L75 |
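A self-contained illustration of the placeholder check: values that still look like unrendered Jinja templates come back as None, real values pass through. The pattern below is a stand-in for the module's JINJA_PATTERN, which is not reproduced here.

```python
import configparser
import re

JINJA_PATTERN = re.compile(r"^\{\{.*\}\}$")   # assumed shape of an unfilled template value

def check_value(config, section, option, jinja_pattern=JINJA_PATTERN):
    value = config[section][option]
    return None if re.match(jinja_pattern, value) else value

config = configparser.ConfigParser()
config.read_string("[AUTH]\ntoken = {{ secret_token }}\nregion = us-east-1\n")
print(check_value(config, "AUTH", "token"))    # None -- still a template placeholder
print(check_value(config, "AUTH", "region"))   # us-east-1
```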
4,933 | luckydonald/pytgbot | code_generation/output/pytgbot/api_types/receivable/stickers.py | MaskPosition.from_array | def from_array(array):
"""
Deserialize a new MaskPosition from a given dictionary.
:return: new MaskPosition instance.
:rtype: MaskPosition
"""
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
data = {}
data['point'] = u(array.get('point'))
data['x_shift'] = float(array.get('x_shift'))
data['y_shift'] = float(array.get('y_shift'))
data['scale'] = float(array.get('scale'))
data['_raw'] = array
return MaskPosition(**data) | python | def from_array(array):
"""
Deserialize a new MaskPosition from a given dictionary.
:return: new MaskPosition instance.
:rtype: MaskPosition
"""
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
data = {}
data['point'] = u(array.get('point'))
data['x_shift'] = float(array.get('x_shift'))
data['y_shift'] = float(array.get('y_shift'))
data['scale'] = float(array.get('scale'))
data['_raw'] = array
return MaskPosition(**data) | ['def', 'from_array', '(', 'array', ')', ':', 'if', 'array', 'is', 'None', 'or', 'not', 'array', ':', 'return', 'None', '# end if', 'assert_type_or_raise', '(', 'array', ',', 'dict', ',', 'parameter_name', '=', '"array"', ')', 'data', '=', '{', '}', 'data', '[', "'point'", ']', '=', 'u', '(', 'array', '.', 'get', '(', "'point'", ')', ')', 'data', '[', "'x_shift'", ']', '=', 'float', '(', 'array', '.', 'get', '(', "'x_shift'", ')', ')', 'data', '[', "'y_shift'", ']', '=', 'float', '(', 'array', '.', 'get', '(', "'y_shift'", ')', ')', 'data', '[', "'scale'", ']', '=', 'float', '(', 'array', '.', 'get', '(', "'scale'", ')', ')', 'data', '[', "'_raw'", ']', '=', 'array', 'return', 'MaskPosition', '(', '*', '*', 'data', ')'] | Deserialize a new MaskPosition from a given dictionary.
:return: new MaskPosition instance.
:rtype: MaskPosition | ['Deserialize', 'a', 'new', 'MaskPosition', 'from', 'a', 'given', 'dictionary', '.'] | train | https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/output/pytgbot/api_types/receivable/stickers.py#L237-L255 |
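Deserializing a Telegram-style payload with the generated class above; the dict values are invented for illustration.

```python
from pytgbot.api_types.receivable.stickers import MaskPosition

payload = {"point": "eyes", "x_shift": -1.0, "y_shift": 1.0, "scale": 2.0}
mask = MaskPosition.from_array(payload)
print(mask.point, mask.x_shift, mask.y_shift, mask.scale)  # eyes -1.0 1.0 2.0
```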
4,934 | bukun/TorCMS | torcms/handlers/label_handler.py | LabelHandler.list | def list(self, kind, tag_slug, cur_p=''):
'''
根据 cat_handler.py 中的 def view_cat_new(self, cat_slug, cur_p = '')
'''
# 下面用来使用关键字过滤信息,如果网站信息量不是很大不要开启
# Todo:
# if self.get_current_user():
# redisvr.sadd(config.redis_kw + self.userinfo.user_name, tag_slug)
if cur_p == '':
current_page_number = 1
else:
current_page_number = int(cur_p)
current_page_number = 1 if current_page_number < 1 else current_page_number
pager_num = int(MPost2Label.total_number(tag_slug, kind) / CMS_CFG['list_num'])
tag_info = MLabel.get_by_slug(tag_slug)
if tag_info:
tag_name = tag_info.name
else:
tag_name = 'Label search results'
kwd = {'tag_name': tag_name,
'tag_slug': tag_slug,
'title': tag_name,
'current_page': current_page_number,
'router': router_post[kind],
'kind': kind
}
the_list_file = './templates/list/label_{kind}.html'.format(kind=kind)
if os.path.exists(the_list_file):
tmpl = 'list/label_{kind}.html'.format(kind=kind)
else:
tmpl = 'list/label.html'
self.render(tmpl,
infos=MPost2Label.query_pager_by_slug(
tag_slug,
kind=kind,
current_page_num=current_page_number
),
kwd=kwd,
userinfo=self.userinfo,
pager=self.gen_pager(kind, tag_slug, pager_num, current_page_number),
cfg=CMS_CFG) | python | def list(self, kind, tag_slug, cur_p=''):
'''
根据 cat_handler.py 中的 def view_cat_new(self, cat_slug, cur_p = '')
'''
# 下面用来使用关键字过滤信息,如果网站信息量不是很大不要开启
# Todo:
# if self.get_current_user():
# redisvr.sadd(config.redis_kw + self.userinfo.user_name, tag_slug)
if cur_p == '':
current_page_number = 1
else:
current_page_number = int(cur_p)
current_page_number = 1 if current_page_number < 1 else current_page_number
pager_num = int(MPost2Label.total_number(tag_slug, kind) / CMS_CFG['list_num'])
tag_info = MLabel.get_by_slug(tag_slug)
if tag_info:
tag_name = tag_info.name
else:
tag_name = 'Label search results'
kwd = {'tag_name': tag_name,
'tag_slug': tag_slug,
'title': tag_name,
'current_page': current_page_number,
'router': router_post[kind],
'kind': kind
}
the_list_file = './templates/list/label_{kind}.html'.format(kind=kind)
if os.path.exists(the_list_file):
tmpl = 'list/label_{kind}.html'.format(kind=kind)
else:
tmpl = 'list/label.html'
self.render(tmpl,
infos=MPost2Label.query_pager_by_slug(
tag_slug,
kind=kind,
current_page_num=current_page_number
),
kwd=kwd,
userinfo=self.userinfo,
pager=self.gen_pager(kind, tag_slug, pager_num, current_page_number),
cfg=CMS_CFG) | ['def', 'list', '(', 'self', ',', 'kind', ',', 'tag_slug', ',', 'cur_p', '=', "''", ')', ':', '# 下面用来使用关键字过滤信息,如果网站信息量不是很大不要开启', '# Todo:', '# if self.get_current_user():', '# redisvr.sadd(config.redis_kw + self.userinfo.user_name, tag_slug)', 'if', 'cur_p', '==', "''", ':', 'current_page_number', '=', '1', 'else', ':', 'current_page_number', '=', 'int', '(', 'cur_p', ')', 'current_page_number', '=', '1', 'if', 'current_page_number', '<', '1', 'else', 'current_page_number', 'pager_num', '=', 'int', '(', 'MPost2Label', '.', 'total_number', '(', 'tag_slug', ',', 'kind', ')', '/', 'CMS_CFG', '[', "'list_num'", ']', ')', 'tag_info', '=', 'MLabel', '.', 'get_by_slug', '(', 'tag_slug', ')', 'if', 'tag_info', ':', 'tag_name', '=', 'tag_info', '.', 'name', 'else', ':', 'tag_name', '=', "'Label search results'", 'kwd', '=', '{', "'tag_name'", ':', 'tag_name', ',', "'tag_slug'", ':', 'tag_slug', ',', "'title'", ':', 'tag_name', ',', "'current_page'", ':', 'current_page_number', ',', "'router'", ':', 'router_post', '[', 'kind', ']', ',', "'kind'", ':', 'kind', '}', 'the_list_file', '=', "'./templates/list/label_{kind}.html'", '.', 'format', '(', 'kind', '=', 'kind', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'the_list_file', ')', ':', 'tmpl', '=', "'list/label_{kind}.html'", '.', 'format', '(', 'kind', '=', 'kind', ')', 'else', ':', 'tmpl', '=', "'list/label.html'", 'self', '.', 'render', '(', 'tmpl', ',', 'infos', '=', 'MPost2Label', '.', 'query_pager_by_slug', '(', 'tag_slug', ',', 'kind', '=', 'kind', ',', 'current_page_num', '=', 'current_page_number', ')', ',', 'kwd', '=', 'kwd', ',', 'userinfo', '=', 'self', '.', 'userinfo', ',', 'pager', '=', 'self', '.', 'gen_pager', '(', 'kind', ',', 'tag_slug', ',', 'pager_num', ',', 'current_page_number', ')', ',', 'cfg', '=', 'CMS_CFG', ')'] | 根据 cat_handler.py 中的 def view_cat_new(self, cat_slug, cur_p = '') | ['根据', 'cat_handler', '.', 'py', '中的', 'def', 'view_cat_new', '(', 'self', 'cat_slug', 'cur_p', '=', ')'] | train | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/label_handler.py#L51-L98 |
4,935 | Calysto/calysto | calysto/ai/conx.py | Network.errorFunction | def errorFunction(self, t, a):
"""
Using a hyperbolic arctan on the error slightly exaggerates
the actual error non-linearly. Return t - a to just use the difference.
t - target vector
a - activation vector
"""
def difference(v):
if not self.hyperbolicError:
#if -0.1 < v < 0.1: return 0.0
#else:
return v
else:
if v < -0.9999999: return -17.0
elif v > 0.9999999: return 17.0
else: return math.log( (1.0 + v) / (1.0 - v) )
#else: return Numeric.arctanh(v) # half that above
return list(map(difference, t - a)) | python | def errorFunction(self, t, a):
"""
Using a hyperbolic arctan on the error slightly exaggerates
the actual error non-linearly. Return t - a to just use the difference.
t - target vector
a - activation vector
"""
def difference(v):
if not self.hyperbolicError:
#if -0.1 < v < 0.1: return 0.0
#else:
return v
else:
if v < -0.9999999: return -17.0
elif v > 0.9999999: return 17.0
else: return math.log( (1.0 + v) / (1.0 - v) )
#else: return Numeric.arctanh(v) # half that above
return list(map(difference, t - a)) | ['def', 'errorFunction', '(', 'self', ',', 't', ',', 'a', ')', ':', 'def', 'difference', '(', 'v', ')', ':', 'if', 'not', 'self', '.', 'hyperbolicError', ':', '#if -0.1 < v < 0.1: return 0.0', '#else:', 'return', 'v', 'else', ':', 'if', 'v', '<', '-', '0.9999999', ':', 'return', '-', '17.0', 'elif', 'v', '>', '0.9999999', ':', 'return', '17.0', 'else', ':', 'return', 'math', '.', 'log', '(', '(', '1.0', '+', 'v', ')', '/', '(', '1.0', '-', 'v', ')', ')', '#else: return Numeric.arctanh(v) # half that above', 'return', 'list', '(', 'map', '(', 'difference', ',', 't', '-', 'a', ')', ')'] | Using a hyperbolic arctan on the error slightly exaggerates
the actual error non-linearly. Return t - a to just use the difference.
t - target vector
a - activation vector | ['Using', 'a', 'hyperbolic', 'arctan', 'on', 'the', 'error', 'slightly', 'exaggerates', 'the', 'actual', 'error', 'non', '-', 'linearly', '.', 'Return', 't', '-', 'a', 'to', 'just', 'use', 'the', 'difference', '.', 't', '-', 'target', 'vector', 'a', '-', 'activation', 'vector'] | train | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L2348-L2365 |
4,936 | nameko/nameko | nameko/exceptions.py | deserialize | def deserialize(data):
""" Deserialize `data` to an exception instance.
If the `exc_path` value matches an exception registered as
``deserializable``, return an instance of that exception type.
Otherwise, return a `RemoteError` instance describing the exception
that occurred.
"""
key = data.get('exc_path')
if key in registry:
exc_args = data.get('exc_args', ())
return registry[key](*exc_args)
exc_type = data.get('exc_type')
value = data.get('value')
return RemoteError(exc_type=exc_type, value=value) | python | def deserialize(data):
""" Deserialize `data` to an exception instance.
If the `exc_path` value matches an exception registered as
``deserializable``, return an instance of that exception type.
Otherwise, return a `RemoteError` instance describing the exception
that occurred.
"""
key = data.get('exc_path')
if key in registry:
exc_args = data.get('exc_args', ())
return registry[key](*exc_args)
exc_type = data.get('exc_type')
value = data.get('value')
return RemoteError(exc_type=exc_type, value=value) | ['def', 'deserialize', '(', 'data', ')', ':', 'key', '=', 'data', '.', 'get', '(', "'exc_path'", ')', 'if', 'key', 'in', 'registry', ':', 'exc_args', '=', 'data', '.', 'get', '(', "'exc_args'", ',', '(', ')', ')', 'return', 'registry', '[', 'key', ']', '(', '*', 'exc_args', ')', 'exc_type', '=', 'data', '.', 'get', '(', "'exc_type'", ')', 'value', '=', 'data', '.', 'get', '(', "'value'", ')', 'return', 'RemoteError', '(', 'exc_type', '=', 'exc_type', ',', 'value', '=', 'value', ')'] | Deserialize `data` to an exception instance.
If the `exc_path` value matches an exception registered as
``deserializable``, return an instance of that exception type.
Otherwise, return a `RemoteError` instance describing the exception
that occurred. | ['Deserialize', 'data', 'to', 'an', 'exception', 'instance', '.'] | train | https://github.com/nameko/nameko/blob/88d7e5211de4fcc1c34cd7f84d7c77f0619c5f5d/nameko/exceptions.py#L97-L112 |
4,937 | QuantEcon/QuantEcon.py | quantecon/graph_tools.py | _populate_random_tournament_row_col | def _populate_random_tournament_row_col(n, r, row, col):
"""
Populate ndarrays `row` and `col` with directed edge indices
determined by random numbers in `r` for a tournament graph with n
nodes, which has num_edges = n * (n-1) // 2 edges.
Parameters
----------
n : scalar(int)
Number of nodes.
r : ndarray(float, ndim=1)
ndarray of length num_edges containing random numbers in [0, 1).
row, col : ndarray(int, ndim=1)
ndarrays of length num_edges to be modified in place.
"""
k = 0
for i in range(n):
for j in range(i+1, n):
if r[k] < 0.5:
row[k], col[k] = i, j
else:
row[k], col[k] = j, i
k += 1 | python | def _populate_random_tournament_row_col(n, r, row, col):
"""
Populate ndarrays `row` and `col` with directed edge indices
determined by random numbers in `r` for a tournament graph with n
nodes, which has num_edges = n * (n-1) // 2 edges.
Parameters
----------
n : scalar(int)
Number of nodes.
r : ndarray(float, ndim=1)
ndarray of length num_edges containing random numbers in [0, 1).
row, col : ndarray(int, ndim=1)
ndarrays of length num_edges to be modified in place.
"""
k = 0
for i in range(n):
for j in range(i+1, n):
if r[k] < 0.5:
row[k], col[k] = i, j
else:
row[k], col[k] = j, i
k += 1 | ['def', '_populate_random_tournament_row_col', '(', 'n', ',', 'r', ',', 'row', ',', 'col', ')', ':', 'k', '=', '0', 'for', 'i', 'in', 'range', '(', 'n', ')', ':', 'for', 'j', 'in', 'range', '(', 'i', '+', '1', ',', 'n', ')', ':', 'if', 'r', '[', 'k', ']', '<', '0.5', ':', 'row', '[', 'k', ']', ',', 'col', '[', 'k', ']', '=', 'i', ',', 'j', 'else', ':', 'row', '[', 'k', ']', ',', 'col', '[', 'k', ']', '=', 'j', ',', 'i', 'k', '+=', '1'] | Populate ndarrays `row` and `col` with directed edge indices
determined by random numbers in `r` for a tournament graph with n
nodes, which has num_edges = n * (n-1) // 2 edges.
Parameters
----------
n : scalar(int)
Number of nodes.
r : ndarray(float, ndim=1)
ndarray of length num_edges containing random numbers in [0, 1).
row, col : ndarray(int, ndim=1)
ndarrays of length num_edges to be modified in place. | ['Populate', 'ndarrays', 'row', 'and', 'col', 'with', 'directed', 'edge', 'indices', 'determined', 'by', 'random', 'numbers', 'in', 'r', 'for', 'a', 'tournament', 'graph', 'with', 'n', 'nodes', 'which', 'has', 'num_edges', '=', 'n', '*', '(', 'n', '-', '1', ')', '//', '2', 'edges', '.'] | train | https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/graph_tools.py#L414-L439 |
4,938 | samuelcolvin/arq | arq/jobs.py | Job.status | async def status(self) -> JobStatus:
"""
Status of the job.
"""
if await self._redis.exists(result_key_prefix + self.job_id):
return JobStatus.complete
elif await self._redis.exists(in_progress_key_prefix + self.job_id):
return JobStatus.in_progress
else:
score = await self._redis.zscore(queue_name, self.job_id)
if not score:
return JobStatus.not_found
return JobStatus.deferred if score > timestamp_ms() else JobStatus.queued | python | async def status(self) -> JobStatus:
"""
Status of the job.
"""
if await self._redis.exists(result_key_prefix + self.job_id):
return JobStatus.complete
elif await self._redis.exists(in_progress_key_prefix + self.job_id):
return JobStatus.in_progress
else:
score = await self._redis.zscore(queue_name, self.job_id)
if not score:
return JobStatus.not_found
return JobStatus.deferred if score > timestamp_ms() else JobStatus.queued | ['async', 'def', 'status', '(', 'self', ')', '->', 'JobStatus', ':', 'if', 'await', 'self', '.', '_redis', '.', 'exists', '(', 'result_key_prefix', '+', 'self', '.', 'job_id', ')', ':', 'return', 'JobStatus', '.', 'complete', 'elif', 'await', 'self', '.', '_redis', '.', 'exists', '(', 'in_progress_key_prefix', '+', 'self', '.', 'job_id', ')', ':', 'return', 'JobStatus', '.', 'in_progress', 'else', ':', 'score', '=', 'await', 'self', '.', '_redis', '.', 'zscore', '(', 'queue_name', ',', 'self', '.', 'job_id', ')', 'if', 'not', 'score', ':', 'return', 'JobStatus', '.', 'not_found', 'return', 'JobStatus', '.', 'deferred', 'if', 'score', '>', 'timestamp_ms', '(', ')', 'else', 'JobStatus', '.', 'queued'] | Status of the job. | ['Status', 'of', 'the', 'job', '.'] | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/jobs.py#L103-L115 |
4,939 | brocade/pynos | pynos/versions/ver_7/ver_7_1_0/yang/brocade_lacp.py | brocade_lacp.vlag_commit_mode_disable | def vlag_commit_mode_disable(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vlag_commit_mode = ET.SubElement(config, "vlag-commit-mode", xmlns="urn:brocade.com:mgmt:brocade-lacp")
disable = ET.SubElement(vlag_commit_mode, "disable")
callback = kwargs.pop('callback', self._callback)
return callback(config) | python | def vlag_commit_mode_disable(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vlag_commit_mode = ET.SubElement(config, "vlag-commit-mode", xmlns="urn:brocade.com:mgmt:brocade-lacp")
disable = ET.SubElement(vlag_commit_mode, "disable")
callback = kwargs.pop('callback', self._callback)
return callback(config) | ['def', 'vlag_commit_mode_disable', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'vlag_commit_mode', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"vlag-commit-mode"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-lacp"', ')', 'disable', '=', 'ET', '.', 'SubElement', '(', 'vlag_commit_mode', ',', '"disable"', ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')'] | Auto Generated Code | ['Auto', 'Generated', 'Code'] | train | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_lacp.py#L23-L31 |
4,940 | seryl/Python-Cotendo | cotendo/cotendohelper.py | CotendoDNS.del_record | def del_record(self, dns_record_type, host):
"""Remove a DNS record"""
rec = self.get_record(dns_record_type, host)
if rec:
self._entries = list(set(self._entries) - set([rec]))
return True | python | def del_record(self, dns_record_type, host):
"""Remove a DNS record"""
rec = self.get_record(dns_record_type, host)
if rec:
self._entries = list(set(self._entries) - set([rec]))
return True | ['def', 'del_record', '(', 'self', ',', 'dns_record_type', ',', 'host', ')', ':', 'rec', '=', 'self', '.', 'get_record', '(', 'dns_record_type', ',', 'host', ')', 'if', 'rec', ':', 'self', '.', '_entries', '=', 'list', '(', 'set', '(', 'self', '.', '_entries', ')', '-', 'set', '(', '[', 'rec', ']', ')', ')', 'return', 'True'] | Remove a DNS record | ['Remove', 'a', 'DNS', 'record'] | train | https://github.com/seryl/Python-Cotendo/blob/a55e034f0845332319859f6276adc6ba35f5a121/cotendo/cotendohelper.py#L92-L97 |
4,941 | frnmst/md-toc | md_toc/api.py | write_strings_on_files_between_markers | def write_strings_on_files_between_markers(filenames: list, strings: list,
marker: str):
r"""Write the table of contents on multiple files.
:parameter filenames: the files that needs to be read or modified.
:parameter strings: the strings that will be written on the file. Each
string is associated with one file.
:parameter marker: a marker that will identify the start
and the end of the string.
:type filenames: list
:type string: list
:type marker: str
:returns: None
:rtype: None
:raises: an fpyutils exception or a built-in exception.
"""
assert len(filenames) == len(strings)
if len(filenames) > 0:
for f in filenames:
assert isinstance(f, str)
if len(strings) > 0:
for s in strings:
assert isinstance(s, str)
file_id = 0
for f in filenames:
write_string_on_file_between_markers(f, strings[file_id], marker)
file_id += 1 | python | def write_strings_on_files_between_markers(filenames: list, strings: list,
marker: str):
r"""Write the table of contents on multiple files.
:parameter filenames: the files that needs to be read or modified.
:parameter strings: the strings that will be written on the file. Each
string is associated with one file.
:parameter marker: a marker that will identify the start
and the end of the string.
:type filenames: list
:type string: list
:type marker: str
:returns: None
:rtype: None
:raises: an fpyutils exception or a built-in exception.
"""
assert len(filenames) == len(strings)
if len(filenames) > 0:
for f in filenames:
assert isinstance(f, str)
if len(strings) > 0:
for s in strings:
assert isinstance(s, str)
file_id = 0
for f in filenames:
write_string_on_file_between_markers(f, strings[file_id], marker)
file_id += 1 | ['def', 'write_strings_on_files_between_markers', '(', 'filenames', ':', 'list', ',', 'strings', ':', 'list', ',', 'marker', ':', 'str', ')', ':', 'assert', 'len', '(', 'filenames', ')', '==', 'len', '(', 'strings', ')', 'if', 'len', '(', 'filenames', ')', '>', '0', ':', 'for', 'f', 'in', 'filenames', ':', 'assert', 'isinstance', '(', 'f', ',', 'str', ')', 'if', 'len', '(', 'strings', ')', '>', '0', ':', 'for', 's', 'in', 'strings', ':', 'assert', 'isinstance', '(', 's', ',', 'str', ')', 'file_id', '=', '0', 'for', 'f', 'in', 'filenames', ':', 'write_string_on_file_between_markers', '(', 'f', ',', 'strings', '[', 'file_id', ']', ',', 'marker', ')', 'file_id', '+=', '1'] | r"""Write the table of contents on multiple files.
:parameter filenames: the files that needs to be read or modified.
:parameter strings: the strings that will be written on the file. Each
string is associated with one file.
:parameter marker: a marker that will identify the start
and the end of the string.
:type filenames: list
:type string: list
:type marker: str
:returns: None
:rtype: None
:raises: an fpyutils exception or a built-in exception. | ['r', 'Write', 'the', 'table', 'of', 'contents', 'on', 'multiple', 'files', '.'] | train | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L73-L100 |
4,942 | awslabs/serverless-application-model | examples/apps/kinesis-analytics-process-kpl-record/lambda_function.py | lambda_handler | def lambda_handler(event, context):
'''A Python AWS Lambda function to process aggregated records sent to KinesisAnalytics.'''
raw_kpl_records = event['records']
output = [process_kpl_record(kpl_record) for kpl_record in raw_kpl_records]
# Print number of successful and failed records.
success_count = sum(1 for record in output if record['result'] == 'Ok')
failure_count = sum(1 for record in output if record['result'] == 'ProcessingFailed')
print('Processing completed. Successful records: {0}, Failed records: {1}.'.format(success_count, failure_count))
return {'records': output} | python | def lambda_handler(event, context):
'''A Python AWS Lambda function to process aggregated records sent to KinesisAnalytics.'''
raw_kpl_records = event['records']
output = [process_kpl_record(kpl_record) for kpl_record in raw_kpl_records]
# Print number of successful and failed records.
success_count = sum(1 for record in output if record['result'] == 'Ok')
failure_count = sum(1 for record in output if record['result'] == 'ProcessingFailed')
print('Processing completed. Successful records: {0}, Failed records: {1}.'.format(success_count, failure_count))
return {'records': output} | ['def', 'lambda_handler', '(', 'event', ',', 'context', ')', ':', 'raw_kpl_records', '=', 'event', '[', "'records'", ']', 'output', '=', '[', 'process_kpl_record', '(', 'kpl_record', ')', 'for', 'kpl_record', 'in', 'raw_kpl_records', ']', '# Print number of successful and failed records.', 'success_count', '=', 'sum', '(', '1', 'for', 'record', 'in', 'output', 'if', 'record', '[', "'result'", ']', '==', "'Ok'", ')', 'failure_count', '=', 'sum', '(', '1', 'for', 'record', 'in', 'output', 'if', 'record', '[', "'result'", ']', '==', "'ProcessingFailed'", ')', 'print', '(', "'Processing completed. Successful records: {0}, Failed records: {1}.'", '.', 'format', '(', 'success_count', ',', 'failure_count', ')', ')', 'return', '{', "'records'", ':', 'output', '}'] | A Python AWS Lambda function to process aggregated records sent to KinesisAnalytics. | ['A', 'Python', 'AWS', 'Lambda', 'function', 'to', 'process', 'aggregated', 'records', 'sent', 'to', 'KinesisAnalytics', '.'] | train | https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/examples/apps/kinesis-analytics-process-kpl-record/lambda_function.py#L25-L35 |
4,943 | glitchassassin/lackey | lackey/PlatformManagerDarwin.py | PlatformManagerDarwin.getWindowByTitle | def getWindowByTitle(self, wildcard, order=0):
""" Returns a handle for the first window that matches the provided "wildcard" regex """
for w in self._get_window_list():
if "kCGWindowName" in w and re.search(wildcard, w["kCGWindowName"], flags=re.I):
# Matches - make sure we get it in the correct order
if order == 0:
return w["kCGWindowNumber"]
else:
order -= 1 | python | def getWindowByTitle(self, wildcard, order=0):
""" Returns a handle for the first window that matches the provided "wildcard" regex """
for w in self._get_window_list():
if "kCGWindowName" in w and re.search(wildcard, w["kCGWindowName"], flags=re.I):
# Matches - make sure we get it in the correct order
if order == 0:
return w["kCGWindowNumber"]
else:
order -= 1 | ['def', 'getWindowByTitle', '(', 'self', ',', 'wildcard', ',', 'order', '=', '0', ')', ':', 'for', 'w', 'in', 'self', '.', '_get_window_list', '(', ')', ':', 'if', '"kCGWindowName"', 'in', 'w', 'and', 're', '.', 'search', '(', 'wildcard', ',', 'w', '[', '"kCGWindowName"', ']', ',', 'flags', '=', 're', '.', 'I', ')', ':', '# Matches - make sure we get it in the correct order', 'if', 'order', '==', '0', ':', 'return', 'w', '[', '"kCGWindowNumber"', ']', 'else', ':', 'order', '-=', '1'] | Returns a handle for the first window that matches the provided "wildcard" regex | ['Returns', 'a', 'handle', 'for', 'the', 'first', 'window', 'that', 'matches', 'the', 'provided', 'wildcard', 'regex'] | train | https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/PlatformManagerDarwin.py#L302-L310 |
4,944 | hawkular/hawkular-client-python | hawkular/alerts/triggers.py | AlertsTriggerClient.enable | def enable(self, trigger_ids=[]):
"""
Enable triggers.
:param trigger_ids: List of trigger definition ids to enable
"""
trigger_ids = ','.join(trigger_ids)
url = self._service_url(['triggers', 'enabled'], params={'triggerIds': trigger_ids, 'enabled': 'true'})
self._put(url, data=None, parse_json=False) | python | def enable(self, trigger_ids=[]):
"""
Enable triggers.
:param trigger_ids: List of trigger definition ids to enable
"""
trigger_ids = ','.join(trigger_ids)
url = self._service_url(['triggers', 'enabled'], params={'triggerIds': trigger_ids, 'enabled': 'true'})
self._put(url, data=None, parse_json=False) | ['def', 'enable', '(', 'self', ',', 'trigger_ids', '=', '[', ']', ')', ':', 'trigger_ids', '=', "','", '.', 'join', '(', 'trigger_ids', ')', 'url', '=', 'self', '.', '_service_url', '(', '[', "'triggers'", ',', "'enabled'", ']', ',', 'params', '=', '{', "'triggerIds'", ':', 'trigger_ids', ',', "'enabled'", ':', "'true'", '}', ')', 'self', '.', '_put', '(', 'url', ',', 'data', '=', 'None', ',', 'parse_json', '=', 'False', ')'] | Enable triggers.
:param trigger_ids: List of trigger definition ids to enable | ['Enable', 'triggers', '.'] | train | https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L418-L426 |
4,945 | ARMmbed/mbed-cloud-sdk-python | src/mbed_cloud/subscribe/observer.py | Observer.notify | def notify(self, data):
"""Notify this observer that data has arrived"""
LOG.debug('notify received: %s', data)
self._notify_count += 1
if self._cancelled:
LOG.debug('notify skipping due to `cancelled`')
return self
if self._once_done and self._once:
LOG.debug('notify skipping due to `once`')
return self
with self._lock:
try:
# notify next consumer immediately
self._waitables.get_nowait().put_nowait(data)
LOG.debug('found a consumer, notifying')
except queue.Empty:
# store the notification
try:
self._notifications.put_nowait(data)
LOG.debug('no consumers, queueing data')
except queue.Full:
LOG.warning('notification queue full - discarding new data')
# callbacks are sent straight away
# bombproofing should be handled by individual callbacks
for callback in self._callbacks:
LOG.debug('callback: %s', callback)
callback(data)
self._once_done = True
return self | python | def notify(self, data):
"""Notify this observer that data has arrived"""
LOG.debug('notify received: %s', data)
self._notify_count += 1
if self._cancelled:
LOG.debug('notify skipping due to `cancelled`')
return self
if self._once_done and self._once:
LOG.debug('notify skipping due to `once`')
return self
with self._lock:
try:
# notify next consumer immediately
self._waitables.get_nowait().put_nowait(data)
LOG.debug('found a consumer, notifying')
except queue.Empty:
# store the notification
try:
self._notifications.put_nowait(data)
LOG.debug('no consumers, queueing data')
except queue.Full:
LOG.warning('notification queue full - discarding new data')
# callbacks are sent straight away
# bombproofing should be handled by individual callbacks
for callback in self._callbacks:
LOG.debug('callback: %s', callback)
callback(data)
self._once_done = True
return self | ['def', 'notify', '(', 'self', ',', 'data', ')', ':', 'LOG', '.', 'debug', '(', "'notify received: %s'", ',', 'data', ')', 'self', '.', '_notify_count', '+=', '1', 'if', 'self', '.', '_cancelled', ':', 'LOG', '.', 'debug', '(', "'notify skipping due to `cancelled`'", ')', 'return', 'self', 'if', 'self', '.', '_once_done', 'and', 'self', '.', '_once', ':', 'LOG', '.', 'debug', '(', "'notify skipping due to `once`'", ')', 'return', 'self', 'with', 'self', '.', '_lock', ':', 'try', ':', '# notify next consumer immediately', 'self', '.', '_waitables', '.', 'get_nowait', '(', ')', '.', 'put_nowait', '(', 'data', ')', 'LOG', '.', 'debug', '(', "'found a consumer, notifying'", ')', 'except', 'queue', '.', 'Empty', ':', '# store the notification', 'try', ':', 'self', '.', '_notifications', '.', 'put_nowait', '(', 'data', ')', 'LOG', '.', 'debug', '(', "'no consumers, queueing data'", ')', 'except', 'queue', '.', 'Full', ':', 'LOG', '.', 'warning', '(', "'notification queue full - discarding new data'", ')', '# callbacks are sent straight away', '# bombproofing should be handled by individual callbacks', 'for', 'callback', 'in', 'self', '.', '_callbacks', ':', 'LOG', '.', 'debug', '(', "'callback: %s'", ',', 'callback', ')', 'callback', '(', 'data', ')', 'self', '.', '_once_done', '=', 'True', 'return', 'self'] | Notify this observer that data has arrived | ['Notify', 'this', 'observer', 'that', 'data', 'has', 'arrived'] | train | https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/subscribe/observer.py#L127-L156 |
4,946 | EVEprosper/ProsperCommon | prosper/common/prosper_config.py | get_value_from_environment | def get_value_from_environment(
section_name,
key_name,
envname_pad=ENVNAME_PAD,
logger=logging.getLogger('ProsperCommon'),
):
"""check environment for key/value pair
Args:
section_name (str): section name
key_name (str): key to look up
envname_pad (str): namespace padding
logger (:obj:`logging.logger`): logging handle
Returns:
str: value in environment
"""
var_name = '{pad}_{section}__{key}'.format(
pad=envname_pad,
section=section_name,
key=key_name
)
logger.debug('var_name=%s', var_name)
value = getenv(var_name)
logger.debug('env value=%s', value)
return value | python | def get_value_from_environment(
section_name,
key_name,
envname_pad=ENVNAME_PAD,
logger=logging.getLogger('ProsperCommon'),
):
"""check environment for key/value pair
Args:
section_name (str): section name
key_name (str): key to look up
envname_pad (str): namespace padding
logger (:obj:`logging.logger`): logging handle
Returns:
str: value in environment
"""
var_name = '{pad}_{section}__{key}'.format(
pad=envname_pad,
section=section_name,
key=key_name
)
logger.debug('var_name=%s', var_name)
value = getenv(var_name)
logger.debug('env value=%s', value)
return value | ['def', 'get_value_from_environment', '(', 'section_name', ',', 'key_name', ',', 'envname_pad', '=', 'ENVNAME_PAD', ',', 'logger', '=', 'logging', '.', 'getLogger', '(', "'ProsperCommon'", ')', ',', ')', ':', 'var_name', '=', "'{pad}_{section}__{key}'", '.', 'format', '(', 'pad', '=', 'envname_pad', ',', 'section', '=', 'section_name', ',', 'key', '=', 'key_name', ')', 'logger', '.', 'debug', '(', "'var_name=%s'", ',', 'var_name', ')', 'value', '=', 'getenv', '(', 'var_name', ')', 'logger', '.', 'debug', '(', "'env value=%s'", ',', 'value', ')', 'return', 'value'] | check environment for key/value pair
Args:
section_name (str): section name
key_name (str): key to look up
envname_pad (str): namespace padding
logger (:obj:`logging.logger`): logging handle
Returns:
str: value in environment | ['check', 'environment', 'for', 'key', '/', 'value', 'pair'] | train | https://github.com/EVEprosper/ProsperCommon/blob/bcada3b25420099e1f204db8d55eb268e7b4dc27/prosper/common/prosper_config.py#L211-L239 |
4,947 | Vauxoo/cfdilib | cfdilib/cfdilib.py | BaseDocument.guess_autoescape | def guess_autoescape(self, template_name):
"""Given a template Name I will gues using its
extension if we should autoscape or not.
Default autoscaped extensions: ('html', 'xhtml', 'htm', 'xml')
"""
if template_name is None or '.' not in template_name:
return False
ext = template_name.rsplit('.', 1)[1]
return ext in ('html', 'xhtml', 'htm', 'xml') | python | def guess_autoescape(self, template_name):
"""Given a template Name I will gues using its
extension if we should autoscape or not.
Default autoscaped extensions: ('html', 'xhtml', 'htm', 'xml')
"""
if template_name is None or '.' not in template_name:
return False
ext = template_name.rsplit('.', 1)[1]
return ext in ('html', 'xhtml', 'htm', 'xml') | ['def', 'guess_autoescape', '(', 'self', ',', 'template_name', ')', ':', 'if', 'template_name', 'is', 'None', 'or', "'.'", 'not', 'in', 'template_name', ':', 'return', 'False', 'ext', '=', 'template_name', '.', 'rsplit', '(', "'.'", ',', '1', ')', '[', '1', ']', 'return', 'ext', 'in', '(', "'html'", ',', "'xhtml'", ',', "'htm'", ',', "'xml'", ')'] | Given a template Name I will gues using its
extension if we should autoscape or not.
Default autoscaped extensions: ('html', 'xhtml', 'htm', 'xml') | ['Given', 'a', 'template', 'Name', 'I', 'will', 'gues', 'using', 'its', 'extension', 'if', 'we', 'should', 'autoscape', 'or', 'not', '.', 'Default', 'autoscaped', 'extensions', ':', '(', 'html', 'xhtml', 'htm', 'xml', ')'] | train | https://github.com/Vauxoo/cfdilib/blob/acd73d159f62119f3100d963a061820bbe3f93ea/cfdilib/cfdilib.py#L122-L130 |
4,948 | gem/oq-engine | openquake/server/views.py | _prepare_job | def _prepare_job(request, candidates):
"""
Creates a temporary directory, move uploaded files there and
select the job file by looking at the candidate names.
:returns: full path of the job_file
"""
temp_dir = tempfile.mkdtemp()
inifiles = []
arch = request.FILES.get('archive')
if arch is None:
# move each file to a new temp dir, using the upload file names,
# not the temporary ones
for each_file in request.FILES.values():
new_path = os.path.join(temp_dir, each_file.name)
shutil.move(each_file.temporary_file_path(), new_path)
if each_file.name in candidates:
inifiles.append(new_path)
return inifiles
# else extract the files from the archive into temp_dir
return readinput.extract_from_zip(arch, candidates) | python | def _prepare_job(request, candidates):
"""
Creates a temporary directory, move uploaded files there and
select the job file by looking at the candidate names.
:returns: full path of the job_file
"""
temp_dir = tempfile.mkdtemp()
inifiles = []
arch = request.FILES.get('archive')
if arch is None:
# move each file to a new temp dir, using the upload file names,
# not the temporary ones
for each_file in request.FILES.values():
new_path = os.path.join(temp_dir, each_file.name)
shutil.move(each_file.temporary_file_path(), new_path)
if each_file.name in candidates:
inifiles.append(new_path)
return inifiles
# else extract the files from the archive into temp_dir
return readinput.extract_from_zip(arch, candidates) | ['def', '_prepare_job', '(', 'request', ',', 'candidates', ')', ':', 'temp_dir', '=', 'tempfile', '.', 'mkdtemp', '(', ')', 'inifiles', '=', '[', ']', 'arch', '=', 'request', '.', 'FILES', '.', 'get', '(', "'archive'", ')', 'if', 'arch', 'is', 'None', ':', '# move each file to a new temp dir, using the upload file names,', '# not the temporary ones', 'for', 'each_file', 'in', 'request', '.', 'FILES', '.', 'values', '(', ')', ':', 'new_path', '=', 'os', '.', 'path', '.', 'join', '(', 'temp_dir', ',', 'each_file', '.', 'name', ')', 'shutil', '.', 'move', '(', 'each_file', '.', 'temporary_file_path', '(', ')', ',', 'new_path', ')', 'if', 'each_file', '.', 'name', 'in', 'candidates', ':', 'inifiles', '.', 'append', '(', 'new_path', ')', 'return', 'inifiles', '# else extract the files from the archive into temp_dir', 'return', 'readinput', '.', 'extract_from_zip', '(', 'arch', ',', 'candidates', ')'] | Creates a temporary directory, move uploaded files there and
select the job file by looking at the candidate names.
:returns: full path of the job_file | ['Creates', 'a', 'temporary', 'directory', 'move', 'uploaded', 'files', 'there', 'and', 'select', 'the', 'job', 'file', 'by', 'looking', 'at', 'the', 'candidate', 'names', '.'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/server/views.py#L124-L144 |
4,949 | tijme/not-your-average-web-crawler | nyawc/Crawler.py | Crawler.start_with | def start_with(self, request):
"""Start the crawler using the given request.
Args:
request (:class:`nyawc.http.Request`): The startpoint for the crawler.
"""
HTTPRequestHelper.patch_with_options(request, self.__options)
self.queue.add_request(request)
self.__crawler_start() | python | def start_with(self, request):
"""Start the crawler using the given request.
Args:
request (:class:`nyawc.http.Request`): The startpoint for the crawler.
"""
HTTPRequestHelper.patch_with_options(request, self.__options)
self.queue.add_request(request)
self.__crawler_start() | ['def', 'start_with', '(', 'self', ',', 'request', ')', ':', 'HTTPRequestHelper', '.', 'patch_with_options', '(', 'request', ',', 'self', '.', '__options', ')', 'self', '.', 'queue', '.', 'add_request', '(', 'request', ')', 'self', '.', '__crawler_start', '(', ')'] | Start the crawler using the given request.
Args:
request (:class:`nyawc.http.Request`): The startpoint for the crawler. | ['Start', 'the', 'crawler', 'using', 'the', 'given', 'request', '.'] | train | https://github.com/tijme/not-your-average-web-crawler/blob/d77c14e1616c541bb3980f649a7e6f8ed02761fb/nyawc/Crawler.py#L87-L98 |
4,950 | ivilata/pymultihash | multihash/multihash.py | _do_digest | def _do_digest(data, func):
"""Return the binary digest of `data` with the given `func`."""
func = FuncReg.get(func)
hash = FuncReg.hash_from_func(func)
if not hash:
raise ValueError("no available hash function for hash", func)
hash.update(data)
return bytes(hash.digest()) | python | def _do_digest(data, func):
"""Return the binary digest of `data` with the given `func`."""
func = FuncReg.get(func)
hash = FuncReg.hash_from_func(func)
if not hash:
raise ValueError("no available hash function for hash", func)
hash.update(data)
return bytes(hash.digest()) | ['def', '_do_digest', '(', 'data', ',', 'func', ')', ':', 'func', '=', 'FuncReg', '.', 'get', '(', 'func', ')', 'hash', '=', 'FuncReg', '.', 'hash_from_func', '(', 'func', ')', 'if', 'not', 'hash', ':', 'raise', 'ValueError', '(', '"no available hash function for hash"', ',', 'func', ')', 'hash', '.', 'update', '(', 'data', ')', 'return', 'bytes', '(', 'hash', '.', 'digest', '(', ')', ')'] | Return the binary digest of `data` with the given `func`. | ['Return', 'the', 'binary', 'digest', 'of', 'data', 'with', 'the', 'given', 'func', '.'] | train | https://github.com/ivilata/pymultihash/blob/093365f20f6d8627c1fae13e0f4e0b35e9b39ad2/multihash/multihash.py#L16-L23 |
4,951 | GNS3/gns3-server | gns3server/compute/dynamips/nodes/atm_switch.py | ATMSwitch.map_vp | def map_vp(self, port1, vpi1, port2, vpi2):
"""
Creates a new Virtual Path connection.
:param port1: input port
:param vpi1: input vpi
:param port2: output port
:param vpi2: output vpi
"""
if port1 not in self._nios:
return
if port2 not in self._nios:
return
nio1 = self._nios[port1]
nio2 = self._nios[port2]
yield from self._hypervisor.send('atmsw create_vpc "{name}" {input_nio} {input_vpi} {output_nio} {output_vpi}'.format(name=self._name,
input_nio=nio1,
input_vpi=vpi1,
output_nio=nio2,
output_vpi=vpi2))
log.info('ATM switch "{name}" [{id}]: VPC from port {port1} VPI {vpi1} to port {port2} VPI {vpi2} created'.format(name=self._name,
id=self._id,
port1=port1,
vpi1=vpi1,
port2=port2,
vpi2=vpi2))
self._active_mappings[(port1, vpi1)] = (port2, vpi2) | python | def map_vp(self, port1, vpi1, port2, vpi2):
"""
Creates a new Virtual Path connection.
:param port1: input port
:param vpi1: input vpi
:param port2: output port
:param vpi2: output vpi
"""
if port1 not in self._nios:
return
if port2 not in self._nios:
return
nio1 = self._nios[port1]
nio2 = self._nios[port2]
yield from self._hypervisor.send('atmsw create_vpc "{name}" {input_nio} {input_vpi} {output_nio} {output_vpi}'.format(name=self._name,
input_nio=nio1,
input_vpi=vpi1,
output_nio=nio2,
output_vpi=vpi2))
log.info('ATM switch "{name}" [{id}]: VPC from port {port1} VPI {vpi1} to port {port2} VPI {vpi2} created'.format(name=self._name,
id=self._id,
port1=port1,
vpi1=vpi1,
port2=port2,
vpi2=vpi2))
self._active_mappings[(port1, vpi1)] = (port2, vpi2) | ['def', 'map_vp', '(', 'self', ',', 'port1', ',', 'vpi1', ',', 'port2', ',', 'vpi2', ')', ':', 'if', 'port1', 'not', 'in', 'self', '.', '_nios', ':', 'return', 'if', 'port2', 'not', 'in', 'self', '.', '_nios', ':', 'return', 'nio1', '=', 'self', '.', '_nios', '[', 'port1', ']', 'nio2', '=', 'self', '.', '_nios', '[', 'port2', ']', 'yield', 'from', 'self', '.', '_hypervisor', '.', 'send', '(', '\'atmsw create_vpc "{name}" {input_nio} {input_vpi} {output_nio} {output_vpi}\'', '.', 'format', '(', 'name', '=', 'self', '.', '_name', ',', 'input_nio', '=', 'nio1', ',', 'input_vpi', '=', 'vpi1', ',', 'output_nio', '=', 'nio2', ',', 'output_vpi', '=', 'vpi2', ')', ')', 'log', '.', 'info', '(', '\'ATM switch "{name}" [{id}]: VPC from port {port1} VPI {vpi1} to port {port2} VPI {vpi2} created\'', '.', 'format', '(', 'name', '=', 'self', '.', '_name', ',', 'id', '=', 'self', '.', '_id', ',', 'port1', '=', 'port1', ',', 'vpi1', '=', 'vpi1', ',', 'port2', '=', 'port2', ',', 'vpi2', '=', 'vpi2', ')', ')', 'self', '.', '_active_mappings', '[', '(', 'port1', ',', 'vpi1', ')', ']', '=', '(', 'port2', ',', 'vpi2', ')'] | Creates a new Virtual Path connection.
:param port1: input port
:param vpi1: input vpi
:param port2: output port
:param vpi2: output vpi | ['Creates', 'a', 'new', 'Virtual', 'Path', 'connection', '.'] | train | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/nodes/atm_switch.py#L279-L311 |
4,952 | apple/turicreate | src/unity/python/turicreate/data_structures/sframe.py | SFrame.append | def append(self, other):
"""
Add the rows of an SFrame to the end of this SFrame.
Both SFrames must have the same set of columns with the same column
names and column types.
Parameters
----------
other : SFrame
Another SFrame whose rows are appended to the current SFrame.
Returns
-------
out : SFrame
The result SFrame from the append operation.
Examples
--------
>>> sf = turicreate.SFrame({'id': [4, 6, 8], 'val': ['D', 'F', 'H']})
>>> sf2 = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf = sf.append(sf2)
>>> sf
+----+-----+
| id | val |
+----+-----+
| 4 | D |
| 6 | F |
| 8 | H |
| 1 | A |
| 2 | B |
| 3 | C |
+----+-----+
[6 rows x 2 columns]
"""
if type(other) is not SFrame:
raise RuntimeError("SFrame append can only work with SFrame")
left_empty = len(self.column_names()) == 0
right_empty = len(other.column_names()) == 0
if (left_empty and right_empty):
return SFrame()
if (left_empty or right_empty):
non_empty_sframe = self if right_empty else other
return non_empty_sframe.__copy__()
my_column_names = self.column_names()
my_column_types = self.column_types()
other_column_names = other.column_names()
if (len(my_column_names) != len(other_column_names)):
raise RuntimeError("Two SFrames have to have the same number of columns")
# check if the order of column name is the same
column_name_order_match = True
for i in range(len(my_column_names)):
if other_column_names[i] != my_column_names[i]:
column_name_order_match = False
break
processed_other_frame = other
if not column_name_order_match:
# we allow name order of two sframes to be different, so we create a new sframe from
# "other" sframe to make it has exactly the same shape
processed_other_frame = SFrame()
for i in range(len(my_column_names)):
col_name = my_column_names[i]
if(col_name not in other_column_names):
raise RuntimeError("Column " + my_column_names[i] + " does not exist in second SFrame")
other_column = other.select_column(col_name)
processed_other_frame.add_column(other_column, col_name, inplace=True)
# check column type
if my_column_types[i] != other_column.dtype:
raise RuntimeError("Column " + my_column_names[i] + " type is not the same in two SFrames, one is " + str(my_column_types[i]) + ", the other is " + str(other_column.dtype))
with cython_context():
return SFrame(_proxy=self.__proxy__.append(processed_other_frame.__proxy__)) | python | def append(self, other):
"""
Add the rows of an SFrame to the end of this SFrame.
Both SFrames must have the same set of columns with the same column
names and column types.
Parameters
----------
other : SFrame
Another SFrame whose rows are appended to the current SFrame.
Returns
-------
out : SFrame
The result SFrame from the append operation.
Examples
--------
>>> sf = turicreate.SFrame({'id': [4, 6, 8], 'val': ['D', 'F', 'H']})
>>> sf2 = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf = sf.append(sf2)
>>> sf
+----+-----+
| id | val |
+----+-----+
| 4 | D |
| 6 | F |
| 8 | H |
| 1 | A |
| 2 | B |
| 3 | C |
+----+-----+
[6 rows x 2 columns]
"""
if type(other) is not SFrame:
raise RuntimeError("SFrame append can only work with SFrame")
left_empty = len(self.column_names()) == 0
right_empty = len(other.column_names()) == 0
if (left_empty and right_empty):
return SFrame()
if (left_empty or right_empty):
non_empty_sframe = self if right_empty else other
return non_empty_sframe.__copy__()
my_column_names = self.column_names()
my_column_types = self.column_types()
other_column_names = other.column_names()
if (len(my_column_names) != len(other_column_names)):
raise RuntimeError("Two SFrames have to have the same number of columns")
# check if the order of column name is the same
column_name_order_match = True
for i in range(len(my_column_names)):
if other_column_names[i] != my_column_names[i]:
column_name_order_match = False
break
processed_other_frame = other
if not column_name_order_match:
# we allow name order of two sframes to be different, so we create a new sframe from
# "other" sframe to make it has exactly the same shape
processed_other_frame = SFrame()
for i in range(len(my_column_names)):
col_name = my_column_names[i]
if(col_name not in other_column_names):
raise RuntimeError("Column " + my_column_names[i] + " does not exist in second SFrame")
other_column = other.select_column(col_name)
processed_other_frame.add_column(other_column, col_name, inplace=True)
# check column type
if my_column_types[i] != other_column.dtype:
raise RuntimeError("Column " + my_column_names[i] + " type is not the same in two SFrames, one is " + str(my_column_types[i]) + ", the other is " + str(other_column.dtype))
with cython_context():
return SFrame(_proxy=self.__proxy__.append(processed_other_frame.__proxy__)) | ['def', 'append', '(', 'self', ',', 'other', ')', ':', 'if', 'type', '(', 'other', ')', 'is', 'not', 'SFrame', ':', 'raise', 'RuntimeError', '(', '"SFrame append can only work with SFrame"', ')', 'left_empty', '=', 'len', '(', 'self', '.', 'column_names', '(', ')', ')', '==', '0', 'right_empty', '=', 'len', '(', 'other', '.', 'column_names', '(', ')', ')', '==', '0', 'if', '(', 'left_empty', 'and', 'right_empty', ')', ':', 'return', 'SFrame', '(', ')', 'if', '(', 'left_empty', 'or', 'right_empty', ')', ':', 'non_empty_sframe', '=', 'self', 'if', 'right_empty', 'else', 'other', 'return', 'non_empty_sframe', '.', '__copy__', '(', ')', 'my_column_names', '=', 'self', '.', 'column_names', '(', ')', 'my_column_types', '=', 'self', '.', 'column_types', '(', ')', 'other_column_names', '=', 'other', '.', 'column_names', '(', ')', 'if', '(', 'len', '(', 'my_column_names', ')', '!=', 'len', '(', 'other_column_names', ')', ')', ':', 'raise', 'RuntimeError', '(', '"Two SFrames have to have the same number of columns"', ')', '# check if the order of column name is the same', 'column_name_order_match', '=', 'True', 'for', 'i', 'in', 'range', '(', 'len', '(', 'my_column_names', ')', ')', ':', 'if', 'other_column_names', '[', 'i', ']', '!=', 'my_column_names', '[', 'i', ']', ':', 'column_name_order_match', '=', 'False', 'break', 'processed_other_frame', '=', 'other', 'if', 'not', 'column_name_order_match', ':', '# we allow name order of two sframes to be different, so we create a new sframe from', '# "other" sframe to make it has exactly the same shape', 'processed_other_frame', '=', 'SFrame', '(', ')', 'for', 'i', 'in', 'range', '(', 'len', '(', 'my_column_names', ')', ')', ':', 'col_name', '=', 'my_column_names', '[', 'i', ']', 'if', '(', 'col_name', 'not', 'in', 'other_column_names', ')', ':', 'raise', 'RuntimeError', '(', '"Column "', '+', 'my_column_names', '[', 'i', ']', '+', '" does not exist in second SFrame"', ')', 'other_column', '=', 'other', '.', 'select_column', '(', 'col_name', ')', 'processed_other_frame', '.', 'add_column', '(', 'other_column', ',', 'col_name', ',', 'inplace', '=', 'True', ')', '# check column type', 'if', 'my_column_types', '[', 'i', ']', '!=', 'other_column', '.', 'dtype', ':', 'raise', 'RuntimeError', '(', '"Column "', '+', 'my_column_names', '[', 'i', ']', '+', '" type is not the same in two SFrames, one is "', '+', 'str', '(', 'my_column_types', '[', 'i', ']', ')', '+', '", the other is "', '+', 'str', '(', 'other_column', '.', 'dtype', ')', ')', 'with', 'cython_context', '(', ')', ':', 'return', 'SFrame', '(', '_proxy', '=', 'self', '.', '__proxy__', '.', 'append', '(', 'processed_other_frame', '.', '__proxy__', ')', ')'] | Add the rows of an SFrame to the end of this SFrame.
Both SFrames must have the same set of columns with the same column
names and column types.
Parameters
----------
other : SFrame
Another SFrame whose rows are appended to the current SFrame.
Returns
-------
out : SFrame
The result SFrame from the append operation.
Examples
--------
>>> sf = turicreate.SFrame({'id': [4, 6, 8], 'val': ['D', 'F', 'H']})
>>> sf2 = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf = sf.append(sf2)
>>> sf
+----+-----+
| id | val |
+----+-----+
| 4 | D |
| 6 | F |
| 8 | H |
| 1 | A |
| 2 | B |
| 3 | C |
+----+-----+
[6 rows x 2 columns] | ['Add', 'the', 'rows', 'of', 'an', 'SFrame', 'to', 'the', 'end', 'of', 'this', 'SFrame', '.'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sframe.py#L3737-L3816 |
4,953 | neuropsychology/NeuroKit.py | neurokit/bio/bio_eda.py | cvxEDA | def cvxEDA(eda, sampling_rate=1000, tau0=2., tau1=0.7, delta_knot=10., alpha=8e-4, gamma=1e-2, solver=None, verbose=False, options={'reltol':1e-9}):
"""
A convex optimization approach to electrodermal activity processing (CVXEDA).
This function implements the cvxEDA algorithm described in "cvxEDA: a
Convex Optimization Approach to Electrodermal Activity Processing" (Greco et al., 2015).
Parameters
----------
eda : list or array
raw EDA signal array.
sampling_rate : int
Sampling rate (samples/second).
tau0 : float
Slow time constant of the Bateman function.
tau1 : float
Fast time constant of the Bateman function.
delta_knot : float
Time between knots of the tonic spline function.
alpha : float
Penalization for the sparse SMNA driver.
gamma : float
Penalization for the tonic spline coefficients.
solver : bool
Sparse QP solver to be used, see cvxopt.solvers.qp
verbose : bool
Print progress?
options : dict
Solver options, see http://cvxopt.org/userguide/coneprog.html#algorithm-parameters
Returns
----------
phasic : numpy.array
The phasic component.
Notes
----------
*Authors*
- Luca Citi (https://github.com/lciti)
- Alberto Greco
*Dependencies*
- cvxopt
- numpy
*See Also*
- cvxEDA: https://github.com/lciti/cvxEDA
References
-----------
- Greco, A., Valenza, G., & Scilingo, E. P. (2016). Evaluation of CDA and CvxEDA Models. In Advances in Electrodermal Activity Processing with Applications for Mental Health (pp. 35-43). Springer International Publishing.
- Greco, A., Valenza, G., Lanata, A., Scilingo, E. P., & Citi, L. (2016). cvxEDA: A convex optimization approach to electrodermal activity processing. IEEE Transactions on Biomedical Engineering, 63(4), 797-804.
"""
frequency = 1/sampling_rate
# Normalizing signal
eda = z_score(eda)
eda = np.array(eda)[:,0]
n = len(eda)
eda = eda.astype('double')
eda = cv.matrix(eda)
# bateman ARMA model
a1 = 1./min(tau1, tau0) # a1 > a0
a0 = 1./max(tau1, tau0)
ar = np.array([(a1*frequency + 2.) * (a0*frequency + 2.), 2.*a1*a0*frequency**2 - 8.,
(a1*frequency - 2.) * (a0*frequency - 2.)]) / ((a1 - a0) * frequency**2)
ma = np.array([1., 2., 1.])
# matrices for ARMA model
i = np.arange(2, n)
A = cv.spmatrix(np.tile(ar, (n-2,1)), np.c_[i,i,i], np.c_[i,i-1,i-2], (n,n))
M = cv.spmatrix(np.tile(ma, (n-2,1)), np.c_[i,i,i], np.c_[i,i-1,i-2], (n,n))
# spline
delta_knot_s = int(round(delta_knot / frequency))
spl = np.r_[np.arange(1.,delta_knot_s), np.arange(delta_knot_s, 0., -1.)] # order 1
spl = np.convolve(spl, spl, 'full')
spl /= max(spl)
# matrix of spline regressors
i = np.c_[np.arange(-(len(spl)//2), (len(spl)+1)//2)] + np.r_[np.arange(0, n, delta_knot_s)]
nB = i.shape[1]
j = np.tile(np.arange(nB), (len(spl),1))
p = np.tile(spl, (nB,1)).T
valid = (i >= 0) & (i < n)
B = cv.spmatrix(p[valid], i[valid], j[valid])
# trend
C = cv.matrix(np.c_[np.ones(n), np.arange(1., n+1.)/n])
nC = C.size[1]
# Solve the problem:
# .5*(M*q + B*l + C*d - eda)^2 + alpha*sum(A,1)*p + .5*gamma*l'*l
# s.t. A*q >= 0
if verbose is False:
options["show_progress"] = False
old_options = cv.solvers.options.copy()
cv.solvers.options.clear()
cv.solvers.options.update(options)
if solver == 'conelp':
# Use conelp
z = lambda m,n: cv.spmatrix([],[],[],(m,n))
G = cv.sparse([[-A,z(2,n),M,z(nB+2,n)],[z(n+2,nC),C,z(nB+2,nC)],
[z(n,1),-1,1,z(n+nB+2,1)],[z(2*n+2,1),-1,1,z(nB,1)],
[z(n+2,nB),B,z(2,nB),cv.spmatrix(1.0, range(nB), range(nB))]])
h = cv.matrix([z(n,1),.5,.5,eda,.5,.5,z(nB,1)])
c = cv.matrix([(cv.matrix(alpha, (1,n)) * A).T,z(nC,1),1,gamma,z(nB,1)])
res = cv.solvers.conelp(c, G, h, dims={'l':n,'q':[n+2,nB+2],'s':[]})
obj = res['primal objective']
else:
# Use qp
Mt, Ct, Bt = M.T, C.T, B.T
H = cv.sparse([[Mt*M, Ct*M, Bt*M], [Mt*C, Ct*C, Bt*C],
[Mt*B, Ct*B, Bt*B+gamma*cv.spmatrix(1.0, range(nB), range(nB))]])
f = cv.matrix([(cv.matrix(alpha, (1,n)) * A).T - Mt*eda, -(Ct*eda), -(Bt*eda)])
res = cv.solvers.qp(H, f, cv.spmatrix(-A.V, A.I, A.J, (n,len(f))),
cv.matrix(0., (n,1)), solver=solver)
obj = res['primal objective'] + .5 * (eda.T * eda)
cv.solvers.options.clear()
cv.solvers.options.update(old_options)
l = res['x'][-nB:]
d = res['x'][n:n+nC]
tonic = B*l + C*d
q = res['x'][:n]
p = A * q
phasic = M * q
e = eda - phasic - tonic
phasic = np.array(phasic)[:,0]
# results = (np.array(a).ravel() for a in (r, t, p, l, d, e, obj))
return(tonic, phasic) | python | def cvxEDA(eda, sampling_rate=1000, tau0=2., tau1=0.7, delta_knot=10., alpha=8e-4, gamma=1e-2, solver=None, verbose=False, options={'reltol':1e-9}):
"""
A convex optimization approach to electrodermal activity processing (CVXEDA).
This function implements the cvxEDA algorithm described in "cvxEDA: a
Convex Optimization Approach to Electrodermal Activity Processing" (Greco et al., 2015).
Parameters
----------
eda : list or array
raw EDA signal array.
sampling_rate : int
Sampling rate (samples/second).
tau0 : float
Slow time constant of the Bateman function.
tau1 : float
Fast time constant of the Bateman function.
delta_knot : float
Time between knots of the tonic spline function.
alpha : float
Penalization for the sparse SMNA driver.
gamma : float
Penalization for the tonic spline coefficients.
solver : bool
Sparse QP solver to be used, see cvxopt.solvers.qp
verbose : bool
Print progress?
options : dict
Solver options, see http://cvxopt.org/userguide/coneprog.html#algorithm-parameters
Returns
----------
phasic : numpy.array
The phasic component.
Notes
----------
*Authors*
- Luca Citi (https://github.com/lciti)
- Alberto Greco
*Dependencies*
- cvxopt
- numpy
*See Also*
- cvxEDA: https://github.com/lciti/cvxEDA
References
-----------
- Greco, A., Valenza, G., & Scilingo, E. P. (2016). Evaluation of CDA and CvxEDA Models. In Advances in Electrodermal Activity Processing with Applications for Mental Health (pp. 35-43). Springer International Publishing.
- Greco, A., Valenza, G., Lanata, A., Scilingo, E. P., & Citi, L. (2016). cvxEDA: A convex optimization approach to electrodermal activity processing. IEEE Transactions on Biomedical Engineering, 63(4), 797-804.
"""
frequency = 1/sampling_rate
# Normalizing signal
eda = z_score(eda)
eda = np.array(eda)[:,0]
n = len(eda)
eda = eda.astype('double')
eda = cv.matrix(eda)
# bateman ARMA model
a1 = 1./min(tau1, tau0) # a1 > a0
a0 = 1./max(tau1, tau0)
ar = np.array([(a1*frequency + 2.) * (a0*frequency + 2.), 2.*a1*a0*frequency**2 - 8.,
(a1*frequency - 2.) * (a0*frequency - 2.)]) / ((a1 - a0) * frequency**2)
ma = np.array([1., 2., 1.])
# matrices for ARMA model
i = np.arange(2, n)
A = cv.spmatrix(np.tile(ar, (n-2,1)), np.c_[i,i,i], np.c_[i,i-1,i-2], (n,n))
M = cv.spmatrix(np.tile(ma, (n-2,1)), np.c_[i,i,i], np.c_[i,i-1,i-2], (n,n))
# spline
delta_knot_s = int(round(delta_knot / frequency))
spl = np.r_[np.arange(1.,delta_knot_s), np.arange(delta_knot_s, 0., -1.)] # order 1
spl = np.convolve(spl, spl, 'full')
spl /= max(spl)
# matrix of spline regressors
i = np.c_[np.arange(-(len(spl)//2), (len(spl)+1)//2)] + np.r_[np.arange(0, n, delta_knot_s)]
nB = i.shape[1]
j = np.tile(np.arange(nB), (len(spl),1))
p = np.tile(spl, (nB,1)).T
valid = (i >= 0) & (i < n)
B = cv.spmatrix(p[valid], i[valid], j[valid])
# trend
C = cv.matrix(np.c_[np.ones(n), np.arange(1., n+1.)/n])
nC = C.size[1]
# Solve the problem:
# .5*(M*q + B*l + C*d - eda)^2 + alpha*sum(A,1)*p + .5*gamma*l'*l
# s.t. A*q >= 0
if verbose is False:
options["show_progress"] = False
old_options = cv.solvers.options.copy()
cv.solvers.options.clear()
cv.solvers.options.update(options)
if solver == 'conelp':
# Use conelp
z = lambda m,n: cv.spmatrix([],[],[],(m,n))
G = cv.sparse([[-A,z(2,n),M,z(nB+2,n)],[z(n+2,nC),C,z(nB+2,nC)],
[z(n,1),-1,1,z(n+nB+2,1)],[z(2*n+2,1),-1,1,z(nB,1)],
[z(n+2,nB),B,z(2,nB),cv.spmatrix(1.0, range(nB), range(nB))]])
h = cv.matrix([z(n,1),.5,.5,eda,.5,.5,z(nB,1)])
c = cv.matrix([(cv.matrix(alpha, (1,n)) * A).T,z(nC,1),1,gamma,z(nB,1)])
res = cv.solvers.conelp(c, G, h, dims={'l':n,'q':[n+2,nB+2],'s':[]})
obj = res['primal objective']
else:
# Use qp
Mt, Ct, Bt = M.T, C.T, B.T
H = cv.sparse([[Mt*M, Ct*M, Bt*M], [Mt*C, Ct*C, Bt*C],
[Mt*B, Ct*B, Bt*B+gamma*cv.spmatrix(1.0, range(nB), range(nB))]])
f = cv.matrix([(cv.matrix(alpha, (1,n)) * A).T - Mt*eda, -(Ct*eda), -(Bt*eda)])
res = cv.solvers.qp(H, f, cv.spmatrix(-A.V, A.I, A.J, (n,len(f))),
cv.matrix(0., (n,1)), solver=solver)
obj = res['primal objective'] + .5 * (eda.T * eda)
cv.solvers.options.clear()
cv.solvers.options.update(old_options)
l = res['x'][-nB:]
d = res['x'][n:n+nC]
tonic = B*l + C*d
q = res['x'][:n]
p = A * q
phasic = M * q
e = eda - phasic - tonic
phasic = np.array(phasic)[:,0]
# results = (np.array(a).ravel() for a in (r, t, p, l, d, e, obj))
return(tonic, phasic) | ['def', 'cvxEDA', '(', 'eda', ',', 'sampling_rate', '=', '1000', ',', 'tau0', '=', '2.', ',', 'tau1', '=', '0.7', ',', 'delta_knot', '=', '10.', ',', 'alpha', '=', '8e-4', ',', 'gamma', '=', '1e-2', ',', 'solver', '=', 'None', ',', 'verbose', '=', 'False', ',', 'options', '=', '{', "'reltol'", ':', '1e-9', '}', ')', ':', 'frequency', '=', '1', '/', 'sampling_rate', '# Normalizing signal', 'eda', '=', 'z_score', '(', 'eda', ')', 'eda', '=', 'np', '.', 'array', '(', 'eda', ')', '[', ':', ',', '0', ']', 'n', '=', 'len', '(', 'eda', ')', 'eda', '=', 'eda', '.', 'astype', '(', "'double'", ')', 'eda', '=', 'cv', '.', 'matrix', '(', 'eda', ')', '# bateman ARMA model', 'a1', '=', '1.', '/', 'min', '(', 'tau1', ',', 'tau0', ')', '# a1 > a0', 'a0', '=', '1.', '/', 'max', '(', 'tau1', ',', 'tau0', ')', 'ar', '=', 'np', '.', 'array', '(', '[', '(', 'a1', '*', 'frequency', '+', '2.', ')', '*', '(', 'a0', '*', 'frequency', '+', '2.', ')', ',', '2.', '*', 'a1', '*', 'a0', '*', 'frequency', '**', '2', '-', '8.', ',', '(', 'a1', '*', 'frequency', '-', '2.', ')', '*', '(', 'a0', '*', 'frequency', '-', '2.', ')', ']', ')', '/', '(', '(', 'a1', '-', 'a0', ')', '*', 'frequency', '**', '2', ')', 'ma', '=', 'np', '.', 'array', '(', '[', '1.', ',', '2.', ',', '1.', ']', ')', '# matrices for ARMA model', 'i', '=', 'np', '.', 'arange', '(', '2', ',', 'n', ')', 'A', '=', 'cv', '.', 'spmatrix', '(', 'np', '.', 'tile', '(', 'ar', ',', '(', 'n', '-', '2', ',', '1', ')', ')', ',', 'np', '.', 'c_', '[', 'i', ',', 'i', ',', 'i', ']', ',', 'np', '.', 'c_', '[', 'i', ',', 'i', '-', '1', ',', 'i', '-', '2', ']', ',', '(', 'n', ',', 'n', ')', ')', 'M', '=', 'cv', '.', 'spmatrix', '(', 'np', '.', 'tile', '(', 'ma', ',', '(', 'n', '-', '2', ',', '1', ')', ')', ',', 'np', '.', 'c_', '[', 'i', ',', 'i', ',', 'i', ']', ',', 'np', '.', 'c_', '[', 'i', ',', 'i', '-', '1', ',', 'i', '-', '2', ']', ',', '(', 'n', ',', 'n', ')', ')', '# spline', 'delta_knot_s', '=', 'int', '(', 'round', '(', 'delta_knot', '/', 'frequency', ')', ')', 'spl', '=', 'np', '.', 'r_', '[', 'np', '.', 'arange', '(', '1.', ',', 'delta_knot_s', ')', ',', 'np', '.', 'arange', '(', 'delta_knot_s', ',', '0.', ',', '-', '1.', ')', ']', '# order 1', 'spl', '=', 'np', '.', 'convolve', '(', 'spl', ',', 'spl', ',', "'full'", ')', 'spl', '/=', 'max', '(', 'spl', ')', '# matrix of spline regressors', 'i', '=', 'np', '.', 'c_', '[', 'np', '.', 'arange', '(', '-', '(', 'len', '(', 'spl', ')', '//', '2', ')', ',', '(', 'len', '(', 'spl', ')', '+', '1', ')', '//', '2', ')', ']', '+', 'np', '.', 'r_', '[', 'np', '.', 'arange', '(', '0', ',', 'n', ',', 'delta_knot_s', ')', ']', 'nB', '=', 'i', '.', 'shape', '[', '1', ']', 'j', '=', 'np', '.', 'tile', '(', 'np', '.', 'arange', '(', 'nB', ')', ',', '(', 'len', '(', 'spl', ')', ',', '1', ')', ')', 'p', '=', 'np', '.', 'tile', '(', 'spl', ',', '(', 'nB', ',', '1', ')', ')', '.', 'T', 'valid', '=', '(', 'i', '>=', '0', ')', '&', '(', 'i', '<', 'n', ')', 'B', '=', 'cv', '.', 'spmatrix', '(', 'p', '[', 'valid', ']', ',', 'i', '[', 'valid', ']', ',', 'j', '[', 'valid', ']', ')', '# trend', 'C', '=', 'cv', '.', 'matrix', '(', 'np', '.', 'c_', '[', 'np', '.', 'ones', '(', 'n', ')', ',', 'np', '.', 'arange', '(', '1.', ',', 'n', '+', '1.', ')', '/', 'n', ']', ')', 'nC', '=', 'C', '.', 'size', '[', '1', ']', '# Solve the problem:', "# .5*(M*q + B*l + C*d - eda)^2 + alpha*sum(A,1)*p + .5*gamma*l'*l", '# s.t. 
A*q >= 0', 'if', 'verbose', 'is', 'False', ':', 'options', '[', '"show_progress"', ']', '=', 'False', 'old_options', '=', 'cv', '.', 'solvers', '.', 'options', '.', 'copy', '(', ')', 'cv', '.', 'solvers', '.', 'options', '.', 'clear', '(', ')', 'cv', '.', 'solvers', '.', 'options', '.', 'update', '(', 'options', ')', 'if', 'solver', '==', "'conelp'", ':', '# Use conelp', 'z', '=', 'lambda', 'm', ',', 'n', ':', 'cv', '.', 'spmatrix', '(', '[', ']', ',', '[', ']', ',', '[', ']', ',', '(', 'm', ',', 'n', ')', ')', 'G', '=', 'cv', '.', 'sparse', '(', '[', '[', '-', 'A', ',', 'z', '(', '2', ',', 'n', ')', ',', 'M', ',', 'z', '(', 'nB', '+', '2', ',', 'n', ')', ']', ',', '[', 'z', '(', 'n', '+', '2', ',', 'nC', ')', ',', 'C', ',', 'z', '(', 'nB', '+', '2', ',', 'nC', ')', ']', ',', '[', 'z', '(', 'n', ',', '1', ')', ',', '-', '1', ',', '1', ',', 'z', '(', 'n', '+', 'nB', '+', '2', ',', '1', ')', ']', ',', '[', 'z', '(', '2', '*', 'n', '+', '2', ',', '1', ')', ',', '-', '1', ',', '1', ',', 'z', '(', 'nB', ',', '1', ')', ']', ',', '[', 'z', '(', 'n', '+', '2', ',', 'nB', ')', ',', 'B', ',', 'z', '(', '2', ',', 'nB', ')', ',', 'cv', '.', 'spmatrix', '(', '1.0', ',', 'range', '(', 'nB', ')', ',', 'range', '(', 'nB', ')', ')', ']', ']', ')', 'h', '=', 'cv', '.', 'matrix', '(', '[', 'z', '(', 'n', ',', '1', ')', ',', '.5', ',', '.5', ',', 'eda', ',', '.5', ',', '.5', ',', 'z', '(', 'nB', ',', '1', ')', ']', ')', 'c', '=', 'cv', '.', 'matrix', '(', '[', '(', 'cv', '.', 'matrix', '(', 'alpha', ',', '(', '1', ',', 'n', ')', ')', '*', 'A', ')', '.', 'T', ',', 'z', '(', 'nC', ',', '1', ')', ',', '1', ',', 'gamma', ',', 'z', '(', 'nB', ',', '1', ')', ']', ')', 'res', '=', 'cv', '.', 'solvers', '.', 'conelp', '(', 'c', ',', 'G', ',', 'h', ',', 'dims', '=', '{', "'l'", ':', 'n', ',', "'q'", ':', '[', 'n', '+', '2', ',', 'nB', '+', '2', ']', ',', "'s'", ':', '[', ']', '}', ')', 'obj', '=', 'res', '[', "'primal objective'", ']', 'else', ':', '# Use qp', 'Mt', ',', 'Ct', ',', 'Bt', '=', 'M', '.', 'T', ',', 'C', '.', 'T', ',', 'B', '.', 'T', 'H', '=', 'cv', '.', 'sparse', '(', '[', '[', 'Mt', '*', 'M', ',', 'Ct', '*', 'M', ',', 'Bt', '*', 'M', ']', ',', '[', 'Mt', '*', 'C', ',', 'Ct', '*', 'C', ',', 'Bt', '*', 'C', ']', ',', '[', 'Mt', '*', 'B', ',', 'Ct', '*', 'B', ',', 'Bt', '*', 'B', '+', 'gamma', '*', 'cv', '.', 'spmatrix', '(', '1.0', ',', 'range', '(', 'nB', ')', ',', 'range', '(', 'nB', ')', ')', ']', ']', ')', 'f', '=', 'cv', '.', 'matrix', '(', '[', '(', 'cv', '.', 'matrix', '(', 'alpha', ',', '(', '1', ',', 'n', ')', ')', '*', 'A', ')', '.', 'T', '-', 'Mt', '*', 'eda', ',', '-', '(', 'Ct', '*', 'eda', ')', ',', '-', '(', 'Bt', '*', 'eda', ')', ']', ')', 'res', '=', 'cv', '.', 'solvers', '.', 'qp', '(', 'H', ',', 'f', ',', 'cv', '.', 'spmatrix', '(', '-', 'A', '.', 'V', ',', 'A', '.', 'I', ',', 'A', '.', 'J', ',', '(', 'n', ',', 'len', '(', 'f', ')', ')', ')', ',', 'cv', '.', 'matrix', '(', '0.', ',', '(', 'n', ',', '1', ')', ')', ',', 'solver', '=', 'solver', ')', 'obj', '=', 'res', '[', "'primal objective'", ']', '+', '.5', '*', '(', 'eda', '.', 'T', '*', 'eda', ')', 'cv', '.', 'solvers', '.', 'options', '.', 'clear', '(', ')', 'cv', '.', 'solvers', '.', 'options', '.', 'update', '(', 'old_options', ')', 'l', '=', 'res', '[', "'x'", ']', '[', '-', 'nB', ':', ']', 'd', '=', 'res', '[', "'x'", ']', '[', 'n', ':', 'n', '+', 'nC', ']', 'tonic', '=', 'B', '*', 'l', '+', 'C', '*', 'd', 'q', '=', 'res', '[', "'x'", ']', '[', ':', 'n', ']', 'p', '=', 'A', '*', 'q', 'phasic', '=', 'M', '*', 'q', 'e', '=', 
'eda', '-', 'phasic', '-', 'tonic', 'phasic', '=', 'np', '.', 'array', '(', 'phasic', ')', '[', ':', ',', '0', ']', '# results = (np.array(a).ravel() for a in (r, t, p, l, d, e, obj))', 'return', '(', 'tonic', ',', 'phasic', ')'] | A convex optimization approach to electrodermal activity processing (CVXEDA).
This function implements the cvxEDA algorithm described in "cvxEDA: a
Convex Optimization Approach to Electrodermal Activity Processing" (Greco et al., 2015).
Parameters
----------
eda : list or array
raw EDA signal array.
sampling_rate : int
Sampling rate (samples/second).
tau0 : float
Slow time constant of the Bateman function.
tau1 : float
Fast time constant of the Bateman function.
delta_knot : float
Time between knots of the tonic spline function.
alpha : float
Penalization for the sparse SMNA driver.
gamma : float
Penalization for the tonic spline coefficients.
solver : bool
Sparse QP solver to be used, see cvxopt.solvers.qp
verbose : bool
Print progress?
options : dict
Solver options, see http://cvxopt.org/userguide/coneprog.html#algorithm-parameters
Returns
----------
phasic : numpy.array
The phasic component.
Notes
----------
*Authors*
- Luca Citi (https://github.com/lciti)
- Alberto Greco
*Dependencies*
- cvxopt
- numpy
*See Also*
- cvxEDA: https://github.com/lciti/cvxEDA
References
-----------
- Greco, A., Valenza, G., & Scilingo, E. P. (2016). Evaluation of CDA and CvxEDA Models. In Advances in Electrodermal Activity Processing with Applications for Mental Health (pp. 35-43). Springer International Publishing.
- Greco, A., Valenza, G., Lanata, A., Scilingo, E. P., & Citi, L. (2016). cvxEDA: A convex optimization approach to electrodermal activity processing. IEEE Transactions on Biomedical Engineering, 63(4), 797-804. | ['A', 'convex', 'optimization', 'approach', 'to', 'electrodermal', 'activity', 'processing', '(', 'CVXEDA', ')', '.'] | train | https://github.com/neuropsychology/NeuroKit.py/blob/c9589348fbbde0fa7e986048c48f38e6b488adfe/neurokit/bio/bio_eda.py#L182-L321 |
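A minimal usage sketch for the cvxEDA function in the row above. It assumes neurokit (with its numpy and cvxopt dependencies) is installed and that the function is importable from the module in the linked URL; the import path, sampling rate and synthetic signal are illustrative assumptions, not taken from the source.

import numpy as np
from neurokit.bio.bio_eda import cvxEDA  # assumed import path, see URL above

sampling_rate = 25
t = np.arange(0, 60, 1.0 / sampling_rate)
# Synthetic EDA-like trace: a slow drift plus two phasic bumps.
eda = 2.0 + 0.01 * t + np.exp(-(t - 20) ** 2 / 2.0) + np.exp(-(t - 40) ** 2 / 4.0)
tonic, phasic = cvxEDA(eda, sampling_rate=sampling_rate)
print(phasic.shape)  # one phasic sample per input sample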
4,954 | square/pylink | pylink/jlink.py | JLink.halt | def halt(self):
"""Halts the CPU Core.
Args:
self (JLink): the ``JLink`` instance
Returns:
``True`` if halted, ``False`` otherwise.
"""
res = int(self._dll.JLINKARM_Halt())
if res == 0:
time.sleep(1)
return True
return False | python | def halt(self):
"""Halts the CPU Core.
Args:
self (JLink): the ``JLink`` instance
Returns:
``True`` if halted, ``False`` otherwise.
"""
res = int(self._dll.JLINKARM_Halt())
if res == 0:
time.sleep(1)
return True
return False | ['def', 'halt', '(', 'self', ')', ':', 'res', '=', 'int', '(', 'self', '.', '_dll', '.', 'JLINKARM_Halt', '(', ')', ')', 'if', 'res', '==', '0', ':', 'time', '.', 'sleep', '(', '1', ')', 'return', 'True', 'return', 'False'] | Halts the CPU Core.
Args:
self (JLink): the ``JLink`` instance
Returns:
``True`` if halted, ``False`` otherwise. | ['Halts', 'the', 'CPU', 'Core', '.'] | train | https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/jlink.py#L2121-L2134 |
4,955 | senaite/senaite.core | bika/lims/browser/analyses/view.py | AnalysesView._folder_item_reflex_icons | def _folder_item_reflex_icons(self, analysis_brain, item):
"""Adds an icon to the item dictionary if the analysis has been
automatically generated due to a reflex rule
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
if not analysis_brain.getIsReflexAnalysis:
# Do nothing
return
img = get_image('reflexrule.png',
title=t(_('It comes from a reflex rule')))
self._append_html_element(item, 'Service', img) | python | def _folder_item_reflex_icons(self, analysis_brain, item):
"""Adds an icon to the item dictionary if the analysis has been
automatically generated due to a reflex rule
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
if not analysis_brain.getIsReflexAnalysis:
# Do nothing
return
img = get_image('reflexrule.png',
title=t(_('It comes from a reflex rule')))
self._append_html_element(item, 'Service', img) | ['def', '_folder_item_reflex_icons', '(', 'self', ',', 'analysis_brain', ',', 'item', ')', ':', 'if', 'not', 'analysis_brain', '.', 'getIsReflexAnalysis', ':', '# Do nothing', 'return', 'img', '=', 'get_image', '(', "'reflexrule.png'", ',', 'title', '=', 't', '(', '_', '(', "'It comes form a reflex rule'", ')', ')', ')', 'self', '.', '_append_html_element', '(', 'item', ',', "'Service'", ',', 'img', ')'] | Adds an icon to the item dictionary if the analysis has been
automatically generated due to a reflex rule
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row | ['Adds', 'an', 'icon', 'to', 'the', 'item', 'dictionary', 'if', 'the', 'analysis', 'has', 'been', 'automatically', 'generated', 'due', 'to', 'a', 'reflex', 'rule'] | train | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/analyses/view.py#L1123-L1135 |
4,956 | HDI-Project/MLBlocks | mlblocks/mlpipeline.py | MLPipeline.load | def load(cls, path):
"""Create a new MLPipeline from a JSON specification.
The JSON file format is the same as the one created by the `to_dict` method.
Args:
path (str): Path of the JSON file to load.
Returns:
MLPipeline:
A new MLPipeline instance with the specification found
in the JSON file.
"""
with open(path, 'r') as in_file:
metadata = json.load(in_file)
return cls.from_dict(metadata) | python | def load(cls, path):
"""Create a new MLPipeline from a JSON specification.
The JSON file format is the same as the one created by the `to_dict` method.
Args:
path (str): Path of the JSON file to load.
Returns:
MLPipeline:
A new MLPipeline instance with the specification found
in the JSON file.
"""
with open(path, 'r') as in_file:
metadata = json.load(in_file)
return cls.from_dict(metadata) | ['def', 'load', '(', 'cls', ',', 'path', ')', ':', 'with', 'open', '(', 'path', ',', "'r'", ')', 'as', 'in_file', ':', 'metadata', '=', 'json', '.', 'load', '(', 'in_file', ')', 'return', 'cls', '.', 'from_dict', '(', 'metadata', ')'] | Create a new MLPipeline from a JSON specification.
The JSON file format is the same as the one created by the `to_dict` method.
Args:
path (str): Path of the JSON file to load.
Returns:
MLPipeline:
A new MLPipeline instance with the specification found
in the JSON file. | ['Create', 'a', 'new', 'MLPipeline', 'from', 'a', 'JSON', 'specification', '.'] | train | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/mlpipeline.py#L369-L385 |
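A small, hedged sketch of the load() round trip described above; it assumes the mlblocks package is installed and that 'pipeline.json' (a made-up file name) was previously written with MLPipeline.save(), i.e. it holds the same JSON structure that to_dict() produces.

from mlblocks import MLPipeline

pipeline = MLPipeline.load('pipeline.json')   # hypothetical path
print(sorted(pipeline.to_dict().keys()))       # inspect the specification keys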
4,957 | bram85/topydo | topydo/ui/CLIApplicationBase.py | version | def version():
""" Print the current version and exit. """
from topydo.lib.Version import VERSION, LICENSE
print("topydo {}\n".format(VERSION))
print(LICENSE)
sys.exit(0) | python | def version():
""" Print the current version and exit. """
from topydo.lib.Version import VERSION, LICENSE
print("topydo {}\n".format(VERSION))
print(LICENSE)
sys.exit(0) | ['def', 'version', '(', ')', ':', 'from', 'topydo', '.', 'lib', '.', 'Version', 'import', 'VERSION', ',', 'LICENSE', 'print', '(', '"topydo {}\\n"', '.', 'format', '(', 'VERSION', ')', ')', 'print', '(', 'LICENSE', ')', 'sys', '.', 'exit', '(', '0', ')'] | Print the current version and exit. | ['Print', 'the', 'current', 'version', 'and', 'exit', '.'] | train | https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/ui/CLIApplicationBase.py#L130-L135 |
4,958 | zhanglab/psamm | psamm/importer.py | reactions_to_files | def reactions_to_files(model, dest, writer, split_subsystem):
"""Turn the reaction subsystems into their own files.
If a subsystem has a number of reactions over the threshold, it gets its
own YAML file. All other reactions, those that don't have a subsystem or
are in a subsystem that falls below the threshold, get added to a common
reaction file.
Args:
model: :class:`psamm_import.model.MetabolicModel`.
dest: output path for model files.
writer: :class:`psamm.datasource.native.ModelWriter`.
split_subsystem: Divide reactions into multiple files by subsystem.
"""
def safe_file_name(origin_name):
safe_name = re.sub(
r'\W+', '_', origin_name, flags=re.UNICODE)
safe_name = re.sub(
r'_+', '_', safe_name.lower(), flags=re.UNICODE)
safe_name = safe_name.strip('_')
return safe_name
common_reactions = []
reaction_files = []
if not split_subsystem:
common_reactions = sorted(model.reactions, key=lambda r: r.id)
if len(common_reactions) > 0:
reaction_file = 'reactions.yaml'
with open(os.path.join(dest, reaction_file), 'w') as f:
writer.write_reactions(f, common_reactions)
reaction_files.append(reaction_file)
else:
subsystems = {}
for reaction in sorted(model.reactions, key=lambda r: r.id):
if 'subsystem' in reaction.properties:
subsystem_file = safe_file_name(
reaction.properties['subsystem'])
subsystems.setdefault(subsystem_file, []).append(reaction)
else:
common_reactions.append(reaction)
subsystem_folder = 'reactions'
sub_existance = False
for subsystem_file, reactions in iteritems(subsystems):
if len(reactions) < _MAX_REACTION_COUNT:
for reaction in reactions:
common_reactions.append(reaction)
else:
if len(reactions) > 0:
mkdir_p(os.path.join(dest, subsystem_folder))
subsystem_file = os.path.join(
subsystem_folder, '{}.yaml'.format(subsystem_file))
with open(os.path.join(dest, subsystem_file), 'w') as f:
writer.write_reactions(f, reactions)
reaction_files.append(subsystem_file)
sub_existance = True
reaction_files.sort()
if sub_existance:
reaction_file = os.path.join(
subsystem_folder, 'other_reactions.yaml')
else:
reaction_file = 'reactions.yaml'
if len(common_reactions) > 0:
with open(os.path.join(dest, reaction_file), 'w') as f:
writer.write_reactions(f, common_reactions)
reaction_files.append(reaction_file)
return reaction_files | python | def reactions_to_files(model, dest, writer, split_subsystem):
"""Turn the reaction subsystems into their own files.
If a subsystem has a number of reactions over the threshold, it gets its
own YAML file. All other reactions, those that don't have a subsystem or
are in a subsystem that falls below the threshold, get added to a common
reaction file.
Args:
model: :class:`psamm_import.model.MetabolicModel`.
dest: output path for model files.
writer: :class:`psamm.datasource.native.ModelWriter`.
split_subsystem: Divide reactions into multiple files by subsystem.
"""
def safe_file_name(origin_name):
safe_name = re.sub(
r'\W+', '_', origin_name, flags=re.UNICODE)
safe_name = re.sub(
r'_+', '_', safe_name.lower(), flags=re.UNICODE)
safe_name = safe_name.strip('_')
return safe_name
common_reactions = []
reaction_files = []
if not split_subsystem:
common_reactions = sorted(model.reactions, key=lambda r: r.id)
if len(common_reactions) > 0:
reaction_file = 'reactions.yaml'
with open(os.path.join(dest, reaction_file), 'w') as f:
writer.write_reactions(f, common_reactions)
reaction_files.append(reaction_file)
else:
subsystems = {}
for reaction in sorted(model.reactions, key=lambda r: r.id):
if 'subsystem' in reaction.properties:
subsystem_file = safe_file_name(
reaction.properties['subsystem'])
subsystems.setdefault(subsystem_file, []).append(reaction)
else:
common_reactions.append(reaction)
subsystem_folder = 'reactions'
sub_existance = False
for subsystem_file, reactions in iteritems(subsystems):
if len(reactions) < _MAX_REACTION_COUNT:
for reaction in reactions:
common_reactions.append(reaction)
else:
if len(reactions) > 0:
mkdir_p(os.path.join(dest, subsystem_folder))
subsystem_file = os.path.join(
subsystem_folder, '{}.yaml'.format(subsystem_file))
with open(os.path.join(dest, subsystem_file), 'w') as f:
writer.write_reactions(f, reactions)
reaction_files.append(subsystem_file)
sub_existance = True
reaction_files.sort()
if sub_existance:
reaction_file = os.path.join(
subsystem_folder, 'other_reactions.yaml')
else:
reaction_file = 'reactions.yaml'
if len(common_reactions) > 0:
with open(os.path.join(dest, reaction_file), 'w') as f:
writer.write_reactions(f, common_reactions)
reaction_files.append(reaction_file)
return reaction_files | ['def', 'reactions_to_files', '(', 'model', ',', 'dest', ',', 'writer', ',', 'split_subsystem', ')', ':', 'def', 'safe_file_name', '(', 'origin_name', ')', ':', 'safe_name', '=', 're', '.', 'sub', '(', "r'\\W+'", ',', "'_'", ',', 'origin_name', ',', 'flags', '=', 're', '.', 'UNICODE', ')', 'safe_name', '=', 're', '.', 'sub', '(', "r'_+'", ',', "'_'", ',', 'safe_name', '.', 'lower', '(', ')', ',', 'flags', '=', 're', '.', 'UNICODE', ')', 'safe_name', '=', 'safe_name', '.', 'strip', '(', "'_'", ')', 'return', 'safe_name', 'common_reactions', '=', '[', ']', 'reaction_files', '=', '[', ']', 'if', 'not', 'split_subsystem', ':', 'common_reactions', '=', 'sorted', '(', 'model', '.', 'reactions', ',', 'key', '=', 'lambda', 'r', ':', 'r', '.', 'id', ')', 'if', 'len', '(', 'common_reactions', ')', '>', '0', ':', 'reaction_file', '=', "'reactions.yaml'", 'with', 'open', '(', 'os', '.', 'path', '.', 'join', '(', 'dest', ',', 'reaction_file', ')', ',', "'w'", ')', 'as', 'f', ':', 'writer', '.', 'write_reactions', '(', 'f', ',', 'common_reactions', ')', 'reaction_files', '.', 'append', '(', 'reaction_file', ')', 'else', ':', 'subsystems', '=', '{', '}', 'for', 'reaction', 'in', 'sorted', '(', 'model', '.', 'reactions', ',', 'key', '=', 'lambda', 'r', ':', 'r', '.', 'id', ')', ':', 'if', "'subsystem'", 'in', 'reaction', '.', 'properties', ':', 'subsystem_file', '=', 'safe_file_name', '(', 'reaction', '.', 'properties', '[', "'subsystem'", ']', ')', 'subsystems', '.', 'setdefault', '(', 'subsystem_file', ',', '[', ']', ')', '.', 'append', '(', 'reaction', ')', 'else', ':', 'common_reactions', '.', 'append', '(', 'reaction', ')', 'subsystem_folder', '=', "'reactions'", 'sub_existance', '=', 'False', 'for', 'subsystem_file', ',', 'reactions', 'in', 'iteritems', '(', 'subsystems', ')', ':', 'if', 'len', '(', 'reactions', ')', '<', '_MAX_REACTION_COUNT', ':', 'for', 'reaction', 'in', 'reactions', ':', 'common_reactions', '.', 'append', '(', 'reaction', ')', 'else', ':', 'if', 'len', '(', 'reactions', ')', '>', '0', ':', 'mkdir_p', '(', 'os', '.', 'path', '.', 'join', '(', 'dest', ',', 'subsystem_folder', ')', ')', 'subsystem_file', '=', 'os', '.', 'path', '.', 'join', '(', 'subsystem_folder', ',', "'{}.yaml'", '.', 'format', '(', 'subsystem_file', ')', ')', 'with', 'open', '(', 'os', '.', 'path', '.', 'join', '(', 'dest', ',', 'subsystem_file', ')', ',', "'w'", ')', 'as', 'f', ':', 'writer', '.', 'write_reactions', '(', 'f', ',', 'reactions', ')', 'reaction_files', '.', 'append', '(', 'subsystem_file', ')', 'sub_existance', '=', 'True', 'reaction_files', '.', 'sort', '(', ')', 'if', 'sub_existance', ':', 'reaction_file', '=', 'os', '.', 'path', '.', 'join', '(', 'subsystem_folder', ',', "'other_reactions.yaml'", ')', 'else', ':', 'reaction_file', '=', "'reactions.yaml'", 'if', 'len', '(', 'common_reactions', ')', '>', '0', ':', 'with', 'open', '(', 'os', '.', 'path', '.', 'join', '(', 'dest', ',', 'reaction_file', ')', ',', "'w'", ')', 'as', 'f', ':', 'writer', '.', 'write_reactions', '(', 'f', ',', 'common_reactions', ')', 'reaction_files', '.', 'append', '(', 'reaction_file', ')', 'return', 'reaction_files'] | Turn the reaction subsystems into their own files.
If a subsystem has a number of reactions over the threshold, it gets its
own YAML file. All other reactions, those that don't have a subsystem or
are in a subsystem that falls below the threshold, get added to a common
reaction file.
Args:
model: :class:`psamm_import.model.MetabolicModel`.
dest: output path for model files.
writer: :class:`psamm.datasource.native.ModelWriter`.
split_subsystem: Divide reactions into multiple files by subsystem. | ['Turn', 'the', 'reaction', 'subsystems', 'into', 'their', 'own', 'files', '.'] | train | https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/importer.py#L234-L303 |
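The per-subsystem file names above come from the nested safe_file_name helper; this is a self-contained sketch of that normalization (the example subsystem name is illustrative).

import re

def safe_file_name(origin_name):
    # collapse non-word characters to single underscores and lowercase the result
    safe_name = re.sub(r'\W+', '_', origin_name, flags=re.UNICODE)
    safe_name = re.sub(r'_+', '_', safe_name.lower(), flags=re.UNICODE)
    return safe_name.strip('_')

print(safe_file_name('Glycolysis / Gluconeogenesis'))  # glycolysis_gluconeogenesis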
4,959 | danielperna84/pyhomematic | pyhomematic/devicetypes/actors.py | ColorEffectLight.set_hs_color | def set_hs_color(self, hue: float, saturation: float):
"""
Set a fixed color and also turn off effects in order to see the color.
:param hue: Hue component (range 0-1)
:param saturation: Saturation component (range 0-1). Yields white for values near 0, other values are
interpreted as 100% saturation.
The input values are the components of an HSV color without the value/brightness component.
Example colors:
* Green: set_hs_color(120/360, 1)
* Blue: set_hs_color(240/360, 1)
* Yellow: set_hs_color(60/360, 1)
* White: set_hs_color(0, 0)
"""
self.turn_off_effect()
if saturation < 0.1: # Special case (white)
hm_color = 200
else:
hm_color = int(round(max(min(hue, 1), 0) * 199))
self.setValue(key="COLOR", channel=self._color_channel, value=hm_color) | python | def set_hs_color(self, hue: float, saturation: float):
"""
Set a fixed color and also turn off effects in order to see the color.
:param hue: Hue component (range 0-1)
:param saturation: Saturation component (range 0-1). Yields white for values near 0, other values are
interpreted as 100% saturation.
The input values are the components of an HSV color without the value/brightness component.
Example colors:
* Green: set_hs_color(120/360, 1)
* Blue: set_hs_color(240/360, 1)
* Yellow: set_hs_color(60/360, 1)
* White: set_hs_color(0, 0)
"""
self.turn_off_effect()
if saturation < 0.1: # Special case (white)
hm_color = 200
else:
hm_color = int(round(max(min(hue, 1), 0) * 199))
self.setValue(key="COLOR", channel=self._color_channel, value=hm_color) | ['def', 'set_hs_color', '(', 'self', ',', 'hue', ':', 'float', ',', 'saturation', ':', 'float', ')', ':', 'self', '.', 'turn_off_effect', '(', ')', 'if', 'saturation', '<', '0.1', ':', '# Special case (white)', 'hm_color', '=', '200', 'else', ':', 'hm_color', '=', 'int', '(', 'round', '(', 'max', '(', 'min', '(', 'hue', ',', '1', ')', ',', '0', ')', '*', '199', ')', ')', 'self', '.', 'setValue', '(', 'key', '=', '"COLOR"', ',', 'channel', '=', 'self', '.', '_color_channel', ',', 'value', '=', 'hm_color', ')'] | Set a fixed color and also turn off effects in order to see the color.
:param hue: Hue component (range 0-1)
:param saturation: Saturation component (range 0-1). Yields white for values near 0, other values are
interpreted as 100% saturation.
The input values are the components of an HSV color without the value/brightness component.
Example colors:
* Green: set_hs_color(120/360, 1)
* Blue: set_hs_color(240/360, 1)
* Yellow: set_hs_color(60/360, 1)
* White: set_hs_color(0, 0) | ['Set', 'a', 'fixed', 'color', 'and', 'also', 'turn', 'off', 'effects', 'in', 'order', 'to', 'see', 'the', 'color', '.'] | train | https://github.com/danielperna84/pyhomematic/blob/8b91f3e84c83f05d289c740d507293a0d6759d8e/pyhomematic/devicetypes/actors.py#L534-L556 |
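A self-contained sketch of the hue/saturation mapping used in set_hs_color above: low saturation selects the special white value 200, otherwise the 0-1 hue is scaled onto the 0-199 colour value.

def hm_color_from_hs(hue, saturation):
    if saturation < 0.1:          # special case: white
        return 200
    return int(round(max(min(hue, 1), 0) * 199))

print(hm_color_from_hs(120 / 360, 1))  # green  -> 66
print(hm_color_from_hs(240 / 360, 1))  # blue   -> 133
print(hm_color_from_hs(0, 0))          # white  -> 200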
4,960 | cackharot/suds-py3 | suds/sax/enc.py | Encoder.decode | def decode(self, s):
"""
Decode special characters encodings found in string I{s}.
@param s: A string to decode.
@type s: str
@return: The decoded string.
@rtype: str
"""
if isinstance(s, str) and '&' in s:
for x in self.decodings:
s = s.replace(x[0], x[1])
return s | python | def decode(self, s):
"""
Decode special characters encodings found in string I{s}.
@param s: A string to decode.
@type s: str
@return: The decoded string.
@rtype: str
"""
if isinstance(s, str) and '&' in s:
for x in self.decodings:
s = s.replace(x[0], x[1])
return s | ['def', 'decode', '(', 'self', ',', 's', ')', ':', 'if', 'isinstance', '(', 's', ',', 'str', ')', 'and', "'&'", 'in', 's', ':', 'for', 'x', 'in', 'self', '.', 'decodings', ':', 's', '=', 's', '.', 'replace', '(', 'x', '[', '0', ']', ',', 'x', '[', '1', ']', ')', 'return', 's'] | Decode special characters encodings found in string I{s}.
@param s: A string to decode.
@type s: str
@return: The decoded string.
@rtype: str | ['Decode', 'special', 'characters', 'encodings', 'found', 'in', 'string', 'I', '{', 's', '}', '.'] | train | https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/sax/enc.py#L78-L89 |
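A hedged usage sketch of the decoder, assuming the suds-py3 package is installed; the sample string is illustrative.

from suds.sax.enc import Encoder

enc = Encoder()
print(enc.decode('&lt;a href=&quot;x&quot;&gt;&amp;&lt;/a&gt;'))
# expected: <a href="x">&</a>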
4,961 | ray-project/ray | python/ray/worker.py | shutdown | def shutdown(exiting_interpreter=False):
"""Disconnect the worker, and terminate processes started by ray.init().
This will automatically run at the end when a Python process that uses Ray
exits. It is ok to run this twice in a row. The primary use case for this
function is to cleanup state between tests.
Note that this will clear any remote function definitions, actor
definitions, and existing actors, so if you wish to use any previously
defined remote functions or actors after calling ray.shutdown(), then you
need to redefine them. If they were defined in an imported module, then you
will need to reload the module.
Args:
exiting_interpreter (bool): True if this is called by the atexit hook
and false otherwise. If we are exiting the interpreter, we will
wait a little while to print any extra error messages.
"""
if exiting_interpreter and global_worker.mode == SCRIPT_MODE:
# This is a duration to sleep before shutting down everything in order
# to make sure that log messages finish printing.
time.sleep(0.5)
disconnect()
# Disconnect global state from GCS.
global_state.disconnect()
# Shut down the Ray processes.
global _global_node
if _global_node is not None:
_global_node.kill_all_processes(check_alive=False, allow_graceful=True)
_global_node = None
global_worker.set_mode(None) | python | def shutdown(exiting_interpreter=False):
"""Disconnect the worker, and terminate processes started by ray.init().
This will automatically run at the end when a Python process that uses Ray
exits. It is ok to run this twice in a row. The primary use case for this
function is to cleanup state between tests.
Note that this will clear any remote function definitions, actor
definitions, and existing actors, so if you wish to use any previously
defined remote functions or actors after calling ray.shutdown(), then you
need to redefine them. If they were defined in an imported module, then you
will need to reload the module.
Args:
exiting_interpreter (bool): True if this is called by the atexit hook
and false otherwise. If we are exiting the interpreter, we will
wait a little while to print any extra error messages.
"""
if exiting_interpreter and global_worker.mode == SCRIPT_MODE:
# This is a duration to sleep before shutting down everything in order
# to make sure that log messages finish printing.
time.sleep(0.5)
disconnect()
# Disconnect global state from GCS.
global_state.disconnect()
# Shut down the Ray processes.
global _global_node
if _global_node is not None:
_global_node.kill_all_processes(check_alive=False, allow_graceful=True)
_global_node = None
global_worker.set_mode(None) | ['def', 'shutdown', '(', 'exiting_interpreter', '=', 'False', ')', ':', 'if', 'exiting_interpreter', 'and', 'global_worker', '.', 'mode', '==', 'SCRIPT_MODE', ':', '# This is a duration to sleep before shutting down everything in order', '# to make sure that log messages finish printing.', 'time', '.', 'sleep', '(', '0.5', ')', 'disconnect', '(', ')', '# Disconnect global state from GCS.', 'global_state', '.', 'disconnect', '(', ')', '# Shut down the Ray processes.', 'global', '_global_node', 'if', '_global_node', 'is', 'not', 'None', ':', '_global_node', '.', 'kill_all_processes', '(', 'check_alive', '=', 'False', ',', 'allow_graceful', '=', 'True', ')', '_global_node', '=', 'None', 'global_worker', '.', 'set_mode', '(', 'None', ')'] | Disconnect the worker, and terminate processes started by ray.init().
This will automatically run at the end when a Python process that uses Ray
exits. It is ok to run this twice in a row. The primary use case for this
function is to cleanup state between tests.
Note that this will clear any remote function definitions, actor
definitions, and existing actors, so if you wish to use any previously
defined remote functions or actors after calling ray.shutdown(), then you
need to redefine them. If they were defined in an imported module, then you
will need to reload the module.
Args:
exiting_interpreter (bool): True if this is called by the atexit hook
and false otherwise. If we are exiting the interpreter, we will
wait a little while to print any extra error messages. | ['Disconnect', 'the', 'worker', 'and', 'terminate', 'processes', 'started', 'by', 'ray', '.', 'init', '()', '.'] | train | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/worker.py#L1462-L1496 |
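A hedged sketch of the init/shutdown pairing described above (for example to reset state between tests); it assumes the ray package is installed.

import ray

ray.init(num_cpus=1)
# ... define remote functions / actors and run work here ...
ray.shutdown()   # previously defined remote functions must be redefined after this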
4,962 | AlecAivazis/graphql-over-kafka | nautilus/api/util/arg_string_from_dict.py | arg_string_from_dict | def arg_string_from_dict(arg_dict, **kwds):
"""
This function takes a series of dictionaries and creates an argument
string for a graphql query
"""
# the filters dictionary
filters = {
**arg_dict,
**kwds,
}
# return the correctly formed string
return ", ".join("{}: {}".format(key, json.dumps(value)) for key,value in filters.items()) | python | def arg_string_from_dict(arg_dict, **kwds):
"""
This function takes a series of dictionaries and creates an argument
string for a graphql query
"""
# the filters dictionary
filters = {
**arg_dict,
**kwds,
}
# return the correctly formed string
return ", ".join("{}: {}".format(key, json.dumps(value)) for key,value in filters.items()) | ['def', 'arg_string_from_dict', '(', 'arg_dict', ',', '*', '*', 'kwds', ')', ':', '# the filters dictionary', 'filters', '=', '{', '*', '*', 'arg_dict', ',', '*', '*', 'kwds', ',', '}', '# return the correctly formed string', 'return', '", "', '.', 'join', '(', '"{}: {}"', '.', 'format', '(', 'key', ',', 'json', '.', 'dumps', '(', 'value', ')', ')', 'for', 'key', ',', 'value', 'in', 'filters', '.', 'items', '(', ')', ')'] | This function takes a series of ditionaries and creates an argument
string for a graphql query | ['This', 'function', 'takes', 'a', 'series', 'of', 'ditionaries', 'and', 'creates', 'an', 'argument', 'string', 'for', 'a', 'graphql', 'query'] | train | https://github.com/AlecAivazis/graphql-over-kafka/blob/70e2acef27a2f87355590be1a6ca60ce3ab4d09c/nautilus/api/util/arg_string_from_dict.py#L3-L14 |
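A self-contained illustration of the argument-string construction above (the filter values are made up).

import json

def arg_string_from_dict(arg_dict, **kwds):
    # merge positional filters with keyword filters, then render key: json-value pairs
    filters = {**arg_dict, **kwds}
    return ", ".join("{}: {}".format(key, json.dumps(value))
                     for key, value in filters.items())

print(arg_string_from_dict({'id': 1}, name='alice'))   # id: 1, name: "alice"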
4,963 | pandas-dev/pandas | pandas/core/arrays/categorical.py | Categorical._from_inferred_categories | def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype, true_values=None):
"""
Construct a Categorical from inferred values.
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
true_values : list, optional
If none are provided, the default ones are
"True", "TRUE", and "true."
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified.
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors="coerce")
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors="coerce")
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors="coerce")
elif dtype.categories.is_boolean():
if true_values is None:
true_values = ["True", "TRUE", "true"]
cats = cats.isin(true_values)
if known_categories:
# Recode from observation order to dtype.categories order.
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# Sort categories and recode for unknown categories.
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True) | python | def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype, true_values=None):
"""
Construct a Categorical from inferred values.
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
true_values : list, optional
If none are provided, the default ones are
"True", "TRUE", and "true."
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified.
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors="coerce")
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors="coerce")
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors="coerce")
elif dtype.categories.is_boolean():
if true_values is None:
true_values = ["True", "TRUE", "true"]
cats = cats.isin(true_values)
if known_categories:
# Recode from observation order to dtype.categories order.
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# Sort categories and recode for unknown categories.
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True) | ['def', '_from_inferred_categories', '(', 'cls', ',', 'inferred_categories', ',', 'inferred_codes', ',', 'dtype', ',', 'true_values', '=', 'None', ')', ':', 'from', 'pandas', 'import', 'Index', ',', 'to_numeric', ',', 'to_datetime', ',', 'to_timedelta', 'cats', '=', 'Index', '(', 'inferred_categories', ')', 'known_categories', '=', '(', 'isinstance', '(', 'dtype', ',', 'CategoricalDtype', ')', 'and', 'dtype', '.', 'categories', 'is', 'not', 'None', ')', 'if', 'known_categories', ':', '# Convert to a specialized type with `dtype` if specified.', 'if', 'dtype', '.', 'categories', '.', 'is_numeric', '(', ')', ':', 'cats', '=', 'to_numeric', '(', 'inferred_categories', ',', 'errors', '=', '"coerce"', ')', 'elif', 'is_datetime64_dtype', '(', 'dtype', '.', 'categories', ')', ':', 'cats', '=', 'to_datetime', '(', 'inferred_categories', ',', 'errors', '=', '"coerce"', ')', 'elif', 'is_timedelta64_dtype', '(', 'dtype', '.', 'categories', ')', ':', 'cats', '=', 'to_timedelta', '(', 'inferred_categories', ',', 'errors', '=', '"coerce"', ')', 'elif', 'dtype', '.', 'categories', '.', 'is_boolean', '(', ')', ':', 'if', 'true_values', 'is', 'None', ':', 'true_values', '=', '[', '"True"', ',', '"TRUE"', ',', '"true"', ']', 'cats', '=', 'cats', '.', 'isin', '(', 'true_values', ')', 'if', 'known_categories', ':', '# Recode from observation order to dtype.categories order.', 'categories', '=', 'dtype', '.', 'categories', 'codes', '=', '_recode_for_categories', '(', 'inferred_codes', ',', 'cats', ',', 'categories', ')', 'elif', 'not', 'cats', '.', 'is_monotonic_increasing', ':', '# Sort categories and recode for unknown categories.', 'unsorted', '=', 'cats', '.', 'copy', '(', ')', 'categories', '=', 'cats', '.', 'sort_values', '(', ')', 'codes', '=', '_recode_for_categories', '(', 'inferred_codes', ',', 'unsorted', ',', 'categories', ')', 'dtype', '=', 'CategoricalDtype', '(', 'categories', ',', 'ordered', '=', 'False', ')', 'else', ':', 'dtype', '=', 'CategoricalDtype', '(', 'cats', ',', 'ordered', '=', 'False', ')', 'codes', '=', 'inferred_codes', 'return', 'cls', '(', 'codes', ',', 'dtype', '=', 'dtype', ',', 'fastpath', '=', 'True', ')'] | Construct a Categorical from inferred values.
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
true_values : list, optional
If none are provided, the default ones are
"True", "TRUE", and "true."
Returns
-------
Categorical | ['Construct', 'a', 'Categorical', 'from', 'inferred', 'values', '.'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L528-L586 |
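This private helper backs the public read_csv path for categorical dtypes; below is a hedged sketch through that public API (the CSV data is made up), showing that inferred categories come back sorted and that an explicit CategoricalDtype triggers the cast/recode branch.

import io
import pandas as pd
from pandas.api.types import CategoricalDtype

s = pd.read_csv(io.StringIO("x\nb\na\nb\n"), dtype={'x': 'category'})['x']
print(list(s.cat.categories))          # ['a', 'b'] -- inferred categories, sorted

s2 = pd.read_csv(io.StringIO("y\n1\n3\n2\n"), dtype={'y': CategoricalDtype([1, 2, 3])})['y']
print(list(s2.cat.codes))              # [0, 2, 1] -- values recoded to the given categories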
4,964 | GNS3/gns3-server | gns3server/compute/docker/docker_vm.py | DockerVM._add_ubridge_connection | def _add_ubridge_connection(self, nio, adapter_number):
"""
Creates a connection in uBridge.
:param nio: NIO instance or None if it's a dummy interface (if an interface is missing in ubridge you can't see it via ifconfig in the container)
:param adapter_number: adapter number
"""
try:
adapter = self._ethernet_adapters[adapter_number]
except IndexError:
raise DockerError("Adapter {adapter_number} doesn't exist on Docker container '{name}'".format(name=self.name,
adapter_number=adapter_number))
for index in range(4096):
if "tap-gns3-e{}".format(index) not in psutil.net_if_addrs():
adapter.host_ifc = "tap-gns3-e{}".format(str(index))
break
if adapter.host_ifc is None:
raise DockerError("Adapter {adapter_number} couldn't allocate interface on Docker container '{name}'. Too many Docker interfaces already exists".format(name=self.name,
adapter_number=adapter_number))
bridge_name = 'bridge{}'.format(adapter_number)
yield from self._ubridge_send('bridge create {}'.format(bridge_name))
self._bridges.add(bridge_name)
yield from self._ubridge_send('bridge add_nio_tap bridge{adapter_number} {hostif}'.format(adapter_number=adapter_number,
hostif=adapter.host_ifc))
log.debug("Move container %s adapter %s to namespace %s", self.name, adapter.host_ifc, self._namespace)
try:
yield from self._ubridge_send('docker move_to_ns {ifc} {ns} eth{adapter}'.format(ifc=adapter.host_ifc,
ns=self._namespace,
adapter=adapter_number))
except UbridgeError as e:
raise UbridgeNamespaceError(e)
if nio:
yield from self._connect_nio(adapter_number, nio) | python | def _add_ubridge_connection(self, nio, adapter_number):
"""
Creates a connection in uBridge.
:param nio: NIO instance or None if it's a dummy interface (if an interface is missing in ubridge you can't see it via ifconfig in the container)
:param adapter_number: adapter number
"""
try:
adapter = self._ethernet_adapters[adapter_number]
except IndexError:
raise DockerError("Adapter {adapter_number} doesn't exist on Docker container '{name}'".format(name=self.name,
adapter_number=adapter_number))
for index in range(4096):
if "tap-gns3-e{}".format(index) not in psutil.net_if_addrs():
adapter.host_ifc = "tap-gns3-e{}".format(str(index))
break
if adapter.host_ifc is None:
raise DockerError("Adapter {adapter_number} couldn't allocate interface on Docker container '{name}'. Too many Docker interfaces already exists".format(name=self.name,
adapter_number=adapter_number))
bridge_name = 'bridge{}'.format(adapter_number)
yield from self._ubridge_send('bridge create {}'.format(bridge_name))
self._bridges.add(bridge_name)
yield from self._ubridge_send('bridge add_nio_tap bridge{adapter_number} {hostif}'.format(adapter_number=adapter_number,
hostif=adapter.host_ifc))
log.debug("Move container %s adapter %s to namespace %s", self.name, adapter.host_ifc, self._namespace)
try:
yield from self._ubridge_send('docker move_to_ns {ifc} {ns} eth{adapter}'.format(ifc=adapter.host_ifc,
ns=self._namespace,
adapter=adapter_number))
except UbridgeError as e:
raise UbridgeNamespaceError(e)
if nio:
yield from self._connect_nio(adapter_number, nio) | ['def', '_add_ubridge_connection', '(', 'self', ',', 'nio', ',', 'adapter_number', ')', ':', 'try', ':', 'adapter', '=', 'self', '.', '_ethernet_adapters', '[', 'adapter_number', ']', 'except', 'IndexError', ':', 'raise', 'DockerError', '(', '"Adapter {adapter_number} doesn\'t exist on Docker container \'{name}\'"', '.', 'format', '(', 'name', '=', 'self', '.', 'name', ',', 'adapter_number', '=', 'adapter_number', ')', ')', 'for', 'index', 'in', 'range', '(', '4096', ')', ':', 'if', '"tap-gns3-e{}"', '.', 'format', '(', 'index', ')', 'not', 'in', 'psutil', '.', 'net_if_addrs', '(', ')', ':', 'adapter', '.', 'host_ifc', '=', '"tap-gns3-e{}"', '.', 'format', '(', 'str', '(', 'index', ')', ')', 'break', 'if', 'adapter', '.', 'host_ifc', 'is', 'None', ':', 'raise', 'DockerError', '(', '"Adapter {adapter_number} couldn\'t allocate interface on Docker container \'{name}\'. Too many Docker interfaces already exists"', '.', 'format', '(', 'name', '=', 'self', '.', 'name', ',', 'adapter_number', '=', 'adapter_number', ')', ')', 'bridge_name', '=', "'bridge{}'", '.', 'format', '(', 'adapter_number', ')', 'yield', 'from', 'self', '.', '_ubridge_send', '(', "'bridge create {}'", '.', 'format', '(', 'bridge_name', ')', ')', 'self', '.', '_bridges', '.', 'add', '(', 'bridge_name', ')', 'yield', 'from', 'self', '.', '_ubridge_send', '(', "'bridge add_nio_tap bridge{adapter_number} {hostif}'", '.', 'format', '(', 'adapter_number', '=', 'adapter_number', ',', 'hostif', '=', 'adapter', '.', 'host_ifc', ')', ')', 'log', '.', 'debug', '(', '"Move container %s adapter %s to namespace %s"', ',', 'self', '.', 'name', ',', 'adapter', '.', 'host_ifc', ',', 'self', '.', '_namespace', ')', 'try', ':', 'yield', 'from', 'self', '.', '_ubridge_send', '(', "'docker move_to_ns {ifc} {ns} eth{adapter}'", '.', 'format', '(', 'ifc', '=', 'adapter', '.', 'host_ifc', ',', 'ns', '=', 'self', '.', '_namespace', ',', 'adapter', '=', 'adapter_number', ')', ')', 'except', 'UbridgeError', 'as', 'e', ':', 'raise', 'UbridgeNamespaceError', '(', 'e', ')', 'if', 'nio', ':', 'yield', 'from', 'self', '.', '_connect_nio', '(', 'adapter_number', ',', 'nio', ')'] | Creates a connection in uBridge.
:param nio: NIO instance or None if it's a dummy interface (if an interface is missing in ubridge you can't see it via ifconfig in the container)
:param adapter_number: adapter number | ['Creates', 'a', 'connection', 'in', 'uBridge', '.'] | train | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/docker/docker_vm.py#L669-L704 |
4,965 | quantopian/zipline | zipline/pipeline/pipeline.py | Pipeline.to_execution_plan | def to_execution_plan(self,
domain,
default_screen,
start_date,
end_date):
"""
Compile into an ExecutionPlan.
Parameters
----------
domain : zipline.pipeline.domain.Domain
Domain on which the pipeline will be executed.
default_screen : zipline.pipeline.term.Term
Term to use as a screen if self.screen is None.
all_dates : pd.DatetimeIndex
A calendar of dates to use to calculate starts and ends for each
term.
start_date : pd.Timestamp
The first date of requested output.
end_date : pd.Timestamp
The last date of requested output.
Returns
-------
graph : zipline.pipeline.graph.ExecutionPlan
Graph encoding term dependencies, including metadata about extra
row requirements.
"""
if self._domain is not GENERIC and self._domain is not domain:
raise AssertionError(
"Attempted to compile Pipeline with domain {} to execution "
"plan with different domain {}.".format(self._domain, domain)
)
return ExecutionPlan(
domain=domain,
terms=self._prepare_graph_terms(default_screen),
start_date=start_date,
end_date=end_date,
) | python | def to_execution_plan(self,
domain,
default_screen,
start_date,
end_date):
"""
Compile into an ExecutionPlan.
Parameters
----------
domain : zipline.pipeline.domain.Domain
Domain on which the pipeline will be executed.
default_screen : zipline.pipeline.term.Term
Term to use as a screen if self.screen is None.
all_dates : pd.DatetimeIndex
A calendar of dates to use to calculate starts and ends for each
term.
start_date : pd.Timestamp
The first date of requested output.
end_date : pd.Timestamp
The last date of requested output.
Returns
-------
graph : zipline.pipeline.graph.ExecutionPlan
Graph encoding term dependencies, including metadata about extra
row requirements.
"""
if self._domain is not GENERIC and self._domain is not domain:
raise AssertionError(
"Attempted to compile Pipeline with domain {} to execution "
"plan with different domain {}.".format(self._domain, domain)
)
return ExecutionPlan(
domain=domain,
terms=self._prepare_graph_terms(default_screen),
start_date=start_date,
end_date=end_date,
) | ['def', 'to_execution_plan', '(', 'self', ',', 'domain', ',', 'default_screen', ',', 'start_date', ',', 'end_date', ')', ':', 'if', 'self', '.', '_domain', 'is', 'not', 'GENERIC', 'and', 'self', '.', '_domain', 'is', 'not', 'domain', ':', 'raise', 'AssertionError', '(', '"Attempted to compile Pipeline with domain {} to execution "', '"plan with different domain {}."', '.', 'format', '(', 'self', '.', '_domain', ',', 'domain', ')', ')', 'return', 'ExecutionPlan', '(', 'domain', '=', 'domain', ',', 'terms', '=', 'self', '.', '_prepare_graph_terms', '(', 'default_screen', ')', ',', 'start_date', '=', 'start_date', ',', 'end_date', '=', 'end_date', ',', ')'] | Compile into an ExecutionPlan.
Parameters
----------
domain : zipline.pipeline.domain.Domain
Domain on which the pipeline will be executed.
default_screen : zipline.pipeline.term.Term
Term to use as a screen if self.screen is None.
all_dates : pd.DatetimeIndex
A calendar of dates to use to calculate starts and ends for each
term.
start_date : pd.Timestamp
The first date of requested output.
end_date : pd.Timestamp
The last date of requested output.
Returns
-------
graph : zipline.pipeline.graph.ExecutionPlan
Graph encoding term dependencies, including metadata about extra
row requirements. | ['Compile', 'into', 'an', 'ExecutionPlan', '.'] | train | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/pipeline.py#L160-L199 |
4,966 | IdentityPython/oidcendpoint | src/oidcendpoint/cookie.py | parse_cookie | def parse_cookie(name, sign_key, kaka, enc_key=None, sign_alg='SHA256'):
"""Parses and verifies a cookie value
Parses a cookie created by `make_cookie` and verifies
it has not been tampered with.
You need to provide the same `sign_key` and `enc_key`
used when creating the cookie, otherwise the verification
fails. See `make_cookie` for details about the verification.
:param sign_key: A signing key used to create the signature
:type sign_key: A :py:class:`cryptojwt.jwk.hmac.SYMKey` instance
:param kaka: The cookie
:param enc_key: The encryption key used.
:type enc_key: A :py:class:`cryptojwt.jwk.hmac.SYMKey` instance or None
:raises InvalidCookieSign: When verification fails.
:return: A tuple consisting of (payload, timestamp) or None if parsing fails
"""
if not kaka:
return None
parts = cookie_parts(name, kaka)
return ver_dec_content(parts, sign_key, enc_key, sign_alg) | python | def parse_cookie(name, sign_key, kaka, enc_key=None, sign_alg='SHA256'):
"""Parses and verifies a cookie value
Parses a cookie created by `make_cookie` and verifies
it has not been tampered with.
You need to provide the same `sign_key` and `enc_key`
used when creating the cookie, otherwise the verification
fails. See `make_cookie` for details about the verification.
:param sign_key: A signing key used to create the signature
:type sign_key: A :py:class:`cryptojwt.jwk.hmac.SYMKey` instance
:param kaka: The cookie
:param enc_key: The encryption key used.
:type enc_key: A :py:class:`cryptojwt.jwk.hmac.SYMKey` instance or None
:raises InvalidCookieSign: When verification fails.
:return: A tuple consisting of (payload, timestamp) or None if parsing fails
"""
if not kaka:
return None
parts = cookie_parts(name, kaka)
return ver_dec_content(parts, sign_key, enc_key, sign_alg) | ['def', 'parse_cookie', '(', 'name', ',', 'sign_key', ',', 'kaka', ',', 'enc_key', '=', 'None', ',', 'sign_alg', '=', "'SHA256'", ')', ':', 'if', 'not', 'kaka', ':', 'return', 'None', 'parts', '=', 'cookie_parts', '(', 'name', ',', 'kaka', ')', 'return', 'ver_dec_content', '(', 'parts', ',', 'sign_key', ',', 'enc_key', ',', 'sign_alg', ')'] | Parses and verifies a cookie value
Parses a cookie created by `make_cookie` and verifies
it has not been tampered with.
You need to provide the same `sign_key` and `enc_key`
used when creating the cookie, otherwise the verification
fails. See `make_cookie` for details about the verification.
:param sign_key: A signing key used to create the signature
:type sign_key: A :py:class:`cryptojwt.jwk.hmac.SYMKey` instance
:param kaka: The cookie
:param enc_key: The encryption key used.
:type enc_key: A :py:class:`cryptojwt.jwk.hmac.SYMKey` instance or None
:raises InvalidCookieSign: When verification fails.
:return: A tuple consisting of (payload, timestamp) or None if parsing fails | ['Parses', 'and', 'verifies', 'a', 'cookie', 'value'] | train | https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/cookie.py#L218-L241 |
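A self-contained sketch of the sign-then-verify idea behind make_cookie/parse_cookie; this is not the library's exact cookie format or API, just the HMAC pattern (the key and payload are made up).

import hashlib
import hmac
import time

def sign(payload, key):
    ts = str(int(time.time()))
    mac = hmac.new(key, (payload + ts).encode(), hashlib.sha256).hexdigest()
    return "|".join([payload, ts, mac])

def verify(cookie_value, key):
    payload, ts, mac = cookie_value.split("|")
    expected = hmac.new(key, (payload + ts).encode(), hashlib.sha256).hexdigest()
    if not hmac.compare_digest(mac, expected):
        raise ValueError("invalid cookie signature")   # analogous to InvalidCookieSign
    return payload, ts

key = b"0123456789abcdef"
print(verify(sign("state=xyz", key), key))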
4,967 | materialsproject/pymatgen | pymatgen/analysis/pourbaix_diagram.py | PourbaixDiagram.get_decomposition_energy | def get_decomposition_energy(self, entry, pH, V):
"""
Finds decomposition to most stable entry
Args:
entry (PourbaixEntry): PourbaixEntry corresponding to
compound to find the decomposition for
pH (float): pH at which to find the decomposition
V (float): voltage at which to find the decomposition
Returns:
reaction corresponding to the decomposition
"""
# Find representative multientry
if self._multielement and not isinstance(entry, MultiEntry):
possible_entries = self._generate_multielement_entries(
self._filtered_entries, forced_include=[entry])
# Filter to only include materials where the entry is only solid
if entry.phase_type == "solid":
possible_entries = [e for e in possible_entries
if e.phase_type.count("Solid") == 1]
possible_energies = [e.normalized_energy_at_conditions(pH, V)
for e in possible_entries]
else:
possible_energies = [entry.normalized_energy_at_conditions(pH, V)]
min_energy = np.min(possible_energies, axis=0)
# Find entry and take the difference
hull = self.get_hull_energy(pH, V)
return min_energy - hull | python | def get_decomposition_energy(self, entry, pH, V):
"""
Finds decomposition to most stable entry
Args:
entry (PourbaixEntry): PourbaixEntry corresponding to
compound to find the decomposition for
pH (float): pH at which to find the decomposition
V (float): voltage at which to find the decomposition
Returns:
reaction corresponding to the decomposition
"""
# Find representative multientry
if self._multielement and not isinstance(entry, MultiEntry):
possible_entries = self._generate_multielement_entries(
self._filtered_entries, forced_include=[entry])
# Filter to only include materials where the entry is only solid
if entry.phase_type == "solid":
possible_entries = [e for e in possible_entries
if e.phase_type.count("Solid") == 1]
possible_energies = [e.normalized_energy_at_conditions(pH, V)
for e in possible_entries]
else:
possible_energies = [entry.normalized_energy_at_conditions(pH, V)]
min_energy = np.min(possible_energies, axis=0)
# Find entry and take the difference
hull = self.get_hull_energy(pH, V)
return min_energy - hull | ['def', 'get_decomposition_energy', '(', 'self', ',', 'entry', ',', 'pH', ',', 'V', ')', ':', '# Find representative multientry', 'if', 'self', '.', '_multielement', 'and', 'not', 'isinstance', '(', 'entry', ',', 'MultiEntry', ')', ':', 'possible_entries', '=', 'self', '.', '_generate_multielement_entries', '(', 'self', '.', '_filtered_entries', ',', 'forced_include', '=', '[', 'entry', ']', ')', '# Filter to only include materials where the entry is only solid', 'if', 'entry', '.', 'phase_type', '==', '"solid"', ':', 'possible_entries', '=', '[', 'e', 'for', 'e', 'in', 'possible_entries', 'if', 'e', '.', 'phase_type', '.', 'count', '(', '"Solid"', ')', '==', '1', ']', 'possible_energies', '=', '[', 'e', '.', 'normalized_energy_at_conditions', '(', 'pH', ',', 'V', ')', 'for', 'e', 'in', 'possible_entries', ']', 'else', ':', 'possible_energies', '=', '[', 'entry', '.', 'normalized_energy_at_conditions', '(', 'pH', ',', 'V', ')', ']', 'min_energy', '=', 'np', '.', 'min', '(', 'possible_energies', ',', 'axis', '=', '0', ')', '# Find entry and take the difference', 'hull', '=', 'self', '.', 'get_hull_energy', '(', 'pH', ',', 'V', ')', 'return', 'min_energy', '-', 'hull'] | Finds decomposition to most stable entry
Args:
entry (PourbaixEntry): PourbaixEntry corresponding to
compound to find the decomposition for
pH (float): pH at which to find the decomposition
V (float): voltage at which to find the decomposition
Returns:
reaction corresponding to the decomposition | ['Finds', 'decomposition', 'to', 'most', 'stable', 'entry'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/pourbaix_diagram.py#L683-L714 |
4,968 | EpistasisLab/scikit-rebate | skrebate/scoring_utils.py | ReliefF_compute_scores | def ReliefF_compute_scores(inst, attr, nan_entries, num_attributes, mcmap, NN, headers, class_type, X, y, labels_std, data_type):
""" Unique scoring procedure for ReliefF algorithm. Scoring based on k nearest hits and misses of current target instance. """
scores = np.zeros(num_attributes)
for feature_num in range(num_attributes):
scores[feature_num] += compute_score(attr, mcmap, NN, feature_num, inst,
nan_entries, headers, class_type, X, y, labels_std, data_type)
return scores | python | def ReliefF_compute_scores(inst, attr, nan_entries, num_attributes, mcmap, NN, headers, class_type, X, y, labels_std, data_type):
""" Unique scoring procedure for ReliefF algorithm. Scoring based on k nearest hits and misses of current target instance. """
scores = np.zeros(num_attributes)
for feature_num in range(num_attributes):
scores[feature_num] += compute_score(attr, mcmap, NN, feature_num, inst,
nan_entries, headers, class_type, X, y, labels_std, data_type)
return scores | ['def', 'ReliefF_compute_scores', '(', 'inst', ',', 'attr', ',', 'nan_entries', ',', 'num_attributes', ',', 'mcmap', ',', 'NN', ',', 'headers', ',', 'class_type', ',', 'X', ',', 'y', ',', 'labels_std', ',', 'data_type', ')', ':', 'scores', '=', 'np', '.', 'zeros', '(', 'num_attributes', ')', 'for', 'feature_num', 'in', 'range', '(', 'num_attributes', ')', ':', 'scores', '[', 'feature_num', ']', '+=', 'compute_score', '(', 'attr', ',', 'mcmap', ',', 'NN', ',', 'feature_num', ',', 'inst', ',', 'nan_entries', ',', 'headers', ',', 'class_type', ',', 'X', ',', 'y', ',', 'labels_std', ',', 'data_type', ')', 'return', 'scores'] | Unique scoring procedure for ReliefF algorithm. Scoring based on k nearest hits and misses of current target instance. | ['Unique', 'scoring', 'procedure', 'for', 'ReliefF', 'algorithm', '.', 'Scoring', 'based', 'on', 'k', 'nearest', 'hits', 'and', 'misses', 'of', 'current', 'target', 'instance', '.'] | train | https://github.com/EpistasisLab/scikit-rebate/blob/67dab51a7525fa5d076b059f1e6f8cff7481c1ef/skrebate/scoring_utils.py#L352-L358 |
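To show the accumulation pattern of the function above without pulling in the rest of skrebate, here is a hedged, self-contained sketch: dummy_compute_score is a stand-in for the real compute_score helper (defined elsewhere in scoring_utils.py) and is not the actual ReliefF scoring rule.

import numpy as np

def dummy_compute_score(X, inst, feature_num):
    # Stand-in: reward features whose value for the target instance is far from the feature mean.
    return abs(X[inst, feature_num] - X[:, feature_num].mean())

def relieff_like_scores(X, inst):
    # Same per-feature accumulation as ReliefF_compute_scores above.
    scores = np.zeros(X.shape[1])
    for feature_num in range(X.shape[1]):
        scores[feature_num] += dummy_compute_score(X, inst, feature_num)
    return scores

X = np.random.rand(20, 5)
print(relieff_like_scores(X, inst=0))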
4,969 | jpscaletti/authcode | authcode/auth_views_mixin.py | ViewsMixin.send_email | def send_email(self, user, subject, msg):
"""Should be overwritten in the setup"""
print('To:', user)
print('Subject:', subject)
print(msg) | python | def send_email(self, user, subject, msg):
"""Should be overwritten in the setup"""
print('To:', user)
print('Subject:', subject)
print(msg) | ['def', 'send_email', '(', 'self', ',', 'user', ',', 'subject', ',', 'msg', ')', ':', 'print', '(', "'To:'", ',', 'user', ')', 'print', '(', "'Subject:'", ',', 'subject', ')', 'print', '(', 'msg', ')'] | Should be overwritten in the setup | ['Should', 'be', 'overwritten', 'in', 'the', 'setup'] | train | https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/auth_views_mixin.py#L67-L71 |
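The method above is a print-based stub that the docstring says should be overwritten during setup. A hedged example of what an override could look like using only the standard library; the SMTP host, port and sender address are placeholders, and wiring this in as a bound method on the auth object is left to the setup code.

import smtplib
from email.message import EmailMessage

def send_email(user, subject, msg):
    # Hypothetical replacement for the stub: deliver the message over SMTP.
    email = EmailMessage()
    email["From"] = "noreply@example.com"      # placeholder sender
    email["To"] = str(user)                    # assumes the user object renders to an address
    email["Subject"] = subject
    email.set_content(msg)
    with smtplib.SMTP("localhost", 25) as smtp:  # placeholder SMTP server
        smtp.send_message(email)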
4,970 | JukeboxPipeline/jukebox-core | src/jukeboxcore/reftrack.py | Reftrack.fetch_uptodate | def fetch_uptodate(self, ):
"""Set and return whether the currently loaded entity is
the newest version in the department.
:returns: True, if newest version. False, if there is a newer version.
None, if there is nothing loaded yet.
:rtype: bool | None
:raises: None
"""
tfi = self.get_taskfileinfo()
if tfi:
self._uptodate = tfi.is_latest()
else:
self._uptodate = None
return self._uptodate | python | def fetch_uptodate(self, ):
"""Set and return whether the currently loaded entity is
the newest version in the department.
:returns: True, if newest version. False, if there is a newer version.
None, if there is nothing loaded yet.
:rtype: bool | None
:raises: None
"""
tfi = self.get_taskfileinfo()
if tfi:
self._uptodate = tfi.is_latest()
else:
self._uptodate = None
return self._uptodate | ['def', 'fetch_uptodate', '(', 'self', ',', ')', ':', 'tfi', '=', 'self', '.', 'get_taskfileinfo', '(', ')', 'if', 'tfi', ':', 'self', '.', '_uptodate', '=', 'tfi', '.', 'is_latest', '(', ')', 'else', ':', 'self', '.', '_uptodate', '=', 'None', 'return', 'self', '.', '_uptodate'] | Set and return whether the currently loaded entity is
the newest version in the department.
:returns: True, if newest version. False, if there is a newer version.
None, if there is nothing loaded yet.
:rtype: bool | None
:raises: None | ['Set', 'and', 'return', 'whether', 'the', 'currently', 'loaded', 'entity', 'is', 'the', 'newest', 'version', 'in', 'the', 'department', '.'] | train | https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/reftrack.py#L1008-L1022 |
4,971 | JoseAntFer/pyny3d | pyny3d/shadows.py | ShadowsManager.get_sunpos | def get_sunpos(self, t, true_time=False):
"""
Computes the Sun positions for the *t* time vector.
*t* have to be in absolute minutes (0 at 00:00 01 Jan). The and
in Sun positions calculated are in solar time, that is, maximun
solar zenit exactly at midday.
The generated information is stored in:
* **.azimuth_zenit** (*ndarray*)
* **.true_time** (*datetime*): local time
:param t: Absolute minutes vector.
:type t: ndarray (dtype=int)
:param true_time: If True, a datetime vector with the true local
time will be stored at ``.true_time``
:type true_time: bool
:returns: Equivalent times in absolute minutes in year.
:rtype: ndarray (dtype=int)
:returns: None
.. seealso:: :func:`to_minutes` to easily genetare valid input
t.
"""
import numpy as np
lat = self.arg_latitude
long = self.arg_longitude
alphamin = self.arg_zenitmin
# Solar calculations
day = np.modf(t/1440)[0]
fractional_year = 2*np.pi/(365*24*60)*(-24*60+t)
declination = 0.006918 - \
0.399912*np.cos(fractional_year) + \
0.070257*np.sin(fractional_year) - \
0.006758*np.cos(2*fractional_year) + \
0.000907*np.sin(2*fractional_year) - \
0.002697*np.cos(3*fractional_year) + \
0.00148*np.sin(3*fractional_year)
hour_angle = np.tile(np.arange(-np.pi, np.pi, 2*np.pi/(24*60),
dtype='float'), 365)[t]
solar_zenit = np.arcsin(np.sin(lat)*np.sin(declination) + \
np.cos(lat)*np.cos(declination)*np.cos(hour_angle))
solar_zenit[solar_zenit<=0+alphamin] = np.nan
#### Avoiding numpy warning
aux = (np.sin(solar_zenit)*np.sin(lat) - np.sin(declination))/ \
(np.cos(solar_zenit)*np.cos(lat))
not_nan = np.logical_not(np.isnan(aux))
aux_1 = aux[not_nan]
aux_1[aux_1>=1] = np.nan
aux[not_nan] = aux_1
####
solar_azimuth = np.arccos(aux)
solar_azimuth[day==0.5] = 0
solar_azimuth[day<0.5] *= -1
self.azimuth_zenit = np.vstack((solar_azimuth, solar_zenit)).T
# True time
if true_time:
import datetime as dt
long = np.rad2deg(long)
instant_0 = dt.datetime(1,1,1,0,0,0) # Simulator time
# Real time
equation_time = 229.18*(0.000075+0.001868*np.cos(fractional_year) - \
0.032077*np.sin(fractional_year) - \
0.014615*np.cos(2*fractional_year) - \
0.040849*np.sin(2*fractional_year))
time_offset = equation_time + 4*long + 60*self.arg_UTC
true_solar_time = t + time_offset
delta_true_date_objs = np.array([dt.timedelta(minutes=i)
for i in true_solar_time])
self.true_time = instant_0 + delta_true_date_objs | python | def get_sunpos(self, t, true_time=False):
"""
Computes the Sun positions for the *t* time vector.
*t* have to be in absolute minutes (0 at 00:00 01 Jan). The and
in Sun positions calculated are in solar time, that is, maximun
solar zenit exactly at midday.
The generated information is stored in:
* **.azimuth_zenit** (*ndarray*)
* **.true_time** (*datetime*): local time
:param t: Absolute minutes vector.
:type t: ndarray (dtype=int)
:param true_time: If True, a datetime vector with the true local
time will be stored at ``.true_time``
:type true_time: bool
:returns: Equivalent times in absolute minutes in year.
:rtype: ndarray (dtype=int)
:returns: None
.. seealso:: :func:`to_minutes` to easily genetare valid input
t.
"""
import numpy as np
lat = self.arg_latitude
long = self.arg_longitude
alphamin = self.arg_zenitmin
# Solar calculations
day = np.modf(t/1440)[0]
fractional_year = 2*np.pi/(365*24*60)*(-24*60+t)
declination = 0.006918 - \
0.399912*np.cos(fractional_year) + \
0.070257*np.sin(fractional_year) - \
0.006758*np.cos(2*fractional_year) + \
0.000907*np.sin(2*fractional_year) - \
0.002697*np.cos(3*fractional_year) + \
0.00148*np.sin(3*fractional_year)
hour_angle = np.tile(np.arange(-np.pi, np.pi, 2*np.pi/(24*60),
dtype='float'), 365)[t]
solar_zenit = np.arcsin(np.sin(lat)*np.sin(declination) + \
np.cos(lat)*np.cos(declination)*np.cos(hour_angle))
solar_zenit[solar_zenit<=0+alphamin] = np.nan
#### Avoiding numpy warning
aux = (np.sin(solar_zenit)*np.sin(lat) - np.sin(declination))/ \
(np.cos(solar_zenit)*np.cos(lat))
not_nan = np.logical_not(np.isnan(aux))
aux_1 = aux[not_nan]
aux_1[aux_1>=1] = np.nan
aux[not_nan] = aux_1
####
solar_azimuth = np.arccos(aux)
solar_azimuth[day==0.5] = 0
solar_azimuth[day<0.5] *= -1
self.azimuth_zenit = np.vstack((solar_azimuth, solar_zenit)).T
# True time
if true_time:
import datetime as dt
long = np.rad2deg(long)
instant_0 = dt.datetime(1,1,1,0,0,0) # Simulator time
# Real time
equation_time = 229.18*(0.000075+0.001868*np.cos(fractional_year) - \
0.032077*np.sin(fractional_year) - \
0.014615*np.cos(2*fractional_year) - \
0.040849*np.sin(2*fractional_year))
time_offset = equation_time + 4*long + 60*self.arg_UTC
true_solar_time = t + time_offset
delta_true_date_objs = np.array([dt.timedelta(minutes=i)
for i in true_solar_time])
self.true_time = instant_0 + delta_true_date_objs | ['def', 'get_sunpos', '(', 'self', ',', 't', ',', 'true_time', '=', 'False', ')', ':', 'import', 'numpy', 'as', 'np', 'lat', '=', 'self', '.', 'arg_latitude', 'long', '=', 'self', '.', 'arg_longitude', 'alphamin', '=', 'self', '.', 'arg_zenitmin', '# Solar calculations\r', 'day', '=', 'np', '.', 'modf', '(', 't', '/', '1440', ')', '[', '0', ']', 'fractional_year', '=', '2', '*', 'np', '.', 'pi', '/', '(', '365', '*', '24', '*', '60', ')', '*', '(', '-', '24', '*', '60', '+', 't', ')', 'declination', '=', '0.006918', '-', '0.399912', '*', 'np', '.', 'cos', '(', 'fractional_year', ')', '+', '0.070257', '*', 'np', '.', 'sin', '(', 'fractional_year', ')', '-', '0.006758', '*', 'np', '.', 'cos', '(', '2', '*', 'fractional_year', ')', '+', '0.000907', '*', 'np', '.', 'sin', '(', '2', '*', 'fractional_year', ')', '-', '0.002697', '*', 'np', '.', 'cos', '(', '3', '*', 'fractional_year', ')', '+', '0.00148', '*', 'np', '.', 'sin', '(', '3', '*', 'fractional_year', ')', 'hour_angle', '=', 'np', '.', 'tile', '(', 'np', '.', 'arange', '(', '-', 'np', '.', 'pi', ',', 'np', '.', 'pi', ',', '2', '*', 'np', '.', 'pi', '/', '(', '24', '*', '60', ')', ',', 'dtype', '=', "'float'", ')', ',', '365', ')', '[', 't', ']', 'solar_zenit', '=', 'np', '.', 'arcsin', '(', 'np', '.', 'sin', '(', 'lat', ')', '*', 'np', '.', 'sin', '(', 'declination', ')', '+', 'np', '.', 'cos', '(', 'lat', ')', '*', 'np', '.', 'cos', '(', 'declination', ')', '*', 'np', '.', 'cos', '(', 'hour_angle', ')', ')', 'solar_zenit', '[', 'solar_zenit', '<=', '0', '+', 'alphamin', ']', '=', 'np', '.', 'nan', '#### Avoiding numpy warning\r', 'aux', '=', '(', 'np', '.', 'sin', '(', 'solar_zenit', ')', '*', 'np', '.', 'sin', '(', 'lat', ')', '-', 'np', '.', 'sin', '(', 'declination', ')', ')', '/', '(', 'np', '.', 'cos', '(', 'solar_zenit', ')', '*', 'np', '.', 'cos', '(', 'lat', ')', ')', 'not_nan', '=', 'np', '.', 'logical_not', '(', 'np', '.', 'isnan', '(', 'aux', ')', ')', 'aux_1', '=', 'aux', '[', 'not_nan', ']', 'aux_1', '[', 'aux_1', '>=', '1', ']', '=', 'np', '.', 'nan', 'aux', '[', 'not_nan', ']', '=', 'aux_1', '####\r', 'solar_azimuth', '=', 'np', '.', 'arccos', '(', 'aux', ')', 'solar_azimuth', '[', 'day', '==', '0.5', ']', '=', '0', 'solar_azimuth', '[', 'day', '<', '0.5', ']', '*=', '-', '1', 'self', '.', 'azimuth_zenit', '=', 'np', '.', 'vstack', '(', '(', 'solar_azimuth', ',', 'solar_zenit', ')', ')', '.', 'T', '# True time\r', 'if', 'true_time', ':', 'import', 'datetime', 'as', 'dt', 'long', '=', 'np', '.', 'rad2deg', '(', 'long', ')', 'instant_0', '=', 'dt', '.', 'datetime', '(', '1', ',', '1', ',', '1', ',', '0', ',', '0', ',', '0', ')', '# Simulator time \r', '# Real time\r', 'equation_time', '=', '229.18', '*', '(', '0.000075', '+', '0.001868', '*', 'np', '.', 'cos', '(', 'fractional_year', ')', '-', '0.032077', '*', 'np', '.', 'sin', '(', 'fractional_year', ')', '-', '0.014615', '*', 'np', '.', 'cos', '(', '2', '*', 'fractional_year', ')', '-', '0.040849', '*', 'np', '.', 'sin', '(', '2', '*', 'fractional_year', ')', ')', 'time_offset', '=', 'equation_time', '+', '4', '*', 'long', '+', '60', '*', 'self', '.', 'arg_UTC', 'true_solar_time', '=', 't', '+', 'time_offset', 'delta_true_date_objs', '=', 'np', '.', 'array', '(', '[', 'dt', '.', 'timedelta', '(', 'minutes', '=', 'i', ')', 'for', 'i', 'in', 'true_solar_time', ']', ')', 'self', '.', 'true_time', '=', 'instant_0', '+', 'delta_true_date_objs'] | Computes the Sun positions for the *t* time vector.
*t* have to be in absolute minutes (0 at 00:00 01 Jan). The and
in Sun positions calculated are in solar time, that is, maximun
solar zenit exactly at midday.
The generated information is stored in:
* **.azimuth_zenit** (*ndarray*)
* **.true_time** (*datetime*): local time
:param t: Absolute minutes vector.
:type t: ndarray (dtype=int)
:param true_time: If True, a datetime vector with the true local
time will be stored at ``.true_time``
:type true_time: bool
:returns: Equivalent times in absolute minutes in year.
:rtype: ndarray (dtype=int)
:returns: None
.. seealso:: :func:`to_minutes` to easily genetare valid input
t. | ['Computes', 'the', 'Sun', 'positions', 'for', 'the', '*', 't', '*', 'time', 'vector', '.', '*', 't', '*', 'have', 'to', 'be', 'in', 'absolute', 'minutes', '(', '0', 'at', '00', ':', '00', '01', 'Jan', ')', '.', 'The', 'and', 'in', 'Sun', 'positions', 'calculated', 'are', 'in', 'solar', 'time', 'that', 'is', 'maximun', 'solar', 'zenit', 'exactly', 'at', 'midday', '.', 'The', 'generated', 'information', 'is', 'stored', 'in', ':', '*', '**', '.', 'azimuth_zenit', '**', '(', '*', 'ndarray', '*', ')', '*', '**', '.', 'true_time', '**', '(', '*', 'datetime', '*', ')', ':', 'local', 'time', ':', 'param', 't', ':', 'Absolute', 'minutes', 'vector', '.', ':', 'type', 't', ':', 'ndarray', '(', 'dtype', '=', 'int', ')', ':', 'param', 'true_time', ':', 'If', 'True', 'a', 'datetime', 'vector', 'with', 'the', 'true', 'local', 'time', 'will', 'be', 'stored', 'at', '.', 'true_time', ':', 'type', 'true_time', ':', 'bool', ':', 'returns', ':', 'Equivalent', 'times', 'in', 'absolute', 'minutes', 'in', 'year', '.', ':', 'rtype', ':', 'ndarray', '(', 'dtype', '=', 'int', ')', ':', 'returns', ':', 'None', '..', 'seealso', '::', ':', 'func', ':', 'to_minutes', 'to', 'easily', 'genetare', 'valid', 'input', 't', '.'] | train | https://github.com/JoseAntFer/pyny3d/blob/fb81684935a24f7e50c975cb4383c81a63ab56df/pyny3d/shadows.py#L257-L332 |
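The declination series inside get_sunpos is a standard truncated Fourier approximation; as a quick self-contained check it can be evaluated for a single instant. The day/minute bookkeeping below mirrors the absolute-minutes convention described in the docstring (0 at 00:00 on 1 Jan) and is otherwise an illustrative assumption.

import numpy as np

def solar_declination(day_of_year, minute_of_day=720):
    # Same series as in get_sunpos above, evaluated for one instant (result in radians).
    t = day_of_year * 1440 + minute_of_day
    fractional_year = 2 * np.pi / (365 * 24 * 60) * (t - 24 * 60)
    return (0.006918
            - 0.399912 * np.cos(fractional_year) + 0.070257 * np.sin(fractional_year)
            - 0.006758 * np.cos(2 * fractional_year) + 0.000907 * np.sin(2 * fractional_year)
            - 0.002697 * np.cos(3 * fractional_year) + 0.00148 * np.sin(3 * fractional_year))

# Roughly +23.4 degrees near the June solstice and -23.4 near the December solstice.
print(np.degrees(solar_declination(172)), np.degrees(solar_declination(355)))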
4,972 | chemlab/chemlab | chemlab/graphics/transformations.py | quaternion_inverse | def quaternion_inverse(quaternion):
"""Return inverse of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_inverse(q0)
>>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
numpy.negative(q[1:], q[1:])
return q / numpy.dot(q, q) | python | def quaternion_inverse(quaternion):
"""Return inverse of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_inverse(q0)
>>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
numpy.negative(q[1:], q[1:])
return q / numpy.dot(q, q) | ['def', 'quaternion_inverse', '(', 'quaternion', ')', ':', 'q', '=', 'numpy', '.', 'array', '(', 'quaternion', ',', 'dtype', '=', 'numpy', '.', 'float64', ',', 'copy', '=', 'True', ')', 'numpy', '.', 'negative', '(', 'q', '[', '1', ':', ']', ',', 'q', '[', '1', ':', ']', ')', 'return', 'q', '/', 'numpy', '.', 'dot', '(', 'q', ',', 'q', ')'] | Return inverse of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_inverse(q0)
>>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])
True | ['Return', 'inverse', 'of', 'quaternion', '.'] | train | https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/graphics/transformations.py#L1429-L1440 |
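Because the routine above needs nothing but NumPy, it is easy to check in isolation. The quaternion_multiply helper below is a plain Hamilton product in scalar-first (w, x, y, z) ordering, written here as a stand-in for the transformations.py function of the same name, so that a quaternion times its inverse can be verified to give the identity.

import numpy as np

def quaternion_inverse(q):
    # Same body as above: conjugate divided by the squared norm.
    q = np.array(q, dtype=np.float64, copy=True)
    np.negative(q[1:], q[1:])
    return q / np.dot(q, q)

def quaternion_multiply(a, b):
    # Standard Hamilton product, scalar-first (w, x, y, z) ordering.
    w1, x1, y1, z1 = a
    w2, x2, y2, z2 = b
    return np.array([
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
        w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
        w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
        w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
    ])

q = np.array([0.5, 0.5, 0.5, 0.5])                       # a unit quaternion
print(quaternion_multiply(q, quaternion_inverse(q)))     # ~[1, 0, 0, 0]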
4,973 | thanethomson/statik | statik/utils.py | copy_file_if_modified | def copy_file_if_modified(src_path, dest_path):
"""Only copies the file from the source path to the destination path if it doesn't exist yet or it has
been modified. Intended to provide something of an optimisation when a project has large trees of assets."""
# if the destination path is a directory, delete it completely - we assume here we are
# writing a file to the filesystem
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
must_copy = False
if not os.path.exists(dest_path):
must_copy = True
else:
src_stat = os.stat(src_path)
dest_stat = os.stat(dest_path)
# if the size or last modified timestamp are different
if ((src_stat[stat.ST_SIZE] != dest_stat[stat.ST_SIZE]) or
(src_stat[stat.ST_MTIME] != dest_stat[stat.ST_MTIME])):
must_copy = True
if must_copy:
shutil.copy2(src_path, dest_path) | python | def copy_file_if_modified(src_path, dest_path):
"""Only copies the file from the source path to the destination path if it doesn't exist yet or it has
been modified. Intended to provide something of an optimisation when a project has large trees of assets."""
# if the destination path is a directory, delete it completely - we assume here we are
# writing a file to the filesystem
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
must_copy = False
if not os.path.exists(dest_path):
must_copy = True
else:
src_stat = os.stat(src_path)
dest_stat = os.stat(dest_path)
# if the size or last modified timestamp are different
if ((src_stat[stat.ST_SIZE] != dest_stat[stat.ST_SIZE]) or
(src_stat[stat.ST_MTIME] != dest_stat[stat.ST_MTIME])):
must_copy = True
if must_copy:
shutil.copy2(src_path, dest_path) | ['def', 'copy_file_if_modified', '(', 'src_path', ',', 'dest_path', ')', ':', '# if the destination path is a directory, delete it completely - we assume here we are', '# writing a file to the filesystem', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'dest_path', ')', ':', 'shutil', '.', 'rmtree', '(', 'dest_path', ')', 'must_copy', '=', 'False', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'dest_path', ')', ':', 'must_copy', '=', 'True', 'else', ':', 'src_stat', '=', 'os', '.', 'stat', '(', 'src_path', ')', 'dest_stat', '=', 'os', '.', 'stat', '(', 'dest_path', ')', '# if the size or last modified timestamp are different', 'if', '(', '(', 'src_stat', '[', 'stat', '.', 'ST_SIZE', ']', '!=', 'dest_stat', '[', 'stat', '.', 'ST_SIZE', ']', ')', 'or', '(', 'src_stat', '[', 'stat', '.', 'ST_MTIME', ']', '!=', 'dest_stat', '[', 'stat', '.', 'ST_MTIME', ']', ')', ')', ':', 'must_copy', '=', 'True', 'if', 'must_copy', ':', 'shutil', '.', 'copy2', '(', 'src_path', ',', 'dest_path', ')'] | Only copies the file from the source path to the destination path if it doesn't exist yet or it has
been modified. Intended to provide something of an optimisation when a project has large trees of assets. | ['Only', 'copies', 'the', 'file', 'from', 'the', 'source', 'path', 'to', 'the', 'destination', 'path', 'if', 'it', 'doesn', 't', 'exist', 'yet', 'or', 'it', 'has', 'been', 'modified', '.', 'Intended', 'to', 'provide', 'something', 'of', 'an', 'optimisation', 'when', 'a', 'project', 'has', 'large', 'trees', 'of', 'assets', '.'] | train | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/utils.py#L141-L163 |
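A small self-contained exercise of the copy-if-modified logic above; the temporary directory and file names are invented for the demo, and the helper is a condensed equivalent of the statik function rather than the function itself.

import os
import shutil
import stat
import tempfile

def copy_file_if_modified(src_path, dest_path):
    # Skip the copy when the destination exists with the same size and mtime.
    if os.path.isdir(dest_path):
        shutil.rmtree(dest_path)
    if os.path.exists(dest_path):
        src_stat, dest_stat = os.stat(src_path), os.stat(dest_path)
        if (src_stat[stat.ST_SIZE] == dest_stat[stat.ST_SIZE]
                and src_stat[stat.ST_MTIME] == dest_stat[stat.ST_MTIME]):
            return
    shutil.copy2(src_path, dest_path)   # copy2 preserves the modification time

with tempfile.TemporaryDirectory() as tmp:
    src, dst = os.path.join(tmp, "asset.css"), os.path.join(tmp, "out.css")
    with open(src, "w") as f:
        f.write("body { margin: 0; }\n")
    copy_file_if_modified(src, dst)   # first call copies the file
    copy_file_if_modified(src, dst)   # second call is a no-op (size and mtime match)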
4,974 | SylvanasSun/FishFishJump | fish_dashboard/scrapyd/scrapyd_service.py | get_job_amounts | def get_job_amounts(agent, project_name, spider_name=None):
"""
Get amounts that pending job amount, running job amount, finished job amount.
"""
job_list = agent.get_job_list(project_name)
pending_job_list = job_list['pending']
running_job_list = job_list['running']
finished_job_list = job_list['finished']
job_amounts = {}
if spider_name is None:
job_amounts['pending'] = len(pending_job_list)
job_amounts['running'] = len(running_job_list)
job_amounts['finished'] = len(finished_job_list)
else:
job_amounts['pending'] = len([j for j in pending_job_list if j['spider'] == spider_name])
job_amounts['running'] = len([j for j in running_job_list if j['spider'] == spider_name])
job_amounts['finished'] = len([j for j in finished_job_list if j['spider'] == spider_name])
return job_amounts | python | def get_job_amounts(agent, project_name, spider_name=None):
"""
Get amounts that pending job amount, running job amount, finished job amount.
"""
job_list = agent.get_job_list(project_name)
pending_job_list = job_list['pending']
running_job_list = job_list['running']
finished_job_list = job_list['finished']
job_amounts = {}
if spider_name is None:
job_amounts['pending'] = len(pending_job_list)
job_amounts['running'] = len(running_job_list)
job_amounts['finished'] = len(finished_job_list)
else:
job_amounts['pending'] = len([j for j in pending_job_list if j['spider'] == spider_name])
job_amounts['running'] = len([j for j in running_job_list if j['spider'] == spider_name])
job_amounts['finished'] = len([j for j in finished_job_list if j['spider'] == spider_name])
return job_amounts | ['def', 'get_job_amounts', '(', 'agent', ',', 'project_name', ',', 'spider_name', '=', 'None', ')', ':', 'job_list', '=', 'agent', '.', 'get_job_list', '(', 'project_name', ')', 'pending_job_list', '=', 'job_list', '[', "'pending'", ']', 'running_job_list', '=', 'job_list', '[', "'running'", ']', 'finished_job_list', '=', 'job_list', '[', "'finished'", ']', 'job_amounts', '=', '{', '}', 'if', 'spider_name', 'is', 'None', ':', 'job_amounts', '[', "'pending'", ']', '=', 'len', '(', 'pending_job_list', ')', 'job_amounts', '[', "'running'", ']', '=', 'len', '(', 'running_job_list', ')', 'job_amounts', '[', "'finished'", ']', '=', 'len', '(', 'finished_job_list', ')', 'else', ':', 'job_amounts', '[', "'pending'", ']', '=', 'len', '(', '[', 'j', 'for', 'j', 'in', 'pending_job_list', 'if', 'j', '[', "'spider'", ']', '==', 'spider_name', ']', ')', 'job_amounts', '[', "'running'", ']', '=', 'len', '(', '[', 'j', 'for', 'j', 'in', 'running_job_list', 'if', 'j', '[', "'spider'", ']', '==', 'spider_name', ']', ')', 'job_amounts', '[', "'finished'", ']', '=', 'len', '(', '[', 'j', 'for', 'j', 'in', 'finished_job_list', 'if', 'j', '[', "'spider'", ']', '==', 'spider_name', ']', ')', 'return', 'job_amounts'] | Get amounts that pending job amount, running job amount, finished job amount. | ['Get', 'amounts', 'that', 'pending', 'job', 'amount', 'running', 'job', 'amount', 'finished', 'job', 'amount', '.'] | train | https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_dashboard/scrapyd/scrapyd_service.py#L212-L230 |
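The only thing the scrapyd agent needs to provide here is get_job_list(project) returning 'pending'/'running'/'finished' lists of dicts carrying a 'spider' key, so the counting can be exercised with a tiny fake agent; all names and data below are made up.

class FakeAgent:
    # Minimal stand-in for the scrapyd agent used above.
    def get_job_list(self, project_name):
        return {
            "pending": [{"spider": "news"}],
            "running": [{"spider": "news"}, {"spider": "shop"}],
            "finished": [{"spider": "shop"}],
        }

def get_job_amounts(agent, project_name, spider_name=None):
    # Condensed equivalent of the function above.
    jobs = agent.get_job_list(project_name)
    def count(state):
        items = jobs[state]
        if spider_name is not None:
            items = [j for j in items if j["spider"] == spider_name]
        return len(items)
    return {state: count(state) for state in ("pending", "running", "finished")}

print(get_job_amounts(FakeAgent(), "demo"))                      # counts over all spiders
print(get_job_amounts(FakeAgent(), "demo", spider_name="news"))  # counts for one spider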
4,975 | DLR-RM/RAFCON | source/rafcon/gui/controllers/debug_console.py | DebugConsoleController.on_config_value_changed | def on_config_value_changed(self, config_m, prop_name, info):
"""Callback when a config value has been changed
:param ConfigModel config_m: The config model that has been changed
:param str prop_name: Should always be 'config'
:param dict info: Information e.g. about the changed config key
"""
config_key = info['args'][1]
if "LOGGING" in config_key:
self.update_log_button_state() | python | def on_config_value_changed(self, config_m, prop_name, info):
"""Callback when a config value has been changed
:param ConfigModel config_m: The config model that has been changed
:param str prop_name: Should always be 'config'
:param dict info: Information e.g. about the changed config key
"""
config_key = info['args'][1]
if "LOGGING" in config_key:
self.update_log_button_state() | ['def', 'on_config_value_changed', '(', 'self', ',', 'config_m', ',', 'prop_name', ',', 'info', ')', ':', 'config_key', '=', 'info', '[', "'args'", ']', '[', '1', ']', 'if', '"LOGGING"', 'in', 'config_key', ':', 'self', '.', 'update_log_button_state', '(', ')'] | Callback when a config value has been changed
:param ConfigModel config_m: The config model that has been changed
:param str prop_name: Should always be 'config'
:param dict info: Information e.g. about the changed config key | ['Callback', 'when', 'a', 'config', 'value', 'has', 'been', 'changed'] | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/debug_console.py#L69-L78 |
4,976 | wolfhong/formic | formic/formic.py | Pattern._to_string | def _to_string(self):
"""Implemented a function for __str__ and __repr__ to use, but
which prevents infinite recursion when migrating to Python 3"""
if self.sections:
start = "/" if self.bound_start else "**/"
sections = "/**/".join(str(section) for section in self.sections)
end = "" if self.bound_end else "/**"
else:
start = ""
sections = ""
end = "" if self.bound_end else "**"
return "{0}{1}{2}/{3}".format(start, sections, end, str(self.file_pattern)) | python | def _to_string(self):
"""Implemented a function for __str__ and __repr__ to use, but
which prevents infinite recursion when migrating to Python 3"""
if self.sections:
start = "/" if self.bound_start else "**/"
sections = "/**/".join(str(section) for section in self.sections)
end = "" if self.bound_end else "/**"
else:
start = ""
sections = ""
end = "" if self.bound_end else "**"
return "{0}{1}{2}/{3}".format(start, sections, end, str(self.file_pattern)) | ['def', '_to_string', '(', 'self', ')', ':', 'if', 'self', '.', 'sections', ':', 'start', '=', '"/"', 'if', 'self', '.', 'bound_start', 'else', '"**/"', 'sections', '=', '"/**/"', '.', 'join', '(', 'str', '(', 'section', ')', 'for', 'section', 'in', 'self', '.', 'sections', ')', 'end', '=', '""', 'if', 'self', '.', 'bound_end', 'else', '"/**"', 'else', ':', 'start', '=', '""', 'sections', '=', '""', 'end', '=', '""', 'if', 'self', '.', 'bound_end', 'else', '"**"', 'return', '"{0}{1}{2}/{3}"', '.', 'format', '(', 'start', ',', 'sections', ',', 'end', ',', 'str', '(', 'self', '.', 'file_pattern', ')', ')'] | Implemented a function for __str__ and __repr__ to use, but
which prevents infinite recursion when migrating to Python 3 | ['Implemented', 'a', 'function', 'for', '__str__', 'and', '__repr__', 'to', 'use', 'but', 'which', 'prevents', 'infinite', 'recursion', 'when', 'migrating', 'to', 'Python', '3'] | train | https://github.com/wolfhong/formic/blob/0d81eb88dcbb6fa705194fc6ccf2993f4abbaa76/formic/formic.py#L579-L590 |
4,977 | 9b/frisbee | frisbee/__init__.py | Frisbee._dyn_loader | def _dyn_loader(self, module: str, kwargs: str):
"""Dynamically load a specific module instance."""
package_directory: str = os.path.dirname(os.path.abspath(__file__))
modules: str = package_directory + "/modules"
module = module + ".py"
if module not in os.listdir(modules):
raise Exception("Module %s is not valid" % module)
module_name: str = module[:-3]
import_path: str = "%s.%s" % (self.MODULE_PATH, module_name)
imported = import_module(import_path)
obj = getattr(imported, 'Module')
return obj(**kwargs) | python | def _dyn_loader(self, module: str, kwargs: str):
"""Dynamically load a specific module instance."""
package_directory: str = os.path.dirname(os.path.abspath(__file__))
modules: str = package_directory + "/modules"
module = module + ".py"
if module not in os.listdir(modules):
raise Exception("Module %s is not valid" % module)
module_name: str = module[:-3]
import_path: str = "%s.%s" % (self.MODULE_PATH, module_name)
imported = import_module(import_path)
obj = getattr(imported, 'Module')
return obj(**kwargs) | ['def', '_dyn_loader', '(', 'self', ',', 'module', ':', 'str', ',', 'kwargs', ':', 'str', ')', ':', 'package_directory', ':', 'str', '=', 'os', '.', 'path', '.', 'dirname', '(', 'os', '.', 'path', '.', 'abspath', '(', '__file__', ')', ')', 'modules', ':', 'str', '=', 'package_directory', '+', '"/modules"', 'module', '=', 'module', '+', '".py"', 'if', 'module', 'not', 'in', 'os', '.', 'listdir', '(', 'modules', ')', ':', 'raise', 'Exception', '(', '"Module %s is not valid"', '%', 'module', ')', 'module_name', ':', 'str', '=', 'module', '[', ':', '-', '3', ']', 'import_path', ':', 'str', '=', '"%s.%s"', '%', '(', 'self', '.', 'MODULE_PATH', ',', 'module_name', ')', 'imported', '=', 'import_module', '(', 'import_path', ')', 'obj', '=', 'getattr', '(', 'imported', ',', "'Module'", ')', 'return', 'obj', '(', '*', '*', 'kwargs', ')'] | Dynamically load a specific module instance. | ['Dynamically', 'load', 'a', 'specific', 'module', 'instance', '.'] | train | https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/__init__.py#L64-L75 |
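The loader above reduces to importlib.import_module plus getattr; a generic sketch of that pattern using only the standard library (the frisbee-specific module directory check is omitted).

from importlib import import_module

def load_class(import_path, class_name, **kwargs):
    # Import a module by dotted path, fetch a class from it, and instantiate it.
    module = import_module(import_path)
    cls = getattr(module, class_name)
    return cls(**kwargs)

# Example with the standard library: build a collections.Counter dynamically.
print(load_class("collections", "Counter", red=2, blue=1))   # Counter({'red': 2, 'blue': 1})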
4,978 | DreamLab/VmShepherd | src/vmshepherd/iaas/openstack_driver.py | OpenStackDriver._extract_ips | def _extract_ips(self, data):
'''
Extract ip addressess from openstack structure
{
'pl-krk-2-int-301-c2-int-1': [
{
'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:29:f1:bb',
'version': 4,
'addr': '10.185.138.36',
'OS-EXT-IPS:type': 'fixed'
}
]
}
:arg data: dict
:returns list
'''
result = []
for region in data.items():
for interface in region[1]:
result.append(interface['addr'])
return result | python | def _extract_ips(self, data):
'''
Extract ip addressess from openstack structure
{
'pl-krk-2-int-301-c2-int-1': [
{
'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:29:f1:bb',
'version': 4,
'addr': '10.185.138.36',
'OS-EXT-IPS:type': 'fixed'
}
]
}
:arg data: dict
:returns list
'''
result = []
for region in data.items():
for interface in region[1]:
result.append(interface['addr'])
return result | ['def', '_extract_ips', '(', 'self', ',', 'data', ')', ':', 'result', '=', '[', ']', 'for', 'region', 'in', 'data', '.', 'items', '(', ')', ':', 'for', 'interface', 'in', 'region', '[', '1', ']', ':', 'result', '.', 'append', '(', 'interface', '[', "'addr'", ']', ')', 'return', 'result'] | Extract ip addressess from openstack structure
{
'pl-krk-2-int-301-c2-int-1': [
{
'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:29:f1:bb',
'version': 4,
'addr': '10.185.138.36',
'OS-EXT-IPS:type': 'fixed'
}
]
}
:arg data: dict
:returns list | ['Extract', 'ip', 'addressess', 'from', 'openstack', 'structure', '{', 'pl', '-', 'krk', '-', '2', '-', 'int', '-', '301', '-', 'c2', '-', 'int', '-', '1', ':', '[', '{', 'OS', '-', 'EXT', '-', 'IPS', '-', 'MAC', ':', 'mac_addr', ':', 'fa', ':', '16', ':', '3e', ':', '29', ':', 'f1', ':', 'bb', 'version', ':', '4', 'addr', ':', '10', '.', '185', '.', '138', '.', '36', 'OS', '-', 'EXT', '-', 'IPS', ':', 'type', ':', 'fixed', '}', ']', '}', ':', 'arg', 'data', ':', 'dict', ':', 'returns', 'list'] | train | https://github.com/DreamLab/VmShepherd/blob/709a412c372b897d53808039c5c64a8b69c12c8d/src/vmshepherd/iaas/openstack_driver.py#L231-L251 |
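Using the exact structure quoted in the docstring above, the flattening behaviour can be checked directly; the function below repeats the same traversal outside the driver class.

def extract_ips(data):
    # Collect 'addr' from every interface of every network in the mapping.
    result = []
    for _network, interfaces in data.items():
        for interface in interfaces:
            result.append(interface["addr"])
    return result

addresses = {
    "pl-krk-2-int-301-c2-int-1": [
        {
            "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:29:f1:bb",
            "version": 4,
            "addr": "10.185.138.36",
            "OS-EXT-IPS:type": "fixed",
        }
    ]
}
print(extract_ips(addresses))   # ['10.185.138.36']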
4,979 | kyuupichan/aiorpcX | aiorpcx/jsonrpc.py | JSONRPC.notification_message | def notification_message(cls, item):
'''Convert an RPCRequest item to a message.'''
assert isinstance(item, Notification)
return cls.encode_payload(cls.request_payload(item, None)) | python | def notification_message(cls, item):
'''Convert an RPCRequest item to a message.'''
assert isinstance(item, Notification)
return cls.encode_payload(cls.request_payload(item, None)) | ['def', 'notification_message', '(', 'cls', ',', 'item', ')', ':', 'assert', 'isinstance', '(', 'item', ',', 'Notification', ')', 'return', 'cls', '.', 'encode_payload', '(', 'cls', '.', 'request_payload', '(', 'item', ',', 'None', ')', ')'] | Convert an RPCRequest item to a message. | ['Convert', 'an', 'RPCRequest', 'item', 'to', 'a', 'message', '.'] | train | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/jsonrpc.py#L290-L293 |
4,980 | Crunch-io/crunch-cube | src/cr/cube/distributions/wishart.py | WishartCDF.K | def K(self):
"""Normalizing constant for wishart CDF."""
K1 = np.float_power(pi, 0.5 * self.n_min * self.n_min)
K1 /= (
np.float_power(2, 0.5 * self.n_min * self._n_max)
* self._mgamma(0.5 * self._n_max, self.n_min)
* self._mgamma(0.5 * self.n_min, self.n_min)
)
K2 = np.float_power(
2, self.alpha * self.size + 0.5 * self.size * (self.size + 1)
)
for i in xrange(self.size):
K2 *= gamma(self.alpha + i + 1)
return K1 * K2 | python | def K(self):
"""Normalizing constant for wishart CDF."""
K1 = np.float_power(pi, 0.5 * self.n_min * self.n_min)
K1 /= (
np.float_power(2, 0.5 * self.n_min * self._n_max)
* self._mgamma(0.5 * self._n_max, self.n_min)
* self._mgamma(0.5 * self.n_min, self.n_min)
)
K2 = np.float_power(
2, self.alpha * self.size + 0.5 * self.size * (self.size + 1)
)
for i in xrange(self.size):
K2 *= gamma(self.alpha + i + 1)
return K1 * K2 | ['def', 'K', '(', 'self', ')', ':', 'K1', '=', 'np', '.', 'float_power', '(', 'pi', ',', '0.5', '*', 'self', '.', 'n_min', '*', 'self', '.', 'n_min', ')', 'K1', '/=', '(', 'np', '.', 'float_power', '(', '2', ',', '0.5', '*', 'self', '.', 'n_min', '*', 'self', '.', '_n_max', ')', '*', 'self', '.', '_mgamma', '(', '0.5', '*', 'self', '.', '_n_max', ',', 'self', '.', 'n_min', ')', '*', 'self', '.', '_mgamma', '(', '0.5', '*', 'self', '.', 'n_min', ',', 'self', '.', 'n_min', ')', ')', 'K2', '=', 'np', '.', 'float_power', '(', '2', ',', 'self', '.', 'alpha', '*', 'self', '.', 'size', '+', '0.5', '*', 'self', '.', 'size', '*', '(', 'self', '.', 'size', '+', '1', ')', ')', 'for', 'i', 'in', 'xrange', '(', 'self', '.', 'size', ')', ':', 'K2', '*=', 'gamma', '(', 'self', '.', 'alpha', '+', 'i', '+', '1', ')', 'return', 'K1', '*', 'K2'] | Normalizing constant for wishart CDF. | ['Normalizing', 'constant', 'for', 'wishart', 'CDF', '.'] | train | https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/distributions/wishart.py#L89-L103 |
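The _mgamma helper referenced above is not shown in this row; assuming it is the usual multivariate gamma function Gamma_m(a) = pi**(m*(m-1)/4) * prod_i Gamma(a + (1 - i)/2), the K1 factor of the normalizing constant can be reproduced in isolation as a rough check.

import math
import numpy as np

def mgamma(a, m):
    # Assumed equivalent of WishartCDF._mgamma: the multivariate gamma function.
    return math.pi ** (m * (m - 1) / 4.0) * np.prod(
        [math.gamma(a + (1 - i) / 2.0) for i in range(1, m + 1)])

def k1_factor(n_min, n_max):
    # The first factor of K, following the expression in the method above.
    return (np.float_power(np.pi, 0.5 * n_min * n_min)
            / (np.float_power(2.0, 0.5 * n_min * n_max)
               * mgamma(0.5 * n_max, n_min)
               * mgamma(0.5 * n_min, n_min)))

print(k1_factor(n_min=2, n_max=3))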
4,981 | zblz/naima | naima/plot.py | plot_samples | def plot_samples(
ax,
sampler,
modelidx=0,
sed=True,
n_samples=100,
e_unit=u.eV,
e_range=None,
e_npoints=100,
threads=None,
label=None,
last_step=False,
):
"""Plot a number of samples from the sampler chain.
Parameters
----------
ax : `matplotlib.Axes`
Axes to plot on.
sampler : `emcee.EnsembleSampler`
Sampler
modelidx : int, optional
Model index. Default is 0
sed : bool, optional
Whether to plot SED or differential spectrum. If `None`, the units of
the observed spectrum will be used.
n_samples : int, optional
Number of samples to plot. Default is 100.
e_unit : :class:`~astropy.units.Unit` or str parseable to unit
Unit in which to plot energy axis.
e_range : list of `~astropy.units.Quantity`, length 2, optional
Limits in energy for the computation of the model samples and ML model.
Note that setting this parameter will mean that the samples for the
model are recomputed and depending on the model speed might be quite
slow.
e_npoints : int, optional
How many points to compute for the model samples and ML model if
`e_range` is set.
threads : int, optional
How many parallel processing threads to use when computing the samples.
Defaults to the number of available cores.
last_step : bool, optional
Whether to only use the positions in the final step of the run (True,
default) or the whole chain (False).
"""
modelx, model = _read_or_calc_samples(
sampler,
modelidx,
last_step=last_step,
e_range=e_range,
e_npoints=e_npoints,
threads=threads,
)
# pick first model sample for units
f_unit, sedf = sed_conversion(modelx, model[0].unit, sed)
sample_alpha = min(5.0 / n_samples, 0.5)
for my in model[np.random.randint(len(model), size=n_samples)]:
ax.loglog(
modelx.to(e_unit).value,
(my * sedf).to(f_unit).value,
color=(0.1,) * 3,
alpha=sample_alpha,
lw=1.0,
)
_plot_MLmodel(ax, sampler, modelidx, e_range, e_npoints, e_unit, sed)
if label is not None:
ax.set_ylabel(
"{0} [{1}]".format(label, f_unit.to_string("latex_inline"))
) | python | def plot_samples(
ax,
sampler,
modelidx=0,
sed=True,
n_samples=100,
e_unit=u.eV,
e_range=None,
e_npoints=100,
threads=None,
label=None,
last_step=False,
):
"""Plot a number of samples from the sampler chain.
Parameters
----------
ax : `matplotlib.Axes`
Axes to plot on.
sampler : `emcee.EnsembleSampler`
Sampler
modelidx : int, optional
Model index. Default is 0
sed : bool, optional
Whether to plot SED or differential spectrum. If `None`, the units of
the observed spectrum will be used.
n_samples : int, optional
Number of samples to plot. Default is 100.
e_unit : :class:`~astropy.units.Unit` or str parseable to unit
Unit in which to plot energy axis.
e_range : list of `~astropy.units.Quantity`, length 2, optional
Limits in energy for the computation of the model samples and ML model.
Note that setting this parameter will mean that the samples for the
model are recomputed and depending on the model speed might be quite
slow.
e_npoints : int, optional
How many points to compute for the model samples and ML model if
`e_range` is set.
threads : int, optional
How many parallel processing threads to use when computing the samples.
Defaults to the number of available cores.
last_step : bool, optional
Whether to only use the positions in the final step of the run (True,
default) or the whole chain (False).
"""
modelx, model = _read_or_calc_samples(
sampler,
modelidx,
last_step=last_step,
e_range=e_range,
e_npoints=e_npoints,
threads=threads,
)
# pick first model sample for units
f_unit, sedf = sed_conversion(modelx, model[0].unit, sed)
sample_alpha = min(5.0 / n_samples, 0.5)
for my in model[np.random.randint(len(model), size=n_samples)]:
ax.loglog(
modelx.to(e_unit).value,
(my * sedf).to(f_unit).value,
color=(0.1,) * 3,
alpha=sample_alpha,
lw=1.0,
)
_plot_MLmodel(ax, sampler, modelidx, e_range, e_npoints, e_unit, sed)
if label is not None:
ax.set_ylabel(
"{0} [{1}]".format(label, f_unit.to_string("latex_inline"))
) | ['def', 'plot_samples', '(', 'ax', ',', 'sampler', ',', 'modelidx', '=', '0', ',', 'sed', '=', 'True', ',', 'n_samples', '=', '100', ',', 'e_unit', '=', 'u', '.', 'eV', ',', 'e_range', '=', 'None', ',', 'e_npoints', '=', '100', ',', 'threads', '=', 'None', ',', 'label', '=', 'None', ',', 'last_step', '=', 'False', ',', ')', ':', 'modelx', ',', 'model', '=', '_read_or_calc_samples', '(', 'sampler', ',', 'modelidx', ',', 'last_step', '=', 'last_step', ',', 'e_range', '=', 'e_range', ',', 'e_npoints', '=', 'e_npoints', ',', 'threads', '=', 'threads', ',', ')', '# pick first model sample for units', 'f_unit', ',', 'sedf', '=', 'sed_conversion', '(', 'modelx', ',', 'model', '[', '0', ']', '.', 'unit', ',', 'sed', ')', 'sample_alpha', '=', 'min', '(', '5.0', '/', 'n_samples', ',', '0.5', ')', 'for', 'my', 'in', 'model', '[', 'np', '.', 'random', '.', 'randint', '(', 'len', '(', 'model', ')', ',', 'size', '=', 'n_samples', ')', ']', ':', 'ax', '.', 'loglog', '(', 'modelx', '.', 'to', '(', 'e_unit', ')', '.', 'value', ',', '(', 'my', '*', 'sedf', ')', '.', 'to', '(', 'f_unit', ')', '.', 'value', ',', 'color', '=', '(', '0.1', ',', ')', '*', '3', ',', 'alpha', '=', 'sample_alpha', ',', 'lw', '=', '1.0', ',', ')', '_plot_MLmodel', '(', 'ax', ',', 'sampler', ',', 'modelidx', ',', 'e_range', ',', 'e_npoints', ',', 'e_unit', ',', 'sed', ')', 'if', 'label', 'is', 'not', 'None', ':', 'ax', '.', 'set_ylabel', '(', '"{0} [{1}]"', '.', 'format', '(', 'label', ',', 'f_unit', '.', 'to_string', '(', '"latex_inline"', ')', ')', ')'] | Plot a number of samples from the sampler chain.
Parameters
----------
ax : `matplotlib.Axes`
Axes to plot on.
sampler : `emcee.EnsembleSampler`
Sampler
modelidx : int, optional
Model index. Default is 0
sed : bool, optional
Whether to plot SED or differential spectrum. If `None`, the units of
the observed spectrum will be used.
n_samples : int, optional
Number of samples to plot. Default is 100.
e_unit : :class:`~astropy.units.Unit` or str parseable to unit
Unit in which to plot energy axis.
e_range : list of `~astropy.units.Quantity`, length 2, optional
Limits in energy for the computation of the model samples and ML model.
Note that setting this parameter will mean that the samples for the
model are recomputed and depending on the model speed might be quite
slow.
e_npoints : int, optional
How many points to compute for the model samples and ML model if
`e_range` is set.
threads : int, optional
How many parallel processing threads to use when computing the samples.
Defaults to the number of available cores.
last_step : bool, optional
Whether to only use the positions in the final step of the run (True,
default) or the whole chain (False). | ['Plot', 'a', 'number', 'of', 'samples', 'from', 'the', 'sampler', 'chain', '.'] | train | https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/plot.py#L628-L700 |
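Abridged call pattern for the plotting helper above: it draws onto an existing matplotlib Axes given an emcee sampler. The sampler argument is assumed to come from a previous naima fit (for example naima.run_sampler) and is not constructed here, so this is only an illustrative wrapper.

import matplotlib.pyplot as plt
from naima.plot import plot_samples   # module path taken from the row above

def plot_model_samples(sampler, outfile="model_samples.png"):
    # `sampler` is assumed to be the EnsembleSampler returned by an earlier fit.
    fig, ax = plt.subplots()
    plot_samples(ax, sampler, modelidx=0, sed=True, n_samples=100, label="Flux")
    fig.savefig(outfile)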
4,982 | CTPUG/wafer | wafer/schedule/admin.py | find_clashes | def find_clashes(all_items):
"""Find schedule items which clash (common slot and venue)"""
clashes = {}
seen_venue_slots = {}
for item in all_items:
for slot in item.slots.all():
pos = (item.venue, slot)
if pos in seen_venue_slots:
if seen_venue_slots[pos] not in clashes:
clashes[pos] = [seen_venue_slots[pos]]
clashes[pos].append(item)
else:
seen_venue_slots[pos] = item
# We return a list, to match other validators
return clashes.items() | python | def find_clashes(all_items):
"""Find schedule items which clash (common slot and venue)"""
clashes = {}
seen_venue_slots = {}
for item in all_items:
for slot in item.slots.all():
pos = (item.venue, slot)
if pos in seen_venue_slots:
if seen_venue_slots[pos] not in clashes:
clashes[pos] = [seen_venue_slots[pos]]
clashes[pos].append(item)
else:
seen_venue_slots[pos] = item
# We return a list, to match other validators
return clashes.items() | ['def', 'find_clashes', '(', 'all_items', ')', ':', 'clashes', '=', '{', '}', 'seen_venue_slots', '=', '{', '}', 'for', 'item', 'in', 'all_items', ':', 'for', 'slot', 'in', 'item', '.', 'slots', '.', 'all', '(', ')', ':', 'pos', '=', '(', 'item', '.', 'venue', ',', 'slot', ')', 'if', 'pos', 'in', 'seen_venue_slots', ':', 'if', 'seen_venue_slots', '[', 'pos', ']', 'not', 'in', 'clashes', ':', 'clashes', '[', 'pos', ']', '=', '[', 'seen_venue_slots', '[', 'pos', ']', ']', 'clashes', '[', 'pos', ']', '.', 'append', '(', 'item', ')', 'else', ':', 'seen_venue_slots', '[', 'pos', ']', '=', 'item', '# We return a list, to match other validators', 'return', 'clashes', '.', 'items', '(', ')'] | Find schedule items which clash (common slot and venue) | ['Find', 'schedule', 'items', 'which', 'clash', '(', 'common', 'slot', 'and', 'venue', ')'] | train | https://github.com/CTPUG/wafer/blob/a20af3c399267f76373dc342f4d542a9bc457c35/wafer/schedule/admin.py#L106-L120 |
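The clash detection above is ORM-agnostic at its core: it groups items by (venue, slot) and reports collisions. A plain-Python sketch with made-up items standing in for the Django model instances:

from collections import namedtuple

Item = namedtuple("Item", ["name", "venue", "slots"])

def find_clashes(all_items):
    # Two items clash when they share both a venue and a slot.
    clashes, seen = {}, {}
    for item in all_items:
        for slot in item.slots:
            pos = (item.venue, slot)
            if pos in seen:
                clashes.setdefault(pos, [seen[pos]]).append(item)
            else:
                seen[pos] = item
    return list(clashes.items())

items = [
    Item("Talk A", "Main hall", ["10:00"]),
    Item("Talk B", "Main hall", ["10:00", "11:00"]),   # clashes with Talk A at 10:00
    Item("Talk C", "Room 2", ["10:00"]),
]
for pos, clashing in find_clashes(items):
    print(pos, [i.name for i in clashing])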
4,983 | angr/angr | angr/analyses/vfg.py | VFG._save_function_final_state | def _save_function_final_state(self, function_key, function_address, state):
"""
Save the final state of a function, and merge it with existing ones if there are any.
:param FunctionKey function_key: The key to this function.
:param int function_address: Address of the function.
:param SimState state: Initial state of the function.
:return: None
"""
l.debug('Saving the final state for function %#08x with function key %s',
function_address,
function_key
)
if function_key in self._function_final_states[function_address]:
existing_state = self._function_final_states[function_address][function_key]
merged_state = existing_state.merge(state, plugin_whitelist=self._mergeable_plugins)[0]
self._function_final_states[function_address][function_key] = merged_state
else:
self._function_final_states[function_address][function_key] = state | python | def _save_function_final_state(self, function_key, function_address, state):
"""
Save the final state of a function, and merge it with existing ones if there are any.
:param FunctionKey function_key: The key to this function.
:param int function_address: Address of the function.
:param SimState state: Initial state of the function.
:return: None
"""
l.debug('Saving the final state for function %#08x with function key %s',
function_address,
function_key
)
if function_key in self._function_final_states[function_address]:
existing_state = self._function_final_states[function_address][function_key]
merged_state = existing_state.merge(state, plugin_whitelist=self._mergeable_plugins)[0]
self._function_final_states[function_address][function_key] = merged_state
else:
self._function_final_states[function_address][function_key] = state | ['def', '_save_function_final_state', '(', 'self', ',', 'function_key', ',', 'function_address', ',', 'state', ')', ':', 'l', '.', 'debug', '(', "'Saving the final state for function %#08x with function key %s'", ',', 'function_address', ',', 'function_key', ')', 'if', 'function_key', 'in', 'self', '.', '_function_final_states', '[', 'function_address', ']', ':', 'existing_state', '=', 'self', '.', '_function_final_states', '[', 'function_address', ']', '[', 'function_key', ']', 'merged_state', '=', 'existing_state', '.', 'merge', '(', 'state', ',', 'plugin_whitelist', '=', 'self', '.', '_mergeable_plugins', ')', '[', '0', ']', 'self', '.', '_function_final_states', '[', 'function_address', ']', '[', 'function_key', ']', '=', 'merged_state', 'else', ':', 'self', '.', '_function_final_states', '[', 'function_address', ']', '[', 'function_key', ']', '=', 'state'] | Save the final state of a function, and merge it with existing ones if there are any.
:param FunctionKey function_key: The key to this function.
:param int function_address: Address of the function.
:param SimState state: Initial state of the function.
:return: None | ['Save', 'the', 'final', 'state', 'of', 'a', 'function', 'and', 'merge', 'it', 'with', 'existing', 'ones', 'if', 'there', 'are', 'any', '.'] | train | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/vfg.py#L1682-L1703 |
4,984 | bitesofcode/projexui | projexui/widgets/xviewwidget/xview.py | XView.setMaximumSize | def setMaximumSize(self, *args):
"""
Sets the maximum size value to the inputed size and emits the \
sizeConstraintChanged signal.
:param *args | <tuple>
"""
super(XView, self).setMaximumSize(*args)
if ( not self.signalsBlocked() ):
self.sizeConstraintChanged.emit() | python | def setMaximumSize(self, *args):
"""
Sets the maximum size value to the inputed size and emits the \
sizeConstraintChanged signal.
:param *args | <tuple>
"""
super(XView, self).setMaximumSize(*args)
if ( not self.signalsBlocked() ):
self.sizeConstraintChanged.emit() | ['def', 'setMaximumSize', '(', 'self', ',', '*', 'args', ')', ':', 'super', '(', 'XView', ',', 'self', ')', '.', 'setMaximumSize', '(', '*', 'args', ')', 'if', '(', 'not', 'self', '.', 'signalsBlocked', '(', ')', ')', ':', 'self', '.', 'sizeConstraintChanged', '.', 'emit', '(', ')'] | Sets the maximum size value to the inputed size and emits the \
sizeConstraintChanged signal.
:param *args | <tuple> | ['Sets', 'the', 'maximum', 'size', 'value', 'to', 'the', 'inputed', 'size', 'and', 'emits', 'the', '\\', 'sizeConstraintChanged', 'signal', '.', ':', 'param', '*', 'args', '|', '<tuple', '>'] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xview.py#L480-L490 |
4,985 | lk-geimfari/mimesis | mimesis/providers/person.py | Person.social_media_profile | def social_media_profile(self,
site: Optional[SocialNetwork] = None) -> str:
"""Generate profile for random social network.
:return: Profile in some network.
:Example:
http://facebook.com/some_user
"""
key = self._validate_enum(site, SocialNetwork)
website = SOCIAL_NETWORKS[key]
url = 'https://www.' + website
return url.format(self.username()) | python | def social_media_profile(self,
site: Optional[SocialNetwork] = None) -> str:
"""Generate profile for random social network.
:return: Profile in some network.
:Example:
http://facebook.com/some_user
"""
key = self._validate_enum(site, SocialNetwork)
website = SOCIAL_NETWORKS[key]
url = 'https://www.' + website
return url.format(self.username()) | ['def', 'social_media_profile', '(', 'self', ',', 'site', ':', 'Optional', '[', 'SocialNetwork', ']', '=', 'None', ')', '->', 'str', ':', 'key', '=', 'self', '.', '_validate_enum', '(', 'site', ',', 'SocialNetwork', ')', 'website', '=', 'SOCIAL_NETWORKS', '[', 'key', ']', 'url', '=', "'https://www.'", '+', 'website', 'return', 'url', '.', 'format', '(', 'self', '.', 'username', '(', ')', ')'] | Generate profile for random social network.
:return: Profile in some network.
:Example:
http://facebook.com/some_user | ['Generate', 'profile', 'for', 'random', 'social', 'network', '.'] | train | https://github.com/lk-geimfari/mimesis/blob/4b16ee7a8dba6281a904654a88dbb4b052869fc5/mimesis/providers/person.py#L253-L265 |
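Typical usage goes through the Person provider. The enum import path and the FACEBOOK member name below are assumptions based on the _validate_enum(site, SocialNetwork) call in the row above and may differ between mimesis versions.

from mimesis import Person
from mimesis.enums import SocialNetwork   # assumed location of the enum

person = Person()
print(person.social_media_profile())                             # profile on a random network
print(person.social_media_profile(site=SocialNetwork.FACEBOOK))  # profile on a specific network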
4,986 | riverrun/drat | drat/analysis.py | Checktext.pre_check | def pre_check(self, data):
"""Count chars, words and sentences in the text."""
sentences = len(re.findall('[\.!?]+\W+', data)) or 1
chars = len(data) - len(re.findall('[^a-zA-Z0-9]', data))
num_words = len(re.findall('\s+', data))
data = re.split('[^a-zA-Z]+', data)
return data, sentences, chars, num_words | python | def pre_check(self, data):
"""Count chars, words and sentences in the text."""
sentences = len(re.findall('[\.!?]+\W+', data)) or 1
chars = len(data) - len(re.findall('[^a-zA-Z0-9]', data))
num_words = len(re.findall('\s+', data))
data = re.split('[^a-zA-Z]+', data)
return data, sentences, chars, num_words | ['def', 'pre_check', '(', 'self', ',', 'data', ')', ':', 'sentences', '=', 'len', '(', 're', '.', 'findall', '(', "'[\\.!?]+\\W+'", ',', 'data', ')', ')', 'or', '1', 'chars', '=', 'len', '(', 'data', ')', '-', 'len', '(', 're', '.', 'findall', '(', "'[^a-zA-Z0-9]'", ',', 'data', ')', ')', 'num_words', '=', 'len', '(', 're', '.', 'findall', '(', "'\\s+'", ',', 'data', ')', ')', 'data', '=', 're', '.', 'split', '(', "'[^a-zA-Z]+'", ',', 'data', ')', 'return', 'data', ',', 'sentences', ',', 'chars', ',', 'num_words'] | Count chars, words and sentences in the text. | ['Count', 'chars', 'words', 'and', 'sentences', 'in', 'the', 'text', '.'] | train | https://github.com/riverrun/drat/blob/50cbbf69c022b6ca6641cd55386813b0695c21f5/drat/analysis.py#L52-L58 |
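A quick run of the counting logic on a throwaway string, with the regexes copied from the method above; the sample sentence is invented for the demo.

import re

def pre_check(data):
    # Same counts as Checktext.pre_check: sentence boundaries, alphanumeric chars, whitespace runs.
    sentences = len(re.findall(r'[\.!?]+\W+', data)) or 1
    chars = len(data) - len(re.findall(r'[^a-zA-Z0-9]', data))
    num_words = len(re.findall(r'\s+', data))
    words = re.split(r'[^a-zA-Z]+', data)
    return words, sentences, chars, num_words

words, sentences, chars, num_words = pre_check(
    "Readability matters. Short sentences help! Does this work?")
print(sentences, chars, num_words)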
4,987 | ValvePython/steam | steam/client/builtins/gameservers.py | SteamGameServers.get_ips_from_steamids | def get_ips_from_steamids(self, server_steam_ids, timeout=30):
"""Resolve IPs from SteamIDs
:param server_steam_ids: a list of steamids
:type server_steam_ids: list
:param timeout: (optional) timeout for request in seconds
:type timeout: int
:return: map of ips to steamids
:rtype: dict
:raises: :class:`.UnifiedMessageError`
Sample response:
.. code:: python
{SteamID(id=123456, type='AnonGameServer', universe='Public', instance=1234): '1.2.3.4:27060'}
"""
resp, error = self._um.send_and_wait("GameServers.GetServerIPsBySteamID#1",
{"server_steamids": server_steam_ids},
timeout=timeout,
)
if error:
raise error
if resp is None:
return None
return {SteamID(server.steamid): server.addr for server in resp.servers} | python | def get_ips_from_steamids(self, server_steam_ids, timeout=30):
"""Resolve IPs from SteamIDs
:param server_steam_ids: a list of steamids
:type server_steam_ids: list
:param timeout: (optional) timeout for request in seconds
:type timeout: int
:return: map of ips to steamids
:rtype: dict
:raises: :class:`.UnifiedMessageError`
Sample response:
.. code:: python
{SteamID(id=123456, type='AnonGameServer', universe='Public', instance=1234): '1.2.3.4:27060'}
"""
resp, error = self._um.send_and_wait("GameServers.GetServerIPsBySteamID#1",
{"server_steamids": server_steam_ids},
timeout=timeout,
)
if error:
raise error
if resp is None:
return None
return {SteamID(server.steamid): server.addr for server in resp.servers} | ['def', 'get_ips_from_steamids', '(', 'self', ',', 'server_steam_ids', ',', 'timeout', '=', '30', ')', ':', 'resp', ',', 'error', '=', 'self', '.', '_um', '.', 'send_and_wait', '(', '"GameServers.GetServerIPsBySteamID#1"', ',', '{', '"server_steamids"', ':', 'server_steam_ids', '}', ',', 'timeout', '=', 'timeout', ',', ')', 'if', 'error', ':', 'raise', 'error', 'if', 'resp', 'is', 'None', ':', 'return', 'None', 'return', '{', 'SteamID', '(', 'server', '.', 'steamid', ')', ':', 'server', '.', 'addr', 'for', 'server', 'in', 'resp', '.', 'servers', '}'] | Resolve IPs from SteamIDs
:param server_steam_ids: a list of steamids
:type server_steam_ids: list
:param timeout: (optional) timeout for request in seconds
:type timeout: int
:return: map of ips to steamids
:rtype: dict
:raises: :class:`.UnifiedMessageError`
Sample response:
.. code:: python
{SteamID(id=123456, type='AnonGameServer', universe='Public', instance=1234): '1.2.3.4:27060'} | ['Resolve', 'IPs', 'from', 'SteamIDs'] | train | https://github.com/ValvePython/steam/blob/2de1364c47598410b572114e6129eab8fff71d5b/steam/client/builtins/gameservers.py#L169-L195 |
4,988 | secure-systems-lab/securesystemslib | securesystemslib/pyca_crypto_keys.py | _encrypt | def _encrypt(key_data, derived_key_information):
"""
Encrypt 'key_data' using the Advanced Encryption Standard (AES-256) algorithm.
'derived_key_information' should contain a key strengthened by PBKDF2. The
key size is 256 bits and AES's mode of operation is set to CTR (CounTeR Mode).
The HMAC of the ciphertext is generated to ensure the ciphertext has not been
modified.
'key_data' is the JSON string representation of the key. In the case
of RSA keys, this format would be 'securesystemslib.formats.RSAKEY_SCHEMA':
{'keytype': 'rsa',
'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...',
'private': '-----BEGIN RSA PRIVATE KEY----- ...'}}
'derived_key_information' is a dictionary of the form:
{'salt': '...',
'derived_key': '...',
'iterations': '...'}
'securesystemslib.exceptions.CryptoError' raised if the encryption fails.
"""
# Generate a random Initialization Vector (IV). Follow the provably secure
# encrypt-then-MAC approach, which affords the ability to verify ciphertext
# without needing to decrypt it and preventing an attacker from feeding the
# block cipher malicious data. Modes like GCM provide both encryption and
# authentication, whereas CTR only provides encryption.
# Generate a random 128-bit IV. Random bits of data is needed for salts and
# initialization vectors suitable for the encryption algorithms used in
# 'pyca_crypto_keys.py'.
iv = os.urandom(16)
# Construct an AES-CTR Cipher object with the given key and a randomly
# generated IV.
symmetric_key = derived_key_information['derived_key']
encryptor = Cipher(algorithms.AES(symmetric_key), modes.CTR(iv),
backend=default_backend()).encryptor()
# Encrypt the plaintext and get the associated ciphertext.
# Do we need to check for any exceptions?
ciphertext = encryptor.update(key_data.encode('utf-8')) + encryptor.finalize()
# Generate the hmac of the ciphertext to ensure it has not been modified.
# The decryption routine may verify a ciphertext without having to perform
# a decryption operation.
symmetric_key = derived_key_information['derived_key']
salt = derived_key_information['salt']
hmac_object = \
cryptography.hazmat.primitives.hmac.HMAC(symmetric_key, hashes.SHA256(),
backend=default_backend())
hmac_object.update(ciphertext)
hmac_value = binascii.hexlify(hmac_object.finalize())
# Store the number of PBKDF2 iterations used to derive the symmetric key so
# that the decryption routine can regenerate the symmetric key successfully.
# The PBKDF2 iterations are allowed to vary for the keys loaded and saved.
iterations = derived_key_information['iterations']
# Return the salt, iterations, hmac, initialization vector, and ciphertext
# as a single string. These five values are delimited by
# '_ENCRYPTION_DELIMITER' to make extraction easier. This delimiter is
# arbitrarily chosen and should not occur in the hexadecimal representations
# of the fields it is separating.
return binascii.hexlify(salt).decode() + _ENCRYPTION_DELIMITER + \
str(iterations) + _ENCRYPTION_DELIMITER + \
hmac_value.decode() + _ENCRYPTION_DELIMITER + \
binascii.hexlify(iv).decode() + _ENCRYPTION_DELIMITER + \
binascii.hexlify(ciphertext).decode() | python | def _encrypt(key_data, derived_key_information):
"""
Encrypt 'key_data' using the Advanced Encryption Standard (AES-256) algorithm.
'derived_key_information' should contain a key strengthened by PBKDF2. The
key size is 256 bits and AES's mode of operation is set to CTR (CounTeR Mode).
The HMAC of the ciphertext is generated to ensure the ciphertext has not been
modified.
'key_data' is the JSON string representation of the key. In the case
of RSA keys, this format would be 'securesystemslib.formats.RSAKEY_SCHEMA':
{'keytype': 'rsa',
'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...',
'private': '-----BEGIN RSA PRIVATE KEY----- ...'}}
'derived_key_information' is a dictionary of the form:
{'salt': '...',
'derived_key': '...',
'iterations': '...'}
'securesystemslib.exceptions.CryptoError' raised if the encryption fails.
"""
# Generate a random Initialization Vector (IV). Follow the provably secure
# encrypt-then-MAC approach, which affords the ability to verify ciphertext
# without needing to decrypt it and preventing an attacker from feeding the
# block cipher malicious data. Modes like GCM provide both encryption and
# authentication, whereas CTR only provides encryption.
# Generate a random 128-bit IV. Random bits of data is needed for salts and
# initialization vectors suitable for the encryption algorithms used in
# 'pyca_crypto_keys.py'.
iv = os.urandom(16)
# Construct an AES-CTR Cipher object with the given key and a randomly
# generated IV.
symmetric_key = derived_key_information['derived_key']
encryptor = Cipher(algorithms.AES(symmetric_key), modes.CTR(iv),
backend=default_backend()).encryptor()
# Encrypt the plaintext and get the associated ciphertext.
# Do we need to check for any exceptions?
ciphertext = encryptor.update(key_data.encode('utf-8')) + encryptor.finalize()
# Generate the hmac of the ciphertext to ensure it has not been modified.
# The decryption routine may verify a ciphertext without having to perform
# a decryption operation.
symmetric_key = derived_key_information['derived_key']
salt = derived_key_information['salt']
hmac_object = \
cryptography.hazmat.primitives.hmac.HMAC(symmetric_key, hashes.SHA256(),
backend=default_backend())
hmac_object.update(ciphertext)
hmac_value = binascii.hexlify(hmac_object.finalize())
# Store the number of PBKDF2 iterations used to derive the symmetric key so
# that the decryption routine can regenerate the symmetric key successfully.
# The PBKDF2 iterations are allowed to vary for the keys loaded and saved.
iterations = derived_key_information['iterations']
# Return the salt, iterations, hmac, initialization vector, and ciphertext
# as a single string. These five values are delimited by
# '_ENCRYPTION_DELIMITER' to make extraction easier. This delimiter is
# arbitrarily chosen and should not occur in the hexadecimal representations
# of the fields it is separating.
return binascii.hexlify(salt).decode() + _ENCRYPTION_DELIMITER + \
str(iterations) + _ENCRYPTION_DELIMITER + \
hmac_value.decode() + _ENCRYPTION_DELIMITER + \
binascii.hexlify(iv).decode() + _ENCRYPTION_DELIMITER + \
binascii.hexlify(ciphertext).decode() | ['def', '_encrypt', '(', 'key_data', ',', 'derived_key_information', ')', ':', '# Generate a random Initialization Vector (IV). Follow the provably secure', '# encrypt-then-MAC approach, which affords the ability to verify ciphertext', '# without needing to decrypt it and preventing an attacker from feeding the', '# block cipher malicious data. Modes like GCM provide both encryption and', '# authentication, whereas CTR only provides encryption.', '# Generate a random 128-bit IV. Random bits of data is needed for salts and', '# initialization vectors suitable for the encryption algorithms used in', "# 'pyca_crypto_keys.py'.", 'iv', '=', 'os', '.', 'urandom', '(', '16', ')', '# Construct an AES-CTR Cipher object with the given key and a randomly', '# generated IV.', 'symmetric_key', '=', 'derived_key_information', '[', "'derived_key'", ']', 'encryptor', '=', 'Cipher', '(', 'algorithms', '.', 'AES', '(', 'symmetric_key', ')', ',', 'modes', '.', 'CTR', '(', 'iv', ')', ',', 'backend', '=', 'default_backend', '(', ')', ')', '.', 'encryptor', '(', ')', '# Encrypt the plaintext and get the associated ciphertext.', '# Do we need to check for any exceptions?', 'ciphertext', '=', 'encryptor', '.', 'update', '(', 'key_data', '.', 'encode', '(', "'utf-8'", ')', ')', '+', 'encryptor', '.', 'finalize', '(', ')', '# Generate the hmac of the ciphertext to ensure it has not been modified.', '# The decryption routine may verify a ciphertext without having to perform', '# a decryption operation.', 'symmetric_key', '=', 'derived_key_information', '[', "'derived_key'", ']', 'salt', '=', 'derived_key_information', '[', "'salt'", ']', 'hmac_object', '=', 'cryptography', '.', 'hazmat', '.', 'primitives', '.', 'hmac', '.', 'HMAC', '(', 'symmetric_key', ',', 'hashes', '.', 'SHA256', '(', ')', ',', 'backend', '=', 'default_backend', '(', ')', ')', 'hmac_object', '.', 'update', '(', 'ciphertext', ')', 'hmac_value', '=', 'binascii', '.', 'hexlify', '(', 'hmac_object', '.', 'finalize', '(', ')', ')', '# Store the number of PBKDF2 iterations used to derive the symmetric key so', '# that the decryption routine can regenerate the symmetric key successfully.', '# The PBKDF2 iterations are allowed to vary for the keys loaded and saved.', 'iterations', '=', 'derived_key_information', '[', "'iterations'", ']', '# Return the salt, iterations, hmac, initialization vector, and ciphertext', '# as a single string. These five values are delimited by', "# '_ENCRYPTION_DELIMITER' to make extraction easier. This delimiter is", '# arbitrarily chosen and should not occur in the hexadecimal representations', '# of the fields it is separating.', 'return', 'binascii', '.', 'hexlify', '(', 'salt', ')', '.', 'decode', '(', ')', '+', '_ENCRYPTION_DELIMITER', '+', 'str', '(', 'iterations', ')', '+', '_ENCRYPTION_DELIMITER', '+', 'hmac_value', '.', 'decode', '(', ')', '+', '_ENCRYPTION_DELIMITER', '+', 'binascii', '.', 'hexlify', '(', 'iv', ')', '.', 'decode', '(', ')', '+', '_ENCRYPTION_DELIMITER', '+', 'binascii', '.', 'hexlify', '(', 'ciphertext', ')', '.', 'decode', '(', ')'] | Encrypt 'key_data' using the Advanced Encryption Standard (AES-256) algorithm.
'derived_key_information' should contain a key strengthened by PBKDF2. The
key size is 256 bits and AES's mode of operation is set to CTR (CounTeR Mode).
The HMAC of the ciphertext is generated to ensure the ciphertext has not been
modified.
'key_data' is the JSON string representation of the key. In the case
of RSA keys, this format would be 'securesystemslib.formats.RSAKEY_SCHEMA':
{'keytype': 'rsa',
'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...',
'private': '-----BEGIN RSA PRIVATE KEY----- ...'}}
'derived_key_information' is a dictionary of the form:
{'salt': '...',
'derived_key': '...',
'iterations': '...'}
'securesystemslib.exceptions.CryptoError' raised if the encryption fails. | ['Encrypt', 'key_data', 'using', 'the', 'Advanced', 'Encryption', 'Standard', '(', 'AES', '-', '256', ')', 'algorithm', '.', 'derived_key_information', 'should', 'contain', 'a', 'key', 'strengthened', 'by', 'PBKDF2', '.', 'The', 'key', 'size', 'is', '256', 'bits', 'and', 'AES', 's', 'mode', 'of', 'operation', 'is', 'set', 'to', 'CTR', '(', 'CounTeR', 'Mode', ')', '.', 'The', 'HMAC', 'of', 'the', 'ciphertext', 'is', 'generated', 'to', 'ensure', 'the', 'ciphertext', 'has', 'not', 'been', 'modified', '.'] | train | https://github.com/secure-systems-lab/securesystemslib/blob/beb3109d5bb462e5a60eed88fb40ed1167bd354e/securesystemslib/pyca_crypto_keys.py#L870-L939 |
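The record above walks through an encrypt-then-MAC construction built on pyca/cryptography. A minimal standalone sketch of the same pattern follows; os.urandom(32) stands in for the PBKDF2-derived key that 'pyca_crypto_keys.py' would supply, and the plaintext is a placeholder key string.

import os
import binascii
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, hmac
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

symmetric_key = os.urandom(32)   # stand-in for the derived key (32 bytes = AES-256)
iv = os.urandom(16)              # fresh 128-bit counter-mode IV
encryptor = Cipher(algorithms.AES(symmetric_key), modes.CTR(iv),
                   backend=default_backend()).encryptor()
ciphertext = encryptor.update(b'{"keytype": "rsa"}') + encryptor.finalize()
# MAC the ciphertext so it can be verified before any decryption is attempted.
hmac_object = hmac.HMAC(symmetric_key, hashes.SHA256(), backend=default_backend())
hmac_object.update(ciphertext)
hmac_value = binascii.hexlify(hmac_object.finalize())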
4,989 | gem/oq-engine | openquake/hmtk/sources/area_source.py | mtkAreaSource.create_oqhazardlib_source | def create_oqhazardlib_source(self, tom, mesh_spacing, area_discretisation,
use_defaults=False):
"""
Converts the source model into an instance of the :class:
openquake.hazardlib.source.area.AreaSource
:param tom:
Temporal Occurrence model as instance of :class:
openquake.hazardlib.tom.TOM
:param float mesh_spacing:
Mesh spacing
"""
if not self.mfd:
raise ValueError("Cannot write to hazardlib without MFD")
return AreaSource(
self.id,
self.name,
self.trt,
self.mfd,
mesh_spacing,
conv.mag_scale_rel_to_hazardlib(self.mag_scale_rel, use_defaults),
conv.render_aspect_ratio(self.rupt_aspect_ratio, use_defaults),
tom,
self.upper_depth,
self.lower_depth,
conv.npd_to_pmf(self.nodal_plane_dist, use_defaults),
conv.hdd_to_pmf(self.hypo_depth_dist, use_defaults),
self.geometry,
area_discretisation) | python | def create_oqhazardlib_source(self, tom, mesh_spacing, area_discretisation,
use_defaults=False):
"""
Converts the source model into an instance of the :class:
openquake.hazardlib.source.area.AreaSource
:param tom:
Temporal Occurrence model as instance of :class:
openquake.hazardlib.tom.TOM
:param float mesh_spacing:
Mesh spacing
"""
if not self.mfd:
raise ValueError("Cannot write to hazardlib without MFD")
return AreaSource(
self.id,
self.name,
self.trt,
self.mfd,
mesh_spacing,
conv.mag_scale_rel_to_hazardlib(self.mag_scale_rel, use_defaults),
conv.render_aspect_ratio(self.rupt_aspect_ratio, use_defaults),
tom,
self.upper_depth,
self.lower_depth,
conv.npd_to_pmf(self.nodal_plane_dist, use_defaults),
conv.hdd_to_pmf(self.hypo_depth_dist, use_defaults),
self.geometry,
area_discretisation) | ['def', 'create_oqhazardlib_source', '(', 'self', ',', 'tom', ',', 'mesh_spacing', ',', 'area_discretisation', ',', 'use_defaults', '=', 'False', ')', ':', 'if', 'not', 'self', '.', 'mfd', ':', 'raise', 'ValueError', '(', '"Cannot write to hazardlib without MFD"', ')', 'return', 'AreaSource', '(', 'self', '.', 'id', ',', 'self', '.', 'name', ',', 'self', '.', 'trt', ',', 'self', '.', 'mfd', ',', 'mesh_spacing', ',', 'conv', '.', 'mag_scale_rel_to_hazardlib', '(', 'self', '.', 'mag_scale_rel', ',', 'use_defaults', ')', ',', 'conv', '.', 'render_aspect_ratio', '(', 'self', '.', 'rupt_aspect_ratio', ',', 'use_defaults', ')', ',', 'tom', ',', 'self', '.', 'upper_depth', ',', 'self', '.', 'lower_depth', ',', 'conv', '.', 'npd_to_pmf', '(', 'self', '.', 'nodal_plane_dist', ',', 'use_defaults', ')', ',', 'conv', '.', 'hdd_to_pmf', '(', 'self', '.', 'hypo_depth_dist', ',', 'use_defaults', ')', ',', 'self', '.', 'geometry', ',', 'area_discretisation', ')'] | Converts the source model into an instance of the :class:
openquake.hazardlib.source.area.AreaSource
:param tom:
Temporal Occurrence model as instance of :class:
openquake.hazardlib.tom.TOM
:param float mesh_spacing:
Mesh spacing | ['Converts', 'the', 'source', 'model', 'into', 'an', 'instance', 'of', 'the', ':', 'class', ':', 'openquake', '.', 'hazardlib', '.', 'source', '.', 'area', '.', 'AreaSource'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/sources/area_source.py#L204-L232 |
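A hedged sketch of driving the converter above; it assumes 'area_source' is an mtkAreaSource that already has an MFD and geometry attached, and borrows a Poisson temporal occurrence model from openquake.hazardlib. The numeric values are illustrative only.

from openquake.hazardlib.tom import PoissonTOM

tom = PoissonTOM(50.0)   # assumed 50-year investigation time
# 1 km rupture mesh, 10 km zone discretisation; use_defaults fills any missing
# nodal-plane / hypocentral-depth distributions.
oq_source = area_source.create_oqhazardlib_source(
    tom, mesh_spacing=1.0, area_discretisation=10.0, use_defaults=True)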
4,990 | kytos/kytos-utils | kytos/utils/napps.py | NAppsManager.enable | def enable(self):
"""Enable a NApp if not already enabled.
Raises:
FileNotFoundError: If NApp is not installed.
PermissionError: No filesystem permission to enable NApp.
"""
core_napps_manager = CoreNAppsManager(base_path=self._enabled)
core_napps_manager.enable(self.user, self.napp) | python | def enable(self):
"""Enable a NApp if not already enabled.
Raises:
FileNotFoundError: If NApp is not installed.
PermissionError: No filesystem permission to enable NApp.
"""
core_napps_manager = CoreNAppsManager(base_path=self._enabled)
core_napps_manager.enable(self.user, self.napp) | ['def', 'enable', '(', 'self', ')', ':', 'core_napps_manager', '=', 'CoreNAppsManager', '(', 'base_path', '=', 'self', '.', '_enabled', ')', 'core_napps_manager', '.', 'enable', '(', 'self', '.', 'user', ',', 'self', '.', 'napp', ')'] | Enable a NApp if not already enabled.
Raises:
FileNotFoundError: If NApp is not installed.
PermissionError: No filesystem permission to enable NApp. | ['Enable', 'a', 'NApp', 'if', 'not', 'already', 'enabled', '.'] | train | https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/utils/napps.py#L188-L197 |
4,991 | rossant/ipymd | ipymd/lib/opendocument.py | ODFDocument.container | def container(self, cls, **kwargs):
"""Container context manager."""
self.start_container(cls, **kwargs)
yield
self.end_container() | python | def container(self, cls, **kwargs):
"""Container context manager."""
self.start_container(cls, **kwargs)
yield
self.end_container() | ['def', 'container', '(', 'self', ',', 'cls', ',', '*', '*', 'kwargs', ')', ':', 'self', '.', 'start_container', '(', 'cls', ',', '*', '*', 'kwargs', ')', 'yield', 'self', '.', 'end_container', '(', ')'] | Container context manager. | ['Container', 'context', 'manager', '.'] | train | https://github.com/rossant/ipymd/blob/d87c9ebc59d67fe78b0139ee00e0e5307682e303/ipymd/lib/opendocument.py#L471-L475 |
4,992 | log2timeline/plaso | tools/image_export.py | Main | def Main():
"""The main function.
Returns:
bool: True if successful or False otherwise.
"""
tool = image_export_tool.ImageExportTool()
if not tool.ParseArguments():
return False
if tool.list_signature_identifiers:
tool.ListSignatureIdentifiers()
return True
if not tool.has_filters:
logging.warning('No filter defined exporting all files.')
# TODO: print more status information like PrintOptions.
tool.PrintFilterCollection()
try:
tool.ProcessSources()
except (KeyboardInterrupt, errors.UserAbort):
logging.warning('Aborted by user.')
return False
except errors.BadConfigOption as exception:
logging.warning(exception)
return False
except errors.SourceScannerError as exception:
logging.warning((
'Unable to scan for a supported filesystem with error: {0!s}\n'
'Most likely the image format is not supported by the '
'tool.').format(exception))
return False
return True | python | def Main():
"""The main function.
Returns:
bool: True if successful or False otherwise.
"""
tool = image_export_tool.ImageExportTool()
if not tool.ParseArguments():
return False
if tool.list_signature_identifiers:
tool.ListSignatureIdentifiers()
return True
if not tool.has_filters:
logging.warning('No filter defined exporting all files.')
# TODO: print more status information like PrintOptions.
tool.PrintFilterCollection()
try:
tool.ProcessSources()
except (KeyboardInterrupt, errors.UserAbort):
logging.warning('Aborted by user.')
return False
except errors.BadConfigOption as exception:
logging.warning(exception)
return False
except errors.SourceScannerError as exception:
logging.warning((
'Unable to scan for a supported filesystem with error: {0!s}\n'
'Most likely the image format is not supported by the '
'tool.').format(exception))
return False
return True | ['def', 'Main', '(', ')', ':', 'tool', '=', 'image_export_tool', '.', 'ImageExportTool', '(', ')', 'if', 'not', 'tool', '.', 'ParseArguments', '(', ')', ':', 'return', 'False', 'if', 'tool', '.', 'list_signature_identifiers', ':', 'tool', '.', 'ListSignatureIdentifiers', '(', ')', 'return', 'True', 'if', 'not', 'tool', '.', 'has_filters', ':', 'logging', '.', 'warning', '(', "'No filter defined exporting all files.'", ')', '# TODO: print more status information like PrintOptions.', 'tool', '.', 'PrintFilterCollection', '(', ')', 'try', ':', 'tool', '.', 'ProcessSources', '(', ')', 'except', '(', 'KeyboardInterrupt', ',', 'errors', '.', 'UserAbort', ')', ':', 'logging', '.', 'warning', '(', "'Aborted by user.'", ')', 'return', 'False', 'except', 'errors', '.', 'BadConfigOption', 'as', 'exception', ':', 'logging', '.', 'warning', '(', 'exception', ')', 'return', 'False', 'except', 'errors', '.', 'SourceScannerError', 'as', 'exception', ':', 'logging', '.', 'warning', '(', '(', "'Unable to scan for a supported filesystem with error: {0!s}\\n'", "'Most likely the image format is not supported by the '", "'tool.'", ')', '.', 'format', '(', 'exception', ')', ')', 'return', 'False', 'return', 'True'] | The main function.
Returns:
bool: True if successful or False otherwise. | ['The', 'main', 'function', '.'] | train | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/tools/image_export.py#L14-L53 |
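Main() returns a bool rather than exiting itself, so a command-line entry point for the tool would look roughly like this:

import sys

if __name__ == '__main__':
    if not Main():
        sys.exit(1)
    sys.exit(0)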
4,993 | MacHu-GWU/pyknackhq-project | pyknackhq/client.py | Collection.delete_one | def delete_one(self, id_):
"""Delete one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#delete
:param id_: record id_
**中文文档**
删除一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.delete(url)
return res | python | def delete_one(self, id_):
"""Delete one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#delete
:param id_: record id_
**中文文档**
删除一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.delete(url)
return res | ['def', 'delete_one', '(', 'self', ',', 'id_', ')', ':', 'url', '=', '"https://api.knackhq.com/v1/objects/%s/records/%s"', '%', '(', 'self', '.', 'key', ',', 'id_', ')', 'res', '=', 'self', '.', 'delete', '(', 'url', ')', 'return', 'res'] | Delete one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#delete
:param id_: record id_
**中文文档**
删除一条记录 | ['Delete', 'one', 'record', '.', 'Ref', ':', 'http', ':', '//', 'helpdesk', '.', 'knackhq', '.', 'com', '/', 'support', '/', 'solutions', '/', 'articles', '/', '5000446111', '-', 'api', '-', 'reference', '-', 'root', '-', 'access#delete', ':', 'param', 'id_', ':', 'record', 'id_', '**', '中文文档', '**', '删除一条记录'] | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L278-L292 |
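Usage of the method above is a single call once a collection handle exists; 'collection' is assumed to be a pyknackhq Collection already bound to a Knack object, and the record id is a placeholder.

res = collection.delete_one('5a1b2c3d4e5f6a7b8c9d0e1f')  # placeholder record id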
4,994 | saltstack/salt | salt/modules/snapper.py | list_configs | def list_configs():
'''
List all available configs
CLI example:
.. code-block:: bash
salt '*' snapper.list_configs
'''
try:
configs = snapper.ListConfigs()
return dict((config[0], config[2]) for config in configs)
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while listing configurations: {0}'
.format(_dbus_exception_to_reason(exc, locals()))
) | python | def list_configs():
'''
List all available configs
CLI example:
.. code-block:: bash
salt '*' snapper.list_configs
'''
try:
configs = snapper.ListConfigs()
return dict((config[0], config[2]) for config in configs)
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while listing configurations: {0}'
.format(_dbus_exception_to_reason(exc, locals()))
) | ['def', 'list_configs', '(', ')', ':', 'try', ':', 'configs', '=', 'snapper', '.', 'ListConfigs', '(', ')', 'return', 'dict', '(', '(', 'config', '[', '0', ']', ',', 'config', '[', '2', ']', ')', 'for', 'config', 'in', 'configs', ')', 'except', 'dbus', '.', 'DBusException', 'as', 'exc', ':', 'raise', 'CommandExecutionError', '(', "'Error encountered while listing configurations: {0}'", '.', 'format', '(', '_dbus_exception_to_reason', '(', 'exc', ',', 'locals', '(', ')', ')', ')', ')'] | List all available configs
CLI example:
.. code-block:: bash
salt '*' snapper.list_configs | ['List', 'all', 'available', 'configs'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/snapper.py#L194-L211 |
4,995 | vertexproject/synapse | synapse/lib/syntax.py | parse_cmd_kwarg | def parse_cmd_kwarg(text, off=0):
'''
Parse a foo:bar=<valu> kwarg into (prop,valu),off
'''
_, off = nom(text, off, whites)
prop, off = nom(text, off, varset)
_, off = nom(text, off, whites)
if not nextchar(text, off, '='):
raise s_exc.BadSyntax(expected='= for kwarg ' + prop, at=off)
_, off = nom(text, off + 1, whites)
valu, off = parse_cmd_string(text, off)
return (prop, valu), off | python | def parse_cmd_kwarg(text, off=0):
'''
Parse a foo:bar=<valu> kwarg into (prop,valu),off
'''
_, off = nom(text, off, whites)
prop, off = nom(text, off, varset)
_, off = nom(text, off, whites)
if not nextchar(text, off, '='):
raise s_exc.BadSyntax(expected='= for kwarg ' + prop, at=off)
_, off = nom(text, off + 1, whites)
valu, off = parse_cmd_string(text, off)
return (prop, valu), off | ['def', 'parse_cmd_kwarg', '(', 'text', ',', 'off', '=', '0', ')', ':', '_', ',', 'off', '=', 'nom', '(', 'text', ',', 'off', ',', 'whites', ')', 'prop', ',', 'off', '=', 'nom', '(', 'text', ',', 'off', ',', 'varset', ')', '_', ',', 'off', '=', 'nom', '(', 'text', ',', 'off', ',', 'whites', ')', 'if', 'not', 'nextchar', '(', 'text', ',', 'off', ',', "'='", ')', ':', 'raise', 's_exc', '.', 'BadSyntax', '(', 'expected', '=', "'= for kwarg '", '+', 'prop', ',', 'at', '=', 'off', ')', '_', ',', 'off', '=', 'nom', '(', 'text', ',', 'off', '+', '1', ',', 'whites', ')', 'valu', ',', 'off', '=', 'parse_cmd_string', '(', 'text', ',', 'off', ')', 'return', '(', 'prop', ',', 'valu', ')', ',', 'off'] | Parse a foo:bar=<valu> kwarg into (prop,valu),off | ['Parse', 'a', 'foo', ':', 'bar', '=', '<valu', '>', 'kwarg', 'into', '(', 'prop', 'valu', ')', 'off'] | train | https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/syntax.py#L310-L326 |
4,996 | beathan/django-akamai | django_akamai/purge.py | load_edgegrid_client_settings | def load_edgegrid_client_settings():
'''Load Akamai EdgeGrid configuration
returns a (hostname, EdgeGridAuth) tuple from the following locations:
1. Values specified directly in the Django settings::
AKAMAI_CCU_CLIENT_SECRET
AKAMAI_CCU_HOST
AKAMAI_CCU_ACCESS_TOKEN
AKAMAI_CCU_CLIENT_TOKEN
2. An edgerc file specified in the AKAMAI_EDGERC_FILENAME settings
3. The default ~/.edgerc file
Both edgerc file load options will return the values from the “CCU” section
by default. This may be customized using the AKAMAI_EDGERC_CCU_SECTION setting.
'''
if getattr(settings, 'AKAMAI_CCU_CLIENT_SECRET', None):
# If the settings module has the values directly and they are not empty
# we'll use them without checking for an edgerc file:
host = settings.AKAMAI_CCU_HOST
auth = EdgeGridAuth(access_token=settings.AKAMAI_CCU_ACCESS_TOKEN,
client_token=settings.AKAMAI_CCU_CLIENT_TOKEN,
client_secret=settings.AKAMAI_CCU_CLIENT_SECRET)
return host, auth
else:
edgerc_section = getattr(settings, 'AKAMAI_EDGERC_CCU_SECTION', 'CCU')
edgerc_path = getattr(settings, 'AKAMAI_EDGERC_FILENAME', '~/.edgerc')
edgerc_path = os.path.expanduser(edgerc_path)
if os.path.isfile(edgerc_path):
edgerc = EdgeRc(edgerc_path)
host = edgerc.get(edgerc_section, 'host')
auth = EdgeGridAuth.from_edgerc(edgerc, section=edgerc_section)
return host, auth
raise InvalidAkamaiConfiguration('Cannot find Akamai client configuration!') | python | def load_edgegrid_client_settings():
'''Load Akamai EdgeGrid configuration
returns a (hostname, EdgeGridAuth) tuple from the following locations:
1. Values specified directly in the Django settings::
AKAMAI_CCU_CLIENT_SECRET
AKAMAI_CCU_HOST
AKAMAI_CCU_ACCESS_TOKEN
AKAMAI_CCU_CLIENT_TOKEN
2. An edgerc file specified in the AKAMAI_EDGERC_FILENAME settings
3. The default ~/.edgerc file
Both edgerc file load options will return the values from the “CCU” section
by default. This may be customized using the AKAMAI_EDGERC_CCU_SECTION setting.
'''
if getattr(settings, 'AKAMAI_CCU_CLIENT_SECRET', None):
# If the settings module has the values directly and they are not empty
# we'll use them without checking for an edgerc file:
host = settings.AKAMAI_CCU_HOST
auth = EdgeGridAuth(access_token=settings.AKAMAI_CCU_ACCESS_TOKEN,
client_token=settings.AKAMAI_CCU_CLIENT_TOKEN,
client_secret=settings.AKAMAI_CCU_CLIENT_SECRET)
return host, auth
else:
edgerc_section = getattr(settings, 'AKAMAI_EDGERC_CCU_SECTION', 'CCU')
edgerc_path = getattr(settings, 'AKAMAI_EDGERC_FILENAME', '~/.edgerc')
edgerc_path = os.path.expanduser(edgerc_path)
if os.path.isfile(edgerc_path):
edgerc = EdgeRc(edgerc_path)
host = edgerc.get(edgerc_section, 'host')
auth = EdgeGridAuth.from_edgerc(edgerc, section=edgerc_section)
return host, auth
raise InvalidAkamaiConfiguration('Cannot find Akamai client configuration!') | ['def', 'load_edgegrid_client_settings', '(', ')', ':', 'if', 'getattr', '(', 'settings', ',', "'AKAMAI_CCU_CLIENT_SECRET'", ',', 'None', ')', ':', '# If the settings module has the values directly and they are not empty', "# we'll use them without checking for an edgerc file:", 'host', '=', 'settings', '.', 'AKAMAI_CCU_HOST', 'auth', '=', 'EdgeGridAuth', '(', 'access_token', '=', 'settings', '.', 'AKAMAI_CCU_ACCESS_TOKEN', ',', 'client_token', '=', 'settings', '.', 'AKAMAI_CCU_CLIENT_TOKEN', ',', 'client_secret', '=', 'settings', '.', 'AKAMAI_CCU_CLIENT_SECRET', ')', 'return', 'host', ',', 'auth', 'else', ':', 'edgerc_section', '=', 'getattr', '(', 'settings', ',', "'AKAMAI_EDGERC_CCU_SECTION'", ',', "'CCU'", ')', 'edgerc_path', '=', 'getattr', '(', 'settings', ',', "'AKAMAI_EDGERC_FILENAME'", ',', "'~/.edgerc'", ')', 'edgerc_path', '=', 'os', '.', 'path', '.', 'expanduser', '(', 'edgerc_path', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'edgerc_path', ')', ':', 'edgerc', '=', 'EdgeRc', '(', 'edgerc_path', ')', 'host', '=', 'edgerc', '.', 'get', '(', 'edgerc_section', ',', "'host'", ')', 'auth', '=', 'EdgeGridAuth', '.', 'from_edgerc', '(', 'edgerc', ',', 'section', '=', 'edgerc_section', ')', 'return', 'host', ',', 'auth', 'raise', 'InvalidAkamaiConfiguration', '(', "'Cannot find Akamai client configuration!'", ')'] | Load Akamai EdgeGrid configuration
returns a (hostname, EdgeGridAuth) tuple from the following locations:
1. Values specified directly in the Django settings::
AKAMAI_CCU_CLIENT_SECRET
AKAMAI_CCU_HOST
AKAMAI_CCU_ACCESS_TOKEN
AKAMAI_CCU_CLIENT_TOKEN
2. An edgerc file specified in the AKAMAI_EDGERC_FILENAME settings
3. The default ~/.edgerc file
Both edgerc file load options will return the values from the “CCU” section
by default. This may be customized using the AKAMAI_EDGERC_CCU_SECTION setting. | ['Load', 'Akamai', 'EdgeGrid', 'configuration'] | train | https://github.com/beathan/django-akamai/blob/00cab2dd5fab3745742721185e75a55a5c26fe7e/django_akamai/purge.py#L68-L106 |
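The (host, auth) pair returned above plugs directly into a requests session. A sketch of a Fast Purge (CCU v3) call follows; the /ccu/v3/invalidate/url/production endpoint and the example URL are assumptions, not part of the record.

import requests
from django_akamai.purge import load_edgegrid_client_settings

host, auth = load_edgegrid_client_settings()
session = requests.Session()
session.auth = auth
response = session.post(
    'https://%s/ccu/v3/invalidate/url/production' % host,
    json={'objects': ['https://www.example.com/index.html']})
response.raise_for_status()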
4,997 | dgomes/pymediaroom | pymediaroom/remote.py | discover | async def discover(ignore_list=[], max_wait=30, loop=None):
"""List STB in the network."""
stbs = []
try:
async with timeout(max_wait, loop=loop):
def responses_callback(notify):
"""Queue notify messages."""
_LOGGER.debug("Found: %s", notify.ip_address)
stbs.append(notify.ip_address)
mr_protocol = await install_mediaroom_protocol(responses_callback=responses_callback)
await asyncio.sleep(max_wait)
except asyncio.TimeoutError:
mr_protocol.close()
_LOGGER.debug("discover() timeout!")
return list(set([stb for stb in stbs if stb not in ignore_list])) | python | async def discover(ignore_list=[], max_wait=30, loop=None):
"""List STB in the network."""
stbs = []
try:
async with timeout(max_wait, loop=loop):
def responses_callback(notify):
"""Queue notify messages."""
_LOGGER.debug("Found: %s", notify.ip_address)
stbs.append(notify.ip_address)
mr_protocol = await install_mediaroom_protocol(responses_callback=responses_callback)
await asyncio.sleep(max_wait)
except asyncio.TimeoutError:
mr_protocol.close()
_LOGGER.debug("discover() timeout!")
return list(set([stb for stb in stbs if stb not in ignore_list])) | ['async', 'def', 'discover', '(', 'ignore_list', '=', '[', ']', ',', 'max_wait', '=', '30', ',', 'loop', '=', 'None', ')', ':', 'stbs', '=', '[', ']', 'try', ':', 'async', 'with', 'timeout', '(', 'max_wait', ',', 'loop', '=', 'loop', ')', ':', 'def', 'responses_callback', '(', 'notify', ')', ':', '"""Queue notify messages."""', '_LOGGER', '.', 'debug', '(', '"Found: %s"', ',', 'notify', '.', 'ip_address', ')', 'stbs', '.', 'append', '(', 'notify', '.', 'ip_address', ')', 'mr_protocol', '=', 'await', 'install_mediaroom_protocol', '(', 'responses_callback', '=', 'responses_callback', ')', 'await', 'asyncio', '.', 'sleep', '(', 'max_wait', ')', 'except', 'asyncio', '.', 'TimeoutError', ':', 'mr_protocol', '.', 'close', '(', ')', '_LOGGER', '.', 'debug', '(', '"discover() timeout!"', ')', 'return', 'list', '(', 'set', '(', '[', 'stb', 'for', 'stb', 'in', 'stbs', 'if', 'stb', 'not', 'in', 'ignore_list', ']', ')', ')'] | List STB in the network. | ['List', 'STB', 'in', 'the', 'network', '.'] | train | https://github.com/dgomes/pymediaroom/blob/f4f2686c8d5622dd5ae1bcdd76900ba35e148529/pymediaroom/remote.py#L137-L153 |
4,998 | jobovy/galpy | galpy/orbit/Orbit.py | Orbit.reverse | def reverse(self):
"""
NAME:
reverse
PURPOSE:
reverse an already integrated orbit (that is, make it go from end to beginning in t=0 to tend)
INPUT:
(none)
OUTPUT:
(none)
HISTORY:
2011-04-13 - Written - Bovy (NYU)
"""
if hasattr(self,'_orbInterp'): delattr(self,'_orbInterp')
if hasattr(self,'rs'): delattr(self,'rs')
sortindx = list(range(len(self._orb.t)))
sortindx.sort(key=lambda x: self._orb.t[x],reverse=True)
for ii in range(self._orb.orbit.shape[1]):
self._orb.orbit[:,ii]= self._orb.orbit[sortindx,ii]
return None | python | def reverse(self):
"""
NAME:
reverse
PURPOSE:
reverse an already integrated orbit (that is, make it go from end to beginning in t=0 to tend)
INPUT:
(none)
OUTPUT:
(none)
HISTORY:
2011-04-13 - Written - Bovy (NYU)
"""
if hasattr(self,'_orbInterp'): delattr(self,'_orbInterp')
if hasattr(self,'rs'): delattr(self,'rs')
sortindx = list(range(len(self._orb.t)))
sortindx.sort(key=lambda x: self._orb.t[x],reverse=True)
for ii in range(self._orb.orbit.shape[1]):
self._orb.orbit[:,ii]= self._orb.orbit[sortindx,ii]
return None | ['def', 'reverse', '(', 'self', ')', ':', 'if', 'hasattr', '(', 'self', ',', "'_orbInterp'", ')', ':', 'delattr', '(', 'self', ',', "'_orbInterp'", ')', 'if', 'hasattr', '(', 'self', ',', "'rs'", ')', ':', 'delattr', '(', 'self', ',', "'rs'", ')', 'sortindx', '=', 'list', '(', 'range', '(', 'len', '(', 'self', '.', '_orb', '.', 't', ')', ')', ')', 'sortindx', '.', 'sort', '(', 'key', '=', 'lambda', 'x', ':', 'self', '.', '_orb', '.', 't', '[', 'x', ']', ',', 'reverse', '=', 'True', ')', 'for', 'ii', 'in', 'range', '(', 'self', '.', '_orb', '.', 'orbit', '.', 'shape', '[', '1', ']', ')', ':', 'self', '.', '_orb', '.', 'orbit', '[', ':', ',', 'ii', ']', '=', 'self', '.', '_orb', '.', 'orbit', '[', 'sortindx', ',', 'ii', ']', 'return', 'None'] | NAME:
reverse
PURPOSE:
reverse an already integrated orbit (that is, make it go from end to beginning in t=0 to tend)
INPUT:
(none)
OUTPUT:
(none)
HISTORY:
2011-04-13 - Written - Bovy (NYU) | ['NAME', ':'] | train | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/Orbit.py#L572-L599 |
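A hedged end-to-end sketch: integrate an orbit forward in MWPotential2014, then flip the stored samples so they run from the end time back to t=0. Initial conditions and time grid are illustrative.

import numpy
from galpy.orbit import Orbit
from galpy.potential import MWPotential2014

o = Orbit([1., 0.1, 1.1, 0., 0.1, 0.])   # [R, vR, vT, z, vz, phi] in natural units
ts = numpy.linspace(0., 10., 1001)
o.integrate(ts, MWPotential2014)
o.reverse()                               # samples now ordered from t=10 down to t=0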
4,999 | gagneurlab/concise | concise/initializers.py | _truncated_normal | def _truncated_normal(mean,
stddev,
seed=None,
normalize=True,
alpha=0.01):
''' Add noise with truncnorm from numpy.
Bounded (0.001,0.999)
'''
# within range ()
# provide entry to chose which adding noise way to use
if seed is not None:
np.random.seed(seed)
if stddev == 0:
X = mean
else:
gen_X = truncnorm((alpha - mean) / stddev,
((1 - alpha) - mean) / stddev,
loc=mean, scale=stddev)
X = gen_X.rvs() + mean
if normalize:
# Normalize, column sum to 1
col_sums = X.sum(1)
X = X / col_sums[:, np.newaxis]
return X | python | def _truncated_normal(mean,
stddev,
seed=None,
normalize=True,
alpha=0.01):
''' Add noise with truncnorm from numpy.
Bounded (0.001,0.999)
'''
# within range ()
# provide entry to chose which adding noise way to use
if seed is not None:
np.random.seed(seed)
if stddev == 0:
X = mean
else:
gen_X = truncnorm((alpha - mean) / stddev,
((1 - alpha) - mean) / stddev,
loc=mean, scale=stddev)
X = gen_X.rvs() + mean
if normalize:
# Normalize, column sum to 1
col_sums = X.sum(1)
X = X / col_sums[:, np.newaxis]
return X | ['def', '_truncated_normal', '(', 'mean', ',', 'stddev', ',', 'seed', '=', 'None', ',', 'normalize', '=', 'True', ',', 'alpha', '=', '0.01', ')', ':', '# within range ()', '# provide entry to chose which adding noise way to use', 'if', 'seed', 'is', 'not', 'None', ':', 'np', '.', 'random', '.', 'seed', '(', 'seed', ')', 'if', 'stddev', '==', '0', ':', 'X', '=', 'mean', 'else', ':', 'gen_X', '=', 'truncnorm', '(', '(', 'alpha', '-', 'mean', ')', '/', 'stddev', ',', '(', '(', '1', '-', 'alpha', ')', '-', 'mean', ')', '/', 'stddev', ',', 'loc', '=', 'mean', ',', 'scale', '=', 'stddev', ')', 'X', '=', 'gen_X', '.', 'rvs', '(', ')', '+', 'mean', 'if', 'normalize', ':', '# Normalize, column sum to 1', 'col_sums', '=', 'X', '.', 'sum', '(', '1', ')', 'X', '=', 'X', '/', 'col_sums', '[', ':', ',', 'np', '.', 'newaxis', ']', 'return', 'X'] | Add noise with truncnorm from numpy.
Bounded (0.001,0.999) | ['Add', 'noise', 'with', 'truncnorm', 'from', 'numpy', '.', 'Bounded', '(', '0', '.', '001', '0', '.', '999', ')'] | train | https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/initializers.py#L31-L54 |
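A small sketch of how the helper above might be exercised to jitter a position-weight-matrix-like array; the shape and noise level are assumptions.

import numpy as np

pwm_mean = np.full((8, 4), 0.25)          # 8 positions x 4 bases, uniform start
pwm_noisy = _truncated_normal(mean=pwm_mean, stddev=0.05, seed=42)
# with normalize=True (the default), each row of pwm_noisy sums to 1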