Dataset columns (10k rows, all Python, all in the "train" split):

Unnamed: 0                 | int64, 0 to 10k
repository_name            | string, 7 to 54 characters
func_path_in_repository    | string, 5 to 223 characters
func_name                  | string, 1 to 134 characters
whole_func_string          | string, 100 to 30.3k characters
language                   | 1 distinct value ("python")
func_code_string           | string, 100 to 30.3k characters
func_code_tokens           | string, 138 to 33.2k characters
func_documentation_string  | string, 1 to 15k characters
func_documentation_tokens  | string, 5 to 5.14k characters
split_name                 | 1 distinct value ("train")
func_code_url              | string, 91 to 315 characters
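The column layout resembles a CodeSearchNet-style corpus of functions paired with their docstrings. A minimal inspection sketch follows; the filename "functions.parquet" and the pandas workflow are assumptions for illustration, not part of the dataset itself.

# a minimal sketch, assuming the rows below are available as a local Parquet export
import pandas as pd

df = pd.read_parquet("functions.parquet")  # hypothetical local export of this table

# look at one record, keeping a single copy of the code plus its metadata
cols = ["repository_name", "func_path_in_repository", "func_name",
        "whole_func_string", "func_documentation_string", "func_code_url"]
print(df.loc[7600, cols])

# count how many functions each repository contributes
print(df["repository_name"].value_counts().head(10))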
7,600 | quodlibet/mutagen | mutagen/_senf/_stdlib.py | expanduser | python | train | https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/_senf/_stdlib.py#L91-L132

def expanduser(path):
    """
    Args:
        path (pathlike): A path to expand
    Returns:
        `fsnative`

    Like :func:`python:os.path.expanduser` but supports unicode home
    directories under Windows + Python 2 and always returns a `fsnative`.
    """
    path = path2fsn(path)

    if path == "~":
        return _get_userdir()
    elif path.startswith("~" + sep) or (
            altsep is not None and path.startswith("~" + altsep)):
        userdir = _get_userdir()
        if userdir is None:
            return path
        return userdir + path[1:]
    elif path.startswith("~"):
        sep_index = path.find(sep)
        if altsep is not None:
            alt_index = path.find(altsep)
            if alt_index != -1 and alt_index < sep_index:
                sep_index = alt_index
        if sep_index == -1:
            user = path[1:]
            rest = ""
        else:
            user = path[1:sep_index]
            rest = path[sep_index:]
        userdir = _get_userdir(user)
        if userdir is not None:
            return userdir + rest
        else:
            return path
    else:
        return path

"""
Args:
path (pathlike): A path to expand
Returns:
`fsnative`
Like :func:`python:os.path.expanduser` but supports unicode home
directories under Windows + Python 2 and always returns a `fsnative`.
"""
path = path2fsn(path)
if path == "~":
return _get_userdir()
elif path.startswith("~" + sep) or (
altsep is not None and path.startswith("~" + altsep)):
userdir = _get_userdir()
if userdir is None:
return path
return userdir + path[1:]
elif path.startswith("~"):
sep_index = path.find(sep)
if altsep is not None:
alt_index = path.find(altsep)
if alt_index != -1 and alt_index < sep_index:
sep_index = alt_index
if sep_index == -1:
user = path[1:]
rest = ""
else:
user = path[1:sep_index]
rest = path[sep_index:]
userdir = _get_userdir(user)
if userdir is not None:
return userdir + rest
else:
return path
else:
return path | ['def', 'expanduser', '(', 'path', ')', ':', 'path', '=', 'path2fsn', '(', 'path', ')', 'if', 'path', '==', '"~"', ':', 'return', '_get_userdir', '(', ')', 'elif', 'path', '.', 'startswith', '(', '"~"', '+', 'sep', ')', 'or', '(', 'altsep', 'is', 'not', 'None', 'and', 'path', '.', 'startswith', '(', '"~"', '+', 'altsep', ')', ')', ':', 'userdir', '=', '_get_userdir', '(', ')', 'if', 'userdir', 'is', 'None', ':', 'return', 'path', 'return', 'userdir', '+', 'path', '[', '1', ':', ']', 'elif', 'path', '.', 'startswith', '(', '"~"', ')', ':', 'sep_index', '=', 'path', '.', 'find', '(', 'sep', ')', 'if', 'altsep', 'is', 'not', 'None', ':', 'alt_index', '=', 'path', '.', 'find', '(', 'altsep', ')', 'if', 'alt_index', '!=', '-', '1', 'and', 'alt_index', '<', 'sep_index', ':', 'sep_index', '=', 'alt_index', 'if', 'sep_index', '==', '-', '1', ':', 'user', '=', 'path', '[', '1', ':', ']', 'rest', '=', '""', 'else', ':', 'user', '=', 'path', '[', '1', ':', 'sep_index', ']', 'rest', '=', 'path', '[', 'sep_index', ':', ']', 'userdir', '=', '_get_userdir', '(', 'user', ')', 'if', 'userdir', 'is', 'not', 'None', ':', 'return', 'userdir', '+', 'rest', 'else', ':', 'return', 'path', 'else', ':', 'return', 'path'] | Args:
path (pathlike): A path to expand
Returns:
`fsnative`
Like :func:`python:os.path.expanduser` but supports unicode home
directories under Windows + Python 2 and always returns a `fsnative`. | ['Args', ':', 'path', '(', 'pathlike', ')', ':', 'A', 'path', 'to', 'expand', 'Returns', ':', 'fsnative'] | train | https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/_senf/_stdlib.py#L91-L132 |
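A short usage sketch for the function above; it assumes the standalone senf package (which mutagen vendors here) is importable, and the printed results are purely illustrative of the three branches in the code.

# illustrative only; actual results depend on the current user's environment
from senf import expanduser

print(expanduser("~"))              # the current user's home directory, e.g. /home/alice
print(expanduser("~/Music"))        # home directory joined with the rest of the path
print(expanduser("~bob/Music"))     # another user's home directory, if it can be resolved
print(expanduser("~no_such_user"))  # returned unchanged when the user is unknown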
7,601 | richardkiss/pycoin | pycoin/ecdsa/native/secp256k1.py | Optimizations.multiply | python | train | https://github.com/richardkiss/pycoin/blob/1e8d0d9fe20ce0347b97847bb529cd1bd84c7442/pycoin/ecdsa/native/secp256k1.py#L135-L156

def multiply(self, p, e):
    """Multiply a point by an integer."""
    e %= self.order()
    if p == self._infinity or e == 0:
        return self._infinity
    pubkey = create_string_buffer(64)
    public_pair_bytes = b'\4' + to_bytes_32(p[0]) + to_bytes_32(p[1])
    r = libsecp256k1.secp256k1_ec_pubkey_parse(
        libsecp256k1.ctx, pubkey, public_pair_bytes, len(public_pair_bytes))
    if not r:
        return False
    r = libsecp256k1.secp256k1_ec_pubkey_tweak_mul(libsecp256k1.ctx, pubkey, to_bytes_32(e))
    if not r:
        return self._infinity
    pubkey_serialized = create_string_buffer(65)
    pubkey_size = c_size_t(65)
    libsecp256k1.secp256k1_ec_pubkey_serialize(
        libsecp256k1.ctx, pubkey_serialized, byref(pubkey_size), pubkey, SECP256K1_EC_UNCOMPRESSED)
    x = from_bytes_32(pubkey_serialized[1:33])
    y = from_bytes_32(pubkey_serialized[33:])
    return self.Point(x, y)

"""Multiply a point by an integer."""
e %= self.order()
if p == self._infinity or e == 0:
return self._infinity
pubkey = create_string_buffer(64)
public_pair_bytes = b'\4' + to_bytes_32(p[0]) + to_bytes_32(p[1])
r = libsecp256k1.secp256k1_ec_pubkey_parse(
libsecp256k1.ctx, pubkey, public_pair_bytes, len(public_pair_bytes))
if not r:
return False
r = libsecp256k1.secp256k1_ec_pubkey_tweak_mul(libsecp256k1.ctx, pubkey, to_bytes_32(e))
if not r:
return self._infinity
pubkey_serialized = create_string_buffer(65)
pubkey_size = c_size_t(65)
libsecp256k1.secp256k1_ec_pubkey_serialize(
libsecp256k1.ctx, pubkey_serialized, byref(pubkey_size), pubkey, SECP256K1_EC_UNCOMPRESSED)
x = from_bytes_32(pubkey_serialized[1:33])
y = from_bytes_32(pubkey_serialized[33:])
return self.Point(x, y) | ['def', 'multiply', '(', 'self', ',', 'p', ',', 'e', ')', ':', 'e', '%=', 'self', '.', 'order', '(', ')', 'if', 'p', '==', 'self', '.', '_infinity', 'or', 'e', '==', '0', ':', 'return', 'self', '.', '_infinity', 'pubkey', '=', 'create_string_buffer', '(', '64', ')', 'public_pair_bytes', '=', "b'\\4'", '+', 'to_bytes_32', '(', 'p', '[', '0', ']', ')', '+', 'to_bytes_32', '(', 'p', '[', '1', ']', ')', 'r', '=', 'libsecp256k1', '.', 'secp256k1_ec_pubkey_parse', '(', 'libsecp256k1', '.', 'ctx', ',', 'pubkey', ',', 'public_pair_bytes', ',', 'len', '(', 'public_pair_bytes', ')', ')', 'if', 'not', 'r', ':', 'return', 'False', 'r', '=', 'libsecp256k1', '.', 'secp256k1_ec_pubkey_tweak_mul', '(', 'libsecp256k1', '.', 'ctx', ',', 'pubkey', ',', 'to_bytes_32', '(', 'e', ')', ')', 'if', 'not', 'r', ':', 'return', 'self', '.', '_infinity', 'pubkey_serialized', '=', 'create_string_buffer', '(', '65', ')', 'pubkey_size', '=', 'c_size_t', '(', '65', ')', 'libsecp256k1', '.', 'secp256k1_ec_pubkey_serialize', '(', 'libsecp256k1', '.', 'ctx', ',', 'pubkey_serialized', ',', 'byref', '(', 'pubkey_size', ')', ',', 'pubkey', ',', 'SECP256K1_EC_UNCOMPRESSED', ')', 'x', '=', 'from_bytes_32', '(', 'pubkey_serialized', '[', '1', ':', '33', ']', ')', 'y', '=', 'from_bytes_32', '(', 'pubkey_serialized', '[', '33', ':', ']', ')', 'return', 'self', '.', 'Point', '(', 'x', ',', 'y', ')'] | Multiply a point by an integer. | ['Multiply', 'a', 'point', 'by', 'an', 'integer', '.'] | train | https://github.com/richardkiss/pycoin/blob/1e8d0d9fe20ce0347b97847bb529cd1bd84c7442/pycoin/ecdsa/native/secp256k1.py#L135-L156 |
7,602 | saltstack/salt | salt/modules/smf_service.py | restart | python | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/smf_service.py#L193-L208

def restart(name):
    '''
    Restart the named service

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart <service name>
    '''
    cmd = '/usr/sbin/svcadm restart {0}'.format(name)
    if not __salt__['cmd.retcode'](cmd, python_shell=False):
        # calling restart doesn't clear maintenance
        # or tell us that the service is in the 'online' state
        return start(name)
    return False

7,603 | googlefonts/fontmake | Lib/fontmake/font_project.py | FontProject._designspace_locations | python | train | https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L1076-L1086

def _designspace_locations(self, designspace):
    """Map font filenames to their locations in a designspace."""
    maps = []
    for elements in (designspace.sources, designspace.instances):
        location_map = {}
        for element in elements:
            path = _normpath(element.path)
            location_map[path] = element.location
        maps.append(location_map)
    return maps

"""Map font filenames to their locations in a designspace."""
maps = []
for elements in (designspace.sources, designspace.instances):
location_map = {}
for element in elements:
path = _normpath(element.path)
location_map[path] = element.location
maps.append(location_map)
return maps | ['def', '_designspace_locations', '(', 'self', ',', 'designspace', ')', ':', 'maps', '=', '[', ']', 'for', 'elements', 'in', '(', 'designspace', '.', 'sources', ',', 'designspace', '.', 'instances', ')', ':', 'location_map', '=', '{', '}', 'for', 'element', 'in', 'elements', ':', 'path', '=', '_normpath', '(', 'element', '.', 'path', ')', 'location_map', '[', 'path', ']', '=', 'element', '.', 'location', 'maps', '.', 'append', '(', 'location_map', ')', 'return', 'maps'] | Map font filenames to their locations in a designspace. | ['Map', 'font', 'filenames', 'to', 'their', 'locations', 'in', 'a', 'designspace', '.'] | train | https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L1076-L1086 |
7,604 | psss/did | did/plugins/sentry.py | Sentry.issues | python | train | https://github.com/psss/did/blob/04e4ee6f1aa14c0cae3ba9f9803871f3f98279cb/did/plugins/sentry.py#L81-L85

def issues(self, kind, email):
    """ Filter unique issues for given activity type and email """
    return list(set([unicode(activity.issue)
        for activity in self.activities()
        if kind == activity.kind and activity.user['email'] == email]))

""" Filter unique issues for given activity type and email """
return list(set([unicode(activity.issue)
for activity in self.activities()
if kind == activity.kind and activity.user['email'] == email])) | ['def', 'issues', '(', 'self', ',', 'kind', ',', 'email', ')', ':', 'return', 'list', '(', 'set', '(', '[', 'unicode', '(', 'activity', '.', 'issue', ')', 'for', 'activity', 'in', 'self', '.', 'activities', '(', ')', 'if', 'kind', '==', 'activity', '.', 'kind', 'and', 'activity', '.', 'user', '[', "'email'", ']', '==', 'email', ']', ')', ')'] | Filter unique issues for given activity type and email | ['Filter', 'unique', 'issues', 'for', 'given', 'activity', 'type', 'and', 'email'] | train | https://github.com/psss/did/blob/04e4ee6f1aa14c0cae3ba9f9803871f3f98279cb/did/plugins/sentry.py#L81-L85 |
7,605 | quantumlib/Cirq | cirq/circuits/circuit.py | Circuit._repr_pretty_ | python | train | https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/circuits/circuit.py#L331-L337

def _repr_pretty_(self, p: Any, cycle: bool) -> None:
    """Print ASCII diagram in Jupyter."""
    if cycle:
        # There should never be a cycle.  This is just in case.
        p.text('Circuit(...)')
    else:
        p.text(self.to_text_diagram())

"""Print ASCII diagram in Jupyter."""
if cycle:
# There should never be a cycle. This is just in case.
p.text('Circuit(...)')
else:
p.text(self.to_text_diagram()) | ['def', '_repr_pretty_', '(', 'self', ',', 'p', ':', 'Any', ',', 'cycle', ':', 'bool', ')', '->', 'None', ':', 'if', 'cycle', ':', '# There should never be a cycle. This is just in case.', 'p', '.', 'text', '(', "'Circuit(...)'", ')', 'else', ':', 'p', '.', 'text', '(', 'self', '.', 'to_text_diagram', '(', ')', ')'] | Print ASCII diagram in Jupyter. | ['Print', 'ASCII', 'diagram', 'in', 'Jupyter', '.'] | train | https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/circuits/circuit.py#L331-L337 |
7,606 | dw/mitogen | ansible_mitogen/connection.py | Connection.get_task_var | python | train | https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/ansible_mitogen/connection.py#L538-L560

def get_task_var(self, key, default=None):
    """
    Fetch the value of a task variable related to connection configuration,
    or, if delegate_to is active, fetch the same variable via HostVars for
    the delegated-to machine.

    When running with delegate_to, Ansible tasks have variables associated
    with the original machine, not the delegated-to machine, therefore it
    does not make sense to extract connection-related configuration for the
    delegated-to machine from them.
    """
    if self._task_vars:
        if self.delegate_to_hostname is None:
            if key in self._task_vars:
                return self._task_vars[key]
        else:
            delegated_vars = self._task_vars['ansible_delegated_vars']
            if self.delegate_to_hostname in delegated_vars:
                task_vars = delegated_vars[self.delegate_to_hostname]
                if key in task_vars:
                    return task_vars[key]

    return default

"""
Fetch the value of a task variable related to connection configuration,
or, if delegate_to is active, fetch the same variable via HostVars for
the delegated-to machine.
When running with delegate_to, Ansible tasks have variables associated
with the original machine, not the delegated-to machine, therefore it
does not make sense to extract connection-related configuration for the
delegated-to machine from them.
"""
if self._task_vars:
if self.delegate_to_hostname is None:
if key in self._task_vars:
return self._task_vars[key]
else:
delegated_vars = self._task_vars['ansible_delegated_vars']
if self.delegate_to_hostname in delegated_vars:
task_vars = delegated_vars[self.delegate_to_hostname]
if key in task_vars:
return task_vars[key]
return default | ['def', 'get_task_var', '(', 'self', ',', 'key', ',', 'default', '=', 'None', ')', ':', 'if', 'self', '.', '_task_vars', ':', 'if', 'self', '.', 'delegate_to_hostname', 'is', 'None', ':', 'if', 'key', 'in', 'self', '.', '_task_vars', ':', 'return', 'self', '.', '_task_vars', '[', 'key', ']', 'else', ':', 'delegated_vars', '=', 'self', '.', '_task_vars', '[', "'ansible_delegated_vars'", ']', 'if', 'self', '.', 'delegate_to_hostname', 'in', 'delegated_vars', ':', 'task_vars', '=', 'delegated_vars', '[', 'self', '.', 'delegate_to_hostname', ']', 'if', 'key', 'in', 'task_vars', ':', 'return', 'task_vars', '[', 'key', ']', 'return', 'default'] | Fetch the value of a task variable related to connection configuration,
or, if delegate_to is active, fetch the same variable via HostVars for
the delegated-to machine.
When running with delegate_to, Ansible tasks have variables associated
with the original machine, not the delegated-to machine, therefore it
does not make sense to extract connection-related configuration for the
delegated-to machine from them. | ['Fetch', 'the', 'value', 'of', 'a', 'task', 'variable', 'related', 'to', 'connection', 'configuration', 'or', 'if', 'delegate_to', 'is', 'active', 'fetch', 'the', 'same', 'variable', 'via', 'HostVars', 'for', 'the', 'delegated', '-', 'to', 'machine', '.'] | train | https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/ansible_mitogen/connection.py#L538-L560 |
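For context, a sketch of the task_vars shape the method above reads; the variable names, hostnames, and values below are made up for illustration and are not part of the source.

# hypothetical task_vars structure, as passed in by Ansible
task_vars = {
    "ansible_python_interpreter": "/usr/bin/python3",
    "ansible_delegated_vars": {
        "proxy-host": {"ansible_python_interpreter": "/usr/bin/python2.7"},
    },
}
# With delegate_to_hostname set to None, get_task_var("ansible_python_interpreter")
# reads the top-level dict and would return "/usr/bin/python3".
# With delegate_to_hostname == "proxy-host", it reads the nested per-host dict
# and would return "/usr/bin/python2.7"; unknown keys fall through to `default`.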
7,607 | tensorflow/tensor2tensor | tensor2tensor/rl/dopamine_connector.py | _parse_hparams | python | train | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/dopamine_connector.py#L471-L491

def _parse_hparams(hparams):
    """Split hparams, based on key prefixes.

    Args:
        hparams: hyperparameters

    Returns:
        Tuple of hparams for respectably: agent, optimizer, runner, replay_buffer.
    """
    prefixes = ["agent_", "optimizer_", "runner_", "replay_buffer_"]
    ret = []

    for prefix in prefixes:
        ret_dict = {}
        for key in hparams.values():
            if prefix in key:
                par_name = key[len(prefix):]
                ret_dict[par_name] = hparams.get(key)
        ret.append(ret_dict)

    return ret

"""Split hparams, based on key prefixes.
Args:
hparams: hyperparameters
Returns:
Tuple of hparams for respectably: agent, optimizer, runner, replay_buffer.
"""
prefixes = ["agent_", "optimizer_", "runner_", "replay_buffer_"]
ret = []
for prefix in prefixes:
ret_dict = {}
for key in hparams.values():
if prefix in key:
par_name = key[len(prefix):]
ret_dict[par_name] = hparams.get(key)
ret.append(ret_dict)
return ret | ['def', '_parse_hparams', '(', 'hparams', ')', ':', 'prefixes', '=', '[', '"agent_"', ',', '"optimizer_"', ',', '"runner_"', ',', '"replay_buffer_"', ']', 'ret', '=', '[', ']', 'for', 'prefix', 'in', 'prefixes', ':', 'ret_dict', '=', '{', '}', 'for', 'key', 'in', 'hparams', '.', 'values', '(', ')', ':', 'if', 'prefix', 'in', 'key', ':', 'par_name', '=', 'key', '[', 'len', '(', 'prefix', ')', ':', ']', 'ret_dict', '[', 'par_name', ']', '=', 'hparams', '.', 'get', '(', 'key', ')', 'ret', '.', 'append', '(', 'ret_dict', ')', 'return', 'ret'] | Split hparams, based on key prefixes.
Args:
hparams: hyperparameters
Returns:
Tuple of hparams for respectably: agent, optimizer, runner, replay_buffer. | ['Split', 'hparams', 'based', 'on', 'key', 'prefixes', '.'] | train | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/dopamine_connector.py#L471-L491 |
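A toy sketch of the prefix-splitting behaviour above. The stand-in class only mimics the two methods the function uses (.values() returning a name-to-value dict and .get(name)); it is not the real tensor2tensor HParams class, and the hyperparameter names are invented.

class FakeHParams(object):
    """Minimal stand-in: .values() returns the full dict, .get() a single value."""
    def __init__(self, **kwargs):
        self._d = dict(kwargs)

    def values(self):
        return dict(self._d)

    def get(self, key):
        return self._d[key]

hp = FakeHParams(agent_gamma=0.99, optimizer_lr=0.0001,
                 runner_num_steps=1000, replay_buffer_size=10000)
print(_parse_hparams(hp))
# -> [{'gamma': 0.99}, {'lr': 0.0001}, {'num_steps': 1000}, {'size': 10000}]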
7,608 | google/mobly | mobly/controllers/android_device_lib/jsonrpc_client_base.py | JsonRpcClientBase._client_send | python | train | https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/android_device_lib/jsonrpc_client_base.py#L237-L254

def _client_send(self, msg):
    """Sends an Rpc message through the connection.

    Args:
        msg: string, the message to send.

    Raises:
        Error: a socket error occurred during the send.
    """
    try:
        self._client.write(msg.encode("utf8") + b'\n')
        self._client.flush()
        self.log.debug('Snippet sent %s.', msg)
    except socket.error as e:
        raise Error(
            self._ad,
            'Encountered socket error "%s" sending RPC message "%s"' %
            (e, msg))

"""Sends an Rpc message through the connection.
Args:
msg: string, the message to send.
Raises:
Error: a socket error occurred during the send.
"""
try:
self._client.write(msg.encode("utf8") + b'\n')
self._client.flush()
self.log.debug('Snippet sent %s.', msg)
except socket.error as e:
raise Error(
self._ad,
'Encountered socket error "%s" sending RPC message "%s"' %
(e, msg)) | ['def', '_client_send', '(', 'self', ',', 'msg', ')', ':', 'try', ':', 'self', '.', '_client', '.', 'write', '(', 'msg', '.', 'encode', '(', '"utf8"', ')', '+', "b'\\n'", ')', 'self', '.', '_client', '.', 'flush', '(', ')', 'self', '.', 'log', '.', 'debug', '(', "'Snippet sent %s.'", ',', 'msg', ')', 'except', 'socket', '.', 'error', 'as', 'e', ':', 'raise', 'Error', '(', 'self', '.', '_ad', ',', '\'Encountered socket error "%s" sending RPC message "%s"\'', '%', '(', 'e', ',', 'msg', ')', ')'] | Sends an Rpc message through the connection.
Args:
msg: string, the message to send.
Raises:
Error: a socket error occurred during the send. | ['Sends', 'an', 'Rpc', 'message', 'through', 'the', 'connection', '.'] | train | https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/android_device_lib/jsonrpc_client_base.py#L237-L254 |
7,609 | jtwhite79/pyemu | pyemu/plot/plot_utils.py | ensemble_res_1to1 | python | train | https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/plot/plot_utils.py#L1224-L1403

def ensemble_res_1to1(ensemble, pst,facecolor='0.5',logger=None,filename=None,
                      skip_groups=[],base_ensemble=None,**kwargs):
    """helper function to plot ensemble 1-to-1 plots sbowing the simulated range

    Parameters
    ----------
    ensemble : varies
        the ensemble argument can be a pandas.DataFrame or derived type or a str, which
        is treated as a fileanme.  Optionally, ensemble can be a list of these types or
        a dict, in which case, the keys are treated as facecolor str (e.g., 'b', 'y', etc).
    pst : pyemu.Pst
        pst instance
    facecolor : str
        the histogram facecolor.  Only applies if ensemble is a single thing
    filename : str
        the name of the pdf to create. If None, return figs without saving.  Default is None.
    base_ensemble : varies
        an optional ensemble argument for the observations + noise ensemble.
        This will be plotted as a transparent red bar on the 1to1 plot.
    """
    if logger is None:
        logger=Logger('Default_Loggger.log',echo=False)
    logger.log("plot res_1to1")
    obs = pst.observation_data

    ensembles = _process_ensemble_arg(ensemble,facecolor,logger)
    if base_ensemble is not None:
        base_ensemble = _process_ensemble_arg(base_ensemble,"r",logger)

    if "grouper" in kwargs:
        raise NotImplementedError()
    else:
        grouper = obs.groupby(obs.obgnme).groups

    for skip_group in skip_groups:
        grouper.pop(skip_group)

    fig = plt.figure(figsize=figsize)
    if "fig_title" in kwargs:
        plt.figtext(0.5,0.5,kwargs["fig_title"])
    else:
        plt.figtext(0.5, 0.5, "pyemu.Pst.plot(kind='1to1')\nfrom pest control file '{0}'\n at {1}"
                    .format(pst.filename, str(datetime.now())), ha="center")
    #if plot_hexbin:
    #    pdfname = pst.filename.replace(".pst", ".1to1.hexbin.pdf")
    #else:
    #    pdfname = pst.filename.replace(".pst", ".1to1.pdf")

    figs = []
    ax_count = 0
    for g, names in grouper.items():
        logger.log("plotting 1to1 for {0}".format(g))

        obs_g = obs.loc[names, :]
        logger.statement("using control file obsvals to calculate residuals")

        if "include_zero" not in kwargs or kwargs["include_zero"] is False:
            obs_g = obs_g.loc[obs_g.weight > 0, :]
        if obs_g.shape[0] == 0:
            logger.statement("no non-zero obs for group '{0}'".format(g))
            logger.log("plotting 1to1 for {0}".format(g))
            continue

        if ax_count % (nr * nc) == 0:
            if ax_count > 0:
                plt.tight_layout()
                #pdf.savefig()
                #plt.close(fig)
                figs.append(fig)
            fig = plt.figure(figsize=figsize)
            axes = get_page_axes()
            ax_count = 0

        ax = axes[ax_count]

        if base_ensemble is None:
            mx = obs_g.obsval.max()
            mn = obs_g.obsval.min()
        else:
            mn = base_ensemble["r"].loc[:,names].min().min()
            mx = base_ensemble["r"].loc[:, names].max().max()

        #if obs_g.shape[0] == 1:
        mx *= 1.1
        mn *= 0.9
        #ax.axis('square')

        if base_ensemble is not None:
            obs_gg = obs_g.sort_values(by="obsval")
            for c, en in base_ensemble.items():
                en_g = en.loc[:, obs_gg.obsnme]
                ex = en_g.max()
                en = en_g.min()
                #[ax.plot([ov, ov], [een, eex], color=c,alpha=0.3) for ov, een, eex in zip(obs_g.obsval.values, en.values, ex.values)]
                ax.fill_between(obs_gg.obsval,en,ex,facecolor=c,alpha=0.2)

        #ax.scatter([obs_g.sim], [obs_g.obsval], marker='.', s=10, color='b')
        for c,en in ensembles.items():
            en_g = en.loc[:,obs_g.obsnme]
            ex = en_g.max()
            en = en_g.min()
            [ax.plot([ov,ov],[een,eex],color=c) for ov,een,eex in zip(obs_g.obsval.values,en.values,ex.values)]

        ax.plot([mn,mx],[mn,mx],'k--',lw=1.0)
        xlim = (mn,mx)
        ax.set_xlim(mn,mx)
        ax.set_ylim(mn,mx)
        if mx > 1.0e5:
            ax.xaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%1.0e'))
            ax.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%1.0e'))
        ax.grid()

        ax.set_xlabel("observed",labelpad=0.1)
        ax.set_ylabel("simulated",labelpad=0.1)
        ax.set_title("{0}) group:{1}, {2} observations".
                     format(abet[ax_count], g, obs_g.shape[0]), loc="left")

        ax_count += 1

        ax = axes[ax_count]
        #ax.scatter(obs_g.obsval, obs_g.res, marker='.', s=10, color='b')
        if base_ensemble is not None:
            obs_gg = obs_g.sort_values(by="obsval")
            for c, en in base_ensemble.items():
                en_g = en.loc[:, obs_gg.obsnme].subtract(obs_gg.obsval)
                ex = en_g.max()
                en = en_g.min()
                #[ax.plot([ov, ov], [een, eex], color=c,alpha=0.3) for ov, een, eex in zip(obs_g.obsval.values, en.values, ex.values)]
                ax.fill_between(obs_gg.obsval,en,ex,facecolor=c,alpha=0.2)

        for c,en in ensembles.items():
            en_g = en.loc[:,obs_g.obsnme].subtract(obs_g.obsval,axis=1)
            ex = en_g.max()
            en = en_g.min()
            [ax.plot([ov,ov],[een,eex],color=c) for ov,een,eex in zip(obs_g.obsval.values,en.values,ex.values)]

        # if base_ensemble is not None:
        #     if base_ensemble is not None:
        #         for c, en in base_ensemble.items():
        #             en_g = en.loc[:, obs_g.obsnme].subtract(obs_g.obsval,axis=1)
        #             ex = en_g.max()
        #             en = en_g.min()
        #             [ax.plot([ov, ov], [een, eex], color=c, alpha=0.3) for ov, een, eex in
        #              zip(obs_g.obsval.values, en.values, ex.values)]

        ylim = ax.get_ylim()
        mx = max(np.abs(ylim[0]), np.abs(ylim[1]))
        if obs_g.shape[0] == 1:
            mx *= 1.1
        ax.set_ylim(-mx, mx)
        #show a zero residuals line
        ax.plot(xlim, [0,0], 'k--', lw=1.0)
        ax.set_xlim(xlim)
        ax.set_ylabel("residual",labelpad=0.1)
        ax.set_xlabel("observed",labelpad=0.1)
        ax.set_title("{0}) group:{1}, {2} observations".
                     format(abet[ax_count], g, obs_g.shape[0]), loc="left")
        ax.grid()
        if ax.get_xlim()[1] > 1.0e5:
            ax.xaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%1.0e'))

        ax_count += 1

        logger.log("plotting 1to1 for {0}".format(g))

    for a in range(ax_count, nr * nc):
        axes[a].set_axis_off()
        axes[a].set_yticks([])
        axes[a].set_xticks([])

    plt.tight_layout()
    #pdf.savefig()
    #plt.close(fig)
    figs.append(fig)

    if filename is not None:
        plt.tight_layout()
        with PdfPages(filename) as pdf:
            for fig in figs:
                pdf.savefig(fig)
                plt.close(fig)
        logger.log("plot res_1to1")
    else:
        logger.log("plot res_1to1")
        return figs

7,610 | saltstack/salt | salt/modules/virt.py | vm_state | python | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L2215-L2255

def vm_state(vm_=None, **kwargs):
    '''
    Return list of all the vms and their state.

    If you pass a VM name in as an argument then it will return info
    for just the named VM, otherwise it will return all VMs.

    :param vm_: name of the domain
    :param connection: libvirt connection URI, overriding defaults

        .. versionadded:: 2019.2.0
    :param username: username to connect with, overriding defaults

        .. versionadded:: 2019.2.0
    :param password: password to connect with, overriding defaults

        .. versionadded:: 2019.2.0

    CLI Example:

    .. code-block:: bash

        salt '*' virt.vm_state <domain>
    '''
    def _info(dom):
        '''
        Compute domain state
        '''
        state = ''
        raw = dom.info()
        state = VIRT_STATE_NAME_MAP.get(raw[0], 'unknown')
        return state
    info = {}
    conn = __get_conn(**kwargs)
    if vm_:
        info[vm_] = _info(_get_domain(conn, vm_))
    else:
        for domain in _get_domain(conn, iterable=True):
            info[domain.name()] = _info(domain)
    conn.close()
    return info

7,611 | lra/mackup | mackup/config.py | Config._setup_parser | python | train | https://github.com/lra/mackup/blob/ed0b5626b033f232868900bfd5108df448873725/mackup/config.py#L132-L151

def _setup_parser(self, filename=None):
    """
    Configure the ConfigParser instance the way we want it.

    Args:
        filename (str) or None

    Returns:
        SafeConfigParser
    """
    assert isinstance(filename, str) or filename is None

    # If we are not overriding the config filename
    if not filename:
        filename = MACKUP_CONFIG_FILE

    parser = configparser.SafeConfigParser(allow_no_value=True)
    parser.read(os.path.join(os.path.join(os.environ['HOME'], filename)))

    return parser

"""
Configure the ConfigParser instance the way we want it.
Args:
filename (str) or None
Returns:
SafeConfigParser
"""
assert isinstance(filename, str) or filename is None
# If we are not overriding the config filename
if not filename:
filename = MACKUP_CONFIG_FILE
parser = configparser.SafeConfigParser(allow_no_value=True)
parser.read(os.path.join(os.path.join(os.environ['HOME'], filename)))
return parser | ['def', '_setup_parser', '(', 'self', ',', 'filename', '=', 'None', ')', ':', 'assert', 'isinstance', '(', 'filename', ',', 'str', ')', 'or', 'filename', 'is', 'None', '# If we are not overriding the config filename', 'if', 'not', 'filename', ':', 'filename', '=', 'MACKUP_CONFIG_FILE', 'parser', '=', 'configparser', '.', 'SafeConfigParser', '(', 'allow_no_value', '=', 'True', ')', 'parser', '.', 'read', '(', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'environ', '[', "'HOME'", ']', ',', 'filename', ')', ')', ')', 'return', 'parser'] | Configure the ConfigParser instance the way we want it.
Args:
filename (str) or None
Returns:
SafeConfigParser | ['Configure', 'the', 'ConfigParser', 'instance', 'the', 'way', 'we', 'want', 'it', '.'] | train | https://github.com/lra/mackup/blob/ed0b5626b033f232868900bfd5108df448873725/mackup/config.py#L132-L151 |
7,612 | PyGithub/PyGithub | github/Requester.py | Requester.NEW_DEBUG_FRAME | python | train | https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Requester.py#L188-L202

def NEW_DEBUG_FRAME(self, requestHeader):
    """
    Initialize a debug frame with requestHeader
    Frame count is updated and will be attached to respond header
    The structure of a frame: [requestHeader, statusCode, responseHeader, raw_data]
    Some of them may be None
    """
    if self.DEBUG_FLAG:  # pragma no branch (Flag always set in tests)
        new_frame = [requestHeader, None, None, None]
        if self._frameCount < self.DEBUG_FRAME_BUFFER_SIZE - 1:  # pragma no branch (Should be covered)
            self._frameBuffer.append(new_frame)
        else:
            self._frameBuffer[0] = new_frame  # pragma no cover (Should be covered)

        self._frameCount = len(self._frameBuffer) - 1

"""
Initialize a debug frame with requestHeader
Frame count is updated and will be attached to respond header
The structure of a frame: [requestHeader, statusCode, responseHeader, raw_data]
Some of them may be None
"""
if self.DEBUG_FLAG: # pragma no branch (Flag always set in tests)
new_frame = [requestHeader, None, None, None]
if self._frameCount < self.DEBUG_FRAME_BUFFER_SIZE - 1: # pragma no branch (Should be covered)
self._frameBuffer.append(new_frame)
else:
self._frameBuffer[0] = new_frame # pragma no cover (Should be covered)
self._frameCount = len(self._frameBuffer) - 1 | ['def', 'NEW_DEBUG_FRAME', '(', 'self', ',', 'requestHeader', ')', ':', 'if', 'self', '.', 'DEBUG_FLAG', ':', '# pragma no branch (Flag always set in tests)', 'new_frame', '=', '[', 'requestHeader', ',', 'None', ',', 'None', ',', 'None', ']', 'if', 'self', '.', '_frameCount', '<', 'self', '.', 'DEBUG_FRAME_BUFFER_SIZE', '-', '1', ':', '# pragma no branch (Should be covered)', 'self', '.', '_frameBuffer', '.', 'append', '(', 'new_frame', ')', 'else', ':', 'self', '.', '_frameBuffer', '[', '0', ']', '=', 'new_frame', '# pragma no cover (Should be covered)', 'self', '.', '_frameCount', '=', 'len', '(', 'self', '.', '_frameBuffer', ')', '-', '1'] | Initialize a debug frame with requestHeader
Frame count is updated and will be attached to respond header
The structure of a frame: [requestHeader, statusCode, responseHeader, raw_data]
Some of them may be None | ['Initialize', 'a', 'debug', 'frame', 'with', 'requestHeader', 'Frame', 'count', 'is', 'updated', 'and', 'will', 'be', 'attached', 'to', 'respond', 'header', 'The', 'structure', 'of', 'a', 'frame', ':', '[', 'requestHeader', 'statusCode', 'responseHeader', 'raw_data', ']', 'Some', 'of', 'them', 'may', 'be', 'None'] | train | https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Requester.py#L188-L202 |
7,613 | aws/chalice | chalice/deploy/packager.py | PipRunner._execute | python | train | https://github.com/aws/chalice/blob/10d7fb52e68bd1c52aae251c97e3939fc0190412/chalice/deploy/packager.py#L696-L708

def _execute(self,
             command,        # type: str
             args,           # type: List[str]
             env_vars=None,  # type: EnvVars
             shim=None       # type: OptStr
             ):
    # type: (...) -> Tuple[int, bytes, bytes]
    """Execute a pip command with the given arguments."""
    main_args = [command] + args
    logger.debug("calling pip %s", ' '.join(main_args))
    rc, out, err = self._wrapped_pip.main(main_args, env_vars=env_vars,
                                          shim=shim)
    return rc, out, err

7,614 | bokeh/bokeh | bokeh/model.py | Model._attach_document | python | train | https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/model.py#L704-L715

def _attach_document(self, doc):
    ''' Attach a model to a Bokeh |Document|.

    This private interface should only ever called by the Document
    implementation to set the private ._document field properly
    '''
    if self._document is not None and self._document is not doc:
        raise RuntimeError("Models must be owned by only a single document, %r is already in a doc" % (self))

    doc.theme.apply_to_model(self)
    self._document = doc
    self._update_event_callbacks()

7,615 | PaulHancock/Aegean | AegeanTools/cluster.py | pairwise_ellpitical_binary | python | train | https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/cluster.py#L89-L135

def pairwise_ellpitical_binary(sources, eps, far=None):
    """
    Do a pairwise comparison of all sources and determine if they have a normalized distance within
    eps.

    Form this into a matrix of shape NxN.

    Parameters
    ----------
    sources : list
        A list of sources (objects with parameters: ra,dec,a,b,pa)

    eps : float
        Normalised distance constraint.

    far : float
        If sources have a dec that differs by more than this amount then they are considered to be not matched.
        This is a short-cut around performing GCD calculations.

    Returns
    -------
    prob : numpy.ndarray
        A 2d array of True/False.

    See Also
    --------
    :func:`AegeanTools.cluster.norm_dist`
    """
    if far is None:
        far = max(a.a/3600 for a in sources)
    l = len(sources)
    distances = np.zeros((l, l), dtype=bool)
    for i in range(l):
        for j in range(i, l):
            if i == j:
                distances[i, j] = False
                continue
            src1 = sources[i]
            src2 = sources[j]
            if src2.dec - src1.dec > far:
                break
            if abs(src2.ra - src1.ra)*np.cos(np.radians(src1.dec)) > far:
                continue
            distances[i, j] = norm_dist(src1, src2) > eps
            distances[j, i] = distances[i, j]
    return distances

"""
Do a pairwise comparison of all sources and determine if they have a normalized distance within
eps.
Form this into a matrix of shape NxN.
Parameters
----------
sources : list
A list of sources (objects with parameters: ra,dec,a,b,pa)
eps : float
Normalised distance constraint.
far : float
If sources have a dec that differs by more than this amount then they are considered to be not matched.
This is a short-cut around performing GCD calculations.
Returns
-------
prob : numpy.ndarray
A 2d array of True/False.
See Also
--------
:func:`AegeanTools.cluster.norm_dist`
"""
if far is None:
far = max(a.a/3600 for a in sources)
l = len(sources)
distances = np.zeros((l, l), dtype=bool)
for i in range(l):
for j in range(i, l):
if i == j:
distances[i, j] = False
continue
src1 = sources[i]
src2 = sources[j]
if src2.dec - src1.dec > far:
break
if abs(src2.ra - src1.ra)*np.cos(np.radians(src1.dec)) > far:
continue
distances[i, j] = norm_dist(src1, src2) > eps
distances[j, i] = distances[i, j]
return distances | ['def', 'pairwise_ellpitical_binary', '(', 'sources', ',', 'eps', ',', 'far', '=', 'None', ')', ':', 'if', 'far', 'is', 'None', ':', 'far', '=', 'max', '(', 'a', '.', 'a', '/', '3600', 'for', 'a', 'in', 'sources', ')', 'l', '=', 'len', '(', 'sources', ')', 'distances', '=', 'np', '.', 'zeros', '(', '(', 'l', ',', 'l', ')', ',', 'dtype', '=', 'bool', ')', 'for', 'i', 'in', 'range', '(', 'l', ')', ':', 'for', 'j', 'in', 'range', '(', 'i', ',', 'l', ')', ':', 'if', 'i', '==', 'j', ':', 'distances', '[', 'i', ',', 'j', ']', '=', 'False', 'continue', 'src1', '=', 'sources', '[', 'i', ']', 'src2', '=', 'sources', '[', 'j', ']', 'if', 'src2', '.', 'dec', '-', 'src1', '.', 'dec', '>', 'far', ':', 'break', 'if', 'abs', '(', 'src2', '.', 'ra', '-', 'src1', '.', 'ra', ')', '*', 'np', '.', 'cos', '(', 'np', '.', 'radians', '(', 'src1', '.', 'dec', ')', ')', '>', 'far', ':', 'continue', 'distances', '[', 'i', ',', 'j', ']', '=', 'norm_dist', '(', 'src1', ',', 'src2', ')', '>', 'eps', 'distances', '[', 'j', ',', 'i', ']', '=', 'distances', '[', 'i', ',', 'j', ']', 'return', 'distances'] | Do a pairwise comparison of all sources and determine if they have a normalized distance within
eps.
Form this into a matrix of shape NxN.
Parameters
----------
sources : list
A list of sources (objects with parameters: ra,dec,a,b,pa)
eps : float
Normalised distance constraint.
far : float
If sources have a dec that differs by more than this amount then they are considered to be not matched.
This is a short-cut around performing GCD calculations.
Returns
-------
prob : numpy.ndarray
A 2d array of True/False.
See Also
--------
:func:`AegeanTools.cluster.norm_dist` | ['Do', 'a', 'pairwise', 'comparison', 'of', 'all', 'sources', 'and', 'determine', 'if', 'they', 'have', 'a', 'normalized', 'distance', 'within', 'eps', '.'] | train | https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/cluster.py#L89-L135 |
7,616 | Erotemic/utool | utool/util_tags.py | filterflags_general_tags | def filterflags_general_tags(tags_list, has_any=None, has_all=None,
has_none=None, min_num=None, max_num=None,
any_startswith=None, any_endswith=None,
in_any=None, any_match=None, none_match=None,
logic='and', ignore_case=True):
r"""
maybe integrate into utool? Seems pretty general
Args:
tags_list (list):
has_any (None): (default = None)
has_all (None): (default = None)
min_num (None): (default = None)
max_num (None): (default = None)
Notes:
in_any should probably be ni_any
TODO: make this function more natural
CommandLine:
python -m utool.util_tags --exec-filterflags_general_tags
python -m utool.util_tags --exec-filterflags_general_tags:0 --helpx
python -m utool.util_tags --exec-filterflags_general_tags:0
python -m utool.util_tags --exec-filterflags_general_tags:0 --none_match n
python -m utool.util_tags --exec-filterflags_general_tags:0 --has_none=n,o
python -m utool.util_tags --exec-filterflags_general_tags:1
python -m utool.util_tags --exec-filterflags_general_tags:2
Example0:
>>> # ENABLE_DOCTEST
>>> from utool.util_tags import * # NOQA
>>> import utool as ut
>>> tags_list = [['v'], [], ['P'], ['P', 'o'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['q', 'v'], ['n'], ['n'], ['N']]
>>> kwargs = ut.argparse_dict(ut.get_kwdefaults2(filterflags_general_tags), type_hint=list)
>>> print('kwargs = %r' % (kwargs,))
>>> flags = filterflags_general_tags(tags_list, **kwargs)
>>> print(flags)
>>> result = ut.compress(tags_list, flags)
>>> print('result = %r' % (result,))
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_tags import * # NOQA
>>> import utool as ut
>>> tags_list = [['v'], [], ['P'], ['P'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['n'], ['n'], ['N']]
>>> has_all = 'n'
>>> min_num = 1
>>> flags = filterflags_general_tags(tags_list, has_all=has_all, min_num=min_num)
>>> result = ut.compress(tags_list, flags)
>>> print('result = %r' % (result,))
Example2:
>>> # ENABLE_DOCTEST
>>> from utool.util_tags import * # NOQA
>>> import utool as ut
>>> tags_list = [['vn'], ['vn', 'no'], ['P'], ['P'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['n'], ['n', 'nP'], ['NP']]
>>> kwargs = {
>>> 'any_endswith': 'n',
>>> 'any_match': None,
>>> 'any_startswith': 'n',
>>> 'has_all': None,
>>> 'has_any': None,
>>> 'has_none': None,
>>> 'max_num': 3,
>>> 'min_num': 1,
>>> 'none_match': ['P'],
>>> }
>>> flags = filterflags_general_tags(tags_list, **kwargs)
>>> filtered = ut.compress(tags_list, flags)
>>> result = ('result = %s' % (ut.repr2(filtered),))
result = [['vn', 'no'], ['n', 'o'], ['n', 'N'], ['n'], ['n', 'nP']]
"""
import numpy as np
import utool as ut
def _fix_tags(tags):
if ignore_case:
return set([]) if tags is None else {six.text_type(t.lower()) for t in tags}
else:
            return set([]) if tags is None else {six.text_type(t) for t in tags}
if logic is None:
logic = 'and'
logic_func = {
'and': np.logical_and,
'or': np.logical_or,
}[logic]
default_func = {
'and': np.ones,
'or': np.zeros,
}[logic]
tags_list_ = [_fix_tags(tags_) for tags_ in tags_list]
flags = default_func(len(tags_list_), dtype=np.bool)
if min_num is not None:
flags_ = [len(tags_) >= min_num for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
if max_num is not None:
flags_ = [len(tags_) <= max_num for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
if has_any is not None:
has_any = _fix_tags(set(ut.ensure_iterable(has_any)))
flags_ = [len(has_any.intersection(tags_)) > 0 for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
if has_none is not None:
has_none = _fix_tags(set(ut.ensure_iterable(has_none)))
flags_ = [len(has_none.intersection(tags_)) == 0 for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
if has_all is not None:
has_all = _fix_tags(set(ut.ensure_iterable(has_all)))
flags_ = [len(has_all.intersection(tags_)) == len(has_all) for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
def _test_item(tags_, fields, op, compare):
t_flags = [any([compare(t, f) for f in fields]) for t in tags_]
num_passed = sum(t_flags)
flag = op(num_passed, 0)
return flag
def _flag_tags(tags_list, fields, op, compare):
flags = [_test_item(tags_, fields, op, compare) for tags_ in tags_list_]
return flags
def _exec_filter(flags, tags_list, fields, op, compare):
if fields is not None:
fields = ut.ensure_iterable(fields)
if ignore_case:
fields = [f.lower() for f in fields]
flags_ = _flag_tags(tags_list, fields, op, compare)
logic_func(flags, flags_, out=flags)
return flags
flags = _exec_filter(
flags, tags_list, any_startswith,
operator.gt, six.text_type.startswith)
flags = _exec_filter(
flags, tags_list, in_any,
operator.gt, operator.contains)
flags = _exec_filter(
flags, tags_list, any_endswith,
operator.gt, six.text_type.endswith)
flags = _exec_filter(
flags, tags_list, any_match,
operator.gt, lambda t, f: re.match(f, t))
flags = _exec_filter(
flags, tags_list, none_match,
operator.eq, lambda t, f: re.match(f, t))
return flags | python | def filterflags_general_tags(tags_list, has_any=None, has_all=None,
has_none=None, min_num=None, max_num=None,
any_startswith=None, any_endswith=None,
in_any=None, any_match=None, none_match=None,
logic='and', ignore_case=True):
r"""
maybe integrate into utool? Seems pretty general
Args:
tags_list (list):
has_any (None): (default = None)
has_all (None): (default = None)
min_num (None): (default = None)
max_num (None): (default = None)
Notes:
in_any should probably be ni_any
TODO: make this function more natural
CommandLine:
python -m utool.util_tags --exec-filterflags_general_tags
python -m utool.util_tags --exec-filterflags_general_tags:0 --helpx
python -m utool.util_tags --exec-filterflags_general_tags:0
python -m utool.util_tags --exec-filterflags_general_tags:0 --none_match n
python -m utool.util_tags --exec-filterflags_general_tags:0 --has_none=n,o
python -m utool.util_tags --exec-filterflags_general_tags:1
python -m utool.util_tags --exec-filterflags_general_tags:2
Example0:
>>> # ENABLE_DOCTEST
>>> from utool.util_tags import * # NOQA
>>> import utool as ut
>>> tags_list = [['v'], [], ['P'], ['P', 'o'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['q', 'v'], ['n'], ['n'], ['N']]
>>> kwargs = ut.argparse_dict(ut.get_kwdefaults2(filterflags_general_tags), type_hint=list)
>>> print('kwargs = %r' % (kwargs,))
>>> flags = filterflags_general_tags(tags_list, **kwargs)
>>> print(flags)
>>> result = ut.compress(tags_list, flags)
>>> print('result = %r' % (result,))
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_tags import * # NOQA
>>> import utool as ut
>>> tags_list = [['v'], [], ['P'], ['P'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['n'], ['n'], ['N']]
>>> has_all = 'n'
>>> min_num = 1
>>> flags = filterflags_general_tags(tags_list, has_all=has_all, min_num=min_num)
>>> result = ut.compress(tags_list, flags)
>>> print('result = %r' % (result,))
Example2:
>>> # ENABLE_DOCTEST
>>> from utool.util_tags import * # NOQA
>>> import utool as ut
>>> tags_list = [['vn'], ['vn', 'no'], ['P'], ['P'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['n'], ['n', 'nP'], ['NP']]
>>> kwargs = {
>>> 'any_endswith': 'n',
>>> 'any_match': None,
>>> 'any_startswith': 'n',
>>> 'has_all': None,
>>> 'has_any': None,
>>> 'has_none': None,
>>> 'max_num': 3,
>>> 'min_num': 1,
>>> 'none_match': ['P'],
>>> }
>>> flags = filterflags_general_tags(tags_list, **kwargs)
>>> filtered = ut.compress(tags_list, flags)
>>> result = ('result = %s' % (ut.repr2(filtered),))
result = [['vn', 'no'], ['n', 'o'], ['n', 'N'], ['n'], ['n', 'nP']]
"""
import numpy as np
import utool as ut
def _fix_tags(tags):
if ignore_case:
return set([]) if tags is None else {six.text_type(t.lower()) for t in tags}
else:
            return set([]) if tags is None else {six.text_type(t) for t in tags}
if logic is None:
logic = 'and'
logic_func = {
'and': np.logical_and,
'or': np.logical_or,
}[logic]
default_func = {
'and': np.ones,
'or': np.zeros,
}[logic]
tags_list_ = [_fix_tags(tags_) for tags_ in tags_list]
flags = default_func(len(tags_list_), dtype=np.bool)
if min_num is not None:
flags_ = [len(tags_) >= min_num for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
if max_num is not None:
flags_ = [len(tags_) <= max_num for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
if has_any is not None:
has_any = _fix_tags(set(ut.ensure_iterable(has_any)))
flags_ = [len(has_any.intersection(tags_)) > 0 for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
if has_none is not None:
has_none = _fix_tags(set(ut.ensure_iterable(has_none)))
flags_ = [len(has_none.intersection(tags_)) == 0 for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
if has_all is not None:
has_all = _fix_tags(set(ut.ensure_iterable(has_all)))
flags_ = [len(has_all.intersection(tags_)) == len(has_all) for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
def _test_item(tags_, fields, op, compare):
t_flags = [any([compare(t, f) for f in fields]) for t in tags_]
num_passed = sum(t_flags)
flag = op(num_passed, 0)
return flag
def _flag_tags(tags_list, fields, op, compare):
flags = [_test_item(tags_, fields, op, compare) for tags_ in tags_list_]
return flags
def _exec_filter(flags, tags_list, fields, op, compare):
if fields is not None:
fields = ut.ensure_iterable(fields)
if ignore_case:
fields = [f.lower() for f in fields]
flags_ = _flag_tags(tags_list, fields, op, compare)
logic_func(flags, flags_, out=flags)
return flags
flags = _exec_filter(
flags, tags_list, any_startswith,
operator.gt, six.text_type.startswith)
flags = _exec_filter(
flags, tags_list, in_any,
operator.gt, operator.contains)
flags = _exec_filter(
flags, tags_list, any_endswith,
operator.gt, six.text_type.endswith)
flags = _exec_filter(
flags, tags_list, any_match,
operator.gt, lambda t, f: re.match(f, t))
flags = _exec_filter(
flags, tags_list, none_match,
operator.eq, lambda t, f: re.match(f, t))
return flags | ['def', 'filterflags_general_tags', '(', 'tags_list', ',', 'has_any', '=', 'None', ',', 'has_all', '=', 'None', ',', 'has_none', '=', 'None', ',', 'min_num', '=', 'None', ',', 'max_num', '=', 'None', ',', 'any_startswith', '=', 'None', ',', 'any_endswith', '=', 'None', ',', 'in_any', '=', 'None', ',', 'any_match', '=', 'None', ',', 'none_match', '=', 'None', ',', 'logic', '=', "'and'", ',', 'ignore_case', '=', 'True', ')', ':', 'import', 'numpy', 'as', 'np', 'import', 'utool', 'as', 'ut', 'def', '_fix_tags', '(', 'tags', ')', ':', 'if', 'ignore_case', ':', 'return', 'set', '(', '[', ']', ')', 'if', 'tags', 'is', 'None', 'else', '{', 'six', '.', 'text_type', '(', 't', '.', 'lower', '(', ')', ')', 'for', 't', 'in', 'tags', '}', 'else', ':', 'return', 'set', '(', '[', ']', ')', 'if', 'tags', 'is', 'None', 'else', '{', 'six', '.', 'text_type', '(', ')', 'for', 't', 'in', 'tags', '}', 'if', 'logic', 'is', 'None', ':', 'logic', '=', "'and'", 'logic_func', '=', '{', "'and'", ':', 'np', '.', 'logical_and', ',', "'or'", ':', 'np', '.', 'logical_or', ',', '}', '[', 'logic', ']', 'default_func', '=', '{', "'and'", ':', 'np', '.', 'ones', ',', "'or'", ':', 'np', '.', 'zeros', ',', '}', '[', 'logic', ']', 'tags_list_', '=', '[', '_fix_tags', '(', 'tags_', ')', 'for', 'tags_', 'in', 'tags_list', ']', 'flags', '=', 'default_func', '(', 'len', '(', 'tags_list_', ')', ',', 'dtype', '=', 'np', '.', 'bool', ')', 'if', 'min_num', 'is', 'not', 'None', ':', 'flags_', '=', '[', 'len', '(', 'tags_', ')', '>=', 'min_num', 'for', 'tags_', 'in', 'tags_list_', ']', 'logic_func', '(', 'flags', ',', 'flags_', ',', 'out', '=', 'flags', ')', 'if', 'max_num', 'is', 'not', 'None', ':', 'flags_', '=', '[', 'len', '(', 'tags_', ')', '<=', 'max_num', 'for', 'tags_', 'in', 'tags_list_', ']', 'logic_func', '(', 'flags', ',', 'flags_', ',', 'out', '=', 'flags', ')', 'if', 'has_any', 'is', 'not', 'None', ':', 'has_any', '=', '_fix_tags', '(', 'set', '(', 'ut', '.', 'ensure_iterable', '(', 'has_any', ')', ')', ')', 'flags_', '=', '[', 'len', '(', 'has_any', '.', 'intersection', '(', 'tags_', ')', ')', '>', '0', 'for', 'tags_', 'in', 'tags_list_', ']', 'logic_func', '(', 'flags', ',', 'flags_', ',', 'out', '=', 'flags', ')', 'if', 'has_none', 'is', 'not', 'None', ':', 'has_none', '=', '_fix_tags', '(', 'set', '(', 'ut', '.', 'ensure_iterable', '(', 'has_none', ')', ')', ')', 'flags_', '=', '[', 'len', '(', 'has_none', '.', 'intersection', '(', 'tags_', ')', ')', '==', '0', 'for', 'tags_', 'in', 'tags_list_', ']', 'logic_func', '(', 'flags', ',', 'flags_', ',', 'out', '=', 'flags', ')', 'if', 'has_all', 'is', 'not', 'None', ':', 'has_all', '=', '_fix_tags', '(', 'set', '(', 'ut', '.', 'ensure_iterable', '(', 'has_all', ')', ')', ')', 'flags_', '=', '[', 'len', '(', 'has_all', '.', 'intersection', '(', 'tags_', ')', ')', '==', 'len', '(', 'has_all', ')', 'for', 'tags_', 'in', 'tags_list_', ']', 'logic_func', '(', 'flags', ',', 'flags_', ',', 'out', '=', 'flags', ')', 'def', '_test_item', '(', 'tags_', ',', 'fields', ',', 'op', ',', 'compare', ')', ':', 't_flags', '=', '[', 'any', '(', '[', 'compare', '(', 't', ',', 'f', ')', 'for', 'f', 'in', 'fields', ']', ')', 'for', 't', 'in', 'tags_', ']', 'num_passed', '=', 'sum', '(', 't_flags', ')', 'flag', '=', 'op', '(', 'num_passed', ',', '0', ')', 'return', 'flag', 'def', '_flag_tags', '(', 'tags_list', ',', 'fields', ',', 'op', ',', 'compare', ')', ':', 'flags', '=', '[', '_test_item', '(', 'tags_', ',', 'fields', ',', 'op', ',', 'compare', ')', 'for', 'tags_', 'in', 
'tags_list_', ']', 'return', 'flags', 'def', '_exec_filter', '(', 'flags', ',', 'tags_list', ',', 'fields', ',', 'op', ',', 'compare', ')', ':', 'if', 'fields', 'is', 'not', 'None', ':', 'fields', '=', 'ut', '.', 'ensure_iterable', '(', 'fields', ')', 'if', 'ignore_case', ':', 'fields', '=', '[', 'f', '.', 'lower', '(', ')', 'for', 'f', 'in', 'fields', ']', 'flags_', '=', '_flag_tags', '(', 'tags_list', ',', 'fields', ',', 'op', ',', 'compare', ')', 'logic_func', '(', 'flags', ',', 'flags_', ',', 'out', '=', 'flags', ')', 'return', 'flags', 'flags', '=', '_exec_filter', '(', 'flags', ',', 'tags_list', ',', 'any_startswith', ',', 'operator', '.', 'gt', ',', 'six', '.', 'text_type', '.', 'startswith', ')', 'flags', '=', '_exec_filter', '(', 'flags', ',', 'tags_list', ',', 'in_any', ',', 'operator', '.', 'gt', ',', 'operator', '.', 'contains', ')', 'flags', '=', '_exec_filter', '(', 'flags', ',', 'tags_list', ',', 'any_endswith', ',', 'operator', '.', 'gt', ',', 'six', '.', 'text_type', '.', 'endswith', ')', 'flags', '=', '_exec_filter', '(', 'flags', ',', 'tags_list', ',', 'any_match', ',', 'operator', '.', 'gt', ',', 'lambda', 't', ',', 'f', ':', 're', '.', 'match', '(', 'f', ',', 't', ')', ')', 'flags', '=', '_exec_filter', '(', 'flags', ',', 'tags_list', ',', 'none_match', ',', 'operator', '.', 'eq', ',', 'lambda', 't', ',', 'f', ':', 're', '.', 'match', '(', 'f', ',', 't', ')', ')', 'return', 'flags'] | r"""
maybe integrate into utool? Seems pretty general
Args:
tags_list (list):
has_any (None): (default = None)
has_all (None): (default = None)
min_num (None): (default = None)
max_num (None): (default = None)
Notes:
in_any should probably be ni_any
TODO: make this function more natural
CommandLine:
python -m utool.util_tags --exec-filterflags_general_tags
python -m utool.util_tags --exec-filterflags_general_tags:0 --helpx
python -m utool.util_tags --exec-filterflags_general_tags:0
python -m utool.util_tags --exec-filterflags_general_tags:0 --none_match n
python -m utool.util_tags --exec-filterflags_general_tags:0 --has_none=n,o
python -m utool.util_tags --exec-filterflags_general_tags:1
python -m utool.util_tags --exec-filterflags_general_tags:2
Example0:
>>> # ENABLE_DOCTEST
>>> from utool.util_tags import * # NOQA
>>> import utool as ut
>>> tags_list = [['v'], [], ['P'], ['P', 'o'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['q', 'v'], ['n'], ['n'], ['N']]
>>> kwargs = ut.argparse_dict(ut.get_kwdefaults2(filterflags_general_tags), type_hint=list)
>>> print('kwargs = %r' % (kwargs,))
>>> flags = filterflags_general_tags(tags_list, **kwargs)
>>> print(flags)
>>> result = ut.compress(tags_list, flags)
>>> print('result = %r' % (result,))
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_tags import * # NOQA
>>> import utool as ut
>>> tags_list = [['v'], [], ['P'], ['P'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['n'], ['n'], ['N']]
>>> has_all = 'n'
>>> min_num = 1
>>> flags = filterflags_general_tags(tags_list, has_all=has_all, min_num=min_num)
>>> result = ut.compress(tags_list, flags)
>>> print('result = %r' % (result,))
Example2:
>>> # ENABLE_DOCTEST
>>> from utool.util_tags import * # NOQA
>>> import utool as ut
>>> tags_list = [['vn'], ['vn', 'no'], ['P'], ['P'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['n'], ['n', 'nP'], ['NP']]
>>> kwargs = {
>>> 'any_endswith': 'n',
>>> 'any_match': None,
>>> 'any_startswith': 'n',
>>> 'has_all': None,
>>> 'has_any': None,
>>> 'has_none': None,
>>> 'max_num': 3,
>>> 'min_num': 1,
>>> 'none_match': ['P'],
>>> }
>>> flags = filterflags_general_tags(tags_list, **kwargs)
>>> filtered = ut.compress(tags_list, flags)
>>> result = ('result = %s' % (ut.repr2(filtered),))
result = [['vn', 'no'], ['n', 'o'], ['n', 'N'], ['n'], ['n', 'nP']] | ['r', 'maybe', 'integrate', 'into', 'utool?', 'Seems', 'pretty', 'general'] | train | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_tags.py#L141-L300 |
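The doctests in record 7,616 exercise the function through utool; the core has_any/has_all/min_num logic can be reproduced without that dependency. A self-contained sketch with only those three constraints and a hand-picked tag list:

import numpy as np

def filter_tags(tags_list, has_any=None, has_all=None, min_num=None):
    tags_list = [set(t.lower() for t in tags) for tags in tags_list]
    flags = np.ones(len(tags_list), dtype=bool)              # 'and' logic, as in the default
    if min_num is not None:
        flags &= np.array([len(t) >= min_num for t in tags_list])
    if has_any is not None:
        wanted = {t.lower() for t in has_any}
        flags &= np.array([bool(t & wanted) for t in tags_list])
    if has_all is not None:
        required = {t.lower() for t in has_all}
        flags &= np.array([required <= t for t in tags_list])
    return flags

tags_list = [['v'], [], ['P'], ['n', 'o'], ['n', 'N']]
keep = filter_tags(tags_list, has_all=['n'], min_num=1)
print([tags for tags, ok in zip(tags_list, keep) if ok])   # [['n', 'o'], ['n', 'N']]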
7,617 | bwohlberg/sporco | sporco/dictlrn/onlinecdl.py | OnlineConvBPDNDictLearn.solve | def solve(self, S, dimK=None):
"""Compute sparse coding and dictionary update for training
data `S`."""
# Use dimK specified in __init__ as default
if dimK is None and self.dimK is not None:
dimK = self.dimK
# Start solve timer
self.timer.start(['solve', 'solve_wo_eval'])
# Solve CSC problem on S and do dictionary step
self.init_vars(S, dimK)
self.xstep(S, self.lmbda, dimK)
self.dstep()
# Stop solve timer
self.timer.stop('solve_wo_eval')
# Extract and record iteration stats
self.manage_itstat()
# Increment iteration count
self.j += 1
# Stop solve timer
self.timer.stop('solve')
# Return current dictionary
return self.getdict() | python | def solve(self, S, dimK=None):
"""Compute sparse coding and dictionary update for training
data `S`."""
# Use dimK specified in __init__ as default
if dimK is None and self.dimK is not None:
dimK = self.dimK
# Start solve timer
self.timer.start(['solve', 'solve_wo_eval'])
# Solve CSC problem on S and do dictionary step
self.init_vars(S, dimK)
self.xstep(S, self.lmbda, dimK)
self.dstep()
# Stop solve timer
self.timer.stop('solve_wo_eval')
# Extract and record iteration stats
self.manage_itstat()
# Increment iteration count
self.j += 1
# Stop solve timer
self.timer.stop('solve')
# Return current dictionary
return self.getdict() | ['def', 'solve', '(', 'self', ',', 'S', ',', 'dimK', '=', 'None', ')', ':', '# Use dimK specified in __init__ as default', 'if', 'dimK', 'is', 'None', 'and', 'self', '.', 'dimK', 'is', 'not', 'None', ':', 'dimK', '=', 'self', '.', 'dimK', '# Start solve timer', 'self', '.', 'timer', '.', 'start', '(', '[', "'solve'", ',', "'solve_wo_eval'", ']', ')', '# Solve CSC problem on S and do dictionary step', 'self', '.', 'init_vars', '(', 'S', ',', 'dimK', ')', 'self', '.', 'xstep', '(', 'S', ',', 'self', '.', 'lmbda', ',', 'dimK', ')', 'self', '.', 'dstep', '(', ')', '# Stop solve timer', 'self', '.', 'timer', '.', 'stop', '(', "'solve_wo_eval'", ')', '# Extract and record iteration stats', 'self', '.', 'manage_itstat', '(', ')', '# Increment iteration count', 'self', '.', 'j', '+=', '1', '# Stop solve timer', 'self', '.', 'timer', '.', 'stop', '(', "'solve'", ')', '# Return current dictionary', 'return', 'self', '.', 'getdict', '(', ')'] | Compute sparse coding and dictionary update for training
data `S`. | ['Compute', 'sparse', 'coding', 'and', 'dictionary', 'update', 'for', 'training', 'data', 'S', '.'] | train | https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/dictlrn/onlinecdl.py#L209-L238 |
7,618 | Arello-Mobile/swagger2rst | swg2rst/swagger/abstract_type_object.py | AbstractTypeObject.set_type_by_schema | def set_type_by_schema(self, schema_obj, schema_type):
"""
Set property type by schema object
        The schema will be created if it doesn't already exist in the collection
:param dict schema_obj: raw schema object
:param str schema_type:
"""
schema_id = self._get_object_schema_id(schema_obj, schema_type)
if not self.storage.contains(schema_id):
schema = self.storage.create_schema(
schema_obj, self.name, schema_type, root=self.root)
assert schema.schema_id == schema_id
self._type = schema_id | python | def set_type_by_schema(self, schema_obj, schema_type):
"""
Set property type by schema object
        The schema will be created if it doesn't already exist in the collection
:param dict schema_obj: raw schema object
:param str schema_type:
"""
schema_id = self._get_object_schema_id(schema_obj, schema_type)
if not self.storage.contains(schema_id):
schema = self.storage.create_schema(
schema_obj, self.name, schema_type, root=self.root)
assert schema.schema_id == schema_id
self._type = schema_id | ['def', 'set_type_by_schema', '(', 'self', ',', 'schema_obj', ',', 'schema_type', ')', ':', 'schema_id', '=', 'self', '.', '_get_object_schema_id', '(', 'schema_obj', ',', 'schema_type', ')', 'if', 'not', 'self', '.', 'storage', '.', 'contains', '(', 'schema_id', ')', ':', 'schema', '=', 'self', '.', 'storage', '.', 'create_schema', '(', 'schema_obj', ',', 'self', '.', 'name', ',', 'schema_type', ',', 'root', '=', 'self', '.', 'root', ')', 'assert', 'schema', '.', 'schema_id', '==', 'schema_id', 'self', '.', '_type', '=', 'schema_id'] | Set property type by schema object
The schema will be created if it doesn't already exist in the collection
:param dict schema_obj: raw schema object
:param str schema_type: | ['Set', 'property', 'type', 'by', 'schema', 'object', 'Schema', 'will', 'create', 'if', 'it', 'doesn', 't', 'exists', 'in', 'collection'] | train | https://github.com/Arello-Mobile/swagger2rst/blob/e519f70701477dcc9f0bb237ee5b8e08e848701b/swg2rst/swagger/abstract_type_object.py#L75-L89 |
7,619 | mikedh/trimesh | trimesh/ray/ray_triangle.py | ray_bounds | def ray_bounds(ray_origins,
ray_directions,
bounds,
buffer_dist=1e-5):
"""
Given a set of rays and a bounding box for the volume of interest
where the rays will be passing through, find the bounding boxes
of the rays as they pass through the volume.
Parameters
------------
ray_origins: (m,3) float, ray origin points
ray_directions: (m,3) float, ray direction vectors
bounds: (2,3) bounding box (min, max)
buffer_dist: float, distance to pad zero width bounding boxes
Returns
---------
ray_bounding: (n) set of AABB of rays passing through volume
"""
ray_origins = np.asanyarray(ray_origins, dtype=np.float64)
ray_directions = np.asanyarray(ray_directions, dtype=np.float64)
# bounding box we are testing against
bounds = np.asanyarray(bounds)
# find the primary axis of the vector
axis = np.abs(ray_directions).argmax(axis=1)
axis_bound = bounds.reshape((2, -1)).T[axis]
axis_ori = np.array([ray_origins[i][a]
for i, a in enumerate(axis)]).reshape((-1, 1))
axis_dir = np.array([ray_directions[i][a]
for i, a in enumerate(axis)]).reshape((-1, 1))
# parametric equation of a line
# point = direction*t + origin
# p = dt + o
# t = (p-o)/d
t = (axis_bound - axis_ori) / axis_dir
# prevent the bounding box from including triangles
# behind the ray origin
t[t < buffer_dist] = buffer_dist
# the value of t for both the upper and lower bounds
t_a = t[:, 0].reshape((-1, 1))
t_b = t[:, 1].reshape((-1, 1))
    # the cartesian point where the line hits the plane defined by
# axis
on_a = (ray_directions * t_a) + ray_origins
on_b = (ray_directions * t_b) + ray_origins
on_plane = np.column_stack(
(on_a, on_b)).reshape(
(-1, 2, ray_directions.shape[1]))
ray_bounding = np.hstack((on_plane.min(axis=1),
on_plane.max(axis=1)))
# pad the bounding box by TOL_BUFFER
# not sure if this is necessary, but if the ray is axis aligned
# this function will otherwise return zero volume bounding boxes
# which may or may not screw up the r-tree intersection queries
ray_bounding += np.array([-1, -1, -1, 1, 1, 1]) * buffer_dist
return ray_bounding | python | def ray_bounds(ray_origins,
ray_directions,
bounds,
buffer_dist=1e-5):
"""
Given a set of rays and a bounding box for the volume of interest
where the rays will be passing through, find the bounding boxes
of the rays as they pass through the volume.
Parameters
------------
ray_origins: (m,3) float, ray origin points
ray_directions: (m,3) float, ray direction vectors
bounds: (2,3) bounding box (min, max)
buffer_dist: float, distance to pad zero width bounding boxes
Returns
---------
ray_bounding: (n) set of AABB of rays passing through volume
"""
ray_origins = np.asanyarray(ray_origins, dtype=np.float64)
ray_directions = np.asanyarray(ray_directions, dtype=np.float64)
# bounding box we are testing against
bounds = np.asanyarray(bounds)
# find the primary axis of the vector
axis = np.abs(ray_directions).argmax(axis=1)
axis_bound = bounds.reshape((2, -1)).T[axis]
axis_ori = np.array([ray_origins[i][a]
for i, a in enumerate(axis)]).reshape((-1, 1))
axis_dir = np.array([ray_directions[i][a]
for i, a in enumerate(axis)]).reshape((-1, 1))
# parametric equation of a line
# point = direction*t + origin
# p = dt + o
# t = (p-o)/d
t = (axis_bound - axis_ori) / axis_dir
# prevent the bounding box from including triangles
# behind the ray origin
t[t < buffer_dist] = buffer_dist
# the value of t for both the upper and lower bounds
t_a = t[:, 0].reshape((-1, 1))
t_b = t[:, 1].reshape((-1, 1))
    # the cartesian point where the line hits the plane defined by
# axis
on_a = (ray_directions * t_a) + ray_origins
on_b = (ray_directions * t_b) + ray_origins
on_plane = np.column_stack(
(on_a, on_b)).reshape(
(-1, 2, ray_directions.shape[1]))
ray_bounding = np.hstack((on_plane.min(axis=1),
on_plane.max(axis=1)))
# pad the bounding box by TOL_BUFFER
# not sure if this is necessary, but if the ray is axis aligned
# this function will otherwise return zero volume bounding boxes
# which may or may not screw up the r-tree intersection queries
ray_bounding += np.array([-1, -1, -1, 1, 1, 1]) * buffer_dist
return ray_bounding | ['def', 'ray_bounds', '(', 'ray_origins', ',', 'ray_directions', ',', 'bounds', ',', 'buffer_dist', '=', '1e-5', ')', ':', 'ray_origins', '=', 'np', '.', 'asanyarray', '(', 'ray_origins', ',', 'dtype', '=', 'np', '.', 'float64', ')', 'ray_directions', '=', 'np', '.', 'asanyarray', '(', 'ray_directions', ',', 'dtype', '=', 'np', '.', 'float64', ')', '# bounding box we are testing against', 'bounds', '=', 'np', '.', 'asanyarray', '(', 'bounds', ')', '# find the primary axis of the vector', 'axis', '=', 'np', '.', 'abs', '(', 'ray_directions', ')', '.', 'argmax', '(', 'axis', '=', '1', ')', 'axis_bound', '=', 'bounds', '.', 'reshape', '(', '(', '2', ',', '-', '1', ')', ')', '.', 'T', '[', 'axis', ']', 'axis_ori', '=', 'np', '.', 'array', '(', '[', 'ray_origins', '[', 'i', ']', '[', 'a', ']', 'for', 'i', ',', 'a', 'in', 'enumerate', '(', 'axis', ')', ']', ')', '.', 'reshape', '(', '(', '-', '1', ',', '1', ')', ')', 'axis_dir', '=', 'np', '.', 'array', '(', '[', 'ray_directions', '[', 'i', ']', '[', 'a', ']', 'for', 'i', ',', 'a', 'in', 'enumerate', '(', 'axis', ')', ']', ')', '.', 'reshape', '(', '(', '-', '1', ',', '1', ')', ')', '# parametric equation of a line', '# point = direction*t + origin', '# p = dt + o', '# t = (p-o)/d', 't', '=', '(', 'axis_bound', '-', 'axis_ori', ')', '/', 'axis_dir', '# prevent the bounding box from including triangles', '# behind the ray origin', 't', '[', 't', '<', 'buffer_dist', ']', '=', 'buffer_dist', '# the value of t for both the upper and lower bounds', 't_a', '=', 't', '[', ':', ',', '0', ']', '.', 'reshape', '(', '(', '-', '1', ',', '1', ')', ')', 't_b', '=', 't', '[', ':', ',', '1', ']', '.', 'reshape', '(', '(', '-', '1', ',', '1', ')', ')', '# the cartesion point for where the line hits the plane defined by', '# axis', 'on_a', '=', '(', 'ray_directions', '*', 't_a', ')', '+', 'ray_origins', 'on_b', '=', '(', 'ray_directions', '*', 't_b', ')', '+', 'ray_origins', 'on_plane', '=', 'np', '.', 'column_stack', '(', '(', 'on_a', ',', 'on_b', ')', ')', '.', 'reshape', '(', '(', '-', '1', ',', '2', ',', 'ray_directions', '.', 'shape', '[', '1', ']', ')', ')', 'ray_bounding', '=', 'np', '.', 'hstack', '(', '(', 'on_plane', '.', 'min', '(', 'axis', '=', '1', ')', ',', 'on_plane', '.', 'max', '(', 'axis', '=', '1', ')', ')', ')', '# pad the bounding box by TOL_BUFFER', '# not sure if this is necessary, but if the ray is axis aligned', '# this function will otherwise return zero volume bounding boxes', '# which may or may not screw up the r-tree intersection queries', 'ray_bounding', '+=', 'np', '.', 'array', '(', '[', '-', '1', ',', '-', '1', ',', '-', '1', ',', '1', ',', '1', ',', '1', ']', ')', '*', 'buffer_dist', 'return', 'ray_bounding'] | Given a set of rays and a bounding box for the volume of interest
where the rays will be passing through, find the bounding boxes
of the rays as they pass through the volume.
Parameters
------------
ray_origins: (m,3) float, ray origin points
ray_directions: (m,3) float, ray direction vectors
bounds: (2,3) bounding box (min, max)
buffer_dist: float, distance to pad zero width bounding boxes
Returns
---------
ray_bounding: (n) set of AABB of rays passing through volume | ['Given', 'a', 'set', 'of', 'rays', 'and', 'a', 'bounding', 'box', 'for', 'the', 'volume', 'of', 'interest', 'where', 'the', 'rays', 'will', 'be', 'passing', 'through', 'find', 'the', 'bounding', 'boxes', 'of', 'the', 'rays', 'as', 'they', 'pass', 'through', 'the', 'volume', '.'] | train | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/ray/ray_triangle.py#L302-L368 |
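The heart of ray_bounds in record 7,619 is the parametric-line step; for a single ray it reduces to a few lines of numpy. The ray, bounds, and buffer value below are made up for the example:

import numpy as np

origin = np.array([0.0, 0.0, 0.0])
direction = np.array([0.8, 0.1, 0.1])
bounds = np.array([[-1.0, -1.0, -1.0], [2.0, 2.0, 2.0]])    # (min, max) corners
buffer_dist = 1e-5

axis = np.abs(direction).argmax()                           # dominant axis of the ray
t = (bounds[:, axis] - origin[axis]) / direction[axis]      # p = o + t*d  =>  t = (p - o) / d
t = np.clip(t, buffer_dist, None)                           # drop intersections behind the origin
points = origin + np.outer(t, direction)                    # where the ray meets the two axis planes
print(points.min(axis=0), points.max(axis=0))               # an AABB for the ray inside the volume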
7,620 | dbtsai/python-mimeparse | mimeparse.py | parse_media_range | def parse_media_range(range):
"""Parse a media-range into its component parts.
Carves up a media range and returns a tuple of the (type, subtype,
params) where 'params' is a dictionary of all the parameters for the media
range. For example, the media range 'application/*;q=0.5' would get parsed
into:
    ('application', '*', {'q': '0.5'})
In addition this function also guarantees that there is a value for 'q'
in the params dictionary, filling it in with a proper default if
necessary.
:rtype: (str,str,dict)
"""
(type, subtype, params) = parse_mime_type(range)
params.setdefault('q', params.pop('Q', None)) # q is case insensitive
try:
if not params['q'] or not 0 <= float(params['q']) <= 1:
params['q'] = '1'
except ValueError: # from float()
params['q'] = '1'
return (type, subtype, params) | python | def parse_media_range(range):
"""Parse a media-range into its component parts.
Carves up a media range and returns a tuple of the (type, subtype,
params) where 'params' is a dictionary of all the parameters for the media
range. For example, the media range 'application/*;q=0.5' would get parsed
into:
    ('application', '*', {'q': '0.5'})
In addition this function also guarantees that there is a value for 'q'
in the params dictionary, filling it in with a proper default if
necessary.
:rtype: (str,str,dict)
"""
(type, subtype, params) = parse_mime_type(range)
params.setdefault('q', params.pop('Q', None)) # q is case insensitive
try:
if not params['q'] or not 0 <= float(params['q']) <= 1:
params['q'] = '1'
except ValueError: # from float()
params['q'] = '1'
return (type, subtype, params) | ['def', 'parse_media_range', '(', 'range', ')', ':', '(', 'type', ',', 'subtype', ',', 'params', ')', '=', 'parse_mime_type', '(', 'range', ')', 'params', '.', 'setdefault', '(', "'q'", ',', 'params', '.', 'pop', '(', "'Q'", ',', 'None', ')', ')', '# q is case insensitive', 'try', ':', 'if', 'not', 'params', '[', "'q'", ']', 'or', 'not', '0', '<=', 'float', '(', 'params', '[', "'q'", ']', ')', '<=', '1', ':', 'params', '[', "'q'", ']', '=', "'1'", 'except', 'ValueError', ':', '# from float()', 'params', '[', "'q'", ']', '=', "'1'", 'return', '(', 'type', ',', 'subtype', ',', 'params', ')'] | Parse a media-range into its component parts.
Carves up a media range and returns a tuple of the (type, subtype,
params) where 'params' is a dictionary of all the parameters for the media
range. For example, the media range 'application/*;q=0.5' would get parsed
into:
    ('application', '*', {'q': '0.5'})
In addition this function also guarantees that there is a value for 'q'
in the params dictionary, filling it in with a proper default if
necessary.
:rtype: (str,str,dict) | ['Parse', 'a', 'media', '-', 'range', 'into', 'its', 'component', 'parts', '.'] | train | https://github.com/dbtsai/python-mimeparse/blob/cf605c0994149b1a1936b3a8a597203fe3fbb62e/mimeparse.py#L42-L66 |
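A quick usage check of record 7,620, assuming the python-mimeparse package is installed (pip install python-mimeparse, imported as mimeparse):

import mimeparse

print(mimeparse.parse_media_range('application/*;q=0.5'))
# ('application', '*', {'q': '0.5'})
print(mimeparse.parse_media_range('text/html'))
# ('text', 'html', {'q': '1'})   <- no q was given, so the default is filled in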
7,621 | pystorm/pystorm | pystorm/bolt.py | TicklessBatchingBolt._batch_entry | def _batch_entry(self):
"""Entry point for the batcher thread."""
try:
while True:
self._batch_entry_run()
except:
self.exc_info = sys.exc_info()
os.kill(self.pid, signal.SIGUSR1) | python | def _batch_entry(self):
"""Entry point for the batcher thread."""
try:
while True:
self._batch_entry_run()
except:
self.exc_info = sys.exc_info()
os.kill(self.pid, signal.SIGUSR1) | ['def', '_batch_entry', '(', 'self', ')', ':', 'try', ':', 'while', 'True', ':', 'self', '.', '_batch_entry_run', '(', ')', 'except', ':', 'self', '.', 'exc_info', '=', 'sys', '.', 'exc_info', '(', ')', 'os', '.', 'kill', '(', 'self', '.', 'pid', ',', 'signal', '.', 'SIGUSR1', ')'] | Entry point for the batcher thread. | ['Entry', 'point', 'for', 'the', 'batcher', 'thread', '.'] | train | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/bolt.py#L505-L512 |
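_batch_entry in record 7,621 propagates a worker-thread failure by stashing sys.exc_info() and signalling its own process. A stripped-down, POSIX-only sketch of that pattern; the names and the demo error are invented and not part of pystorm:

import os, signal, sys, threading, time

state = {'exc_info': None}

def reraise_worker_error(signum, frame):
    # signal handlers run in the main thread, so the saved exception surfaces there
    exc_type, exc_value, tb = state['exc_info']
    raise exc_value.with_traceback(tb)

signal.signal(signal.SIGUSR1, reraise_worker_error)

def worker():
    try:
        raise RuntimeError('batch failed')
    except Exception:
        state['exc_info'] = sys.exc_info()
        os.kill(os.getpid(), signal.SIGUSR1)   # poke the main process, as in _batch_entry

try:
    threading.Thread(target=worker).start()
    time.sleep(1)   # interrupted by SIGUSR1; the handler re-raises here
except RuntimeError as err:
    print('propagated from the worker thread:', err)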
7,622 | SeattleTestbed/seash | pyreadline/modes/basemode.py | BaseMode.forward_word_extend_selection | def forward_word_extend_selection(self, e): #
u"""Move forward to the end of the next word. Words are composed of
letters and digits."""
self.l_buffer.forward_word_extend_selection(self.argument_reset)
self.finalize() | python | def forward_word_extend_selection(self, e): #
u"""Move forward to the end of the next word. Words are composed of
letters and digits."""
self.l_buffer.forward_word_extend_selection(self.argument_reset)
self.finalize() | ['def', 'forward_word_extend_selection', '(', 'self', ',', 'e', ')', ':', '# \r', 'self', '.', 'l_buffer', '.', 'forward_word_extend_selection', '(', 'self', '.', 'argument_reset', ')', 'self', '.', 'finalize', '(', ')'] | u"""Move forward to the end of the next word. Words are composed of
letters and digits. | ['u', 'Move', 'forward', 'to', 'the', 'end', 'of', 'the', 'next', 'word', '.', 'Words', 'are', 'composed', 'of', 'letters', 'and', 'digits', '.'] | train | https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/pyreadline/modes/basemode.py#L378-L382 |
7,623 | docker/docker-py | docker/api/client.py | APIClient._stream_raw_result | def _stream_raw_result(self, response, chunk_size=1, decode=True):
''' Stream result for TTY-enabled container and raw binary data'''
self._raise_for_status(response)
for out in response.iter_content(chunk_size, decode):
yield out | python | def _stream_raw_result(self, response, chunk_size=1, decode=True):
''' Stream result for TTY-enabled container and raw binary data'''
self._raise_for_status(response)
for out in response.iter_content(chunk_size, decode):
yield out | ['def', '_stream_raw_result', '(', 'self', ',', 'response', ',', 'chunk_size', '=', '1', ',', 'decode', '=', 'True', ')', ':', 'self', '.', '_raise_for_status', '(', 'response', ')', 'for', 'out', 'in', 'response', '.', 'iter_content', '(', 'chunk_size', ',', 'decode', ')', ':', 'yield', 'out'] | Stream result for TTY-enabled container and raw binary data | ['Stream', 'result', 'for', 'TTY', '-', 'enabled', 'container', 'and', 'raw', 'binary', 'data'] | train | https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/api/client.py#L393-L397 |
7,624 | google/grr | grr/server/grr_response_server/databases/mem_client_reports.py | InMemoryDBClientReportsMixin.ReadAllClientGraphSeries | def ReadAllClientGraphSeries(
self,
client_label,
report_type,
time_range = None,
):
"""See db.Database."""
series_with_timestamps = {}
for series_key, series in iteritems(self.client_graph_series):
series_label, series_type, timestamp = series_key
if series_label == client_label and series_type == report_type:
if time_range is not None and not time_range.Includes(timestamp):
continue
series_with_timestamps[timestamp.Copy()] = series.Copy()
return series_with_timestamps | python | def ReadAllClientGraphSeries(
self,
client_label,
report_type,
time_range = None,
):
"""See db.Database."""
series_with_timestamps = {}
for series_key, series in iteritems(self.client_graph_series):
series_label, series_type, timestamp = series_key
if series_label == client_label and series_type == report_type:
if time_range is not None and not time_range.Includes(timestamp):
continue
series_with_timestamps[timestamp.Copy()] = series.Copy()
return series_with_timestamps | ['def', 'ReadAllClientGraphSeries', '(', 'self', ',', 'client_label', ',', 'report_type', ',', 'time_range', '=', 'None', ',', ')', ':', 'series_with_timestamps', '=', '{', '}', 'for', 'series_key', ',', 'series', 'in', 'iteritems', '(', 'self', '.', 'client_graph_series', ')', ':', 'series_label', ',', 'series_type', ',', 'timestamp', '=', 'series_key', 'if', 'series_label', '==', 'client_label', 'and', 'series_type', '==', 'report_type', ':', 'if', 'time_range', 'is', 'not', 'None', 'and', 'not', 'time_range', '.', 'Includes', '(', 'timestamp', ')', ':', 'continue', 'series_with_timestamps', '[', 'timestamp', '.', 'Copy', '(', ')', ']', '=', 'series', '.', 'Copy', '(', ')', 'return', 'series_with_timestamps'] | See db.Database. | ['See', 'db', '.', 'Database', '.'] | train | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mem_client_reports.py#L40-L54 |
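ReadAllClientGraphSeries in record 7,624 is a filtered scan over a dict keyed by (label, report type, timestamp). The same shape in plain Python, with made-up keys and a (start, end) tuple standing in for GRR's TimeRange object:

graph_series = {
    ('label-a', 'OS_TYPE', 100): 'series-1',
    ('label-a', 'OS_TYPE', 200): 'series-2',
    ('label-b', 'OS_TYPE', 150): 'series-3',
}

def read_all(label, report_type, time_range=None):
    out = {}
    for (series_label, series_type, ts), series in graph_series.items():
        if series_label != label or series_type != report_type:
            continue
        if time_range is not None and not (time_range[0] <= ts <= time_range[1]):
            continue
        out[ts] = series
    return out

print(read_all('label-a', 'OS_TYPE', time_range=(150, 300)))   # {200: 'series-2'}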
7,625 | smarie/python-valid8 | valid8/entry_points.py | Validator.is_valid | def is_valid(self,
value # type: Any
):
# type: (...) -> bool
"""
Validates the provided value and returns a boolean indicating success or failure. Any Exception happening in
the validation process will be silently caught.
:param value: the value to validate
:return: a boolean flag indicating success or failure
"""
# noinspection PyBroadException
try:
# perform validation
res = self.main_function(value)
# return a boolean indicating if success or failure
return result_is_success(res)
except Exception:
# caught exception means failure > return False
return False | python | def is_valid(self,
value # type: Any
):
# type: (...) -> bool
"""
Validates the provided value and returns a boolean indicating success or failure. Any Exception happening in
the validation process will be silently caught.
:param value: the value to validate
:return: a boolean flag indicating success or failure
"""
# noinspection PyBroadException
try:
# perform validation
res = self.main_function(value)
# return a boolean indicating if success or failure
return result_is_success(res)
except Exception:
# caught exception means failure > return False
return False | ['def', 'is_valid', '(', 'self', ',', 'value', '# type: Any', ')', ':', '# type: (...) -> bool', '# noinspection PyBroadException', 'try', ':', '# perform validation', 'res', '=', 'self', '.', 'main_function', '(', 'value', ')', '# return a boolean indicating if success or failure', 'return', 'result_is_success', '(', 'res', ')', 'except', 'Exception', ':', '# caught exception means failure > return False', 'return', 'False'] | Validates the provided value and returns a boolean indicating success or failure. Any Exception happening in
the validation process will be silently caught.
:param value: the value to validate
:return: a boolean flag indicating success or failure | ['Validates', 'the', 'provided', 'value', 'and', 'returns', 'a', 'boolean', 'indicating', 'success', 'or', 'failure', '.', 'Any', 'Exception', 'happening', 'in', 'the', 'validation', 'process', 'will', 'be', 'silently', 'caught', '.'] | train | https://github.com/smarie/python-valid8/blob/5e15d1de11602933c5114eb9f73277ad91d97800/valid8/entry_points.py#L631-L652 |
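is_valid in record 7,625 reduces to "run the validation callable, swallow any exception, and map the outcome to a bool". A self-contained sketch of that pattern; treating True and None as success is an assumption about result_is_success, not something stated in the record:

def result_is_success(res):
    # assumed convention: success is signalled by returning True or nothing at all
    return res is None or res is True

def is_valid(validator, value):
    try:
        return result_is_success(validator(value))
    except Exception:
        return False                    # any exception during validation means failure

print(is_valid(lambda x: x > 0, 5))     # True
print(is_valid(lambda x: x > 0, -1))    # False
print(is_valid(lambda x: x / 0, 1))     # False, the ZeroDivisionError is swallowed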
7,626 | edibledinos/pwnypack | pwnypack/flow.py | SocketChannel.read | def read(self, n):
"""
Receive *n* bytes from the socket.
Args:
n(int): The number of bytes to read.
Returns:
bytes: *n* bytes read from the socket.
Raises:
EOFError: If the socket was closed.
"""
d = b''
while n:
try:
block = self._socket.recv(n)
except socket.error:
block = None
if not block:
raise EOFError('Socket closed')
d += block
n -= len(block)
return d | python | def read(self, n):
"""
Receive *n* bytes from the socket.
Args:
n(int): The number of bytes to read.
Returns:
bytes: *n* bytes read from the socket.
Raises:
EOFError: If the socket was closed.
"""
d = b''
while n:
try:
block = self._socket.recv(n)
except socket.error:
block = None
if not block:
raise EOFError('Socket closed')
d += block
n -= len(block)
return d | ['def', 'read', '(', 'self', ',', 'n', ')', ':', 'd', '=', "b''", 'while', 'n', ':', 'try', ':', 'block', '=', 'self', '.', '_socket', '.', 'recv', '(', 'n', ')', 'except', 'socket', '.', 'error', ':', 'block', '=', 'None', 'if', 'not', 'block', ':', 'raise', 'EOFError', '(', "'Socket closed'", ')', 'd', '+=', 'block', 'n', '-=', 'len', '(', 'block', ')', 'return', 'd'] | Receive *n* bytes from the socket.
Args:
n(int): The number of bytes to read.
Returns:
bytes: *n* bytes read from the socket.
Raises:
EOFError: If the socket was closed. | ['Receive', '*', 'n', '*', 'bytes', 'from', 'the', 'socket', '.'] | train | https://github.com/edibledinos/pwnypack/blob/e0a5a8e6ef3f4f1f7e1b91ee379711f4a49cb0e6/pwnypack/flow.py#L165-L189 |
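The read method in record 7,626 is the classic read-exactly-n-bytes loop. A self-contained demonstration over a local socketpair, so it runs without a remote endpoint (the helper name recv_exact is invented):

import socket

def recv_exact(sock, n):
    data = b''
    while n:
        block = sock.recv(n)
        if not block:                    # peer closed before n bytes arrived
            raise EOFError('Socket closed')
        data += block
        n -= len(block)
    return data

left, right = socket.socketpair()
left.sendall(b'hello world')
print(recv_exact(right, 5))   # b'hello'
print(recv_exact(right, 6))   # b' world'
left.close(); right.close()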
7,627 | jwodder/doapi | doapi/domain.py | Domain.fetch_all_records | def fetch_all_records(self):
r"""
Returns a generator that yields all of the DNS records for the domain
:rtype: generator of `DomainRecord`\ s
:raises DOAPIError: if the API endpoint replies with an error
"""
api = self.doapi_manager
return map(self._record, api.paginate(self.record_url, 'domain_records')) | python | def fetch_all_records(self):
r"""
Returns a generator that yields all of the DNS records for the domain
:rtype: generator of `DomainRecord`\ s
:raises DOAPIError: if the API endpoint replies with an error
"""
api = self.doapi_manager
return map(self._record, api.paginate(self.record_url, 'domain_records')) | ['def', 'fetch_all_records', '(', 'self', ')', ':', 'api', '=', 'self', '.', 'doapi_manager', 'return', 'map', '(', 'self', '.', '_record', ',', 'api', '.', 'paginate', '(', 'self', '.', 'record_url', ',', "'domain_records'", ')', ')'] | r"""
Returns a generator that yields all of the DNS records for the domain
:rtype: generator of `DomainRecord`\ s
:raises DOAPIError: if the API endpoint replies with an error | ['r', 'Returns', 'a', 'generator', 'that', 'yields', 'all', 'of', 'the', 'DNS', 'records', 'for', 'the', 'domain'] | train | https://github.com/jwodder/doapi/blob/b1306de86a01d8ae7b9c1fe2699765bb82e4f310/doapi/domain.py#L92-L100 |
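fetch_all_records in record 7,627 just wraps a paginating generator. The general shape of such a paginator, sketched with a fake in-memory fetcher instead of the DigitalOcean API (the page layout below is invented):

PAGES = {
    '/records?page=1': {'domain_records': [1, 2], 'next': '/records?page=2'},
    '/records?page=2': {'domain_records': [3], 'next': None},
}

def fetch(url):
    # stand-in for an HTTP GET against the API
    return PAGES[url]

def paginate(url, key):
    while url:
        page = fetch(url)
        for item in page[key]:
            yield item
        url = page['next']

print(list(paginate('/records?page=1', 'domain_records')))   # [1, 2, 3]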
7,628 | Damgaard/PyImgur | pyimgur/__init__.py | User.get_settings | def get_settings(self):
"""
Returns current settings.
Only accessible if authenticated as the user.
"""
url = self._imgur._base_url + "/3/account/{0}/settings".format(self.name)
return self._imgur._send_request(url) | python | def get_settings(self):
"""
Returns current settings.
Only accessible if authenticated as the user.
"""
url = self._imgur._base_url + "/3/account/{0}/settings".format(self.name)
return self._imgur._send_request(url) | ['def', 'get_settings', '(', 'self', ')', ':', 'url', '=', 'self', '.', '_imgur', '.', '_base_url', '+', '"/3/account/{0}/settings"', '.', 'format', '(', 'self', '.', 'name', ')', 'return', 'self', '.', '_imgur', '.', '_send_request', '(', 'url', ')'] | Returns current settings.
Only accessible if authenticated as the user. | ['Returns', 'current', 'settings', '.'] | train | https://github.com/Damgaard/PyImgur/blob/606f17078d24158632f807430f8d0b9b3cd8b312/pyimgur/__init__.py#L1386-L1393 |
7,629 | zhebrak/raftos | raftos/server.py | register | async def register(*address_list, cluster=None, loop=None):
"""Start Raft node (server)
Args:
address_list — 127.0.0.1:8000 [, 127.0.0.1:8001 ...]
cluster — [127.0.0.1:8001, 127.0.0.1:8002, ...]
"""
loop = loop or asyncio.get_event_loop()
for address in address_list:
host, port = address.rsplit(':', 1)
node = Node(address=(host, int(port)), loop=loop)
await node.start()
for address in cluster:
host, port = address.rsplit(':', 1)
port = int(port)
if (host, port) != (node.host, node.port):
node.update_cluster((host, port)) | python | async def register(*address_list, cluster=None, loop=None):
"""Start Raft node (server)
Args:
address_list — 127.0.0.1:8000 [, 127.0.0.1:8001 ...]
cluster — [127.0.0.1:8001, 127.0.0.1:8002, ...]
"""
loop = loop or asyncio.get_event_loop()
for address in address_list:
host, port = address.rsplit(':', 1)
node = Node(address=(host, int(port)), loop=loop)
await node.start()
for address in cluster:
host, port = address.rsplit(':', 1)
port = int(port)
if (host, port) != (node.host, node.port):
node.update_cluster((host, port)) | ['async', 'def', 'register', '(', '*', 'address_list', ',', 'cluster', '=', 'None', ',', 'loop', '=', 'None', ')', ':', 'loop', '=', 'loop', 'or', 'asyncio', '.', 'get_event_loop', '(', ')', 'for', 'address', 'in', 'address_list', ':', 'host', ',', 'port', '=', 'address', '.', 'rsplit', '(', "':'", ',', '1', ')', 'node', '=', 'Node', '(', 'address', '=', '(', 'host', ',', 'int', '(', 'port', ')', ')', ',', 'loop', '=', 'loop', ')', 'await', 'node', '.', 'start', '(', ')', 'for', 'address', 'in', 'cluster', ':', 'host', ',', 'port', '=', 'address', '.', 'rsplit', '(', "':'", ',', '1', ')', 'port', '=', 'int', '(', 'port', ')', 'if', '(', 'host', ',', 'port', ')', '!=', '(', 'node', '.', 'host', ',', 'node', '.', 'port', ')', ':', 'node', '.', 'update_cluster', '(', '(', 'host', ',', 'port', ')', ')'] | Start Raft node (server)
Args:
address_list — 127.0.0.1:8000 [, 127.0.0.1:8001 ...]
cluster — [127.0.0.1:8001, 127.0.0.1:8002, ...] | ['Start', 'Raft', 'node', '(', 'server', ')', 'Args', ':', 'address_list', '—', '127', '.', '0', '.', '0', '.', '1', ':', '8000', '[', '127', '.', '0', '.', '0', '.', '1', ':', '8001', '...', ']', 'cluster', '—', '[', '127', '.', '0', '.', '0', '.', '1', ':', '8001', '127', '.', '0', '.', '0', '.', '1', ':', '8002', '...', ']'] | train | https://github.com/zhebrak/raftos/blob/0d6f9e049b526279b1035f597291a96cf50c9b40/raftos/server.py#L8-L26 |
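One detail worth noting in record 7,629: addresses are split with rsplit(':', 1), so only the final colon separates host from port and colon-bearing hosts survive intact. A two-line check:

for address in ('127.0.0.1:8000', '[::1]:8001'):
    host, port = address.rsplit(':', 1)
    print(host, int(port))   # 127.0.0.1 8000  /  [::1] 8001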
7,630 | diffeo/rejester | rejester/workers.py | MultiWorker._get_and_start_work | def _get_and_start_work(self):
"return (async_result, work_unit) or (None, None)"
worker_id = nice_identifier()
work_unit = self.task_master.get_work(worker_id, available_gb=self.available_gb())
if work_unit is None:
return None, None
async_result = self.pool.apply_async(
run_worker,
(HeadlessWorker, self.task_master.registry.config,
worker_id,
work_unit.work_spec_name,
work_unit.key),
callback=self._finish_callback)
return async_result, work_unit | python | def _get_and_start_work(self):
"return (async_result, work_unit) or (None, None)"
worker_id = nice_identifier()
work_unit = self.task_master.get_work(worker_id, available_gb=self.available_gb())
if work_unit is None:
return None, None
async_result = self.pool.apply_async(
run_worker,
(HeadlessWorker, self.task_master.registry.config,
worker_id,
work_unit.work_spec_name,
work_unit.key),
callback=self._finish_callback)
return async_result, work_unit | ['def', '_get_and_start_work', '(', 'self', ')', ':', 'worker_id', '=', 'nice_identifier', '(', ')', 'work_unit', '=', 'self', '.', 'task_master', '.', 'get_work', '(', 'worker_id', ',', 'available_gb', '=', 'self', '.', 'available_gb', '(', ')', ')', 'if', 'work_unit', 'is', 'None', ':', 'return', 'None', ',', 'None', 'async_result', '=', 'self', '.', 'pool', '.', 'apply_async', '(', 'run_worker', ',', '(', 'HeadlessWorker', ',', 'self', '.', 'task_master', '.', 'registry', '.', 'config', ',', 'worker_id', ',', 'work_unit', '.', 'work_spec_name', ',', 'work_unit', '.', 'key', ')', ',', 'callback', '=', 'self', '.', '_finish_callback', ')', 'return', 'async_result', ',', 'work_unit'] | return (async_result, work_unit) or (None, None) | ['return', '(', 'async_result', 'work_unit', ')', 'or', '(', 'None', 'None', ')'] | train | https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/workers.py#L258-L271 |
7,631 | pandas-dev/pandas | pandas/io/formats/style.py | Styler.applymap | def applymap(self, func, subset=None, **kwargs):
"""
Apply a function elementwise, updating the HTML
representation with the result.
Parameters
----------
func : function
``func`` should take a scalar and return a scalar
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self : Styler
See Also
--------
Styler.where
"""
self._todo.append((lambda instance: getattr(instance, '_applymap'),
(func, subset), kwargs))
return self | python | def applymap(self, func, subset=None, **kwargs):
"""
Apply a function elementwise, updating the HTML
representation with the result.
Parameters
----------
func : function
``func`` should take a scalar and return a scalar
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self : Styler
See Also
--------
Styler.where
"""
self._todo.append((lambda instance: getattr(instance, '_applymap'),
(func, subset), kwargs))
return self | ['def', 'applymap', '(', 'self', ',', 'func', ',', 'subset', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'self', '.', '_todo', '.', 'append', '(', '(', 'lambda', 'instance', ':', 'getattr', '(', 'instance', ',', "'_applymap'", ')', ',', '(', 'func', ',', 'subset', ')', ',', 'kwargs', ')', ')', 'return', 'self'] | Apply a function elementwise, updating the HTML
representation with the result.
Parameters
----------
func : function
``func`` should take a scalar and return a scalar
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self : Styler
See Also
--------
Styler.where | ['Apply', 'a', 'function', 'elementwise', 'updating', 'the', 'HTML', 'representation', 'with', 'the', 'result', '.'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L625-L650 |
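Typical use of Styler.applymap from record 7,631, assuming a pandas version in which this method and Styler.render are still available (newer releases expose the same behaviour as Styler.map and Styler.to_html):

import pandas as pd

df = pd.DataFrame({'a': [1, -2], 'b': [-3, 4]})

def color_negative(val):
    # elementwise: takes one scalar, returns a CSS string
    return 'color: red' if val < 0 else ''

styler = df.style.applymap(color_negative, subset=['a'])
html = styler.render()   # the styled table as an HTML string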
7,632 | orb-framework/orb | orb/core/column_types/reference.py | ReferenceColumn.valueFromString | def valueFromString(self, value, context=None):
"""
Re-implements the orb.Column.valueFromString method to
lookup a reference object based on the given value.
:param value: <str>
:param context: <orb.Context> || None
:return: <orb.Model> || None
"""
model = self.referenceModel()
return model(value, context=context) | python | def valueFromString(self, value, context=None):
"""
Re-implements the orb.Column.valueFromString method to
lookup a reference object based on the given value.
:param value: <str>
:param context: <orb.Context> || None
:return: <orb.Model> || None
"""
model = self.referenceModel()
return model(value, context=context) | ['def', 'valueFromString', '(', 'self', ',', 'value', ',', 'context', '=', 'None', ')', ':', 'model', '=', 'self', '.', 'referenceModel', '(', ')', 'return', 'model', '(', 'value', ',', 'context', '=', 'context', ')'] | Re-implements the orb.Column.valueFromString method to
lookup a reference object based on the given value.
:param value: <str>
:param context: <orb.Context> || None
:return: <orb.Model> || None | ['Re', '-', 'implements', 'the', 'orb', '.', 'Column', '.', 'valueFromString', 'method', 'to', 'lookup', 'a', 'reference', 'object', 'based', 'on', 'the', 'given', 'value', '.'] | train | https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/column_types/reference.py#L197-L208 |
7,633 | saltstack/salt | salt/modules/dracr.py | __parse_drac | def __parse_drac(output):
'''
Parse Dell DRAC output
'''
drac = {}
section = ''
for i in output.splitlines():
if i.strip().endswith(':') and '=' not in i:
section = i[0:-1]
drac[section] = {}
if i.rstrip() and '=' in i:
if section in drac:
drac[section].update(dict(
[[prop.strip() for prop in i.split('=')]]
))
else:
section = i.strip()
if section not in drac and section:
drac[section] = {}
return drac | python | def __parse_drac(output):
'''
Parse Dell DRAC output
'''
drac = {}
section = ''
for i in output.splitlines():
if i.strip().endswith(':') and '=' not in i:
section = i[0:-1]
drac[section] = {}
if i.rstrip() and '=' in i:
if section in drac:
drac[section].update(dict(
[[prop.strip() for prop in i.split('=')]]
))
else:
section = i.strip()
if section not in drac and section:
drac[section] = {}
return drac | ['def', '__parse_drac', '(', 'output', ')', ':', 'drac', '=', '{', '}', 'section', '=', "''", 'for', 'i', 'in', 'output', '.', 'splitlines', '(', ')', ':', 'if', 'i', '.', 'strip', '(', ')', '.', 'endswith', '(', "':'", ')', 'and', "'='", 'not', 'in', 'i', ':', 'section', '=', 'i', '[', '0', ':', '-', '1', ']', 'drac', '[', 'section', ']', '=', '{', '}', 'if', 'i', '.', 'rstrip', '(', ')', 'and', "'='", 'in', 'i', ':', 'if', 'section', 'in', 'drac', ':', 'drac', '[', 'section', ']', '.', 'update', '(', 'dict', '(', '[', '[', 'prop', '.', 'strip', '(', ')', 'for', 'prop', 'in', 'i', '.', 'split', '(', "'='", ')', ']', ']', ')', ')', 'else', ':', 'section', '=', 'i', '.', 'strip', '(', ')', 'if', 'section', 'not', 'in', 'drac', 'and', 'section', ':', 'drac', '[', 'section', ']', '=', '{', '}', 'return', 'drac'] | Parse Dell DRAC output | ['Parse', 'Dell', 'DRAC', 'output'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dracr.py#L43-L64 |
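__parse_drac in record 7,633 turns racadm-style "Section:" / "key = value" text into a nested dict. A slightly simplified, runnable copy of that loop against a made-up sample (real racadm output will differ):

sample = """cfgLanNetworking:
cfgNicEnable = 1
cfgNicIpAddress = 192.0.2.10
cfgUserAdmin:
cfgUserAdminUserName = root
"""

def parse_drac(output):
    drac, section = {}, ''
    for line in output.splitlines():
        if line.strip().endswith(':') and '=' not in line:
            section = line[:-1]            # a new "Section:" header starts a nested dict
            drac[section] = {}
        if line.rstrip() and '=' in line and section in drac:
            key, value = (part.strip() for part in line.split('='))
            drac[section][key] = value
    return drac

print(parse_drac(sample))
# {'cfgLanNetworking': {'cfgNicEnable': '1', 'cfgNicIpAddress': '192.0.2.10'},
#  'cfgUserAdmin': {'cfgUserAdminUserName': 'root'}}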
7,634 | summa-tx/riemann | riemann/encoding/addresses.py | parse_hash | def parse_hash(address):
'''
str -> bytes
There's probably a better way to do this.
'''
raw = parse(address)
# Cash addresses
try:
if address.find(riemann.network.CASHADDR_PREFIX) == 0:
if raw.find(riemann.network.CASHADDR_P2SH) == 0:
return raw[len(riemann.network.CASHADDR_P2SH):]
if raw.find(riemann.network.CASHADDR_P2PKH) == 0:
return raw[len(riemann.network.CASHADDR_P2PKH):]
except TypeError:
pass
# Segwit addresses
try:
if address.find(riemann.network.BECH32_HRP) == 0:
if raw.find(riemann.network.P2WSH_PREFIX) == 0:
return raw[len(riemann.network.P2WSH_PREFIX):]
if raw.find(riemann.network.P2WPKH_PREFIX) == 0:
return raw[len(riemann.network.P2WPKH_PREFIX):]
except TypeError:
pass
# Legacy Addresses
if raw.find(riemann.network.P2SH_PREFIX) == 0:
return raw[len(riemann.network.P2SH_PREFIX):]
if raw.find(riemann.network.P2PKH_PREFIX) == 0:
return raw[len(riemann.network.P2PKH_PREFIX):] | python | def parse_hash(address):
'''
str -> bytes
There's probably a better way to do this.
'''
raw = parse(address)
# Cash addresses
try:
if address.find(riemann.network.CASHADDR_PREFIX) == 0:
if raw.find(riemann.network.CASHADDR_P2SH) == 0:
return raw[len(riemann.network.CASHADDR_P2SH):]
if raw.find(riemann.network.CASHADDR_P2PKH) == 0:
return raw[len(riemann.network.CASHADDR_P2PKH):]
except TypeError:
pass
# Segwit addresses
try:
if address.find(riemann.network.BECH32_HRP) == 0:
if raw.find(riemann.network.P2WSH_PREFIX) == 0:
return raw[len(riemann.network.P2WSH_PREFIX):]
if raw.find(riemann.network.P2WPKH_PREFIX) == 0:
return raw[len(riemann.network.P2WPKH_PREFIX):]
except TypeError:
pass
# Legacy Addresses
if raw.find(riemann.network.P2SH_PREFIX) == 0:
return raw[len(riemann.network.P2SH_PREFIX):]
if raw.find(riemann.network.P2PKH_PREFIX) == 0:
return raw[len(riemann.network.P2PKH_PREFIX):] | ['def', 'parse_hash', '(', 'address', ')', ':', 'raw', '=', 'parse', '(', 'address', ')', '# Cash addresses', 'try', ':', 'if', 'address', '.', 'find', '(', 'riemann', '.', 'network', '.', 'CASHADDR_PREFIX', ')', '==', '0', ':', 'if', 'raw', '.', 'find', '(', 'riemann', '.', 'network', '.', 'CASHADDR_P2SH', ')', '==', '0', ':', 'return', 'raw', '[', 'len', '(', 'riemann', '.', 'network', '.', 'CASHADDR_P2SH', ')', ':', ']', 'if', 'raw', '.', 'find', '(', 'riemann', '.', 'network', '.', 'CASHADDR_P2PKH', ')', '==', '0', ':', 'return', 'raw', '[', 'len', '(', 'riemann', '.', 'network', '.', 'CASHADDR_P2PKH', ')', ':', ']', 'except', 'TypeError', ':', 'pass', '# Segwit addresses', 'try', ':', 'if', 'address', '.', 'find', '(', 'riemann', '.', 'network', '.', 'BECH32_HRP', ')', '==', '0', ':', 'if', 'raw', '.', 'find', '(', 'riemann', '.', 'network', '.', 'P2WSH_PREFIX', ')', '==', '0', ':', 'return', 'raw', '[', 'len', '(', 'riemann', '.', 'network', '.', 'P2WSH_PREFIX', ')', ':', ']', 'if', 'raw', '.', 'find', '(', 'riemann', '.', 'network', '.', 'P2WPKH_PREFIX', ')', '==', '0', ':', 'return', 'raw', '[', 'len', '(', 'riemann', '.', 'network', '.', 'P2WPKH_PREFIX', ')', ':', ']', 'except', 'TypeError', ':', 'pass', '# Legacy Addresses', 'if', 'raw', '.', 'find', '(', 'riemann', '.', 'network', '.', 'P2SH_PREFIX', ')', '==', '0', ':', 'return', 'raw', '[', 'len', '(', 'riemann', '.', 'network', '.', 'P2SH_PREFIX', ')', ':', ']', 'if', 'raw', '.', 'find', '(', 'riemann', '.', 'network', '.', 'P2PKH_PREFIX', ')', '==', '0', ':', 'return', 'raw', '[', 'len', '(', 'riemann', '.', 'network', '.', 'P2PKH_PREFIX', ')', ':', ']'] | str -> bytes
There's probably a better way to do this. | ['str', '-', '>', 'bytes', 'There', 's', 'probably', 'a', 'better', 'way', 'to', 'do', 'this', '.'] | train | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/encoding/addresses.py#L224-L256 |
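A usage sketch for the parse_hash record above. riemann.select_network is assumed to be the library's network selector (mainnet may already be the default), and the address is the well-known genesis-block P2PKH address.

import riemann
from riemann.encoding import addresses

riemann.select_network('bitcoin_main')   # assumed selector name; may be unnecessary if mainnet is default
h = addresses.parse_hash('1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa')
print(len(h))   # 20, the HASH160 left after stripping the P2PKH prefix byte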
7,635 | LogicalDash/LiSE | LiSE/LiSE/portal.py | Portal.delete | def delete(self):
"""Remove myself from my :class:`Character`.
For symmetry with :class:`Thing` and :class`Place`.
"""
branch, turn, tick = self.engine._nbtt()
self.engine._edges_cache.store(
self.character.name,
self.origin.name,
self.destination.name,
0,
branch,
turn,
tick,
None
)
self.engine.query.exist_edge(
self.character.name,
self.origin.name,
self.destination.name,
branch, turn, tick, False
)
try:
del self.engine._edge_objs[
(self.graph.name, self.orig, self.dest)
]
except KeyError:
pass
self.character.portal[self.origin.name].send(
self.character.portal[self.origin.name],
key='dest', val=None
) | python | def delete(self):
"""Remove myself from my :class:`Character`.
For symmetry with :class:`Thing` and :class`Place`.
"""
branch, turn, tick = self.engine._nbtt()
self.engine._edges_cache.store(
self.character.name,
self.origin.name,
self.destination.name,
0,
branch,
turn,
tick,
None
)
self.engine.query.exist_edge(
self.character.name,
self.origin.name,
self.destination.name,
branch, turn, tick, False
)
try:
del self.engine._edge_objs[
(self.graph.name, self.orig, self.dest)
]
except KeyError:
pass
self.character.portal[self.origin.name].send(
self.character.portal[self.origin.name],
key='dest', val=None
) | ['def', 'delete', '(', 'self', ')', ':', 'branch', ',', 'turn', ',', 'tick', '=', 'self', '.', 'engine', '.', '_nbtt', '(', ')', 'self', '.', 'engine', '.', '_edges_cache', '.', 'store', '(', 'self', '.', 'character', '.', 'name', ',', 'self', '.', 'origin', '.', 'name', ',', 'self', '.', 'destination', '.', 'name', ',', '0', ',', 'branch', ',', 'turn', ',', 'tick', ',', 'None', ')', 'self', '.', 'engine', '.', 'query', '.', 'exist_edge', '(', 'self', '.', 'character', '.', 'name', ',', 'self', '.', 'origin', '.', 'name', ',', 'self', '.', 'destination', '.', 'name', ',', 'branch', ',', 'turn', ',', 'tick', ',', 'False', ')', 'try', ':', 'del', 'self', '.', 'engine', '.', '_edge_objs', '[', '(', 'self', '.', 'graph', '.', 'name', ',', 'self', '.', 'orig', ',', 'self', '.', 'dest', ')', ']', 'except', 'KeyError', ':', 'pass', 'self', '.', 'character', '.', 'portal', '[', 'self', '.', 'origin', '.', 'name', ']', '.', 'send', '(', 'self', '.', 'character', '.', 'portal', '[', 'self', '.', 'origin', '.', 'name', ']', ',', 'key', '=', "'dest'", ',', 'val', '=', 'None', ')'] | Remove myself from my :class:`Character`.
For symmetry with :class:`Thing` and :class`Place`. | ['Remove', 'myself', 'from', 'my', ':', 'class', ':', 'Character', '.'] | train | https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/LiSE/LiSE/portal.py#L233-L265 |
7,636 | quantopian/zipline | zipline/pipeline/filters/filter.py | NumExprFilter._compute | def _compute(self, arrays, dates, assets, mask):
"""
Compute our result with numexpr, then re-apply `mask`.
"""
return super(NumExprFilter, self)._compute(
arrays,
dates,
assets,
mask,
) & mask | python | def _compute(self, arrays, dates, assets, mask):
"""
Compute our result with numexpr, then re-apply `mask`.
"""
return super(NumExprFilter, self)._compute(
arrays,
dates,
assets,
mask,
) & mask | ['def', '_compute', '(', 'self', ',', 'arrays', ',', 'dates', ',', 'assets', ',', 'mask', ')', ':', 'return', 'super', '(', 'NumExprFilter', ',', 'self', ')', '.', '_compute', '(', 'arrays', ',', 'dates', ',', 'assets', ',', 'mask', ',', ')', '&', 'mask'] | Compute our result with numexpr, then re-apply `mask`. | ['Compute', 'our', 'result', 'with', 'numexpr', 'then', 're', '-', 'apply', 'mask', '.'] | train | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/filters/filter.py#L247-L256 |
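The only twist NumExprFilter._compute adds over its parent is re-ANDing the mask onto the numexpr result. A standalone numpy/numexpr illustration of that step, with made-up arrays rather than zipline objects:

import numexpr
import numpy as np

close = np.array([[5.0, 12.0, 20.0]])
mask = np.array([[True, True, False]])      # e.g. assets excluded by an upstream screen
raw = numexpr.evaluate('close > 10.0')      # what the superclass computation would return
print(raw & mask)                           # [[False  True False]]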
7,637 | juju/charm-helpers | charmhelpers/contrib/hahelpers/cluster.py | determine_api_port | def determine_api_port(public_port, singlenode_mode=False):
'''
Determine correct API server listening port based on
existence of HTTPS reverse proxy and/or haproxy.
public_port: int: standard public port for given service
singlenode_mode: boolean: Shuffle ports when only a single unit is present
returns: int: the correct listening port for the API service
'''
i = 0
if singlenode_mode:
i += 1
elif len(peer_units()) > 0 or is_clustered():
i += 1
if https():
i += 1
return public_port - (i * 10) | python | def determine_api_port(public_port, singlenode_mode=False):
'''
Determine correct API server listening port based on
existence of HTTPS reverse proxy and/or haproxy.
public_port: int: standard public port for given service
singlenode_mode: boolean: Shuffle ports when only a single unit is present
returns: int: the correct listening port for the API service
'''
i = 0
if singlenode_mode:
i += 1
elif len(peer_units()) > 0 or is_clustered():
i += 1
if https():
i += 1
return public_port - (i * 10) | ['def', 'determine_api_port', '(', 'public_port', ',', 'singlenode_mode', '=', 'False', ')', ':', 'i', '=', '0', 'if', 'singlenode_mode', ':', 'i', '+=', '1', 'elif', 'len', '(', 'peer_units', '(', ')', ')', '>', '0', 'or', 'is_clustered', '(', ')', ':', 'i', '+=', '1', 'if', 'https', '(', ')', ':', 'i', '+=', '1', 'return', 'public_port', '-', '(', 'i', '*', '10', ')'] | Determine correct API server listening port based on
existence of HTTPS reverse proxy and/or haproxy.
public_port: int: standard public port for given service
singlenode_mode: boolean: Shuffle ports when only a single unit is present
returns: int: the correct listening port for the API service | ['Determine', 'correct', 'API', 'server', 'listening', 'port', 'based', 'on', 'existence', 'of', 'HTTPS', 'reverse', 'proxy', 'and', '/', 'or', 'haproxy', '.'] | train | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/hahelpers/cluster.py#L244-L262 |
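Worked example of the port arithmetic in determine_api_port: with peer units (haproxy) and https both active, i == 2, so a service whose public port is 9292 listens on 9272; with neither, it keeps the public port. The public port value is illustrative only.

public_port = 9292
haproxy, tls = True, True
i = (1 if haproxy else 0) + (1 if tls else 0)
print(public_port - i * 10)         # 9272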
7,638 | horazont/aioxmpp | aioxmpp/security_layer.py | extract_pk_blob_from_pyasn1 | def extract_pk_blob_from_pyasn1(pyasn1_struct):
"""
Extract an ASN.1 encoded public key blob from the given :mod:`pyasn1`
structure (which must represent a certificate).
"""
pk = pyasn1_struct.getComponentByName(
"tbsCertificate"
).getComponentByName(
"subjectPublicKeyInfo"
)
return pyasn1.codec.der.encoder.encode(pk) | python | def extract_pk_blob_from_pyasn1(pyasn1_struct):
"""
Extract an ASN.1 encoded public key blob from the given :mod:`pyasn1`
structure (which must represent a certificate).
"""
pk = pyasn1_struct.getComponentByName(
"tbsCertificate"
).getComponentByName(
"subjectPublicKeyInfo"
)
return pyasn1.codec.der.encoder.encode(pk) | ['def', 'extract_pk_blob_from_pyasn1', '(', 'pyasn1_struct', ')', ':', 'pk', '=', 'pyasn1_struct', '.', 'getComponentByName', '(', '"tbsCertificate"', ')', '.', 'getComponentByName', '(', '"subjectPublicKeyInfo"', ')', 'return', 'pyasn1', '.', 'codec', '.', 'der', '.', 'encoder', '.', 'encode', '(', 'pk', ')'] | Extract an ASN.1 encoded public key blob from the given :mod:`pyasn1`
structure (which must represent a certificate). | ['Extract', 'an', 'ASN', '.', '1', 'encoded', 'public', 'key', 'blob', 'from', 'the', 'given', ':', 'mod', ':', 'pyasn1', 'structure', '(', 'which', 'must', 'represent', 'a', 'certificate', ')', '.'] | train | https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/security_layer.py#L204-L216 |
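A hedged sketch of feeding extract_pk_blob_from_pyasn1 a decoded certificate; pyasn1_modules.rfc2459.Certificate is assumed as the ASN.1 spec and 'cert.der' is a placeholder path to DER-encoded certificate bytes.

from pyasn1.codec.der import decoder
from pyasn1_modules import rfc2459
from aioxmpp.security_layer import extract_pk_blob_from_pyasn1

with open('cert.der', 'rb') as f:              # placeholder path
    cert_der = f.read()
cert_asn1, _ = decoder.decode(cert_der, asn1Spec=rfc2459.Certificate())
pk_blob = extract_pk_blob_from_pyasn1(cert_asn1)   # DER-encoded subjectPublicKeyInfo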
7,639 | adrn/gala | gala/dynamics/core.py | PhaseSpacePosition.get_components | def get_components(self, which):
"""
Get the component name dictionary for the desired object.
The returned dictionary maps component names on this class to component
names on the desired object.
Parameters
----------
which : str
Can either be ``'pos'`` or ``'vel'`` to get the components for the
position or velocity object.
"""
mappings = self.representation_mappings.get(
getattr(self, which).__class__, [])
old_to_new = dict()
for name in getattr(self, which).components:
for m in mappings:
if isinstance(m, RegexRepresentationMapping):
pattr = re.match(m.repr_name, name)
old_to_new[name] = m.new_name.format(*pattr.groups())
elif m.repr_name == name:
old_to_new[name] = m.new_name
mapping = OrderedDict()
for name in getattr(self, which).components:
mapping[old_to_new.get(name, name)] = name
return mapping | python | def get_components(self, which):
"""
Get the component name dictionary for the desired object.
The returned dictionary maps component names on this class to component
names on the desired object.
Parameters
----------
which : str
Can either be ``'pos'`` or ``'vel'`` to get the components for the
position or velocity object.
"""
mappings = self.representation_mappings.get(
getattr(self, which).__class__, [])
old_to_new = dict()
for name in getattr(self, which).components:
for m in mappings:
if isinstance(m, RegexRepresentationMapping):
pattr = re.match(m.repr_name, name)
old_to_new[name] = m.new_name.format(*pattr.groups())
elif m.repr_name == name:
old_to_new[name] = m.new_name
mapping = OrderedDict()
for name in getattr(self, which).components:
mapping[old_to_new.get(name, name)] = name
return mapping | ['def', 'get_components', '(', 'self', ',', 'which', ')', ':', 'mappings', '=', 'self', '.', 'representation_mappings', '.', 'get', '(', 'getattr', '(', 'self', ',', 'which', ')', '.', '__class__', ',', '[', ']', ')', 'old_to_new', '=', 'dict', '(', ')', 'for', 'name', 'in', 'getattr', '(', 'self', ',', 'which', ')', '.', 'components', ':', 'for', 'm', 'in', 'mappings', ':', 'if', 'isinstance', '(', 'm', ',', 'RegexRepresentationMapping', ')', ':', 'pattr', '=', 're', '.', 'match', '(', 'm', '.', 'repr_name', ',', 'name', ')', 'old_to_new', '[', 'name', ']', '=', 'm', '.', 'new_name', '.', 'format', '(', '*', 'pattr', '.', 'groups', '(', ')', ')', 'elif', 'm', '.', 'repr_name', '==', 'name', ':', 'old_to_new', '[', 'name', ']', '=', 'm', '.', 'new_name', 'mapping', '=', 'OrderedDict', '(', ')', 'for', 'name', 'in', 'getattr', '(', 'self', ',', 'which', ')', '.', 'components', ':', 'mapping', '[', 'old_to_new', '.', 'get', '(', 'name', ',', 'name', ')', ']', '=', 'name', 'return', 'mapping'] | Get the component name dictionary for the desired object.
The returned dictionary maps component names on this class to component
names on the desired object.
Parameters
----------
which : str
Can either be ``'pos'`` or ``'vel'`` to get the components for the
position or velocity object. | ['Get', 'the', 'component', 'name', 'dictionary', 'for', 'the', 'desired', 'object', '.'] | train | https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/core.py#L196-L226 |
7,640 | marioidival/pyramid_mongoengine | pyramid_mongoengine/__init__.py | _connect_database | def _connect_database(config):
"""Create simple connection with Mongodb
config comes with settings from .ini file.
"""
settings = config.registry.settings
mongo_uri = "mongodb://localhost:27017"
mongodb_name = "test"
if settings.get("mongo_url"):
mongo_uri = settings["mongo_url"]
if settings.get("mongodb_name"):
mongodb_name = settings["mongodb_name"]
return mongoengine.connect(mongodb_name, host=mongo_uri) | python | def _connect_database(config):
"""Create simple connection with Mongodb
config comes with settings from .ini file.
"""
settings = config.registry.settings
mongo_uri = "mongodb://localhost:27017"
mongodb_name = "test"
if settings.get("mongo_url"):
mongo_uri = settings["mongo_url"]
if settings.get("mongodb_name"):
mongodb_name = settings["mongodb_name"]
return mongoengine.connect(mongodb_name, host=mongo_uri) | ['def', '_connect_database', '(', 'config', ')', ':', 'settings', '=', 'config', '.', 'registry', '.', 'settings', 'mongo_uri', '=', '"mongodb://localhost:27017"', 'mongodb_name', '=', '"test"', 'if', 'settings', '.', 'get', '(', '"mongo_url"', ')', ':', 'mongo_uri', '=', 'settings', '[', '"mongo_url"', ']', 'if', 'settings', '.', 'get', '(', '"mongodb_name"', ')', ':', 'mongodb_name', '=', 'settings', '[', '"mongodb_name"', ']', 'return', 'mongoengine', '.', 'connect', '(', 'mongodb_name', ',', 'host', '=', 'mongo_uri', ')'] | Create simple connection with Mongodb
config comes with settings from .ini file. | ['Create', 'simple', 'connection', 'with', 'Mongodb'] | train | https://github.com/marioidival/pyramid_mongoengine/blob/3fcf45714cb3fc114ee613fea0f508a0119077d1/pyramid_mongoengine/__init__.py#L44-L60 |
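A sketch of the two .ini settings _connect_database looks for, and the equivalent direct call it ends up making; the URI and database name are placeholders.

# In the Pyramid .ini:
#   mongo_url    = mongodb://localhost:27017
#   mongodb_name = mydb
import mongoengine
mongoengine.connect('mydb', host='mongodb://localhost:27017')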
7,641 | PmagPy/PmagPy | pmagpy/builder2.py | ErMagicBuilder.change_location | def change_location(self, old_location_name, new_location_name, new_parent_name=None,
new_er_data=None, new_pmag_data=None, replace_data=False):
"""
Find actual data object for location with old_location_name.
Then call Location class change method to update location name and data.
"""
location = self.find_by_name(old_location_name, self.locations)
if not location:
print('-W- {} is not a currently existing location, so it cannot be updated.'.format(old_location_name))
return False
location.change_location(new_location_name, new_er_data, new_pmag_data, replace_data)
return location | python | def change_location(self, old_location_name, new_location_name, new_parent_name=None,
new_er_data=None, new_pmag_data=None, replace_data=False):
"""
Find actual data object for location with old_location_name.
Then call Location class change method to update location name and data.
"""
location = self.find_by_name(old_location_name, self.locations)
if not location:
print('-W- {} is not a currently existing location, so it cannot be updated.'.format(old_location_name))
return False
location.change_location(new_location_name, new_er_data, new_pmag_data, replace_data)
return location | ['def', 'change_location', '(', 'self', ',', 'old_location_name', ',', 'new_location_name', ',', 'new_parent_name', '=', 'None', ',', 'new_er_data', '=', 'None', ',', 'new_pmag_data', '=', 'None', ',', 'replace_data', '=', 'False', ')', ':', 'location', '=', 'self', '.', 'find_by_name', '(', 'old_location_name', ',', 'self', '.', 'locations', ')', 'if', 'not', 'location', ':', 'print', '(', "'-W- {} is not a currently existing location, so it cannot be updated.'", '.', 'format', '(', 'old_location_name', ')', ')', 'return', 'False', 'location', '.', 'change_location', '(', 'new_location_name', ',', 'new_er_data', ',', 'new_pmag_data', ',', 'replace_data', ')', 'return', 'location'] | Find actual data object for location with old_location_name.
Then call Location class change method to update location name and data. | ['Find', 'actual', 'data', 'object', 'for', 'location', 'with', 'old_location_name', '.', 'Then', 'call', 'Location', 'class', 'change', 'method', 'to', 'update', 'location', 'name', 'and', 'data', '.'] | train | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/builder2.py#L405-L416 |
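A call sketch only for change_location: it assumes an ErMagicBuilder instance already populated with a location named 'Site Alpha', and the er-data key is a placeholder.

loc = builder.change_location('Site Alpha', 'Site Beta',
                              new_er_data={'location_type': 'Outcrop'})
if not loc:
    print('no existing location by that name')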
7,642 | pycontribs/pyrax | pyrax/object_storage.py | StorageClient.cdn_request | def cdn_request(self, uri, method, *args, **kwargs):
"""
If the service supports CDN, use this method to access CDN-specific
URIs.
"""
if not self.cdn_management_url:
raise exc.NotCDNEnabled("CDN is not enabled for this service.")
cdn_uri = "%s%s" % (self.cdn_management_url, uri)
mthd = self.method_dict.get(method.upper())
try:
resp, resp_body = mthd(cdn_uri, *args, **kwargs)
except exc.NotFound as e:
# This could be due to either the container does not exist, or that
# the container exists but is not CDN-enabled.
try:
mgt_uri = "%s%s" % (self.management_url, uri)
resp, resp_body = self.method_head(mgt_uri)
except exc.NotFound:
raise
raise exc.NotCDNEnabled("This container is not CDN-enabled.")
return resp, resp_body | python | def cdn_request(self, uri, method, *args, **kwargs):
"""
If the service supports CDN, use this method to access CDN-specific
URIs.
"""
if not self.cdn_management_url:
raise exc.NotCDNEnabled("CDN is not enabled for this service.")
cdn_uri = "%s%s" % (self.cdn_management_url, uri)
mthd = self.method_dict.get(method.upper())
try:
resp, resp_body = mthd(cdn_uri, *args, **kwargs)
except exc.NotFound as e:
# This could be due to either the container does not exist, or that
# the container exists but is not CDN-enabled.
try:
mgt_uri = "%s%s" % (self.management_url, uri)
resp, resp_body = self.method_head(mgt_uri)
except exc.NotFound:
raise
raise exc.NotCDNEnabled("This container is not CDN-enabled.")
return resp, resp_body | ['def', 'cdn_request', '(', 'self', ',', 'uri', ',', 'method', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'not', 'self', '.', 'cdn_management_url', ':', 'raise', 'exc', '.', 'NotCDNEnabled', '(', '"CDN is not enabled for this service."', ')', 'cdn_uri', '=', '"%s%s"', '%', '(', 'self', '.', 'cdn_management_url', ',', 'uri', ')', 'mthd', '=', 'self', '.', 'method_dict', '.', 'get', '(', 'method', '.', 'upper', '(', ')', ')', 'try', ':', 'resp', ',', 'resp_body', '=', 'mthd', '(', 'cdn_uri', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'except', 'exc', '.', 'NotFound', 'as', 'e', ':', '# This could be due to either the container does not exist, or that', '# the container exists but is not CDN-enabled.', 'try', ':', 'mgt_uri', '=', '"%s%s"', '%', '(', 'self', '.', 'management_url', ',', 'uri', ')', 'resp', ',', 'resp_body', '=', 'self', '.', 'method_head', '(', 'mgt_uri', ')', 'except', 'exc', '.', 'NotFound', ':', 'raise', 'raise', 'exc', '.', 'NotCDNEnabled', '(', '"This container is not CDN-enabled."', ')', 'return', 'resp', ',', 'resp_body'] | If the service supports CDN, use this method to access CDN-specific
URIs. | ['If', 'the', 'service', 'supports', 'CDN', 'use', 'this', 'method', 'to', 'access', 'CDN', '-', 'specific', 'URIs', '.'] | train | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/object_storage.py#L3202-L3222 |
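A sketch for cdn_request, assuming an authenticated pyrax Cloud Files client is already in hand as cf; the container name is a placeholder.

resp, body = cf.cdn_request('/my-container', 'HEAD')   # raises NotCDNEnabled if CDN is off for the container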
7,643 | ulfalizer/Kconfiglib | examples/menuconfig_example.py | node_str | def node_str(node):
"""
Returns the complete menu entry text for a menu node, or "" for invisible
menu nodes. Invisible menu nodes are those that lack a prompt or that do
not have a satisfied prompt condition.
Example return value: "[*] Bool symbol (BOOL)"
The symbol name is printed in parentheses to the right of the prompt. This
is so that symbols can easily be referred to in the configuration
interface.
"""
if not node.prompt:
return ""
# Even for menu nodes for symbols and choices, it's wrong to check
# Symbol.visibility / Choice.visibility here. The reason is that a symbol
# (and a choice, in theory) can be defined in multiple locations, giving it
# multiple menu nodes, which do not necessarily all have the same prompt
# visibility. Symbol.visibility / Choice.visibility is calculated as the OR
# of the visibility of all the prompts.
prompt, prompt_cond = node.prompt
if not expr_value(prompt_cond):
return ""
if node.item == MENU:
return " " + prompt
if node.item == COMMENT:
return " *** {} ***".format(prompt)
# Symbol or Choice
sc = node.item
if sc.type == UNKNOWN:
# Skip symbols defined without a type (these are obscure and generate
# a warning)
return ""
# {:3} sets the field width to three. Gives nice alignment for empty string
# values.
res = "{:3} {}".format(value_str(sc), prompt)
# Don't print the name for unnamed choices (the normal kind)
if sc.name is not None:
res += " ({})".format(sc.name)
return res | python | def node_str(node):
"""
Returns the complete menu entry text for a menu node, or "" for invisible
menu nodes. Invisible menu nodes are those that lack a prompt or that do
not have a satisfied prompt condition.
Example return value: "[*] Bool symbol (BOOL)"
The symbol name is printed in parentheses to the right of the prompt. This
is so that symbols can easily be referred to in the configuration
interface.
"""
if not node.prompt:
return ""
# Even for menu nodes for symbols and choices, it's wrong to check
# Symbol.visibility / Choice.visibility here. The reason is that a symbol
# (and a choice, in theory) can be defined in multiple locations, giving it
# multiple menu nodes, which do not necessarily all have the same prompt
# visibility. Symbol.visibility / Choice.visibility is calculated as the OR
# of the visibility of all the prompts.
prompt, prompt_cond = node.prompt
if not expr_value(prompt_cond):
return ""
if node.item == MENU:
return " " + prompt
if node.item == COMMENT:
return " *** {} ***".format(prompt)
# Symbol or Choice
sc = node.item
if sc.type == UNKNOWN:
# Skip symbols defined without a type (these are obscure and generate
# a warning)
return ""
# {:3} sets the field width to three. Gives nice alignment for empty string
# values.
res = "{:3} {}".format(value_str(sc), prompt)
# Don't print the name for unnamed choices (the normal kind)
if sc.name is not None:
res += " ({})".format(sc.name)
return res | ['def', 'node_str', '(', 'node', ')', ':', 'if', 'not', 'node', '.', 'prompt', ':', 'return', '""', "# Even for menu nodes for symbols and choices, it's wrong to check", '# Symbol.visibility / Choice.visibility here. The reason is that a symbol', '# (and a choice, in theory) can be defined in multiple locations, giving it', '# multiple menu nodes, which do not necessarily all have the same prompt', '# visibility. Symbol.visibility / Choice.visibility is calculated as the OR', '# of the visibility of all the prompts.', 'prompt', ',', 'prompt_cond', '=', 'node', '.', 'prompt', 'if', 'not', 'expr_value', '(', 'prompt_cond', ')', ':', 'return', '""', 'if', 'node', '.', 'item', '==', 'MENU', ':', 'return', '" "', '+', 'prompt', 'if', 'node', '.', 'item', '==', 'COMMENT', ':', 'return', '" *** {} ***"', '.', 'format', '(', 'prompt', ')', '# Symbol or Choice', 'sc', '=', 'node', '.', 'item', 'if', 'sc', '.', 'type', '==', 'UNKNOWN', ':', '# Skip symbols defined without a type (these are obscure and generate', '# a warning)', 'return', '""', '# {:3} sets the field width to three. Gives nice alignment for empty string', '# values.', 'res', '=', '"{:3} {}"', '.', 'format', '(', 'value_str', '(', 'sc', ')', ',', 'prompt', ')', "# Don't print the name for unnamed choices (the normal kind)", 'if', 'sc', '.', 'name', 'is', 'not', 'None', ':', 'res', '+=', '" ({})"', '.', 'format', '(', 'sc', '.', 'name', ')', 'return', 'res'] | Returns the complete menu entry text for a menu node, or "" for invisible
menu nodes. Invisible menu nodes are those that lack a prompt or that do
not have a satisfied prompt condition.
Example return value: "[*] Bool symbol (BOOL)"
The symbol name is printed in parentheses to the right of the prompt. This
is so that symbols can easily be referred to in the configuration
interface. | ['Returns', 'the', 'complete', 'menu', 'entry', 'text', 'for', 'a', 'menu', 'node', 'or', 'for', 'invisible', 'menu', 'nodes', '.', 'Invisible', 'menu', 'nodes', 'are', 'those', 'that', 'lack', 'a', 'prompt', 'or', 'that', 'do', 'not', 'have', 'a', 'satisfied', 'prompt', 'condition', '.'] | train | https://github.com/ulfalizer/Kconfiglib/blob/9fe13c03de16c341cd7ed40167216207b821ea50/examples/menuconfig_example.py#L173-L221 |
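A sketch of using node_str (as defined above) to dump every visible entry of a Kconfig tree; Kconfig() and node_iter() are standard kconfiglib API, and 'Kconfig' is a placeholder path to the top-level file.

from kconfiglib import Kconfig

kconf = Kconfig('Kconfig')          # placeholder path to the top-level Kconfig file
for node in kconf.node_iter():      # walks all menu nodes in order
    s = node_str(node)
    if s:
        print(s)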
7,644 | fabioz/PyDev.Debugger | third_party/pep8/lib2to3/lib2to3/pytree.py | Node.append_child | def append_child(self, child):
"""
Equivalent to 'node.children.append(child)'. This method also sets the
child's parent attribute appropriately.
"""
child.parent = self
self.children.append(child)
self.changed() | python | def append_child(self, child):
"""
Equivalent to 'node.children.append(child)'. This method also sets the
child's parent attribute appropriately.
"""
child.parent = self
self.children.append(child)
self.changed() | ['def', 'append_child', '(', 'self', ',', 'child', ')', ':', 'child', '.', 'parent', '=', 'self', 'self', '.', 'children', '.', 'append', '(', 'child', ')', 'self', '.', 'changed', '(', ')'] | Equivalent to 'node.children.append(child)'. This method also sets the
child's parent attribute appropriately. | ['Equivalent', 'to', 'node', '.', 'children', '.', 'append', '(', 'child', ')', '.', 'This', 'method', 'also', 'sets', 'the', 'child', 's', 'parent', 'attribute', 'appropriately', '.'] | train | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/lib2to3/lib2to3/pytree.py#L341-L348 |
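A minimal standalone example for append_child; 256 is just the smallest legal non-terminal type for a Node, chosen arbitrarily here.

from lib2to3.pytree import Node, Leaf
from lib2to3.pgen2 import token

parent = Node(256, [Leaf(token.NAME, 'x')])
child = Leaf(token.NAME, 'y')
parent.append_child(child)
assert child.parent is parent and parent.children[-1] is child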
7,645 | tamasgal/km3pipe | km3pipe/io/daq.py | DAQPump.determine_frame_positions | def determine_frame_positions(self):
"""Record the file pointer position of each frame"""
self.rewind_file()
with ignored(struct.error):
while True:
pointer_position = self.blob_file.tell()
length = struct.unpack('<i', self.blob_file.read(4))[0]
self.blob_file.seek(length - 4, 1)
self.frame_positions.append(pointer_position)
self.rewind_file()
log.info("Found {0} frames.".format(len(self.frame_positions))) | python | def determine_frame_positions(self):
"""Record the file pointer position of each frame"""
self.rewind_file()
with ignored(struct.error):
while True:
pointer_position = self.blob_file.tell()
length = struct.unpack('<i', self.blob_file.read(4))[0]
self.blob_file.seek(length - 4, 1)
self.frame_positions.append(pointer_position)
self.rewind_file()
log.info("Found {0} frames.".format(len(self.frame_positions))) | ['def', 'determine_frame_positions', '(', 'self', ')', ':', 'self', '.', 'rewind_file', '(', ')', 'with', 'ignored', '(', 'struct', '.', 'error', ')', ':', 'while', 'True', ':', 'pointer_position', '=', 'self', '.', 'blob_file', '.', 'tell', '(', ')', 'length', '=', 'struct', '.', 'unpack', '(', "'<i'", ',', 'self', '.', 'blob_file', '.', 'read', '(', '4', ')', ')', '[', '0', ']', 'self', '.', 'blob_file', '.', 'seek', '(', 'length', '-', '4', ',', '1', ')', 'self', '.', 'frame_positions', '.', 'append', '(', 'pointer_position', ')', 'self', '.', 'rewind_file', '(', ')', 'log', '.', 'info', '(', '"Found {0} frames."', '.', 'format', '(', 'len', '(', 'self', '.', 'frame_positions', ')', ')', ')'] | Record the file pointer position of each frame | ['Record', 'the', 'file', 'pointer', 'position', 'of', 'each', 'frame'] | train | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/daq.py#L191-L201 |
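A sketch of how the recorded offsets get used afterwards: random access to frame i is a seek to frame_positions[i]. Here pump is assumed to be an initialised DAQPump with its blob_file open.

pump.determine_frame_positions()
pump.blob_file.seek(pump.frame_positions[0])   # jump straight back to the first frame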
7,646 | relwell/corenlp-xml-lib | corenlp_xml/document.py | Sentence.sentiment | def sentiment(self):
"""
The sentiment of this sentence
:getter: Returns the sentiment value of this sentence
:type: int
"""
if self._sentiment is None:
self._sentiment = int(self._element.get('sentiment'))
return self._sentiment | python | def sentiment(self):
"""
The sentiment of this sentence
:getter: Returns the sentiment value of this sentence
:type: int
"""
if self._sentiment is None:
self._sentiment = int(self._element.get('sentiment'))
return self._sentiment | ['def', 'sentiment', '(', 'self', ')', ':', 'if', 'self', '.', '_sentiment', 'is', 'None', ':', 'self', '.', '_sentiment', '=', 'int', '(', 'self', '.', '_element', '.', 'get', '(', "'sentiment'", ')', ')', 'return', 'self', '.', '_sentiment'] | The sentiment of this sentence
:getter: Returns the sentiment value of this sentence
:type: int | ['The', 'sentiment', 'of', 'this', 'sentence'] | train | https://github.com/relwell/corenlp-xml-lib/blob/9b0f8c912ba3ecedd34473f74a9f2d033a75baf9/corenlp_xml/document.py#L132-L142 |
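A usage sketch for the sentiment property; Document is assumed to be the library's entry point for parsed CoreNLP XML, the file path is a placeholder, and the XML must have been produced with the sentiment annotator enabled.

from corenlp_xml.document import Document

with open('corenlp_output.xml') as f:       # placeholder path
    doc = Document(f.read())
print(doc.sentences[0].sentiment)           # small integer class, e.g. on a 0-4 scale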
7,647 | internetarchive/warc | warc/arc.py | ARCRecord.from_string | def from_string(cls, string, version):
"""
Constructs an ARC record from a string and returns it.
TODO: It might be best to merge this with the _read_arc_record
function rather than reimplement the functionality here.
"""
header, payload = string.split("\n",1)
if payload[0] == '\n': # There's an extra
payload = payload[1:]
if int(version) == 1:
arc_header_re = ARC1_HEADER_RE
elif int(version) == 2:
arc_header_re = ARC2_HEADER_RE
matches = arc_header_re.search(header)
headers = matches.groupdict()
arc_header = ARCHeader(**headers)
return cls(header = arc_header, payload = payload, version = version) | python | def from_string(cls, string, version):
"""
Constructs an ARC record from a string and returns it.
TODO: It might be best to merge this with the _read_arc_record
function rather than reimplement the functionality here.
"""
header, payload = string.split("\n",1)
if payload[0] == '\n': # There's an extra
payload = payload[1:]
if int(version) == 1:
arc_header_re = ARC1_HEADER_RE
elif int(version) == 2:
arc_header_re = ARC2_HEADER_RE
matches = arc_header_re.search(header)
headers = matches.groupdict()
arc_header = ARCHeader(**headers)
return cls(header = arc_header, payload = payload, version = version) | ['def', 'from_string', '(', 'cls', ',', 'string', ',', 'version', ')', ':', 'header', ',', 'payload', '=', 'string', '.', 'split', '(', '"\\n"', ',', '1', ')', 'if', 'payload', '[', '0', ']', '==', "'\\n'", ':', "# There's an extra", 'payload', '=', 'payload', '[', '1', ':', ']', 'if', 'int', '(', 'version', ')', '==', '1', ':', 'arc_header_re', '=', 'ARC1_HEADER_RE', 'elif', 'int', '(', 'version', ')', '==', '2', ':', 'arc_header_re', '=', 'ARC2_HEADER_RE', 'matches', '=', 'arc_header_re', '.', 'search', '(', 'header', ')', 'headers', '=', 'matches', '.', 'groupdict', '(', ')', 'arc_header', '=', 'ARCHeader', '(', '*', '*', 'headers', ')', 'return', 'cls', '(', 'header', '=', 'arc_header', ',', 'payload', '=', 'payload', ',', 'version', '=', 'version', ')'] | Constructs an ARC record from a string and returns it.
TODO: It might be best to merge this with the _read_arc_record
function rather than reimplement the functionality here. | ['Constructs', 'an', 'ARC', 'record', 'from', 'a', 'string', 'and', 'returns', 'it', '.'] | train | https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L161-L179 |
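A hedged sketch for ARCRecord.from_string; the one-line v1 header layout below (URL, IP, timestamp, content-type, length) follows the classic ARC format and is an assumption about what ARC1_HEADER_RE accepts.

from warc.arc import ARCRecord

raw = ('http://example.com/ 192.0.2.1 20080101120000 text/html 11\n'
       '\n'
       'hello world')
record = ARCRecord.from_string(raw, version=1)
print(record.payload)                        # 'hello world'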
7,648 | PSPC-SPAC-buyandsell/von_anchor | von_anchor/op/setnym.py | setnym | async def setnym(ini_path: str) -> int:
"""
Set configuration. Open pool, trustee anchor, and wallet of anchor whose nym to send.
Register exit hooks to close pool and trustee anchor.
Engage trustee anchor to send nym for VON anchor, if it differs on the ledger from configuration.
:param ini_path: path to configuration file
:return: 0 for OK, 1 for failure
"""
config = inis2dict(ini_path)
if config['Trustee Anchor']['name'] == config['VON Anchor']['name']:
raise ExtantWallet('Wallet names must differ between VON Anchor and Trustee Anchor')
cfg_van_role = config['VON Anchor'].get('role', None) or None # nudge empty value from '' to None
if not ok_role(cfg_van_role):
raise BadRole('Configured role {} is not valid'.format(cfg_van_role))
pool_data = NodePoolData(
config['Node Pool']['name'],
config['Node Pool'].get('genesis.txn.path', None) or None)
an_data = {
'tan': AnchorData(
Role.TRUSTEE,
config['Trustee Anchor']['name'],
config['Trustee Anchor'].get('seed', None) or None,
config['Trustee Anchor'].get('did', None) or None,
config['Trustee Anchor'].get('wallet.create', '0').lower() in ['1', 'true', 'yes'],
config['Trustee Anchor'].get('wallet.type', None) or None,
config['Trustee Anchor'].get('wallet.access', None) or None),
'van': AnchorData(
Role.get(cfg_van_role),
config['VON Anchor']['name'],
config['VON Anchor'].get('seed', None) or None,
config['VON Anchor'].get('did', None) or None,
config['VON Anchor'].get('wallet.create', '0').lower() in ['1', 'true', 'yes'],
config['VON Anchor'].get('wallet.type', None) or None,
config['VON Anchor'].get('wallet.access', None) or None)
}
an_wallet = await _set_wallets(an_data)
p_mgr = NodePoolManager()
if pool_data.name not in await p_mgr.list():
if pool_data.genesis_txn_path:
await p_mgr.add_config(pool_data.name, pool_data.genesis_txn_path)
else:
raise AbsentPool('Node pool {} has no ledger configuration, but {} specifies no genesis txn path'.format(
pool_data.name,
ini_path))
async with an_wallet['tan'] as w_tan, (
an_wallet['van']) as w_van, (
p_mgr.get(pool_data.name)) as pool, (
TrusteeAnchor(w_tan, pool)) as tan, (
NominalAnchor(w_van, pool)) as van:
send_verkey = van.verkey
try:
nym_role = await tan.get_nym_role(van.did)
if an_data['van'].role == nym_role:
return 0 # ledger is as per configuration
send_verkey = None # only owner can touch verkey
if nym_role != Role.USER: # only remove role when it is not already None on the ledger
await tan.send_nym(van.did, send_verkey, van.wallet.name, Role.ROLE_REMOVE)
except AbsentNym:
pass # cryptonym not there yet, fall through
await tan.send_nym(van.did, send_verkey, van.wallet.name, an_data['van'].role)
return 0 | python | async def setnym(ini_path: str) -> int:
"""
Set configuration. Open pool, trustee anchor, and wallet of anchor whose nym to send.
Register exit hooks to close pool and trustee anchor.
Engage trustee anchor to send nym for VON anchor, if it differs on the ledger from configuration.
:param ini_path: path to configuration file
:return: 0 for OK, 1 for failure
"""
config = inis2dict(ini_path)
if config['Trustee Anchor']['name'] == config['VON Anchor']['name']:
raise ExtantWallet('Wallet names must differ between VON Anchor and Trustee Anchor')
cfg_van_role = config['VON Anchor'].get('role', None) or None # nudge empty value from '' to None
if not ok_role(cfg_van_role):
raise BadRole('Configured role {} is not valid'.format(cfg_van_role))
pool_data = NodePoolData(
config['Node Pool']['name'],
config['Node Pool'].get('genesis.txn.path', None) or None)
an_data = {
'tan': AnchorData(
Role.TRUSTEE,
config['Trustee Anchor']['name'],
config['Trustee Anchor'].get('seed', None) or None,
config['Trustee Anchor'].get('did', None) or None,
config['Trustee Anchor'].get('wallet.create', '0').lower() in ['1', 'true', 'yes'],
config['Trustee Anchor'].get('wallet.type', None) or None,
config['Trustee Anchor'].get('wallet.access', None) or None),
'van': AnchorData(
Role.get(cfg_van_role),
config['VON Anchor']['name'],
config['VON Anchor'].get('seed', None) or None,
config['VON Anchor'].get('did', None) or None,
config['VON Anchor'].get('wallet.create', '0').lower() in ['1', 'true', 'yes'],
config['VON Anchor'].get('wallet.type', None) or None,
config['VON Anchor'].get('wallet.access', None) or None)
}
an_wallet = await _set_wallets(an_data)
p_mgr = NodePoolManager()
if pool_data.name not in await p_mgr.list():
if pool_data.genesis_txn_path:
await p_mgr.add_config(pool_data.name, pool_data.genesis_txn_path)
else:
raise AbsentPool('Node pool {} has no ledger configuration, but {} specifies no genesis txn path'.format(
pool_data.name,
ini_path))
async with an_wallet['tan'] as w_tan, (
an_wallet['van']) as w_van, (
p_mgr.get(pool_data.name)) as pool, (
TrusteeAnchor(w_tan, pool)) as tan, (
NominalAnchor(w_van, pool)) as van:
send_verkey = van.verkey
try:
nym_role = await tan.get_nym_role(van.did)
if an_data['van'].role == nym_role:
return 0 # ledger is as per configuration
send_verkey = None # only owner can touch verkey
if nym_role != Role.USER: # only remove role when it is not already None on the ledger
await tan.send_nym(van.did, send_verkey, van.wallet.name, Role.ROLE_REMOVE)
except AbsentNym:
pass # cryptonym not there yet, fall through
await tan.send_nym(van.did, send_verkey, van.wallet.name, an_data['van'].role)
return 0 | ['async', 'def', 'setnym', '(', 'ini_path', ':', 'str', ')', '->', 'int', ':', 'config', '=', 'inis2dict', '(', 'ini_path', ')', 'if', 'config', '[', "'Trustee Anchor'", ']', '[', "'name'", ']', '==', 'config', '[', "'VON Anchor'", ']', '[', "'name'", ']', ':', 'raise', 'ExtantWallet', '(', "'Wallet names must differ between VON Anchor and Trustee Anchor'", ')', 'cfg_van_role', '=', 'config', '[', "'VON Anchor'", ']', '.', 'get', '(', "'role'", ',', 'None', ')', 'or', 'None', "# nudge empty value from '' to None", 'if', 'not', 'ok_role', '(', 'cfg_van_role', ')', ':', 'raise', 'BadRole', '(', "'Configured role {} is not valid'", '.', 'format', '(', 'cfg_van_role', ')', ')', 'pool_data', '=', 'NodePoolData', '(', 'config', '[', "'Node Pool'", ']', '[', "'name'", ']', ',', 'config', '[', "'Node Pool'", ']', '.', 'get', '(', "'genesis.txn.path'", ',', 'None', ')', 'or', 'None', ')', 'an_data', '=', '{', "'tan'", ':', 'AnchorData', '(', 'Role', '.', 'TRUSTEE', ',', 'config', '[', "'Trustee Anchor'", ']', '[', "'name'", ']', ',', 'config', '[', "'Trustee Anchor'", ']', '.', 'get', '(', "'seed'", ',', 'None', ')', 'or', 'None', ',', 'config', '[', "'Trustee Anchor'", ']', '.', 'get', '(', "'did'", ',', 'None', ')', 'or', 'None', ',', 'config', '[', "'Trustee Anchor'", ']', '.', 'get', '(', "'wallet.create'", ',', "'0'", ')', '.', 'lower', '(', ')', 'in', '[', "'1'", ',', "'true'", ',', "'yes'", ']', ',', 'config', '[', "'Trustee Anchor'", ']', '.', 'get', '(', "'wallet.type'", ',', 'None', ')', 'or', 'None', ',', 'config', '[', "'Trustee Anchor'", ']', '.', 'get', '(', "'wallet.access'", ',', 'None', ')', 'or', 'None', ')', ',', "'van'", ':', 'AnchorData', '(', 'Role', '.', 'get', '(', 'cfg_van_role', ')', ',', 'config', '[', "'VON Anchor'", ']', '[', "'name'", ']', ',', 'config', '[', "'VON Anchor'", ']', '.', 'get', '(', "'seed'", ',', 'None', ')', 'or', 'None', ',', 'config', '[', "'VON Anchor'", ']', '.', 'get', '(', "'did'", ',', 'None', ')', 'or', 'None', ',', 'config', '[', "'VON Anchor'", ']', '.', 'get', '(', "'wallet.create'", ',', "'0'", ')', '.', 'lower', '(', ')', 'in', '[', "'1'", ',', "'true'", ',', "'yes'", ']', ',', 'config', '[', "'VON Anchor'", ']', '.', 'get', '(', "'wallet.type'", ',', 'None', ')', 'or', 'None', ',', 'config', '[', "'VON Anchor'", ']', '.', 'get', '(', "'wallet.access'", ',', 'None', ')', 'or', 'None', ')', '}', 'an_wallet', '=', 'await', '_set_wallets', '(', 'an_data', ')', 'p_mgr', '=', 'NodePoolManager', '(', ')', 'if', 'pool_data', '.', 'name', 'not', 'in', 'await', 'p_mgr', '.', 'list', '(', ')', ':', 'if', 'pool_data', '.', 'genesis_txn_path', ':', 'await', 'p_mgr', '.', 'add_config', '(', 'pool_data', '.', 'name', ',', 'pool_data', '.', 'genesis_txn_path', ')', 'else', ':', 'raise', 'AbsentPool', '(', "'Node pool {} has no ledger configuration, but {} specifies no genesis txn path'", '.', 'format', '(', 'pool_data', '.', 'name', ',', 'ini_path', ')', ')', 'async', 'with', 'an_wallet', '[', "'tan'", ']', 'as', 'w_tan', ',', '(', 'an_wallet', '[', "'van'", ']', ')', 'as', 'w_van', ',', '(', 'p_mgr', '.', 'get', '(', 'pool_data', '.', 'name', ')', ')', 'as', 'pool', ',', '(', 'TrusteeAnchor', '(', 'w_tan', ',', 'pool', ')', ')', 'as', 'tan', ',', '(', 'NominalAnchor', '(', 'w_van', ',', 'pool', ')', ')', 'as', 'van', ':', 'send_verkey', '=', 'van', '.', 'verkey', 'try', ':', 'nym_role', '=', 'await', 'tan', '.', 'get_nym_role', '(', 'van', '.', 'did', ')', 'if', 'an_data', '[', "'van'", ']', '.', 'role', '==', 'nym_role', ':', 'return', '0', 
'# ledger is as per configuration', 'send_verkey', '=', 'None', '# only owner can touch verkey', 'if', 'nym_role', '!=', 'Role', '.', 'USER', ':', '# only remove role when it is not already None on the ledger', 'await', 'tan', '.', 'send_nym', '(', 'van', '.', 'did', ',', 'send_verkey', ',', 'van', '.', 'wallet', '.', 'name', ',', 'Role', '.', 'ROLE_REMOVE', ')', 'except', 'AbsentNym', ':', 'pass', '# cryptonym not there yet, fall through', 'await', 'tan', '.', 'send_nym', '(', 'van', '.', 'did', ',', 'send_verkey', ',', 'van', '.', 'wallet', '.', 'name', ',', 'an_data', '[', "'van'", ']', '.', 'role', ')', 'return', '0'] | Set configuration. Open pool, trustee anchor, and wallet of anchor whose nym to send.
Register exit hooks to close pool and trustee anchor.
Engage trustee anchor to send nym for VON anchor, if it differs on the ledger from configuration.
:param ini_path: path to configuration file
:return: 0 for OK, 1 for failure | ['Set', 'configuration', '.', 'Open', 'pool', 'trustee', 'anchor', 'and', 'wallet', 'of', 'anchor', 'whose', 'nym', 'to', 'send', '.', 'Register', 'exit', 'hooks', 'to', 'close', 'pool', 'and', 'trustee', 'anchor', '.'] | train | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/op/setnym.py#L110-L182 |
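A sketch of driving setnym from a script; the section and key names come from the code above, and every value below is a placeholder (seeds must be 32 characters).

# setnym.ini (placeholder values):
#   [Node Pool]
#   name = sandbox
#   genesis.txn.path = /home/indy/pool_transactions_genesis
#   [Trustee Anchor]
#   name = trustee-anchor
#   seed = 000000000000000000000000Trustee1
#   wallet.create = 1
#   [VON Anchor]
#   name = org-anchor
#   seed = OrgAnchor00000000000000000000001
#   role = TRUST_ANCHOR
#   wallet.create = 1
import asyncio
from von_anchor.op.setnym import setnym

asyncio.get_event_loop().run_until_complete(setnym('/path/to/setnym.ini'))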
7,649 | thiagopbueno/pyrddl | pyrddl/parser.py | RDDLParser.p_state_action_constraint_section | def p_state_action_constraint_section(self, p):
'''state_action_constraint_section : STATE_ACTION_CONSTRAINTS LCURLY state_cons_list RCURLY SEMI
| STATE_ACTION_CONSTRAINTS LCURLY RCURLY SEMI'''
if len(p) == 6:
p[0] = ('constraints', p[3])
elif len(p) == 5:
p[0] = ('constraints', [])
self._print_verbose('state-action-constraints') | python | def p_state_action_constraint_section(self, p):
'''state_action_constraint_section : STATE_ACTION_CONSTRAINTS LCURLY state_cons_list RCURLY SEMI
| STATE_ACTION_CONSTRAINTS LCURLY RCURLY SEMI'''
if len(p) == 6:
p[0] = ('constraints', p[3])
elif len(p) == 5:
p[0] = ('constraints', [])
self._print_verbose('state-action-constraints') | ['def', 'p_state_action_constraint_section', '(', 'self', ',', 'p', ')', ':', 'if', 'len', '(', 'p', ')', '==', '6', ':', 'p', '[', '0', ']', '=', '(', "'constraints'", ',', 'p', '[', '3', ']', ')', 'elif', 'len', '(', 'p', ')', '==', '5', ':', 'p', '[', '0', ']', '=', '(', "'constraints'", ',', '[', ']', ')', 'self', '.', '_print_verbose', '(', "'state-action-constraints'", ')'] | state_action_constraint_section : STATE_ACTION_CONSTRAINTS LCURLY state_cons_list RCURLY SEMI
| STATE_ACTION_CONSTRAINTS LCURLY RCURLY SEMI | ['state_action_constraint_section', ':', 'STATE_ACTION_CONSTRAINTS', 'LCURLY', 'state_cons_list', 'RCURLY', 'SEMI', '|', 'STATE_ACTION_CONSTRAINTS', 'LCURLY', 'RCURLY', 'SEMI'] | train | https://github.com/thiagopbueno/pyrddl/blob/3bcfa850b1a7532c7744358f3c6b9e0f8ab978c9/pyrddl/parser.py#L441-L448 |
7,650 | dancsalo/TensorBase | tensorbase/data.py | Mnist.extract_labels | def extract_labels(self, f, one_hot=False, num_classes=10):
"""Extract the labels into a 1D uint8 numpy array [index].
Args:
f: A file object that can be passed into a gzip reader.
one_hot: Does one hot encoding for the result.
num_classes: Number of classes for the one hot encoding.
Returns:
labels: a 1D unit8 numpy array.
Raises:
ValueError: If the bystream doesn't start with 2049.
"""
print('Extracting', f.name)
with gzip.GzipFile(fileobj=f) as bytestream:
magic = self._read32(bytestream)
if magic != 2049:
raise ValueError('Invalid magic number %d in MNIST label file: %s' %
(magic, f.name))
num_items = self._read32(bytestream)
buf = bytestream.read(num_items)
labels = np.frombuffer(buf, dtype=np.uint8)
if one_hot:
return self.dense_to_one_hot(labels, num_classes)
return labels | python | def extract_labels(self, f, one_hot=False, num_classes=10):
"""Extract the labels into a 1D uint8 numpy array [index].
Args:
f: A file object that can be passed into a gzip reader.
one_hot: Does one hot encoding for the result.
num_classes: Number of classes for the one hot encoding.
Returns:
labels: a 1D unit8 numpy array.
Raises:
ValueError: If the bystream doesn't start with 2049.
"""
print('Extracting', f.name)
with gzip.GzipFile(fileobj=f) as bytestream:
magic = self._read32(bytestream)
if magic != 2049:
raise ValueError('Invalid magic number %d in MNIST label file: %s' %
(magic, f.name))
num_items = self._read32(bytestream)
buf = bytestream.read(num_items)
labels = np.frombuffer(buf, dtype=np.uint8)
if one_hot:
return self.dense_to_one_hot(labels, num_classes)
return labels | ['def', 'extract_labels', '(', 'self', ',', 'f', ',', 'one_hot', '=', 'False', ',', 'num_classes', '=', '10', ')', ':', 'print', '(', "'Extracting'", ',', 'f', '.', 'name', ')', 'with', 'gzip', '.', 'GzipFile', '(', 'fileobj', '=', 'f', ')', 'as', 'bytestream', ':', 'magic', '=', 'self', '.', '_read32', '(', 'bytestream', ')', 'if', 'magic', '!=', '2049', ':', 'raise', 'ValueError', '(', "'Invalid magic number %d in MNIST label file: %s'", '%', '(', 'magic', ',', 'f', '.', 'name', ')', ')', 'num_items', '=', 'self', '.', '_read32', '(', 'bytestream', ')', 'buf', '=', 'bytestream', '.', 'read', '(', 'num_items', ')', 'labels', '=', 'np', '.', 'frombuffer', '(', 'buf', ',', 'dtype', '=', 'np', '.', 'uint8', ')', 'if', 'one_hot', ':', 'return', 'self', '.', 'dense_to_one_hot', '(', 'labels', ',', 'num_classes', ')', 'return', 'labels'] | Extract the labels into a 1D uint8 numpy array [index].
Args:
f: A file object that can be passed into a gzip reader.
one_hot: Does one hot encoding for the result.
num_classes: Number of classes for the one hot encoding.
Returns:
labels: a 1D unit8 numpy array.
Raises:
ValueError: If the bystream doesn't start with 2049. | ['Extract', 'the', 'labels', 'into', 'a', '1D', 'uint8', 'numpy', 'array', '[', 'index', ']', '.', 'Args', ':', 'f', ':', 'A', 'file', 'object', 'that', 'can', 'be', 'passed', 'into', 'a', 'gzip', 'reader', '.', 'one_hot', ':', 'Does', 'one', 'hot', 'encoding', 'for', 'the', 'result', '.', 'num_classes', ':', 'Number', 'of', 'classes', 'for', 'the', 'one', 'hot', 'encoding', '.', 'Returns', ':', 'labels', ':', 'a', '1D', 'unit8', 'numpy', 'array', '.', 'Raises', ':', 'ValueError', ':', 'If', 'the', 'bystream', 'doesn', 't', 'start', 'with', '2049', '.'] | train | https://github.com/dancsalo/TensorBase/blob/3d42a326452bd03427034916ff2fb90730020204/tensorbase/data.py#L94-L116 |
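A call sketch for extract_labels; the import path and Mnist constructor arguments are assumptions, and the .gz file is the standard MNIST label archive assumed to be present locally.

from tensorbase.data import Mnist

mnist = Mnist()                                        # constructor details assumed
with open('train-labels-idx1-ubyte.gz', 'rb') as f:
    labels = mnist.extract_labels(f, one_hot=True, num_classes=10)
print(labels.shape)                                    # (60000, 10) for the training set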
7,651 | CulturePlex/django-zotero | django_zotero/templatetags/zotero_inline_extras.py | zotero_inline_tags | def zotero_inline_tags(parser, token):
"""
Render an inline formset of tags.
Usage:
{% zotero_inline_tags formset[ option] %}
option = "all" | "media" | "formset"
"""
args = token.split_contents()
length = len(args)
if length == 2:
rendered_node = RenderedAllNode(args[1])
elif length == 3 and args[2].lower() == u'all':
rendered_node = RenderedAllNode(args[1])
elif length == 3 and args[2].lower() == u'media':
rendered_node = RenderedMediaNode(args[1])
elif length == 3 and args[2].lower() == u'formset':
rendered_node = RenderedFormsetNode(args[1])
else:
raise t.TemplateSyntaxError('Incorrect arguments in %s.' % args[0])
return rendered_node | python | def zotero_inline_tags(parser, token):
"""
Render an inline formset of tags.
Usage:
{% zotero_inline_tags formset[ option] %}
option = "all" | "media" | "formset"
"""
args = token.split_contents()
length = len(args)
if length == 2:
rendered_node = RenderedAllNode(args[1])
elif length == 3 and args[2].lower() == u'all':
rendered_node = RenderedAllNode(args[1])
elif length == 3 and args[2].lower() == u'media':
rendered_node = RenderedMediaNode(args[1])
elif length == 3 and args[2].lower() == u'formset':
rendered_node = RenderedFormsetNode(args[1])
else:
raise t.TemplateSyntaxError('Incorrect arguments in %s.' % args[0])
return rendered_node | ['def', 'zotero_inline_tags', '(', 'parser', ',', 'token', ')', ':', 'args', '=', 'token', '.', 'split_contents', '(', ')', 'length', '=', 'len', '(', 'args', ')', 'if', 'length', '==', '2', ':', 'rendered_node', '=', 'RenderedAllNode', '(', 'args', '[', '1', ']', ')', 'elif', 'length', '==', '3', 'and', 'args', '[', '2', ']', '.', 'lower', '(', ')', '==', "u'all'", ':', 'rendered_node', '=', 'RenderedAllNode', '(', 'args', '[', '1', ']', ')', 'elif', 'length', '==', '3', 'and', 'args', '[', '2', ']', '.', 'lower', '(', ')', '==', "u'media'", ':', 'rendered_node', '=', 'RenderedMediaNode', '(', 'args', '[', '1', ']', ')', 'elif', 'length', '==', '3', 'and', 'args', '[', '2', ']', '.', 'lower', '(', ')', '==', "u'formset'", ':', 'rendered_node', '=', 'RenderedFormsetNode', '(', 'args', '[', '1', ']', ')', 'else', ':', 'raise', 't', '.', 'TemplateSyntaxError', '(', "'Incorrect arguments in %s.'", '%', 'args', '[', '0', ']', ')', 'return', 'rendered_node'] | Render an inline formset of tags.
Usage:
{% zotero_inline_tags formset[ option] %}
option = "all" | "media" | "formset" | ['Render', 'an', 'inline', 'formset', 'of', 'tags', '.', 'Usage', ':', '{', '%', 'zotero_inline_tags', 'formset', '[', 'option', ']', '%', '}', 'option', '=', 'all', '|', 'media', '|', 'formset'] | train | https://github.com/CulturePlex/django-zotero/blob/de31583a80a2bd2459c118fb5aa767a2842e0b00/django_zotero/templatetags/zotero_inline_extras.py#L10-L32 |
7,652 | apple/turicreate | src/external/xgboost/subtree/rabit/tracker/rabit_mpi.py | mpi_submit | def mpi_submit(nslave, worker_args, worker_envs):
"""
customized submit script, that submit nslave jobs, each must contain args as parameter
note this can be a lambda function containing additional parameters in input
Parameters
nslave number of slave process to start up
args arguments to launch each job
this usually includes the parameters of master_uri and parameters passed into submit
"""
worker_args += ['%s=%s' % (k, str(v)) for k, v in worker_envs.items()]
sargs = ' '.join(args.command + worker_args)
if args.hostfile is None:
cmd = ' '.join(['mpirun -n %d' % (nslave)] + args.command + worker_args)
else:
cmd = ' '.join(['mpirun -n %d --hostfile %s' % (nslave, args.hostfile)] + args.command + worker_args)
print cmd
subprocess.check_call(cmd, shell = True) | python | def mpi_submit(nslave, worker_args, worker_envs):
"""
customized submit script, that submit nslave jobs, each must contain args as parameter
note this can be a lambda function containing additional parameters in input
Parameters
nslave number of slave process to start up
args arguments to launch each job
this usually includes the parameters of master_uri and parameters passed into submit
"""
worker_args += ['%s=%s' % (k, str(v)) for k, v in worker_envs.items()]
sargs = ' '.join(args.command + worker_args)
if args.hostfile is None:
cmd = ' '.join(['mpirun -n %d' % (nslave)] + args.command + worker_args)
else:
cmd = ' '.join(['mpirun -n %d --hostfile %s' % (nslave, args.hostfile)] + args.command + worker_args)
print cmd
subprocess.check_call(cmd, shell = True) | ['def', 'mpi_submit', '(', 'nslave', ',', 'worker_args', ',', 'worker_envs', ')', ':', 'worker_args', '+=', '[', "'%s=%s'", '%', '(', 'k', ',', 'str', '(', 'v', ')', ')', 'for', 'k', ',', 'v', 'in', 'worker_envs', '.', 'items', '(', ')', ']', 'sargs', '=', "' '", '.', 'join', '(', 'args', '.', 'command', '+', 'worker_args', ')', 'if', 'args', '.', 'hostfile', 'is', 'None', ':', 'cmd', '=', "' '", '.', 'join', '(', '[', "'mpirun -n %d'", '%', '(', 'nslave', ')', ']', '+', 'args', '.', 'command', '+', 'worker_args', ')', 'else', ':', 'cmd', '=', "' '", '.', 'join', '(', '[', "'mpirun -n %d --hostfile %s'", '%', '(', 'nslave', ',', 'args', '.', 'hostfile', ')', ']', '+', 'args', '.', 'command', '+', 'worker_args', ')', 'print', 'cmd', 'subprocess', '.', 'check_call', '(', 'cmd', ',', 'shell', '=', 'True', ')'] | customized submit script, that submit nslave jobs, each must contain args as parameter
note this can be a lambda function containing additional parameters in input
Parameters
nslave number of slave process to start up
args arguments to launch each job
this usually includes the parameters of master_uri and parameters passed into submit | ['customized', 'submit', 'script', 'that', 'submit', 'nslave', 'jobs', 'each', 'must', 'contain', 'args', 'as', 'parameter', 'note', 'this', 'can', 'be', 'a', 'lambda', 'function', 'containing', 'additional', 'parameters', 'in', 'input', 'Parameters', 'nslave', 'number', 'of', 'slave', 'process', 'to', 'start', 'up', 'args', 'arguments', 'to', 'launch', 'each', 'job', 'this', 'usually', 'includes', 'the', 'parameters', 'of', 'master_uri', 'and', 'parameters', 'passed', 'into', 'submit'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/tracker/rabit_mpi.py#L24-L40 |
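mpi_submit is a thin string-join around mpirun; a worked reproduction of the command it would print and run for a four-worker job, with a placeholder script name and env values.

command = ['python', 'train.py']                       # stands in for args.command
worker_args = ['max_depth=6']
worker_envs = {'rabit_tracker_uri': '10.0.0.1'}
worker_args += ['%s=%s' % (k, str(v)) for k, v in worker_envs.items()]
print(' '.join(['mpirun -n %d' % 4] + command + worker_args))
# mpirun -n 4 python train.py max_depth=6 rabit_tracker_uri=10.0.0.1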
7,653 | awslabs/sockeye | sockeye/utils.py | plot_attention | def plot_attention(attention_matrix: np.ndarray, source_tokens: List[str], target_tokens: List[str], filename: str):
"""
Uses matplotlib for creating a visualization of the attention matrix.
:param attention_matrix: The attention matrix.
:param source_tokens: A list of source tokens.
:param target_tokens: A list of target tokens.
:param filename: The file to which the attention visualization will be written to.
"""
try:
import matplotlib
except ImportError:
raise RuntimeError("Please install matplotlib.")
matplotlib.use("Agg")
import matplotlib.pyplot as plt
assert attention_matrix.shape[0] == len(target_tokens)
plt.imshow(attention_matrix.transpose(), interpolation="nearest", cmap="Greys")
plt.xlabel("target")
plt.ylabel("source")
plt.gca().set_xticks([i for i in range(0, len(target_tokens))])
plt.gca().set_yticks([i for i in range(0, len(source_tokens))])
plt.gca().set_xticklabels(target_tokens, rotation='vertical')
plt.gca().set_yticklabels(source_tokens)
plt.tight_layout()
plt.savefig(filename)
logger.info("Saved alignment visualization to " + filename) | python | def plot_attention(attention_matrix: np.ndarray, source_tokens: List[str], target_tokens: List[str], filename: str):
"""
Uses matplotlib for creating a visualization of the attention matrix.
:param attention_matrix: The attention matrix.
:param source_tokens: A list of source tokens.
:param target_tokens: A list of target tokens.
:param filename: The file to which the attention visualization will be written to.
"""
try:
import matplotlib
except ImportError:
raise RuntimeError("Please install matplotlib.")
matplotlib.use("Agg")
import matplotlib.pyplot as plt
assert attention_matrix.shape[0] == len(target_tokens)
plt.imshow(attention_matrix.transpose(), interpolation="nearest", cmap="Greys")
plt.xlabel("target")
plt.ylabel("source")
plt.gca().set_xticks([i for i in range(0, len(target_tokens))])
plt.gca().set_yticks([i for i in range(0, len(source_tokens))])
plt.gca().set_xticklabels(target_tokens, rotation='vertical')
plt.gca().set_yticklabels(source_tokens)
plt.tight_layout()
plt.savefig(filename)
logger.info("Saved alignment visualization to " + filename) | ['def', 'plot_attention', '(', 'attention_matrix', ':', 'np', '.', 'ndarray', ',', 'source_tokens', ':', 'List', '[', 'str', ']', ',', 'target_tokens', ':', 'List', '[', 'str', ']', ',', 'filename', ':', 'str', ')', ':', 'try', ':', 'import', 'matplotlib', 'except', 'ImportError', ':', 'raise', 'RuntimeError', '(', '"Please install matplotlib."', ')', 'matplotlib', '.', 'use', '(', '"Agg"', ')', 'import', 'matplotlib', '.', 'pyplot', 'as', 'plt', 'assert', 'attention_matrix', '.', 'shape', '[', '0', ']', '==', 'len', '(', 'target_tokens', ')', 'plt', '.', 'imshow', '(', 'attention_matrix', '.', 'transpose', '(', ')', ',', 'interpolation', '=', '"nearest"', ',', 'cmap', '=', '"Greys"', ')', 'plt', '.', 'xlabel', '(', '"target"', ')', 'plt', '.', 'ylabel', '(', '"source"', ')', 'plt', '.', 'gca', '(', ')', '.', 'set_xticks', '(', '[', 'i', 'for', 'i', 'in', 'range', '(', '0', ',', 'len', '(', 'target_tokens', ')', ')', ']', ')', 'plt', '.', 'gca', '(', ')', '.', 'set_yticks', '(', '[', 'i', 'for', 'i', 'in', 'range', '(', '0', ',', 'len', '(', 'source_tokens', ')', ')', ']', ')', 'plt', '.', 'gca', '(', ')', '.', 'set_xticklabels', '(', 'target_tokens', ',', 'rotation', '=', "'vertical'", ')', 'plt', '.', 'gca', '(', ')', '.', 'set_yticklabels', '(', 'source_tokens', ')', 'plt', '.', 'tight_layout', '(', ')', 'plt', '.', 'savefig', '(', 'filename', ')', 'logger', '.', 'info', '(', '"Saved alignment visualization to "', '+', 'filename', ')'] | Uses matplotlib for creating a visualization of the attention matrix.
:param attention_matrix: The attention matrix.
:param source_tokens: A list of source tokens.
:param target_tokens: A list of target tokens.
:param filename: The file to which the attention visualization will be written to. | ['Uses', 'matplotlib', 'for', 'creating', 'a', 'visualization', 'of', 'the', 'attention', 'matrix', '.'] | train | https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/utils.py#L366-L392 |
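A self-contained call of plot_attention with made-up tokens and a random row-stochastic matrix; the shape must be (len(target), len(source)) to satisfy the assert.

import numpy as np
from sockeye.utils import plot_attention

src = ['das', 'ist', 'gut', '</s>']
trg = ['that', 'is', 'good', '</s>']
att = np.random.dirichlet(np.ones(len(src)), size=len(trg))   # shape (len(trg), len(src))
plot_attention(att, src, trg, 'attention.png')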
7,654 | secdev/scapy | scapy/compat.py | base64_bytes | def base64_bytes(x):
"""Turn base64 into bytes"""
if six.PY2:
return base64.decodestring(x)
return base64.decodebytes(bytes_encode(x)) | python | def base64_bytes(x):
"""Turn base64 into bytes"""
if six.PY2:
return base64.decodestring(x)
return base64.decodebytes(bytes_encode(x)) | ['def', 'base64_bytes', '(', 'x', ')', ':', 'if', 'six', '.', 'PY2', ':', 'return', 'base64', '.', 'decodestring', '(', 'x', ')', 'return', 'base64', '.', 'decodebytes', '(', 'bytes_encode', '(', 'x', ')', ')'] | Turn base64 into bytes | ['Turn', 'base64', 'into', 'bytes'] | train | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/compat.py#L88-L92 |
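A quick check of base64_bytes; it accepts str or bytes input on either Python major version.

from scapy.compat import base64_bytes

assert base64_bytes('aGVsbG8=') == b'hello'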
7,655 | klmitch/bark | bark/handlers.py | socket_handler | def socket_handler(name, logname, host, port):
"""
A Bark logging handler logging output to a stream (TCP) socket.
The server listening at the given 'host' and 'port' will be sent a
pickled dictionary.
Similar to logging.handlers.SocketHandler.
"""
return wrap_log_handler(logging.handlers.SocketHandler(host, port)) | python | def socket_handler(name, logname, host, port):
"""
A Bark logging handler logging output to a stream (TCP) socket.
The server listening at the given 'host' and 'port' will be sent a
pickled dictionary.
Similar to logging.handlers.SocketHandler.
"""
return wrap_log_handler(logging.handlers.SocketHandler(host, port)) | ['def', 'socket_handler', '(', 'name', ',', 'logname', ',', 'host', ',', 'port', ')', ':', 'return', 'wrap_log_handler', '(', 'logging', '.', 'handlers', '.', 'SocketHandler', '(', 'host', ',', 'port', ')', ')'] | A Bark logging handler logging output to a stream (TCP) socket.
The server listening at the given 'host' and 'port' will be sent a
pickled dictionary.
Similar to logging.handlers.SocketHandler. | ['A', 'Bark', 'logging', 'handler', 'logging', 'output', 'to', 'a', 'stream', '(', 'TCP', ')', 'socket', '.', 'The', 'server', 'listening', 'at', 'the', 'given', 'host', 'and', 'port', 'will', 'be', 'sent', 'a', 'pickled', 'dictionary', '.'] | train | https://github.com/klmitch/bark/blob/6e0e002d55f01fee27e3e45bb86e30af1bfeef36/bark/handlers.py#L298-L307 |
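For context, a minimal standard-library sketch of the SocketHandler that the factory above wraps (the host, port, and logger name are placeholders; a pickle-aware log server must be listening on that port):

import logging
import logging.handlers

logger = logging.getLogger("bark.example")
logger.setLevel(logging.INFO)
# Each record sent through this handler is pickled and streamed over TCP.
logger.addHandler(logging.handlers.SocketHandler("localhost", 9020))
logger.info("hello over the wire")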
7,656 | pixelogik/NearPy | nearpy/storage/storage_mongo.py | MongoStorage.store_vector | def store_vector(self, hash_name, bucket_key, v, data):
"""
Stores vector and JSON-serializable data in MongoDB with specified key.
"""
mongo_key = self._format_mongo_key(hash_name, bucket_key)
val_dict = {}
val_dict['lsh'] = mongo_key
# Depending on type (sparse or not) fill value dict
if scipy.sparse.issparse(v):
# Make sure that we are using COO format (easy to handle)
if not scipy.sparse.isspmatrix_coo(v):
v = scipy.sparse.coo_matrix(v)
# Construct list of [index, value] items,
# one for each non-zero element of the sparse vector
encoded_values = []
for k in range(v.data.size):
row_index = v.row[k]
value = v.data[k]
encoded_values.append([int(row_index), value])
val_dict['sparse'] = 1
val_dict['nonzeros'] = encoded_values
val_dict['dim'] = v.shape[0]
else:
# Make sure it is a 1d vector
v = numpy.reshape(v, v.shape[0])
val_dict['vector'] = v.tostring()
val_dict['dtype'] = v.dtype.name
# Add data if set
if data is not None:
val_dict['data'] = data
# Push JSON representation of dict to end of bucket list
self.mongo_object.insert_one(val_dict) | python | def store_vector(self, hash_name, bucket_key, v, data):
"""
Stores vector and JSON-serializable data in MongoDB with specified key.
"""
mongo_key = self._format_mongo_key(hash_name, bucket_key)
val_dict = {}
val_dict['lsh'] = mongo_key
# Depending on type (sparse or not) fill value dict
if scipy.sparse.issparse(v):
# Make sure that we are using COO format (easy to handle)
if not scipy.sparse.isspmatrix_coo(v):
v = scipy.sparse.coo_matrix(v)
# Construct list of [index, value] items,
# one for each non-zero element of the sparse vector
encoded_values = []
for k in range(v.data.size):
row_index = v.row[k]
value = v.data[k]
encoded_values.append([int(row_index), value])
val_dict['sparse'] = 1
val_dict['nonzeros'] = encoded_values
val_dict['dim'] = v.shape[0]
else:
# Make sure it is a 1d vector
v = numpy.reshape(v, v.shape[0])
val_dict['vector'] = v.tostring()
val_dict['dtype'] = v.dtype.name
# Add data if set
if data is not None:
val_dict['data'] = data
# Push JSON representation of dict to end of bucket list
self.mongo_object.insert_one(val_dict) | ['def', 'store_vector', '(', 'self', ',', 'hash_name', ',', 'bucket_key', ',', 'v', ',', 'data', ')', ':', 'mongo_key', '=', 'self', '.', '_format_mongo_key', '(', 'hash_name', ',', 'bucket_key', ')', 'val_dict', '=', '{', '}', 'val_dict', '[', "'lsh'", ']', '=', 'mongo_key', '# Depending on type (sparse or not) fill value dict', 'if', 'scipy', '.', 'sparse', '.', 'issparse', '(', 'v', ')', ':', '# Make sure that we are using COO format (easy to handle)', 'if', 'not', 'scipy', '.', 'sparse', '.', 'isspmatrix_coo', '(', 'v', ')', ':', 'v', '=', 'scipy', '.', 'sparse', '.', 'coo_matrix', '(', 'v', ')', '# Construct list of [index, value] items,', '# one for each non-zero element of the sparse vector', 'encoded_values', '=', '[', ']', 'for', 'k', 'in', 'range', '(', 'v', '.', 'data', '.', 'size', ')', ':', 'row_index', '=', 'v', '.', 'row', '[', 'k', ']', 'value', '=', 'v', '.', 'data', '[', 'k', ']', 'encoded_values', '.', 'append', '(', '[', 'int', '(', 'row_index', ')', ',', 'value', ']', ')', 'val_dict', '[', "'sparse'", ']', '=', '1', 'val_dict', '[', "'nonzeros'", ']', '=', 'encoded_values', 'val_dict', '[', "'dim'", ']', '=', 'v', '.', 'shape', '[', '0', ']', 'else', ':', '# Make sure it is a 1d vector', 'v', '=', 'numpy', '.', 'reshape', '(', 'v', ',', 'v', '.', 'shape', '[', '0', ']', ')', 'val_dict', '[', "'vector'", ']', '=', 'v', '.', 'tostring', '(', ')', 'val_dict', '[', "'dtype'", ']', '=', 'v', '.', 'dtype', '.', 'name', '# Add data if set', 'if', 'data', 'is', 'not', 'None', ':', 'val_dict', '[', "'data'", ']', '=', 'data', '# Push JSON representation of dict to end of bucket list', 'self', '.', 'mongo_object', '.', 'insert_one', '(', 'val_dict', ')'] | Stores vector and JSON-serializable data in MongoDB with specified key. | ['Stores', 'vector', 'and', 'JSON', '-', 'serializable', 'data', 'in', 'MongoDB', 'with', 'specified', 'key', '.'] | train | https://github.com/pixelogik/NearPy/blob/1b534b864d320d875508e95cd2b76b6d8c07a90b/nearpy/storage/storage_mongo.py#L48-L87 |
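A self-contained sketch of the sparse branch above — converting a SciPy vector to COO and building the [index, value] pairs stored under 'nonzeros' (illustrative only, not the NearPy API itself):

import scipy.sparse

# A sparse column vector with non-zeros at rows 2 and 5.
v = scipy.sparse.csr_matrix([[0.0], [0.0], [1.5], [0.0], [0.0], [-2.0]])
if not scipy.sparse.isspmatrix_coo(v):
    v = scipy.sparse.coo_matrix(v)

encoded_values = [[int(row), float(val)] for row, val in zip(v.row, v.data)]
print(encoded_values)  # [[2, 1.5], [5, -2.0]] -> stored as 'nonzeros'
print(v.shape[0])      # 6 -> stored as 'dim'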
7,657 | chriso/gauged | gauged/writer.py | Writer.flush | def flush(self):
"""Flush all pending gauges"""
writer = self.writer
if writer is None:
raise GaugedUseAfterFreeError
self.flush_writer_position()
keys = self.translate_keys()
blocks = []
current_block = self.current_block
statistics = self.statistics
driver = self.driver
flags = 0 # for future extensions, e.g. block compression
for namespace, key, block in self.pending_blocks():
length = block.byte_length()
if not length:
continue
key_id = keys[(namespace, key)]
statistics[namespace].byte_count += length
blocks.append((namespace, current_block, key_id, block.buffer(),
flags))
if self.config.overwrite_blocks:
driver.replace_blocks(blocks)
else:
driver.insert_or_append_blocks(blocks)
if not Gauged.writer_flush_maps(writer, True):
raise MemoryError
update_namespace = driver.add_namespace_statistics
for namespace, stats in statistics.iteritems():
update_namespace(namespace, self.current_block,
stats.data_points, stats.byte_count)
statistics.clear()
driver.commit()
self.flush_now = False | python | def flush(self):
"""Flush all pending gauges"""
writer = self.writer
if writer is None:
raise GaugedUseAfterFreeError
self.flush_writer_position()
keys = self.translate_keys()
blocks = []
current_block = self.current_block
statistics = self.statistics
driver = self.driver
flags = 0 # for future extensions, e.g. block compression
for namespace, key, block in self.pending_blocks():
length = block.byte_length()
if not length:
continue
key_id = keys[(namespace, key)]
statistics[namespace].byte_count += length
blocks.append((namespace, current_block, key_id, block.buffer(),
flags))
if self.config.overwrite_blocks:
driver.replace_blocks(blocks)
else:
driver.insert_or_append_blocks(blocks)
if not Gauged.writer_flush_maps(writer, True):
raise MemoryError
update_namespace = driver.add_namespace_statistics
for namespace, stats in statistics.iteritems():
update_namespace(namespace, self.current_block,
stats.data_points, stats.byte_count)
statistics.clear()
driver.commit()
self.flush_now = False | ['def', 'flush', '(', 'self', ')', ':', 'writer', '=', 'self', '.', 'writer', 'if', 'writer', 'is', 'None', ':', 'raise', 'GaugedUseAfterFreeError', 'self', '.', 'flush_writer_position', '(', ')', 'keys', '=', 'self', '.', 'translate_keys', '(', ')', 'blocks', '=', '[', ']', 'current_block', '=', 'self', '.', 'current_block', 'statistics', '=', 'self', '.', 'statistics', 'driver', '=', 'self', '.', 'driver', 'flags', '=', '0', '# for future extensions, e.g. block compression', 'for', 'namespace', ',', 'key', ',', 'block', 'in', 'self', '.', 'pending_blocks', '(', ')', ':', 'length', '=', 'block', '.', 'byte_length', '(', ')', 'if', 'not', 'length', ':', 'continue', 'key_id', '=', 'keys', '[', '(', 'namespace', ',', 'key', ')', ']', 'statistics', '[', 'namespace', ']', '.', 'byte_count', '+=', 'length', 'blocks', '.', 'append', '(', '(', 'namespace', ',', 'current_block', ',', 'key_id', ',', 'block', '.', 'buffer', '(', ')', ',', 'flags', ')', ')', 'if', 'self', '.', 'config', '.', 'overwrite_blocks', ':', 'driver', '.', 'replace_blocks', '(', 'blocks', ')', 'else', ':', 'driver', '.', 'insert_or_append_blocks', '(', 'blocks', ')', 'if', 'not', 'Gauged', '.', 'writer_flush_maps', '(', 'writer', ',', 'True', ')', ':', 'raise', 'MemoryError', 'update_namespace', '=', 'driver', '.', 'add_namespace_statistics', 'for', 'namespace', ',', 'stats', 'in', 'statistics', '.', 'iteritems', '(', ')', ':', 'update_namespace', '(', 'namespace', ',', 'self', '.', 'current_block', ',', 'stats', '.', 'data_points', ',', 'stats', '.', 'byte_count', ')', 'statistics', '.', 'clear', '(', ')', 'driver', '.', 'commit', '(', ')', 'self', '.', 'flush_now', '=', 'False'] | Flush all pending gauges | ['Flush', 'all', 'pending', 'gauges'] | train | https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/writer.py#L130-L162 |
7,658 | bapakode/OmMongo | ommongo/fields/mapping.py | KVField.wrap | def wrap(self, value):
''' Expects a dictionary with the keys being instances of ``KVField.key_type``
and the values being instances of ``KVField.value_type``. After validation,
the dictionary is transformed into a list of dictionaries with ``k`` and ``v``
fields set to the keys and values from the original dictionary.
'''
self.validate_wrap(value)
ret = []
for k, v in value.items():
k = self.key_type.wrap(k)
v = self.value_type.wrap(v)
ret.append( { 'k' : k, 'v' : v })
return ret | python | def wrap(self, value):
''' Expects a dictionary with the keys being instances of ``KVField.key_type``
and the values being instances of ``KVField.value_type``. After validation,
the dictionary is transformed into a list of dictionaries with ``k`` and ``v``
fields set to the keys and values from the original dictionary.
'''
self.validate_wrap(value)
ret = []
for k, v in value.items():
k = self.key_type.wrap(k)
v = self.value_type.wrap(v)
ret.append( { 'k' : k, 'v' : v })
return ret | ['def', 'wrap', '(', 'self', ',', 'value', ')', ':', 'self', '.', 'validate_wrap', '(', 'value', ')', 'ret', '=', '[', ']', 'for', 'k', ',', 'v', 'in', 'value', '.', 'items', '(', ')', ':', 'k', '=', 'self', '.', 'key_type', '.', 'wrap', '(', 'k', ')', 'v', '=', 'self', '.', 'value_type', '.', 'wrap', '(', 'v', ')', 'ret', '.', 'append', '(', '{', "'k'", ':', 'k', ',', "'v'", ':', 'v', '}', ')', 'return', 'ret'] | Expects a dictionary with the keys being instances of ``KVField.key_type``
and the values being instances of ``KVField.value_type``. After validation,
the dictionary is transformed into a list of dictionaries with ``k`` and ``v``
fields set to the keys and values from the original dictionary. | ['Expects', 'a', 'dictionary', 'with', 'the', 'keys', 'being', 'instances', 'of', 'KVField', '.', 'key_type', 'and', 'the', 'values', 'being', 'instances', 'of', 'KVField', '.', 'value_type', '.', 'After', 'validation', 'the', 'dictionary', 'is', 'transformed', 'into', 'a', 'list', 'of', 'dictionaries', 'with', 'k', 'and', 'v', 'fields', 'set', 'to', 'the', 'keys', 'and', 'values', 'from', 'the', 'original', 'dictionary', '.'] | train | https://github.com/bapakode/OmMongo/blob/52b5a5420516dc709f2d2eb065818c7973991ce3/ommongo/fields/mapping.py#L203-L215 |
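A plain-Python sketch of the key/value transformation that wrap performs above, with the key_type/value_type wrapping omitted for brevity:

def wrap_dict(value):
    # Each (key, value) pair becomes a {'k': ..., 'v': ...} document,
    # which is the shape OmMongo stores for a KVField.
    return [{'k': k, 'v': v} for k, v in value.items()]

print(wrap_dict({'a': 1, 'b': 2}))
# [{'k': 'a', 'v': 1}, {'k': 'b', 'v': 2}]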
7,659 | EelcoHoogendoorn/Numpy_arraysetops_EP | numpy_indexed/arraysetops.py | remap | def remap(input, keys, values, missing='ignore', inplace=False):
"""Given an input array, remap its entries corresponding to 'keys' to 'values'
equivalent of output = [map.get(i, default=i) for i in input],
if map were a dictionary of corresponding keys and values
Parameters
----------
input : ndarray, [...]
values to perform replacements in
    keys : ndarray, [...]
        the entries of 'input' to be replaced
    values : ndarray, [...]
        the replacement values, matched one-to-one with 'keys'
    missing : {'raise', 'ignore'}
        if `missing` is 'raise', a KeyError is raised if 'input' contains elements not present in 'keys'
        if `missing` is 'ignore', only elements of 'input' present in 'keys' are remapped
inplace : bool, optional
if True, input array is remapped in place
if false, a copy is returned
Returns
-------
output : ndarray, [...]
like 'input', but with elements remapped according to the mapping defined by 'keys' and 'values'
"""
input = np.asarray(input) # FIXME: currently instances of Index are not allowed
values = np.asarray(values)
if missing == 'ignore':
idx = indices(keys, input, missing='mask')
mask = np.logical_not(idx.mask)
idx = idx.data
elif missing == 'raise':
idx = indices(keys, input, missing='raise')
mask = Ellipsis
else:
raise ValueError("'missing' should be either 'ignore' or 'raise'")
output = input if inplace else input.copy()
output[mask] = values[idx[mask]]
return output | python | def remap(input, keys, values, missing='ignore', inplace=False):
"""Given an input array, remap its entries corresponding to 'keys' to 'values'
equivalent of output = [map.get(i, default=i) for i in input],
if map were a dictionary of corresponding keys and values
Parameters
----------
input : ndarray, [...]
values to perform replacements in
    keys : ndarray, [...]
        the entries of 'input' to be replaced
    values : ndarray, [...]
        the replacement values, matched one-to-one with 'keys'
    missing : {'raise', 'ignore'}
        if `missing` is 'raise', a KeyError is raised if 'input' contains elements not present in 'keys'
        if `missing` is 'ignore', only elements of 'input' present in 'keys' are remapped
inplace : bool, optional
if True, input array is remapped in place
if false, a copy is returned
Returns
-------
output : ndarray, [...]
like 'input', but with elements remapped according to the mapping defined by 'keys' and 'values'
"""
input = np.asarray(input) # FIXME: currently instances of Index are not allowed
values = np.asarray(values)
if missing == 'ignore':
idx = indices(keys, input, missing='mask')
mask = np.logical_not(idx.mask)
idx = idx.data
elif missing == 'raise':
idx = indices(keys, input, missing='raise')
mask = Ellipsis
else:
raise ValueError("'missing' should be either 'ignore' or 'raise'")
output = input if inplace else input.copy()
output[mask] = values[idx[mask]]
return output | ['def', 'remap', '(', 'input', ',', 'keys', ',', 'values', ',', 'missing', '=', "'ignore'", ',', 'inplace', '=', 'False', ')', ':', 'input', '=', 'np', '.', 'asarray', '(', 'input', ')', '# FIXME: currently instances of Index are not allowed', 'values', '=', 'np', '.', 'asarray', '(', 'values', ')', 'if', 'missing', '==', "'ignore'", ':', 'idx', '=', 'indices', '(', 'keys', ',', 'input', ',', 'missing', '=', "'mask'", ')', 'mask', '=', 'np', '.', 'logical_not', '(', 'idx', '.', 'mask', ')', 'idx', '=', 'idx', '.', 'data', 'elif', 'missing', '==', "'raise'", ':', 'idx', '=', 'indices', '(', 'keys', ',', 'input', ',', 'missing', '=', "'raise'", ')', 'mask', '=', 'Ellipsis', 'else', ':', 'raise', 'ValueError', '(', '"\'missing\' should be either \'ignore\' or \'raise\'"', ')', 'output', '=', 'input', 'if', 'inplace', 'else', 'input', '.', 'copy', '(', ')', 'output', '[', 'mask', ']', '=', 'values', '[', 'idx', '[', 'mask', ']', ']', 'return', 'output'] | Given an input array, remap its entries corresponding to 'keys' to 'values'
equivalent of output = [map.get(i, default=i) for i in input],
if map were a dictionary of corresponding keys and values
Parameters
----------
input : ndarray, [...]
values to perform replacements in
    keys : ndarray, [...]
        the entries of 'input' to be replaced
    values : ndarray, [...]
        the replacement values, matched one-to-one with 'keys'
    missing : {'raise', 'ignore'}
        if `missing` is 'raise', a KeyError is raised if 'input' contains elements not present in 'keys'
        if `missing` is 'ignore', only elements of 'input' present in 'keys' are remapped
inplace : bool, optional
if True, input array is remapped in place
if false, a copy is returned
Returns
-------
output : ndarray, [...]
like 'input', but with elements remapped according to the mapping defined by 'keys' and 'values' | ['Given', 'an', 'input', 'array', 'remap', 'its', 'entries', 'corresponding', 'to', 'keys', 'to', 'values', 'equivalent', 'of', 'output', '=', '[', 'map', '.', 'get', '(', 'i', 'default', '=', 'i', ')', 'for', 'i', 'in', 'input', ']', 'if', 'map', 'were', 'a', 'dictionary', 'of', 'corresponding', 'keys', 'and', 'values'] | train | https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/arraysetops.py#L167-L205 |
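An illustrative call to remap from the row above (the arrays are made up, and the top-level import path is assumed):

import numpy as np
from numpy_indexed import remap

arr = np.array([1, 2, 3, 4, 2])
keys = np.array([2, 4])
values = np.array([20, 40])

# Entries of `arr` found in `keys` are replaced by the matching `values`;
# everything else passes through unchanged (missing='ignore' is the default).
print(remap(arr, keys, values))  # [ 1 20  3 40 20]
# With missing='raise', the 1 and 3 (absent from `keys`) would raise a KeyError instead.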
7,660 | manns/pyspread | pyspread/src/lib/vlc.py | libvlc_video_set_teletext | def libvlc_video_set_teletext(p_mi, i_page):
'''Set new teletext page to retrieve.
@param p_mi: the media player.
    @param i_page: teletext page number requested.
'''
f = _Cfunctions.get('libvlc_video_set_teletext', None) or \
_Cfunction('libvlc_video_set_teletext', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_int)
return f(p_mi, i_page) | python | def libvlc_video_set_teletext(p_mi, i_page):
'''Set new teletext page to retrieve.
@param p_mi: the media player.
    @param i_page: teletext page number requested.
'''
f = _Cfunctions.get('libvlc_video_set_teletext', None) or \
_Cfunction('libvlc_video_set_teletext', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_int)
return f(p_mi, i_page) | ['def', 'libvlc_video_set_teletext', '(', 'p_mi', ',', 'i_page', ')', ':', 'f', '=', '_Cfunctions', '.', 'get', '(', "'libvlc_video_set_teletext'", ',', 'None', ')', 'or', '_Cfunction', '(', "'libvlc_video_set_teletext'", ',', '(', '(', '1', ',', ')', ',', '(', '1', ',', ')', ',', ')', ',', 'None', ',', 'None', ',', 'MediaPlayer', ',', 'ctypes', '.', 'c_int', ')', 'return', 'f', '(', 'p_mi', ',', 'i_page', ')'] | Set new teletext page to retrieve.
@param p_mi: the media player.
    @param i_page: teletext page number requested.
7,661 | ev3dev/ev3dev-lang-python | ev3dev2/led.py | duration_expired | def duration_expired(start_time, duration_seconds):
"""
Return True if ``duration_seconds`` have expired since ``start_time``
"""
if duration_seconds is not None:
delta_seconds = datetime_delta_to_seconds(dt.datetime.now() - start_time)
if delta_seconds >= duration_seconds:
return True
return False | python | def duration_expired(start_time, duration_seconds):
"""
Return True if ``duration_seconds`` have expired since ``start_time``
"""
if duration_seconds is not None:
delta_seconds = datetime_delta_to_seconds(dt.datetime.now() - start_time)
if delta_seconds >= duration_seconds:
return True
return False | ['def', 'duration_expired', '(', 'start_time', ',', 'duration_seconds', ')', ':', 'if', 'duration_seconds', 'is', 'not', 'None', ':', 'delta_seconds', '=', 'datetime_delta_to_seconds', '(', 'dt', '.', 'datetime', '.', 'now', '(', ')', '-', 'start_time', ')', 'if', 'delta_seconds', '>=', 'duration_seconds', ':', 'return', 'True', 'return', 'False'] | Return True if ``duration_seconds`` have expired since ``start_time`` | ['Return', 'True', 'if', 'duration_seconds', 'have', 'expired', 'since', 'start_time'] | train | https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/led.py#L80-L91 |
7,662 | saltstack/salt | salt/modules/smartos_imgadm.py | docker_to_uuid | def docker_to_uuid(uuid):
'''
Get the image uuid from an imported docker image
.. versionadded:: 2019.2.0
'''
if _is_uuid(uuid):
return uuid
if _is_docker_uuid(uuid):
images = list_installed(verbose=True)
for image_uuid in images:
if 'name' not in images[image_uuid]:
continue
if images[image_uuid]['name'] == uuid:
return image_uuid
return None | python | def docker_to_uuid(uuid):
'''
Get the image uuid from an imported docker image
.. versionadded:: 2019.2.0
'''
if _is_uuid(uuid):
return uuid
if _is_docker_uuid(uuid):
images = list_installed(verbose=True)
for image_uuid in images:
if 'name' not in images[image_uuid]:
continue
if images[image_uuid]['name'] == uuid:
return image_uuid
return None | ['def', 'docker_to_uuid', '(', 'uuid', ')', ':', 'if', '_is_uuid', '(', 'uuid', ')', ':', 'return', 'uuid', 'if', '_is_docker_uuid', '(', 'uuid', ')', ':', 'images', '=', 'list_installed', '(', 'verbose', '=', 'True', ')', 'for', 'image_uuid', 'in', 'images', ':', 'if', "'name'", 'not', 'in', 'images', '[', 'image_uuid', ']', ':', 'continue', 'if', 'images', '[', 'image_uuid', ']', '[', "'name'", ']', '==', 'uuid', ':', 'return', 'image_uuid', 'return', 'None'] | Get the image uuid from an imported docker image
.. versionadded:: 2019.2.0 | ['Get', 'the', 'image', 'uuid', 'from', 'an', 'imported', 'docker', 'image'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/smartos_imgadm.py#L155-L170 |
7,663 | ppo/django-guitar | guitar/utils/admin.py | list_insert | def list_insert(lst, new_elements, index_or_name=None, after=True):
"""
Return a copy of the list with the new element(s) inserted.
Args:
lst (list): The original list.
new_elements ("any" or list of "any"): The element(s) to insert in the list.
index_or_name (int or str): The value of the reference element, or directly its numeric index.
Default: None (=append).
after (bool): Whether to insert the new elements before or after the reference element. Default: True.
Returns:
(list) A copy of the original list containing the new element(s).
"""
if index_or_name is None:
index = None
else:
try:
index = get_list_index(lst, index_or_name)
except ValueError:
index = None
to_return = lst[:]
if index is None: # Append.
to_return += new_elements
elif index == 0: # Prepend.
to_return = new_elements + to_return
else:
if after:
index += 1
to_return = to_return[:index] + new_elements + to_return[index:]
return to_return | python | def list_insert(lst, new_elements, index_or_name=None, after=True):
"""
Return a copy of the list with the new element(s) inserted.
Args:
lst (list): The original list.
new_elements ("any" or list of "any"): The element(s) to insert in the list.
index_or_name (int or str): The value of the reference element, or directly its numeric index.
Default: None (=append).
after (bool): Whether to insert the new elements before or after the reference element. Default: True.
Returns:
(list) A copy of the original list containing the new element(s).
"""
if index_or_name is None:
index = None
else:
try:
index = get_list_index(lst, index_or_name)
except ValueError:
index = None
to_return = lst[:]
if index is None: # Append.
to_return += new_elements
elif index == 0: # Prepend.
to_return = new_elements + to_return
else:
if after:
index += 1
to_return = to_return[:index] + new_elements + to_return[index:]
return to_return | ['def', 'list_insert', '(', 'lst', ',', 'new_elements', ',', 'index_or_name', '=', 'None', ',', 'after', '=', 'True', ')', ':', 'if', 'index_or_name', 'is', 'None', ':', 'index', '=', 'None', 'else', ':', 'try', ':', 'index', '=', 'get_list_index', '(', 'lst', ',', 'index_or_name', ')', 'except', 'ValueError', ':', 'index', '=', 'None', 'to_return', '=', 'lst', '[', ':', ']', 'if', 'index', 'is', 'None', ':', '# Append.', 'to_return', '+=', 'new_elements', 'elif', 'index', '==', '0', ':', '# Prepend.', 'to_return', '=', 'new_elements', '+', 'to_return', 'else', ':', 'if', 'after', ':', 'index', '+=', '1', 'to_return', '=', 'to_return', '[', ':', 'index', ']', '+', 'new_elements', '+', 'to_return', '[', 'index', ':', ']', 'return', 'to_return'] | Return a copy of the list with the new element(s) inserted.
Args:
lst (list): The original list.
new_elements ("any" or list of "any"): The element(s) to insert in the list.
index_or_name (int or str): The value of the reference element, or directly its numeric index.
Default: None (=append).
after (bool): Whether to insert the new elements before or after the reference element. Default: True.
Returns:
(list) A copy of the original list containing the new element(s). | ['Return', 'a', 'copy', 'of', 'the', 'list', 'with', 'the', 'new', 'element', '(', 's', ')', 'inserted', '.'] | train | https://github.com/ppo/django-guitar/blob/857282219c0c4ff5907c3ad04ef012281d245348/guitar/utils/admin.py#L173-L204 |
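An illustrative call to list_insert from the row above (a sketch: it assumes get_list_index resolves a string to the index of the matching element, as the docstring describes, and the field list is invented):

from guitar.utils.admin import list_insert

fields = ['title', 'slug', 'body', 'created']

# Insert after the element whose value is 'slug' (after=True is the default).
print(list_insert(fields, ['status'], 'slug'))
# ['title', 'slug', 'status', 'body', 'created']

# A numeric index of 0 prepends regardless of `after`.
print(list_insert(fields, ['pk'], 0, after=False))
# ['pk', 'title', 'slug', 'body', 'created']

# With no reference element the new items are appended.
print(list_insert(fields, ['updated']))
# ['title', 'slug', 'body', 'created', 'updated']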
7,664 | greyli/flask-ckeditor | flask_ckeditor/__init__.py | _CKEditor.config | def config(name='ckeditor', custom_config='', **kwargs):
"""Config CKEditor.
    :param name: The target input field's name. If you use Flask-WTF/WTForms, it needs to be set
    to the field's name. Defaults to ``'ckeditor'``.
    :param custom_config: The additional config, for example ``uiColor: '#9AB8F3'``.
The proper syntax for each option is ``configuration name : configuration value``.
You can use comma to separate multiple key-value pairs. See the list of available
configuration settings on
`CKEditor documentation <https://docs.ckeditor.com/ckeditor4/docs/#!/api/CKEDITOR.config>`_.
:param kwargs: Mirror arguments to overwritten configuration variables, see docs for more details.
.. versionadded:: 0.3
"""
extra_plugins = kwargs.get('extra_plugins', current_app.config['CKEDITOR_EXTRA_PLUGINS'])
file_uploader = kwargs.get('file_uploader', current_app.config['CKEDITOR_FILE_UPLOADER'])
file_browser = kwargs.get('file_browser', current_app.config['CKEDITOR_FILE_BROWSER'])
if file_uploader != '':
file_uploader = get_url(file_uploader)
if file_browser != '':
file_browser = get_url(file_browser)
if file_uploader or file_browser and 'filebrowser' not in extra_plugins:
extra_plugins.append('filebrowser')
language = kwargs.get('language', current_app.config['CKEDITOR_LANGUAGE'])
height = kwargs.get('height', current_app.config['CKEDITOR_HEIGHT'])
width = kwargs.get('width', current_app.config['CKEDITOR_WIDTH'])
code_theme = kwargs.get('code_theme', current_app.config['CKEDITOR_CODE_THEME'])
wrong_key_arg = kwargs.get('codesnippet', None)
if wrong_key_arg:
warnings.warn('Argument codesnippet was renamed to enable_codesnippet and will be removed in future.')
enable_codesnippet = kwargs.get('enable_codesnippet', wrong_key_arg) or \
current_app.config['CKEDITOR_ENABLE_CODESNIPPET']
if enable_codesnippet and 'codesnippet' not in extra_plugins:
extra_plugins.append('codesnippet')
enable_csrf = kwargs.get('enable_csrf', current_app.config['CKEDITOR_ENABLE_CSRF'])
if enable_csrf:
if 'csrf' not in current_app.extensions:
raise RuntimeError("CSRFProtect is not initialized. It's required to enable CSRF protect, \
see docs for more details.")
csrf_header = render_template_string('''
fileTools_requestHeaders: {
'X-CSRFToken': '{{ csrf_token() }}',
},''')
else:
csrf_header = ''
return Markup('''
<script type="text/javascript">
CKEDITOR.replace( "%s", {
language: "%s",
height: %s,
width: %s,
codeSnippet_theme: "%s",
imageUploadUrl: "%s",
filebrowserUploadUrl: "%s",
filebrowserBrowseUrl: "%s",
extraPlugins: "%s",
%s // CSRF token header for XHR request
%s
});
</script>''' % (
name, language, height, width, code_theme, file_uploader, file_uploader, file_browser,
','.join(extra_plugins), csrf_header, custom_config)) | python | def config(name='ckeditor', custom_config='', **kwargs):
"""Config CKEditor.
    :param name: The target input field's name. If you use Flask-WTF/WTForms, it needs to be set
    to the field's name. Defaults to ``'ckeditor'``.
    :param custom_config: The additional config, for example ``uiColor: '#9AB8F3'``.
The proper syntax for each option is ``configuration name : configuration value``.
You can use comma to separate multiple key-value pairs. See the list of available
configuration settings on
`CKEditor documentation <https://docs.ckeditor.com/ckeditor4/docs/#!/api/CKEDITOR.config>`_.
:param kwargs: Mirror arguments to overwritten configuration variables, see docs for more details.
.. versionadded:: 0.3
"""
extra_plugins = kwargs.get('extra_plugins', current_app.config['CKEDITOR_EXTRA_PLUGINS'])
file_uploader = kwargs.get('file_uploader', current_app.config['CKEDITOR_FILE_UPLOADER'])
file_browser = kwargs.get('file_browser', current_app.config['CKEDITOR_FILE_BROWSER'])
if file_uploader != '':
file_uploader = get_url(file_uploader)
if file_browser != '':
file_browser = get_url(file_browser)
if file_uploader or file_browser and 'filebrowser' not in extra_plugins:
extra_plugins.append('filebrowser')
language = kwargs.get('language', current_app.config['CKEDITOR_LANGUAGE'])
height = kwargs.get('height', current_app.config['CKEDITOR_HEIGHT'])
width = kwargs.get('width', current_app.config['CKEDITOR_WIDTH'])
code_theme = kwargs.get('code_theme', current_app.config['CKEDITOR_CODE_THEME'])
wrong_key_arg = kwargs.get('codesnippet', None)
if wrong_key_arg:
warnings.warn('Argument codesnippet was renamed to enable_codesnippet and will be removed in future.')
enable_codesnippet = kwargs.get('enable_codesnippet', wrong_key_arg) or \
current_app.config['CKEDITOR_ENABLE_CODESNIPPET']
if enable_codesnippet and 'codesnippet' not in extra_plugins:
extra_plugins.append('codesnippet')
enable_csrf = kwargs.get('enable_csrf', current_app.config['CKEDITOR_ENABLE_CSRF'])
if enable_csrf:
if 'csrf' not in current_app.extensions:
raise RuntimeError("CSRFProtect is not initialized. It's required to enable CSRF protect, \
see docs for more details.")
csrf_header = render_template_string('''
fileTools_requestHeaders: {
'X-CSRFToken': '{{ csrf_token() }}',
},''')
else:
csrf_header = ''
return Markup('''
<script type="text/javascript">
CKEDITOR.replace( "%s", {
language: "%s",
height: %s,
width: %s,
codeSnippet_theme: "%s",
imageUploadUrl: "%s",
filebrowserUploadUrl: "%s",
filebrowserBrowseUrl: "%s",
extraPlugins: "%s",
%s // CSRF token header for XHR request
%s
});
</script>''' % (
name, language, height, width, code_theme, file_uploader, file_uploader, file_browser,
','.join(extra_plugins), csrf_header, custom_config)) | ['def', 'config', '(', 'name', '=', "'ckeditor'", ',', 'custom_config', '=', "''", ',', '*', '*', 'kwargs', ')', ':', 'extra_plugins', '=', 'kwargs', '.', 'get', '(', "'extra_plugins'", ',', 'current_app', '.', 'config', '[', "'CKEDITOR_EXTRA_PLUGINS'", ']', ')', 'file_uploader', '=', 'kwargs', '.', 'get', '(', "'file_uploader'", ',', 'current_app', '.', 'config', '[', "'CKEDITOR_FILE_UPLOADER'", ']', ')', 'file_browser', '=', 'kwargs', '.', 'get', '(', "'file_browser'", ',', 'current_app', '.', 'config', '[', "'CKEDITOR_FILE_BROWSER'", ']', ')', 'if', 'file_uploader', '!=', "''", ':', 'file_uploader', '=', 'get_url', '(', 'file_uploader', ')', 'if', 'file_browser', '!=', "''", ':', 'file_browser', '=', 'get_url', '(', 'file_browser', ')', 'if', 'file_uploader', 'or', 'file_browser', 'and', "'filebrowser'", 'not', 'in', 'extra_plugins', ':', 'extra_plugins', '.', 'append', '(', "'filebrowser'", ')', 'language', '=', 'kwargs', '.', 'get', '(', "'language'", ',', 'current_app', '.', 'config', '[', "'CKEDITOR_LANGUAGE'", ']', ')', 'height', '=', 'kwargs', '.', 'get', '(', "'height'", ',', 'current_app', '.', 'config', '[', "'CKEDITOR_HEIGHT'", ']', ')', 'width', '=', 'kwargs', '.', 'get', '(', "'width'", ',', 'current_app', '.', 'config', '[', "'CKEDITOR_WIDTH'", ']', ')', 'code_theme', '=', 'kwargs', '.', 'get', '(', "'code_theme'", ',', 'current_app', '.', 'config', '[', "'CKEDITOR_CODE_THEME'", ']', ')', 'wrong_key_arg', '=', 'kwargs', '.', 'get', '(', "'codesnippet'", ',', 'None', ')', 'if', 'wrong_key_arg', ':', 'warnings', '.', 'warn', '(', "'Argument codesnippet was renamed to enable_codesnippet and will be removed in future.'", ')', 'enable_codesnippet', '=', 'kwargs', '.', 'get', '(', "'enable_codesnippet'", ',', 'wrong_key_arg', ')', 'or', 'current_app', '.', 'config', '[', "'CKEDITOR_ENABLE_CODESNIPPET'", ']', 'if', 'enable_codesnippet', 'and', "'codesnippet'", 'not', 'in', 'extra_plugins', ':', 'extra_plugins', '.', 'append', '(', "'codesnippet'", ')', 'enable_csrf', '=', 'kwargs', '.', 'get', '(', "'enable_csrf'", ',', 'current_app', '.', 'config', '[', "'CKEDITOR_ENABLE_CSRF'", ']', ')', 'if', 'enable_csrf', ':', 'if', "'csrf'", 'not', 'in', 'current_app', '.', 'extensions', ':', 'raise', 'RuntimeError', '(', '"CSRFProtect is not initialized. It\'s required to enable CSRF protect, \\\n see docs for more details."', ')', 'csrf_header', '=', 'render_template_string', '(', "'''\n fileTools_requestHeaders: {\n 'X-CSRFToken': '{{ csrf_token() }}',\n },'''", ')', 'else', ':', 'csrf_header', '=', "''", 'return', 'Markup', '(', '\'\'\'\n<script type="text/javascript">\n CKEDITOR.replace( "%s", {\n language: "%s",\n height: %s,\n width: %s,\n codeSnippet_theme: "%s",\n imageUploadUrl: "%s",\n filebrowserUploadUrl: "%s",\n filebrowserBrowseUrl: "%s",\n extraPlugins: "%s",\n %s // CSRF token header for XHR request\n %s\n });\n</script>\'\'\'', '%', '(', 'name', ',', 'language', ',', 'height', ',', 'width', ',', 'code_theme', ',', 'file_uploader', ',', 'file_uploader', ',', 'file_browser', ',', "','", '.', 'join', '(', 'extra_plugins', ')', ',', 'csrf_header', ',', 'custom_config', ')', ')'] | Config CKEditor.
    :param name: The target input field's name. If you use Flask-WTF/WTForms, it needs to be set
    to the field's name. Defaults to ``'ckeditor'``.
    :param custom_config: The additional config, for example ``uiColor: '#9AB8F3'``.
The proper syntax for each option is ``configuration name : configuration value``.
You can use comma to separate multiple key-value pairs. See the list of available
configuration settings on
`CKEditor documentation <https://docs.ckeditor.com/ckeditor4/docs/#!/api/CKEDITOR.config>`_.
:param kwargs: Mirror arguments to overwritten configuration variables, see docs for more details.
.. versionadded:: 0.3 | ['Config', 'CKEditor', '.'] | train | https://github.com/greyli/flask-ckeditor/blob/a8a1aa0d5736271762700d06fe9dbc0f8ed43aec/flask_ckeditor/__init__.py#L50-L122 |
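A hedged sketch of how config() above is typically emitted from a Jinja template via the Flask-CKEditor extension; the route, page markup, and field name are placeholders, and the template helpers (ckeditor.load / ckeditor.config) are assumed to match the extension's documented API:

from flask import Flask, render_template_string
from flask_ckeditor import CKEditor

app = Flask(__name__)
ckeditor = CKEditor(app)

PAGE = """
<form method="post">
  <textarea name="body"></textarea>
</form>
{{ ckeditor.load() }}
{{ ckeditor.config(name='body') }}
"""

@app.route('/')
def index():
    # config(name='body') renders the CKEDITOR.replace("body", {...}) snippet built above.
    return render_template_string(PAGE)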
7,665 | JukeboxPipeline/jukebox-core | src/jukeboxcore/gui/widgets/reftrackwidget.py | ReftrackWidget.set_menu | def set_menu(self, ):
"""Setup the menu that the menu_tb button uses
:returns: None
:rtype: None
:raises: None
"""
self.menu = QtGui.QMenu(self)
actions = self.reftrack.get_additional_actions()
self.actions = []
for a in actions:
if a.icon:
qaction = QtGui.QAction(a.icon, a.name, self)
else:
qaction = QtGui.QAction(a.name, self)
qaction.setCheckable(a.checkable)
qaction.setChecked(a.checked)
qaction.setEnabled(a.enabled)
qaction.triggered.connect(a.action)
self.actions.append(qaction)
self.menu.addAction(qaction)
self.menu_tb.setMenu(self.menu) | python | def set_menu(self, ):
"""Setup the menu that the menu_tb button uses
:returns: None
:rtype: None
:raises: None
"""
self.menu = QtGui.QMenu(self)
actions = self.reftrack.get_additional_actions()
self.actions = []
for a in actions:
if a.icon:
qaction = QtGui.QAction(a.icon, a.name, self)
else:
qaction = QtGui.QAction(a.name, self)
qaction.setCheckable(a.checkable)
qaction.setChecked(a.checked)
qaction.setEnabled(a.enabled)
qaction.triggered.connect(a.action)
self.actions.append(qaction)
self.menu.addAction(qaction)
self.menu_tb.setMenu(self.menu) | ['def', 'set_menu', '(', 'self', ',', ')', ':', 'self', '.', 'menu', '=', 'QtGui', '.', 'QMenu', '(', 'self', ')', 'actions', '=', 'self', '.', 'reftrack', '.', 'get_additional_actions', '(', ')', 'self', '.', 'actions', '=', '[', ']', 'for', 'a', 'in', 'actions', ':', 'if', 'a', '.', 'icon', ':', 'qaction', '=', 'QtGui', '.', 'QAction', '(', 'a', '.', 'icon', ',', 'a', '.', 'name', ',', 'self', ')', 'else', ':', 'qaction', '=', 'QtGui', '.', 'QAction', '(', 'a', '.', 'name', ',', 'self', ')', 'qaction', '.', 'setCheckable', '(', 'a', '.', 'checkable', ')', 'qaction', '.', 'setChecked', '(', 'a', '.', 'checked', ')', 'qaction', '.', 'setEnabled', '(', 'a', '.', 'enabled', ')', 'qaction', '.', 'triggered', '.', 'connect', '(', 'a', '.', 'action', ')', 'self', '.', 'actions', '.', 'append', '(', 'qaction', ')', 'self', '.', 'menu', '.', 'addAction', '(', 'qaction', ')', 'self', '.', 'menu_tb', '.', 'setMenu', '(', 'self', '.', 'menu', ')'] | Setup the menu that the menu_tb button uses
:returns: None
:rtype: None
:raises: None | ['Setup', 'the', 'menu', 'that', 'the', 'menu_tb', 'button', 'uses'] | train | https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L301-L322 |
7,666 | networks-lab/metaknowledge | metaknowledge/WOS/tagProcessing/tagFunctions.py | authAddress | def authAddress(val):
"""
# The C1 Tag
extracts the address of the authors as given by WOS. **Warning** the mapping of author to address is not very good and is given in multiple ways.
# Parameters
_val_: `list[str]`
> The raw data from a WOS file
# Returns
`list[str]`
> A list of addresses
"""
ret = []
for a in val:
if a[0] == '[':
ret.append('] '.join(a.split('] ')[1:]))
else:
ret.append(a)
return ret | python | def authAddress(val):
"""
# The C1 Tag
extracts the address of the authors as given by WOS. **Warning** the mapping of author to address is not very good and is given in multiple ways.
# Parameters
_val_: `list[str]`
> The raw data from a WOS file
# Returns
`list[str]`
> A list of addresses
"""
ret = []
for a in val:
if a[0] == '[':
ret.append('] '.join(a.split('] ')[1:]))
else:
ret.append(a)
return ret | ['def', 'authAddress', '(', 'val', ')', ':', 'ret', '=', '[', ']', 'for', 'a', 'in', 'val', ':', 'if', 'a', '[', '0', ']', '==', "'['", ':', 'ret', '.', 'append', '(', "'] '", '.', 'join', '(', 'a', '.', 'split', '(', "'] '", ')', '[', '1', ':', ']', ')', ')', 'else', ':', 'ret', '.', 'append', '(', 'a', ')', 'return', 'ret'] | # The C1 Tag
extracts the address of the authors as given by WOS. **Warning** the mapping of author to address is not very good and is given in multiple ways.
# Parameters
_val_: `list[str]`
> The raw data from a WOS file
# Returns
`list[str]`
> A list of addresses | ['#', 'The', 'C1', 'Tag'] | train | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/WOS/tagProcessing/tagFunctions.py#L394-L419 |
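A self-contained illustration of the bracket-stripping the C1 handler above performs; the sample address strings are invented in the WOS style:

def auth_address(val):
    # Same logic as above: drop a leading "[Author; Author]" block when present.
    ret = []
    for a in val:
        if a[0] == '[':
            ret.append('] '.join(a.split('] ')[1:]))
        else:
            ret.append(a)
    return ret

raw = [
    "[Smith, J; Doe, A] Univ Toronto, Dept Sociol, Toronto, ON, Canada",
    "Univ Waterloo, Waterloo, ON, Canada",
]
print(auth_address(raw))
# ['Univ Toronto, Dept Sociol, Toronto, ON, Canada', 'Univ Waterloo, Waterloo, ON, Canada']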
7,667 | spyder-ide/spyder | spyder/plugins/variableexplorer/widgets/collectionseditor.py | RemoteCollectionsEditorTableView.new_value | def new_value(self, name, value):
"""Create new value in data"""
try:
# We need to enclose values in a list to be able to send
# them to the kernel in Python 2
svalue = [cloudpickle.dumps(value, protocol=PICKLE_PROTOCOL)]
# Needed to prevent memory leaks. See issue 7158
if len(svalue) < MAX_SERIALIZED_LENGHT:
self.shellwidget.set_value(name, svalue)
else:
QMessageBox.warning(self, _("Warning"),
_("The object you are trying to modify is "
"too big to be sent back to the kernel. "
"Therefore, your modifications won't "
"take place."))
except TypeError as e:
QMessageBox.critical(self, _("Error"),
"TypeError: %s" % to_text_string(e))
self.shellwidget.refresh_namespacebrowser() | python | def new_value(self, name, value):
"""Create new value in data"""
try:
# We need to enclose values in a list to be able to send
# them to the kernel in Python 2
svalue = [cloudpickle.dumps(value, protocol=PICKLE_PROTOCOL)]
# Needed to prevent memory leaks. See issue 7158
if len(svalue) < MAX_SERIALIZED_LENGHT:
self.shellwidget.set_value(name, svalue)
else:
QMessageBox.warning(self, _("Warning"),
_("The object you are trying to modify is "
"too big to be sent back to the kernel. "
"Therefore, your modifications won't "
"take place."))
except TypeError as e:
QMessageBox.critical(self, _("Error"),
"TypeError: %s" % to_text_string(e))
self.shellwidget.refresh_namespacebrowser() | ['def', 'new_value', '(', 'self', ',', 'name', ',', 'value', ')', ':', 'try', ':', '# We need to enclose values in a list to be able to send\r', '# them to the kernel in Python 2\r', 'svalue', '=', '[', 'cloudpickle', '.', 'dumps', '(', 'value', ',', 'protocol', '=', 'PICKLE_PROTOCOL', ')', ']', '# Needed to prevent memory leaks. See issue 7158\r', 'if', 'len', '(', 'svalue', ')', '<', 'MAX_SERIALIZED_LENGHT', ':', 'self', '.', 'shellwidget', '.', 'set_value', '(', 'name', ',', 'svalue', ')', 'else', ':', 'QMessageBox', '.', 'warning', '(', 'self', ',', '_', '(', '"Warning"', ')', ',', '_', '(', '"The object you are trying to modify is "', '"too big to be sent back to the kernel. "', '"Therefore, your modifications won\'t "', '"take place."', ')', ')', 'except', 'TypeError', 'as', 'e', ':', 'QMessageBox', '.', 'critical', '(', 'self', ',', '_', '(', '"Error"', ')', ',', '"TypeError: %s"', '%', 'to_text_string', '(', 'e', ')', ')', 'self', '.', 'shellwidget', '.', 'refresh_namespacebrowser', '(', ')'] | Create new value in data | ['Create', 'new', 'value', 'in', 'data'] | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/collectionseditor.py#L1568-L1587 |
7,668 | allenai/allennlp | allennlp/nn/util.py | sequence_cross_entropy_with_logits | def sequence_cross_entropy_with_logits(logits: torch.FloatTensor,
targets: torch.LongTensor,
weights: torch.FloatTensor,
average: str = "batch",
label_smoothing: float = None) -> torch.FloatTensor:
"""
Computes the cross entropy loss of a sequence, weighted with respect to
some user provided weights. Note that the weighting here is not the same as
in the :func:`torch.nn.CrossEntropyLoss()` criterion, which is weighting
classes; here we are weighting the loss contribution from particular elements
in the sequence. This allows loss computations for models which use padding.
Parameters
----------
logits : ``torch.FloatTensor``, required.
A ``torch.FloatTensor`` of size (batch_size, sequence_length, num_classes)
which contains the unnormalized probability for each class.
targets : ``torch.LongTensor``, required.
A ``torch.LongTensor`` of size (batch, sequence_length) which contains the
index of the true class for each corresponding step.
weights : ``torch.FloatTensor``, required.
A ``torch.FloatTensor`` of size (batch, sequence_length)
average: str, optional (default = "batch")
If "batch", average the loss across the batches. If "token", average
the loss across each item in the input. If ``None``, return a vector
of losses per batch element.
label_smoothing : ``float``, optional (default = None)
Whether or not to apply label smoothing to the cross-entropy loss.
For example, with a label smoothing value of 0.2, a 4 class classification
target would look like ``[0.05, 0.05, 0.85, 0.05]`` if the 3rd class was
the correct label.
Returns
-------
A torch.FloatTensor representing the cross entropy loss.
If ``average=="batch"`` or ``average=="token"``, the returned loss is a scalar.
If ``average is None``, the returned loss is a vector of shape (batch_size,).
"""
if average not in {None, "token", "batch"}:
raise ValueError("Got average f{average}, expected one of "
"None, 'token', or 'batch'")
# shape : (batch * sequence_length, num_classes)
logits_flat = logits.view(-1, logits.size(-1))
# shape : (batch * sequence_length, num_classes)
log_probs_flat = torch.nn.functional.log_softmax(logits_flat, dim=-1)
# shape : (batch * max_len, 1)
targets_flat = targets.view(-1, 1).long()
if label_smoothing is not None and label_smoothing > 0.0:
num_classes = logits.size(-1)
smoothing_value = label_smoothing / num_classes
# Fill all the correct indices with 1 - smoothing value.
one_hot_targets = torch.zeros_like(log_probs_flat).scatter_(-1, targets_flat, 1.0 - label_smoothing)
smoothed_targets = one_hot_targets + smoothing_value
negative_log_likelihood_flat = - log_probs_flat * smoothed_targets
negative_log_likelihood_flat = negative_log_likelihood_flat.sum(-1, keepdim=True)
else:
# Contribution to the negative log likelihood only comes from the exact indices
# of the targets, as the target distributions are one-hot. Here we use torch.gather
# to extract the indices of the num_classes dimension which contribute to the loss.
# shape : (batch * sequence_length, 1)
negative_log_likelihood_flat = - torch.gather(log_probs_flat, dim=1, index=targets_flat)
# shape : (batch, sequence_length)
negative_log_likelihood = negative_log_likelihood_flat.view(*targets.size())
# shape : (batch, sequence_length)
negative_log_likelihood = negative_log_likelihood * weights.float()
if average == "batch":
# shape : (batch_size,)
per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)
num_non_empty_sequences = ((weights.sum(1) > 0).float().sum() + 1e-13)
return per_batch_loss.sum() / num_non_empty_sequences
elif average == "token":
return negative_log_likelihood.sum() / (weights.sum().float() + 1e-13)
else:
# shape : (batch_size,)
per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)
return per_batch_loss | python | def sequence_cross_entropy_with_logits(logits: torch.FloatTensor,
targets: torch.LongTensor,
weights: torch.FloatTensor,
average: str = "batch",
label_smoothing: float = None) -> torch.FloatTensor:
"""
Computes the cross entropy loss of a sequence, weighted with respect to
some user provided weights. Note that the weighting here is not the same as
in the :func:`torch.nn.CrossEntropyLoss()` criterion, which is weighting
classes; here we are weighting the loss contribution from particular elements
in the sequence. This allows loss computations for models which use padding.
Parameters
----------
logits : ``torch.FloatTensor``, required.
A ``torch.FloatTensor`` of size (batch_size, sequence_length, num_classes)
which contains the unnormalized probability for each class.
targets : ``torch.LongTensor``, required.
A ``torch.LongTensor`` of size (batch, sequence_length) which contains the
index of the true class for each corresponding step.
weights : ``torch.FloatTensor``, required.
A ``torch.FloatTensor`` of size (batch, sequence_length)
average: str, optional (default = "batch")
If "batch", average the loss across the batches. If "token", average
the loss across each item in the input. If ``None``, return a vector
of losses per batch element.
label_smoothing : ``float``, optional (default = None)
Whether or not to apply label smoothing to the cross-entropy loss.
For example, with a label smoothing value of 0.2, a 4 class classification
target would look like ``[0.05, 0.05, 0.85, 0.05]`` if the 3rd class was
the correct label.
Returns
-------
A torch.FloatTensor representing the cross entropy loss.
If ``average=="batch"`` or ``average=="token"``, the returned loss is a scalar.
If ``average is None``, the returned loss is a vector of shape (batch_size,).
"""
if average not in {None, "token", "batch"}:
raise ValueError("Got average f{average}, expected one of "
"None, 'token', or 'batch'")
# shape : (batch * sequence_length, num_classes)
logits_flat = logits.view(-1, logits.size(-1))
# shape : (batch * sequence_length, num_classes)
log_probs_flat = torch.nn.functional.log_softmax(logits_flat, dim=-1)
# shape : (batch * max_len, 1)
targets_flat = targets.view(-1, 1).long()
if label_smoothing is not None and label_smoothing > 0.0:
num_classes = logits.size(-1)
smoothing_value = label_smoothing / num_classes
# Fill all the correct indices with 1 - smoothing value.
one_hot_targets = torch.zeros_like(log_probs_flat).scatter_(-1, targets_flat, 1.0 - label_smoothing)
smoothed_targets = one_hot_targets + smoothing_value
negative_log_likelihood_flat = - log_probs_flat * smoothed_targets
negative_log_likelihood_flat = negative_log_likelihood_flat.sum(-1, keepdim=True)
else:
# Contribution to the negative log likelihood only comes from the exact indices
# of the targets, as the target distributions are one-hot. Here we use torch.gather
# to extract the indices of the num_classes dimension which contribute to the loss.
# shape : (batch * sequence_length, 1)
negative_log_likelihood_flat = - torch.gather(log_probs_flat, dim=1, index=targets_flat)
# shape : (batch, sequence_length)
negative_log_likelihood = negative_log_likelihood_flat.view(*targets.size())
# shape : (batch, sequence_length)
negative_log_likelihood = negative_log_likelihood * weights.float()
if average == "batch":
# shape : (batch_size,)
per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)
num_non_empty_sequences = ((weights.sum(1) > 0).float().sum() + 1e-13)
return per_batch_loss.sum() / num_non_empty_sequences
elif average == "token":
return negative_log_likelihood.sum() / (weights.sum().float() + 1e-13)
else:
# shape : (batch_size,)
per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)
return per_batch_loss | ['def', 'sequence_cross_entropy_with_logits', '(', 'logits', ':', 'torch', '.', 'FloatTensor', ',', 'targets', ':', 'torch', '.', 'LongTensor', ',', 'weights', ':', 'torch', '.', 'FloatTensor', ',', 'average', ':', 'str', '=', '"batch"', ',', 'label_smoothing', ':', 'float', '=', 'None', ')', '->', 'torch', '.', 'FloatTensor', ':', 'if', 'average', 'not', 'in', '{', 'None', ',', '"token"', ',', '"batch"', '}', ':', 'raise', 'ValueError', '(', '"Got average f{average}, expected one of "', '"None, \'token\', or \'batch\'"', ')', '# shape : (batch * sequence_length, num_classes)', 'logits_flat', '=', 'logits', '.', 'view', '(', '-', '1', ',', 'logits', '.', 'size', '(', '-', '1', ')', ')', '# shape : (batch * sequence_length, num_classes)', 'log_probs_flat', '=', 'torch', '.', 'nn', '.', 'functional', '.', 'log_softmax', '(', 'logits_flat', ',', 'dim', '=', '-', '1', ')', '# shape : (batch * max_len, 1)', 'targets_flat', '=', 'targets', '.', 'view', '(', '-', '1', ',', '1', ')', '.', 'long', '(', ')', 'if', 'label_smoothing', 'is', 'not', 'None', 'and', 'label_smoothing', '>', '0.0', ':', 'num_classes', '=', 'logits', '.', 'size', '(', '-', '1', ')', 'smoothing_value', '=', 'label_smoothing', '/', 'num_classes', '# Fill all the correct indices with 1 - smoothing value.', 'one_hot_targets', '=', 'torch', '.', 'zeros_like', '(', 'log_probs_flat', ')', '.', 'scatter_', '(', '-', '1', ',', 'targets_flat', ',', '1.0', '-', 'label_smoothing', ')', 'smoothed_targets', '=', 'one_hot_targets', '+', 'smoothing_value', 'negative_log_likelihood_flat', '=', '-', 'log_probs_flat', '*', 'smoothed_targets', 'negative_log_likelihood_flat', '=', 'negative_log_likelihood_flat', '.', 'sum', '(', '-', '1', ',', 'keepdim', '=', 'True', ')', 'else', ':', '# Contribution to the negative log likelihood only comes from the exact indices', '# of the targets, as the target distributions are one-hot. Here we use torch.gather', '# to extract the indices of the num_classes dimension which contribute to the loss.', '# shape : (batch * sequence_length, 1)', 'negative_log_likelihood_flat', '=', '-', 'torch', '.', 'gather', '(', 'log_probs_flat', ',', 'dim', '=', '1', ',', 'index', '=', 'targets_flat', ')', '# shape : (batch, sequence_length)', 'negative_log_likelihood', '=', 'negative_log_likelihood_flat', '.', 'view', '(', '*', 'targets', '.', 'size', '(', ')', ')', '# shape : (batch, sequence_length)', 'negative_log_likelihood', '=', 'negative_log_likelihood', '*', 'weights', '.', 'float', '(', ')', 'if', 'average', '==', '"batch"', ':', '# shape : (batch_size,)', 'per_batch_loss', '=', 'negative_log_likelihood', '.', 'sum', '(', '1', ')', '/', '(', 'weights', '.', 'sum', '(', '1', ')', '.', 'float', '(', ')', '+', '1e-13', ')', 'num_non_empty_sequences', '=', '(', '(', 'weights', '.', 'sum', '(', '1', ')', '>', '0', ')', '.', 'float', '(', ')', '.', 'sum', '(', ')', '+', '1e-13', ')', 'return', 'per_batch_loss', '.', 'sum', '(', ')', '/', 'num_non_empty_sequences', 'elif', 'average', '==', '"token"', ':', 'return', 'negative_log_likelihood', '.', 'sum', '(', ')', '/', '(', 'weights', '.', 'sum', '(', ')', '.', 'float', '(', ')', '+', '1e-13', ')', 'else', ':', '# shape : (batch_size,)', 'per_batch_loss', '=', 'negative_log_likelihood', '.', 'sum', '(', '1', ')', '/', '(', 'weights', '.', 'sum', '(', '1', ')', '.', 'float', '(', ')', '+', '1e-13', ')', 'return', 'per_batch_loss'] | Computes the cross entropy loss of a sequence, weighted with respect to
some user provided weights. Note that the weighting here is not the same as
in the :func:`torch.nn.CrossEntropyLoss()` criterion, which is weighting
classes; here we are weighting the loss contribution from particular elements
in the sequence. This allows loss computations for models which use padding.
Parameters
----------
logits : ``torch.FloatTensor``, required.
A ``torch.FloatTensor`` of size (batch_size, sequence_length, num_classes)
which contains the unnormalized probability for each class.
targets : ``torch.LongTensor``, required.
A ``torch.LongTensor`` of size (batch, sequence_length) which contains the
index of the true class for each corresponding step.
weights : ``torch.FloatTensor``, required.
A ``torch.FloatTensor`` of size (batch, sequence_length)
average: str, optional (default = "batch")
If "batch", average the loss across the batches. If "token", average
the loss across each item in the input. If ``None``, return a vector
of losses per batch element.
label_smoothing : ``float``, optional (default = None)
Whether or not to apply label smoothing to the cross-entropy loss.
For example, with a label smoothing value of 0.2, a 4 class classification
target would look like ``[0.05, 0.05, 0.85, 0.05]`` if the 3rd class was
the correct label.
Returns
-------
A torch.FloatTensor representing the cross entropy loss.
If ``average=="batch"`` or ``average=="token"``, the returned loss is a scalar.
If ``average is None``, the returned loss is a vector of shape (batch_size,). | ['Computes', 'the', 'cross', 'entropy', 'loss', 'of', 'a', 'sequence', 'weighted', 'with', 'respect', 'to', 'some', 'user', 'provided', 'weights', '.', 'Note', 'that', 'the', 'weighting', 'here', 'is', 'not', 'the', 'same', 'as', 'in', 'the', ':', 'func', ':', 'torch', '.', 'nn', '.', 'CrossEntropyLoss', '()', 'criterion', 'which', 'is', 'weighting', 'classes', ';', 'here', 'we', 'are', 'weighting', 'the', 'loss', 'contribution', 'from', 'particular', 'elements', 'in', 'the', 'sequence', '.', 'This', 'allows', 'loss', 'computations', 'for', 'models', 'which', 'use', 'padding', '.'] | train | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/nn/util.py#L569-L648 |
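A short shape-oriented usage sketch for the loss above (assumes torch and allennlp are installed; the tensors are random and the padding mask is hand-made):

import torch
from allennlp.nn.util import sequence_cross_entropy_with_logits

batch_size, seq_len, num_classes = 2, 4, 5
logits = torch.randn(batch_size, seq_len, num_classes)
targets = torch.randint(num_classes, (batch_size, seq_len))
# 1.0 marks real tokens, 0.0 marks padding; padded steps contribute nothing to the loss.
weights = torch.tensor([[1.0, 1.0, 1.0, 0.0],
                        [1.0, 1.0, 0.0, 0.0]])

loss = sequence_cross_entropy_with_logits(logits, targets, weights)        # scalar, batch-averaged
per_example = sequence_cross_entropy_with_logits(logits, targets, weights, average=None)  # (batch_size,)
smoothed = sequence_cross_entropy_with_logits(logits, targets, weights, label_smoothing=0.1)
print(loss.item(), per_example.shape, smoothed.item())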
7,669 | casacore/python-casacore | casacore/tables/table.py | taql | def taql(command, style='Python', tables=[], globals={}, locals={}):
"""Execute a TaQL command and return a table object.
A `TaQL <../../doc/199.html>`_
command is an SQL-like command to do a selection of rows and/or
columns in a table.
The default style used in a TaQL command is python, which means 0-based
indexing, C-ordered arrays, and non-inclusive end in ranges.
It is possible to use python variables directly in the command using
`$var` where `var` is the name of the variable to use. For example::
t = table('3c343.MS')
value = 5.1
t1 = taql('select from $t where COL > $value')
In this example the table `$t` is replaced by a sequence number
(such as `$1`) and `$value` by its value 5.1.
The table object of `t` will be appended to a copy of the `tables`
argument such that the sequence number inserted matches the table object
in the list.
The more advanced user can already use `$n` in the query string and
supply the associated table object in the `tables` argument
(where `n` represents the (n-1)th `tables` element).
The :func:`query` command makes use of this feature.
The arguments `globals` and `locals` can be used to pass in a dict
containing the possible variables used in the TaQL command. They can
be obtained with the python functions locals() and globals().
If `locals` is empty, the local variables in the calling function will
be used, so normally one does not need to use these arguments.
"""
# Substitute possible tables given as $name.
cmd = command
# Copy the tables argument and make sure it is a list
tabs = []
for tab in tables:
tabs += [tab]
try:
import casacore.util
if len(locals) == 0:
# local variables in caller are 3 levels up from getlocals
locals = casacore.util.getlocals(3)
cmd = casacore.util.substitute(cmd, [(table, '', tabs)],
globals, locals)
except Exception:
pass
if style:
cmd = 'using style ' + style + ' ' + cmd
tab = table(cmd, tabs, _oper=2)
result = tab._getcalcresult()
# If result is empty, it was a normal TaQL command resulting in a table.
# Otherwise it is a record containing calc values.
if len(result) == 0:
return tab
return result['values'] | python | def taql(command, style='Python', tables=[], globals={}, locals={}):
"""Execute a TaQL command and return a table object.
A `TaQL <../../doc/199.html>`_
command is an SQL-like command to do a selection of rows and/or
columns in a table.
The default style used in a TaQL command is python, which means 0-based
indexing, C-ordered arrays, and non-inclusive end in ranges.
It is possible to use python variables directly in the command using
`$var` where `var` is the name of the variable to use. For example::
t = table('3c343.MS')
value = 5.1
t1 = taql('select from $t where COL > $value')
In this example the table `$t` is replaced by a sequence number
(such as `$1`) and `$value` by its value 5.1.
The table object of `t` will be appended to a copy of the `tables`
argument such that the sequence number inserted matches the table object
in the list.
The more advanced user can already use `$n` in the query string and
supply the associated table object in the `tables` argument
(where `n` represents the (n-1)th `tables` element).
The :func:`query` command makes use of this feature.
The arguments `globals` and `locals` can be used to pass in a dict
containing the possible variables used in the TaQL command. They can
be obtained with the python functions locals() and globals().
If `locals` is empty, the local variables in the calling function will
be used, so normally one does not need to use these arguments.
"""
# Substitute possible tables given as $name.
cmd = command
# Copy the tables argument and make sure it is a list
tabs = []
for tab in tables:
tabs += [tab]
try:
import casacore.util
if len(locals) == 0:
# local variables in caller are 3 levels up from getlocals
locals = casacore.util.getlocals(3)
cmd = casacore.util.substitute(cmd, [(table, '', tabs)],
globals, locals)
except Exception:
pass
if style:
cmd = 'using style ' + style + ' ' + cmd
tab = table(cmd, tabs, _oper=2)
result = tab._getcalcresult()
# If result is empty, it was a normal TaQL command resulting in a table.
# Otherwise it is a record containing calc values.
if len(result) == 0:
return tab
return result['values'] | ['def', 'taql', '(', 'command', ',', 'style', '=', "'Python'", ',', 'tables', '=', '[', ']', ',', 'globals', '=', '{', '}', ',', 'locals', '=', '{', '}', ')', ':', '# Substitute possible tables given as $name.', 'cmd', '=', 'command', '# Copy the tables argument and make sure it is a list', 'tabs', '=', '[', ']', 'for', 'tab', 'in', 'tables', ':', 'tabs', '+=', '[', 'tab', ']', 'try', ':', 'import', 'casacore', '.', 'util', 'if', 'len', '(', 'locals', ')', '==', '0', ':', '# local variables in caller are 3 levels up from getlocals', 'locals', '=', 'casacore', '.', 'util', '.', 'getlocals', '(', '3', ')', 'cmd', '=', 'casacore', '.', 'util', '.', 'substitute', '(', 'cmd', ',', '[', '(', 'table', ',', "''", ',', 'tabs', ')', ']', ',', 'globals', ',', 'locals', ')', 'except', 'Exception', ':', 'pass', 'if', 'style', ':', 'cmd', '=', "'using style '", '+', 'style', '+', "' '", '+', 'cmd', 'tab', '=', 'table', '(', 'cmd', ',', 'tabs', ',', '_oper', '=', '2', ')', 'result', '=', 'tab', '.', '_getcalcresult', '(', ')', '# If result is empty, it was a normal TaQL command resulting in a table.', '# Otherwise it is a record containing calc values.', 'if', 'len', '(', 'result', ')', '==', '0', ':', 'return', 'tab', 'return', 'result', '[', "'values'", ']'] | Execute a TaQL command and return a table object.
A `TaQL <../../doc/199.html>`_
command is an SQL-like command to do a selection of rows and/or
columns in a table.
The default style used in a TaQL command is python, which means 0-based
indexing, C-ordered arrays, and non-inclusive end in ranges.
It is possible to use python variables directly in the command using
`$var` where `var` is the name of the variable to use. For example::
t = table('3c343.MS')
value = 5.1
t1 = taql('select from $t where COL > $value')
In this example the table `$t` is replaced by a sequence number
(such as `$1`) and `$value` by its value 5.1.
The table object of `t` will be appended to a copy of the `tables`
argument such that the sequence number inserted matches the table object
in the list.
The more advanced user can already use `$n` in the query string and
supply the associated table object in the `tables` argument
(where `n` represents the (n-1)th `tables` element).
The :func:`query` command makes use of this feature.
The arguments `globals` and `locals` can be used to pass in a dict
containing the possible variables used in the TaQL command. They can
be obtained with the python functions locals() and globals().
If `locals` is empty, the local variables in the calling function will
be used, so normally one does not need to use these arguments. | ['Execute', 'a', 'TaQL', 'command', 'and', 'return', 'a', 'table', 'object', '.'] | train | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/table.py#L104-L162 |
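For orientation, a minimal usage sketch for the taql() entry above, assuming python-casacore is installed and a MeasurementSet named 3c343.MS (a hypothetical path) is on disk; it shows both return modes, a table object for select queries and plain values for calc queries::

    from casacore.tables import table, taql

    t = table('3c343.MS')          # hypothetical MeasurementSet
    cutoff = 5.1
    # Local variables are substituted for $t and $cutoff, as described above.
    sel = taql('select from $t where TIME > $cutoff')
    print(sel.nrows())             # a selection returns a (reference) table
    # A 'calc' command returns the computed values instead of a table.
    print(taql('calc sum([1,2,3])'))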
7,670 | LLNL/certipy | certipy/certipy.py | CertStore.save | def save(self):
"""Write the store dict to a file specified by store_file_path"""
with open(self.store_file_path, 'w') as fh:
fh.write(json.dumps(self.store, indent=4)) | python | def save(self):
"""Write the store dict to a file specified by store_file_path"""
with open(self.store_file_path, 'w') as fh:
fh.write(json.dumps(self.store, indent=4)) | ['def', 'save', '(', 'self', ')', ':', 'with', 'open', '(', 'self', '.', 'store_file_path', ',', "'w'", ')', 'as', 'fh', ':', 'fh', '.', 'write', '(', 'json', '.', 'dumps', '(', 'self', '.', 'store', ',', 'indent', '=', '4', ')', ')'] | Write the store dict to a file specified by store_file_path | ['Write', 'the', 'store', 'dict', 'to', 'a', 'file', 'specified', 'by', 'store_file_path'] | train | https://github.com/LLNL/certipy/blob/8705a8ba32655e12021d2893cf1c3c98c697edd7/certipy/certipy.py#L253-L257 |
7,671 | Duke-GCB/DukeDSClient | ddsc/core/ddsapi.py | DataServiceApi.send_external | def send_external(self, http_verb, host, url, http_headers, chunk):
"""
Used with create_upload_url to send a chunk to the possibly external object store.
:param http_verb: str PUT or POST
:param host: str host we are sending the chunk to
:param url: str url to use when sending
:param http_headers: object headers to send with the request
:param chunk: content to send
:return: requests.Response containing the successful result
"""
if http_verb == 'PUT':
return self.http.put(host + url, data=chunk, headers=http_headers)
elif http_verb == 'POST':
return self.http.post(host + url, data=chunk, headers=http_headers)
else:
raise ValueError("Unsupported http_verb:" + http_verb) | python | def send_external(self, http_verb, host, url, http_headers, chunk):
"""
Used with create_upload_url to send a chunk to the possibly external object store.
:param http_verb: str PUT or POST
:param host: str host we are sending the chunk to
:param url: str url to use when sending
:param http_headers: object headers to send with the request
:param chunk: content to send
:return: requests.Response containing the successful result
"""
if http_verb == 'PUT':
return self.http.put(host + url, data=chunk, headers=http_headers)
elif http_verb == 'POST':
return self.http.post(host + url, data=chunk, headers=http_headers)
else:
raise ValueError("Unsupported http_verb:" + http_verb) | ['def', 'send_external', '(', 'self', ',', 'http_verb', ',', 'host', ',', 'url', ',', 'http_headers', ',', 'chunk', ')', ':', 'if', 'http_verb', '==', "'PUT'", ':', 'return', 'self', '.', 'http', '.', 'put', '(', 'host', '+', 'url', ',', 'data', '=', 'chunk', ',', 'headers', '=', 'http_headers', ')', 'elif', 'http_verb', '==', "'POST'", ':', 'return', 'self', '.', 'http', '.', 'post', '(', 'host', '+', 'url', ',', 'data', '=', 'chunk', ',', 'headers', '=', 'http_headers', ')', 'else', ':', 'raise', 'ValueError', '(', '"Unsupported http_verb:"', '+', 'http_verb', ')'] | Used with create_upload_url to send a chunk the the possibly external object store.
:param http_verb: str PUT or POST
:param host: str host we are sending the chunk to
:param url: str url to use when sending
:param http_headers: object headers to send with the request
:param chunk: content to send
:return: requests.Response containing the successful result | ['Used', 'with', 'create_upload_url', 'to', 'send', 'a', 'chunk', 'the', 'the', 'possibly', 'external', 'object', 'store', '.', ':', 'param', 'http_verb', ':', 'str', 'PUT', 'or', 'POST', ':', 'param', 'host', ':', 'str', 'host', 'we', 'are', 'sending', 'the', 'chunk', 'to', ':', 'param', 'url', ':', 'str', 'url', 'to', 'use', 'when', 'sending', ':', 'param', 'http_headers', ':', 'object', 'headers', 'to', 'send', 'with', 'the', 'request', ':', 'param', 'chunk', ':', 'content', 'to', 'send', ':', 'return', ':', 'requests', '.', 'Response', 'containing', 'the', 'successful', 'result'] | train | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/ddsapi.py#L578-L593 |
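The send_external() entry above only dispatches on the HTTP verb; below is a small sketch of the equivalent call pattern using plain requests, where the host, path, headers and chunk are placeholders standing in for what create_upload_url would normally provide::

    import requests

    session = requests.Session()
    host = 'https://uploads.example.com'   # placeholder object-store host
    url = '/signed/upload/path'            # placeholder signed path
    headers = {'Content-Type': 'application/octet-stream'}
    chunk = b'...chunk bytes...'
    http_verb = 'PUT'                      # only PUT and POST are accepted
    if http_verb == 'PUT':
        resp = session.put(host + url, data=chunk, headers=headers)
    elif http_verb == 'POST':
        resp = session.post(host + url, data=chunk, headers=headers)
    else:
        raise ValueError('Unsupported http_verb:' + http_verb)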
7,672 | pyinvoke/invocations | invocations/docs.py | watch_docs | def watch_docs(c):
"""
Watch both doc trees & rebuild them if files change.
This includes e.g. rebuilding the API docs if the source code changes;
rebuilding the WWW docs if the README changes; etc.
Reuses the configuration values ``packaging.package`` or ``tests.package``
(the former winning over the latter if both defined) when determining which
source directory to scan for API doc updates.
"""
# TODO: break back down into generic single-site version, then create split
# tasks as with docs/www above. Probably wants invoke#63.
# NOTE: 'www'/'docs' refer to the module level sub-collections. meh.
# Readme & WWW triggers WWW
www_c = Context(config=c.config.clone())
www_c.update(**www.configuration())
www_handler = make_handler(
ctx=www_c,
task_=www["build"],
regexes=[r"\./README.rst", r"\./sites/www"],
ignore_regexes=[r".*/\..*\.swp", r"\./sites/www/_build"],
)
# Code and docs trigger API
docs_c = Context(config=c.config.clone())
docs_c.update(**docs.configuration())
regexes = [r"\./sites/docs"]
package = c.get("packaging", {}).get("package", None)
if package is None:
package = c.get("tests", {}).get("package", None)
if package:
regexes.append(r"\./{}/".format(package))
api_handler = make_handler(
ctx=docs_c,
task_=docs["build"],
regexes=regexes,
ignore_regexes=[r".*/\..*\.swp", r"\./sites/docs/_build"],
)
observe(www_handler, api_handler) | python | def watch_docs(c):
"""
Watch both doc trees & rebuild them if files change.
This includes e.g. rebuilding the API docs if the source code changes;
rebuilding the WWW docs if the README changes; etc.
Reuses the configuration values ``packaging.package`` or ``tests.package``
(the former winning over the latter if both defined) when determining which
source directory to scan for API doc updates.
"""
# TODO: break back down into generic single-site version, then create split
# tasks as with docs/www above. Probably wants invoke#63.
# NOTE: 'www'/'docs' refer to the module level sub-collections. meh.
# Readme & WWW triggers WWW
www_c = Context(config=c.config.clone())
www_c.update(**www.configuration())
www_handler = make_handler(
ctx=www_c,
task_=www["build"],
regexes=[r"\./README.rst", r"\./sites/www"],
ignore_regexes=[r".*/\..*\.swp", r"\./sites/www/_build"],
)
# Code and docs trigger API
docs_c = Context(config=c.config.clone())
docs_c.update(**docs.configuration())
regexes = [r"\./sites/docs"]
package = c.get("packaging", {}).get("package", None)
if package is None:
package = c.get("tests", {}).get("package", None)
if package:
regexes.append(r"\./{}/".format(package))
api_handler = make_handler(
ctx=docs_c,
task_=docs["build"],
regexes=regexes,
ignore_regexes=[r".*/\..*\.swp", r"\./sites/docs/_build"],
)
observe(www_handler, api_handler) | ['def', 'watch_docs', '(', 'c', ')', ':', '# TODO: break back down into generic single-site version, then create split', '# tasks as with docs/www above. Probably wants invoke#63.', "# NOTE: 'www'/'docs' refer to the module level sub-collections. meh.", '# Readme & WWW triggers WWW', 'www_c', '=', 'Context', '(', 'config', '=', 'c', '.', 'config', '.', 'clone', '(', ')', ')', 'www_c', '.', 'update', '(', '*', '*', 'www', '.', 'configuration', '(', ')', ')', 'www_handler', '=', 'make_handler', '(', 'ctx', '=', 'www_c', ',', 'task_', '=', 'www', '[', '"build"', ']', ',', 'regexes', '=', '[', 'r"\\./README.rst"', ',', 'r"\\./sites/www"', ']', ',', 'ignore_regexes', '=', '[', 'r".*/\\..*\\.swp"', ',', 'r"\\./sites/www/_build"', ']', ',', ')', '# Code and docs trigger API', 'docs_c', '=', 'Context', '(', 'config', '=', 'c', '.', 'config', '.', 'clone', '(', ')', ')', 'docs_c', '.', 'update', '(', '*', '*', 'docs', '.', 'configuration', '(', ')', ')', 'regexes', '=', '[', 'r"\\./sites/docs"', ']', 'package', '=', 'c', '.', 'get', '(', '"packaging"', ',', '{', '}', ')', '.', 'get', '(', '"package"', ',', 'None', ')', 'if', 'package', 'is', 'None', ':', 'package', '=', 'c', '.', 'get', '(', '"tests"', ',', '{', '}', ')', '.', 'get', '(', '"package"', ',', 'None', ')', 'if', 'package', ':', 'regexes', '.', 'append', '(', 'r"\\./{}/"', '.', 'format', '(', 'package', ')', ')', 'api_handler', '=', 'make_handler', '(', 'ctx', '=', 'docs_c', ',', 'task_', '=', 'docs', '[', '"build"', ']', ',', 'regexes', '=', 'regexes', ',', 'ignore_regexes', '=', '[', 'r".*/\\..*\\.swp"', ',', 'r"\\./sites/docs/_build"', ']', ',', ')', 'observe', '(', 'www_handler', ',', 'api_handler', ')'] | Watch both doc trees & rebuild them if files change.
This includes e.g. rebuilding the API docs if the source code changes;
rebuilding the WWW docs if the README changes; etc.
Reuses the configuration values ``packaging.package`` or ``tests.package``
(the former winning over the latter if both defined) when determining which
source directory to scan for API doc updates. | ['Watch', 'both', 'doc', 'trees', '&', 'rebuild', 'them', 'if', 'files', 'change', '.'] | train | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/docs.py#L173-L215 |
7,673 | zimeon/iiif | iiif/flask_utils.py | iiif_info_handler | def iiif_info_handler(prefix=None, identifier=None,
config=None, klass=None, auth=None, **args):
"""Handler for IIIF Image Information requests."""
if (not auth or degraded_request(identifier) or auth.info_authz()):
# go ahead with request as made
if (auth):
logging.debug("Authorized for image %s" % identifier)
i = IIIFHandler(prefix, identifier, config, klass, auth)
try:
return i.image_information_response()
except IIIFError as e:
return i.error_response(e)
elif (auth.info_authn()):
# authn but not authz -> 401
abort(401)
else:
# redirect to degraded
response = redirect(host_port_prefix(
config.host, config.port, prefix) + '/' + identifier + '-deg/info.json')
response.headers['Access-control-allow-origin'] = '*'
return response | python | def iiif_info_handler(prefix=None, identifier=None,
config=None, klass=None, auth=None, **args):
"""Handler for IIIF Image Information requests."""
if (not auth or degraded_request(identifier) or auth.info_authz()):
# go ahead with request as made
if (auth):
logging.debug("Authorized for image %s" % identifier)
i = IIIFHandler(prefix, identifier, config, klass, auth)
try:
return i.image_information_response()
except IIIFError as e:
return i.error_response(e)
elif (auth.info_authn()):
# authn but not authz -> 401
abort(401)
else:
# redirect to degraded
response = redirect(host_port_prefix(
config.host, config.port, prefix) + '/' + identifier + '-deg/info.json')
response.headers['Access-control-allow-origin'] = '*'
return response | ['def', 'iiif_info_handler', '(', 'prefix', '=', 'None', ',', 'identifier', '=', 'None', ',', 'config', '=', 'None', ',', 'klass', '=', 'None', ',', 'auth', '=', 'None', ',', '*', '*', 'args', ')', ':', 'if', '(', 'not', 'auth', 'or', 'degraded_request', '(', 'identifier', ')', 'or', 'auth', '.', 'info_authz', '(', ')', ')', ':', '# go ahead with request as made', 'if', '(', 'auth', ')', ':', 'logging', '.', 'debug', '(', '"Authorized for image %s"', '%', 'identifier', ')', 'i', '=', 'IIIFHandler', '(', 'prefix', ',', 'identifier', ',', 'config', ',', 'klass', ',', 'auth', ')', 'try', ':', 'return', 'i', '.', 'image_information_response', '(', ')', 'except', 'IIIFError', 'as', 'e', ':', 'return', 'i', '.', 'error_response', '(', 'e', ')', 'elif', '(', 'auth', '.', 'info_authn', '(', ')', ')', ':', '# authn but not authz -> 401', 'abort', '(', '401', ')', 'else', ':', '# redirect to degraded', 'response', '=', 'redirect', '(', 'host_port_prefix', '(', 'config', '.', 'host', ',', 'config', '.', 'port', ',', 'prefix', ')', '+', "'/'", '+', 'identifier', '+', "'-deg/info.json'", ')', 'response', '.', 'headers', '[', "'Access-control-allow-origin'", ']', '=', "'*'", 'return', 'response'] | Handler for IIIF Image Information requests. | ['Handler', 'for', 'IIIF', 'Image', 'Information', 'requests', '.'] | train | https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L384-L404 |
7,674 | watson-developer-cloud/python-sdk | ibm_watson/discovery_v1.py | ListCollectionFieldsResponse._to_dict | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'fields') and self.fields is not None:
_dict['fields'] = [x._to_dict() for x in self.fields]
return _dict | python | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'fields') and self.fields is not None:
_dict['fields'] = [x._to_dict() for x in self.fields]
return _dict | ['def', '_to_dict', '(', 'self', ')', ':', '_dict', '=', '{', '}', 'if', 'hasattr', '(', 'self', ',', "'fields'", ')', 'and', 'self', '.', 'fields', 'is', 'not', 'None', ':', '_dict', '[', "'fields'", ']', '=', '[', 'x', '.', '_to_dict', '(', ')', 'for', 'x', 'in', 'self', '.', 'fields', ']', 'return', '_dict'] | Return a json dictionary representing this model. | ['Return', 'a', 'json', 'dictionary', 'representing', 'this', 'model', '.'] | train | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/discovery_v1.py#L6650-L6655 |
7,675 | saltstack/salt | salt/modules/boto_datapipeline.py | delete_pipeline | def delete_pipeline(pipeline_id, region=None, key=None, keyid=None, profile=None):
'''
Delete a pipeline, its pipeline definition, and its run history. This function is idempotent.
CLI example:
.. code-block:: bash
salt myminion boto_datapipeline.delete_pipeline my_pipeline_id
'''
client = _get_client(region, key, keyid, profile)
r = {}
try:
client.delete_pipeline(pipelineId=pipeline_id)
r['result'] = True
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
r['error'] = six.text_type(e)
return r | python | def delete_pipeline(pipeline_id, region=None, key=None, keyid=None, profile=None):
'''
Delete a pipeline, its pipeline definition, and its run history. This function is idempotent.
CLI example:
.. code-block:: bash
salt myminion boto_datapipeline.delete_pipeline my_pipeline_id
'''
client = _get_client(region, key, keyid, profile)
r = {}
try:
client.delete_pipeline(pipelineId=pipeline_id)
r['result'] = True
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
r['error'] = six.text_type(e)
return r | ['def', 'delete_pipeline', '(', 'pipeline_id', ',', 'region', '=', 'None', ',', 'key', '=', 'None', ',', 'keyid', '=', 'None', ',', 'profile', '=', 'None', ')', ':', 'client', '=', '_get_client', '(', 'region', ',', 'key', ',', 'keyid', ',', 'profile', ')', 'r', '=', '{', '}', 'try', ':', 'client', '.', 'delete_pipeline', '(', 'pipelineId', '=', 'pipeline_id', ')', 'r', '[', "'result'", ']', '=', 'True', 'except', '(', 'botocore', '.', 'exceptions', '.', 'BotoCoreError', ',', 'botocore', '.', 'exceptions', '.', 'ClientError', ')', 'as', 'e', ':', 'r', '[', "'error'", ']', '=', 'six', '.', 'text_type', '(', 'e', ')', 'return', 'r'] | Delete a pipeline, its pipeline definition, and its run history. This function is idempotent.
CLI example:
.. code-block:: bash
salt myminion boto_datapipeline.delete_pipeline my_pipeline_id | ['Delete', 'a', 'pipeline', 'its', 'pipeline', 'definition', 'and', 'its', 'run', 'history', '.', 'This', 'function', 'is', 'idempotent', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_datapipeline.py#L82-L99 |
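Besides the CLI form in the docstring, the delete_pipeline() entry above returns a plain dict carrying either a result or an error key; a hedged sketch of checking that contract directly, assuming the module can be imported with AWS credentials configured (the pipeline id here is made up)::

    result = delete_pipeline('df-0123456789ABCDEFGHIJ', region='us-east-1')
    if result.get('result'):
        print('pipeline deleted; the call is idempotent, so re-running is safe')
    else:
        print('delete failed:', result.get('error'))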
7,676 | projecthamster/hamster | src/hamster/client.py | Storage.get_facts | def get_facts(self, date, end_date=None, search_terms="", ongoing_days=0):
"""Returns facts for the time span matching the optional filter criteria.
In search terms comma (",") translates to boolean OR and space (" ")
to boolean AND.
Filter is applied to tags, categories, activity names and description
ongoing_days (int): look into the last `ongoing_days` days
for still ongoing activities
"""
facts = []
if ongoing_days:
# look for still ongoing activities
earlier_start = date - dt.timedelta(days=ongoing_days)
earlier_end = date - dt.timedelta(days=1)
earlier_facts = self._get_facts(earlier_start, earlier_end, search_terms=search_terms)
facts.extend(fact for fact in earlier_facts if not fact.end_time)
# add facts between date and end_date
facts.extend(self._get_facts(date, end_date, search_terms=search_terms))
return facts | python | def get_facts(self, date, end_date=None, search_terms="", ongoing_days=0):
"""Returns facts for the time span matching the optional filter criteria.
In search terms comma (",") translates to boolean OR and space (" ")
to boolean AND.
Filter is applied to tags, categories, activity names and description
ongoing_days (int): look into the last `ongoing_days` days
for still ongoing activities
"""
facts = []
if ongoing_days:
# look for still ongoing activities
earlier_start = date - dt.timedelta(days=ongoing_days)
earlier_end = date - dt.timedelta(days=1)
earlier_facts = self._get_facts(earlier_start, earlier_end, search_terms=search_terms)
facts.extend(fact for fact in earlier_facts if not fact.end_time)
# add facts between date and end_date
facts.extend(self._get_facts(date, end_date, search_terms=search_terms))
return facts | ['def', 'get_facts', '(', 'self', ',', 'date', ',', 'end_date', '=', 'None', ',', 'search_terms', '=', '""', ',', 'ongoing_days', '=', '0', ')', ':', 'facts', '=', '[', ']', 'if', 'ongoing_days', ':', '# look for still ongoing activities', 'earlier_start', '=', 'date', '-', 'dt', '.', 'timedelta', '(', 'days', '=', 'ongoing_days', ')', 'earlier_end', '=', 'date', '-', 'dt', '.', 'timedelta', '(', 'days', '=', '1', ')', 'earlier_facts', '=', 'self', '.', '_get_facts', '(', 'earlier_start', ',', 'earlier_end', ',', 'search_terms', '=', 'search_terms', ')', 'facts', '.', 'extend', '(', 'fact', 'for', 'fact', 'in', 'earlier_facts', 'if', 'not', 'fact', '.', 'end_time', ')', '# add facts between date and end_date', 'facts', '.', 'extend', '(', 'self', '.', '_get_facts', '(', 'date', ',', 'end_date', ',', 'search_terms', '=', 'search_terms', ')', ')', 'return', 'facts'] | Returns facts for the time span matching the optional filter criteria.
In search terms comma (",") translates to boolean OR and space (" ")
to boolean AND.
Filter is applied to tags, categories, activity names and description
ongoing_days (int): look into the last `ongoing_days` days
for still ongoing activities | ['Returns', 'facts', 'for', 'the', 'time', 'span', 'matching', 'the', 'optional', 'filter', 'criteria', '.', 'In', 'search', 'terms', 'comma', '(', ')', 'translates', 'to', 'boolean', 'OR', 'and', 'space', '(', ')', 'to', 'boolean', 'AND', '.', 'Filter', 'is', 'applied', 'to', 'tags', 'categories', 'activity', 'names', 'and', 'description', 'ongoing_days', '(', 'int', ')', ':', 'look', 'into', 'the', 'last', 'ongoing_days', 'days', 'for', 'still', 'ongoing', 'activities'] | train | https://github.com/projecthamster/hamster/blob/ca5254eff53172796ddafc72226c394ed1858245/src/hamster/client.py#L121-L138 |
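A usage sketch for the get_facts() entry above, illustrating the comma-as-OR and space-as-AND search semantics; the import path is assumed from the file layout and a running hamster storage back end is required::

    import datetime as dt
    from hamster.client import Storage

    storage = Storage()
    today = dt.date.today()
    # space = AND: facts matching both "work" and "email"
    both = storage.get_facts(today, search_terms="work email")
    # comma = OR: facts matching "meeting" or "call", also picking up
    # activities still running that started up to two days ago
    either = storage.get_facts(today, search_terms="meeting,call", ongoing_days=2)
    for fact in either:
        # end_time is the attribute checked in the code above; None while ongoing
        print(fact.end_time)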
7,677 | brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor.py | brocade_system_monitor.system_monitor_LineCard_alert_state | def system_monitor_LineCard_alert_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
system_monitor = ET.SubElement(config, "system-monitor", xmlns="urn:brocade.com:mgmt:brocade-system-monitor")
LineCard = ET.SubElement(system_monitor, "LineCard")
alert = ET.SubElement(LineCard, "alert")
state = ET.SubElement(alert, "state")
state.text = kwargs.pop('state')
callback = kwargs.pop('callback', self._callback)
return callback(config) | python | def system_monitor_LineCard_alert_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
system_monitor = ET.SubElement(config, "system-monitor", xmlns="urn:brocade.com:mgmt:brocade-system-monitor")
LineCard = ET.SubElement(system_monitor, "LineCard")
alert = ET.SubElement(LineCard, "alert")
state = ET.SubElement(alert, "state")
state.text = kwargs.pop('state')
callback = kwargs.pop('callback', self._callback)
return callback(config) | ['def', 'system_monitor_LineCard_alert_state', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'system_monitor', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"system-monitor"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-system-monitor"', ')', 'LineCard', '=', 'ET', '.', 'SubElement', '(', 'system_monitor', ',', '"LineCard"', ')', 'alert', '=', 'ET', '.', 'SubElement', '(', 'LineCard', ',', '"alert"', ')', 'state', '=', 'ET', '.', 'SubElement', '(', 'alert', ',', '"state"', ')', 'state', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'state'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')'] | Auto Generated Code | ['Auto', 'Generated', 'Code'] | train | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor.py#L298-L309 |
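The system_monitor_LineCard_alert_state() entry above only assembles an XML config document and hands it to a callback; a standalone ElementTree sketch of the same payload shape, with a made-up state value::

    import xml.etree.ElementTree as ET

    config = ET.Element("config")
    system_monitor = ET.SubElement(
        config, "system-monitor",
        xmlns="urn:brocade.com:mgmt:brocade-system-monitor")
    line_card = ET.SubElement(system_monitor, "LineCard")
    alert = ET.SubElement(line_card, "alert")
    state = ET.SubElement(alert, "state")
    state.text = "enable"    # hypothetical alert state
    print(ET.tostring(config).decode())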
7,678 | dbarsam/python-vsgen | vsgen/util/logger.py | VSGLogger._registerHandler | def _registerHandler(self, handler):
"""
Registers a handler.
:param handler: A handler object.
"""
self._logger.addHandler(handler)
self._handlers.append(handler) | python | def _registerHandler(self, handler):
"""
Registers a handler.
:param handler: A handler object.
"""
self._logger.addHandler(handler)
self._handlers.append(handler) | ['def', '_registerHandler', '(', 'self', ',', 'handler', ')', ':', 'self', '.', '_logger', '.', 'addHandler', '(', 'handler', ')', 'self', '.', '_handlers', '.', 'append', '(', 'handler', ')'] | Registers a handler.
:param handler: A handler object. | ['Registers', 'a', 'handler', '.'] | train | https://github.com/dbarsam/python-vsgen/blob/640191bb018a1ff7d7b7a4982e0d3c1a423ba878/vsgen/util/logger.py#L80-L87 |
7,679 | SwissDataScienceCenter/renku-python | renku/models/_jsonld.py | JSONLDMixin.from_jsonld | def from_jsonld(cls, data, __reference__=None, __source__=None):
"""Instantiate a JSON-LD class from data."""
if isinstance(data, cls):
return data
if not isinstance(data, dict):
raise ValueError(data)
if '@type' in data:
type_ = tuple(sorted(data['@type']))
if type_ in cls.__type_registry__ and getattr(
cls, '_jsonld_type', None
) != type_:
new_cls = cls.__type_registry__[type_]
if cls != new_cls:
return new_cls.from_jsonld(data)
if cls._jsonld_translate:
data = ld.compact(data, {'@context': cls._jsonld_translate})
data.pop('@context', None)
data.setdefault('@context', cls._jsonld_context)
if data['@context'] != cls._jsonld_context:
compacted = ld.compact(data, {'@context': cls._jsonld_context})
else:
compacted = data
# assert compacted['@type'] == cls._jsonld_type, '@type must be equal'
# TODO update self(not cls)._jsonld_context with data['@context']
fields = cls._jsonld_fields
if __reference__:
with with_reference(__reference__):
self = cls(
**{
k.lstrip('_'): v
for k, v in compacted.items() if k in fields
}
)
else:
self = cls(
**{
k.lstrip('_'): v
for k, v in compacted.items() if k in fields
}
)
if __source__:
setattr(self, '__source__', __source__)
return self | python | def from_jsonld(cls, data, __reference__=None, __source__=None):
"""Instantiate a JSON-LD class from data."""
if isinstance(data, cls):
return data
if not isinstance(data, dict):
raise ValueError(data)
if '@type' in data:
type_ = tuple(sorted(data['@type']))
if type_ in cls.__type_registry__ and getattr(
cls, '_jsonld_type', None
) != type_:
new_cls = cls.__type_registry__[type_]
if cls != new_cls:
return new_cls.from_jsonld(data)
if cls._jsonld_translate:
data = ld.compact(data, {'@context': cls._jsonld_translate})
data.pop('@context', None)
data.setdefault('@context', cls._jsonld_context)
if data['@context'] != cls._jsonld_context:
compacted = ld.compact(data, {'@context': cls._jsonld_context})
else:
compacted = data
# assert compacted['@type'] == cls._jsonld_type, '@type must be equal'
# TODO update self(not cls)._jsonld_context with data['@context']
fields = cls._jsonld_fields
if __reference__:
with with_reference(__reference__):
self = cls(
**{
k.lstrip('_'): v
for k, v in compacted.items() if k in fields
}
)
else:
self = cls(
**{
k.lstrip('_'): v
for k, v in compacted.items() if k in fields
}
)
if __source__:
setattr(self, '__source__', __source__)
return self | ['def', 'from_jsonld', '(', 'cls', ',', 'data', ',', '__reference__', '=', 'None', ',', '__source__', '=', 'None', ')', ':', 'if', 'isinstance', '(', 'data', ',', 'cls', ')', ':', 'return', 'data', 'if', 'not', 'isinstance', '(', 'data', ',', 'dict', ')', ':', 'raise', 'ValueError', '(', 'data', ')', 'if', "'@type'", 'in', 'data', ':', 'type_', '=', 'tuple', '(', 'sorted', '(', 'data', '[', "'@type'", ']', ')', ')', 'if', 'type_', 'in', 'cls', '.', '__type_registry__', 'and', 'getattr', '(', 'cls', ',', "'_jsonld_type'", ',', 'None', ')', '!=', 'type_', ':', 'new_cls', '=', 'cls', '.', '__type_registry__', '[', 'type_', ']', 'if', 'cls', '!=', 'new_cls', ':', 'return', 'new_cls', '.', 'from_jsonld', '(', 'data', ')', 'if', 'cls', '.', '_jsonld_translate', ':', 'data', '=', 'ld', '.', 'compact', '(', 'data', ',', '{', "'@context'", ':', 'cls', '.', '_jsonld_translate', '}', ')', 'data', '.', 'pop', '(', "'@context'", ',', 'None', ')', 'data', '.', 'setdefault', '(', "'@context'", ',', 'cls', '.', '_jsonld_context', ')', 'if', 'data', '[', "'@context'", ']', '!=', 'cls', '.', '_jsonld_context', ':', 'compacted', '=', 'ld', '.', 'compact', '(', 'data', ',', '{', "'@context'", ':', 'cls', '.', '_jsonld_context', '}', ')', 'else', ':', 'compacted', '=', 'data', "# assert compacted['@type'] == cls._jsonld_type, '@type must be equal'", "# TODO update self(not cls)._jsonld_context with data['@context']", 'fields', '=', 'cls', '.', '_jsonld_fields', 'if', '__reference__', ':', 'with', 'with_reference', '(', '__reference__', ')', ':', 'self', '=', 'cls', '(', '*', '*', '{', 'k', '.', 'lstrip', '(', "'_'", ')', ':', 'v', 'for', 'k', ',', 'v', 'in', 'compacted', '.', 'items', '(', ')', 'if', 'k', 'in', 'fields', '}', ')', 'else', ':', 'self', '=', 'cls', '(', '*', '*', '{', 'k', '.', 'lstrip', '(', "'_'", ')', ':', 'v', 'for', 'k', ',', 'v', 'in', 'compacted', '.', 'items', '(', ')', 'if', 'k', 'in', 'fields', '}', ')', 'if', '__source__', ':', 'setattr', '(', 'self', ',', "'__source__'", ',', '__source__', ')', 'return', 'self'] | Instantiate a JSON-LD class from data. | ['Instantiate', 'a', 'JSON', '-', 'LD', 'class', 'from', 'data', '.'] | train | https://github.com/SwissDataScienceCenter/renku-python/blob/691644d695b055a01e0ca22b2620e55bbd928c0d/renku/models/_jsonld.py#L304-L353 |
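The from_jsonld() entry above leans on JSON-LD compaction to map incoming documents onto a class context; for orientation, a standalone pyld sketch of that compaction step with a made-up vocabulary (this is not renku's own context)::

    from pyld import jsonld as ld

    doc = {
        '@context': {'name': 'http://schema.org/name'},
        '@type': ['http://schema.org/Person'],
        'name': 'Ada',
    }
    class_context = {'name': 'http://schema.org/name'}
    # Compacting against the class context is the step from_jsonld() relies on.
    compacted = ld.compact(doc, {'@context': class_context})
    print(compacted['name'])   # 'Ada', now keyed by the class context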
7,680 | sdispater/eloquent | eloquent/database_manager.py | BaseDatabaseManager.purge | def purge(self, name=None):
"""
Disconnect from the given database and remove from local cache
:param name: The name of the connection
:type name: str
:rtype: None
"""
self.disconnect(name)
if name in self._connections:
del self._connections[name] | python | def purge(self, name=None):
"""
Disconnect from the given database and remove from local cache
:param name: The name of the connection
:type name: str
:rtype: None
"""
self.disconnect(name)
if name in self._connections:
del self._connections[name] | ['def', 'purge', '(', 'self', ',', 'name', '=', 'None', ')', ':', 'self', '.', 'disconnect', '(', 'name', ')', 'if', 'name', 'in', 'self', '.', '_connections', ':', 'del', 'self', '.', '_connections', '[', 'name', ']'] | Disconnect from the given database and remove from local cache
:param name: The name of the connection
:type name: str
:rtype: None | ['Disconnect', 'from', 'the', 'given', 'database', 'and', 'remove', 'from', 'local', 'cache'] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/database_manager.py#L65-L77 |
7,681 | google/grr | grr/client_builder/grr_response_client_builder/build.py | ClientBuilder.MakeZip | def MakeZip(self, input_dir, output_file):
"""Creates a ZIP archive of the files in the input directory.
Args:
input_dir: the name of the input directory.
output_file: the name of the output ZIP archive without extension.
"""
logging.info("Generating zip template file at %s", output_file)
basename, _ = os.path.splitext(output_file)
# TODO(user):pytype: incorrect make_archive() definition in typeshed.
# pytype: disable=wrong-arg-types
shutil.make_archive(
basename, "zip", base_dir=".", root_dir=input_dir, verbose=True) | python | def MakeZip(self, input_dir, output_file):
"""Creates a ZIP archive of the files in the input directory.
Args:
input_dir: the name of the input directory.
output_file: the name of the output ZIP archive without extension.
"""
logging.info("Generating zip template file at %s", output_file)
basename, _ = os.path.splitext(output_file)
# TODO(user):pytype: incorrect make_archive() definition in typeshed.
# pytype: disable=wrong-arg-types
shutil.make_archive(
basename, "zip", base_dir=".", root_dir=input_dir, verbose=True) | ['def', 'MakeZip', '(', 'self', ',', 'input_dir', ',', 'output_file', ')', ':', 'logging', '.', 'info', '(', '"Generating zip template file at %s"', ',', 'output_file', ')', 'basename', ',', '_', '=', 'os', '.', 'path', '.', 'splitext', '(', 'output_file', ')', '# TODO(user):pytype: incorrect make_archive() definition in typeshed.', '# pytype: disable=wrong-arg-types', 'shutil', '.', 'make_archive', '(', 'basename', ',', '"zip"', ',', 'base_dir', '=', '"."', ',', 'root_dir', '=', 'input_dir', ',', 'verbose', '=', 'True', ')'] | Creates a ZIP archive of the files in the input directory.
Args:
input_dir: the name of the input directory.
output_file: the name of the output ZIP archive without extension. | ['Creates', 'a', 'ZIP', 'archive', 'of', 'the', 'files', 'in', 'the', 'input', 'directory', '.'] | train | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client_builder/grr_response_client_builder/build.py#L264-L276 |
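MakeZip() above is a thin wrapper over shutil.make_archive; a self-contained sketch of the same call, using throwaway directories and showing the strip-the-extension convention the method relies on::

    import os
    import shutil
    import tempfile

    input_dir = tempfile.mkdtemp()
    with open(os.path.join(input_dir, "hello.txt"), "w") as fh:
        fh.write("hello")
    output_file = os.path.join(tempfile.mkdtemp(), "template.zip")
    # make_archive wants the archive name *without* the .zip extension
    basename, _ = os.path.splitext(output_file)
    shutil.make_archive(basename, "zip", base_dir=".", root_dir=input_dir)
    print(os.path.exists(output_file))   # True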
7,682 | opencobra/memote | memote/suite/cli/callbacks.py | probe_git | def probe_git():
"""Return a git repository instance if it exists."""
try:
repo = git.Repo()
except git.InvalidGitRepositoryError:
LOGGER.warning(
"We highly recommend keeping your model in a git repository."
" It allows you to track changes and to easily collaborate with"
" others via online platforms such as https://github.com.\n")
return
if repo.is_dirty():
LOGGER.critical(
"Please git commit or git stash all changes before running"
" the memote suite.")
sys.exit(1)
return repo | python | def probe_git():
"""Return a git repository instance if it exists."""
try:
repo = git.Repo()
except git.InvalidGitRepositoryError:
LOGGER.warning(
"We highly recommend keeping your model in a git repository."
" It allows you to track changes and to easily collaborate with"
" others via online platforms such as https://github.com.\n")
return
if repo.is_dirty():
LOGGER.critical(
"Please git commit or git stash all changes before running"
" the memote suite.")
sys.exit(1)
return repo | ['def', 'probe_git', '(', ')', ':', 'try', ':', 'repo', '=', 'git', '.', 'Repo', '(', ')', 'except', 'git', '.', 'InvalidGitRepositoryError', ':', 'LOGGER', '.', 'warning', '(', '"We highly recommend keeping your model in a git repository."', '" It allows you to track changes and to easily collaborate with"', '" others via online platforms such as https://github.com.\\n"', ')', 'return', 'if', 'repo', '.', 'is_dirty', '(', ')', ':', 'LOGGER', '.', 'critical', '(', '"Please git commit or git stash all changes before running"', '" the memote suite."', ')', 'sys', '.', 'exit', '(', '1', ')', 'return', 'repo'] | Return a git repository instance if it exists. | ['Return', 'a', 'git', 'repository', 'instance', 'if', 'it', 'exists', '.'] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/cli/callbacks.py#L79-L94 |
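probe_git() above wraps two GitPython calls; a minimal sketch of the same checks, runnable from any directory (it prints instead of exiting)::

    import git

    try:
        repo = git.Repo()   # the current working directory
    except git.InvalidGitRepositoryError:
        print("not inside a git repository")
    else:
        print("working tree dirty:", repo.is_dirty())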
7,683 | mozilla-releng/scriptworker | scriptworker/artifacts.py | get_upstream_artifacts_full_paths_per_task_id | def get_upstream_artifacts_full_paths_per_task_id(context):
"""List the downloaded upstream artifacts.
Args:
context (scriptworker.context.Context): the scriptworker context.
Returns:
dict, dict: lists of the paths to upstream artifacts, sorted by task_id.
First dict represents the existing upstream artifacts. The second one
maps the optional artifacts that couldn't be downloaded
Raises:
scriptworker.exceptions.ScriptWorkerTaskException: when an artifact doesn't exist.
"""
upstream_artifacts = context.task['payload']['upstreamArtifacts']
task_ids_and_relative_paths = [
(artifact_definition['taskId'], artifact_definition['paths'])
for artifact_definition in upstream_artifacts
]
optional_artifacts_per_task_id = get_optional_artifacts_per_task_id(upstream_artifacts)
upstream_artifacts_full_paths_per_task_id = {}
failed_paths_per_task_id = {}
for task_id, paths in task_ids_and_relative_paths:
for path in paths:
try:
path_to_add = get_and_check_single_upstream_artifact_full_path(context, task_id, path)
add_enumerable_item_to_dict(
dict_=upstream_artifacts_full_paths_per_task_id,
key=task_id, item=path_to_add
)
except ScriptWorkerTaskException:
if path in optional_artifacts_per_task_id.get(task_id, []):
log.warning('Optional artifact "{}" of task "{}" not found'.format(path, task_id))
add_enumerable_item_to_dict(
dict_=failed_paths_per_task_id,
key=task_id, item=path
)
else:
raise
return upstream_artifacts_full_paths_per_task_id, failed_paths_per_task_id | python | def get_upstream_artifacts_full_paths_per_task_id(context):
"""List the downloaded upstream artifacts.
Args:
context (scriptworker.context.Context): the scriptworker context.
Returns:
dict, dict: lists of the paths to upstream artifacts, sorted by task_id.
First dict represents the existing upstream artifacts. The second one
maps the optional artifacts that couldn't be downloaded
Raises:
scriptworker.exceptions.ScriptWorkerTaskException: when an artifact doesn't exist.
"""
upstream_artifacts = context.task['payload']['upstreamArtifacts']
task_ids_and_relative_paths = [
(artifact_definition['taskId'], artifact_definition['paths'])
for artifact_definition in upstream_artifacts
]
optional_artifacts_per_task_id = get_optional_artifacts_per_task_id(upstream_artifacts)
upstream_artifacts_full_paths_per_task_id = {}
failed_paths_per_task_id = {}
for task_id, paths in task_ids_and_relative_paths:
for path in paths:
try:
path_to_add = get_and_check_single_upstream_artifact_full_path(context, task_id, path)
add_enumerable_item_to_dict(
dict_=upstream_artifacts_full_paths_per_task_id,
key=task_id, item=path_to_add
)
except ScriptWorkerTaskException:
if path in optional_artifacts_per_task_id.get(task_id, []):
log.warning('Optional artifact "{}" of task "{}" not found'.format(path, task_id))
add_enumerable_item_to_dict(
dict_=failed_paths_per_task_id,
key=task_id, item=path
)
else:
raise
return upstream_artifacts_full_paths_per_task_id, failed_paths_per_task_id | ['def', 'get_upstream_artifacts_full_paths_per_task_id', '(', 'context', ')', ':', 'upstream_artifacts', '=', 'context', '.', 'task', '[', "'payload'", ']', '[', "'upstreamArtifacts'", ']', 'task_ids_and_relative_paths', '=', '[', '(', 'artifact_definition', '[', "'taskId'", ']', ',', 'artifact_definition', '[', "'paths'", ']', ')', 'for', 'artifact_definition', 'in', 'upstream_artifacts', ']', 'optional_artifacts_per_task_id', '=', 'get_optional_artifacts_per_task_id', '(', 'upstream_artifacts', ')', 'upstream_artifacts_full_paths_per_task_id', '=', '{', '}', 'failed_paths_per_task_id', '=', '{', '}', 'for', 'task_id', ',', 'paths', 'in', 'task_ids_and_relative_paths', ':', 'for', 'path', 'in', 'paths', ':', 'try', ':', 'path_to_add', '=', 'get_and_check_single_upstream_artifact_full_path', '(', 'context', ',', 'task_id', ',', 'path', ')', 'add_enumerable_item_to_dict', '(', 'dict_', '=', 'upstream_artifacts_full_paths_per_task_id', ',', 'key', '=', 'task_id', ',', 'item', '=', 'path_to_add', ')', 'except', 'ScriptWorkerTaskException', ':', 'if', 'path', 'in', 'optional_artifacts_per_task_id', '.', 'get', '(', 'task_id', ',', '[', ']', ')', ':', 'log', '.', 'warning', '(', '\'Optional artifact "{}" of task "{}" not found\'', '.', 'format', '(', 'path', ',', 'task_id', ')', ')', 'add_enumerable_item_to_dict', '(', 'dict_', '=', 'failed_paths_per_task_id', ',', 'key', '=', 'task_id', ',', 'item', '=', 'path', ')', 'else', ':', 'raise', 'return', 'upstream_artifacts_full_paths_per_task_id', ',', 'failed_paths_per_task_id'] | List the downloaded upstream artifacts.
Args:
context (scriptworker.context.Context): the scriptworker context.
Returns:
dict, dict: lists of the paths to upstream artifacts, sorted by task_id.
First dict represents the existing upstream artifacts. The second one
maps the optional artifacts that couldn't be downloaded
Raises:
scriptworker.exceptions.ScriptWorkerTaskException: when an artifact doesn't exist. | ['List', 'the', 'downloaded', 'upstream', 'artifacts', '.'] | train | https://github.com/mozilla-releng/scriptworker/blob/8e97bbd83b9b578565ec57904c966dd6ae4ef0ae/scriptworker/artifacts.py#L312-L355 |
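The function above walks task['payload']['upstreamArtifacts']; a sketch of the payload shape it expects, with made-up task ids and paths (the optional flag is assumed to be the marker behind get_optional_artifacts_per_task_id)::

    # Hypothetical payload fragment shaped like what the function iterates over.
    payload = {
        "upstreamArtifacts": [
            {
                "taskId": "UPSTREAM_TASK_ID_1",
                "paths": ["public/build/target.tar.gz"],
            },
            {
                "taskId": "UPSTREAM_TASK_ID_2",
                "paths": ["public/build/extra.xpi"],
                "optional": True,   # assumed: missing optional paths are only warned about
            },
        ]
    }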
7,684 | obriencj/python-javatools | javatools/__init__.py | JavaMemberInfo.get_annotationdefault | def get_annotationdefault(self):
"""
The AnnotationDefault attribute, only present upon fields in an
annotaion.
reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.20
""" # noqa
buff = self.get_attribute("AnnotationDefault")
if buff is None:
return None
with unpack(buff) as up:
(ti, ) = up.unpack_struct(_H)
return ti | python | def get_annotationdefault(self):
"""
The AnnotationDefault attribute, only present upon fields in an
annotation.
reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.20
""" # noqa
buff = self.get_attribute("AnnotationDefault")
if buff is None:
return None
with unpack(buff) as up:
(ti, ) = up.unpack_struct(_H)
return ti | ['def', 'get_annotationdefault', '(', 'self', ')', ':', '# noqa', 'buff', '=', 'self', '.', 'get_attribute', '(', '"AnnotationDefault"', ')', 'if', 'buff', 'is', 'None', ':', 'return', 'None', 'with', 'unpack', '(', 'buff', ')', 'as', 'up', ':', '(', 'ti', ',', ')', '=', 'up', '.', 'unpack_struct', '(', '_H', ')', 'return', 'ti'] | The AnnotationDefault attribute, only present upon fields in an
annotation.
reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.20 | ['The', 'AnnotationDefault', 'attribute', 'only', 'present', 'upon', 'fields', 'in', 'an', 'annotaion', '.'] | train | https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/__init__.py#L1270-L1285 |
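get_annotationdefault() above just reads a single constant-pool index out of the attribute bytes; assuming _H stands for a big-endian unsigned short (the class-file u2, which the JVM spec mandates), the unpack step amounts to::

    import struct

    buff = b'\x00\x2a'                   # hypothetical AnnotationDefault payload
    (ti,) = struct.unpack('>H', buff)    # big-endian u2 index, as in the class file format
    print(ti)                            # 42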
7,685 | angr/angr | angr/state_plugins/javavm_memory.py | SimJavaVmMemory.store_array_elements | def store_array_elements(self, array, start_idx, data):
"""
Stores either a single element or a range of elements in the array.
:param array: Reference to the array.
:param start_idx: Starting index for the store.
:param data: Either a single value or a list of values.
"""
# we process data as a list of elements
# => if there is only a single element, wrap it in a list
data = data if isinstance(data, list) else [data]
# concretize start index
concrete_start_idxes = self.concretize_store_idx(start_idx)
if len(concrete_start_idxes) == 1:
# only one start index
# => concrete store
concrete_start_idx = concrete_start_idxes[0]
for i, value in enumerate(data):
self._store_array_element_on_heap(array=array,
idx=concrete_start_idx+i,
value=value,
value_type=array.element_type)
# if the index was symbolic before concretization, this
# constrains it to the concrete start idx
self.state.solver.add(concrete_start_idx == start_idx)
else:
# multiple start indexes
# => symbolic store
start_idx_options = []
for concrete_start_idx in concrete_start_idxes:
start_idx_options.append(concrete_start_idx == start_idx)
# we store elements conditioned on the start index:
# => if concrete_start_idx == start_idx
# then store the value
# else keep the current value
for i, value in enumerate(data):
self._store_array_element_on_heap(array=array,
idx=concrete_start_idx+i,
value=value,
value_type=array.element_type,
store_condition=start_idx_options[-1])
# constrain start_idx, s.t. it evals to one of the concretized indexes
constraint_on_start_idx = self.state.solver.Or(*start_idx_options)
self.state.add_constraints(constraint_on_start_idx) | python | def store_array_elements(self, array, start_idx, data):
"""
Stores either a single element or a range of elements in the array.
:param array: Reference to the array.
:param start_idx: Starting index for the store.
:param data: Either a single value or a list of values.
"""
# we process data as a list of elements
# => if there is only a single element, wrap it in a list
data = data if isinstance(data, list) else [data]
# concretize start index
concrete_start_idxes = self.concretize_store_idx(start_idx)
if len(concrete_start_idxes) == 1:
# only one start index
# => concrete store
concrete_start_idx = concrete_start_idxes[0]
for i, value in enumerate(data):
self._store_array_element_on_heap(array=array,
idx=concrete_start_idx+i,
value=value,
value_type=array.element_type)
# if the index was symbolic before concretization, this
# constrains it to the concrete start idx
self.state.solver.add(concrete_start_idx == start_idx)
else:
# multiple start indexes
# => symbolic store
start_idx_options = []
for concrete_start_idx in concrete_start_idxes:
start_idx_options.append(concrete_start_idx == start_idx)
# we store elements conditioned on the start index:
# => if concrete_start_idx == start_idx
# then store the value
# else keep the current value
for i, value in enumerate(data):
self._store_array_element_on_heap(array=array,
idx=concrete_start_idx+i,
value=value,
value_type=array.element_type,
store_condition=start_idx_options[-1])
# constrain start_idx, s.t. it evals to one of the concretized indexes
constraint_on_start_idx = self.state.solver.Or(*start_idx_options)
self.state.add_constraints(constraint_on_start_idx) | ['def', 'store_array_elements', '(', 'self', ',', 'array', ',', 'start_idx', ',', 'data', ')', ':', '# we process data as a list of elements', '# => if there is only a single element, wrap it in a list', 'data', '=', 'data', 'if', 'isinstance', '(', 'data', ',', 'list', ')', 'else', '[', 'data', ']', '# concretize start index', 'concrete_start_idxes', '=', 'self', '.', 'concretize_store_idx', '(', 'start_idx', ')', 'if', 'len', '(', 'concrete_start_idxes', ')', '==', '1', ':', '# only one start index', '# => concrete store', 'concrete_start_idx', '=', 'concrete_start_idxes', '[', '0', ']', 'for', 'i', ',', 'value', 'in', 'enumerate', '(', 'data', ')', ':', 'self', '.', '_store_array_element_on_heap', '(', 'array', '=', 'array', ',', 'idx', '=', 'concrete_start_idx', '+', 'i', ',', 'value', '=', 'value', ',', 'value_type', '=', 'array', '.', 'element_type', ')', '# if the index was symbolic before concretization, this', '# constraint it to concrete start idx', 'self', '.', 'state', '.', 'solver', '.', 'add', '(', 'concrete_start_idx', '==', 'start_idx', ')', 'else', ':', '# multiple start indexes', '# => symbolic store', 'start_idx_options', '=', '[', ']', 'for', 'concrete_start_idx', 'in', 'concrete_start_idxes', ':', 'start_idx_options', '.', 'append', '(', 'concrete_start_idx', '==', 'start_idx', ')', '# we store elements condtioned with the start index:', '# => if concrete_start_idx == start_idx', '# then store the value', '# else keep the current value', 'for', 'i', ',', 'value', 'in', 'enumerate', '(', 'data', ')', ':', 'self', '.', '_store_array_element_on_heap', '(', 'array', '=', 'array', ',', 'idx', '=', 'concrete_start_idx', '+', 'i', ',', 'value', '=', 'value', ',', 'value_type', '=', 'array', '.', 'element_type', ',', 'store_condition', '=', 'start_idx_options', '[', '-', '1', ']', ')', '# constraint start_idx, s.t. it evals to one of the concretized indexes', 'constraint_on_start_idx', '=', 'self', '.', 'state', '.', 'solver', '.', 'Or', '(', '*', 'start_idx_options', ')', 'self', '.', 'state', '.', 'add_constraints', '(', 'constraint_on_start_idx', ')'] | Stores either a single element or a range of elements in the array.
:param array: Reference to the array.
:param start_idx: Starting index for the store.
:param data: Either a single value or a list of values. | ['Stores', 'either', 'a', 'single', 'element', 'or', 'a', 'range', 'of', 'elements', 'in', 'the', 'array', '.'] | train | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/javavm_memory.py#L133-L180 |
7,686 | supercoderz/pyflightdata | pyflightdata/flightdata.py | FlightData.get_airport_weather | def get_airport_weather(self, iata, page=1, limit=100):
"""Retrieve the weather at an airport
Given the IATA code of an airport, this method returns the weather information.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_weather('HYD')
f.get_airport_weather('HYD',page=1,limit=10)
"""
url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)
weather = self._fr24.get_airport_weather(url)
mi = weather['sky']['visibility']['mi']
if (mi is not None) and (mi != "None"):
mi = float(mi)
km = mi * 1.6094
weather['sky']['visibility']['km'] = km
return weather | python | def get_airport_weather(self, iata, page=1, limit=100):
"""Retrieve the weather at an airport
Given the IATA code of an airport, this method returns the weather information.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_weather('HYD')
f.get_airport_weather('HYD',page=1,limit=10)
"""
url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)
weather = self._fr24.get_airport_weather(url)
mi = weather['sky']['visibility']['mi']
if (mi is not None) and (mi != "None"):
mi = float(mi)
km = mi * 1.6094
weather['sky']['visibility']['km'] = km
return weather | ['def', 'get_airport_weather', '(', 'self', ',', 'iata', ',', 'page', '=', '1', ',', 'limit', '=', '100', ')', ':', 'url', '=', 'AIRPORT_DATA_BASE', '.', 'format', '(', 'iata', ',', 'str', '(', 'self', '.', 'AUTH_TOKEN', ')', ',', 'page', ',', 'limit', ')', 'weather', '=', 'self', '.', '_fr24', '.', 'get_airport_weather', '(', 'url', ')', 'mi', '=', 'weather', '[', "'sky'", ']', '[', "'visibility'", ']', '[', "'mi'", ']', 'if', '(', 'mi', 'is', 'not', 'None', ')', 'and', '(', 'mi', '!=', '"None"', ')', ':', 'mi', '=', 'float', '(', 'mi', ')', 'km', '=', 'mi', '*', '1.6094', 'weather', '[', "'sky'", ']', '[', "'visibility'", ']', '[', "'km'", ']', '=', 'km', 'return', 'weather'] | Retrieve the weather at an airport
Given the IATA code of an airport, this method returns the weather information.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_weather('HYD')
f.get_airport_weather('HYD',page=1,limit=10) | ['Retrieve', 'the', 'weather', 'at', 'an', 'airport'] | train | https://github.com/supercoderz/pyflightdata/blob/2caf9f429288f9a171893d1b8377d0c6244541cc/pyflightdata/flightdata.py#L241-L271 |
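The get_airport_weather() entry above post-processes visibility by converting statute miles to kilometres with the 1.6094 factor seen in the code; the conversion itself is simply::

    mi = 6.0                 # visibility reported in statute miles
    km = mi * 1.6094         # factor used above (1.609344 is the exact value)
    print(round(km, 2))      # 9.66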
7,687 | tensorflow/tensor2tensor | tensor2tensor/layers/common_layers.py | apply_norm | def apply_norm(x, norm_type, depth, epsilon, layer_collection=None):
"""Apply Normalization."""
if layer_collection is not None:
assert norm_type == "layer"
if norm_type == "layer":
return layer_norm(
x, filters=depth, epsilon=epsilon, layer_collection=layer_collection)
if norm_type == "group":
return group_norm(x, filters=depth, epsilon=epsilon)
if norm_type == "batch":
return layers().BatchNormalization(epsilon=epsilon)(x)
if norm_type == "noam":
return noam_norm(x, epsilon)
if norm_type == "l2":
return l2_norm(x, filters=depth, epsilon=epsilon)
if norm_type == "none":
return x
raise ValueError("Parameter normalizer_fn must be one of: 'layer', 'batch',"
"'noam', 'lr', 'none'.") | python | def apply_norm(x, norm_type, depth, epsilon, layer_collection=None):
"""Apply Normalization."""
if layer_collection is not None:
assert norm_type == "layer"
if norm_type == "layer":
return layer_norm(
x, filters=depth, epsilon=epsilon, layer_collection=layer_collection)
if norm_type == "group":
return group_norm(x, filters=depth, epsilon=epsilon)
if norm_type == "batch":
return layers().BatchNormalization(epsilon=epsilon)(x)
if norm_type == "noam":
return noam_norm(x, epsilon)
if norm_type == "l2":
return l2_norm(x, filters=depth, epsilon=epsilon)
if norm_type == "none":
return x
raise ValueError("Parameter normalizer_fn must be one of: 'layer', 'batch',"
"'noam', 'lr', 'none'.") | ['def', 'apply_norm', '(', 'x', ',', 'norm_type', ',', 'depth', ',', 'epsilon', ',', 'layer_collection', '=', 'None', ')', ':', 'if', 'layer_collection', 'is', 'not', 'None', ':', 'assert', 'norm_type', '==', '"layer"', 'if', 'norm_type', '==', '"layer"', ':', 'return', 'layer_norm', '(', 'x', ',', 'filters', '=', 'depth', ',', 'epsilon', '=', 'epsilon', ',', 'layer_collection', '=', 'layer_collection', ')', 'if', 'norm_type', '==', '"group"', ':', 'return', 'group_norm', '(', 'x', ',', 'filters', '=', 'depth', ',', 'epsilon', '=', 'epsilon', ')', 'if', 'norm_type', '==', '"batch"', ':', 'return', 'layers', '(', ')', '.', 'BatchNormalization', '(', 'epsilon', '=', 'epsilon', ')', '(', 'x', ')', 'if', 'norm_type', '==', '"noam"', ':', 'return', 'noam_norm', '(', 'x', ',', 'epsilon', ')', 'if', 'norm_type', '==', '"l2"', ':', 'return', 'l2_norm', '(', 'x', ',', 'filters', '=', 'depth', ',', 'epsilon', '=', 'epsilon', ')', 'if', 'norm_type', '==', '"none"', ':', 'return', 'x', 'raise', 'ValueError', '(', '"Parameter normalizer_fn must be one of: \'layer\', \'batch\',"', '"\'noam\', \'lr\', \'none\'."', ')'] | Apply Normalization. | ['Apply', 'Normalization', '.'] | train | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L781-L799 |
7,688 | twisted/mantissa | xmantissa/sharing.py | getAuthenticatedRole | def getAuthenticatedRole(store):
"""
Get the base 'Authenticated' role for this store, which is the role that is
given to every user who is explicitly identified by a non-anonymous
username.
"""
def tx():
def addToEveryone(newAuthenticatedRole):
newAuthenticatedRole.becomeMemberOf(getEveryoneRole(store))
return newAuthenticatedRole
return store.findOrCreate(Role, addToEveryone, externalID=u'Authenticated')
return store.transact(tx) | python | def getAuthenticatedRole(store):
"""
Get the base 'Authenticated' role for this store, which is the role that is
given to every user who is explicitly identified by a non-anonymous
username.
"""
def tx():
def addToEveryone(newAuthenticatedRole):
newAuthenticatedRole.becomeMemberOf(getEveryoneRole(store))
return newAuthenticatedRole
return store.findOrCreate(Role, addToEveryone, externalID=u'Authenticated')
return store.transact(tx) | ['def', 'getAuthenticatedRole', '(', 'store', ')', ':', 'def', 'tx', '(', ')', ':', 'def', 'addToEveryone', '(', 'newAuthenticatedRole', ')', ':', 'newAuthenticatedRole', '.', 'becomeMemberOf', '(', 'getEveryoneRole', '(', 'store', ')', ')', 'return', 'newAuthenticatedRole', 'return', 'store', '.', 'findOrCreate', '(', 'Role', ',', 'addToEveryone', ',', 'externalID', '=', "u'Authenticated'", ')', 'return', 'store', '.', 'transact', '(', 'tx', ')'] | Get the base 'Authenticated' role for this store, which is the role that is
given to every user who is explicitly identified by a non-anonymous
username. | ['Get', 'the', 'base', 'Authenticated', 'role', 'for', 'this', 'store', 'which', 'is', 'the', 'role', 'that', 'is', 'given', 'to', 'every', 'user', 'who', 'is', 'explicitly', 'identified', 'by', 'a', 'non', '-', 'anonymous', 'username', '.'] | train | https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/sharing.py#L618-L629 |
7,689 | ramses-tech/ramses | ramses/utils.py | get_resource_children | def get_resource_children(raml_resource):
""" Get children of :raml_resource:.
:param raml_resource: Instance of ramlfications.raml.ResourceNode.
"""
path = raml_resource.path
return [res for res in raml_resource.root.resources
if res.parent and res.parent.path == path] | python | def get_resource_children(raml_resource):
""" Get children of :raml_resource:.
:param raml_resource: Instance of ramlfications.raml.ResourceNode.
"""
path = raml_resource.path
return [res for res in raml_resource.root.resources
if res.parent and res.parent.path == path] | ['def', 'get_resource_children', '(', 'raml_resource', ')', ':', 'path', '=', 'raml_resource', '.', 'path', 'return', '[', 'res', 'for', 'res', 'in', 'raml_resource', '.', 'root', '.', 'resources', 'if', 'res', '.', 'parent', 'and', 'res', '.', 'parent', '.', 'path', '==', 'path', ']'] | Get children of :raml_resource:.
:param raml_resource: Instance of ramlfications.raml.ResourceNode. | ['Get', 'children', 'of', ':', 'raml_resource', ':', '.'] | train | https://github.com/ramses-tech/ramses/blob/ea2e1e896325b7256cdf5902309e05fd98e0c14c/ramses/utils.py#L298-L305 |
7,690 | ToFuProject/tofu | tofu/geom/_plot.py | Struct_plot | def Struct_plot(lS, lax=None, proj='all', element=None, dP=None,
dI=None, dBs=None, dBv=None,
dVect=None, dIHor=None, dBsHor=None, dBvHor=None,
Lim=None, Nstep=None, dLeg=None, indices=False,
draw=True, fs=None, wintit=None, tit=None, Test=True):
""" Plot the projections of a list of Struct subclass instances
D. VEZINET, Aug. 2014
Inputs :
V A Ves instance
Nstep An int (the number of points for evaluation of theta by np.linspace)
axP A plt.Axes instance (if given) on which to plot the poloidal projection, otherwise ('None') a new figure/axes is created
axT A plt.Axes instance (if given) on which to plot the toroidal projection, otherwise ('None') a new figure/axes is created
Tdict A dictionnary specifying the style of the polygon plot
dLeg A dictionnary specifying the style of the legend box (if None => no legend)
Outputs :
axP The plt.Axes instance on which the poloidal plot was performed
axT The plt.Axes instance on which the toroidal plot was performed
"""
proj = proj.lower()
if Test:
msg = "Arg proj must be in ['cross','hor','all','3d'] !"
assert proj in ['cross','hor','all','3d'], msg
lax, C0, C1, C2 = _check_Lax(lax,n=2)
assert type(draw) is bool, "Arg draw must be a bool !"
C0 = issubclass(lS.__class__, utils.ToFuObject)
C1 = (isinstance(lS,list)
and all([issubclass(ss.__class__, utils.ToFuObject) for ss in lS]))
msg = "Arg lves must be a Struct subclass or a list of such !"
assert C0 or C1, msg
if C0:
lS = [lS]
nS = len(lS)
if wintit is None:
wintit = _wintit
kwa = dict(fs=fs, wintit=wintit, Test=Test)
if proj=='3d':
# Temporary matplotlib issue
dLeg = None
for ii in range(0,nS):
dplot = _Struct_plot_format(lS[ii], proj=proj, Elt=element,
dP=dP, dI=dI, dBs=dBs,
dBv=dBv, dVect=dVect, dIHor=dIHor,
dBsHor=dBsHor, dBvHor=dBvHor,
Lim=Lim, Nstep=Nstep)
for k in dplot.keys():
dplot[k].update(kwa)
if proj=='3d':
lax[0] = _Plot_3D_plt_Ves(lS[ii], ax=lax[0], LegDict=None,
draw=False, **dplot[proj])
else:
if proj=='cross':
lax[0] = _Plot_CrossProj_Ves(lS[ii], ax=lax[0],
indices=indices, LegDict=None,
draw=False, **dplot[proj])
elif proj=='hor':
lax[0] = _Plot_HorProj_Ves(lS[ii], ax=lax[0],
indices=indices, LegDict=None,
draw=False, **dplot[proj])
elif proj=='all':
if lax[0] is None or lax[1] is None:
lax = list(_def.Plot_LOSProj_DefAxes('All', fs=fs,
wintit=wintit,
Type=lS[ii].Id.Type))
lax[0] = _Plot_CrossProj_Ves(lS[ii], ax=lax[0], LegDict=None,
indices=indices,
draw=False, **dplot['cross'])
lax[1] = _Plot_HorProj_Ves(lS[ii], ax=lax[1], LegDict=None,
indices=indices,
draw=False, **dplot['hor'])
# recompute the ax.dataLim
lax[0].relim()
if proj=='all':
lax[1].relim()
# update ax.viewLim using the new dataLim
lax[0].autoscale_view()
if proj=='all':
lax[1].autoscale_view()
if tit is not None:
lax[0].figure.suptitle(tit)
if not dLeg is None:
lax[0].legend(**dLeg)
if draw:
lax[0].relim()
lax[0].autoscale_view()
if len(lax)==2 and lax[1] is not None:
lax[1].relim()
lax[1].autoscale_view()
lax[0].figure.canvas.draw()
lax = lax if proj=='all' else lax[0]
return lax | python | def Struct_plot(lS, lax=None, proj='all', element=None, dP=None,
dI=None, dBs=None, dBv=None,
dVect=None, dIHor=None, dBsHor=None, dBvHor=None,
Lim=None, Nstep=None, dLeg=None, indices=False,
draw=True, fs=None, wintit=None, tit=None, Test=True):
""" Plot the projections of a list of Struct subclass instances
D. VEZINET, Aug. 2014
Inputs :
V A Ves instance
Nstep An int (the number of points for evaluation of theta by np.linspace)
axP A plt.Axes instance (if given) on which to plot the poloidal projection, otherwise ('None') a new figure/axes is created
axT A plt.Axes instance (if given) on which to plot the toroidal projection, otherwise ('None') a new figure/axes is created
Tdict A dictionnary specifying the style of the polygon plot
dLeg A dictionnary specifying the style of the legend box (if None => no legend)
Outputs :
axP The plt.Axes instance on which the poloidal plot was performed
axT The plt.Axes instance on which the toroidal plot was performed
"""
proj = proj.lower()
if Test:
msg = "Arg proj must be in ['cross','hor','all','3d'] !"
assert proj in ['cross','hor','all','3d'], msg
lax, C0, C1, C2 = _check_Lax(lax,n=2)
assert type(draw) is bool, "Arg draw must be a bool !"
C0 = issubclass(lS.__class__, utils.ToFuObject)
C1 = (isinstance(lS,list)
and all([issubclass(ss.__class__, utils.ToFuObject) for ss in lS]))
msg = "Arg lves must be a Struct subclass or a list of such !"
assert C0 or C1, msg
if C0:
lS = [lS]
nS = len(lS)
if wintit is None:
wintit = _wintit
kwa = dict(fs=fs, wintit=wintit, Test=Test)
if proj=='3d':
# Temporary matplotlib issue
dLeg = None
for ii in range(0,nS):
dplot = _Struct_plot_format(lS[ii], proj=proj, Elt=element,
dP=dP, dI=dI, dBs=dBs,
dBv=dBv, dVect=dVect, dIHor=dIHor,
dBsHor=dBsHor, dBvHor=dBvHor,
Lim=Lim, Nstep=Nstep)
for k in dplot.keys():
dplot[k].update(kwa)
if proj=='3d':
lax[0] = _Plot_3D_plt_Ves(lS[ii], ax=lax[0], LegDict=None,
draw=False, **dplot[proj])
else:
if proj=='cross':
lax[0] = _Plot_CrossProj_Ves(lS[ii], ax=lax[0],
indices=indices, LegDict=None,
draw=False, **dplot[proj])
elif proj=='hor':
lax[0] = _Plot_HorProj_Ves(lS[ii], ax=lax[0],
indices=indices, LegDict=None,
draw=False, **dplot[proj])
elif proj=='all':
if lax[0] is None or lax[1] is None:
lax = list(_def.Plot_LOSProj_DefAxes('All', fs=fs,
wintit=wintit,
Type=lS[ii].Id.Type))
lax[0] = _Plot_CrossProj_Ves(lS[ii], ax=lax[0], LegDict=None,
indices=indices,
draw=False, **dplot['cross'])
lax[1] = _Plot_HorProj_Ves(lS[ii], ax=lax[1], LegDict=None,
indices=indices,
draw=False, **dplot['hor'])
# recompute the ax.dataLim
lax[0].relim()
if proj=='all':
lax[1].relim()
# update ax.viewLim using the new dataLim
lax[0].autoscale_view()
if proj=='all':
lax[1].autoscale_view()
if tit is not None:
lax[0].figure.suptitle(tit)
if not dLeg is None:
lax[0].legend(**dLeg)
if draw:
lax[0].relim()
lax[0].autoscale_view()
if len(lax)==2 and lax[1] is not None:
lax[1].relim()
lax[1].autoscale_view()
lax[0].figure.canvas.draw()
lax = lax if proj=='all' else lax[0]
return lax | ['def', 'Struct_plot', '(', 'lS', ',', 'lax', '=', 'None', ',', 'proj', '=', "'all'", ',', 'element', '=', 'None', ',', 'dP', '=', 'None', ',', 'dI', '=', 'None', ',', 'dBs', '=', 'None', ',', 'dBv', '=', 'None', ',', 'dVect', '=', 'None', ',', 'dIHor', '=', 'None', ',', 'dBsHor', '=', 'None', ',', 'dBvHor', '=', 'None', ',', 'Lim', '=', 'None', ',', 'Nstep', '=', 'None', ',', 'dLeg', '=', 'None', ',', 'indices', '=', 'False', ',', 'draw', '=', 'True', ',', 'fs', '=', 'None', ',', 'wintit', '=', 'None', ',', 'tit', '=', 'None', ',', 'Test', '=', 'True', ')', ':', 'proj', '=', 'proj', '.', 'lower', '(', ')', 'if', 'Test', ':', 'msg', '=', '"Arg proj must be in [\'cross\',\'hor\',\'all\',\'3d\'] !"', 'assert', 'proj', 'in', '[', "'cross'", ',', "'hor'", ',', "'all'", ',', "'3d'", ']', ',', 'msg', 'lax', ',', 'C0', ',', 'C1', ',', 'C2', '=', '_check_Lax', '(', 'lax', ',', 'n', '=', '2', ')', 'assert', 'type', '(', 'draw', ')', 'is', 'bool', ',', '"Arg draw must be a bool !"', 'C0', '=', 'issubclass', '(', 'lS', '.', '__class__', ',', 'utils', '.', 'ToFuObject', ')', 'C1', '=', '(', 'isinstance', '(', 'lS', ',', 'list', ')', 'and', 'all', '(', '[', 'issubclass', '(', 'ss', '.', '__class__', ',', 'utils', '.', 'ToFuObject', ')', 'for', 'ss', 'in', 'lS', ']', ')', ')', 'msg', '=', '"Arg lves must be a Struct subclass or a list of such !"', 'assert', 'C0', 'or', 'C1', ',', 'msg', 'if', 'C0', ':', 'lS', '=', '[', 'lS', ']', 'nS', '=', 'len', '(', 'lS', ')', 'if', 'wintit', 'is', 'None', ':', 'wintit', '=', '_wintit', 'kwa', '=', 'dict', '(', 'fs', '=', 'fs', ',', 'wintit', '=', 'wintit', ',', 'Test', '=', 'Test', ')', 'if', 'proj', '==', "'3d'", ':', '# Temporary matplotlib issue', 'dLeg', '=', 'None', 'for', 'ii', 'in', 'range', '(', '0', ',', 'nS', ')', ':', 'dplot', '=', '_Struct_plot_format', '(', 'lS', '[', 'ii', ']', ',', 'proj', '=', 'proj', ',', 'Elt', '=', 'element', ',', 'dP', '=', 'dP', ',', 'dI', '=', 'dI', ',', 'dBs', '=', 'dBs', ',', 'dBv', '=', 'dBv', ',', 'dVect', '=', 'dVect', ',', 'dIHor', '=', 'dIHor', ',', 'dBsHor', '=', 'dBsHor', ',', 'dBvHor', '=', 'dBvHor', ',', 'Lim', '=', 'Lim', ',', 'Nstep', '=', 'Nstep', ')', 'for', 'k', 'in', 'dplot', '.', 'keys', '(', ')', ':', 'dplot', '[', 'k', ']', '.', 'update', '(', 'kwa', ')', 'if', 'proj', '==', "'3d'", ':', 'lax', '[', '0', ']', '=', '_Plot_3D_plt_Ves', '(', 'lS', '[', 'ii', ']', ',', 'ax', '=', 'lax', '[', '0', ']', ',', 'LegDict', '=', 'None', ',', 'draw', '=', 'False', ',', '*', '*', 'dplot', '[', 'proj', ']', ')', 'else', ':', 'if', 'proj', '==', "'cross'", ':', 'lax', '[', '0', ']', '=', '_Plot_CrossProj_Ves', '(', 'lS', '[', 'ii', ']', ',', 'ax', '=', 'lax', '[', '0', ']', ',', 'indices', '=', 'indices', ',', 'LegDict', '=', 'None', ',', 'draw', '=', 'False', ',', '*', '*', 'dplot', '[', 'proj', ']', ')', 'elif', 'proj', '==', "'hor'", ':', 'lax', '[', '0', ']', '=', '_Plot_HorProj_Ves', '(', 'lS', '[', 'ii', ']', ',', 'ax', '=', 'lax', '[', '0', ']', ',', 'indices', '=', 'indices', ',', 'LegDict', '=', 'None', ',', 'draw', '=', 'False', ',', '*', '*', 'dplot', '[', 'proj', ']', ')', 'elif', 'proj', '==', "'all'", ':', 'if', 'lax', '[', '0', ']', 'is', 'None', 'or', 'lax', '[', '1', ']', 'is', 'None', ':', 'lax', '=', 'list', '(', '_def', '.', 'Plot_LOSProj_DefAxes', '(', "'All'", ',', 'fs', '=', 'fs', ',', 'wintit', '=', 'wintit', ',', 'Type', '=', 'lS', '[', 'ii', ']', '.', 'Id', '.', 'Type', ')', ')', 'lax', '[', '0', ']', '=', '_Plot_CrossProj_Ves', '(', 'lS', '[', 'ii', ']', ',', 'ax', '=', 'lax', '[', 
'0', ']', ',', 'LegDict', '=', 'None', ',', 'indices', '=', 'indices', ',', 'draw', '=', 'False', ',', '*', '*', 'dplot', '[', "'cross'", ']', ')', 'lax', '[', '1', ']', '=', '_Plot_HorProj_Ves', '(', 'lS', '[', 'ii', ']', ',', 'ax', '=', 'lax', '[', '1', ']', ',', 'LegDict', '=', 'None', ',', 'indices', '=', 'indices', ',', 'draw', '=', 'False', ',', '*', '*', 'dplot', '[', "'hor'", ']', ')', '# recompute the ax.dataLim', 'lax', '[', '0', ']', '.', 'relim', '(', ')', 'if', 'proj', '==', "'all'", ':', 'lax', '[', '1', ']', '.', 'relim', '(', ')', '# update ax.viewLim using the new dataLim', 'lax', '[', '0', ']', '.', 'autoscale_view', '(', ')', 'if', 'proj', '==', "'all'", ':', 'lax', '[', '1', ']', '.', 'autoscale_view', '(', ')', 'if', 'tit', 'is', 'not', 'None', ':', 'lax', '[', '0', ']', '.', 'figure', '.', 'suptitle', '(', 'tit', ')', 'if', 'not', 'dLeg', 'is', 'None', ':', 'lax', '[', '0', ']', '.', 'legend', '(', '*', '*', 'dLeg', ')', 'if', 'draw', ':', 'lax', '[', '0', ']', '.', 'relim', '(', ')', 'lax', '[', '0', ']', '.', 'autoscale_view', '(', ')', 'if', 'len', '(', 'lax', ')', '==', '2', 'and', 'lax', '[', '1', ']', 'is', 'not', 'None', ':', 'lax', '[', '1', ']', '.', 'relim', '(', ')', 'lax', '[', '1', ']', '.', 'autoscale_view', '(', ')', 'lax', '[', '0', ']', '.', 'figure', '.', 'canvas', '.', 'draw', '(', ')', 'lax', '=', 'lax', 'if', 'proj', '==', "'all'", 'else', 'lax', '[', '0', ']', 'return', 'lax'] | Plot the projections of a list of Struct subclass instances
D. VEZINET, Aug. 2014
Inputs :
V A Ves instance
Nstep An int (the number of points for evaluation of theta by np.linspace)
axP A plt.Axes instance (if given) on which to plot the poloidal projection, otherwise ('None') a new figure/axes is created
axT A plt.Axes instance (if given) on which to plot the toroidal projection, otherwise ('None') a new figure/axes is created
Tdict A dictionnary specifying the style of the polygon plot
dLeg A dictionnary specifying the style of the legend box (if None => no legend)
Outputs :
axP The plt.Axes instance on which the poloidal plot was performed
axT The plt.Axes instance on which the toroidal plot was performed | ['Plot', 'the', 'projections', 'of', 'a', 'list', 'of', 'Struct', 'subclass', 'instances'] | train | https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_plot.py#L143-L240 |
7,691 | ejeschke/ginga | ginga/ImageView.py | ImageViewBase.refresh_timer_cb | def refresh_timer_cb(self, timer, flags):
"""Refresh timer callback.
This callback will normally only be called internally.
Parameters
----------
timer : a Ginga GUI timer
A GUI-based Ginga timer
flags : dict-like
A set of flags controlling the timer
"""
# this is the timer call back, from the GUI thread
start_time = time.time()
if flags.get('done', False):
return
# calculate next deadline
deadline = self.rf_deadline
self.rf_deadline += self.rf_rate
self.rf_timer_count += 1
delta = abs(start_time - deadline)
self.rf_delta_total += delta
adjust = 0.0
if start_time > deadline:
# we are late
self.rf_late_total += delta
self.rf_late_count += 1
late_avg = self.rf_late_total / self.rf_late_count
adjust = - (late_avg / 2.0)
self.rf_skip_total += delta
if self.rf_skip_total < self.rf_rate:
self.rf_draw_count += 1
# TODO: can we optimize whence?
self.redraw_now(whence=0)
else:
# <-- we are behind by amount of time equal to one frame.
# skip a redraw and attempt to catch up some time
self.rf_skip_total = 0
else:
if start_time < deadline:
# we are early
self.rf_early_total += delta
self.rf_early_count += 1
self.rf_skip_total = max(0.0, self.rf_skip_total - delta)
early_avg = self.rf_early_total / self.rf_early_count
adjust = early_avg / 4.0
self.rf_draw_count += 1
# TODO: can we optimize whence?
self.redraw_now(whence=0)
delay = max(0.0, self.rf_deadline - time.time() + adjust)
timer.start(delay) | python | def refresh_timer_cb(self, timer, flags):
"""Refresh timer callback.
This callback will normally only be called internally.
Parameters
----------
timer : a Ginga GUI timer
A GUI-based Ginga timer
flags : dict-like
A set of flags controlling the timer
"""
# this is the timer call back, from the GUI thread
start_time = time.time()
if flags.get('done', False):
return
# calculate next deadline
deadline = self.rf_deadline
self.rf_deadline += self.rf_rate
self.rf_timer_count += 1
delta = abs(start_time - deadline)
self.rf_delta_total += delta
adjust = 0.0
if start_time > deadline:
# we are late
self.rf_late_total += delta
self.rf_late_count += 1
late_avg = self.rf_late_total / self.rf_late_count
adjust = - (late_avg / 2.0)
self.rf_skip_total += delta
if self.rf_skip_total < self.rf_rate:
self.rf_draw_count += 1
# TODO: can we optimize whence?
self.redraw_now(whence=0)
else:
# <-- we are behind by amount of time equal to one frame.
# skip a redraw and attempt to catch up some time
self.rf_skip_total = 0
else:
if start_time < deadline:
# we are early
self.rf_early_total += delta
self.rf_early_count += 1
self.rf_skip_total = max(0.0, self.rf_skip_total - delta)
early_avg = self.rf_early_total / self.rf_early_count
adjust = early_avg / 4.0
self.rf_draw_count += 1
# TODO: can we optimize whence?
self.redraw_now(whence=0)
delay = max(0.0, self.rf_deadline - time.time() + adjust)
timer.start(delay) | ['def', 'refresh_timer_cb', '(', 'self', ',', 'timer', ',', 'flags', ')', ':', '# this is the timer call back, from the GUI thread', 'start_time', '=', 'time', '.', 'time', '(', ')', 'if', 'flags', '.', 'get', '(', "'done'", ',', 'False', ')', ':', 'return', '# calculate next deadline', 'deadline', '=', 'self', '.', 'rf_deadline', 'self', '.', 'rf_deadline', '+=', 'self', '.', 'rf_rate', 'self', '.', 'rf_timer_count', '+=', '1', 'delta', '=', 'abs', '(', 'start_time', '-', 'deadline', ')', 'self', '.', 'rf_delta_total', '+=', 'delta', 'adjust', '=', '0.0', 'if', 'start_time', '>', 'deadline', ':', '# we are late', 'self', '.', 'rf_late_total', '+=', 'delta', 'self', '.', 'rf_late_count', '+=', '1', 'late_avg', '=', 'self', '.', 'rf_late_total', '/', 'self', '.', 'rf_late_count', 'adjust', '=', '-', '(', 'late_avg', '/', '2.0', ')', 'self', '.', 'rf_skip_total', '+=', 'delta', 'if', 'self', '.', 'rf_skip_total', '<', 'self', '.', 'rf_rate', ':', 'self', '.', 'rf_draw_count', '+=', '1', '# TODO: can we optimize whence?', 'self', '.', 'redraw_now', '(', 'whence', '=', '0', ')', 'else', ':', '# <-- we are behind by amount of time equal to one frame.', '# skip a redraw and attempt to catch up some time', 'self', '.', 'rf_skip_total', '=', '0', 'else', ':', 'if', 'start_time', '<', 'deadline', ':', '# we are early', 'self', '.', 'rf_early_total', '+=', 'delta', 'self', '.', 'rf_early_count', '+=', '1', 'self', '.', 'rf_skip_total', '=', 'max', '(', '0.0', ',', 'self', '.', 'rf_skip_total', '-', 'delta', ')', 'early_avg', '=', 'self', '.', 'rf_early_total', '/', 'self', '.', 'rf_early_count', 'adjust', '=', 'early_avg', '/', '4.0', 'self', '.', 'rf_draw_count', '+=', '1', '# TODO: can we optimize whence?', 'self', '.', 'redraw_now', '(', 'whence', '=', '0', ')', 'delay', '=', 'max', '(', '0.0', ',', 'self', '.', 'rf_deadline', '-', 'time', '.', 'time', '(', ')', '+', 'adjust', ')', 'timer', '.', 'start', '(', 'delay', ')'] | Refresh timer callback.
This callback will normally only be called internally.
Parameters
----------
timer : a Ginga GUI timer
A GUI-based Ginga timer
flags : dict-like
A set of flags controlling the timer | ['Refresh', 'timer', 'callback', '.', 'This', 'callback', 'will', 'normally', 'only', 'be', 'called', 'internally', '.'] | train | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/ImageView.py#L1205-L1261 |
7,692 | google/mobly | mobly/controllers/android_device_lib/services/snippet_management_service.py | SnippetManagementService.pause | def pause(self):
"""Pauses all the snippet clients under management.
This clears the host port of a client because a new port will be
allocated in `resume`.
"""
for client in self._snippet_clients.values():
self._device.log.debug(
'Clearing host port %d of SnippetClient<%s>.',
client.host_port, client.package)
client.clear_host_port() | python | def pause(self):
"""Pauses all the snippet clients under management.
This clears the host port of a client because a new port will be
allocated in `resume`.
"""
for client in self._snippet_clients.values():
self._device.log.debug(
'Clearing host port %d of SnippetClient<%s>.',
client.host_port, client.package)
client.clear_host_port() | ['def', 'pause', '(', 'self', ')', ':', 'for', 'client', 'in', 'self', '.', '_snippet_clients', '.', 'values', '(', ')', ':', 'self', '.', '_device', '.', 'log', '.', 'debug', '(', "'Clearing host port %d of SnippetClient<%s>.'", ',', 'client', '.', 'host_port', ',', 'client', '.', 'package', ')', 'client', '.', 'clear_host_port', '(', ')'] | Pauses all the snippet clients under management.
This clears the host port of a client because a new port will be
allocated in `resume`. | ['Pauses', 'all', 'the', 'snippet', 'clients', 'under', 'management', '.'] | train | https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/android_device_lib/services/snippet_management_service.py#L127-L137 |
7,693 | CityOfZion/neo-python-rpc | neorpc/Client.py | RPCClient.get_storage | def get_storage(self, contract_hash, storage_key, id=None, endpoint=None):
"""
Returns a storage item of a specified contract
Args:
contract_hash: (str) hash of the contract to lookup, for example 'd7678dd97c000be3f33e9362e673101bac4ca654'
storage_key: (str) storage key to lookup, for example 'totalSupply'
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
bytearray: bytearray value of the storage item
"""
result = self._call_endpoint(GET_STORAGE, params=[contract_hash, binascii.hexlify(storage_key.encode('utf-8')).decode('utf-8')], id=id, endpoint=endpoint)
try:
return bytearray(binascii.unhexlify(result.encode('utf-8')))
except Exception as e:
raise NEORPCException("could not decode result %s " % e) | python | def get_storage(self, contract_hash, storage_key, id=None, endpoint=None):
"""
Returns a storage item of a specified contract
Args:
contract_hash: (str) hash of the contract to lookup, for example 'd7678dd97c000be3f33e9362e673101bac4ca654'
storage_key: (str) storage key to lookup, for example 'totalSupply'
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
bytearray: bytearray value of the storage item
"""
result = self._call_endpoint(GET_STORAGE, params=[contract_hash, binascii.hexlify(storage_key.encode('utf-8')).decode('utf-8')], id=id, endpoint=endpoint)
try:
return bytearray(binascii.unhexlify(result.encode('utf-8')))
except Exception as e:
raise NEORPCException("could not decode result %s " % e) | ['def', 'get_storage', '(', 'self', ',', 'contract_hash', ',', 'storage_key', ',', 'id', '=', 'None', ',', 'endpoint', '=', 'None', ')', ':', 'result', '=', 'self', '.', '_call_endpoint', '(', 'GET_STORAGE', ',', 'params', '=', '[', 'contract_hash', ',', 'binascii', '.', 'hexlify', '(', 'storage_key', '.', 'encode', '(', "'utf-8'", ')', ')', '.', 'decode', '(', "'utf-8'", ')', ']', ',', 'id', '=', 'id', ',', 'endpoint', '=', 'endpoint', ')', 'try', ':', 'return', 'bytearray', '(', 'binascii', '.', 'unhexlify', '(', 'result', '.', 'encode', '(', "'utf-8'", ')', ')', ')', 'except', 'Exception', 'as', 'e', ':', 'raise', 'NEORPCException', '(', '"could not decode result %s "', '%', 'e', ')'] | Returns a storage item of a specified contract
Args:
contract_hash: (str) hash of the contract to lookup, for example 'd7678dd97c000be3f33e9362e673101bac4ca654'
storage_key: (str) storage key to lookup, for example 'totalSupply'
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
bytearray: bytearray value of the storage item | ['Returns', 'a', 'storage', 'item', 'of', 'a', 'specified', 'contract', 'Args', ':', 'contract_hash', ':', '(', 'str', ')', 'hash', 'of', 'the', 'contract', 'to', 'lookup', 'for', 'example', 'd7678dd97c000be3f33e9362e673101bac4ca654', 'storage_key', ':', '(', 'str', ')', 'storage', 'key', 'to', 'lookup', 'for', 'example', 'totalSupply', 'id', ':', '(', 'int', 'optional', ')', 'id', 'to', 'use', 'for', 'response', 'tracking', 'endpoint', ':', '(', 'RPCEndpoint', 'optional', ')', 'endpoint', 'to', 'specify', 'to', 'use', 'Returns', ':', 'bytearray', ':', 'bytearray', 'value', 'of', 'the', 'storage', 'item'] | train | https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L189-L205 |
7,694 | watson-developer-cloud/python-sdk | examples/assistant_tone_analyzer_integration/tone_detection.py | updateWritingTone | def updateWritingTone(user, writingTone, maintainHistory):
"""
updateWritingTone updates the user with the writing tones interpreted based
on the specified thresholds
@param: user a json object representing user information (tone) to be used
in conversing with the Conversation Service
@param: writingTone a json object containing the writing tones in the
payload returned by the Tone Analyzer
"""
currentWriting = []
currentWritingObject = []
# Process each writing tone and determine if it is high or low
for tone in writingTone['tones']:
if tone['score'] >= WRITING_HIGH_SCORE_THRESHOLD:
currentWriting.append(tone['tone_name'].lower() + '_high')
currentWritingObject.append({
'tone_name': tone['tone_name'].lower(),
'score': tone['score'],
'interpretation': 'likely high'
})
elif tone['score'] <= WRITING_NO_SCORE_THRESHOLD:
currentWritingObject.append({
'tone_name': tone['tone_name'].lower(),
'score': tone['score'],
'interpretation': 'no evidence'
})
else:
currentWritingObject.append({
'tone_name': tone['tone_name'].lower(),
'score': tone['score'],
'interpretation': 'likely medium'
})
# update user writing tone
user['tone']['writing']['current'] = currentWriting
if maintainHistory:
if 'history' not in user['tone']['writing']:
user['tone']['writing']['history'] = []
user['tone']['writing']['history'].append(currentWritingObject) | python | def updateWritingTone(user, writingTone, maintainHistory):
"""
updateWritingTone updates the user with the writing tones interpreted based
on the specified thresholds
@param: user a json object representing user information (tone) to be used
in conversing with the Conversation Service
@param: writingTone a json object containing the writing tones in the
payload returned by the Tone Analyzer
"""
currentWriting = []
currentWritingObject = []
# Process each writing tone and determine if it is high or low
for tone in writingTone['tones']:
if tone['score'] >= WRITING_HIGH_SCORE_THRESHOLD:
currentWriting.append(tone['tone_name'].lower() + '_high')
currentWritingObject.append({
'tone_name': tone['tone_name'].lower(),
'score': tone['score'],
'interpretation': 'likely high'
})
elif tone['score'] <= WRITING_NO_SCORE_THRESHOLD:
currentWritingObject.append({
'tone_name': tone['tone_name'].lower(),
'score': tone['score'],
'interpretation': 'no evidence'
})
else:
currentWritingObject.append({
'tone_name': tone['tone_name'].lower(),
'score': tone['score'],
'interpretation': 'likely medium'
})
# update user writing tone
user['tone']['writing']['current'] = currentWriting
if maintainHistory:
if 'history' not in user['tone']['writing']:
user['tone']['writing']['history'] = []
user['tone']['writing']['history'].append(currentWritingObject) | ['def', 'updateWritingTone', '(', 'user', ',', 'writingTone', ',', 'maintainHistory', ')', ':', 'currentWriting', '=', '[', ']', 'currentWritingObject', '=', '[', ']', '# Process each writing tone and determine if it is high or low', 'for', 'tone', 'in', 'writingTone', '[', "'tones'", ']', ':', 'if', 'tone', '[', "'score'", ']', '>=', 'WRITING_HIGH_SCORE_THRESHOLD', ':', 'currentWriting', '.', 'append', '(', 'tone', '[', "'tone_name'", ']', '.', 'lower', '(', ')', '+', "'_high'", ')', 'currentWritingObject', '.', 'append', '(', '{', "'tone_name'", ':', 'tone', '[', "'tone_name'", ']', '.', 'lower', '(', ')', ',', "'score'", ':', 'tone', '[', "'score'", ']', ',', "'interpretation'", ':', "'likely high'", '}', ')', 'elif', 'tone', '[', "'score'", ']', '<=', 'WRITING_NO_SCORE_THRESHOLD', ':', 'currentWritingObject', '.', 'append', '(', '{', "'tone_name'", ':', 'tone', '[', "'tone_name'", ']', '.', 'lower', '(', ')', ',', "'score'", ':', 'tone', '[', "'score'", ']', ',', "'interpretation'", ':', "'no evidence'", '}', ')', 'else', ':', 'currentWritingObject', '.', 'append', '(', '{', "'tone_name'", ':', 'tone', '[', "'tone_name'", ']', '.', 'lower', '(', ')', ',', "'score'", ':', 'tone', '[', "'score'", ']', ',', "'interpretation'", ':', "'likely medium'", '}', ')', '# update user writing tone', 'user', '[', "'tone'", ']', '[', "'writing'", ']', '[', "'current'", ']', '=', 'currentWriting', 'if', 'maintainHistory', ':', 'if', "'history'", 'not', 'in', 'user', '[', "'tone'", ']', '[', "'writing'", ']', ':', 'user', '[', "'tone'", ']', '[', "'writing'", ']', '[', "'history'", ']', '=', '[', ']', 'user', '[', "'tone'", ']', '[', "'writing'", ']', '[', "'history'", ']', '.', 'append', '(', 'currentWritingObject', ')'] | updateWritingTone updates the user with the writing tones interpreted based
on the specified thresholds
@param: user a json object representing user information (tone) to be used
in conversing with the Conversation Service
@param: writingTone a json object containing the writing tones in the
payload returned by the Tone Analyzer | ['updateWritingTone', 'updates', 'the', 'user', 'with', 'the', 'writing', 'tones', 'interpreted', 'based', 'on', 'the', 'specified', 'thresholds'] | train | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/examples/assistant_tone_analyzer_integration/tone_detection.py#L144-L183 |
7,695 | Capitains/MyCapytain | MyCapytain/resolvers/cts/local.py | CtsCapitainsLocalResolver.getSiblings | def getSiblings(self, textId, subreference: CtsReference):
""" Retrieve the siblings of a textual node
:param textId: CtsTextMetadata Identifier
:type textId: str
:param subreference: CapitainsCtsPassage CtsReference
:type subreference: str
:return: Tuple of references
:rtype: (str, str)
"""
text, inventory = self.__getText__(textId)
if not isinstance(subreference, CtsReference):
subreference = CtsReference(subreference)
passage = text.getTextualNode(subreference)
return passage.siblingsId | python | def getSiblings(self, textId, subreference: CtsReference):
""" Retrieve the siblings of a textual node
:param textId: CtsTextMetadata Identifier
:type textId: str
:param subreference: CapitainsCtsPassage CtsReference
:type subreference: str
:return: Tuple of references
:rtype: (str, str)
"""
text, inventory = self.__getText__(textId)
if not isinstance(subreference, CtsReference):
subreference = CtsReference(subreference)
passage = text.getTextualNode(subreference)
return passage.siblingsId | ['def', 'getSiblings', '(', 'self', ',', 'textId', ',', 'subreference', ':', 'CtsReference', ')', ':', 'text', ',', 'inventory', '=', 'self', '.', '__getText__', '(', 'textId', ')', 'if', 'not', 'isinstance', '(', 'subreference', ',', 'CtsReference', ')', ':', 'subreference', '=', 'CtsReference', '(', 'subreference', ')', 'passage', '=', 'text', '.', 'getTextualNode', '(', 'subreference', ')', 'return', 'passage', '.', 'siblingsId'] | Retrieve the siblings of a textual node
:param textId: CtsTextMetadata Identifier
:type textId: str
:param subreference: CapitainsCtsPassage CtsReference
:type subreference: str
:return: Tuple of references
:rtype: (str, str) | ['Retrieve', 'the', 'siblings', 'of', 'a', 'textual', 'node'] | train | https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resolvers/cts/local.py#L494-L508 |
7,696 | ktbyers/netmiko | netmiko/scp_functions.py | file_transfer | def file_transfer(
ssh_conn,
source_file,
dest_file,
file_system=None,
direction="put",
disable_md5=False,
inline_transfer=False,
overwrite_file=False,
):
"""Use Secure Copy or Inline (IOS-only) to transfer files to/from network devices.
inline_transfer ONLY SUPPORTS TEXT FILES and will not support binary file transfers.
return {
'file_exists': boolean,
'file_transferred': boolean,
'file_verified': boolean,
}
"""
transferred_and_verified = {
"file_exists": True,
"file_transferred": True,
"file_verified": True,
}
transferred_and_notverified = {
"file_exists": True,
"file_transferred": True,
"file_verified": False,
}
nottransferred_but_verified = {
"file_exists": True,
"file_transferred": False,
"file_verified": True,
}
if "cisco_ios" in ssh_conn.device_type or "cisco_xe" in ssh_conn.device_type:
cisco_ios = True
else:
cisco_ios = False
if not cisco_ios and inline_transfer:
raise ValueError("Inline Transfer only supported for Cisco IOS/Cisco IOS-XE")
scp_args = {
"ssh_conn": ssh_conn,
"source_file": source_file,
"dest_file": dest_file,
"direction": direction,
}
if file_system is not None:
scp_args["file_system"] = file_system
TransferClass = InLineTransfer if inline_transfer else FileTransfer
with TransferClass(**scp_args) as scp_transfer:
if scp_transfer.check_file_exists():
if overwrite_file:
if not disable_md5:
if scp_transfer.compare_md5():
return nottransferred_but_verified
else:
# File exists, you can overwrite it, MD5 is wrong (transfer file)
verifyspace_and_transferfile(scp_transfer)
if scp_transfer.compare_md5():
return transferred_and_verified
else:
raise ValueError(
"MD5 failure between source and destination files"
)
else:
# File exists, you can overwrite it, but MD5 not allowed (transfer file)
verifyspace_and_transferfile(scp_transfer)
return transferred_and_notverified
else:
# File exists, but you can't overwrite it.
if not disable_md5:
if scp_transfer.compare_md5():
return nottransferred_but_verified
msg = "File already exists and overwrite_file is disabled"
raise ValueError(msg)
else:
verifyspace_and_transferfile(scp_transfer)
# File doesn't exist
if not disable_md5:
if scp_transfer.compare_md5():
return transferred_and_verified
else:
raise ValueError("MD5 failure between source and destination files")
else:
return transferred_and_notverified | python | def file_transfer(
ssh_conn,
source_file,
dest_file,
file_system=None,
direction="put",
disable_md5=False,
inline_transfer=False,
overwrite_file=False,
):
"""Use Secure Copy or Inline (IOS-only) to transfer files to/from network devices.
inline_transfer ONLY SUPPORTS TEXT FILES and will not support binary file transfers.
return {
'file_exists': boolean,
'file_transferred': boolean,
'file_verified': boolean,
}
"""
transferred_and_verified = {
"file_exists": True,
"file_transferred": True,
"file_verified": True,
}
transferred_and_notverified = {
"file_exists": True,
"file_transferred": True,
"file_verified": False,
}
nottransferred_but_verified = {
"file_exists": True,
"file_transferred": False,
"file_verified": True,
}
if "cisco_ios" in ssh_conn.device_type or "cisco_xe" in ssh_conn.device_type:
cisco_ios = True
else:
cisco_ios = False
if not cisco_ios and inline_transfer:
raise ValueError("Inline Transfer only supported for Cisco IOS/Cisco IOS-XE")
scp_args = {
"ssh_conn": ssh_conn,
"source_file": source_file,
"dest_file": dest_file,
"direction": direction,
}
if file_system is not None:
scp_args["file_system"] = file_system
TransferClass = InLineTransfer if inline_transfer else FileTransfer
with TransferClass(**scp_args) as scp_transfer:
if scp_transfer.check_file_exists():
if overwrite_file:
if not disable_md5:
if scp_transfer.compare_md5():
return nottransferred_but_verified
else:
# File exists, you can overwrite it, MD5 is wrong (transfer file)
verifyspace_and_transferfile(scp_transfer)
if scp_transfer.compare_md5():
return transferred_and_verified
else:
raise ValueError(
"MD5 failure between source and destination files"
)
else:
# File exists, you can overwrite it, but MD5 not allowed (transfer file)
verifyspace_and_transferfile(scp_transfer)
return transferred_and_notverified
else:
# File exists, but you can't overwrite it.
if not disable_md5:
if scp_transfer.compare_md5():
return nottransferred_but_verified
msg = "File already exists and overwrite_file is disabled"
raise ValueError(msg)
else:
verifyspace_and_transferfile(scp_transfer)
# File doesn't exist
if not disable_md5:
if scp_transfer.compare_md5():
return transferred_and_verified
else:
raise ValueError("MD5 failure between source and destination files")
else:
return transferred_and_notverified | ['def', 'file_transfer', '(', 'ssh_conn', ',', 'source_file', ',', 'dest_file', ',', 'file_system', '=', 'None', ',', 'direction', '=', '"put"', ',', 'disable_md5', '=', 'False', ',', 'inline_transfer', '=', 'False', ',', 'overwrite_file', '=', 'False', ',', ')', ':', 'transferred_and_verified', '=', '{', '"file_exists"', ':', 'True', ',', '"file_transferred"', ':', 'True', ',', '"file_verified"', ':', 'True', ',', '}', 'transferred_and_notverified', '=', '{', '"file_exists"', ':', 'True', ',', '"file_transferred"', ':', 'True', ',', '"file_verified"', ':', 'False', ',', '}', 'nottransferred_but_verified', '=', '{', '"file_exists"', ':', 'True', ',', '"file_transferred"', ':', 'False', ',', '"file_verified"', ':', 'True', ',', '}', 'if', '"cisco_ios"', 'in', 'ssh_conn', '.', 'device_type', 'or', '"cisco_xe"', 'in', 'ssh_conn', '.', 'device_type', ':', 'cisco_ios', '=', 'True', 'else', ':', 'cisco_ios', '=', 'False', 'if', 'not', 'cisco_ios', 'and', 'inline_transfer', ':', 'raise', 'ValueError', '(', '"Inline Transfer only supported for Cisco IOS/Cisco IOS-XE"', ')', 'scp_args', '=', '{', '"ssh_conn"', ':', 'ssh_conn', ',', '"source_file"', ':', 'source_file', ',', '"dest_file"', ':', 'dest_file', ',', '"direction"', ':', 'direction', ',', '}', 'if', 'file_system', 'is', 'not', 'None', ':', 'scp_args', '[', '"file_system"', ']', '=', 'file_system', 'TransferClass', '=', 'InLineTransfer', 'if', 'inline_transfer', 'else', 'FileTransfer', 'with', 'TransferClass', '(', '*', '*', 'scp_args', ')', 'as', 'scp_transfer', ':', 'if', 'scp_transfer', '.', 'check_file_exists', '(', ')', ':', 'if', 'overwrite_file', ':', 'if', 'not', 'disable_md5', ':', 'if', 'scp_transfer', '.', 'compare_md5', '(', ')', ':', 'return', 'nottransferred_but_verified', 'else', ':', '# File exists, you can overwrite it, MD5 is wrong (transfer file)', 'verifyspace_and_transferfile', '(', 'scp_transfer', ')', 'if', 'scp_transfer', '.', 'compare_md5', '(', ')', ':', 'return', 'transferred_and_verified', 'else', ':', 'raise', 'ValueError', '(', '"MD5 failure between source and destination files"', ')', 'else', ':', '# File exists, you can overwrite it, but MD5 not allowed (transfer file)', 'verifyspace_and_transferfile', '(', 'scp_transfer', ')', 'return', 'transferred_and_notverified', 'else', ':', "# File exists, but you can't overwrite it.", 'if', 'not', 'disable_md5', ':', 'if', 'scp_transfer', '.', 'compare_md5', '(', ')', ':', 'return', 'nottransferred_but_verified', 'msg', '=', '"File already exists and overwrite_file is disabled"', 'raise', 'ValueError', '(', 'msg', ')', 'else', ':', 'verifyspace_and_transferfile', '(', 'scp_transfer', ')', "# File doesn't exist", 'if', 'not', 'disable_md5', ':', 'if', 'scp_transfer', '.', 'compare_md5', '(', ')', ':', 'return', 'transferred_and_verified', 'else', ':', 'raise', 'ValueError', '(', '"MD5 failure between source and destination files"', ')', 'else', ':', 'return', 'transferred_and_notverified'] | Use Secure Copy or Inline (IOS-only) to transfer files to/from network devices.
inline_transfer ONLY SUPPORTS TEXT FILES and will not support binary file transfers.
return {
'file_exists': boolean,
'file_transferred': boolean,
'file_verified': boolean,
} | ['Use', 'Secure', 'Copy', 'or', 'Inline', '(', 'IOS', '-', 'only', ')', 'to', 'transfer', 'files', 'to', '/', 'from', 'network', 'devices', '.'] | train | https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/scp_functions.py#L23-L112 |
7,697 | kejbaly2/metrique | metrique/utils.py | json_encode_default | def json_encode_default(obj):
'''
Convert datetime.datetime to timestamp
:param obj: value to (possibly) convert
'''
if isinstance(obj, (datetime, date)):
result = dt2ts(obj)
else:
result = json_encoder.default(obj)
return to_encoding(result) | python | def json_encode_default(obj):
'''
Convert datetime.datetime to timestamp
:param obj: value to (possibly) convert
'''
if isinstance(obj, (datetime, date)):
result = dt2ts(obj)
else:
result = json_encoder.default(obj)
return to_encoding(result) | ['def', 'json_encode_default', '(', 'obj', ')', ':', 'if', 'isinstance', '(', 'obj', ',', '(', 'datetime', ',', 'date', ')', ')', ':', 'result', '=', 'dt2ts', '(', 'obj', ')', 'else', ':', 'result', '=', 'json_encoder', '.', 'default', '(', 'obj', ')', 'return', 'to_encoding', '(', 'result', ')'] | Convert datetime.datetime to timestamp
:param obj: value to (possibly) convert | ['Convert', 'datetime', '.', 'datetime', 'to', 'timestamp'] | train | https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L706-L716 |
7,698 | devopshq/artifactory | dohq_artifactory/admin.py | AdminObject._create_and_update | def _create_and_update(self):
"""
Create or update request, re-read object from Artifactory
:return: None
"""
data_json = self._create_json()
data_json.update(self.additional_params)
request_url = self._artifactory.drive + '/api/{uri}/{x.name}'.format(uri=self._uri, x=self)
r = self._session.put(
request_url,
json=data_json,
headers={'Content-Type': 'application/json'},
auth=self._auth,
)
r.raise_for_status()
rest_delay()
self.read() | python | def _create_and_update(self):
"""
Create or update request, re-read object from Artifactory
:return: None
"""
data_json = self._create_json()
data_json.update(self.additional_params)
request_url = self._artifactory.drive + '/api/{uri}/{x.name}'.format(uri=self._uri, x=self)
r = self._session.put(
request_url,
json=data_json,
headers={'Content-Type': 'application/json'},
auth=self._auth,
)
r.raise_for_status()
rest_delay()
self.read() | ['def', '_create_and_update', '(', 'self', ')', ':', 'data_json', '=', 'self', '.', '_create_json', '(', ')', 'data_json', '.', 'update', '(', 'self', '.', 'additional_params', ')', 'request_url', '=', 'self', '.', '_artifactory', '.', 'drive', '+', "'/api/{uri}/{x.name}'", '.', 'format', '(', 'uri', '=', 'self', '.', '_uri', ',', 'x', '=', 'self', ')', 'r', '=', 'self', '.', '_session', '.', 'put', '(', 'request_url', ',', 'json', '=', 'data_json', ',', 'headers', '=', '{', "'Content-Type'", ':', "'application/json'", '}', ',', 'auth', '=', 'self', '.', '_auth', ',', ')', 'r', '.', 'raise_for_status', '(', ')', 'rest_delay', '(', ')', 'self', '.', 'read', '(', ')'] | Create or update request, re-read object from Artifactory
:return: None | ['Create', 'or', 'update', 'request', 're', '-', 'read', 'object', 'from', 'Artifactory', ':', 'return', ':', 'None'] | train | https://github.com/devopshq/artifactory/blob/b9ec08cd72527d7d43159fe45c3a98a0b0838534/dohq_artifactory/admin.py#L66-L82 |
7,699 | wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.removeconfounds | def removeconfounds(self, confounds=None, clean_params=None, transpose=None, njobs=None, update_pipeline=True, overwrite=True, tag=None):
"""
Removes specified confounds using nilearn.signal.clean
Parameters
----------
confounds : list
List of confounds. Can be prespecified in set_confounds
clean_params : dict
Dictionary of kawgs to pass to nilearn.signal.clean
transpose : bool (default False)
Default removeconfounds works on time,node dimensions. Pass transpose=True to transpose pre and post confound removal.
njobs : int
Number of jobs. Otherwise tenetoBIDS.njobs is run.
update_pipeline : bool
update pipeline with '_clean' tag for new files created
overwrite : bool
tag : str
Returns
-------
Says all TenetBIDS.get_selected_files with confounds removed with _rmconfounds at the end.
Note
----
There may be some issues regarding loading non-cleaned data through the TenetoBIDS functions instead of the cleaned data. This depeneds on when you clean the data.
"""
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
if not self.confounds and not confounds:
raise ValueError(
'Specified confounds are not found. Make sure that you have run self.set_confunds([\'Confound1\',\'Confound2\']) first or pass confounds as input to function.')
if not tag:
tag = ''
else:
tag = 'desc-' + tag
if confounds:
self.set_confounds(confounds)
files = sorted(self.get_selected_files(quiet=1))
confound_files = sorted(
self.get_selected_files(quiet=1, pipeline='confound'))
files, confound_files = confound_matching(files, confound_files)
if not clean_params:
clean_params = {}
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(
self._run_removeconfounds, f, confound_files[i], clean_params, transpose, overwrite, tag) for i, f in enumerate(files)}
for j in as_completed(job):
j.result()
self.set_pipeline('teneto_' + teneto.__version__)
self.set_bids_suffix('roi')
if tag:
self.set_bids_tags({'desc': tag.split('-')[1]}) | python | def removeconfounds(self, confounds=None, clean_params=None, transpose=None, njobs=None, update_pipeline=True, overwrite=True, tag=None):
"""
Removes specified confounds using nilearn.signal.clean
Parameters
----------
confounds : list
List of confounds. Can be prespecified in set_confounds
clean_params : dict
Dictionary of kawgs to pass to nilearn.signal.clean
transpose : bool (default False)
Default removeconfounds works on time,node dimensions. Pass transpose=True to transpose pre and post confound removal.
njobs : int
Number of jobs. Otherwise tenetoBIDS.njobs is run.
update_pipeline : bool
update pipeline with '_clean' tag for new files created
overwrite : bool
tag : str
Returns
-------
Says all TenetBIDS.get_selected_files with confounds removed with _rmconfounds at the end.
Note
----
There may be some issues regarding loading non-cleaned data through the TenetoBIDS functions instead of the cleaned data. This depeneds on when you clean the data.
"""
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
if not self.confounds and not confounds:
raise ValueError(
'Specified confounds are not found. Make sure that you have run self.set_confunds([\'Confound1\',\'Confound2\']) first or pass confounds as input to function.')
if not tag:
tag = ''
else:
tag = 'desc-' + tag
if confounds:
self.set_confounds(confounds)
files = sorted(self.get_selected_files(quiet=1))
confound_files = sorted(
self.get_selected_files(quiet=1, pipeline='confound'))
files, confound_files = confound_matching(files, confound_files)
if not clean_params:
clean_params = {}
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(
self._run_removeconfounds, f, confound_files[i], clean_params, transpose, overwrite, tag) for i, f in enumerate(files)}
for j in as_completed(job):
j.result()
self.set_pipeline('teneto_' + teneto.__version__)
self.set_bids_suffix('roi')
if tag:
self.set_bids_tags({'desc': tag.split('-')[1]}) | ['def', 'removeconfounds', '(', 'self', ',', 'confounds', '=', 'None', ',', 'clean_params', '=', 'None', ',', 'transpose', '=', 'None', ',', 'njobs', '=', 'None', ',', 'update_pipeline', '=', 'True', ',', 'overwrite', '=', 'True', ',', 'tag', '=', 'None', ')', ':', 'if', 'not', 'njobs', ':', 'njobs', '=', 'self', '.', 'njobs', 'self', '.', 'add_history', '(', 'inspect', '.', 'stack', '(', ')', '[', '0', ']', '[', '3', ']', ',', 'locals', '(', ')', ',', '1', ')', 'if', 'not', 'self', '.', 'confounds', 'and', 'not', 'confounds', ':', 'raise', 'ValueError', '(', "'Specified confounds are not found. Make sure that you have run self.set_confunds([\\'Confound1\\',\\'Confound2\\']) first or pass confounds as input to function.'", ')', 'if', 'not', 'tag', ':', 'tag', '=', "''", 'else', ':', 'tag', '=', "'desc-'", '+', 'tag', 'if', 'confounds', ':', 'self', '.', 'set_confounds', '(', 'confounds', ')', 'files', '=', 'sorted', '(', 'self', '.', 'get_selected_files', '(', 'quiet', '=', '1', ')', ')', 'confound_files', '=', 'sorted', '(', 'self', '.', 'get_selected_files', '(', 'quiet', '=', '1', ',', 'pipeline', '=', "'confound'", ')', ')', 'files', ',', 'confound_files', '=', 'confound_matching', '(', 'files', ',', 'confound_files', ')', 'if', 'not', 'clean_params', ':', 'clean_params', '=', '{', '}', 'with', 'ProcessPoolExecutor', '(', 'max_workers', '=', 'njobs', ')', 'as', 'executor', ':', 'job', '=', '{', 'executor', '.', 'submit', '(', 'self', '.', '_run_removeconfounds', ',', 'f', ',', 'confound_files', '[', 'i', ']', ',', 'clean_params', ',', 'transpose', ',', 'overwrite', ',', 'tag', ')', 'for', 'i', ',', 'f', 'in', 'enumerate', '(', 'files', ')', '}', 'for', 'j', 'in', 'as_completed', '(', 'job', ')', ':', 'j', '.', 'result', '(', ')', 'self', '.', 'set_pipeline', '(', "'teneto_'", '+', 'teneto', '.', '__version__', ')', 'self', '.', 'set_bids_suffix', '(', "'roi'", ')', 'if', 'tag', ':', 'self', '.', 'set_bids_tags', '(', '{', "'desc'", ':', 'tag', '.', 'split', '(', "'-'", ')', '[', '1', ']', '}', ')'] | Removes specified confounds using nilearn.signal.clean
Parameters
----------
confounds : list
List of confounds. Can be prespecified in set_confounds
clean_params : dict
Dictionary of kawgs to pass to nilearn.signal.clean
transpose : bool (default False)
Default removeconfounds works on time,node dimensions. Pass transpose=True to transpose pre and post confound removal.
njobs : int
Number of jobs. Otherwise tenetoBIDS.njobs is run.
update_pipeline : bool
update pipeline with '_clean' tag for new files created
overwrite : bool
tag : str
Returns
-------
Says all TenetBIDS.get_selected_files with confounds removed with _rmconfounds at the end.
Note
----
There may be some issues regarding loading non-cleaned data through the TenetoBIDS functions instead of the cleaned data. This depeneds on when you clean the data. | ['Removes', 'specified', 'confounds', 'using', 'nilearn', '.', 'signal', '.', 'clean'] | train | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L1036-L1094 |