Columns (10k rows): Unnamed: 0 (int64, 0-10k) | repository_name (string, 7-54 chars) | func_path_in_repository (string, 5-223) | func_name (string, 1-134) | whole_func_string (string, 100-30.3k) | language (1 class) | func_code_string (string, 100-30.3k) | func_code_tokens (string, 138-33.2k) | func_documentation_string (string, 1-15k) | func_documentation_tokens (string, 5-5.14k) | split_name (1 class) | func_code_url (string, 91-315)

Each record below gives the row index, repository, file path, function name, language, split and source URL, followed by the function source with its docstring.

4,400 | BlackEarth/bf | bf/css.py | CSS.cmyk_to_rgb | python | train | https://github.com/BlackEarth/bf/blob/376041168874bbd6dee5ccfeece4a9e553223316/bf/css.py#L134-L151

def cmyk_to_rgb(Class, c, m, y, k):
    """CMYK in % to RGB in 0-255
    based on https://www.openprocessing.org/sketch/46231#
    """
    c = float(c)/100.0
    m = float(m)/100.0
    y = float(y)/100.0
    k = float(k)/100.0
    nc = (c * (1-k)) + k
    nm = (m * (1-k)) + k
    ny = (y * (1-k)) + k
    r = int((1-nc) * 255)
    g = int((1-nm) * 255)
    b = int((1-ny) * 255)
    return dict(r=r, g=g, b=b)

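A quick sanity check of the conversion above: the following standalone sketch restates the same arithmetic (it does not import BlackEarth/bf, so the assertions only illustrate the formula, not the library itself).

```python
# Standalone restatement of the CMYK(%) -> RGB(0-255) formula from the record above.
def cmyk_to_rgb(c, m, y, k):
    c, m, y, k = (float(v) / 100.0 for v in (c, m, y, k))
    nc, nm, ny = (v * (1 - k) + k for v in (c, m, y))
    return dict(r=int((1 - nc) * 255), g=int((1 - nm) * 255), b=int((1 - ny) * 255))

assert cmyk_to_rgb(0, 100, 100, 0) == {'r': 255, 'g': 0, 'b': 0}  # pure red
assert cmyk_to_rgb(0, 0, 0, 100) == {'r': 0, 'g': 0, 'b': 0}      # full black
```
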
4,401 | MillionIntegrals/vel | vel/api/train_phase.py | TrainPhase.epoch_info | python | train | https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/train_phase.py#L24-L26

def epoch_info(self, training_info: TrainingInfo, global_idx: int, local_idx: int) -> EpochInfo:
    """ Create Epoch info """
    raise NotImplementedError

4,402 | pytroll/posttroll | posttroll/listener.py | Listener.create_subscriber | python | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/listener.py#L105-L115

def create_subscriber(self):
    '''Create a subscriber instance using specified addresses and
    message types.
    '''
    if self.subscriber is None:
        if self.topics:
            self.subscriber = NSSubscriber(self.services, self.topics,
                                           addr_listener=True,
                                           addresses=self.addresses,
                                           nameserver=self.nameserver)
            self.recv = self.subscriber.start().recv

4,403 | pandas-dev/pandas | pandas/core/generic.py | NDFrame._consolidate | python | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5173-L5193

def _consolidate(self, inplace=False):
    """
    Compute NDFrame with "consolidated" internals (data of each dtype
    grouped together in a single ndarray).

    Parameters
    ----------
    inplace : boolean, default False
        If False return new object, otherwise modify existing object

    Returns
    -------
    consolidated : same type as caller
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    if inplace:
        self._consolidate_inplace()
    else:
        f = lambda: self._data.consolidate()
        cons_data = self._protect_consolidate(f)
        return self._constructor(cons_data).__finalize__(self)

4,404 | OzymandiasTheGreat/python-libinput | libinput/device.py | DeviceConfigCalibration.matrix | python | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L540-L554

def matrix(self):
    """The current calibration matrix for this device.

    Returns:
        (bool, (float, float, float, float, float, float)): :obj:`False` if
        no calibration is set and
        the returned matrix is the identity matrix, :obj:`True`
        otherwise. :obj:`tuple` representing the first two rows of
        a 3x3 matrix as described in :meth:`set_matrix`.
    """
    matrix = (c_float * 6)()
    rc = self._libinput.libinput_device_config_calibration_get_matrix(
        self._handle, matrix)
    return rc, tuple(matrix)

4,405 | Crypto-toolbox/bitex | bitex/api/WSS/bitfinex.py | BitfinexWSS.restart | python | train | https://github.com/Crypto-toolbox/bitex/blob/56d46ea3db6de5219a72dad9b052fbabc921232f/bitex/api/WSS/bitfinex.py#L255-L275

def restart(self, soft=False):
    """
    Restarts client. If soft is True, the client attempts to re-subscribe
    to all channels which it was previously subscribed to.
    :return:
    """
    log.info("BitfinexWSS.restart(): Restarting client..")
    super(BitfinexWSS, self).restart()

    # cache channel labels temporarily if soft == True
    channel_labels = [self.channel_labels[k] for k in self.channel_labels] if soft else None

    # clear previous channel caches
    self.channels = {}
    self.channel_labels = {}
    self.channel_states = {}

    if channel_labels:
        # re-subscribe to channels
        for channel_name, kwargs in channel_labels:
            self._subscribe(channel_name, **kwargs)

4,406 | pypa/pipenv | pipenv/vendor/distlib/database.py | DependencyGraph.add_edge | python | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/database.py#L1112-L1125

def add_edge(self, x, y, label=None):
    """Add an edge from distribution *x* to distribution *y* with the given
    *label*.

    :type x: :class:`distutils2.database.InstalledDistribution` or
             :class:`distutils2.database.EggInfoDistribution`
    :type y: :class:`distutils2.database.InstalledDistribution` or
             :class:`distutils2.database.EggInfoDistribution`
    :type label: ``str`` or ``None``
    """
    self.adjacency_list[x].append((y, label))
    # multiple edges are allowed, so be careful
    if x not in self.reverse_list[y]:
        self.reverse_list[y].append(x)

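For context, a minimal toy sketch of the two bookkeeping structures the method keeps in sync; the class below is a hypothetical stand-in, not the distlib DependencyGraph.

```python
from collections import defaultdict

class TinyDependencyGraph:
    """Toy stand-in mirroring the adjacency_list / reverse_list bookkeeping."""

    def __init__(self):
        self.adjacency_list = defaultdict(list)  # node -> [(dependency, label), ...]
        self.reverse_list = defaultdict(list)    # dependency -> [dependents]

    def add_edge(self, x, y, label=None):
        self.adjacency_list[x].append((y, label))
        # multiple edges are allowed, but each dependent appears once in the reverse list
        if x not in self.reverse_list[y]:
            self.reverse_list[y].append(x)

g = TinyDependencyGraph()
g.add_edge('requests', 'urllib3', '>=1.21.1')
g.add_edge('requests', 'urllib3', '<1.27')  # a second labelled edge
assert len(g.adjacency_list['requests']) == 2
assert g.reverse_list['urllib3'] == ['requests']
```
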
4,407 | ArchiveTeam/wpull | wpull/stats.py | Statistics.increment | python | train | https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/stats.py#L56-L66

def increment(self, size: int):
    '''Increment the number of files downloaded.

    Args:
        size: The size of the file
    '''
    assert size >= 0, size

    self.files += 1
    self.size += size
    self.bandwidth_meter.feed(size)

4,408 | gccxml/pygccxml | pygccxml/parser/directory_cache.py | directory_cache_t._save | python | train | https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/directory_cache.py#L245-L261

def _save(self):
    """
    save the cache index, in case it was modified.

    Saves the index table and the file name repository in the file
    `index.dat`
    """
    if self.__modified_flag:
        self.__filename_rep.update_id_counter()
        indexfilename = os.path.join(self.__dir, "index.dat")
        self._write_file(
            indexfilename,
            (self.__index,
             self.__filename_rep))
        self.__modified_flag = False

4,409 | chriso/gauged | gauged/writer.py | Writer.clear_from | python | train | https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/writer.py#L170-L177

def clear_from(self, timestamp):
    """Clear all data from `timestamp` onwards. Note that the timestamp
    is rounded down to the nearest block boundary"""
    block_size = self.config.block_size
    offset, remainder = timestamp // block_size, timestamp % block_size
    if remainder:
        raise ValueError('Timestamp must be on a block boundary')
    self.driver.clear_from(offset, timestamp)

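The block-boundary check above is integer division plus a remainder test. A small standalone illustration follows; the block size is an arbitrary assumption, not gauged's default.

```python
# Block size of 86400 seconds (one day) is an arbitrary choice for the example.
BLOCK_SIZE = 86400

def to_block_offset(timestamp, block_size=BLOCK_SIZE):
    offset, remainder = timestamp // block_size, timestamp % block_size
    if remainder:
        raise ValueError('Timestamp must be on a block boundary')
    return offset

assert to_block_offset(3 * 86400) == 3
try:
    to_block_offset(3 * 86400 + 1)
except ValueError as exc:
    print(exc)  # Timestamp must be on a block boundary
```
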
4,410 | linode/linode_api4-python | linode_api4/objects/linode.py | Instance.initiate_migration | python | train | https://github.com/linode/linode_api4-python/blob/1dd7318d2aed014c746d48c7957464c57af883ca/linode_api4/objects/linode.py#L643-L648

def initiate_migration(self):
    """
    Initiates a pending migration that is already scheduled for this Linode
    Instance
    """
    self._client.post('{}/migrate'.format(Instance.api_endpoint), model=self)

4,411 | mikedh/trimesh | trimesh/triangles.py | cross | python | train | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/triangles.py#L15-L31

def cross(triangles):
    """
    Returns the cross product of two edges from input triangles

    Parameters
    --------------
    triangles: (n, 3, 3) float
      Vertices of triangles

    Returns
    --------------
    crosses : (n, 3) float
      Cross product of two edge vectors
    """
    vectors = np.diff(triangles, axis=1)
    crosses = np.cross(vectors[:, 0], vectors[:, 1])
    return crosses

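A quick numeric check of the edge-vector cross product: for a unit right triangle in the XY plane the unnormalized normal should point along +Z. This sketch only needs numpy.

```python
import numpy as np

triangles = np.array([[[0, 0, 0], [1, 0, 0], [0, 1, 0]]], dtype=float)  # one triangle
vectors = np.diff(triangles, axis=1)              # the two edge vectors per triangle
crosses = np.cross(vectors[:, 0], vectors[:, 1])
assert np.allclose(crosses, [[0, 0, 1]])          # +Z normal for the XY-plane triangle
```
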
4,412 | hyperledger/indy-sdk | vcx/wrappers/python3/vcx/api/disclosed_proof.py | DisclosedProof.get_creds | python | train | https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/vcx/wrappers/python3/vcx/api/disclosed_proof.py#L204-L225

async def get_creds(self) -> dict:
    """
    Gets the credentials from a disclosed proof

    Example:
        msg_id = '1'
        phone_number = '8019119191'
        connection = await Connection.create(source_id)
        await connection.connect(phone_number)
        disclosed_proof = await DisclosedProof.create_with_msgid(source_id, connection, msg_id)
        creds = await disclosed_proof.get_creds()
    :return: credentials
    """
    if not hasattr(DisclosedProof.get_creds, "cb"):
        self.logger.debug("vcx_disclosed_proof_retrieve_credentials: Creating callback")
        DisclosedProof.get_creds.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))

    c_disclosed_proof_handle = c_uint32(self.handle)

    data = await do_call('vcx_disclosed_proof_retrieve_credentials',
                         c_disclosed_proof_handle,
                         DisclosedProof.get_creds.cb)

    return json.loads(data.decode())

4,413 | Sanji-IO/sanji | sanji/model_initiator.py | ModelInitiator.backup_db | python | train | https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/model_initiator.py#L112-L121

def backup_db(self):
    """
    " Generate a xxxxx.backup.json.
    """
    with self.db_mutex:
        if os.path.exists(self.json_db_path):
            try:
                shutil.copy2(self.json_db_path, self.backup_json_db_path)
            except (IOError, OSError):
                _logger.debug("*** No file to copy.")

4,414 | aws/aws-encryption-sdk-python | src/aws_encryption_sdk/key_providers/raw.py | RawMasterKey.owns_data_key | python | train | https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/src/aws_encryption_sdk/key_providers/raw.py#L75-L113

def owns_data_key(self, data_key):
    """Determines if data_key object is owned by this RawMasterKey.

    :param data_key: Data key to evaluate
    :type data_key: :class:`aws_encryption_sdk.structures.DataKey`,
        :class:`aws_encryption_sdk.structures.RawDataKey`,
        or :class:`aws_encryption_sdk.structures.EncryptedDataKey`
    :returns: Boolean statement of ownership
    :rtype: bool
    """
    expected_key_info_len = -1
    if (
        self.config.wrapping_key.wrapping_algorithm.encryption_type is EncryptionType.ASYMMETRIC
        and data_key.key_provider == self.key_provider
    ):
        return True
    elif self.config.wrapping_key.wrapping_algorithm.encryption_type is EncryptionType.SYMMETRIC:
        expected_key_info_len = (
            len(self._key_info_prefix) + self.config.wrapping_key.wrapping_algorithm.algorithm.iv_len
        )
        if (
            data_key.key_provider.provider_id == self.provider_id
            and len(data_key.key_provider.key_info) == expected_key_info_len
            and data_key.key_provider.key_info.startswith(self._key_info_prefix)
        ):
            return True
    _LOGGER.debug(
        (
            "RawMasterKey does not own data_key: %s\n"
            "Expected provider_id: %s\n"
            "Expected key_info len: %s\n"
            "Expected key_info prefix: %s"
        ),
        data_key,
        self.provider_id,
        expected_key_info_len,
        self._key_info_prefix,
    )
    return False

4,415 | scopus-api/scopus | scopus/author_retrieval.py | AuthorRetrieval.get_documents | python | train | https://github.com/scopus-api/scopus/blob/27ce02dd3095bfdab9d3e8475543d7c17767d1ab/scopus/author_retrieval.py#L280-L288

def get_documents(self, subtypes=None, refresh=False):
    """Return list of author's publications using ScopusSearch, which
    fit a specified set of document subtypes.
    """
    search = ScopusSearch('au-id({})'.format(self.identifier), refresh)
    if subtypes:
        return [p for p in search.results if p.subtype in subtypes]
    else:
        return search.results

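A hypothetical usage sketch: the author ID and the subtype codes ('ar' for article, 'cp' for conference paper) are illustrative assumptions, and the call requires a configured Scopus API key.

```python
# Illustrative only: requires a configured Scopus API key and network access.
from scopus import AuthorRetrieval

au = AuthorRetrieval(7004212771)                 # placeholder author ID
docs = au.get_documents(subtypes=['ar', 'cp'])   # keep only the requested subtypes
print(len(docs), 'documents match the requested subtypes')
```
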
4,416 | i3visio/osrframework | osrframework/searchengines/google.py | search | python | train | https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/searchengines/google.py#L114-L234

def search(query, tld='com', lang='en', num=10, start=0, stop=None, pause=2.0,
           only_standard=False):
    """
    Search the given query string using Google.

    @type query: str
    @param query: Query string. Must NOT be url-encoded.
    @type tld: str
    @param tld: Top level domain.
    @type lang: str
    @param lang: Language.
    @type num: int
    @param num: Number of results per page.
    @type start: int
    @param start: First result to retrieve.
    @type stop: int
    @param stop: Last result to retrieve.
        Use C{None} to keep searching forever.
    @type pause: float
    @param pause: Lapse to wait between HTTP requests.
        A lapse too long will make the search slow, but a lapse too short may
        cause Google to block your IP. Your mileage may vary!
    @type only_standard: bool
    @param only_standard: If C{True}, only returns the standard results from
        each page. If C{False}, it returns every possible link from each page,
        except for those that point back to Google itself. Defaults to C{False}
        for backwards compatibility with older versions of this module.

    @rtype: generator
    @return: Generator (iterator) that yields found URLs. If the C{stop}
        parameter is C{None} the iterator will loop forever.
    """
    # Lazy import of BeautifulSoup.
    # Try to use BeautifulSoup 4 if available, fall back to 3 otherwise.
    global BeautifulSoup
    if BeautifulSoup is None:
        try:
            from bs4 import BeautifulSoup
        except ImportError:
            from BeautifulSoup import BeautifulSoup

    # Set of hashes for the results found.
    # This is used to avoid repeated results.
    hashes = set()

    # Prepare the search string.
    query = quote_plus(query)

    # Grab the cookie from the home page.
    get_page(url_home % vars())

    # Prepare the URL of the first request.
    if start:
        if num == 10:
            url = url_next_page % vars()
        else:
            url = url_next_page_num % vars()
    else:
        if num == 10:
            url = url_search % vars()
        else:
            url = url_search_num % vars()

    # Loop until we reach the maximum result, if any (otherwise, loop forever).
    while not stop or start < stop:

        # Sleep between requests.
        time.sleep(pause)

        # Request the Google Search results page.
        html = get_page(url)

        # Parse the response and process every anchored URL.
        soup = BeautifulSoup(html)
        anchors = soup.find(id='search').findAll('a')
        for a in anchors:

            # Leave only the "standard" results if requested.
            # Otherwise grab all possible links.
            if only_standard and (
                    not a.parent or a.parent.name.lower() != "h3"):
                continue

            # Get the URL from the anchor tag.
            try:
                link = a['href']
            except KeyError:
                continue

            # Filter invalid links and links pointing to Google itself.
            link = filter_result(link)
            if not link:
                continue

            # Discard repeated results.
            h = hash(link)
            if h in hashes:
                continue
            hashes.add(h)

            # Yield the result.
            yield link

        # End if there are no more results.
        if not soup.find(id='nav'):
            break

        # Prepare the URL for the next request.
        start += num
        if num == 10:
            url = url_next_page % vars()
        else:
            url = url_next_page_num % vars()

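A hedged calling sketch for the generator above; the query string is made up, and the module-level helpers it relies on (get_page, filter_result, the url_* templates) are assumed to be defined elsewhere in osrframework/searchengines/google.py.

```python
# Calling pattern only; real output depends on Google's markup and rate limiting.
for url in search('site:github.com osrframework', stop=10, pause=2.0):
    print(url)
```
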
4,417 | estnltk/estnltk | estnltk/vabamorf/morf.py | get_group_tokens | python | train | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L390-L410

def get_group_tokens(root):
    """Function to extract tokens in hyphenated groups (saunameheks-tallimeheks).

    Parameters
    ----------
    root: str
        The root form.

    Returns
    -------
    list of (list of str)
        List of grouped root tokens.
    """
    global all_markers
    if root in all_markers or root in ['-', '_']:  # special case
        return [[root]]
    groups = []
    for group in root.split('-'):
        toks = [trim_phonetics(trim_compounds(tok)) for tok in group.split('_')]
        groups.append(toks)
    return groups

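Setting aside the trim_phonetics/trim_compounds helpers defined elsewhere in the module, the grouping itself is a nested split. A simplified sketch with a made-up root form:

```python
# Simplified: trim_phonetics / trim_compounds are omitted, so tokens come back untrimmed.
def group_tokens_simplified(root):
    return [group.split('_') for group in root.split('-')]

assert group_tokens_simplified('sauna_mees-talli_mees') == [['sauna', 'mees'], ['talli', 'mees']]
```
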
4,418 | PMEAL/OpenPNM | openpnm/io/Statoil.py | Statoil.load | def load(cls, path, prefix, network=None):
r"""
Load data from the \'dat\' files located in specified folder.
Parameters
----------
path : string
The full path to the folder containing the set of \'dat\' files.
prefix : string
The file name prefix on each file. The data files are stored
as \<prefix\>_node1.dat.
network : OpenPNM Network Object
If given then the data will be loaded on it and returned. If not
given, a Network will be created and returned.
Returns
-------
An OpenPNM Project containing a GenericNetwork holding all the data
"""
net = {}
# ---------------------------------------------------------------------
# Parse the link1 file
path = Path(path)
filename = Path(path.resolve(), prefix+'_link1.dat')
with open(filename, mode='r') as f:
link1 = pd.read_table(filepath_or_buffer=f,
header=None,
skiprows=1,
sep=' ',
skipinitialspace=True,
index_col=0)
link1.columns = ['throat.pore1', 'throat.pore2', 'throat.radius',
'throat.shape_factor', 'throat.total_length']
# Add link1 props to net
net['throat.conns'] = sp.vstack((link1['throat.pore1']-1,
link1['throat.pore2']-1)).T
net['throat.conns'] = sp.sort(net['throat.conns'], axis=1)
net['throat.radius'] = sp.array(link1['throat.radius'])
net['throat.shape_factor'] = sp.array(link1['throat.shape_factor'])
net['throat.total_length'] = sp.array(link1['throat.total_length'])
# ---------------------------------------------------------------------
filename = Path(path.resolve(), prefix+'_link2.dat')
with open(filename, mode='r') as f:
link2 = pd.read_table(filepath_or_buffer=f,
header=None,
sep=' ',
skipinitialspace=True,
index_col=0)
link2.columns = ['throat.pore1', 'throat.pore2',
'throat.pore1_length', 'throat.pore2_length',
'throat.length', 'throat.volume',
'throat.clay_volume']
# Add link2 props to net
net['throat.length'] = sp.array(link2['throat.length'])
net['throat.volume'] = sp.array(link2['throat.volume'])
net['throat.clay_volume'] = sp.array(link2['throat.clay_volume'])
# ---------------------------------------------------------------------
# Parse the node1 file
filename = Path(path.resolve(), prefix+'_node1.dat')
with open(filename, mode='r') as f:
row_0 = f.readline().split()
num_lines = int(row_0[0])
array = sp.ndarray([num_lines, 6])
for i in range(num_lines):
row = f.readline()\
.replace('\t', ' ').replace('\n', ' ').split()
array[i, :] = row[0:6]
node1 = pd.DataFrame(array[:, [1, 2, 3, 4]])
node1.columns = ['pore.x_coord', 'pore.y_coord', 'pore.z_coord',
'pore.coordination_number']
# Add node1 props to net
net['pore.coords'] = sp.vstack((node1['pore.x_coord'],
node1['pore.y_coord'],
node1['pore.z_coord'])).T
# ---------------------------------------------------------------------
# Parse the node1 file
filename = Path(path.resolve(), prefix+'_node2.dat')
with open(filename, mode='r') as f:
node2 = pd.read_table(filepath_or_buffer=f,
header=None,
sep=' ',
skipinitialspace=True,
index_col=0)
node2.columns = ['pore.volume', 'pore.radius', 'pore.shape_factor',
'pore.clay_volume']
# Add node2 props to net
net['pore.volume'] = sp.array(node2['pore.volume'])
net['pore.radius'] = sp.array(node2['pore.radius'])
net['pore.shape_factor'] = sp.array(node2['pore.shape_factor'])
net['pore.clay_volume'] = sp.array(node2['pore.clay_volume'])
if network is None:
network = GenericNetwork()
network = cls._update_network(network=network, net=net)
# Use OpenPNM Tools to clean up network
# Trim throats connected to 'inlet' or 'outlet' reservoirs
trim1 = sp.where(sp.any(net['throat.conns'] == -1, axis=1))[0]
# Apply 'outlet' label to these pores
outlets = network['throat.conns'][trim1, 1]
network['pore.outlets'] = False
network['pore.outlets'][outlets] = True
trim2 = sp.where(sp.any(net['throat.conns'] == -2, axis=1))[0]
# Apply 'inlet' label to these pores
inlets = network['throat.conns'][trim2, 1]
network['pore.inlets'] = False
network['pore.inlets'][inlets] = True
# Now trim the throats
to_trim = sp.hstack([trim1, trim2])
trim(network=network, throats=to_trim)
return network.project | python | def load(cls, path, prefix, network=None):
r"""
Load data from the \'dat\' files located in specified folder.
Parameters
----------
path : string
The full path to the folder containing the set of \'dat\' files.
prefix : string
The file name prefix on each file. The data files are stored
as \<prefix\>_node1.dat.
network : OpenPNM Network Object
If given then the data will be loaded on it and returned. If not
given, a Network will be created and returned.
Returns
-------
An OpenPNM Project containing a GenericNetwork holding all the data
"""
net = {}
# ---------------------------------------------------------------------
# Parse the link1 file
path = Path(path)
filename = Path(path.resolve(), prefix+'_link1.dat')
with open(filename, mode='r') as f:
link1 = pd.read_table(filepath_or_buffer=f,
header=None,
skiprows=1,
sep=' ',
skipinitialspace=True,
index_col=0)
link1.columns = ['throat.pore1', 'throat.pore2', 'throat.radius',
'throat.shape_factor', 'throat.total_length']
# Add link1 props to net
net['throat.conns'] = sp.vstack((link1['throat.pore1']-1,
link1['throat.pore2']-1)).T
net['throat.conns'] = sp.sort(net['throat.conns'], axis=1)
net['throat.radius'] = sp.array(link1['throat.radius'])
net['throat.shape_factor'] = sp.array(link1['throat.shape_factor'])
net['throat.total_length'] = sp.array(link1['throat.total_length'])
# ---------------------------------------------------------------------
filename = Path(path.resolve(), prefix+'_link2.dat')
with open(filename, mode='r') as f:
link2 = pd.read_table(filepath_or_buffer=f,
header=None,
sep=' ',
skipinitialspace=True,
index_col=0)
link2.columns = ['throat.pore1', 'throat.pore2',
'throat.pore1_length', 'throat.pore2_length',
'throat.length', 'throat.volume',
'throat.clay_volume']
# Add link2 props to net
net['throat.length'] = sp.array(link2['throat.length'])
net['throat.volume'] = sp.array(link2['throat.volume'])
net['throat.clay_volume'] = sp.array(link2['throat.clay_volume'])
# ---------------------------------------------------------------------
# Parse the node1 file
filename = Path(path.resolve(), prefix+'_node1.dat')
with open(filename, mode='r') as f:
row_0 = f.readline().split()
num_lines = int(row_0[0])
array = sp.ndarray([num_lines, 6])
for i in range(num_lines):
row = f.readline()\
.replace('\t', ' ').replace('\n', ' ').split()
array[i, :] = row[0:6]
node1 = pd.DataFrame(array[:, [1, 2, 3, 4]])
node1.columns = ['pore.x_coord', 'pore.y_coord', 'pore.z_coord',
'pore.coordination_number']
# Add node1 props to net
net['pore.coords'] = sp.vstack((node1['pore.x_coord'],
node1['pore.y_coord'],
node1['pore.z_coord'])).T
# ---------------------------------------------------------------------
# Parse the node1 file
filename = Path(path.resolve(), prefix+'_node2.dat')
with open(filename, mode='r') as f:
node2 = pd.read_table(filepath_or_buffer=f,
header=None,
sep=' ',
skipinitialspace=True,
index_col=0)
node2.columns = ['pore.volume', 'pore.radius', 'pore.shape_factor',
'pore.clay_volume']
# Add node2 props to net
net['pore.volume'] = sp.array(node2['pore.volume'])
net['pore.radius'] = sp.array(node2['pore.radius'])
net['pore.shape_factor'] = sp.array(node2['pore.shape_factor'])
net['pore.clay_volume'] = sp.array(node2['pore.clay_volume'])
if network is None:
network = GenericNetwork()
network = cls._update_network(network=network, net=net)
# Use OpenPNM Tools to clean up network
# Trim throats connected to 'inlet' or 'outlet' reservoirs
trim1 = sp.where(sp.any(net['throat.conns'] == -1, axis=1))[0]
# Apply 'outlet' label to these pores
outlets = network['throat.conns'][trim1, 1]
network['pore.outlets'] = False
network['pore.outlets'][outlets] = True
trim2 = sp.where(sp.any(net['throat.conns'] == -2, axis=1))[0]
# Apply 'inlet' label to these pores
inlets = network['throat.conns'][trim2, 1]
network['pore.inlets'] = False
network['pore.inlets'][inlets] = True
# Now trim the throats
to_trim = sp.hstack([trim1, trim2])
trim(network=network, throats=to_trim)
return network.project | ['def', 'load', '(', 'cls', ',', 'path', ',', 'prefix', ',', 'network', '=', 'None', ')', ':', 'net', '=', '{', '}', '# ---------------------------------------------------------------------', '# Parse the link1 file', 'path', '=', 'Path', '(', 'path', ')', 'filename', '=', 'Path', '(', 'path', '.', 'resolve', '(', ')', ',', 'prefix', '+', "'_link1.dat'", ')', 'with', 'open', '(', 'filename', ',', 'mode', '=', "'r'", ')', 'as', 'f', ':', 'link1', '=', 'pd', '.', 'read_table', '(', 'filepath_or_buffer', '=', 'f', ',', 'header', '=', 'None', ',', 'skiprows', '=', '1', ',', 'sep', '=', "' '", ',', 'skipinitialspace', '=', 'True', ',', 'index_col', '=', '0', ')', 'link1', '.', 'columns', '=', '[', "'throat.pore1'", ',', "'throat.pore2'", ',', "'throat.radius'", ',', "'throat.shape_factor'", ',', "'throat.total_length'", ']', '# Add link1 props to net', 'net', '[', "'throat.conns'", ']', '=', 'sp', '.', 'vstack', '(', '(', 'link1', '[', "'throat.pore1'", ']', '-', '1', ',', 'link1', '[', "'throat.pore2'", ']', '-', '1', ')', ')', '.', 'T', 'net', '[', "'throat.conns'", ']', '=', 'sp', '.', 'sort', '(', 'net', '[', "'throat.conns'", ']', ',', 'axis', '=', '1', ')', 'net', '[', "'throat.radius'", ']', '=', 'sp', '.', 'array', '(', 'link1', '[', "'throat.radius'", ']', ')', 'net', '[', "'throat.shape_factor'", ']', '=', 'sp', '.', 'array', '(', 'link1', '[', "'throat.shape_factor'", ']', ')', 'net', '[', "'throat.total_length'", ']', '=', 'sp', '.', 'array', '(', 'link1', '[', "'throat.total_length'", ']', ')', '# ---------------------------------------------------------------------', 'filename', '=', 'Path', '(', 'path', '.', 'resolve', '(', ')', ',', 'prefix', '+', "'_link2.dat'", ')', 'with', 'open', '(', 'filename', ',', 'mode', '=', "'r'", ')', 'as', 'f', ':', 'link2', '=', 'pd', '.', 'read_table', '(', 'filepath_or_buffer', '=', 'f', ',', 'header', '=', 'None', ',', 'sep', '=', "' '", ',', 'skipinitialspace', '=', 'True', ',', 'index_col', '=', '0', ')', 'link2', '.', 'columns', '=', '[', "'throat.pore1'", ',', "'throat.pore2'", ',', "'throat.pore1_length'", ',', "'throat.pore2_length'", ',', "'throat.length'", ',', "'throat.volume'", ',', "'throat.clay_volume'", ']', '# Add link2 props to net', 'net', '[', "'throat.length'", ']', '=', 'sp', '.', 'array', '(', 'link2', '[', "'throat.length'", ']', ')', 'net', '[', "'throat.volume'", ']', '=', 'sp', '.', 'array', '(', 'link2', '[', "'throat.volume'", ']', ')', 'net', '[', "'throat.clay_volume'", ']', '=', 'sp', '.', 'array', '(', 'link2', '[', "'throat.clay_volume'", ']', ')', '# ---------------------------------------------------------------------', '# Parse the node1 file', 'filename', '=', 'Path', '(', 'path', '.', 'resolve', '(', ')', ',', 'prefix', '+', "'_node1.dat'", ')', 'with', 'open', '(', 'filename', ',', 'mode', '=', "'r'", ')', 'as', 'f', ':', 'row_0', '=', 'f', '.', 'readline', '(', ')', '.', 'split', '(', ')', 'num_lines', '=', 'int', '(', 'row_0', '[', '0', ']', ')', 'array', '=', 'sp', '.', 'ndarray', '(', '[', 'num_lines', ',', '6', ']', ')', 'for', 'i', 'in', 'range', '(', 'num_lines', ')', ':', 'row', '=', 'f', '.', 'readline', '(', ')', '.', 'replace', '(', "'\\t'", ',', "' '", ')', '.', 'replace', '(', "'\\n'", ',', "' '", ')', '.', 'split', '(', ')', 'array', '[', 'i', ',', ':', ']', '=', 'row', '[', '0', ':', '6', ']', 'node1', '=', 'pd', '.', 'DataFrame', '(', 'array', '[', ':', ',', '[', '1', ',', '2', ',', '3', ',', '4', ']', ']', ')', 'node1', '.', 'columns', '=', '[', "'pore.x_coord'", ',', 
"'pore.y_coord'", ',', "'pore.z_coord'", ',', "'pore.coordination_number'", ']', '# Add node1 props to net', 'net', '[', "'pore.coords'", ']', '=', 'sp', '.', 'vstack', '(', '(', 'node1', '[', "'pore.x_coord'", ']', ',', 'node1', '[', "'pore.y_coord'", ']', ',', 'node1', '[', "'pore.z_coord'", ']', ')', ')', '.', 'T', '# ---------------------------------------------------------------------', '# Parse the node1 file', 'filename', '=', 'Path', '(', 'path', '.', 'resolve', '(', ')', ',', 'prefix', '+', "'_node2.dat'", ')', 'with', 'open', '(', 'filename', ',', 'mode', '=', "'r'", ')', 'as', 'f', ':', 'node2', '=', 'pd', '.', 'read_table', '(', 'filepath_or_buffer', '=', 'f', ',', 'header', '=', 'None', ',', 'sep', '=', "' '", ',', 'skipinitialspace', '=', 'True', ',', 'index_col', '=', '0', ')', 'node2', '.', 'columns', '=', '[', "'pore.volume'", ',', "'pore.radius'", ',', "'pore.shape_factor'", ',', "'pore.clay_volume'", ']', '# Add node2 props to net', 'net', '[', "'pore.volume'", ']', '=', 'sp', '.', 'array', '(', 'node2', '[', "'pore.volume'", ']', ')', 'net', '[', "'pore.radius'", ']', '=', 'sp', '.', 'array', '(', 'node2', '[', "'pore.radius'", ']', ')', 'net', '[', "'pore.shape_factor'", ']', '=', 'sp', '.', 'array', '(', 'node2', '[', "'pore.shape_factor'", ']', ')', 'net', '[', "'pore.clay_volume'", ']', '=', 'sp', '.', 'array', '(', 'node2', '[', "'pore.clay_volume'", ']', ')', 'if', 'network', 'is', 'None', ':', 'network', '=', 'GenericNetwork', '(', ')', 'network', '=', 'cls', '.', '_update_network', '(', 'network', '=', 'network', ',', 'net', '=', 'net', ')', '# Use OpenPNM Tools to clean up network', "# Trim throats connected to 'inlet' or 'outlet' reservoirs", 'trim1', '=', 'sp', '.', 'where', '(', 'sp', '.', 'any', '(', 'net', '[', "'throat.conns'", ']', '==', '-', '1', ',', 'axis', '=', '1', ')', ')', '[', '0', ']', "# Apply 'outlet' label to these pores", 'outlets', '=', 'network', '[', "'throat.conns'", ']', '[', 'trim1', ',', '1', ']', 'network', '[', "'pore.outlets'", ']', '=', 'False', 'network', '[', "'pore.outlets'", ']', '[', 'outlets', ']', '=', 'True', 'trim2', '=', 'sp', '.', 'where', '(', 'sp', '.', 'any', '(', 'net', '[', "'throat.conns'", ']', '==', '-', '2', ',', 'axis', '=', '1', ')', ')', '[', '0', ']', "# Apply 'inlet' label to these pores", 'inlets', '=', 'network', '[', "'throat.conns'", ']', '[', 'trim2', ',', '1', ']', 'network', '[', "'pore.inlets'", ']', '=', 'False', 'network', '[', "'pore.inlets'", ']', '[', 'inlets', ']', '=', 'True', '# Now trim the throats', 'to_trim', '=', 'sp', '.', 'hstack', '(', '[', 'trim1', ',', 'trim2', ']', ')', 'trim', '(', 'network', '=', 'network', ',', 'throats', '=', 'to_trim', ')', 'return', 'network', '.', 'project'] | r"""
        Load data from the 'dat' files located in specified folder.
Parameters
----------
path : string
            The full path to the folder containing the set of 'dat' files.
prefix : string
The file name prefix on each file. The data files are stored
            as <prefix>_node1.dat.
network : OpenPNM Network Object
If given then the data will be loaded on it and returned. If not
given, a Network will be created and returned.
Returns
-------
An OpenPNM Project containing a GenericNetwork holding all the data | ['r', 'Load', 'data', 'from', 'the', '\\', 'dat', '\\', 'files', 'located', 'in', 'specified', 'folder', '.'] | train | https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/io/Statoil.py#L28-L143 |
4,419 | openatx/facebook-wda | wda/__init__.py | Session.screenshot | def screenshot(self):
"""
Take screenshot with session check
Returns:
PIL.Image
"""
b64data = self.http.get('/screenshot').value
raw_data = base64.b64decode(b64data)
from PIL import Image
buff = io.BytesIO(raw_data)
return Image.open(buff) | python | def screenshot(self):
"""
Take screenshot with session check
Returns:
PIL.Image
"""
b64data = self.http.get('/screenshot').value
raw_data = base64.b64decode(b64data)
from PIL import Image
buff = io.BytesIO(raw_data)
return Image.open(buff) | ['def', 'screenshot', '(', 'self', ')', ':', 'b64data', '=', 'self', '.', 'http', '.', 'get', '(', "'/screenshot'", ')', '.', 'value', 'raw_data', '=', 'base64', '.', 'b64decode', '(', 'b64data', ')', 'from', 'PIL', 'import', 'Image', 'buff', '=', 'io', '.', 'BytesIO', '(', 'raw_data', ')', 'return', 'Image', '.', 'open', '(', 'buff', ')'] | Take screenshot with session check
Returns:
PIL.Image | ['Take', 'screenshot', 'with', 'session', 'check'] | train | https://github.com/openatx/facebook-wda/blob/aa644204620c6d5c7705a9c7452d8c0cc39330d5/wda/__init__.py#L463-L474 |
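A brief usage sketch for the `Session.screenshot` record above. The client URL and session setup are assumptions (any reachable WebDriverAgent endpoint would do); only the `screenshot()` call and the returned `PIL.Image` come from the record itself.

```python
import wda  # facebook-wda, as in the record above

c = wda.Client("http://localhost:8100")  # assumed WDA endpoint
s = c.session()                          # assumed session setup
img = s.screenshot()                     # PIL.Image, per the docstring above
img.save("screen.png")
```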
4,420 | dingusdk/PythonIhcSdk | ihcsdk/ihccontroller.py | IHCController.set_runtime_value_bool | def set_runtime_value_bool(self, ihcid: int, value: bool) -> bool:
""" Set bool runtime value with re-authenticate if needed"""
if self.client.set_runtime_value_bool(ihcid, value):
return True
self.re_authenticate()
return self.client.set_runtime_value_bool(ihcid, value) | python | def set_runtime_value_bool(self, ihcid: int, value: bool) -> bool:
""" Set bool runtime value with re-authenticate if needed"""
if self.client.set_runtime_value_bool(ihcid, value):
return True
self.re_authenticate()
return self.client.set_runtime_value_bool(ihcid, value) | ['def', 'set_runtime_value_bool', '(', 'self', ',', 'ihcid', ':', 'int', ',', 'value', ':', 'bool', ')', '->', 'bool', ':', 'if', 'self', '.', 'client', '.', 'set_runtime_value_bool', '(', 'ihcid', ',', 'value', ')', ':', 'return', 'True', 'self', '.', 're_authenticate', '(', ')', 'return', 'self', '.', 'client', '.', 'set_runtime_value_bool', '(', 'ihcid', ',', 'value', ')'] | Set bool runtime value with re-authenticate if needed | ['Set', 'bool', 'runtime', 'value', 'with', 're', '-', 'authenticate', 'if', 'needed'] | train | https://github.com/dingusdk/PythonIhcSdk/blob/7e2067e009fe7600b49f30bff1cf91dc72fc891e/ihcsdk/ihccontroller.py#L54-L59 |
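A hedged call sketch for the `set_runtime_value_bool` record above, assuming an already-authenticated `IHCController` named `ctrl`; the resource id is a placeholder.

```python
# ctrl is assumed to be an authenticated ihcsdk.ihccontroller.IHCController instance
ok = ctrl.set_runtime_value_bool(ihcid=12345, value=True)  # 12345 is a placeholder IHC resource id
if not ok:
    print("write failed even after re-authentication")
```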
4,421 | mlperf/training | translation/tensorflow/transformer/utils/tokenizer.py | _unicode_to_native | def _unicode_to_native(s):
"""Convert string from unicode to native format (required in Python 2)."""
if six.PY2:
return s.encode("utf-8") if isinstance(s, unicode) else s
else:
return s | python | def _unicode_to_native(s):
"""Convert string from unicode to native format (required in Python 2)."""
if six.PY2:
return s.encode("utf-8") if isinstance(s, unicode) else s
else:
return s | ['def', '_unicode_to_native', '(', 's', ')', ':', 'if', 'six', '.', 'PY2', ':', 'return', 's', '.', 'encode', '(', '"utf-8"', ')', 'if', 'isinstance', '(', 's', ',', 'unicode', ')', 'else', 's', 'else', ':', 'return', 's'] | Convert string from unicode to native format (required in Python 2). | ['Convert', 'string', 'from', 'unicode', 'to', 'native', 'format', '(', 'required', 'in', 'Python', '2', ')', '.'] | train | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/translation/tensorflow/transformer/utils/tokenizer.py#L216-L221 |
4,422 | MLAB-project/pymlab | src/pymlab/sensors/clkgen.py | CLKGEN01.set_freq | def set_freq(self, fout, freq):
"""
Sets new output frequency, required parameters are real current frequency at output and new required frequency.
"""
hsdiv_tuple = (4, 5, 6, 7, 9, 11) # possible dividers
n1div_tuple = (1,) + tuple(range(2,129,2)) #
fdco_min = 5670.0 # set maximum as minimum
hsdiv = self.get_hs_div() # read curent dividers
n1div = self.get_n1_div() #
if abs((freq-fout)*1e6/fout) > 3500:
# Large change of frequency
fdco = fout * hsdiv * n1div # calculate high frequency oscillator
fxtal = fdco / self.get_rfreq() # should be fxtal = 114.285
for hsdiv_iter in hsdiv_tuple: # find dividers with minimal power consumption
for n1div_iter in n1div_tuple:
fdco_new = freq * hsdiv_iter * n1div_iter
if (fdco_new >= 4850) and (fdco_new <= 5670):
if (fdco_new <= fdco_min):
fdco_min = fdco_new
hsdiv = hsdiv_iter
n1div = n1div_iter
rfreq = fdco_min / fxtal
self.freeze_dco() # write registers
self.set_hs_div(hsdiv)
self.set_n1_div(n1div)
self.set_rfreq(rfreq)
self.unfreeze_dco()
self.new_freq()
else:
# Small change of frequency
rfreq = self.get_rfreq() * (freq/fout)
self.freeze_m() # write registers
self.set_rfreq(rfreq)
self.unfreeze_m() | python | def set_freq(self, fout, freq):
"""
Sets new output frequency, required parameters are real current frequency at output and new required frequency.
"""
hsdiv_tuple = (4, 5, 6, 7, 9, 11) # possible dividers
n1div_tuple = (1,) + tuple(range(2,129,2)) #
fdco_min = 5670.0 # set maximum as minimum
hsdiv = self.get_hs_div() # read curent dividers
n1div = self.get_n1_div() #
if abs((freq-fout)*1e6/fout) > 3500:
# Large change of frequency
fdco = fout * hsdiv * n1div # calculate high frequency oscillator
fxtal = fdco / self.get_rfreq() # should be fxtal = 114.285
for hsdiv_iter in hsdiv_tuple: # find dividers with minimal power consumption
for n1div_iter in n1div_tuple:
fdco_new = freq * hsdiv_iter * n1div_iter
if (fdco_new >= 4850) and (fdco_new <= 5670):
if (fdco_new <= fdco_min):
fdco_min = fdco_new
hsdiv = hsdiv_iter
n1div = n1div_iter
rfreq = fdco_min / fxtal
self.freeze_dco() # write registers
self.set_hs_div(hsdiv)
self.set_n1_div(n1div)
self.set_rfreq(rfreq)
self.unfreeze_dco()
self.new_freq()
else:
# Small change of frequency
rfreq = self.get_rfreq() * (freq/fout)
self.freeze_m() # write registers
self.set_rfreq(rfreq)
self.unfreeze_m() | ['def', 'set_freq', '(', 'self', ',', 'fout', ',', 'freq', ')', ':', 'hsdiv_tuple', '=', '(', '4', ',', '5', ',', '6', ',', '7', ',', '9', ',', '11', ')', '# possible dividers', 'n1div_tuple', '=', '(', '1', ',', ')', '+', 'tuple', '(', 'range', '(', '2', ',', '129', ',', '2', ')', ')', '#', 'fdco_min', '=', '5670.0', '# set maximum as minimum', 'hsdiv', '=', 'self', '.', 'get_hs_div', '(', ')', '# read curent dividers', 'n1div', '=', 'self', '.', 'get_n1_div', '(', ')', '#', 'if', 'abs', '(', '(', 'freq', '-', 'fout', ')', '*', '1e6', '/', 'fout', ')', '>', '3500', ':', '# Large change of frequency ', 'fdco', '=', 'fout', '*', 'hsdiv', '*', 'n1div', '# calculate high frequency oscillator', 'fxtal', '=', 'fdco', '/', 'self', '.', 'get_rfreq', '(', ')', '# should be fxtal = 114.285 ', 'for', 'hsdiv_iter', 'in', 'hsdiv_tuple', ':', '# find dividers with minimal power consumption', 'for', 'n1div_iter', 'in', 'n1div_tuple', ':', 'fdco_new', '=', 'freq', '*', 'hsdiv_iter', '*', 'n1div_iter', 'if', '(', 'fdco_new', '>=', '4850', ')', 'and', '(', 'fdco_new', '<=', '5670', ')', ':', 'if', '(', 'fdco_new', '<=', 'fdco_min', ')', ':', 'fdco_min', '=', 'fdco_new', 'hsdiv', '=', 'hsdiv_iter', 'n1div', '=', 'n1div_iter', 'rfreq', '=', 'fdco_min', '/', 'fxtal', 'self', '.', 'freeze_dco', '(', ')', '# write registers', 'self', '.', 'set_hs_div', '(', 'hsdiv', ')', 'self', '.', 'set_n1_div', '(', 'n1div', ')', 'self', '.', 'set_rfreq', '(', 'rfreq', ')', 'self', '.', 'unfreeze_dco', '(', ')', 'self', '.', 'new_freq', '(', ')', 'else', ':', '# Small change of frequency', 'rfreq', '=', 'self', '.', 'get_rfreq', '(', ')', '*', '(', 'freq', '/', 'fout', ')', 'self', '.', 'freeze_m', '(', ')', '# write registers ', 'self', '.', 'set_rfreq', '(', 'rfreq', ')', 'self', '.', 'unfreeze_m', '(', ')'] | Sets new output frequency, required parameters are real current frequency at output and new required frequency. | ['Sets', 'new', 'output', 'frequency', 'required', 'parameters', 'are', 'real', 'current', 'frequency', 'at', 'output', 'and', 'new', 'required', 'frequency', '.'] | train | https://github.com/MLAB-project/pymlab/blob/d18d858ae83b203defcf2aead0dbd11b3c444658/src/pymlab/sensors/clkgen.py#L102-L139 |
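An illustrative call for the `CLKGEN01.set_freq` record above. How the `CLKGEN01` instance is obtained is an assumption (pymlab devices are normally built from a pymlab configuration), and the values are taken to be in MHz, consistent with the 4850-5670 MHz DCO window checked in the code.

```python
# clkgen is assumed to be an initialised pymlab CLKGEN01 instance
current_output_mhz = 10.0   # frequency currently measured at the output
requested_mhz = 25.0        # new output frequency to program
clkgen.set_freq(current_output_mhz, requested_mhz)
```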
4,423 | allenai/allennlp | allennlp/state_machines/trainers/decoder_trainer.py | DecoderTrainer.decode | def decode(self,
initial_state: State,
transition_function: TransitionFunction,
supervision: SupervisionType) -> Dict[str, torch.Tensor]:
"""
Takes an initial state object, a means of transitioning from state to state, and a
supervision signal, and uses the supervision to train the transition function to pick
"good" states.
This function should typically return a ``loss`` key during training, which the ``Model``
will use as its loss.
Parameters
----------
initial_state : ``State``
This is the initial state for decoding, typically initialized after running some kind
of encoder on some inputs.
transition_function : ``TransitionFunction``
This is the transition function that scores all possible actions that can be taken in a
given state, and returns a ranked list of next states at each step of decoding.
supervision : ``SupervisionType``
This is the supervision that is used to train the ``transition_function`` function to
pick "good" states. You can use whatever kind of supervision you want (e.g., a single
"gold" action sequence, a set of possible "gold" action sequences, a reward function,
etc.). We use ``typing.Generics`` to make sure that our static type checker is happy
with how you've matched the supervision that you provide in the model to the
``DecoderTrainer`` that you want to use.
"""
raise NotImplementedError | python | def decode(self,
initial_state: State,
transition_function: TransitionFunction,
supervision: SupervisionType) -> Dict[str, torch.Tensor]:
"""
Takes an initial state object, a means of transitioning from state to state, and a
supervision signal, and uses the supervision to train the transition function to pick
"good" states.
This function should typically return a ``loss`` key during training, which the ``Model``
will use as its loss.
Parameters
----------
initial_state : ``State``
This is the initial state for decoding, typically initialized after running some kind
of encoder on some inputs.
transition_function : ``TransitionFunction``
This is the transition function that scores all possible actions that can be taken in a
given state, and returns a ranked list of next states at each step of decoding.
supervision : ``SupervisionType``
This is the supervision that is used to train the ``transition_function`` function to
pick "good" states. You can use whatever kind of supervision you want (e.g., a single
"gold" action sequence, a set of possible "gold" action sequences, a reward function,
etc.). We use ``typing.Generics`` to make sure that our static type checker is happy
with how you've matched the supervision that you provide in the model to the
``DecoderTrainer`` that you want to use.
"""
raise NotImplementedError | ['def', 'decode', '(', 'self', ',', 'initial_state', ':', 'State', ',', 'transition_function', ':', 'TransitionFunction', ',', 'supervision', ':', 'SupervisionType', ')', '->', 'Dict', '[', 'str', ',', 'torch', '.', 'Tensor', ']', ':', 'raise', 'NotImplementedError'] | Takes an initial state object, a means of transitioning from state to state, and a
supervision signal, and uses the supervision to train the transition function to pick
"good" states.
This function should typically return a ``loss`` key during training, which the ``Model``
will use as its loss.
Parameters
----------
initial_state : ``State``
This is the initial state for decoding, typically initialized after running some kind
of encoder on some inputs.
transition_function : ``TransitionFunction``
This is the transition function that scores all possible actions that can be taken in a
given state, and returns a ranked list of next states at each step of decoding.
supervision : ``SupervisionType``
This is the supervision that is used to train the ``transition_function`` function to
pick "good" states. You can use whatever kind of supervision you want (e.g., a single
"gold" action sequence, a set of possible "gold" action sequences, a reward function,
etc.). We use ``typing.Generics`` to make sure that our static type checker is happy
with how you've matched the supervision that you provide in the model to the
``DecoderTrainer`` that you want to use. | ['Takes', 'an', 'initial', 'state', 'object', 'a', 'means', 'of', 'transitioning', 'from', 'state', 'to', 'state', 'and', 'a', 'supervision', 'signal', 'and', 'uses', 'the', 'supervision', 'to', 'train', 'the', 'transition', 'function', 'to', 'pick', 'good', 'states', '.'] | train | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/state_machines/trainers/decoder_trainer.py#L24-L52 |
4,424 | StackStorm/pybind | pybind/slxos/v17s_1_02/mpls_state/rsvp/__init__.py | rsvp._set_igp_sync | def _set_igp_sync(self, v, load=False):
"""
Setter method for igp_sync, mapped from YANG variable /mpls_state/rsvp/igp_sync (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_igp_sync is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_igp_sync() directly.
YANG Description: MPLS Rsvp IGP Synchronization information
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=igp_sync.igp_sync, is_container='container', presence=False, yang_name="igp-sync", rest_name="igp-sync", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """igp_sync must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=igp_sync.igp_sync, is_container='container', presence=False, yang_name="igp-sync", rest_name="igp-sync", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__igp_sync = t
if hasattr(self, '_set'):
self._set() | python | def _set_igp_sync(self, v, load=False):
"""
Setter method for igp_sync, mapped from YANG variable /mpls_state/rsvp/igp_sync (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_igp_sync is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_igp_sync() directly.
YANG Description: MPLS Rsvp IGP Synchronization information
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=igp_sync.igp_sync, is_container='container', presence=False, yang_name="igp-sync", rest_name="igp-sync", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """igp_sync must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=igp_sync.igp_sync, is_container='container', presence=False, yang_name="igp-sync", rest_name="igp-sync", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__igp_sync = t
if hasattr(self, '_set'):
self._set() | ['def', '_set_igp_sync', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'igp_sync', '.', 'igp_sync', ',', 'is_container', '=', "'container'", ',', 'presence', '=', 'False', ',', 'yang_name', '=', '"igp-sync"', ',', 'rest_name', '=', '"igp-sync"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'callpoint'", ':', "u'mpls-rsvp-igp-sync'", ',', "u'cli-suppress-show-path'", ':', 'None', '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-mpls-operational'", ',', 'defining_module', '=', "'brocade-mpls-operational'", ',', 'yang_type', '=', "'container'", ',', 'is_config', '=', 'False', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""igp_sync must be of a type compatible with container"""', ',', "'defined-type'", ':', '"container"', ',', "'generated-type'", ':', '"""YANGDynClass(base=igp_sync.igp_sync, is_container=\'container\', presence=False, yang_name="igp-sync", rest_name="igp-sync", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'callpoint\': u\'mpls-rsvp-igp-sync\', u\'cli-suppress-show-path\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-mpls-operational\', defining_module=\'brocade-mpls-operational\', yang_type=\'container\', is_config=False)"""', ',', '}', ')', 'self', '.', '__igp_sync', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')'] | Setter method for igp_sync, mapped from YANG variable /mpls_state/rsvp/igp_sync (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_igp_sync is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_igp_sync() directly.
YANG Description: MPLS Rsvp IGP Synchronization information | ['Setter', 'method', 'for', 'igp_sync', 'mapped', 'from', 'YANG', 'variable', '/', 'mpls_state', '/', 'rsvp', '/', 'igp_sync', '(', 'container', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_igp_sync', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_igp_sync', '()', 'directly', '.'] | train | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/mpls_state/rsvp/__init__.py#L407-L430 |
4,425 | IzunaDevs/SnekChek | snekchek/__main__.py | run_main | def run_main(args: argparse.Namespace, do_exit=True) -> None:
"""Runs the checks and exits.
To extend this tool, use this function and set do_exit to False
to get returned the status code.
"""
if args.init:
generate()
return None # exit after generate instead of starting to lint
handler = CheckHandler(
file=args.config_file, out_json=args.json, files=args.files)
for style in get_stylers():
handler.run_linter(style())
for linter in get_linters():
handler.run_linter(linter())
for security in get_security():
handler.run_linter(security())
for tool in get_tools():
tool = tool()
# Only run pypi if everything else passed
if tool.name == "pypi" and handler.status_code != 0:
continue
handler.run_linter(tool)
if do_exit:
handler.exit()
return handler.status_code | python | def run_main(args: argparse.Namespace, do_exit=True) -> None:
"""Runs the checks and exits.
To extend this tool, use this function and set do_exit to False
to get returned the status code.
"""
if args.init:
generate()
return None # exit after generate instead of starting to lint
handler = CheckHandler(
file=args.config_file, out_json=args.json, files=args.files)
for style in get_stylers():
handler.run_linter(style())
for linter in get_linters():
handler.run_linter(linter())
for security in get_security():
handler.run_linter(security())
for tool in get_tools():
tool = tool()
# Only run pypi if everything else passed
if tool.name == "pypi" and handler.status_code != 0:
continue
handler.run_linter(tool)
if do_exit:
handler.exit()
return handler.status_code | ['def', 'run_main', '(', 'args', ':', 'argparse', '.', 'Namespace', ',', 'do_exit', '=', 'True', ')', '->', 'None', ':', 'if', 'args', '.', 'init', ':', 'generate', '(', ')', 'return', 'None', '# exit after generate instead of starting to lint', 'handler', '=', 'CheckHandler', '(', 'file', '=', 'args', '.', 'config_file', ',', 'out_json', '=', 'args', '.', 'json', ',', 'files', '=', 'args', '.', 'files', ')', 'for', 'style', 'in', 'get_stylers', '(', ')', ':', 'handler', '.', 'run_linter', '(', 'style', '(', ')', ')', 'for', 'linter', 'in', 'get_linters', '(', ')', ':', 'handler', '.', 'run_linter', '(', 'linter', '(', ')', ')', 'for', 'security', 'in', 'get_security', '(', ')', ':', 'handler', '.', 'run_linter', '(', 'security', '(', ')', ')', 'for', 'tool', 'in', 'get_tools', '(', ')', ':', 'tool', '=', 'tool', '(', ')', '# Only run pypi if everything else passed', 'if', 'tool', '.', 'name', '==', '"pypi"', 'and', 'handler', '.', 'status_code', '!=', '0', ':', 'continue', 'handler', '.', 'run_linter', '(', 'tool', ')', 'if', 'do_exit', ':', 'handler', '.', 'exit', '(', ')', 'return', 'handler', '.', 'status_code'] | Runs the checks and exits.
To extend this tool, use this function and set do_exit to False
to get returned the status code. | ['Runs', 'the', 'checks', 'and', 'exits', '.'] | train | https://github.com/IzunaDevs/SnekChek/blob/fdb01bdf1ec8e79d9aae2a11d96bfb27e53a97a9/snekchek/__main__.py#L39-L72 |
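A driver sketch for the `run_main` record above. The `Namespace` attributes mirror exactly those the function reads (`init`, `json`, `config_file`, `files`); the config path and file list are placeholders.

```python
import argparse
from snekchek.__main__ import run_main  # import path taken from the record

args = argparse.Namespace(init=False, json=False,
                          config_file=".snekrc",   # placeholder config path
                          files=["mypackage/"])    # placeholder lint target
status = run_main(args, do_exit=False)  # returns the aggregated status code instead of exiting
print(status)
```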
4,426 | rackerlabs/simpl | simpl/config.py | Config.validate_config | def validate_config(self, values, argv=None, strict=False):
"""Validate all config values through the command-line parser.
This takes all supplied options (which could have been retrieved from a
number of sources (such as CLI, env vars, etc...) and then validates
them by running them through argparser (and raises SystemExit on
failure).
:returns dict: key/values for all config values (from all sources)
:raises: SystemExit
"""
options = []
for option in self._options:
kwargs = option.kwargs.copy()
if option.name in values:
if 'default' in kwargs:
# Since we're overriding defaults, we need to
# preserve the default value for the help text:
help_text = kwargs.get('help')
if help_text:
if '(default: ' not in help_text:
kwargs['help'] = '%s (default: %s)' % (
help_text, kwargs['default']
)
kwargs['default'] = values[option.name]
kwargs['required'] = False # since we have a value
temp = Option(*option.args, **kwargs)
options.append(temp)
parser = self.build_parser(options,
formatter_class=argparse.HelpFormatter)
if argv:
parsed, extras = parser.parse_known_args(argv[1:])
if extras:
valid, _ = self.parse_passthru_args(argv[1:])
parsed, extras = parser.parse_known_args(valid)
if extras and strict: # still
self.build_parser(options)
parser.parse_args(argv[1:])
else:
parsed = parser.parse_args([])
results = vars(parsed)
raise_for_group = {}
for option in self._options:
if option.kwargs.get('required'):
if option.dest not in results or results[option.dest] is None:
if getattr(option, '_mutexgroup', None):
raise_for_group.setdefault(option._mutexgroup, [])
raise_for_group[option._mutexgroup].append(
option._action)
else:
raise SystemExit("'%s' is required. See --help "
"for more info." % option.name)
else:
if getattr(option, '_mutexgroup', None):
raise_for_group.pop(option._mutexgroup, None)
if raise_for_group:
optstrings = [str(k.option_strings)
for k in raise_for_group.values()[0]]
msg = "One of %s required. " % " ,".join(optstrings)
raise SystemExit(msg + "See --help for more info.")
return results | python | def validate_config(self, values, argv=None, strict=False):
"""Validate all config values through the command-line parser.
This takes all supplied options (which could have been retrieved from a
number of sources (such as CLI, env vars, etc...) and then validates
them by running them through argparser (and raises SystemExit on
failure).
:returns dict: key/values for all config values (from all sources)
:raises: SystemExit
"""
options = []
for option in self._options:
kwargs = option.kwargs.copy()
if option.name in values:
if 'default' in kwargs:
# Since we're overriding defaults, we need to
# preserve the default value for the help text:
help_text = kwargs.get('help')
if help_text:
if '(default: ' not in help_text:
kwargs['help'] = '%s (default: %s)' % (
help_text, kwargs['default']
)
kwargs['default'] = values[option.name]
kwargs['required'] = False # since we have a value
temp = Option(*option.args, **kwargs)
options.append(temp)
parser = self.build_parser(options,
formatter_class=argparse.HelpFormatter)
if argv:
parsed, extras = parser.parse_known_args(argv[1:])
if extras:
valid, _ = self.parse_passthru_args(argv[1:])
parsed, extras = parser.parse_known_args(valid)
if extras and strict: # still
self.build_parser(options)
parser.parse_args(argv[1:])
else:
parsed = parser.parse_args([])
results = vars(parsed)
raise_for_group = {}
for option in self._options:
if option.kwargs.get('required'):
if option.dest not in results or results[option.dest] is None:
if getattr(option, '_mutexgroup', None):
raise_for_group.setdefault(option._mutexgroup, [])
raise_for_group[option._mutexgroup].append(
option._action)
else:
raise SystemExit("'%s' is required. See --help "
"for more info." % option.name)
else:
if getattr(option, '_mutexgroup', None):
raise_for_group.pop(option._mutexgroup, None)
if raise_for_group:
optstrings = [str(k.option_strings)
for k in raise_for_group.values()[0]]
msg = "One of %s required. " % " ,".join(optstrings)
raise SystemExit(msg + "See --help for more info.")
return results | ['def', 'validate_config', '(', 'self', ',', 'values', ',', 'argv', '=', 'None', ',', 'strict', '=', 'False', ')', ':', 'options', '=', '[', ']', 'for', 'option', 'in', 'self', '.', '_options', ':', 'kwargs', '=', 'option', '.', 'kwargs', '.', 'copy', '(', ')', 'if', 'option', '.', 'name', 'in', 'values', ':', 'if', "'default'", 'in', 'kwargs', ':', "# Since we're overriding defaults, we need to", '# preserve the default value for the help text:', 'help_text', '=', 'kwargs', '.', 'get', '(', "'help'", ')', 'if', 'help_text', ':', 'if', "'(default: '", 'not', 'in', 'help_text', ':', 'kwargs', '[', "'help'", ']', '=', "'%s (default: %s)'", '%', '(', 'help_text', ',', 'kwargs', '[', "'default'", ']', ')', 'kwargs', '[', "'default'", ']', '=', 'values', '[', 'option', '.', 'name', ']', 'kwargs', '[', "'required'", ']', '=', 'False', '# since we have a value', 'temp', '=', 'Option', '(', '*', 'option', '.', 'args', ',', '*', '*', 'kwargs', ')', 'options', '.', 'append', '(', 'temp', ')', 'parser', '=', 'self', '.', 'build_parser', '(', 'options', ',', 'formatter_class', '=', 'argparse', '.', 'HelpFormatter', ')', 'if', 'argv', ':', 'parsed', ',', 'extras', '=', 'parser', '.', 'parse_known_args', '(', 'argv', '[', '1', ':', ']', ')', 'if', 'extras', ':', 'valid', ',', '_', '=', 'self', '.', 'parse_passthru_args', '(', 'argv', '[', '1', ':', ']', ')', 'parsed', ',', 'extras', '=', 'parser', '.', 'parse_known_args', '(', 'valid', ')', 'if', 'extras', 'and', 'strict', ':', '# still', 'self', '.', 'build_parser', '(', 'options', ')', 'parser', '.', 'parse_args', '(', 'argv', '[', '1', ':', ']', ')', 'else', ':', 'parsed', '=', 'parser', '.', 'parse_args', '(', '[', ']', ')', 'results', '=', 'vars', '(', 'parsed', ')', 'raise_for_group', '=', '{', '}', 'for', 'option', 'in', 'self', '.', '_options', ':', 'if', 'option', '.', 'kwargs', '.', 'get', '(', "'required'", ')', ':', 'if', 'option', '.', 'dest', 'not', 'in', 'results', 'or', 'results', '[', 'option', '.', 'dest', ']', 'is', 'None', ':', 'if', 'getattr', '(', 'option', ',', "'_mutexgroup'", ',', 'None', ')', ':', 'raise_for_group', '.', 'setdefault', '(', 'option', '.', '_mutexgroup', ',', '[', ']', ')', 'raise_for_group', '[', 'option', '.', '_mutexgroup', ']', '.', 'append', '(', 'option', '.', '_action', ')', 'else', ':', 'raise', 'SystemExit', '(', '"\'%s\' is required. See --help "', '"for more info."', '%', 'option', '.', 'name', ')', 'else', ':', 'if', 'getattr', '(', 'option', ',', "'_mutexgroup'", ',', 'None', ')', ':', 'raise_for_group', '.', 'pop', '(', 'option', '.', '_mutexgroup', ',', 'None', ')', 'if', 'raise_for_group', ':', 'optstrings', '=', '[', 'str', '(', 'k', '.', 'option_strings', ')', 'for', 'k', 'in', 'raise_for_group', '.', 'values', '(', ')', '[', '0', ']', ']', 'msg', '=', '"One of %s required. "', '%', '" ,"', '.', 'join', '(', 'optstrings', ')', 'raise', 'SystemExit', '(', 'msg', '+', '"See --help for more info."', ')', 'return', 'results'] | Validate all config values through the command-line parser.
This takes all supplied options (which could have been retrieved from a
number of sources (such as CLI, env vars, etc...) and then validates
them by running them through argparser (and raises SystemExit on
failure).
:returns dict: key/values for all config values (from all sources)
:raises: SystemExit | ['Validate', 'all', 'config', 'values', 'through', 'the', 'command', '-', 'line', 'parser', '.'] | train | https://github.com/rackerlabs/simpl/blob/60ed3336a931cd6a7a7246e60f26165d9dc7c99c/simpl/config.py#L511-L572 |
4,427 | fitnr/twitter_bot_utils | twitter_bot_utils/helpers.py | has_entities | def has_entities(status):
"""
Returns true if a Status object has entities.
Args:
status: either a tweepy.Status object or a dict returned from Twitter API
"""
try:
if sum(len(v) for v in status.entities.values()) > 0:
return True
except AttributeError:
if sum(len(v) for v in status['entities'].values()) > 0:
return True
return False | python | def has_entities(status):
"""
Returns true if a Status object has entities.
Args:
status: either a tweepy.Status object or a dict returned from Twitter API
"""
try:
if sum(len(v) for v in status.entities.values()) > 0:
return True
except AttributeError:
if sum(len(v) for v in status['entities'].values()) > 0:
return True
return False | ['def', 'has_entities', '(', 'status', ')', ':', 'try', ':', 'if', 'sum', '(', 'len', '(', 'v', ')', 'for', 'v', 'in', 'status', '.', 'entities', '.', 'values', '(', ')', ')', '>', '0', ':', 'return', 'True', 'except', 'AttributeError', ':', 'if', 'sum', '(', 'len', '(', 'v', ')', 'for', 'v', 'in', 'status', '[', "'entities'", ']', '.', 'values', '(', ')', ')', '>', '0', ':', 'return', 'True', 'return', 'False'] | Returns true if a Status object has entities.
Args:
status: either a tweepy.Status object or a dict returned from Twitter API | ['Returns', 'true', 'if', 'a', 'Status', 'object', 'has', 'entities', '.'] | train | https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/helpers.py#L54-L69 |
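A self-contained check of the `has_entities` record above, exercising the dict branch of the helper; the status payloads are illustrative only.

```python
from twitter_bot_utils.helpers import has_entities  # module path taken from the record

with_entities = {"entities": {"hashtags": [{"text": "python"}], "urls": [], "user_mentions": []}}
without_entities = {"entities": {"hashtags": [], "urls": [], "user_mentions": []}}

print(has_entities(with_entities))     # True  - at least one entity list is non-empty
print(has_entities(without_entities))  # False - every entity list is empty
```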
4,428 | micha030201/aionationstates | aionationstates/region_.py | Region.officers | async def officers(self, root):
"""Regional Officers. Does not include the Founder or
the Delegate, unless they have additional titles as Officers.
In the correct order.
Returns
-------
an :class:`ApiQuery` of a list of :class:`Officer`
"""
officers = sorted(
root.find('OFFICERS'),
# I struggle to say what else this tag would be useful for.
key=lambda elem: int(elem.find('ORDER').text)
)
return [Officer(elem) for elem in officers] | python | async def officers(self, root):
"""Regional Officers. Does not include the Founder or
the Delegate, unless they have additional titles as Officers.
In the correct order.
Returns
-------
an :class:`ApiQuery` of a list of :class:`Officer`
"""
officers = sorted(
root.find('OFFICERS'),
# I struggle to say what else this tag would be useful for.
key=lambda elem: int(elem.find('ORDER').text)
)
return [Officer(elem) for elem in officers] | ['async', 'def', 'officers', '(', 'self', ',', 'root', ')', ':', 'officers', '=', 'sorted', '(', 'root', '.', 'find', '(', "'OFFICERS'", ')', ',', '# I struggle to say what else this tag would be useful for.', 'key', '=', 'lambda', 'elem', ':', 'int', '(', 'elem', '.', 'find', '(', "'ORDER'", ')', '.', 'text', ')', ')', 'return', '[', 'Officer', '(', 'elem', ')', 'for', 'elem', 'in', 'officers', ']'] | Regional Officers. Does not include the Founder or
the Delegate, unless they have additional titles as Officers.
In the correct order.
Returns
-------
an :class:`ApiQuery` of a list of :class:`Officer` | ['Regional', 'Officers', '.', 'Does', 'not', 'include', 'the', 'Founder', 'or', 'the', 'Delegate', 'unless', 'they', 'have', 'additional', 'titles', 'as', 'Officers', '.'] | train | https://github.com/micha030201/aionationstates/blob/dc86b86d994cbab830b69ab8023601c73e778b3a/aionationstates/region_.py#L451-L466 |
4,429 | ejeschke/ginga | ginga/util/wcs.py | get_starsep_RaDecDeg | def get_starsep_RaDecDeg(ra1_deg, dec1_deg, ra2_deg, dec2_deg):
"""Calculate separation."""
sep = deltaStarsRaDecDeg(ra1_deg, dec1_deg, ra2_deg, dec2_deg)
sgn, deg, mn, sec = degToDms(sep)
if deg != 0:
txt = '%02d:%02d:%06.3f' % (deg, mn, sec)
else:
txt = '%02d:%06.3f' % (mn, sec)
return txt | python | def get_starsep_RaDecDeg(ra1_deg, dec1_deg, ra2_deg, dec2_deg):
"""Calculate separation."""
sep = deltaStarsRaDecDeg(ra1_deg, dec1_deg, ra2_deg, dec2_deg)
sgn, deg, mn, sec = degToDms(sep)
if deg != 0:
txt = '%02d:%02d:%06.3f' % (deg, mn, sec)
else:
txt = '%02d:%06.3f' % (mn, sec)
return txt | ['def', 'get_starsep_RaDecDeg', '(', 'ra1_deg', ',', 'dec1_deg', ',', 'ra2_deg', ',', 'dec2_deg', ')', ':', 'sep', '=', 'deltaStarsRaDecDeg', '(', 'ra1_deg', ',', 'dec1_deg', ',', 'ra2_deg', ',', 'dec2_deg', ')', 'sgn', ',', 'deg', ',', 'mn', ',', 'sec', '=', 'degToDms', '(', 'sep', ')', 'if', 'deg', '!=', '0', ':', 'txt', '=', "'%02d:%02d:%06.3f'", '%', '(', 'deg', ',', 'mn', ',', 'sec', ')', 'else', ':', 'txt', '=', "'%02d:%06.3f'", '%', '(', 'mn', ',', 'sec', ')', 'return', 'txt'] | Calculate separation. | ['Calculate', 'separation', '.'] | train | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/util/wcs.py#L577-L585 |
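A usage sketch for the separation helper above, with two illustrative positions one degree apart in declination.

```python
from ginga.util import wcs  # module path taken from the record

sep = wcs.get_starsep_RaDecDeg(150.0, 2.0, 150.0, 3.0)
print(sep)  # sexagesimal separation string, e.g. "01:00:00.000"
```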
4,430 | Alignak-monitoring/alignak | alignak/brok.py | Brok.prepare | def prepare(self):
"""Un-serialize data from data attribute and add instance_id key if necessary
:return: None
"""
# Maybe the Brok is a old daemon one or was already prepared
# if so, the data is already ok
if hasattr(self, 'prepared') and not self.prepared:
self.data = unserialize(self.data)
if self.instance_id:
self.data['instance_id'] = self.instance_id
self.prepared = True | python | def prepare(self):
"""Un-serialize data from data attribute and add instance_id key if necessary
:return: None
"""
# Maybe the Brok is a old daemon one or was already prepared
# if so, the data is already ok
if hasattr(self, 'prepared') and not self.prepared:
self.data = unserialize(self.data)
if self.instance_id:
self.data['instance_id'] = self.instance_id
self.prepared = True | ['def', 'prepare', '(', 'self', ')', ':', '# Maybe the Brok is a old daemon one or was already prepared', '# if so, the data is already ok', 'if', 'hasattr', '(', 'self', ',', "'prepared'", ')', 'and', 'not', 'self', '.', 'prepared', ':', 'self', '.', 'data', '=', 'unserialize', '(', 'self', '.', 'data', ')', 'if', 'self', '.', 'instance_id', ':', 'self', '.', 'data', '[', "'instance_id'", ']', '=', 'self', '.', 'instance_id', 'self', '.', 'prepared', '=', 'True'] | Un-serialize data from data attribute and add instance_id key if necessary
:return: None | ['Un', '-', 'serialize', 'data', 'from', 'data', 'attribute', 'and', 'add', 'instance_id', 'key', 'if', 'necessary'] | train | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/brok.py#L144-L155 |
4,431 | bitesofcode/projexui | projexui/dialogs/xwizardbrowserdialog/xwizardbrowserdialog.py | XWizardBrowserDialog.runWizard | def runWizard( self ):
"""
Runs the current wizard.
"""
plugin = self.currentPlugin()
if ( plugin and plugin.runWizard(self) ):
self.accept() | python | def runWizard( self ):
"""
Runs the current wizard.
"""
plugin = self.currentPlugin()
if ( plugin and plugin.runWizard(self) ):
self.accept() | ['def', 'runWizard', '(', 'self', ')', ':', 'plugin', '=', 'self', '.', 'currentPlugin', '(', ')', 'if', '(', 'plugin', 'and', 'plugin', '.', 'runWizard', '(', 'self', ')', ')', ':', 'self', '.', 'accept', '(', ')'] | Runs the current wizard. | ['Runs', 'the', 'current', 'wizard', '.'] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/dialogs/xwizardbrowserdialog/xwizardbrowserdialog.py#L133-L139 |
4,432 | brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_access_list.py | brocade_mac_access_list.get_mac_acl_for_intf_input_interface_type | def get_mac_acl_for_intf_input_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_mac_acl_for_intf = ET.Element("get_mac_acl_for_intf")
config = get_mac_acl_for_intf
input = ET.SubElement(get_mac_acl_for_intf, "input")
interface_type = ET.SubElement(input, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | python | def get_mac_acl_for_intf_input_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_mac_acl_for_intf = ET.Element("get_mac_acl_for_intf")
config = get_mac_acl_for_intf
input = ET.SubElement(get_mac_acl_for_intf, "input")
interface_type = ET.SubElement(input, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | ['def', 'get_mac_acl_for_intf_input_interface_type', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'get_mac_acl_for_intf', '=', 'ET', '.', 'Element', '(', '"get_mac_acl_for_intf"', ')', 'config', '=', 'get_mac_acl_for_intf', 'input', '=', 'ET', '.', 'SubElement', '(', 'get_mac_acl_for_intf', ',', '"input"', ')', 'interface_type', '=', 'ET', '.', 'SubElement', '(', 'input', ',', '"interface-type"', ')', 'interface_type', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'interface_type'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')'] | Auto Generated Code | ['Auto', 'Generated', 'Code'] | train | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_access_list.py#L391-L402 |
4,433 | planetlabs/planet-client-python | planet/scripts/v1.py | mosaic_info | def mosaic_info(name, pretty):
'''Get information for a specific mosaic'''
cl = clientv1()
echo_json_response(call_and_wrap(cl.get_mosaic_by_name, name), pretty) | python | def mosaic_info(name, pretty):
'''Get information for a specific mosaic'''
cl = clientv1()
echo_json_response(call_and_wrap(cl.get_mosaic_by_name, name), pretty) | ['def', 'mosaic_info', '(', 'name', ',', 'pretty', ')', ':', 'cl', '=', 'clientv1', '(', ')', 'echo_json_response', '(', 'call_and_wrap', '(', 'cl', '.', 'get_mosaic_by_name', ',', 'name', ')', ',', 'pretty', ')'] | Get information for a specific mosaic | ['Get', 'information', 'for', 'a', 'specific', 'mosaic'] | train | https://github.com/planetlabs/planet-client-python/blob/1c62ce7d416819951dddee0c22068fef6d40b027/planet/scripts/v1.py#L259-L262 |
4,434 | google/grr | grr/server/grr_response_server/aff4_objects/user_managers.py | FullAccessControlManager._IsHomeDir | def _IsHomeDir(self, subject, token):
"""Checks user access permissions for paths under aff4:/users."""
h = CheckAccessHelper("IsHomeDir")
h.Allow("aff4:/users/%s" % token.username)
h.Allow("aff4:/users/%s/*" % token.username)
try:
return h.CheckAccess(subject, token)
except access_control.UnauthorizedAccess:
raise access_control.UnauthorizedAccess(
"User can only access their "
"home directory.", subject=subject) | python | def _IsHomeDir(self, subject, token):
"""Checks user access permissions for paths under aff4:/users."""
h = CheckAccessHelper("IsHomeDir")
h.Allow("aff4:/users/%s" % token.username)
h.Allow("aff4:/users/%s/*" % token.username)
try:
return h.CheckAccess(subject, token)
except access_control.UnauthorizedAccess:
raise access_control.UnauthorizedAccess(
"User can only access their "
"home directory.", subject=subject) | ['def', '_IsHomeDir', '(', 'self', ',', 'subject', ',', 'token', ')', ':', 'h', '=', 'CheckAccessHelper', '(', '"IsHomeDir"', ')', 'h', '.', 'Allow', '(', '"aff4:/users/%s"', '%', 'token', '.', 'username', ')', 'h', '.', 'Allow', '(', '"aff4:/users/%s/*"', '%', 'token', '.', 'username', ')', 'try', ':', 'return', 'h', '.', 'CheckAccess', '(', 'subject', ',', 'token', ')', 'except', 'access_control', '.', 'UnauthorizedAccess', ':', 'raise', 'access_control', '.', 'UnauthorizedAccess', '(', '"User can only access their "', '"home directory."', ',', 'subject', '=', 'subject', ')'] | Checks user access permissions for paths under aff4:/users. | ['Checks', 'user', 'access', 'permissions', 'for', 'paths', 'under', 'aff4', ':', '/', 'users', '.'] | train | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4_objects/user_managers.py#L329-L339 |
4,435 | hatemile/hatemile-for-python | hatemile/implementation/css.py | AccessibleCSSImplementation._is_valid_inherit_element | def _is_valid_inherit_element(self, element):
"""
Check that the children of element can be manipulated to apply the CSS
properties.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
:return: True if the children of element can be manipulated to apply
the CSS properties or False if the children of element cannot
be manipulated to apply the CSS properties.
:rtype: bool
"""
# pylint: disable=no-self-use
tag_name = element.get_tag_name()
return (
(tag_name in AccessibleCSSImplementation.VALID_INHERIT_TAGS)
and (not element.has_attribute(CommonFunctions.DATA_IGNORE))
) | python | def _is_valid_inherit_element(self, element):
"""
Check that the children of element can be manipulated to apply the CSS
properties.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
:return: True if the children of element can be manipulated to apply
the CSS properties or False if the children of element cannot
be manipulated to apply the CSS properties.
:rtype: bool
"""
# pylint: disable=no-self-use
tag_name = element.get_tag_name()
return (
(tag_name in AccessibleCSSImplementation.VALID_INHERIT_TAGS)
and (not element.has_attribute(CommonFunctions.DATA_IGNORE))
) | ['def', '_is_valid_inherit_element', '(', 'self', ',', 'element', ')', ':', '# pylint: disable=no-self-use', 'tag_name', '=', 'element', '.', 'get_tag_name', '(', ')', 'return', '(', '(', 'tag_name', 'in', 'AccessibleCSSImplementation', '.', 'VALID_INHERIT_TAGS', ')', 'and', '(', 'not', 'element', '.', 'has_attribute', '(', 'CommonFunctions', '.', 'DATA_IGNORE', ')', ')', ')'] | Check that the children of element can be manipulated to apply the CSS
properties.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
:return: True if the children of element can be manipulated to apply
the CSS properties or False if the children of element cannot
be manipulated to apply the CSS properties.
:rtype: bool | ['Check', 'that', 'the', 'children', 'of', 'element', 'can', 'be', 'manipulated', 'to', 'apply', 'the', 'CSS', 'properties', '.'] | train | https://github.com/hatemile/hatemile-for-python/blob/1e914f9aa09f6f8d78282af131311546ecba9fb8/hatemile/implementation/css.py#L445-L463 |
4,436 | brainiak/brainiak | brainiak/fcma/classifier.py | Classifier._normalize_correlation_data | def _normalize_correlation_data(self, corr_data, norm_unit):
"""Normalize the correlation data if necessary.
Fisher-transform and then z-score the data for every norm_unit samples
if norm_unit > 1.
Parameters
----------
corr_data: the correlation data
in shape [num_samples, num_processed_voxels, num_voxels]
norm_unit: int
the number of samples on which the normalization
is performed
Returns
-------
normalized_corr_data: the normalized correlation data
in shape [num_samples, num_voxels, num_voxels]
"""
# normalize if necessary
if norm_unit > 1:
num_samples = len(corr_data)
[_, d2, d3] = corr_data.shape
second_dimension = d2 * d3
# this is a shallow copy
normalized_corr_data = corr_data.reshape(1,
num_samples,
second_dimension)
fcma_extension.normalization(normalized_corr_data, norm_unit)
normalized_corr_data = normalized_corr_data.reshape(num_samples,
d2, d3)
logger.debug(
'normalization done'
)
else:
normalized_corr_data = corr_data
return normalized_corr_data | python | def _normalize_correlation_data(self, corr_data, norm_unit):
"""Normalize the correlation data if necessary.
Fisher-transform and then z-score the data for every norm_unit samples
if norm_unit > 1.
Parameters
----------
corr_data: the correlation data
in shape [num_samples, num_processed_voxels, num_voxels]
norm_unit: int
the number of samples on which the normalization
is performed
Returns
-------
normalized_corr_data: the normalized correlation data
in shape [num_samples, num_voxels, num_voxels]
"""
# normalize if necessary
if norm_unit > 1:
num_samples = len(corr_data)
[_, d2, d3] = corr_data.shape
second_dimension = d2 * d3
# this is a shallow copy
normalized_corr_data = corr_data.reshape(1,
num_samples,
second_dimension)
fcma_extension.normalization(normalized_corr_data, norm_unit)
normalized_corr_data = normalized_corr_data.reshape(num_samples,
d2, d3)
logger.debug(
'normalization done'
)
else:
normalized_corr_data = corr_data
return normalized_corr_data | ['def', '_normalize_correlation_data', '(', 'self', ',', 'corr_data', ',', 'norm_unit', ')', ':', '# normalize if necessary', 'if', 'norm_unit', '>', '1', ':', 'num_samples', '=', 'len', '(', 'corr_data', ')', '[', '_', ',', 'd2', ',', 'd3', ']', '=', 'corr_data', '.', 'shape', 'second_dimension', '=', 'd2', '*', 'd3', '# this is a shallow copy', 'normalized_corr_data', '=', 'corr_data', '.', 'reshape', '(', '1', ',', 'num_samples', ',', 'second_dimension', ')', 'fcma_extension', '.', 'normalization', '(', 'normalized_corr_data', ',', 'norm_unit', ')', 'normalized_corr_data', '=', 'normalized_corr_data', '.', 'reshape', '(', 'num_samples', ',', 'd2', ',', 'd3', ')', 'logger', '.', 'debug', '(', "'normalization done'", ')', 'else', ':', 'normalized_corr_data', '=', 'corr_data', 'return', 'normalized_corr_data'] | Normalize the correlation data if necessary.
Fisher-transform and then z-score the data for every norm_unit samples
if norm_unit > 1.
Parameters
----------
corr_data: the correlation data
in shape [num_samples, num_processed_voxels, num_voxels]
norm_unit: int
the number of samples on which the normalization
is performed
Returns
-------
normalized_corr_data: the normalized correlation data
in shape [num_samples, num_voxels, num_voxels] | ['Normalize', 'the', 'correlation', 'data', 'if', 'necessary', '.'] | train | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/classifier.py#L184-L220 |
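The normalization described above (Fisher transform, then z-scoring over every `norm_unit` samples) can be restated in plain numpy; this is only an illustrative sketch, not the `fcma_extension` C routine the record actually calls.

```python
import numpy as np

def fisher_then_zscore(corr, norm_unit):
    """Illustrative sketch: r-to-z transform, then z-score each block of
    norm_unit samples along the first axis."""
    z = np.arctanh(np.clip(corr, -0.999999, 0.999999))  # Fisher r-to-z
    out = np.empty_like(z)
    for start in range(0, len(z), norm_unit):
        block = z[start:start + norm_unit]
        out[start:start + norm_unit] = (block - block.mean(axis=0)) / (block.std(axis=0) + 1e-12)
    return out
```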
4,437 | peshay/tpm | tpm.py | TpmApi.change_user_password | def change_user_password(self, ID, data):
"""Change password of a User."""
# http://teampasswordmanager.com/docs/api-users/#change_password
log.info('Change user %s password' % ID)
self.put('users/%s/change_password.json' % ID, data) | python | def change_user_password(self, ID, data):
"""Change password of a User."""
# http://teampasswordmanager.com/docs/api-users/#change_password
log.info('Change user %s password' % ID)
self.put('users/%s/change_password.json' % ID, data) | ['def', 'change_user_password', '(', 'self', ',', 'ID', ',', 'data', ')', ':', '# http://teampasswordmanager.com/docs/api-users/#change_password', 'log', '.', 'info', '(', "'Change user %s password'", '%', 'ID', ')', 'self', '.', 'put', '(', "'users/%s/change_password.json'", '%', 'ID', ',', 'data', ')'] | Change password of a User. | ['Change', 'password', 'of', 'a', 'User', '.'] | train | https://github.com/peshay/tpm/blob/8e64a4d8b89d54bdd2c92d965463a7508aa3d0bc/tpm.py#L510-L514 |
4,438 | Robin8Put/pmes | storage/rpc_methods.py | StorageTable.set_review | async def set_review(self, **params):
"""Writes review for content
Accepts:
- cid
- review
- public_key
- rating
- txid
- coinid
"""
if params.get("message"):
params = json.loads(params.get("message", "{}"))
if not params:
return {"error":400, "reason":"Missed required fields"}
cid = int(params.get("cid", 0))
txid = params.get("txid")
coinid = params.get("coinid")
try:
coinid = coinid.replace("TEST", "")
except:
pass
# Get content
database = client[coinid]
content_collection = database[settings.CONTENT]
content = await content_collection.find_one({"cid":cid})
if not content:
return {"error":404, "reason":"Not found current content"}
database = client[coinid]
review_collection = database[settings.REVIEW]
await review_collection.insert_one({"cid":cid, "confirmed":None,
"txid":txid, "coinid":coinid})
return {"result":"ok"} | python | async def set_review(self, **params):
"""Writes review for content
Accepts:
- cid
- review
- public_key
- rating
- txid
- coinid
"""
if params.get("message"):
params = json.loads(params.get("message", "{}"))
if not params:
return {"error":400, "reason":"Missed required fields"}
cid = int(params.get("cid", 0))
txid = params.get("txid")
coinid = params.get("coinid")
try:
coinid = coinid.replace("TEST", "")
except:
pass
# Get content
database = client[coinid]
content_collection = database[settings.CONTENT]
content = await content_collection.find_one({"cid":cid})
if not content:
return {"error":404, "reason":"Not found current content"}
database = client[coinid]
review_collection = database[settings.REVIEW]
await review_collection.insert_one({"cid":cid, "confirmed":None,
"txid":txid, "coinid":coinid})
return {"result":"ok"} | ['async', 'def', 'set_review', '(', 'self', ',', '*', '*', 'params', ')', ':', 'if', 'params', '.', 'get', '(', '"message"', ')', ':', 'params', '=', 'json', '.', 'loads', '(', 'params', '.', 'get', '(', '"message"', ',', '"{}"', ')', ')', 'if', 'not', 'params', ':', 'return', '{', '"error"', ':', '400', ',', '"reason"', ':', '"Missed required fields"', '}', 'cid', '=', 'int', '(', 'params', '.', 'get', '(', '"cid"', ',', '0', ')', ')', 'txid', '=', 'params', '.', 'get', '(', '"txid"', ')', 'coinid', '=', 'params', '.', 'get', '(', '"coinid"', ')', 'try', ':', 'coinid', '=', 'coinid', '.', 'replace', '(', '"TEST"', ',', '""', ')', 'except', ':', 'pass', '# Get content', 'database', '=', 'client', '[', 'coinid', ']', 'content_collection', '=', 'database', '[', 'settings', '.', 'CONTENT', ']', 'content', '=', 'await', 'content_collection', '.', 'find_one', '(', '{', '"cid"', ':', 'cid', '}', ')', 'if', 'not', 'content', ':', 'return', '{', '"error"', ':', '404', ',', '"reason"', ':', '"Not found current content"', '}', 'database', '=', 'client', '[', 'coinid', ']', 'review_collection', '=', 'database', '[', 'settings', '.', 'REVIEW', ']', 'await', 'review_collection', '.', 'insert_one', '(', '{', '"cid"', ':', 'cid', ',', '"confirmed"', ':', 'None', ',', '"txid"', ':', 'txid', ',', '"coinid"', ':', 'coinid', '}', ')', 'return', '{', '"result"', ':', '"ok"', '}'] | Writes review for content
Accepts:
- cid
- review
- public_key
- rating
- txid
- coinid | ['Writes', 'review', 'for', 'content', 'Accepts', ':', '-', 'cid', '-', 'review', '-', 'public_key', '-', 'rating', '-', 'txid', '-', 'coinid'] | train | https://github.com/Robin8Put/pmes/blob/338bec94162098f05b75bad035417317e1252fd2/storage/rpc_methods.py#L750-L786 |
4,439 | BreakingBytes/simkit | simkit/core/simulations.py | topological_sort | def topological_sort(dag):
"""
topological sort
:param dag: directed acyclic graph
:type dag: dict
.. seealso:: `Topographical Sorting
<http://en.wikipedia.org/wiki/Topological_sorting>`_,
`Directed Acyclic Graph (DAG)
<https://en.wikipedia.org/wiki/Directed_acyclic_graph>`_
"""
# find all edges of dag
topsort = [node for node, edge in dag.iteritems() if not edge]
# loop through nodes until topologically sorted
while len(topsort) < len(dag):
num_nodes = len(topsort) # number of nodes
# unsorted nodes
for node in dag.viewkeys() - set(topsort):
# nodes with no incoming edges
if set(dag[node]) <= set(topsort):
topsort.append(node)
break
# circular dependencies
if len(topsort) == num_nodes:
raise CircularDependencyError(dag.viewkeys() - set(topsort))
return topsort | python | def topological_sort(dag):
"""
topological sort
:param dag: directed acyclic graph
:type dag: dict
.. seealso:: `Topographical Sorting
<http://en.wikipedia.org/wiki/Topological_sorting>`_,
`Directed Acyclic Graph (DAG)
<https://en.wikipedia.org/wiki/Directed_acyclic_graph>`_
"""
# find all edges of dag
topsort = [node for node, edge in dag.iteritems() if not edge]
# loop through nodes until topologically sorted
while len(topsort) < len(dag):
num_nodes = len(topsort) # number of nodes
# unsorted nodes
for node in dag.viewkeys() - set(topsort):
# nodes with no incoming edges
if set(dag[node]) <= set(topsort):
topsort.append(node)
break
# circular dependencies
if len(topsort) == num_nodes:
raise CircularDependencyError(dag.viewkeys() - set(topsort))
return topsort | ['def', 'topological_sort', '(', 'dag', ')', ':', '# find all edges of dag', 'topsort', '=', '[', 'node', 'for', 'node', ',', 'edge', 'in', 'dag', '.', 'iteritems', '(', ')', 'if', 'not', 'edge', ']', '# loop through nodes until topologically sorted', 'while', 'len', '(', 'topsort', ')', '<', 'len', '(', 'dag', ')', ':', 'num_nodes', '=', 'len', '(', 'topsort', ')', '# number of nodes', '# unsorted nodes', 'for', 'node', 'in', 'dag', '.', 'viewkeys', '(', ')', '-', 'set', '(', 'topsort', ')', ':', '# nodes with no incoming edges', 'if', 'set', '(', 'dag', '[', 'node', ']', ')', '<=', 'set', '(', 'topsort', ')', ':', 'topsort', '.', 'append', '(', 'node', ')', 'break', '# circular dependencies', 'if', 'len', '(', 'topsort', ')', '==', 'num_nodes', ':', 'raise', 'CircularDependencyError', '(', 'dag', '.', 'viewkeys', '(', ')', '-', 'set', '(', 'topsort', ')', ')', 'return', 'topsort'] | topological sort
:param dag: directed acyclic graph
:type dag: dict
.. seealso:: `Topographical Sorting
<http://en.wikipedia.org/wiki/Topological_sorting>`_,
`Directed Acyclic Graph (DAG)
<https://en.wikipedia.org/wiki/Directed_acyclic_graph>`_ | ['topological', 'sort'] | train | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/simulations.py#L69-L95 |
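The sorter in the record above is written for Python 2 (`iteritems`, `viewkeys`). A minimal Python 3 sketch of the same repeated-selection algorithm, assuming the same node-to-dependencies mapping, with a tiny DAG:

```python
def topological_sort_py3(dag):
    # dag maps each node to the nodes it depends on
    order = [node for node, deps in dag.items() if not deps]
    while len(order) < len(dag):
        placed = len(order)
        for node in dag.keys() - set(order):
            if set(dag[node]) <= set(order):
                order.append(node)
                break
        if len(order) == placed:
            raise ValueError("circular dependency among %s" % (dag.keys() - set(order)))
    return order

print(topological_sort_py3({"a": [], "b": ["a"], "c": ["a", "b"]}))  # ['a', 'b', 'c']
```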
4,440 | mitsei/dlkit | dlkit/json_/grading/sessions.py | GradeSystemGradebookSession.get_gradebook_ids_by_grade_system | def get_gradebook_ids_by_grade_system(self, grade_system_id):
"""Gets the list of ``Gradebook`` ``Ids`` mapped to a ``GradeSystem``.
arg: grade_system_id (osid.id.Id): ``Id`` of a
``GradeSystem``
return: (osid.id.IdList) - list of gradebook ``Ids``
raise: NotFound - ``grade_system_id`` is not found
raise: NullArgument - ``grade_system_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_bin_ids_by_resource
mgr = self._get_provider_manager('GRADING', local=True)
lookup_session = mgr.get_grade_system_lookup_session(proxy=self._proxy)
lookup_session.use_federated_gradebook_view()
grade_system = lookup_session.get_grade_system(grade_system_id)
id_list = []
for idstr in grade_system._my_map['assignedGradebookIds']:
id_list.append(Id(idstr))
return IdList(id_list) | python | def get_gradebook_ids_by_grade_system(self, grade_system_id):
"""Gets the list of ``Gradebook`` ``Ids`` mapped to a ``GradeSystem``.
arg: grade_system_id (osid.id.Id): ``Id`` of a
``GradeSystem``
return: (osid.id.IdList) - list of gradebook ``Ids``
raise: NotFound - ``grade_system_id`` is not found
raise: NullArgument - ``grade_system_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_bin_ids_by_resource
mgr = self._get_provider_manager('GRADING', local=True)
lookup_session = mgr.get_grade_system_lookup_session(proxy=self._proxy)
lookup_session.use_federated_gradebook_view()
grade_system = lookup_session.get_grade_system(grade_system_id)
id_list = []
for idstr in grade_system._my_map['assignedGradebookIds']:
id_list.append(Id(idstr))
return IdList(id_list) | ['def', 'get_gradebook_ids_by_grade_system', '(', 'self', ',', 'grade_system_id', ')', ':', '# Implemented from template for', '# osid.resource.ResourceBinSession.get_bin_ids_by_resource', 'mgr', '=', 'self', '.', '_get_provider_manager', '(', "'GRADING'", ',', 'local', '=', 'True', ')', 'lookup_session', '=', 'mgr', '.', 'get_grade_system_lookup_session', '(', 'proxy', '=', 'self', '.', '_proxy', ')', 'lookup_session', '.', 'use_federated_gradebook_view', '(', ')', 'grade_system', '=', 'lookup_session', '.', 'get_grade_system', '(', 'grade_system_id', ')', 'id_list', '=', '[', ']', 'for', 'idstr', 'in', 'grade_system', '.', '_my_map', '[', "'assignedGradebookIds'", ']', ':', 'id_list', '.', 'append', '(', 'Id', '(', 'idstr', ')', ')', 'return', 'IdList', '(', 'id_list', ')'] | Gets the list of ``Gradebook`` ``Ids`` mapped to a ``GradeSystem``.
arg: grade_system_id (osid.id.Id): ``Id`` of a
``GradeSystem``
return: (osid.id.IdList) - list of gradebook ``Ids``
raise: NotFound - ``grade_system_id`` is not found
raise: NullArgument - ``grade_system_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | ['Gets', 'the', 'list', 'of', 'Gradebook', 'Ids', 'mapped', 'to', 'a', 'GradeSystem', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/sessions.py#L1477-L1499 |
4,441 | SystemRDL/systemrdl-compiler | systemrdl/node.py | get_group_node_size | def get_group_node_size(node):
"""
Shared getter for AddrmapNode and RegfileNode's "size" property
"""
# After structural placement, children are sorted
if( not node.inst.children
or (not isinstance(node.inst.children[-1], comp.AddressableComponent))
):
# No addressable child exists.
return 0
# Current node's size is based on last child
last_child_node = Node._factory(node.inst.children[-1], node.env, node)
return(
last_child_node.inst.addr_offset
+ last_child_node.total_size
) | python | def get_group_node_size(node):
"""
Shared getter for AddrmapNode and RegfileNode's "size" property
"""
# After structural placement, children are sorted
if( not node.inst.children
or (not isinstance(node.inst.children[-1], comp.AddressableComponent))
):
# No addressable child exists.
return 0
# Current node's size is based on last child
last_child_node = Node._factory(node.inst.children[-1], node.env, node)
return(
last_child_node.inst.addr_offset
+ last_child_node.total_size
) | ['def', 'get_group_node_size', '(', 'node', ')', ':', '# After structural placement, children are sorted', 'if', '(', 'not', 'node', '.', 'inst', '.', 'children', 'or', '(', 'not', 'isinstance', '(', 'node', '.', 'inst', '.', 'children', '[', '-', '1', ']', ',', 'comp', '.', 'AddressableComponent', ')', ')', ')', ':', '# No addressable child exists.', 'return', '0', "# Current node's size is based on last child", 'last_child_node', '=', 'Node', '.', '_factory', '(', 'node', '.', 'inst', '.', 'children', '[', '-', '1', ']', ',', 'node', '.', 'env', ',', 'node', ')', 'return', '(', 'last_child_node', '.', 'inst', '.', 'addr_offset', '+', 'last_child_node', '.', 'total_size', ')'] | Shared getter for AddrmapNode and RegfileNode's "size" property | ['Shared', 'getter', 'for', 'AddrmapNode', 'and', 'RegfileNode', 's', 'size', 'property'] | train | https://github.com/SystemRDL/systemrdl-compiler/blob/6ae64f2bb6ecbbe9db356e20e8ac94e85bdeed3a/systemrdl/node.py#L810-L826 |
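In plain terms, a group's size is the offset of its last (highest-placed) child plus that child's own size. A toy illustration with stand-in (offset, size) pairs; the real implementation reaches these values through Node._factory and the component classes.
# Stand-in child descriptors, already sorted by placement: (addr_offset, total_size).
children = [(0x0, 0x10), (0x10, 0x20), (0x40, 0x8)]

def group_size(children):
    if not children:
        return 0  # no addressable child, mirroring the early return above
    last_offset, last_size = children[-1]
    return last_offset + last_size

print(hex(group_size(children)))  # 0x48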
4,442 | bwesterb/py-seccure | src/__init__.py | mod_root | def mod_root(a, p):
""" Return a root of `a' modulo p """
if a == 0:
return 0
if not mod_issquare(a, p):
raise ValueError
n = 2
while mod_issquare(n, p):
n += 1
q = p - 1
r = 0
while not q.getbit(r):
r += 1
q = q >> r
y = pow(n, q, p)
h = q >> 1
b = pow(a, h, p)
x = (a * b) % p
b = (b * x) % p
while b != 1:
h = (b * b) % p
m = 1
while h != 1:
h = (h * h) % p
m += 1
h = gmpy.mpz(0)
h = h.setbit(r - m - 1)
t = pow(y, h, p)
y = (t * t) % p
r = m
x = (x * t) % p
b = (b * y) % p
return x | python | def mod_root(a, p):
""" Return a root of `a' modulo p """
if a == 0:
return 0
if not mod_issquare(a, p):
raise ValueError
n = 2
while mod_issquare(n, p):
n += 1
q = p - 1
r = 0
while not q.getbit(r):
r += 1
q = q >> r
y = pow(n, q, p)
h = q >> 1
b = pow(a, h, p)
x = (a * b) % p
b = (b * x) % p
while b != 1:
h = (b * b) % p
m = 1
while h != 1:
h = (h * h) % p
m += 1
h = gmpy.mpz(0)
h = h.setbit(r - m - 1)
t = pow(y, h, p)
y = (t * t) % p
r = m
x = (x * t) % p
b = (b * y) % p
return x | ['def', 'mod_root', '(', 'a', ',', 'p', ')', ':', 'if', 'a', '==', '0', ':', 'return', '0', 'if', 'not', 'mod_issquare', '(', 'a', ',', 'p', ')', ':', 'raise', 'ValueError', 'n', '=', '2', 'while', 'mod_issquare', '(', 'n', ',', 'p', ')', ':', 'n', '+=', '1', 'q', '=', 'p', '-', '1', 'r', '=', '0', 'while', 'not', 'q', '.', 'getbit', '(', 'r', ')', ':', 'r', '+=', '1', 'q', '=', 'q', '>>', 'r', 'y', '=', 'pow', '(', 'n', ',', 'q', ',', 'p', ')', 'h', '=', 'q', '>>', '1', 'b', '=', 'pow', '(', 'a', ',', 'h', ',', 'p', ')', 'x', '=', '(', 'a', '*', 'b', ')', '%', 'p', 'b', '=', '(', 'b', '*', 'x', ')', '%', 'p', 'while', 'b', '!=', '1', ':', 'h', '=', '(', 'b', '*', 'b', ')', '%', 'p', 'm', '=', '1', 'while', 'h', '!=', '1', ':', 'h', '=', '(', 'h', '*', 'h', ')', '%', 'p', 'm', '+=', '1', 'h', '=', 'gmpy', '.', 'mpz', '(', '0', ')', 'h', '=', 'h', '.', 'setbit', '(', 'r', '-', 'm', '-', '1', ')', 't', '=', 'pow', '(', 'y', ',', 'h', ',', 'p', ')', 'y', '=', '(', 't', '*', 't', ')', '%', 'p', 'r', '=', 'm', 'x', '=', '(', 'x', '*', 't', ')', '%', 'p', 'b', '=', '(', 'b', '*', 'y', ')', '%', 'p', 'return', 'x'] | Return a root of `a' modulo p | ['Return', 'a', 'root', 'of', 'a', 'modulo', 'p'] | train | https://github.com/bwesterb/py-seccure/blob/944760744686dd0ad015bd90ecb13a3ce0d7c9c9/src/__init__.py#L122-L154 |
4,443 | grahambell/pymoc | lib/pymoc/moc.py | MOC.cells | def cells(self):
"""The number of cells in the MOC.
This gives the total number of cells at all orders,
with cells from every order counted equally.
>>> m = MOC(0, (1, 2))
>>> m.cells
2
"""
n = 0
for (order, cells) in self:
n += len(cells)
return n | python | def cells(self):
"""The number of cells in the MOC.
This gives the total number of cells at all orders,
with cells from every order counted equally.
>>> m = MOC(0, (1, 2))
>>> m.cells
2
"""
n = 0
for (order, cells) in self:
n += len(cells)
return n | ['def', 'cells', '(', 'self', ')', ':', 'n', '=', '0', 'for', '(', 'order', ',', 'cells', ')', 'in', 'self', ':', 'n', '+=', 'len', '(', 'cells', ')', 'return', 'n'] | The number of cells in the MOC.
This gives the total number of cells at all orders,
with cells from every order counted equally.
>>> m = MOC(0, (1, 2))
>>> m.cells
2 | ['The', 'number', 'of', 'cells', 'in', 'the', 'MOC', '.'] | train | https://github.com/grahambell/pymoc/blob/0e2e57ce07ff3de6ac024627c1fb6ad30c2fde48/lib/pymoc/moc.py#L365-L381 |
4,444 | numberoverzero/bloop | bloop/stream/buffer.py | RecordBuffer.push_all | def push_all(self, record_shard_pairs):
"""Push multiple (record, shard) pairs at once, with only one :meth:`heapq.heapify` call to maintain order.
:param record_shard_pairs: list of ``(record, shard)`` tuples
(see :func:`~bloop.stream.buffer.RecordBuffer.push`).
"""
# Faster than inserting one at a time; the heap is sorted once after all inserts.
for record, shard in record_shard_pairs:
item = heap_item(self.clock, record, shard)
self.heap.append(item)
heapq.heapify(self.heap) | python | def push_all(self, record_shard_pairs):
"""Push multiple (record, shard) pairs at once, with only one :meth:`heapq.heapify` call to maintain order.
:param record_shard_pairs: list of ``(record, shard)`` tuples
(see :func:`~bloop.stream.buffer.RecordBuffer.push`).
"""
# Faster than inserting one at a time; the heap is sorted once after all inserts.
for record, shard in record_shard_pairs:
item = heap_item(self.clock, record, shard)
self.heap.append(item)
heapq.heapify(self.heap) | ['def', 'push_all', '(', 'self', ',', 'record_shard_pairs', ')', ':', '# Faster than inserting one at a time; the heap is sorted once after all inserts.', 'for', 'record', ',', 'shard', 'in', 'record_shard_pairs', ':', 'item', '=', 'heap_item', '(', 'self', '.', 'clock', ',', 'record', ',', 'shard', ')', 'self', '.', 'heap', '.', 'append', '(', 'item', ')', 'heapq', '.', 'heapify', '(', 'self', '.', 'heap', ')'] | Push multiple (record, shard) pairs at once, with only one :meth:`heapq.heapify` call to maintain order.
:param record_shard_pairs: list of ``(record, shard)`` tuples
(see :func:`~bloop.stream.buffer.RecordBuffer.push`). | ['Push', 'multiple', '(', 'record', 'shard', ')', 'pairs', 'at', 'once', 'with', 'only', 'one', ':', 'meth', ':', 'heapq', '.', 'heapify', 'call', 'to', 'maintain', 'order', '.'] | train | https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/buffer.py#L48-L58 |
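A toy sketch of the design choice push_all relies on: appending every item and calling heapq.heapify once costs O(n), which beats n separate heappush calls at O(n log n). Plain tuples stand in for the heap_item wrapper used by the real buffer.
import heapq

heap = []
incoming = [(5, "record-e"), (1, "record-a"), (3, "record-c")]

# Bulk insert: extend the list, then restore the heap invariant in one pass.
heap.extend(incoming)
heapq.heapify(heap)

print(heapq.heappop(heap))  # (1, 'record-a') -- smallest ordering key comes out first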
4,445 | fhcrc/nestly | nestly/scripts/nestrun.py | NestlyProcess.log_tail | def log_tail(self, nlines=10):
"""
Return the last ``nlines`` lines of the log file
"""
log_path = os.path.join(self.working_dir, self.log_name)
with open(log_path) as fp:
d = collections.deque(maxlen=nlines)
d.extend(fp)
return ''.join(d) | python | def log_tail(self, nlines=10):
"""
Return the last ``nlines`` lines of the log file
"""
log_path = os.path.join(self.working_dir, self.log_name)
with open(log_path) as fp:
d = collections.deque(maxlen=nlines)
d.extend(fp)
return ''.join(d) | ['def', 'log_tail', '(', 'self', ',', 'nlines', '=', '10', ')', ':', 'log_path', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'working_dir', ',', 'self', '.', 'log_name', ')', 'with', 'open', '(', 'log_path', ')', 'as', 'fp', ':', 'd', '=', 'collections', '.', 'deque', '(', 'maxlen', '=', 'nlines', ')', 'd', '.', 'extend', '(', 'fp', ')', 'return', "''", '.', 'join', '(', 'd', ')'] | Return the last ``nlines`` lines of the log file | ['Return', 'the', 'last', 'nlines', 'lines', 'of', 'the', 'log', 'file'] | train | https://github.com/fhcrc/nestly/blob/4d7818b5950f405d2067a6b8577d5afb7527c9ff/nestly/scripts/nestrun.py#L201-L209 |
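The whole method hinges on collections.deque(maxlen=n), which silently drops older lines so only the newest n remain after a single pass over the file. The same trick in isolation (the path below is purely illustrative):
import collections

def tail(path, nlines=10):
    with open(path) as fp:
        # Lines beyond maxlen are discarded from the left as new ones arrive.
        return ''.join(collections.deque(fp, maxlen=nlines))

# print(tail("/tmp/example.log", nlines=5))  # hypothetical log file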
4,446 | arubertoson/maya-launcher | mayalauncher.py | Config._create_default_config_file | def _create_default_config_file(self):
"""
If the config file does not exist, create it and set default values.
"""
logger.info('Initialize Maya launcher, creating config file...\n')
self.add_section(self.DEFAULTS)
self.add_section(self.PATTERNS)
self.add_section(self.ENVIRONMENTS)
self.add_section(self.EXECUTABLES)
self.set(self.DEFAULTS, 'executable', None)
self.set(self.DEFAULTS, 'environment', None)
self.set(self.PATTERNS, 'exclude', ', '.join(self.EXLUDE_PATTERNS))
self.set(self.PATTERNS, 'icon_ext', ', '.join(self.ICON_EXTENSIONS))
self.config_file.parent.mkdir(exist_ok=True)
self.config_file.touch()
with self.config_file.open('wb') as f:
self.write(f)
# If this function is run inform the user that a new file has been
# created.
sys.exit('Maya launcher has successfully created config file at:\n'
' "{}"'.format(str(self.config_file))) | python | def _create_default_config_file(self):
"""
If the config file does not exist, create it and set default values.
"""
logger.info('Initialize Maya launcher, creating config file...\n')
self.add_section(self.DEFAULTS)
self.add_section(self.PATTERNS)
self.add_section(self.ENVIRONMENTS)
self.add_section(self.EXECUTABLES)
self.set(self.DEFAULTS, 'executable', None)
self.set(self.DEFAULTS, 'environment', None)
self.set(self.PATTERNS, 'exclude', ', '.join(self.EXLUDE_PATTERNS))
self.set(self.PATTERNS, 'icon_ext', ', '.join(self.ICON_EXTENSIONS))
self.config_file.parent.mkdir(exist_ok=True)
self.config_file.touch()
with self.config_file.open('wb') as f:
self.write(f)
# If this function is run inform the user that a new file has been
# created.
sys.exit('Maya launcher has successfully created config file at:\n'
' "{}"'.format(str(self.config_file))) | ['def', '_create_default_config_file', '(', 'self', ')', ':', 'logger', '.', 'info', '(', "'Initialize Maya launcher, creating config file...\\n'", ')', 'self', '.', 'add_section', '(', 'self', '.', 'DEFAULTS', ')', 'self', '.', 'add_section', '(', 'self', '.', 'PATTERNS', ')', 'self', '.', 'add_section', '(', 'self', '.', 'ENVIRONMENTS', ')', 'self', '.', 'add_section', '(', 'self', '.', 'EXECUTABLES', ')', 'self', '.', 'set', '(', 'self', '.', 'DEFAULTS', ',', "'executable'", ',', 'None', ')', 'self', '.', 'set', '(', 'self', '.', 'DEFAULTS', ',', "'environment'", ',', 'None', ')', 'self', '.', 'set', '(', 'self', '.', 'PATTERNS', ',', "'exclude'", ',', "', '", '.', 'join', '(', 'self', '.', 'EXLUDE_PATTERNS', ')', ')', 'self', '.', 'set', '(', 'self', '.', 'PATTERNS', ',', "'icon_ext'", ',', "', '", '.', 'join', '(', 'self', '.', 'ICON_EXTENSIONS', ')', ')', 'self', '.', 'config_file', '.', 'parent', '.', 'mkdir', '(', 'exist_ok', '=', 'True', ')', 'self', '.', 'config_file', '.', 'touch', '(', ')', 'with', 'self', '.', 'config_file', '.', 'open', '(', "'wb'", ')', 'as', 'f', ':', 'self', '.', 'write', '(', 'f', ')', '# If this function is run inform the user that a new file has been\r', '# created.\r', 'sys', '.', 'exit', '(', "'Maya launcher has successfully created config file at:\\n'", '\' "{}"\'', '.', 'format', '(', 'str', '(', 'self', '.', 'config_file', ')', ')', ')'] | If config file does not exists create and set default values. | ['If', 'config', 'file', 'does', 'not', 'exists', 'create', 'and', 'set', 'default', 'values', '.'] | train | https://github.com/arubertoson/maya-launcher/blob/9bd82cce7edf4afb803dd8044107a324e93f197f/mayalauncher.py#L113-L135 |
4,447 | Yubico/python-pyhsm | pyhsm/tools/decrypt_aead.py | parse_args | def parse_args():
"""
Parse the command line arguments
"""
parser = argparse.ArgumentParser(description = 'Decrypt AEADs',
add_help = True,
formatter_class = argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument('-v', '--verbose',
dest='verbose',
action='store_true', default=False,
help='Enable verbose operation',
)
parser.add_argument('--debug',
dest='debug',
action='store_true', default=False,
help='Enable debug operation',
)
parser.add_argument('--format',
dest='format',
default='raw',
help='Select output format (aead, raw or yubikey-csv)',
)
parser.add_argument('--output-dir',
dest='output_dir',
help='Output dir basename (for --format aead)',
metavar='DIR',
)
parser.add_argument('--print-filename',
dest='print_filename',
action='store_true', default=False,
help='Prefix each row with the AEAD filename',
)
parser.add_argument('--key-handle',
dest='key_handle',
help='Key handle used when generating the AEADs.',
metavar='HANDLE',
)
parser.add_argument('--key-handle-out',
dest='key_handle_out',
help='Key handle used when generating *new* AEADs (with --format aead).',
metavar='HANDLE',
)
parser.add_argument('--aes-key',
dest='aes_key',
required=True,
help='AES key used when generating the AEADs.',
metavar='HEXSTR',
)
parser.add_argument('--aes-key-out',
dest='aes_key_out',
required=False,
help='AES key used when generating *new* AEADs (with --format aead).',
metavar='HEXSTR',
)
parser.add_argument('--start-public-id',
dest='start_id',
required=False, default=None,
help='The first public id to decrypt',
metavar='INT-OR-MODHEX',
)
parser.add_argument('--stop-public-id',
dest='stop_id',
required=False, default=None,
help='The last public id to decrypt',
metavar='INT-OR-MODHEX',
)
parser.add_argument('--fail-fast',
dest='fail_fast',
action='store_true', default=False,
help='Terminate on the first AEAD failure, rather than keep going.',
)
parser.add_argument('paths',
nargs='+',
help='Files and/or directories to process.',
metavar='FILE-OR-DIR'
)
args = parser.parse_args()
# argument fixups
args.format = args.format.lower()
args.aes_key = args.aes_key.decode('hex')
if args.key_handle:
args.key_handle = pyhsm.util.key_handle_to_int(args.key_handle)
if args.start_id is not None:
try:
n = int(args.start_id)
except ValueError:
hexstr = pyhsm.yubikey.modhex_decode(args.start_id)
n = int(hexstr, 16)
args.start_id = n
if args.stop_id is not None:
try:
n = int(args.stop_id)
except ValueError:
hexstr = pyhsm.yubikey.modhex_decode(args.stop_id)
n = int(hexstr, 16)
args.stop_id = n
# some checks
if args.format == 'aead':
if not args.output_dir:
sys.stderr.write("error: --output-dir is required when using --format aead.\n")
return False
if not os.path.isdir(args.output_dir):
sys.stderr.write("error: Output directory '%s' not found\n" % (args.output_dir))
return False
if not args.aes_key_out:
sys.stderr.write("error: --aes-key-out is required when using --format aead.\n")
return False
if not args.key_handle_out:
sys.stderr.write("error: --key-handle-out is required when using --format aead.\n")
return False
# argument fixups
args.aes_key_out = args.aes_key_out.decode('hex')
args.key_handle_out_orig = args.key_handle_out # save to use in AEAD output paths
args.key_handle_out = pyhsm.util.key_handle_to_int(args.key_handle_out)
return args | python | def parse_args():
"""
Parse the command line arguments
"""
parser = argparse.ArgumentParser(description = 'Decrypt AEADs',
add_help = True,
formatter_class = argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument('-v', '--verbose',
dest='verbose',
action='store_true', default=False,
help='Enable verbose operation',
)
parser.add_argument('--debug',
dest='debug',
action='store_true', default=False,
help='Enable debug operation',
)
parser.add_argument('--format',
dest='format',
default='raw',
help='Select output format (aead, raw or yubikey-csv)',
)
parser.add_argument('--output-dir',
dest='output_dir',
help='Output dir basename (for --format aead)',
metavar='DIR',
)
parser.add_argument('--print-filename',
dest='print_filename',
action='store_true', default=False,
help='Prefix each row with the AEAD filename',
)
parser.add_argument('--key-handle',
dest='key_handle',
help='Key handle used when generating the AEADs.',
metavar='HANDLE',
)
parser.add_argument('--key-handle-out',
dest='key_handle_out',
help='Key handle used when generating *new* AEADs (with --format aead).',
metavar='HANDLE',
)
parser.add_argument('--aes-key',
dest='aes_key',
required=True,
help='AES key used when generating the AEADs.',
metavar='HEXSTR',
)
parser.add_argument('--aes-key-out',
dest='aes_key_out',
required=False,
help='AES key used when generating *new* AEADs (with --format aead).',
metavar='HEXSTR',
)
parser.add_argument('--start-public-id',
dest='start_id',
required=False, default=None,
help='The first public id to decrypt',
metavar='INT-OR-MODHEX',
)
parser.add_argument('--stop-public-id',
dest='stop_id',
required=False, default=None,
help='The last public id to decrypt',
metavar='INT-OR-MODHEX',
)
parser.add_argument('--fail-fast',
dest='fail_fast',
action='store_true', default=False,
help='Terminate on the first AEAD failure, rather than keep going.',
)
parser.add_argument('paths',
nargs='+',
help='Files and/or directories to process.',
metavar='FILE-OR-DIR'
)
args = parser.parse_args()
# argument fixups
args.format = args.format.lower()
args.aes_key = args.aes_key.decode('hex')
if args.key_handle:
args.key_handle = pyhsm.util.key_handle_to_int(args.key_handle)
if args.start_id is not None:
try:
n = int(args.start_id)
except ValueError:
hexstr = pyhsm.yubikey.modhex_decode(args.start_id)
n = int(hexstr, 16)
args.start_id = n
if args.stop_id is not None:
try:
n = int(args.stop_id)
except ValueError:
hexstr = pyhsm.yubikey.modhex_decode(args.stop_id)
n = int(hexstr, 16)
args.stop_id = n
# some checks
if args.format == 'aead':
if not args.output_dir:
sys.stderr.write("error: --output-dir is required when using --format aead.\n")
return False
if not os.path.isdir(args.output_dir):
sys.stderr.write("error: Output directory '%s' not found\n" % (args.output_dir))
return False
if not args.aes_key_out:
sys.stderr.write("error: --aes-key-out is required when using --format aead.\n")
return False
if not args.key_handle_out:
sys.stderr.write("error: --key-handle-out is required when using --format aead.\n")
return False
# argument fixups
args.aes_key_out = args.aes_key_out.decode('hex')
args.key_handle_out_orig = args.key_handle_out # save to use in AEAD output paths
args.key_handle_out = pyhsm.util.key_handle_to_int(args.key_handle_out)
return args | ['def', 'parse_args', '(', ')', ':', 'parser', '=', 'argparse', '.', 'ArgumentParser', '(', 'description', '=', "'Decrypt AEADs'", ',', 'add_help', '=', 'True', ',', 'formatter_class', '=', 'argparse', '.', 'ArgumentDefaultsHelpFormatter', ',', ')', 'parser', '.', 'add_argument', '(', "'-v'", ',', "'--verbose'", ',', 'dest', '=', "'verbose'", ',', 'action', '=', "'store_true'", ',', 'default', '=', 'False', ',', 'help', '=', "'Enable verbose operation'", ',', ')', 'parser', '.', 'add_argument', '(', "'--debug'", ',', 'dest', '=', "'debug'", ',', 'action', '=', "'store_true'", ',', 'default', '=', 'False', ',', 'help', '=', "'Enable debug operation'", ',', ')', 'parser', '.', 'add_argument', '(', "'--format'", ',', 'dest', '=', "'format'", ',', 'default', '=', "'raw'", ',', 'help', '=', "'Select output format (aead, raw or yubikey-csv)'", ',', ')', 'parser', '.', 'add_argument', '(', "'--output-dir'", ',', 'dest', '=', "'output_dir'", ',', 'help', '=', "'Output dir basename (for --format aead)'", ',', 'metavar', '=', "'DIR'", ',', ')', 'parser', '.', 'add_argument', '(', "'--print-filename'", ',', 'dest', '=', "'print_filename'", ',', 'action', '=', "'store_true'", ',', 'default', '=', 'False', ',', 'help', '=', "'Prefix each row with the AEAD filename'", ',', ')', 'parser', '.', 'add_argument', '(', "'--key-handle'", ',', 'dest', '=', "'key_handle'", ',', 'help', '=', "'Key handle used when generating the AEADs.'", ',', 'metavar', '=', "'HANDLE'", ',', ')', 'parser', '.', 'add_argument', '(', "'--key-handle-out'", ',', 'dest', '=', "'key_handle_out'", ',', 'help', '=', "'Key handle used when generating *new* AEADs (with --format aead).'", ',', 'metavar', '=', "'HANDLE'", ',', ')', 'parser', '.', 'add_argument', '(', "'--aes-key'", ',', 'dest', '=', "'aes_key'", ',', 'required', '=', 'True', ',', 'help', '=', "'AES key used when generating the AEADs.'", ',', 'metavar', '=', "'HEXSTR'", ',', ')', 'parser', '.', 'add_argument', '(', "'--aes-key-out'", ',', 'dest', '=', "'aes_key_out'", ',', 'required', '=', 'False', ',', 'help', '=', "'AES key used when generating *new* AEADs (with --format aead).'", ',', 'metavar', '=', "'HEXSTR'", ',', ')', 'parser', '.', 'add_argument', '(', "'--start-public-id'", ',', 'dest', '=', "'start_id'", ',', 'required', '=', 'False', ',', 'default', '=', 'None', ',', 'help', '=', "'The first public id to decrypt'", ',', 'metavar', '=', "'INT-OR-MODHEX'", ',', ')', 'parser', '.', 'add_argument', '(', "'--stop-public-id'", ',', 'dest', '=', "'stop_id'", ',', 'required', '=', 'False', ',', 'default', '=', 'None', ',', 'help', '=', "'The last public id to decrypt'", ',', 'metavar', '=', "'INT-OR-MODHEX'", ',', ')', 'parser', '.', 'add_argument', '(', "'--fail-fast'", ',', 'dest', '=', "'fail_fast'", ',', 'action', '=', "'store_true'", ',', 'default', '=', 'False', ',', 'help', '=', "'Terminate on the first AEAD failure, rather than keep going.'", ',', ')', 'parser', '.', 'add_argument', '(', "'paths'", ',', 'nargs', '=', "'+'", ',', 'help', '=', "'Files and/or directories to process.'", ',', 'metavar', '=', "'FILE-OR-DIR'", ')', 'args', '=', 'parser', '.', 'parse_args', '(', ')', '# argument fixups', 'args', '.', 'format', '=', 'args', '.', 'format', '.', 'lower', '(', ')', 'args', '.', 'aes_key', '=', 'args', '.', 'aes_key', '.', 'decode', '(', "'hex'", ')', 'if', 'args', '.', 'key_handle', ':', 'args', '.', 'key_handle', '=', 'pyhsm', '.', 'util', '.', 'key_handle_to_int', '(', 'args', '.', 'key_handle', ')', 'if', 'args', '.', 'start_id', 'is', 'not', 
'None', ':', 'try', ':', 'n', '=', 'int', '(', 'args', '.', 'start_id', ')', 'except', 'ValueError', ':', 'hexstr', '=', 'pyhsm', '.', 'yubikey', '.', 'modhex_decode', '(', 'args', '.', 'start_id', ')', 'n', '=', 'int', '(', 'hexstr', ',', '16', ')', 'args', '.', 'start_id', '=', 'n', 'if', 'args', '.', 'stop_id', 'is', 'not', 'None', ':', 'try', ':', 'n', '=', 'int', '(', 'args', '.', 'stop_id', ')', 'except', 'ValueError', ':', 'hexstr', '=', 'pyhsm', '.', 'yubikey', '.', 'modhex_decode', '(', 'args', '.', 'stop_id', ')', 'n', '=', 'int', '(', 'hexstr', ',', '16', ')', 'args', '.', 'stop_id', '=', 'n', '# some checks', 'if', 'args', '.', 'format', '==', "'aead'", ':', 'if', 'not', 'args', '.', 'output_dir', ':', 'sys', '.', 'stderr', '.', 'write', '(', '"error: --output-dir is required when using --format aead.\\n"', ')', 'return', 'False', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'args', '.', 'output_dir', ')', ':', 'sys', '.', 'stderr', '.', 'write', '(', '"error: Output directory \'%s\' not found\\n"', '%', '(', 'args', '.', 'output_dir', ')', ')', 'return', 'False', 'if', 'not', 'args', '.', 'aes_key_out', ':', 'sys', '.', 'stderr', '.', 'write', '(', '"error: --aes-key-out is required when using --format aead.\\n"', ')', 'return', 'False', 'if', 'not', 'args', '.', 'key_handle_out', ':', 'sys', '.', 'stderr', '.', 'write', '(', '"error: --key-handle-out is required when using --format aead.\\n"', ')', 'return', 'False', '# argument fixups', 'args', '.', 'aes_key_out', '=', 'args', '.', 'aes_key_out', '.', 'decode', '(', "'hex'", ')', 'args', '.', 'key_handle_out_orig', '=', 'args', '.', 'key_handle_out', '# save to use in AEAD output paths', 'args', '.', 'key_handle_out', '=', 'pyhsm', '.', 'util', '.', 'key_handle_to_int', '(', 'args', '.', 'key_handle_out', ')', 'return', 'args'] | Parse the command line arguments | ['Parse', 'the', 'command', 'line', 'arguments'] | train | https://github.com/Yubico/python-pyhsm/blob/b6e2744d1ea15c352a0fc1d6ebc5950026b71311/pyhsm/tools/decrypt_aead.py#L24-L139 |
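The --start-public-id/--stop-public-id fixup above accepts either a decimal integer or a Yubico modhex string. A self-contained sketch of that fallback, using a simplified translation-table decoder in place of pyhsm.yubikey.modhex_decode (the real decoder may validate more strictly):
# Modhex maps the alphabet 'cbdefghijklnrtuv' onto the hex digits '0123456789abcdef'.
MODHEX_TO_HEX = str.maketrans("cbdefghijklnrtuv", "0123456789abcdef")

def public_id_to_int(value):
    """Interpret value as a decimal int, falling back to modhex."""
    try:
        return int(value)
    except ValueError:
        return int(value.translate(MODHEX_TO_HEX), 16)

print(public_id_to_int("4711"))  # 4711
print(public_id_to_int("cccb"))  # 1 (modhex 'cccb' -> hex '0001')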
4,448 | onelogin/python-saml | src/onelogin/saml2/settings.py | OneLogin_Saml2_Settings.__load_settings_from_file | def __load_settings_from_file(self):
"""
Loads settings info from the settings json file
:returns: True if the settings info is valid
:rtype: boolean
"""
filename = self.get_base_path() + 'settings.json'
if not exists(filename):
raise OneLogin_Saml2_Error(
'Settings file not found: %s',
OneLogin_Saml2_Error.SETTINGS_FILE_NOT_FOUND,
filename
)
# In the php toolkit instead of being a json file it is a php file and
# it is directly included
json_data = open(filename, 'r')
settings = json.load(json_data)
json_data.close()
advanced_filename = self.get_base_path() + 'advanced_settings.json'
if exists(advanced_filename):
json_data = open(advanced_filename, 'r')
settings.update(json.load(json_data)) # Merge settings
json_data.close()
return self.__load_settings_from_dict(settings) | python | def __load_settings_from_file(self):
"""
Loads settings info from the settings json file
:returns: True if the settings info is valid
:rtype: boolean
"""
filename = self.get_base_path() + 'settings.json'
if not exists(filename):
raise OneLogin_Saml2_Error(
'Settings file not found: %s',
OneLogin_Saml2_Error.SETTINGS_FILE_NOT_FOUND,
filename
)
# In the php toolkit instead of being a json file it is a php file and
# it is directly included
json_data = open(filename, 'r')
settings = json.load(json_data)
json_data.close()
advanced_filename = self.get_base_path() + 'advanced_settings.json'
if exists(advanced_filename):
json_data = open(advanced_filename, 'r')
settings.update(json.load(json_data)) # Merge settings
json_data.close()
return self.__load_settings_from_dict(settings) | ['def', '__load_settings_from_file', '(', 'self', ')', ':', 'filename', '=', 'self', '.', 'get_base_path', '(', ')', '+', "'settings.json'", 'if', 'not', 'exists', '(', 'filename', ')', ':', 'raise', 'OneLogin_Saml2_Error', '(', "'Settings file not found: %s'", ',', 'OneLogin_Saml2_Error', '.', 'SETTINGS_FILE_NOT_FOUND', ',', 'filename', ')', '# In the php toolkit instead of being a json file it is a php file and', '# it is directly included', 'json_data', '=', 'open', '(', 'filename', ',', "'r'", ')', 'settings', '=', 'json', '.', 'load', '(', 'json_data', ')', 'json_data', '.', 'close', '(', ')', 'advanced_filename', '=', 'self', '.', 'get_base_path', '(', ')', '+', "'advanced_settings.json'", 'if', 'exists', '(', 'advanced_filename', ')', ':', 'json_data', '=', 'open', '(', 'advanced_filename', ',', "'r'", ')', 'settings', '.', 'update', '(', 'json', '.', 'load', '(', 'json_data', ')', ')', '# Merge settings', 'json_data', '.', 'close', '(', ')', 'return', 'self', '.', '__load_settings_from_dict', '(', 'settings', ')'] | Loads settings info from the settings json file
:returns: True if the settings info is valid
:rtype: boolean | ['Loads', 'settings', 'info', 'from', 'the', 'settings', 'json', 'file'] | train | https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/settings.py#L220-L248 |
4,449 | pypa/pipenv | pipenv/vendor/distlib/_backport/tarfile.py | TarInfo._proc_gnusparse_00 | def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes)) | python | def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes)) | ['def', '_proc_gnusparse_00', '(', 'self', ',', 'next', ',', 'pax_headers', ',', 'buf', ')', ':', 'offsets', '=', '[', ']', 'for', 'match', 'in', 're', '.', 'finditer', '(', 'br"\\d+ GNU.sparse.offset=(\\d+)\\n"', ',', 'buf', ')', ':', 'offsets', '.', 'append', '(', 'int', '(', 'match', '.', 'group', '(', '1', ')', ')', ')', 'numbytes', '=', '[', ']', 'for', 'match', 'in', 're', '.', 'finditer', '(', 'br"\\d+ GNU.sparse.numbytes=(\\d+)\\n"', ',', 'buf', ')', ':', 'numbytes', '.', 'append', '(', 'int', '(', 'match', '.', 'group', '(', '1', ')', ')', ')', 'next', '.', 'sparse', '=', 'list', '(', 'zip', '(', 'offsets', ',', 'numbytes', ')', ')'] | Process a GNU tar extended sparse header, version 0.0. | ['Process', 'a', 'GNU', 'tar', 'extended', 'sparse', 'header', 'version', '0', '.', '0', '.'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1485-L1494 |
4,450 | bitesofcode/projexui | projexui/widgets/xviewwidget/xviewprofiletoolbar.py | XViewProfileToolBar.createProfile | def createProfile(self, profile=None, clearLayout=True):
"""
Prompts the user to create a new profile.
"""
if profile:
prof = profile
elif not self.viewWidget() or clearLayout:
prof = XViewProfile()
else:
prof = self.viewWidget().saveProfile()
blocked = self.signalsBlocked()
self.blockSignals(False)
changed = self.editProfile(prof)
self.blockSignals(blocked)
if not changed:
return
act = self.addProfile(prof)
act.setChecked(True)
# update the interface
if self.viewWidget() and (profile or clearLayout):
self.viewWidget().restoreProfile(prof)
if not self.signalsBlocked():
self.profileCreated.emit(prof)
self.profilesChanged.emit() | python | def createProfile(self, profile=None, clearLayout=True):
"""
Prompts the user to create a new profile.
"""
if profile:
prof = profile
elif not self.viewWidget() or clearLayout:
prof = XViewProfile()
else:
prof = self.viewWidget().saveProfile()
blocked = self.signalsBlocked()
self.blockSignals(False)
changed = self.editProfile(prof)
self.blockSignals(blocked)
if not changed:
return
act = self.addProfile(prof)
act.setChecked(True)
# update the interface
if self.viewWidget() and (profile or clearLayout):
self.viewWidget().restoreProfile(prof)
if not self.signalsBlocked():
self.profileCreated.emit(prof)
self.profilesChanged.emit() | ['def', 'createProfile', '(', 'self', ',', 'profile', '=', 'None', ',', 'clearLayout', '=', 'True', ')', ':', 'if', 'profile', ':', 'prof', '=', 'profile', 'elif', 'not', 'self', '.', 'viewWidget', '(', ')', 'or', 'clearLayout', ':', 'prof', '=', 'XViewProfile', '(', ')', 'else', ':', 'prof', '=', 'self', '.', 'viewWidget', '(', ')', '.', 'saveProfile', '(', ')', 'blocked', '=', 'self', '.', 'signalsBlocked', '(', ')', 'self', '.', 'blockSignals', '(', 'False', ')', 'changed', '=', 'self', '.', 'editProfile', '(', 'prof', ')', 'self', '.', 'blockSignals', '(', 'blocked', ')', 'if', 'not', 'changed', ':', 'return', 'act', '=', 'self', '.', 'addProfile', '(', 'prof', ')', 'act', '.', 'setChecked', '(', 'True', ')', '# update the interface\r', 'if', 'self', '.', 'viewWidget', '(', ')', 'and', '(', 'profile', 'or', 'clearLayout', ')', ':', 'self', '.', 'viewWidget', '(', ')', '.', 'restoreProfile', '(', 'prof', ')', 'if', 'not', 'self', '.', 'signalsBlocked', '(', ')', ':', 'self', '.', 'profileCreated', '.', 'emit', '(', 'prof', ')', 'self', '.', 'profilesChanged', '.', 'emit', '(', ')'] | Prompts the user to create a new profile. | ['Prompts', 'the', 'user', 'to', 'create', 'a', 'new', 'profile', '.'] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xviewprofiletoolbar.py#L164-L192 |
4,451 | tanghaibao/jcvi | jcvi/apps/vecscreen.py | mask | def mask(args):
"""
%prog mask fastafile
Mask the contaminants. By default, this will compare against UniVec_Core and
Ecoli.fasta. Merge the contaminant results, and use `maskFastaFromBed`. Can
perform FASTA tidy if requested.
"""
p = OptionParser(mask.__doc__)
p.add_option("--db",
help="Contaminant db other than Ecoli K12 [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
assert op.exists(fastafile)
outfastafile = fastafile.rsplit(".", 1)[0] + ".masked.fasta"
vecbedfile = blast([fastafile])
ecoliurl = \
"ftp://ftp.ncbi.nih.gov/genomes/Bacteria/Escherichia_coli_K_12_substr__DH10B_uid58979/NC_010473.fna"
ecolifile = opts.db or download(ecoliurl, filename="Ecoli.fasta")
assert op.exists(ecolifile)
ecolibedfile = blast([fastafile, "--db={0}".format(ecolifile)])
cmd = "cat {0} {1}".format(vecbedfile, ecolibedfile)
cmd += " | mergeBed -nms -d 100 -i stdin"
cmd += " | maskFastaFromBed -fi {0} -bed stdin -fo {1}".\
format(fastafile, outfastafile)
sh(cmd)
return tidy([outfastafile]) | python | def mask(args):
"""
%prog mask fastafile
Mask the contaminants. By default, this will compare against UniVec_Core and
Ecoli.fasta. Merge the contaminant results, and use `maskFastaFromBed`. Can
perform FASTA tidy if requested.
"""
p = OptionParser(mask.__doc__)
p.add_option("--db",
help="Contaminant db other than Ecoli K12 [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
assert op.exists(fastafile)
outfastafile = fastafile.rsplit(".", 1)[0] + ".masked.fasta"
vecbedfile = blast([fastafile])
ecoliurl = \
"ftp://ftp.ncbi.nih.gov/genomes/Bacteria/Escherichia_coli_K_12_substr__DH10B_uid58979/NC_010473.fna"
ecolifile = opts.db or download(ecoliurl, filename="Ecoli.fasta")
assert op.exists(ecolifile)
ecolibedfile = blast([fastafile, "--db={0}".format(ecolifile)])
cmd = "cat {0} {1}".format(vecbedfile, ecolibedfile)
cmd += " | mergeBed -nms -d 100 -i stdin"
cmd += " | maskFastaFromBed -fi {0} -bed stdin -fo {1}".\
format(fastafile, outfastafile)
sh(cmd)
return tidy([outfastafile]) | ['def', 'mask', '(', 'args', ')', ':', 'p', '=', 'OptionParser', '(', 'mask', '.', '__doc__', ')', 'p', '.', 'add_option', '(', '"--db"', ',', 'help', '=', '"Contaminant db other than Ecoli K12 [default: %default]"', ')', 'opts', ',', 'args', '=', 'p', '.', 'parse_args', '(', 'args', ')', 'if', 'len', '(', 'args', ')', '!=', '1', ':', 'sys', '.', 'exit', '(', 'not', 'p', '.', 'print_help', '(', ')', ')', 'fastafile', ',', '=', 'args', 'assert', 'op', '.', 'exists', '(', 'fastafile', ')', 'outfastafile', '=', 'fastafile', '.', 'rsplit', '(', '"."', ',', '1', ')', '[', '0', ']', '+', '".masked.fasta"', 'vecbedfile', '=', 'blast', '(', '[', 'fastafile', ']', ')', 'ecoliurl', '=', '"ftp://ftp.ncbi.nih.gov/genomes/Bacteria/Escherichia_coli_K_12_substr__DH10B_uid58979/NC_010473.fna"', 'ecolifile', '=', 'opts', '.', 'db', 'or', 'download', '(', 'ecoliurl', ',', 'filename', '=', '"Ecoli.fasta"', ')', 'assert', 'op', '.', 'exists', '(', 'ecolifile', ')', 'ecolibedfile', '=', 'blast', '(', '[', 'fastafile', ',', '"--db={0}"', '.', 'format', '(', 'ecolifile', ')', ']', ')', 'cmd', '=', '"cat {0} {1}"', '.', 'format', '(', 'vecbedfile', ',', 'ecolibedfile', ')', 'cmd', '+=', '" | mergeBed -nms -d 100 -i stdin"', 'cmd', '+=', '" | maskFastaFromBed -fi {0} -bed stdin -fo {1}"', '.', 'format', '(', 'fastafile', ',', 'outfastafile', ')', 'sh', '(', 'cmd', ')', 'return', 'tidy', '(', '[', 'outfastafile', ']', ')'] | %prog mask fastafile
Mask the contaminants. By default, this will compare against UniVec_Core and
Ecoli.fasta. Merge the contaminant results, and use `maskFastaFromBed`. Can
perform FASTA tidy if requested. | ['%prog', 'mask', 'fastafile'] | train | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/vecscreen.py#L29-L62 |
4,452 | CZ-NIC/yangson | yangson/schemanode.py | ListNode.orphan_entry | def orphan_entry(self, rval: RawObject) -> "ArrayEntry":
"""Return an isolated entry of the receiver.
Args:
rval: Raw object to be used for the returned entry.
"""
val = self.entry_from_raw(rval)
return ArrayEntry(0, EmptyList(), EmptyList(), val, None, self,
val.timestamp) | python | def orphan_entry(self, rval: RawObject) -> "ArrayEntry":
"""Return an isolated entry of the receiver.
Args:
rval: Raw object to be used for the returned entry.
"""
val = self.entry_from_raw(rval)
return ArrayEntry(0, EmptyList(), EmptyList(), val, None, self,
val.timestamp) | ['def', 'orphan_entry', '(', 'self', ',', 'rval', ':', 'RawObject', ')', '->', '"ArrayEntry"', ':', 'val', '=', 'self', '.', 'entry_from_raw', '(', 'rval', ')', 'return', 'ArrayEntry', '(', '0', ',', 'EmptyList', '(', ')', ',', 'EmptyList', '(', ')', ',', 'val', ',', 'None', ',', 'self', ',', 'val', '.', 'timestamp', ')'] | Return an isolated entry of the receiver.
Args:
rval: Raw object to be used for the returned entry. | ['Return', 'an', 'isolated', 'entry', 'of', 'the', 'receiver', '.'] | train | https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/schemanode.py#L1096-L1104 |
4,453 | mapillary/mapillary_tools | mapillary_tools/uploader.py | progress | def progress(count, total, suffix=''):
'''
Display progress bar
sources: https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
'''
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[%s] %s%s %s\r' % (bar, percents, '%', suffix))
sys.stdout.flush() | python | def progress(count, total, suffix=''):
'''
Display progress bar
sources: https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
'''
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[%s] %s%s %s\r' % (bar, percents, '%', suffix))
sys.stdout.flush() | ['def', 'progress', '(', 'count', ',', 'total', ',', 'suffix', '=', "''", ')', ':', 'bar_len', '=', '60', 'filled_len', '=', 'int', '(', 'round', '(', 'bar_len', '*', 'count', '/', 'float', '(', 'total', ')', ')', ')', 'percents', '=', 'round', '(', '100.0', '*', 'count', '/', 'float', '(', 'total', ')', ',', '1', ')', 'bar', '=', "'='", '*', 'filled_len', '+', "'-'", '*', '(', 'bar_len', '-', 'filled_len', ')', 'sys', '.', 'stdout', '.', 'write', '(', "'[%s] %s%s %s\\r'", '%', '(', 'bar', ',', 'percents', ',', "'%'", ',', 'suffix', ')', ')', 'sys', '.', 'stdout', '.', 'flush', '(', ')'] | Display progress bar
sources: https://gist.github.com/vladignatyev/06860ec2040cb497f0f3 | ['Display', 'progress', 'bar', 'sources', ':', 'https', ':', '//', 'gist', '.', 'github', '.', 'com', '/', 'vladignatyev', '/', '06860ec2040cb497f0f3'] | train | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/uploader.py#L409-L419 |
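Typical use of the bar inside a loop; the helper is reproduced here so the sketch runs on its own, and the final print() moves the cursor off the carriage-return line once the loop finishes.
import sys
import time

def progress(count, total, suffix=''):
    # Same logic as above, repeated so this example is self-contained.
    bar_len = 60
    filled_len = int(round(bar_len * count / float(total)))
    percents = round(100.0 * count / float(total), 1)
    bar = '=' * filled_len + '-' * (bar_len - filled_len)
    sys.stdout.write('[%s] %s%s %s\r' % (bar, percents, '%', suffix))
    sys.stdout.flush()

for count in range(1, 51):
    progress(count, 50, suffix='uploading')
    time.sleep(0.01)
print()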
4,454 | mitsei/dlkit | dlkit/services/repository.py | Repository.use_plenary_composition_view | def use_plenary_composition_view(self):
"""Pass through to provider CompositionLookupSession.use_plenary_composition_view"""
self._object_views['composition'] = PLENARY
# self._get_provider_session('composition_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_composition_view()
except AttributeError:
pass | python | def use_plenary_composition_view(self):
"""Pass through to provider CompositionLookupSession.use_plenary_composition_view"""
self._object_views['composition'] = PLENARY
# self._get_provider_session('composition_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_composition_view()
except AttributeError:
pass | ['def', 'use_plenary_composition_view', '(', 'self', ')', ':', 'self', '.', '_object_views', '[', "'composition'", ']', '=', 'PLENARY', "# self._get_provider_session('composition_lookup_session') # To make sure the session is tracked", 'for', 'session', 'in', 'self', '.', '_get_provider_sessions', '(', ')', ':', 'try', ':', 'session', '.', 'use_plenary_composition_view', '(', ')', 'except', 'AttributeError', ':', 'pass'] | Pass through to provider CompositionLookupSession.use_plenary_composition_view | ['Pass', 'through', 'to', 'provider', 'CompositionLookupSession', '.', 'use_plenary_composition_view'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/repository.py#L1872-L1880 |
4,455 | openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | get_cdd_only_candidate_models | def get_cdd_only_candidate_models(
data, minimum_non_zero_cdd, minimum_total_cdd, beta_cdd_maximum_p_value, weights_col
):
""" Return a list of all possible candidate cdd-only models.
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and 1 to n
columns with names of the form ``cdd_<balance_point>``. All columns
with names of this form will be used to fit a candidate model.
DataFrames of this form can be made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
minimum_non_zero_cdd : :any:`int`
Minimum allowable number of non-zero cooling degree day values.
minimum_total_cdd : :any:`float`
Minimum allowable total sum of cooling degree day values.
beta_cdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta cdd parameter.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
Returns
-------
candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
A list of cdd-only candidate models, with any associated warnings.
"""
balance_points = [int(col[4:]) for col in data.columns if col.startswith("cdd")]
candidate_models = [
get_single_cdd_only_candidate_model(
data,
minimum_non_zero_cdd,
minimum_total_cdd,
beta_cdd_maximum_p_value,
weights_col,
balance_point,
)
for balance_point in balance_points
]
return candidate_models | python | def get_cdd_only_candidate_models(
data, minimum_non_zero_cdd, minimum_total_cdd, beta_cdd_maximum_p_value, weights_col
):
""" Return a list of all possible candidate cdd-only models.
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and 1 to n
columns with names of the form ``cdd_<balance_point>``. All columns
with names of this form will be used to fit a candidate model.
DataFrames of this form can be made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
minimum_non_zero_cdd : :any:`int`
Minimum allowable number of non-zero cooling degree day values.
minimum_total_cdd : :any:`float`
Minimum allowable total sum of cooling degree day values.
beta_cdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta cdd parameter.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
Returns
-------
candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
A list of cdd-only candidate models, with any associated warnings.
"""
balance_points = [int(col[4:]) for col in data.columns if col.startswith("cdd")]
candidate_models = [
get_single_cdd_only_candidate_model(
data,
minimum_non_zero_cdd,
minimum_total_cdd,
beta_cdd_maximum_p_value,
weights_col,
balance_point,
)
for balance_point in balance_points
]
return candidate_models | ['def', 'get_cdd_only_candidate_models', '(', 'data', ',', 'minimum_non_zero_cdd', ',', 'minimum_total_cdd', ',', 'beta_cdd_maximum_p_value', ',', 'weights_col', ')', ':', 'balance_points', '=', '[', 'int', '(', 'col', '[', '4', ':', ']', ')', 'for', 'col', 'in', 'data', '.', 'columns', 'if', 'col', '.', 'startswith', '(', '"cdd"', ')', ']', 'candidate_models', '=', '[', 'get_single_cdd_only_candidate_model', '(', 'data', ',', 'minimum_non_zero_cdd', ',', 'minimum_total_cdd', ',', 'beta_cdd_maximum_p_value', ',', 'weights_col', ',', 'balance_point', ',', ')', 'for', 'balance_point', 'in', 'balance_points', ']', 'return', 'candidate_models'] | Return a list of all possible candidate cdd-only models.
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and 1 to n
columns with names of the form ``cdd_<balance_point>``. All columns
with names of this form will be used to fit a candidate model.
DataFrames of this form can be made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
minimum_non_zero_cdd : :any:`int`
Minimum allowable number of non-zero cooling degree day values.
minimum_total_cdd : :any:`float`
Minimum allowable total sum of cooling degree day values.
beta_cdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta cdd parameter.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
Returns
-------
candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
A list of cdd-only candidate models, with any associated warnings. | ['Return', 'a', 'list', 'of', 'all', 'possible', 'candidate', 'cdd', '-', 'only', 'models', '.'] | train | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L1139-L1179 |
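The first statement above recovers candidate balance points from design-matrix column names of the form cdd_<balance_point>. A tiny illustration with made-up column names:
# Hypothetical design-matrix columns; only the cdd_* ones matter here.
columns = ["meter_value", "cdd_65", "cdd_70", "hdd_60"]

# Same extraction as in get_cdd_only_candidate_models: drop the "cdd_" prefix.
balance_points = [int(col[4:]) for col in columns if col.startswith("cdd")]
print(balance_points)  # [65, 70]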
4,456 | eaton-lab/toytree | toytree/etemini.py | TreeNode.remove_child | def remove_child(self, child):
"""
Removes a child from this node (parent and child
nodes still exist but are no longer connected).
"""
try:
self.children.remove(child)
except ValueError as e:
raise TreeError("child not found")
else:
child.up = None
return child | python | def remove_child(self, child):
"""
Removes a child from this node (parent and child
nodes still exist but are no longer connected).
"""
try:
self.children.remove(child)
except ValueError as e:
raise TreeError("child not found")
else:
child.up = None
return child | ['def', 'remove_child', '(', 'self', ',', 'child', ')', ':', 'try', ':', 'self', '.', 'children', '.', 'remove', '(', 'child', ')', 'except', 'ValueError', 'as', 'e', ':', 'raise', 'TreeError', '(', '"child not found"', ')', 'else', ':', 'child', '.', 'up', '=', 'None', 'return', 'child'] | Removes a child from this node (parent and child
nodes still exist but are no longer connected). | ['Removes', 'a', 'child', 'from', 'this', 'node', '(', 'parent', 'and', 'child', 'nodes', 'still', 'exist', 'but', 'are', 'no', 'longer', 'connected', ')', '.'] | train | https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/etemini.py#L277-L288 |
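A toy illustration of the detach behaviour with a minimal stand-in node class (the real TreeNode carries far more state): both objects survive, only the links between them are severed.
class Node:
    def __init__(self, name):
        self.name, self.up, self.children = name, None, []

    def add_child(self, child):
        child.up = self
        self.children.append(child)

root, leaf = Node("root"), Node("leaf")
root.add_child(leaf)

# Same effect as remove_child(leaf): drop the link in both directions.
root.children.remove(leaf)
leaf.up = None
print(root.children, leaf.up)  # [] None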
4,457 | mitsei/dlkit | dlkit/services/logging_.py | LoggingManager.use_plenary_log_view | def use_plenary_log_view(self):
"""Pass through to provider LogEntryLogSession.use_plenary_log_view"""
self._log_view = PLENARY
# self._get_provider_session('log_entry_log_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_log_view()
except AttributeError:
pass | python | def use_plenary_log_view(self):
"""Pass through to provider LogEntryLogSession.use_plenary_log_view"""
self._log_view = PLENARY
# self._get_provider_session('log_entry_log_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_log_view()
except AttributeError:
pass | ['def', 'use_plenary_log_view', '(', 'self', ')', ':', 'self', '.', '_log_view', '=', 'PLENARY', "# self._get_provider_session('log_entry_log_session') # To make sure the session is tracked", 'for', 'session', 'in', 'self', '.', '_get_provider_sessions', '(', ')', ':', 'try', ':', 'session', '.', 'use_plenary_log_view', '(', ')', 'except', 'AttributeError', ':', 'pass'] | Pass through to provider LogEntryLogSession.use_plenary_log_view | ['Pass', 'through', 'to', 'provider', 'LogEntryLogSession', '.', 'use_plenary_log_view'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/logging_.py#L424-L432 |
4,458 | ray-project/ray | python/ray/worker.py | Worker.submit_task | def submit_task(self,
function_descriptor,
args,
actor_id=None,
actor_handle_id=None,
actor_counter=0,
actor_creation_id=None,
actor_creation_dummy_object_id=None,
max_actor_reconstructions=0,
execution_dependencies=None,
new_actor_handles=None,
num_return_vals=None,
resources=None,
placement_resources=None,
driver_id=None):
"""Submit a remote task to the scheduler.
Tell the scheduler to schedule the execution of the function with
function_descriptor with arguments args. Retrieve object IDs for the
outputs of the function from the scheduler and immediately return them.
Args:
function_descriptor: The function descriptor to execute.
args: The arguments to pass into the function. Arguments can be
object IDs or they can be values. If they are values, they must
be serializable objects.
actor_id: The ID of the actor that this task is for.
actor_counter: The counter of the actor task.
actor_creation_id: The ID of the actor to create, if this is an
actor creation task.
actor_creation_dummy_object_id: If this task is an actor method,
then this argument is the dummy object ID associated with the
actor creation task for the corresponding actor.
execution_dependencies: The execution dependencies for this task.
num_return_vals: The number of return values this function should
have.
resources: The resource requirements for this task.
placement_resources: The resources required for placing the task.
If this is not provided or if it is an empty dictionary, then
the placement resources will be equal to resources.
driver_id: The ID of the relevant driver. This is almost always the
driver ID of the driver that is currently running. However, in
the exceptional case that an actor task is being dispatched to
an actor created by a different driver, this should be the
driver ID of the driver that created the actor.
Returns:
The return object IDs for this task.
"""
with profiling.profile("submit_task"):
if actor_id is None:
assert actor_handle_id is None
actor_id = ActorID.nil()
actor_handle_id = ActorHandleID.nil()
else:
assert actor_handle_id is not None
if actor_creation_id is None:
actor_creation_id = ActorID.nil()
if actor_creation_dummy_object_id is None:
actor_creation_dummy_object_id = ObjectID.nil()
# Put large or complex arguments that are passed by value in the
# object store first.
args_for_raylet = []
for arg in args:
if isinstance(arg, ObjectID):
args_for_raylet.append(arg)
elif ray._raylet.check_simple_value(arg):
args_for_raylet.append(arg)
else:
args_for_raylet.append(put(arg))
# By default, there are no execution dependencies.
if execution_dependencies is None:
execution_dependencies = []
if new_actor_handles is None:
new_actor_handles = []
if driver_id is None:
driver_id = self.task_driver_id
if resources is None:
raise ValueError("The resources dictionary is required.")
for value in resources.values():
assert (isinstance(value, int) or isinstance(value, float))
if value < 0:
raise ValueError(
"Resource quantities must be nonnegative.")
if (value >= 1 and isinstance(value, float)
and not value.is_integer()):
raise ValueError(
"Resource quantities must all be whole numbers.")
# Remove any resources with zero quantity requirements
resources = {
resource_label: resource_quantity
for resource_label, resource_quantity in resources.items()
if resource_quantity > 0
}
if placement_resources is None:
placement_resources = {}
# Increment the worker's task index to track how many tasks
# have been submitted by the current task so far.
self.task_context.task_index += 1
# The parent task must be set for the submitted task.
assert not self.current_task_id.is_nil()
# Current driver id must not be nil when submitting a task.
# Because every task must belong to a driver.
assert not self.task_driver_id.is_nil()
# Submit the task to raylet.
function_descriptor_list = (
function_descriptor.get_function_descriptor_list())
assert isinstance(driver_id, DriverID)
task = ray._raylet.Task(
driver_id,
function_descriptor_list,
args_for_raylet,
num_return_vals,
self.current_task_id,
self.task_context.task_index,
actor_creation_id,
actor_creation_dummy_object_id,
max_actor_reconstructions,
actor_id,
actor_handle_id,
actor_counter,
new_actor_handles,
execution_dependencies,
resources,
placement_resources,
)
self.raylet_client.submit_task(task)
return task.returns() | python | def submit_task(self,
function_descriptor,
args,
actor_id=None,
actor_handle_id=None,
actor_counter=0,
actor_creation_id=None,
actor_creation_dummy_object_id=None,
max_actor_reconstructions=0,
execution_dependencies=None,
new_actor_handles=None,
num_return_vals=None,
resources=None,
placement_resources=None,
driver_id=None):
"""Submit a remote task to the scheduler.
Tell the scheduler to schedule the execution of the function with
function_descriptor with arguments args. Retrieve object IDs for the
outputs of the function from the scheduler and immediately return them.
Args:
function_descriptor: The function descriptor to execute.
args: The arguments to pass into the function. Arguments can be
object IDs or they can be values. If they are values, they must
be serializable objects.
actor_id: The ID of the actor that this task is for.
actor_counter: The counter of the actor task.
actor_creation_id: The ID of the actor to create, if this is an
actor creation task.
actor_creation_dummy_object_id: If this task is an actor method,
then this argument is the dummy object ID associated with the
actor creation task for the corresponding actor.
execution_dependencies: The execution dependencies for this task.
num_return_vals: The number of return values this function should
have.
resources: The resource requirements for this task.
placement_resources: The resources required for placing the task.
If this is not provided or if it is an empty dictionary, then
the placement resources will be equal to resources.
driver_id: The ID of the relevant driver. This is almost always the
driver ID of the driver that is currently running. However, in
the exceptional case that an actor task is being dispatched to
an actor created by a different driver, this should be the
driver ID of the driver that created the actor.
Returns:
The return object IDs for this task.
"""
with profiling.profile("submit_task"):
if actor_id is None:
assert actor_handle_id is None
actor_id = ActorID.nil()
actor_handle_id = ActorHandleID.nil()
else:
assert actor_handle_id is not None
if actor_creation_id is None:
actor_creation_id = ActorID.nil()
if actor_creation_dummy_object_id is None:
actor_creation_dummy_object_id = ObjectID.nil()
# Put large or complex arguments that are passed by value in the
# object store first.
args_for_raylet = []
for arg in args:
if isinstance(arg, ObjectID):
args_for_raylet.append(arg)
elif ray._raylet.check_simple_value(arg):
args_for_raylet.append(arg)
else:
args_for_raylet.append(put(arg))
# By default, there are no execution dependencies.
if execution_dependencies is None:
execution_dependencies = []
if new_actor_handles is None:
new_actor_handles = []
if driver_id is None:
driver_id = self.task_driver_id
if resources is None:
raise ValueError("The resources dictionary is required.")
for value in resources.values():
assert (isinstance(value, int) or isinstance(value, float))
if value < 0:
raise ValueError(
"Resource quantities must be nonnegative.")
if (value >= 1 and isinstance(value, float)
and not value.is_integer()):
raise ValueError(
"Resource quantities must all be whole numbers.")
# Remove any resources with zero quantity requirements
resources = {
resource_label: resource_quantity
for resource_label, resource_quantity in resources.items()
if resource_quantity > 0
}
if placement_resources is None:
placement_resources = {}
# Increment the worker's task index to track how many tasks
# have been submitted by the current task so far.
self.task_context.task_index += 1
# The parent task must be set for the submitted task.
assert not self.current_task_id.is_nil()
# Current driver id must not be nil when submitting a task.
# Because every task must belong to a driver.
assert not self.task_driver_id.is_nil()
# Submit the task to raylet.
function_descriptor_list = (
function_descriptor.get_function_descriptor_list())
assert isinstance(driver_id, DriverID)
task = ray._raylet.Task(
driver_id,
function_descriptor_list,
args_for_raylet,
num_return_vals,
self.current_task_id,
self.task_context.task_index,
actor_creation_id,
actor_creation_dummy_object_id,
max_actor_reconstructions,
actor_id,
actor_handle_id,
actor_counter,
new_actor_handles,
execution_dependencies,
resources,
placement_resources,
)
self.raylet_client.submit_task(task)
return task.returns() | ['def', 'submit_task', '(', 'self', ',', 'function_descriptor', ',', 'args', ',', 'actor_id', '=', 'None', ',', 'actor_handle_id', '=', 'None', ',', 'actor_counter', '=', '0', ',', 'actor_creation_id', '=', 'None', ',', 'actor_creation_dummy_object_id', '=', 'None', ',', 'max_actor_reconstructions', '=', '0', ',', 'execution_dependencies', '=', 'None', ',', 'new_actor_handles', '=', 'None', ',', 'num_return_vals', '=', 'None', ',', 'resources', '=', 'None', ',', 'placement_resources', '=', 'None', ',', 'driver_id', '=', 'None', ')', ':', 'with', 'profiling', '.', 'profile', '(', '"submit_task"', ')', ':', 'if', 'actor_id', 'is', 'None', ':', 'assert', 'actor_handle_id', 'is', 'None', 'actor_id', '=', 'ActorID', '.', 'nil', '(', ')', 'actor_handle_id', '=', 'ActorHandleID', '.', 'nil', '(', ')', 'else', ':', 'assert', 'actor_handle_id', 'is', 'not', 'None', 'if', 'actor_creation_id', 'is', 'None', ':', 'actor_creation_id', '=', 'ActorID', '.', 'nil', '(', ')', 'if', 'actor_creation_dummy_object_id', 'is', 'None', ':', 'actor_creation_dummy_object_id', '=', 'ObjectID', '.', 'nil', '(', ')', '# Put large or complex arguments that are passed by value in the', '# object store first.', 'args_for_raylet', '=', '[', ']', 'for', 'arg', 'in', 'args', ':', 'if', 'isinstance', '(', 'arg', ',', 'ObjectID', ')', ':', 'args_for_raylet', '.', 'append', '(', 'arg', ')', 'elif', 'ray', '.', '_raylet', '.', 'check_simple_value', '(', 'arg', ')', ':', 'args_for_raylet', '.', 'append', '(', 'arg', ')', 'else', ':', 'args_for_raylet', '.', 'append', '(', 'put', '(', 'arg', ')', ')', '# By default, there are no execution dependencies.', 'if', 'execution_dependencies', 'is', 'None', ':', 'execution_dependencies', '=', '[', ']', 'if', 'new_actor_handles', 'is', 'None', ':', 'new_actor_handles', '=', '[', ']', 'if', 'driver_id', 'is', 'None', ':', 'driver_id', '=', 'self', '.', 'task_driver_id', 'if', 'resources', 'is', 'None', ':', 'raise', 'ValueError', '(', '"The resources dictionary is required."', ')', 'for', 'value', 'in', 'resources', '.', 'values', '(', ')', ':', 'assert', '(', 'isinstance', '(', 'value', ',', 'int', ')', 'or', 'isinstance', '(', 'value', ',', 'float', ')', ')', 'if', 'value', '<', '0', ':', 'raise', 'ValueError', '(', '"Resource quantities must be nonnegative."', ')', 'if', '(', 'value', '>=', '1', 'and', 'isinstance', '(', 'value', ',', 'float', ')', 'and', 'not', 'value', '.', 'is_integer', '(', ')', ')', ':', 'raise', 'ValueError', '(', '"Resource quantities must all be whole numbers."', ')', '# Remove any resources with zero quantity requirements', 'resources', '=', '{', 'resource_label', ':', 'resource_quantity', 'for', 'resource_label', ',', 'resource_quantity', 'in', 'resources', '.', 'items', '(', ')', 'if', 'resource_quantity', '>', '0', '}', 'if', 'placement_resources', 'is', 'None', ':', 'placement_resources', '=', '{', '}', "# Increment the worker's task index to track how many tasks", '# have been submitted by the current task so far.', 'self', '.', 'task_context', '.', 'task_index', '+=', '1', '# The parent task must be set for the submitted task.', 'assert', 'not', 'self', '.', 'current_task_id', '.', 'is_nil', '(', ')', '# Current driver id must not be nil when submitting a task.', '# Because every task must belong to a driver.', 'assert', 'not', 'self', '.', 'task_driver_id', '.', 'is_nil', '(', ')', '# Submit the task to raylet.', 'function_descriptor_list', '=', '(', 'function_descriptor', '.', 'get_function_descriptor_list', '(', ')', ')', 
'assert', 'isinstance', '(', 'driver_id', ',', 'DriverID', ')', 'task', '=', 'ray', '.', '_raylet', '.', 'Task', '(', 'driver_id', ',', 'function_descriptor_list', ',', 'args_for_raylet', ',', 'num_return_vals', ',', 'self', '.', 'current_task_id', ',', 'self', '.', 'task_context', '.', 'task_index', ',', 'actor_creation_id', ',', 'actor_creation_dummy_object_id', ',', 'max_actor_reconstructions', ',', 'actor_id', ',', 'actor_handle_id', ',', 'actor_counter', ',', 'new_actor_handles', ',', 'execution_dependencies', ',', 'resources', ',', 'placement_resources', ',', ')', 'self', '.', 'raylet_client', '.', 'submit_task', '(', 'task', ')', 'return', 'task', '.', 'returns', '(', ')'] | Submit a remote task to the scheduler.
Tell the scheduler to schedule the execution of the function with
function_descriptor with arguments args. Retrieve object IDs for the
outputs of the function from the scheduler and immediately return them.
Args:
function_descriptor: The function descriptor to execute.
args: The arguments to pass into the function. Arguments can be
object IDs or they can be values. If they are values, they must
be serializable objects.
actor_id: The ID of the actor that this task is for.
actor_counter: The counter of the actor task.
actor_creation_id: The ID of the actor to create, if this is an
actor creation task.
actor_creation_dummy_object_id: If this task is an actor method,
then this argument is the dummy object ID associated with the
actor creation task for the corresponding actor.
execution_dependencies: The execution dependencies for this task.
num_return_vals: The number of return values this function should
have.
resources: The resource requirements for this task.
placement_resources: The resources required for placing the task.
If this is not provided or if it is an empty dictionary, then
the placement resources will be equal to resources.
driver_id: The ID of the relevant driver. This is almost always the
driver ID of the driver that is currently running. However, in
the exceptional case that an actor task is being dispatched to
an actor created by a different driver, this should be the
driver ID of the driver that created the actor.
Returns:
The return object IDs for this task. | ['Submit', 'a', 'remote', 'task', 'to', 'the', 'scheduler', '.'] | train | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/worker.py#L561-L699 |
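A standalone sketch of the argument-preparation rule described in the comments of the row above (illustration only — `is_object_id`, `is_simple_value` and `put_in_object_store` are hypothetical stand-ins, not ray internals):

def prepare_args(args, is_object_id, is_simple_value, put_in_object_store):
    # Object IDs and small/simple values are passed through unchanged;
    # anything large or complex is placed in the object store first and
    # submitted by reference, mirroring the loop over `args` above.
    prepared = []
    for arg in args:
        if is_object_id(arg) or is_simple_value(arg):
            prepared.append(arg)
        else:
            prepared.append(put_in_object_store(arg))
    return prepared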
4,459 | Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_1/git/git_client_base.py | GitClientBase.restore_repository_from_recycle_bin | def restore_repository_from_recycle_bin(self, repository_details, project, repository_id):
"""RestoreRepositoryFromRecycleBin.
[Preview API] Recover a soft-deleted Git repository. Recently deleted repositories go into a soft-delete state for a period of time before they are hard deleted and become unrecoverable.
:param :class:`<GitRecycleBinRepositoryDetails> <azure.devops.v5_1.git.models.GitRecycleBinRepositoryDetails>` repository_details:
:param str project: Project ID or project name
:param str repository_id: The ID of the repository.
:rtype: :class:`<GitRepository> <azure.devops.v5_1.git.models.GitRepository>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
content = self._serialize.body(repository_details, 'GitRecycleBinRepositoryDetails')
response = self._send(http_method='PATCH',
location_id='a663da97-81db-4eb3-8b83-287670f63073',
version='5.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('GitRepository', response) | python | def restore_repository_from_recycle_bin(self, repository_details, project, repository_id):
"""RestoreRepositoryFromRecycleBin.
[Preview API] Recover a soft-deleted Git repository. Recently deleted repositories go into a soft-delete state for a period of time before they are hard deleted and become unrecoverable.
:param :class:`<GitRecycleBinRepositoryDetails> <azure.devops.v5_1.git.models.GitRecycleBinRepositoryDetails>` repository_details:
:param str project: Project ID or project name
:param str repository_id: The ID of the repository.
:rtype: :class:`<GitRepository> <azure.devops.v5_1.git.models.GitRepository>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
content = self._serialize.body(repository_details, 'GitRecycleBinRepositoryDetails')
response = self._send(http_method='PATCH',
location_id='a663da97-81db-4eb3-8b83-287670f63073',
version='5.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('GitRepository', response) | ['def', 'restore_repository_from_recycle_bin', '(', 'self', ',', 'repository_details', ',', 'project', ',', 'repository_id', ')', ':', 'route_values', '=', '{', '}', 'if', 'project', 'is', 'not', 'None', ':', 'route_values', '[', "'project'", ']', '=', 'self', '.', '_serialize', '.', 'url', '(', "'project'", ',', 'project', ',', "'str'", ')', 'if', 'repository_id', 'is', 'not', 'None', ':', 'route_values', '[', "'repositoryId'", ']', '=', 'self', '.', '_serialize', '.', 'url', '(', "'repository_id'", ',', 'repository_id', ',', "'str'", ')', 'content', '=', 'self', '.', '_serialize', '.', 'body', '(', 'repository_details', ',', "'GitRecycleBinRepositoryDetails'", ')', 'response', '=', 'self', '.', '_send', '(', 'http_method', '=', "'PATCH'", ',', 'location_id', '=', "'a663da97-81db-4eb3-8b83-287670f63073'", ',', 'version', '=', "'5.1-preview.1'", ',', 'route_values', '=', 'route_values', ',', 'content', '=', 'content', ')', 'return', 'self', '.', '_deserialize', '(', "'GitRepository'", ',', 'response', ')'] | RestoreRepositoryFromRecycleBin.
[Preview API] Recover a soft-deleted Git repository. Recently deleted repositories go into a soft-delete state for a period of time before they are hard deleted and become unrecoverable.
:param :class:`<GitRecycleBinRepositoryDetails> <azure.devops.v5_1.git.models.GitRecycleBinRepositoryDetails>` repository_details:
:param str project: Project ID or project name
:param str repository_id: The ID of the repository.
:rtype: :class:`<GitRepository> <azure.devops.v5_1.git.models.GitRepository>` | ['RestoreRepositoryFromRecycleBin', '.', '[', 'Preview', 'API', ']', 'Recover', 'a', 'soft', '-', 'deleted', 'Git', 'repository', '.', 'Recently', 'deleted', 'repositories', 'go', 'into', 'a', 'soft', '-', 'delete', 'state', 'for', 'a', 'period', 'of', 'time', 'before', 'they', 'are', 'hard', 'deleted', 'and', 'become', 'unrecoverable', '.', ':', 'param', ':', 'class', ':', '<GitRecycleBinRepositoryDetails', '>', '<azure', '.', 'devops', '.', 'v5_1', '.', 'git', '.', 'models', '.', 'GitRecycleBinRepositoryDetails', '>', 'repository_details', ':', ':', 'param', 'str', 'project', ':', 'Project', 'ID', 'or', 'project', 'name', ':', 'param', 'str', 'repository_id', ':', 'The', 'ID', 'of', 'the', 'repository', '.', ':', 'rtype', ':', ':', 'class', ':', '<GitRepository', '>', '<azure', '.', 'devops', '.', 'v5_1', '.', 'git', '.', 'models', '.', 'GitRepository', '>'] | train | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/git/git_client_base.py#L2701-L2720 |
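A hedged usage sketch of the call above. The Connection/BasicAuthentication/get_git_client plumbing and the `{"deleted": False}` payload are assumptions about the azure-devops SDK, and the organisation URL, project and repository ID are placeholders:

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(base_url="https://dev.azure.com/my-org",
                        creds=BasicAuthentication("", "personal-access-token"))
git_client = connection.clients.get_git_client()
restored = git_client.restore_repository_from_recycle_bin(
    repository_details={"deleted": False},  # undelete request payload (assumed shape)
    project="MyProject",
    repository_id="00000000-0000-0000-0000-000000000000")
print(restored.name)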
4,460 | pymc-devs/pymc | pymc/NormalApproximation.py | MAP.grad_and_hess | def grad_and_hess(self):
"""
Computes self's gradient and Hessian. Used if the
optimization method for a NormApprox doesn't
use gradients and hessians, for instance fmin.
"""
for i in xrange(self.len):
di = self.diff(i)
self.grad[i] = di
self.hess[i, i] = self.diff(i, 2)
if i < self.len - 1:
for j in xrange(i + 1, self.len):
dij = self.diff2(i, j)
self.hess[i, j] = dij
self.hess[j, i] = dij | python | def grad_and_hess(self):
"""
Computes self's gradient and Hessian. Used if the
optimization method for a NormApprox doesn't
use gradients and hessians, for instance fmin.
"""
for i in xrange(self.len):
di = self.diff(i)
self.grad[i] = di
self.hess[i, i] = self.diff(i, 2)
if i < self.len - 1:
for j in xrange(i + 1, self.len):
dij = self.diff2(i, j)
self.hess[i, j] = dij
self.hess[j, i] = dij | ['def', 'grad_and_hess', '(', 'self', ')', ':', 'for', 'i', 'in', 'xrange', '(', 'self', '.', 'len', ')', ':', 'di', '=', 'self', '.', 'diff', '(', 'i', ')', 'self', '.', 'grad', '[', 'i', ']', '=', 'di', 'self', '.', 'hess', '[', 'i', ',', 'i', ']', '=', 'self', '.', 'diff', '(', 'i', ',', '2', ')', 'if', 'i', '<', 'self', '.', 'len', '-', '1', ':', 'for', 'j', 'in', 'xrange', '(', 'i', '+', '1', ',', 'self', '.', 'len', ')', ':', 'dij', '=', 'self', '.', 'diff2', '(', 'i', ',', 'j', ')', 'self', '.', 'hess', '[', 'i', ',', 'j', ']', '=', 'dij', 'self', '.', 'hess', '[', 'j', ',', 'i', ']', '=', 'dij'] | Computes self's gradient and Hessian. Used if the
optimization method for a NormApprox doesn't
use gradients and hessians, for instance fmin. | ['Computes', 'self', 's', 'gradient', 'and', 'Hessian', '.', 'Used', 'if', 'the', 'optimization', 'method', 'for', 'a', 'NormApprox', 'doesn', 't', 'use', 'gradients', 'and', 'hessians', 'for', 'instance', 'fmin', '.'] | train | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/NormalApproximation.py#L487-L505 |
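A standalone numpy sketch of the same fill pattern — diagonal second differences plus mirrored off-diagonal terms. The difference callables `d1`, `d2` and `d2_mixed` are hypothetical stand-ins for the `diff`/`diff2` methods used above:

import numpy as np

def fill_grad_and_hess(n, d1, d2, d2_mixed):
    # grad[i] from first differences, hess[i, i] from second differences,
    # and each mixed term computed once and mirrored across the diagonal.
    grad = np.zeros(n)
    hess = np.zeros((n, n))
    for i in range(n):
        grad[i] = d1(i)
        hess[i, i] = d2(i)
        for j in range(i + 1, n):
            hess[i, j] = hess[j, i] = d2_mixed(i, j)
    return grad, hess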
4,461 | nerdvegas/rez | src/rezplugins/package_repository/memory.py | MemoryPackageRepository.create_repository | def create_repository(cls, repository_data):
"""Create a standalone, in-memory repository.
Using this function bypasses the `package_repository_manager` singleton.
This is usually desired however, since in-memory repositories are for
temporarily storing programmatically created packages, which we do not
want to cache and that do not persist.
Args:
repository_data (dict): Repository data, see class docstring.
Returns:
`MemoryPackageRepository` object.
"""
location = "memory{%s}" % hex(id(repository_data))
resource_pool = ResourcePool(cache_size=None)
repo = MemoryPackageRepository(location, resource_pool)
repo.data = repository_data
return repo | python | def create_repository(cls, repository_data):
"""Create a standalone, in-memory repository.
Using this function bypasses the `package_repository_manager` singleton.
This is usually desired however, since in-memory repositories are for
temporarily storing programmatically created packages, which we do not
want to cache and that do not persist.
Args:
repository_data (dict): Repository data, see class docstring.
Returns:
`MemoryPackageRepository` object.
"""
location = "memory{%s}" % hex(id(repository_data))
resource_pool = ResourcePool(cache_size=None)
repo = MemoryPackageRepository(location, resource_pool)
repo.data = repository_data
return repo | ['def', 'create_repository', '(', 'cls', ',', 'repository_data', ')', ':', 'location', '=', '"memory{%s}"', '%', 'hex', '(', 'id', '(', 'repository_data', ')', ')', 'resource_pool', '=', 'ResourcePool', '(', 'cache_size', '=', 'None', ')', 'repo', '=', 'MemoryPackageRepository', '(', 'location', ',', 'resource_pool', ')', 'repo', '.', 'data', '=', 'repository_data', 'return', 'repo'] | Create a standalone, in-memory repository.
Using this function bypasses the `package_repository_manager` singleton.
This is usually desired however, since in-memory repositories are for
temporarily storing programmatically created packages, which we do not
want to cache and that do not persist.
Args:
repository_data (dict): Repository data, see class docstring.
Returns:
`MemoryPackageRepository` object. | ['Create', 'a', 'standalone', 'in', '-', 'memory', 'repository', '.'] | train | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rezplugins/package_repository/memory.py#L134-L152 |
4,462 | a1ezzz/wasp-general | wasp_general/uri.py | WURIComponentVerifier.validate | def validate(self, uri):
""" Check an URI for compatibility with this specification. Return True if the URI is compatible.
:param uri: an URI to check
:return: bool
"""
requirement = self.requirement()
uri_component = uri.component(self.component())
if uri_component is None:
return requirement != WURIComponentVerifier.Requirement.required
if requirement == WURIComponentVerifier.Requirement.unsupported:
return False
re_obj = self.re_obj()
if re_obj is not None:
return re_obj.match(uri_component) is not None
return True | python | def validate(self, uri):
""" Check an URI for compatibility with this specification. Return True if the URI is compatible.
:param uri: an URI to check
:return: bool
"""
requirement = self.requirement()
uri_component = uri.component(self.component())
if uri_component is None:
return requirement != WURIComponentVerifier.Requirement.required
if requirement == WURIComponentVerifier.Requirement.unsupported:
return False
re_obj = self.re_obj()
if re_obj is not None:
return re_obj.match(uri_component) is not None
return True | ['def', 'validate', '(', 'self', ',', 'uri', ')', ':', 'requirement', '=', 'self', '.', 'requirement', '(', ')', 'uri_component', '=', 'uri', '.', 'component', '(', 'self', '.', 'component', '(', ')', ')', 'if', 'uri_component', 'is', 'None', ':', 'return', 'requirement', '!=', 'WURIComponentVerifier', '.', 'Requirement', '.', 'required', 'if', 'requirement', '==', 'WURIComponentVerifier', '.', 'Requirement', '.', 'unsupported', ':', 'return', 'False', 're_obj', '=', 'self', '.', 're_obj', '(', ')', 'if', 're_obj', 'is', 'not', 'None', ':', 'return', 're_obj', '.', 'match', '(', 'uri_component', ')', 'is', 'not', 'None', 'return', 'True'] | Check an URI for compatibility with this specification. Return True if the URI is compatible.
:param uri: an URI to check
:return: bool | ['Check', 'an', 'URI', 'for', 'compatibility', 'with', 'this', 'specification', '.', 'Return', 'True', 'if', 'the', 'URI', 'is', 'compatible', '.'] | train | https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/uri.py#L537-L555 |
4,463 | pypa/pipenv | pipenv/vendor/urllib3/connection.py | HTTPConnection.request_chunked | def request_chunked(self, method, url, body=None, headers=None):
"""
Alternative to the common request method, which sends the
body with chunked encoding and not as one block
"""
headers = HTTPHeaderDict(headers if headers is not None else {})
skip_accept_encoding = 'accept-encoding' in headers
skip_host = 'host' in headers
self.putrequest(
method,
url,
skip_accept_encoding=skip_accept_encoding,
skip_host=skip_host
)
for header, value in headers.items():
self.putheader(header, value)
if 'transfer-encoding' not in headers:
self.putheader('Transfer-Encoding', 'chunked')
self.endheaders()
if body is not None:
stringish_types = six.string_types + (bytes,)
if isinstance(body, stringish_types):
body = (body,)
for chunk in body:
if not chunk:
continue
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf8')
len_str = hex(len(chunk))[2:]
self.send(len_str.encode('utf-8'))
self.send(b'\r\n')
self.send(chunk)
self.send(b'\r\n')
# After the if clause, to always have a closed body
self.send(b'0\r\n\r\n') | python | def request_chunked(self, method, url, body=None, headers=None):
"""
Alternative to the common request method, which sends the
body with chunked encoding and not as one block
"""
headers = HTTPHeaderDict(headers if headers is not None else {})
skip_accept_encoding = 'accept-encoding' in headers
skip_host = 'host' in headers
self.putrequest(
method,
url,
skip_accept_encoding=skip_accept_encoding,
skip_host=skip_host
)
for header, value in headers.items():
self.putheader(header, value)
if 'transfer-encoding' not in headers:
self.putheader('Transfer-Encoding', 'chunked')
self.endheaders()
if body is not None:
stringish_types = six.string_types + (bytes,)
if isinstance(body, stringish_types):
body = (body,)
for chunk in body:
if not chunk:
continue
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf8')
len_str = hex(len(chunk))[2:]
self.send(len_str.encode('utf-8'))
self.send(b'\r\n')
self.send(chunk)
self.send(b'\r\n')
# After the if clause, to always have a closed body
self.send(b'0\r\n\r\n') | ['def', 'request_chunked', '(', 'self', ',', 'method', ',', 'url', ',', 'body', '=', 'None', ',', 'headers', '=', 'None', ')', ':', 'headers', '=', 'HTTPHeaderDict', '(', 'headers', 'if', 'headers', 'is', 'not', 'None', 'else', '{', '}', ')', 'skip_accept_encoding', '=', "'accept-encoding'", 'in', 'headers', 'skip_host', '=', "'host'", 'in', 'headers', 'self', '.', 'putrequest', '(', 'method', ',', 'url', ',', 'skip_accept_encoding', '=', 'skip_accept_encoding', ',', 'skip_host', '=', 'skip_host', ')', 'for', 'header', ',', 'value', 'in', 'headers', '.', 'items', '(', ')', ':', 'self', '.', 'putheader', '(', 'header', ',', 'value', ')', 'if', "'transfer-encoding'", 'not', 'in', 'headers', ':', 'self', '.', 'putheader', '(', "'Transfer-Encoding'", ',', "'chunked'", ')', 'self', '.', 'endheaders', '(', ')', 'if', 'body', 'is', 'not', 'None', ':', 'stringish_types', '=', 'six', '.', 'string_types', '+', '(', 'bytes', ',', ')', 'if', 'isinstance', '(', 'body', ',', 'stringish_types', ')', ':', 'body', '=', '(', 'body', ',', ')', 'for', 'chunk', 'in', 'body', ':', 'if', 'not', 'chunk', ':', 'continue', 'if', 'not', 'isinstance', '(', 'chunk', ',', 'bytes', ')', ':', 'chunk', '=', 'chunk', '.', 'encode', '(', "'utf8'", ')', 'len_str', '=', 'hex', '(', 'len', '(', 'chunk', ')', ')', '[', '2', ':', ']', 'self', '.', 'send', '(', 'len_str', '.', 'encode', '(', "'utf-8'", ')', ')', 'self', '.', 'send', '(', "b'\\r\\n'", ')', 'self', '.', 'send', '(', 'chunk', ')', 'self', '.', 'send', '(', "b'\\r\\n'", ')', '# After the if clause, to always have a closed body', 'self', '.', 'send', '(', "b'0\\r\\n\\r\\n'", ')'] | Alternative to the common request method, which sends the
body with chunked encoding and not as one block | ['Alternative', 'to', 'the', 'common', 'request', 'method', 'which', 'sends', 'the', 'body', 'with', 'chunked', 'encoding', 'and', 'not', 'as', 'one', 'block'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/urllib3/connection.py#L184-L220 |
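A hedged usage sketch of the chunked path above, assuming a regular urllib3 install rather than pipenv's vendored copy; the host and generator body are placeholders:

from urllib3.connection import HTTPConnection

def body_chunks():
    yield b"first chunk"
    yield b"second chunk"

conn = HTTPConnection("httpbin.org", port=80)
# Each yielded chunk is sent as "<hex length>\r\n<chunk>\r\n", ending with "0\r\n\r\n".
conn.request_chunked("POST", "/post", body=body_chunks())
response = conn.getresponse()
print(response.status)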
4,464 | smarie/python-parsyfiles | parsyfiles/parsing_registries.py | insert_element_to_dict_of_dicts | def insert_element_to_dict_of_dicts(dict_of_dicts: Dict[str, Dict[str, str]], first_key: str, second_key: str, contents):
"""
Utility method
:param dict_of_dicts:
:param first_key:
:param second_key:
:param contents:
:return:
"""
if first_key not in dict_of_dicts.keys():
dict_of_dicts[first_key] = {second_key: contents}
else:
if second_key not in dict_of_dicts[first_key].keys():
dict_of_dicts[first_key][second_key] = contents
else:
warn('Overriding contents for ' + first_key + '/' + second_key)
dict_of_dicts[first_key][second_key] = contents | python | def insert_element_to_dict_of_dicts(dict_of_dicts: Dict[str, Dict[str, str]], first_key: str, second_key: str, contents):
"""
Utility method
:param dict_of_dicts:
:param first_key:
:param second_key:
:param contents:
:return:
"""
if first_key not in dict_of_dicts.keys():
dict_of_dicts[first_key] = {second_key: contents}
else:
if second_key not in dict_of_dicts[first_key].keys():
dict_of_dicts[first_key][second_key] = contents
else:
warn('Overriding contents for ' + first_key + '/' + second_key)
dict_of_dicts[first_key][second_key] = contents | ['def', 'insert_element_to_dict_of_dicts', '(', 'dict_of_dicts', ':', 'Dict', '[', 'str', ',', 'Dict', '[', 'str', ',', 'str', ']', ']', ',', 'first_key', ':', 'str', ',', 'second_key', ':', 'str', ',', 'contents', ')', ':', 'if', 'first_key', 'not', 'in', 'dict_of_dicts', '.', 'keys', '(', ')', ':', 'dict_of_dicts', '[', 'first_key', ']', '=', '{', 'second_key', ':', 'contents', '}', 'else', ':', 'if', 'second_key', 'not', 'in', 'dict_of_dicts', '[', 'first_key', ']', '.', 'keys', '(', ')', ':', 'dict_of_dicts', '[', 'first_key', ']', '[', 'second_key', ']', '=', 'contents', 'else', ':', 'warn', '(', "'Overriding contents for '", '+', 'first_key', '+', "'/'", '+', 'second_key', ')', 'dict_of_dicts', '[', 'first_key', ']', '[', 'second_key', ']', '=', 'contents'] | Utility method
:param dict_of_dicts:
:param first_key:
:param second_key:
:param contents:
:return: | ['Utility', 'method'] | train | https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L213-L231 |
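A quick usage sketch of the helper above (assuming the package is importable; keys and values are illustrative):

from parsyfiles.parsing_registries import insert_element_to_dict_of_dicts

registry = {}
insert_element_to_dict_of_dicts(registry, "csv", "pandas", "read_df_from_csv")
insert_element_to_dict_of_dicts(registry, "csv", "builtin", "read_list_from_csv")
# Re-inserting the same (first_key, second_key) pair warns, then overrides:
insert_element_to_dict_of_dicts(registry, "csv", "pandas", "read_df_from_csv_v2")
# registry == {"csv": {"pandas": "read_df_from_csv_v2", "builtin": "read_list_from_csv"}}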
4,465 | peepall/FancyLogger | FancyLogger/__init__.py | FancyLogger.set_task_object | def set_task_object(self,
task_id,
task_progress_object):
"""
Defines a new progress bar with the given information using a TaskProgress object.
:param task_id: Unique identifier for this progress bar. Will erase if already existing.
:param task_progress_object: TaskProgress object holding the progress bar information.
"""
self.set_task(task_id=task_id,
total=task_progress_object.total,
prefix=task_progress_object.prefix,
suffix=task_progress_object.suffix,
decimals=task_progress_object.decimals,
bar_length=task_progress_object.bar_length,
keep_alive=task_progress_object.keep_alive,
display_time=task_progress_object.display_time) | python | def set_task_object(self,
task_id,
task_progress_object):
"""
Defines a new progress bar with the given information using a TaskProgress object.
:param task_id: Unique identifier for this progress bar. Will erase if already existing.
:param task_progress_object: TaskProgress object holding the progress bar information.
"""
self.set_task(task_id=task_id,
total=task_progress_object.total,
prefix=task_progress_object.prefix,
suffix=task_progress_object.suffix,
decimals=task_progress_object.decimals,
bar_length=task_progress_object.bar_length,
keep_alive=task_progress_object.keep_alive,
display_time=task_progress_object.display_time) | ['def', 'set_task_object', '(', 'self', ',', 'task_id', ',', 'task_progress_object', ')', ':', 'self', '.', 'set_task', '(', 'task_id', '=', 'task_id', ',', 'total', '=', 'task_progress_object', '.', 'total', ',', 'prefix', '=', 'task_progress_object', '.', 'prefix', ',', 'suffix', '=', 'task_progress_object', '.', 'suffix', ',', 'decimals', '=', 'task_progress_object', '.', 'decimals', ',', 'bar_length', '=', 'task_progress_object', '.', 'bar_length', ',', 'keep_alive', '=', 'task_progress_object', '.', 'keep_alive', ',', 'display_time', '=', 'task_progress_object', '.', 'display_time', ')'] | Defines a new progress bar with the given information using a TaskProgress object.
:param task_id: Unique identifier for this progress bar. Will erase if already existing.
:param task_progress_object: TaskProgress object holding the progress bar information. | ['Defines', 'a', 'new', 'progress', 'bar', 'with', 'the', 'given', 'information', 'using', 'a', 'TaskProgress', 'object', '.', ':', 'param', 'task_id', ':', 'Unique', 'identifier', 'for', 'this', 'progress', 'bar', '.', 'Will', 'erase', 'if', 'already', 'existing', '.', ':', 'param', 'task_progress_object', ':', 'TaskProgress', 'object', 'holding', 'the', 'progress', 'bar', 'information', '.'] | train | https://github.com/peepall/FancyLogger/blob/7f13f1397e76ed768fb6b6358194118831fafc6d/FancyLogger/__init__.py#L288-L303 |
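A hedged usage sketch; it assumes `TaskProgress` is importable from FancyLogger and accepts the same keyword arguments that `set_task_object` reads above:

from FancyLogger import FancyLogger, TaskProgress  # assumed import layout

logger = FancyLogger()
progress = TaskProgress(total=100, prefix="Download", suffix="files",
                        decimals=0, bar_length=40, keep_alive=False,
                        display_time=True)
logger.set_task_object(task_id="download", task_progress_object=progress)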
4,466 | choderalab/pymbar | pymbar/utils.py | logsumexp | def logsumexp(a, axis=None, b=None, use_numexpr=True):
"""Compute the log of the sum of exponentials of input elements.
Parameters
----------
a : array_like
Input array.
axis : None or int, optional, default=None
Axis or axes over which the sum is taken. By default `axis` is None,
and all elements are summed.
b : array-like, optional
Scaling factor for exp(`a`) must be of the same shape as `a` or
broadcastable to `a`.
use_numexpr : bool, optional, default=True
If True, use the numexpr library to speed up the calculation, which
can give a 2-4X speedup when working with large arrays.
Returns
-------
res : ndarray
The result, ``log(sum(exp(a)))`` calculated in a numerically
more stable way. If `b` is given then ``log(sum(b*exp(a)))``
is returned.
See Also
--------
numpy.logaddexp, numpy.logaddexp2, scipy.misc.logsumexp (soon to be replaced with scipy.special.logsumexp)
Notes
-----
This is based on scipy.misc.logsumexp but with optional numexpr
support for improved performance.
"""
a = np.asarray(a)
a_max = np.amax(a, axis=axis, keepdims=True)
if a_max.ndim > 0:
a_max[~np.isfinite(a_max)] = 0
elif not np.isfinite(a_max):
a_max = 0
if b is not None:
b = np.asarray(b)
if use_numexpr and HAVE_NUMEXPR:
out = np.log(numexpr.evaluate("b * exp(a - a_max)").sum(axis))
else:
out = np.log(np.sum(b * np.exp(a - a_max), axis=axis))
else:
if use_numexpr and HAVE_NUMEXPR:
out = np.log(numexpr.evaluate("exp(a - a_max)").sum(axis))
else:
out = np.log(np.sum(np.exp(a - a_max), axis=axis))
a_max = np.squeeze(a_max, axis=axis)
out += a_max
return out | python | def logsumexp(a, axis=None, b=None, use_numexpr=True):
"""Compute the log of the sum of exponentials of input elements.
Parameters
----------
a : array_like
Input array.
axis : None or int, optional, default=None
Axis or axes over which the sum is taken. By default `axis` is None,
and all elements are summed.
b : array-like, optional
Scaling factor for exp(`a`) must be of the same shape as `a` or
broadcastable to `a`.
use_numexpr : bool, optional, default=True
If True, use the numexpr library to speed up the calculation, which
can give a 2-4X speedup when working with large arrays.
Returns
-------
res : ndarray
The result, ``log(sum(exp(a)))`` calculated in a numerically
more stable way. If `b` is given then ``log(sum(b*exp(a)))``
is returned.
See Also
--------
numpy.logaddexp, numpy.logaddexp2, scipy.misc.logsumexp (soon to be replaced with scipy.special.logsumexp)
Notes
-----
This is based on scipy.misc.logsumexp but with optional numexpr
support for improved performance.
"""
a = np.asarray(a)
a_max = np.amax(a, axis=axis, keepdims=True)
if a_max.ndim > 0:
a_max[~np.isfinite(a_max)] = 0
elif not np.isfinite(a_max):
a_max = 0
if b is not None:
b = np.asarray(b)
if use_numexpr and HAVE_NUMEXPR:
out = np.log(numexpr.evaluate("b * exp(a - a_max)").sum(axis))
else:
out = np.log(np.sum(b * np.exp(a - a_max), axis=axis))
else:
if use_numexpr and HAVE_NUMEXPR:
out = np.log(numexpr.evaluate("exp(a - a_max)").sum(axis))
else:
out = np.log(np.sum(np.exp(a - a_max), axis=axis))
a_max = np.squeeze(a_max, axis=axis)
out += a_max
return out | ['def', 'logsumexp', '(', 'a', ',', 'axis', '=', 'None', ',', 'b', '=', 'None', ',', 'use_numexpr', '=', 'True', ')', ':', 'a', '=', 'np', '.', 'asarray', '(', 'a', ')', 'a_max', '=', 'np', '.', 'amax', '(', 'a', ',', 'axis', '=', 'axis', ',', 'keepdims', '=', 'True', ')', 'if', 'a_max', '.', 'ndim', '>', '0', ':', 'a_max', '[', '~', 'np', '.', 'isfinite', '(', 'a_max', ')', ']', '=', '0', 'elif', 'not', 'np', '.', 'isfinite', '(', 'a_max', ')', ':', 'a_max', '=', '0', 'if', 'b', 'is', 'not', 'None', ':', 'b', '=', 'np', '.', 'asarray', '(', 'b', ')', 'if', 'use_numexpr', 'and', 'HAVE_NUMEXPR', ':', 'out', '=', 'np', '.', 'log', '(', 'numexpr', '.', 'evaluate', '(', '"b * exp(a - a_max)"', ')', '.', 'sum', '(', 'axis', ')', ')', 'else', ':', 'out', '=', 'np', '.', 'log', '(', 'np', '.', 'sum', '(', 'b', '*', 'np', '.', 'exp', '(', 'a', '-', 'a_max', ')', ',', 'axis', '=', 'axis', ')', ')', 'else', ':', 'if', 'use_numexpr', 'and', 'HAVE_NUMEXPR', ':', 'out', '=', 'np', '.', 'log', '(', 'numexpr', '.', 'evaluate', '(', '"exp(a - a_max)"', ')', '.', 'sum', '(', 'axis', ')', ')', 'else', ':', 'out', '=', 'np', '.', 'log', '(', 'np', '.', 'sum', '(', 'np', '.', 'exp', '(', 'a', '-', 'a_max', ')', ',', 'axis', '=', 'axis', ')', ')', 'a_max', '=', 'np', '.', 'squeeze', '(', 'a_max', ',', 'axis', '=', 'axis', ')', 'out', '+=', 'a_max', 'return', 'out'] | Compute the log of the sum of exponentials of input elements.
Parameters
----------
a : array_like
Input array.
axis : None or int, optional, default=None
Axis or axes over which the sum is taken. By default `axis` is None,
and all elements are summed.
b : array-like, optional
Scaling factor for exp(`a`) must be of the same shape as `a` or
broadcastable to `a`.
use_numexpr : bool, optional, default=True
If True, use the numexpr library to speed up the calculation, which
can give a 2-4X speedup when working with large arrays.
Returns
-------
res : ndarray
The result, ``log(sum(exp(a)))`` calculated in a numerically
more stable way. If `b` is given then ``log(sum(b*exp(a)))``
is returned.
See Also
--------
numpy.logaddexp, numpy.logaddexp2, scipy.misc.logsumexp (soon to be replaced with scipy.special.logsumexp)
Notes
-----
This is based on scipy.misc.logsumexp but with optional numexpr
support for improved performance. | ['Compute', 'the', 'log', 'of', 'the', 'sum', 'of', 'exponentials', 'of', 'input', 'elements', '.'] | train | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/utils.py#L271-L329 |
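A small numerical check of the stability claim (values chosen so that a naive computation overflows; numbers are illustrative):

import numpy as np
from pymbar.utils import logsumexp  # assuming pymbar is installed

a = np.array([1000.0, 1000.5, 999.0])
naive = np.log(np.sum(np.exp(a)))      # overflows: exp(1000) -> inf
stable = logsumexp(a)                  # ~1001.1, computed via the max-shift trick above
weighted = logsumexp(a, b=np.array([0.2, 0.5, 0.3]))  # log(sum(b * exp(a)))
print(naive, stable, weighted)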
4,467 | tanghaibao/jcvi | jcvi/apps/phylo.py | smart_reroot | def smart_reroot(treefile, outgroupfile, outfile, format=0):
"""
simple function to reroot Newick format tree using ete2
Tree reading format options see here:
http://packages.python.org/ete2/tutorial/tutorial_trees.html#reading-newick-trees
"""
tree = Tree(treefile, format=format)
leaves = [t.name for t in tree.get_leaves()][::-1]
outgroup = []
for o in must_open(outgroupfile):
o = o.strip()
for leaf in leaves:
if leaf[:len(o)] == o:
outgroup.append(leaf)
if outgroup:
break
if not outgroup:
print("Outgroup not found. Tree {0} cannot be rerooted.".format(treefile), file=sys.stderr)
return treefile
try:
tree.set_outgroup(tree.get_common_ancestor(*outgroup))
except ValueError:
assert type(outgroup) == list
outgroup = outgroup[0]
tree.set_outgroup(outgroup)
tree.write(outfile=outfile, format=format)
logging.debug("Rerooted tree printed to {0}".format(outfile))
return outfile | python | def smart_reroot(treefile, outgroupfile, outfile, format=0):
"""
simple function to reroot Newick format tree using ete2
Tree reading format options see here:
http://packages.python.org/ete2/tutorial/tutorial_trees.html#reading-newick-trees
"""
tree = Tree(treefile, format=format)
leaves = [t.name for t in tree.get_leaves()][::-1]
outgroup = []
for o in must_open(outgroupfile):
o = o.strip()
for leaf in leaves:
if leaf[:len(o)] == o:
outgroup.append(leaf)
if outgroup:
break
if not outgroup:
print("Outgroup not found. Tree {0} cannot be rerooted.".format(treefile), file=sys.stderr)
return treefile
try:
tree.set_outgroup(tree.get_common_ancestor(*outgroup))
except ValueError:
assert type(outgroup) == list
outgroup = outgroup[0]
tree.set_outgroup(outgroup)
tree.write(outfile=outfile, format=format)
logging.debug("Rerooted tree printed to {0}".format(outfile))
return outfile | ['def', 'smart_reroot', '(', 'treefile', ',', 'outgroupfile', ',', 'outfile', ',', 'format', '=', '0', ')', ':', 'tree', '=', 'Tree', '(', 'treefile', ',', 'format', '=', 'format', ')', 'leaves', '=', '[', 't', '.', 'name', 'for', 't', 'in', 'tree', '.', 'get_leaves', '(', ')', ']', '[', ':', ':', '-', '1', ']', 'outgroup', '=', '[', ']', 'for', 'o', 'in', 'must_open', '(', 'outgroupfile', ')', ':', 'o', '=', 'o', '.', 'strip', '(', ')', 'for', 'leaf', 'in', 'leaves', ':', 'if', 'leaf', '[', ':', 'len', '(', 'o', ')', ']', '==', 'o', ':', 'outgroup', '.', 'append', '(', 'leaf', ')', 'if', 'outgroup', ':', 'break', 'if', 'not', 'outgroup', ':', 'print', '(', '"Outgroup not found. Tree {0} cannot be rerooted."', '.', 'format', '(', 'treefile', ')', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'return', 'treefile', 'try', ':', 'tree', '.', 'set_outgroup', '(', 'tree', '.', 'get_common_ancestor', '(', '*', 'outgroup', ')', ')', 'except', 'ValueError', ':', 'assert', 'type', '(', 'outgroup', ')', '==', 'list', 'outgroup', '=', 'outgroup', '[', '0', ']', 'tree', '.', 'set_outgroup', '(', 'outgroup', ')', 'tree', '.', 'write', '(', 'outfile', '=', 'outfile', ',', 'format', '=', 'format', ')', 'logging', '.', 'debug', '(', '"Rerooted tree printed to {0}"', '.', 'format', '(', 'outfile', ')', ')', 'return', 'outfile'] | simple function to reroot Newick format tree using ete2
Tree reading format options see here:
http://packages.python.org/ete2/tutorial/tutorial_trees.html#reading-newick-trees | ['simple', 'function', 'to', 'reroot', 'Newick', 'format', 'tree', 'using', 'ete2'] | train | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/phylo.py#L193-L224 |
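A hedged call sketch (file names are placeholders; the outgroup file is expected to list leaf-name prefixes, one per line, as the matching loop above implies):

from jcvi.apps.phylo import smart_reroot  # assuming jcvi is installed

rerooted = smart_reroot("genetree.nwk", "outgroup.txt", "genetree.rerooted.nwk", format=0)
print("rerooted tree written to", rerooted)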
4,468 | geographika/mappyfile | mappyfile/pprint.py | PrettyPrinter.is_hidden_container | def is_hidden_container(self, key, val):
"""
The key is not one of the Mapfile keywords, and its
values are a list
"""
if key in ("layers", "classes", "styles", "symbols", "labels",
"outputformats", "features", "scaletokens",
"composites") and isinstance(val, list):
return True
else:
return False | python | def is_hidden_container(self, key, val):
"""
The key is not one of the Mapfile keywords, and its
values are a list
"""
if key in ("layers", "classes", "styles", "symbols", "labels",
"outputformats", "features", "scaletokens",
"composites") and isinstance(val, list):
return True
else:
return False | ['def', 'is_hidden_container', '(', 'self', ',', 'key', ',', 'val', ')', ':', 'if', 'key', 'in', '(', '"layers"', ',', '"classes"', ',', '"styles"', ',', '"symbols"', ',', '"labels"', ',', '"outputformats"', ',', '"features"', ',', '"scaletokens"', ',', '"composites"', ')', 'and', 'isinstance', '(', 'val', ',', 'list', ')', ':', 'return', 'True', 'else', ':', 'return', 'False'] | The key is not one of the Mapfile keywords, and its
values are a list | ['The', 'key', 'is', 'not', 'one', 'of', 'the', 'Mapfile', 'keywords', 'and', 'its', 'values', 'are', 'a', 'list'] | train | https://github.com/geographika/mappyfile/blob/aecbc5e66ec06896bc4c5db41313503468829d00/mappyfile/pprint.py#L273-L284 |
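A standalone restatement of the rule above with a couple of checks (module-level sketch, not the PrettyPrinter method itself):

HIDDEN_CONTAINER_KEYS = {"layers", "classes", "styles", "symbols", "labels",
                         "outputformats", "features", "scaletokens", "composites"}

def is_hidden_container(key, val):
    # Same rule as the method above: a known container keyword whose value is a list.
    return key in HIDDEN_CONTAINER_KEYS and isinstance(val, list)

print(is_hidden_container("layers", [{"name": "roads"}]))     # True
print(is_hidden_container("layers", "all"))                   # False: value is not a list
print(is_hidden_container("projection", ["init=epsg:4326"]))  # False: not a container key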
4,469 | PlaidWeb/Publ | publ/maintenance.py | Maintenance.run | def run(self, force=False):
""" Run all pending tasks; 'force' will run all tasks whether they're
pending or not. """
now = time.time()
for func, spec in self.tasks.items():
if force or now >= spec.get('next_run', 0):
func()
spec['next_run'] = now + spec['interval'] | python | def run(self, force=False):
""" Run all pending tasks; 'force' will run all tasks whether they're
pending or not. """
now = time.time()
for func, spec in self.tasks.items():
if force or now >= spec.get('next_run', 0):
func()
spec['next_run'] = now + spec['interval'] | ['def', 'run', '(', 'self', ',', 'force', '=', 'False', ')', ':', 'now', '=', 'time', '.', 'time', '(', ')', 'for', 'func', ',', 'spec', 'in', 'self', '.', 'tasks', '.', 'items', '(', ')', ':', 'if', 'force', 'or', 'now', '>=', 'spec', '.', 'get', '(', "'next_run'", ',', '0', ')', ':', 'func', '(', ')', 'spec', '[', "'next_run'", ']', '=', 'now', '+', 'spec', '[', "'interval'", ']'] | Run all pending tasks; 'force' will run all tasks whether they're
pending or not. | ['Run', 'all', 'pending', 'tasks', ';', 'force', 'will', 'run', 'all', 'tasks', 'whether', 'they', 're', 'pending', 'or', 'not', '.'] | train | https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/maintenance.py#L16-L23 |
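A standalone sketch of the same interval-gating idea outside the Maintenance class (the task registration shown here is hypothetical, not the Publ API):

import time

tasks = {}

def schedule(func, interval):
    tasks[func] = {'interval': interval}

def run_pending(force=False):
    # Same rule as Maintenance.run: fire when forced or when next_run has passed,
    # then push next_run one interval into the future.
    now = time.time()
    for func, spec in tasks.items():
        if force or now >= spec.get('next_run', 0):
            func()
            spec['next_run'] = now + spec['interval']

schedule(lambda: print("prune expired entries"), interval=300)  # every 5 minutes
run_pending()   # first call always fires; later calls wait out the interval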
4,470 | synw/dataswim | dataswim/data/clean.py | Clean.fdate | def fdate(self, *cols, precision: str="S", format: str=None):
"""
Convert column values to formatted date string
:param \*cols: names of the columns
:type \*cols: str, at least one
:param precision: time precision: Y, M, D, H, Min S, defaults to "S"
:type precision: str, optional
:param format: python date format, defaults to None
:type format: str, optional
:example: ``ds.fdate("mycol1", "mycol2", precision)``
"""
def formatdate(row):
return row.strftime(format)
def convert(row):
encoded = '%Y-%m-%d %H:%M:%S'
if precision == "Min":
encoded = '%Y-%m-%d %H:%M'
elif precision == "H":
encoded = '%Y-%m-%d %H'
elif precision == "D":
encoded = '%Y-%m-%d'
elif precision == "M":
encoded = '%Y-%m'
elif precision == "Y":
encoded = '%Y'
return row.strftime(encoded)
try:
for f in cols:
try:
if format is None:
self.df[f] = pd.to_datetime(self.df[f]).apply(convert)
else:
self.df[f] = pd.to_datetime(
self.df[f]).apply(formatdate)
except ValueError as e:
self.err(e, "Can not convert date")
return
except KeyError:
self.warning("Can not find colums " + " ".join(cols))
return
except Exception as e:
self.err(e, "Can not process date col") | python | def fdate(self, *cols, precision: str="S", format: str=None):
"""
Convert column values to formatted date string
:param \*cols: names of the columns
:type \*cols: str, at least one
:param precision: time precision: Y, M, D, H, Min S, defaults to "S"
:type precision: str, optional
:param format: python date format, defaults to None
:type format: str, optional
:example: ``ds.fdate("mycol1", "mycol2", precision)``
"""
def formatdate(row):
return row.strftime(format)
def convert(row):
encoded = '%Y-%m-%d %H:%M:%S'
if precision == "Min":
encoded = '%Y-%m-%d %H:%M'
elif precision == "H":
encoded = '%Y-%m-%d %H'
elif precision == "D":
encoded = '%Y-%m-%d'
elif precision == "M":
encoded = '%Y-%m'
elif precision == "Y":
encoded = '%Y'
return row.strftime(encoded)
try:
for f in cols:
try:
if format is None:
self.df[f] = pd.to_datetime(self.df[f]).apply(convert)
else:
self.df[f] = pd.to_datetime(
self.df[f]).apply(formatdate)
except ValueError as e:
self.err(e, "Can not convert date")
return
except KeyError:
self.warning("Can not find colums " + " ".join(cols))
return
except Exception as e:
self.err(e, "Can not process date col") | ['def', 'fdate', '(', 'self', ',', '*', 'cols', ',', 'precision', ':', 'str', '=', '"S"', ',', 'format', ':', 'str', '=', 'None', ')', ':', 'def', 'formatdate', '(', 'row', ')', ':', 'return', 'row', '.', 'strftime', '(', 'format', ')', 'def', 'convert', '(', 'row', ')', ':', 'encoded', '=', "'%Y-%m-%d %H:%M:%S'", 'if', 'precision', '==', '"Min"', ':', 'encoded', '=', "'%Y-%m-%d %H:%M'", 'elif', 'precision', '==', '"H"', ':', 'encoded', '=', "'%Y-%m-%d %H'", 'elif', 'precision', '==', '"D"', ':', 'encoded', '=', "'%Y-%m-%d'", 'elif', 'precision', '==', '"M"', ':', 'encoded', '=', "'%Y-%m'", 'elif', 'precision', '==', '"Y"', ':', 'encoded', '=', "'%Y'", 'return', 'row', '.', 'strftime', '(', 'encoded', ')', 'try', ':', 'for', 'f', 'in', 'cols', ':', 'try', ':', 'if', 'format', 'is', 'None', ':', 'self', '.', 'df', '[', 'f', ']', '=', 'pd', '.', 'to_datetime', '(', 'self', '.', 'df', '[', 'f', ']', ')', '.', 'apply', '(', 'convert', ')', 'else', ':', 'self', '.', 'df', '[', 'f', ']', '=', 'pd', '.', 'to_datetime', '(', 'self', '.', 'df', '[', 'f', ']', ')', '.', 'apply', '(', 'formatdate', ')', 'except', 'ValueError', 'as', 'e', ':', 'self', '.', 'err', '(', 'e', ',', '"Can not convert date"', ')', 'return', 'except', 'KeyError', ':', 'self', '.', 'warning', '(', '"Can not find colums "', '+', '" "', '.', 'join', '(', 'cols', ')', ')', 'return', 'except', 'Exception', 'as', 'e', ':', 'self', '.', 'err', '(', 'e', ',', '"Can not process date col"', ')'] | Convert column values to formated date string
:param \*cols: names of the columns
:type \*cols: str, at least one
:param precision: time precision: Y, M, D, H, Min S, defaults to "S"
:type precision: str, optional
:param format: python date format, defaults to None
:type format: str, optional
:example: ``ds.fdate("mycol1", "mycol2", precision)`` | ['Convert', 'column', 'values', 'to', 'formated', 'date', 'string'] | train | https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/clean.py#L200-L246 |
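A short pandas-only illustration of how the precision argument maps onto strftime output (the sample timestamp is made up):

import pandas as pd

ts = pd.to_datetime(pd.Series(["2019-03-04 15:42:07"]))
print(ts.dt.strftime("%Y-%m-%d %H:%M:%S")[0])  # precision "S"   -> 2019-03-04 15:42:07
print(ts.dt.strftime("%Y-%m-%d %H:%M")[0])     # precision "Min" -> 2019-03-04 15:42
print(ts.dt.strftime("%Y-%m-%d")[0])           # precision "D"   -> 2019-03-04
print(ts.dt.strftime("%Y-%m")[0])              # precision "M"   -> 2019-03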
4,471 | Mangopay/mangopay2-python-sdk | mangopay/compat.py | python_2_unicode_compatible | def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if six.PY2:
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass | python | def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if six.PY2:
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass | ['def', 'python_2_unicode_compatible', '(', 'klass', ')', ':', 'if', 'six', '.', 'PY2', ':', 'klass', '.', '__unicode__', '=', 'klass', '.', '__str__', 'klass', '.', '__str__', '=', 'lambda', 'self', ':', 'self', '.', '__unicode__', '(', ')', '.', 'encode', '(', "'utf-8'", ')', 'return', 'klass'] | A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class. | ['A', 'decorator', 'that', 'defines', '__unicode__', 'and', '__str__', 'methods', 'under', 'Python', '2', '.', 'Under', 'Python', '3', 'it', 'does', 'nothing', '.'] | train | https://github.com/Mangopay/mangopay2-python-sdk/blob/9bbbc0f797581c9fdf7da5a70879bee6643024b7/mangopay/compat.py#L5-L16 |
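A typical usage sketch of the decorator above (the example class is made up; the import assumes the package layout shown in the row):

from mangopay.compat import python_2_unicode_compatible

@python_2_unicode_compatible
class Wallet(object):
    def __init__(self, owner):
        self.owner = owner

    def __str__(self):
        return u"Wallet of %s" % self.owner

# Under Python 2 the decorator moves __str__ to __unicode__ and re-points __str__
# at a UTF-8 encoded version; under Python 3 the class is returned unchanged.
print(str(Wallet("Alice")))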
4,472 | mitsei/dlkit | dlkit/json_/osid/objects.py | OsidObjectForm._init_map | def _init_map(self, record_types=None):
"""Initialize map for form"""
OsidForm._init_map(self)
self._my_map['displayName'] = dict(self._display_name_default)
self._my_map['description'] = dict(self._description_default)
self._my_map['genusTypeId'] = self._genus_type_default
OsidExtensibleForm._init_map(self, record_types) | python | def _init_map(self, record_types=None):
"""Initialize map for form"""
OsidForm._init_map(self)
self._my_map['displayName'] = dict(self._display_name_default)
self._my_map['description'] = dict(self._description_default)
self._my_map['genusTypeId'] = self._genus_type_default
OsidExtensibleForm._init_map(self, record_types) | ['def', '_init_map', '(', 'self', ',', 'record_types', '=', 'None', ')', ':', 'OsidForm', '.', '_init_map', '(', 'self', ')', 'self', '.', '_my_map', '[', "'displayName'", ']', '=', 'dict', '(', 'self', '.', '_display_name_default', ')', 'self', '.', '_my_map', '[', "'description'", ']', '=', 'dict', '(', 'self', '.', '_description_default', ')', 'self', '.', '_my_map', '[', "'genusTypeId'", ']', '=', 'self', '.', '_genus_type_default', 'OsidExtensibleForm', '.', '_init_map', '(', 'self', ',', 'record_types', ')'] | Initialize map for form | ['Initialize', 'map', 'for', 'form'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/osid/objects.py#L1791-L1797 |
4,473 | MillionIntegrals/vel | vel/rl/algo/policy_gradient/a2c.py | A2CPolicyGradient.process_rollout | def process_rollout(self, batch_info, rollout: Rollout):
""" Process rollout for ALGO before any chunking/shuffling """
assert isinstance(rollout, Trajectories), "A2C requires trajectory rollouts"
advantages = discount_bootstrap_gae(
rewards_buffer=rollout.transition_tensors['rewards'],
dones_buffer=rollout.transition_tensors['dones'],
values_buffer=rollout.transition_tensors['values'],
final_values=rollout.rollout_tensors['final_values'],
discount_factor=self.discount_factor,
gae_lambda=self.gae_lambda,
number_of_steps=rollout.num_steps
)
returns = advantages + rollout.transition_tensors['values']
rollout.transition_tensors['advantages'] = advantages
rollout.transition_tensors['returns'] = returns
return rollout | python | def process_rollout(self, batch_info, rollout: Rollout):
""" Process rollout for ALGO before any chunking/shuffling """
assert isinstance(rollout, Trajectories), "A2C requires trajectory rollouts"
advantages = discount_bootstrap_gae(
rewards_buffer=rollout.transition_tensors['rewards'],
dones_buffer=rollout.transition_tensors['dones'],
values_buffer=rollout.transition_tensors['values'],
final_values=rollout.rollout_tensors['final_values'],
discount_factor=self.discount_factor,
gae_lambda=self.gae_lambda,
number_of_steps=rollout.num_steps
)
returns = advantages + rollout.transition_tensors['values']
rollout.transition_tensors['advantages'] = advantages
rollout.transition_tensors['returns'] = returns
return rollout | ['def', 'process_rollout', '(', 'self', ',', 'batch_info', ',', 'rollout', ':', 'Rollout', ')', ':', 'assert', 'isinstance', '(', 'rollout', ',', 'Trajectories', ')', ',', '"A2C requires trajectory rollouts"', 'advantages', '=', 'discount_bootstrap_gae', '(', 'rewards_buffer', '=', 'rollout', '.', 'transition_tensors', '[', "'rewards'", ']', ',', 'dones_buffer', '=', 'rollout', '.', 'transition_tensors', '[', "'dones'", ']', ',', 'values_buffer', '=', 'rollout', '.', 'transition_tensors', '[', "'values'", ']', ',', 'final_values', '=', 'rollout', '.', 'rollout_tensors', '[', "'final_values'", ']', ',', 'discount_factor', '=', 'self', '.', 'discount_factor', ',', 'gae_lambda', '=', 'self', '.', 'gae_lambda', ',', 'number_of_steps', '=', 'rollout', '.', 'num_steps', ')', 'returns', '=', 'advantages', '+', 'rollout', '.', 'transition_tensors', '[', "'values'", ']', 'rollout', '.', 'transition_tensors', '[', "'advantages'", ']', '=', 'advantages', 'rollout', '.', 'transition_tensors', '[', "'returns'", ']', '=', 'returns', 'return', 'rollout'] | Process rollout for ALGO before any chunking/shuffling | ['Process', 'rollout', 'for', 'ALGO', 'before', 'any', 'chunking', '/', 'shuffling'] | train | https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/algo/policy_gradient/a2c.py#L20-L39 |
4,474 | codelv/enaml-native-cli | enamlnativecli/main.py | EnamlNativeCli.start | def start(self):
""" Run the commands"""
self.check_dependencies()
self.args = self.parser.parse_args()
# Python 3 doesn't set the cmd if no args are given
if not hasattr(self.args, 'cmd'):
self.parser.print_help()
return
cmd = self.args.cmd
try:
if cmd.app_dir_required and not self.in_app_directory:
raise EnvironmentError(
"'enaml-native {}' must be run within an app root "
"directory not: {}".format(cmd.title, os.getcwd()))
cmd.run(self.args)
except sh.ErrorReturnCode as e:
raise | python | def start(self):
""" Run the commands"""
self.check_dependencies()
self.args = self.parser.parse_args()
# Python 3 doesn't set the cmd if no args are given
if not hasattr(self.args, 'cmd'):
self.parser.print_help()
return
cmd = self.args.cmd
try:
if cmd.app_dir_required and not self.in_app_directory:
raise EnvironmentError(
"'enaml-native {}' must be run within an app root "
"directory not: {}".format(cmd.title, os.getcwd()))
cmd.run(self.args)
except sh.ErrorReturnCode as e:
raise | ['def', 'start', '(', 'self', ')', ':', 'self', '.', 'check_dependencies', '(', ')', 'self', '.', 'args', '=', 'self', '.', 'parser', '.', 'parse_args', '(', ')', "# Python 3 doesn't set the cmd if no args are given", 'if', 'not', 'hasattr', '(', 'self', '.', 'args', ',', "'cmd'", ')', ':', 'self', '.', 'parser', '.', 'print_help', '(', ')', 'return', 'cmd', '=', 'self', '.', 'args', '.', 'cmd', 'try', ':', 'if', 'cmd', '.', 'app_dir_required', 'and', 'not', 'self', '.', 'in_app_directory', ':', 'raise', 'EnvironmentError', '(', '"\'enaml-native {}\' must be run within an app root "', '"directory not: {}"', '.', 'format', '(', 'cmd', '.', 'title', ',', 'os', '.', 'getcwd', '(', ')', ')', ')', 'cmd', '.', 'run', '(', 'self', '.', 'args', ')', 'except', 'sh', '.', 'ErrorReturnCode', 'as', 'e', ':', 'raise'] | Run the commands | ['Run', 'the', 'commands'] | train | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1741-L1759 |
4,475 | estnltk/estnltk | estnltk/wordnet/eurown.py | Variant.addUsage_Label | def addUsage_Label(self,usage_label):
'''Appends one Usage_Label to usage_labels
'''
if isinstance(usage_label, Usage_Label):
self.usage_labels.append(usage_label)
else:
raise Usage_LabelError(
'usage_label Type should be Usage_Label, not %s' % type(
usage_label)
) | python | def addUsage_Label(self,usage_label):
'''Appends one Usage_Label to usage_labels
'''
if isinstance(usage_label, Usage_Label):
self.usage_labels.append(usage_label)
else:
raise Usage_LabelError(
'usage_label Type should be Usage_Label, not %s' % type(
usage_label)
) | ['def', 'addUsage_Label', '(', 'self', ',', 'usage_label', ')', ':', 'if', 'isinstance', '(', 'usage_label', ',', 'Usage_Label', ')', ':', 'self', '.', 'usage_labels', '.', 'append', '(', 'usage_label', ')', 'else', ':', 'raise', '(', 'Usage_LabelError', ',', "'usage_label Type should be Usage_Label, not %s'", '%', 'type', '(', 'usage_label', ')', ')'] | Appends one Usage_Label to usage_labels | ['Appends', 'one', 'Usage_Label', 'to', 'usage_labels'] | train | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/eurown.py#L1739-L1748 |
4,476 | modin-project/modin | modin/backends/pandas/query_compiler.py | PandasQueryCompiler._list_like_func | def _list_like_func(self, func, axis, *args, **kwargs):
"""Apply list-like function across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
func_prepared = self._prepare_method(
lambda df: pandas.DataFrame(df.apply(func, axis, *args, **kwargs))
)
new_data = self._map_across_full_axis(axis, func_prepared)
# When the function is list-like, the function names become the index/columns
new_index = (
[f if isinstance(f, string_types) else f.__name__ for f in func]
if axis == 0
else self.index
)
new_columns = (
[f if isinstance(f, string_types) else f.__name__ for f in func]
if axis == 1
else self.columns
)
return self.__constructor__(new_data, new_index, new_columns) | python | def _list_like_func(self, func, axis, *args, **kwargs):
"""Apply list-like function across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
func_prepared = self._prepare_method(
lambda df: pandas.DataFrame(df.apply(func, axis, *args, **kwargs))
)
new_data = self._map_across_full_axis(axis, func_prepared)
# When the function is list-like, the function names become the index/columns
new_index = (
[f if isinstance(f, string_types) else f.__name__ for f in func]
if axis == 0
else self.index
)
new_columns = (
[f if isinstance(f, string_types) else f.__name__ for f in func]
if axis == 1
else self.columns
)
return self.__constructor__(new_data, new_index, new_columns) | ['def', '_list_like_func', '(', 'self', ',', 'func', ',', 'axis', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'func_prepared', '=', 'self', '.', '_prepare_method', '(', 'lambda', 'df', ':', 'pandas', '.', 'DataFrame', '(', 'df', '.', 'apply', '(', 'func', ',', 'axis', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ')', ')', 'new_data', '=', 'self', '.', '_map_across_full_axis', '(', 'axis', ',', 'func_prepared', ')', '# When the function is list-like, the function names become the index/columns', 'new_index', '=', '(', '[', 'f', 'if', 'isinstance', '(', 'f', ',', 'string_types', ')', 'else', 'f', '.', '__name__', 'for', 'f', 'in', 'func', ']', 'if', 'axis', '==', '0', 'else', 'self', '.', 'index', ')', 'new_columns', '=', '(', '[', 'f', 'if', 'isinstance', '(', 'f', ',', 'string_types', ')', 'else', 'f', '.', '__name__', 'for', 'f', 'in', 'func', ']', 'if', 'axis', '==', '1', 'else', 'self', '.', 'columns', ')', 'return', 'self', '.', '__constructor__', '(', 'new_data', ',', 'new_index', ',', 'new_columns', ')'] | Apply list-like function across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler. | ['Apply', 'list', '-', 'like', 'function', 'across', 'given', 'axis', '.'] | train | https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2200-L2225 |
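Annotation: the index/column naming rule in the record above (string entries keep their own name, callables contribute `__name__`) can be checked in isolation; the sketch below is independent of modin and the function list is illustrative.
# Standalone sketch of the naming rule used by _list_like_func (not modin itself):
import numpy as np
funcs = ["sum", np.mean, max]
labels = [f if isinstance(f, str) else f.__name__ for f in funcs]
# labels == ['sum', 'mean', 'max'] -- these become the new index or columns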
4,477 | openid/JWTConnect-Python-OidcMsg | src/oidcmsg/message.py | Message.serialize | def serialize(self, method="urlencoded", lev=0, **kwargs):
"""
Convert this instance to another representation. Which representation
is given by the choice of serialization method.
:param method: A serialization method. Presently 'urlencoded', 'json',
'jwt' and 'dict' is supported.
:param lev:
:param kwargs: Extra key word arguments
:return: THe content of this message serialized using a chosen method
"""
return getattr(self, "to_%s" % method)(lev=lev, **kwargs) | python | def serialize(self, method="urlencoded", lev=0, **kwargs):
"""
Convert this instance to another representation. Which representation
is given by the choice of serialization method.
:param method: A serialization method. Presently 'urlencoded', 'json',
'jwt' and 'dict' is supported.
:param lev:
:param kwargs: Extra key word arguments
:return: THe content of this message serialized using a chosen method
"""
return getattr(self, "to_%s" % method)(lev=lev, **kwargs) | ['def', 'serialize', '(', 'self', ',', 'method', '=', '"urlencoded"', ',', 'lev', '=', '0', ',', '*', '*', 'kwargs', ')', ':', 'return', 'getattr', '(', 'self', ',', '"to_%s"', '%', 'method', ')', '(', 'lev', '=', 'lev', ',', '*', '*', 'kwargs', ')'] | Convert this instance to another representation. Which representation
is given by the choice of serialization method.
:param method: A serialization method. Presently 'urlencoded', 'json',
'jwt' and 'dict' is supported.
:param lev:
:param kwargs: Extra key word arguments
:return: THe content of this message serialized using a chosen method | ['Convert', 'this', 'instance', 'to', 'another', 'representation', '.', 'Which', 'representation', 'is', 'given', 'by', 'the', 'choice', 'of', 'serialization', 'method', '.', ':', 'param', 'method', ':', 'A', 'serialization', 'method', '.', 'Presently', 'urlencoded', 'json', 'jwt', 'and', 'dict', 'is', 'supported', '.', ':', 'param', 'lev', ':', ':', 'param', 'kwargs', ':', 'Extra', 'key', 'word', 'arguments', ':', 'return', ':', 'THe', 'content', 'of', 'this', 'message', 'serialized', 'using', 'a', 'chosen', 'method'] | train | https://github.com/openid/JWTConnect-Python-OidcMsg/blob/58ade5eb67131abfb99f38b6a92d43b697c9f2fa/src/oidcmsg/message.py#L146-L157 |
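Annotation: the `getattr(self, "to_%s" % method)` dispatch in the record above can be sketched without oidcmsg; the class below is illustrative only and mirrors the pattern, not the real Message API.
# Minimal dispatch sketch (illustrative class, not the oidcmsg Message API):
import json
from urllib.parse import urlencode

class TinyMessage:
    def __init__(self, **params):
        self._dict = params

    def to_dict(self, lev=0):
        return dict(self._dict)

    def to_json(self, lev=0):
        return json.dumps(self._dict)

    def to_urlencoded(self, lev=0):
        return urlencode(self._dict)

    def serialize(self, method="urlencoded", lev=0, **kwargs):
        # same idea as the record above: look up the to_<method> serializer by name
        return getattr(self, "to_%s" % method)(lev=lev, **kwargs)

msg = TinyMessage(grant_type="authorization_code", code="abc")
print(msg.serialize("json"))   # {"grant_type": "authorization_code", "code": "abc"}
print(msg.serialize())         # grant_type=authorization_code&code=abc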
4,478 | google/dotty | efilter/transforms/solve.py | solve_tuple | def solve_tuple(expr, vars):
"""Build a tuple from subexpressions."""
result = tuple(solve(x, vars).value for x in expr.children)
return Result(result, ()) | python | def solve_tuple(expr, vars):
"""Build a tuple from subexpressions."""
result = tuple(solve(x, vars).value for x in expr.children)
return Result(result, ()) | ['def', 'solve_tuple', '(', 'expr', ',', 'vars', ')', ':', 'result', '=', 'tuple', '(', 'solve', '(', 'x', ',', 'vars', ')', '.', 'value', 'for', 'x', 'in', 'expr', '.', 'children', ')', 'return', 'Result', '(', 'result', ',', '(', ')', ')'] | Build a tuple from subexpressions. | ['Build', 'a', 'tuple', 'from', 'subexpressions', '.'] | train | https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L404-L407 |
4,479 | pydata/xarray | xarray/core/groupby.py | GroupBy._iter_grouped | def _iter_grouped(self):
"""Iterate over each element in this group"""
for indices in self._group_indices:
yield self._obj.isel(**{self._group_dim: indices}) | python | def _iter_grouped(self):
"""Iterate over each element in this group"""
for indices in self._group_indices:
yield self._obj.isel(**{self._group_dim: indices}) | ['def', '_iter_grouped', '(', 'self', ')', ':', 'for', 'indices', 'in', 'self', '.', '_group_indices', ':', 'yield', 'self', '.', '_obj', '.', 'isel', '(', '*', '*', '{', 'self', '.', '_group_dim', ':', 'indices', '}', ')'] | Iterate over each element in this group | ['Iterate', 'over', 'each', 'element', 'in', 'this', 'group'] | train | https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/groupby.py#L322-L325 |
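Annotation: `_iter_grouped` is the private generator behind iterating an xarray GroupBy; below is a hedged sketch of the public behaviour it supports (assumes xarray is installed; the variable and coordinate names are made up).
# Public-API sketch of the iteration this generator backs (assumes xarray is installed):
import xarray as xr

ds = xr.Dataset({"t": ("x", [10.0, 11.0, 20.0, 21.0])},
                coords={"site": ("x", ["a", "a", "b", "b"])})
for label, group in ds.groupby("site"):
    # each `group` is the isel()-selected subset that _iter_grouped yields internally
    print(label, group.t.values)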
4,480 | pandas-dev/pandas | pandas/util/_decorators.py | deprecate | def deprecate(name, alternative, version, alt_name=None,
klass=None, stacklevel=2, msg=None):
"""
Return a new function that emits a deprecation warning on use.
To use this method for a deprecated function, another function
`alternative` with the same signature must exist. The deprecated
function will emit a deprecation warning, and in the docstring
it will contain the deprecation directive with the provided version
so it can be detected for future removal.
Parameters
----------
name : str
Name of function to deprecate.
alternative : func
Function to use instead.
version : str
Version of pandas in which the method has been deprecated.
alt_name : str, optional
Name to use in preference of alternative.__name__.
klass : Warning, default FutureWarning
stacklevel : int, default 2
msg : str
The message to display in the warning.
Default is '{name} is deprecated. Use {alt_name} instead.'
"""
alt_name = alt_name or alternative.__name__
klass = klass or FutureWarning
warning_msg = msg or '{} is deprecated, use {} instead'.format(name,
alt_name)
@wraps(alternative)
def wrapper(*args, **kwargs):
warnings.warn(warning_msg, klass, stacklevel=stacklevel)
return alternative(*args, **kwargs)
# adding deprecated directive to the docstring
msg = msg or 'Use `{alt_name}` instead.'.format(alt_name=alt_name)
doc_error_msg = ('deprecate needs a correctly formatted docstring in '
'the target function (should have a one liner short '
'summary, and opening quotes should be in their own '
'line). Found:\n{}'.format(alternative.__doc__))
# when python is running in optimized mode (i.e. `-OO`), docstrings are
# removed, so we check that a docstring with correct formatting is used
# but we allow empty docstrings
if alternative.__doc__:
if alternative.__doc__.count('\n') < 3:
raise AssertionError(doc_error_msg)
empty1, summary, empty2, doc = alternative.__doc__.split('\n', 3)
if empty1 or empty2 and not summary:
raise AssertionError(doc_error_msg)
wrapper.__doc__ = dedent("""
{summary}
.. deprecated:: {depr_version}
{depr_msg}
{rest_of_docstring}""").format(summary=summary.strip(),
depr_version=version,
depr_msg=msg,
rest_of_docstring=dedent(doc))
return wrapper | python | def deprecate(name, alternative, version, alt_name=None,
klass=None, stacklevel=2, msg=None):
"""
Return a new function that emits a deprecation warning on use.
To use this method for a deprecated function, another function
`alternative` with the same signature must exist. The deprecated
function will emit a deprecation warning, and in the docstring
it will contain the deprecation directive with the provided version
so it can be detected for future removal.
Parameters
----------
name : str
Name of function to deprecate.
alternative : func
Function to use instead.
version : str
Version of pandas in which the method has been deprecated.
alt_name : str, optional
Name to use in preference of alternative.__name__.
klass : Warning, default FutureWarning
stacklevel : int, default 2
msg : str
The message to display in the warning.
Default is '{name} is deprecated. Use {alt_name} instead.'
"""
alt_name = alt_name or alternative.__name__
klass = klass or FutureWarning
warning_msg = msg or '{} is deprecated, use {} instead'.format(name,
alt_name)
@wraps(alternative)
def wrapper(*args, **kwargs):
warnings.warn(warning_msg, klass, stacklevel=stacklevel)
return alternative(*args, **kwargs)
# adding deprecated directive to the docstring
msg = msg or 'Use `{alt_name}` instead.'.format(alt_name=alt_name)
doc_error_msg = ('deprecate needs a correctly formatted docstring in '
'the target function (should have a one liner short '
'summary, and opening quotes should be in their own '
'line). Found:\n{}'.format(alternative.__doc__))
# when python is running in optimized mode (i.e. `-OO`), docstrings are
# removed, so we check that a docstring with correct formatting is used
# but we allow empty docstrings
if alternative.__doc__:
if alternative.__doc__.count('\n') < 3:
raise AssertionError(doc_error_msg)
empty1, summary, empty2, doc = alternative.__doc__.split('\n', 3)
if empty1 or empty2 and not summary:
raise AssertionError(doc_error_msg)
wrapper.__doc__ = dedent("""
{summary}
.. deprecated:: {depr_version}
{depr_msg}
{rest_of_docstring}""").format(summary=summary.strip(),
depr_version=version,
depr_msg=msg,
rest_of_docstring=dedent(doc))
return wrapper | ['def', 'deprecate', '(', 'name', ',', 'alternative', ',', 'version', ',', 'alt_name', '=', 'None', ',', 'klass', '=', 'None', ',', 'stacklevel', '=', '2', ',', 'msg', '=', 'None', ')', ':', 'alt_name', '=', 'alt_name', 'or', 'alternative', '.', '__name__', 'klass', '=', 'klass', 'or', 'FutureWarning', 'warning_msg', '=', 'msg', 'or', "'{} is deprecated, use {} instead'", '.', 'format', '(', 'name', ',', 'alt_name', ')', '@', 'wraps', '(', 'alternative', ')', 'def', 'wrapper', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'warnings', '.', 'warn', '(', 'warning_msg', ',', 'klass', ',', 'stacklevel', '=', 'stacklevel', ')', 'return', 'alternative', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', '# adding deprecated directive to the docstring', 'msg', '=', 'msg', 'or', "'Use `{alt_name}` instead.'", '.', 'format', '(', 'alt_name', '=', 'alt_name', ')', 'doc_error_msg', '=', '(', "'deprecate needs a correctly formatted docstring in '", "'the target function (should have a one liner short '", "'summary, and opening quotes should be in their own '", "'line). Found:\\n{}'", '.', 'format', '(', 'alternative', '.', '__doc__', ')', ')', '# when python is running in optimized mode (i.e. `-OO`), docstrings are', '# removed, so we check that a docstring with correct formatting is used', '# but we allow empty docstrings', 'if', 'alternative', '.', '__doc__', ':', 'if', 'alternative', '.', '__doc__', '.', 'count', '(', "'\\n'", ')', '<', '3', ':', 'raise', 'AssertionError', '(', 'doc_error_msg', ')', 'empty1', ',', 'summary', ',', 'empty2', ',', 'doc', '=', 'alternative', '.', '__doc__', '.', 'split', '(', "'\\n'", ',', '3', ')', 'if', 'empty1', 'or', 'empty2', 'and', 'not', 'summary', ':', 'raise', 'AssertionError', '(', 'doc_error_msg', ')', 'wrapper', '.', '__doc__', '=', 'dedent', '(', '"""\n {summary}\n\n .. deprecated:: {depr_version}\n {depr_msg}\n\n {rest_of_docstring}"""', ')', '.', 'format', '(', 'summary', '=', 'summary', '.', 'strip', '(', ')', ',', 'depr_version', '=', 'version', ',', 'depr_msg', '=', 'msg', ',', 'rest_of_docstring', '=', 'dedent', '(', 'doc', ')', ')', 'return', 'wrapper'] | Return a new function that emits a deprecation warning on use.
To use this method for a deprecated function, another function
`alternative` with the same signature must exist. The deprecated
function will emit a deprecation warning, and in the docstring
it will contain the deprecation directive with the provided version
so it can be detected for future removal.
Parameters
----------
name : str
Name of function to deprecate.
alternative : func
Function to use instead.
version : str
Version of pandas in which the method has been deprecated.
alt_name : str, optional
Name to use in preference of alternative.__name__.
klass : Warning, default FutureWarning
stacklevel : int, default 2
msg : str
The message to display in the warning.
Default is '{name} is deprecated. Use {alt_name} instead.' | ['Return', 'a', 'new', 'function', 'that', 'emits', 'a', 'deprecation', 'warning', 'on', 'use', '.'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_decorators.py#L9-L74 |
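Annotation: a hedged sketch of how this helper is typically applied; the import path mirrors the record's own file, but `pandas.util._decorators` is a private module, and both function names below are invented for illustration.
# Hedged usage sketch; new_sum/old_sum are invented names.
from pandas.util._decorators import deprecate

def new_sum(values):
    """
    Sum the values in a list.

    Returns the arithmetic sum of `values`.
    """
    return sum(values)

# old_sum still works but emits a FutureWarning when called, and its docstring
# gains a ".. deprecated::" directive built from the summary line of new_sum.
old_sum = deprecate("old_sum", new_sum, version="1.0", alt_name="new_sum")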
4,481 | spotify/snakebite | snakebite/minicluster.py | MiniCluster.is_zero_bytes_file | def is_zero_bytes_file(self, path):
"""Return True if file <path> is zero bytes in size, else return False"""
return self._getReturnCodeCmd([self._hadoop_cmd, 'fs', '-test', '-z', self._full_hdfs_path(path)]) == 0 | python | def is_zero_bytes_file(self, path):
"""Return True if file <path> is zero bytes in size, else return False"""
return self._getReturnCodeCmd([self._hadoop_cmd, 'fs', '-test', '-z', self._full_hdfs_path(path)]) == 0 | ['def', 'is_zero_bytes_file', '(', 'self', ',', 'path', ')', ':', 'return', 'self', '.', '_getReturnCodeCmd', '(', '[', 'self', '.', '_hadoop_cmd', ',', "'fs'", ',', "'-test'", ',', "'-z'", ',', 'self', '.', '_full_hdfs_path', '(', 'path', ')', ']', ')', '==', '0'] | Return True if file <path> is zero bytes in size, else return False | ['Return', 'True', 'if', 'file', '<path', '>', 'is', 'zero', 'bytes', 'in', 'size', 'else', 'return', 'False'] | train | https://github.com/spotify/snakebite/blob/6a456e6100b0c1be66cc1f7f9d7f50494f369da3/snakebite/minicluster.py#L116-L118 |
4,482 | IBMStreams/pypi.streamsx | streamsx/spl/op.py | Invoke.output | def output(self, stream, value):
"""SPL output port assignment expression.
Arguments:
stream(Stream): Output stream the assignment is for.
value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
Returns:
Expression: Output assignment expression that is valid as a the context of this operator.
"""
if stream not in self.outputs:
raise ValueError("Stream is not an output of this operator.")
e = self.expression(value)
e._stream = stream
return e | python | def output(self, stream, value):
"""SPL output port assignment expression.
Arguments:
stream(Stream): Output stream the assignment is for.
value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
Returns:
Expression: Output assignment expression that is valid as a the context of this operator.
"""
if stream not in self.outputs:
raise ValueError("Stream is not an output of this operator.")
e = self.expression(value)
e._stream = stream
return e | ['def', 'output', '(', 'self', ',', 'stream', ',', 'value', ')', ':', 'if', 'stream', 'not', 'in', 'self', '.', 'outputs', ':', 'raise', 'ValueError', '(', '"Stream is not an output of this operator."', ')', 'e', '=', 'self', '.', 'expression', '(', 'value', ')', 'e', '.', '_stream', '=', 'stream', 'return', 'e'] | SPL output port assignment expression.
Arguments:
stream(Stream): Output stream the assignment is for.
value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
Returns:
Expression: Output assignment expression that is valid as a the context of this operator. | ['SPL', 'output', 'port', 'assignment', 'expression', '.'] | train | https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/spl/op.py#L240-L254 |
4,483 | dshean/pygeotools | pygeotools/lib/timelib.py | getTimeZone | def getTimeZone(lat, lon):
"""Get timezone for a given lat/lon
"""
#Need to fix for Python 2.x and 3.X support
import urllib.request, urllib.error, urllib.parse
import xml.etree.ElementTree as ET
#http://api.askgeo.com/v1/918/aa8292ec06199d1207ccc15be3180213c984832707f0cbf3d3859db279b4b324/query.xml?points=37.78%2C-122.42%3B40.71%2C-74.01&databases=Point%2CTimeZone%2CAstronomy%2CNaturalEarthCountry%2CUsState2010%2CUsCounty2010%2CUsCountySubdivision2010%2CUsTract2010%2CUsBlockGroup2010%2CUsPlace2010%2CUsZcta2010
req = "http://api.askgeo.com/v1/918/aa8292ec06199d1207ccc15be3180213c984832707f0cbf3d3859db279b4b324/query.xml?points="+str(lat)+"%2C"+str(lon)+"&databases=TimeZone"
opener = urllib.request.build_opener()
f = opener.open(req)
tree = ET.parse(f)
root = tree.getroot()
#Check response
tzid = None
if root.attrib['code'] == '0':
tz = list(root.iter('TimeZone'))[0]
#shortname = tz.attrib['ShortName']
tzid = tz.attrib['TimeZoneId']
return tzid | python | def getTimeZone(lat, lon):
"""Get timezone for a given lat/lon
"""
#Need to fix for Python 2.x and 3.X support
import urllib.request, urllib.error, urllib.parse
import xml.etree.ElementTree as ET
#http://api.askgeo.com/v1/918/aa8292ec06199d1207ccc15be3180213c984832707f0cbf3d3859db279b4b324/query.xml?points=37.78%2C-122.42%3B40.71%2C-74.01&databases=Point%2CTimeZone%2CAstronomy%2CNaturalEarthCountry%2CUsState2010%2CUsCounty2010%2CUsCountySubdivision2010%2CUsTract2010%2CUsBlockGroup2010%2CUsPlace2010%2CUsZcta2010
req = "http://api.askgeo.com/v1/918/aa8292ec06199d1207ccc15be3180213c984832707f0cbf3d3859db279b4b324/query.xml?points="+str(lat)+"%2C"+str(lon)+"&databases=TimeZone"
opener = urllib.request.build_opener()
f = opener.open(req)
tree = ET.parse(f)
root = tree.getroot()
#Check response
tzid = None
if root.attrib['code'] == '0':
tz = list(root.iter('TimeZone'))[0]
#shortname = tz.attrib['ShortName']
tzid = tz.attrib['TimeZoneId']
return tzid | ['def', 'getTimeZone', '(', 'lat', ',', 'lon', ')', ':', '#Need to fix for Python 2.x and 3.X support', 'import', 'urllib', '.', 'request', ',', 'urllib', '.', 'error', ',', 'urllib', '.', 'parse', 'import', 'xml', '.', 'etree', '.', 'ElementTree', 'as', 'ET', '#http://api.askgeo.com/v1/918/aa8292ec06199d1207ccc15be3180213c984832707f0cbf3d3859db279b4b324/query.xml?points=37.78%2C-122.42%3B40.71%2C-74.01&databases=Point%2CTimeZone%2CAstronomy%2CNaturalEarthCountry%2CUsState2010%2CUsCounty2010%2CUsCountySubdivision2010%2CUsTract2010%2CUsBlockGroup2010%2CUsPlace2010%2CUsZcta2010', 'req', '=', '"http://api.askgeo.com/v1/918/aa8292ec06199d1207ccc15be3180213c984832707f0cbf3d3859db279b4b324/query.xml?points="', '+', 'str', '(', 'lat', ')', '+', '"%2C"', '+', 'str', '(', 'lon', ')', '+', '"&databases=TimeZone"', 'opener', '=', 'urllib', '.', 'request', '.', 'build_opener', '(', ')', 'f', '=', 'opener', '.', 'open', '(', 'req', ')', 'tree', '=', 'ET', '.', 'parse', '(', 'f', ')', 'root', '=', 'tree', '.', 'getroot', '(', ')', '#Check response', 'tzid', '=', 'None', 'if', 'root', '.', 'attrib', '[', "'code'", ']', '==', "'0'", ':', 'tz', '=', 'list', '(', 'root', '.', 'iter', '(', "'TimeZone'", ')', ')', '[', '0', ']', "#shortname = tz.attrib['ShortName']", 'tzid', '=', 'tz', '.', 'attrib', '[', "'TimeZoneId'", ']', 'return', 'tzid'] | Get timezone for a given lat/lon | ['Get', 'timezone', 'for', 'a', 'given', 'lat', '/', 'lon'] | train | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/timelib.py#L17-L35 |
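Annotation: the XML handling above can be exercised offline; the sample document below only mirrors what the parser looks for (a `code` attribute and a `TimeZone` element carrying `TimeZoneId`) and is an assumption, not a captured AskGeo response.
# Offline sketch of the parsing step; the XML layout is reconstructed from the
# attribute lookups above, not from a real AskGeo reply.
import xml.etree.ElementTree as ET

sample = ('<response code="0">'
          '<point><TimeZone TimeZoneId="America/Los_Angeles" ShortName="PST"/></point>'
          '</response>')
root = ET.fromstring(sample)
tzid = None
if root.attrib['code'] == '0':
    tz = list(root.iter('TimeZone'))[0]
    tzid = tz.attrib['TimeZoneId']
print(tzid)   # America/Los_Angeles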
4,484 | hardbyte/python-can | can/interfaces/systec/ucan.py | UcanServer.init_hardware | def init_hardware(self, serial=None, device_number=ANY_MODULE):
"""
Initializes the device with the corresponding serial or device number.
:param int or None serial: Serial number of the USB-CANmodul.
:param int device_number: Device number (0 – 254, or :const:`ANY_MODULE` for the first device).
"""
if not self._hw_is_initialized:
# initialize hardware either by device number or serial
if serial is None:
UcanInitHardwareEx(byref(self._handle), device_number, self._callback_ref, None)
else:
UcanInitHardwareEx2(byref(self._handle), serial, self._callback_ref, None)
self._hw_is_initialized = True | python | def init_hardware(self, serial=None, device_number=ANY_MODULE):
"""
Initializes the device with the corresponding serial or device number.
:param int or None serial: Serial number of the USB-CANmodul.
:param int device_number: Device number (0 – 254, or :const:`ANY_MODULE` for the first device).
"""
if not self._hw_is_initialized:
# initialize hardware either by device number or serial
if serial is None:
UcanInitHardwareEx(byref(self._handle), device_number, self._callback_ref, None)
else:
UcanInitHardwareEx2(byref(self._handle), serial, self._callback_ref, None)
self._hw_is_initialized = True | ['def', 'init_hardware', '(', 'self', ',', 'serial', '=', 'None', ',', 'device_number', '=', 'ANY_MODULE', ')', ':', 'if', 'not', 'self', '.', '_hw_is_initialized', ':', '# initialize hardware either by device number or serial', 'if', 'serial', 'is', 'None', ':', 'UcanInitHardwareEx', '(', 'byref', '(', 'self', '.', '_handle', ')', ',', 'device_number', ',', 'self', '.', '_callback_ref', ',', 'None', ')', 'else', ':', 'UcanInitHardwareEx2', '(', 'byref', '(', 'self', '.', '_handle', ')', ',', 'serial', ',', 'self', '.', '_callback_ref', ',', 'None', ')', 'self', '.', '_hw_is_initialized', '=', 'True'] | Initializes the device with the corresponding serial or device number.
:param int or None serial: Serial number of the USB-CANmodul.
:param int device_number: Device number (0 – 254, or :const:`ANY_MODULE` for the first device). | ['Initializes', 'the', 'device', 'with', 'the', 'corresponding', 'serial', 'or', 'device', 'number', '.'] | train | https://github.com/hardbyte/python-can/blob/cdc5254d96072df7739263623f3e920628a7d214/can/interfaces/systec/ucan.py#L358-L371 |
4,485 | twilio/twilio-python | twilio/rest/preview/sync/service/sync_list/sync_list_item.py | SyncListItemList.get | def get(self, index):
"""
Constructs a SyncListItemContext
:param index: The index
:returns: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemContext
:rtype: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemContext
"""
return SyncListItemContext(
self._version,
service_sid=self._solution['service_sid'],
list_sid=self._solution['list_sid'],
index=index,
) | python | def get(self, index):
"""
Constructs a SyncListItemContext
:param index: The index
:returns: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemContext
:rtype: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemContext
"""
return SyncListItemContext(
self._version,
service_sid=self._solution['service_sid'],
list_sid=self._solution['list_sid'],
index=index,
) | ['def', 'get', '(', 'self', ',', 'index', ')', ':', 'return', 'SyncListItemContext', '(', 'self', '.', '_version', ',', 'service_sid', '=', 'self', '.', '_solution', '[', "'service_sid'", ']', ',', 'list_sid', '=', 'self', '.', '_solution', '[', "'list_sid'", ']', ',', 'index', '=', 'index', ',', ')'] | Constructs a SyncListItemContext
:param index: The index
:returns: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemContext
:rtype: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemContext | ['Constructs', 'a', 'SyncListItemContext'] | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/preview/sync/service/sync_list/sync_list_item.py#L164-L178 |
4,486 | ray-project/ray | python/ray/experimental/state.py | GlobalState.error_messages | def error_messages(self, driver_id=None):
"""Get the error messages for all drivers or a specific driver.
Args:
driver_id: The specific driver to get the errors for. If this is
None, then this method retrieves the errors for all drivers.
Returns:
A dictionary mapping driver ID to a list of the error messages for
that driver.
"""
if driver_id is not None:
assert isinstance(driver_id, ray.DriverID)
return self._error_messages(driver_id)
error_table_keys = self.redis_client.keys(
ray.gcs_utils.TablePrefix_ERROR_INFO_string + "*")
driver_ids = [
key[len(ray.gcs_utils.TablePrefix_ERROR_INFO_string):]
for key in error_table_keys
]
return {
binary_to_hex(driver_id): self._error_messages(
ray.DriverID(driver_id))
for driver_id in driver_ids
} | python | def error_messages(self, driver_id=None):
"""Get the error messages for all drivers or a specific driver.
Args:
driver_id: The specific driver to get the errors for. If this is
None, then this method retrieves the errors for all drivers.
Returns:
A dictionary mapping driver ID to a list of the error messages for
that driver.
"""
if driver_id is not None:
assert isinstance(driver_id, ray.DriverID)
return self._error_messages(driver_id)
error_table_keys = self.redis_client.keys(
ray.gcs_utils.TablePrefix_ERROR_INFO_string + "*")
driver_ids = [
key[len(ray.gcs_utils.TablePrefix_ERROR_INFO_string):]
for key in error_table_keys
]
return {
binary_to_hex(driver_id): self._error_messages(
ray.DriverID(driver_id))
for driver_id in driver_ids
} | ['def', 'error_messages', '(', 'self', ',', 'driver_id', '=', 'None', ')', ':', 'if', 'driver_id', 'is', 'not', 'None', ':', 'assert', 'isinstance', '(', 'driver_id', ',', 'ray', '.', 'DriverID', ')', 'return', 'self', '.', '_error_messages', '(', 'driver_id', ')', 'error_table_keys', '=', 'self', '.', 'redis_client', '.', 'keys', '(', 'ray', '.', 'gcs_utils', '.', 'TablePrefix_ERROR_INFO_string', '+', '"*"', ')', 'driver_ids', '=', '[', 'key', '[', 'len', '(', 'ray', '.', 'gcs_utils', '.', 'TablePrefix_ERROR_INFO_string', ')', ':', ']', 'for', 'key', 'in', 'error_table_keys', ']', 'return', '{', 'binary_to_hex', '(', 'driver_id', ')', ':', 'self', '.', '_error_messages', '(', 'ray', '.', 'DriverID', '(', 'driver_id', ')', ')', 'for', 'driver_id', 'in', 'driver_ids', '}'] | Get the error messages for all drivers or a specific driver.
Args:
driver_id: The specific driver to get the errors for. If this is
None, then this method retrieves the errors for all drivers.
Returns:
A dictionary mapping driver ID to a list of the error messages for
that driver. | ['Get', 'the', 'error', 'messages', 'for', 'all', 'drivers', 'or', 'a', 'specific', 'driver', '.'] | train | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/state.py#L876-L902 |
4,487 | UB-UNIBAS/simple-elastic | simple_elastic/index.py | ElasticIndex.scroll | def scroll(self, query=None, scroll='5m', size=100, unpack=True):
"""Scroll an index with the specified search query.
Works as a generator. Will yield `size` results per iteration until all hits are returned.
"""
query = self.match_all if query is None else query
response = self.instance.search(index=self.index, doc_type=self.doc_type, body=query, size=size, scroll=scroll)
while len(response['hits']['hits']) > 0:
scroll_id = response['_scroll_id']
logging.debug(response)
if unpack:
yield [source['_source'] if '_source' in source else source for source in response['hits']['hits']]
else:
yield response['hits']['hits']
response = self.instance.scroll(scroll_id=scroll_id, scroll=scroll) | python | def scroll(self, query=None, scroll='5m', size=100, unpack=True):
"""Scroll an index with the specified search query.
Works as a generator. Will yield `size` results per iteration until all hits are returned.
"""
query = self.match_all if query is None else query
response = self.instance.search(index=self.index, doc_type=self.doc_type, body=query, size=size, scroll=scroll)
while len(response['hits']['hits']) > 0:
scroll_id = response['_scroll_id']
logging.debug(response)
if unpack:
yield [source['_source'] if '_source' in source else source for source in response['hits']['hits']]
else:
yield response['hits']['hits']
response = self.instance.scroll(scroll_id=scroll_id, scroll=scroll) | ['def', 'scroll', '(', 'self', ',', 'query', '=', 'None', ',', 'scroll', '=', "'5m'", ',', 'size', '=', '100', ',', 'unpack', '=', 'True', ')', ':', 'query', '=', 'self', '.', 'match_all', 'if', 'query', 'is', 'None', 'else', 'query', 'response', '=', 'self', '.', 'instance', '.', 'search', '(', 'index', '=', 'self', '.', 'index', ',', 'doc_type', '=', 'self', '.', 'doc_type', ',', 'body', '=', 'query', ',', 'size', '=', 'size', ',', 'scroll', '=', 'scroll', ')', 'while', 'len', '(', 'response', '[', "'hits'", ']', '[', "'hits'", ']', ')', '>', '0', ':', 'scroll_id', '=', 'response', '[', "'_scroll_id'", ']', 'logging', '.', 'debug', '(', 'response', ')', 'if', 'unpack', ':', 'yield', '[', 'source', '[', "'_source'", ']', 'if', "'_source'", 'in', 'source', 'else', 'source', 'for', 'source', 'in', 'response', '[', "'hits'", ']', '[', "'hits'", ']', ']', 'else', ':', 'yield', 'response', '[', "'hits'", ']', '[', "'hits'", ']', 'response', '=', 'self', '.', 'instance', '.', 'scroll', '(', 'scroll_id', '=', 'scroll_id', ',', 'scroll', '=', 'scroll', ')'] | Scroll an index with the specified search query.
Works as a generator. Will yield `size` results per iteration until all hits are returned. | ['Scroll', 'an', 'index', 'with', 'the', 'specified', 'search', 'query', '.'] | train | https://github.com/UB-UNIBAS/simple-elastic/blob/54f2fdd3405a7eafbf8873f337da263b8d47532a/simple_elastic/index.py#L137-L151 |
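Annotation: a hedged usage sketch for the generator above; it assumes `index` is an ElasticIndex already wired to a reachable cluster, and the query body and field name are illustrative.
# Usage sketch (assumes `index` is an ElasticIndex connected to a live cluster).
query = {"query": {"match": {"title": "annual report"}}}

all_docs = []
for page in index.scroll(query=query, scroll='2m', size=500):
    # with unpack=True (the default) each page is a list of _source dicts
    all_docs.extend(page)
print(len(all_docs), "documents fetched")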
4,488 | iotile/coretools | transport_plugins/awsiot/iotile_transport_awsiot/device_adapter.py | AWSIOTDeviceAdapter._unbind_topics | def _unbind_topics(self, topics):
"""Unsubscribe to all of the topics we needed for communication with device
Args:
topics (MQTTTopicValidator): The topic validator for this device that
we have connected to.
"""
self.client.unsubscribe(topics.status)
self.client.unsubscribe(topics.tracing)
self.client.unsubscribe(topics.streaming)
self.client.unsubscribe(topics.response) | python | def _unbind_topics(self, topics):
"""Unsubscribe to all of the topics we needed for communication with device
Args:
topics (MQTTTopicValidator): The topic validator for this device that
we have connected to.
"""
self.client.unsubscribe(topics.status)
self.client.unsubscribe(topics.tracing)
self.client.unsubscribe(topics.streaming)
self.client.unsubscribe(topics.response) | ['def', '_unbind_topics', '(', 'self', ',', 'topics', ')', ':', 'self', '.', 'client', '.', 'unsubscribe', '(', 'topics', '.', 'status', ')', 'self', '.', 'client', '.', 'unsubscribe', '(', 'topics', '.', 'tracing', ')', 'self', '.', 'client', '.', 'unsubscribe', '(', 'topics', '.', 'streaming', ')', 'self', '.', 'client', '.', 'unsubscribe', '(', 'topics', '.', 'response', ')'] | Unsubscribe to all of the topics we needed for communication with device
Args:
topics (MQTTTopicValidator): The topic validator for this device that
we have connected to. | ['Unsubscribe', 'to', 'all', 'of', 'the', 'topics', 'we', 'needed', 'for', 'communication', 'with', 'device'] | train | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/awsiot/iotile_transport_awsiot/device_adapter.py#L341-L352 |
4,489 | materialsproject/pymatgen | pymatgen/analysis/adsorption.py | AdsorbateSiteFinder.generate_adsorption_structures | def generate_adsorption_structures(self, molecule, repeat=None, min_lw=5.0,
reorient=True, find_args={}):
"""
Function that generates all adsorption structures for a given
molecular adsorbate. Can take repeat argument or minimum
length/width of precursor slab as an input
Args:
molecule (Molecule): molecule corresponding to adsorbate
repeat (3-tuple or list): repeat argument for supercell generation
min_lw (float): minimum length and width of the slab, only used
if repeat is None
reorient (bool): flag on whether or not to reorient adsorbate
along the miller index
find_args (dict): dictionary of arguments to be passed to the
call to self.find_adsorption_sites, e.g. {"distance":2.0}
"""
if repeat is None:
xrep = np.ceil(min_lw / np.linalg.norm(self.slab.lattice.matrix[0]))
yrep = np.ceil(min_lw / np.linalg.norm(self.slab.lattice.matrix[1]))
repeat = [xrep, yrep, 1]
structs = []
for coords in self.find_adsorption_sites(**find_args)['all']:
structs.append(self.add_adsorbate(
molecule, coords, repeat=repeat, reorient=reorient))
return structs | python | def generate_adsorption_structures(self, molecule, repeat=None, min_lw=5.0,
reorient=True, find_args={}):
"""
Function that generates all adsorption structures for a given
molecular adsorbate. Can take repeat argument or minimum
length/width of precursor slab as an input
Args:
molecule (Molecule): molecule corresponding to adsorbate
repeat (3-tuple or list): repeat argument for supercell generation
min_lw (float): minimum length and width of the slab, only used
if repeat is None
reorient (bool): flag on whether or not to reorient adsorbate
along the miller index
find_args (dict): dictionary of arguments to be passed to the
call to self.find_adsorption_sites, e.g. {"distance":2.0}
"""
if repeat is None:
xrep = np.ceil(min_lw / np.linalg.norm(self.slab.lattice.matrix[0]))
yrep = np.ceil(min_lw / np.linalg.norm(self.slab.lattice.matrix[1]))
repeat = [xrep, yrep, 1]
structs = []
for coords in self.find_adsorption_sites(**find_args)['all']:
structs.append(self.add_adsorbate(
molecule, coords, repeat=repeat, reorient=reorient))
return structs | ['def', 'generate_adsorption_structures', '(', 'self', ',', 'molecule', ',', 'repeat', '=', 'None', ',', 'min_lw', '=', '5.0', ',', 'reorient', '=', 'True', ',', 'find_args', '=', '{', '}', ')', ':', 'if', 'repeat', 'is', 'None', ':', 'xrep', '=', 'np', '.', 'ceil', '(', 'min_lw', '/', 'np', '.', 'linalg', '.', 'norm', '(', 'self', '.', 'slab', '.', 'lattice', '.', 'matrix', '[', '0', ']', ')', ')', 'yrep', '=', 'np', '.', 'ceil', '(', 'min_lw', '/', 'np', '.', 'linalg', '.', 'norm', '(', 'self', '.', 'slab', '.', 'lattice', '.', 'matrix', '[', '1', ']', ')', ')', 'repeat', '=', '[', 'xrep', ',', 'yrep', ',', '1', ']', 'structs', '=', '[', ']', 'for', 'coords', 'in', 'self', '.', 'find_adsorption_sites', '(', '*', '*', 'find_args', ')', '[', "'all'", ']', ':', 'structs', '.', 'append', '(', 'self', '.', 'add_adsorbate', '(', 'molecule', ',', 'coords', ',', 'repeat', '=', 'repeat', ',', 'reorient', '=', 'reorient', ')', ')', 'return', 'structs'] | Function that generates all adsorption structures for a given
molecular adsorbate. Can take repeat argument or minimum
length/width of precursor slab as an input
Args:
molecule (Molecule): molecule corresponding to adsorbate
repeat (3-tuple or list): repeat argument for supercell generation
min_lw (float): minimum length and width of the slab, only used
if repeat is None
reorient (bool): flag on whether or not to reorient adsorbate
along the miller index
find_args (dict): dictionary of arguments to be passed to the
call to self.find_adsorption_sites, e.g. {"distance":2.0} | ['Function', 'that', 'generates', 'all', 'adsorption', 'structures', 'for', 'a', 'given', 'molecular', 'adsorbate', '.', 'Can', 'take', 'repeat', 'argument', 'or', 'minimum', 'length', '/', 'width', 'of', 'precursor', 'slab', 'as', 'an', 'input'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/adsorption.py#L423-L449 |
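Annotation: a heavily hedged sketch of calling this method; it assumes `slab` is a pymatgen Slab built elsewhere (for example with SlabGenerator), and the import paths may differ between pymatgen versions.
# Hedged sketch; `slab` is assumed to be built elsewhere, and import paths can
# vary between pymatgen releases.
from pymatgen.core.structure import Molecule
from pymatgen.analysis.adsorption import AdsorbateSiteFinder

adsorbate = Molecule(["H"], [[0.0, 0.0, 0.0]])   # single hydrogen atom as the adsorbate
asf = AdsorbateSiteFinder(slab)
structures = asf.generate_adsorption_structures(adsorbate,
                                                min_lw=8.0,
                                                find_args={"distance": 2.0})
# one candidate structure per adsorption site found on the slab surface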
4,490 | decryptus/sonicprobe | sonicprobe/helpers.py | linesubst | def linesubst(line, variables):
"""
In a string, substitute '{{varname}}' occurrences with the value of
variables['varname'], '\\' being an escaping char...
If at first you don't understand this function, draw its finite state
machine and everything will become crystal clear :)
"""
# trivial no substitution early detection:
if '{{' not in line and '\\' not in line:
return line
st = NORM
out = ""
curvar = ""
for c in line:
if st is NORM:
if c == '{':
st = ONE
elif c == '\\':
st = LIT
else:
out += c
elif st is LIT:
out += c
st = NORM
elif st is ONE:
if c == '{':
st = TWO
elif c == '\\':
out += '{'
st = LIT
else:
out += '{' + c
st = NORM
elif st is TWO:
if c == '\\':
st = TLIT
elif c == '}':
st = TERM
else:
curvar += c
elif st is TLIT:
curvar += c
st = TWO
elif st is TERM:
if c == '}':
if curvar not in variables:
LOG.warning("Unknown variable %r detected, will just be replaced by an empty string", curvar)
else:
LOG.debug("Substitution of {{%s}} by %r", curvar, variables[curvar])
value = variables[curvar]
if isinstance(value, (float, int, long)):
value = str(value)
out += value
curvar = ''
st = NORM
elif c == '\\':
curvar += '}'
st = TLIT
else:
curvar += '}' + c
st = TWO
if st is not NORM:
LOG.warning("st is not NORM at end of line: " + line)
LOG.warning("returned substitution: " + out)
return out | python | def linesubst(line, variables):
"""
In a string, substitute '{{varname}}' occurrences with the value of
variables['varname'], '\\' being an escaping char...
If at first you don't understand this function, draw its finite state
machine and everything will become crystal clear :)
"""
# trivial no substitution early detection:
if '{{' not in line and '\\' not in line:
return line
st = NORM
out = ""
curvar = ""
for c in line:
if st is NORM:
if c == '{':
st = ONE
elif c == '\\':
st = LIT
else:
out += c
elif st is LIT:
out += c
st = NORM
elif st is ONE:
if c == '{':
st = TWO
elif c == '\\':
out += '{'
st = LIT
else:
out += '{' + c
st = NORM
elif st is TWO:
if c == '\\':
st = TLIT
elif c == '}':
st = TERM
else:
curvar += c
elif st is TLIT:
curvar += c
st = TWO
elif st is TERM:
if c == '}':
if curvar not in variables:
LOG.warning("Unknown variable %r detected, will just be replaced by an empty string", curvar)
else:
LOG.debug("Substitution of {{%s}} by %r", curvar, variables[curvar])
value = variables[curvar]
if isinstance(value, (float, int, long)):
value = str(value)
out += value
curvar = ''
st = NORM
elif c == '\\':
curvar += '}'
st = TLIT
else:
curvar += '}' + c
st = TWO
if st is not NORM:
LOG.warning("st is not NORM at end of line: " + line)
LOG.warning("returned substitution: " + out)
return out | ['def', 'linesubst', '(', 'line', ',', 'variables', ')', ':', '# trivial no substitution early detection:', 'if', "'{{'", 'not', 'in', 'line', 'and', "'\\\\'", 'not', 'in', 'line', ':', 'return', 'line', 'st', '=', 'NORM', 'out', '=', '""', 'curvar', '=', '""', 'for', 'c', 'in', 'line', ':', 'if', 'st', 'is', 'NORM', ':', 'if', 'c', '==', "'{'", ':', 'st', '=', 'ONE', 'elif', 'c', '==', "'\\\\'", ':', 'st', '=', 'LIT', 'else', ':', 'out', '+=', 'c', 'elif', 'st', 'is', 'LIT', ':', 'out', '+=', 'c', 'st', '=', 'NORM', 'elif', 'st', 'is', 'ONE', ':', 'if', 'c', '==', "'{'", ':', 'st', '=', 'TWO', 'elif', 'c', '==', "'\\\\'", ':', 'out', '+=', "'{'", 'st', '=', 'LIT', 'else', ':', 'out', '+=', "'{'", '+', 'c', 'st', '=', 'NORM', 'elif', 'st', 'is', 'TWO', ':', 'if', 'c', '==', "'\\\\'", ':', 'st', '=', 'TLIT', 'elif', 'c', '==', "'}'", ':', 'st', '=', 'TERM', 'else', ':', 'curvar', '+=', 'c', 'elif', 'st', 'is', 'TLIT', ':', 'curvar', '+=', 'c', 'st', '=', 'TWO', 'elif', 'st', 'is', 'TERM', ':', 'if', 'c', '==', "'}'", ':', 'if', 'curvar', 'not', 'in', 'variables', ':', 'LOG', '.', 'warning', '(', '"Unknown variable %r detected, will just be replaced by an empty string"', ',', 'curvar', ')', 'else', ':', 'LOG', '.', 'debug', '(', '"Substitution of {{%s}} by %r"', ',', 'curvar', ',', 'variables', '[', 'curvar', ']', ')', 'value', '=', 'variables', '[', 'curvar', ']', 'if', 'isinstance', '(', 'value', ',', '(', 'float', ',', 'int', ',', 'long', ')', ')', ':', 'value', '=', 'str', '(', 'value', ')', 'out', '+=', 'value', 'curvar', '=', "''", 'st', '=', 'NORM', 'elif', 'c', '==', "'\\\\'", ':', 'curvar', '+=', "'}'", 'st', '=', 'TLIT', 'else', ':', 'curvar', '+=', "'}'", '+', 'c', 'st', '=', 'TWO', 'if', 'st', 'is', 'not', 'NORM', ':', 'LOG', '.', 'warning', '(', '"st is not NORM at end of line: "', '+', 'line', ')', 'LOG', '.', 'warning', '(', '"returned substitution: "', '+', 'out', ')', 'return', 'out'] | In a string, substitute '{{varname}}' occurrences with the value of
variables['varname'], '\\' being an escaping char...
If at first you don't understand this function, draw its finite state
machine and everything will become crystal clear :) | ['In', 'a', 'string', 'substitute', '{{', 'varname', '}}', 'occurrences', 'with', 'the', 'value', 'of', 'variables', '[', 'varname', ']', '\\\\', 'being', 'an', 'escaping', 'char', '...', 'If', 'at', 'first', 'you', 'don', 't', 'understand', 'this', 'function', 'draw', 'its', 'finite', 'state', 'machine', 'and', 'everything', 'will', 'become', 'crystal', 'clear', ':', ')'] | train | https://github.com/decryptus/sonicprobe/blob/72f73f3a40d2982d79ad68686e36aa31d94b76f8/sonicprobe/helpers.py#L357-L423 |
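Annotation: a few concrete substitutions make the state machine above easier to follow; the import path is taken from the record's file path and the variable values are made up.
# Behaviour sketch (import path taken from the record's file path; values invented):
from sonicprobe.helpers import linesubst

variables = {"host": "10.0.0.5", "port": 8080}
linesubst("connect to {{host}}:{{port}}", variables)     # -> 'connect to 10.0.0.5:8080'
linesubst(r"literal \{\{host\}\} stays put", variables)  # backslash escapes the braces
linesubst("unknown {{nope}} becomes empty", variables)   # unknown vars substitute to ''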
4,491 | jameshilliard/hlk-sw16 | hlk_sw16/protocol.py | SW16Protocol._reset_timeout | def _reset_timeout(self):
"""Reset timeout for date keep alive."""
if self._timeout:
self._timeout.cancel()
self._timeout = self.loop.call_later(self.client.timeout,
self.transport.close) | python | def _reset_timeout(self):
"""Reset timeout for date keep alive."""
if self._timeout:
self._timeout.cancel()
self._timeout = self.loop.call_later(self.client.timeout,
self.transport.close) | ['def', '_reset_timeout', '(', 'self', ')', ':', 'if', 'self', '.', '_timeout', ':', 'self', '.', '_timeout', '.', 'cancel', '(', ')', 'self', '.', '_timeout', '=', 'self', '.', 'loop', '.', 'call_later', '(', 'self', '.', 'client', '.', 'timeout', ',', 'self', '.', 'transport', '.', 'close', ')'] | Reset timeout for date keep alive. | ['Reset', 'timeout', 'for', 'date', 'keep', 'alive', '.'] | train | https://github.com/jameshilliard/hlk-sw16/blob/4f0c5a7b76b42167f4dc9d2aa6312c7518a8cd56/hlk_sw16/protocol.py#L30-L35 |
4,492 | hobson/pug-dj | pug/dj/db.py | count_in_category | def count_in_category(x='call_type', filter_dict=None, model=DEFAULT_MODEL, app=DEFAULT_APP, sort=True, limit=1000):
"""
Count the number of records for each discrete (categorical) value of a field and return a dict of two lists, the field values and the counts.
>>> x, y = count_in_category(x='call_type', filter_dict={'model__startswith': 'LC60'}, limit=5, sort=1)
>>> len(x) == len(y) == 5
True
>>> y[1] >= y[0]
True
"""
sort = sort_prefix(sort)
model = get_model(model, app)
filter_dict = filter_dict or {}
x = fuzzy.extractOne(str(x), model._meta.get_all_field_names())[0]
objects = model.objects.filter(**filter_dict)
objects = objects.values(x)
objects = objects.annotate(y=djmodels.Count(x))
if sort is not None:
objects = objects.order_by(sort + 'y')
objects = objects.all()
if limit:
objects = objects[:int(limit)]
objects = normalize_choices(util.sod_transposed(objects), field_name=x, app=app, human_readable=True)
if not objects:
return None
objects = consolidated_counts(objects, field_name=x, count_name='y')
if sort is not None:
objects = sorted_dict_of_lists(objects, field_names=['y', x], reverse=bool(sort))
return objects[x], objects['y'] | python | def count_in_category(x='call_type', filter_dict=None, model=DEFAULT_MODEL, app=DEFAULT_APP, sort=True, limit=1000):
"""
Count the number of records for each discrete (categorical) value of a field and return a dict of two lists, the field values and the counts.
>>> x, y = count_in_category(x='call_type', filter_dict={'model__startswith': 'LC60'}, limit=5, sort=1)
>>> len(x) == len(y) == 5
True
>>> y[1] >= y[0]
True
"""
sort = sort_prefix(sort)
model = get_model(model, app)
filter_dict = filter_dict or {}
x = fuzzy.extractOne(str(x), model._meta.get_all_field_names())[0]
objects = model.objects.filter(**filter_dict)
objects = objects.values(x)
objects = objects.annotate(y=djmodels.Count(x))
if sort is not None:
objects = objects.order_by(sort + 'y')
objects = objects.all()
if limit:
objects = objects[:int(limit)]
objects = normalize_choices(util.sod_transposed(objects), field_name=x, app=app, human_readable=True)
if not objects:
return None
objects = consolidated_counts(objects, field_name=x, count_name='y')
if sort is not None:
objects = sorted_dict_of_lists(objects, field_names=['y', x], reverse=bool(sort))
return objects[x], objects['y'] | ['def', 'count_in_category', '(', 'x', '=', "'call_type'", ',', 'filter_dict', '=', 'None', ',', 'model', '=', 'DEFAULT_MODEL', ',', 'app', '=', 'DEFAULT_APP', ',', 'sort', '=', 'True', ',', 'limit', '=', '1000', ')', ':', 'sort', '=', 'sort_prefix', '(', 'sort', ')', 'model', '=', 'get_model', '(', 'model', ',', 'app', ')', 'filter_dict', '=', 'filter_dict', 'or', '{', '}', 'x', '=', 'fuzzy', '.', 'extractOne', '(', 'str', '(', 'x', ')', ',', 'model', '.', '_meta', '.', 'get_all_field_names', '(', ')', ')', '[', '0', ']', 'objects', '=', 'model', '.', 'objects', '.', 'filter', '(', '*', '*', 'filter_dict', ')', 'objects', '=', 'objects', '.', 'values', '(', 'x', ')', 'objects', '=', 'objects', '.', 'annotate', '(', 'y', '=', 'djmodels', '.', 'Count', '(', 'x', ')', ')', 'if', 'sort', 'is', 'not', 'None', ':', 'objects', '=', 'objects', '.', 'order_by', '(', 'sort', '+', "'y'", ')', 'objects', '=', 'objects', '.', 'all', '(', ')', 'if', 'limit', ':', 'objects', '=', 'objects', '[', ':', 'int', '(', 'limit', ')', ']', 'objects', '=', 'normalize_choices', '(', 'util', '.', 'sod_transposed', '(', 'objects', ')', ',', 'field_name', '=', 'x', ',', 'app', '=', 'app', ',', 'human_readable', '=', 'True', ')', 'if', 'not', 'objects', ':', 'return', 'None', 'objects', '=', 'consolidated_counts', '(', 'objects', ',', 'field_name', '=', 'x', ',', 'count_name', '=', "'y'", ')', 'if', 'sort', 'is', 'not', 'None', ':', 'objects', '=', 'sorted_dict_of_lists', '(', 'objects', ',', 'field_names', '=', '[', "'y'", ',', 'x', ']', ',', 'reverse', '=', 'bool', '(', 'sort', ')', ')', 'return', 'objects', '[', 'x', ']', ',', 'objects', '[', "'y'", ']'] | Count the number of records for each discrete (categorical) value of a field and return a dict of two lists, the field values and the counts.
>>> x, y = count_in_category(x='call_type', filter_dict={'model__startswith': 'LC60'}, limit=5, sort=1)
>>> len(x) == len(y) == 5
True
>>> y[1] >= y[0]
True | ['Count', 'the', 'number', 'of', 'records', 'for', 'each', 'discrete', '(', 'categorical', ')', 'value', 'of', 'a', 'field', 'and', 'return', 'a', 'dict', 'of', 'two', 'lists', 'the', 'field', 'values', 'and', 'the', 'counts', '.'] | train | https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L582-L612 |
4,493 | MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/embedding/competing_methods.py | laplacian_eigenmaps | def laplacian_eigenmaps(adjacency_matrix, k):
"""
Performs spectral graph embedding using the graph symmetric normalized Laplacian matrix.
Introduced in: Belkin, M., & Niyogi, P. (2003).
Laplacian eigenmaps for dimensionality reduction and data representation.
Neural computation, 15(6), 1373-1396.
Inputs: - A in R^(nxn): Adjacency matrix of an network represented as a SciPy Sparse COOrdinate matrix.
- k: The number of eigenvectors to extract.
Outputs: - X in R^(nxk): The latent space embedding represented as a NumPy array. We discard the first eigenvector.
"""
# Calculate sparse graph Laplacian.
laplacian = get_normalized_laplacian(adjacency_matrix)
# Calculate bottom k+1 eigenvalues and eigenvectors of normalized Laplacian.
try:
eigenvalues, eigenvectors = spla.eigsh(laplacian,
k=k,
which='SM',
return_eigenvectors=True)
except spla.ArpackNoConvergence as e:
print("ARPACK has not converged.")
eigenvalue = e.eigenvalues
eigenvectors = e.eigenvectors
# Discard the eigenvector corresponding to the zero-valued eigenvalue.
eigenvectors = eigenvectors[:, 1:]
return eigenvectors | python | def laplacian_eigenmaps(adjacency_matrix, k):
"""
Performs spectral graph embedding using the graph symmetric normalized Laplacian matrix.
Introduced in: Belkin, M., & Niyogi, P. (2003).
Laplacian eigenmaps for dimensionality reduction and data representation.
Neural computation, 15(6), 1373-1396.
Inputs: - A in R^(nxn): Adjacency matrix of an network represented as a SciPy Sparse COOrdinate matrix.
- k: The number of eigenvectors to extract.
Outputs: - X in R^(nxk): The latent space embedding represented as a NumPy array. We discard the first eigenvector.
"""
# Calculate sparse graph Laplacian.
laplacian = get_normalized_laplacian(adjacency_matrix)
# Calculate bottom k+1 eigenvalues and eigenvectors of normalized Laplacian.
try:
eigenvalues, eigenvectors = spla.eigsh(laplacian,
k=k,
which='SM',
return_eigenvectors=True)
except spla.ArpackNoConvergence as e:
print("ARPACK has not converged.")
eigenvalue = e.eigenvalues
eigenvectors = e.eigenvectors
# Discard the eigenvector corresponding to the zero-valued eigenvalue.
eigenvectors = eigenvectors[:, 1:]
return eigenvectors | ['def', 'laplacian_eigenmaps', '(', 'adjacency_matrix', ',', 'k', ')', ':', '# Calculate sparse graph Laplacian.', 'laplacian', '=', 'get_normalized_laplacian', '(', 'adjacency_matrix', ')', '# Calculate bottom k+1 eigenvalues and eigenvectors of normalized Laplacian.', 'try', ':', 'eigenvalues', ',', 'eigenvectors', '=', 'spla', '.', 'eigsh', '(', 'laplacian', ',', 'k', '=', 'k', ',', 'which', '=', "'SM'", ',', 'return_eigenvectors', '=', 'True', ')', 'except', 'spla', '.', 'ArpackNoConvergence', 'as', 'e', ':', 'print', '(', '"ARPACK has not converged."', ')', 'eigenvalue', '=', 'e', '.', 'eigenvalues', 'eigenvectors', '=', 'e', '.', 'eigenvectors', '# Discard the eigenvector corresponding to the zero-valued eigenvalue.', 'eigenvectors', '=', 'eigenvectors', '[', ':', ',', '1', ':', ']', 'return', 'eigenvectors'] | Performs spectral graph embedding using the graph symmetric normalized Laplacian matrix.
Introduced in: Belkin, M., & Niyogi, P. (2003).
Laplacian eigenmaps for dimensionality reduction and data representation.
Neural computation, 15(6), 1373-1396.
Inputs: - A in R^(nxn): Adjacency matrix of an network represented as a SciPy Sparse COOrdinate matrix.
- k: The number of eigenvectors to extract.
Outputs: - X in R^(nxk): The latent space embedding represented as a NumPy array. We discard the first eigenvector. | ['Performs', 'spectral', 'graph', 'embedding', 'using', 'the', 'graph', 'symmetric', 'normalized', 'Laplacian', 'matrix', '.'] | train | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/embedding/competing_methods.py#L264-L294 |
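Annotation: a sketch of feeding this function a small SciPy COO adjacency matrix; the ring-graph construction is self-contained, and the import path (left commented out) is assumed from the record's file path.
# Build a tiny undirected ring graph as a SciPy COO adjacency matrix.
import numpy as np
import scipy.sparse as sp

n = 12
heads = np.arange(n)
tails = (heads + 1) % n
rows = np.concatenate([heads, tails])
cols = np.concatenate([tails, heads])
adjacency = sp.coo_matrix((np.ones(2 * n), (rows, cols)), shape=(n, n))

# Import path assumed from the record's file path:
# from reveal_graph_embedding.embedding.competing_methods import laplacian_eigenmaps
# X = laplacian_eigenmaps(adjacency, k=4)   # X would have shape (n, 3): first eigenvector dropped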
4,494 | StanfordVL/robosuite | robosuite/environments/baxter.py | BaxterEnv._load_model | def _load_model(self):
"""Loads robot and optionally add grippers."""
super()._load_model()
self.mujoco_robot = Baxter()
if self.has_gripper_right:
self.gripper_right = gripper_factory(self.gripper_right_name)
if not self.gripper_visualization:
self.gripper_right.hide_visualization()
self.mujoco_robot.add_gripper("right_hand", self.gripper_right)
if self.has_gripper_left:
self.gripper_left = gripper_factory(self.gripper_left_name)
if not self.gripper_visualization:
self.gripper_left.hide_visualization()
self.mujoco_robot.add_gripper("left_hand", self.gripper_left) | python | def _load_model(self):
"""Loads robot and optionally add grippers."""
super()._load_model()
self.mujoco_robot = Baxter()
if self.has_gripper_right:
self.gripper_right = gripper_factory(self.gripper_right_name)
if not self.gripper_visualization:
self.gripper_right.hide_visualization()
self.mujoco_robot.add_gripper("right_hand", self.gripper_right)
if self.has_gripper_left:
self.gripper_left = gripper_factory(self.gripper_left_name)
if not self.gripper_visualization:
self.gripper_left.hide_visualization()
self.mujoco_robot.add_gripper("left_hand", self.gripper_left) | ['def', '_load_model', '(', 'self', ')', ':', 'super', '(', ')', '.', '_load_model', '(', ')', 'self', '.', 'mujoco_robot', '=', 'Baxter', '(', ')', 'if', 'self', '.', 'has_gripper_right', ':', 'self', '.', 'gripper_right', '=', 'gripper_factory', '(', 'self', '.', 'gripper_right_name', ')', 'if', 'not', 'self', '.', 'gripper_visualization', ':', 'self', '.', 'gripper_right', '.', 'hide_visualization', '(', ')', 'self', '.', 'mujoco_robot', '.', 'add_gripper', '(', '"right_hand"', ',', 'self', '.', 'gripper_right', ')', 'if', 'self', '.', 'has_gripper_left', ':', 'self', '.', 'gripper_left', '=', 'gripper_factory', '(', 'self', '.', 'gripper_left_name', ')', 'if', 'not', 'self', '.', 'gripper_visualization', ':', 'self', '.', 'gripper_left', '.', 'hide_visualization', '(', ')', 'self', '.', 'mujoco_robot', '.', 'add_gripper', '(', '"left_hand"', ',', 'self', '.', 'gripper_left', ')'] | Loads robot and optionally add grippers. | ['Loads', 'robot', 'and', 'optionally', 'add', 'grippers', '.'] | train | https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/environments/baxter.py#L76-L90 |
4,495 | openego/eDisGo | edisgo/tools/pypsa_io_lopf.py | lv_to_pypsa | def lv_to_pypsa(network):
"""
Convert LV grid topology to PyPSA representation
Includes grid topology of all LV grids of :attr:`~.grid.grid.Grid.lv_grids`
Parameters
----------
network : Network
eDisGo grid container
Returns
-------
dict of :pandas:`pandas.DataFrame<dataframe>`
A DataFrame for each type of PyPSA components constituting the grid
topology. Keys included
* 'Generator'
* 'Load'
* 'Line'
* 'BranchTee'
* 'StorageUnit'
"""
generators = []
loads = []
branch_tees = []
lines = []
lv_stations = []
storages = []
for lv_grid in network.mv_grid.lv_grids:
generators.extend(lv_grid.graph.nodes_by_attribute('generator'))
loads.extend(lv_grid.graph.nodes_by_attribute('load'))
branch_tees.extend(lv_grid.graph.nodes_by_attribute('branch_tee'))
lines.extend(lv_grid.graph.lines())
lv_stations.extend(lv_grid.graph.nodes_by_attribute('lv_station'))
storages.extend(lv_grid.graph.nodes_by_attribute('storage'))
omega = 2 * pi * 50
generator = {'name': [],
'bus': [],
'control': [],
'p_nom': [],
'type': [],
'p_nom_extendable': [],
'p_nom_min': [],
'p_nom_max': [],
'capital_cost': []
}
bus = {'name': [], 'v_nom': [], 'x': [], 'y': []}
load = {'name': [], 'bus': []}
line = {'name': [],
'bus0': [],
'bus1': [],
'type': [],
'x': [],
'r': [],
's_nom': [],
's_nom_min': [],
's_max_pu': [],
's_nom_extendable': [],
'capital_cost': [],
'length': []}
storage = {
'name': [],
'bus': [],
'p_nom': [],
'p_nom_extendable': [],
'p_nom_min': [],
'p_nom_max': [],
'capital_cost': [],
'max_hours': []}
# create dictionary representing generators and associated buses
for gen in generators:
bus_name = '_'.join(['Bus', repr(gen)])
generator['name'].append(repr(gen))
generator['bus'].append(bus_name)
generator['control'].append('PQ')
generator['p_nom'].append(gen.nominal_capacity / 1e3)
generator['type'].append('_'.join([gen.type, gen.subtype]))
generator['p_nom_extendable'].append(False)
generator['p_nom_min'].append(0) # 0.3
generator['p_nom_max'].append(0)
generator['capital_cost'].append(0)
bus['name'].append(bus_name)
bus['v_nom'].append(gen.grid.voltage_nom)
bus['x'].append(None)
bus['y'].append(None)
# create dictionary representing branch tees
for bt in branch_tees:
bus['name'].append('_'.join(['Bus', repr(bt)]))
bus['v_nom'].append(bt.grid.voltage_nom)
bus['x'].append(None)
bus['y'].append(None)
# create dataframes representing loads and associated buses
for lo in loads:
bus_name = '_'.join(['Bus', repr(lo)])
load['name'].append(repr(lo))
load['bus'].append(bus_name)
bus['name'].append(bus_name)
bus['v_nom'].append(lo.grid.voltage_nom)
bus['x'].append(None)
bus['y'].append(None)
# create dataframe for lines
for l in lines:
line['name'].append(repr(l['line']))
if l['adj_nodes'][0] in lv_stations:
line['bus0'].append(
'_'.join(['Bus', l['adj_nodes'][0].__repr__(side='lv')]))
else:
line['bus0'].append('_'.join(['Bus', repr(l['adj_nodes'][0])]))
if l['adj_nodes'][1] in lv_stations:
line['bus1'].append(
'_'.join(['Bus', l['adj_nodes'][1].__repr__(side='lv')]))
else:
line['bus1'].append('_'.join(['Bus', repr(l['adj_nodes'][1])]))
line['type'].append("")
line['x'].append(
l['line'].type['L'] * omega / 1e3 * l['line'].length)
line['r'].append(l['line'].type['R'] * l['line'].length)
s_nom = sqrt(3) * l['line'].type['I_max_th'] * \
l['line'].type['U_n'] / 1e3
line['s_nom'].append(s_nom)
line['s_nom_min'].append(s_nom)
line['s_max_pu'].append(0.6)
line['s_nom_extendable'].append(True)
line['capital_cost'].append(100)
line['length'].append(l['line'].length)
lv_components = {
'Generator': pd.DataFrame(generator).set_index('name'),
'Bus': pd.DataFrame(bus).set_index('name'),
'Load': pd.DataFrame(load).set_index('name'),
'Line': pd.DataFrame(line).set_index('name'),
'StorageUnit': pd.DataFrame(storage).set_index('name')}
return lv_components | python | def lv_to_pypsa(network):
"""
Convert LV grid topology to PyPSA representation
Includes grid topology of all LV grids of :attr:`~.grid.grid.Grid.lv_grids`
Parameters
----------
network : Network
eDisGo grid container
Returns
-------
dict of :pandas:`pandas.DataFrame<dataframe>`
A DataFrame for each type of PyPSA components constituting the grid
topology. Keys included
* 'Generator'
* 'Load'
* 'Line'
* 'BranchTee'
* 'StorageUnit'
"""
generators = []
loads = []
branch_tees = []
lines = []
lv_stations = []
storages = []
for lv_grid in network.mv_grid.lv_grids:
generators.extend(lv_grid.graph.nodes_by_attribute('generator'))
loads.extend(lv_grid.graph.nodes_by_attribute('load'))
branch_tees.extend(lv_grid.graph.nodes_by_attribute('branch_tee'))
lines.extend(lv_grid.graph.lines())
lv_stations.extend(lv_grid.graph.nodes_by_attribute('lv_station'))
storages.extend(lv_grid.graph.nodes_by_attribute('storage'))
omega = 2 * pi * 50
generator = {'name': [],
'bus': [],
'control': [],
'p_nom': [],
'type': [],
'p_nom_extendable': [],
'p_nom_min': [],
'p_nom_max': [],
'capital_cost': []
}
bus = {'name': [], 'v_nom': [], 'x': [], 'y': []}
load = {'name': [], 'bus': []}
line = {'name': [],
'bus0': [],
'bus1': [],
'type': [],
'x': [],
'r': [],
's_nom': [],
's_nom_min': [],
's_max_pu': [],
's_nom_extendable': [],
'capital_cost': [],
'length': []}
storage = {
'name': [],
'bus': [],
'p_nom': [],
'p_nom_extendable': [],
'p_nom_min': [],
'p_nom_max': [],
'capital_cost': [],
'max_hours': []}
# create dictionary representing generators and associated buses
for gen in generators:
bus_name = '_'.join(['Bus', repr(gen)])
generator['name'].append(repr(gen))
generator['bus'].append(bus_name)
generator['control'].append('PQ')
generator['p_nom'].append(gen.nominal_capacity / 1e3)
generator['type'].append('_'.join([gen.type, gen.subtype]))
generator['p_nom_extendable'].append(False)
generator['p_nom_min'].append(0) # 0.3
generator['p_nom_max'].append(0)
generator['capital_cost'].append(0)
bus['name'].append(bus_name)
bus['v_nom'].append(gen.grid.voltage_nom)
bus['x'].append(None)
bus['y'].append(None)
# create dictionary representing branch tees
for bt in branch_tees:
bus['name'].append('_'.join(['Bus', repr(bt)]))
bus['v_nom'].append(bt.grid.voltage_nom)
bus['x'].append(None)
bus['y'].append(None)
# create dataframes representing loads and associated buses
for lo in loads:
bus_name = '_'.join(['Bus', repr(lo)])
load['name'].append(repr(lo))
load['bus'].append(bus_name)
bus['name'].append(bus_name)
bus['v_nom'].append(lo.grid.voltage_nom)
bus['x'].append(None)
bus['y'].append(None)
# create dataframe for lines
for l in lines:
line['name'].append(repr(l['line']))
if l['adj_nodes'][0] in lv_stations:
line['bus0'].append(
'_'.join(['Bus', l['adj_nodes'][0].__repr__(side='lv')]))
else:
line['bus0'].append('_'.join(['Bus', repr(l['adj_nodes'][0])]))
if l['adj_nodes'][1] in lv_stations:
line['bus1'].append(
'_'.join(['Bus', l['adj_nodes'][1].__repr__(side='lv')]))
else:
line['bus1'].append('_'.join(['Bus', repr(l['adj_nodes'][1])]))
line['type'].append("")
line['x'].append(
l['line'].type['L'] * omega / 1e3 * l['line'].length)
line['r'].append(l['line'].type['R'] * l['line'].length)
s_nom = sqrt(3) * l['line'].type['I_max_th'] * \
l['line'].type['U_n'] / 1e3
line['s_nom'].append(s_nom)
line['s_nom_min'].append(s_nom)
line['s_max_pu'].append(0.6)
line['s_nom_extendable'].append(True)
line['capital_cost'].append(100)
line['length'].append(l['line'].length)
lv_components = {
'Generator': pd.DataFrame(generator).set_index('name'),
'Bus': pd.DataFrame(bus).set_index('name'),
'Load': pd.DataFrame(load).set_index('name'),
'Line': pd.DataFrame(line).set_index('name'),
'StorageUnit': pd.DataFrame(storage).set_index('name')}
return lv_components | ['def', 'lv_to_pypsa', '(', 'network', ')', ':', 'generators', '=', '[', ']', 'loads', '=', '[', ']', 'branch_tees', '=', '[', ']', 'lines', '=', '[', ']', 'lv_stations', '=', '[', ']', 'storages', '=', '[', ']', 'for', 'lv_grid', 'in', 'network', '.', 'mv_grid', '.', 'lv_grids', ':', 'generators', '.', 'extend', '(', 'lv_grid', '.', 'graph', '.', 'nodes_by_attribute', '(', "'generator'", ')', ')', 'loads', '.', 'extend', '(', 'lv_grid', '.', 'graph', '.', 'nodes_by_attribute', '(', "'load'", ')', ')', 'branch_tees', '.', 'extend', '(', 'lv_grid', '.', 'graph', '.', 'nodes_by_attribute', '(', "'branch_tee'", ')', ')', 'lines', '.', 'extend', '(', 'lv_grid', '.', 'graph', '.', 'lines', '(', ')', ')', 'lv_stations', '.', 'extend', '(', 'lv_grid', '.', 'graph', '.', 'nodes_by_attribute', '(', "'lv_station'", ')', ')', 'storages', '.', 'extend', '(', 'lv_grid', '.', 'graph', '.', 'nodes_by_attribute', '(', "'storage'", ')', ')', 'omega', '=', '2', '*', 'pi', '*', '50', 'generator', '=', '{', "'name'", ':', '[', ']', ',', "'bus'", ':', '[', ']', ',', "'control'", ':', '[', ']', ',', "'p_nom'", ':', '[', ']', ',', "'type'", ':', '[', ']', ',', "'p_nom_extendable'", ':', '[', ']', ',', "'p_nom_min'", ':', '[', ']', ',', "'p_nom_max'", ':', '[', ']', ',', "'capital_cost'", ':', '[', ']', '}', 'bus', '=', '{', "'name'", ':', '[', ']', ',', "'v_nom'", ':', '[', ']', ',', "'x'", ':', '[', ']', ',', "'y'", ':', '[', ']', '}', 'load', '=', '{', "'name'", ':', '[', ']', ',', "'bus'", ':', '[', ']', '}', 'line', '=', '{', "'name'", ':', '[', ']', ',', "'bus0'", ':', '[', ']', ',', "'bus1'", ':', '[', ']', ',', "'type'", ':', '[', ']', ',', "'x'", ':', '[', ']', ',', "'r'", ':', '[', ']', ',', "'s_nom'", ':', '[', ']', ',', "'s_nom_min'", ':', '[', ']', ',', "'s_max_pu'", ':', '[', ']', ',', "'s_nom_extendable'", ':', '[', ']', ',', "'capital_cost'", ':', '[', ']', ',', "'length'", ':', '[', ']', '}', 'storage', '=', '{', "'name'", ':', '[', ']', ',', "'bus'", ':', '[', ']', ',', "'p_nom'", ':', '[', ']', ',', "'p_nom_extendable'", ':', '[', ']', ',', "'p_nom_min'", ':', '[', ']', ',', "'p_nom_max'", ':', '[', ']', ',', "'capital_cost'", ':', '[', ']', ',', "'max_hours'", ':', '[', ']', '}', '# create dictionary representing generators and associated buses', 'for', 'gen', 'in', 'generators', ':', 'bus_name', '=', "'_'", '.', 'join', '(', '[', "'Bus'", ',', 'repr', '(', 'gen', ')', ']', ')', 'generator', '[', "'name'", ']', '.', 'append', '(', 'repr', '(', 'gen', ')', ')', 'generator', '[', "'bus'", ']', '.', 'append', '(', 'bus_name', ')', 'generator', '[', "'control'", ']', '.', 'append', '(', "'PQ'", ')', 'generator', '[', "'p_nom'", ']', '.', 'append', '(', 'gen', '.', 'nominal_capacity', '/', '1e3', ')', 'generator', '[', "'type'", ']', '.', 'append', '(', "'_'", '.', 'join', '(', '[', 'gen', '.', 'type', ',', 'gen', '.', 'subtype', ']', ')', ')', 'generator', '[', "'p_nom_extendable'", ']', '.', 'append', '(', 'False', ')', 'generator', '[', "'p_nom_min'", ']', '.', 'append', '(', '0', ')', '# 0.3', 'generator', '[', "'p_nom_max'", ']', '.', 'append', '(', '0', ')', 'generator', '[', "'capital_cost'", ']', '.', 'append', '(', '0', ')', 'bus', '[', "'name'", ']', '.', 'append', '(', 'bus_name', ')', 'bus', '[', "'v_nom'", ']', '.', 'append', '(', 'gen', '.', 'grid', '.', 'voltage_nom', ')', 'bus', '[', "'x'", ']', '.', 'append', '(', 'None', ')', 'bus', '[', "'y'", ']', '.', 'append', '(', 'None', ')', '# create dictionary representing branch tees', 'for', 'bt', 'in', 
'branch_tees', ':', 'bus', '[', "'name'", ']', '.', 'append', '(', "'_'", '.', 'join', '(', '[', "'Bus'", ',', 'repr', '(', 'bt', ')', ']', ')', ')', 'bus', '[', "'v_nom'", ']', '.', 'append', '(', 'bt', '.', 'grid', '.', 'voltage_nom', ')', 'bus', '[', "'x'", ']', '.', 'append', '(', 'None', ')', 'bus', '[', "'y'", ']', '.', 'append', '(', 'None', ')', '# create dataframes representing loads and associated buses', 'for', 'lo', 'in', 'loads', ':', 'bus_name', '=', "'_'", '.', 'join', '(', '[', "'Bus'", ',', 'repr', '(', 'lo', ')', ']', ')', 'load', '[', "'name'", ']', '.', 'append', '(', 'repr', '(', 'lo', ')', ')', 'load', '[', "'bus'", ']', '.', 'append', '(', 'bus_name', ')', 'bus', '[', "'name'", ']', '.', 'append', '(', 'bus_name', ')', 'bus', '[', "'v_nom'", ']', '.', 'append', '(', 'lo', '.', 'grid', '.', 'voltage_nom', ')', 'bus', '[', "'x'", ']', '.', 'append', '(', 'None', ')', 'bus', '[', "'y'", ']', '.', 'append', '(', 'None', ')', '# create dataframe for lines', 'for', 'l', 'in', 'lines', ':', 'line', '[', "'name'", ']', '.', 'append', '(', 'repr', '(', 'l', '[', "'line'", ']', ')', ')', 'if', 'l', '[', "'adj_nodes'", ']', '[', '0', ']', 'in', 'lv_stations', ':', 'line', '[', "'bus0'", ']', '.', 'append', '(', "'_'", '.', 'join', '(', '[', "'Bus'", ',', 'l', '[', "'adj_nodes'", ']', '[', '0', ']', '.', '__repr__', '(', 'side', '=', "'lv'", ')', ']', ')', ')', 'else', ':', 'line', '[', "'bus0'", ']', '.', 'append', '(', "'_'", '.', 'join', '(', '[', "'Bus'", ',', 'repr', '(', 'l', '[', "'adj_nodes'", ']', '[', '0', ']', ')', ']', ')', ')', 'if', 'l', '[', "'adj_nodes'", ']', '[', '1', ']', 'in', 'lv_stations', ':', 'line', '[', "'bus1'", ']', '.', 'append', '(', "'_'", '.', 'join', '(', '[', "'Bus'", ',', 'l', '[', "'adj_nodes'", ']', '[', '1', ']', '.', '__repr__', '(', 'side', '=', "'lv'", ')', ']', ')', ')', 'else', ':', 'line', '[', "'bus1'", ']', '.', 'append', '(', "'_'", '.', 'join', '(', '[', "'Bus'", ',', 'repr', '(', 'l', '[', "'adj_nodes'", ']', '[', '1', ']', ')', ']', ')', ')', 'line', '[', "'type'", ']', '.', 'append', '(', '""', ')', 'line', '[', "'x'", ']', '.', 'append', '(', 'l', '[', "'line'", ']', '.', 'type', '[', "'L'", ']', '*', 'omega', '/', '1e3', '*', 'l', '[', "'line'", ']', '.', 'length', ')', 'line', '[', "'r'", ']', '.', 'append', '(', 'l', '[', "'line'", ']', '.', 'type', '[', "'R'", ']', '*', 'l', '[', "'line'", ']', '.', 'length', ')', 's_nom', '=', 'sqrt', '(', '3', ')', '*', 'l', '[', "'line'", ']', '.', 'type', '[', "'I_max_th'", ']', '*', 'l', '[', "'line'", ']', '.', 'type', '[', "'U_n'", ']', '/', '1e3', 'line', '[', "'s_nom'", ']', '.', 'append', '(', 's_nom', ')', 'line', '[', "'s_nom_min'", ']', '.', 'append', '(', 's_nom', ')', 'line', '[', "'s_max_pu'", ']', '.', 'append', '(', '0.6', ')', 'line', '[', "'s_nom_extendable'", ']', '.', 'append', '(', 'True', ')', 'line', '[', "'capital_cost'", ']', '.', 'append', '(', '100', ')', 'line', '[', "'length'", ']', '.', 'append', '(', 'l', '[', "'line'", ']', '.', 'length', ')', 'lv_components', '=', '{', "'Generator'", ':', 'pd', '.', 'DataFrame', '(', 'generator', ')', '.', 'set_index', '(', "'name'", ')', ',', "'Bus'", ':', 'pd', '.', 'DataFrame', '(', 'bus', ')', '.', 'set_index', '(', "'name'", ')', ',', "'Load'", ':', 'pd', '.', 'DataFrame', '(', 'load', ')', '.', 'set_index', '(', "'name'", ')', ',', "'Line'", ':', 'pd', '.', 'DataFrame', '(', 'line', ')', '.', 'set_index', '(', "'name'", ')', ',', "'StorageUnit'", ':', 'pd', '.', 'DataFrame', '(', 'storage', ')', '.', 'set_index', 
'(', "'name'", ')', '}', 'return', 'lv_components'] | Convert LV grid topology to PyPSA representation
Includes grid topology of all LV grids of :attr:`~.grid.grid.Grid.lv_grids`
Parameters
----------
network : Network
eDisGo grid container
Returns
-------
dict of :pandas:`pandas.DataFrame<dataframe>`
A DataFrame for each type of PyPSA components constituting the grid
topology. Keys included
* 'Generator'
* 'Load'
* 'Line'
* 'BranchTee'
* 'StorageUnit' | ['Convert', 'LV', 'grid', 'topology', 'to', 'PyPSA', 'representation'] | train | https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/tools/pypsa_io_lopf.py#L436-L587 |
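The lv_to_pypsa record above builds each PyPSA component table as a dict of lists and converts it with pd.DataFrame(...).set_index('name'); line reactance comes from x = omega * L * length and the rating from s_nom = sqrt(3) * I_max_th * U_n. The snippet below is a minimal standalone sketch of that arithmetic only — the cable-type numbers and unit conventions are illustrative assumptions, and it does not call the eDisGo or PyPSA APIs.

from math import pi, sqrt

import pandas as pd

omega = 2 * pi * 50  # angular frequency at 50 Hz

# Hypothetical cable type: L in mH/km, R in ohm/km, thermal current limit in A,
# nominal voltage in kV -- all values made up for illustration.
line_type = {'L': 0.26, 'R': 0.21, 'I_max_th': 270.0, 'U_n': 0.4}
length_km = 0.5

line = {
    'name': ['Line_example'],
    'x': [line_type['L'] * omega / 1e3 * length_km],                     # ohm
    'r': [line_type['R'] * length_km],                                   # ohm
    's_nom': [sqrt(3) * line_type['I_max_th'] * line_type['U_n'] / 1e3], # MVA (given the assumed units)
    'length': [length_km],
}

# Same dict-of-lists -> DataFrame-indexed-by-name pattern as in the function above.
line_df = pd.DataFrame(line).set_index('name')
print(line_df)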
4,496 | allenai/allennlp | allennlp/semparse/domain_languages/nlvr_language.py | NlvrLanguage.top | def top(self, objects: Set[Object]) -> Set[Object]:
"""
Return the topmost objects (i.e. minimum y_loc). The comparison is done separately for each
box.
"""
objects_per_box = self._separate_objects_by_boxes(objects)
return_set: Set[Object] = set()
for _, box_objects in objects_per_box.items():
min_y_loc = min([obj.y_loc for obj in box_objects])
return_set.update(set([obj for obj in box_objects if obj.y_loc == min_y_loc]))
return return_set | python | def top(self, objects: Set[Object]) -> Set[Object]:
"""
Return the topmost objects (i.e. minimum y_loc). The comparison is done separately for each
box.
"""
objects_per_box = self._separate_objects_by_boxes(objects)
return_set: Set[Object] = set()
for _, box_objects in objects_per_box.items():
min_y_loc = min([obj.y_loc for obj in box_objects])
return_set.update(set([obj for obj in box_objects if obj.y_loc == min_y_loc]))
return return_set | ['def', 'top', '(', 'self', ',', 'objects', ':', 'Set', '[', 'Object', ']', ')', '->', 'Set', '[', 'Object', ']', ':', 'objects_per_box', '=', 'self', '.', '_separate_objects_by_boxes', '(', 'objects', ')', 'return_set', ':', 'Set', '[', 'Object', ']', '=', 'set', '(', ')', 'for', '_', ',', 'box_objects', 'in', 'objects_per_box', '.', 'items', '(', ')', ':', 'min_y_loc', '=', 'min', '(', '[', 'obj', '.', 'y_loc', 'for', 'obj', 'in', 'box_objects', ']', ')', 'return_set', '.', 'update', '(', 'set', '(', '[', 'obj', 'for', 'obj', 'in', 'box_objects', 'if', 'obj', '.', 'y_loc', '==', 'min_y_loc', ']', ')', ')', 'return', 'return_set'] | Return the topmost objects (i.e. minimum y_loc). The comparison is done separately for each
box. | ['Return', 'the', 'topmost', 'objects', '(', 'i', '.', 'e', '.', 'minimum', 'y_loc', ')', '.', 'The', 'comparison', 'is', 'done', 'separately', 'for', 'each', 'box', '.'] | train | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/nlvr_language.py#L344-L354 |
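NlvrLanguage.top above groups the given objects by the box they belong to and keeps, per box, the objects whose y_loc equals the minimum (in image coordinates a smaller y_loc is higher up). Below is a standalone sketch of that grouping logic using a hypothetical Obj stand-in; it does not use allennlp's Object/Box classes or the _separate_objects_by_boxes helper.

from collections import defaultdict
from dataclasses import dataclass
from typing import List, Set


@dataclass(frozen=True)
class Obj:
    box_id: int
    y_loc: int


def topmost(objects: List[Obj]) -> Set[Obj]:
    per_box = defaultdict(list)
    for obj in objects:
        per_box[obj.box_id].append(obj)  # group objects by their box
    result: Set[Obj] = set()
    for box_objects in per_box.values():
        min_y = min(o.y_loc for o in box_objects)  # smallest y_loc = topmost in the image
        result.update(o for o in box_objects if o.y_loc == min_y)
    return result


print(topmost([Obj(0, 10), Obj(0, 40), Obj(1, 5), Obj(1, 5)]))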
4,497 | theislab/scanpy | scanpy/preprocessing/_deprecated/highly_variable_genes.py | filter_genes_cv_deprecated | def filter_genes_cv_deprecated(X, Ecutoff, cvFilter):
"""Filter genes by coefficient of variance and mean.
See `filter_genes_dispersion`.
Reference: Weinreb et al. (2017).
"""
if issparse(X):
raise ValueError('Not defined for sparse input. See `filter_genes_dispersion`.')
mean_filter = np.mean(X, axis=0) > Ecutoff
var_filter = np.std(X, axis=0) / (np.mean(X, axis=0) + .0001) > cvFilter
gene_subset = np.nonzero(np.all([mean_filter, var_filter], axis=0))[0]
return gene_subset | python | def filter_genes_cv_deprecated(X, Ecutoff, cvFilter):
"""Filter genes by coefficient of variance and mean.
See `filter_genes_dispersion`.
Reference: Weinreb et al. (2017).
"""
if issparse(X):
raise ValueError('Not defined for sparse input. See `filter_genes_dispersion`.')
mean_filter = np.mean(X, axis=0) > Ecutoff
var_filter = np.std(X, axis=0) / (np.mean(X, axis=0) + .0001) > cvFilter
gene_subset = np.nonzero(np.all([mean_filter, var_filter], axis=0))[0]
return gene_subset | ['def', 'filter_genes_cv_deprecated', '(', 'X', ',', 'Ecutoff', ',', 'cvFilter', ')', ':', 'if', 'issparse', '(', 'X', ')', ':', 'raise', 'ValueError', '(', "'Not defined for sparse input. See `filter_genes_dispersion`.'", ')', 'mean_filter', '=', 'np', '.', 'mean', '(', 'X', ',', 'axis', '=', '0', ')', '>', 'Ecutoff', 'var_filter', '=', 'np', '.', 'std', '(', 'X', ',', 'axis', '=', '0', ')', '/', '(', 'np', '.', 'mean', '(', 'X', ',', 'axis', '=', '0', ')', '+', '.0001', ')', '>', 'cvFilter', 'gene_subset', '=', 'np', '.', 'nonzero', '(', 'np', '.', 'all', '(', '[', 'mean_filter', ',', 'var_filter', ']', ',', 'axis', '=', '0', ')', ')', '[', '0', ']', 'return', 'gene_subset'] | Filter genes by coefficient of variance and mean.
See `filter_genes_dispersion`.
Reference: Weinreb et al. (2017). | ['Filter', 'genes', 'by', 'coefficient', 'of', 'variance', 'and', 'mean', '.'] | train | https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/preprocessing/_deprecated/highly_variable_genes.py#L196-L208 |
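filter_genes_cv_deprecated above keeps genes whose mean expression exceeds Ecutoff and whose coefficient of variation (std divided by mean, with a small additive constant to avoid division by zero) exceeds cvFilter, and it rejects sparse input. The following is a standalone NumPy sketch of the same thresholding on a toy dense matrix; the thresholds and data are arbitrary and the deprecated scanpy helper itself is not imported.

import numpy as np

rng = np.random.default_rng(0)
X = rng.poisson(lam=2.0, size=(100, 50)).astype(float)  # toy cells-by-genes matrix

Ecutoff, cvFilter = 1.5, 0.5  # hypothetical thresholds
mean_filter = np.mean(X, axis=0) > Ecutoff
var_filter = np.std(X, axis=0) / (np.mean(X, axis=0) + .0001) > cvFilter
gene_subset = np.nonzero(np.all([mean_filter, var_filter], axis=0))[0]  # genes passing both filters
print(gene_subset.shape, gene_subset[:10])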
4,498 | boriel/zxbasic | arch/zx48k/optimizer.py | Registers.reset_flags | def reset_flags(self):
""" Resets flags to an "unknown state"
"""
self.C = None
self.Z = None
self.P = None
self.S = None | python | def reset_flags(self):
""" Resets flags to an "unknown state"
"""
self.C = None
self.Z = None
self.P = None
self.S = None | ['def', 'reset_flags', '(', 'self', ')', ':', 'self', '.', 'C', '=', 'None', 'self', '.', 'Z', '=', 'None', 'self', '.', 'P', '=', 'None', 'self', '.', 'S', '=', 'None'] | Resets flags to an "unknown state" | ['Resets', 'flags', 'to', 'an', 'unknown', 'state'] | train | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/optimizer.py#L412-L418 |
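Registers.reset_flags above models "we no longer know the CPU flag values" by setting C, Z, P and S to None. A toy sketch of that idea follows; the real Registers class in the optimizer tracks far more state (register and memory contents) than this stand-in.

class Flags:
    def __init__(self):
        self.C = self.Z = self.P = self.S = None  # carry, zero, parity/overflow, sign

    def reset_flags(self):
        """Forget everything known about the flags."""
        self.C = None
        self.Z = None
        self.P = None
        self.S = None


f = Flags()
f.Z = 1          # e.g. after an instruction known to set the zero flag
f.reset_flags()  # after an instruction whose effect on the flags is unknown
print(f.C, f.Z, f.P, f.S)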
4,499 | cmbruns/pyopenvr | src/openvr/glframework/glfw_app.py | GlfwBaseApp.render_scene | def render_scene(self):
"render scene one time"
self.init_gl() # should be a no-op after the first frame is rendered
glfw.make_context_current(self.window)
self.renderer.render_scene()
# Done rendering
# glfw.swap_buffers(self.window) # avoid double buffering to avoid stalling
glFlush() # single buffering
glfw.poll_events() | python | def render_scene(self):
"render scene one time"
self.init_gl() # should be a no-op after the first frame is rendered
glfw.make_context_current(self.window)
self.renderer.render_scene()
# Done rendering
# glfw.swap_buffers(self.window) # avoid double buffering to avoid stalling
glFlush() # single buffering
glfw.poll_events() | ['def', 'render_scene', '(', 'self', ')', ':', 'self', '.', 'init_gl', '(', ')', '# should be a no-op after the first frame is rendered', 'glfw', '.', 'make_context_current', '(', 'self', '.', 'window', ')', 'self', '.', 'renderer', '.', 'render_scene', '(', ')', '# Done rendering', '# glfw.swap_buffers(self.window) # avoid double buffering to avoid stalling', 'glFlush', '(', ')', '# single buffering', 'glfw', '.', 'poll_events', '(', ')'] | render scene one time | ['render', 'scene', 'one', 'time'] | train | https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/glframework/glfw_app.py#L56-L64 |
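GlfwBaseApp.render_scene above makes the window's context current, asks the renderer to draw, and calls glFlush() under single buffering instead of swapping buffers, then polls events. The sketch below is a minimal single-buffered GLFW loop in the same spirit; it assumes the glfw and PyOpenGL packages and a working display, and the clear call merely stands in for the app's renderer.render_scene().

import glfw
from OpenGL.GL import GL_COLOR_BUFFER_BIT, glClear, glClearColor, glFlush

if not glfw.init():
    raise RuntimeError("GLFW initialisation failed")
try:
    glfw.window_hint(glfw.DOUBLEBUFFER, False)   # single buffering, as in the method above
    window = glfw.create_window(640, 480, "sketch", None, None)
    if not window:
        raise RuntimeError("could not create a GLFW window")
    glfw.make_context_current(window)
    glClearColor(0.2, 0.2, 0.2, 1.0)
    while not glfw.window_should_close(window):
        glClear(GL_COLOR_BUFFER_BIT)             # stand-in for the real scene rendering
        glFlush()                                # flush instead of swapping buffers
        glfw.poll_events()
finally:
    glfw.terminate()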