Unnamed: 0 (int64, 0–10k) | repository_name (stringlengths 7–54) | func_path_in_repository (stringlengths 5–223) | func_name (stringlengths 1–134) | whole_func_string (stringlengths 100–30.3k) | language (stringclasses 1 value) | func_code_string (stringlengths 100–30.3k) | func_code_tokens (stringlengths 138–33.2k) | func_documentation_string (stringlengths 1–15k) | func_documentation_tokens (stringlengths 5–5.14k) | split_name (stringclasses 1 value) | func_code_url (stringlengths 91–315) |
---|---|---|---|---|---|---|---|---|---|---|---|
8,500 | SatelliteQE/nailgun | nailgun/entity_mixins.py | EntityCreateMixin.create_json | def create_json(self, create_missing=None):
"""Create an entity.
Call :meth:`create_raw`. Check the response status code, decode JSON
and return the decoded JSON as a dict.
:return: A dict. The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` if the response has an HTTP
4XX or 5XX status code.
:raises: ``ValueError`` If the response JSON can not be decoded.
"""
response = self.create_raw(create_missing)
response.raise_for_status()
return response.json() | python | def create_json(self, create_missing=None):
"""Create an entity.
Call :meth:`create_raw`. Check the response status code, decode JSON
and return the decoded JSON as a dict.
:return: A dict. The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` if the response has an HTTP
4XX or 5XX status code.
:raises: ``ValueError`` If the response JSON can not be decoded.
"""
response = self.create_raw(create_missing)
response.raise_for_status()
return response.json() | ['def', 'create_json', '(', 'self', ',', 'create_missing', '=', 'None', ')', ':', 'response', '=', 'self', '.', 'create_raw', '(', 'create_missing', ')', 'response', '.', 'raise_for_status', '(', ')', 'return', 'response', '.', 'json', '(', ')'] | Create an entity.
Call :meth:`create_raw`. Check the response status code, decode JSON
and return the decoded JSON as a dict.
:return: A dict. The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` if the response has an HTTP
4XX or 5XX status code.
:raises: ``ValueError`` If the response JSON can not be decoded. | ['Create', 'an', 'entity', '.'] | train | https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entity_mixins.py#L921-L935 |
8,501 | 475Cumulus/TBone | tbone/db/models.py | create_collection | async def create_collection(db, model_class: MongoCollectionMixin):
'''
Creates a MongoDB collection and all the declared indices in the model's ``Meta`` class
:param db:
A database handle
:type db:
motor.motor_asyncio.AsyncIOMotorClient
:param model_class:
The model to create
:type model_class:
Subclass of ``Model`` mixed with ``MongoCollectionMixin``
'''
name = model_class.get_collection_name()
if name:
try:
# create collection
coll = await db.create_collection(name, **model_class._meta.creation_args)
except CollectionInvalid: # collection already exists
coll = db[name]
# create indices
if hasattr(model_class._meta, 'indices') and isinstance(model_class._meta.indices, list):
for index in model_class._meta.indices:
try:
index_kwargs = {
'name': index.get('name', '_'.join([x[0] for x in index['fields']])),
'unique': index.get('unique', False),
'sparse': index.get('sparse', False),
'expireAfterSeconds': index.get('expireAfterSeconds', None),
'background': True
}
if 'partialFilterExpression' in index:
index_kwargs['partialFilterExpression'] = index.get('partialFilterExpression', {})
await db[name].create_index(
index['fields'],
**index_kwargs
)
except OperationFailure as ex:
pass # index already exists ? TODO: do something with this
return coll
return None | python | async def create_collection(db, model_class: MongoCollectionMixin):
'''
Creates a MongoDB collection and all the declared indices in the model's ``Meta`` class
:param db:
A database handle
:type db:
motor.motor_asyncio.AsyncIOMotorClient
:param model_class:
The model to create
:type model_class:
Subclass of ``Model`` mixed with ``MongoCollectionMixin``
'''
name = model_class.get_collection_name()
if name:
try:
# create collection
coll = await db.create_collection(name, **model_class._meta.creation_args)
except CollectionInvalid: # collection already exists
coll = db[name]
# create indices
if hasattr(model_class._meta, 'indices') and isinstance(model_class._meta.indices, list):
for index in model_class._meta.indices:
try:
index_kwargs = {
'name': index.get('name', '_'.join([x[0] for x in index['fields']])),
'unique': index.get('unique', False),
'sparse': index.get('sparse', False),
'expireAfterSeconds': index.get('expireAfterSeconds', None),
'background': True
}
if 'partialFilterExpression' in index:
index_kwargs['partialFilterExpression'] = index.get('partialFilterExpression', {})
await db[name].create_index(
index['fields'],
**index_kwargs
)
except OperationFailure as ex:
pass # index already exists ? TODO: do something with this
return coll
return None | ['async', 'def', 'create_collection', '(', 'db', ',', 'model_class', ':', 'MongoCollectionMixin', ')', ':', 'name', '=', 'model_class', '.', 'get_collection_name', '(', ')', 'if', 'name', ':', 'try', ':', '# create collection', 'coll', '=', 'await', 'db', '.', 'create_collection', '(', 'name', ',', '*', '*', 'model_class', '.', '_meta', '.', 'creation_args', ')', 'except', 'CollectionInvalid', ':', '# collection already exists', 'coll', '=', 'db', '[', 'name', ']', '# create indices', 'if', 'hasattr', '(', 'model_class', '.', '_meta', ',', "'indices'", ')', 'and', 'isinstance', '(', 'model_class', '.', '_meta', '.', 'indices', ',', 'list', ')', ':', 'for', 'index', 'in', 'model_class', '.', '_meta', '.', 'indices', ':', 'try', ':', 'index_kwargs', '=', '{', "'name'", ':', 'index', '.', 'get', '(', "'name'", ',', "'_'", '.', 'join', '(', '[', 'x', '[', '0', ']', 'for', 'x', 'in', 'index', '[', "'fields'", ']', ']', ')', ')', ',', "'unique'", ':', 'index', '.', 'get', '(', "'unique'", ',', 'False', ')', ',', "'sparse'", ':', 'index', '.', 'get', '(', "'sparse'", ',', 'False', ')', ',', "'expireAfterSeconds'", ':', 'index', '.', 'get', '(', "'expireAfterSeconds'", ',', 'None', ')', ',', "'background'", ':', 'True', '}', 'if', "'partialFilterExpression'", 'in', 'index', ':', 'index_kwargs', '[', "'partialFilterExpression'", ']', '=', 'index', '.', 'get', '(', "'partialFilterExpression'", ',', '{', '}', ')', 'await', 'db', '[', 'name', ']', '.', 'create_index', '(', 'index', '[', "'fields'", ']', ',', '*', '*', 'index_kwargs', ')', 'except', 'OperationFailure', 'as', 'ex', ':', 'pass', '# index already exists ? TODO: do something with this', 'return', 'coll', 'return', 'None'] | Creates a MongoDB collection and all the declared indices in the model's ``Meta`` class
:param db:
A database handle
:type db:
motor.motor_asyncio.AsyncIOMotorClient
:param model_class:
The model to create
:type model_class:
Subclass of ``Model`` mixed with ``MongoCollectionMixin`` | ['Creates', 'a', 'MongoDB', 'collection', 'and', 'all', 'the', 'declared', 'indices', 'in', 'the', 'model', 's', 'Meta', 'class'] | train | https://github.com/475Cumulus/TBone/blob/5a6672d8bbac449a0ab9e99560609f671fe84d4d/tbone/db/models.py#L336-L378 |
8,502 | JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_wp.py | WPModule.get_default_frame | def get_default_frame(self):
'''default frame for waypoints'''
if self.settings.terrainalt == 'Auto':
if self.get_mav_param('TERRAIN_FOLLOW',0) == 1:
return mavutil.mavlink.MAV_FRAME_GLOBAL_TERRAIN_ALT
return mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT
if self.settings.terrainalt == 'True':
return mavutil.mavlink.MAV_FRAME_GLOBAL_TERRAIN_ALT
return mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT | python | def get_default_frame(self):
'''default frame for waypoints'''
if self.settings.terrainalt == 'Auto':
if self.get_mav_param('TERRAIN_FOLLOW',0) == 1:
return mavutil.mavlink.MAV_FRAME_GLOBAL_TERRAIN_ALT
return mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT
if self.settings.terrainalt == 'True':
return mavutil.mavlink.MAV_FRAME_GLOBAL_TERRAIN_ALT
return mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT | ['def', 'get_default_frame', '(', 'self', ')', ':', 'if', 'self', '.', 'settings', '.', 'terrainalt', '==', "'Auto'", ':', 'if', 'self', '.', 'get_mav_param', '(', "'TERRAIN_FOLLOW'", ',', '0', ')', '==', '1', ':', 'return', 'mavutil', '.', 'mavlink', '.', 'MAV_FRAME_GLOBAL_TERRAIN_ALT', 'return', 'mavutil', '.', 'mavlink', '.', 'MAV_FRAME_GLOBAL_RELATIVE_ALT', 'if', 'self', '.', 'settings', '.', 'terrainalt', '==', "'True'", ':', 'return', 'mavutil', '.', 'mavlink', '.', 'MAV_FRAME_GLOBAL_TERRAIN_ALT', 'return', 'mavutil', '.', 'mavlink', '.', 'MAV_FRAME_GLOBAL_RELATIVE_ALT'] | default frame for waypoints | ['default', 'frame', 'for', 'waypoints'] | train | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_wp.py#L202-L210 |
8,503 | msiedlarek/wiring | wiring/scanning/scan.py | scan_to_graph | def scan_to_graph(python_modules, graph, ignore=tuple()):
"""
Scans `python_modules` with :py:func:`scan` and registers found providers
in `graph`.
`ignore` argument is passed through to :py:func:`scan`.
"""
def callback(specification, provider):
graph.register_provider(specification, provider)
scan(python_modules, callback, ignore=ignore) | python | def scan_to_graph(python_modules, graph, ignore=tuple()):
"""
Scans `python_modules` with :py:func:`scan` and registers found providers
in `graph`.
`ignore` argument is passed through to :py:func:`scan`.
"""
def callback(specification, provider):
graph.register_provider(specification, provider)
scan(python_modules, callback, ignore=ignore) | ['def', 'scan_to_graph', '(', 'python_modules', ',', 'graph', ',', 'ignore', '=', 'tuple', '(', ')', ')', ':', 'def', 'callback', '(', 'specification', ',', 'provider', ')', ':', 'graph', '.', 'register_provider', '(', 'specification', ',', 'provider', ')', 'scan', '(', 'python_modules', ',', 'callback', ',', 'ignore', '=', 'ignore', ')'] | Scans `python_modules` with :py:func:`scan` and registers found providers
in `graph`.
`ignore` argument is passed through to :py:func:`scan`. | ['Scans', 'python_modules', 'with', ':', 'py', ':', 'func', ':', 'scan', 'and', 'registers', 'found', 'providers', 'in', 'graph', '.'] | train | https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/scanning/scan.py#L28-L37 |
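A hedged sketch of feeding the scanner above into a `Graph`; the `myapp.services` package is hypothetical, and passing imported module objects to `scan_to_graph` is an assumption about what `scan()` accepts.

```python
# Hedged sketch: myapp.services is a hypothetical package whose modules use
# wiring's scanning decorators to register providers.
import myapp.services
from wiring import Graph
from wiring.scanning.scan import scan_to_graph

graph = Graph()
scan_to_graph([myapp.services], graph)  # registers every provider the scan finds
graph.validate()                        # optional: fail fast on unsatisfied dependencies
```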
8,504 | QualiSystems/vCenterShell | package/cloudshell/cp/vcenter/network/vnic/vnic_service.py | VNicService.device_is_attached_to_network | def device_is_attached_to_network(device, network_name):
"""
Checks if the device has a backing with the right network name
:param <vim.vm.Device> device: instance of adapter
:param <str> network_name: network name
:return:
"""
try:
backing = device.backing
except:
return False
if hasattr(backing, 'network') and hasattr(backing.network, 'name'):
return network_name == backing.network.name
elif hasattr(backing, 'port') and hasattr(backing.port, 'portgroupKey'):
return network_name == backing.port.portgroupKey
return False | python | def device_is_attached_to_network(device, network_name):
"""
Checks if the device has a backing with the right network name
:param <vim.vm.Device> device: instance of adapter
:param <str> network_name: network name
:return:
"""
try:
backing = device.backing
except:
return False
if hasattr(backing, 'network') and hasattr(backing.network, 'name'):
return network_name == backing.network.name
elif hasattr(backing, 'port') and hasattr(backing.port, 'portgroupKey'):
return network_name == backing.port.portgroupKey
return False | ['def', 'device_is_attached_to_network', '(', 'device', ',', 'network_name', ')', ':', 'try', ':', 'backing', '=', 'device', '.', 'backing', 'except', ':', 'return', 'False', 'if', 'hasattr', '(', 'backing', ',', "'network'", ')', 'and', 'hasattr', '(', 'backing', '.', 'network', ',', "'name'", ')', ':', 'return', 'network_name', '==', 'backing', '.', 'network', '.', 'name', 'elif', 'hasattr', '(', 'backing', ',', "'port'", ')', 'and', 'hasattr', '(', 'backing', '.', 'port', ',', "'portgroupKey'", ')', ':', 'return', 'network_name', '==', 'backing', '.', 'port', '.', 'portgroupKey', 'return', 'False'] | Checks if the device has a backing with of the right network name
:param <vim.vm.Device> device: instance of adapter
:param <str> network_name: network name
:return: | ['Checks', 'if', 'the', 'device', 'has', 'a', 'backing', 'with', 'of', 'the', 'right', 'network', 'name', ':', 'param', '<vim', '.', 'vm', '.', 'Device', '>', 'device', ':', 'instance', 'of', 'adapter', ':', 'param', '<str', '>', 'network_name', ':', 'network', 'name', ':', 'return', ':'] | train | https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/network/vnic/vnic_service.py#L91-L107 |
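Since the check above only inspects attribute shapes on `device.backing`, it can be exercised with simple stand-ins instead of real pyVmomi devices; the sketch below assumes `VNicService` has been imported from the module shown in this row.

```python
# Toy stand-ins for vim.vm.Device objects; only the backing attribute shape matters.
from types import SimpleNamespace

nic = SimpleNamespace(backing=SimpleNamespace(network=SimpleNamespace(name='VM Network')))
dvs_nic = SimpleNamespace(backing=SimpleNamespace(port=SimpleNamespace(portgroupKey='dvportgroup-42')))

print(VNicService.device_is_attached_to_network(nic, 'VM Network'))          # True
print(VNicService.device_is_attached_to_network(dvs_nic, 'dvportgroup-42'))  # True
print(VNicService.device_is_attached_to_network(nic, 'Backup'))              # False
```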
8,505 | openspending/ckanext-budgets | ckanext/budgets/plugin.py | BudgetDataPackagePlugin.are_budget_data_package_fields_filled_in | def are_budget_data_package_fields_filled_in(self, resource):
"""
Check if the budget data package fields are all filled in because
if not then this can't be a budget data package
"""
fields = ['country', 'currency', 'year', 'status']
return all([self.in_resource(f, resource) for f in fields]) | python | def are_budget_data_package_fields_filled_in(self, resource):
"""
Check if the budget data package fields are all filled in because
if not then this can't be a budget data package
"""
fields = ['country', 'currency', 'year', 'status']
return all([self.in_resource(f, resource) for f in fields]) | ['def', 'are_budget_data_package_fields_filled_in', '(', 'self', ',', 'resource', ')', ':', 'fields', '=', '[', "'country'", ',', "'currency'", ',', "'year'", ',', "'status'", ']', 'return', 'all', '(', '[', 'self', '.', 'in_resource', '(', 'f', ',', 'resource', ')', 'for', 'f', 'in', 'fields', ']', ')'] | Check if the budget data package fields are all filled in because
if not then this can't be a budget data package | ['Check', 'if', 'the', 'budget', 'data', 'package', 'fields', 'are', 'all', 'filled', 'in', 'because', 'if', 'not', 'then', 'this', 'can', 't', 'be', 'a', 'budget', 'data', 'package'] | train | https://github.com/openspending/ckanext-budgets/blob/07dde5a4fdec6b36ceb812b70f0c31cdecb40cfc/ckanext/budgets/plugin.py#L228-L234 |
8,506 | cvxopt/chompack | src/python/misc.py | symmetrize | def symmetrize(A):
"""
Returns a symmetric matrix from a sparse square matrix :math:`A`. Only the
lower triangular entries of :math:`A` are accessed.
"""
assert type(A) is spmatrix, "argument must be a sparse matrix"
assert A.size[0] == A.size[1], "argument must be a square matrix"
idx = [i for i,ij in enumerate(zip(A.I,A.J)) if ij[0] > ij[1]]
return tril(A) + spmatrix(A.V[idx], A.J[idx], A.I[idx], A.size) | python | def symmetrize(A):
"""
Returns a symmetric matrix from a sparse square matrix :math:`A`. Only the
lower triangular entries of :math:`A` are accessed.
"""
assert type(A) is spmatrix, "argument must be a sparse matrix"
assert A.size[0] == A.size[1], "argument must be a square matrix"
idx = [i for i,ij in enumerate(zip(A.I,A.J)) if ij[0] > ij[1]]
return tril(A) + spmatrix(A.V[idx], A.J[idx], A.I[idx], A.size) | ['def', 'symmetrize', '(', 'A', ')', ':', 'assert', 'type', '(', 'A', ')', 'is', 'spmatrix', ',', '"argument must be a sparse matrix"', 'assert', 'A', '.', 'size', '[', '0', ']', '==', 'A', '.', 'size', '[', '1', ']', ',', '"argument must me a square matrix"', 'idx', '=', '[', 'i', 'for', 'i', ',', 'ij', 'in', 'enumerate', '(', 'zip', '(', 'A', '.', 'I', ',', 'A', '.', 'J', ')', ')', 'if', 'ij', '[', '0', ']', '>', 'ij', '[', '1', ']', ']', 'return', 'tril', '(', 'A', ')', '+', 'spmatrix', '(', 'A', '.', 'V', '[', 'idx', ']', ',', 'A', '.', 'J', '[', 'idx', ']', ',', 'A', '.', 'I', '[', 'idx', ']', ',', 'A', '.', 'size', ')'] | Returns a symmetric matrix from a sparse square matrix :math:`A`. Only the
lower triangular entries of :math:`A` are accessed. | ['Returns', 'a', 'symmetric', 'matrix', 'from', 'a', 'sparse', 'square', 'matrix', ':', 'math', ':', 'A', '.', 'Only', 'the', 'lower', 'triangular', 'entries', 'of', ':', 'math', ':', 'A', 'are', 'accessed', '.'] | train | https://github.com/cvxopt/chompack/blob/e07106b58b8055c34f6201e8c954482f86987833/src/python/misc.py#L32-L40 |
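A small self-contained check of the helper above, assuming `symmetrize` is importable from chompack; the matrix values are arbitrary.

```python
# Build a 3x3 lower-triangular sparse matrix and mirror it; only tril(A) is read.
from cvxopt import spmatrix, matrix
from chompack import symmetrize

A = spmatrix([4.0, 1.0, 2.0, 5.0, 6.0], [0, 1, 2, 1, 2], [0, 0, 0, 1, 2], (3, 3))
S = symmetrize(A)
print(matrix(S))  # dense view: S[0, 1] and S[0, 2] now mirror S[1, 0] and S[2, 0]
```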
8,507 | Fantomas42/django-blog-zinnia | zinnia/views/mixins/entry_protection.py | EntryProtectionMixin.get | def get(self, request, *args, **kwargs):
"""
Do the login and password protection.
"""
response = super(EntryProtectionMixin, self).get(
request, *args, **kwargs)
if self.object.login_required and not request.user.is_authenticated:
return self.login()
if (self.object.password and self.object.password !=
self.request.session.get(self.session_key % self.object.pk)):
return self.password()
return response | python | def get(self, request, *args, **kwargs):
"""
Do the login and password protection.
"""
response = super(EntryProtectionMixin, self).get(
request, *args, **kwargs)
if self.object.login_required and not request.user.is_authenticated:
return self.login()
if (self.object.password and self.object.password !=
self.request.session.get(self.session_key % self.object.pk)):
return self.password()
return response | ['def', 'get', '(', 'self', ',', 'request', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'response', '=', 'super', '(', 'EntryProtectionMixin', ',', 'self', ')', '.', 'get', '(', 'request', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'if', 'self', '.', 'object', '.', 'login_required', 'and', 'not', 'request', '.', 'user', '.', 'is_authenticated', ':', 'return', 'self', '.', 'login', '(', ')', 'if', '(', 'self', '.', 'object', '.', 'password', 'and', 'self', '.', 'object', '.', 'password', '!=', 'self', '.', 'request', '.', 'session', '.', 'get', '(', 'self', '.', 'session_key', '%', 'self', '.', 'object', '.', 'pk', ')', ')', ':', 'return', 'self', '.', 'password', '(', ')', 'return', 'response'] | Do the login and password protection. | ['Do', 'the', 'login', 'and', 'password', 'protection', '.'] | train | https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/views/mixins/entry_protection.py#L44-L55 |
8,508 | svenevs/exhale | exhale/utils.py | sanitize | def sanitize(name):
"""
Sanitize the specified ``name`` for use with breathe directives.
**Parameters**
``name`` (:class:`python:str`)
The name to be sanitized.
**Return**
:class:`python:str`
The input ``name`` sanitized to use with breathe directives (primarily for use
with ``.. doxygenfunction::``). Replacements such as ``"&lt;" -> "<"`` are
performed, as well as removing spaces ``"< " -> "<"`` must be done. Breathe is
particularly sensitive with respect to whitespace.
"""
return name.replace(
"<", "<"
).replace(
">", ">"
).replace(
"&", "&"
).replace(
"< ", "<"
).replace(
" >", ">"
).replace(
" &", "&"
).replace(
"& ", "&"
) | python | def sanitize(name):
"""
Sanitize the specified ``name`` for use with breathe directives.
**Parameters**
``name`` (:class:`python:str`)
The name to be sanitized.
**Return**
:class:`python:str`
The input ``name`` sanitized to use with breathe directives (primarily for use
with ``.. doxygenfunction::``). Replacements such as ``"&lt;" -> "<"`` are
performed, as well as removing spaces ``"< " -> "<"`` must be done. Breathe is
particularly sensitive with respect to whitespace.
"""
return name.replace(
"<", "<"
).replace(
">", ">"
).replace(
"&", "&"
).replace(
"< ", "<"
).replace(
" >", ">"
).replace(
" &", "&"
).replace(
"& ", "&"
) | ['def', 'sanitize', '(', 'name', ')', ':', 'return', 'name', '.', 'replace', '(', '"&lt;"', ',', '"<"', ')', '.', 'replace', '(', '"&gt;"', ',', '">"', ')', '.', 'replace', '(', '"&amp;"', ',', '"&"', ')', '.', 'replace', '(', '"< "', ',', '"<"', ')', '.', 'replace', '(', '" >"', ',', '">"', ')', '.', 'replace', '(', '" &"', ',', '"&"', ')', '.', 'replace', '(', '"& "', ',', '"&"', ')'] | Sanitize the specified ``name`` for use with breathe directives.
**Parameters**
``name`` (:class:`python:str`)
The name to be sanitized.
**Return**
:class:`python:str`
The input ``name`` sanitized to use with breathe directives (primarily for use
with ``.. doxygenfunction::``). Replacements such as ``"&lt;" -> "<"`` are
performed, as well as removing spaces ``"< " -> "<"`` must be done. Breathe is
particularly sensitive with respect to whitespace. | ['Sanitize', 'the', 'specified', 'name', 'for', 'use', 'with', 'breathe', 'directives', '.'] | train | https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/utils.py#L255-L286 |
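A quick check of the replacement chain above; the sample signature is arbitrary, written in the HTML-escaped form the function expects.

```python
print(sanitize("const std::vector&lt; int &gt; &amp;"))
# -> "const std::vector<int>&"
```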
8,509 | alejandroautalan/pygubu | pygubudesigner/previewer.py | PreviewHelper._get_slot | def _get_slot(self):
"Returns the next coordinates for a preview"
x = y = 10
for k, p in self.previews.items():
y += p.height() + self.padding
return x, y | python | def _get_slot(self):
"Returns the next coordinates for a preview"
x = y = 10
for k, p in self.previews.items():
y += p.height() + self.padding
return x, y | ['def', '_get_slot', '(', 'self', ')', ':', 'x', '=', 'y', '=', '10', 'for', 'k', ',', 'p', 'in', 'self', '.', 'previews', '.', 'items', '(', ')', ':', 'y', '+=', 'p', '.', 'height', '(', ')', '+', 'self', '.', 'padding', 'return', 'x', ',', 'y'] | Returns the next coordinates for a preview | ['Returns', 'the', 'next', 'coordinates', 'for', 'a', 'preview'] | train | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/previewer.py#L511-L517 |
8,510 | samastur/pyimagediet | pyimagediet/process.py | determine_type | def determine_type(filename):
'''Determine the file type and return it.'''
ftype = magic.from_file(filename, mime=True).decode('utf8')
if ftype == 'text/plain':
ftype = 'text'
elif ftype == 'image/svg+xml':
ftype = 'svg'
else:
ftype = ftype.split('/')[1]
return ftype | python | def determine_type(filename):
'''Determine the file type and return it.'''
ftype = magic.from_file(filename, mime=True).decode('utf8')
if ftype == 'text/plain':
ftype = 'text'
elif ftype == 'image/svg+xml':
ftype = 'svg'
else:
ftype = ftype.split('/')[1]
return ftype | ['def', 'determine_type', '(', 'filename', ')', ':', 'ftype', '=', 'magic', '.', 'from_file', '(', 'filename', ',', 'mime', '=', 'True', ')', '.', 'decode', '(', "'utf8'", ')', 'if', 'ftype', '==', "'text/plain'", ':', 'ftype', '=', "'text'", 'elif', 'ftype', '==', "'image/svg+xml'", ':', 'ftype', '=', "'svg'", 'else', ':', 'ftype', '=', 'ftype', '.', 'split', '(', "'/'", ')', '[', '1', ']', 'return', 'ftype'] | Determine the file type and return it. | ['Determine', 'the', 'file', 'type', 'and', 'return', 'it', '.'] | train | https://github.com/samastur/pyimagediet/blob/480c6e171577df36e166590b031bc8891b3c9e7b/pyimagediet/process.py#L46-L55 |
8,511 | DeepHorizons/iarm | iarm_kernel/iarmkernel.py | ArmKernel.magic_postpone_execution | def magic_postpone_execution(self, line):
"""
Postpone execution of instructions until explicitly run
Usage:
Call this magic with `true` or nothing to postpone execution,
or call with `false` to execute each instruction when evaluated.
This defaults to True.
Note that each cell is only executed after all lines in
the cell have been evaluated properly.
`%postpone_execution`
or
`%postpone_execution true`
or
`%postpone_execution false`
"""
line = line.strip().lower()
if not line or line == 'true':
self.interpreter.postpone_execution = True
elif line == 'false':
self.interpreter.postpone_execution = False
else:
stream_content = {'name': 'stderr', 'text': "unknown value '{}'".format(line)}
self.send_response(self.iopub_socket, 'stream', stream_content)
return {'status': 'error',
'execution_count': self.execution_count,
'ename': ValueError.__name__,
'evalue': "unknwon value '{}'".format(line),
'traceback': '???'} | python | def magic_postpone_execution(self, line):
"""
Postpone execution of instructions until explicitly run
Usage:
Call this magic with `true` or nothing to postpone execution,
or call with `false` to execute each instruction when evaluated.
This defaults to True.
Note that each cell is only executed after all lines in
the cell have been evaluated properly.
`%postpone_execution`
or
`%postpone_execution true`
or
`%postpone_execution false`
"""
line = line.strip().lower()
if not line or line == 'true':
self.interpreter.postpone_execution = True
elif line == 'false':
self.interpreter.postpone_execution = False
else:
stream_content = {'name': 'stderr', 'text': "unknown value '{}'".format(line)}
self.send_response(self.iopub_socket, 'stream', stream_content)
return {'status': 'error',
'execution_count': self.execution_count,
'ename': ValueError.__name__,
'evalue': "unknwon value '{}'".format(line),
'traceback': '???'} | ['def', 'magic_postpone_execution', '(', 'self', ',', 'line', ')', ':', 'line', '=', 'line', '.', 'strip', '(', ')', '.', 'lower', '(', ')', 'if', 'not', 'line', 'or', 'line', '==', "'true'", ':', 'self', '.', 'interpreter', '.', 'postpone_execution', '=', 'True', 'elif', 'line', '==', "'false'", ':', 'self', '.', 'interpreter', '.', 'postpone_execution', '=', 'False', 'else', ':', 'stream_content', '=', '{', "'name'", ':', "'stderr'", ',', "'text'", ':', '"unknwon value \'{}\'"', '.', 'format', '(', 'line', ')', '}', 'self', '.', 'send_response', '(', 'self', '.', 'iopub_socket', ',', "'stream'", ',', 'stream_content', ')', 'return', '{', "'status'", ':', "'error'", ',', "'execution_count'", ':', 'self', '.', 'execution_count', ',', "'ename'", ':', 'ValueError', '.', '__name__', ',', "'evalue'", ':', '"unknwon value \'{}\'"', '.', 'format', '(', 'line', ')', ',', "'traceback'", ':', "'???'", '}'] | Postpone execution of instructions until explicitly run
Usage:
Call this magic with `true` or nothing to postpone execution,
or call with `false` to execute each instruction when evaluated.
This defaults to True.
Note that each cell is only executed after all lines in
the cell have been evaluated properly.
`%postpone_execution`
or
`%postpone_execution true`
or
`%postpone_execution false` | ['Postpone', 'execution', 'of', 'instructions', 'until', 'explicitly', 'run'] | train | https://github.com/DeepHorizons/iarm/blob/b913c9fd577b793a6bbced78b78a5d8d7cd88de4/iarm_kernel/iarmkernel.py#L83-L113 |
8,512 | sternoru/goscalecms | goscale/views.py | signup | def signup(request, **kwargs):
"""
Overrides allauth.account.views.signup
"""
if not ALLAUTH:
return http.HttpResponse(_('allauth not installed...'))
if request.method == "POST" and 'login' in request.POST:
form_class = LoginForm
form = form_class(request.POST)
redirect_field_name = "next"
success_url = get_default_redirect(request, redirect_field_name)
if form.is_valid():
return form.login(request, redirect_url=success_url)
response = allauth_signup(request, **kwargs)
return response | python | def signup(request, **kwargs):
"""
Overrides allauth.account.views.signup
"""
if not ALLAUTH:
return http.HttpResponse(_('allauth not installed...'))
if request.method == "POST" and 'login' in request.POST:
form_class = LoginForm
form = form_class(request.POST)
redirect_field_name = "next"
success_url = get_default_redirect(request, redirect_field_name)
if form.is_valid():
return form.login(request, redirect_url=success_url)
response = allauth_signup(request, **kwargs)
return response | ['def', 'signup', '(', 'request', ',', '*', '*', 'kwargs', ')', ':', 'if', 'not', 'ALLAUTH', ':', 'return', 'http', '.', 'HttpResponse', '(', '_', '(', "'allauth not installed...'", ')', ')', 'if', 'request', '.', 'method', '==', '"POST"', 'and', "'login'", 'in', 'request', '.', 'POST', ':', 'form_class', '=', 'LoginForm', 'form', '=', 'form_class', '(', 'request', '.', 'POST', ')', 'redirect_field_name', '=', '"next"', 'success_url', '=', 'get_default_redirect', '(', 'request', ',', 'redirect_field_name', ')', 'if', 'form', '.', 'is_valid', '(', ')', ':', 'return', 'form', '.', 'login', '(', 'request', ',', 'redirect_url', '=', 'success_url', ')', 'response', '=', 'allauth_signup', '(', 'request', ',', '*', '*', 'kwargs', ')', 'return', 'response'] | Overrides allauth.account.views.signup | ['Overrides', 'allauth', '.', 'account', '.', 'views', '.', 'signup'] | train | https://github.com/sternoru/goscalecms/blob/7eee50357c47ebdfe3e573a8b4be3b67892d229e/goscale/views.py#L38-L52 |
8,513 | agoragames/haigha | haigha/frames/frame.py | Frame.read_frames | def read_frames(cls, reader):
'''
Read one or more frames from an IO stream. Buffer must support file
object interface.
After reading, caller will need to check if there are bytes remaining
in the stream. If there are, then that implies that there is one or
more incomplete frames and more data needs to be read. The position
of the cursor in the frame stream will mark the point at which the
last good frame was read. If the caller is expecting a sequence of
frames and only received a part of that sequence, they are responsible
for buffering those frames until the rest of the frames in the sequence
have arrived.
'''
rval = deque()
while True:
frame_start_pos = reader.tell()
try:
frame = Frame._read_frame(reader)
except Reader.BufferUnderflow:
# No more data in the stream
frame = None
except Reader.ReaderError as e:
# Some other format error
raise Frame.FormatError, str(e), sys.exc_info()[-1]
except struct.error as e:
raise Frame.FormatError, str(e), sys.exc_info()[-1]
if frame is None:
reader.seek(frame_start_pos)
break
rval.append(frame)
return rval | python | def read_frames(cls, reader):
'''
Read one or more frames from an IO stream. Buffer must support file
object interface.
After reading, caller will need to check if there are bytes remaining
in the stream. If there are, then that implies that there is one or
more incomplete frames and more data needs to be read. The position
of the cursor in the frame stream will mark the point at which the
last good frame was read. If the caller is expecting a sequence of
frames and only received a part of that sequence, they are responsible
for buffering those frames until the rest of the frames in the sequence
have arrived.
'''
rval = deque()
while True:
frame_start_pos = reader.tell()
try:
frame = Frame._read_frame(reader)
except Reader.BufferUnderflow:
# No more data in the stream
frame = None
except Reader.ReaderError as e:
# Some other format error
raise Frame.FormatError, str(e), sys.exc_info()[-1]
except struct.error as e:
raise Frame.FormatError, str(e), sys.exc_info()[-1]
if frame is None:
reader.seek(frame_start_pos)
break
rval.append(frame)
return rval | ['def', 'read_frames', '(', 'cls', ',', 'reader', ')', ':', 'rval', '=', 'deque', '(', ')', 'while', 'True', ':', 'frame_start_pos', '=', 'reader', '.', 'tell', '(', ')', 'try', ':', 'frame', '=', 'Frame', '.', '_read_frame', '(', 'reader', ')', 'except', 'Reader', '.', 'BufferUnderflow', ':', '# No more data in the stream', 'frame', '=', 'None', 'except', 'Reader', '.', 'ReaderError', 'as', 'e', ':', '# Some other format error', 'raise', 'Frame', '.', 'FormatError', ',', 'str', '(', 'e', ')', ',', 'sys', '.', 'exc_info', '(', ')', '[', '-', '1', ']', 'except', 'struct', '.', 'error', 'as', 'e', ':', 'raise', 'Frame', '.', 'FormatError', ',', 'str', '(', 'e', ')', ',', 'sys', '.', 'exc_info', '(', ')', '[', '-', '1', ']', 'if', 'frame', 'is', 'None', ':', 'reader', '.', 'seek', '(', 'frame_start_pos', ')', 'break', 'rval', '.', 'append', '(', 'frame', ')', 'return', 'rval'] | Read one or more frames from an IO stream. Buffer must support file
object interface.
After reading, caller will need to check if there are bytes remaining
in the stream. If there are, then that implies that there is one or
more incomplete frames and more data needs to be read. The position
of the cursor in the frame stream will mark the point at which the
last good frame was read. If the caller is expecting a sequence of
frames and only received a part of that sequence, they are responsible
for buffering those frames until the rest of the frames in the sequence
have arrived. | ['Read', 'one', 'or', 'more', 'frames', 'from', 'an', 'IO', 'stream', '.', 'Buffer', 'must', 'support', 'file', 'object', 'interface', '.'] | train | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/frames/frame.py#L49-L84 |
8,514 | agile-geoscience/welly | welly/curve.py | Curve.interpolate_where | def interpolate_where(self, condition):
"""
Remove then interpolate across
"""
raise NotImplementedError()
self[self < 0] = np.nan
return self.interpolate() | python | def interpolate_where(self, condition):
"""
Remove then interpolate across
"""
raise NotImplementedError()
self[self < 0] = np.nan
return self.interpolate() | ['def', 'interpolate_where', '(', 'self', ',', 'condition', ')', ':', 'raise', 'NotImplementedError', '(', ')', 'self', '[', 'self', '<', '0', ']', '=', 'np', '.', 'nan', 'return', 'self', '.', 'interpolate', '(', ')'] | Remove then interpolate across | ['Remove', 'then', 'interpolate', 'across'] | train | https://github.com/agile-geoscience/welly/blob/ed4c991011d6290938fef365553041026ba29f42/welly/curve.py#L466-L472 |
8,515 | tanghaibao/goatools | goatools/grouper/plotobj.py | PltGroupedGos._plot_go_group | def _plot_go_group(self, hdrgo, usrgos, pltargs, go2parentids):
"""Plot an exploratory GO DAG for a single Group of user GOs."""
gosubdagplotnts = self._get_gosubdagplotnts(hdrgo, usrgos, pltargs, go2parentids)
# Create pngs and return png names
pngs = [obj.wrplt(pltargs.fout_dir, pltargs.plt_ext) for obj in gosubdagplotnts]
return pngs | python | def _plot_go_group(self, hdrgo, usrgos, pltargs, go2parentids):
"""Plot an exploratory GO DAG for a single Group of user GOs."""
gosubdagplotnts = self._get_gosubdagplotnts(hdrgo, usrgos, pltargs, go2parentids)
# Create pngs and return png names
pngs = [obj.wrplt(pltargs.fout_dir, pltargs.plt_ext) for obj in gosubdagplotnts]
return pngs | ['def', '_plot_go_group', '(', 'self', ',', 'hdrgo', ',', 'usrgos', ',', 'pltargs', ',', 'go2parentids', ')', ':', 'gosubdagplotnts', '=', 'self', '.', '_get_gosubdagplotnts', '(', 'hdrgo', ',', 'usrgos', ',', 'pltargs', ',', 'go2parentids', ')', '# Create pngs and return png names', 'pngs', '=', '[', 'obj', '.', 'wrplt', '(', 'pltargs', '.', 'fout_dir', ',', 'pltargs', '.', 'plt_ext', ')', 'for', 'obj', 'in', 'gosubdagplotnts', ']', 'return', 'pngs'] | Plot an exploratory GO DAG for a single Group of user GOs. | ['Plot', 'an', 'exploratory', 'GO', 'DAG', 'for', 'a', 'single', 'Group', 'of', 'user', 'GOs', '.'] | train | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/plotobj.py#L150-L155 |
8,516 | quintusdias/glymur | glymur/jp2box.py | FragmentTableBox._validate | def _validate(self, writing=False):
"""Self-validate the box before writing."""
box_ids = [box.box_id for box in self.box]
if len(box_ids) != 1 or box_ids[0] != 'flst':
msg = ("Fragment table boxes must have a single fragment list "
"box as a child box.")
self._dispatch_validation_error(msg, writing=writing) | python | def _validate(self, writing=False):
"""Self-validate the box before writing."""
box_ids = [box.box_id for box in self.box]
if len(box_ids) != 1 or box_ids[0] != 'flst':
msg = ("Fragment table boxes must have a single fragment list "
"box as a child box.")
self._dispatch_validation_error(msg, writing=writing) | ['def', '_validate', '(', 'self', ',', 'writing', '=', 'False', ')', ':', 'box_ids', '=', '[', 'box', '.', 'box_id', 'for', 'box', 'in', 'self', '.', 'box', ']', 'if', 'len', '(', 'box_ids', ')', '!=', '1', 'or', 'box_ids', '[', '0', ']', '!=', "'flst'", ':', 'msg', '=', '(', '"Fragment table boxes must have a single fragment list "', '"box as a child box."', ')', 'self', '.', '_dispatch_validation_error', '(', 'msg', ',', 'writing', '=', 'writing', ')'] | Self-validate the box before writing. | ['Self', '-', 'validate', 'the', 'box', 'before', 'writing', '.'] | train | https://github.com/quintusdias/glymur/blob/8b8fb091130fff00f1028dc82219e69e3f9baf6d/glymur/jp2box.py#L1515-L1521 |
8,517 | apache/incubator-mxnet | python/mxnet/ndarray/sparse.py | _new_alloc_handle | def _new_alloc_handle(stype, shape, ctx, delay_alloc, dtype, aux_types, aux_shapes=None):
"""Return a new handle with specified storage type, shape, dtype and context.
Empty handle is only used to hold results
Returns
-------
handle
A new empty ndarray handle
"""
hdl = NDArrayHandle()
for aux_t in aux_types:
if np.dtype(aux_t) != np.dtype("int64"):
raise NotImplementedError("only int64 is supported for aux types")
aux_type_ids = [int(_DTYPE_NP_TO_MX[np.dtype(aux_t).type]) for aux_t in aux_types]
aux_shapes = [(0,) for aux_t in aux_types] if aux_shapes is None else aux_shapes
aux_shape_lens = [len(aux_shape) for aux_shape in aux_shapes]
aux_shapes = py_sum(aux_shapes, ())
num_aux = mx_uint(len(aux_types))
check_call(_LIB.MXNDArrayCreateSparseEx(
ctypes.c_int(int(_STORAGE_TYPE_STR_TO_ID[stype])),
c_array_buf(mx_uint, native_array('I', shape)),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
num_aux,
c_array_buf(ctypes.c_int, native_array('i', aux_type_ids)),
c_array_buf(mx_uint, native_array('I', aux_shape_lens)),
c_array_buf(mx_uint, native_array('I', aux_shapes)),
ctypes.byref(hdl)))
return hdl | python | def _new_alloc_handle(stype, shape, ctx, delay_alloc, dtype, aux_types, aux_shapes=None):
"""Return a new handle with specified storage type, shape, dtype and context.
Empty handle is only used to hold results
Returns
-------
handle
A new empty ndarray handle
"""
hdl = NDArrayHandle()
for aux_t in aux_types:
if np.dtype(aux_t) != np.dtype("int64"):
raise NotImplementedError("only int64 is supported for aux types")
aux_type_ids = [int(_DTYPE_NP_TO_MX[np.dtype(aux_t).type]) for aux_t in aux_types]
aux_shapes = [(0,) for aux_t in aux_types] if aux_shapes is None else aux_shapes
aux_shape_lens = [len(aux_shape) for aux_shape in aux_shapes]
aux_shapes = py_sum(aux_shapes, ())
num_aux = mx_uint(len(aux_types))
check_call(_LIB.MXNDArrayCreateSparseEx(
ctypes.c_int(int(_STORAGE_TYPE_STR_TO_ID[stype])),
c_array_buf(mx_uint, native_array('I', shape)),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
num_aux,
c_array_buf(ctypes.c_int, native_array('i', aux_type_ids)),
c_array_buf(mx_uint, native_array('I', aux_shape_lens)),
c_array_buf(mx_uint, native_array('I', aux_shapes)),
ctypes.byref(hdl)))
return hdl | ['def', '_new_alloc_handle', '(', 'stype', ',', 'shape', ',', 'ctx', ',', 'delay_alloc', ',', 'dtype', ',', 'aux_types', ',', 'aux_shapes', '=', 'None', ')', ':', 'hdl', '=', 'NDArrayHandle', '(', ')', 'for', 'aux_t', 'in', 'aux_types', ':', 'if', 'np', '.', 'dtype', '(', 'aux_t', ')', '!=', 'np', '.', 'dtype', '(', '"int64"', ')', ':', 'raise', 'NotImplementedError', '(', '"only int64 is supported for aux types"', ')', 'aux_type_ids', '=', '[', 'int', '(', '_DTYPE_NP_TO_MX', '[', 'np', '.', 'dtype', '(', 'aux_t', ')', '.', 'type', ']', ')', 'for', 'aux_t', 'in', 'aux_types', ']', 'aux_shapes', '=', '[', '(', '0', ',', ')', 'for', 'aux_t', 'in', 'aux_types', ']', 'if', 'aux_shapes', 'is', 'None', 'else', 'aux_shapes', 'aux_shape_lens', '=', '[', 'len', '(', 'aux_shape', ')', 'for', 'aux_shape', 'in', 'aux_shapes', ']', 'aux_shapes', '=', 'py_sum', '(', 'aux_shapes', ',', '(', ')', ')', 'num_aux', '=', 'mx_uint', '(', 'len', '(', 'aux_types', ')', ')', 'check_call', '(', '_LIB', '.', 'MXNDArrayCreateSparseEx', '(', 'ctypes', '.', 'c_int', '(', 'int', '(', '_STORAGE_TYPE_STR_TO_ID', '[', 'stype', ']', ')', ')', ',', 'c_array_buf', '(', 'mx_uint', ',', 'native_array', '(', "'I'", ',', 'shape', ')', ')', ',', 'mx_uint', '(', 'len', '(', 'shape', ')', ')', ',', 'ctypes', '.', 'c_int', '(', 'ctx', '.', 'device_typeid', ')', ',', 'ctypes', '.', 'c_int', '(', 'ctx', '.', 'device_id', ')', ',', 'ctypes', '.', 'c_int', '(', 'int', '(', 'delay_alloc', ')', ')', ',', 'ctypes', '.', 'c_int', '(', 'int', '(', '_DTYPE_NP_TO_MX', '[', 'np', '.', 'dtype', '(', 'dtype', ')', '.', 'type', ']', ')', ')', ',', 'num_aux', ',', 'c_array_buf', '(', 'ctypes', '.', 'c_int', ',', 'native_array', '(', "'i'", ',', 'aux_type_ids', ')', ')', ',', 'c_array_buf', '(', 'mx_uint', ',', 'native_array', '(', "'I'", ',', 'aux_shape_lens', ')', ')', ',', 'c_array_buf', '(', 'mx_uint', ',', 'native_array', '(', "'I'", ',', 'aux_shapes', ')', ')', ',', 'ctypes', '.', 'byref', '(', 'hdl', ')', ')', ')', 'return', 'hdl'] | Return a new handle with specified storage type, shape, dtype and context.
Empty handle is only used to hold results
Returns
-------
handle
A new empty ndarray handle | ['Return', 'a', 'new', 'handle', 'with', 'specified', 'storage', 'type', 'shape', 'dtype', 'and', 'context', '.'] | train | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L72-L104 |
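The helper above is private; a handle like this is normally allocated when a sparse array is created through the public API. A hedged sketch:

```python
import mxnet as mx

x = mx.nd.sparse.zeros('csr', shape=(3, 4), dtype='float32')  # allocates a CSR handle under the hood
print(x.stype, x.shape)  # -> csr (3, 4)
```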
8,518 | pdkit/pdkit | pdkit/gait_processor.py | GaitProcessor.gait | def gait(self, x):
"""
Extract gait features from estimated heel strikes and accelerometer data.
:param x: The time series to assess freeze of gait on. This could be x, y, z or mag_sum_acc.
:type x: pandas.Series
:return number_of_steps: Estimated number of steps based on heel strikes [number of steps].
:rtype number_of_steps: int
:return velocity: Velocity (if distance is provided) [meters/second].
:rtype velocity: float
:return avg_step_length: Average step length (if distance is provided) [meters].
:rtype avg_step_length: float
:return avg_stride_length: Average stride length (if distance is provided) [meters].
:rtype avg_stride_length: float
:return cadence: Number of steps divided by duration [steps/second].
:rtype cadence: float
:return array step_durations: Step duration [seconds].
:rtype step_durations: np.ndarray
:return float avg_step_duration: Average step duration [seconds].
:rtype avg_step_duration: float
:return float sd_step_durations: Standard deviation of step durations [seconds].
:rtype sd_step_durations: np.ndarray
:return list strides: Stride timings for each side [seconds].
:rtype strides: numpy.ndarray
:return float avg_number_of_strides: Estimated number of strides based on alternating heel strikes [number of strides].
:rtype avg_number_of_strides: float
:return list stride_durations: Estimated stride durations [seconds].
:rtype stride_durations: numpy.ndarray
:return float avg_stride_duration: Average stride duration [seconds].
:rtype avg_stride_duration: float
:return float sd_step_durations: Standard deviation of stride durations [seconds].
:rtype sd_step_duration: float
:return float step_regularity: Measure of step regularity along axis [percentage consistency of the step-to-step pattern].
:rtype step_regularity: float
:return float stride_regularity: Measure of stride regularity along axis [percentage consistency of the stride-to-stride pattern].
:rtype stride_regularity: float
:return float symmetry: Measure of gait symmetry along axis [difference between step and stride regularity].
:rtype symmetry: float
"""
data = x
strikes, _ = self.heel_strikes(data)
step_durations = []
for i in range(1, np.size(strikes)):
step_durations.append(strikes[i] - strikes[i-1])
avg_step_duration = np.mean(step_durations)
sd_step_durations = np.std(step_durations)
number_of_steps = np.size(strikes)
strides1 = strikes[0::2]
strides2 = strikes[1::2]
stride_durations1 = []
for i in range(1, np.size(strides1)):
stride_durations1.append(strides1[i] - strides1[i-1])
stride_durations2 = []
for i in range(1, np.size(strides2)):
stride_durations2.append(strides2[i] - strides2[i-1])
strides = [strides1, strides2]
stride_durations = [stride_durations1, stride_durations2]
avg_number_of_strides = np.mean([np.size(strides1), np.size(strides2)])
avg_stride_duration = np.mean((np.mean(stride_durations1),
np.mean(stride_durations2)))
sd_stride_durations = np.mean((np.std(stride_durations1),
np.std(stride_durations2)))
step_period = np.int(np.round(1 / avg_step_duration))
stride_period = np.int(np.round(1 / avg_stride_duration))
step_regularity, stride_regularity, symmetry = self.gait_regularity_symmetry(data,
average_step_duration=avg_step_duration,
average_stride_duration=avg_stride_duration)
cadence = None
if self.duration:
cadence = number_of_steps / self.duration
velocity = None
avg_step_length = None
avg_stride_length = None
if self.distance:
velocity = self.distance / self.duration
avg_step_length = number_of_steps / self.distance
avg_stride_length = avg_number_of_strides / self.distance
return [number_of_steps, cadence,
velocity,
avg_step_length,
avg_stride_length,
step_durations,
avg_step_duration,
sd_step_durations,
strides,
stride_durations,
avg_number_of_strides,
avg_stride_duration,
sd_stride_durations,
step_regularity,
stride_regularity,
symmetry] | python | def gait(self, x):
"""
Extract gait features from estimated heel strikes and accelerometer data.
:param x: The time series to assess freeze of gait on. This could be x, y, z or mag_sum_acc.
:type x: pandas.Series
:return number_of_steps: Estimated number of steps based on heel strikes [number of steps].
:rtype number_of_steps: int
:return velocity: Velocity (if distance is provided) [meters/second].
:rtype velocity: float
:return avg_step_length: Average step length (if distance is provided) [meters].
:rtype avg_step_length: float
:return avg_stride_length: Average stride length (if distance is provided) [meters].
:rtype avg_stride_length: float
:return cadence: Number of steps divided by duration [steps/second].
:rtype cadence: float
:return array step_durations: Step duration [seconds].
:rtype step_durations: np.ndarray
:return float avg_step_duration: Average step duration [seconds].
:rtype avg_step_duration: float
:return float sd_step_durations: Standard deviation of step durations [seconds].
:rtype sd_step_durations: np.ndarray
:return list strides: Stride timings for each side [seconds].
:rtype strides: numpy.ndarray
:return float avg_number_of_strides: Estimated number of strides based on alternating heel strikes [number of strides].
:rtype avg_number_of_strides: float
:return list stride_durations: Estimated stride durations [seconds].
:rtype stride_durations: numpy.ndarray
:return float avg_stride_duration: Average stride duration [seconds].
:rtype avg_stride_duration: float
:return float sd_step_durations: Standard deviation of stride durations [seconds].
:rtype sd_step_duration: float
:return float step_regularity: Measure of step regularity along axis [percentage consistency of the step-to-step pattern].
:rtype step_regularity: float
:return float stride_regularity: Measure of stride regularity along axis [percentage consistency of the stride-to-stride pattern].
:rtype stride_regularity: float
:return float symmetry: Measure of gait symmetry along axis [difference between step and stride regularity].
:rtype symmetry: float
"""
data = x
strikes, _ = self.heel_strikes(data)
step_durations = []
for i in range(1, np.size(strikes)):
step_durations.append(strikes[i] - strikes[i-1])
avg_step_duration = np.mean(step_durations)
sd_step_durations = np.std(step_durations)
number_of_steps = np.size(strikes)
strides1 = strikes[0::2]
strides2 = strikes[1::2]
stride_durations1 = []
for i in range(1, np.size(strides1)):
stride_durations1.append(strides1[i] - strides1[i-1])
stride_durations2 = []
for i in range(1, np.size(strides2)):
stride_durations2.append(strides2[i] - strides2[i-1])
strides = [strides1, strides2]
stride_durations = [stride_durations1, stride_durations2]
avg_number_of_strides = np.mean([np.size(strides1), np.size(strides2)])
avg_stride_duration = np.mean((np.mean(stride_durations1),
np.mean(stride_durations2)))
sd_stride_durations = np.mean((np.std(stride_durations1),
np.std(stride_durations2)))
step_period = np.int(np.round(1 / avg_step_duration))
stride_period = np.int(np.round(1 / avg_stride_duration))
step_regularity, stride_regularity, symmetry = self.gait_regularity_symmetry(data,
average_step_duration=avg_step_duration,
average_stride_duration=avg_stride_duration)
cadence = None
if self.duration:
cadence = number_of_steps / self.duration
velocity = None
avg_step_length = None
avg_stride_length = None
if self.distance:
velocity = self.distance / self.duration
avg_step_length = number_of_steps / self.distance
avg_stride_length = avg_number_of_strides / self.distance
return [number_of_steps, cadence,
velocity,
avg_step_length,
avg_stride_length,
step_durations,
avg_step_duration,
sd_step_durations,
strides,
stride_durations,
avg_number_of_strides,
avg_stride_duration,
sd_stride_durations,
step_regularity,
stride_regularity,
symmetry] | ['def', 'gait', '(', 'self', ',', 'x', ')', ':', 'data', '=', 'x', 'strikes', ',', '_', '=', 'self', '.', 'heel_strikes', '(', 'data', ')', 'step_durations', '=', '[', ']', 'for', 'i', 'in', 'range', '(', '1', ',', 'np', '.', 'size', '(', 'strikes', ')', ')', ':', 'step_durations', '.', 'append', '(', 'strikes', '[', 'i', ']', '-', 'strikes', '[', 'i', '-', '1', ']', ')', 'avg_step_duration', '=', 'np', '.', 'mean', '(', 'step_durations', ')', 'sd_step_durations', '=', 'np', '.', 'std', '(', 'step_durations', ')', 'number_of_steps', '=', 'np', '.', 'size', '(', 'strikes', ')', 'strides1', '=', 'strikes', '[', '0', ':', ':', '2', ']', 'strides2', '=', 'strikes', '[', '1', ':', ':', '2', ']', 'stride_durations1', '=', '[', ']', 'for', 'i', 'in', 'range', '(', '1', ',', 'np', '.', 'size', '(', 'strides1', ')', ')', ':', 'stride_durations1', '.', 'append', '(', 'strides1', '[', 'i', ']', '-', 'strides1', '[', 'i', '-', '1', ']', ')', 'stride_durations2', '=', '[', ']', 'for', 'i', 'in', 'range', '(', '1', ',', 'np', '.', 'size', '(', 'strides2', ')', ')', ':', 'stride_durations2', '.', 'append', '(', 'strides2', '[', 'i', ']', '-', 'strides2', '[', 'i', '-', '1', ']', ')', 'strides', '=', '[', 'strides1', ',', 'strides2', ']', 'stride_durations', '=', '[', 'stride_durations1', ',', 'stride_durations2', ']', 'avg_number_of_strides', '=', 'np', '.', 'mean', '(', '[', 'np', '.', 'size', '(', 'strides1', ')', ',', 'np', '.', 'size', '(', 'strides2', ')', ']', ')', 'avg_stride_duration', '=', 'np', '.', 'mean', '(', '(', 'np', '.', 'mean', '(', 'stride_durations1', ')', ',', 'np', '.', 'mean', '(', 'stride_durations2', ')', ')', ')', 'sd_stride_durations', '=', 'np', '.', 'mean', '(', '(', 'np', '.', 'std', '(', 'stride_durations1', ')', ',', 'np', '.', 'std', '(', 'stride_durations2', ')', ')', ')', 'step_period', '=', 'np', '.', 'int', '(', 'np', '.', 'round', '(', '1', '/', 'avg_step_duration', ')', ')', 'stride_period', '=', 'np', '.', 'int', '(', 'np', '.', 'round', '(', '1', '/', 'avg_stride_duration', ')', ')', 'step_regularity', ',', 'stride_regularity', ',', 'symmetry', '=', 'self', '.', 'gait_regularity_symmetry', '(', 'data', ',', 'average_step_duration', '=', 'avg_step_duration', ',', 'average_stride_duration', '=', 'avg_stride_duration', ')', 'cadence', '=', 'None', 'if', 'self', '.', 'duration', ':', 'cadence', '=', 'number_of_steps', '/', 'self', '.', 'duration', 'velocity', '=', 'None', 'avg_step_length', '=', 'None', 'avg_stride_length', '=', 'None', 'if', 'self', '.', 'distance', ':', 'velocity', '=', 'self', '.', 'distance', '/', 'self', '.', 'duration', 'avg_step_length', '=', 'number_of_steps', '/', 'self', '.', 'distance', 'avg_stride_length', '=', 'avg_number_of_strides', '/', 'self', '.', 'distance', 'return', '[', 'number_of_steps', ',', 'cadence', ',', 'velocity', ',', 'avg_step_length', ',', 'avg_stride_length', ',', 'step_durations', ',', 'avg_step_duration', ',', 'sd_step_durations', ',', 'strides', ',', 'stride_durations', ',', 'avg_number_of_strides', ',', 'avg_stride_duration', ',', 'sd_stride_durations', ',', 'step_regularity', ',', 'stride_regularity', ',', 'symmetry', ']'] | Extract gait features from estimated heel strikes and accelerometer data.
:param x: The time series to assess freeze of gait on. This could be x, y, z or mag_sum_acc.
:type x: pandas.Series
:return number_of_steps: Estimated number of steps based on heel strikes [number of steps].
:rtype number_of_steps: int
:return velocity: Velocity (if distance is provided) [meters/second].
:rtype velocity: float
:return avg_step_length: Average step length (if distance is provided) [meters].
:rtype avg_step_length: float
:return avg_stride_length: Average stride length (if distance is provided) [meters].
:rtype avg_stride_length: float
:return cadence: Number of steps divided by duration [steps/second].
:rtype cadence: float
:return array step_durations: Step duration [seconds].
:rtype step_durations: np.ndarray
:return float avg_step_duration: Average step duration [seconds].
:rtype avg_step_duration: float
:return float sd_step_durations: Standard deviation of step durations [seconds].
:rtype sd_step_durations: np.ndarray
:return list strides: Stride timings for each side [seconds].
:rtype strides: numpy.ndarray
:return float avg_number_of_strides: Estimated number of strides based on alternating heel strikes [number of strides].
:rtype avg_number_of_strides: float
:return list stride_durations: Estimated stride durations [seconds].
:rtype stride_durations: numpy.ndarray
:return float avg_stride_duration: Average stride duration [seconds].
:rtype avg_stride_duration: float
:return float sd_step_durations: Standard deviation of stride durations [seconds].
:rtype sd_step_duration: float
:return float step_regularity: Measure of step regularity along axis [percentage consistency of the step-to-step pattern].
:rtype step_regularity: float
:return float stride_regularity: Measure of stride regularity along axis [percentage consistency of the stride-to-stride pattern].
:rtype stride_regularity: float
:return float symmetry: Measure of gait symmetry along axis [difference between step and stride regularity].
:rtype symmetry: float | ['Extract', 'gait', 'features', 'from', 'estimated', 'heel', 'strikes', 'and', 'accelerometer', 'data', '.'] | train | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/gait_processor.py#L431-L539 |
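A hedged sketch of calling the extractor above; the `GaitProcessor` constructor keywords and the CSV layout are assumptions made for illustration, not taken from this row.

```python
# Hedged sketch: constructor kwargs and CSV columns are assumed.
import pandas as pd
from pdkit.gait_processor import GaitProcessor

df = pd.read_csv('gait_session.csv', index_col='td')  # hypothetical accelerometer recording
gp = GaitProcessor(duration=120, distance=90)         # assumed: seconds walked, metres covered
features = gp.gait(df['mag_sum_acc'])
number_of_steps, cadence, velocity = features[:3]
```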
8,519 | petl-developers/petl | petl/transform/setops.py | hashcomplement | def hashcomplement(a, b, strict=False):
"""
Alternative implementation of :func:`petl.transform.setops.complement`,
where the complement is executed by constructing an in-memory set for all
rows found in the right hand table, then iterating over rows from the
left hand table.
May be faster and/or more resource efficient where the right table is small
and the left table is large.
.. versionchanged:: 1.1.0
If `strict` is `True` then strict set-like behaviour is used, i.e.,
only rows in `a` not found in `b` are returned.
"""
return HashComplementView(a, b, strict=strict) | python | def hashcomplement(a, b, strict=False):
"""
Alternative implementation of :func:`petl.transform.setops.complement`,
where the complement is executed by constructing an in-memory set for all
rows found in the right hand table, then iterating over rows from the
left hand table.
May be faster and/or more resource efficient where the right table is small
and the left table is large.
.. versionchanged:: 1.1.0
If `strict` is `True` then strict set-like behaviour is used, i.e.,
only rows in `a` not found in `b` are returned.
"""
return HashComplementView(a, b, strict=strict) | ['def', 'hashcomplement', '(', 'a', ',', 'b', ',', 'strict', '=', 'False', ')', ':', 'return', 'HashComplementView', '(', 'a', ',', 'b', ',', 'strict', '=', 'strict', ')'] | Alternative implementation of :func:`petl.transform.setops.complement`,
where the complement is executed by constructing an in-memory set for all
rows found in the right hand table, then iterating over rows from the
left hand table.
May be faster and/or more resource efficient where the right table is small
and the left table is large.
.. versionchanged:: 1.1.0
If `strict` is `True` then strict set-like behaviour is used, i.e.,
only rows in `a` not found in `b` are returned. | ['Alternative', 'implementation', 'of', ':', 'func', ':', 'petl', '.', 'transform', '.', 'setops', '.', 'complement', 'where', 'the', 'complement', 'is', 'executed', 'by', 'constructing', 'an', 'in', '-', 'memory', 'set', 'for', 'all', 'rows', 'found', 'in', 'the', 'right', 'hand', 'table', 'then', 'iterating', 'over', 'rows', 'from', 'the', 'left', 'hand', 'table', '.'] | train | https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/transform/setops.py#L430-L447 |
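A small runnable example of the hash-based variant above, using petl's list-of-lists table convention.

```python
import petl as etl

a = [['id', 'colour'],
     [1, 'blue'],
     [2, 'red'],
     [3, 'purple']]
b = [['id', 'colour'],
     [2, 'red'],
     [4, 'yellow']]

print(etl.hashcomplement(a, b))  # rows of a whose (id, colour) pair never occurs in b
```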
8,520 | hardbyte/python-can | can/interfaces/systec/ucan.py | UcanServer.get_status | def get_status(self, channel=Channel.CHANNEL_CH0):
"""
Returns the error status of a specific CAN channel.
:param int channel: CAN channel, to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
:return: Tuple with CAN and USB status (see structure :class:`Status`).
:rtype: tuple(int, int)
"""
status = Status()
UcanGetStatusEx(self._handle, channel, byref(status))
return status.can_status, status.usb_status | python | def get_status(self, channel=Channel.CHANNEL_CH0):
"""
Returns the error status of a specific CAN channel.
:param int channel: CAN channel, to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
:return: Tuple with CAN and USB status (see structure :class:`Status`).
:rtype: tuple(int, int)
"""
status = Status()
UcanGetStatusEx(self._handle, channel, byref(status))
return status.can_status, status.usb_status | ['def', 'get_status', '(', 'self', ',', 'channel', '=', 'Channel', '.', 'CHANNEL_CH0', ')', ':', 'status', '=', 'Status', '(', ')', 'UcanGetStatusEx', '(', 'self', '.', '_handle', ',', 'channel', ',', 'byref', '(', 'status', ')', ')', 'return', 'status', '.', 'can_status', ',', 'status', '.', 'usb_status'] | Returns the error status of a specific CAN channel.
:param int channel: CAN channel, to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
:return: Tuple with CAN and USB status (see structure :class:`Status`).
:rtype: tuple(int, int) | ['Returns', 'the', 'error', 'status', 'of', 'a', 'specific', 'CAN', 'channel', '.'] | train | https://github.com/hardbyte/python-can/blob/cdc5254d96072df7739263623f3e920628a7d214/can/interfaces/systec/ucan.py#L450-L460 |
8,521 | openid/python-openid | openid/yadis/accept.py | parseAcceptHeader | def parseAcceptHeader(value):
"""Parse an accept header, ignoring any accept-extensions
returns a list of tuples containing main MIME type, MIME subtype,
and quality markdown.
str -> [(str, str, float)]
"""
chunks = [chunk.strip() for chunk in value.split(',')]
accept = []
for chunk in chunks:
parts = [s.strip() for s in chunk.split(';')]
mtype = parts.pop(0)
if '/' not in mtype:
# This is not a MIME type, so ignore the bad data
continue
main, sub = mtype.split('/', 1)
for ext in parts:
if '=' in ext:
k, v = ext.split('=', 1)
if k == 'q':
try:
q = float(v)
break
except ValueError:
# Ignore poorly formed q-values
pass
else:
q = 1.0
accept.append((q, main, sub))
accept.sort()
accept.reverse()
return [(main, sub, q) for (q, main, sub) in accept] | python | def parseAcceptHeader(value):
"""Parse an accept header, ignoring any accept-extensions
returns a list of tuples containing main MIME type, MIME subtype,
and quality markdown.
str -> [(str, str, float)]
"""
chunks = [chunk.strip() for chunk in value.split(',')]
accept = []
for chunk in chunks:
parts = [s.strip() for s in chunk.split(';')]
mtype = parts.pop(0)
if '/' not in mtype:
# This is not a MIME type, so ignore the bad data
continue
main, sub = mtype.split('/', 1)
for ext in parts:
if '=' in ext:
k, v = ext.split('=', 1)
if k == 'q':
try:
q = float(v)
break
except ValueError:
# Ignore poorly formed q-values
pass
else:
q = 1.0
accept.append((q, main, sub))
accept.sort()
accept.reverse()
return [(main, sub, q) for (q, main, sub) in accept] | ['def', 'parseAcceptHeader', '(', 'value', ')', ':', 'chunks', '=', '[', 'chunk', '.', 'strip', '(', ')', 'for', 'chunk', 'in', 'value', '.', 'split', '(', "','", ')', ']', 'accept', '=', '[', ']', 'for', 'chunk', 'in', 'chunks', ':', 'parts', '=', '[', 's', '.', 'strip', '(', ')', 'for', 's', 'in', 'chunk', '.', 'split', '(', "';'", ')', ']', 'mtype', '=', 'parts', '.', 'pop', '(', '0', ')', 'if', "'/'", 'not', 'in', 'mtype', ':', '# This is not a MIME type, so ignore the bad data', 'continue', 'main', ',', 'sub', '=', 'mtype', '.', 'split', '(', "'/'", ',', '1', ')', 'for', 'ext', 'in', 'parts', ':', 'if', "'='", 'in', 'ext', ':', 'k', ',', 'v', '=', 'ext', '.', 'split', '(', "'='", ',', '1', ')', 'if', 'k', '==', "'q'", ':', 'try', ':', 'q', '=', 'float', '(', 'v', ')', 'break', 'except', 'ValueError', ':', '# Ignore poorly formed q-values', 'pass', 'else', ':', 'q', '=', '1.0', 'accept', '.', 'append', '(', '(', 'q', ',', 'main', ',', 'sub', ')', ')', 'accept', '.', 'sort', '(', ')', 'accept', '.', 'reverse', '(', ')', 'return', '[', '(', 'main', ',', 'sub', ',', 'q', ')', 'for', '(', 'q', ',', 'main', ',', 'sub', ')', 'in', 'accept', ']'] | Parse an accept header, ignoring any accept-extensions
returns a list of tuples containing main MIME type, MIME subtype,
and quality markdown.
str -> [(str, str, float)] | ['Parse', 'an', 'accept', 'header', 'ignoring', 'any', 'accept', '-', 'extensions'] | train | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/yadis/accept.py#L35-L72 |
8,522 | markokr/rarfile | rarfile.py | load_le32 | def load_le32(buf, pos):
"""Load little-endian 32-bit integer"""
end = pos + 4
if end > len(buf):
raise BadRarFile('cannot load le32')
return S_LONG.unpack_from(buf, pos)[0], pos + 4 | python | def load_le32(buf, pos):
"""Load little-endian 32-bit integer"""
end = pos + 4
if end > len(buf):
raise BadRarFile('cannot load le32')
return S_LONG.unpack_from(buf, pos)[0], pos + 4 | ['def', 'load_le32', '(', 'buf', ',', 'pos', ')', ':', 'end', '=', 'pos', '+', '4', 'if', 'end', '>', 'len', '(', 'buf', ')', ':', 'raise', 'BadRarFile', '(', "'cannot load le32'", ')', 'return', 'S_LONG', '.', 'unpack_from', '(', 'buf', ',', 'pos', ')', '[', '0', ']', ',', 'pos', '+', '4'] | Load little-endian 32-bit integer | ['Load', 'little', '-', 'endian', '32', '-', 'bit', 'integer'] | train | https://github.com/markokr/rarfile/blob/2704344e8d7a1658c96c8ed8f449d7ba01bedea3/rarfile.py#L2616-L2621 |
8,523 | jedie/DragonPy | dragonpy/utils/starter.py | _run | def _run(*args, **kwargs):
"""
Run current executable via subprocess and given args
"""
verbose = kwargs.pop("verbose", False)
if verbose:
click.secho(" ".join([repr(i) for i in args]), bg='blue', fg='white')
executable = args[0]
if not os.path.isfile(executable):
raise RuntimeError("First argument %r is not a existing file!" % executable)
if not os.access(executable, os.X_OK):
raise RuntimeError("First argument %r exist, but is not executeable!" % executable)
return subprocess.Popen(args, **kwargs) | python | def _run(*args, **kwargs):
"""
Run current executable via subprocess and given args
"""
verbose = kwargs.pop("verbose", False)
if verbose:
click.secho(" ".join([repr(i) for i in args]), bg='blue', fg='white')
executable = args[0]
if not os.path.isfile(executable):
raise RuntimeError("First argument %r is not a existing file!" % executable)
if not os.access(executable, os.X_OK):
raise RuntimeError("First argument %r exist, but is not executeable!" % executable)
return subprocess.Popen(args, **kwargs) | ['def', '_run', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'verbose', '=', 'kwargs', '.', 'pop', '(', '"verbose"', ',', 'False', ')', 'if', 'verbose', ':', 'click', '.', 'secho', '(', '" "', '.', 'join', '(', '[', 'repr', '(', 'i', ')', 'for', 'i', 'in', 'args', ']', ')', ',', 'bg', '=', "'blue'", ',', 'fg', '=', "'white'", ')', 'executable', '=', 'args', '[', '0', ']', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'executable', ')', ':', 'raise', 'RuntimeError', '(', '"First argument %r is not a existing file!"', '%', 'executable', ')', 'if', 'not', 'os', '.', 'access', '(', 'executable', ',', 'os', '.', 'X_OK', ')', ':', 'raise', 'RuntimeError', '(', '"First argument %r exist, but is not executeable!"', '%', 'executable', ')', 'return', 'subprocess', '.', 'Popen', '(', 'args', ',', '*', '*', 'kwargs', ')'] | Run current executable via subprocess and given args | ['Run', 'current', 'executable', 'via', 'subprocess', 'and', 'given', 'args'] | train | https://github.com/jedie/DragonPy/blob/6659e5b5133aab26979a498ee7453495773a4f6c/dragonpy/utils/starter.py#L53-L67 |
8,524 | kontron/python-aardvark | pyaardvark/aardvark.py | Aardvark.i2c_master_write_read | def i2c_master_write_read(self, i2c_address, data, length):
"""Make an I2C write/read access.
First an I2C write access is issued. No stop condition will be
generated. Instead the read access begins with a repeated start.
This method is useful for accessing most addressable I2C devices like
EEPROMs, port expander, etc.
Basically, this is just a convenient function which internally uses
`i2c_master_write` and `i2c_master_read`.
"""
self.i2c_master_write(i2c_address, data, I2C_NO_STOP)
return self.i2c_master_read(i2c_address, length) | python | def i2c_master_write_read(self, i2c_address, data, length):
"""Make an I2C write/read access.
First an I2C write access is issued. No stop condition will be
generated. Instead the read access begins with a repeated start.
This method is useful for accessing most addressable I2C devices like
EEPROMs, port expander, etc.
Basically, this is just a convenient function which internally uses
`i2c_master_write` and `i2c_master_read`.
"""
self.i2c_master_write(i2c_address, data, I2C_NO_STOP)
return self.i2c_master_read(i2c_address, length) | ['def', 'i2c_master_write_read', '(', 'self', ',', 'i2c_address', ',', 'data', ',', 'length', ')', ':', 'self', '.', 'i2c_master_write', '(', 'i2c_address', ',', 'data', ',', 'I2C_NO_STOP', ')', 'return', 'self', '.', 'i2c_master_read', '(', 'i2c_address', ',', 'length', ')'] | Make an I2C write/read access.
First an I2C write access is issued. No stop condition will be
generated. Instead the read access begins with a repeated start.
This method is useful for accessing most addressable I2C devices like
EEPROMs, port expander, etc.
Basically, this is just a convenient function which internally uses
`i2c_master_write` and `i2c_master_read`. | ['Make', 'an', 'I2C', 'write', '/', 'read', 'access', '.'] | train | https://github.com/kontron/python-aardvark/blob/9827f669fbdc5bceb98e7d08a294b4e4e455d0d5/pyaardvark/aardvark.py#L437-L451 |
8,525 | deepmind/sonnet | sonnet/python/modules/scale_gradient.py | _scale_gradient_op | def _scale_gradient_op(dtype):
"""Create an op that scales gradients using a Defun.
The tensorflow Defun decorator creates an op and tensorflow caches these ops
automatically according to `func_name`. Using a Defun decorator twice with the
same `func_name` does not create a new op, instead the cached op is used.
This method produces a new op the first time it is called with a given `dtype`
argument, and then uses the cached op each time it is called after that with
the same `dtype`. The scale value is given as an argument for the forward pass
method so that it can be used in the backwards pass.
Args:
dtype: the dtype of the net whose gradient is being scaled.
Returns:
The op that scales gradients.
"""
def scale_gradient_backward(op, grad):
scale = op.inputs[1]
scaled_grad = grad * scale
return scaled_grad, None
# Note that if the forward pass implementation involved the creation of ops,
# _scale_gradient_op would require some memoization mechanism.
def scale_gradient_forward(x, scale):
del scale # Unused.
return x
func_name = "ScaleGradient_{}".format(dtype.name)
return function.Defun(
dtype, dtype,
python_grad_func=scale_gradient_backward,
func_name=func_name)(scale_gradient_forward) | python | def _scale_gradient_op(dtype):
"""Create an op that scales gradients using a Defun.
The tensorflow Defun decorator creates an op and tensorflow caches these ops
automatically according to `func_name`. Using a Defun decorator twice with the
same `func_name` does not create a new op, instead the cached op is used.
This method produces a new op the first time it is called with a given `dtype`
argument, and then uses the cached op each time it is called after that with
the same `dtype`. The scale value is given as an argument for the forward pass
method so that it can be used in the backwards pass.
Args:
dtype: the dtype of the net whose gradient is being scaled.
Returns:
The op that scales gradients.
"""
def scale_gradient_backward(op, grad):
scale = op.inputs[1]
scaled_grad = grad * scale
return scaled_grad, None
# Note that if the forward pass implementation involved the creation of ops,
# _scale_gradient_op would require some memoization mechanism.
def scale_gradient_forward(x, scale):
del scale # Unused.
return x
func_name = "ScaleGradient_{}".format(dtype.name)
return function.Defun(
dtype, dtype,
python_grad_func=scale_gradient_backward,
func_name=func_name)(scale_gradient_forward) | ['def', '_scale_gradient_op', '(', 'dtype', ')', ':', 'def', 'scale_gradient_backward', '(', 'op', ',', 'grad', ')', ':', 'scale', '=', 'op', '.', 'inputs', '[', '1', ']', 'scaled_grad', '=', 'grad', '*', 'scale', 'return', 'scaled_grad', ',', 'None', '# Note that if the forward pass implementation involved the creation of ops,', '# _scale_gradient_op would require some memoization mechanism.', 'def', 'scale_gradient_forward', '(', 'x', ',', 'scale', ')', ':', 'del', 'scale', '# Unused.', 'return', 'x', 'func_name', '=', '"ScaleGradient_{}"', '.', 'format', '(', 'dtype', '.', 'name', ')', 'return', 'function', '.', 'Defun', '(', 'dtype', ',', 'dtype', ',', 'python_grad_func', '=', 'scale_gradient_backward', ',', 'func_name', '=', 'func_name', ')', '(', 'scale_gradient_forward', ')'] | Create an op that scales gradients using a Defun.
The tensorflow Defun decorator creates an op and tensorflow caches these ops
automatically according to `func_name`. Using a Defun decorator twice with the
same `func_name` does not create a new op, instead the cached op is used.
This method produces a new op the first time it is called with a given `dtype`
argument, and then uses the cached op each time it is called after that with
the same `dtype`. The scale value is given as an argument for the forward pass
method so that it can be used in the backwards pass.
Args:
dtype: the dtype of the net whose gradient is being scaled.
Returns:
The op that scales gradients. | ['Create', 'an', 'op', 'that', 'scales', 'gradients', 'using', 'a', 'Defun', '.'] | train | https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/scale_gradient.py#L27-L61 |
8,526 | williamfzc/ConnectionTracer | ConnectionTracer/connection.py | reboot_adb_server | def reboot_adb_server():
""" execute 'adb devices' to start adb server """
_reboot_count = 0
_max_retry = 1
def _reboot():
nonlocal _reboot_count
if _reboot_count >= _max_retry:
raise RuntimeError('fail after retry {} times'.format(_max_retry))
_reboot_count += 1
return_code = subprocess.call(['adb', 'devices'], stdout=subprocess.DEVNULL)
if bool(return_code):
warnings.warn('return not zero, execute "adb version" failed')
raise EnvironmentError('adb did not work :(')
return _reboot | python | def reboot_adb_server():
""" execute 'adb devices' to start adb server """
_reboot_count = 0
_max_retry = 1
def _reboot():
nonlocal _reboot_count
if _reboot_count >= _max_retry:
raise RuntimeError('fail after retry {} times'.format(_max_retry))
_reboot_count += 1
return_code = subprocess.call(['adb', 'devices'], stdout=subprocess.DEVNULL)
if bool(return_code):
warnings.warn('return not zero, execute "adb version" failed')
raise EnvironmentError('adb did not work :(')
return _reboot | ['def', 'reboot_adb_server', '(', ')', ':', '_reboot_count', '=', '0', '_max_retry', '=', '1', 'def', '_reboot', '(', ')', ':', 'nonlocal', '_reboot_count', 'if', '_reboot_count', '>=', '_max_retry', ':', 'raise', 'RuntimeError', '(', "'fail after retry {} times'", '.', 'format', '(', '_max_retry', ')', ')', '_reboot_count', '+=', '1', 'return_code', '=', 'subprocess', '.', 'call', '(', '[', "'adb'", ',', "'devices'", ']', ',', 'stdout', '=', 'subprocess', '.', 'DEVNULL', ')', 'if', 'bool', '(', 'return_code', ')', ':', 'warnings', '.', 'warn', '(', '\'return not zero, execute "adb version" failed\'', ')', 'raise', 'EnvironmentError', '(', "'adb did not work :('", ')', 'return', '_reboot'] | execute 'adb devices' to start adb server | ['execute', 'adb', 'devices', 'to', 'start', 'adb', 'server'] | train | https://github.com/williamfzc/ConnectionTracer/blob/190003e374d6903cb82d2d21a1378979dc419ed3/ConnectionTracer/connection.py#L49-L65 |
8,527 | spacetelescope/drizzlepac | drizzlepac/tweakutils.py | read_ASCII_cols | def read_ASCII_cols(infile, cols=[1, 2, 3]): # noqa: N802
""" Interpret input ASCII file to return arrays for specified columns.
Notes
-----
The specification of the columns should be expected to have lists for
each 'column', with all columns in each list combined into a single
entry.
For example::
cols = ['1,2,3','4,5,6',7]
where '1,2,3' represent the X/RA values, '4,5,6' represent the Y/Dec
values and 7 represents the flux value for a total of 3 requested
columns of data to be returned.
Returns
-------
outarr : list of arrays
The return value will be a list of numpy arrays, one for each
'column'.
"""
# build dictionary representing format of each row
# Format of dictionary: {'colname':col_number,...}
# This provides the mapping between column name and column number
coldict = {}
with open(infile, 'r') as f:
flines = f.readlines()
for l in flines: # interpret each line from catalog file
if l[0].lstrip() == '#' or l.lstrip() == '':
continue
else:
# convert first row of data into column definitions using indices
coldict = {str(i + 1): i for i, _ in enumerate(l.split())}
break
numcols = len(cols)
outarr = [[] for _ in range(numcols)]
convert_radec = False
# Now, map specified columns to columns in file and populate output arrays
for l in flines: # interpret each line from catalog file
l = l.strip()
lspl = l.split()
# skip blank lines, comment lines, or lines with
# fewer columns than requested by user
if not l or len(lspl) < numcols or l[0] == '#' or "INDEF" in l:
continue
# For each 'column' requested by user, pull data from row
for c, i in zip(cols, list(range(numcols))):
cnames = parse_colname(c)
if len(cnames) > 1:
# interpret multi-column specification as one value
outval = ''
for cn in cnames:
cnum = coldict[cn]
cval = lspl[cnum]
outval += cval + ' '
outarr[i].append(outval)
convert_radec = True
else:
# pull single value from row for this column
cnum = coldict[cnames[0]]
if isfloat(lspl[cnum]):
cval = float(lspl[cnum])
else:
cval = lspl[cnum]
# Check for multi-column values given as "nn:nn:nn.s"
if ':' in cval:
cval = cval.replace(':', ' ')
convert_radec = True
outarr[i].append(cval)
# convert multi-column RA/Dec specifications
if convert_radec:
outra = []
outdec = []
for ra, dec in zip(outarr[0], outarr[1]):
radd, decdd = radec_hmstodd(ra, dec)
outra.append(radd)
outdec.append(decdd)
outarr[0] = outra
outarr[1] = outdec
# convert all lists to numpy arrays
for c in range(len(outarr)):
outarr[c] = np.array(outarr[c])
return outarr | python | def read_ASCII_cols(infile, cols=[1, 2, 3]): # noqa: N802
""" Interpret input ASCII file to return arrays for specified columns.
Notes
-----
The specification of the columns should be expected to have lists for
each 'column', with all columns in each list combined into a single
entry.
For example::
cols = ['1,2,3','4,5,6',7]
where '1,2,3' represent the X/RA values, '4,5,6' represent the Y/Dec
values and 7 represents the flux value for a total of 3 requested
columns of data to be returned.
Returns
-------
outarr : list of arrays
The return value will be a list of numpy arrays, one for each
'column'.
"""
# build dictionary representing format of each row
# Format of dictionary: {'colname':col_number,...}
# This provides the mapping between column name and column number
coldict = {}
with open(infile, 'r') as f:
flines = f.readlines()
for l in flines: # interpret each line from catalog file
if l[0].lstrip() == '#' or l.lstrip() == '':
continue
else:
# convert first row of data into column definitions using indices
coldict = {str(i + 1): i for i, _ in enumerate(l.split())}
break
numcols = len(cols)
outarr = [[] for _ in range(numcols)]
convert_radec = False
# Now, map specified columns to columns in file and populate output arrays
for l in flines: # interpret each line from catalog file
l = l.strip()
lspl = l.split()
# skip blank lines, comment lines, or lines with
# fewer columns than requested by user
if not l or len(lspl) < numcols or l[0] == '#' or "INDEF" in l:
continue
# For each 'column' requested by user, pull data from row
for c, i in zip(cols, list(range(numcols))):
cnames = parse_colname(c)
if len(cnames) > 1:
# interpret multi-column specification as one value
outval = ''
for cn in cnames:
cnum = coldict[cn]
cval = lspl[cnum]
outval += cval + ' '
outarr[i].append(outval)
convert_radec = True
else:
# pull single value from row for this column
cnum = coldict[cnames[0]]
if isfloat(lspl[cnum]):
cval = float(lspl[cnum])
else:
cval = lspl[cnum]
# Check for multi-column values given as "nn:nn:nn.s"
if ':' in cval:
cval = cval.replace(':', ' ')
convert_radec = True
outarr[i].append(cval)
# convert multi-column RA/Dec specifications
if convert_radec:
outra = []
outdec = []
for ra, dec in zip(outarr[0], outarr[1]):
radd, decdd = radec_hmstodd(ra, dec)
outra.append(radd)
outdec.append(decdd)
outarr[0] = outra
outarr[1] = outdec
# convert all lists to numpy arrays
for c in range(len(outarr)):
outarr[c] = np.array(outarr[c])
return outarr | ['def', 'read_ASCII_cols', '(', 'infile', ',', 'cols', '=', '[', '1', ',', '2', ',', '3', ']', ')', ':', '# noqa: N802', '# build dictionary representing format of each row', "# Format of dictionary: {'colname':col_number,...}", '# This provides the mapping between column name and column number', 'coldict', '=', '{', '}', 'with', 'open', '(', 'infile', ',', "'r'", ')', 'as', 'f', ':', 'flines', '=', 'f', '.', 'readlines', '(', ')', 'for', 'l', 'in', 'flines', ':', '# interpret each line from catalog file', 'if', 'l', '[', '0', ']', '.', 'lstrip', '(', ')', '==', "'#'", 'or', 'l', '.', 'lstrip', '(', ')', '==', "''", ':', 'continue', 'else', ':', '# convert first row of data into column definitions using indices', 'coldict', '=', '{', 'str', '(', 'i', '+', '1', ')', ':', 'i', 'for', 'i', ',', '_', 'in', 'enumerate', '(', 'l', '.', 'split', '(', ')', ')', '}', 'break', 'numcols', '=', 'len', '(', 'cols', ')', 'outarr', '=', '[', '[', ']', 'for', '_', 'in', 'range', '(', 'numcols', ')', ']', 'convert_radec', '=', 'False', '# Now, map specified columns to columns in file and populate output arrays', 'for', 'l', 'in', 'flines', ':', '# interpret each line from catalog file', 'l', '=', 'l', '.', 'strip', '(', ')', 'lspl', '=', 'l', '.', 'split', '(', ')', '# skip blank lines, comment lines, or lines with', '# fewer columns than requested by user', 'if', 'not', 'l', 'or', 'len', '(', 'lspl', ')', '<', 'numcols', 'or', 'l', '[', '0', ']', '==', "'#'", 'or', '"INDEF"', 'in', 'l', ':', 'continue', "# For each 'column' requested by user, pull data from row", 'for', 'c', ',', 'i', 'in', 'zip', '(', 'cols', ',', 'list', '(', 'range', '(', 'numcols', ')', ')', ')', ':', 'cnames', '=', 'parse_colname', '(', 'c', ')', 'if', 'len', '(', 'cnames', ')', '>', '1', ':', '# interpret multi-column specification as one value', 'outval', '=', "''", 'for', 'cn', 'in', 'cnames', ':', 'cnum', '=', 'coldict', '[', 'cn', ']', 'cval', '=', 'lspl', '[', 'cnum', ']', 'outval', '+=', 'cval', '+', "' '", 'outarr', '[', 'i', ']', '.', 'append', '(', 'outval', ')', 'convert_radec', '=', 'True', 'else', ':', '# pull single value from row for this column', 'cnum', '=', 'coldict', '[', 'cnames', '[', '0', ']', ']', 'if', 'isfloat', '(', 'lspl', '[', 'cnum', ']', ')', ':', 'cval', '=', 'float', '(', 'lspl', '[', 'cnum', ']', ')', 'else', ':', 'cval', '=', 'lspl', '[', 'cnum', ']', '# Check for multi-column values given as "nn:nn:nn.s"', 'if', "':'", 'in', 'cval', ':', 'cval', '=', 'cval', '.', 'replace', '(', "':'", ',', "' '", ')', 'convert_radec', '=', 'True', 'outarr', '[', 'i', ']', '.', 'append', '(', 'cval', ')', '# convert multi-column RA/Dec specifications', 'if', 'convert_radec', ':', 'outra', '=', '[', ']', 'outdec', '=', '[', ']', 'for', 'ra', ',', 'dec', 'in', 'zip', '(', 'outarr', '[', '0', ']', ',', 'outarr', '[', '1', ']', ')', ':', 'radd', ',', 'decdd', '=', 'radec_hmstodd', '(', 'ra', ',', 'dec', ')', 'outra', '.', 'append', '(', 'radd', ')', 'outdec', '.', 'append', '(', 'decdd', ')', 'outarr', '[', '0', ']', '=', 'outra', 'outarr', '[', '1', ']', '=', 'outdec', '# convert all lists to numpy arrays', 'for', 'c', 'in', 'range', '(', 'len', '(', 'outarr', ')', ')', ':', 'outarr', '[', 'c', ']', '=', 'np', '.', 'array', '(', 'outarr', '[', 'c', ']', ')', 'return', 'outarr'] | Interpret input ASCII file to return arrays for specified columns.
Notes
-----
The specification of the columns should be expected to have lists for
each 'column', with all columns in each list combined into a single
entry.
For example::
cols = ['1,2,3','4,5,6',7]
where '1,2,3' represent the X/RA values, '4,5,6' represent the Y/Dec
values and 7 represents the flux value for a total of 3 requested
columns of data to be returned.
Returns
-------
outarr : list of arrays
The return value will be a list of numpy arrays, one for each
'column'. | ['Interpret', 'input', 'ASCII', 'file', 'to', 'return', 'arrays', 'for', 'specified', 'columns', '.'] | train | https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/tweakutils.py#L450-L541 |
8,528 | blockstack/blockstack-core | blockstack/lib/atlas.py | AtlasZonefileCrawler.find_zonefile_origins | def find_zonefile_origins( self, missing_zfinfo, peer_hostports ):
"""
Find out which peers can serve which zonefiles
"""
zonefile_origins = {} # map peer hostport to list of zonefile hashes
# which peers can serve each zonefile?
for zfhash in missing_zfinfo.keys():
for peer_hostport in peer_hostports:
if not zonefile_origins.has_key(peer_hostport):
zonefile_origins[peer_hostport] = []
if peer_hostport in missing_zfinfo[zfhash]['peers']:
zonefile_origins[peer_hostport].append( zfhash )
return zonefile_origins | python | def find_zonefile_origins( self, missing_zfinfo, peer_hostports ):
"""
Find out which peers can serve which zonefiles
"""
zonefile_origins = {} # map peer hostport to list of zonefile hashes
# which peers can serve each zonefile?
for zfhash in missing_zfinfo.keys():
for peer_hostport in peer_hostports:
if not zonefile_origins.has_key(peer_hostport):
zonefile_origins[peer_hostport] = []
if peer_hostport in missing_zfinfo[zfhash]['peers']:
zonefile_origins[peer_hostport].append( zfhash )
return zonefile_origins | ['def', 'find_zonefile_origins', '(', 'self', ',', 'missing_zfinfo', ',', 'peer_hostports', ')', ':', 'zonefile_origins', '=', '{', '}', '# map peer hostport to list of zonefile hashes', '# which peers can serve each zonefile?', 'for', 'zfhash', 'in', 'missing_zfinfo', '.', 'keys', '(', ')', ':', 'for', 'peer_hostport', 'in', 'peer_hostports', ':', 'if', 'not', 'zonefile_origins', '.', 'has_key', '(', 'peer_hostport', ')', ':', 'zonefile_origins', '[', 'peer_hostport', ']', '=', '[', ']', 'if', 'peer_hostport', 'in', 'missing_zfinfo', '[', 'zfhash', ']', '[', "'peers'", ']', ':', 'zonefile_origins', '[', 'peer_hostport', ']', '.', 'append', '(', 'zfhash', ')', 'return', 'zonefile_origins'] | Find out which peers can serve which zonefiles | ['Find', 'out', 'which', 'peers', 'can', 'serve', 'which', 'zonefiles'] | train | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L3316-L3331 |
8,529 | fred49/linshare-api | linshareapi/cache.py | Invalid.override_familly | def override_familly(self, args):
"""Look in the current wrapped object to find a cache configuration to
override the current default configuration."""
resourceapi = args[0]
cache_cfg = resourceapi.cache
if cache_cfg.has_key('familly'):
self.familly = cache_cfg['familly']
if cache_cfg.has_key('whole_familly'):
self.whole_familly = cache_cfg['whole_familly']
if self.familly is None:
raise Exception("Invalid familly value for Cache decorator.") | python | def override_familly(self, args):
"""Look in the current wrapped object to find a cache configuration to
override the current default configuration."""
resourceapi = args[0]
cache_cfg = resourceapi.cache
if cache_cfg.has_key('familly'):
self.familly = cache_cfg['familly']
if cache_cfg.has_key('whole_familly'):
self.whole_familly = cache_cfg['whole_familly']
if self.familly is None:
raise Exception("Invalid familly value for Cache decorator.") | ['def', 'override_familly', '(', 'self', ',', 'args', ')', ':', 'resourceapi', '=', 'args', '[', '0', ']', 'cache_cfg', '=', 'resourceapi', '.', 'cache', 'if', 'cache_cfg', '.', 'has_key', '(', "'familly'", ')', ':', 'self', '.', 'familly', '=', 'cache_cfg', '[', "'familly'", ']', 'if', 'cache_cfg', '.', 'has_key', '(', "'whole_familly'", ')', ':', 'self', '.', 'whole_familly', '=', 'cache_cfg', '[', "'whole_familly'", ']', 'if', 'self', '.', 'familly', 'is', 'None', ':', 'raise', 'Exception', '(', '"Invalid familly value for Cache decorator."', ')'] | Look in the current wrapped object to find a cache configuration to
override the current default configuration. | ['Look', 'in', 'the', 'current', 'wrapped', 'object', 'to', 'find', 'a', 'cache', 'configuration', 'to', 'override', 'the', 'current', 'default', 'configuration', '.'] | train | https://github.com/fred49/linshare-api/blob/be646c25aa8ba3718abb6869c620b157d53d6e41/linshareapi/cache.py#L117-L127 |
8,530 | mapnik/Cascadenik | cascadenik/compile.py | expand_source_declarations | def expand_source_declarations(map_el, dirs, local_conf):
""" This provides mechanism for externalizing and sharing data sources. The datasource configs are
python files, and layers reference sections within that config:
<DataSourcesConfig src="datasources.cfg" />
<Layer class="road major" source_name="planet_osm_major_roads" />
<Layer class="road minor" source_name="planet_osm_minor_roads" />
See example_dscfg.mml and example.cfg at the root of the cascadenik directory for an example.
"""
ds = sources.DataSources(dirs.source, local_conf)
# build up the configuration
for spec in map_el.findall('DataSourcesConfig'):
map_el.remove(spec)
src_text, local_base = fetch_embedded_or_remote_src(spec, dirs)
if not src_text:
continue
ds.add_config(src_text, local_base)
# now transform the xml
# add in base datasources
for base_name in ds.templates:
b = Element("Datasource", name=base_name)
for pname, pvalue in ds.sources[base_name]['parameters'].items():
p = Element("Parameter", name=pname)
p.text = str(pvalue)
b.append(p)
map_el.insert(0, b)
# expand layer data sources
for layer in map_el.findall('Layer'):
if 'source_name' not in layer.attrib:
continue
if layer.attrib['source_name'] not in ds.sources:
raise Exception("Datasource '%s' referenced, but not defined in layer:\n%s" % (layer.attrib['source_name'], ElementTree.tostring(layer)))
# create the nested datasource object
b = Element("Datasource")
dsrc = ds.sources[layer.attrib['source_name']]
if 'template' in dsrc:
b.attrib['base'] = dsrc['template']
# set the SRS if present
if 'layer_srs' in dsrc:
layer.attrib['srs'] = dsrc['layer_srs']
for pname, pvalue in dsrc['parameters'].items():
p = Element("Parameter", name=pname)
p.text = pvalue
b.append(p)
layer.append(b)
del layer.attrib['source_name'] | python | def expand_source_declarations(map_el, dirs, local_conf):
""" This provides mechanism for externalizing and sharing data sources. The datasource configs are
python files, and layers reference sections within that config:
<DataSourcesConfig src="datasources.cfg" />
<Layer class="road major" source_name="planet_osm_major_roads" />
<Layer class="road minor" source_name="planet_osm_minor_roads" />
See example_dscfg.mml and example.cfg at the root of the cascadenik directory for an example.
"""
ds = sources.DataSources(dirs.source, local_conf)
# build up the configuration
for spec in map_el.findall('DataSourcesConfig'):
map_el.remove(spec)
src_text, local_base = fetch_embedded_or_remote_src(spec, dirs)
if not src_text:
continue
ds.add_config(src_text, local_base)
# now transform the xml
# add in base datasources
for base_name in ds.templates:
b = Element("Datasource", name=base_name)
for pname, pvalue in ds.sources[base_name]['parameters'].items():
p = Element("Parameter", name=pname)
p.text = str(pvalue)
b.append(p)
map_el.insert(0, b)
# expand layer data sources
for layer in map_el.findall('Layer'):
if 'source_name' not in layer.attrib:
continue
if layer.attrib['source_name'] not in ds.sources:
raise Exception("Datasource '%s' referenced, but not defined in layer:\n%s" % (layer.attrib['source_name'], ElementTree.tostring(layer)))
# create the nested datasource object
b = Element("Datasource")
dsrc = ds.sources[layer.attrib['source_name']]
if 'template' in dsrc:
b.attrib['base'] = dsrc['template']
# set the SRS if present
if 'layer_srs' in dsrc:
layer.attrib['srs'] = dsrc['layer_srs']
for pname, pvalue in dsrc['parameters'].items():
p = Element("Parameter", name=pname)
p.text = pvalue
b.append(p)
layer.append(b)
del layer.attrib['source_name'] | ['def', 'expand_source_declarations', '(', 'map_el', ',', 'dirs', ',', 'local_conf', ')', ':', 'ds', '=', 'sources', '.', 'DataSources', '(', 'dirs', '.', 'source', ',', 'local_conf', ')', '# build up the configuration', 'for', 'spec', 'in', 'map_el', '.', 'findall', '(', "'DataSourcesConfig'", ')', ':', 'map_el', '.', 'remove', '(', 'spec', ')', 'src_text', ',', 'local_base', '=', 'fetch_embedded_or_remote_src', '(', 'spec', ',', 'dirs', ')', 'if', 'not', 'src_text', ':', 'continue', 'ds', '.', 'add_config', '(', 'src_text', ',', 'local_base', ')', '# now transform the xml', '# add in base datasources', 'for', 'base_name', 'in', 'ds', '.', 'templates', ':', 'b', '=', 'Element', '(', '"Datasource"', ',', 'name', '=', 'base_name', ')', 'for', 'pname', ',', 'pvalue', 'in', 'ds', '.', 'sources', '[', 'base_name', ']', '[', "'parameters'", ']', '.', 'items', '(', ')', ':', 'p', '=', 'Element', '(', '"Parameter"', ',', 'name', '=', 'pname', ')', 'p', '.', 'text', '=', 'str', '(', 'pvalue', ')', 'b', '.', 'append', '(', 'p', ')', 'map_el', '.', 'insert', '(', '0', ',', 'b', ')', '# expand layer data sources', 'for', 'layer', 'in', 'map_el', '.', 'findall', '(', "'Layer'", ')', ':', 'if', "'source_name'", 'not', 'in', 'layer', '.', 'attrib', ':', 'continue', 'if', 'layer', '.', 'attrib', '[', "'source_name'", ']', 'not', 'in', 'ds', '.', 'sources', ':', 'raise', 'Exception', '(', '"Datasource \'%s\' referenced, but not defined in layer:\\n%s"', '%', '(', 'layer', '.', 'attrib', '[', "'source_name'", ']', ',', 'ElementTree', '.', 'tostring', '(', 'layer', ')', ')', ')', '# create the nested datasource object ', 'b', '=', 'Element', '(', '"Datasource"', ')', 'dsrc', '=', 'ds', '.', 'sources', '[', 'layer', '.', 'attrib', '[', "'source_name'", ']', ']', 'if', "'template'", 'in', 'dsrc', ':', 'b', '.', 'attrib', '[', "'base'", ']', '=', 'dsrc', '[', "'template'", ']', '# set the SRS if present', 'if', "'layer_srs'", 'in', 'dsrc', ':', 'layer', '.', 'attrib', '[', "'srs'", ']', '=', 'dsrc', '[', "'layer_srs'", ']', 'for', 'pname', ',', 'pvalue', 'in', 'dsrc', '[', "'parameters'", ']', '.', 'items', '(', ')', ':', 'p', '=', 'Element', '(', '"Parameter"', ',', 'name', '=', 'pname', ')', 'p', '.', 'text', '=', 'pvalue', 'b', '.', 'append', '(', 'p', ')', 'layer', '.', 'append', '(', 'b', ')', 'del', 'layer', '.', 'attrib', '[', "'source_name'", ']'] | This provides mechanism for externalizing and sharing data sources. The datasource configs are
python files, and layers reference sections within that config:
<DataSourcesConfig src="datasources.cfg" />
<Layer class="road major" source_name="planet_osm_major_roads" />
<Layer class="road minor" source_name="planet_osm_minor_roads" />
See example_dscfg.mml and example.cfg at the root of the cascadenik directory for an example. | ['This', 'provides', 'mechanism', 'for', 'externalizing', 'and', 'sharing', 'data', 'sources', '.', 'The', 'datasource', 'configs', 'are', 'python', 'files', 'and', 'layers', 'reference', 'sections', 'within', 'that', 'config', ':', '<DataSourcesConfig', 'src', '=', 'datasources', '.', 'cfg', '/', '>', '<Layer', 'class', '=', 'road', 'major', 'source_name', '=', 'planet_osm_major_roads', '/', '>', '<Layer', 'class', '=', 'road', 'minor', 'source_name', '=', 'planet_osm_minor_roads', '/', '>', 'See', 'example_dscfg', '.', 'mml', 'and', 'example', '.', 'cfg', 'at', 'the', 'root', 'of', 'the', 'cascadenik', 'directory', 'for', 'an', 'example', '.'] | train | https://github.com/mapnik/Cascadenik/blob/82f66859340a31dfcb24af127274f262d4f3ad85/cascadenik/compile.py#L671-L731 |
8,531 | inveniosoftware-attic/invenio-client | invenio_client/connector.py | InvenioConnector._init_browser | def _init_browser(self):
"""Overide in appropriate way to prepare a logged in browser."""
self.browser = splinter.Browser('phantomjs')
self.browser.visit(self.server_url + "/youraccount/login")
try:
self.browser.fill('nickname', self.user)
self.browser.fill('password', self.password)
except:
self.browser.fill('p_un', self.user)
self.browser.fill('p_pw', self.password)
self.browser.fill('login_method', self.login_method)
self.browser.find_by_css('input[type=submit]').click() | python | def _init_browser(self):
"""Overide in appropriate way to prepare a logged in browser."""
self.browser = splinter.Browser('phantomjs')
self.browser.visit(self.server_url + "/youraccount/login")
try:
self.browser.fill('nickname', self.user)
self.browser.fill('password', self.password)
except:
self.browser.fill('p_un', self.user)
self.browser.fill('p_pw', self.password)
self.browser.fill('login_method', self.login_method)
self.browser.find_by_css('input[type=submit]').click() | ['def', '_init_browser', '(', 'self', ')', ':', 'self', '.', 'browser', '=', 'splinter', '.', 'Browser', '(', "'phantomjs'", ')', 'self', '.', 'browser', '.', 'visit', '(', 'self', '.', 'server_url', '+', '"/youraccount/login"', ')', 'try', ':', 'self', '.', 'browser', '.', 'fill', '(', "'nickname'", ',', 'self', '.', 'user', ')', 'self', '.', 'browser', '.', 'fill', '(', "'password'", ',', 'self', '.', 'password', ')', 'except', ':', 'self', '.', 'browser', '.', 'fill', '(', "'p_un'", ',', 'self', '.', 'user', ')', 'self', '.', 'browser', '.', 'fill', '(', "'p_pw'", ',', 'self', '.', 'password', ')', 'self', '.', 'browser', '.', 'fill', '(', "'login_method'", ',', 'self', '.', 'login_method', ')', 'self', '.', 'browser', '.', 'find_by_css', '(', "'input[type=submit]'", ')', '.', 'click', '(', ')'] | Overide in appropriate way to prepare a logged in browser. | ['Overide', 'in', 'appropriate', 'way', 'to', 'prepare', 'a', 'logged', 'in', 'browser', '.'] | train | https://github.com/inveniosoftware-attic/invenio-client/blob/3f9ddb6f3b3ce3a21d399d1098d6769bf05cdd6c/invenio_client/connector.py#L141-L152 |
8,532 | PmagPy/PmagPy | pmagpy/ipmag.py | inc_from_lat | def inc_from_lat(lat):
"""
Calculate inclination predicted from latitude using the dipole equation
Parameter
----------
lat : latitude in degrees
Returns
-------
inc : inclination calculated using the dipole equation
"""
rad = old_div(np.pi, 180.)
inc = old_div(np.arctan(2 * np.tan(lat * rad)), rad)
return inc | python | def inc_from_lat(lat):
"""
Calculate inclination predicted from latitude using the dipole equation
Parameter
----------
lat : latitude in degrees
Returns
-------
inc : inclination calculated using the dipole equation
"""
rad = old_div(np.pi, 180.)
inc = old_div(np.arctan(2 * np.tan(lat * rad)), rad)
return inc | ['def', 'inc_from_lat', '(', 'lat', ')', ':', 'rad', '=', 'old_div', '(', 'np', '.', 'pi', ',', '180.', ')', 'inc', '=', 'old_div', '(', 'np', '.', 'arctan', '(', '2', '*', 'np', '.', 'tan', '(', 'lat', '*', 'rad', ')', ')', ',', 'rad', ')', 'return', 'inc'] | Calculate inclination predicted from latitude using the dipole equation
Parameter
----------
lat : latitude in degrees
Returns
-------
inc : inclination calculated using the dipole equation | ['Calculate', 'inclination', 'predicted', 'from', 'latitude', 'using', 'the', 'dipole', 'equation'] | train | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L1426-L1440 |
8,533 | hapylestat/apputils | apputils/settings/general.py | Configuration._load_modules | def _load_modules(self):
"""
Load modules-related configuration listened in modules section
Before loading:
"modules": {
"mal": "myanimelist.json",
"ann": "animenewsnetwork.json"
}
After loading:
"modules": {
"mal": {
....
},
"ann": {
....
}
}
"""
if self.exists("modules"):
for item in self._json["modules"]:
try:
json_data = json.loads(self._load_from_configs(self._json["modules"][item]))
self._json["modules"][item] = json_data
except Exception as err:
raise FileNotFoundError("Couldn't load module {} configuration from {}: {}".format(item, self._json["modules"][item], err)) | python | def _load_modules(self):
"""
Load modules-related configuration listened in modules section
Before loading:
"modules": {
"mal": "myanimelist.json",
"ann": "animenewsnetwork.json"
}
After loading:
"modules": {
"mal": {
....
},
"ann": {
....
}
}
"""
if self.exists("modules"):
for item in self._json["modules"]:
try:
json_data = json.loads(self._load_from_configs(self._json["modules"][item]))
self._json["modules"][item] = json_data
except Exception as err:
raise FileNotFoundError("Couldn't load module {} configuration from {}: {}".format(item, self._json["modules"][item], err)) | ['def', '_load_modules', '(', 'self', ')', ':', 'if', 'self', '.', 'exists', '(', '"modules"', ')', ':', 'for', 'item', 'in', 'self', '.', '_json', '[', '"modules"', ']', ':', 'try', ':', 'json_data', '=', 'json', '.', 'loads', '(', 'self', '.', '_load_from_configs', '(', 'self', '.', '_json', '[', '"modules"', ']', '[', 'item', ']', ')', ')', 'self', '.', '_json', '[', '"modules"', ']', '[', 'item', ']', '=', 'json_data', 'except', 'Exception', 'as', 'err', ':', 'raise', 'FileNotFoundError', '(', '"Couldn\'t load module {} configuration from {}: {}"', '.', 'format', '(', 'item', ',', 'self', '.', '_json', '[', '"modules"', ']', '[', 'item', ']', ',', 'err', ')', ')'] | Load modules-related configuration listened in modules section
Before loading:
"modules": {
"mal": "myanimelist.json",
"ann": "animenewsnetwork.json"
}
After loading:
"modules": {
"mal": {
....
},
"ann": {
....
}
} | ['Load', 'modules', '-', 'related', 'configuration', 'listened', 'in', 'modules', 'section', 'Before', 'loading', ':', 'modules', ':', '{', 'mal', ':', 'myanimelist', '.', 'json', 'ann', ':', 'animenewsnetwork', '.', 'json', '}', 'After', 'loading', ':', 'modules', ':', '{', 'mal', ':', '{', '....', '}', 'ann', ':', '{', '....', '}', '}'] | train | https://github.com/hapylestat/apputils/blob/5d185616feda27e6e21273307161471ef11a3518/apputils/settings/general.py#L104-L128 |
8,534 | hendrix/hendrix | hendrix/contrib/cache/backends/memory_cache.py | MemoryCacheBackend.addResource | def addResource(self, content, uri, headers):
"""
Adds the a hendrix.contrib.cache.resource.CachedResource to the
ReverseProxy cache connection
"""
self.cache[uri] = CachedResource(content, headers) | python | def addResource(self, content, uri, headers):
"""
Adds the a hendrix.contrib.cache.resource.CachedResource to the
ReverseProxy cache connection
"""
self.cache[uri] = CachedResource(content, headers) | ['def', 'addResource', '(', 'self', ',', 'content', ',', 'uri', ',', 'headers', ')', ':', 'self', '.', 'cache', '[', 'uri', ']', '=', 'CachedResource', '(', 'content', ',', 'headers', ')'] | Adds the a hendrix.contrib.cache.resource.CachedResource to the
ReverseProxy cache connection | ['Adds', 'the', 'a', 'hendrix', '.', 'contrib', '.', 'cache', '.', 'resource', '.', 'CachedResource', 'to', 'the', 'ReverseProxy', 'cache', 'connection'] | train | https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/cache/backends/memory_cache.py#L15-L20 |
8,535 | ml4ai/delphi | delphi/translators/for2py/arrays.py | array_values | def array_values(expr):
"""Given an expression expr denoting a list of values, array_values(expr)
returns a list of values for that expression."""
if isinstance(expr, Array):
return expr.get_elems(all_subs(expr._bounds))
elif isinstance(expr, list):
vals = [array_values(x) for x in expr]
return flatten(vals)
else:
return [expr] | python | def array_values(expr):
"""Given an expression expr denoting a list of values, array_values(expr)
returns a list of values for that expression."""
if isinstance(expr, Array):
return expr.get_elems(all_subs(expr._bounds))
elif isinstance(expr, list):
vals = [array_values(x) for x in expr]
return flatten(vals)
else:
return [expr] | ['def', 'array_values', '(', 'expr', ')', ':', 'if', 'isinstance', '(', 'expr', ',', 'Array', ')', ':', 'return', 'expr', '.', 'get_elems', '(', 'all_subs', '(', 'expr', '.', '_bounds', ')', ')', 'elif', 'isinstance', '(', 'expr', ',', 'list', ')', ':', 'vals', '=', '[', 'array_values', '(', 'x', ')', 'for', 'x', 'in', 'expr', ']', 'return', 'flatten', '(', 'vals', ')', 'else', ':', 'return', '[', 'expr', ']'] | Given an expression expr denoting a list of values, array_values(expr)
returns a list of values for that expression. | ['Given', 'an', 'expression', 'expr', 'denoting', 'a', 'list', 'of', 'values', 'array_values', '(', 'expr', ')', 'returns', 'a', 'list', 'of', 'values', 'for', 'that', 'expression', '.'] | train | https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/translators/for2py/arrays.py#L179-L188 |
8,536 | brocade/pynos | pynos/versions/ver_7/ver_7_1_0/yang/brocade_snmp.py | brocade_snmp.snmp_server_group_write | def snmp_server_group_write(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp")
group = ET.SubElement(snmp_server, "group")
group_name_key = ET.SubElement(group, "group-name")
group_name_key.text = kwargs.pop('group_name')
group_version_key = ET.SubElement(group, "group-version")
group_version_key.text = kwargs.pop('group_version')
write = ET.SubElement(group, "write")
write.text = kwargs.pop('write')
callback = kwargs.pop('callback', self._callback)
return callback(config) | python | def snmp_server_group_write(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp")
group = ET.SubElement(snmp_server, "group")
group_name_key = ET.SubElement(group, "group-name")
group_name_key.text = kwargs.pop('group_name')
group_version_key = ET.SubElement(group, "group-version")
group_version_key.text = kwargs.pop('group_version')
write = ET.SubElement(group, "write")
write.text = kwargs.pop('write')
callback = kwargs.pop('callback', self._callback)
return callback(config) | ['def', 'snmp_server_group_write', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'snmp_server', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"snmp-server"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-snmp"', ')', 'group', '=', 'ET', '.', 'SubElement', '(', 'snmp_server', ',', '"group"', ')', 'group_name_key', '=', 'ET', '.', 'SubElement', '(', 'group', ',', '"group-name"', ')', 'group_name_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'group_name'", ')', 'group_version_key', '=', 'ET', '.', 'SubElement', '(', 'group', ',', '"group-version"', ')', 'group_version_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'group_version'", ')', 'write', '=', 'ET', '.', 'SubElement', '(', 'group', ',', '"write"', ')', 'write', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'write'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')'] | Auto Generated Code | ['Auto', 'Generated', 'Code'] | train | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_snmp.py#L656-L670 |
8,537 | xtrementl/focus | focus/plugin/modules/tasks.py | _print_tasks | def _print_tasks(env, tasks, mark_active=False):
""" Prints task information using io stream.
`env`
``Environment`` object.
`tasks`
List of tuples (task_name, options, block_options).
`mark_active`
Set to ``True`` to mark active task.
"""
if env.task.active and mark_active:
active_task = env.task.name
else:
active_task = None
for task, options, blocks in tasks:
# print heading
invalid = False
if task == active_task:
method = 'success'
else:
if options is None and blocks is None:
method = 'error'
invalid = True
else:
method = 'write'
opts = list(options or [])
blks = list(blocks or [])
write = getattr(env.io, method)
write('~' * 80)
write(' ' + task)
write('~' * 80)
env.io.write('')
# non-block options
if opts:
for opt, values in opts:
env.io.write(' {0}: {1}'.format(opt,
', '.join(str(v) for v in values)))
env.io.write('')
# block options
if blks:
had_options = False
for block, options in blks:
if options:
had_options = True
env.io.write(' {{ {0} }}'.format(block))
for opt, values in options:
env.io.write(' {0}: {1}'.format(opt,
', '.join(str(v) for v in values)))
env.io.write('')
if not had_options:
blks = None
if not opts and not blks:
if invalid:
env.io.write(' Invalid task.')
else:
env.io.write(' Empty task.')
env.io.write('') | python | def _print_tasks(env, tasks, mark_active=False):
""" Prints task information using io stream.
`env`
``Environment`` object.
`tasks`
List of tuples (task_name, options, block_options).
`mark_active`
Set to ``True`` to mark active task.
"""
if env.task.active and mark_active:
active_task = env.task.name
else:
active_task = None
for task, options, blocks in tasks:
# print heading
invalid = False
if task == active_task:
method = 'success'
else:
if options is None and blocks is None:
method = 'error'
invalid = True
else:
method = 'write'
opts = list(options or [])
blks = list(blocks or [])
write = getattr(env.io, method)
write('~' * 80)
write(' ' + task)
write('~' * 80)
env.io.write('')
# non-block options
if opts:
for opt, values in opts:
env.io.write(' {0}: {1}'.format(opt,
', '.join(str(v) for v in values)))
env.io.write('')
# block options
if blks:
had_options = False
for block, options in blks:
if options:
had_options = True
env.io.write(' {{ {0} }}'.format(block))
for opt, values in options:
env.io.write(' {0}: {1}'.format(opt,
', '.join(str(v) for v in values)))
env.io.write('')
if not had_options:
blks = None
if not opts and not blks:
if invalid:
env.io.write(' Invalid task.')
else:
env.io.write(' Empty task.')
env.io.write('') | ['def', '_print_tasks', '(', 'env', ',', 'tasks', ',', 'mark_active', '=', 'False', ')', ':', 'if', 'env', '.', 'task', '.', 'active', 'and', 'mark_active', ':', 'active_task', '=', 'env', '.', 'task', '.', 'name', 'else', ':', 'active_task', '=', 'None', 'for', 'task', ',', 'options', ',', 'blocks', 'in', 'tasks', ':', '# print heading', 'invalid', '=', 'False', 'if', 'task', '==', 'active_task', ':', 'method', '=', "'success'", 'else', ':', 'if', 'options', 'is', 'None', 'and', 'blocks', 'is', 'None', ':', 'method', '=', "'error'", 'invalid', '=', 'True', 'else', ':', 'method', '=', "'write'", 'opts', '=', 'list', '(', 'options', 'or', '[', ']', ')', 'blks', '=', 'list', '(', 'blocks', 'or', '[', ']', ')', 'write', '=', 'getattr', '(', 'env', '.', 'io', ',', 'method', ')', 'write', '(', "'~'", '*', '80', ')', 'write', '(', "' '", '+', 'task', ')', 'write', '(', "'~'", '*', '80', ')', 'env', '.', 'io', '.', 'write', '(', "''", ')', '# non-block options', 'if', 'opts', ':', 'for', 'opt', ',', 'values', 'in', 'opts', ':', 'env', '.', 'io', '.', 'write', '(', "' {0}: {1}'", '.', 'format', '(', 'opt', ',', "', '", '.', 'join', '(', 'str', '(', 'v', ')', 'for', 'v', 'in', 'values', ')', ')', ')', 'env', '.', 'io', '.', 'write', '(', "''", ')', '# block options', 'if', 'blks', ':', 'had_options', '=', 'False', 'for', 'block', ',', 'options', 'in', 'blks', ':', 'if', 'options', ':', 'had_options', '=', 'True', 'env', '.', 'io', '.', 'write', '(', "' {{ {0} }}'", '.', 'format', '(', 'block', ')', ')', 'for', 'opt', ',', 'values', 'in', 'options', ':', 'env', '.', 'io', '.', 'write', '(', "' {0}: {1}'", '.', 'format', '(', 'opt', ',', "', '", '.', 'join', '(', 'str', '(', 'v', ')', 'for', 'v', 'in', 'values', ')', ')', ')', 'env', '.', 'io', '.', 'write', '(', "''", ')', 'if', 'not', 'had_options', ':', 'blks', '=', 'None', 'if', 'not', 'opts', 'and', 'not', 'blks', ':', 'if', 'invalid', ':', 'env', '.', 'io', '.', 'write', '(', "' Invalid task.'", ')', 'else', ':', 'env', '.', 'io', '.', 'write', '(', "' Empty task.'", ')', 'env', '.', 'io', '.', 'write', '(', "''", ')'] | Prints task information using io stream.
`env`
``Environment`` object.
`tasks`
List of tuples (task_name, options, block_options).
`mark_active`
Set to ``True`` to mark active task. | ['Prints', 'task', 'information', 'using', 'io', 'stream', '.'] | train | https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/tasks.py#L18-L86 |
8,538 | denisenkom/django-sqlserver | sqlserver/base.py | _get_new_connection | def _get_new_connection(self, conn_params):
"""Opens a connection to the database."""
self.__connection_string = conn_params.get('connection_string', '')
conn = self.Database.connect(**conn_params)
return conn | python | def _get_new_connection(self, conn_params):
"""Opens a connection to the database."""
self.__connection_string = conn_params.get('connection_string', '')
conn = self.Database.connect(**conn_params)
return conn | ['def', '_get_new_connection', '(', 'self', ',', 'conn_params', ')', ':', 'self', '.', '__connection_string', '=', 'conn_params', '.', 'get', '(', "'connection_string'", ',', "''", ')', 'conn', '=', 'self', '.', 'Database', '.', 'connect', '(', '*', '*', 'conn_params', ')', 'return', 'conn'] | Opens a connection to the database. | ['Opens', 'a', 'connection', 'to', 'the', 'database', '.'] | train | https://github.com/denisenkom/django-sqlserver/blob/f5d5dc8637799746f1bd11bd8c479d3acd468581/sqlserver/base.py#L216-L220 |
8,539 | aws/sagemaker-python-sdk | src/sagemaker/session.py | Session.default_bucket | def default_bucket(self):
"""Return the name of the default bucket to use in relevant Amazon SageMaker interactions.
Returns:
str: The name of the default bucket, which is of the form: ``sagemaker-{region}-{AWS account ID}``.
"""
if self._default_bucket:
return self._default_bucket
account = self.boto_session.client('sts').get_caller_identity()['Account']
region = self.boto_session.region_name
default_bucket = 'sagemaker-{}-{}'.format(region, account)
s3 = self.boto_session.resource('s3')
try:
# 'us-east-1' cannot be specified because it is the default region:
# https://github.com/boto/boto3/issues/125
if region == 'us-east-1':
s3.create_bucket(Bucket=default_bucket)
else:
s3.create_bucket(Bucket=default_bucket, CreateBucketConfiguration={'LocationConstraint': region})
LOGGER.info('Created S3 bucket: {}'.format(default_bucket))
except ClientError as e:
error_code = e.response['Error']['Code']
message = e.response['Error']['Message']
if error_code == 'BucketAlreadyOwnedByYou':
pass
elif error_code == 'OperationAborted' and 'conflicting conditional operation' in message:
# If this bucket is already being concurrently created, we don't need to create it again.
pass
elif error_code == 'TooManyBuckets':
# Succeed if the default bucket exists
s3.meta.client.head_bucket(Bucket=default_bucket)
else:
raise
self._default_bucket = default_bucket
return self._default_bucket | python | def default_bucket(self):
"""Return the name of the default bucket to use in relevant Amazon SageMaker interactions.
Returns:
str: The name of the default bucket, which is of the form: ``sagemaker-{region}-{AWS account ID}``.
"""
if self._default_bucket:
return self._default_bucket
account = self.boto_session.client('sts').get_caller_identity()['Account']
region = self.boto_session.region_name
default_bucket = 'sagemaker-{}-{}'.format(region, account)
s3 = self.boto_session.resource('s3')
try:
# 'us-east-1' cannot be specified because it is the default region:
# https://github.com/boto/boto3/issues/125
if region == 'us-east-1':
s3.create_bucket(Bucket=default_bucket)
else:
s3.create_bucket(Bucket=default_bucket, CreateBucketConfiguration={'LocationConstraint': region})
LOGGER.info('Created S3 bucket: {}'.format(default_bucket))
except ClientError as e:
error_code = e.response['Error']['Code']
message = e.response['Error']['Message']
if error_code == 'BucketAlreadyOwnedByYou':
pass
elif error_code == 'OperationAborted' and 'conflicting conditional operation' in message:
# If this bucket is already being concurrently created, we don't need to create it again.
pass
elif error_code == 'TooManyBuckets':
# Succeed if the default bucket exists
s3.meta.client.head_bucket(Bucket=default_bucket)
else:
raise
self._default_bucket = default_bucket
return self._default_bucket | ['def', 'default_bucket', '(', 'self', ')', ':', 'if', 'self', '.', '_default_bucket', ':', 'return', 'self', '.', '_default_bucket', 'account', '=', 'self', '.', 'boto_session', '.', 'client', '(', "'sts'", ')', '.', 'get_caller_identity', '(', ')', '[', "'Account'", ']', 'region', '=', 'self', '.', 'boto_session', '.', 'region_name', 'default_bucket', '=', "'sagemaker-{}-{}'", '.', 'format', '(', 'region', ',', 'account', ')', 's3', '=', 'self', '.', 'boto_session', '.', 'resource', '(', "'s3'", ')', 'try', ':', "# 'us-east-1' cannot be specified because it is the default region:", '# https://github.com/boto/boto3/issues/125', 'if', 'region', '==', "'us-east-1'", ':', 's3', '.', 'create_bucket', '(', 'Bucket', '=', 'default_bucket', ')', 'else', ':', 's3', '.', 'create_bucket', '(', 'Bucket', '=', 'default_bucket', ',', 'CreateBucketConfiguration', '=', '{', "'LocationConstraint'", ':', 'region', '}', ')', 'LOGGER', '.', 'info', '(', "'Created S3 bucket: {}'", '.', 'format', '(', 'default_bucket', ')', ')', 'except', 'ClientError', 'as', 'e', ':', 'error_code', '=', 'e', '.', 'response', '[', "'Error'", ']', '[', "'Code'", ']', 'message', '=', 'e', '.', 'response', '[', "'Error'", ']', '[', "'Message'", ']', 'if', 'error_code', '==', "'BucketAlreadyOwnedByYou'", ':', 'pass', 'elif', 'error_code', '==', "'OperationAborted'", 'and', "'conflicting conditional operation'", 'in', 'message', ':', "# If this bucket is already being concurrently created, we don't need to create it again.", 'pass', 'elif', 'error_code', '==', "'TooManyBuckets'", ':', '# Succeed if the default bucket exists', 's3', '.', 'meta', '.', 'client', '.', 'head_bucket', '(', 'Bucket', '=', 'default_bucket', ')', 'else', ':', 'raise', 'self', '.', '_default_bucket', '=', 'default_bucket', 'return', 'self', '.', '_default_bucket'] | Return the name of the default bucket to use in relevant Amazon SageMaker interactions.
Returns:
str: The name of the default bucket, which is of the form: ``sagemaker-{region}-{AWS account ID}``. | ['Return', 'the', 'name', 'of', 'the', 'default', 'bucket', 'to', 'use', 'in', 'relevant', 'Amazon', 'SageMaker', 'interactions', '.'] | train | https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/session.py#L171-L211 |
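A minimal usage sketch for the `Session.default_bucket` record above. The boto3 credentials/region setup is assumed to exist already; the key prefix is hypothetical.

```python
# Sketch only: resolve the account/region-specific default bucket once, then reuse it.
import sagemaker

session = sagemaker.Session()
bucket = session.default_bucket()      # e.g. 'sagemaker-us-east-1-123456789012'
prefix = 'training-data'               # hypothetical S3 key prefix
print('s3://{}/{}'.format(bucket, prefix))
```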
8,540 | riptano/ccm | ccmlib/common.py | wait_for_any_log | def wait_for_any_log(nodes, pattern, timeout, filename='system.log', marks=None):
"""
Look for a pattern in the system.log of any in a given list
of nodes.
@param nodes The list of nodes whose logs to scan
@param pattern The target pattern
@param timeout How long to wait for the pattern. Note that
strictly speaking, timeout is not really a timeout,
but a maximum number of attempts. This implies that
all the grepping takes no time at all, so it is
somewhat inaccurate, but probably close enough.
@param marks A dict of nodes to marks in the file. Keys must match the first param list.
@return The first node in whose log the pattern was found
"""
if marks is None:
marks = {}
for _ in range(timeout):
for node in nodes:
found = node.grep_log(pattern, filename=filename, from_mark=marks.get(node, None))
if found:
return node
time.sleep(1)
raise TimeoutError(time.strftime("%d %b %Y %H:%M:%S", time.gmtime()) +
" Unable to find: " + repr(pattern) + " in any node log within " + str(timeout) + "s") | python | def wait_for_any_log(nodes, pattern, timeout, filename='system.log', marks=None):
"""
Look for a pattern in the system.log of any in a given list
of nodes.
@param nodes The list of nodes whose logs to scan
@param pattern The target pattern
@param timeout How long to wait for the pattern. Note that
strictly speaking, timeout is not really a timeout,
but a maximum number of attempts. This implies that
all the grepping takes no time at all, so it is
somewhat inaccurate, but probably close enough.
@param marks A dict of nodes to marks in the file. Keys must match the first param list.
@return The first node in whose log the pattern was found
"""
if marks is None:
marks = {}
for _ in range(timeout):
for node in nodes:
found = node.grep_log(pattern, filename=filename, from_mark=marks.get(node, None))
if found:
return node
time.sleep(1)
raise TimeoutError(time.strftime("%d %b %Y %H:%M:%S", time.gmtime()) +
" Unable to find: " + repr(pattern) + " in any node log within " + str(timeout) + "s") | ['def', 'wait_for_any_log', '(', 'nodes', ',', 'pattern', ',', 'timeout', ',', 'filename', '=', "'system.log'", ',', 'marks', '=', 'None', ')', ':', 'if', 'marks', 'is', 'None', ':', 'marks', '=', '{', '}', 'for', '_', 'in', 'range', '(', 'timeout', ')', ':', 'for', 'node', 'in', 'nodes', ':', 'found', '=', 'node', '.', 'grep_log', '(', 'pattern', ',', 'filename', '=', 'filename', ',', 'from_mark', '=', 'marks', '.', 'get', '(', 'node', ',', 'None', ')', ')', 'if', 'found', ':', 'return', 'node', 'time', '.', 'sleep', '(', '1', ')', 'raise', 'TimeoutError', '(', 'time', '.', 'strftime', '(', '"%d %b %Y %H:%M:%S"', ',', 'time', '.', 'gmtime', '(', ')', ')', '+', '" Unable to find: "', '+', 'repr', '(', 'pattern', ')', '+', '" in any node log within "', '+', 'str', '(', 'timeout', ')', '+', '"s"', ')'] | Look for a pattern in the system.log of any in a given list
of nodes.
@param nodes The list of nodes whose logs to scan
@param pattern The target pattern
@param timeout How long to wait for the pattern. Note that
strictly speaking, timeout is not really a timeout,
but a maximum number of attempts. This implies that
all the grepping takes no time at all, so it is
somewhat inaccurate, but probably close enough.
@param marks A dict of nodes to marks in the file. Keys must match the first param list.
@return The first node in whose log the pattern was found | ['Look', 'for', 'a', 'pattern', 'in', 'the', 'system', '.', 'log', 'of', 'any', 'in', 'a', 'given', 'list', 'of', 'nodes', '.'] | train | https://github.com/riptano/ccm/blob/275699f79d102b5039b79cc17fa6305dccf18412/ccmlib/common.py#L769-L793 |
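An illustrative call pattern for `wait_for_any_log`. The `cluster` object and its `nodelist()`/`mark_log()` helpers come from ccm and are assumed to be set up elsewhere; the pattern string is only an example.

```python
# Sketch: watch every node's system.log for a startup message, giving up
# after ~90 attempts (one grep pass per node, then a 1 s sleep).
marks = {node: node.mark_log() for node in cluster.nodelist()}
ready_node = wait_for_any_log(cluster.nodelist(),
                              "Starting listening for CQL clients",
                              timeout=90,
                              marks=marks)
print("pattern first seen on", ready_node.name)
```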
8,541 | paragbaxi/qualysapi | qualysapi/connector.py | QGConnector.url_api_version | def url_api_version(self, api_version):
""" Return base API url string for the QualysGuard api_version and server.
"""
# Set base url depending on API version.
if api_version == 1:
# QualysGuard API v1 url.
url = "https://%s/msp/" % (self.server,)
elif api_version == 2:
# QualysGuard API v2 url.
url = "https://%s/" % (self.server,)
elif api_version == 'was':
# QualysGuard REST v3 API url (Portal API).
url = "https://%s/qps/rest/3.0/" % (self.server,)
elif api_version == 'am':
# QualysGuard REST v1 API url (Portal API).
url = "https://%s/qps/rest/1.0/" % (self.server,)
elif api_version == 'am2':
# QualysGuard REST v1 API url (Portal API).
url = "https://%s/qps/rest/2.0/" % (self.server,)
else:
raise Exception("Unknown QualysGuard API Version Number (%s)" % (api_version,))
logger.debug("Base url =\n%s" % (url))
return url | python | def url_api_version(self, api_version):
""" Return base API url string for the QualysGuard api_version and server.
"""
# Set base url depending on API version.
if api_version == 1:
# QualysGuard API v1 url.
url = "https://%s/msp/" % (self.server,)
elif api_version == 2:
# QualysGuard API v2 url.
url = "https://%s/" % (self.server,)
elif api_version == 'was':
# QualysGuard REST v3 API url (Portal API).
url = "https://%s/qps/rest/3.0/" % (self.server,)
elif api_version == 'am':
# QualysGuard REST v1 API url (Portal API).
url = "https://%s/qps/rest/1.0/" % (self.server,)
elif api_version == 'am2':
# QualysGuard REST v1 API url (Portal API).
url = "https://%s/qps/rest/2.0/" % (self.server,)
else:
raise Exception("Unknown QualysGuard API Version Number (%s)" % (api_version,))
logger.debug("Base url =\n%s" % (url))
return url | ['def', 'url_api_version', '(', 'self', ',', 'api_version', ')', ':', '# Set base url depending on API version.', 'if', 'api_version', '==', '1', ':', '# QualysGuard API v1 url.', 'url', '=', '"https://%s/msp/"', '%', '(', 'self', '.', 'server', ',', ')', 'elif', 'api_version', '==', '2', ':', '# QualysGuard API v2 url.', 'url', '=', '"https://%s/"', '%', '(', 'self', '.', 'server', ',', ')', 'elif', 'api_version', '==', "'was'", ':', '# QualysGuard REST v3 API url (Portal API).', 'url', '=', '"https://%s/qps/rest/3.0/"', '%', '(', 'self', '.', 'server', ',', ')', 'elif', 'api_version', '==', "'am'", ':', '# QualysGuard REST v1 API url (Portal API).', 'url', '=', '"https://%s/qps/rest/1.0/"', '%', '(', 'self', '.', 'server', ',', ')', 'elif', 'api_version', '==', "'am2'", ':', '# QualysGuard REST v1 API url (Portal API).', 'url', '=', '"https://%s/qps/rest/2.0/"', '%', '(', 'self', '.', 'server', ',', ')', 'else', ':', 'raise', 'Exception', '(', '"Unknown QualysGuard API Version Number (%s)"', '%', '(', 'api_version', ',', ')', ')', 'logger', '.', 'debug', '(', '"Base url =\\n%s"', '%', '(', 'url', ')', ')', 'return', 'url'] | Return base API url string for the QualysGuard api_version and server. | ['Return', 'base', 'API', 'url', 'string', 'for', 'the', 'QualysGuard', 'api_version', 'and', 'server', '.'] | train | https://github.com/paragbaxi/qualysapi/blob/2c8bf1d5d300117403062885c8e10b5665eb4615/qualysapi/connector.py#L118-L141 |
8,542 | ssalentin/plip | plip/modules/chimeraplip.py | ChimeraVisualizer.update_model_dict | def update_model_dict(self):
"""Updates the model dictionary"""
dct = {}
models = self.chimera.openModels
for md in models.list():
dct[md.name] = md.id
self.model_dict = dct | python | def update_model_dict(self):
"""Updates the model dictionary"""
dct = {}
models = self.chimera.openModels
for md in models.list():
dct[md.name] = md.id
self.model_dict = dct | ['def', 'update_model_dict', '(', 'self', ')', ':', 'dct', '=', '{', '}', 'models', '=', 'self', '.', 'chimera', '.', 'openModels', 'for', 'md', 'in', 'models', '.', 'list', '(', ')', ':', 'dct', '[', 'md', '.', 'name', ']', '=', 'md', '.', 'id', 'self', '.', 'model_dict', '=', 'dct'] | Updates the model dictionary | ['Updates', 'the', 'model', 'dictionary'] | train | https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/chimeraplip.py#L43-L49 |
8,543 | manns/pyspread | pyspread/src/gui/_widgets.py | PythonSTC.expand | def expand(self, line, do_expand, force=False, vislevels=0, level=-1):
"""Multi-purpose expand method from original STC class"""
lastchild = self.GetLastChild(line, level)
line += 1
while line <= lastchild:
if force:
if vislevels > 0:
self.ShowLines(line, line)
else:
self.HideLines(line, line)
elif do_expand:
self.ShowLines(line, line)
if level == -1:
level = self.GetFoldLevel(line)
if level & stc.STC_FOLDLEVELHEADERFLAG:
if force:
self.SetFoldExpanded(line, vislevels - 1)
line = self.expand(line, do_expand, force, vislevels - 1)
else:
expandsub = do_expand and self.GetFoldExpanded(line)
line = self.expand(line, expandsub, force, vislevels - 1)
else:
line += 1
return line | python | def expand(self, line, do_expand, force=False, vislevels=0, level=-1):
"""Multi-purpose expand method from original STC class"""
lastchild = self.GetLastChild(line, level)
line += 1
while line <= lastchild:
if force:
if vislevels > 0:
self.ShowLines(line, line)
else:
self.HideLines(line, line)
elif do_expand:
self.ShowLines(line, line)
if level == -1:
level = self.GetFoldLevel(line)
if level & stc.STC_FOLDLEVELHEADERFLAG:
if force:
self.SetFoldExpanded(line, vislevels - 1)
line = self.expand(line, do_expand, force, vislevels - 1)
else:
expandsub = do_expand and self.GetFoldExpanded(line)
line = self.expand(line, expandsub, force, vislevels - 1)
else:
line += 1
return line | ['def', 'expand', '(', 'self', ',', 'line', ',', 'do_expand', ',', 'force', '=', 'False', ',', 'vislevels', '=', '0', ',', 'level', '=', '-', '1', ')', ':', 'lastchild', '=', 'self', '.', 'GetLastChild', '(', 'line', ',', 'level', ')', 'line', '+=', '1', 'while', 'line', '<=', 'lastchild', ':', 'if', 'force', ':', 'if', 'vislevels', '>', '0', ':', 'self', '.', 'ShowLines', '(', 'line', ',', 'line', ')', 'else', ':', 'self', '.', 'HideLines', '(', 'line', ',', 'line', ')', 'elif', 'do_expand', ':', 'self', '.', 'ShowLines', '(', 'line', ',', 'line', ')', 'if', 'level', '==', '-', '1', ':', 'level', '=', 'self', '.', 'GetFoldLevel', '(', 'line', ')', 'if', 'level', '&', 'stc', '.', 'STC_FOLDLEVELHEADERFLAG', ':', 'if', 'force', ':', 'self', '.', 'SetFoldExpanded', '(', 'line', ',', 'vislevels', '-', '1', ')', 'line', '=', 'self', '.', 'expand', '(', 'line', ',', 'do_expand', ',', 'force', ',', 'vislevels', '-', '1', ')', 'else', ':', 'expandsub', '=', 'do_expand', 'and', 'self', '.', 'GetFoldExpanded', '(', 'line', ')', 'line', '=', 'self', '.', 'expand', '(', 'line', ',', 'expandsub', ',', 'force', ',', 'vislevels', '-', '1', ')', 'else', ':', 'line', '+=', '1', 'return', 'line'] | Multi-purpose expand method from original STC class | ['Multi', '-', 'purpose', 'expand', 'method', 'from', 'original', 'STC', 'class'] | train | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_widgets.py#L363-L392 |
8,544 | camptocamp/Studio | studio/controllers/datasources.py | DatasourcesController.showmapfile | def showmapfile(self,datastore_id=None, datasource_id=None):
""" GET /datastores/{datastore_id}/datasources/{datasource_id}/mapfile: Get
the JSON representation of a specific datasource's default MapFile LAYER
block. """
datastore = self._get_datastore_by_id(datastore_id)
if datastore is None:
abort(404)
classification = None
if 'classification' in request.params and 'attribute' in request.params:
classification = {}
classification['type'] = request.params['classification']
classification['attribute'] = request.params['attribute']
if classification['type'] == "quantile":
classification['intervals'] = request.params['intervals']
palette = {}
palette['type']='ramp'
if 'colortype' in request.params:
palette['type'] = request.params['colortype']
if 'startcolor' in request.params:
c = request.params['startcolor']
if c.startswith('#'):
c = c[1:]
palette['startcolor'] = [int(c[0:2],16),int(c[2:4],16),int(c[4:6],16)]
if 'endcolor' in request.params:
c = request.params['endcolor']
if c.startswith('#'):
c = c[1:]
palette['endcolor'] = [int(c[0:2],16),int(c[2:4],16),int(c[4:6],16)]
if 'interpolation' in request.params:
palette['interpolation'] = request.params['interpolation']
if 'theme' in request.params:
palette['theme'] = int(request.params['theme'])
classification['palette'] = palette
mapfile = get_mapfile(datastore.ogrstring, datasource_id, classification)
if 'metadata' not in mapfile:
mapfile['metadata']={}
mapfile['metadata']['datastoreid']=datastore_id
mapfile['metadata']['datasourceid']=datasource_id
if mapfile is None:
abort(404)
return mapfile | python | def showmapfile(self,datastore_id=None, datasource_id=None):
""" GET /datastores/{datastore_id}/datasources/{datasource_id}/mapfile: Get
the JSON representation of a specific datasource's default MapFile LAYER
block. """
datastore = self._get_datastore_by_id(datastore_id)
if datastore is None:
abort(404)
classification = None
if 'classification' in request.params and 'attribute' in request.params:
classification = {}
classification['type'] = request.params['classification']
classification['attribute'] = request.params['attribute']
if classification['type'] == "quantile":
classification['intervals'] = request.params['intervals']
palette = {}
palette['type']='ramp'
if 'colortype' in request.params:
palette['type'] = request.params['colortype']
if 'startcolor' in request.params:
c = request.params['startcolor']
if c.startswith('#'):
c = c[1:]
palette['startcolor'] = [int(c[0:2],16),int(c[2:4],16),int(c[4:6],16)]
if 'endcolor' in request.params:
c = request.params['endcolor']
if c.startswith('#'):
c = c[1:]
palette['endcolor'] = [int(c[0:2],16),int(c[2:4],16),int(c[4:6],16)]
if 'interpolation' in request.params:
palette['interpolation'] = request.params['interpolation']
if 'theme' in request.params:
palette['theme'] = int(request.params['theme'])
classification['palette'] = palette
mapfile = get_mapfile(datastore.ogrstring, datasource_id, classification)
if 'metadata' not in mapfile:
mapfile['metadata']={}
mapfile['metadata']['datastoreid']=datastore_id
mapfile['metadata']['datasourceid']=datasource_id
if mapfile is None:
abort(404)
return mapfile | ['def', 'showmapfile', '(', 'self', ',', 'datastore_id', '=', 'None', ',', 'datasource_id', '=', 'None', ')', ':', 'datastore', '=', 'self', '.', '_get_datastore_by_id', '(', 'datastore_id', ')', 'if', 'datastore', 'is', 'None', ':', 'abort', '(', '404', ')', 'classification', '=', 'None', 'if', "'classification'", 'in', 'request', '.', 'params', 'and', "'attribute'", 'in', 'request', '.', 'params', ':', 'classification', '=', '{', '}', 'classification', '[', "'type'", ']', '=', 'request', '.', 'params', '[', "'classification'", ']', 'classification', '[', "'attribute'", ']', '=', 'request', '.', 'params', '[', "'attribute'", ']', 'if', 'classification', '[', "'type'", ']', '==', '"quantile"', ':', 'classification', '[', "'intervals'", ']', '=', 'request', '.', 'params', '[', "'intervals'", ']', 'palette', '=', '{', '}', 'palette', '[', "'type'", ']', '=', "'ramp'", 'if', "'colortype'", 'in', 'request', '.', 'params', ':', 'palette', '[', "'type'", ']', '=', 'request', '.', 'params', '[', "'colortype'", ']', 'if', "'startcolor'", 'in', 'request', '.', 'params', ':', 'c', '=', 'request', '.', 'params', '[', "'startcolor'", ']', 'if', 'c', '.', 'startswith', '(', "'#'", ')', ':', 'c', '=', 'c', '[', '1', ':', ']', 'palette', '[', "'startcolor'", ']', '=', '[', 'int', '(', 'c', '[', '0', ':', '2', ']', ',', '16', ')', ',', 'int', '(', 'c', '[', '2', ':', '4', ']', ',', '16', ')', ',', 'int', '(', 'c', '[', '4', ':', '6', ']', ',', '16', ')', ']', 'if', "'endcolor'", 'in', 'request', '.', 'params', ':', 'c', '=', 'request', '.', 'params', '[', "'endcolor'", ']', 'if', 'c', '.', 'startswith', '(', "'#'", ')', ':', 'c', '=', 'c', '[', '1', ':', ']', 'palette', '[', "'endcolor'", ']', '=', '[', 'int', '(', 'c', '[', '0', ':', '2', ']', ',', '16', ')', ',', 'int', '(', 'c', '[', '2', ':', '4', ']', ',', '16', ')', ',', 'int', '(', 'c', '[', '4', ':', '6', ']', ',', '16', ')', ']', 'if', "'interpolation'", 'in', 'request', '.', 'params', ':', 'palette', '[', "'interpolation'", ']', '=', 'request', '.', 'params', '[', "'interpolation'", ']', 'if', "'theme'", 'in', 'request', '.', 'params', ':', 'palette', '[', "'theme'", ']', '=', 'int', '(', 'request', '.', 'params', '[', "'theme'", ']', ')', 'classification', '[', "'palette'", ']', '=', 'palette', 'mapfile', '=', 'get_mapfile', '(', 'datastore', '.', 'ogrstring', ',', 'datasource_id', ',', 'classification', ')', 'if', "'metadata'", 'not', 'in', 'mapfile', ':', 'mapfile', '[', "'metadata'", ']', '=', '{', '}', 'mapfile', '[', "'metadata'", ']', '[', "'datastoreid'", ']', '=', 'datastore_id', 'mapfile', '[', "'metadata'", ']', '[', "'datasourceid'", ']', '=', 'datasource_id', 'if', 'mapfile', 'is', 'None', ':', 'abort', '(', '404', ')', 'return', 'mapfile'] | GET /datastores/{datastore_id}/datasources/{datasource_id}/mapfile: Get
the JSON representation of a specific datasource's default MapFile LAYER
block. | ['GET', '/', 'datastores', '/', '{', 'datastore_id', '}', '/', 'datasources', '/', '{', 'datasource_id', '}', '/', 'mapfile', ':', 'Get', 'the', 'JSON', 'representation', 'of', 'a', 'specific', 'datasource', 's', 'default', 'MapFile', 'LAYER', 'block', '.'] | train | https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/controllers/datasources.py#L111-L153 |
8,545 | slundberg/shap | shap/benchmark/models.py | cric__ridge | def cric__ridge():
""" Ridge Regression
"""
model = sklearn.linear_model.LogisticRegression(penalty="l2")
# we want to explain the raw probability outputs of the trees
model.predict = lambda X: model.predict_proba(X)[:,1]
return model | python | def cric__ridge():
""" Ridge Regression
"""
model = sklearn.linear_model.LogisticRegression(penalty="l2")
# we want to explain the raw probability outputs of the trees
model.predict = lambda X: model.predict_proba(X)[:,1]
return model | ['def', 'cric__ridge', '(', ')', ':', 'model', '=', 'sklearn', '.', 'linear_model', '.', 'LogisticRegression', '(', 'penalty', '=', '"l2"', ')', '# we want to explain the raw probability outputs of the trees', 'model', '.', 'predict', '=', 'lambda', 'X', ':', 'model', '.', 'predict_proba', '(', 'X', ')', '[', ':', ',', '1', ']', 'return', 'model'] | Ridge Regression | ['Ridge', 'Regression'] | train | https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/models.py#L143-L151 |
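A small consumption sketch for the factory above: despite the "Ridge Regression" docstring, it builds an L2-penalised logistic classifier whose `predict` is rebound to return probabilities. The toy data below is made up.

```python
# Sketch: fit the benchmark model, then read P(y=1) straight from .predict().
import numpy as np

model = cric__ridge()
X = np.random.rand(50, 4)               # hypothetical feature matrix
y = (X[:, 0] > 0.5).astype(int)         # hypothetical binary labels
model.fit(X, y)
probs = model.predict(X)                # probabilities in [0, 1], not 0/1 labels
```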
8,546 | devassistant/devassistant | devassistant/yaml_loader.py | YamlLoader.load_yaml_by_relpath | def load_yaml_by_relpath(cls, directories, rel_path, log_debug=False):
"""Load a yaml file with path that is relative to one of given directories.
Args:
directories: list of directories to search
name: relative path of the yaml file to load
log_debug: log all messages as debug
Returns:
tuple (fullpath, loaded yaml structure) or None if not found
"""
for d in directories:
if d.startswith(os.path.expanduser('~')) and not os.path.exists(d):
os.makedirs(d)
possible_path = os.path.join(d, rel_path)
if os.path.exists(possible_path):
loaded = cls.load_yaml_by_path(possible_path, log_debug=log_debug)
if loaded is not None:
return (possible_path, cls.load_yaml_by_path(possible_path))
return None | python | def load_yaml_by_relpath(cls, directories, rel_path, log_debug=False):
"""Load a yaml file with path that is relative to one of given directories.
Args:
directories: list of directories to search
name: relative path of the yaml file to load
log_debug: log all messages as debug
Returns:
tuple (fullpath, loaded yaml structure) or None if not found
"""
for d in directories:
if d.startswith(os.path.expanduser('~')) and not os.path.exists(d):
os.makedirs(d)
possible_path = os.path.join(d, rel_path)
if os.path.exists(possible_path):
loaded = cls.load_yaml_by_path(possible_path, log_debug=log_debug)
if loaded is not None:
return (possible_path, cls.load_yaml_by_path(possible_path))
return None | ['def', 'load_yaml_by_relpath', '(', 'cls', ',', 'directories', ',', 'rel_path', ',', 'log_debug', '=', 'False', ')', ':', 'for', 'd', 'in', 'directories', ':', 'if', 'd', '.', 'startswith', '(', 'os', '.', 'path', '.', 'expanduser', '(', "'~'", ')', ')', 'and', 'not', 'os', '.', 'path', '.', 'exists', '(', 'd', ')', ':', 'os', '.', 'makedirs', '(', 'd', ')', 'possible_path', '=', 'os', '.', 'path', '.', 'join', '(', 'd', ',', 'rel_path', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'possible_path', ')', ':', 'loaded', '=', 'cls', '.', 'load_yaml_by_path', '(', 'possible_path', ',', 'log_debug', '=', 'log_debug', ')', 'if', 'loaded', 'is', 'not', 'None', ':', 'return', '(', 'possible_path', ',', 'cls', '.', 'load_yaml_by_path', '(', 'possible_path', ')', ')', 'return', 'None'] | Load a yaml file with path that is relative to one of given directories.
Args:
directories: list of directories to search
name: relative path of the yaml file to load
log_debug: log all messages as debug
Returns:
tuple (fullpath, loaded yaml structure) or None if not found | ['Load', 'a', 'yaml', 'file', 'with', 'path', 'that', 'is', 'relative', 'to', 'one', 'of', 'given', 'directories', '.'] | train | https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/yaml_loader.py#L41-L60 |
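An illustrative call for `load_yaml_by_relpath` (the directory names and relative path are placeholders):

```python
# Sketch: probe two config directories for the same relative YAML file;
# the first hit wins, otherwise the call returns None.
import os

result = YamlLoader.load_yaml_by_relpath(
    ['/usr/share/devassistant/files', os.path.expanduser('~/.devassistant/files')],
    'snippets/python.yaml',
    log_debug=True)
if result is not None:
    full_path, parsed = result
```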
8,547 | squaresLab/BugZoo | bugzoo/mgr/tool.py | ToolManager.upload | def upload(self, tool: Tool) -> bool:
"""
Attempts to upload the Docker image for a given tool to
`DockerHub <https://hub.docker.com>`_.
"""
return self.__installation.build.upload(tool.image) | python | def upload(self, tool: Tool) -> bool:
"""
Attempts to upload the Docker image for a given tool to
`DockerHub <https://hub.docker.com>`_.
"""
return self.__installation.build.upload(tool.image) | ['def', 'upload', '(', 'self', ',', 'tool', ':', 'Tool', ')', '->', 'bool', ':', 'return', 'self', '.', '__installation', '.', 'build', '.', 'upload', '(', 'tool', '.', 'image', ')'] | Attempts to upload the Docker image for a given tool to
`DockerHub <https://hub.docker.com>`_. | ['Attempts', 'to', 'upload', 'the', 'Docker', 'image', 'for', 'a', 'given', 'tool', 'to', 'DockerHub', '<https', ':', '//', 'hub', '.', 'docker', '.', 'com', '>', '_', '.'] | train | https://github.com/squaresLab/BugZoo/blob/68664f1977e85b37a78604f7c570382ffae1fa3b/bugzoo/mgr/tool.py#L107-L112 |
8,548 | pyamg/pyamg | pyamg/util/linalg.py | pinv_array | def pinv_array(a, cond=None):
"""Calculate the Moore-Penrose pseudo inverse of each block of the three dimensional array a.
Parameters
----------
a : {dense array}
Is of size (n, m, m)
cond : {float}
Used by gelss to filter numerically zeros singular values.
If None, a suitable value is chosen for you.
Returns
-------
Nothing, a is modified in place so that a[k] holds the pseudoinverse
of that block.
Notes
-----
By using lapack wrappers, this can be much faster for large n, than
directly calling pinv2
Examples
--------
>>> import numpy as np
>>> from pyamg.util.linalg import pinv_array
>>> a = np.array([[[1.,2.],[1.,1.]], [[1.,1.],[3.,3.]]])
>>> ac = a.copy()
>>> # each block of a is inverted in-place
>>> pinv_array(a)
"""
n = a.shape[0]
m = a.shape[1]
if m == 1:
# Pseudo-inverse of 1 x 1 matrices is trivial
zero_entries = (a == 0.0).nonzero()[0]
a[zero_entries] = 1.0
a[:] = 1.0/a
a[zero_entries] = 0.0
del zero_entries
else:
# The block size is greater than 1
# Create necessary arrays and function pointers for calculating pinv
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(np.ones((1,), dtype=a.dtype)))
RHS = np.eye(m, dtype=a.dtype)
lwork = _compute_lwork(gelss_lwork, m, m, m)
# Choose tolerance for which singular values are zero in *gelss below
if cond is None:
t = a.dtype.char
eps = np.finfo(np.float).eps
feps = np.finfo(np.single).eps
geps = np.finfo(np.longfloat).eps
_array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
cond = {0: feps*1e3, 1: eps*1e6, 2: geps*1e6}[_array_precision[t]]
# Invert each block of a
for kk in range(n):
gelssoutput = gelss(a[kk], RHS, cond=cond, lwork=lwork,
overwrite_a=True, overwrite_b=False)
a[kk] = gelssoutput[1] | python | def pinv_array(a, cond=None):
"""Calculate the Moore-Penrose pseudo inverse of each block of the three dimensional array a.
Parameters
----------
a : {dense array}
Is of size (n, m, m)
cond : {float}
Used by gelss to filter numerically zeros singular values.
If None, a suitable value is chosen for you.
Returns
-------
Nothing, a is modified in place so that a[k] holds the pseudoinverse
of that block.
Notes
-----
By using lapack wrappers, this can be much faster for large n, than
directly calling pinv2
Examples
--------
>>> import numpy as np
>>> from pyamg.util.linalg import pinv_array
>>> a = np.array([[[1.,2.],[1.,1.]], [[1.,1.],[3.,3.]]])
>>> ac = a.copy()
>>> # each block of a is inverted in-place
>>> pinv_array(a)
"""
n = a.shape[0]
m = a.shape[1]
if m == 1:
# Pseudo-inverse of 1 x 1 matrices is trivial
zero_entries = (a == 0.0).nonzero()[0]
a[zero_entries] = 1.0
a[:] = 1.0/a
a[zero_entries] = 0.0
del zero_entries
else:
# The block size is greater than 1
# Create necessary arrays and function pointers for calculating pinv
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(np.ones((1,), dtype=a.dtype)))
RHS = np.eye(m, dtype=a.dtype)
lwork = _compute_lwork(gelss_lwork, m, m, m)
# Choose tolerance for which singular values are zero in *gelss below
if cond is None:
t = a.dtype.char
eps = np.finfo(np.float).eps
feps = np.finfo(np.single).eps
geps = np.finfo(np.longfloat).eps
_array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
cond = {0: feps*1e3, 1: eps*1e6, 2: geps*1e6}[_array_precision[t]]
# Invert each block of a
for kk in range(n):
gelssoutput = gelss(a[kk], RHS, cond=cond, lwork=lwork,
overwrite_a=True, overwrite_b=False)
a[kk] = gelssoutput[1] | ['def', 'pinv_array', '(', 'a', ',', 'cond', '=', 'None', ')', ':', 'n', '=', 'a', '.', 'shape', '[', '0', ']', 'm', '=', 'a', '.', 'shape', '[', '1', ']', 'if', 'm', '==', '1', ':', '# Pseudo-inverse of 1 x 1 matrices is trivial', 'zero_entries', '=', '(', 'a', '==', '0.0', ')', '.', 'nonzero', '(', ')', '[', '0', ']', 'a', '[', 'zero_entries', ']', '=', '1.0', 'a', '[', ':', ']', '=', '1.0', '/', 'a', 'a', '[', 'zero_entries', ']', '=', '0.0', 'del', 'zero_entries', 'else', ':', '# The block size is greater than 1', '# Create necessary arrays and function pointers for calculating pinv', 'gelss', ',', 'gelss_lwork', '=', 'get_lapack_funcs', '(', '(', "'gelss'", ',', "'gelss_lwork'", ')', ',', '(', 'np', '.', 'ones', '(', '(', '1', ',', ')', ',', 'dtype', '=', 'a', '.', 'dtype', ')', ')', ')', 'RHS', '=', 'np', '.', 'eye', '(', 'm', ',', 'dtype', '=', 'a', '.', 'dtype', ')', 'lwork', '=', '_compute_lwork', '(', 'gelss_lwork', ',', 'm', ',', 'm', ',', 'm', ')', '# Choose tolerance for which singular values are zero in *gelss below', 'if', 'cond', 'is', 'None', ':', 't', '=', 'a', '.', 'dtype', '.', 'char', 'eps', '=', 'np', '.', 'finfo', '(', 'np', '.', 'float', ')', '.', 'eps', 'feps', '=', 'np', '.', 'finfo', '(', 'np', '.', 'single', ')', '.', 'eps', 'geps', '=', 'np', '.', 'finfo', '(', 'np', '.', 'longfloat', ')', '.', 'eps', '_array_precision', '=', '{', "'f'", ':', '0', ',', "'d'", ':', '1', ',', "'g'", ':', '2', ',', "'F'", ':', '0', ',', "'D'", ':', '1', ',', "'G'", ':', '2', '}', 'cond', '=', '{', '0', ':', 'feps', '*', '1e3', ',', '1', ':', 'eps', '*', '1e6', ',', '2', ':', 'geps', '*', '1e6', '}', '[', '_array_precision', '[', 't', ']', ']', '# Invert each block of a', 'for', 'kk', 'in', 'range', '(', 'n', ')', ':', 'gelssoutput', '=', 'gelss', '(', 'a', '[', 'kk', ']', ',', 'RHS', ',', 'cond', '=', 'cond', ',', 'lwork', '=', 'lwork', ',', 'overwrite_a', '=', 'True', ',', 'overwrite_b', '=', 'False', ')', 'a', '[', 'kk', ']', '=', 'gelssoutput', '[', '1', ']'] | Calculate the Moore-Penrose pseudo inverse of each block of the three dimensional array a.
Parameters
----------
a : {dense array}
Is of size (n, m, m)
cond : {float}
Used by gelss to filter numerically zeros singular values.
If None, a suitable value is chosen for you.
Returns
-------
Nothing, a is modified in place so that a[k] holds the pseudoinverse
of that block.
Notes
-----
By using lapack wrappers, this can be much faster for large n, than
directly calling pinv2
Examples
--------
>>> import numpy as np
>>> from pyamg.util.linalg import pinv_array
>>> a = np.array([[[1.,2.],[1.,1.]], [[1.,1.],[3.,3.]]])
>>> ac = a.copy()
>>> # each block of a is inverted in-place
>>> pinv_array(a) | ['Calculate', 'the', 'Moore', '-', 'Penrose', 'pseudo', 'inverse', 'of', 'each', 'block', 'of', 'the', 'three', 'dimensional', 'array', 'a', '.'] | train | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/linalg.py#L573-L637 |
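A short check that complements the docstring example above; it verifies the defining Moore-Penrose identity block by block after the in-place call.

```python
# Sketch: after pinv_array(a), each a[k] should satisfy A @ A_pinv @ A == A
# for the corresponding original block kept in ac.
import numpy as np

a = np.array([[[1., 2.], [1., 1.]], [[1., 1.], [3., 3.]]])
ac = a.copy()
pinv_array(a)
for k in range(a.shape[0]):
    assert np.allclose(ac[k] @ a[k] @ ac[k], ac[k])
```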
8,549 | samuelcolvin/pydantic | pydantic/class_validators.py | make_generic_validator | def make_generic_validator(validator: AnyCallable) -> 'ValidatorCallable':
"""
Make a generic function which calls a validator with the right arguments.
Unfortunately other approaches (eg. return a partial of a function that builds the arguments) is slow,
hence this laborious way of doing things.
It's done like this so validators don't all need **kwargs in their signature, eg. any combination of
the arguments "values", "fields" and/or "config" are permitted.
"""
sig = signature(validator)
args = list(sig.parameters.keys())
first_arg = args.pop(0)
if first_arg == 'self':
raise ConfigError(
f'Invalid signature for validator {validator}: {sig}, "self" not permitted as first argument, '
f'should be: (cls, value, values, config, field), "values", "config" and "field" are all optional.'
)
elif first_arg == 'cls':
# assume the second argument is value
return wraps(validator)(_generic_validator_cls(validator, sig, set(args[1:])))
else:
# assume the first argument was value which has already been removed
return wraps(validator)(_generic_validator_basic(validator, sig, set(args))) | python | def make_generic_validator(validator: AnyCallable) -> 'ValidatorCallable':
"""
Make a generic function which calls a validator with the right arguments.
Unfortunately other approaches (eg. return a partial of a function that builds the arguments) is slow,
hence this laborious way of doing things.
It's done like this so validators don't all need **kwargs in their signature, eg. any combination of
the arguments "values", "fields" and/or "config" are permitted.
"""
sig = signature(validator)
args = list(sig.parameters.keys())
first_arg = args.pop(0)
if first_arg == 'self':
raise ConfigError(
f'Invalid signature for validator {validator}: {sig}, "self" not permitted as first argument, '
f'should be: (cls, value, values, config, field), "values", "config" and "field" are all optional.'
)
elif first_arg == 'cls':
# assume the second argument is value
return wraps(validator)(_generic_validator_cls(validator, sig, set(args[1:])))
else:
# assume the first argument was value which has already been removed
return wraps(validator)(_generic_validator_basic(validator, sig, set(args))) | ['def', 'make_generic_validator', '(', 'validator', ':', 'AnyCallable', ')', '->', "'ValidatorCallable'", ':', 'sig', '=', 'signature', '(', 'validator', ')', 'args', '=', 'list', '(', 'sig', '.', 'parameters', '.', 'keys', '(', ')', ')', 'first_arg', '=', 'args', '.', 'pop', '(', '0', ')', 'if', 'first_arg', '==', "'self'", ':', 'raise', 'ConfigError', '(', 'f\'Invalid signature for validator {validator}: {sig}, "self" not permitted as first argument, \'', 'f\'should be: (cls, value, values, config, field), "values", "config" and "field" are all optional.\'', ')', 'elif', 'first_arg', '==', "'cls'", ':', '# assume the second argument is value', 'return', 'wraps', '(', 'validator', ')', '(', '_generic_validator_cls', '(', 'validator', ',', 'sig', ',', 'set', '(', 'args', '[', '1', ':', ']', ')', ')', ')', 'else', ':', '# assume the first argument was value which has already been removed', 'return', 'wraps', '(', 'validator', ')', '(', '_generic_validator_basic', '(', 'validator', ',', 'sig', ',', 'set', '(', 'args', ')', ')', ')'] | Make a generic function which calls a validator with the right arguments.
Unfortunately other approaches (eg. return a partial of a function that builds the arguments) is slow,
hence this laborious way of doing things.
It's done like this so validators don't all need **kwargs in their signature, eg. any combination of
the arguments "values", "fields" and/or "config" are permitted. | ['Make', 'a', 'generic', 'function', 'which', 'calls', 'a', 'validator', 'with', 'the', 'right', 'arguments', '.'] | train | https://github.com/samuelcolvin/pydantic/blob/bff8a1789dfde2c38928cced6640887b53615aa3/pydantic/class_validators.py#L122-L145 |
8,550 | nugget/python-insteonplm | insteonplm/devices/__init__.py | ControlFlags.byte | def byte(self):
"""Return a byte representation of ControlFlags."""
flags = int(self._in_use) << 7 \
| int(self._controller) << 6 \
| int(self._bit5) << 5 \
| int(self._bit4) << 4 \
| int(self._used_before) << 1
return flags | python | def byte(self):
"""Return a byte representation of ControlFlags."""
flags = int(self._in_use) << 7 \
| int(self._controller) << 6 \
| int(self._bit5) << 5 \
| int(self._bit4) << 4 \
| int(self._used_before) << 1
return flags | ['def', 'byte', '(', 'self', ')', ':', 'flags', '=', 'int', '(', 'self', '.', '_in_use', ')', '<<', '7', '|', 'int', '(', 'self', '.', '_controller', ')', '<<', '6', '|', 'int', '(', 'self', '.', '_bit5', ')', '<<', '5', '|', 'int', '(', 'self', '.', '_bit4', ')', '<<', '4', '|', 'int', '(', 'self', '.', '_used_before', ')', '<<', '1', 'return', 'flags'] | Return a byte representation of ControlFlags. | ['Return', 'a', 'byte', 'representation', 'of', 'ControlFlags', '.'] | train | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/devices/__init__.py#L1099-L1106 |
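A worked example of the bit packing above (the flag values are hypothetical):

```python
# in_use -> bit 7, controller -> bit 6, bit5 -> bit 5, bit4 -> bit 4, used_before -> bit 1.
# With in_use=True, controller=True, bit5=False, bit4=False, used_before=True:
flags = (1 << 7) | (1 << 6) | (0 << 5) | (0 << 4) | (1 << 1)
assert flags == 0b11000010 == 0xC2 == 194
```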
8,551 | ANTsX/ANTsPy | ants/utils/scalar_rgb_vector.py | rgb_to_vector | def rgb_to_vector(image):
"""
Convert an RGB ANTsImage to a Vector ANTsImage
Arguments
---------
image : ANTsImage
RGB image to be converted
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> mni_rgb = mni.scalar_to_rgb()
>>> mni_vector = mni.rgb_to_vector()
>>> mni_rgb2 = mni.vector_to_rgb()
"""
if image.pixeltype != 'unsigned char':
image = image.clone('unsigned char')
idim = image.dimension
libfn = utils.get_lib_fn('RgbToVector%i' % idim)
new_ptr = libfn(image.pointer)
new_img = iio.ANTsImage(pixeltype=image.pixeltype, dimension=image.dimension,
components=3, pointer=new_ptr, is_rgb=False)
return new_img | python | def rgb_to_vector(image):
"""
Convert an RGB ANTsImage to a Vector ANTsImage
Arguments
---------
image : ANTsImage
RGB image to be converted
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> mni_rgb = mni.scalar_to_rgb()
>>> mni_vector = mni.rgb_to_vector()
>>> mni_rgb2 = mni.vector_to_rgb()
"""
if image.pixeltype != 'unsigned char':
image = image.clone('unsigned char')
idim = image.dimension
libfn = utils.get_lib_fn('RgbToVector%i' % idim)
new_ptr = libfn(image.pointer)
new_img = iio.ANTsImage(pixeltype=image.pixeltype, dimension=image.dimension,
components=3, pointer=new_ptr, is_rgb=False)
return new_img | ['def', 'rgb_to_vector', '(', 'image', ')', ':', 'if', 'image', '.', 'pixeltype', '!=', "'unsigned char'", ':', 'image', '=', 'image', '.', 'clone', '(', "'unsigned char'", ')', 'idim', '=', 'image', '.', 'dimension', 'libfn', '=', 'utils', '.', 'get_lib_fn', '(', "'RgbToVector%i'", '%', 'idim', ')', 'new_ptr', '=', 'libfn', '(', 'image', '.', 'pointer', ')', 'new_img', '=', 'iio', '.', 'ANTsImage', '(', 'pixeltype', '=', 'image', '.', 'pixeltype', ',', 'dimension', '=', 'image', '.', 'dimension', ',', 'components', '=', '3', ',', 'pointer', '=', 'new_ptr', ',', 'is_rgb', '=', 'False', ')', 'return', 'new_img'] | Convert an RGB ANTsImage to a Vector ANTsImage
Arguments
---------
image : ANTsImage
RGB image to be converted
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> mni_rgb = mni.scalar_to_rgb()
>>> mni_vector = mni.rgb_to_vector()
>>> mni_rgb2 = mni.vector_to_rgb() | ['Convert', 'an', 'RGB', 'ANTsImage', 'to', 'a', 'Vector', 'ANTsImage'] | train | https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/utils/scalar_rgb_vector.py#L78-L106 |
8,552 | MouseLand/rastermap | rastermap/roi.py | triangle_area | def triangle_area(p0, p1, p2):
if p2.ndim < 2:
p2 = p2[np.newaxis, :]
'''p2 can be a vector'''
area = 0.5 * np.abs(p0[0] * p1[1] - p0[0] * p2[:,1] +
p1[0] * p2[:,1] - p1[0] * p0[1] +
p2[:,0] * p0[1] - p2[:,0] * p1[1])
return area | python | def triangle_area(p0, p1, p2):
if p2.ndim < 2:
p2 = p2[np.newaxis, :]
'''p2 can be a vector'''
area = 0.5 * np.abs(p0[0] * p1[1] - p0[0] * p2[:,1] +
p1[0] * p2[:,1] - p1[0] * p0[1] +
p2[:,0] * p0[1] - p2[:,0] * p1[1])
return area | ['def', 'triangle_area', '(', 'p0', ',', 'p1', ',', 'p2', ')', ':', 'if', 'p2', '.', 'ndim', '<', '2', ':', 'p2', '=', 'p2', '[', 'np', '.', 'newaxis', ',', ':', ']', 'area', '=', '0.5', '*', 'np', '.', 'abs', '(', 'p0', '[', '0', ']', '*', 'p1', '[', '1', ']', '-', 'p0', '[', '0', ']', '*', 'p2', '[', ':', ',', '1', ']', '+', 'p1', '[', '0', ']', '*', 'p2', '[', ':', ',', '1', ']', '-', 'p1', '[', '0', ']', '*', 'p0', '[', '1', ']', '+', 'p2', '[', ':', ',', '0', ']', '*', 'p0', '[', '1', ']', '-', 'p2', '[', ':', ',', '0', ']', '*', 'p1', '[', '1', ']', ')', 'return', 'area'] | p2 can be a vector | ['p2', 'can', 'be', 'a', 'vector'] | train | https://github.com/MouseLand/rastermap/blob/eee7a46db80b6e33207543778e11618d0fed08a6/rastermap/roi.py#L5-L12 |
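A quick numeric check of the formula above (points chosen purely for illustration):

```python
# Sketch: a right triangle with unit legs has area 0.5; passing several
# third points at once yields one area per row of p2.
import numpy as np

p0, p1 = np.array([0., 0.]), np.array([1., 0.])
print(triangle_area(p0, p1, np.array([0., 1.])))               # [0.5]
print(triangle_area(p0, p1, np.array([[0., 1.], [0., 2.]])))   # [0.5 1. ]
```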
8,553 | marcomusy/vtkplotter | vtkplotter/vtkio.py | Video.addFrame | def addFrame(self):
"""Add frame to current video."""
fr = "/tmp/vpvid/" + str(len(self.frames)) + ".png"
screenshot(fr)
self.frames.append(fr) | python | def addFrame(self):
"""Add frame to current video."""
fr = "/tmp/vpvid/" + str(len(self.frames)) + ".png"
screenshot(fr)
self.frames.append(fr) | ['def', 'addFrame', '(', 'self', ')', ':', 'fr', '=', '"/tmp/vpvid/"', '+', 'str', '(', 'len', '(', 'self', '.', 'frames', ')', ')', '+', '".png"', 'screenshot', '(', 'fr', ')', 'self', '.', 'frames', '.', 'append', '(', 'fr', ')'] | Add frame to current video. | ['Add', 'frame', 'to', 'current', 'video', '.'] | train | https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/vtkio.py#L837-L841 |
8,554 | juju/charm-helpers | charmhelpers/fetch/centos.py | add_source | def add_source(source, key=None):
"""Add a package source to this system.
@param source: a URL with a rpm package
@param key: A key to be added to the system's keyring and used
to verify the signatures on packages. Ideally, this should be an
ASCII format GPG public key including the block headers. A GPG key
id may also be used, but be aware that only insecure protocols are
available to retrieve the actual public key from a public keyserver
placing your Juju environment at risk.
"""
if source is None:
log('Source is not present. Skipping')
return
if source.startswith('http'):
directory = '/etc/yum.repos.d/'
for filename in os.listdir(directory):
with open(directory + filename, 'r') as rpm_file:
if source in rpm_file.read():
break
else:
log("Add source: {!r}".format(source))
# write in the charms.repo
with open(directory + 'Charms.repo', 'a') as rpm_file:
rpm_file.write('[%s]\n' % source[7:].replace('/', '_'))
rpm_file.write('name=%s\n' % source[7:])
rpm_file.write('baseurl=%s\n\n' % source)
else:
log("Unknown source: {!r}".format(source))
if key:
if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
with NamedTemporaryFile('w+') as key_file:
key_file.write(key)
key_file.flush()
key_file.seek(0)
subprocess.check_call(['rpm', '--import', key_file.name])
else:
subprocess.check_call(['rpm', '--import', key]) | python | def add_source(source, key=None):
"""Add a package source to this system.
@param source: a URL with a rpm package
@param key: A key to be added to the system's keyring and used
to verify the signatures on packages. Ideally, this should be an
ASCII format GPG public key including the block headers. A GPG key
id may also be used, but be aware that only insecure protocols are
available to retrieve the actual public key from a public keyserver
placing your Juju environment at risk.
"""
if source is None:
log('Source is not present. Skipping')
return
if source.startswith('http'):
directory = '/etc/yum.repos.d/'
for filename in os.listdir(directory):
with open(directory + filename, 'r') as rpm_file:
if source in rpm_file.read():
break
else:
log("Add source: {!r}".format(source))
# write in the charms.repo
with open(directory + 'Charms.repo', 'a') as rpm_file:
rpm_file.write('[%s]\n' % source[7:].replace('/', '_'))
rpm_file.write('name=%s\n' % source[7:])
rpm_file.write('baseurl=%s\n\n' % source)
else:
log("Unknown source: {!r}".format(source))
if key:
if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
with NamedTemporaryFile('w+') as key_file:
key_file.write(key)
key_file.flush()
key_file.seek(0)
subprocess.check_call(['rpm', '--import', key_file.name])
else:
subprocess.check_call(['rpm', '--import', key]) | ['def', 'add_source', '(', 'source', ',', 'key', '=', 'None', ')', ':', 'if', 'source', 'is', 'None', ':', 'log', '(', "'Source is not present. Skipping'", ')', 'return', 'if', 'source', '.', 'startswith', '(', "'http'", ')', ':', 'directory', '=', "'/etc/yum.repos.d/'", 'for', 'filename', 'in', 'os', '.', 'listdir', '(', 'directory', ')', ':', 'with', 'open', '(', 'directory', '+', 'filename', ',', "'r'", ')', 'as', 'rpm_file', ':', 'if', 'source', 'in', 'rpm_file', '.', 'read', '(', ')', ':', 'break', 'else', ':', 'log', '(', '"Add source: {!r}"', '.', 'format', '(', 'source', ')', ')', '# write in the charms.repo', 'with', 'open', '(', 'directory', '+', "'Charms.repo'", ',', "'a'", ')', 'as', 'rpm_file', ':', 'rpm_file', '.', 'write', '(', "'[%s]\\n'", '%', 'source', '[', '7', ':', ']', '.', 'replace', '(', "'/'", ',', "'_'", ')', ')', 'rpm_file', '.', 'write', '(', "'name=%s\\n'", '%', 'source', '[', '7', ':', ']', ')', 'rpm_file', '.', 'write', '(', "'baseurl=%s\\n\\n'", '%', 'source', ')', 'else', ':', 'log', '(', '"Unknown source: {!r}"', '.', 'format', '(', 'source', ')', ')', 'if', 'key', ':', 'if', "'-----BEGIN PGP PUBLIC KEY BLOCK-----'", 'in', 'key', ':', 'with', 'NamedTemporaryFile', '(', "'w+'", ')', 'as', 'key_file', ':', 'key_file', '.', 'write', '(', 'key', ')', 'key_file', '.', 'flush', '(', ')', 'key_file', '.', 'seek', '(', '0', ')', 'subprocess', '.', 'check_call', '(', '[', "'rpm'", ',', "'--import'", ',', 'key_file', '.', 'name', ']', ')', 'else', ':', 'subprocess', '.', 'check_call', '(', '[', "'rpm'", ',', "'--import'", ',', 'key', ']', ')'] | Add a package source to this system.
@param source: a URL with a rpm package
@param key: A key to be added to the system's keyring and used
to verify the signatures on packages. Ideally, this should be an
ASCII format GPG public key including the block headers. A GPG key
id may also be used, but be aware that only insecure protocols are
available to retrieve the actual public key from a public keyserver
placing your Juju environment at risk. | ['Add', 'a', 'package', 'source', 'to', 'this', 'system', '.'] | train | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/fetch/centos.py#L97-L137 |
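An illustrative invocation of `add_source` (the repository URL and key block are placeholders, not real values):

```python
# Sketch: register a yum repository and import its ASCII-armored signing key.
add_source(
    'http://mirror.example.com/centos/extras',   # placeholder repo URL
    key='-----BEGIN PGP PUBLIC KEY BLOCK-----\n...placeholder...\n-----END PGP PUBLIC KEY BLOCK-----')
```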
8,555 | saltstack/salt | salt/transport/zeromq.py | _set_tcp_keepalive | def _set_tcp_keepalive(zmq_socket, opts):
'''
Ensure that TCP keepalives are set as specified in "opts".
Warning: Failure to set TCP keepalives on the salt-master can result in
not detecting the loss of a minion when the connection is lost or when
it's host has been terminated without first closing the socket.
Salt's Presence System depends on this connection status to know if a minion
is "present".
Warning: Failure to set TCP keepalives on minions can result in frequent or
unexpected disconnects!
'''
if hasattr(zmq, 'TCP_KEEPALIVE') and opts:
if 'tcp_keepalive' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE, opts['tcp_keepalive']
)
if 'tcp_keepalive_idle' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, opts['tcp_keepalive_idle']
)
if 'tcp_keepalive_cnt' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, opts['tcp_keepalive_cnt']
)
if 'tcp_keepalive_intvl' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, opts['tcp_keepalive_intvl']
) | python | def _set_tcp_keepalive(zmq_socket, opts):
'''
Ensure that TCP keepalives are set as specified in "opts".
Warning: Failure to set TCP keepalives on the salt-master can result in
not detecting the loss of a minion when the connection is lost or when
it's host has been terminated without first closing the socket.
Salt's Presence System depends on this connection status to know if a minion
is "present".
Warning: Failure to set TCP keepalives on minions can result in frequent or
unexpected disconnects!
'''
if hasattr(zmq, 'TCP_KEEPALIVE') and opts:
if 'tcp_keepalive' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE, opts['tcp_keepalive']
)
if 'tcp_keepalive_idle' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, opts['tcp_keepalive_idle']
)
if 'tcp_keepalive_cnt' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, opts['tcp_keepalive_cnt']
)
if 'tcp_keepalive_intvl' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, opts['tcp_keepalive_intvl']
) | ['def', '_set_tcp_keepalive', '(', 'zmq_socket', ',', 'opts', ')', ':', 'if', 'hasattr', '(', 'zmq', ',', "'TCP_KEEPALIVE'", ')', 'and', 'opts', ':', 'if', "'tcp_keepalive'", 'in', 'opts', ':', 'zmq_socket', '.', 'setsockopt', '(', 'zmq', '.', 'TCP_KEEPALIVE', ',', 'opts', '[', "'tcp_keepalive'", ']', ')', 'if', "'tcp_keepalive_idle'", 'in', 'opts', ':', 'zmq_socket', '.', 'setsockopt', '(', 'zmq', '.', 'TCP_KEEPALIVE_IDLE', ',', 'opts', '[', "'tcp_keepalive_idle'", ']', ')', 'if', "'tcp_keepalive_cnt'", 'in', 'opts', ':', 'zmq_socket', '.', 'setsockopt', '(', 'zmq', '.', 'TCP_KEEPALIVE_CNT', ',', 'opts', '[', "'tcp_keepalive_cnt'", ']', ')', 'if', "'tcp_keepalive_intvl'", 'in', 'opts', ':', 'zmq_socket', '.', 'setsockopt', '(', 'zmq', '.', 'TCP_KEEPALIVE_INTVL', ',', 'opts', '[', "'tcp_keepalive_intvl'", ']', ')'] | Ensure that TCP keepalives are set as specified in "opts".
Warning: Failure to set TCP keepalives on the salt-master can result in
not detecting the loss of a minion when the connection is lost or when
it's host has been terminated without first closing the socket.
Salt's Presence System depends on this connection status to know if a minion
is "present".
Warning: Failure to set TCP keepalives on minions can result in frequent or
unexpected disconnects! | ['Ensure', 'that', 'TCP', 'keepalives', 'are', 'set', 'as', 'specified', 'in', 'opts', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/transport/zeromq.py#L800-L829 |
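For reference, an illustrative `opts` mapping that exercises every branch of the helper above (the numeric values are examples, not recommendations):

```python
# The helper only reads these four keys and applies whichever are present.
opts = {
    'tcp_keepalive': 1,          # enable keepalive probes on the ZeroMQ socket
    'tcp_keepalive_idle': 300,   # idle seconds before the first probe
    'tcp_keepalive_cnt': 5,      # failed probes before the peer is considered dead
    'tcp_keepalive_intvl': 60,   # seconds between probes
}
_set_tcp_keepalive(zmq_socket, opts)   # zmq_socket: an already-created zmq socket
```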
8,556 | brutasse/graphite-api | graphite_api/functions.py | legendValue | def legendValue(requestContext, seriesList, *valueTypes):
"""
Takes one metric or a wildcard seriesList and a string in quotes.
Appends a value to the metric name in the legend. Currently one or several
of: `last`, `avg`, `total`, `min`, `max`. The last argument can be `si`
(default) or `binary`, in that case values will be formatted in the
corresponding system.
Example::
&target=legendValue(Sales.widgets.largeBlue, 'avg', 'max', 'si')
"""
valueFuncs = {
'avg': lambda s: safeDiv(safeSum(s), safeLen(s)),
'total': safeSum,
'min': safeMin,
'max': safeMax,
'last': safeLast,
}
system = None
if valueTypes[-1] in ('si', 'binary'):
system = valueTypes[-1]
valueTypes = valueTypes[:-1]
for valueType in valueTypes:
valueFunc = valueFuncs.get(valueType, lambda s: '(?)')
if system is None:
for series in seriesList:
series.name += " (%s: %s)" % (valueType, valueFunc(series))
else:
for series in seriesList:
value = valueFunc(series)
formatted = None
if value is not None:
formatted = "%.2f%s" % format_units(value, system=system)
series.name = "%-20s%-5s%-10s" % (series.name, valueType,
formatted)
return seriesList | python | def legendValue(requestContext, seriesList, *valueTypes):
"""
Takes one metric or a wildcard seriesList and a string in quotes.
Appends a value to the metric name in the legend. Currently one or several
of: `last`, `avg`, `total`, `min`, `max`. The last argument can be `si`
(default) or `binary`, in that case values will be formatted in the
corresponding system.
Example::
&target=legendValue(Sales.widgets.largeBlue, 'avg', 'max', 'si')
"""
valueFuncs = {
'avg': lambda s: safeDiv(safeSum(s), safeLen(s)),
'total': safeSum,
'min': safeMin,
'max': safeMax,
'last': safeLast,
}
system = None
if valueTypes[-1] in ('si', 'binary'):
system = valueTypes[-1]
valueTypes = valueTypes[:-1]
for valueType in valueTypes:
valueFunc = valueFuncs.get(valueType, lambda s: '(?)')
if system is None:
for series in seriesList:
series.name += " (%s: %s)" % (valueType, valueFunc(series))
else:
for series in seriesList:
value = valueFunc(series)
formatted = None
if value is not None:
formatted = "%.2f%s" % format_units(value, system=system)
series.name = "%-20s%-5s%-10s" % (series.name, valueType,
formatted)
return seriesList | ['def', 'legendValue', '(', 'requestContext', ',', 'seriesList', ',', '*', 'valueTypes', ')', ':', 'valueFuncs', '=', '{', "'avg'", ':', 'lambda', 's', ':', 'safeDiv', '(', 'safeSum', '(', 's', ')', ',', 'safeLen', '(', 's', ')', ')', ',', "'total'", ':', 'safeSum', ',', "'min'", ':', 'safeMin', ',', "'max'", ':', 'safeMax', ',', "'last'", ':', 'safeLast', ',', '}', 'system', '=', 'None', 'if', 'valueTypes', '[', '-', '1', ']', 'in', '(', "'si'", ',', "'binary'", ')', ':', 'system', '=', 'valueTypes', '[', '-', '1', ']', 'valueTypes', '=', 'valueTypes', '[', ':', '-', '1', ']', 'for', 'valueType', 'in', 'valueTypes', ':', 'valueFunc', '=', 'valueFuncs', '.', 'get', '(', 'valueType', ',', 'lambda', 's', ':', "'(?)'", ')', 'if', 'system', 'is', 'None', ':', 'for', 'series', 'in', 'seriesList', ':', 'series', '.', 'name', '+=', '" (%s: %s)"', '%', '(', 'valueType', ',', 'valueFunc', '(', 'series', ')', ')', 'else', ':', 'for', 'series', 'in', 'seriesList', ':', 'value', '=', 'valueFunc', '(', 'series', ')', 'formatted', '=', 'None', 'if', 'value', 'is', 'not', 'None', ':', 'formatted', '=', '"%.2f%s"', '%', 'format_units', '(', 'value', ',', 'system', '=', 'system', ')', 'series', '.', 'name', '=', '"%-20s%-5s%-10s"', '%', '(', 'series', '.', 'name', ',', 'valueType', ',', 'formatted', ')', 'return', 'seriesList'] | Takes one metric or a wildcard seriesList and a string in quotes.
Appends a value to the metric name in the legend. Currently one or several
of: `last`, `avg`, `total`, `min`, `max`. The last argument can be `si`
(default) or `binary`, in that case values will be formatted in the
corresponding system.
Example::
&target=legendValue(Sales.widgets.largeBlue, 'avg', 'max', 'si') | ['Takes', 'one', 'metric', 'or', 'a', 'wildcard', 'seriesList', 'and', 'a', 'string', 'in', 'quotes', '.', 'Appends', 'a', 'value', 'to', 'the', 'metric', 'name', 'in', 'the', 'legend', '.', 'Currently', 'one', 'or', 'several', 'of', ':', 'last', 'avg', 'total', 'min', 'max', '.', 'The', 'last', 'argument', 'can', 'be', 'si', '(', 'default', ')', 'or', 'binary', 'in', 'that', 'case', 'values', 'will', 'be', 'formatted', 'in', 'the', 'corresponding', 'system', '.'] | train | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L1966-L2003 |
8,557 | googleapis/google-cloud-python | tasks/google/cloud/tasks_v2/gapic/cloud_tasks_client.py | CloudTasksClient.task_path | def task_path(cls, project, location, queue, task):
"""Return a fully-qualified task string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/queues/{queue}/tasks/{task}",
project=project,
location=location,
queue=queue,
task=task,
) | python | def task_path(cls, project, location, queue, task):
"""Return a fully-qualified task string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/queues/{queue}/tasks/{task}",
project=project,
location=location,
queue=queue,
task=task,
) | ['def', 'task_path', '(', 'cls', ',', 'project', ',', 'location', ',', 'queue', ',', 'task', ')', ':', 'return', 'google', '.', 'api_core', '.', 'path_template', '.', 'expand', '(', '"projects/{project}/locations/{location}/queues/{queue}/tasks/{task}"', ',', 'project', '=', 'project', ',', 'location', '=', 'location', ',', 'queue', '=', 'queue', ',', 'task', '=', 'task', ',', ')'] | Return a fully-qualified task string. | ['Return', 'a', 'fully', '-', 'qualified', 'task', 'string', '.'] | train | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/tasks/google/cloud/tasks_v2/gapic/cloud_tasks_client.py#L107-L115 |
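A worked expansion of the template above (all identifiers are made up):

```python
path = CloudTasksClient.task_path('my-project', 'us-central1', 'my-queue', 'task-123')
assert path == 'projects/my-project/locations/us-central1/queues/my-queue/tasks/task-123'
```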
8,558 | mushkevych/scheduler | synergy/system/event_clock.py | EventClock.next_run_in | def next_run_in(self, utc_now=None):
""" :param utc_now: optional parameter to be used by Unit Tests as a definition of "now"
:return: timedelta instance presenting amount of time before the trigger is triggered next time
or None if the EventClock instance is not running """
if utc_now is None:
utc_now = datetime.utcnow()
if self.is_alive():
smallest_timedelta = timedelta(days=99, hours=0, minutes=0, seconds=0, microseconds=0, milliseconds=0)
for event_time in self.timestamps:
next_trigger = event_time.next_trigger_frequency(utc_now)
if next_trigger - utc_now < smallest_timedelta:
smallest_timedelta = next_trigger - utc_now
return smallest_timedelta
else:
return None | python | def next_run_in(self, utc_now=None):
""" :param utc_now: optional parameter to be used by Unit Tests as a definition of "now"
:return: timedelta instance presenting amount of time before the trigger is triggered next time
or None if the EventClock instance is not running """
if utc_now is None:
utc_now = datetime.utcnow()
if self.is_alive():
smallest_timedelta = timedelta(days=99, hours=0, minutes=0, seconds=0, microseconds=0, milliseconds=0)
for event_time in self.timestamps:
next_trigger = event_time.next_trigger_frequency(utc_now)
if next_trigger - utc_now < smallest_timedelta:
smallest_timedelta = next_trigger - utc_now
return smallest_timedelta
else:
return None | ['def', 'next_run_in', '(', 'self', ',', 'utc_now', '=', 'None', ')', ':', 'if', 'utc_now', 'is', 'None', ':', 'utc_now', '=', 'datetime', '.', 'utcnow', '(', ')', 'if', 'self', '.', 'is_alive', '(', ')', ':', 'smallest_timedelta', '=', 'timedelta', '(', 'days', '=', '99', ',', 'hours', '=', '0', ',', 'minutes', '=', '0', ',', 'seconds', '=', '0', ',', 'microseconds', '=', '0', ',', 'milliseconds', '=', '0', ')', 'for', 'event_time', 'in', 'self', '.', 'timestamps', ':', 'next_trigger', '=', 'event_time', '.', 'next_trigger_frequency', '(', 'utc_now', ')', 'if', 'next_trigger', '-', 'utc_now', '<', 'smallest_timedelta', ':', 'smallest_timedelta', '=', 'next_trigger', '-', 'utc_now', 'return', 'smallest_timedelta', 'else', ':', 'return', 'None'] | :param utc_now: optional parameter to be used by Unit Tests as a definition of "now"
:return: timedelta instance representing the amount of time before the trigger is triggered next time
or None if the EventClock instance is not running | [':', 'param', 'utc_now', ':', 'optional', 'parameter', 'to', 'be', 'used', 'by', 'Unit', 'Tests', 'as', 'a', 'definition', 'of', 'now', ':', 'return', ':', 'timedelta', 'instance', 'presenting', 'amount', 'of', 'time', 'before', 'the', 'trigger', 'is', 'triggered', 'next', 'time', 'or', 'None', 'if', 'the', 'EventClock', 'instance', 'is', 'not', 'running'] | train | https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/system/event_clock.py#L122-L138 |
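A short hedged usage sketch for next_run_in; `clock` is assumed to be an already-constructed, running EventClock instance (its constructor is not shown in this record).

from datetime import datetime

remaining = clock.next_run_in(utc_now=datetime(2020, 1, 1, 12, 0, 0))
if remaining is None:
    print("event clock is not running")
else:
    print("next trigger due in", remaining)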
8,559 | molmod/molmod | molmod/pairff.py | PairFF.update_coordinates | def update_coordinates(self, coordinates=None):
"""Update the coordinates (and derived quantities)
Argument:
coordinates -- new Cartesian coordinates of the system
"""
if coordinates is not None:
self.coordinates = coordinates
self.numc = len(self.coordinates)
self.distances = np.zeros((self.numc, self.numc), float)
self.deltas = np.zeros((self.numc, self.numc, 3), float)
self.directions = np.zeros((self.numc, self.numc, 3), float)
self.dirouters = np.zeros((self.numc, self.numc, 3, 3), float)
for index1, coordinate1 in enumerate(self.coordinates):
for index2, coordinate2 in enumerate(self.coordinates):
delta = coordinate1 - coordinate2
self.deltas[index1, index2] = delta
distance = np.linalg.norm(delta)
self.distances[index1, index2] = distance
if index1 != index2:
tmp = delta/distance
self.directions[index1, index2] = tmp
self.dirouters[index1, index2] = np.outer(tmp, tmp) | python | def update_coordinates(self, coordinates=None):
"""Update the coordinates (and derived quantities)
Argument:
coordinates -- new Cartesian coordinates of the system
"""
if coordinates is not None:
self.coordinates = coordinates
self.numc = len(self.coordinates)
self.distances = np.zeros((self.numc, self.numc), float)
self.deltas = np.zeros((self.numc, self.numc, 3), float)
self.directions = np.zeros((self.numc, self.numc, 3), float)
self.dirouters = np.zeros((self.numc, self.numc, 3, 3), float)
for index1, coordinate1 in enumerate(self.coordinates):
for index2, coordinate2 in enumerate(self.coordinates):
delta = coordinate1 - coordinate2
self.deltas[index1, index2] = delta
distance = np.linalg.norm(delta)
self.distances[index1, index2] = distance
if index1 != index2:
tmp = delta/distance
self.directions[index1, index2] = tmp
self.dirouters[index1, index2] = np.outer(tmp, tmp) | ['def', 'update_coordinates', '(', 'self', ',', 'coordinates', '=', 'None', ')', ':', 'if', 'coordinates', 'is', 'not', 'None', ':', 'self', '.', 'coordinates', '=', 'coordinates', 'self', '.', 'numc', '=', 'len', '(', 'self', '.', 'coordinates', ')', 'self', '.', 'distances', '=', 'np', '.', 'zeros', '(', '(', 'self', '.', 'numc', ',', 'self', '.', 'numc', ')', ',', 'float', ')', 'self', '.', 'deltas', '=', 'np', '.', 'zeros', '(', '(', 'self', '.', 'numc', ',', 'self', '.', 'numc', ',', '3', ')', ',', 'float', ')', 'self', '.', 'directions', '=', 'np', '.', 'zeros', '(', '(', 'self', '.', 'numc', ',', 'self', '.', 'numc', ',', '3', ')', ',', 'float', ')', 'self', '.', 'dirouters', '=', 'np', '.', 'zeros', '(', '(', 'self', '.', 'numc', ',', 'self', '.', 'numc', ',', '3', ',', '3', ')', ',', 'float', ')', 'for', 'index1', ',', 'coordinate1', 'in', 'enumerate', '(', 'self', '.', 'coordinates', ')', ':', 'for', 'index2', ',', 'coordinate2', 'in', 'enumerate', '(', 'self', '.', 'coordinates', ')', ':', 'delta', '=', 'coordinate1', '-', 'coordinate2', 'self', '.', 'deltas', '[', 'index1', ',', 'index2', ']', '=', 'delta', 'distance', '=', 'np', '.', 'linalg', '.', 'norm', '(', 'delta', ')', 'self', '.', 'distances', '[', 'index1', ',', 'index2', ']', '=', 'distance', 'if', 'index1', '!=', 'index2', ':', 'tmp', '=', 'delta', '/', 'distance', 'self', '.', 'directions', '[', 'index1', ',', 'index2', ']', '=', 'tmp', 'self', '.', 'dirouters', '[', 'index1', ',', 'index2', ']', '=', 'np', '.', 'outer', '(', 'tmp', ',', 'tmp', ')'] | Update the coordinates (and derived quantities)
Argument:
coordinates -- new Cartesian coordinates of the system | ['Update', 'the', 'coordinates', '(', 'and', 'derived', 'quantities', ')'] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/pairff.py#L67-L89 |
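The method caches pairwise deltas, distances, unit directions and their outer products. The sketch below reproduces that bookkeeping with plain NumPy on a toy coordinate array; it illustrates the arithmetic only and is not the molmod API.

import numpy as np

coords = np.array([[0.0, 0.0, 0.0],
                   [1.0, 0.0, 0.0],
                   [0.0, 2.0, 0.0]])
deltas = coords[:, None, :] - coords[None, :, :]                 # (n, n, 3)
distances = np.linalg.norm(deltas, axis=-1)                      # (n, n)
directions = np.zeros_like(deltas)
offdiag = distances > 0
directions[offdiag] = deltas[offdiag] / distances[offdiag][:, None]
dirouters = np.einsum('ijk,ijl->ijkl', directions, directions)   # (n, n, 3, 3)
print(distances[0, 1], directions[1, 0])                         # 1.0 [1. 0. 0.]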
8,560 | vreon/figment | examples/theworldfoundry/theworldfoundry/components/spatial.py | Spatial.emit | def emit(self, sound, exclude=set()):
"""Send text to entities nearby this one."""
nearby = self.nearby()
try:
exclude = set(exclude)
except TypeError:
exclude = set([exclude])
exclude.add(self.entity)
listeners = nearby - exclude
for listener in listeners:
listener.tell(sound) | python | def emit(self, sound, exclude=set()):
"""Send text to entities nearby this one."""
nearby = self.nearby()
try:
exclude = set(exclude)
except TypeError:
exclude = set([exclude])
exclude.add(self.entity)
listeners = nearby - exclude
for listener in listeners:
listener.tell(sound) | ['def', 'emit', '(', 'self', ',', 'sound', ',', 'exclude', '=', 'set', '(', ')', ')', ':', 'nearby', '=', 'self', '.', 'nearby', '(', ')', 'try', ':', 'exclude', '=', 'set', '(', 'exclude', ')', 'except', 'TypeError', ':', 'exclude', '=', 'set', '(', '[', 'exclude', ']', ')', 'exclude', '.', 'add', '(', 'self', '.', 'entity', ')', 'listeners', '=', 'nearby', '-', 'exclude', 'for', 'listener', 'in', 'listeners', ':', 'listener', '.', 'tell', '(', 'sound', ')'] | Send text to entities nearby this one. | ['Send', 'text', 'to', 'entities', 'nearby', 'this', 'one', '.'] | train | https://github.com/vreon/figment/blob/78248b53d06bc525004a0f5b19c45afd1536083c/examples/theworldfoundry/theworldfoundry/components/spatial.py#L283-L293 |
8,561 | speechinformaticslab/vfclust | vfclust/vfclust.py | VFClustEngine.print_output | def print_output(self):
""" Outputs final list of measures to screen a csv file.
The .csv file created has the same name as the input file, with
"vfclust_TYPE_CATEGORY" appended to the filename, where TYPE indicates
the type of task performed (SEMANTIC or PHONETIC) and CATEGORY
indicates the category requirement of the stimulus (i.e. 'f' or 'animals'
for phonetic and semantic fluency test, respectively.
"""
if self.response_format == "csv":
for key in self.measures:
if "TIMING_" in key:
self.measures[key] = "NA"
if not self.quiet:
print
print self.type.upper() + " RESULTS:"
keys = [e for e in self.measures if 'COUNT_' in e]
keys.sort()
print "Counts:"
print_table([(entry, str(self.measures[entry])) for entry in keys])
keys = [e for e in self.measures if 'COLLECTION_' in e]
keys.sort()
print
print "Collection measures:"
print_table([(entry, str(self.measures[entry])) for entry in keys])
if self.response_format == "TextGrid":
keys = [e for e in self.measures if 'TIMING_' in e]
keys.sort()
print
print "Time-based measures:"
print_table([(entry, str(self.measures[entry])) for entry in keys])
#write to CSV file
if self.target_file:
with open(self.target_file, 'w') as outfile:
header = ['file_id'] + \
[self.type + "_" + e for e in self.measures if 'COUNT_' in e] + \
[self.type + "_" + e for e in self.measures if 'COLLECTION_' in e] + \
[self.type + "_" + e for e in self.measures if 'TIMING_' in e]
writer = csv.writer(outfile, quoting=csv.QUOTE_MINIMAL)
writer.writerow(header)
#the split/join gets rid of the type appended just above
writer.writerow([self.measures["file_id"]] +
[self.measures["_".join(e.split('_')[1:])] for e in header[1:]]) | python | def print_output(self):
""" Outputs final list of measures to screen a csv file.
The .csv file created has the same name as the input file, with
"vfclust_TYPE_CATEGORY" appended to the filename, where TYPE indicates
the type of task performed (SEMANTIC or PHONETIC) and CATEGORY
indicates the category requirement of the stimulus (i.e. 'f' or 'animals'
for phonetic and semantic fluency test, respectively.
"""
if self.response_format == "csv":
for key in self.measures:
if "TIMING_" in key:
self.measures[key] = "NA"
if not self.quiet:
print
print self.type.upper() + " RESULTS:"
keys = [e for e in self.measures if 'COUNT_' in e]
keys.sort()
print "Counts:"
print_table([(entry, str(self.measures[entry])) for entry in keys])
keys = [e for e in self.measures if 'COLLECTION_' in e]
keys.sort()
print
print "Collection measures:"
print_table([(entry, str(self.measures[entry])) for entry in keys])
if self.response_format == "TextGrid":
keys = [e for e in self.measures if 'TIMING_' in e]
keys.sort()
print
print "Time-based measures:"
print_table([(entry, str(self.measures[entry])) for entry in keys])
#write to CSV file
if self.target_file:
with open(self.target_file, 'w') as outfile:
header = ['file_id'] + \
[self.type + "_" + e for e in self.measures if 'COUNT_' in e] + \
[self.type + "_" + e for e in self.measures if 'COLLECTION_' in e] + \
[self.type + "_" + e for e in self.measures if 'TIMING_' in e]
writer = csv.writer(outfile, quoting=csv.QUOTE_MINIMAL)
writer.writerow(header)
#the split/join gets rid of the type appended just above
writer.writerow([self.measures["file_id"]] +
[self.measures["_".join(e.split('_')[1:])] for e in header[1:]]) | ['def', 'print_output', '(', 'self', ')', ':', 'if', 'self', '.', 'response_format', '==', '"csv"', ':', 'for', 'key', 'in', 'self', '.', 'measures', ':', 'if', '"TIMING_"', 'in', 'key', ':', 'self', '.', 'measures', '[', 'key', ']', '=', '"NA"', 'if', 'not', 'self', '.', 'quiet', ':', 'print', 'print', 'self', '.', 'type', '.', 'upper', '(', ')', '+', '" RESULTS:"', 'keys', '=', '[', 'e', 'for', 'e', 'in', 'self', '.', 'measures', 'if', "'COUNT_'", 'in', 'e', ']', 'keys', '.', 'sort', '(', ')', 'print', '"Counts:"', 'print_table', '(', '[', '(', 'entry', ',', 'str', '(', 'self', '.', 'measures', '[', 'entry', ']', ')', ')', 'for', 'entry', 'in', 'keys', ']', ')', 'keys', '=', '[', 'e', 'for', 'e', 'in', 'self', '.', 'measures', 'if', "'COLLECTION_'", 'in', 'e', ']', 'keys', '.', 'sort', '(', ')', 'print', 'print', '"Collection measures:"', 'print_table', '(', '[', '(', 'entry', ',', 'str', '(', 'self', '.', 'measures', '[', 'entry', ']', ')', ')', 'for', 'entry', 'in', 'keys', ']', ')', 'if', 'self', '.', 'response_format', '==', '"TextGrid"', ':', 'keys', '=', '[', 'e', 'for', 'e', 'in', 'self', '.', 'measures', 'if', "'TIMING_'", 'in', 'e', ']', 'keys', '.', 'sort', '(', ')', 'print', 'print', '"Time-based measures:"', 'print_table', '(', '[', '(', 'entry', ',', 'str', '(', 'self', '.', 'measures', '[', 'entry', ']', ')', ')', 'for', 'entry', 'in', 'keys', ']', ')', '#write to CSV file', 'if', 'self', '.', 'target_file', ':', 'with', 'open', '(', 'self', '.', 'target_file', ',', "'w'", ')', 'as', 'outfile', ':', 'header', '=', '[', "'file_id'", ']', '+', '[', 'self', '.', 'type', '+', '"_"', '+', 'e', 'for', 'e', 'in', 'self', '.', 'measures', 'if', "'COUNT_'", 'in', 'e', ']', '+', '[', 'self', '.', 'type', '+', '"_"', '+', 'e', 'for', 'e', 'in', 'self', '.', 'measures', 'if', "'COLLECTION_'", 'in', 'e', ']', '+', '[', 'self', '.', 'type', '+', '"_"', '+', 'e', 'for', 'e', 'in', 'self', '.', 'measures', 'if', "'TIMING_'", 'in', 'e', ']', 'writer', '=', 'csv', '.', 'writer', '(', 'outfile', ',', 'quoting', '=', 'csv', '.', 'QUOTE_MINIMAL', ')', 'writer', '.', 'writerow', '(', 'header', ')', '#the split/join gets rid of the type appended just above', 'writer', '.', 'writerow', '(', '[', 'self', '.', 'measures', '[', '"file_id"', ']', ']', '+', '[', 'self', '.', 'measures', '[', '"_"', '.', 'join', '(', 'e', '.', 'split', '(', "'_'", ')', '[', '1', ':', ']', ')', ']', 'for', 'e', 'in', 'header', '[', '1', ':', ']', ']', ')'] | Outputs final list of measures to screen a csv file.
The .csv file created has the same name as the input file, with
"vfclust_TYPE_CATEGORY" appended to the filename, where TYPE indicates
the type of task performed done (SEMANTIC or PHONETIC) and CATEGORY
indicates the category requirement of the stimulus (i.e. 'f' or 'animals'
for phonetic and semantic fluency test, respectively. | ['Outputs', 'final', 'list', 'of', 'measures', 'to', 'screen', 'a', 'csv', 'file', '.'] | train | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L1546-L1593 |
8,562 | ashmastaflash/kal-wrapper | kalibrate/fn.py | options_string_builder | def options_string_builder(option_mapping, args):
"""Return arguments for CLI invocation of kal."""
options_string = ""
for option, flag in option_mapping.items():
if option in args:
options_string += str(" %s %s" % (flag, str(args[option])))
return options_string | python | def options_string_builder(option_mapping, args):
"""Return arguments for CLI invocation of kal."""
options_string = ""
for option, flag in option_mapping.items():
if option in args:
options_string += str(" %s %s" % (flag, str(args[option])))
return options_string | ['def', 'options_string_builder', '(', 'option_mapping', ',', 'args', ')', ':', 'options_string', '=', '""', 'for', 'option', ',', 'flag', 'in', 'option_mapping', '.', 'items', '(', ')', ':', 'if', 'option', 'in', 'args', ':', 'options_string', '+=', 'str', '(', '" %s %s"', '%', '(', 'flag', ',', 'str', '(', 'args', '[', 'option', ']', ')', ')', ')', 'return', 'options_string'] | Return arguments for CLI invocation of kal. | ['Return', 'arguments', 'for', 'CLI', 'invocation', 'of', 'kal', '.'] | train | https://github.com/ashmastaflash/kal-wrapper/blob/80ee03ab7bd3172ac26b769d6b442960f3424b0e/kalibrate/fn.py#L6-L12 |
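A quick illustrative call of options_string_builder; the flag mapping and argument values below are made up for the example.

option_mapping = {"gain": "-g", "device": "-d", "error": "-e"}
args = {"gain": 40, "device": 0}
print(options_string_builder(option_mapping, args))
# -> " -g 40 -d 0" (flag order follows the mapping's iteration order)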
8,563 | noahbenson/neuropythy | neuropythy/optimize/core.py | gaussian | def gaussian(f=Ellipsis, mu=0, sigma=1, scale=1, invert=False, normalize=False):
'''
gaussian() yields a potential function f(x) that calculates a Gaussian function over x; the
formula used is given below.
gaussian(g) yields a function h(x) such that, if f(x) is yielded by gaussian(), h(x) = f(g(x)).
The formula employed by the Gaussian function is as follows, with mu, sigma, and scale all being
parameters that one can provide via optional arguments:
scale * exp(-0.5 * ((x - mu) / sigma)**2)
The following optional arguments may be given:
* mu (default: 0) specifies the mean of the Gaussian.
* sigma (default: 1) specifies the standard deviation (sigma) parameter of the Gaussian.
* scale (default: 1) specifies the scale to use.
* invert (default: False) specifies whether the Gaussian should be inverted. If inverted, then
the formula, scale * exp(...), is replaced with scale * (1 - exp(...)).
* normalize (default: False) specifies whether the result should be multiplied by the inverse
of the area under the uninverted and unscaled curve; i.e., if normalize is True, the entire
result is multiplied by 1/sqrt(2*pi*sigma**2).
'''
f = to_potential(f)
F = exp(-0.5 * ((f - mu) / sigma)**2)
if invert: F = 1 - F
F = F * scale
if normalize: F = F / (np.sqrt(2.0*np.pi) * sigma)
return F | python | def gaussian(f=Ellipsis, mu=0, sigma=1, scale=1, invert=False, normalize=False):
'''
gaussian() yields a potential function f(x) that calculates a Gaussian function over x; the
formula used is given below.
gaussian(g) yields a function h(x) such that, if f(x) is yielded by gaussian(), h(x) = f(g(x)).
The formula employed by the Gaussian function is as follows, with mu, sigma, and scale all being
parameters that one can provide via optional arguments:
scale * exp(-0.5 * ((x - mu) / sigma)**2)
The following optional arguments may be given:
* mu (default: 0) specifies the mean of the Gaussian.
* sigma (default: 1) specifies the standard deviation (sigma) parameter of the Gaussian.
* scale (default: 1) specifies the scale to use.
* invert (default: False) specifies whether the Gaussian should be inverted. If inverted, then
the formula, scale * exp(...), is replaced with scale * (1 - exp(...)).
* normalize (default: False) specifies whether the result should be multiplied by the inverse
of the area under the uninverted and unscaled curve; i.e., if normalize is True, the entire
result is multiplied by 1/sqrt(2*pi*sigma**2).
'''
f = to_potential(f)
F = exp(-0.5 * ((f - mu) / sigma)**2)
if invert: F = 1 - F
F = F * scale
if normalize: F = F / (np.sqrt(2.0*np.pi) * sigma)
return F | ['def', 'gaussian', '(', 'f', '=', 'Ellipsis', ',', 'mu', '=', '0', ',', 'sigma', '=', '1', ',', 'scale', '=', '1', ',', 'invert', '=', 'False', ',', 'normalize', '=', 'False', ')', ':', 'f', '=', 'to_potential', '(', 'f', ')', 'F', '=', 'exp', '(', '-', '0.5', '*', '(', '(', 'f', '-', 'mu', ')', '/', 'sigma', ')', '**', '2', ')', 'if', 'invert', ':', 'F', '=', '1', '-', 'F', 'F', '=', 'F', '*', 'scale', 'if', 'normalize', ':', 'F', '=', 'F', '/', '(', 'np', '.', 'sqrt', '(', '2.0', '*', 'np', '.', 'pi', ')', '*', 'sigma', ')', 'return', 'F'] | gaussian() yields a potential function f(x) that calculates a Gaussian function over x; the
formula used is given below.
gaussian(g) yields a function h(x) such that, if f(x) is yielded by gaussian(), h(x) = f(g(x)).
The formula employed by the Gaussian function is as follows, with mu, sigma, and scale all being
parameters that one can provide via optional arguments:
scale * exp(-0.5 * ((x - mu) / sigma)**2)
The following optional arguments may be given:
* mu (default: 0) specifies the mean of the Gaussian.
* sigma (default: 1) specifies the standard deviation (sigma) parameter of the Gaussian.
* scale (default: 1) specifies the scale to use.
* invert (default: False) specifies whether the Gaussian should be inverted. If inverted, then
the formula, scale * exp(...), is replaced with scale * (1 - exp(...)).
* normalize (default: False) specifies whether the result should be multiplied by the inverse
of the area under the uninverted and unscaled curve; i.e., if normalize is True, the entire
result is multiplied by 1/sqrt(2*pi*sigma**2). | ['gaussian', '()', 'yields', 'a', 'potential', 'function', 'f', '(', 'x', ')', 'that', 'calculates', 'a', 'Gaussian', 'function', 'over', 'x', ';', 'the', 'formula', 'used', 'is', 'given', 'below', '.', 'gaussian', '(', 'g', ')', 'yields', 'a', 'function', 'h', '(', 'x', ')', 'such', 'that', 'if', 'f', '(', 'x', ')', 'is', 'yielded', 'by', 'gaussian', '()', 'h', '(', 'x', ')', '=', 'f', '(', 'g', '(', 'x', '))', '.'] | train | https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/optimize/core.py#L1044-L1069 |
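A plain-NumPy illustration of the formula and optional arguments described above; this is only the arithmetic applied to an array, not the neuropythy potential-function object that gaussian() actually returns.

import numpy as np

def gaussian_values(x, mu=0.0, sigma=1.0, scale=1.0, invert=False, normalize=False):
    # Same steps as the potential construction: exponential, optional inversion,
    # scaling, and optional normalization by sqrt(2*pi)*sigma.
    g = np.exp(-0.5 * ((x - mu) / sigma) ** 2)
    if invert:
        g = 1 - g
    g = g * scale
    if normalize:
        g = g / (np.sqrt(2.0 * np.pi) * sigma)
    return g

x = np.linspace(-3.0, 3.0, 7)
print(gaussian_values(x, mu=0.0, sigma=1.0, normalize=True))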
8,564 | Iotic-Labs/py-IoticAgent | src/IoticAgent/IOT/Point.py | Feed.set_recent_config | def set_recent_config(self, max_samples=0):
"""Update/configure recent data settings for this Feed. If the container does not support recent storage or it
is not enabled for this owner, this function will have no effect.
`max_samples` (optional) (int) how many shares to store for later retrieval. If not supported by container, this
argument will be ignored. A value of zero disables this feature whilst a negative value requests the maximum
sample store amount.
Returns QAPI recent config function payload
#!python
{
"maxSamples": 0
}
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
"""
evt = self._client._request_point_recent_config(self._type, self.lid, self.pid, max_samples)
self._client._wait_and_except_if_failed(evt)
return evt.payload | python | def set_recent_config(self, max_samples=0):
"""Update/configure recent data settings for this Feed. If the container does not support recent storage or it
is not enabled for this owner, this function will have no effect.
`max_samples` (optional) (int) how many shares to store for later retrieval. If not supported by container, this
argument will be ignored. A value of zero disables this feature whilst a negative value requests the maximum
sample store amount.
Returns QAPI recent config function payload
#!python
{
"maxSamples": 0
}
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
"""
evt = self._client._request_point_recent_config(self._type, self.lid, self.pid, max_samples)
self._client._wait_and_except_if_failed(evt)
return evt.payload | ['def', 'set_recent_config', '(', 'self', ',', 'max_samples', '=', '0', ')', ':', 'evt', '=', 'self', '.', '_client', '.', '_request_point_recent_config', '(', 'self', '.', '_type', ',', 'self', '.', 'lid', ',', 'self', '.', 'pid', ',', 'max_samples', ')', 'self', '.', '_client', '.', '_wait_and_except_if_failed', '(', 'evt', ')', 'return', 'evt', '.', 'payload'] | Update/configure recent data settings for this Feed. If the container does not support recent storage or it
is not enabled for this owner, this function will have no effect.
`max_samples` (optional) (int) how many shares to store for later retrieval. If not supported by container, this
argument will be ignored. A value of zero disables this feature whilst a negative value requests the maximum
sample store amount.
Returns QAPI recent config function payload
#!python
{
"maxSamples": 0
}
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure | ['Update', '/', 'configure', 'recent', 'data', 'settings', 'for', 'this', 'Feed', '.', 'If', 'the', 'container', 'does', 'not', 'support', 'recent', 'storage', 'or', 'it', 'is', 'not', 'enabled', 'for', 'this', 'owner', 'this', 'function', 'will', 'have', 'no', 'effect', '.'] | train | https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/IOT/Point.py#L405-L429 |
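A brief hedged usage sketch for set_recent_config; `feed` is assumed to be a Feed already created through an IoticAgent client, and the container is assumed to support recent-data storage.

payload = feed.set_recent_config(max_samples=10)   # keep the last 10 shares
print(payload)                                      # e.g. {"maxSamples": 10}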
8,565 | odlgroup/odl | odl/operator/default_ops.py | PowerOperator._call | def _call(self, x, out=None):
"""Take the power of ``x`` and write to ``out`` if given."""
if out is None:
return x ** self.exponent
elif self.__domain_is_field:
raise ValueError('cannot use `out` with field')
else:
out.assign(x)
out **= self.exponent | python | def _call(self, x, out=None):
"""Take the power of ``x`` and write to ``out`` if given."""
if out is None:
return x ** self.exponent
elif self.__domain_is_field:
raise ValueError('cannot use `out` with field')
else:
out.assign(x)
out **= self.exponent | ['def', '_call', '(', 'self', ',', 'x', ',', 'out', '=', 'None', ')', ':', 'if', 'out', 'is', 'None', ':', 'return', 'x', '**', 'self', '.', 'exponent', 'elif', 'self', '.', '__domain_is_field', ':', 'raise', 'ValueError', '(', "'cannot use `out` with field'", ')', 'else', ':', 'out', '.', 'assign', '(', 'x', ')', 'out', '**=', 'self', '.', 'exponent'] | Take the power of ``x`` and write to ``out`` if given. | ['Take', 'the', 'power', 'of', 'x', 'and', 'write', 'to', 'out', 'if', 'given', '.'] | train | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/default_ops.py#L447-L455 |
8,566 | ThreatConnect-Inc/tcex | tcex/tcex_ti/mappings/victim.py | Victim.update_network_asset | def update_network_asset(self, asset_id, name, asset_type):
"""
Updates a Network Asset
Args:
name: The name provided to the network asset
asset_type: The type provided to the network asset
asset_id:
Returns:
"""
self.update_asset('NETWORK', asset_id, name, asset_type) | python | def update_network_asset(self, asset_id, name, asset_type):
"""
Updates a Network Asset
Args:
name: The name provided to the network asset
asset_type: The type provided to the network asset
asset_id:
Returns:
"""
self.update_asset('NETWORK', asset_id, name, asset_type) | ['def', 'update_network_asset', '(', 'self', ',', 'asset_id', ',', 'name', ',', 'asset_type', ')', ':', 'self', '.', 'update_asset', '(', "'NETWORK'", ',', 'asset_id', ',', 'name', ',', 'asset_type', ')'] | Updates a Network Asset
Args:
name: The name provided to the network asset
asset_type: The type provided to the network asset
asset_id:
Returns: | ['Updates', 'a', 'Network', 'Asset', 'Args', ':', 'name', ':', 'The', 'name', 'provided', 'to', 'the', 'network', 'asset', 'asset_type', ':', 'The', 'type', 'provided', 'to', 'the', 'network', 'asset', 'asset_id', ':'] | train | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti/mappings/victim.py#L526-L537 |
8,567 | humilis/humilis-lambdautils | lambdautils/state.py | set_state | def set_state(key, value, namespace=None, table_name=None, environment=None,
layer=None, stage=None, shard_id=None, consistent=True,
serializer=json.dumps, wait_exponential_multiplier=500,
wait_exponential_max=5000, stop_max_delay=10000, ttl=None):
"""Set Lambda state value."""
if table_name is None:
table_name = _state_table_name(environment=environment, layer=layer,
stage=stage)
if not table_name:
msg = ("Can't produce state table name: unable to set state "
"item '{}'".format(key))
logger.error(msg)
raise StateTableError(msg)
return
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(table_name)
logger.info("Putting {} -> {} in DynamoDB table {}".format(key, value,
table_name))
if serializer:
try:
value = serializer(value)
except TypeError:
logger.error(
"Value for state key '{}' is not json-serializable".format(
key))
raise
if namespace:
key = "{}:{}".format(namespace, key)
if shard_id:
key = "{}:{}".format(shard_id, key)
item = {"id": key, "value": value}
if ttl:
item["ttl"] = {"N": str(int(time.time() + ttl))}
@retry(retry_on_exception=_is_critical_exception,
wait_exponential_multiplier=500,
wait_exponential_max=5000,
stop_max_delay=10000)
def put_item():
try:
return table.put_item(Item=item)
except Exception as err:
if _is_dynamodb_critical_exception(err):
raise CriticalError(err)
else:
raise
resp = put_item()
logger.info("Response from DynamoDB: '{}'".format(resp))
return resp | python | def set_state(key, value, namespace=None, table_name=None, environment=None,
layer=None, stage=None, shard_id=None, consistent=True,
serializer=json.dumps, wait_exponential_multiplier=500,
wait_exponential_max=5000, stop_max_delay=10000, ttl=None):
"""Set Lambda state value."""
if table_name is None:
table_name = _state_table_name(environment=environment, layer=layer,
stage=stage)
if not table_name:
msg = ("Can't produce state table name: unable to set state "
"item '{}'".format(key))
logger.error(msg)
raise StateTableError(msg)
return
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(table_name)
logger.info("Putting {} -> {} in DynamoDB table {}".format(key, value,
table_name))
if serializer:
try:
value = serializer(value)
except TypeError:
logger.error(
"Value for state key '{}' is not json-serializable".format(
key))
raise
if namespace:
key = "{}:{}".format(namespace, key)
if shard_id:
key = "{}:{}".format(shard_id, key)
item = {"id": key, "value": value}
if ttl:
item["ttl"] = {"N": str(int(time.time() + ttl))}
@retry(retry_on_exception=_is_critical_exception,
wait_exponential_multiplier=500,
wait_exponential_max=5000,
stop_max_delay=10000)
def put_item():
try:
return table.put_item(Item=item)
except Exception as err:
if _is_dynamodb_critical_exception(err):
raise CriticalError(err)
else:
raise
resp = put_item()
logger.info("Response from DynamoDB: '{}'".format(resp))
return resp | ['def', 'set_state', '(', 'key', ',', 'value', ',', 'namespace', '=', 'None', ',', 'table_name', '=', 'None', ',', 'environment', '=', 'None', ',', 'layer', '=', 'None', ',', 'stage', '=', 'None', ',', 'shard_id', '=', 'None', ',', 'consistent', '=', 'True', ',', 'serializer', '=', 'json', '.', 'dumps', ',', 'wait_exponential_multiplier', '=', '500', ',', 'wait_exponential_max', '=', '5000', ',', 'stop_max_delay', '=', '10000', ',', 'ttl', '=', 'None', ')', ':', 'if', 'table_name', 'is', 'None', ':', 'table_name', '=', '_state_table_name', '(', 'environment', '=', 'environment', ',', 'layer', '=', 'layer', ',', 'stage', '=', 'stage', ')', 'if', 'not', 'table_name', ':', 'msg', '=', '(', '"Can\'t produce state table name: unable to set state "', '"item \'{}\'"', '.', 'format', '(', 'key', ')', ')', 'logger', '.', 'error', '(', 'msg', ')', 'raise', 'StateTableError', '(', 'msg', ')', 'return', 'dynamodb', '=', 'boto3', '.', 'resource', '(', '"dynamodb"', ')', 'table', '=', 'dynamodb', '.', 'Table', '(', 'table_name', ')', 'logger', '.', 'info', '(', '"Putting {} -> {} in DynamoDB table {}"', '.', 'format', '(', 'key', ',', 'value', ',', 'table_name', ')', ')', 'if', 'serializer', ':', 'try', ':', 'value', '=', 'serializer', '(', 'value', ')', 'except', 'TypeError', ':', 'logger', '.', 'error', '(', '"Value for state key \'{}\' is not json-serializable"', '.', 'format', '(', 'key', ')', ')', 'raise', 'if', 'namespace', ':', 'key', '=', '"{}:{}"', '.', 'format', '(', 'namespace', ',', 'key', ')', 'if', 'shard_id', ':', 'key', '=', '"{}:{}"', '.', 'format', '(', 'shard_id', ',', 'key', ')', 'item', '=', '{', '"id"', ':', 'key', ',', '"value"', ':', 'value', '}', 'if', 'ttl', ':', 'item', '[', '"ttl"', ']', '=', '{', '"N"', ':', 'str', '(', 'int', '(', 'time', '.', 'time', '(', ')', '+', 'ttl', ')', ')', '}', '@', 'retry', '(', 'retry_on_exception', '=', '_is_critical_exception', ',', 'wait_exponential_multiplier', '=', '500', ',', 'wait_exponential_max', '=', '5000', ',', 'stop_max_delay', '=', '10000', ')', 'def', 'put_item', '(', ')', ':', 'try', ':', 'return', 'table', '.', 'put_item', '(', 'Item', '=', 'item', ')', 'except', 'Exception', 'as', 'err', ':', 'if', '_is_dynamodb_critical_exception', '(', 'err', ')', ':', 'raise', 'CriticalError', '(', 'err', ')', 'else', ':', 'raise', 'resp', '=', 'put_item', '(', ')', 'logger', '.', 'info', '(', '"Response from DynamoDB: \'{}\'"', '.', 'format', '(', 'resp', ')', ')', 'return', 'resp'] | Set Lambda state value. | ['Set', 'Lambda', 'state', 'value', '.'] | train | https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/state.py#L270-L324 |
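A hedged example call for set_state; the key, namespace, and environment names are placeholders, and a reachable DynamoDB state table plus AWS credentials are assumed.

set_state(
    "last_processed_id",
    {"id": 1234},
    namespace="orders",
    environment="dev",
    layer="processing",
    stage="prod",
    ttl=3600,          # let DynamoDB expire the item after one hour
)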
8,568 | mjirik/io3d | io3d/datareader.py | DataReader._read_with_sitk | def _read_with_sitk(datapath):
"""Reads file using SimpleITK. Returns array of pixels (image located in datapath) and its metadata.
:param datapath: path to file (img or dicom)
:return: tuple (data3d, metadata), where data3d is array of pixels
"""
try:
import SimpleITK as Sitk
except ImportError as e:
logger.error("Unable to import SimpleITK. On Windows try version 1.0.1")
image = Sitk.ReadImage(datapath)
data3d = dcmtools.get_pixel_array_from_sitk(image)
# data3d, original_dtype = dcmreaddata.get_pixel_array_from_dcmobj(image)
metadata = _metadata(image, datapath)
return data3d, metadata | python | def _read_with_sitk(datapath):
"""Reads file using SimpleITK. Returns array of pixels (image located in datapath) and its metadata.
:param datapath: path to file (img or dicom)
:return: tuple (data3d, metadata), where data3d is array of pixels
"""
try:
import SimpleITK as Sitk
except ImportError as e:
logger.error("Unable to import SimpleITK. On Windows try version 1.0.1")
image = Sitk.ReadImage(datapath)
data3d = dcmtools.get_pixel_array_from_sitk(image)
# data3d, original_dtype = dcmreaddata.get_pixel_array_from_dcmobj(image)
metadata = _metadata(image, datapath)
return data3d, metadata | ['def', '_read_with_sitk', '(', 'datapath', ')', ':', 'try', ':', 'import', 'SimpleITK', 'as', 'Sitk', 'except', 'ImportError', 'as', 'e', ':', 'logger', '.', 'error', '(', '"Unable to import SimpleITK. On Windows try version 1.0.1"', ')', 'image', '=', 'Sitk', '.', 'ReadImage', '(', 'datapath', ')', 'data3d', '=', 'dcmtools', '.', 'get_pixel_array_from_sitk', '(', 'image', ')', '# data3d, original_dtype = dcmreaddata.get_pixel_array_from_dcmobj(image)', 'metadata', '=', '_metadata', '(', 'image', ',', 'datapath', ')', 'return', 'data3d', ',', 'metadata'] | Reads file using SimpleITK. Returns array of pixels (image located in datapath) and its metadata.
:param datapath: path to file (img or dicom)
:return: tuple (data3d, metadata), where data3d is array of pixels | ['Reads', 'file', 'using', 'SimpleITK', '.', 'Returns', 'array', 'of', 'pixels', '(', 'image', 'located', 'in', 'datapath', ')', 'and', 'its', 'metadata', '.'] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/datareader.py#L259-L273 |
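A minimal hedged call of _read_with_sitk; the file path is a placeholder, SimpleITK must be installed, and in practice the reader is usually driven through io3d's public API rather than this helper.

data3d, metadata = DataReader._read_with_sitk("scan.dcm")
print(data3d.shape)
print(metadata)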
8,569 | has2k1/plotnine | plotnine/scales/scale.py | scale.map_df | def map_df(self, df):
"""
Map df
"""
if len(df) == 0:
return
aesthetics = set(self.aesthetics) & set(df.columns)
for ae in aesthetics:
df[ae] = self.map(df[ae])
return df | python | def map_df(self, df):
"""
Map df
"""
if len(df) == 0:
return
aesthetics = set(self.aesthetics) & set(df.columns)
for ae in aesthetics:
df[ae] = self.map(df[ae])
return df | ['def', 'map_df', '(', 'self', ',', 'df', ')', ':', 'if', 'len', '(', 'df', ')', '==', '0', ':', 'return', 'aesthetics', '=', 'set', '(', 'self', '.', 'aesthetics', ')', '&', 'set', '(', 'df', '.', 'columns', ')', 'for', 'ae', 'in', 'aesthetics', ':', 'df', '[', 'ae', ']', '=', 'self', '.', 'map', '(', 'df', '[', 'ae', ']', ')', 'return', 'df'] | Map df | ['Map', 'df'] | train | https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/scales/scale.py#L222-L233 |
8,570 | mandiant/ioc_writer | ioc_writer/ioc_api.py | fix_schema_node_ordering | def fix_schema_node_ordering(parent):
"""
Fix the ordering of children under the criteria node to ensure that IndicatorItem/Indicator order
is preserved, as per XML Schema.
:return:
"""
children = parent.getchildren()
i_nodes = [node for node in children if node.tag == 'IndicatorItem']
ii_nodes = [node for node in children if node.tag == 'Indicator']
if not ii_nodes:
return
# Remove all the children
for node in children:
parent.remove(node)
# Add the IndicatorItem nodes back
for node in i_nodes:
parent.append(node)
# Now add the Indicator nodes back
for node in ii_nodes:
parent.append(node)
# Now recurse
for node in ii_nodes:
fix_schema_node_ordering(node) | python | def fix_schema_node_ordering(parent):
"""
Fix the ordering of children under the criteria node to ensure that IndicatorItem/Indicator order
is preserved, as per XML Schema.
:return:
"""
children = parent.getchildren()
i_nodes = [node for node in children if node.tag == 'IndicatorItem']
ii_nodes = [node for node in children if node.tag == 'Indicator']
if not ii_nodes:
return
# Remove all the children
for node in children:
parent.remove(node)
# Add the IndicatorItem nodes back
for node in i_nodes:
parent.append(node)
# Now add the Indicator nodes back
for node in ii_nodes:
parent.append(node)
# Now recurse
for node in ii_nodes:
fix_schema_node_ordering(node) | ['def', 'fix_schema_node_ordering', '(', 'parent', ')', ':', 'children', '=', 'parent', '.', 'getchildren', '(', ')', 'i_nodes', '=', '[', 'node', 'for', 'node', 'in', 'children', 'if', 'node', '.', 'tag', '==', "'IndicatorItem'", ']', 'ii_nodes', '=', '[', 'node', 'for', 'node', 'in', 'children', 'if', 'node', '.', 'tag', '==', "'Indicator'", ']', 'if', 'not', 'ii_nodes', ':', 'return', '# Remove all the children', 'for', 'node', 'in', 'children', ':', 'parent', '.', 'remove', '(', 'node', ')', '# Add the Indicator nodes back', 'for', 'node', 'in', 'i_nodes', ':', 'parent', '.', 'append', '(', 'node', ')', '# Now add the IndicatorItem nodes back', 'for', 'node', 'in', 'ii_nodes', ':', 'parent', '.', 'append', '(', 'node', ')', '# Now recurse', 'for', 'node', 'in', 'ii_nodes', ':', 'fix_schema_node_ordering', '(', 'node', ')'] | Fix the ordering of children under the criteria node to ensure that IndicatorItem/Indicator order
is preserved, as per XML Schema.
:return: | ['Fix', 'the', 'ordering', 'of', 'children', 'under', 'the', 'criteria', 'node', 'to', 'ensure', 'that', 'IndicatorItem', '/', 'Indicator', 'order', 'is', 'preserved', 'as', 'per', 'XML', 'Schema', '.', ':', 'return', ':'] | train | https://github.com/mandiant/ioc_writer/blob/712247f3a10bdc2584fa18ac909fc763f71df21a/ioc_writer/ioc_api.py#L756-L778 |
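A small sketch of the reordering using lxml, whose element API matches the calls made above; element names follow the tags the function checks and all attributes are omitted.

from lxml import etree

top = etree.Element('Indicator')
etree.SubElement(top, 'Indicator')        # nested Indicator listed first...
etree.SubElement(top, 'IndicatorItem')    # ...IndicatorItem listed second
fix_schema_node_ordering(top)
print([child.tag for child in top])       # ['IndicatorItem', 'Indicator']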
8,571 | LettError/MutatorMath | Lib/mutatorMath/objects/mutator.py | Mutator.collectLocations | def collectLocations(self):
"""
Return a list with the locations of all objects.
"""
pts = []
for l, (value, deltaName) in self.items():
pts.append(Location(l))
return pts | python | def collectLocations(self):
"""
Return a list with the locations of all objects.
"""
pts = []
for l, (value, deltaName) in self.items():
pts.append(Location(l))
return pts | ['def', 'collectLocations', '(', 'self', ')', ':', 'pts', '=', '[', ']', 'for', 'l', ',', '(', 'value', ',', 'deltaName', ')', 'in', 'self', '.', 'items', '(', ')', ':', 'pts', '.', 'append', '(', 'Location', '(', 'l', ')', ')', 'return', 'pts'] | Return a dictionary with all objects. | ['Return', 'a', 'dictionary', 'with', 'all', 'objects', '.'] | train | https://github.com/LettError/MutatorMath/blob/10318fc4e7c9cee9df6130826829baea3054a42b/Lib/mutatorMath/objects/mutator.py#L166-L173 |
8,572 | diamondman/proteusisc | proteusisc/promise.py | TDOPromiseCollection.add | def add(self, promise, bitoffset, *, _offsetideal=None):
"""Add a promise to the promise collection at an optional offset.
Args:
promise: A TDOPromise to add to this collection.
bitoffset: An integer offset for this new promise in the collection.
_offsetideal: An integer offset for this new promise in the collection if the associated primitive supports arbitrary TDO control.
"""
#This Assumes that things are added in order.
#Sorting or checking should likely be added.
if _offsetideal is None:
_offsetideal = bitoffset
if isinstance(promise, TDOPromise):
newpromise = promise.makesubatoffset(
bitoffset, _offsetideal=_offsetideal)
self._promises.append(newpromise)
elif isinstance(promise, TDOPromiseCollection):
for p in promise._promises:
self.add(p, bitoffset, _offsetideal=_offsetideal) | python | def add(self, promise, bitoffset, *, _offsetideal=None):
"""Add a promise to the promise collection at an optional offset.
Args:
promise: A TDOPromise to add to this collection.
bitoffset: An integer offset for this new promise in the collection.
_offsetideal: An integer offset for this new promise in the collection if the associated primitive supports arbitrary TDO control.
"""
#This Assumes that things are added in order.
#Sorting or checking should likely be added.
if _offsetideal is None:
_offsetideal = bitoffset
if isinstance(promise, TDOPromise):
newpromise = promise.makesubatoffset(
bitoffset, _offsetideal=_offsetideal)
self._promises.append(newpromise)
elif isinstance(promise, TDOPromiseCollection):
for p in promise._promises:
self.add(p, bitoffset, _offsetideal=_offsetideal) | ['def', 'add', '(', 'self', ',', 'promise', ',', 'bitoffset', ',', '*', ',', '_offsetideal', '=', 'None', ')', ':', '#This Assumes that things are added in order.', '#Sorting or checking should likely be added.', 'if', '_offsetideal', 'is', 'None', ':', '_offsetideal', '=', 'bitoffset', 'if', 'isinstance', '(', 'promise', ',', 'TDOPromise', ')', ':', 'newpromise', '=', 'promise', '.', 'makesubatoffset', '(', 'bitoffset', ',', '_offsetideal', '=', '_offsetideal', ')', 'self', '.', '_promises', '.', 'append', '(', 'newpromise', ')', 'elif', 'isinstance', '(', 'promise', ',', 'TDOPromiseCollection', ')', ':', 'for', 'p', 'in', 'promise', '.', '_promises', ':', 'self', '.', 'add', '(', 'p', ',', 'bitoffset', ',', '_offsetideal', '=', '_offsetideal', ')'] | Add a promise to the promise collection at an optional offset.
Args:
promise: A TDOPromise to add to this collection.
bitoffset: An integer offset for this new promise in the collection.
_offsetideal: An integer offset for this new promise in the collection if the associated primitive supports arbitrary TDO control. | ['Add', 'a', 'promise', 'to', 'the', 'promise', 'collection', 'at', 'an', 'optional', 'offset', '.'] | train | https://github.com/diamondman/proteusisc/blob/7622b7b04e63f9dc0f5a04429ff78d9a490c9c5c/proteusisc/promise.py#L239-L257 |
8,573 | erocarrera/pefile | peutils.py | is_probably_packed | def is_probably_packed( pe ):
"""Returns True is there is a high likelihood that a file is packed or contains compressed data.
The sections of the PE file will be analyzed, if enough sections
look like containing compressed data and the data makes
up for more than 20% of the total file size, the function will
return True.
"""
# Calculate the length of the data up to the end of the last section in the
# file. Overlay data won't be taken into account
#
total_pe_data_length = len( pe.trim() )
# Assume that the file is packed when no data is available
if not total_pe_data_length:
return True
has_significant_amount_of_compressed_data = False
# If some of the sections have high entropy and they make for more than 20% of the file's size
# it's assumed that it could be an installer or a packed file
total_compressed_data = 0
for section in pe.sections:
s_entropy = section.get_entropy()
s_length = len( section.get_data() )
# The value of 7.4 is empirical, based on looking at a few files packed
# by different packers
if s_entropy > 7.4:
total_compressed_data += s_length
if ((1.0 * total_compressed_data)/total_pe_data_length) > .2:
has_significant_amount_of_compressed_data = True
return has_significant_amount_of_compressed_data | python | def is_probably_packed( pe ):
"""Returns True is there is a high likelihood that a file is packed or contains compressed data.
The sections of the PE file will be analyzed, if enough sections
look like containing compressed data and the data makes
up for more than 20% of the total file size, the function will
return True.
"""
# Calculate the length of the data up to the end of the last section in the
# file. Overlay data won't be taken into account
#
total_pe_data_length = len( pe.trim() )
# Assume that the file is packed when no data is available
if not total_pe_data_length:
return True
has_significant_amount_of_compressed_data = False
# If some of the sections have high entropy and they make for more than 20% of the file's size
# it's assumed that it could be an installer or a packed file
total_compressed_data = 0
for section in pe.sections:
s_entropy = section.get_entropy()
s_length = len( section.get_data() )
# The value of 7.4 is empirical, based on looking at a few files packed
# by different packers
if s_entropy > 7.4:
total_compressed_data += s_length
if ((1.0 * total_compressed_data)/total_pe_data_length) > .2:
has_significant_amount_of_compressed_data = True
return has_significant_amount_of_compressed_data | ['def', 'is_probably_packed', '(', 'pe', ')', ':', '# Calculate the lenth of the data up to the end of the last section in the', "# file. Overlay data won't be taken into account", '#', 'total_pe_data_length', '=', 'len', '(', 'pe', '.', 'trim', '(', ')', ')', '# Assume that the file is packed when no data is available', 'if', 'not', 'total_pe_data_length', ':', 'return', 'True', 'has_significant_amount_of_compressed_data', '=', 'False', "# If some of the sections have high entropy and they make for more than 20% of the file's size", "# it's assumed that it could be an installer or a packed file", 'total_compressed_data', '=', '0', 'for', 'section', 'in', 'pe', '.', 'sections', ':', 's_entropy', '=', 'section', '.', 'get_entropy', '(', ')', 's_length', '=', 'len', '(', 'section', '.', 'get_data', '(', ')', ')', '# The value of 7.4 is empircal, based on looking at a few files packed', '# by different packers', 'if', 's_entropy', '>', '7.4', ':', 'total_compressed_data', '+=', 's_length', 'if', '(', '(', '1.0', '*', 'total_compressed_data', ')', '/', 'total_pe_data_length', ')', '>', '.2', ':', 'has_significant_amount_of_compressed_data', '=', 'True', 'return', 'has_significant_amount_of_compressed_data'] | Returns True is there is a high likelihood that a file is packed or contains compressed data.
The sections of the PE file will be analyzed, if enough sections
look like containing compressed data and the data makes
up for more than 20% of the total file size, the function will
return True. | ['Returns', 'True', 'is', 'there', 'is', 'a', 'high', 'likelihood', 'that', 'a', 'file', 'is', 'packed', 'or', 'contains', 'compressed', 'data', '.'] | train | https://github.com/erocarrera/pefile/blob/8a78a2e251a3f2336c232bf411133927b479edf2/peutils.py#L550-L583 |
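A hedged usage sketch for is_probably_packed; the executable path is a placeholder, and peutils ships alongside pefile.

import pefile
import peutils

pe = pefile.PE("sample.exe")
if peutils.is_probably_packed(pe):
    print("high-entropy sections dominate; file is probably packed")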
8,574 | f3at/feat | src/feat/web/webserver.py | Site.cleanup | def cleanup(self):
'''
Cleans up existing connections giving them time to finish the current
request.
'''
self.debug("Cleanup called on Site.")
if not self.connections:
return defer.succeed(None)
self.debug("Waiting for all the connections to close.")
result = self._notifier.wait('idle')
for connection in self.connections:
connection.persistent = False
if not connection.requests:
connection.transport.loseConnection()
else:
request = connection.requests[0]
peer = connection.transport.getPeer()
self.debug("Site is still processing a %s request from %s:%s"
" to path: %s. It will be given a time to finish",
request.method, peer.host, peer.port, request.path)
return result | python | def cleanup(self):
'''
Cleans up existing connections giving them time to finish the current
request.
'''
self.debug("Cleanup called on Site.")
if not self.connections:
return defer.succeed(None)
self.debug("Waiting for all the connections to close.")
result = self._notifier.wait('idle')
for connection in self.connections:
connection.persistent = False
if not connection.requests:
connection.transport.loseConnection()
else:
request = connection.requests[0]
peer = connection.transport.getPeer()
self.debug("Site is still processing a %s request from %s:%s"
" to path: %s. It will be given a time to finish",
request.method, peer.host, peer.port, request.path)
return result | ['def', 'cleanup', '(', 'self', ')', ':', 'self', '.', 'debug', '(', '"Cleanup called on Site."', ')', 'if', 'not', 'self', '.', 'connections', ':', 'return', 'defer', '.', 'succeed', '(', 'None', ')', 'self', '.', 'debug', '(', '"Waiting for all the connections to close."', ')', 'result', '=', 'self', '.', '_notifier', '.', 'wait', '(', "'idle'", ')', 'for', 'connection', 'in', 'self', '.', 'connections', ':', 'connection', '.', 'persistent', '=', 'False', 'if', 'not', 'connection', '.', 'requests', ':', 'connection', '.', 'transport', '.', 'loseConnection', '(', ')', 'else', ':', 'request', '=', 'connection', '.', 'requests', '[', '0', ']', 'peer', '=', 'connection', '.', 'transport', '.', 'getPeer', '(', ')', 'self', '.', 'debug', '(', '"Site is still processing a %s request from %s:%s"', '" to path: %s. It will be given a time to finish"', ',', 'request', '.', 'method', ',', 'peer', '.', 'host', ',', 'peer', '.', 'port', ',', 'request', '.', 'path', ')', 'return', 'result'] | Cleans up existing connections giving them time to finish the currect
request. | ['Cleans', 'up', 'existing', 'connections', 'giving', 'them', 'time', 'to', 'finish', 'the', 'currect', 'request', '.'] | train | https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/web/webserver.py#L655-L675 |
8,575 | sighingnow/parsec.py | src/parsec/__init__.py | letter | def letter():
'''Parse a letter in alphabet.'''
@Parser
def letter_parser(text, index=0):
if index < len(text) and text[index].isalpha():
return Value.success(index + 1, text[index])
else:
return Value.failure(index, 'a letter')
return letter_parser | python | def letter():
'''Parse a letter in alphabet.'''
@Parser
def letter_parser(text, index=0):
if index < len(text) and text[index].isalpha():
return Value.success(index + 1, text[index])
else:
return Value.failure(index, 'a letter')
return letter_parser | ['def', 'letter', '(', ')', ':', '@', 'Parser', 'def', 'letter_parser', '(', 'text', ',', 'index', '=', '0', ')', ':', 'if', 'index', '<', 'len', '(', 'text', ')', 'and', 'text', '[', 'index', ']', '.', 'isalpha', '(', ')', ':', 'return', 'Value', '.', 'success', '(', 'index', '+', '1', ',', 'text', '[', 'index', ']', ')', 'else', ':', 'return', 'Value', '.', 'failure', '(', 'index', ',', "'a letter'", ')', 'return', 'letter_parser'] | Parse a letter in alphabet. | ['Parse', 'a', 'letter', 'in', 'alphabet', '.'] | train | https://github.com/sighingnow/parsec.py/blob/ed50e1e259142757470b925f8d20dfe5ad223af0/src/parsec/__init__.py#L605-L613 |
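A quick usage sketch of the letter combinator; Parser.parse is assumed to behave as in parsec.py, returning the parsed value on success and raising ParseError otherwise.

from parsec import letter

print(letter().parse("abc"))   # 'a'
# letter().parse("1bc")        # would fail: '1' is not a letter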
8,576 | mdiener/grace | grace/py27/slimit/parser.py | Parser.p_with_statement | def p_with_statement(self, p):
"""with_statement : WITH LPAREN expr RPAREN statement"""
p[0] = ast.With(expr=p[3], statement=p[5]) | python | def p_with_statement(self, p):
"""with_statement : WITH LPAREN expr RPAREN statement"""
p[0] = ast.With(expr=p[3], statement=p[5]) | ['def', 'p_with_statement', '(', 'self', ',', 'p', ')', ':', 'p', '[', '0', ']', '=', 'ast', '.', 'With', '(', 'expr', '=', 'p', '[', '3', ']', ',', 'statement', '=', 'p', '[', '5', ']', ')'] | with_statement : WITH LPAREN expr RPAREN statement | ['with_statement', ':', 'WITH', 'LPAREN', 'expr', 'RPAREN', 'statement'] | train | https://github.com/mdiener/grace/blob/2dab13a2cf636da5da989904c5885166fc94d36d/grace/py27/slimit/parser.py#L1078-L1080 |
8,577 | mozilla/django-tidings | tidings/events.py | Event._watches_belonging_to_user | def _watches_belonging_to_user(cls, user_or_email, object_id=None,
**filters):
"""Return a QuerySet of watches having the given user or email, having
(only) the given filters, and having the event_type and content_type
attrs of the class.
Matched Watches may be either confirmed or unconfirmed. They may
include duplicates if the get-then-create race condition in
:meth:`notify()` allowed them to be created.
If you pass an email, it will be matched against only the email
addresses of anonymous watches. At the moment, the only integration
point planned between anonymous and registered watches is the claiming
of anonymous watches of the same email address on user registration
confirmation.
If you pass the AnonymousUser, this will return an empty QuerySet.
"""
# If we have trouble distinguishing subsets and such, we could store a
# number_of_filters on the Watch.
cls._validate_filters(filters)
if isinstance(user_or_email, string_types):
user_condition = Q(email=user_or_email)
elif user_or_email.is_authenticated:
user_condition = Q(user=user_or_email)
else:
return Watch.objects.none()
# Filter by stuff in the Watch row:
watches = getattr(Watch, 'uncached', Watch.objects).filter(
user_condition,
Q(content_type=ContentType.objects.get_for_model(
cls.content_type)) if cls.content_type else Q(),
Q(object_id=object_id) if object_id else Q(),
event_type=cls.event_type).extra(
where=['(SELECT count(*) FROM tidings_watchfilter WHERE '
'tidings_watchfilter.watch_id='
'tidings_watch.id)=%s'],
params=[len(filters)])
# Optimization: If the subselect ends up being slow, store the number
# of filters in each Watch row or try a GROUP BY.
# Apply 1-to-many filters:
for k, v in iteritems(filters):
watches = watches.filter(filters__name=k,
filters__value=hash_to_unsigned(v))
return watches | python | def _watches_belonging_to_user(cls, user_or_email, object_id=None,
**filters):
"""Return a QuerySet of watches having the given user or email, having
(only) the given filters, and having the event_type and content_type
attrs of the class.
Matched Watches may be either confirmed or unconfirmed. They may
include duplicates if the get-then-create race condition in
:meth:`notify()` allowed them to be created.
If you pass an email, it will be matched against only the email
addresses of anonymous watches. At the moment, the only integration
point planned between anonymous and registered watches is the claiming
of anonymous watches of the same email address on user registration
confirmation.
If you pass the AnonymousUser, this will return an empty QuerySet.
"""
# If we have trouble distinguishing subsets and such, we could store a
# number_of_filters on the Watch.
cls._validate_filters(filters)
if isinstance(user_or_email, string_types):
user_condition = Q(email=user_or_email)
elif user_or_email.is_authenticated:
user_condition = Q(user=user_or_email)
else:
return Watch.objects.none()
# Filter by stuff in the Watch row:
watches = getattr(Watch, 'uncached', Watch.objects).filter(
user_condition,
Q(content_type=ContentType.objects.get_for_model(
cls.content_type)) if cls.content_type else Q(),
Q(object_id=object_id) if object_id else Q(),
event_type=cls.event_type).extra(
where=['(SELECT count(*) FROM tidings_watchfilter WHERE '
'tidings_watchfilter.watch_id='
'tidings_watch.id)=%s'],
params=[len(filters)])
# Optimization: If the subselect ends up being slow, store the number
# of filters in each Watch row or try a GROUP BY.
# Apply 1-to-many filters:
for k, v in iteritems(filters):
watches = watches.filter(filters__name=k,
filters__value=hash_to_unsigned(v))
return watches | ['def', '_watches_belonging_to_user', '(', 'cls', ',', 'user_or_email', ',', 'object_id', '=', 'None', ',', '*', '*', 'filters', ')', ':', '# If we have trouble distinguishing subsets and such, we could store a', '# number_of_filters on the Watch.', 'cls', '.', '_validate_filters', '(', 'filters', ')', 'if', 'isinstance', '(', 'user_or_email', ',', 'string_types', ')', ':', 'user_condition', '=', 'Q', '(', 'email', '=', 'user_or_email', ')', 'elif', 'user_or_email', '.', 'is_authenticated', ':', 'user_condition', '=', 'Q', '(', 'user', '=', 'user_or_email', ')', 'else', ':', 'return', 'Watch', '.', 'objects', '.', 'none', '(', ')', '# Filter by stuff in the Watch row:', 'watches', '=', 'getattr', '(', 'Watch', ',', "'uncached'", ',', 'Watch', '.', 'objects', ')', '.', 'filter', '(', 'user_condition', ',', 'Q', '(', 'content_type', '=', 'ContentType', '.', 'objects', '.', 'get_for_model', '(', 'cls', '.', 'content_type', ')', ')', 'if', 'cls', '.', 'content_type', 'else', 'Q', '(', ')', ',', 'Q', '(', 'object_id', '=', 'object_id', ')', 'if', 'object_id', 'else', 'Q', '(', ')', ',', 'event_type', '=', 'cls', '.', 'event_type', ')', '.', 'extra', '(', 'where', '=', '[', "'(SELECT count(*) FROM tidings_watchfilter WHERE '", "'tidings_watchfilter.watch_id='", "'tidings_watch.id)=%s'", ']', ',', 'params', '=', '[', 'len', '(', 'filters', ')', ']', ')', '# Optimization: If the subselect ends up being slow, store the number', '# of filters in each Watch row or try a GROUP BY.', '# Apply 1-to-many filters:', 'for', 'k', ',', 'v', 'in', 'iteritems', '(', 'filters', ')', ':', 'watches', '=', 'watches', '.', 'filter', '(', 'filters__name', '=', 'k', ',', 'filters__value', '=', 'hash_to_unsigned', '(', 'v', ')', ')', 'return', 'watches'] | Return a QuerySet of watches having the given user or email, having
(only) the given filters, and having the event_type and content_type
attrs of the class.
Matched Watches may be either confirmed or unconfirmed. They may
include duplicates if the get-then-create race condition in
:meth:`notify()` allowed them to be created.
If you pass an email, it will be matched against only the email
addresses of anonymous watches. At the moment, the only integration
point planned between anonymous and registered watches is the claiming
of anonymous watches of the same email address on user registration
confirmation.
If you pass the AnonymousUser, this will return an empty QuerySet. | ['Return', 'a', 'QuerySet', 'of', 'watches', 'having', 'the', 'given', 'user', 'or', 'email', 'having', '(', 'only', ')', 'the', 'given', 'filters', 'and', 'having', 'the', 'event_type', 'and', 'content_type', 'attrs', 'of', 'the', 'class', '.'] | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/events.py#L286-L335 |
8,578 | saltstack/salt | salt/modules/tuned.py | active | def active():
'''
Return current active profile
CLI Example:
.. code-block:: bash
salt '*' tuned.active
'''
# get the currently active profile
result = __salt__['cmd.run']('tuned-adm active')
pattern = re.compile(r'''(?P<stmt>Current active profile:) (?P<profile>\w+.*)''')
match = re.match(pattern, result)
return '{0}'.format(match.group('profile')) | python | def active():
'''
Return current active profile
CLI Example:
.. code-block:: bash
salt '*' tuned.active
'''
# get the currently active profile
result = __salt__['cmd.run']('tuned-adm active')
pattern = re.compile(r'''(?P<stmt>Current active profile:) (?P<profile>\w+.*)''')
match = re.match(pattern, result)
return '{0}'.format(match.group('profile')) | ['def', 'active', '(', ')', ':', '# turn off all profiles', 'result', '=', '__salt__', '[', "'cmd.run'", ']', '(', "'tuned-adm active'", ')', 'pattern', '=', 're', '.', 'compile', '(', "r'''(?P<stmt>Current active profile:) (?P<profile>\\w+.*)'''", ')', 'match', '=', 're', '.', 'match', '(', 'pattern', ',', 'result', ')', 'return', "'{0}'", '.', 'format', '(', 'match', '.', 'group', '(', "'profile'", ')', ')'] | Return current active profile
CLI Example:
.. code-block:: bash
salt '*' tuned.active | ['Return', 'current', 'active', 'profile'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/tuned.py#L58-L73 |
8,579 | rosshamish/catanlog | catanlog.py | CatanLog.log_player_trades_with_other_player | def log_player_trades_with_other_player(self, player, to_other, other, to_player):
"""
:param player: catan.game.Player
:param to_other: list of tuples, [(int, game.board.Terrain), (int, game.board.Terrain)]
:param other: catan.board.Player
:param to_player: list of tuples, [(int, game.board.Terrain), (int, game.board.Terrain)]
"""
self._log('{0} trades '.format(player.color))
# to_other items
self._log('[')
for i, (num, res) in enumerate(to_other):
if i > 0:
self._log(', ')
self._log('{0} {1}'.format(num, res.value))
self._log(']')
self._log(' to player {0} for '.format(other.color))
# to_player items
self._log('[')
for i, (num, res) in enumerate(to_player):
if i > 0:
self._log(', ')
self._log('{0} {1}'.format(num, res.value))
self._log(']')
self._log('\n') | python | def log_player_trades_with_other_player(self, player, to_other, other, to_player):
"""
:param player: catan.game.Player
:param to_other: list of tuples, [(int, game.board.Terrain), (int, game.board.Terrain)]
:param other: catan.board.Player
:param to_player: list of tuples, [(int, game.board.Terrain), (int, game.board.Terrain)]
"""
self._log('{0} trades '.format(player.color))
# to_other items
self._log('[')
for i, (num, res) in enumerate(to_other):
if i > 0:
self._log(', ')
self._log('{0} {1}'.format(num, res.value))
self._log(']')
self._log(' to player {0} for '.format(other.color))
# to_player items
self._log('[')
for i, (num, res) in enumerate(to_player):
if i > 0:
self._log(', ')
self._log('{0} {1}'.format(num, res.value))
self._log(']')
self._log('\n') | ['def', 'log_player_trades_with_other_player', '(', 'self', ',', 'player', ',', 'to_other', ',', 'other', ',', 'to_player', ')', ':', 'self', '.', '_log', '(', "'{0} trades '", '.', 'format', '(', 'player', '.', 'color', ')', ')', '# to_other items', 'self', '.', '_log', '(', "'['", ')', 'for', 'i', ',', '(', 'num', ',', 'res', ')', 'in', 'enumerate', '(', 'to_other', ')', ':', 'if', 'i', '>', '0', ':', 'self', '.', '_log', '(', "', '", ')', 'self', '.', '_log', '(', "'{0} {1}'", '.', 'format', '(', 'num', ',', 'res', '.', 'value', ')', ')', 'self', '.', '_log', '(', "']'", ')', 'self', '.', '_log', '(', "' to player {0} for '", '.', 'format', '(', 'other', '.', 'color', ')', ')', '# to_player items', 'self', '.', '_log', '(', "'['", ')', 'for', 'i', ',', '(', 'num', ',', 'res', ')', 'in', 'enumerate', '(', 'to_player', ')', ':', 'if', 'i', '>', '0', ':', 'self', '.', '_log', '(', "', '", ')', 'self', '.', '_log', '(', "'{0} {1}'", '.', 'format', '(', 'num', ',', 'res', '.', 'value', ')', ')', 'self', '.', '_log', '(', "']'", ')', 'self', '.', '_log', '(', "'\\n'", ')'] | :param player: catan.game.Player
:param to_other: list of tuples, [(int, game.board.Terrain), (int, game.board.Terrain)]
:param other: catan.board.Player
:param to_player: list of tuples, [(int, game.board.Terrain), (int, game.board.Terrain)] | [':', 'param', 'player', ':', 'catan', '.', 'game', '.', 'Player', ':', 'param', 'to_other', ':', 'list', 'of', 'tuples', '[', '(', 'int', 'game', '.', 'board', '.', 'Terrain', ')', '(', 'int', 'game', '.', 'board', '.', 'Terrain', ')', ']', ':', 'param', 'other', ':', 'catan', '.', 'board', '.', 'Player', ':', 'param', 'to_player', ':', 'list', 'of', 'tuples', '[', '(', 'int', 'game', '.', 'board', '.', 'Terrain', ')', '(', 'int', 'game', '.', 'board', '.', 'Terrain', ')', ']'] | train | https://github.com/rosshamish/catanlog/blob/6f204920d9b67fd53fc6ff6a1c7b6a756b009bf0/catanlog.py#L237-L264 |
8,580 | bunq/sdk_python | bunq/sdk/model/generated/object_.py | EventObject.get_referenced_object | def get_referenced_object(self):
"""
:rtype: core.BunqModel
:raise: BunqException
"""
if self._BunqMeTab is not None:
return self._BunqMeTab
if self._BunqMeTabResultResponse is not None:
return self._BunqMeTabResultResponse
if self._BunqMeFundraiserResult is not None:
return self._BunqMeFundraiserResult
if self._Card is not None:
return self._Card
if self._CardDebit is not None:
return self._CardDebit
if self._DraftPayment is not None:
return self._DraftPayment
if self._FeatureAnnouncement is not None:
return self._FeatureAnnouncement
if self._IdealMerchantTransaction is not None:
return self._IdealMerchantTransaction
if self._Invoice is not None:
return self._Invoice
if self._ScheduledPayment is not None:
return self._ScheduledPayment
if self._ScheduledPaymentBatch is not None:
return self._ScheduledPaymentBatch
if self._ScheduledInstance is not None:
return self._ScheduledInstance
if self._MasterCardAction is not None:
return self._MasterCardAction
if self._BankSwitchServiceNetherlandsIncomingPayment is not None:
return self._BankSwitchServiceNetherlandsIncomingPayment
if self._Payment is not None:
return self._Payment
if self._PaymentBatch is not None:
return self._PaymentBatch
if self._RequestInquiryBatch is not None:
return self._RequestInquiryBatch
if self._RequestInquiry is not None:
return self._RequestInquiry
if self._RequestResponse is not None:
return self._RequestResponse
if self._RewardRecipient is not None:
return self._RewardRecipient
if self._RewardSender is not None:
return self._RewardSender
if self._ShareInviteBankInquiryBatch is not None:
return self._ShareInviteBankInquiryBatch
if self._ShareInviteBankInquiry is not None:
return self._ShareInviteBankInquiry
if self._ShareInviteBankResponse is not None:
return self._ShareInviteBankResponse
if self._SofortMerchantTransaction is not None:
return self._SofortMerchantTransaction
if self._TabResultInquiry is not None:
return self._TabResultInquiry
if self._TabResultResponse is not None:
return self._TabResultResponse
if self._TransferwiseTransfer is not None:
return self._TransferwiseTransfer
raise exception.BunqException(self._ERROR_NULL_FIELDS) | python | def get_referenced_object(self):
"""
:rtype: core.BunqModel
:raise: BunqException
"""
if self._BunqMeTab is not None:
return self._BunqMeTab
if self._BunqMeTabResultResponse is not None:
return self._BunqMeTabResultResponse
if self._BunqMeFundraiserResult is not None:
return self._BunqMeFundraiserResult
if self._Card is not None:
return self._Card
if self._CardDebit is not None:
return self._CardDebit
if self._DraftPayment is not None:
return self._DraftPayment
if self._FeatureAnnouncement is not None:
return self._FeatureAnnouncement
if self._IdealMerchantTransaction is not None:
return self._IdealMerchantTransaction
if self._Invoice is not None:
return self._Invoice
if self._ScheduledPayment is not None:
return self._ScheduledPayment
if self._ScheduledPaymentBatch is not None:
return self._ScheduledPaymentBatch
if self._ScheduledInstance is not None:
return self._ScheduledInstance
if self._MasterCardAction is not None:
return self._MasterCardAction
if self._BankSwitchServiceNetherlandsIncomingPayment is not None:
return self._BankSwitchServiceNetherlandsIncomingPayment
if self._Payment is not None:
return self._Payment
if self._PaymentBatch is not None:
return self._PaymentBatch
if self._RequestInquiryBatch is not None:
return self._RequestInquiryBatch
if self._RequestInquiry is not None:
return self._RequestInquiry
if self._RequestResponse is not None:
return self._RequestResponse
if self._RewardRecipient is not None:
return self._RewardRecipient
if self._RewardSender is not None:
return self._RewardSender
if self._ShareInviteBankInquiryBatch is not None:
return self._ShareInviteBankInquiryBatch
if self._ShareInviteBankInquiry is not None:
return self._ShareInviteBankInquiry
if self._ShareInviteBankResponse is not None:
return self._ShareInviteBankResponse
if self._SofortMerchantTransaction is not None:
return self._SofortMerchantTransaction
if self._TabResultInquiry is not None:
return self._TabResultInquiry
if self._TabResultResponse is not None:
return self._TabResultResponse
if self._TransferwiseTransfer is not None:
return self._TransferwiseTransfer
raise exception.BunqException(self._ERROR_NULL_FIELDS) | ['def', 'get_referenced_object', '(', 'self', ')', ':', 'if', 'self', '.', '_BunqMeTab', 'is', 'not', 'None', ':', 'return', 'self', '.', '_BunqMeTab', 'if', 'self', '.', '_BunqMeTabResultResponse', 'is', 'not', 'None', ':', 'return', 'self', '.', '_BunqMeTabResultResponse', 'if', 'self', '.', '_BunqMeFundraiserResult', 'is', 'not', 'None', ':', 'return', 'self', '.', '_BunqMeFundraiserResult', 'if', 'self', '.', '_Card', 'is', 'not', 'None', ':', 'return', 'self', '.', '_Card', 'if', 'self', '.', '_CardDebit', 'is', 'not', 'None', ':', 'return', 'self', '.', '_CardDebit', 'if', 'self', '.', '_DraftPayment', 'is', 'not', 'None', ':', 'return', 'self', '.', '_DraftPayment', 'if', 'self', '.', '_FeatureAnnouncement', 'is', 'not', 'None', ':', 'return', 'self', '.', '_FeatureAnnouncement', 'if', 'self', '.', '_IdealMerchantTransaction', 'is', 'not', 'None', ':', 'return', 'self', '.', '_IdealMerchantTransaction', 'if', 'self', '.', '_Invoice', 'is', 'not', 'None', ':', 'return', 'self', '.', '_Invoice', 'if', 'self', '.', '_ScheduledPayment', 'is', 'not', 'None', ':', 'return', 'self', '.', '_ScheduledPayment', 'if', 'self', '.', '_ScheduledPaymentBatch', 'is', 'not', 'None', ':', 'return', 'self', '.', '_ScheduledPaymentBatch', 'if', 'self', '.', '_ScheduledInstance', 'is', 'not', 'None', ':', 'return', 'self', '.', '_ScheduledInstance', 'if', 'self', '.', '_MasterCardAction', 'is', 'not', 'None', ':', 'return', 'self', '.', '_MasterCardAction', 'if', 'self', '.', '_BankSwitchServiceNetherlandsIncomingPayment', 'is', 'not', 'None', ':', 'return', 'self', '.', '_BankSwitchServiceNetherlandsIncomingPayment', 'if', 'self', '.', '_Payment', 'is', 'not', 'None', ':', 'return', 'self', '.', '_Payment', 'if', 'self', '.', '_PaymentBatch', 'is', 'not', 'None', ':', 'return', 'self', '.', '_PaymentBatch', 'if', 'self', '.', '_RequestInquiryBatch', 'is', 'not', 'None', ':', 'return', 'self', '.', '_RequestInquiryBatch', 'if', 'self', '.', '_RequestInquiry', 'is', 'not', 'None', ':', 'return', 'self', '.', '_RequestInquiry', 'if', 'self', '.', '_RequestResponse', 'is', 'not', 'None', ':', 'return', 'self', '.', '_RequestResponse', 'if', 'self', '.', '_RewardRecipient', 'is', 'not', 'None', ':', 'return', 'self', '.', '_RewardRecipient', 'if', 'self', '.', '_RewardSender', 'is', 'not', 'None', ':', 'return', 'self', '.', '_RewardSender', 'if', 'self', '.', '_ShareInviteBankInquiryBatch', 'is', 'not', 'None', ':', 'return', 'self', '.', '_ShareInviteBankInquiryBatch', 'if', 'self', '.', '_ShareInviteBankInquiry', 'is', 'not', 'None', ':', 'return', 'self', '.', '_ShareInviteBankInquiry', 'if', 'self', '.', '_ShareInviteBankResponse', 'is', 'not', 'None', ':', 'return', 'self', '.', '_ShareInviteBankResponse', 'if', 'self', '.', '_SofortMerchantTransaction', 'is', 'not', 'None', ':', 'return', 'self', '.', '_SofortMerchantTransaction', 'if', 'self', '.', '_TabResultInquiry', 'is', 'not', 'None', ':', 'return', 'self', '.', '_TabResultInquiry', 'if', 'self', '.', '_TabResultResponse', 'is', 'not', 'None', ':', 'return', 'self', '.', '_TabResultResponse', 'if', 'self', '.', '_TransferwiseTransfer', 'is', 'not', 'None', ':', 'return', 'self', '.', '_TransferwiseTransfer', 'raise', 'exception', '.', 'BunqException', '(', 'self', '.', '_ERROR_NULL_FIELDS', ')'] | :rtype: core.BunqModel
:raise: BunqException | [':', 'rtype', ':', 'core', '.', 'BunqModel', ':', 'raise', ':', 'BunqException'] | train | https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/object_.py#L3552-L3642 |
8,581 | kstaniek/condoor | condoor/__main__.py | URL.convert | def convert(self, value, param, ctx):
"""Convert to URL scheme."""
if not isinstance(value, tuple):
parsed = urlparse.urlparse(value)
if parsed.scheme not in ('telnet', 'ssh'):
self.fail('invalid URL scheme (%s). Only telnet and ssh URLs are '
'allowed' % parsed, param, ctx)
return value | python | def convert(self, value, param, ctx):
"""Convert to URL scheme."""
if not isinstance(value, tuple):
parsed = urlparse.urlparse(value)
if parsed.scheme not in ('telnet', 'ssh'):
self.fail('invalid URL scheme (%s). Only telnet and ssh URLs are '
'allowed' % parsed, param, ctx)
return value | ['def', 'convert', '(', 'self', ',', 'value', ',', 'param', ',', 'ctx', ')', ':', 'if', 'not', 'isinstance', '(', 'value', ',', 'tuple', ')', ':', 'parsed', '=', 'urlparse', '.', 'urlparse', '(', 'value', ')', 'if', 'parsed', '.', 'scheme', 'not', 'in', '(', "'telnet'", ',', "'ssh'", ')', ':', 'self', '.', 'fail', '(', "'invalid URL scheme (%s). Only telnet and ssh URLs are '", "'allowed'", '%', 'parsed', ',', 'param', ',', 'ctx', ')', 'return', 'value'] | Convert to URL scheme. | ['Convert', 'to', 'URL', 'scheme', '.'] | train | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/__main__.py#L40-L47 |
8,582 | contentful/contentful-management.py | contentful_management/editor_interface.py | EditorInterface.base_url | def base_url(self, space_id, content_type_id, environment_id=None, **kwargs):
"""
Returns the URI for the editor interface.
"""
return "spaces/{0}{1}/content_types/{2}/editor_interface".format(
space_id,
'/environments/{0}'.format(environment_id) if environment_id is not None else '',
content_type_id
) | python | def base_url(self, space_id, content_type_id, environment_id=None, **kwargs):
"""
Returns the URI for the editor interface.
"""
return "spaces/{0}{1}/content_types/{2}/editor_interface".format(
space_id,
'/environments/{0}'.format(environment_id) if environment_id is not None else '',
content_type_id
) | ['def', 'base_url', '(', 'self', ',', 'space_id', ',', 'content_type_id', ',', 'environment_id', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'return', '"spaces/{0}{1}/content_types/{2}/editor_interface"', '.', 'format', '(', 'space_id', ',', "'/environments/{0}'", '.', 'format', '(', 'environment_id', ')', 'if', 'environment_id', 'is', 'not', 'None', 'else', "''", ',', 'content_type_id', ')'] | Returns the URI for the editor interface. | ['Returns', 'the', 'URI', 'for', 'the', 'editor', 'interface', '.'] | train | https://github.com/contentful/contentful-management.py/blob/707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0/contentful_management/editor_interface.py#L27-L36 |
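As an aside on the EditorInterface.base_url row above, a minimal sketch of the URI layout it builds; 'cdn-space', 'master' and 'blogPost' are invented identifiers rather than real Contentful IDs, and the helper below simply mirrors the format string shown in the row:

# Mirrors the format string used by EditorInterface.base_url in the row above.
def editor_interface_url(space_id, content_type_id, environment_id=None):
    return "spaces/{0}{1}/content_types/{2}/editor_interface".format(
        space_id,
        '/environments/{0}'.format(environment_id) if environment_id is not None else '',
        content_type_id)

# With an environment, that segment sits between the space and the content type.
assert editor_interface_url('cdn-space', 'blogPost', 'master') == \
    'spaces/cdn-space/environments/master/content_types/blogPost/editor_interface'
# Without one, the environment segment is omitted entirely.
assert editor_interface_url('cdn-space', 'blogPost') == \
    'spaces/cdn-space/content_types/blogPost/editor_interface'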
8,583 | mitsei/dlkit | dlkit/records/assessment/qti/inline_choice_records.py | InlineChoiceTextQuestionFormRecord._init_metadata | def _init_metadata(self):
"""stub"""
QuestionTextFormRecord._init_metadata(self)
self._choices_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'choices'),
'element_label': 'Choices',
'instructions': 'Enter as many choices as you wish',
'required': True,
'read_only': False,
'linked': False,
'array': True,
'default_object_values': [{}],
'syntax': 'OBJECT',
'object_set': []
}
self._choice_text_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'choice_text'),
'element_label': 'choice text',
'instructions': 'enter the text for this choice',
'required': True,
'read_only': False,
'linked': False,
'array': False,
'default_string_values': [{
'text': '',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
}],
'syntax': 'STRING',
'minimum_string_length': 0,
'maximum_string_length': 1024,
'string_set': []
} | python | def _init_metadata(self):
"""stub"""
QuestionTextFormRecord._init_metadata(self)
self._choices_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'choices'),
'element_label': 'Choices',
'instructions': 'Enter as many choices as you wish',
'required': True,
'read_only': False,
'linked': False,
'array': True,
'default_object_values': [{}],
'syntax': 'OBJECT',
'object_set': []
}
self._choice_text_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'choice_text'),
'element_label': 'choice text',
'instructions': 'enter the text for this choice',
'required': True,
'read_only': False,
'linked': False,
'array': False,
'default_string_values': [{
'text': '',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
}],
'syntax': 'STRING',
'minimum_string_length': 0,
'maximum_string_length': 1024,
'string_set': []
} | ['def', '_init_metadata', '(', 'self', ')', ':', 'QuestionTextFormRecord', '.', '_init_metadata', '(', 'self', ')', 'self', '.', '_choices_metadata', '=', '{', "'element_id'", ':', 'Id', '(', 'self', '.', 'my_osid_object_form', '.', '_authority', ',', 'self', '.', 'my_osid_object_form', '.', '_namespace', ',', "'choices'", ')', ',', "'element_label'", ':', "'Choices'", ',', "'instructions'", ':', "'Enter as many choices as you wish'", ',', "'required'", ':', 'True', ',', "'read_only'", ':', 'False', ',', "'linked'", ':', 'False', ',', "'array'", ':', 'True', ',', "'default_object_values'", ':', '[', '{', '}', ']', ',', "'syntax'", ':', "'OBJECT'", ',', "'object_set'", ':', '[', ']', '}', 'self', '.', '_choice_text_metadata', '=', '{', "'element_id'", ':', 'Id', '(', 'self', '.', 'my_osid_object_form', '.', '_authority', ',', 'self', '.', 'my_osid_object_form', '.', '_namespace', ',', "'choice_text'", ')', ',', "'element_label'", ':', "'choice text'", ',', "'instructions'", ':', "'enter the text for this choice'", ',', "'required'", ':', 'True', ',', "'read_only'", ':', 'False', ',', "'linked'", ':', 'False', ',', "'array'", ':', 'False', ',', "'default_string_values'", ':', '[', '{', "'text'", ':', "''", ',', "'languageTypeId'", ':', 'str', '(', 'DEFAULT_LANGUAGE_TYPE', ')', ',', "'scriptTypeId'", ':', 'str', '(', 'DEFAULT_SCRIPT_TYPE', ')', ',', "'formatTypeId'", ':', 'str', '(', 'DEFAULT_FORMAT_TYPE', ')', ',', '}', ']', ',', "'syntax'", ':', "'STRING'", ',', "'minimum_string_length'", ':', '0', ',', "'maximum_string_length'", ':', '1024', ',', "'string_set'", ':', '[', ']', '}'] | stub | ['stub'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/qti/inline_choice_records.py#L255-L292 |
8,584 | spacetelescope/stsci.tools | lib/stsci/tools/basicpar.py | IrafPar._setChoiceDict | def _setChoiceDict(self):
"""Create dictionary for choice list"""
# value is name of choice parameter (same as key)
self.choiceDict = {}
for c in self.choice: self.choiceDict[c] = c | python | def _setChoiceDict(self):
"""Create dictionary for choice list"""
# value is name of choice parameter (same as key)
self.choiceDict = {}
for c in self.choice: self.choiceDict[c] = c | ['def', '_setChoiceDict', '(', 'self', ')', ':', '# value is name of choice parameter (same as key)', 'self', '.', 'choiceDict', '=', '{', '}', 'for', 'c', 'in', 'self', '.', 'choice', ':', 'self', '.', 'choiceDict', '[', 'c', ']', '=', 'c'] | Create dictionary for choice list | ['Create', 'dictionary', 'for', 'choice', 'list'] | train | https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/basicpar.py#L621-L625 |
8,585 | dhermes/bezier | scripts/doc_template_release.py | main | def main():
"""Populate the templates with release-specific fields.
Requires user input for the CircleCI, AppVeyor, Coveralls.io and Travis
build IDs.
"""
version = get_version()
circleci_build = six.moves.input("CircleCI Build ID: ")
appveyor_build = six.moves.input("AppVeyor Build ID: ")
coveralls_build = six.moves.input("Coveralls Build ID: ")
travis_build = six.moves.input("Travis Build ID: ")
populate_readme(
version, circleci_build, appveyor_build, coveralls_build, travis_build
)
populate_index(
version, circleci_build, appveyor_build, coveralls_build, travis_build
)
populate_native_libraries(version)
populate_development(version) | python | def main():
"""Populate the templates with release-specific fields.
Requires user input for the CircleCI, AppVeyor, Coveralls.io and Travis
build IDs.
"""
version = get_version()
circleci_build = six.moves.input("CircleCI Build ID: ")
appveyor_build = six.moves.input("AppVeyor Build ID: ")
coveralls_build = six.moves.input("Coveralls Build ID: ")
travis_build = six.moves.input("Travis Build ID: ")
populate_readme(
version, circleci_build, appveyor_build, coveralls_build, travis_build
)
populate_index(
version, circleci_build, appveyor_build, coveralls_build, travis_build
)
populate_native_libraries(version)
populate_development(version) | ['def', 'main', '(', ')', ':', 'version', '=', 'get_version', '(', ')', 'circleci_build', '=', 'six', '.', 'moves', '.', 'input', '(', '"CircleCI Build ID: "', ')', 'appveyor_build', '=', 'six', '.', 'moves', '.', 'input', '(', '"AppVeyor Build ID: "', ')', 'coveralls_build', '=', 'six', '.', 'moves', '.', 'input', '(', '"Coveralls Build ID: "', ')', 'travis_build', '=', 'six', '.', 'moves', '.', 'input', '(', '"Travis Build ID: "', ')', 'populate_readme', '(', 'version', ',', 'circleci_build', ',', 'appveyor_build', ',', 'coveralls_build', ',', 'travis_build', ')', 'populate_index', '(', 'version', ',', 'circleci_build', ',', 'appveyor_build', ',', 'coveralls_build', ',', 'travis_build', ')', 'populate_native_libraries', '(', 'version', ')', 'populate_development', '(', 'version', ')'] | Populate the templates with release-specific fields.
Requires user input for the CircleCI, AppVeyor, Coveralls.io and Travis
build IDs. | ['Populate', 'the', 'templates', 'with', 'release', '-', 'specific', 'fields', '.'] | train | https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/scripts/doc_template_release.py#L161-L179 |
8,586 | cyrus-/cypy | cypy/np/plotting.py | raster | def raster(times, indices, max_time=None, max_index=None,
x_label="Timestep", y_label="Index", **kwargs):
"""Plots a raster plot given times and indices of events."""
# set default size to 1
if 's' not in kwargs:
kwargs['s'] = 1
scatter(times, indices, **kwargs)
if max_time is None:
max_time = max(times)
if max_index is None:
max_index = max(indices)
axis((0, max_time, 0, max_index))
if x_label is not None: xlabel(x_label)
if y_label is not None: ylabel(y_label) | python | def raster(times, indices, max_time=None, max_index=None,
x_label="Timestep", y_label="Index", **kwargs):
"""Plots a raster plot given times and indices of events."""
# set default size to 1
if 's' not in kwargs:
kwargs['s'] = 1
scatter(times, indices, **kwargs)
if max_time is None:
max_time = max(times)
if max_index is None:
max_index = max(indices)
axis((0, max_time, 0, max_index))
if x_label is not None: xlabel(x_label)
if y_label is not None: ylabel(y_label) | ['def', 'raster', '(', 'times', ',', 'indices', ',', 'max_time', '=', 'None', ',', 'max_index', '=', 'None', ',', 'x_label', '=', '"Timestep"', ',', 'y_label', '=', '"Index"', ',', '*', '*', 'kwargs', ')', ':', '# set default size to 1', 'if', "'s'", 'not', 'in', 'kwargs', ':', 'kwargs', '[', "'s'", ']', '=', '1', 'scatter', '(', 'times', ',', 'indices', ',', '*', '*', 'kwargs', ')', 'if', 'max_time', 'is', 'None', ':', 'max_time', '=', 'max', '(', 'times', ')', 'if', 'max_index', 'is', 'None', ':', 'max_index', '=', 'max', '(', 'indices', ')', 'axis', '(', '(', '0', ',', 'max_time', ',', '0', ',', 'max_index', ')', ')', 'if', 'x_label', 'is', 'not', 'None', ':', 'xlabel', '(', 'x_label', ')', 'if', 'y_label', 'is', 'not', 'None', ':', 'ylabel', '(', 'y_label', ')'] | Plots a raster plot given times and indices of events. | ['Plots', 'a', 'raster', 'plot', 'given', 'times', 'and', 'indices', 'of', 'events', '.'] | train | https://github.com/cyrus-/cypy/blob/04bb59e91fa314e8cf987743189c77a9b6bc371d/cypy/np/plotting.py#L25-L39 |
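A usage sketch for the raster() helper in the row above; the spike times and indices are synthetic, the import path is inferred from the file path shown in the row, and s=2 is just an example of a keyword forwarded on to scatter():

import numpy as np
from matplotlib import pyplot as plt
from cypy.np.plotting import raster  # module path taken from the row's file path

rng = np.random.default_rng(0)
times = rng.integers(0, 1000, size=500)    # synthetic event timesteps
indices = rng.integers(0, 64, size=500)    # synthetic channel indices
raster(times, indices, max_time=1000, max_index=64, s=2)  # s passes through to scatter
plt.show()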
8,587 | saltstack/salt | salt/modules/virt.py | _parse_caps_devices_features | def _parse_caps_devices_features(node):
'''
Parse the devices or features list of the domain capatilities
'''
result = {}
for child in node:
if child.get('supported') == 'yes':
enums = [_parse_caps_enum(node) for node in child.findall('enum')]
result[child.tag] = {item[0]: item[1] for item in enums if item[0]}
return result | python | def _parse_caps_devices_features(node):
'''
Parse the devices or features list of the domain capatilities
'''
result = {}
for child in node:
if child.get('supported') == 'yes':
enums = [_parse_caps_enum(node) for node in child.findall('enum')]
result[child.tag] = {item[0]: item[1] for item in enums if item[0]}
return result | ['def', '_parse_caps_devices_features', '(', 'node', ')', ':', 'result', '=', '{', '}', 'for', 'child', 'in', 'node', ':', 'if', 'child', '.', 'get', '(', "'supported'", ')', '==', "'yes'", ':', 'enums', '=', '[', '_parse_caps_enum', '(', 'node', ')', 'for', 'node', 'in', 'child', '.', 'findall', '(', "'enum'", ')', ']', 'result', '[', 'child', '.', 'tag', ']', '=', '{', 'item', '[', '0', ']', ':', 'item', '[', '1', ']', 'for', 'item', 'in', 'enums', 'if', 'item', '[', '0', ']', '}', 'return', 'result'] | Parse the devices or features list of the domain capatilities | ['Parse', 'the', 'devices', 'or', 'features', 'list', 'of', 'the', 'domain', 'capatilities'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L4312-L4321 |
8,588 | noxdafox/clipspy | clips/classes.py | Instance.unmake | def unmake(self):
"""This method is equivalent to delete except that it uses
message-passing instead of directly deleting the instance.
"""
if lib.EnvUnmakeInstance(self._env, self._ist) != 1:
raise CLIPSError(self._env) | python | def unmake(self):
"""This method is equivalent to delete except that it uses
message-passing instead of directly deleting the instance.
"""
if lib.EnvUnmakeInstance(self._env, self._ist) != 1:
raise CLIPSError(self._env) | ['def', 'unmake', '(', 'self', ')', ':', 'if', 'lib', '.', 'EnvUnmakeInstance', '(', 'self', '.', '_env', ',', 'self', '.', '_ist', ')', '!=', '1', ':', 'raise', 'CLIPSError', '(', 'self', '.', '_env', ')'] | This method is equivalent to delete except that it uses
message-passing instead of directly deleting the instance. | ['This', 'method', 'is', 'equivalent', 'to', 'delete', 'except', 'that', 'it', 'uses', 'message', '-', 'passing', 'instead', 'of', 'directly', 'deleting', 'the', 'instance', '.'] | train | https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/classes.py#L659-L665 |
8,589 | datajoint/datajoint-python | datajoint/autopopulate.py | AutoPopulate.key_source | def key_source(self):
"""
:return: the relation whose primary key values are passed, sequentially, to the
``make`` method when populate() is called.
The default value is the join of the parent relations.
Users may override to change the granularity or the scope of populate() calls.
"""
def parent_gen(self):
if self.target.full_table_name not in self.connection.dependencies:
self.connection.dependencies.load()
for parent_name, fk_props in self.target.parents(primary=True).items():
if not parent_name.isdigit(): # simple foreign key
yield FreeTable(self.connection, parent_name).proj()
else:
grandparent = list(self.connection.dependencies.in_edges(parent_name))[0][0]
yield FreeTable(self.connection, grandparent).proj(**{
attr: ref for attr, ref in fk_props['attr_map'].items() if ref != attr})
if self._key_source is None:
parents = parent_gen(self)
try:
self._key_source = next(parents)
except StopIteration:
raise DataJointError('A relation must have primary dependencies for auto-populate to work') from None
for q in parents:
self._key_source *= q
return self._key_source | python | def key_source(self):
"""
:return: the relation whose primary key values are passed, sequentially, to the
``make`` method when populate() is called.
The default value is the join of the parent relations.
Users may override to change the granularity or the scope of populate() calls.
"""
def parent_gen(self):
if self.target.full_table_name not in self.connection.dependencies:
self.connection.dependencies.load()
for parent_name, fk_props in self.target.parents(primary=True).items():
if not parent_name.isdigit(): # simple foreign key
yield FreeTable(self.connection, parent_name).proj()
else:
grandparent = list(self.connection.dependencies.in_edges(parent_name))[0][0]
yield FreeTable(self.connection, grandparent).proj(**{
attr: ref for attr, ref in fk_props['attr_map'].items() if ref != attr})
if self._key_source is None:
parents = parent_gen(self)
try:
self._key_source = next(parents)
except StopIteration:
raise DataJointError('A relation must have primary dependencies for auto-populate to work') from None
for q in parents:
self._key_source *= q
return self._key_source | ['def', 'key_source', '(', 'self', ')', ':', 'def', 'parent_gen', '(', 'self', ')', ':', 'if', 'self', '.', 'target', '.', 'full_table_name', 'not', 'in', 'self', '.', 'connection', '.', 'dependencies', ':', 'self', '.', 'connection', '.', 'dependencies', '.', 'load', '(', ')', 'for', 'parent_name', ',', 'fk_props', 'in', 'self', '.', 'target', '.', 'parents', '(', 'primary', '=', 'True', ')', '.', 'items', '(', ')', ':', 'if', 'not', 'parent_name', '.', 'isdigit', '(', ')', ':', '# simple foreign key', 'yield', 'FreeTable', '(', 'self', '.', 'connection', ',', 'parent_name', ')', '.', 'proj', '(', ')', 'else', ':', 'grandparent', '=', 'list', '(', 'self', '.', 'connection', '.', 'dependencies', '.', 'in_edges', '(', 'parent_name', ')', ')', '[', '0', ']', '[', '0', ']', 'yield', 'FreeTable', '(', 'self', '.', 'connection', ',', 'grandparent', ')', '.', 'proj', '(', '*', '*', '{', 'attr', ':', 'ref', 'for', 'attr', ',', 'ref', 'in', 'fk_props', '[', "'attr_map'", ']', '.', 'items', '(', ')', 'if', 'ref', '!=', 'attr', '}', ')', 'if', 'self', '.', '_key_source', 'is', 'None', ':', 'parents', '=', 'parent_gen', '(', 'self', ')', 'try', ':', 'self', '.', '_key_source', '=', 'next', '(', 'parents', ')', 'except', 'StopIteration', ':', 'raise', 'DataJointError', '(', "'A relation must have primary dependencies for auto-populate to work'", ')', 'from', 'None', 'for', 'q', 'in', 'parents', ':', 'self', '.', '_key_source', '*=', 'q', 'return', 'self', '.', '_key_source'] | :return: the relation whose primary key values are passed, sequentially, to the
``make`` method when populate() is called.
The default value is the join of the parent relations.
Users may override to change the granularity or the scope of populate() calls. | [':', 'return', ':', 'the', 'relation', 'whose', 'primary', 'key', 'values', 'are', 'passed', 'sequentially', 'to', 'the', 'make', 'method', 'when', 'populate', '()', 'is', 'called', '.', 'The', 'default', 'value', 'is', 'the', 'join', 'of', 'the', 'parent', 'relations', '.', 'Users', 'may', 'override', 'to', 'change', 'the', 'granularity', 'or', 'the', 'scope', 'of', 'populate', '()', 'calls', '.'] | train | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/autopopulate.py#L28-L54 |
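Since key_source above is meant to be overridden to narrow what populate() iterates over, a hedged sketch of that pattern follows; the schema name, the two tables and the sample_rate restriction are invented for illustration:

import datajoint as dj

schema = dj.schema('tutorial')  # hypothetical database name

@schema
class Recording(dj.Manual):
    definition = """
    recording_id : int
    ---
    sample_rate  : float
    """

@schema
class Spectra(dj.Computed):
    definition = """
    -> Recording
    ---
    n_windows : int
    """

    @property
    def key_source(self):
        # Only recordings above 1 kHz are handed to make(); the default would be
        # the projected join of all primary parents, as in the property above.
        return Recording & 'sample_rate > 1000'

    def make(self, key):
        self.insert1(dict(key, n_windows=0))  # placeholder computation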
8,590 | pypa/pipenv | pipenv/utils.py | translate_markers | def translate_markers(pipfile_entry):
"""Take a pipfile entry and normalize its markers
Provide a pipfile entry which may have 'markers' as a key or it may have
any valid key from `packaging.markers.marker_context.keys()` and standardize
the format into {'markers': 'key == "some_value"'}.
:param pipfile_entry: A dictionariy of keys and values representing a pipfile entry
:type pipfile_entry: dict
:returns: A normalized dictionary with cleaned marker entries
"""
if not isinstance(pipfile_entry, Mapping):
raise TypeError("Entry is not a pipfile formatted mapping.")
from .vendor.distlib.markers import DEFAULT_CONTEXT as marker_context
from .vendor.packaging.markers import Marker
from .vendor.vistir.misc import dedup
allowed_marker_keys = ["markers"] + [k for k in marker_context.keys()]
provided_keys = list(pipfile_entry.keys()) if hasattr(pipfile_entry, "keys") else []
pipfile_markers = [k for k in provided_keys if k in allowed_marker_keys]
new_pipfile = dict(pipfile_entry).copy()
marker_set = set()
if "markers" in new_pipfile:
marker = str(Marker(new_pipfile.pop("markers")))
if 'extra' not in marker:
marker_set.add(marker)
for m in pipfile_markers:
entry = "{0}".format(pipfile_entry[m])
if m != "markers":
marker_set.add(str(Marker("{0}{1}".format(m, entry))))
new_pipfile.pop(m)
if marker_set:
new_pipfile["markers"] = str(Marker(" or ".join(
"{0}".format(s) if " and " in s else s
for s in sorted(dedup(marker_set))
))).replace('"', "'")
return new_pipfile | python | def translate_markers(pipfile_entry):
"""Take a pipfile entry and normalize its markers
Provide a pipfile entry which may have 'markers' as a key or it may have
any valid key from `packaging.markers.marker_context.keys()` and standardize
the format into {'markers': 'key == "some_value"'}.
:param pipfile_entry: A dictionariy of keys and values representing a pipfile entry
:type pipfile_entry: dict
:returns: A normalized dictionary with cleaned marker entries
"""
if not isinstance(pipfile_entry, Mapping):
raise TypeError("Entry is not a pipfile formatted mapping.")
from .vendor.distlib.markers import DEFAULT_CONTEXT as marker_context
from .vendor.packaging.markers import Marker
from .vendor.vistir.misc import dedup
allowed_marker_keys = ["markers"] + [k for k in marker_context.keys()]
provided_keys = list(pipfile_entry.keys()) if hasattr(pipfile_entry, "keys") else []
pipfile_markers = [k for k in provided_keys if k in allowed_marker_keys]
new_pipfile = dict(pipfile_entry).copy()
marker_set = set()
if "markers" in new_pipfile:
marker = str(Marker(new_pipfile.pop("markers")))
if 'extra' not in marker:
marker_set.add(marker)
for m in pipfile_markers:
entry = "{0}".format(pipfile_entry[m])
if m != "markers":
marker_set.add(str(Marker("{0}{1}".format(m, entry))))
new_pipfile.pop(m)
if marker_set:
new_pipfile["markers"] = str(Marker(" or ".join(
"{0}".format(s) if " and " in s else s
for s in sorted(dedup(marker_set))
))).replace('"', "'")
return new_pipfile | ['def', 'translate_markers', '(', 'pipfile_entry', ')', ':', 'if', 'not', 'isinstance', '(', 'pipfile_entry', ',', 'Mapping', ')', ':', 'raise', 'TypeError', '(', '"Entry is not a pipfile formatted mapping."', ')', 'from', '.', 'vendor', '.', 'distlib', '.', 'markers', 'import', 'DEFAULT_CONTEXT', 'as', 'marker_context', 'from', '.', 'vendor', '.', 'packaging', '.', 'markers', 'import', 'Marker', 'from', '.', 'vendor', '.', 'vistir', '.', 'misc', 'import', 'dedup', 'allowed_marker_keys', '=', '[', '"markers"', ']', '+', '[', 'k', 'for', 'k', 'in', 'marker_context', '.', 'keys', '(', ')', ']', 'provided_keys', '=', 'list', '(', 'pipfile_entry', '.', 'keys', '(', ')', ')', 'if', 'hasattr', '(', 'pipfile_entry', ',', '"keys"', ')', 'else', '[', ']', 'pipfile_markers', '=', '[', 'k', 'for', 'k', 'in', 'provided_keys', 'if', 'k', 'in', 'allowed_marker_keys', ']', 'new_pipfile', '=', 'dict', '(', 'pipfile_entry', ')', '.', 'copy', '(', ')', 'marker_set', '=', 'set', '(', ')', 'if', '"markers"', 'in', 'new_pipfile', ':', 'marker', '=', 'str', '(', 'Marker', '(', 'new_pipfile', '.', 'pop', '(', '"markers"', ')', ')', ')', 'if', "'extra'", 'not', 'in', 'marker', ':', 'marker_set', '.', 'add', '(', 'marker', ')', 'for', 'm', 'in', 'pipfile_markers', ':', 'entry', '=', '"{0}"', '.', 'format', '(', 'pipfile_entry', '[', 'm', ']', ')', 'if', 'm', '!=', '"markers"', ':', 'marker_set', '.', 'add', '(', 'str', '(', 'Marker', '(', '"{0}{1}"', '.', 'format', '(', 'm', ',', 'entry', ')', ')', ')', ')', 'new_pipfile', '.', 'pop', '(', 'm', ')', 'if', 'marker_set', ':', 'new_pipfile', '[', '"markers"', ']', '=', 'str', '(', 'Marker', '(', '" or "', '.', 'join', '(', '"{0}"', '.', 'format', '(', 's', ')', 'if', '" and "', 'in', 's', 'else', 's', 'for', 's', 'in', 'sorted', '(', 'dedup', '(', 'marker_set', ')', ')', ')', ')', ')', '.', 'replace', '(', '\'"\'', ',', '"\'"', ')', 'return', 'new_pipfile'] | Take a pipfile entry and normalize its markers
Provide a pipfile entry which may have 'markers' as a key or it may have
any valid key from `packaging.markers.marker_context.keys()` and standardize
the format into {'markers': 'key == "some_value"'}.
:param pipfile_entry: A dictionariy of keys and values representing a pipfile entry
:type pipfile_entry: dict
:returns: A normalized dictionary with cleaned marker entries | ['Take', 'a', 'pipfile', 'entry', 'and', 'normalize', 'its', 'markers'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/utils.py#L1597-L1633 |
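A hedged usage sketch for translate_markers(); the Pipfile entry is invented, and the single-quoted marker string in the expected result reflects the quote-replacement step at the end of the function above:

from pipenv.utils import translate_markers  # module path as shown in this row

entry = {"version": "*", "python_version": ">=3.6"}
print(translate_markers(entry))
# Expected, per the code above: {'version': '*', 'markers': "python_version >= '3.6'"}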
8,591 | Rapptz/discord.py | discord/iterators.py | GuildIterator._retrieve_guilds_before_strategy | async def _retrieve_guilds_before_strategy(self, retrieve):
"""Retrieve guilds using before parameter."""
before = self.before.id if self.before else None
data = await self.get_guilds(retrieve, before=before)
if len(data):
if self.limit is not None:
self.limit -= retrieve
self.before = Object(id=int(data[-1]['id']))
return data | python | async def _retrieve_guilds_before_strategy(self, retrieve):
"""Retrieve guilds using before parameter."""
before = self.before.id if self.before else None
data = await self.get_guilds(retrieve, before=before)
if len(data):
if self.limit is not None:
self.limit -= retrieve
self.before = Object(id=int(data[-1]['id']))
return data | ['async', 'def', '_retrieve_guilds_before_strategy', '(', 'self', ',', 'retrieve', ')', ':', 'before', '=', 'self', '.', 'before', '.', 'id', 'if', 'self', '.', 'before', 'else', 'None', 'data', '=', 'await', 'self', '.', 'get_guilds', '(', 'retrieve', ',', 'before', '=', 'before', ')', 'if', 'len', '(', 'data', ')', ':', 'if', 'self', '.', 'limit', 'is', 'not', 'None', ':', 'self', '.', 'limit', '-=', 'retrieve', 'self', '.', 'before', '=', 'Object', '(', 'id', '=', 'int', '(', 'data', '[', '-', '1', ']', '[', "'id'", ']', ')', ')', 'return', 'data'] | Retrieve guilds using before parameter. | ['Retrieve', 'guilds', 'using', 'before', 'parameter', '.'] | train | https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/iterators.py#L571-L579 |
8,592 | robehickman/simple-http-file-sync | shttpfs/plain_storage.py | plain_storage.get_single_file_info | def get_single_file_info(self, rel_path):
""" Gets last change time for a single file """
f_path = self.get_full_file_path(rel_path)
return get_single_file_info(f_path, rel_path) | python | def get_single_file_info(self, rel_path):
""" Gets last change time for a single file """
f_path = self.get_full_file_path(rel_path)
return get_single_file_info(f_path, rel_path) | ['def', 'get_single_file_info', '(', 'self', ',', 'rel_path', ')', ':', 'f_path', '=', 'self', '.', 'get_full_file_path', '(', 'rel_path', ')', 'return', 'get_single_file_info', '(', 'f_path', ',', 'rel_path', ')'] | Gets last change time for a single file | ['Gets', 'last', 'change', 'time', 'for', 'a', 'single', 'file'] | train | https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/plain_storage.py#L16-L20 |
8,593 | tango-controls/pytango | tango/databaseds/database.py | DataBase.DbGetAttributeAlias | def DbGetAttributeAlias(self, argin):
""" Get the attribute name for the given alias.
If alias not found in database, returns an empty string.
:param argin: The attribute alias name
:type: tango.DevString
:return: The attribute name (device/attribute)
:rtype: tango.DevString """
self._log.debug("In DbGetAttributeAlias()")
return self.db.get_attribute_alias(argin) | python | def DbGetAttributeAlias(self, argin):
""" Get the attribute name for the given alias.
If alias not found in database, returns an empty string.
:param argin: The attribute alias name
:type: tango.DevString
:return: The attribute name (device/attribute)
:rtype: tango.DevString """
self._log.debug("In DbGetAttributeAlias()")
return self.db.get_attribute_alias(argin) | ['def', 'DbGetAttributeAlias', '(', 'self', ',', 'argin', ')', ':', 'self', '.', '_log', '.', 'debug', '(', '"In DbGetAttributeAlias()"', ')', 'return', 'self', '.', 'db', '.', 'get_attribute_alias', '(', 'argin', ')'] | Get the attribute name for the given alias.
If alias not found in database, returns an empty string.
:param argin: The attribute alias name
:type: tango.DevString
:return: The attribute name (device/attribute)
:rtype: tango.DevString | ['Get', 'the', 'attribute', 'name', 'for', 'the', 'given', 'alias', '.', 'If', 'alias', 'not', 'found', 'in', 'database', 'returns', 'an', 'empty', 'string', '.'] | train | https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/databaseds/database.py#L932-L941 |
8,594 | apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | xmlNode.replaceNode | def replaceNode(self, cur):
"""Unlink the old node from its current context, prune the new
one at the same place. If @cur was already inserted in a
document it is first unlinked from its existing context. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlReplaceNode(self._o, cur__o)
if ret is None:raise treeError('xmlReplaceNode() failed')
__tmp = xmlNode(_obj=ret)
return __tmp | python | def replaceNode(self, cur):
"""Unlink the old node from its current context, prune the new
one at the same place. If @cur was already inserted in a
document it is first unlinked from its existing context. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlReplaceNode(self._o, cur__o)
if ret is None:raise treeError('xmlReplaceNode() failed')
__tmp = xmlNode(_obj=ret)
return __tmp | ['def', 'replaceNode', '(', 'self', ',', 'cur', ')', ':', 'if', 'cur', 'is', 'None', ':', 'cur__o', '=', 'None', 'else', ':', 'cur__o', '=', 'cur', '.', '_o', 'ret', '=', 'libxml2mod', '.', 'xmlReplaceNode', '(', 'self', '.', '_o', ',', 'cur__o', ')', 'if', 'ret', 'is', 'None', ':', 'raise', 'treeError', '(', "'xmlReplaceNode() failed'", ')', '__tmp', '=', 'xmlNode', '(', '_obj', '=', 'ret', ')', 'return', '__tmp'] | Unlink the old node from its current context, prune the new
one at the same place. If @cur was already inserted in a
document it is first unlinked from its existing context. | ['Unlink', 'the', 'old', 'node', 'from', 'its', 'current', 'context', 'prune', 'the', 'new', 'one', 'at', 'the', 'same', 'place', '.', 'If'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L3491-L3500 |
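A small sketch of xmlNode.replaceNode() from the libxml2 Python bindings catalogued above; the XML snippet is invented, and freeing the unlinked node afterwards is an assumption about good practice with these bindings rather than something the row states:

import libxml2

doc = libxml2.parseDoc("<root><old/></root>")
old = doc.getRootElement().children        # the <old/> element
new = libxml2.newNode("new")
old.replaceNode(new)                       # unlinks <old/> and puts <new/> in its place
print(doc.serialize())                     # ...<root><new/></root>
old.freeNode()                             # the unlinked node is no longer owned by doc
doc.freeDoc()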
8,595 | ktbyers/netmiko | netmiko/_textfsm/_texttable.py | TextTable.Map | def Map(self, function):
"""Applies the function to every row in the table.
Args:
function: A function applied to each row.
Returns:
A new TextTable()
Raises:
TableError: When transform is not invalid row entry. The transform
must be compatible with Append().
"""
new_table = self.__class__()
# pylint: disable=protected-access
new_table._table = [self.header]
for row in self:
filtered_row = function(row)
if filtered_row:
new_table.Append(filtered_row)
return new_table | python | def Map(self, function):
"""Applies the function to every row in the table.
Args:
function: A function applied to each row.
Returns:
A new TextTable()
Raises:
TableError: When transform is not invalid row entry. The transform
must be compatible with Append().
"""
new_table = self.__class__()
# pylint: disable=protected-access
new_table._table = [self.header]
for row in self:
filtered_row = function(row)
if filtered_row:
new_table.Append(filtered_row)
return new_table | ['def', 'Map', '(', 'self', ',', 'function', ')', ':', 'new_table', '=', 'self', '.', '__class__', '(', ')', '# pylint: disable=protected-access', 'new_table', '.', '_table', '=', '[', 'self', '.', 'header', ']', 'for', 'row', 'in', 'self', ':', 'filtered_row', '=', 'function', '(', 'row', ')', 'if', 'filtered_row', ':', 'new_table', '.', 'Append', '(', 'filtered_row', ')', 'return', 'new_table'] | Applies the function to every row in the table.
Args:
function: A function applied to each row.
Returns:
A new TextTable()
Raises:
TableError: When transform is not invalid row entry. The transform
must be compatible with Append(). | ['Applies', 'the', 'function', 'to', 'every', 'row', 'in', 'the', 'table', '.'] | train | https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/_textfsm/_texttable.py#L420-L440 |
8,596 | inasafe/inasafe | safe/report/impact_report.py | InaSAFEReportContext.north_arrow | def north_arrow(self, north_arrow_path):
"""Set image that will be used as north arrow in reports.
:param north_arrow_path: Path to the north arrow image.
:type north_arrow_path: str
"""
if isinstance(north_arrow_path, str) and os.path.exists(
north_arrow_path):
self._north_arrow = north_arrow_path
else:
self._north_arrow = default_north_arrow_path() | python | def north_arrow(self, north_arrow_path):
"""Set image that will be used as north arrow in reports.
:param north_arrow_path: Path to the north arrow image.
:type north_arrow_path: str
"""
if isinstance(north_arrow_path, str) and os.path.exists(
north_arrow_path):
self._north_arrow = north_arrow_path
else:
self._north_arrow = default_north_arrow_path() | ['def', 'north_arrow', '(', 'self', ',', 'north_arrow_path', ')', ':', 'if', 'isinstance', '(', 'north_arrow_path', ',', 'str', ')', 'and', 'os', '.', 'path', '.', 'exists', '(', 'north_arrow_path', ')', ':', 'self', '.', '_north_arrow', '=', 'north_arrow_path', 'else', ':', 'self', '.', '_north_arrow', '=', 'default_north_arrow_path', '(', ')'] | Set image that will be used as north arrow in reports.
:param north_arrow_path: Path to the north arrow image.
:type north_arrow_path: str | ['Set', 'image', 'that', 'will', 'be', 'used', 'as', 'north', 'arrow', 'in', 'reports', '.'] | train | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/report/impact_report.py#L69-L79 |
8,597 | sdss/sdss_access | python/sdss_access/sync/rsync.py | RsyncAccess.remote | def remote(self, username=None, password=None, inquire=None):
""" Configures remote access """
self.set_netloc(sdss=True) # simplifies things to have a single sdss machine in .netrc
self.set_auth(username=username, password=password, inquire=inquire)
self.set_netloc(dtn=not self.public)
self.set_remote_base(scheme="rsync") | python | def remote(self, username=None, password=None, inquire=None):
""" Configures remote access """
self.set_netloc(sdss=True) # simplifies things to have a single sdss machine in .netrc
self.set_auth(username=username, password=password, inquire=inquire)
self.set_netloc(dtn=not self.public)
self.set_remote_base(scheme="rsync") | ['def', 'remote', '(', 'self', ',', 'username', '=', 'None', ',', 'password', '=', 'None', ',', 'inquire', '=', 'None', ')', ':', 'self', '.', 'set_netloc', '(', 'sdss', '=', 'True', ')', '# simplifies things to have a single sdss machine in .netrc', 'self', '.', 'set_auth', '(', 'username', '=', 'username', ',', 'password', '=', 'password', ',', 'inquire', '=', 'inquire', ')', 'self', '.', 'set_netloc', '(', 'dtn', '=', 'not', 'self', '.', 'public', ')', 'self', '.', 'set_remote_base', '(', 'scheme', '=', '"rsync"', ')'] | Configures remote access | ['Configures', 'remote', 'access'] | train | https://github.com/sdss/sdss_access/blob/76375bbf37d39d2e4ccbed90bdfa9a4298784470/python/sdss_access/sync/rsync.py#L28-L34 |
8,598 | newville/wxmplot | wxmplot/baseframe.py | BaseFrame.unzoom_all | def unzoom_all(self,event=None):
"""zoom out full data range """
if self.panel is not None:
self.panel.unzoom_all(event=event) | python | def unzoom_all(self,event=None):
"""zoom out full data range """
if self.panel is not None:
self.panel.unzoom_all(event=event) | ['def', 'unzoom_all', '(', 'self', ',', 'event', '=', 'None', ')', ':', 'if', 'self', '.', 'panel', 'is', 'not', 'None', ':', 'self', '.', 'panel', '.', 'unzoom_all', '(', 'event', '=', 'event', ')'] | zoom out full data range | ['zoom', 'out', 'full', 'data', 'range'] | train | https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/wxmplot/baseframe.py#L80-L83 |
8,599 | saltstack/salt | salt/utils/crypt.py | pem_finger | def pem_finger(path=None, key=None, sum_type='sha256'):
'''
Pass in either a raw pem string, or the path on disk to the location of a
pem file, and the type of cryptographic hash to use. The default is SHA256.
The fingerprint of the pem will be returned.
If neither a key nor a path are passed in, a blank string will be returned.
'''
if not key:
if not os.path.isfile(path):
return ''
with salt.utils.files.fopen(path, 'rb') as fp_:
key = b''.join([x for x in fp_.readlines() if x.strip()][1:-1])
pre = getattr(hashlib, sum_type)(key).hexdigest()
finger = ''
for ind, _ in enumerate(pre):
if ind % 2:
# Is odd
finger += '{0}:'.format(pre[ind])
else:
finger += pre[ind]
return finger.rstrip(':') | python | def pem_finger(path=None, key=None, sum_type='sha256'):
'''
Pass in either a raw pem string, or the path on disk to the location of a
pem file, and the type of cryptographic hash to use. The default is SHA256.
The fingerprint of the pem will be returned.
If neither a key nor a path are passed in, a blank string will be returned.
'''
if not key:
if not os.path.isfile(path):
return ''
with salt.utils.files.fopen(path, 'rb') as fp_:
key = b''.join([x for x in fp_.readlines() if x.strip()][1:-1])
pre = getattr(hashlib, sum_type)(key).hexdigest()
finger = ''
for ind, _ in enumerate(pre):
if ind % 2:
# Is odd
finger += '{0}:'.format(pre[ind])
else:
finger += pre[ind]
return finger.rstrip(':') | ['def', 'pem_finger', '(', 'path', '=', 'None', ',', 'key', '=', 'None', ',', 'sum_type', '=', "'sha256'", ')', ':', 'if', 'not', 'key', ':', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'path', ')', ':', 'return', "''", 'with', 'salt', '.', 'utils', '.', 'files', '.', 'fopen', '(', 'path', ',', "'rb'", ')', 'as', 'fp_', ':', 'key', '=', "b''", '.', 'join', '(', '[', 'x', 'for', 'x', 'in', 'fp_', '.', 'readlines', '(', ')', 'if', 'x', '.', 'strip', '(', ')', ']', '[', '1', ':', '-', '1', ']', ')', 'pre', '=', 'getattr', '(', 'hashlib', ',', 'sum_type', ')', '(', 'key', ')', '.', 'hexdigest', '(', ')', 'finger', '=', "''", 'for', 'ind', ',', '_', 'in', 'enumerate', '(', 'pre', ')', ':', 'if', 'ind', '%', '2', ':', '# Is odd', 'finger', '+=', "'{0}:'", '.', 'format', '(', 'pre', '[', 'ind', ']', ')', 'else', ':', 'finger', '+=', 'pre', '[', 'ind', ']', 'return', 'finger', '.', 'rstrip', '(', "':'", ')'] | Pass in either a raw pem string, or the path on disk to the location of a
pem file, and the type of cryptographic hash to use. The default is SHA256.
The fingerprint of the pem will be returned.
If neither a key nor a path are passed in, a blank string will be returned. | ['Pass', 'in', 'either', 'a', 'raw', 'pem', 'string', 'or', 'the', 'path', 'on', 'disk', 'to', 'the', 'location', 'of', 'a', 'pem', 'file', 'and', 'the', 'type', 'of', 'cryptographic', 'hash', 'to', 'use', '.', 'The', 'default', 'is', 'SHA256', '.', 'The', 'fingerprint', 'of', 'the', 'pem', 'will', 'be', 'returned', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/crypt.py#L117-L140 |
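A short usage sketch for pem_finger(); the minion public-key path is only an example of a readable PEM file, and any other would do:

from salt.utils.crypt import pem_finger

fingerprint = pem_finger('/etc/salt/pki/minion/minion.pub', sum_type='sha256')
print(fingerprint)   # e.g. 'ab:12:...': colon-separated hex digest of the key body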