Unnamed: 0 (int64, 0-10k) | repository_name (stringlengths 7-54) | func_path_in_repository (stringlengths 5-223) | func_name (stringlengths 1-134) | whole_func_string (stringlengths 100-30.3k) | language (stringclasses 1) | func_code_string (stringlengths 100-30.3k) | func_code_tokens (stringlengths 138-33.2k) | func_documentation_string (stringlengths 1-15k) | func_documentation_tokens (stringlengths 5-5.14k) | split_name (stringclasses 1) | func_code_url (stringlengths 91-315) |
---|---|---|---|---|---|---|---|---|---|---|---|
4,200 | podio/podio-py | pypodio2/encode.py | multipart_encode | def multipart_encode(params, boundary=None, cb=None):
"""Encode ``params`` as multipart/form-data.
``params`` should be a sequence of (name, value) pairs or MultipartParam
objects, or a mapping of names to values.
Values are either string parameter values, or file-like objects to use as
the parameter value. The file-like objects must support .read() and either
.fileno() or both .seek() and .tell().
If ``boundary`` is set, then it is used as the MIME boundary. Otherwise
a randomly generated boundary will be used. In either case, if the
boundary string appears in the parameter values a ValueError will be
raised.
If ``cb`` is set, it should be a callback which will get called as blocks
of data are encoded. It will be called with (param, current, total),
indicating the current parameter being encoded, the current amount encoded,
and the total amount to encode.
Returns a tuple of `datagen`, `headers`, where `datagen` is a
generator that will yield blocks of data that make up the encoded
parameters, and `headers` is a dictionary with the associated
Content-Type and Content-Length headers.
Examples:
>>> datagen, headers = multipart_encode( [("key", "value1"), ("key", "value2")] )
>>> s = "".join(datagen)
>>> assert "value2" in s and "value1" in s
>>> p = MultipartParam("key", "value2")
>>> datagen, headers = multipart_encode( [("key", "value1"), p] )
>>> s = "".join(datagen)
>>> assert "value2" in s and "value1" in s
>>> datagen, headers = multipart_encode( {"key": "value1"} )
>>> s = "".join(datagen)
>>> assert "value2" not in s and "value1" in s
"""
if boundary is None:
boundary = gen_boundary()
else:
boundary = urllib.quote_plus(boundary)
headers = get_headers(params, boundary)
params = MultipartParam.from_params(params)
return MultipartYielder(params, boundary, cb), headers | python | def multipart_encode(params, boundary=None, cb=None):
"""Encode ``params`` as multipart/form-data.
``params`` should be a sequence of (name, value) pairs or MultipartParam
objects, or a mapping of names to values.
Values are either string parameter values, or file-like objects to use as
the parameter value. The file-like objects must support .read() and either
.fileno() or both .seek() and .tell().
If ``boundary`` is set, then it is used as the MIME boundary. Otherwise
a randomly generated boundary will be used. In either case, if the
boundary string appears in the parameter values a ValueError will be
raised.
If ``cb`` is set, it should be a callback which will get called as blocks
of data are encoded. It will be called with (param, current, total),
indicating the current parameter being encoded, the current amount encoded,
and the total amount to encode.
Returns a tuple of `datagen`, `headers`, where `datagen` is a
generator that will yield blocks of data that make up the encoded
parameters, and `headers` is a dictionary with the associated
Content-Type and Content-Length headers.
Examples:
>>> datagen, headers = multipart_encode( [("key", "value1"), ("key", "value2")] )
>>> s = "".join(datagen)
>>> assert "value2" in s and "value1" in s
>>> p = MultipartParam("key", "value2")
>>> datagen, headers = multipart_encode( [("key", "value1"), p] )
>>> s = "".join(datagen)
>>> assert "value2" in s and "value1" in s
>>> datagen, headers = multipart_encode( {"key": "value1"} )
>>> s = "".join(datagen)
>>> assert "value2" not in s and "value1" in s
"""
if boundary is None:
boundary = gen_boundary()
else:
boundary = urllib.quote_plus(boundary)
headers = get_headers(params, boundary)
params = MultipartParam.from_params(params)
return MultipartYielder(params, boundary, cb), headers | ['def', 'multipart_encode', '(', 'params', ',', 'boundary', '=', 'None', ',', 'cb', '=', 'None', ')', ':', 'if', 'boundary', 'is', 'None', ':', 'boundary', '=', 'gen_boundary', '(', ')', 'else', ':', 'boundary', '=', 'urllib', '.', 'quote_plus', '(', 'boundary', ')', 'headers', '=', 'get_headers', '(', 'params', ',', 'boundary', ')', 'params', '=', 'MultipartParam', '.', 'from_params', '(', 'params', ')', 'return', 'MultipartYielder', '(', 'params', ',', 'boundary', ',', 'cb', ')', ',', 'headers'] | Encode ``params`` as multipart/form-data.
``params`` should be a sequence of (name, value) pairs or MultipartParam
objects, or a mapping of names to values.
Values are either string parameter values, or file-like objects to use as
the parameter value. The file-like objects must support .read() and either
.fileno() or both .seek() and .tell().
If ``boundary`` is set, then it is used as the MIME boundary. Otherwise
a randomly generated boundary will be used. In either case, if the
boundary string appears in the parameter values a ValueError will be
raised.
If ``cb`` is set, it should be a callback which will get called as blocks
of data are encoded. It will be called with (param, current, total),
indicating the current parameter being encoded, the current amount encoded,
and the total amount to encode.
Returns a tuple of `datagen`, `headers`, where `datagen` is a
generator that will yield blocks of data that make up the encoded
parameters, and `headers` is a dictionary with the associated
Content-Type and Content-Length headers.
Examples:
>>> datagen, headers = multipart_encode( [("key", "value1"), ("key", "value2")] )
>>> s = "".join(datagen)
>>> assert "value2" in s and "value1" in s
>>> p = MultipartParam("key", "value2")
>>> datagen, headers = multipart_encode( [("key", "value1"), p] )
>>> s = "".join(datagen)
>>> assert "value2" in s and "value1" in s
>>> datagen, headers = multipart_encode( {"key": "value1"} )
>>> s = "".join(datagen)
>>> assert "value2" not in s and "value1" in s | ['Encode', 'params', 'as', 'multipart', '/', 'form', '-', 'data', '.'] | train | https://github.com/podio/podio-py/blob/5ce956034a06c98b0ef18fcd940b36da0908ad6c/pypodio2/encode.py#L385-L433 |
4,201 | GoogleCloudPlatform/appengine-pipelines | python/src/pipeline/pipeline.py | PipelineFuture._inherit_outputs | def _inherit_outputs(self,
pipeline_name,
already_defined,
resolve_outputs=False):
"""Inherits outputs from a calling Pipeline.
Args:
pipeline_name: The Pipeline class name (used for debugging).
already_defined: Maps output name to stringified db.Key (of _SlotRecords)
of any existing output slots to be inherited by this future.
resolve_outputs: When True, this method will dereference all output slots
before returning back to the caller, making those output slots' values
available.
Raises:
UnexpectedPipelineError when resolve_outputs is True and any of the output
slots could not be retrieved from the Datastore.
"""
for name, slot_key in already_defined.iteritems():
if not isinstance(slot_key, db.Key):
slot_key = db.Key(slot_key)
slot = self._output_dict.get(name)
if slot is None:
if self._strict:
raise UnexpectedPipelineError(
'Inherited output named "%s" must be filled but '
'not declared for pipeline class "%s"' % (name, pipeline_name))
else:
self._output_dict[name] = Slot(name=name, slot_key=slot_key)
else:
slot.key = slot_key
slot._exists = True
if resolve_outputs:
slot_key_dict = dict((s.key, s) for s in self._output_dict.itervalues())
all_slots = db.get(slot_key_dict.keys())
for slot, slot_record in zip(slot_key_dict.itervalues(), all_slots):
if slot_record is None:
raise UnexpectedPipelineError(
'Inherited output named "%s" for pipeline class "%s" is '
'missing its Slot in the datastore: "%s"' %
(slot.name, pipeline_name, slot.key))
slot = slot_key_dict[slot_record.key()]
slot._set_value(slot_record) | python | def _inherit_outputs(self,
pipeline_name,
already_defined,
resolve_outputs=False):
"""Inherits outputs from a calling Pipeline.
Args:
pipeline_name: The Pipeline class name (used for debugging).
already_defined: Maps output name to stringified db.Key (of _SlotRecords)
of any existing output slots to be inherited by this future.
resolve_outputs: When True, this method will dereference all output slots
before returning back to the caller, making those output slots' values
available.
Raises:
UnexpectedPipelineError when resolve_outputs is True and any of the output
slots could not be retrieved from the Datastore.
"""
for name, slot_key in already_defined.iteritems():
if not isinstance(slot_key, db.Key):
slot_key = db.Key(slot_key)
slot = self._output_dict.get(name)
if slot is None:
if self._strict:
raise UnexpectedPipelineError(
'Inherited output named "%s" must be filled but '
'not declared for pipeline class "%s"' % (name, pipeline_name))
else:
self._output_dict[name] = Slot(name=name, slot_key=slot_key)
else:
slot.key = slot_key
slot._exists = True
if resolve_outputs:
slot_key_dict = dict((s.key, s) for s in self._output_dict.itervalues())
all_slots = db.get(slot_key_dict.keys())
for slot, slot_record in zip(slot_key_dict.itervalues(), all_slots):
if slot_record is None:
raise UnexpectedPipelineError(
'Inherited output named "%s" for pipeline class "%s" is '
'missing its Slot in the datastore: "%s"' %
(slot.name, pipeline_name, slot.key))
slot = slot_key_dict[slot_record.key()]
slot._set_value(slot_record) | ['def', '_inherit_outputs', '(', 'self', ',', 'pipeline_name', ',', 'already_defined', ',', 'resolve_outputs', '=', 'False', ')', ':', 'for', 'name', ',', 'slot_key', 'in', 'already_defined', '.', 'iteritems', '(', ')', ':', 'if', 'not', 'isinstance', '(', 'slot_key', ',', 'db', '.', 'Key', ')', ':', 'slot_key', '=', 'db', '.', 'Key', '(', 'slot_key', ')', 'slot', '=', 'self', '.', '_output_dict', '.', 'get', '(', 'name', ')', 'if', 'slot', 'is', 'None', ':', 'if', 'self', '.', '_strict', ':', 'raise', 'UnexpectedPipelineError', '(', '\'Inherited output named "%s" must be filled but \'', '\'not declared for pipeline class "%s"\'', '%', '(', 'name', ',', 'pipeline_name', ')', ')', 'else', ':', 'self', '.', '_output_dict', '[', 'name', ']', '=', 'Slot', '(', 'name', '=', 'name', ',', 'slot_key', '=', 'slot_key', ')', 'else', ':', 'slot', '.', 'key', '=', 'slot_key', 'slot', '.', '_exists', '=', 'True', 'if', 'resolve_outputs', ':', 'slot_key_dict', '=', 'dict', '(', '(', 's', '.', 'key', ',', 's', ')', 'for', 's', 'in', 'self', '.', '_output_dict', '.', 'itervalues', '(', ')', ')', 'all_slots', '=', 'db', '.', 'get', '(', 'slot_key_dict', '.', 'keys', '(', ')', ')', 'for', 'slot', ',', 'slot_record', 'in', 'zip', '(', 'slot_key_dict', '.', 'itervalues', '(', ')', ',', 'all_slots', ')', ':', 'if', 'slot_record', 'is', 'None', ':', 'raise', 'UnexpectedPipelineError', '(', '\'Inherited output named "%s" for pipeline class "%s" is \'', '\'missing its Slot in the datastore: "%s"\'', '%', '(', 'slot', '.', 'name', ',', 'pipeline_name', ',', 'slot', '.', 'key', ')', ')', 'slot', '=', 'slot_key_dict', '[', 'slot_record', '.', 'key', '(', ')', ']', 'slot', '.', '_set_value', '(', 'slot_record', ')'] | Inherits outputs from a calling Pipeline.
Args:
pipeline_name: The Pipeline class name (used for debugging).
already_defined: Maps output name to stringified db.Key (of _SlotRecords)
of any existing output slots to be inherited by this future.
resolve_outputs: When True, this method will dereference all output slots
before returning back to the caller, making those output slots' values
available.
Raises:
UnexpectedPipelineError when resolve_outputs is True and any of the output
slots could not be retrieved from the Datastore. | ['Inherits', 'outputs', 'from', 'a', 'calling', 'Pipeline', '.'] | train | https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L314-L358 |
4,202 | softlayer/softlayer-python | SoftLayer/managers/vs.py | VSManager.create_instances | def create_instances(self, config_list):
"""Creates multiple virtual server instances.
This takes a list of dictionaries using the same arguments as
create_instance().
.. warning::
This will add charges to your account
Example::
# Define the instance we want to create.
new_vsi = {
'domain': u'test01.labs.sftlyr.ws',
'hostname': u'minion05',
'datacenter': u'hkg02',
'flavor': 'BL1_1X2X100',
'dedicated': False,
'private': False,
'os_code' : u'UBUNTU_LATEST',
'hourly': True,
'ssh_keys': [1234],
'disks': ('100','25'),
'local_disk': True,
'tags': 'test, pleaseCancel',
'public_security_groups': [12, 15]
}
# using .copy() so we can make changes to individual nodes
instances = [new_vsi.copy(), new_vsi.copy(), new_vsi.copy()]
# give each its own hostname, not required.
instances[0]['hostname'] = "multi-test01"
instances[1]['hostname'] = "multi-test02"
instances[2]['hostname'] = "multi-test03"
vsi = mgr.create_instances(config_list=instances)
#vsi will be a dictionary of all the new virtual servers
print vsi
"""
tags = [conf.pop('tags', None) for conf in config_list]
resp = self.guest.createObjects([self._generate_create_dict(**kwargs)
for kwargs in config_list])
for instance, tag in zip(resp, tags):
if tag is not None:
self.set_tags(tag, guest_id=instance['id'])
return resp | python | def create_instances(self, config_list):
"""Creates multiple virtual server instances.
This takes a list of dictionaries using the same arguments as
create_instance().
.. warning::
This will add charges to your account
Example::
# Define the instance we want to create.
new_vsi = {
'domain': u'test01.labs.sftlyr.ws',
'hostname': u'minion05',
'datacenter': u'hkg02',
'flavor': 'BL1_1X2X100',
'dedicated': False,
'private': False,
'os_code' : u'UBUNTU_LATEST',
'hourly': True,
'ssh_keys': [1234],
'disks': ('100','25'),
'local_disk': True,
'tags': 'test, pleaseCancel',
'public_security_groups': [12, 15]
}
# using .copy() so we can make changes to individual nodes
instances = [new_vsi.copy(), new_vsi.copy(), new_vsi.copy()]
# give each its own hostname, not required.
instances[0]['hostname'] = "multi-test01"
instances[1]['hostname'] = "multi-test02"
instances[2]['hostname'] = "multi-test03"
vsi = mgr.create_instances(config_list=instances)
#vsi will be a dictionary of all the new virtual servers
print vsi
"""
tags = [conf.pop('tags', None) for conf in config_list]
resp = self.guest.createObjects([self._generate_create_dict(**kwargs)
for kwargs in config_list])
for instance, tag in zip(resp, tags):
if tag is not None:
self.set_tags(tag, guest_id=instance['id'])
return resp | ['def', 'create_instances', '(', 'self', ',', 'config_list', ')', ':', 'tags', '=', '[', 'conf', '.', 'pop', '(', "'tags'", ',', 'None', ')', 'for', 'conf', 'in', 'config_list', ']', 'resp', '=', 'self', '.', 'guest', '.', 'createObjects', '(', '[', 'self', '.', '_generate_create_dict', '(', '*', '*', 'kwargs', ')', 'for', 'kwargs', 'in', 'config_list', ']', ')', 'for', 'instance', ',', 'tag', 'in', 'zip', '(', 'resp', ',', 'tags', ')', ':', 'if', 'tag', 'is', 'not', 'None', ':', 'self', '.', 'set_tags', '(', 'tag', ',', 'guest_id', '=', 'instance', '[', "'id'", ']', ')', 'return', 'resp'] | Creates multiple virtual server instances.
This takes a list of dictionaries using the same arguments as
create_instance().
.. warning::
This will add charges to your account
Example::
# Define the instance we want to create.
new_vsi = {
'domain': u'test01.labs.sftlyr.ws',
'hostname': u'minion05',
'datacenter': u'hkg02',
'flavor': 'BL1_1X2X100',
'dedicated': False,
'private': False,
'os_code' : u'UBUNTU_LATEST',
'hourly': True,
'ssh_keys': [1234],
'disks': ('100','25'),
'local_disk': True,
'tags': 'test, pleaseCancel',
'public_security_groups': [12, 15]
}
# using .copy() so we can make changes to individual nodes
instances = [new_vsi.copy(), new_vsi.copy(), new_vsi.copy()]
# give each its own hostname, not required.
instances[0]['hostname'] = "multi-test01"
instances[1]['hostname'] = "multi-test02"
instances[2]['hostname'] = "multi-test03"
vsi = mgr.create_instances(config_list=instances)
#vsi will be a dictionary of all the new virtual servers
print vsi | ['Creates', 'multiple', 'virtual', 'server', 'instances', '.'] | train | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/vs.py#L594-L644 |
4,203 | materialsvirtuallab/monty | monty/fnmatch.py | WildCard.filter | def filter(self, names):
"""
Returns a list with the names matching the pattern.
"""
names = list_strings(names)
fnames = []
for f in names:
for pat in self.pats:
if fnmatch.fnmatch(f, pat):
fnames.append(f)
return fnames | python | def filter(self, names):
"""
Returns a list with the names matching the pattern.
"""
names = list_strings(names)
fnames = []
for f in names:
for pat in self.pats:
if fnmatch.fnmatch(f, pat):
fnames.append(f)
return fnames | ['def', 'filter', '(', 'self', ',', 'names', ')', ':', 'names', '=', 'list_strings', '(', 'names', ')', 'fnames', '=', '[', ']', 'for', 'f', 'in', 'names', ':', 'for', 'pat', 'in', 'self', '.', 'pats', ':', 'if', 'fnmatch', '.', 'fnmatch', '(', 'f', ',', 'pat', ')', ':', 'fnames', '.', 'append', '(', 'f', ')', 'return', 'fnames'] | Returns a list with the names matching the pattern. | ['Returns', 'a', 'list', 'with', 'the', 'names', 'matching', 'the', 'pattern', '.'] | train | https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/fnmatch.py#L41-L53 |
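As an illustration of the matching loop above, here is a self-contained, standard-library-only sketch of the same idea. Unlike `WildCard.filter`, which normalizes its input via `list_strings` and reads patterns from the instance's `self.pats`, this helper takes the patterns explicitly and collapses the duplicates the nested loop can produce when a name matches several patterns.

```python
import fnmatch

def filter_names(names, pats):
    """Keep the names that match at least one shell-style pattern."""
    return [name for name in names
            if any(fnmatch.fnmatch(name, pat) for pat in pats)]

files = ["run.py", "notes.txt", "data.bin", "README.md"]
print(filter_names(files, ["*.py", "*.md"]))  # ['run.py', 'README.md']
```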
4,204 | saltstack/salt | salt/states/pip_state.py | _fulfills_version_spec | def _fulfills_version_spec(version, version_spec):
'''
Check version number against version specification info and return a
boolean value based on whether or not the version number meets the
specified version.
'''
for oper, spec in version_spec:
if oper is None:
continue
if not salt.utils.versions.compare(ver1=version, oper=oper, ver2=spec, cmp_func=_pep440_version_cmp):
return False
return True | python | def _fulfills_version_spec(version, version_spec):
'''
Check version number against version specification info and return a
boolean value based on whether or not the version number meets the
specified version.
'''
for oper, spec in version_spec:
if oper is None:
continue
if not salt.utils.versions.compare(ver1=version, oper=oper, ver2=spec, cmp_func=_pep440_version_cmp):
return False
return True | ['def', '_fulfills_version_spec', '(', 'version', ',', 'version_spec', ')', ':', 'for', 'oper', ',', 'spec', 'in', 'version_spec', ':', 'if', 'oper', 'is', 'None', ':', 'continue', 'if', 'not', 'salt', '.', 'utils', '.', 'versions', '.', 'compare', '(', 'ver1', '=', 'version', ',', 'oper', '=', 'oper', ',', 'ver2', '=', 'spec', ',', 'cmp_func', '=', '_pep440_version_cmp', ')', ':', 'return', 'False', 'return', 'True'] | Check version number against version specification info and return a
boolean value based on whether or not the version number meets the
specified version. | ['Check', 'version', 'number', 'against', 'version', 'specification', 'info', 'and', 'return', 'a', 'boolean', 'value', 'based', 'on', 'whether', 'or', 'not', 'the', 'version', 'number', 'meets', 'the', 'specified', 'version', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pip_state.py#L92-L103 |
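For intuition, a toy stand-in for the check above: `version_spec` is an iterable of `(operator, version)` pairs and every pair must hold. This sketch compares dotted numeric versions only; the real code delegates to `salt.utils.versions.compare` with a PEP 440-aware comparison function.

```python
import operator

_OPS = {"<": operator.lt, "<=": operator.le, "==": operator.eq,
        ">=": operator.ge, ">": operator.gt, "!=": operator.ne}

def fulfills_version_spec(version, version_spec):
    def as_tuple(ver):
        return tuple(int(part) for part in ver.split("."))
    return all(_OPS[oper](as_tuple(version), as_tuple(spec))
               for oper, spec in version_spec
               if oper is not None)

print(fulfills_version_spec("2.11.1", [(">=", "2.0"), ("<", "3.0")]))  # True
print(fulfills_version_spec("3.1.0", [(">=", "2.0"), ("<", "3.0")]))   # False
```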
4,205 | taskcluster/taskcluster-client.py | taskcluster/awsprovisioner.py | AwsProvisioner.backendStatus | def backendStatus(self, *args, **kwargs):
"""
Backend Status
This endpoint is used to show when the last time the provisioner
has checked in. A check in is done through the deadman's snitch
api. It is done at the conclusion of a provisioning iteration
and used to tell if the background provisioning process is still
running.
**Warning** this api end-point is **not stable**.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/backend-status-response.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["backendStatus"], *args, **kwargs) | python | def backendStatus(self, *args, **kwargs):
"""
Backend Status
This endpoint is used to show the last time the provisioner
checked in. A check-in is done through the deadman's snitch
api. It is done at the conclusion of a provisioning iteration
and used to tell if the background provisioning process is still
running.
**Warning** this api end-point is **not stable**.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/backend-status-response.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["backendStatus"], *args, **kwargs) | ['def', 'backendStatus', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'return', 'self', '.', '_makeApiCall', '(', 'self', '.', 'funcinfo', '[', '"backendStatus"', ']', ',', '*', 'args', ',', '*', '*', 'kwargs', ')'] | Backend Status
This endpoint is used to show the last time the provisioner
checked in. A check-in is done through the deadman's snitch
api. It is done at the conclusion of a provisioning iteration
and used to tell if the background provisioning process is still
running.
**Warning** this api end-point is **not stable**.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/backend-status-response.json#``
This method is ``experimental`` | ['Backend', 'Status'] | train | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/awsprovisioner.py#L298-L315 |
4,206 | johnnoone/aioconsul | aioconsul/common/addr.py | parse_addr | def parse_addr(addr, *, proto=None, host=None):
"""Parses an address
Returns:
Address: the parsed address
"""
port = None
if isinstance(addr, Address):
return addr
elif isinstance(addr, str):
if addr.startswith('http://'):
proto, addr = 'http', addr[7:]
if addr.startswith('udp://'):
proto, addr = 'udp', addr[6:]
elif addr.startswith('tcp://'):
proto, addr = 'tcp', addr[6:]
elif addr.startswith('unix://'):
proto, addr = 'unix', addr[7:]
a, _, b = addr.partition(':')
host = a or host
port = b or port
elif isinstance(addr, (tuple, list)):
# list is not good
a, b = addr
host = a or host
port = b or port
elif isinstance(addr, int):
port = addr
else:
raise ValueError('bad value')
if port is not None:
port = int(port)
return Address(proto, host, port) | python | def parse_addr(addr, *, proto=None, host=None):
"""Parses an address
Returns:
Address: the parsed address
"""
port = None
if isinstance(addr, Address):
return addr
elif isinstance(addr, str):
if addr.startswith('http://'):
proto, addr = 'http', addr[7:]
if addr.startswith('udp://'):
proto, addr = 'udp', addr[6:]
elif addr.startswith('tcp://'):
proto, addr = 'tcp', addr[6:]
elif addr.startswith('unix://'):
proto, addr = 'unix', addr[7:]
a, _, b = addr.partition(':')
host = a or host
port = b or port
elif isinstance(addr, (tuple, list)):
# list is not good
a, b = addr
host = a or host
port = b or port
elif isinstance(addr, int):
port = addr
else:
raise ValueError('bad value')
if port is not None:
port = int(port)
return Address(proto, host, port) | ['def', 'parse_addr', '(', 'addr', ',', '*', ',', 'proto', '=', 'None', ',', 'host', '=', 'None', ')', ':', 'port', '=', 'None', 'if', 'isinstance', '(', 'addr', ',', 'Address', ')', ':', 'return', 'addr', 'elif', 'isinstance', '(', 'addr', ',', 'str', ')', ':', 'if', 'addr', '.', 'startswith', '(', "'http://'", ')', ':', 'proto', ',', 'addr', '=', "'http'", ',', 'addr', '[', '7', ':', ']', 'if', 'addr', '.', 'startswith', '(', "'udp://'", ')', ':', 'proto', ',', 'addr', '=', "'udp'", ',', 'addr', '[', '6', ':', ']', 'elif', 'addr', '.', 'startswith', '(', "'tcp://'", ')', ':', 'proto', ',', 'addr', '=', "'tcp'", ',', 'addr', '[', '6', ':', ']', 'elif', 'addr', '.', 'startswith', '(', "'unix://'", ')', ':', 'proto', ',', 'addr', '=', "'unix'", ',', 'addr', '[', '7', ':', ']', 'a', ',', '_', ',', 'b', '=', 'addr', '.', 'partition', '(', "':'", ')', 'host', '=', 'a', 'or', 'host', 'port', '=', 'b', 'or', 'port', 'elif', 'isinstance', '(', 'addr', ',', '(', 'tuple', ',', 'list', ')', ')', ':', '# list is not good', 'a', ',', 'b', '=', 'addr', 'host', '=', 'a', 'or', 'host', 'port', '=', 'b', 'or', 'port', 'elif', 'isinstance', '(', 'addr', ',', 'int', ')', ':', 'port', '=', 'addr', 'else', ':', 'raise', 'ValueError', '(', "'bad value'", ')', 'if', 'port', 'is', 'not', 'None', ':', 'port', '=', 'int', '(', 'port', ')', 'return', 'Address', '(', 'proto', ',', 'host', ',', 'port', ')'] | Parses an address
Returns:
Address: the parsed address | ['Parses', 'an', 'address'] | train | https://github.com/johnnoone/aioconsul/blob/02f7a529d7dc2e49bed942111067aa5faf320e90/aioconsul/common/addr.py#L12-L46 |
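To make the branches above easier to follow, here is a trimmed, runnable trace of the same parsing logic with a stand-in `Address` named tuple. It drops the `Address` pass-through branch and the original's `http://`-then-`udp://` quirk, and the example addresses are placeholders.

```python
from collections import namedtuple

Address = namedtuple("Address", "proto host port")

def parse_addr(addr, *, proto=None, host=None):
    port = None
    if isinstance(addr, str):
        for prefix in ("http://", "udp://", "tcp://", "unix://"):
            if addr.startswith(prefix):
                proto, addr = prefix[:-3], addr[len(prefix):]
                break
        host_part, _, port_part = addr.partition(":")
        host = host_part or host
        port = port_part or port
    elif isinstance(addr, (tuple, list)):
        host, port = addr[0] or host, addr[1] or port
    elif isinstance(addr, int):
        port = addr
    else:
        raise ValueError("bad value")
    return Address(proto, host, int(port) if port is not None else None)

print(parse_addr("tcp://localhost:8500"))  # Address(proto='tcp', host='localhost', port=8500)
print(parse_addr(8500, host="127.0.0.1"))  # Address(proto=None, host='127.0.0.1', port=8500)
```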
4,207 | JarryShaw/PyPCAPKit | src/protocols/transport/tcp.py | TCP._read_mptcp_dss | def _read_mptcp_dss(self, bits, size, kind):
"""Read Data Sequence Signal (Data ACK and Data Sequence Mapping) option.
Positional arguments:
* bits - str, 4-bit data
* size - int, length of option
* kind - int, 30 (Multipath TCP)
Returns:
* dict -- extracted Data Sequence Signal (DSS) option
Structure of DSS [RFC 6824]:
1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+---------------+---------------+-------+----------------------+
| Kind | Length |Subtype| (reserved) |F|m|M|a|A|
+---------------+---------------+-------+----------------------+
| Data ACK (4 or 8 octets, depending on flags) |
+--------------------------------------------------------------+
| Data sequence number (4 or 8 octets, depending on flags) |
+--------------------------------------------------------------+
| Subflow Sequence Number (4 octets) |
+-------------------------------+------------------------------+
| Data-Level Length (2 octets) | Checksum (2 octets) |
+-------------------------------+------------------------------+
1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+--------------------------------------------------------------+
| |
| Data Sequence Number (8 octets) |
| |
+--------------------------------------------------------------+
| Subflow Sequence Number (4 octets) |
+-------------------------------+------------------------------+
| Data-Level Length (2 octets) | Zeros (2 octets) |
+-------------------------------+------------------------------+
Octets Bits Name Description
0 0 tcp.mp.kind Kind (30)
1 8 tcp.mp.length Length
2 16 tcp.mp.subtype Subtype (2)
2 20 - Reserved (must be zero)
3 27 tcp.mp.dss.flags.fin DATA_FIN (F)
3 28 tcp.mp.dss.flags.dsn_len DSN Length (m)
3 29 tcp.mp.dss.flags.data_pre DSN, SSN, Data-Level Length, CHKSUM Present (M)
3 30 tcp.mp.dss.flags.ack_len ACK Length (a)
3 31 tcp.mp.dss.flags.ack_pre Data ACK Present (A)
4 32 tcp.mp.dss.ack Data ACK (4/8 octets)
8-12 64-96 tcp.mp.dss.dsn DSN (4/8 octets)
12-20 48-160 tcp.mp.dss.ssn Subflow Sequence Number
16-24 128-192 tcp.mp.dss.dl_len Data-Level Length
18-26 144-208 tcp.mp.dss.checksum Checksum
"""
bits = self._read_binary(1)
mflg = 8 if int(bits[4]) else 4
Mflg = True if int(bits[5]) else False
aflg = 8 if int(bits[6]) else 4
Aflg = True if int(bits[7]) else False
ack_ = self._read_fileng(aflg) if Aflg else None
dsn_ = self._read_unpack(mflg) if Mflg else None
ssn_ = self._read_unpack(4) if Mflg else None
dll_ = self._read_unpack(2) if Mflg else None
chk_ = self._read_fileng(2) if Mflg else None
data = dict(
kind=kind,
length=size + 1,
subtype='DSS',
dss=dict(
flags=dict(
fin=True if int(bits[3]) else False,
dsn_len=mflg,
data_pre=Mflg,
ack_len=aflg,
ack_pre=Aflg,
),
ack=ack_,
dsn=dsn_,
ssn=ssn_,
dl_len=dll_,
checksum=chk_,
),
)
return data | python | def _read_mptcp_dss(self, bits, size, kind):
"""Read Data Sequence Signal (Data ACK and Data Sequence Mapping) option.
Positional arguments:
* bits - str, 4-bit data
* size - int, length of option
* kind - int, 30 (Multipath TCP)
Returns:
* dict -- extracted Data Sequence Signal (DSS) option
Structure of DSS [RFC 6824]:
1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+---------------+---------------+-------+----------------------+
| Kind | Length |Subtype| (reserved) |F|m|M|a|A|
+---------------+---------------+-------+----------------------+
| Data ACK (4 or 8 octets, depending on flags) |
+--------------------------------------------------------------+
| Data sequence number (4 or 8 octets, depending on flags) |
+--------------------------------------------------------------+
| Subflow Sequence Number (4 octets) |
+-------------------------------+------------------------------+
| Data-Level Length (2 octets) | Checksum (2 octets) |
+-------------------------------+------------------------------+
1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+--------------------------------------------------------------+
| |
| Data Sequence Number (8 octets) |
| |
+--------------------------------------------------------------+
| Subflow Sequence Number (4 octets) |
+-------------------------------+------------------------------+
| Data-Level Length (2 octets) | Zeros (2 octets) |
+-------------------------------+------------------------------+
Octets Bits Name Description
0 0 tcp.mp.kind Kind (30)
1 8 tcp.mp.length Length
2 16 tcp.mp.subtype Subtype (2)
2 20 - Reserved (must be zero)
3 27 tcp.mp.dss.flags.fin DATA_FIN (F)
3 28 tcp.mp.dss.flags.dsn_len DSN Length (m)
3 29 tcp.mp.dss.flags.data_pre DSN, SSN, Data-Level Length, CHKSUM Present (M)
3 30 tcp.mp.dss.flags.ack_len ACK Length (a)
3 31 tcp.mp.dss.flags.ack_pre Data ACK Present (A)
4 32 tcp.mp.dss.ack Data ACK (4/8 octets)
8-12 64-96 tcp.mp.dss.dsn DSN (4/8 octets)
12-20 48-160 tcp.mp.dss.ssn Subflow Sequence Number
16-24 128-192 tcp.mp.dss.dl_len Data-Level Length
18-26 144-208 tcp.mp.dss.checksum Checksum
"""
bits = self._read_binary(1)
mflg = 8 if int(bits[4]) else 4
Mflg = True if int(bits[5]) else False
aflg = 8 if int(bits[6]) else 4
Aflg = True if int(bits[7]) else False
ack_ = self._read_fileng(aflg) if Aflg else None
dsn_ = self._read_unpack(mflg) if Mflg else None
ssn_ = self._read_unpack(4) if Mflg else None
dll_ = self._read_unpack(2) if Mflg else None
chk_ = self._read_fileng(2) if Mflg else None
data = dict(
kind=kind,
length=size + 1,
subtype='DSS',
dss=dict(
flags=dict(
fin=True if int(bits[3]) else False,
dsn_len=mflg,
data_pre=Mflg,
ack_len=aflg,
ack_pre=Aflg,
),
ack=ack_,
dsn=dsn_,
ssn=ssn_,
dl_len=dll_,
checksum=chk_,
),
)
return data | ['def', '_read_mptcp_dss', '(', 'self', ',', 'bits', ',', 'size', ',', 'kind', ')', ':', 'bits', '=', 'self', '.', '_read_binary', '(', '1', ')', 'mflg', '=', '8', 'if', 'int', '(', 'bits', '[', '4', ']', ')', 'else', '4', 'Mflg', '=', 'True', 'if', 'int', '(', 'bits', '[', '5', ']', ')', 'else', 'False', 'aflg', '=', '8', 'if', 'int', '(', 'bits', '[', '6', ']', ')', 'else', '4', 'Aflg', '=', 'True', 'if', 'int', '(', 'bits', '[', '7', ']', ')', 'else', 'False', 'ack_', '=', 'self', '.', '_read_fileng', '(', 'aflg', ')', 'if', 'Aflg', 'else', 'None', 'dsn_', '=', 'self', '.', '_read_unpack', '(', 'mflg', ')', 'if', 'Mflg', 'else', 'None', 'ssn_', '=', 'self', '.', '_read_unpack', '(', '4', ')', 'if', 'Mflg', 'else', 'None', 'dll_', '=', 'self', '.', '_read_unpack', '(', '2', ')', 'if', 'Mflg', 'else', 'None', 'chk_', '=', 'self', '.', '_read_fileng', '(', '2', ')', 'if', 'Mflg', 'else', 'None', 'data', '=', 'dict', '(', 'kind', '=', 'kind', ',', 'length', '=', 'size', '+', '1', ',', 'subtype', '=', "'DSS'", ',', 'dss', '=', 'dict', '(', 'flags', '=', 'dict', '(', 'fin', '=', 'True', 'if', 'int', '(', 'bits', '[', '3', ']', ')', 'else', 'False', ',', 'dsn_len', '=', 'mflg', ',', 'data_pre', '=', 'Mflg', ',', 'ack_len', '=', 'aflg', ',', 'ack_pre', '=', 'Aflg', ',', ')', ',', 'ack', '=', 'ack_', ',', 'dsn', '=', 'dsn_', ',', 'ssn', '=', 'ssn_', ',', 'dl_len', '=', 'dll_', ',', 'checksum', '=', 'chk_', ',', ')', ',', ')', 'return', 'data'] | Read Data Sequence Signal (Data ACK and Data Sequence Mapping) option.
Positional arguments:
* bits - str, 4-bit data
* size - int, length of option
* kind - int, 30 (Multipath TCP)
Returns:
* dict -- extracted Data Sequence Signal (DSS) option
Structure of DSS [RFC 6824]:
1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+---------------+---------------+-------+----------------------+
| Kind | Length |Subtype| (reserved) |F|m|M|a|A|
+---------------+---------------+-------+----------------------+
| Data ACK (4 or 8 octets, depending on flags) |
+--------------------------------------------------------------+
| Data sequence number (4 or 8 octets, depending on flags) |
+--------------------------------------------------------------+
| Subflow Sequence Number (4 octets) |
+-------------------------------+------------------------------+
| Data-Level Length (2 octets) | Checksum (2 octets) |
+-------------------------------+------------------------------+
1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+--------------------------------------------------------------+
| |
| Data Sequence Number (8 octets) |
| |
+--------------------------------------------------------------+
| Subflow Sequence Number (4 octets) |
+-------------------------------+------------------------------+
| Data-Level Length (2 octets) | Zeros (2 octets) |
+-------------------------------+------------------------------+
Octets Bits Name Description
0 0 tcp.mp.kind Kind (30)
1 8 tcp.mp.length Length
2 16 tcp.mp.subtype Subtype (2)
2 20 - Reserved (must be zero)
3 27 tcp.mp.dss.flags.fin DATA_FIN (F)
3 28 tcp.mp.dss.flags.dsn_len DSN Length (m)
3 29 tcp.mp.dss.flags.data_pre DSN, SSN, Data-Level Length, CHKSUM Present (M)
3 30 tcp.mp.dss.flags.ack_len ACK Length (a)
3 31 tcp.mp.dss.flags.ack_pre Data ACK Present (A)
4 32 tcp.mp.dss.ack Data ACK (4/8 octets)
8-12 64-96 tcp.mp.dss.dsn DSN (4/8 octets)
12-20 48-160 tcp.mp.dss.ssn Subflow Sequence Number
16-24 128-192 tcp.mp.dss.dl_len Data-Level Length
18-26 144-208 tcp.mp.dss.checksum Checksum | ['Read', 'Data', 'Sequence', 'Signal', '(', 'Data', 'ACK', 'and', 'Data', 'Sequence', 'Mapping', ')', 'option', '.'] | train | https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/transport/tcp.py#L986-L1072 |
4,208 | openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | DfaServer.set_static_ip_address | def set_static_ip_address(self, payload):
"""Set static ip address for a VM."""
# This request is received from CLI for setting ip address of an
# instance.
macaddr = payload.get('mac')
ipaddr = payload.get('ip')
# Find the entry associated with the mac in the database.
req = dict(mac=macaddr)
instances = self.get_vms_for_this_req(**req)
for vm in instances:
LOG.info('Updating IP address: %(ip)s %(mac)s.',
{'ip': ipaddr, 'mac': macaddr})
# Send request to update the rule.
try:
rule_info = dict(ip=ipaddr, mac=macaddr,
port=vm.port_id,
status='up')
self.neutron_event.update_ip_rule(str(vm.host),
str(rule_info))
except (rpc.MessagingTimeout, rpc.RPCException,
rpc.RemoteError):
LOG.error("RPC error: Failed to update rules.")
else:
# Update the database.
params = dict(columns=dict(ip=ipaddr))
self.update_vm_db(vm.port_id, **params)
# Send update to the agent.
vm_info = dict(status=vm.status, vm_mac=vm.mac,
segmentation_id=vm.segmentation_id,
host=vm.host, port_uuid=vm.port_id,
net_uuid=vm.network_id,
oui=dict(ip_addr=ipaddr,
vm_name=vm.name,
vm_uuid=vm.instance_id,
gw_mac=vm.gw_mac,
fwd_mod=vm.fwd_mod,
oui_id='cisco'))
try:
self.neutron_event.send_vm_info(vm.host,
str(vm_info))
except (rpc.MessagingTimeout, rpc.RPCException,
rpc.RemoteError):
LOG.error('Failed to send VM info to agent.') | python | def set_static_ip_address(self, payload):
"""Set static ip address for a VM."""
# This request is received from CLI for setting ip address of an
# instance.
macaddr = payload.get('mac')
ipaddr = payload.get('ip')
# Find the entry associated with the mac in the database.
req = dict(mac=macaddr)
instances = self.get_vms_for_this_req(**req)
for vm in instances:
LOG.info('Updating IP address: %(ip)s %(mac)s.',
{'ip': ipaddr, 'mac': macaddr})
# Send request to update the rule.
try:
rule_info = dict(ip=ipaddr, mac=macaddr,
port=vm.port_id,
status='up')
self.neutron_event.update_ip_rule(str(vm.host),
str(rule_info))
except (rpc.MessagingTimeout, rpc.RPCException,
rpc.RemoteError):
LOG.error("RPC error: Failed to update rules.")
else:
# Update the database.
params = dict(columns=dict(ip=ipaddr))
self.update_vm_db(vm.port_id, **params)
# Send update to the agent.
vm_info = dict(status=vm.status, vm_mac=vm.mac,
segmentation_id=vm.segmentation_id,
host=vm.host, port_uuid=vm.port_id,
net_uuid=vm.network_id,
oui=dict(ip_addr=ipaddr,
vm_name=vm.name,
vm_uuid=vm.instance_id,
gw_mac=vm.gw_mac,
fwd_mod=vm.fwd_mod,
oui_id='cisco'))
try:
self.neutron_event.send_vm_info(vm.host,
str(vm_info))
except (rpc.MessagingTimeout, rpc.RPCException,
rpc.RemoteError):
LOG.error('Failed to send VM info to agent.') | ['def', 'set_static_ip_address', '(', 'self', ',', 'payload', ')', ':', '# This request is received from CLI for setting ip address of an', '# instance.', 'macaddr', '=', 'payload', '.', 'get', '(', "'mac'", ')', 'ipaddr', '=', 'payload', '.', 'get', '(', "'ip'", ')', '# Find the entry associated with the mac in the database.', 'req', '=', 'dict', '(', 'mac', '=', 'macaddr', ')', 'instances', '=', 'self', '.', 'get_vms_for_this_req', '(', '*', '*', 'req', ')', 'for', 'vm', 'in', 'instances', ':', 'LOG', '.', 'info', '(', "'Updating IP address: %(ip)s %(mac)s.'", ',', '{', "'ip'", ':', 'ipaddr', ',', "'mac'", ':', 'macaddr', '}', ')', '# Send request to update the rule.', 'try', ':', 'rule_info', '=', 'dict', '(', 'ip', '=', 'ipaddr', ',', 'mac', '=', 'macaddr', ',', 'port', '=', 'vm', '.', 'port_id', ',', 'status', '=', "'up'", ')', 'self', '.', 'neutron_event', '.', 'update_ip_rule', '(', 'str', '(', 'vm', '.', 'host', ')', ',', 'str', '(', 'rule_info', ')', ')', 'except', '(', 'rpc', '.', 'MessagingTimeout', ',', 'rpc', '.', 'RPCException', ',', 'rpc', '.', 'RemoteError', ')', ':', 'LOG', '.', 'error', '(', '"RPC error: Failed to update rules."', ')', 'else', ':', '# Update the database.', 'params', '=', 'dict', '(', 'columns', '=', 'dict', '(', 'ip', '=', 'ipaddr', ')', ')', 'self', '.', 'update_vm_db', '(', 'vm', '.', 'port_id', ',', '*', '*', 'params', ')', '# Send update to the agent.', 'vm_info', '=', 'dict', '(', 'status', '=', 'vm', '.', 'status', ',', 'vm_mac', '=', 'vm', '.', 'mac', ',', 'segmentation_id', '=', 'vm', '.', 'segmentation_id', ',', 'host', '=', 'vm', '.', 'host', ',', 'port_uuid', '=', 'vm', '.', 'port_id', ',', 'net_uuid', '=', 'vm', '.', 'network_id', ',', 'oui', '=', 'dict', '(', 'ip_addr', '=', 'ipaddr', ',', 'vm_name', '=', 'vm', '.', 'name', ',', 'vm_uuid', '=', 'vm', '.', 'instance_id', ',', 'gw_mac', '=', 'vm', '.', 'gw_mac', ',', 'fwd_mod', '=', 'vm', '.', 'fwd_mod', ',', 'oui_id', '=', "'cisco'", ')', ')', 'try', ':', 'self', '.', 'neutron_event', '.', 'send_vm_info', '(', 'vm', '.', 'host', ',', 'str', '(', 'vm_info', ')', ')', 'except', '(', 'rpc', '.', 'MessagingTimeout', ',', 'rpc', '.', 'RPCException', ',', 'rpc', '.', 'RemoteError', ')', ':', 'LOG', '.', 'error', '(', "'Failed to send VM info to agent.'", ')'] | Set static ip address for a VM. | ['Set', 'static', 'ip', 'address', 'for', 'a', 'VM', '.'] | train | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L1510-L1555 |
4,209 | manns/pyspread | pyspread/src/lib/vlc.py | libvlc_set_user_agent | def libvlc_set_user_agent(p_instance, name, http):
'''Sets the application name. LibVLC passes this as the user agent string
when a protocol requires it.
@param p_instance: LibVLC instance.
@param name: human-readable application name, e.g. "FooBar player 1.2.3".
@param http: HTTP User Agent, e.g. "FooBar/1.2.3 Python/2.6.0".
@version: LibVLC 1.1.1 or later.
'''
f = _Cfunctions.get('libvlc_set_user_agent', None) or \
_Cfunction('libvlc_set_user_agent', ((1,), (1,), (1,),), None,
None, Instance, ctypes.c_char_p, ctypes.c_char_p)
return f(p_instance, name, http) | python | def libvlc_set_user_agent(p_instance, name, http):
'''Sets the application name. LibVLC passes this as the user agent string
when a protocol requires it.
@param p_instance: LibVLC instance.
@param name: human-readable application name, e.g. "FooBar player 1.2.3".
@param http: HTTP User Agent, e.g. "FooBar/1.2.3 Python/2.6.0".
@version: LibVLC 1.1.1 or later.
'''
f = _Cfunctions.get('libvlc_set_user_agent', None) or \
_Cfunction('libvlc_set_user_agent', ((1,), (1,), (1,),), None,
None, Instance, ctypes.c_char_p, ctypes.c_char_p)
return f(p_instance, name, http) | ['def', 'libvlc_set_user_agent', '(', 'p_instance', ',', 'name', ',', 'http', ')', ':', 'f', '=', '_Cfunctions', '.', 'get', '(', "'libvlc_set_user_agent'", ',', 'None', ')', 'or', '_Cfunction', '(', "'libvlc_set_user_agent'", ',', '(', '(', '1', ',', ')', ',', '(', '1', ',', ')', ',', '(', '1', ',', ')', ',', ')', ',', 'None', ',', 'None', ',', 'Instance', ',', 'ctypes', '.', 'c_char_p', ',', 'ctypes', '.', 'c_char_p', ')', 'return', 'f', '(', 'p_instance', ',', 'name', ',', 'http', ')'] | Sets the application name. LibVLC passes this as the user agent string
when a protocol requires it.
@param p_instance: LibVLC instance.
@param name: human-readable application name, e.g. "FooBar player 1.2.3".
@param http: HTTP User Agent, e.g. "FooBar/1.2.3 Python/2.6.0".
@version: LibVLC 1.1.1 or later. | ['Sets', 'the', 'application', 'name', '.', 'LibVLC', 'passes', 'this', 'as', 'the', 'user', 'agent', 'string', 'when', 'a', 'protocol', 'requires', 'it', '.'] | train | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L3838-L3849 |
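A minimal usage sketch, assuming the generated bindings are importable as the usual `vlc` module (pyspread vendors this file; the standalone python-vlc package exposes the same names). The application name and user-agent strings are simply the docstring's examples.

```python
import vlc

instance = vlc.Instance()
vlc.libvlc_set_user_agent(instance,
                          b"FooBar player 1.2.3",
                          b"FooBar/1.2.3 Python/2.6.0")
```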
4,210 | tensorflow/tensor2tensor | tensor2tensor/layers/modalities.py | ctc_symbol_loss | def ctc_symbol_loss(top_out, targets, model_hparams, vocab_size, weight_fn):
"""Compute the CTC loss."""
del model_hparams, vocab_size # unused arg
logits = top_out
with tf.name_scope("ctc_loss", values=[logits, targets]):
# For CTC we assume targets are 1d, [batch, length, 1, 1] here.
targets_shape = targets.get_shape().as_list()
assert len(targets_shape) == 4
assert targets_shape[2] == 1
assert targets_shape[3] == 1
targets = tf.squeeze(targets, axis=[2, 3])
logits = tf.squeeze(logits, axis=[2, 3])
targets_mask = 1 - tf.to_int32(tf.equal(targets, 0))
targets_lengths = tf.reduce_sum(targets_mask, axis=1)
sparse_targets = tf.keras.backend.ctc_label_dense_to_sparse(
targets, targets_lengths)
xent = tf.nn.ctc_loss(
sparse_targets,
logits,
targets_lengths,
time_major=False,
preprocess_collapse_repeated=False,
ctc_merge_repeated=False)
weights = weight_fn(targets)
return tf.reduce_sum(xent), tf.reduce_sum(weights) | python | def ctc_symbol_loss(top_out, targets, model_hparams, vocab_size, weight_fn):
"""Compute the CTC loss."""
del model_hparams, vocab_size # unused arg
logits = top_out
with tf.name_scope("ctc_loss", values=[logits, targets]):
# For CTC we assume targets are 1d, [batch, length, 1, 1] here.
targets_shape = targets.get_shape().as_list()
assert len(targets_shape) == 4
assert targets_shape[2] == 1
assert targets_shape[3] == 1
targets = tf.squeeze(targets, axis=[2, 3])
logits = tf.squeeze(logits, axis=[2, 3])
targets_mask = 1 - tf.to_int32(tf.equal(targets, 0))
targets_lengths = tf.reduce_sum(targets_mask, axis=1)
sparse_targets = tf.keras.backend.ctc_label_dense_to_sparse(
targets, targets_lengths)
xent = tf.nn.ctc_loss(
sparse_targets,
logits,
targets_lengths,
time_major=False,
preprocess_collapse_repeated=False,
ctc_merge_repeated=False)
weights = weight_fn(targets)
return tf.reduce_sum(xent), tf.reduce_sum(weights) | ['def', 'ctc_symbol_loss', '(', 'top_out', ',', 'targets', ',', 'model_hparams', ',', 'vocab_size', ',', 'weight_fn', ')', ':', 'del', 'model_hparams', ',', 'vocab_size', '# unused arg', 'logits', '=', 'top_out', 'with', 'tf', '.', 'name_scope', '(', '"ctc_loss"', ',', 'values', '=', '[', 'logits', ',', 'targets', ']', ')', ':', '# For CTC we assume targets are 1d, [batch, length, 1, 1] here.', 'targets_shape', '=', 'targets', '.', 'get_shape', '(', ')', '.', 'as_list', '(', ')', 'assert', 'len', '(', 'targets_shape', ')', '==', '4', 'assert', 'targets_shape', '[', '2', ']', '==', '1', 'assert', 'targets_shape', '[', '3', ']', '==', '1', 'targets', '=', 'tf', '.', 'squeeze', '(', 'targets', ',', 'axis', '=', '[', '2', ',', '3', ']', ')', 'logits', '=', 'tf', '.', 'squeeze', '(', 'logits', ',', 'axis', '=', '[', '2', ',', '3', ']', ')', 'targets_mask', '=', '1', '-', 'tf', '.', 'to_int32', '(', 'tf', '.', 'equal', '(', 'targets', ',', '0', ')', ')', 'targets_lengths', '=', 'tf', '.', 'reduce_sum', '(', 'targets_mask', ',', 'axis', '=', '1', ')', 'sparse_targets', '=', 'tf', '.', 'keras', '.', 'backend', '.', 'ctc_label_dense_to_sparse', '(', 'targets', ',', 'targets_lengths', ')', 'xent', '=', 'tf', '.', 'nn', '.', 'ctc_loss', '(', 'sparse_targets', ',', 'logits', ',', 'targets_lengths', ',', 'time_major', '=', 'False', ',', 'preprocess_collapse_repeated', '=', 'False', ',', 'ctc_merge_repeated', '=', 'False', ')', 'weights', '=', 'weight_fn', '(', 'targets', ')', 'return', 'tf', '.', 'reduce_sum', '(', 'xent', ')', ',', 'tf', '.', 'reduce_sum', '(', 'weights', ')'] | Compute the CTC loss. | ['Compute', 'the', 'CTC', 'loss', '.'] | train | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/modalities.py#L638-L662 |
4,211 | saltstack/salt | salt/states/boto_s3_bucket.py | _compare_replication | def _compare_replication(current, desired, region, key, keyid, profile):
'''
Replication accepts a non-ARN role name, but always returns an ARN
'''
if desired is not None and desired.get('Role'):
desired = copy.deepcopy(desired)
desired['Role'] = _get_role_arn(desired['Role'],
region=region, key=key, keyid=keyid, profile=profile)
return __utils__['boto3.json_objs_equal'](current, desired) | python | def _compare_replication(current, desired, region, key, keyid, profile):
'''
Replication accepts a non-ARN role name, but always returns an ARN
'''
if desired is not None and desired.get('Role'):
desired = copy.deepcopy(desired)
desired['Role'] = _get_role_arn(desired['Role'],
region=region, key=key, keyid=keyid, profile=profile)
return __utils__['boto3.json_objs_equal'](current, desired) | ['def', '_compare_replication', '(', 'current', ',', 'desired', ',', 'region', ',', 'key', ',', 'keyid', ',', 'profile', ')', ':', 'if', 'desired', 'is', 'not', 'None', 'and', 'desired', '.', 'get', '(', "'Role'", ')', ':', 'desired', '=', 'copy', '.', 'deepcopy', '(', 'desired', ')', 'desired', '[', "'Role'", ']', '=', '_get_role_arn', '(', 'desired', '[', "'Role'", ']', ',', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', 'return', '__utils__', '[', "'boto3.json_objs_equal'", ']', '(', 'current', ',', 'desired', ')'] | Replication accepts a non-ARN role name, but always returns an ARN | ['Replication', 'accepts', 'a', 'non', '-', 'ARN', 'role', 'name', 'but', 'always', 'returns', 'an', 'ARN'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_s3_bucket.py#L326-L334 |
4,212 | tcalmant/ipopo | samples/run_remote.py | InstallUtils.transport_jsonrpc | def transport_jsonrpc(self):
"""
Installs the JSON-RPC transport bundles and instantiates components
"""
# Install the bundle
self.context.install_bundle("pelix.remote.json_rpc").start()
with use_waiting_list(self.context) as ipopo:
# Instantiate the discovery
ipopo.add(
rs.FACTORY_TRANSPORT_JSONRPC_EXPORTER, "pelix-jsonrpc-exporter"
)
ipopo.add(
rs.FACTORY_TRANSPORT_JSONRPC_IMPORTER, "pelix-jsonrpc-importer"
) | python | def transport_jsonrpc(self):
"""
Installs the JSON-RPC transport bundles and instantiates components
"""
# Install the bundle
self.context.install_bundle("pelix.remote.json_rpc").start()
with use_waiting_list(self.context) as ipopo:
# Instantiate the discovery
ipopo.add(
rs.FACTORY_TRANSPORT_JSONRPC_EXPORTER, "pelix-jsonrpc-exporter"
)
ipopo.add(
rs.FACTORY_TRANSPORT_JSONRPC_IMPORTER, "pelix-jsonrpc-importer"
) | ['def', 'transport_jsonrpc', '(', 'self', ')', ':', '# Install the bundle', 'self', '.', 'context', '.', 'install_bundle', '(', '"pelix.remote.json_rpc"', ')', '.', 'start', '(', ')', 'with', 'use_waiting_list', '(', 'self', '.', 'context', ')', 'as', 'ipopo', ':', '# Instantiate the discovery', 'ipopo', '.', 'add', '(', 'rs', '.', 'FACTORY_TRANSPORT_JSONRPC_EXPORTER', ',', '"pelix-jsonrpc-exporter"', ')', 'ipopo', '.', 'add', '(', 'rs', '.', 'FACTORY_TRANSPORT_JSONRPC_IMPORTER', ',', '"pelix-jsonrpc-importer"', ')'] | Installs the JSON-RPC transport bundles and instantiates components | ['Installs', 'the', 'JSON', '-', 'RPC', 'transport', 'bundles', 'and', 'instantiates', 'components'] | train | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/samples/run_remote.py#L172-L186 |
4,213 | thefactory/marathon-python | marathon/client.py | MarathonClient.list_tasks | def list_tasks(self, app_id=None, **kwargs):
"""List running tasks, optionally filtered by app_id.
:param str app_id: if passed, only show tasks for this application
:param kwargs: arbitrary search filters
:returns: list of tasks
:rtype: list[:class:`marathon.models.task.MarathonTask`]
"""
response = self._do_request(
'GET', '/v2/apps/%s/tasks' % app_id if app_id else '/v2/tasks')
tasks = self._parse_response(
response, MarathonTask, is_list=True, resource_name='tasks')
[setattr(t, 'app_id', app_id)
for t in tasks if app_id and t.app_id is None]
for k, v in kwargs.items():
tasks = [o for o in tasks if getattr(o, k) == v]
return tasks | python | def list_tasks(self, app_id=None, **kwargs):
"""List running tasks, optionally filtered by app_id.
:param str app_id: if passed, only show tasks for this application
:param kwargs: arbitrary search filters
:returns: list of tasks
:rtype: list[:class:`marathon.models.task.MarathonTask`]
"""
response = self._do_request(
'GET', '/v2/apps/%s/tasks' % app_id if app_id else '/v2/tasks')
tasks = self._parse_response(
response, MarathonTask, is_list=True, resource_name='tasks')
[setattr(t, 'app_id', app_id)
for t in tasks if app_id and t.app_id is None]
for k, v in kwargs.items():
tasks = [o for o in tasks if getattr(o, k) == v]
return tasks | ['def', 'list_tasks', '(', 'self', ',', 'app_id', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'response', '=', 'self', '.', '_do_request', '(', "'GET'", ',', "'/v2/apps/%s/tasks'", '%', 'app_id', 'if', 'app_id', 'else', "'/v2/tasks'", ')', 'tasks', '=', 'self', '.', '_parse_response', '(', 'response', ',', 'MarathonTask', ',', 'is_list', '=', 'True', ',', 'resource_name', '=', "'tasks'", ')', '[', 'setattr', '(', 't', ',', "'app_id'", ',', 'app_id', ')', 'for', 't', 'in', 'tasks', 'if', 'app_id', 'and', 't', '.', 'app_id', 'is', 'None', ']', 'for', 'k', ',', 'v', 'in', 'kwargs', '.', 'items', '(', ')', ':', 'tasks', '=', '[', 'o', 'for', 'o', 'in', 'tasks', 'if', 'getattr', '(', 'o', ',', 'k', ')', '==', 'v', ']', 'return', 'tasks'] | List running tasks, optionally filtered by app_id.
:param str app_id: if passed, only show tasks for this application
:param kwargs: arbitrary search filters
:returns: list of tasks
:rtype: list[:class:`marathon.models.task.MarathonTask`] | ['List', 'running', 'tasks', 'optionally', 'filtered', 'by', 'app_id', '.'] | train | https://github.com/thefactory/marathon-python/blob/592b253aa8edf2475c97ca438ad7b6936652caf2/marathon/client.py#L498-L516 |
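A hedged usage sketch for the method above; the Marathon URL, app id, and host name are placeholders, and `MarathonClient` is assumed importable from the package root as in the project's README.

```python
from marathon import MarathonClient

client = MarathonClient("http://marathon.example.com:8080")

# All tasks for one application, then the same list narrowed by an
# arbitrary attribute filter (any MarathonTask attribute works).
all_tasks = client.list_tasks(app_id="/my-app")
on_one_agent = client.list_tasks(app_id="/my-app", host="agent-1.example.com")
print(len(all_tasks), len(on_one_agent))
```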
4,214 | nickmilon/Hellas | Hellas/Athens.py | dms2dd | def dms2dd(degrees, minutes, seconds, direction):
"""convert degrees, minutes, seconds to dd
:param string direction: one of N S W E
"""
dd = (degrees + minutes/60.0) + (seconds/3600.0) # 60.0 fraction for python 2+ compatibility
return dd * -1 if direction == 'S' or direction == 'W' else dd | python | def dms2dd(degrees, minutes, seconds, direction):
"""convert degrees, minutes, seconds to dd
:param string direction: one of N S W E
"""
dd = (degrees + minutes/60.0) + (seconds/3600.0) # 60.0 fraction for python 2+ compatibility
return dd * -1 if direction == 'S' or direction == 'W' else dd | ['def', 'dms2dd', '(', 'degrees', ',', 'minutes', ',', 'seconds', ',', 'direction', ')', ':', 'dd', '=', '(', 'degrees', '+', 'minutes', '/', '60.0', ')', '+', '(', 'seconds', '/', '3600.0', ')', '# 60.0 fraction for python 2+ compatibility', 'return', 'dd', '*', '-', '1', 'if', 'direction', '==', "'S'", 'or', 'direction', '==', "'W'", 'else', 'dd'] | convert degrees, minutes, seconds to dd
:param string direction: one of N S W E | ['convert', 'degrees', 'minutes', 'seconds', 'to', 'dd', ':', 'param', 'string', 'direction', ':', 'one', 'of', 'N', 'S', 'W', 'E'] | train | https://github.com/nickmilon/Hellas/blob/542e4778692fbec90753942946f20100412ec9ee/Hellas/Athens.py#L83-L88 |
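A quick worked check of the formula above (the function is restated so the snippet runs on its own):

```python
def dms2dd(degrees, minutes, seconds, direction):
    """Convert degrees, minutes, seconds plus a compass direction to decimal degrees."""
    dd = (degrees + minutes / 60.0) + (seconds / 3600.0)
    return dd * -1 if direction == 'S' or direction == 'W' else dd

# 40 deg 26' 46" N  ->  40 + 26/60 + 46/3600 = 40.446111...
assert abs(dms2dd(40, 26, 46, 'N') - 40.446111) < 1e-5
# West and South coordinates come out negative.
assert dms2dd(79, 58, 56, 'W') == -(79 + 58 / 60.0 + 56 / 3600.0)
```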
4,215 | aganezov/bg | bg/breakpoint_graph.py | BreakpointGraph.split_bgedge | def split_bgedge(self, bgedge, guidance=None, sorted_guidance=False,
account_for_colors_multiplicity_in_guidance=True,
key=None):
""" Splits a :class:`bg.edge.BGEdge` in current :class:`BreakpointGraph` most similar to supplied one (if no unique identifier ``key`` is provided) with respect to supplied guidance.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__split_bgedge` method.
:param bgedge: an edge to find most "similar to" among existing edges for a split
:type bgedge: :class:`bg.edge.BGEdge`
:param guidance: a guidance for underlying :class:`bg.multicolor.Multicolor` object to be split
:type guidance: iterable where each entry is iterable with colors entries
:param duplication_splitting: flag (**not** currently implemented) for a splitting of color-based splitting to take into account multiplicity of respective colors
:type duplication_splitting: ``Boolean``
:param key: unique identifier of edge to be split
:type key: any python object. ``int`` is expected
:return: ``None``, performs inplace changes
"""
self.__split_bgedge(bgedge=bgedge, guidance=guidance, sorted_guidance=sorted_guidance,
account_for_colors_multiplicity_in_guidance=account_for_colors_multiplicity_in_guidance,
key=key) | python | def split_bgedge(self, bgedge, guidance=None, sorted_guidance=False,
account_for_colors_multiplicity_in_guidance=True,
key=None):
""" Splits a :class:`bg.edge.BGEdge` in current :class:`BreakpointGraph` most similar to supplied one (if no unique identifier ``key`` is provided) with respect to supplied guidance.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__split_bgedge` method.
:param bgedge: an edge to find most "similar to" among existing edges for a split
:type bgedge: :class:`bg.edge.BGEdge`
:param guidance: a guidance for underlying :class:`bg.multicolor.Multicolor` object to be split
:type guidance: iterable where each entry is iterable with colors entries
:param duplication_splitting: flag (**not** currently implemented) for a splitting of color-based splitting to take into account multiplicity of respective colors
:type duplication_splitting: ``Boolean``
:param key: unique identifier of edge to be split
:type key: any python object. ``int`` is expected
:return: ``None``, performs inplace changes
"""
self.__split_bgedge(bgedge=bgedge, guidance=guidance, sorted_guidance=sorted_guidance,
account_for_colors_multiplicity_in_guidance=account_for_colors_multiplicity_in_guidance,
key=key) | ['def', 'split_bgedge', '(', 'self', ',', 'bgedge', ',', 'guidance', '=', 'None', ',', 'sorted_guidance', '=', 'False', ',', 'account_for_colors_multiplicity_in_guidance', '=', 'True', ',', 'key', '=', 'None', ')', ':', 'self', '.', '__split_bgedge', '(', 'bgedge', '=', 'bgedge', ',', 'guidance', '=', 'guidance', ',', 'sorted_guidance', '=', 'sorted_guidance', ',', 'account_for_colors_multiplicity_in_guidance', '=', 'account_for_colors_multiplicity_in_guidance', ',', 'key', '=', 'key', ')'] | Splits a :class:`bg.edge.BGEdge` in current :class:`BreakpointGraph` most similar to supplied one (if no unique identifier ``key`` is provided) with respect to supplied guidance.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__split_bgedge` method.
:param bgedge: an edge to find most "similar to" among existing edges for a split
:type bgedge: :class:`bg.edge.BGEdge`
:param guidance: a guidance for underlying :class:`bg.multicolor.Multicolor` object to be split
:type guidance: iterable where each entry is iterable with colors entries
:param duplication_splitting: flag (**not** currently implemented) for a splitting of color-based splitting to take into account multiplicity of respective colors
:type duplication_splitting: ``Boolean``
:param key: unique identifier of edge to be split
:type key: any python object. ``int`` is expected
:return: ``None``, performs inplace changes | ['Splits', 'a', ':', 'class', ':', 'bg', '.', 'edge', '.', 'BGEdge', 'in', 'current', ':', 'class', ':', 'BreakpointGraph', 'most', 'similar', 'to', 'supplied', 'one', '(', 'if', 'no', 'unique', 'identifier', 'key', 'is', 'provided', ')', 'with', 'respect', 'to', 'supplied', 'guidance', '.'] | train | https://github.com/aganezov/bg/blob/1ec758193441e49e7b34e0da09571480f4c24455/bg/breakpoint_graph.py#L528-L547 |
4,216 | Erotemic/utool | utool/_internal/py2_syntax_funcs.py | ignores_exc_tb | def ignores_exc_tb(*args, **kwargs):
"""
PYTHON 2 ONLY VERSION -- needs to be in its own file for syntactic reasons
ignore_exc_tb decorates a function and removes both itself
and the function from any exception traceback that occurs.
This is useful to decorate other trivial decorators
which are polluting your stacktrace.
if IGNORE_TRACEBACK is False then this decorator does nothing
(and it should do nothing in production code!)
References:
https://github.com/jcrocholl/pep8/issues/34 # NOQA
http://legacy.python.org/dev/peps/pep-3109/
"""
outer_wrapper = kwargs.get('outer_wrapper', True)
def ignores_exc_tb_closure(func):
if not IGNORE_TRACEBACK:
# if the global enforces that we should not ignore any tracebacks
# then just return the original function without any modification
return func
from utool import util_decor
#@wraps(func)
def wrp_noexectb(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
# Define function to reraise with python 2 syntax
#exc_type, exc_value, exc_traceback = sys.exc_info()
# Code to remove this decorator from traceback
# Remove two levels to remove this one as well
exc_type, exc_value, exc_traceback = sys.exc_info()
try:
exc_traceback = exc_traceback.tb_next
exc_traceback = exc_traceback.tb_next
#exc_traceback = exc_traceback.tb_next
except Exception:
print('too many reraise')
pass
raise exc_type, exc_value, exc_traceback
if outer_wrapper:
wrp_noexectb = util_decor.preserve_sig(wrp_noexectb, func)
return wrp_noexectb
if len(args) == 1:
# called with one arg means its a function call
func = args[0]
return ignores_exc_tb_closure(func)
else:
# called with no args means kwargs as specified
return ignores_exc_tb_closure | python | def ignores_exc_tb(*args, **kwargs):
"""
PYTHON 2 ONLY VERSION -- needs to be in its own file for syntactic reasons
ignore_exc_tb decorates a function and removes both itself
and the function from any exception traceback that occurs.
This is useful to decorate other trivial decorators
which are polluting your stacktrace.
if IGNORE_TRACEBACK is False then this decorator does nothing
(and it should do nothing in production code!)
References:
https://github.com/jcrocholl/pep8/issues/34 # NOQA
http://legacy.python.org/dev/peps/pep-3109/
"""
outer_wrapper = kwargs.get('outer_wrapper', True)
def ignores_exc_tb_closure(func):
if not IGNORE_TRACEBACK:
# if the global enforces that we should not ignore any tracebacks
# then just return the original function without any modification
return func
from utool import util_decor
#@wraps(func)
def wrp_noexectb(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
# Define function to reraise with python 2 syntax
#exc_type, exc_value, exc_traceback = sys.exc_info()
# Code to remove this decorator from traceback
# Remove two levels to remove this one as well
exc_type, exc_value, exc_traceback = sys.exc_info()
try:
exc_traceback = exc_traceback.tb_next
exc_traceback = exc_traceback.tb_next
#exc_traceback = exc_traceback.tb_next
except Exception:
print('too many reraise')
pass
raise exc_type, exc_value, exc_traceback
if outer_wrapper:
wrp_noexectb = util_decor.preserve_sig(wrp_noexectb, func)
return wrp_noexectb
if len(args) == 1:
# called with one arg means its a function call
func = args[0]
return ignores_exc_tb_closure(func)
else:
# called with no args means kwargs as specified
return ignores_exc_tb_closure | ['def', 'ignores_exc_tb', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'outer_wrapper', '=', 'kwargs', '.', 'get', '(', "'outer_wrapper'", ',', 'True', ')', 'def', 'ignores_exc_tb_closure', '(', 'func', ')', ':', 'if', 'not', 'IGNORE_TRACEBACK', ':', '# if the global enforces that we should not ignore anytracebacks', '# then just return the original function without any modifcation', 'return', 'func', 'from', 'utool', 'import', 'util_decor', '#@wraps(func)', 'def', 'wrp_noexectb', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'try', ':', 'return', 'func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'except', 'Exception', ':', '# Define function to reraise with python 2 syntax', '#exc_type, exc_value, exc_traceback = sys.exc_info()', '# Code to remove this decorator from traceback', '# Remove two levels to remove this one as well', 'exc_type', ',', 'exc_value', ',', 'exc_traceback', '=', 'sys', '.', 'exc_info', '(', ')', 'try', ':', 'exc_traceback', '=', 'exc_traceback', '.', 'tb_next', 'exc_traceback', '=', 'exc_traceback', '.', 'tb_next', '#exc_traceback = exc_traceback.tb_next', 'except', 'Exception', ':', 'print', '(', "'too many reraise'", ')', 'pass', 'raise', 'exc_type', ',', 'exc_value', ',', 'exc_traceback', 'if', 'outer_wrapper', ':', 'wrp_noexectb', '=', 'util_decor', '.', 'preserve_sig', '(', 'wrp_noexectb', ',', 'func', ')', 'return', 'wrp_noexectb', 'if', 'len', '(', 'args', ')', '==', '1', ':', '# called with one arg means its a function call', 'func', '=', 'args', '[', '0', ']', 'return', 'ignores_exc_tb_closure', '(', 'func', ')', 'else', ':', '# called with no args means kwargs as specified', 'return', 'ignores_exc_tb_closure'] | PYTHON 2 ONLY VERSION -- needs to be in its own file for syntactic reasons
ignore_exc_tb decorates a function and removes both itself
and the function from any exception traceback that occurs.
This is useful to decorate other trivial decorators
which are polluting your stacktrace.
if IGNORE_TRACEBACK is False then this decorator does nothing
(and it should do nothing in production code!)
References:
https://github.com/jcrocholl/pep8/issues/34 # NOQA
http://legacy.python.org/dev/peps/pep-3109/ | ['PYTHON', '2', 'ONLY', 'VERSION', '--', 'needs', 'to', 'be', 'in', 'its', 'own', 'file', 'for', 'syntactic', 'reasons'] | train | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/_internal/py2_syntax_funcs.py#L10-L61 |
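The reason ignores_exc_tb needs its own Python-2-only module is the statement `raise exc_type, exc_value, exc_traceback`, which is a syntax error on Python 3. A minimal sketch of the same frame-trimming idea for Python 3, using the standard with_traceback call; the wrapper name and the fixed two-frame trim are assumptions for illustration, not part of utool:

import sys
import functools

def ignores_exc_tb_py3(func):
    # Hypothetical Python 3 counterpart: drop two traceback frames, then re-raise.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            _, exc_value, tb = sys.exc_info()
            for _ in range(2):
                if tb.tb_next is not None:
                    tb = tb.tb_next
            raise exc_value.with_traceback(tb)
    return wrapper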
4,217 | blockstack/blockstack-core | blockstack/lib/config.py | get_epoch_namespace_prices | def get_epoch_namespace_prices( block_height, units ):
"""
get the list of namespace prices by block height
"""
assert units in ['BTC', TOKEN_TYPE_STACKS], 'Invalid unit {}'.format(units)
epoch_config = get_epoch_config( block_height )
if units == 'BTC':
return epoch_config['namespace_prices']
else:
return epoch_config['namespace_prices_stacks'] | python | def get_epoch_namespace_prices( block_height, units ):
"""
get the list of namespace prices by block height
"""
assert units in ['BTC', TOKEN_TYPE_STACKS], 'Invalid unit {}'.format(units)
epoch_config = get_epoch_config( block_height )
if units == 'BTC':
return epoch_config['namespace_prices']
else:
return epoch_config['namespace_prices_stacks'] | ['def', 'get_epoch_namespace_prices', '(', 'block_height', ',', 'units', ')', ':', 'assert', 'units', 'in', '[', "'BTC'", ',', 'TOKEN_TYPE_STACKS', ']', ',', "'Invalid unit {}'", '.', 'format', '(', 'units', ')', 'epoch_config', '=', 'get_epoch_config', '(', 'block_height', ')', 'if', 'units', '==', "'BTC'", ':', 'return', 'epoch_config', '[', "'namespace_prices'", ']', 'else', ':', 'return', 'epoch_config', '[', "'namespace_prices_stacks'", ']'] | get the list of namespace prices by block height | ['get', 'the', 'list', 'of', 'namespace', 'prices', 'by', 'block', 'height'] | train | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/config.py#L1062-L1073 |
4,218 | rmst/chi | chi/rl/async_dqn.py | delling_network | def delling_network():
""" Architecture according to Duelling DQN:
https://arxiv.org/abs/1511.06581
"""
@tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005), # TODO: replace with original weight freeze
optimizer=tf.train.RMSPropOptimizer(6.25e-5, .95, .95, .01))
def q_network(x):
x /= 255
x = layers.conv2d(x, 32, 8, 4)
x = layers.conv2d(x, 64, 4, 2)
x = layers.conv2d(x, 64, 3, 1)
x = layers.flatten(x)
xv = layers.fully_connected(x, 512)
val = layers.fully_connected(xv, 1, activation_fn=None)
# val = tf.squeeze(val, 1)
xa = layers.fully_connected(x, 512)
adv = layers.fully_connected(xa, env.action_space.n, activation_fn=None)
q = val + adv - tf.reduce_mean(adv, axis=1, keep_dims=True)
q = tf.identity(q, name='Q')
return q | python | def delling_network():
""" Architecture according to Duelling DQN:
https://arxiv.org/abs/1511.06581
"""
@tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005), # TODO: replace with original weight freeze
optimizer=tf.train.RMSPropOptimizer(6.25e-5, .95, .95, .01))
def q_network(x):
x /= 255
x = layers.conv2d(x, 32, 8, 4)
x = layers.conv2d(x, 64, 4, 2)
x = layers.conv2d(x, 64, 3, 1)
x = layers.flatten(x)
xv = layers.fully_connected(x, 512)
val = layers.fully_connected(xv, 1, activation_fn=None)
# val = tf.squeeze(val, 1)
xa = layers.fully_connected(x, 512)
adv = layers.fully_connected(xa, env.action_space.n, activation_fn=None)
q = val + adv - tf.reduce_mean(adv, axis=1, keep_dims=True)
q = tf.identity(q, name='Q')
return q | ['def', 'delling_network', '(', ')', ':', '@', 'tt', '.', 'model', '(', 'tracker', '=', 'tf', '.', 'train', '.', 'ExponentialMovingAverage', '(', '1', '-', '.0005', ')', ',', '# TODO: replace with original weight freeze', 'optimizer', '=', 'tf', '.', 'train', '.', 'RMSPropOptimizer', '(', '6.25e-5', ',', '.95', ',', '.95', ',', '.01', ')', ')', 'def', 'q_network', '(', 'x', ')', ':', 'x', '/=', '255', 'x', '=', 'layers', '.', 'conv2d', '(', 'x', ',', '32', ',', '8', ',', '4', ')', 'x', '=', 'layers', '.', 'conv2d', '(', 'x', ',', '64', ',', '4', ',', '2', ')', 'x', '=', 'layers', '.', 'conv2d', '(', 'x', ',', '64', ',', '3', ',', '1', ')', 'x', '=', 'layers', '.', 'flatten', '(', 'x', ')', 'xv', '=', 'layers', '.', 'fully_connected', '(', 'x', ',', '512', ')', 'val', '=', 'layers', '.', 'fully_connected', '(', 'xv', ',', '1', ',', 'activation_fn', '=', 'None', ')', '# val = tf.squeeze(val, 1)', 'xa', '=', 'layers', '.', 'fully_connected', '(', 'x', ',', '512', ')', 'adv', '=', 'layers', '.', 'fully_connected', '(', 'xa', ',', 'env', '.', 'action_space', '.', 'n', ',', 'activation_fn', '=', 'None', ')', 'q', '=', 'val', '+', 'adv', '-', 'tf', '.', 'reduce_mean', '(', 'adv', ',', 'axis', '=', '1', ',', 'keep_dims', '=', 'True', ')', 'q', '=', 'tf', '.', 'identity', '(', 'q', ',', 'name', '=', "'Q'", ')', 'return', 'q'] | Architecture according to Duelling DQN:
https://arxiv.org/abs/1511.06581 | ['Architecture', 'according', 'to', 'Duelling', 'DQN', ':', 'https', ':', '//', 'arxiv', '.', 'org', '/', 'abs', '/', '1511', '.', '06581'] | train | https://github.com/rmst/chi/blob/b9205127f3736eb6ebbf6bb2960c4bbb747142b7/chi/rl/async_dqn.py#L218-L241 |
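The heart of the dueling architecture above is the aggregation Q(s, a) = V(s) + A(s, a) - mean_a A(s, a), which keeps the value and advantage streams identifiable. A small NumPy illustration with made-up shapes, independent of the chi/TensorFlow code:

import numpy as np

batch, n_actions = 2, 4
val = np.random.randn(batch, 1)          # state-value stream V(s)
adv = np.random.randn(batch, n_actions)  # advantage stream A(s, a)
q = val + adv - adv.mean(axis=1, keepdims=True)
# For each state, q - val now has zero mean across actions.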
4,219 | yyuu/botornado | boto/ec2/connection.py | EC2Connection.get_all_bundle_tasks | def get_all_bundle_tasks(self, bundle_ids=None, filters=None):
"""
Retrieve current bundling tasks. If no bundle id is specified, all
tasks are retrieved.
:type bundle_ids: list
:param bundle_ids: A list of strings containing identifiers for
previously created bundling tasks.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
"""
params = {}
if bundle_ids:
self.build_list_params(params, bundle_ids, 'BundleId')
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeBundleTasks', params,
[('item', BundleInstanceTask)], verb='POST') | python | def get_all_bundle_tasks(self, bundle_ids=None, filters=None):
"""
Retrieve current bundling tasks. If no bundle id is specified, all
tasks are retrieved.
:type bundle_ids: list
:param bundle_ids: A list of strings containing identifiers for
previously created bundling tasks.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
"""
params = {}
if bundle_ids:
self.build_list_params(params, bundle_ids, 'BundleId')
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeBundleTasks', params,
[('item', BundleInstanceTask)], verb='POST') | ['def', 'get_all_bundle_tasks', '(', 'self', ',', 'bundle_ids', '=', 'None', ',', 'filters', '=', 'None', ')', ':', 'params', '=', '{', '}', 'if', 'bundle_ids', ':', 'self', '.', 'build_list_params', '(', 'params', ',', 'bundle_ids', ',', "'BundleId'", ')', 'if', 'filters', ':', 'self', '.', 'build_filter_params', '(', 'params', ',', 'filters', ')', 'return', 'self', '.', 'get_list', '(', "'DescribeBundleTasks'", ',', 'params', ',', '[', '(', "'item'", ',', 'BundleInstanceTask', ')', ']', ',', 'verb', '=', "'POST'", ')'] | Retrieve current bundling tasks. If no bundle id is specified, all
tasks are retrieved.
:type bundle_ids: list
:param bundle_ids: A list of strings containing identifiers for
previously created bundling tasks.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details. | ['Retrieve', 'current', 'bundling', 'tasks', '.', 'If', 'no', 'bundle', 'id', 'is', 'specified', 'all', 'tasks', 'are', 'retrieved', '.'] | train | https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/ec2/connection.py#L2533-L2560 |
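A hypothetical call to the method above, assuming conn is an already-connected boto EC2Connection; the filter name and the printed attributes are illustrative, not verified against a live account:

# Sketch only: list bundling tasks that are still pending.
tasks = conn.get_all_bundle_tasks(filters={'state': 'pending'})
for task in tasks:
    print(task.id, task.state)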
4,220 | QuantEcon/QuantEcon.py | quantecon/game_theory/vertex_enumeration.py | _get_mixed_actions | def _get_mixed_actions(labeling_bits, equation_tup, trans_recips):
"""
From a labeling for player 0, a tuple of hyperplane equations of the
polar polytopes, and a tuple of the reciprocals of the translations,
return a tuple of the corresponding, normalized mixed actions.
Parameters
----------
labeling_bits : scalar(np.uint64)
Integer with set bits representing a labeling of a mixed action
of player 0.
equation_tup : tuple(ndarray(float, ndim=1))
Tuple of hyperplane equations of the polar polytopes.
trans_recips : tuple(scalar(float))
Tuple of the reciprocals of the translations.
Returns
-------
tuple(ndarray(float, ndim=1))
Tuple of mixed actions.
"""
m, n = equation_tup[0].shape[0] - 1, equation_tup[1].shape[0] - 1
out = np.empty(m+n)
for pl, (start, stop, skip) in enumerate([(0, m, np.uint64(1)),
(m, m+n, np.uint64(0))]):
sum_ = 0.
for i in range(start, stop):
if (labeling_bits & np.uint64(1)) == skip:
out[i] = 0
else:
out[i] = equation_tup[pl][i-start] * trans_recips[pl] - \
equation_tup[pl][-1]
sum_ += out[i]
labeling_bits = labeling_bits >> np.uint64(1)
if sum_ != 0:
out[start:stop] /= sum_
return out[:m], out[m:] | python | def _get_mixed_actions(labeling_bits, equation_tup, trans_recips):
"""
From a labeling for player 0, a tuple of hyperplane equations of the
polar polytopes, and a tuple of the reciprocals of the translations,
return a tuple of the corresponding, normalized mixed actions.
Parameters
----------
labeling_bits : scalar(np.uint64)
Integer with set bits representing a labeling of a mixed action
of player 0.
equation_tup : tuple(ndarray(float, ndim=1))
Tuple of hyperplane equations of the polar polytopes.
trans_recips : tuple(scalar(float))
Tuple of the reciprocals of the translations.
Returns
-------
tuple(ndarray(float, ndim=1))
Tuple of mixed actions.
"""
m, n = equation_tup[0].shape[0] - 1, equation_tup[1].shape[0] - 1
out = np.empty(m+n)
for pl, (start, stop, skip) in enumerate([(0, m, np.uint64(1)),
(m, m+n, np.uint64(0))]):
sum_ = 0.
for i in range(start, stop):
if (labeling_bits & np.uint64(1)) == skip:
out[i] = 0
else:
out[i] = equation_tup[pl][i-start] * trans_recips[pl] - \
equation_tup[pl][-1]
sum_ += out[i]
labeling_bits = labeling_bits >> np.uint64(1)
if sum_ != 0:
out[start:stop] /= sum_
return out[:m], out[m:] | ['def', '_get_mixed_actions', '(', 'labeling_bits', ',', 'equation_tup', ',', 'trans_recips', ')', ':', 'm', ',', 'n', '=', 'equation_tup', '[', '0', ']', '.', 'shape', '[', '0', ']', '-', '1', ',', 'equation_tup', '[', '1', ']', '.', 'shape', '[', '0', ']', '-', '1', 'out', '=', 'np', '.', 'empty', '(', 'm', '+', 'n', ')', 'for', 'pl', ',', '(', 'start', ',', 'stop', ',', 'skip', ')', 'in', 'enumerate', '(', '[', '(', '0', ',', 'm', ',', 'np', '.', 'uint64', '(', '1', ')', ')', ',', '(', 'm', ',', 'm', '+', 'n', ',', 'np', '.', 'uint64', '(', '0', ')', ')', ']', ')', ':', 'sum_', '=', '0.', 'for', 'i', 'in', 'range', '(', 'start', ',', 'stop', ')', ':', 'if', '(', 'labeling_bits', '&', 'np', '.', 'uint64', '(', '1', ')', ')', '==', 'skip', ':', 'out', '[', 'i', ']', '=', '0', 'else', ':', 'out', '[', 'i', ']', '=', 'equation_tup', '[', 'pl', ']', '[', 'i', '-', 'start', ']', '*', 'trans_recips', '[', 'pl', ']', '-', 'equation_tup', '[', 'pl', ']', '[', '-', '1', ']', 'sum_', '+=', 'out', '[', 'i', ']', 'labeling_bits', '=', 'labeling_bits', '>>', 'np', '.', 'uint64', '(', '1', ')', 'if', 'sum_', '!=', '0', ':', 'out', '[', 'start', ':', 'stop', ']', '/=', 'sum_', 'return', 'out', '[', ':', 'm', ']', ',', 'out', '[', 'm', ':', ']'] | From a labeling for player 0, a tuple of hyperplane equations of the
polar polytopes, and a tuple of the reciprocals of the translations,
return a tuple of the corresponding, normalized mixed actions.
Parameters
----------
labeling_bits : scalar(np.uint64)
Integer with set bits representing a labeling of a mixed action
of player 0.
equation_tup : tuple(ndarray(float, ndim=1))
Tuple of hyperplane equations of the polar polytopes.
trans_recips : tuple(scalar(float))
Tuple of the reciprocals of the translations.
Returns
-------
tuple(ndarray(float, ndim=1))
Tuple of mixed actions. | ['From', 'a', 'labeling', 'for', 'player', '0', 'a', 'tuple', 'of', 'hyperplane', 'equations', 'of', 'the', 'polar', 'polytopes', 'and', 'a', 'tuple', 'of', 'the', 'reciprocals', 'of', 'the', 'translations', 'return', 'a', 'tuple', 'of', 'the', 'corresponding', 'normalized', 'mixed', 'actions', '.'] | train | https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/game_theory/vertex_enumeration.py#L294-L335 |
4,221 | inasafe/inasafe | safe/gui/tools/wizard/step_kw00_purpose.py | StepKwPurpose.clear_further_steps | def clear_further_steps(self):
"""Clear all further steps in order to properly calculate the prev step
"""
self.parent.step_kw_hazard_category.lstHazardCategories.clear()
self.parent.step_kw_subcategory.lstSubcategories.clear()
self.parent.step_kw_layermode.lstLayerModes.clear()
self.parent.step_kw_unit.lstUnits.clear()
self.parent.step_kw_field.lstFields.clear()
self.parent.step_kw_classification.lstClassifications.clear()
self.parent.step_kw_threshold.classes.clear()
self.parent.step_kw_multi_classifications.clear()
self.parent.step_kw_inasafe_fields.clear()
self.parent.step_kw_default_inasafe_fields.clear()
self.parent.step_kw_inasafe_raster_default_values.clear()
self.parent.step_kw_fields_mapping.clear()
self.parent.step_kw_multi_classifications.clear() | python | def clear_further_steps(self):
"""Clear all further steps in order to properly calculate the prev step
"""
self.parent.step_kw_hazard_category.lstHazardCategories.clear()
self.parent.step_kw_subcategory.lstSubcategories.clear()
self.parent.step_kw_layermode.lstLayerModes.clear()
self.parent.step_kw_unit.lstUnits.clear()
self.parent.step_kw_field.lstFields.clear()
self.parent.step_kw_classification.lstClassifications.clear()
self.parent.step_kw_threshold.classes.clear()
self.parent.step_kw_multi_classifications.clear()
self.parent.step_kw_inasafe_fields.clear()
self.parent.step_kw_default_inasafe_fields.clear()
self.parent.step_kw_inasafe_raster_default_values.clear()
self.parent.step_kw_fields_mapping.clear()
self.parent.step_kw_multi_classifications.clear() | ['def', 'clear_further_steps', '(', 'self', ')', ':', 'self', '.', 'parent', '.', 'step_kw_hazard_category', '.', 'lstHazardCategories', '.', 'clear', '(', ')', 'self', '.', 'parent', '.', 'step_kw_subcategory', '.', 'lstSubcategories', '.', 'clear', '(', ')', 'self', '.', 'parent', '.', 'step_kw_layermode', '.', 'lstLayerModes', '.', 'clear', '(', ')', 'self', '.', 'parent', '.', 'step_kw_unit', '.', 'lstUnits', '.', 'clear', '(', ')', 'self', '.', 'parent', '.', 'step_kw_field', '.', 'lstFields', '.', 'clear', '(', ')', 'self', '.', 'parent', '.', 'step_kw_classification', '.', 'lstClassifications', '.', 'clear', '(', ')', 'self', '.', 'parent', '.', 'step_kw_threshold', '.', 'classes', '.', 'clear', '(', ')', 'self', '.', 'parent', '.', 'step_kw_multi_classifications', '.', 'clear', '(', ')', 'self', '.', 'parent', '.', 'step_kw_inasafe_fields', '.', 'clear', '(', ')', 'self', '.', 'parent', '.', 'step_kw_default_inasafe_fields', '.', 'clear', '(', ')', 'self', '.', 'parent', '.', 'step_kw_inasafe_raster_default_values', '.', 'clear', '(', ')', 'self', '.', 'parent', '.', 'step_kw_fields_mapping', '.', 'clear', '(', ')', 'self', '.', 'parent', '.', 'step_kw_multi_classifications', '.', 'clear', '(', ')'] | Clear all further steps in order to properly calculate the prev step | ['Clear', 'all', 'further', 'steps', 'in', 'order', 'to', 'properly', 'calculate', 'the', 'prev', 'step'] | train | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_kw00_purpose.py#L99-L116 |
4,222 | testing-cabal/systemfixtures | systemfixtures/filesystem.py | FakeFilesystem.add | def add(self, path):
"""Add a path to the overlay filesytem.
Any filesystem operation involving the this path or any sub-paths
of it will be transparently redirected to temporary root dir.
@path: An absolute path string.
"""
if not path.startswith(os.sep):
raise ValueError("Non-absolute path '{}'".format(path))
path = path.rstrip(os.sep)
while True:
self._paths[path] = None
path, _ = os.path.split(path)
if path == os.sep:
break | python | def add(self, path):
"""Add a path to the overlay filesytem.
Any filesystem operation involving the this path or any sub-paths
of it will be transparently redirected to temporary root dir.
@path: An absolute path string.
"""
if not path.startswith(os.sep):
raise ValueError("Non-absolute path '{}'".format(path))
path = path.rstrip(os.sep)
while True:
self._paths[path] = None
path, _ = os.path.split(path)
if path == os.sep:
break | ['def', 'add', '(', 'self', ',', 'path', ')', ':', 'if', 'not', 'path', '.', 'startswith', '(', 'os', '.', 'sep', ')', ':', 'raise', 'ValueError', '(', '"Non-absolute path \'{}\'"', '.', 'format', '(', 'path', ')', ')', 'path', '=', 'path', '.', 'rstrip', '(', 'os', '.', 'sep', ')', 'while', 'True', ':', 'self', '.', '_paths', '[', 'path', ']', '=', 'None', 'path', ',', '_', '=', 'os', '.', 'path', '.', 'split', '(', 'path', ')', 'if', 'path', '==', 'os', '.', 'sep', ':', 'break'] | Add a path to the overlay filesytem.
Any filesystem operation involving this path or any sub-paths
of it will be transparently redirected to the temporary root dir.
@path: An absolute path string. | ['Add', 'a', 'path', 'to', 'the', 'overlay', 'filesytem', '.'] | train | https://github.com/testing-cabal/systemfixtures/blob/adf1b822bf83dc2a2f6bf7b85b5d8055e5e6ccd4/systemfixtures/filesystem.py#L65-L80 |
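What FakeFilesystem.add records is every prefix of the given path, so later filesystem calls can be matched by prefix. A standalone sketch of that loop, with a plain dict standing in for the fixture's internal _paths mapping and an invented path:

import os

paths = {}
p = '/var/lib/myapp'.rstrip(os.sep)
while True:
    paths[p] = None
    p, _ = os.path.split(p)
    if p == os.sep:
        break
print(sorted(paths))  # ['/var', '/var/lib', '/var/lib/myapp']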
4,223 | gem/oq-engine | openquake/commonlib/calc.py | make_hmap | def make_hmap(pmap, imtls, poes):
"""
Compute the hazard maps associated to the passed probability map.
:param pmap: hazard curves in the form of a ProbabilityMap
:param imtls: DictArray with M intensity measure types
:param poes: P PoEs where to compute the maps
:returns: a ProbabilityMap with size (N, M, P)
"""
M, P = len(imtls), len(poes)
hmap = probability_map.ProbabilityMap.build(M, P, pmap, dtype=F32)
if len(pmap) == 0:
return hmap # empty hazard map
for i, imt in enumerate(imtls):
curves = numpy.array([pmap[sid].array[imtls(imt), 0]
for sid in pmap.sids])
data = compute_hazard_maps(curves, imtls[imt], poes) # array (N, P)
for sid, value in zip(pmap.sids, data):
array = hmap[sid].array
for j, val in enumerate(value):
array[i, j] = val
return hmap | python | def make_hmap(pmap, imtls, poes):
"""
Compute the hazard maps associated to the passed probability map.
:param pmap: hazard curves in the form of a ProbabilityMap
:param imtls: DictArray with M intensity measure types
:param poes: P PoEs where to compute the maps
:returns: a ProbabilityMap with size (N, M, P)
"""
M, P = len(imtls), len(poes)
hmap = probability_map.ProbabilityMap.build(M, P, pmap, dtype=F32)
if len(pmap) == 0:
return hmap # empty hazard map
for i, imt in enumerate(imtls):
curves = numpy.array([pmap[sid].array[imtls(imt), 0]
for sid in pmap.sids])
data = compute_hazard_maps(curves, imtls[imt], poes) # array (N, P)
for sid, value in zip(pmap.sids, data):
array = hmap[sid].array
for j, val in enumerate(value):
array[i, j] = val
return hmap | ['def', 'make_hmap', '(', 'pmap', ',', 'imtls', ',', 'poes', ')', ':', 'M', ',', 'P', '=', 'len', '(', 'imtls', ')', ',', 'len', '(', 'poes', ')', 'hmap', '=', 'probability_map', '.', 'ProbabilityMap', '.', 'build', '(', 'M', ',', 'P', ',', 'pmap', ',', 'dtype', '=', 'F32', ')', 'if', 'len', '(', 'pmap', ')', '==', '0', ':', 'return', 'hmap', '# empty hazard map', 'for', 'i', ',', 'imt', 'in', 'enumerate', '(', 'imtls', ')', ':', 'curves', '=', 'numpy', '.', 'array', '(', '[', 'pmap', '[', 'sid', ']', '.', 'array', '[', 'imtls', '(', 'imt', ')', ',', '0', ']', 'for', 'sid', 'in', 'pmap', '.', 'sids', ']', ')', 'data', '=', 'compute_hazard_maps', '(', 'curves', ',', 'imtls', '[', 'imt', ']', ',', 'poes', ')', '# array (N, P)', 'for', 'sid', ',', 'value', 'in', 'zip', '(', 'pmap', '.', 'sids', ',', 'data', ')', ':', 'array', '=', 'hmap', '[', 'sid', ']', '.', 'array', 'for', 'j', ',', 'val', 'in', 'enumerate', '(', 'value', ')', ':', 'array', '[', 'i', ',', 'j', ']', '=', 'val', 'return', 'hmap'] | Compute the hazard maps associated to the passed probability map.
:param pmap: hazard curves in the form of a ProbabilityMap
:param imtls: DictArray with M intensity measure types
:param poes: P PoEs where to compute the maps
:returns: a ProbabilityMap with size (N, M, P) | ['Compute', 'the', 'hazard', 'maps', 'associated', 'to', 'the', 'passed', 'probability', 'map', '.'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/calc.py#L184-L205 |
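Per intensity measure type, the map is obtained by inverting the hazard curve: given PoE as a function of intensity level, find the level at each target PoE, typically by interpolating in log space. A rough self-contained sketch with invented numbers; this is not the actual compute_hazard_maps implementation:

import numpy as np

imls = np.array([0.05, 0.1, 0.2, 0.4, 0.8])    # intensity measure levels
curve = np.array([0.9, 0.6, 0.3, 0.1, 0.02])   # PoE at each level, decreasing
target_poes = np.array([0.1, 0.02])
# np.interp wants increasing x, so reverse the arrays and work in log space.
logs = np.interp(np.log(target_poes[::-1]), np.log(curve[::-1]), np.log(imls[::-1]))
print(np.exp(logs)[::-1])  # approximately [0.4, 0.8]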
4,224 | alefnula/tea | tea/console/format.py | format_page | def format_page(text):
"""Format the text for output adding ASCII frame around the text.
Args:
text (str): Text that needs to be formatted.
Returns:
str: Formatted string.
"""
width = max(map(len, text.splitlines()))
page = "+-" + "-" * width + "-+\n"
for line in text.splitlines():
page += "| " + line.ljust(width) + " |\n"
page += "+-" + "-" * width + "-+\n"
return page | python | def format_page(text):
"""Format the text for output adding ASCII frame around the text.
Args:
text (str): Text that needs to be formatted.
Returns:
str: Formatted string.
"""
width = max(map(len, text.splitlines()))
page = "+-" + "-" * width + "-+\n"
for line in text.splitlines():
page += "| " + line.ljust(width) + " |\n"
page += "+-" + "-" * width + "-+\n"
return page | ['def', 'format_page', '(', 'text', ')', ':', 'width', '=', 'max', '(', 'map', '(', 'len', ',', 'text', '.', 'splitlines', '(', ')', ')', ')', 'page', '=', '"+-"', '+', '"-"', '*', 'width', '+', '"-+\\n"', 'for', 'line', 'in', 'text', '.', 'splitlines', '(', ')', ':', 'page', '+=', '"| "', '+', 'line', '.', 'ljust', '(', 'width', ')', '+', '" |\\n"', 'page', '+=', '"+-"', '+', '"-"', '*', 'width', '+', '"-+\\n"', 'return', 'page'] | Format the text for output adding ASCII frame around the text.
Args:
text (str): Text that needs to be formatted.
Returns:
str: Formatted string. | ['Format', 'the', 'text', 'for', 'output', 'adding', 'ASCII', 'frame', 'around', 'the', 'text', '.', 'Args', ':', 'text', '(', 'str', ')', ':', 'Text', 'that', 'needs', 'to', 'be', 'formatted', '.', 'Returns', ':', 'str', ':', 'Formatted', 'string', '.'] | train | https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/console/format.py#L16-L30 |
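A quick usage example of format_page as defined above:

print(format_page("Hello\nTea"))
# +-------+
# | Hello |
# | Tea   |
# +-------+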
4,225 | JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py | MAVLink.mission_request_partial_list_encode | def mission_request_partial_list_encode(self, target_system, target_component, start_index, end_index):
'''
Request a partial list of mission items from the system/component.
http://qgroundcontrol.org/mavlink/waypoint_protocol.
If start and end index are the same, just send one
waypoint.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
start_index : Start index, 0 by default (int16_t)
end_index : End index, -1 by default (-1: send list to end). Else a valid index of the list (int16_t)
'''
return MAVLink_mission_request_partial_list_message(target_system, target_component, start_index, end_index) | python | def mission_request_partial_list_encode(self, target_system, target_component, start_index, end_index):
'''
Request a partial list of mission items from the system/component.
http://qgroundcontrol.org/mavlink/waypoint_protocol.
If start and end index are the same, just send one
waypoint.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
start_index : Start index, 0 by default (int16_t)
end_index : End index, -1 by default (-1: send list to end). Else a valid index of the list (int16_t)
'''
return MAVLink_mission_request_partial_list_message(target_system, target_component, start_index, end_index) | ['def', 'mission_request_partial_list_encode', '(', 'self', ',', 'target_system', ',', 'target_component', ',', 'start_index', ',', 'end_index', ')', ':', 'return', 'MAVLink_mission_request_partial_list_message', '(', 'target_system', ',', 'target_component', ',', 'start_index', ',', 'end_index', ')'] | Request a partial list of mission items from the system/component.
http://qgroundcontrol.org/mavlink/waypoint_protocol.
If start and end index are the same, just send one
waypoint.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
start_index : Start index, 0 by default (int16_t)
end_index : End index, -1 by default (-1: send list to end). Else a valid index of the list (int16_t) | ['Request', 'a', 'partial', 'list', 'of', 'mission', 'items', 'from', 'the', 'system', '/', 'component', '.', 'http', ':', '//', 'qgroundcontrol', '.', 'org', '/', 'mavlink', '/', 'waypoint_protocol', '.', 'If', 'start', 'and', 'end', 'index', 'are', 'the', 'same', 'just', 'send', 'one', 'waypoint', '.'] | train | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L9407-L9420 |
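A minimal usage sketch for the encoder above, assuming mav is a MAVLink instance from this dialect; encoding only builds the message object, sending it is a separate step:

msg = mav.mission_request_partial_list_encode(
    target_system=1, target_component=1, start_index=0, end_index=-1)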
4,226 | saltstack/salt | salt/key.py | Key.gen_keys | def gen_keys(self, keydir=None, keyname=None, keysize=None, user=None):
'''
Generate minion RSA public keypair
'''
keydir, keyname, keysize, user = self._get_key_attrs(keydir, keyname,
keysize, user)
salt.crypt.gen_keys(keydir, keyname, keysize, user, self.passphrase)
return salt.utils.crypt.pem_finger(os.path.join(keydir, keyname + '.pub')) | python | def gen_keys(self, keydir=None, keyname=None, keysize=None, user=None):
'''
Generate minion RSA public keypair
'''
keydir, keyname, keysize, user = self._get_key_attrs(keydir, keyname,
keysize, user)
salt.crypt.gen_keys(keydir, keyname, keysize, user, self.passphrase)
return salt.utils.crypt.pem_finger(os.path.join(keydir, keyname + '.pub')) | ['def', 'gen_keys', '(', 'self', ',', 'keydir', '=', 'None', ',', 'keyname', '=', 'None', ',', 'keysize', '=', 'None', ',', 'user', '=', 'None', ')', ':', 'keydir', ',', 'keyname', ',', 'keysize', ',', 'user', '=', 'self', '.', '_get_key_attrs', '(', 'keydir', ',', 'keyname', ',', 'keysize', ',', 'user', ')', 'salt', '.', 'crypt', '.', 'gen_keys', '(', 'keydir', ',', 'keyname', ',', 'keysize', ',', 'user', ',', 'self', '.', 'passphrase', ')', 'return', 'salt', '.', 'utils', '.', 'crypt', '.', 'pem_finger', '(', 'os', '.', 'path', '.', 'join', '(', 'keydir', ',', 'keyname', '+', "'.pub'", ')', ')'] | Generate minion RSA public keypair | ['Generate', 'minion', 'RSA', 'public', 'keypair'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/key.py#L343-L350 |
4,227 | pypa/pipenv | pipenv/patched/notpip/_vendor/distro.py | LinuxDistribution.name | def name(self, pretty=False):
"""
Return the name of the OS distribution, as a string.
For details, see :func:`distro.name`.
"""
name = self.os_release_attr('name') \
or self.lsb_release_attr('distributor_id') \
or self.distro_release_attr('name') \
or self.uname_attr('name')
if pretty:
name = self.os_release_attr('pretty_name') \
or self.lsb_release_attr('description')
if not name:
name = self.distro_release_attr('name') \
or self.uname_attr('name')
version = self.version(pretty=True)
if version:
name = name + ' ' + version
return name or '' | python | def name(self, pretty=False):
"""
Return the name of the OS distribution, as a string.
For details, see :func:`distro.name`.
"""
name = self.os_release_attr('name') \
or self.lsb_release_attr('distributor_id') \
or self.distro_release_attr('name') \
or self.uname_attr('name')
if pretty:
name = self.os_release_attr('pretty_name') \
or self.lsb_release_attr('description')
if not name:
name = self.distro_release_attr('name') \
or self.uname_attr('name')
version = self.version(pretty=True)
if version:
name = name + ' ' + version
return name or '' | ['def', 'name', '(', 'self', ',', 'pretty', '=', 'False', ')', ':', 'name', '=', 'self', '.', 'os_release_attr', '(', "'name'", ')', 'or', 'self', '.', 'lsb_release_attr', '(', "'distributor_id'", ')', 'or', 'self', '.', 'distro_release_attr', '(', "'name'", ')', 'or', 'self', '.', 'uname_attr', '(', "'name'", ')', 'if', 'pretty', ':', 'name', '=', 'self', '.', 'os_release_attr', '(', "'pretty_name'", ')', 'or', 'self', '.', 'lsb_release_attr', '(', "'description'", ')', 'if', 'not', 'name', ':', 'name', '=', 'self', '.', 'distro_release_attr', '(', "'name'", ')', 'or', 'self', '.', 'uname_attr', '(', "'name'", ')', 'version', '=', 'self', '.', 'version', '(', 'pretty', '=', 'True', ')', 'if', 'version', ':', 'name', '=', 'name', '+', "' '", '+', 'version', 'return', 'name', 'or', "''"] | Return the name of the OS distribution, as a string.
For details, see :func:`distro.name`. | ['Return', 'the', 'name', 'of', 'the', 'OS', 'distribution', 'as', 'a', 'string', '.'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/distro.py#L706-L725 |
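For comparison, the standalone distro package exposes the same lookup at module level; the printed strings below are only examples, real output depends on the host OS:

import distro
print(distro.name())             # e.g. 'Ubuntu'
print(distro.name(pretty=True))  # e.g. 'Ubuntu 18.04.3 LTS'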
4,228 | bhmm/bhmm | bhmm/output_models/outputmodel.py | OutputModel.set_implementation | def set_implementation(self, impl):
"""
Sets the implementation of this module
Parameters
----------
impl : str
One of ["python", "c"]
"""
if impl.lower() == 'python':
self.__impl__ = self.__IMPL_PYTHON__
elif impl.lower() == 'c':
self.__impl__ = self.__IMPL_C__
else:
import warnings
warnings.warn('Implementation '+impl+' is not known. Using the fallback python implementation.')
self.__impl__ = self.__IMPL_PYTHON__ | python | def set_implementation(self, impl):
"""
Sets the implementation of this module
Parameters
----------
impl : str
One of ["python", "c"]
"""
if impl.lower() == 'python':
self.__impl__ = self.__IMPL_PYTHON__
elif impl.lower() == 'c':
self.__impl__ = self.__IMPL_C__
else:
import warnings
warnings.warn('Implementation '+impl+' is not known. Using the fallback python implementation.')
self.__impl__ = self.__IMPL_PYTHON__ | ['def', 'set_implementation', '(', 'self', ',', 'impl', ')', ':', 'if', 'impl', '.', 'lower', '(', ')', '==', "'python'", ':', 'self', '.', '__impl__', '=', 'self', '.', '__IMPL_PYTHON__', 'elif', 'impl', '.', 'lower', '(', ')', '==', "'c'", ':', 'self', '.', '__impl__', '=', 'self', '.', '__IMPL_C__', 'else', ':', 'import', 'warnings', 'warnings', '.', 'warn', '(', "'Implementation '", '+', 'impl', '+', "' is not known. Using the fallback python implementation.'", ')', 'self', '.', '__impl__', '=', 'self', '.', '__IMPL_PYTHON__'] | Sets the implementation of this module
Parameters
----------
impl : str
One of ["python", "c"] | ['Sets', 'the', 'implementation', 'of', 'this', 'module'] | train | https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/output_models/outputmodel.py#L69-L86 |
4,229 | TheHive-Project/Cortex-Analyzers | analyzers/MaxMind/ipaddr.py | _get_prefix_length | def _get_prefix_length(number1, number2, bits):
"""Get the number of leading bits that are same for two numbers.
Args:
number1: an integer.
number2: another integer.
bits: the maximum number of bits to compare.
Returns:
The number of leading bits that are the same for two numbers.
"""
for i in range(bits):
if number1 >> i == number2 >> i:
return bits - i
return 0 | python | def _get_prefix_length(number1, number2, bits):
"""Get the number of leading bits that are same for two numbers.
Args:
number1: an integer.
number2: another integer.
bits: the maximum number of bits to compare.
Returns:
The number of leading bits that are the same for two numbers.
"""
for i in range(bits):
if number1 >> i == number2 >> i:
return bits - i
return 0 | ['def', '_get_prefix_length', '(', 'number1', ',', 'number2', ',', 'bits', ')', ':', 'for', 'i', 'in', 'range', '(', 'bits', ')', ':', 'if', 'number1', '>>', 'i', '==', 'number2', '>>', 'i', ':', 'return', 'bits', '-', 'i', 'return', '0'] | Get the number of leading bits that are same for two numbers.
Args:
number1: an integer.
number2: another integer.
bits: the maximum number of bits to compare.
Returns:
The number of leading bits that are the same for two numbers. | ['Get', 'the', 'number', 'of', 'leading', 'bits', 'that', 'are', 'same', 'for', 'two', 'numbers', '.'] | train | https://github.com/TheHive-Project/Cortex-Analyzers/blob/8dae6a8c4cf9af5554ae8c844985c4b44d4bd4bf/analyzers/MaxMind/ipaddr.py#L170-L185 |
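A worked example of the helper above: 12 (0b1100) and 13 (0b1101) differ only in their lowest bit, so three of the four leading bits agree:

print(_get_prefix_length(0b1100, 0b1101, 4))  # 3
print(_get_prefix_length(0b1100, 0b1100, 4))  # 4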
4,230 | systemd/python-systemd | systemd/journal.py | stream | def stream(identifier=None, priority=LOG_INFO, level_prefix=False):
r"""Return a file object wrapping a stream to journal.
Log messages written to this file as simple newline separated text strings
are written to the journal.
The file will be line buffered, so messages are actually sent after a
newline character is written.
>>> from systemd import journal
>>> stream = journal.stream('myapp') # doctest: +SKIP
>>> res = stream.write('message...\n') # doctest: +SKIP
will produce the following message in the journal::
PRIORITY=7
SYSLOG_IDENTIFIER=myapp
MESSAGE=message...
If identifier is None, a suitable default based on sys.argv[0] will be used.
This interface can be used conveniently with the print function:
>>> from __future__ import print_function
>>> stream = journal.stream() # doctest: +SKIP
>>> print('message...', file=stream) # doctest: +SKIP
priority is the syslog priority, one of `LOG_EMERG`, `LOG_ALERT`,
`LOG_CRIT`, `LOG_ERR`, `LOG_WARNING`, `LOG_NOTICE`, `LOG_INFO`, `LOG_DEBUG`.
level_prefix is a boolean. If true, kernel-style log priority level prefixes
(such as '<1>') are interpreted. See sd-daemon(3) for more information.
"""
if identifier is None:
if not _sys.argv or not _sys.argv[0] or _sys.argv[0] == '-c':
identifier = 'python'
else:
identifier = _sys.argv[0]
fd = stream_fd(identifier, priority, level_prefix)
return _os.fdopen(fd, 'w', 1) | python | def stream(identifier=None, priority=LOG_INFO, level_prefix=False):
r"""Return a file object wrapping a stream to journal.
Log messages written to this file as simple newline separated text strings
are written to the journal.
The file will be line buffered, so messages are actually sent after a
newline character is written.
>>> from systemd import journal
>>> stream = journal.stream('myapp') # doctest: +SKIP
>>> res = stream.write('message...\n') # doctest: +SKIP
will produce the following message in the journal::
PRIORITY=7
SYSLOG_IDENTIFIER=myapp
MESSAGE=message...
If identifier is None, a suitable default based on sys.argv[0] will be used.
This interface can be used conveniently with the print function:
>>> from __future__ import print_function
>>> stream = journal.stream() # doctest: +SKIP
>>> print('message...', file=stream) # doctest: +SKIP
priority is the syslog priority, one of `LOG_EMERG`, `LOG_ALERT`,
`LOG_CRIT`, `LOG_ERR`, `LOG_WARNING`, `LOG_NOTICE`, `LOG_INFO`, `LOG_DEBUG`.
level_prefix is a boolean. If true, kernel-style log priority level prefixes
(such as '<1>') are interpreted. See sd-daemon(3) for more information.
"""
if identifier is None:
if not _sys.argv or not _sys.argv[0] or _sys.argv[0] == '-c':
identifier = 'python'
else:
identifier = _sys.argv[0]
fd = stream_fd(identifier, priority, level_prefix)
return _os.fdopen(fd, 'w', 1) | ['def', 'stream', '(', 'identifier', '=', 'None', ',', 'priority', '=', 'LOG_INFO', ',', 'level_prefix', '=', 'False', ')', ':', 'if', 'identifier', 'is', 'None', ':', 'if', 'not', '_sys', '.', 'argv', 'or', 'not', '_sys', '.', 'argv', '[', '0', ']', 'or', '_sys', '.', 'argv', '[', '0', ']', '==', "'-c'", ':', 'identifier', '=', "'python'", 'else', ':', 'identifier', '=', '_sys', '.', 'argv', '[', '0', ']', 'fd', '=', 'stream_fd', '(', 'identifier', ',', 'priority', ',', 'level_prefix', ')', 'return', '_os', '.', 'fdopen', '(', 'fd', ',', "'w'", ',', '1', ')'] | r"""Return a file object wrapping a stream to journal.
Log messages written to this file as simple newline separated text strings
are written to the journal.
The file will be line buffered, so messages are actually sent after a
newline character is written.
>>> from systemd import journal
>>> stream = journal.stream('myapp') # doctest: +SKIP
>>> res = stream.write('message...\n') # doctest: +SKIP
will produce the following message in the journal::
PRIORITY=7
SYSLOG_IDENTIFIER=myapp
MESSAGE=message...
If identifier is None, a suitable default based on sys.argv[0] will be used.
This interface can be used conveniently with the print function:
>>> from __future__ import print_function
>>> stream = journal.stream() # doctest: +SKIP
>>> print('message...', file=stream) # doctest: +SKIP
priority is the syslog priority, one of `LOG_EMERG`, `LOG_ALERT`,
`LOG_CRIT`, `LOG_ERR`, `LOG_WARNING`, `LOG_NOTICE`, `LOG_INFO`, `LOG_DEBUG`.
level_prefix is a boolean. If true, kernel-style log priority level prefixes
(such as '<1>') are interpreted. See sd-daemon(3) for more information. | ['r', 'Return', 'a', 'file', 'object', 'wrapping', 'a', 'stream', 'to', 'journal', '.'] | train | https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L460-L501 |
4,231 | ivelum/graphql-py | graphql/parser.py | GraphQLParser.p_operation_definition4 | def p_operation_definition4(self, p):
"""
operation_definition : operation_type name selection_set
"""
p[0] = self.operation_cls(p[1])(selections=p[3], name=p[2]) | python | def p_operation_definition4(self, p):
"""
operation_definition : operation_type name selection_set
"""
p[0] = self.operation_cls(p[1])(selections=p[3], name=p[2]) | ['def', 'p_operation_definition4', '(', 'self', ',', 'p', ')', ':', 'p', '[', '0', ']', '=', 'self', '.', 'operation_cls', '(', 'p', '[', '1', ']', ')', '(', 'selections', '=', 'p', '[', '3', ']', ',', 'name', '=', 'p', '[', '2', ']', ')'] | operation_definition : operation_type name selection_set | ['operation_definition', ':', 'operation_type', 'name', 'selection_set'] | train | https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L126-L130 |
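This rule reduces a named operation with an explicit operation type. A hedged usage sketch; the parser class comes from this module, the query text is just an example:

from graphql.parser import GraphQLParser

parser = GraphQLParser()
ast = parser.parse('query heroQuery { hero { name } }')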
4,232 | ClimateImpactLab/DataFS | datafs/config/helpers.py | check_requirements | def check_requirements(to_populate, prompts, helper=False):
'''
Iterates through required values, checking to_populate for required values
If a key in prompts is missing in to_populate and ``helper==True``,
prompts the user using the values in to_populate. Otherwise, raises an
error.
Parameters
----------
to_populate : dict
Data dictionary to fill. Prompts given to the user are taken from this
dictionary.
prompts : dict
Keys and prompts to use when filling ``to_populate``
'''
for kw, prompt in prompts.items():
if helper:
if kw not in to_populate:
to_populate[kw] = click.prompt(prompt)
else:
msg = (
'Required value "{}" not found. '
'Use helper=True or the --helper '
'flag for assistance.'.format(kw))
assert kw in to_populate, msg | python | def check_requirements(to_populate, prompts, helper=False):
'''
Iterates through required values, checking to_populate for required values
If a key in prompts is missing in to_populate and ``helper==True``,
prompts the user using the values in to_populate. Otherwise, raises an
error.
Parameters
----------
to_populate : dict
Data dictionary to fill. Prompts given to the user are taken from this
dictionary.
prompts : dict
Keys and prompts to use when filling ``to_populate``
'''
for kw, prompt in prompts.items():
if helper:
if kw not in to_populate:
to_populate[kw] = click.prompt(prompt)
else:
msg = (
'Required value "{}" not found. '
'Use helper=True or the --helper '
'flag for assistance.'.format(kw))
assert kw in to_populate, msg | ['def', 'check_requirements', '(', 'to_populate', ',', 'prompts', ',', 'helper', '=', 'False', ')', ':', 'for', 'kw', ',', 'prompt', 'in', 'prompts', '.', 'items', '(', ')', ':', 'if', 'helper', ':', 'if', 'kw', 'not', 'in', 'to_populate', ':', 'to_populate', '[', 'kw', ']', '=', 'click', '.', 'prompt', '(', 'prompt', ')', 'else', ':', 'msg', '=', '(', '\'Required value "{}" not found. \'', "'Use helper=True or the --helper '", "'flag for assistance.'", '.', 'format', '(', 'kw', ')', ')', 'assert', 'kw', 'in', 'to_populate', ',', 'msg'] | Iterates through required values, checking to_populate for required values
If a key in prompts is missing in to_populate and ``helper==True``,
prompts the user using the values in to_populate. Otherwise, raises an
error.
Parameters
----------
to_populate : dict
Data dictionary to fill. Prompts given to the user are taken from this
dictionary.
prompts : dict
Keys and prompts to use when filling ``to_populate`` | ['Iterates', 'through', 'required', 'values', 'checking', 'to_populate', 'for', 'required', 'values'] | train | https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/config/helpers.py#L158-L187 |
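A small usage sketch of check_requirements; with helper=True, click prompts only for keys still missing from the dict (the key names here are made up):

config = {'api_key': 'abc123'}
prompts = {'api_key': 'Enter your API key',
           'profile': 'Enter a profile name'}
check_requirements(config, prompts, helper=True)
# Prompts for 'profile' only and stores the answer back into config.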
4,233 | Microsoft/nni | examples/trials/ga_squad/graph.py | Graph.is_topology | def is_topology(self, layers=None):
'''
Validate the topology
'''
if layers is None:
layers = self.layers
layers_nodle = []
result = []
for i, layer in enumerate(layers):
if layer.is_delete is False:
layers_nodle.append(i)
while True:
flag_break = True
layers_toremove = []
for layer1 in layers_nodle:
flag_arrive = True
for layer2 in layers[layer1].input:
if layer2 in layers_nodle:
flag_arrive = False
if flag_arrive is True:
for layer2 in layers[layer1].output:
# Size is error
if layers[layer2].set_size(layer1, layers[layer1].size) is False:
return False
layers_toremove.append(layer1)
result.append(layer1)
flag_break = False
for layer in layers_toremove:
layers_nodle.remove(layer)
result.append('|')
if flag_break:
break
# There is a loop in the graph || some layers can't be reached
if layers_nodle:
return False
return result | python | def is_topology(self, layers=None):
'''
Validate the topology
'''
if layers is None:
layers = self.layers
layers_nodle = []
result = []
for i, layer in enumerate(layers):
if layer.is_delete is False:
layers_nodle.append(i)
while True:
flag_break = True
layers_toremove = []
for layer1 in layers_nodle:
flag_arrive = True
for layer2 in layers[layer1].input:
if layer2 in layers_nodle:
flag_arrive = False
if flag_arrive is True:
for layer2 in layers[layer1].output:
# Size is error
if layers[layer2].set_size(layer1, layers[layer1].size) is False:
return False
layers_toremove.append(layer1)
result.append(layer1)
flag_break = False
for layer in layers_toremove:
layers_nodle.remove(layer)
result.append('|')
if flag_break:
break
# There is a loop in the graph || some layers can't be reached
if layers_nodle:
return False
return result | ['def', 'is_topology', '(', 'self', ',', 'layers', '=', 'None', ')', ':', 'if', 'layers', 'is', 'None', ':', 'layers', '=', 'self', '.', 'layers', 'layers_nodle', '=', '[', ']', 'result', '=', '[', ']', 'for', 'i', ',', 'layer', 'in', 'enumerate', '(', 'layers', ')', ':', 'if', 'layer', '.', 'is_delete', 'is', 'False', ':', 'layers_nodle', '.', 'append', '(', 'i', ')', 'while', 'True', ':', 'flag_break', '=', 'True', 'layers_toremove', '=', '[', ']', 'for', 'layer1', 'in', 'layers_nodle', ':', 'flag_arrive', '=', 'True', 'for', 'layer2', 'in', 'layers', '[', 'layer1', ']', '.', 'input', ':', 'if', 'layer2', 'in', 'layers_nodle', ':', 'flag_arrive', '=', 'False', 'if', 'flag_arrive', 'is', 'True', ':', 'for', 'layer2', 'in', 'layers', '[', 'layer1', ']', '.', 'output', ':', '# Size is error', 'if', 'layers', '[', 'layer2', ']', '.', 'set_size', '(', 'layer1', ',', 'layers', '[', 'layer1', ']', '.', 'size', ')', 'is', 'False', ':', 'return', 'False', 'layers_toremove', '.', 'append', '(', 'layer1', ')', 'result', '.', 'append', '(', 'layer1', ')', 'flag_break', '=', 'False', 'for', 'layer', 'in', 'layers_toremove', ':', 'layers_nodle', '.', 'remove', '(', 'layer', ')', 'result', '.', 'append', '(', "'|'", ')', 'if', 'flag_break', ':', 'break', "# There is loop in graph || some layers can't to arrive", 'if', 'layers_nodle', ':', 'return', 'False', 'return', 'result'] | valid the topology | ['valid', 'the', 'topology'] | train | https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/examples/trials/ga_squad/graph.py#L133-L168 |
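is_topology is essentially a Kahn-style check that every layer becomes reachable once all of its inputs have been emitted, while also propagating sizes. The same ordering idea on a plain adjacency dict, shown as a generic sketch unrelated to the Graph class:

from collections import deque

inputs = {0: [], 1: [0], 2: [0, 1]}    # hypothetical wiring: 0 -> 1 -> 2 and 0 -> 2
outputs = {0: [1, 2], 1: [2], 2: []}
indegree = {n: len(v) for n, v in inputs.items()}
queue = deque(n for n, d in indegree.items() if d == 0)
order = []
while queue:
    n = queue.popleft()
    order.append(n)
    for m in outputs[n]:
        indegree[m] -= 1
        if indegree[m] == 0:
            queue.append(m)
print(order if len(order) == len(inputs) else False)  # [0, 1, 2]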
4,234 | libtcod/python-tcod | tcod/image.py | Image.blit_2x | def blit_2x(
self,
console: tcod.console.Console,
dest_x: int,
dest_y: int,
img_x: int = 0,
img_y: int = 0,
img_width: int = -1,
img_height: int = -1,
) -> None:
"""Blit onto a Console with double resolution.
Args:
console (Console): Blit destination Console.
dest_x (int): Console tile X position starting from the left at 0.
dest_y (int): Console tile Y position starting from the top at 0.
img_x (int): Left corner pixel of the Image to blit
img_y (int): Top corner pixel of the Image to blit
img_width (int): Width of the Image to blit.
Use -1 for the full Image width.
img_height (int): Height of the Image to blit.
Use -1 for the full Image height.
"""
lib.TCOD_image_blit_2x(
self.image_c,
_console(console),
dest_x,
dest_y,
img_x,
img_y,
img_width,
img_height,
) | python | def blit_2x(
self,
console: tcod.console.Console,
dest_x: int,
dest_y: int,
img_x: int = 0,
img_y: int = 0,
img_width: int = -1,
img_height: int = -1,
) -> None:
"""Blit onto a Console with double resolution.
Args:
console (Console): Blit destination Console.
dest_x (int): Console tile X position starting from the left at 0.
dest_y (int): Console tile Y position starting from the top at 0.
img_x (int): Left corner pixel of the Image to blit
img_y (int): Top corner pixel of the Image to blit
img_width (int): Width of the Image to blit.
Use -1 for the full Image width.
img_height (int): Height of the Image to blit.
Use -1 for the full Image height.
"""
lib.TCOD_image_blit_2x(
self.image_c,
_console(console),
dest_x,
dest_y,
img_x,
img_y,
img_width,
img_height,
) | ['def', 'blit_2x', '(', 'self', ',', 'console', ':', 'tcod', '.', 'console', '.', 'Console', ',', 'dest_x', ':', 'int', ',', 'dest_y', ':', 'int', ',', 'img_x', ':', 'int', '=', '0', ',', 'img_y', ':', 'int', '=', '0', ',', 'img_width', ':', 'int', '=', '-', '1', ',', 'img_height', ':', 'int', '=', '-', '1', ',', ')', '->', 'None', ':', 'lib', '.', 'TCOD_image_blit_2x', '(', 'self', '.', 'image_c', ',', '_console', '(', 'console', ')', ',', 'dest_x', ',', 'dest_y', ',', 'img_x', ',', 'img_y', ',', 'img_width', ',', 'img_height', ',', ')'] | Blit onto a Console with double resolution.
Args:
console (Console): Blit destination Console.
dest_x (int): Console tile X position starting from the left at 0.
dest_y (int): Console tile Y position starting from the top at 0.
img_x (int): Left corner pixel of the Image to blit
img_y (int): Top corner pixel of the Image to blit
img_width (int): Width of the Image to blit.
Use -1 for the full Image width.
img_height (int): Height of the Image to blit.
Use -1 for the full Image height. | ['Blit', 'onto', 'a', 'Console', 'with', 'double', 'resolution', '.'] | train | https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/image.py#L254-L286 |
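A hypothetical use of blit_2x; console size, image size, and coordinates are invented, and in practice the image would be loaded or rendered elsewhere:

import tcod

console = tcod.console.Console(80, 50)
image = tcod.image.Image(160, 100)   # pixel image, twice the console size
image.blit_2x(console, 0, 0)         # drawn at half-cell resolution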
4,235 | google/transitfeed | examples/google_random_queries.py | WriteOutput | def WriteOutput(title, locations, limit, f):
"""Write html to f for up to limit trips between locations.
Args:
title: String used in html title
locations: list of (lat, lng) tuples
limit: maximum number of queries in the html
f: a file object
"""
output_prefix = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>%(title)s</title>
</head>
<body>
Random queries for %(title)s<p>
This list of random queries should speed up important manual testing. Here are
some things to check when looking at the results of a query.
<ul>
<li> Check the agency attribution under the trip results:
<ul>
<li> has correct name and spelling of the agency
<li> opens a page with general information about the service
</ul>
<li> For each alternate trip check that each of these is reasonable:
<ul>
<li> the total time of the trip
<li> the time for each leg. Bad data frequently results in a leg going a long
way in a few minutes.
<li> the icons and mode names (Tram, Bus, etc) are correct for each leg
<li> the route names and headsigns are correctly formatted and not
redundant.
For a good example see <a
href="https://developers.google.com/transit/gtfs/examples/display-to-users">
the screenshots in the Google Transit Feed Specification</a>.
<li> the shape line on the map looks correct. Make sure the polyline does
not zig-zag, loop, skip stops or jump far away unless the trip does the
same thing.
<li> the route is active on the day the trip planner returns
</ul>
</ul>
If you find a problem be sure to save the URL. This file is generated randomly.
<ol>
""" % locals()
output_suffix = """
</ol>
</body>
</html>
""" % locals()
f.write(transitfeed.EncodeUnicode(output_prefix))
for source, destination in zip(locations[0:limit], locations[1:limit + 1]):
f.write(transitfeed.EncodeUnicode("<li>%s\n" %
LatLngsToGoogleLink(source, destination)))
f.write(transitfeed.EncodeUnicode(output_suffix)) | python | def WriteOutput(title, locations, limit, f):
"""Write html to f for up to limit trips between locations.
Args:
title: String used in html title
locations: list of (lat, lng) tuples
limit: maximum number of queries in the html
f: a file object
"""
output_prefix = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>%(title)s</title>
</head>
<body>
Random queries for %(title)s<p>
This list of random queries should speed up important manual testing. Here are
some things to check when looking at the results of a query.
<ul>
<li> Check the agency attribution under the trip results:
<ul>
<li> has correct name and spelling of the agency
<li> opens a page with general information about the service
</ul>
<li> For each alternate trip check that each of these is reasonable:
<ul>
<li> the total time of the trip
<li> the time for each leg. Bad data frequently results in a leg going a long
way in a few minutes.
<li> the icons and mode names (Tram, Bus, etc) are correct for each leg
<li> the route names and headsigns are correctly formatted and not
redundant.
For a good example see <a
href="https://developers.google.com/transit/gtfs/examples/display-to-users">
the screenshots in the Google Transit Feed Specification</a>.
<li> the shape line on the map looks correct. Make sure the polyline does
not zig-zag, loop, skip stops or jump far away unless the trip does the
same thing.
<li> the route is active on the day the trip planner returns
</ul>
</ul>
If you find a problem be sure to save the URL. This file is generated randomly.
<ol>
""" % locals()
output_suffix = """
</ol>
</body>
</html>
""" % locals()
f.write(transitfeed.EncodeUnicode(output_prefix))
for source, destination in zip(locations[0:limit], locations[1:limit + 1]):
f.write(transitfeed.EncodeUnicode("<li>%s\n" %
LatLngsToGoogleLink(source, destination)))
f.write(transitfeed.EncodeUnicode(output_suffix)) | ['def', 'WriteOutput', '(', 'title', ',', 'locations', ',', 'limit', ',', 'f', ')', ':', 'output_prefix', '=', '"""\n<html>\n<head>\n<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">\n<title>%(title)s</title>\n</head>\n<body>\nRandom queries for %(title)s<p>\nThis list of random queries should speed up important manual testing. Here are\nsome things to check when looking at the results of a query.\n<ul>\n <li> Check the agency attribution under the trip results:\n <ul>\n <li> has correct name and spelling of the agency\n <li> opens a page with general information about the service\n </ul>\n <li> For each alternate trip check that each of these is reasonable:\n <ul>\n <li> the total time of the trip\n <li> the time for each leg. Bad data frequently results in a leg going a long\n way in a few minutes.\n <li> the icons and mode names (Tram, Bus, etc) are correct for each leg\n <li> the route names and headsigns are correctly formatted and not\n redundant.\n For a good example see <a\n href="https://developers.google.com/transit/gtfs/examples/display-to-users">\n the screenshots in the Google Transit Feed Specification</a>.\n <li> the shape line on the map looks correct. Make sure the polyline does\n not zig-zag, loop, skip stops or jump far away unless the trip does the\n same thing.\n <li> the route is active on the day the trip planner returns\n </ul>\n</ul>\nIf you find a problem be sure to save the URL. This file is generated randomly.\n<ol>\n"""', '%', 'locals', '(', ')', 'output_suffix', '=', '"""\n</ol>\n</body>\n</html>\n"""', '%', 'locals', '(', ')', 'f', '.', 'write', '(', 'transitfeed', '.', 'EncodeUnicode', '(', 'output_prefix', ')', ')', 'for', 'source', ',', 'destination', 'in', 'zip', '(', 'locations', '[', '0', ':', 'limit', ']', ',', 'locations', '[', '1', ':', 'limit', '+', '1', ']', ')', ':', 'f', '.', 'write', '(', 'transitfeed', '.', 'EncodeUnicode', '(', '"<li>%s\\n"', '%', 'LatLngsToGoogleLink', '(', 'source', ',', 'destination', ')', ')', ')', 'f', '.', 'write', '(', 'transitfeed', '.', 'EncodeUnicode', '(', 'output_suffix', ')', ')'] | Write html to f for up to limit trips between locations.
Args:
title: String used in html title
locations: list of (lat, lng) tuples
limit: maximum number of queries in the html
f: a file object | ['Write', 'html', 'to', 'f', 'for', 'up', 'to', 'limit', 'trips', 'between', 'locations', '.'] | train | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/examples/google_random_queries.py#L122-L178 |
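A minimal standalone sketch of the pairing loop that drives WriteOutput's query list: consecutive (lat, lng) tuples become origin/destination pairs, capped by `limit`. The coordinates are made up, and the real function formats each pair with LatLngsToGoogleLink and writes HTML rather than printing.

```python
# Consecutive locations become origin/destination pairs, capped by `limit`,
# mirroring zip(locations[0:limit], locations[1:limit + 1]) in WriteOutput.
locations = [(52.52, 13.40), (52.50, 13.42), (52.48, 13.45)]
limit = 10

for source, destination in zip(locations[0:limit], locations[1:limit + 1]):
    print("from %s,%s to %s,%s" % (source + destination))
```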
4,236 | vxgmichel/aiostream | aiostream/stream/combine.py | chain | async def chain(*sources):
"""Chain asynchronous sequences together, in the order they are given.
Note: the sequences are not iterated until it is required,
so if the operation is interrupted, the remaining sequences
will be left untouched.
"""
for source in sources:
async with streamcontext(source) as streamer:
async for item in streamer:
yield item | python | async def chain(*sources):
"""Chain asynchronous sequences together, in the order they are given.
Note: the sequences are not iterated until it is required,
so if the operation is interrupted, the remaining sequences
will be left untouched.
"""
for source in sources:
async with streamcontext(source) as streamer:
async for item in streamer:
yield item | ['async', 'def', 'chain', '(', '*', 'sources', ')', ':', 'for', 'source', 'in', 'sources', ':', 'async', 'with', 'streamcontext', '(', 'source', ')', 'as', 'streamer', ':', 'async', 'for', 'item', 'in', 'streamer', ':', 'yield', 'item'] | Chain asynchronous sequences together, in the order they are given.
Note: the sequences are not iterated until it is required,
so if the operation is interrupted, the remaining sequences
will be left untouched. | ['Chain', 'asynchronous', 'sequences', 'together', 'in', 'the', 'order', 'they', 'are', 'given', '.'] | train | https://github.com/vxgmichel/aiostream/blob/43bdf04ab19108a3f1b5a472062e1392a26cbcf8/aiostream/stream/combine.py#L18-L28 |
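A standalone sketch of the same lazy chaining pattern using plain async generators; it deliberately does not go through aiostream's streamcontext, which the real operator uses for proper resource cleanup.

```python
import asyncio

# Lazily iterate each async source in order; later sources are not touched
# until the earlier ones are exhausted, as the docstring describes.
async def chain_sketch(*sources):
    for source in sources:
        async for item in source:
            yield item

async def numbers(start, stop):
    for n in range(start, stop):
        yield n

async def main():
    async for item in chain_sketch(numbers(0, 3), numbers(10, 13)):
        print(item)  # 0 1 2 10 11 12

asyncio.run(main())
```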
4,237 | BernardFW/bernard | src/bernard/i18n/utils.py | LocalesFlatDict.update | def update(self, new_data: Dict[Text, Dict[Text, Text]]):
"""
Receive an update from a loader.
:param new_data: New translation data from the loader
"""
for locale, data in new_data.items():
if locale not in self.dict:
self.dict[locale] = {}
self.dict[locale].update(data) | python | def update(self, new_data: Dict[Text, Dict[Text, Text]]):
"""
Receive an update from a loader.
:param new_data: New translation data from the loader
"""
for locale, data in new_data.items():
if locale not in self.dict:
self.dict[locale] = {}
self.dict[locale].update(data) | ['def', 'update', '(', 'self', ',', 'new_data', ':', 'Dict', '[', 'Text', ',', 'Dict', '[', 'Text', ',', 'Text', ']', ']', ')', ':', 'for', 'locale', ',', 'data', 'in', 'new_data', '.', 'items', '(', ')', ':', 'if', 'locale', 'not', 'in', 'self', '.', 'dict', ':', 'self', '.', 'dict', '[', 'locale', ']', '=', '{', '}', 'self', '.', 'dict', '[', 'locale', ']', '.', 'update', '(', 'data', ')'] | Receive an update from a loader.
:param new_data: New translation data from the loader | ['Receive', 'an', 'update', 'from', 'a', 'loader', '.'] | train | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/i18n/utils.py#L109-L120 |
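The update is a per-locale dictionary merge. A standalone sketch with made-up translation keys; setdefault stands in for the explicit `if locale not in self.dict` check.

```python
# Standalone sketch of the merge semantics: existing locales keep keys that
# the new payload does not override, and new locales are created on demand.
translations = {"en": {"HELLO": "Hello"}}

def merge(store, new_data):
    for locale, data in new_data.items():
        store.setdefault(locale, {}).update(data)

merge(translations, {"en": {"BYE": "Goodbye"}, "fr": {"HELLO": "Bonjour"}})
print(translations)
# {'en': {'HELLO': 'Hello', 'BYE': 'Goodbye'}, 'fr': {'HELLO': 'Bonjour'}}
```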
4,238 | mitsei/dlkit | dlkit/records/osid/base_records.py | FilesRecord.get_url_by_label | def get_url_by_label(self, label, asset_content_type=None):
"""stub"""
return self._get_asset_content(self.get_asset_id_by_label(label)).get_url() | python | def get_url_by_label(self, label, asset_content_type=None):
"""stub"""
return self._get_asset_content(self.get_asset_id_by_label(label)).get_url() | ['def', 'get_url_by_label', '(', 'self', ',', 'label', ',', 'asset_content_type', '=', 'None', ')', ':', 'return', 'self', '.', '_get_asset_content', '(', 'self', '.', 'get_asset_id_by_label', '(', 'label', ')', ')', '.', 'get_url', '(', ')'] | stub | ['stub'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/osid/base_records.py#L1776-L1778 |
4,239 | marrow/mongo | marrow/mongo/query/query.py | Q._iop | def _iop(self, operation, other, *allowed):
"""An iterative operation operating on multiple values.
Consumes iterators to construct a concrete list at time of execution.
"""
f = self._field
if self._combining: # We are a field-compound query fragment, e.g. (Foo.bar & Foo.baz).
return reduce(self._combining,
(q._iop(operation, other, *allowed) for q in f)) # pylint:disable=protected-access
# Optimize this away in production; diagnostic aid.
if __debug__ and _complex_safety_check(f, {operation} | set(allowed)): # pragma: no cover
raise NotImplementedError("{self!r} does not allow {op} comparison.".format(
self=self, op=operation))
def _t(o):
for value in o:
yield None if value is None else f.transformer.foreign(value, (f, self._document))
other = other if len(other) > 1 else other[0]
values = list(_t(other))
return Filter({self._name: {operation: values}}) | python | def _iop(self, operation, other, *allowed):
"""An iterative operation operating on multiple values.
Consumes iterators to construct a concrete list at time of execution.
"""
f = self._field
if self._combining: # We are a field-compound query fragment, e.g. (Foo.bar & Foo.baz).
return reduce(self._combining,
(q._iop(operation, other, *allowed) for q in f)) # pylint:disable=protected-access
# Optimize this away in production; diagnostic aid.
if __debug__ and _complex_safety_check(f, {operation} | set(allowed)): # pragma: no cover
raise NotImplementedError("{self!r} does not allow {op} comparison.".format(
self=self, op=operation))
def _t(o):
for value in o:
yield None if value is None else f.transformer.foreign(value, (f, self._document))
other = other if len(other) > 1 else other[0]
values = list(_t(other))
return Filter({self._name: {operation: values}}) | ['def', '_iop', '(', 'self', ',', 'operation', ',', 'other', ',', '*', 'allowed', ')', ':', 'f', '=', 'self', '.', '_field', 'if', 'self', '.', '_combining', ':', '# We are a field-compound query fragment, e.g. (Foo.bar & Foo.baz).', 'return', 'reduce', '(', 'self', '.', '_combining', ',', '(', 'q', '.', '_iop', '(', 'operation', ',', 'other', ',', '*', 'allowed', ')', 'for', 'q', 'in', 'f', ')', ')', '# pylint:disable=protected-access', '# Optimize this away in production; diagnosic aide.', 'if', '__debug__', 'and', '_complex_safety_check', '(', 'f', ',', '{', 'operation', '}', '|', 'set', '(', 'allowed', ')', ')', ':', '# pragma: no cover', 'raise', 'NotImplementedError', '(', '"{self!r} does not allow {op} comparison."', '.', 'format', '(', 'self', '=', 'self', ',', 'op', '=', 'operation', ')', ')', 'def', '_t', '(', 'o', ')', ':', 'for', 'value', 'in', 'o', ':', 'yield', 'None', 'if', 'value', 'is', 'None', 'else', 'f', '.', 'transformer', '.', 'foreign', '(', 'value', ',', '(', 'f', ',', 'self', '.', '_document', ')', ')', 'other', '=', 'other', 'if', 'len', '(', 'other', ')', '>', '1', 'else', 'other', '[', '0', ']', 'values', '=', 'list', '(', '_t', '(', 'other', ')', ')', 'return', 'Filter', '(', '{', 'self', '.', '_name', ':', '{', 'operation', ':', 'values', '}', '}', ')'] | An iterative operation operating on multiple values.
Consumes iterators to construct a concrete list at time of execution. | ['An', 'iterative', 'operation', 'operating', 'on', 'multiple', 'values', '.', 'Consumes', 'iterators', 'to', 'construct', 'a', 'concrete', 'list', 'at', 'time', 'of', 'execution', '.'] | train | https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/query/query.py#L172-L196 |
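Stripped of field validation and value transformation, _iop ultimately wraps the field name and values in a standard MongoDB operator document. A rough standalone sketch of that shape; the field name, operator, and values are illustrative, not taken from the record.

```python
# Standalone sketch of the filter document an iterative operator builds.
def iop_sketch(field_name, operation, *values):
    flat = values[0] if len(values) == 1 else values
    return {field_name: {operation: list(flat)}}

print(iop_sketch("tag", "$in", ["python", "mongo"]))
# {'tag': {'$in': ['python', 'mongo']}}
```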
4,240 | thiezn/iperf3-python | iperf3/iperf3.py | Client.protocol | def protocol(self):
"""The iperf3 instance protocol
valid protocols are 'tcp' and 'udp'
:rtype: str
"""
proto_id = self.lib.iperf_get_test_protocol_id(self._test)
if proto_id == SOCK_STREAM:
self._protocol = 'tcp'
elif proto_id == SOCK_DGRAM:
self._protocol = 'udp'
return self._protocol | python | def protocol(self):
"""The iperf3 instance protocol
valid protocols are 'tcp' and 'udp'
:rtype: str
"""
proto_id = self.lib.iperf_get_test_protocol_id(self._test)
if proto_id == SOCK_STREAM:
self._protocol = 'tcp'
elif proto_id == SOCK_DGRAM:
self._protocol = 'udp'
return self._protocol | ['def', 'protocol', '(', 'self', ')', ':', 'proto_id', '=', 'self', '.', 'lib', '.', 'iperf_get_test_protocol_id', '(', 'self', '.', '_test', ')', 'if', 'proto_id', '==', 'SOCK_STREAM', ':', 'self', '.', '_protocol', '=', "'tcp'", 'elif', 'proto_id', '==', 'SOCK_DGRAM', ':', 'self', '.', '_protocol', '=', "'udp'", 'return', 'self', '.', '_protocol'] | The iperf3 instance protocol
valid protocols are 'tcp' and 'udp'
:rtype: str | ['The', 'iperf3', 'instance', 'protocol'] | train | https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L457-L471 |
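The property simply maps libiperf's protocol id onto the familiar socket constants. A standalone sketch of that mapping against the socket module (no iperf3 library required).

```python
import socket

# Same mapping as the property above: stream sockets are 'tcp',
# datagram sockets are 'udp'.
def protocol_name(proto_id):
    if proto_id == socket.SOCK_STREAM:
        return "tcp"
    if proto_id == socket.SOCK_DGRAM:
        return "udp"
    return "unknown"

print(protocol_name(socket.SOCK_STREAM))  # tcp
print(protocol_name(socket.SOCK_DGRAM))   # udp
```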
4,241 | talkincode/txradius | txradius/mschap/des_c.py | c2ln | def c2ln(c,l1,l2,n):
"char[n] to two unsigned long???"
c = c + n
l1, l2 = U32(0), U32(0)
f = 0
if n == 8:
l2 = l2 | (U32(c[7]) << 24)
f = 1
if f or (n == 7):
l2 = l2 | (U32(c[6]) << 16)
f = 1
if f or (n == 6):
l2 = l2 | (U32(c[5]) << 8)
f = 1
if f or (n == 5):
l2 = l2 | U32(c[4])
f = 1
if f or (n == 4):
l1 = l1 | (U32(c[3]) << 24)
f = 1
if f or (n == 3):
l1 = l1 | (U32(c[2]) << 16)
f = 1
if f or (n == 2):
l1 = l1 | (U32(c[1]) << 8)
f = 1
if f or (n == 1):
l1 = l1 | U32(c[0])
return (l1, l2) | python | def c2ln(c,l1,l2,n):
"char[n] to two unsigned long???"
c = c + n
l1, l2 = U32(0), U32(0)
f = 0
if n == 8:
l2 = l2 | (U32(c[7]) << 24)
f = 1
if f or (n == 7):
l2 = l2 | (U32(c[6]) << 16)
f = 1
if f or (n == 6):
l2 = l2 | (U32(c[5]) << 8)
f = 1
if f or (n == 5):
l2 = l2 | U32(c[4])
f = 1
if f or (n == 4):
l1 = l1 | (U32(c[3]) << 24)
f = 1
if f or (n == 3):
l1 = l1 | (U32(c[2]) << 16)
f = 1
if f or (n == 2):
l1 = l1 | (U32(c[1]) << 8)
f = 1
if f or (n == 1):
l1 = l1 | U32(c[0])
return (l1, l2) | ['def', 'c2ln', '(', 'c', ',', 'l1', ',', 'l2', ',', 'n', ')', ':', 'c', '=', 'c', '+', 'n', 'l1', ',', 'l2', '=', 'U32', '(', '0', ')', ',', 'U32', '(', '0', ')', 'f', '=', '0', 'if', 'n', '==', '8', ':', 'l2', '=', 'l2', '|', '(', 'U32', '(', 'c', '[', '7', ']', ')', '<<', '24', ')', 'f', '=', '1', 'if', 'f', 'or', '(', 'n', '==', '7', ')', ':', 'l2', '=', 'l2', '|', '(', 'U32', '(', 'c', '[', '6', ']', ')', '<<', '16', ')', 'f', '=', '1', 'if', 'f', 'or', '(', 'n', '==', '6', ')', ':', 'l2', '=', 'l2', '|', '(', 'U32', '(', 'c', '[', '5', ']', ')', '<<', '8', ')', 'f', '=', '1', 'if', 'f', 'or', '(', 'n', '==', '5', ')', ':', 'l2', '=', 'l2', '|', 'U32', '(', 'c', '[', '4', ']', ')', 'f', '=', '1', 'if', 'f', 'or', '(', 'n', '==', '4', ')', ':', 'l1', '=', 'l1', '|', '(', 'U32', '(', 'c', '[', '3', ']', ')', '<<', '24', ')', 'f', '=', '1', 'if', 'f', 'or', '(', 'n', '==', '3', ')', ':', 'l1', '=', 'l1', '|', '(', 'U32', '(', 'c', '[', '2', ']', ')', '<<', '16', ')', 'f', '=', '1', 'if', 'f', 'or', '(', 'n', '==', '2', ')', ':', 'l1', '=', 'l1', '|', '(', 'U32', '(', 'c', '[', '1', ']', ')', '<<', '8', ')', 'f', '=', '1', 'if', 'f', 'or', '(', 'n', '==', '1', ')', ':', 'l1', '=', 'l1', '|', 'U32', '(', 'c', '[', '0', ']', ')', 'return', '(', 'l1', ',', 'l2', ')'] | char[n] to two unsigned long??? | ['char', '[', 'n', ']', 'to', 'two', 'unsigned', 'long???'] | train | https://github.com/talkincode/txradius/blob/b86fdbc9be41183680b82b07d3a8e8ea10926e01/txradius/mschap/des_c.py#L35-L64 |
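For the common n == 8 case, the shift cascade in c2ln is simply a little-endian split of an 8-byte block into two 32-bit halves. A standalone sketch using int.from_bytes, without the U32 wrapper; partial blocks (n < 8) would still need the original fall-through logic.

```python
# Equivalent packing for the full n == 8 case: bytes 0-3 become l1 and
# bytes 4-7 become l2, each read little-endian as a 32-bit integer.
def c2ln_full(block):
    assert len(block) == 8
    l1 = int.from_bytes(block[:4], "little")
    l2 = int.from_bytes(block[4:], "little")
    return l1, l2

print([hex(x) for x in c2ln_full(bytes(range(1, 9)))])
# ['0x4030201', '0x8070605']
```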
4,242 | arne-cl/discoursegraphs | src/discoursegraphs/readwrite/decour.py | DecourDocumentGraph._add_dominance_relation | def _add_dominance_relation(self, source, target):
"""add a dominance relation to this docgraph"""
# TODO: fix #39, so we don't need to add nodes by hand
self.add_node(target, layers={self.ns, self.ns+':unit'})
self.add_edge(source, target,
layers={self.ns, self.ns+':discourse'},
edge_type=EdgeTypes.dominance_relation) | python | def _add_dominance_relation(self, source, target):
"""add a dominance relation to this docgraph"""
# TODO: fix #39, so we don't need to add nodes by hand
self.add_node(target, layers={self.ns, self.ns+':unit'})
self.add_edge(source, target,
layers={self.ns, self.ns+':discourse'},
edge_type=EdgeTypes.dominance_relation) | ['def', '_add_dominance_relation', '(', 'self', ',', 'source', ',', 'target', ')', ':', "# TODO: fix #39, so we don't need to add nodes by hand", 'self', '.', 'add_node', '(', 'target', ',', 'layers', '=', '{', 'self', '.', 'ns', ',', 'self', '.', 'ns', '+', "':unit'", '}', ')', 'self', '.', 'add_edge', '(', 'source', ',', 'target', ',', 'layers', '=', '{', 'self', '.', 'ns', ',', 'self', '.', 'ns', '+', "':discourse'", '}', ',', 'edge_type', '=', 'EdgeTypes', '.', 'dominance_relation', ')'] | add a dominance relation to this docgraph | ['add', 'a', 'dominance', 'relation', 'to', 'this', 'docgraph'] | train | https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/decour.py#L143-L149 |
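A rough standalone sketch of the underlying graph calls on a plain networkx MultiDiGraph (discoursegraphs document graphs build on networkx); the node ids, layer names, and the edge_type string are illustrative only.

```python
import networkx as nx

# Add a target node tagged with its layers, then a dominance edge from the
# source to it, mirroring the add_node/add_edge pattern above.
ns = "decour"
g = nx.MultiDiGraph()
g.add_node("unit_1", layers={ns, ns + ":unit"})
g.add_edge("discourse_root", "unit_1",
           layers={ns, ns + ":discourse"},
           edge_type="dominance_relation")
print(list(g.edges(data=True)))
```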
4,243 | icgood/pymap | pymap/parsing/command/select.py | IdleCommand.parse_done | def parse_done(self, buf: memoryview) -> Tuple[bool, memoryview]:
"""Parse the continuation line sent by the client to end the ``IDLE``
command.
Args:
buf: The continuation line to parse.
"""
match = self._pattern.match(buf)
if not match:
raise NotParseable(buf)
done = match.group(1).upper() == self.continuation
buf = buf[match.end(0):]
return done, buf | python | def parse_done(self, buf: memoryview) -> Tuple[bool, memoryview]:
"""Parse the continuation line sent by the client to end the ``IDLE``
command.
Args:
buf: The continuation line to parse.
"""
match = self._pattern.match(buf)
if not match:
raise NotParseable(buf)
done = match.group(1).upper() == self.continuation
buf = buf[match.end(0):]
return done, buf | ['def', 'parse_done', '(', 'self', ',', 'buf', ':', 'memoryview', ')', '->', 'Tuple', '[', 'bool', ',', 'memoryview', ']', ':', 'match', '=', 'self', '.', '_pattern', '.', 'match', '(', 'buf', ')', 'if', 'not', 'match', ':', 'raise', 'NotParseable', '(', 'buf', ')', 'done', '=', 'match', '.', 'group', '(', '1', ')', '.', 'upper', '(', ')', '==', 'self', '.', 'continuation', 'buf', '=', 'buf', '[', 'match', '.', 'end', '(', '0', ')', ':', ']', 'return', 'done', ',', 'buf'] | Parse the continuation line sent by the client to end the ``IDLE``
command.
Args:
buf: The continuation line to parse. | ['Parse', 'the', 'continuation', 'line', 'sent', 'by', 'the', 'client', 'to', 'end', 'the', 'IDLE', 'command', '.'] | train | https://github.com/icgood/pymap/blob/e77d9a54d760e3cbe044a548883bb4299ed61dc2/pymap/parsing/command/select.py#L485-L498 |
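A standalone sketch of the continuation-line check; the regex and the b'DONE' keyword are assumptions for illustration, since the real class compiles self._pattern and defines the continuation token elsewhere.

```python
import re

# Match one word followed by a line ending, then compare it to the
# continuation keyword, returning the unconsumed remainder of the buffer.
pattern = re.compile(br' *([^\r\n ]+) *\r?\n')

def parse_done_sketch(buf, continuation=b'DONE'):
    match = pattern.match(buf)
    if match is None:
        raise ValueError('not parseable')
    done = match.group(1).upper() == continuation
    return done, buf[match.end(0):]

print(parse_done_sketch(b'DONE\r\n'))       # (True, b'')
print(parse_done_sketch(b'other\r\nrest'))  # (False, b'rest')
```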
4,244 | ejeschke/ginga | ginga/ImageView.py | ImageViewBase.auto_levels_cb | def auto_levels_cb(self, setting, value):
"""Handle callback related to changes in auto-cut levels."""
# Did we change the method?
method = self.t_['autocut_method']
params = self.t_.get('autocut_params', [])
params = dict(params)
if method != str(self.autocuts):
ac_class = AutoCuts.get_autocuts(method)
self.autocuts = ac_class(self.logger, **params)
else:
self.autocuts.update_params(**params)
# Redo the auto levels
#if self.t_['autocuts'] != 'off':
# NOTE: users seem to expect that when the auto cuts parameters
# are changed that the cuts should be immediately recalculated
self.auto_levels() | python | def auto_levels_cb(self, setting, value):
"""Handle callback related to changes in auto-cut levels."""
# Did we change the method?
method = self.t_['autocut_method']
params = self.t_.get('autocut_params', [])
params = dict(params)
if method != str(self.autocuts):
ac_class = AutoCuts.get_autocuts(method)
self.autocuts = ac_class(self.logger, **params)
else:
self.autocuts.update_params(**params)
# Redo the auto levels
#if self.t_['autocuts'] != 'off':
# NOTE: users seem to expect that when the auto cuts parameters
# are changed that the cuts should be immediately recalculated
self.auto_levels() | ['def', 'auto_levels_cb', '(', 'self', ',', 'setting', ',', 'value', ')', ':', '# Did we change the method?', 'method', '=', 'self', '.', 't_', '[', "'autocut_method'", ']', 'params', '=', 'self', '.', 't_', '.', 'get', '(', "'autocut_params'", ',', '[', ']', ')', 'params', '=', 'dict', '(', 'params', ')', 'if', 'method', '!=', 'str', '(', 'self', '.', 'autocuts', ')', ':', 'ac_class', '=', 'AutoCuts', '.', 'get_autocuts', '(', 'method', ')', 'self', '.', 'autocuts', '=', 'ac_class', '(', 'self', '.', 'logger', ',', '*', '*', 'params', ')', 'else', ':', 'self', '.', 'autocuts', '.', 'update_params', '(', '*', '*', 'params', ')', '# Redo the auto levels', "#if self.t_['autocuts'] != 'off':", '# NOTE: users seems to expect that when the auto cuts parameters', '# are changed that the cuts should be immediately recalculated', 'self', '.', 'auto_levels', '(', ')'] | Handle callback related to changes in auto-cut levels. | ['Handle', 'callback', 'related', 'to', 'changes', 'in', 'auto', '-', 'cut', 'levels', '.'] | train | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/ImageView.py#L2655-L2672 |
4,245 | joeyespo/gitpress | gitpress/plugins.py | add_plugin | def add_plugin(plugin, directory=None):
"""Adds the specified plugin. This returns False if it was already added."""
repo = require_repo(directory)
plugins = get_value(repo, 'plugins', expect_type=dict)
if plugin in plugins:
return False
plugins[plugin] = {}
set_value(repo, 'plugins', plugins)
return True | python | def add_plugin(plugin, directory=None):
"""Adds the specified plugin. This returns False if it was already added."""
repo = require_repo(directory)
plugins = get_value(repo, 'plugins', expect_type=dict)
if plugin in plugins:
return False
plugins[plugin] = {}
set_value(repo, 'plugins', plugins)
return True | ['def', 'add_plugin', '(', 'plugin', ',', 'directory', '=', 'None', ')', ':', 'repo', '=', 'require_repo', '(', 'directory', ')', 'plugins', '=', 'get_value', '(', 'repo', ',', "'plugins'", ',', 'expect_type', '=', 'dict', ')', 'if', 'plugin', 'in', 'plugins', ':', 'return', 'False', 'plugins', '[', 'plugin', ']', '=', '{', '}', 'set_value', '(', 'repo', ',', "'plugins'", ',', 'plugins', ')', 'return', 'True'] | Adds the specified plugin. This returns False if it was already added. | ['Adds', 'the', 'specified', 'plugin', '.', 'This', 'returns', 'False', 'if', 'it', 'was', 'already', 'added', '.'] | train | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/plugins.py#L14-L23 |
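A standalone sketch of the same idempotent-add behaviour against an in-memory config dict, since the gitpress repository helpers (require_repo/get_value/set_value) are defined elsewhere.

```python
# Adding an already-registered plugin returns False and changes nothing;
# a new plugin gets an empty settings dict, as in add_plugin above.
config = {"plugins": {}}

def add_plugin_sketch(config, plugin):
    plugins = config.setdefault("plugins", {})
    if plugin in plugins:
        return False
    plugins[plugin] = {}
    return True

print(add_plugin_sketch(config, "markdown"))  # True  (newly added)
print(add_plugin_sketch(config, "markdown"))  # False (already present)
```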
4,246 | lovvskillz/python-discord-webhook | discord_webhook/webhook.py | DiscordEmbed.add_embed_field | def add_embed_field(self, **kwargs):
"""
set field of embed
:keyword name: name of the field
:keyword value: value of the field
:keyword inline: (optional) whether or not this field should display inline
"""
self.fields.append({
'name': kwargs.get('name'),
'value': kwargs.get('value'),
'inline': kwargs.get('inline', True)
}) | python | def add_embed_field(self, **kwargs):
"""
set field of embed
:keyword name: name of the field
:keyword value: value of the field
:keyword inline: (optional) whether or not this field should display inline
"""
self.fields.append({
'name': kwargs.get('name'),
'value': kwargs.get('value'),
'inline': kwargs.get('inline', True)
}) | ['def', 'add_embed_field', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'self', '.', 'fields', '.', 'append', '(', '{', "'name'", ':', 'kwargs', '.', 'get', '(', "'name'", ')', ',', "'value'", ':', 'kwargs', '.', 'get', '(', "'value'", ')', ',', "'inline'", ':', 'kwargs', '.', 'get', '(', "'inline'", ',', 'True', ')', '}', ')'] | set field of embed
:keyword name: name of the field
:keyword value: value of the field
:keyword inline: (optional) whether or not this field should display inline | ['set', 'field', 'of', 'embed', ':', 'keyword', 'name', ':', 'name', 'of', 'the', 'field', ':', 'keyword', 'value', ':', 'value', 'of', 'the', 'field', ':', 'keyword', 'inline', ':', '(', 'optional', ')', 'whether', 'or', 'not', 'this', 'field', 'should', 'display', 'inline'] | train | https://github.com/lovvskillz/python-discord-webhook/blob/5278184078c9da9362b6343c478a92e0904a7f83/discord_webhook/webhook.py#L259-L270 |
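A typical, hedged usage sketch for add_embed_field with this library; the webhook URL is a placeholder, and the surrounding constructor arguments are assumptions based on the library's documented usage rather than on this record.

```python
from discord_webhook import DiscordWebhook, DiscordEmbed

# Build an embed with two fields; fields render side by side unless
# inline=False is passed, then attach it to the webhook and send it.
webhook = DiscordWebhook(url="https://discord.com/api/webhooks/<id>/<token>")
embed = DiscordEmbed(title="Build status", description="Nightly run")
embed.add_embed_field(name="Result", value="passed")
embed.add_embed_field(name="Duration", value="4m12s", inline=False)
webhook.add_embed(embed)
response = webhook.execute()
```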
4,247 | StackStorm/pybind | pybind/slxos/v17r_1_01a/mpls_config/router/mpls/mpls_cmds_holder/autobw_template/__init__.py | autobw_template._set_adjustment_threshold | def _set_adjustment_threshold(self, v, load=False):
"""
Setter method for adjustment_threshold, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/autobw_template/adjustment_threshold (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_adjustment_threshold is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_adjustment_threshold() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=adjustment_threshold.adjustment_threshold, is_container='container', presence=False, yang_name="adjustment-threshold", rest_name="adjustment-threshold", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set adjustment-threshold', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """adjustment_threshold must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=adjustment_threshold.adjustment_threshold, is_container='container', presence=False, yang_name="adjustment-threshold", rest_name="adjustment-threshold", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set adjustment-threshold', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
})
self.__adjustment_threshold = t
if hasattr(self, '_set'):
self._set() | python | def _set_adjustment_threshold(self, v, load=False):
"""
Setter method for adjustment_threshold, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/autobw_template/adjustment_threshold (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_adjustment_threshold is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_adjustment_threshold() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=adjustment_threshold.adjustment_threshold, is_container='container', presence=False, yang_name="adjustment-threshold", rest_name="adjustment-threshold", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set adjustment-threshold', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """adjustment_threshold must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=adjustment_threshold.adjustment_threshold, is_container='container', presence=False, yang_name="adjustment-threshold", rest_name="adjustment-threshold", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set adjustment-threshold', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
})
self.__adjustment_threshold = t
if hasattr(self, '_set'):
self._set() | ['def', '_set_adjustment_threshold', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'adjustment_threshold', '.', 'adjustment_threshold', ',', 'is_container', '=', "'container'", ',', 'presence', '=', 'False', ',', 'yang_name', '=', '"adjustment-threshold"', ',', 'rest_name', '=', '"adjustment-threshold"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'Set adjustment-threshold'", ',', "u'cli-full-no'", ':', 'None', '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-mpls'", ',', 'defining_module', '=', "'brocade-mpls'", ',', 'yang_type', '=', "'container'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""adjustment_threshold must be of a type compatible with container"""', ',', "'defined-type'", ':', '"container"', ',', "'generated-type'", ':', '"""YANGDynClass(base=adjustment_threshold.adjustment_threshold, is_container=\'container\', presence=False, yang_name="adjustment-threshold", rest_name="adjustment-threshold", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Set adjustment-threshold\', u\'cli-full-no\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-mpls\', defining_module=\'brocade-mpls\', yang_type=\'container\', is_config=True)"""', ',', '}', ')', 'self', '.', '__adjustment_threshold', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')'] | Setter method for adjustment_threshold, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/autobw_template/adjustment_threshold (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_adjustment_threshold is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_adjustment_threshold() directly. | ['Setter', 'method', 'for', 'adjustment_threshold', 'mapped', 'from', 'YANG', 'variable', '/', 'mpls_config', '/', 'router', '/', 'mpls', '/', 'mpls_cmds_holder', '/', 'autobw_template', '/', 'adjustment_threshold', '(', 'container', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_adjustment_threshold', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_adjustment_threshold', '()', 'directly', '.'] | train | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/mpls_config/router/mpls/mpls_cmds_holder/autobw_template/__init__.py#L171-L192 |
4,248 | SiLab-Bonn/pyBAR | pybar/fei4_run_base.py | Fei4RunBase.exit_sync | def exit_sync(self):
''' Waiting for all threads to appear, then continue.
'''
if self._scan_threads and self.current_module_handle not in [t.name for t in self._scan_threads]:
raise RuntimeError('Thread name "%s" is not valid.' % self.current_module_handle)
if self._scan_threads and self.current_module_handle not in self._curr_sync_threads:
raise RuntimeError('Thread "%s" is not reading FIFO.' % self.current_module_handle)
with self._sync_lock:
self._curr_sync_threads.remove(self.current_module_handle)
self._exit_sync_event.clear()
while not self._exit_sync_event.wait(0.01):
if self.abort_run.is_set():
break
with self._sync_lock:
if len(set(self._curr_sync_threads) & set([t.name for t in self._scan_threads if t.is_alive()])) == 0 or not self._scan_threads:
self._exit_sync_event.set() | python | def exit_sync(self):
''' Waiting for all threads to appear, then continue.
'''
if self._scan_threads and self.current_module_handle not in [t.name for t in self._scan_threads]:
raise RuntimeError('Thread name "%s" is not valid.' % self.current_module_handle)
if self._scan_threads and self.current_module_handle not in self._curr_sync_threads:
raise RuntimeError('Thread "%s" is not reading FIFO.' % self.current_module_handle)
with self._sync_lock:
self._curr_sync_threads.remove(self.current_module_handle)
self._exit_sync_event.clear()
while not self._exit_sync_event.wait(0.01):
if self.abort_run.is_set():
break
with self._sync_lock:
if len(set(self._curr_sync_threads) & set([t.name for t in self._scan_threads if t.is_alive()])) == 0 or not self._scan_threads:
self._exit_sync_event.set() | ['def', 'exit_sync', '(', 'self', ')', ':', 'if', 'self', '.', '_scan_threads', 'and', 'self', '.', 'current_module_handle', 'not', 'in', '[', 't', '.', 'name', 'for', 't', 'in', 'self', '.', '_scan_threads', ']', ':', 'raise', 'RuntimeError', '(', '\'Thread name "%s" is not valid.\'', ')', 'if', 'self', '.', '_scan_threads', 'and', 'self', '.', 'current_module_handle', 'not', 'in', 'self', '.', '_curr_sync_threads', ':', 'raise', 'RuntimeError', '(', '\'Thread "%s" is not reading FIFO.\'', ')', 'with', 'self', '.', '_sync_lock', ':', 'self', '.', '_curr_sync_threads', '.', 'remove', '(', 'self', '.', 'current_module_handle', ')', 'self', '.', '_exit_sync_event', '.', 'clear', '(', ')', 'while', 'not', 'self', '.', '_exit_sync_event', '.', 'wait', '(', '0.01', ')', ':', 'if', 'self', '.', 'abort_run', '.', 'is_set', '(', ')', ':', 'break', 'with', 'self', '.', '_sync_lock', ':', 'if', 'len', '(', 'set', '(', 'self', '.', '_curr_sync_threads', ')', '&', 'set', '(', '[', 't', '.', 'name', 'for', 't', 'in', 'self', '.', '_scan_threads', 'if', 't', '.', 'is_alive', '(', ')', ']', ')', ')', '==', '0', 'or', 'not', 'self', '.', '_scan_threads', ':', 'self', '.', '_exit_sync_event', '.', 'set', '(', ')'] | Waiting for all threads to appear, then continue. | ['Waiting', 'for', 'all', 'threads', 'to', 'appear', 'then', 'continue', '.'] | train | https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4_run_base.py#L1186-L1201 |
4,249 | booktype/python-ooxml | ooxml/doc.py | StylesCollection.get_by_name | def get_by_name(self, name, style_type = None):
"""Find style by its descriptive name.
:Returns:
Returns found style of type :class:`ooxml.doc.Style`.
"""
for st in self.styles.values():
if st:
if st.name == name:
return st
if style_type and not st:
st = self.styles.get(self.default_styles[style_type], None)
return st | python | def get_by_name(self, name, style_type = None):
"""Find style by its descriptive name.
:Returns:
Returns found style of type :class:`ooxml.doc.Style`.
"""
for st in self.styles.values():
if st:
if st.name == name:
return st
if style_type and not st:
st = self.styles.get(self.default_styles[style_type], None)
return st | ['def', 'get_by_name', '(', 'self', ',', 'name', ',', 'style_type', '=', 'None', ')', ':', 'for', 'st', 'in', 'self', '.', 'styles', '.', 'values', '(', ')', ':', 'if', 'st', ':', 'if', 'st', '.', 'name', '==', 'name', ':', 'return', 'st', 'if', 'style_type', 'and', 'not', 'st', ':', 'st', '=', 'self', '.', 'styles', '.', 'get', '(', 'self', '.', 'default_styles', '[', 'style_type', ']', ',', 'None', ')', 'return', 'st'] | Find style by it's descriptive name.
:Returns:
Returns found style of type :class:`ooxml.doc.Style`. | ['Find', 'style', 'by', 'it', 's', 'descriptive', 'name', '.'] | train | https://github.com/booktype/python-ooxml/blob/b56990a5bee2e1bc46839cec5161ff3726dc4d87/ooxml/doc.py#L55-L68 |
4,250 | UCL-INGI/INGInious | inginious/frontend/pages/api/_api_page.py | APIPage.POST | def POST(self, *args, **kwargs):
""" POST request """
return self._handle_api(self.API_POST, args, kwargs) | python | def POST(self, *args, **kwargs):
""" POST request """
return self._handle_api(self.API_POST, args, kwargs) | ['def', 'POST', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'return', 'self', '.', '_handle_api', '(', 'self', '.', 'API_POST', ',', 'args', ',', 'kwargs', ')'] | POST request | ['POST', 'request'] | train | https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/api/_api_page.py#L27-L29 |
4,251 | mezz64/pyEmby | pyemby/server.py | EmbyServer.start | def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.") | python | def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.") | ['def', 'start', '(', 'self', ')', ':', 'asyncio', '.', 'ensure_future', '(', 'self', '.', 'register', '(', ')', ',', 'loop', '=', 'self', '.', '_event_loop', ')', 'if', 'self', '.', '_own_loop', ':', '_LOGGER', '.', 'info', '(', '"Starting up our own event loop."', ')', 'self', '.', '_event_loop', '.', 'run_forever', '(', ')', 'self', '.', '_event_loop', '.', 'close', '(', ')', '_LOGGER', '.', 'info', '(', '"Connection shut down."', ')'] | Public method for initiating connectivity with the emby server. | ['Public', 'method', 'for', 'initiating', 'connectivity', 'with', 'the', 'emby', 'server', '.'] | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L156-L164 |
4,252 | clalancette/pycdlib | pycdlib/udf.py | NSRVolumeStructure.parse | def parse(self, data, extent):
# type: (bytes, int) -> None
'''
Parse the passed in data into a UDF NSR Volume Structure.
Parameters:
data - The data to parse.
extent - The extent that this descriptor currently lives at.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF NSR Volume Structure already initialized')
(structure_type, self.standard_ident, structure_version,
reserved_unused) = struct.unpack_from(self.FMT, data, 0)
if structure_type != 0:
raise pycdlibexception.PyCdlibInvalidISO('Invalid structure type')
if self.standard_ident not in [b'NSR02', b'NSR03']:
raise pycdlibexception.PyCdlibInvalidISO('Invalid standard identifier')
if structure_version != 1:
raise pycdlibexception.PyCdlibInvalidISO('Invalid structure version')
self.orig_extent_loc = extent
self._initialized = True | python | def parse(self, data, extent):
# type: (bytes, int) -> None
'''
Parse the passed in data into a UDF NSR Volume Structure.
Parameters:
data - The data to parse.
extent - The extent that this descriptor currently lives at.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF NSR Volume Structure already initialized')
(structure_type, self.standard_ident, structure_version,
reserved_unused) = struct.unpack_from(self.FMT, data, 0)
if structure_type != 0:
raise pycdlibexception.PyCdlibInvalidISO('Invalid structure type')
if self.standard_ident not in [b'NSR02', b'NSR03']:
raise pycdlibexception.PyCdlibInvalidISO('Invalid standard identifier')
if structure_version != 1:
raise pycdlibexception.PyCdlibInvalidISO('Invalid structure version')
self.orig_extent_loc = extent
self._initialized = True | ['def', 'parse', '(', 'self', ',', 'data', ',', 'extent', ')', ':', '# type: (bytes, int) -> None', 'if', 'self', '.', '_initialized', ':', 'raise', 'pycdlibexception', '.', 'PyCdlibInternalError', '(', "'UDF NSR Volume Structure already initialized'", ')', '(', 'structure_type', ',', 'self', '.', 'standard_ident', ',', 'structure_version', ',', 'reserved_unused', ')', '=', 'struct', '.', 'unpack_from', '(', 'self', '.', 'FMT', ',', 'data', ',', '0', ')', 'if', 'structure_type', '!=', '0', ':', 'raise', 'pycdlibexception', '.', 'PyCdlibInvalidISO', '(', "'Invalid structure type'", ')', 'if', 'self', '.', 'standard_ident', 'not', 'in', '[', "b'NSR02'", ',', "b'NSR03'", ']', ':', 'raise', 'pycdlibexception', '.', 'PyCdlibInvalidISO', '(', "'Invalid standard identifier'", ')', 'if', 'structure_version', '!=', '1', ':', 'raise', 'pycdlibexception', '.', 'PyCdlibInvalidISO', '(', "'Invalid structure version'", ')', 'self', '.', 'orig_extent_loc', '=', 'extent', 'self', '.', '_initialized', '=', 'True'] | Parse the passed in data into a UDF NSR Volume Structure.
Parameters:
data - The data to parse.
extent - The extent that this descriptor currently lives at.
Returns:
Nothing. | ['Parse', 'the', 'passed', 'in', 'data', 'into', 'a', 'UDF', 'NSR', 'Volume', 'Structure', '.'] | train | https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/udf.py#L282-L310 |
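The parse boils down to unpacking a fixed header and validating three fields. A hedged standalone sketch: the real record layout lives in self.FMT, which is not shown here, so '=B5sB2041s' is an assumed 2048-byte layout used purely to illustrate struct.unpack_from on the type/identifier/version fields checked above.

```python
import struct

# Pack a synthetic NSR descriptor, then unpack and validate it the same
# way NSRVolumeStructure.parse does.
FMT = '=B5sB2041s'  # assumed layout, not pycdlib's actual self.FMT
data = struct.pack(FMT, 0, b'NSR02', 1, b'\x00' * 2041)

structure_type, standard_ident, structure_version, _reserved = \
    struct.unpack_from(FMT, data, 0)
assert structure_type == 0
assert standard_ident in (b'NSR02', b'NSR03')
assert structure_version == 1
print(standard_ident)  # b'NSR02'
```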
4,253 | moble/quaternion | calculus.py | derivative | def derivative(f, t):
"""Fourth-order finite-differencing with non-uniform time steps
The formula for this finite difference comes from Eq. (A 5b) of "Derivative formulas and errors for non-uniformly
spaced points" by M. K. Bowen and Ronald Smith. As explained in their Eqs. (B 9b) and (B 10b), this is a
fourth-order formula -- though that's a squishy concept with non-uniform time steps.
TODO: If there are fewer than five points, the function should revert to simpler (lower-order) formulas.
"""
dfdt = np.empty_like(f)
if (f.ndim == 1):
_derivative(f, t, dfdt)
elif (f.ndim == 2):
_derivative_2d(f, t, dfdt)
elif (f.ndim == 3):
_derivative_3d(f, t, dfdt)
else:
raise NotImplementedError("Taking derivatives of {0}-dimensional arrays is not yet implemented".format(f.ndim))
return dfdt | python | def derivative(f, t):
"""Fourth-order finite-differencing with non-uniform time steps
The formula for this finite difference comes from Eq. (A 5b) of "Derivative formulas and errors for non-uniformly
spaced points" by M. K. Bowen and Ronald Smith. As explained in their Eqs. (B 9b) and (B 10b), this is a
fourth-order formula -- though that's a squishy concept with non-uniform time steps.
TODO: If there are fewer than five points, the function should revert to simpler (lower-order) formulas.
"""
dfdt = np.empty_like(f)
if (f.ndim == 1):
_derivative(f, t, dfdt)
elif (f.ndim == 2):
_derivative_2d(f, t, dfdt)
elif (f.ndim == 3):
_derivative_3d(f, t, dfdt)
else:
raise NotImplementedError("Taking derivatives of {0}-dimensional arrays is not yet implemented".format(f.ndim))
return dfdt | ['def', 'derivative', '(', 'f', ',', 't', ')', ':', 'dfdt', '=', 'np', '.', 'empty_like', '(', 'f', ')', 'if', '(', 'f', '.', 'ndim', '==', '1', ')', ':', '_derivative', '(', 'f', ',', 't', ',', 'dfdt', ')', 'elif', '(', 'f', '.', 'ndim', '==', '2', ')', ':', '_derivative_2d', '(', 'f', ',', 't', ',', 'dfdt', ')', 'elif', '(', 'f', '.', 'ndim', '==', '3', ')', ':', '_derivative_3d', '(', 'f', ',', 't', ',', 'dfdt', ')', 'else', ':', 'raise', 'NotImplementedError', '(', '"Taking derivatives of {0}-dimensional arrays is not yet implemented"', '.', 'format', '(', 'f', '.', 'ndim', ')', ')', 'return', 'dfdt'] | Fourth-order finite-differencing with non-uniform time steps
The formula for this finite difference comes from Eq. (A 5b) of "Derivative formulas and errors for non-uniformly
spaced points" by M. K. Bowen and Ronald Smith. As explained in their Eqs. (B 9b) and (B 10b), this is a
fourth-order formula -- though that's a squishy concept with non-uniform time steps.
TODO: If there are fewer than five points, the function should revert to simpler (lower-order) formulas. | ['Fourth', '-', 'order', 'finite', '-', 'differencing', 'with', 'non', '-', 'uniform', 'time', 'steps'] | train | https://github.com/moble/quaternion/blob/7a323e81b391d6892e2874073e495e0beb057e85/calculus.py#L9-L28 |
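For a quick sanity check of any non-uniform differencing routine, differentiate a known function and compare against its analytic derivative. A minimal numpy sketch using np.gradient (a second-order scheme), not the package's own five-point, fourth-order stencil, whose error would be smaller still.

```python
import numpy as np

# Non-uniform sample times, a known signal, and its numerical derivative.
t = np.sort(np.random.default_rng(0).uniform(0.0, 2.0 * np.pi, 200))
f = np.sin(t)
dfdt = np.gradient(f, t)
print(np.max(np.abs(dfdt - np.cos(t))))  # small discretisation error
```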
4,254 | websocket-client/websocket-client | websocket/_app.py | WebSocketApp.send | def send(self, data, opcode=ABNF.OPCODE_TEXT):
"""
send message.
data: message to send. If you set opcode to OPCODE_TEXT,
data must be utf-8 string or unicode.
opcode: operation code of data. default is OPCODE_TEXT.
"""
if not self.sock or self.sock.send(data, opcode) == 0:
raise WebSocketConnectionClosedException(
"Connection is already closed.") | python | def send(self, data, opcode=ABNF.OPCODE_TEXT):
"""
send message.
data: message to send. If you set opcode to OPCODE_TEXT,
data must be utf-8 string or unicode.
opcode: operation code of data. default is OPCODE_TEXT.
"""
if not self.sock or self.sock.send(data, opcode) == 0:
raise WebSocketConnectionClosedException(
"Connection is already closed.") | ['def', 'send', '(', 'self', ',', 'data', ',', 'opcode', '=', 'ABNF', '.', 'OPCODE_TEXT', ')', ':', 'if', 'not', 'self', '.', 'sock', 'or', 'self', '.', 'sock', '.', 'send', '(', 'data', ',', 'opcode', ')', '==', '0', ':', 'raise', 'WebSocketConnectionClosedException', '(', '"Connection is already closed."', ')'] | send message.
data: message to send. If you set opcode to OPCODE_TEXT,
data must be utf-8 string or unicode.
opcode: operation code of data. default is OPCODE_TEXT. | ['send', 'message', '.', 'data', ':', 'message', 'to', 'send', '.', 'If', 'you', 'set', 'opcode', 'to', 'OPCODE_TEXT', 'data', 'must', 'be', 'utf', '-', '8', 'string', 'or', 'unicode', '.', 'opcode', ':', 'operation', 'code', 'of', 'data', '.', 'default', 'is', 'OPCODE_TEXT', '.'] | train | https://github.com/websocket-client/websocket-client/blob/3c25814664fef5b78716ed8841123ed1c0d17824/websocket/_app.py#L145-L155 |
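send() is normally called from inside the app callbacks once the connection is open; a hedged usage sketch, with the echo server URL standing in as a placeholder.

```python
import websocket

# Send a text frame on open, print the echoed reply, then close.
def on_open(ws):
    ws.send("hello")  # OPCODE_TEXT by default

def on_message(ws, message):
    print("received:", message)
    ws.close()

ws = websocket.WebSocketApp("wss://echo.websocket.events",
                            on_open=on_open,
                            on_message=on_message)
ws.run_forever()
```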
4,255 | watson-developer-cloud/python-sdk | ibm_watson/speech_to_text_v1.py | Word._from_dict | def _from_dict(cls, _dict):
"""Initialize a Word object from a json dictionary."""
args = {}
if 'word' in _dict:
args['word'] = _dict.get('word')
else:
raise ValueError(
'Required property \'word\' not present in Word JSON')
if 'sounds_like' in _dict:
args['sounds_like'] = _dict.get('sounds_like')
else:
raise ValueError(
'Required property \'sounds_like\' not present in Word JSON')
if 'display_as' in _dict:
args['display_as'] = _dict.get('display_as')
else:
raise ValueError(
'Required property \'display_as\' not present in Word JSON')
if 'count' in _dict:
args['count'] = _dict.get('count')
else:
raise ValueError(
'Required property \'count\' not present in Word JSON')
if 'source' in _dict:
args['source'] = _dict.get('source')
else:
raise ValueError(
'Required property \'source\' not present in Word JSON')
if 'error' in _dict:
args['error'] = [
WordError._from_dict(x) for x in (_dict.get('error'))
]
return cls(**args) | python | def _from_dict(cls, _dict):
"""Initialize a Word object from a json dictionary."""
args = {}
if 'word' in _dict:
args['word'] = _dict.get('word')
else:
raise ValueError(
'Required property \'word\' not present in Word JSON')
if 'sounds_like' in _dict:
args['sounds_like'] = _dict.get('sounds_like')
else:
raise ValueError(
'Required property \'sounds_like\' not present in Word JSON')
if 'display_as' in _dict:
args['display_as'] = _dict.get('display_as')
else:
raise ValueError(
'Required property \'display_as\' not present in Word JSON')
if 'count' in _dict:
args['count'] = _dict.get('count')
else:
raise ValueError(
'Required property \'count\' not present in Word JSON')
if 'source' in _dict:
args['source'] = _dict.get('source')
else:
raise ValueError(
'Required property \'source\' not present in Word JSON')
if 'error' in _dict:
args['error'] = [
WordError._from_dict(x) for x in (_dict.get('error'))
]
return cls(**args) | ['def', '_from_dict', '(', 'cls', ',', '_dict', ')', ':', 'args', '=', '{', '}', 'if', "'word'", 'in', '_dict', ':', 'args', '[', "'word'", ']', '=', '_dict', '.', 'get', '(', "'word'", ')', 'else', ':', 'raise', 'ValueError', '(', "'Required property \\'word\\' not present in Word JSON'", ')', 'if', "'sounds_like'", 'in', '_dict', ':', 'args', '[', "'sounds_like'", ']', '=', '_dict', '.', 'get', '(', "'sounds_like'", ')', 'else', ':', 'raise', 'ValueError', '(', "'Required property \\'sounds_like\\' not present in Word JSON'", ')', 'if', "'display_as'", 'in', '_dict', ':', 'args', '[', "'display_as'", ']', '=', '_dict', '.', 'get', '(', "'display_as'", ')', 'else', ':', 'raise', 'ValueError', '(', "'Required property \\'display_as\\' not present in Word JSON'", ')', 'if', "'count'", 'in', '_dict', ':', 'args', '[', "'count'", ']', '=', '_dict', '.', 'get', '(', "'count'", ')', 'else', ':', 'raise', 'ValueError', '(', "'Required property \\'count\\' not present in Word JSON'", ')', 'if', "'source'", 'in', '_dict', ':', 'args', '[', "'source'", ']', '=', '_dict', '.', 'get', '(', "'source'", ')', 'else', ':', 'raise', 'ValueError', '(', "'Required property \\'source\\' not present in Word JSON'", ')', 'if', "'error'", 'in', '_dict', ':', 'args', '[', "'error'", ']', '=', '[', 'WordError', '.', '_from_dict', '(', 'x', ')', 'for', 'x', 'in', '(', '_dict', '.', 'get', '(', "'error'", ')', ')', ']', 'return', 'cls', '(', '*', '*', 'args', ')'] | Initialize a Word object from a json dictionary. | ['Initialize', 'a', 'Word', 'object', 'from', 'a', 'json', 'dictionary', '.'] | train | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/speech_to_text_v1.py#L5261-L5293 |
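The deserializer insists on five properties and treats error as optional. A sketch of a dictionary that passes those checks; the field values are made up, and actually calling Word._from_dict(word_json) would require the ibm_watson SDK.

```python
# Minimal dictionary satisfying the required properties checked above.
word_json = {
    "word": "IEEE",
    "sounds_like": ["i triple e"],
    "display_as": "IEEE",
    "count": 3,
    "source": ["corpus1"],
}

required = ("word", "sounds_like", "display_as", "count", "source")
missing = [key for key in required if key not in word_json]
print(missing)  # [] -- every required property is present
```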
4,256 | saltstack/salt | salt/cloud/clouds/opennebula.py | vn_info | def vn_info(call=None, kwargs=None):
'''
Retrieves information for the virtual network.
.. versionadded:: 2016.3.0
name
The name of the virtual network for which to gather information. Can be
used instead of ``vn_id``.
vn_id
The ID of the virtual network for which to gather information. Can be
used instead of ``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f vn_info opennebula vn_id=3
salt-cloud --function vn_info opennebula name=public
'''
if call != 'function':
raise SaltCloudSystemExit(
'The vn_info function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
vn_id = kwargs.get('vn_id', None)
if vn_id:
if name:
log.warning(
'Both the \'vn_id\' and \'name\' arguments were provided. '
'\'vn_id\' will take precedence.'
)
elif name:
vn_id = get_vn_id(kwargs={'name': name})
else:
raise SaltCloudSystemExit(
'The vn_info function requires either a \'name\' or a \'vn_id\' '
'to be provided.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
response = server.one.vn.info(auth, int(vn_id))
if response[0] is False:
return response[1]
else:
info = {}
tree = _get_xml(response[1])
info[tree.find('NAME').text] = _xml_to_dict(tree)
return info | python | def vn_info(call=None, kwargs=None):
'''
Retrieves information for the virtual network.
.. versionadded:: 2016.3.0
name
The name of the virtual network for which to gather information. Can be
used instead of ``vn_id``.
vn_id
The ID of the virtual network for which to gather information. Can be
used instead of ``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f vn_info opennebula vn_id=3
salt-cloud --function vn_info opennebula name=public
'''
if call != 'function':
raise SaltCloudSystemExit(
'The vn_info function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
vn_id = kwargs.get('vn_id', None)
if vn_id:
if name:
log.warning(
'Both the \'vn_id\' and \'name\' arguments were provided. '
'\'vn_id\' will take precedence.'
)
elif name:
vn_id = get_vn_id(kwargs={'name': name})
else:
raise SaltCloudSystemExit(
'The vn_info function requires either a \'name\' or a \'vn_id\' '
'to be provided.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
response = server.one.vn.info(auth, int(vn_id))
if response[0] is False:
return response[1]
else:
info = {}
tree = _get_xml(response[1])
info[tree.find('NAME').text] = _xml_to_dict(tree)
return info | ['def', 'vn_info', '(', 'call', '=', 'None', ',', 'kwargs', '=', 'None', ')', ':', 'if', 'call', '!=', "'function'", ':', 'raise', 'SaltCloudSystemExit', '(', "'The vn_info function must be called with -f or --function.'", ')', 'if', 'kwargs', 'is', 'None', ':', 'kwargs', '=', '{', '}', 'name', '=', 'kwargs', '.', 'get', '(', "'name'", ',', 'None', ')', 'vn_id', '=', 'kwargs', '.', 'get', '(', "'vn_id'", ',', 'None', ')', 'if', 'vn_id', ':', 'if', 'name', ':', 'log', '.', 'warning', '(', "'Both the \\'vn_id\\' and \\'name\\' arguments were provided. '", "'\\'vn_id\\' will take precedence.'", ')', 'elif', 'name', ':', 'vn_id', '=', 'get_vn_id', '(', 'kwargs', '=', '{', "'name'", ':', 'name', '}', ')', 'else', ':', 'raise', 'SaltCloudSystemExit', '(', "'The vn_info function requires either a \\'name\\' or a \\'vn_id\\' '", "'to be provided.'", ')', 'server', ',', 'user', ',', 'password', '=', '_get_xml_rpc', '(', ')', 'auth', '=', "':'", '.', 'join', '(', '[', 'user', ',', 'password', ']', ')', 'response', '=', 'server', '.', 'one', '.', 'vn', '.', 'info', '(', 'auth', ',', 'int', '(', 'vn_id', ')', ')', 'if', 'response', '[', '0', ']', 'is', 'False', ':', 'return', 'response', '[', '1', ']', 'else', ':', 'info', '=', '{', '}', 'tree', '=', '_get_xml', '(', 'response', '[', '1', ']', ')', 'info', '[', 'tree', '.', 'find', '(', "'NAME'", ')', '.', 'text', ']', '=', '_xml_to_dict', '(', 'tree', ')', 'return', 'info'] | Retrieves information for the virtual network.
.. versionadded:: 2016.3.0
name
The name of the virtual network for which to gather information. Can be
used instead of ``vn_id``.
vn_id
The ID of the virtual network for which to gather information. Can be
used instead of ``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f vn_info opennebula vn_id=3
salt-cloud --function vn_info opennebula name=public | ['Retrieves', 'information', 'for', 'the', 'virtual', 'network', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/opennebula.py#L4242-L4298 |
4,257 | acutesoftware/AIKIF | aikif/lib/cls_filelist.py | FileList.save_filelist | def save_filelist(self, opFile, opFormat, delim=',', qu='"'):
"""
uses a List of files and collects meta data on them and saves
to a text file as a list or with metadata depending on opFormat.
"""
op_folder = os.path.dirname(opFile)
if op_folder is not None: # short filename passed
if not os.path.exists(op_folder):
os.makedirs(op_folder)
with open(opFile,'w') as fout:
fout.write("fullFilename" + delim)
for colHeading in opFormat:
fout.write(colHeading + delim)
fout.write('\n')
for f in self.filelist:
line = qu + f + qu + delim
try:
for fld in opFormat:
if fld == "name":
line = line + qu + os.path.basename(f) + qu + delim
if fld == "date":
line = line + qu + self.GetDateAsString(f) + qu + delim
if fld == "size":
line = line + qu + str(os.path.getsize(f)) + qu + delim
if fld == "path":
line = line + qu + os.path.dirname(f) + qu + delim
except IOError:
line += '\n' # no metadata
try:
fout.write (str(line.encode('ascii', 'ignore').decode('utf-8')))
fout.write ('\n')
except IOError:
#print("Cant print line - cls_filelist line 304")
pass | python | def save_filelist(self, opFile, opFormat, delim=',', qu='"'):
"""
uses a List of files and collects meta data on them and saves
to a text file as a list or with metadata depending on opFormat.
"""
op_folder = os.path.dirname(opFile)
if op_folder is not None: # short filename passed
if not os.path.exists(op_folder):
os.makedirs(op_folder)
with open(opFile,'w') as fout:
fout.write("fullFilename" + delim)
for colHeading in opFormat:
fout.write(colHeading + delim)
fout.write('\n')
for f in self.filelist:
line = qu + f + qu + delim
try:
for fld in opFormat:
if fld == "name":
line = line + qu + os.path.basename(f) + qu + delim
if fld == "date":
line = line + qu + self.GetDateAsString(f) + qu + delim
if fld == "size":
line = line + qu + str(os.path.getsize(f)) + qu + delim
if fld == "path":
line = line + qu + os.path.dirname(f) + qu + delim
except IOError:
line += '\n' # no metadata
try:
fout.write (str(line.encode('ascii', 'ignore').decode('utf-8')))
fout.write ('\n')
except IOError:
#print("Cant print line - cls_filelist line 304")
pass | ['def', 'save_filelist', '(', 'self', ',', 'opFile', ',', 'opFormat', ',', 'delim', '=', "','", ',', 'qu', '=', '\'"\'', ')', ':', 'op_folder', '=', 'os', '.', 'path', '.', 'dirname', '(', 'opFile', ')', 'if', 'op_folder', 'is', 'not', 'None', ':', '# short filename passed\r', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'op_folder', ')', ':', 'os', '.', 'makedirs', '(', 'op_folder', ')', 'with', 'open', '(', 'opFile', ',', "'w'", ')', 'as', 'fout', ':', 'fout', '.', 'write', '(', '"fullFilename"', '+', 'delim', ')', 'for', 'colHeading', 'in', 'opFormat', ':', 'fout', '.', 'write', '(', 'colHeading', '+', 'delim', ')', 'fout', '.', 'write', '(', "'\\n'", ')', 'for', 'f', 'in', 'self', '.', 'filelist', ':', 'line', '=', 'qu', '+', 'f', '+', 'qu', '+', 'delim', 'try', ':', 'for', 'fld', 'in', 'opFormat', ':', 'if', 'fld', '==', '"name"', ':', 'line', '=', 'line', '+', 'qu', '+', 'os', '.', 'path', '.', 'basename', '(', 'f', ')', '+', 'qu', '+', 'delim', 'if', 'fld', '==', '"date"', ':', 'line', '=', 'line', '+', 'qu', '+', 'self', '.', 'GetDateAsString', '(', 'f', ')', '+', 'qu', '+', 'delim', 'if', 'fld', '==', '"size"', ':', 'line', '=', 'line', '+', 'qu', '+', 'str', '(', 'os', '.', 'path', '.', 'getsize', '(', 'f', ')', ')', '+', 'qu', '+', 'delim', 'if', 'fld', '==', '"path"', ':', 'line', '=', 'line', '+', 'qu', '+', 'os', '.', 'path', '.', 'dirname', '(', 'f', ')', '+', 'qu', '+', 'delim', 'except', 'IOError', ':', 'line', '+=', "'\\n'", '# no metadata\r', 'try', ':', 'fout', '.', 'write', '(', 'str', '(', 'line', '.', 'encode', '(', "'ascii'", ',', "'ignore'", ')', '.', 'decode', '(', "'utf-8'", ')', ')', ')', 'fout', '.', 'write', '(', "'\\n'", ')', 'except', 'IOError', ':', '#print("Cant print line - cls_filelist line 304")\r', 'pass'] | uses a List of files and collects meta data on them and saves
to an text file as a list or with metadata depending on opFormat. | ['uses', 'a', 'List', 'of', 'files', 'and', 'collects', 'meta', 'data', 'on', 'them', 'and', 'saves', 'to', 'an', 'text', 'file', 'as', 'a', 'list', 'or', 'with', 'metadata', 'depending', 'on', 'opFormat', '.'] | train | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_filelist.py#L192-L228 |
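A standalone sketch of how one metadata row (name, date, size, path) can be assembled with os.path in the same quote/delimiter style as save_filelist; the date format is an assumption, since GetDateAsString is defined elsewhere in the class.

```python
import os
import time

# Build one delimited, quoted row of file metadata for a given path.
def metadata_row(path, qu='"', delim=','):
    cols = [
        path,
        os.path.basename(path),
        time.strftime("%Y-%m-%d", time.localtime(os.path.getmtime(path))),
        str(os.path.getsize(path)),
        os.path.dirname(path),
    ]
    return delim.join(qu + c + qu for c in cols)

print(metadata_row(__file__))
```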
4,258 | cloud-custodian/cloud-custodian | c7n/policy.py | PolicyCollection.resource_types | def resource_types(self):
"""resource types used by the collection."""
rtypes = set()
for p in self.policies:
rtypes.add(p.resource_type)
return rtypes | python | def resource_types(self):
"""resource types used by the collection."""
rtypes = set()
for p in self.policies:
rtypes.add(p.resource_type)
return rtypes | ['def', 'resource_types', '(', 'self', ')', ':', 'rtypes', '=', 'set', '(', ')', 'for', 'p', 'in', 'self', '.', 'policies', ':', 'rtypes', '.', 'add', '(', 'p', '.', 'resource_type', ')', 'return', 'rtypes'] | resource types used by the collection. | ['resource', 'types', 'used', 'by', 'the', 'collection', '.'] | train | https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/policy.py#L118-L123 |
4,259 | readbeyond/aeneas | aeneas/extra/ctw_speect.py | CustomTTSWrapper._synthesize_single_python_helper | def _synthesize_single_python_helper(
self,
text,
voice_code,
output_file_path=None,
return_audio_data=True
):
"""
This is a helper function to synthesize a single text fragment via a Python call.
If ``output_file_path`` is ``None``,
the audio data will not persist to file at the end of the method.
:rtype: tuple (result, (duration, sample_rate, encoding, data))
"""
# return zero if text is the empty string
if len(text) == 0:
#
# NOTE values of sample_rate, encoding, data
# do not matter if the duration is 0.000,
# so set them to None instead of the more precise:
# return (True, (TimeValue("0.000"), 16000, "pcm_s16le", numpy.array([])))
#
self.log(u"len(text) is zero: returning 0.000")
return (True, (TimeValue("0.000"), None, None, None))
#
# NOTE in this example, we assume that the Speect voice data files
# are located in the same directory of this .py source file
# and that the voice JSON file is called "voice.json"
#
# NOTE the voice_code value is ignored in this example,
# since we have only one TTS voice,
# but in general one might select a voice file to load,
# depending on voice_code;
# in fact, we could have created the ``voice`` object
# only once, in the constructor, instead of creating it
# each time this function is invoked,
# achieving slightly faster synthesis
#
voice_json_path = gf.safe_str(gf.absolute_path("voice.json", __file__))
voice = speect.SVoice(voice_json_path)
utt = voice.synth(text)
audio = utt.features["audio"]
if output_file_path is None:
self.log(u"output_file_path is None => not saving to file")
else:
self.log(u"output_file_path is not None => saving to file...")
# NOTE apparently, save_riff needs the path to be a byte string
audio.save_riff(gf.safe_str(output_file_path))
self.log(u"output_file_path is not None => saving to file... done")
# return immediately if returning audio data is not needed
if not return_audio_data:
self.log(u"return_audio_data is True => return immediately")
return (True, None)
# get length and data using speect Python API
self.log(u"return_audio_data is True => read and return audio data")
waveform = audio.get_audio_waveform()
audio_sample_rate = int(waveform["samplerate"])
audio_length = TimeValue(audio.num_samples() / audio_sample_rate)
audio_format = "pcm16"
audio_samples = numpy.fromstring(
waveform["samples"],
dtype=numpy.int16
).astype("float64") / 32768
return (True, (
audio_length,
audio_sample_rate,
audio_format,
audio_samples
)) | python | def _synthesize_single_python_helper(
self,
text,
voice_code,
output_file_path=None,
return_audio_data=True
):
"""
This is a helper function to synthesize a single text fragment via a Python call.
If ``output_file_path`` is ``None``,
the audio data will not persist to file at the end of the method.
:rtype: tuple (result, (duration, sample_rate, encoding, data))
"""
# return zero if text is the empty string
if len(text) == 0:
#
# NOTE values of sample_rate, encoding, data
# do not matter if the duration is 0.000,
# so set them to None instead of the more precise:
# return (True, (TimeValue("0.000"), 16000, "pcm_s16le", numpy.array([])))
#
self.log(u"len(text) is zero: returning 0.000")
return (True, (TimeValue("0.000"), None, None, None))
#
# NOTE in this example, we assume that the Speect voice data files
# are located in the same directory of this .py source file
# and that the voice JSON file is called "voice.json"
#
# NOTE the voice_code value is ignored in this example,
# since we have only one TTS voice,
# but in general one might select a voice file to load,
# depending on voice_code;
# in fact, we could have created the ``voice`` object
# only once, in the constructor, instead of creating it
# each time this function is invoked,
# achieving slightly faster synthesis
#
voice_json_path = gf.safe_str(gf.absolute_path("voice.json", __file__))
voice = speect.SVoice(voice_json_path)
utt = voice.synth(text)
audio = utt.features["audio"]
if output_file_path is None:
self.log(u"output_file_path is None => not saving to file")
else:
self.log(u"output_file_path is not None => saving to file...")
# NOTE apparently, save_riff needs the path to be a byte string
audio.save_riff(gf.safe_str(output_file_path))
self.log(u"output_file_path is not None => saving to file... done")
# return immediately if returning audio data is not needed
if not return_audio_data:
self.log(u"return_audio_data is True => return immediately")
return (True, None)
# get length and data using speect Python API
self.log(u"return_audio_data is True => read and return audio data")
waveform = audio.get_audio_waveform()
audio_sample_rate = int(waveform["samplerate"])
audio_length = TimeValue(audio.num_samples() / audio_sample_rate)
audio_format = "pcm16"
audio_samples = numpy.fromstring(
waveform["samples"],
dtype=numpy.int16
).astype("float64") / 32768
return (True, (
audio_length,
audio_sample_rate,
audio_format,
audio_samples
)) | ['def', '_synthesize_single_python_helper', '(', 'self', ',', 'text', ',', 'voice_code', ',', 'output_file_path', '=', 'None', ',', 'return_audio_data', '=', 'True', ')', ':', '# return zero if text is the empty string', 'if', 'len', '(', 'text', ')', '==', '0', ':', '#', '# NOTE values of sample_rate, encoding, data', '# do not matter if the duration is 0.000,', '# so set them to None instead of the more precise:', '# return (True, (TimeValue("0.000"), 16000, "pcm_s16le", numpy.array([])))', '#', 'self', '.', 'log', '(', 'u"len(text) is zero: returning 0.000"', ')', 'return', '(', 'True', ',', '(', 'TimeValue', '(', '"0.000"', ')', ',', 'None', ',', 'None', ',', 'None', ')', ')', '#', '# NOTE in this example, we assume that the Speect voice data files', '# are located in the same directory of this .py source file', '# and that the voice JSON file is called "voice.json"', '#', '# NOTE the voice_code value is ignored in this example,', '# since we have only one TTS voice,', '# but in general one might select a voice file to load,', '# depending on voice_code;', '# in fact, we could have created the ``voice`` object', '# only once, in the constructor, instead of creating it', '# each time this function is invoked,', '# achieving slightly faster synthesis', '#', 'voice_json_path', '=', 'gf', '.', 'safe_str', '(', 'gf', '.', 'absolute_path', '(', '"voice.json"', ',', '__file__', ')', ')', 'voice', '=', 'speect', '.', 'SVoice', '(', 'voice_json_path', ')', 'utt', '=', 'voice', '.', 'synth', '(', 'text', ')', 'audio', '=', 'utt', '.', 'features', '[', '"audio"', ']', 'if', 'output_file_path', 'is', 'None', ':', 'self', '.', 'log', '(', 'u"output_file_path is None => not saving to file"', ')', 'else', ':', 'self', '.', 'log', '(', 'u"output_file_path is not None => saving to file..."', ')', '# NOTE apparently, save_riff needs the path to be a byte string', 'audio', '.', 'save_riff', '(', 'gf', '.', 'safe_str', '(', 'output_file_path', ')', ')', 'self', '.', 'log', '(', 'u"output_file_path is not None => saving to file... done"', ')', '# return immediately if returning audio data is not needed', 'if', 'not', 'return_audio_data', ':', 'self', '.', 'log', '(', 'u"return_audio_data is True => return immediately"', ')', 'return', '(', 'True', ',', 'None', ')', '# get length and data using speect Python API', 'self', '.', 'log', '(', 'u"return_audio_data is True => read and return audio data"', ')', 'waveform', '=', 'audio', '.', 'get_audio_waveform', '(', ')', 'audio_sample_rate', '=', 'int', '(', 'waveform', '[', '"samplerate"', ']', ')', 'audio_length', '=', 'TimeValue', '(', 'audio', '.', 'num_samples', '(', ')', '/', 'audio_sample_rate', ')', 'audio_format', '=', '"pcm16"', 'audio_samples', '=', 'numpy', '.', 'fromstring', '(', 'waveform', '[', '"samples"', ']', ',', 'dtype', '=', 'numpy', '.', 'int16', ')', '.', 'astype', '(', '"float64"', ')', '/', '32768', 'return', '(', 'True', ',', '(', 'audio_length', ',', 'audio_sample_rate', ',', 'audio_format', ',', 'audio_samples', ')', ')'] | This is an helper function to synthesize a single text fragment via a Python call.
If ``output_file_path`` is ``None``,
the audio data will not persist to file at the end of the method.
:rtype: tuple (result, (duration, sample_rate, encoding, data)) | ['This', 'is', 'an', 'helper', 'function', 'to', 'synthesize', 'a', 'single', 'text', 'fragment', 'via', 'a', 'Python', 'call', '.'] | train | https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/extra/ctw_speect.py#L91-L163 |
4,260 | pandas-dev/pandas | pandas/core/algorithms.py | quantile | def quantile(x, q, interpolation_method='fraction'):
"""
Compute sample quantile or quantiles of the input array. For example, q=0.5
computes the median.
The `interpolation_method` parameter supports three values, namely
`fraction` (default), `lower` and `higher`. Interpolation is done only,
if the desired quantile lies between two data points `i` and `j`. For
`fraction`, the result is an interpolated value between `i` and `j`;
for `lower`, the result is `i`, for `higher` the result is `j`.
Parameters
----------
x : ndarray
Values from which to extract score.
q : scalar or array
Percentile at which to extract score.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
- fraction: `i + (j - i)*fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
- lower: `i`.
- higher: `j`.
Returns
-------
score : float
Score at percentile.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
x = np.asarray(x)
mask = isna(x)
x = x[~mask]
values = np.sort(x)
def _interpolate(a, b, fraction):
"""Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a) * fraction
def _get_score(at):
if len(values) == 0:
return np.nan
idx = at * (len(values) - 1)
if idx % 1 == 0:
score = values[int(idx)]
else:
if interpolation_method == 'fraction':
score = _interpolate(values[int(idx)], values[int(idx) + 1],
idx % 1)
elif interpolation_method == 'lower':
score = values[np.floor(idx)]
elif interpolation_method == 'higher':
score = values[np.ceil(idx)]
else:
raise ValueError("interpolation_method can only be 'fraction' "
", 'lower' or 'higher'")
return score
if is_scalar(q):
return _get_score(q)
else:
q = np.asarray(q, np.float64)
return algos.arrmap_float64(q, _get_score) | python | def quantile(x, q, interpolation_method='fraction'):
"""
Compute sample quantile or quantiles of the input array. For example, q=0.5
computes the median.
The `interpolation_method` parameter supports three values, namely
`fraction` (default), `lower` and `higher`. Interpolation is done only,
if the desired quantile lies between two data points `i` and `j`. For
`fraction`, the result is an interpolated value between `i` and `j`;
for `lower`, the result is `i`, for `higher` the result is `j`.
Parameters
----------
x : ndarray
Values from which to extract score.
q : scalar or array
Percentile at which to extract score.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
- fraction: `i + (j - i)*fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
- lower: `i`.
- higher: `j`.
Returns
-------
score : float
Score at percentile.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
x = np.asarray(x)
mask = isna(x)
x = x[~mask]
values = np.sort(x)
def _interpolate(a, b, fraction):
"""Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a) * fraction
def _get_score(at):
if len(values) == 0:
return np.nan
idx = at * (len(values) - 1)
if idx % 1 == 0:
score = values[int(idx)]
else:
if interpolation_method == 'fraction':
score = _interpolate(values[int(idx)], values[int(idx) + 1],
idx % 1)
elif interpolation_method == 'lower':
score = values[np.floor(idx)]
elif interpolation_method == 'higher':
score = values[np.ceil(idx)]
else:
raise ValueError("interpolation_method can only be 'fraction' "
", 'lower' or 'higher'")
return score
if is_scalar(q):
return _get_score(q)
else:
q = np.asarray(q, np.float64)
return algos.arrmap_float64(q, _get_score) | ['def', 'quantile', '(', 'x', ',', 'q', ',', 'interpolation_method', '=', "'fraction'", ')', ':', 'x', '=', 'np', '.', 'asarray', '(', 'x', ')', 'mask', '=', 'isna', '(', 'x', ')', 'x', '=', 'x', '[', '~', 'mask', ']', 'values', '=', 'np', '.', 'sort', '(', 'x', ')', 'def', '_interpolate', '(', 'a', ',', 'b', ',', 'fraction', ')', ':', '"""Returns the point at the given fraction between a and b, where\n \'fraction\' must be between 0 and 1.\n """', 'return', 'a', '+', '(', 'b', '-', 'a', ')', '*', 'fraction', 'def', '_get_score', '(', 'at', ')', ':', 'if', 'len', '(', 'values', ')', '==', '0', ':', 'return', 'np', '.', 'nan', 'idx', '=', 'at', '*', '(', 'len', '(', 'values', ')', '-', '1', ')', 'if', 'idx', '%', '1', '==', '0', ':', 'score', '=', 'values', '[', 'int', '(', 'idx', ')', ']', 'else', ':', 'if', 'interpolation_method', '==', "'fraction'", ':', 'score', '=', '_interpolate', '(', 'values', '[', 'int', '(', 'idx', ')', ']', ',', 'values', '[', 'int', '(', 'idx', ')', '+', '1', ']', ',', 'idx', '%', '1', ')', 'elif', 'interpolation_method', '==', "'lower'", ':', 'score', '=', 'values', '[', 'np', '.', 'floor', '(', 'idx', ')', ']', 'elif', 'interpolation_method', '==', "'higher'", ':', 'score', '=', 'values', '[', 'np', '.', 'ceil', '(', 'idx', ')', ']', 'else', ':', 'raise', 'ValueError', '(', '"interpolation_method can only be \'fraction\' "', '", \'lower\' or \'higher\'"', ')', 'return', 'score', 'if', 'is_scalar', '(', 'q', ')', ':', 'return', '_get_score', '(', 'q', ')', 'else', ':', 'q', '=', 'np', '.', 'asarray', '(', 'q', ',', 'np', '.', 'float64', ')', 'return', 'algos', '.', 'arrmap_float64', '(', 'q', ',', '_get_score', ')'] | Compute sample quantile or quantiles of the input array. For example, q=0.5
computes the median.
The `interpolation_method` parameter supports three values, namely
`fraction` (default), `lower` and `higher`. Interpolation is done only,
if the desired quantile lies between two data points `i` and `j`. For
`fraction`, the result is an interpolated value between `i` and `j`;
for `lower`, the result is `i`, for `higher` the result is `j`.
Parameters
----------
x : ndarray
Values from which to extract score.
q : scalar or array
Percentile at which to extract score.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
- fraction: `i + (j - i)*fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
- lower: `i`.
- higher: `j`.
Returns
-------
score : float
Score at percentile.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5 | ['Compute', 'sample', 'quantile', 'or', 'quantiles', 'of', 'the', 'input', 'array', '.', 'For', 'example', 'q', '=', '0', '.', '5', 'computes', 'the', 'median', '.'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L966-L1043 |
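A short usage sketch for the `quantile` helper in the record above. The import path is taken from the record's URL (pandas/core/algorithms.py) and is an internal API that may have moved or been removed in newer pandas releases; the expected outputs follow from the 'fraction' interpolation rule described in the docstring.

import numpy as np
from pandas.core.algorithms import quantile  # path per the record's URL; internal API, may have moved

data = np.array([1.0, 2.0, 3.0, 4.0])
# idx = 0.5 * (4 - 1) = 1.5, so interpolate between values[1] = 2.0 and values[2] = 3.0
print(quantile(data, 0.5))               # 2.5
# an array of quantiles returns an array of scores
print(quantile(data, [0.0, 0.25, 1.0]))  # [1.0, 1.75, 4.0]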
4,261 | tino/pyFirmata | pyfirmata/util.py | from_two_bytes | def from_two_bytes(bytes):
"""
Return an integer from two 7 bit bytes.
"""
lsb, msb = bytes
try:
# Usually bytes have been converted to integers with ord already
return msb << 7 | lsb
except TypeError:
# But add this for easy testing
# One of them can be a string, or both
try:
lsb = ord(lsb)
except TypeError:
pass
try:
msb = ord(msb)
except TypeError:
pass
return msb << 7 | lsb | python | def from_two_bytes(bytes):
"""
Return an integer from two 7 bit bytes.
"""
lsb, msb = bytes
try:
# Usually bytes have been converted to integers with ord already
return msb << 7 | lsb
except TypeError:
# But add this for easy testing
# One of them can be a string, or both
try:
lsb = ord(lsb)
except TypeError:
pass
try:
msb = ord(msb)
except TypeError:
pass
return msb << 7 | lsb | ['def', 'from_two_bytes', '(', 'bytes', ')', ':', 'lsb', ',', 'msb', '=', 'bytes', 'try', ':', '# Usually bytes have been converted to integers with ord already', 'return', 'msb', '<<', '7', '|', 'lsb', 'except', 'TypeError', ':', '# But add this for easy testing', '# One of them can be a string, or both', 'try', ':', 'lsb', '=', 'ord', '(', 'lsb', ')', 'except', 'TypeError', ':', 'pass', 'try', ':', 'msb', '=', 'ord', '(', 'msb', ')', 'except', 'TypeError', ':', 'pass', 'return', 'msb', '<<', '7', '|', 'lsb'] | Return an integer from two 7 bit bytes. | ['Return', 'an', 'integer', 'from', 'two', '7', 'bit', 'bytes', '.'] | train | https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/util.py#L86-L105 |
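The 7-bit packing described in the record above is easy to verify by hand; a brief check, assuming pyFirmata is installed and using the module path from the record's URL:

from pyfirmata.util import from_two_bytes  # path per the record's URL

# Firmata transmits 14-bit values as an (LSB, MSB) pair of 7-bit bytes: msb << 7 | lsb
print(from_two_bytes((0x68, 0x02)))     # 2 << 7 | 0x68 = 360
print(from_two_bytes((b'h', b'\x02')))  # single-byte strings are also accepted via the ord() fallback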
4,262 | nabetama/slacky | slacky/rest/rest.py | Groups.list | def list(self, **kwargs):
""" https://api.slack.com/methods/groups.list
"""
if kwargs:
self.params.update(kwargs)
return FromUrl('https://slack.com/api/groups.list', self._requests)(data=self.params).get() | python | def list(self, **kwargs):
""" https://api.slack.com/methods/groups.list
"""
if kwargs:
self.params.update(kwargs)
return FromUrl('https://slack.com/api/groups.list', self._requests)(data=self.params).get() | ['def', 'list', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'if', 'kwargs', ':', 'self', '.', 'params', '.', 'update', '(', 'kwargs', ')', 'return', 'FromUrl', '(', "'https://slack.com/api/groups.list'", ',', 'self', '.', '_requests', ')', '(', 'data', '=', 'self', '.', 'params', ')', '.', 'get', '(', ')'] | https://api.slack.com/methods/groups.list | ['https', ':', '//', 'api', '.', 'slack', '.', 'com', '/', 'methods', '/', 'groups', '.', 'list'] | train | https://github.com/nabetama/slacky/blob/dde62ce49af9b8f581729c36d2ac790310b570e4/slacky/rest/rest.py#L467-L472 |
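The wrapper above simply forwards self.params to the Slack Web API endpoint named in its docstring. For illustration only, a roughly equivalent raw HTTP call with requests (this is not part of slacky, and the token value is a hypothetical placeholder):

import requests

resp = requests.get('https://slack.com/api/groups.list',
                    params={'token': 'xoxp-your-token', 'exclude_archived': 1})
print(resp.json().get('groups', []))  # list of private channel (group) objects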
4,263 | OpenGov/carpenter | carpenter/blocks/tableanalyzer.py | TableAnalyzer._find_valid_block | def _find_valid_block(self, table, worksheet, flags, units, used_cells, start_pos, end_pos):
'''
Searches for the next location where a valid block could reside and constructs the block
object representing that location.
'''
for row_index in range(len(table)):
if row_index < start_pos[0] or row_index > end_pos[0]:
continue
convRow = table[row_index]
used_row = used_cells[row_index]
for column_index, conv in enumerate(convRow):
if (column_index < start_pos[1] or column_index > end_pos[1] or used_row[column_index]):
continue
# Is non empty cell?
if not is_empty_cell(conv):
block_start, block_end = self._find_block_bounds(table, used_cells,
(row_index, column_index), start_pos, end_pos)
if (block_end[0] > block_start[0] and
block_end[1] > block_start[1]):
try:
return TableBlock(table, used_cells, block_start, block_end, worksheet,
flags, units, self.assume_complete_blocks, self.max_title_rows)
except InvalidBlockError:
pass
# Prevent infinite loops if something goes wrong
used_cells[row_index][column_index] = True | python | def _find_valid_block(self, table, worksheet, flags, units, used_cells, start_pos, end_pos):
'''
Searches for the next location where a valid block could reside and constructs the block
object representing that location.
'''
for row_index in range(len(table)):
if row_index < start_pos[0] or row_index > end_pos[0]:
continue
convRow = table[row_index]
used_row = used_cells[row_index]
for column_index, conv in enumerate(convRow):
if (column_index < start_pos[1] or column_index > end_pos[1] or used_row[column_index]):
continue
# Is non empty cell?
if not is_empty_cell(conv):
block_start, block_end = self._find_block_bounds(table, used_cells,
(row_index, column_index), start_pos, end_pos)
if (block_end[0] > block_start[0] and
block_end[1] > block_start[1]):
try:
return TableBlock(table, used_cells, block_start, block_end, worksheet,
flags, units, self.assume_complete_blocks, self.max_title_rows)
except InvalidBlockError:
pass
# Prevent infinite loops if something goes wrong
used_cells[row_index][column_index] = True | ['def', '_find_valid_block', '(', 'self', ',', 'table', ',', 'worksheet', ',', 'flags', ',', 'units', ',', 'used_cells', ',', 'start_pos', ',', 'end_pos', ')', ':', 'for', 'row_index', 'in', 'range', '(', 'len', '(', 'table', ')', ')', ':', 'if', 'row_index', '<', 'start_pos', '[', '0', ']', 'or', 'row_index', '>', 'end_pos', '[', '0', ']', ':', 'continue', 'convRow', '=', 'table', '[', 'row_index', ']', 'used_row', '=', 'used_cells', '[', 'row_index', ']', 'for', 'column_index', ',', 'conv', 'in', 'enumerate', '(', 'convRow', ')', ':', 'if', '(', 'column_index', '<', 'start_pos', '[', '1', ']', 'or', 'column_index', '>', 'end_pos', '[', '1', ']', 'or', 'used_row', '[', 'column_index', ']', ')', ':', 'continue', '# Is non empty cell?', 'if', 'not', 'is_empty_cell', '(', 'conv', ')', ':', 'block_start', ',', 'block_end', '=', 'self', '.', '_find_block_bounds', '(', 'table', ',', 'used_cells', ',', '(', 'row_index', ',', 'column_index', ')', ',', 'start_pos', ',', 'end_pos', ')', 'if', '(', 'block_end', '[', '0', ']', '>', 'block_start', '[', '0', ']', 'and', 'block_end', '[', '1', ']', '>', 'block_start', '[', '1', ']', ')', ':', 'try', ':', 'return', 'TableBlock', '(', 'table', ',', 'used_cells', ',', 'block_start', ',', 'block_end', ',', 'worksheet', ',', 'flags', ',', 'units', ',', 'self', '.', 'assume_complete_blocks', ',', 'self', '.', 'max_title_rows', ')', 'except', 'InvalidBlockError', ':', 'pass', '# Prevent infinite loops if something goes wrong', 'used_cells', '[', 'row_index', ']', '[', 'column_index', ']', '=', 'True'] | Searches for the next location where a valid block could reside and constructs the block
object representing that location. | ['Searches', 'for', 'the', 'next', 'location', 'where', 'a', 'valid', 'block', 'could', 'reside', 'and', 'constructs', 'the', 'block', 'object', 'representing', 'that', 'location', '.'] | train | https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/tableanalyzer.py#L198-L223 |
4,264 | tensorflow/tensor2tensor | tensor2tensor/models/lstm.py | lstm_seq2seq_internal_attention_bid_encoder | def lstm_seq2seq_internal_attention_bid_encoder(inputs, targets, hparams,
train):
"""LSTM seq2seq model with attention, main step used for training."""
with tf.variable_scope("lstm_seq2seq_attention_bid_encoder"):
inputs_length = common_layers.length_from_embedding(inputs)
# Flatten inputs.
inputs = common_layers.flatten4d3d(inputs)
# LSTM encoder.
encoder_outputs, final_encoder_state = lstm_bid_encoder(
inputs, inputs_length, hparams, train, "encoder")
# LSTM decoder with attention
shifted_targets = common_layers.shift_right(targets)
# Add 1 to account for the padding added to the left from shift_right
targets_length = common_layers.length_from_embedding(shifted_targets) + 1
hparams_decoder = copy.copy(hparams)
hparams_decoder.hidden_size = 2 * hparams.hidden_size
decoder_outputs = lstm_attention_decoder(
common_layers.flatten4d3d(shifted_targets), hparams_decoder, train,
"decoder", final_encoder_state, encoder_outputs,
inputs_length, targets_length)
return tf.expand_dims(decoder_outputs, axis=2) | python | def lstm_seq2seq_internal_attention_bid_encoder(inputs, targets, hparams,
train):
"""LSTM seq2seq model with attention, main step used for training."""
with tf.variable_scope("lstm_seq2seq_attention_bid_encoder"):
inputs_length = common_layers.length_from_embedding(inputs)
# Flatten inputs.
inputs = common_layers.flatten4d3d(inputs)
# LSTM encoder.
encoder_outputs, final_encoder_state = lstm_bid_encoder(
inputs, inputs_length, hparams, train, "encoder")
# LSTM decoder with attention
shifted_targets = common_layers.shift_right(targets)
# Add 1 to account for the padding added to the left from shift_right
targets_length = common_layers.length_from_embedding(shifted_targets) + 1
hparams_decoder = copy.copy(hparams)
hparams_decoder.hidden_size = 2 * hparams.hidden_size
decoder_outputs = lstm_attention_decoder(
common_layers.flatten4d3d(shifted_targets), hparams_decoder, train,
"decoder", final_encoder_state, encoder_outputs,
inputs_length, targets_length)
return tf.expand_dims(decoder_outputs, axis=2) | ['def', 'lstm_seq2seq_internal_attention_bid_encoder', '(', 'inputs', ',', 'targets', ',', 'hparams', ',', 'train', ')', ':', 'with', 'tf', '.', 'variable_scope', '(', '"lstm_seq2seq_attention_bid_encoder"', ')', ':', 'inputs_length', '=', 'common_layers', '.', 'length_from_embedding', '(', 'inputs', ')', '# Flatten inputs.', 'inputs', '=', 'common_layers', '.', 'flatten4d3d', '(', 'inputs', ')', '# LSTM encoder.', 'encoder_outputs', ',', 'final_encoder_state', '=', 'lstm_bid_encoder', '(', 'inputs', ',', 'inputs_length', ',', 'hparams', ',', 'train', ',', '"encoder"', ')', '# LSTM decoder with attention', 'shifted_targets', '=', 'common_layers', '.', 'shift_right', '(', 'targets', ')', '# Add 1 to account for the padding added to the left from shift_right', 'targets_length', '=', 'common_layers', '.', 'length_from_embedding', '(', 'shifted_targets', ')', '+', '1', 'hparams_decoder', '=', 'copy', '.', 'copy', '(', 'hparams', ')', 'hparams_decoder', '.', 'hidden_size', '=', '2', '*', 'hparams', '.', 'hidden_size', 'decoder_outputs', '=', 'lstm_attention_decoder', '(', 'common_layers', '.', 'flatten4d3d', '(', 'shifted_targets', ')', ',', 'hparams_decoder', ',', 'train', ',', '"decoder"', ',', 'final_encoder_state', ',', 'encoder_outputs', ',', 'inputs_length', ',', 'targets_length', ')', 'return', 'tf', '.', 'expand_dims', '(', 'decoder_outputs', ',', 'axis', '=', '2', ')'] | LSTM seq2seq model with attention, main step used for training. | ['LSTM', 'seq2seq', 'model', 'with', 'attention', 'main', 'step', 'used', 'for', 'training', '.'] | train | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/lstm.py#L305-L325 |
4,265 | maaku/python-bitcoin | bitcoin/base58.py | b58encode | def b58encode(b, errors='strict'):
"Encode bytes to a base58-encoded string."
len_ = len(b)
# Convert big-endian bytes to integer
n = BigInteger.deserialize(BytesIO(b), len_)
# Divide that integer into base58
res = []
while n > 0:
n, r = divmod (n, 58)
res.append(b58digits[r])
res = ''.join(res[::-1])
# Encode leading zeros as base58 zeros
pad = 0
for c in b:
if c == six.int2byte(0): pad += 1
else: break
return (b58digits[0] * pad + res, len_) | python | def b58encode(b, errors='strict'):
"Encode bytes to a base58-encoded string."
len_ = len(b)
# Convert big-endian bytes to integer
n = BigInteger.deserialize(BytesIO(b), len_)
# Divide that integer into base58
res = []
while n > 0:
n, r = divmod (n, 58)
res.append(b58digits[r])
res = ''.join(res[::-1])
# Encode leading zeros as base58 zeros
pad = 0
for c in b:
if c == six.int2byte(0): pad += 1
else: break
return (b58digits[0] * pad + res, len_) | ['def', 'b58encode', '(', 'b', ',', 'errors', '=', "'strict'", ')', ':', 'len_', '=', 'len', '(', 'b', ')', '# Convert big-endian bytes to integer', 'n', '=', 'BigInteger', '.', 'deserialize', '(', 'BytesIO', '(', 'b', ')', ',', 'len_', ')', '# Divide that integer into base58', 'res', '=', '[', ']', 'while', 'n', '>', '0', ':', 'n', ',', 'r', '=', 'divmod', '(', 'n', ',', '58', ')', 'res', '.', 'append', '(', 'b58digits', '[', 'r', ']', ')', 'res', '=', "''", '.', 'join', '(', 'res', '[', ':', ':', '-', '1', ']', ')', '# Encode leading zeros as base58 zeros', 'pad', '=', '0', 'for', 'c', 'in', 'b', ':', 'if', 'c', '==', 'six', '.', 'int2byte', '(', '0', ')', ':', 'pad', '+=', '1', 'else', ':', 'break', 'return', '(', 'b58digits', '[', '0', ']', '*', 'pad', '+', 'res', ',', 'len_', ')'] | Encode bytes to a base58-encoded string. | ['Encode', 'bytes', 'to', 'a', 'base58', '-', 'encoded', 'string', '.'] | train | https://github.com/maaku/python-bitcoin/blob/1b80c284170fd3f547cc45f4700ce169f3f99641/bitcoin/base58.py#L24-L43 |
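For readers unfamiliar with the codec in the record above, here is a self-contained sketch of the same bytes-to-base58 conversion. It is written for illustration and is not taken from python-bitcoin; unlike the codec above it returns only the encoded string rather than a (string, length) tuple.

B58_DIGITS = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

def b58encode_sketch(data: bytes) -> str:
    # interpret the input as one big-endian integer
    n = int.from_bytes(data, 'big')
    out = []
    while n > 0:
        n, r = divmod(n, 58)
        out.append(B58_DIGITS[r])
    # each leading zero byte becomes the zero digit '1'
    pad = len(data) - len(data.lstrip(b'\x00'))
    return B58_DIGITS[0] * pad + ''.join(reversed(out))

print(b58encode_sketch(b'\x00\x01'))  # '12'
print(b58encode_sketch(b'hello'))     # 'Cn8eVZg'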
4,266 | Kortemme-Lab/klab | klab/bio/pdb.py | PDB._determine_heterogen_chain_type | def _determine_heterogen_chain_type(residue_types):
'''We distinguish three types of heterogen chain: i) all solution; ii) all ligand; or iii) other (a mix of solution, ligand, and/or ions).
residue_types should be a Set of sequence identifiers e.g. GTP, ZN, HOH.
'''
residue_type_id_lengths = set(map(len, residue_types))
if (len(residue_types) > 0):
if len(residue_types.difference(common_solution_ids)) == 0:
return 'Solution'
elif (len(residue_type_id_lengths) == 1) and (3 in residue_type_id_lengths) and (len(residue_types.difference(common_solution_ids)) > 0):
# The last expression discounts chains which only contain solution molecules e.g. HOH
return 'Ligand'
return 'Heterogen' | python | def _determine_heterogen_chain_type(residue_types):
'''We distinguish three types of heterogen chain: i) all solution; ii) all ligand; or iii) other (a mix of solution, ligand, and/or ions).
residue_types should be a Set of sequence identifiers e.g. GTP, ZN, HOH.
'''
residue_type_id_lengths = set(map(len, residue_types))
if (len(residue_types) > 0):
if len(residue_types.difference(common_solution_ids)) == 0:
return 'Solution'
elif (len(residue_type_id_lengths) == 1) and (3 in residue_type_id_lengths) and (len(residue_types.difference(common_solution_ids)) > 0):
# The last expression discounts chains which only contain solution molecules e.g. HOH
return 'Ligand'
return 'Heterogen' | ['def', '_determine_heterogen_chain_type', '(', 'residue_types', ')', ':', 'residue_type_id_lengths', '=', 'set', '(', 'map', '(', 'len', ',', 'residue_types', ')', ')', 'if', '(', 'len', '(', 'residue_types', ')', '>', '0', ')', ':', 'if', 'len', '(', 'residue_types', '.', 'difference', '(', 'common_solution_ids', ')', ')', '==', '0', ':', 'return', "'Solution'", 'elif', '(', 'len', '(', 'residue_type_id_lengths', ')', '==', '1', ')', 'and', '(', '3', 'in', 'residue_type_id_lengths', ')', 'and', '(', 'len', '(', 'residue_types', '.', 'difference', '(', 'common_solution_ids', ')', ')', '>', '0', ')', ':', '# The last expression discounts chains which only contain solution molecules e.g. HOH', 'return', "'Ligand'", 'return', "'Heterogen'"] | We distinguish three types of heterogen chain: i) all solution; ii) all ligand; or iii) other (a mix of solution, ligand, and/or ions).
residue_types should be a Set of sequence identifers e.g. GTP, ZN, HOH. | ['We', 'distinguish', 'three', 'types', 'of', 'heterogen', 'chain', ':', 'i', ')', 'all', 'solution', ';', 'ii', ')', 'all', 'ligand', ';', 'or', 'iii', ')', 'other', '(', 'a', 'mix', 'of', 'solution', 'ligand', 'and', '/', 'or', 'ions', ')', '.', 'residue_types', 'should', 'be', 'a', 'Set', 'of', 'sequence', 'identifers', 'e', '.', 'g', '.', 'GTP', 'ZN', 'HOH', '.'] | train | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L1798-L1809 |
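A hedged illustration of the classification rule above. It assumes, as the docstring implies, that 'HOH' appears in the module's common_solution_ids while 'GTP' and 'ZN' do not, and that the method is callable through the class (it takes no self, so presumably a staticmethod):

from klab.bio.pdb import PDB  # path per the record's URL

print(PDB._determine_heterogen_chain_type({'HOH'}))        # 'Solution'  - only solution molecules
print(PDB._determine_heterogen_chain_type({'GTP'}))        # 'Ligand'    - all ids are 3 characters, none are solution
print(PDB._determine_heterogen_chain_type({'ZN', 'HOH'}))  # 'Heterogen' - mixed id lengths (ion plus water)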
4,267 | eternnoir/pyTelegramBotAPI | telebot/__init__.py | TeleBot.get_user_profile_photos | def get_user_profile_photos(self, user_id, offset=None, limit=None):
"""
Retrieves the user profile photos of the person with 'user_id'
See https://core.telegram.org/bots/api#getuserprofilephotos
:param user_id:
:param offset:
:param limit:
:return: API reply.
"""
result = apihelper.get_user_profile_photos(self.token, user_id, offset, limit)
return types.UserProfilePhotos.de_json(result) | python | def get_user_profile_photos(self, user_id, offset=None, limit=None):
"""
Retrieves the user profile photos of the person with 'user_id'
See https://core.telegram.org/bots/api#getuserprofilephotos
:param user_id:
:param offset:
:param limit:
:return: API reply.
"""
result = apihelper.get_user_profile_photos(self.token, user_id, offset, limit)
return types.UserProfilePhotos.de_json(result) | ['def', 'get_user_profile_photos', '(', 'self', ',', 'user_id', ',', 'offset', '=', 'None', ',', 'limit', '=', 'None', ')', ':', 'result', '=', 'apihelper', '.', 'get_user_profile_photos', '(', 'self', '.', 'token', ',', 'user_id', ',', 'offset', ',', 'limit', ')', 'return', 'types', '.', 'UserProfilePhotos', '.', 'de_json', '(', 'result', ')'] | Retrieves the user profile photos of the person with 'user_id'
See https://core.telegram.org/bots/api#getuserprofilephotos
:param user_id:
:param offset:
:param limit:
:return: API reply. | ['Retrieves', 'the', 'user', 'profile', 'photos', 'of', 'the', 'person', 'with', 'user_id', 'See', 'https', ':', '//', 'core', '.', 'telegram', '.', 'org', '/', 'bots', '/', 'api#getuserprofilephotos', ':', 'param', 'user_id', ':', ':', 'param', 'offset', ':', ':', 'param', 'limit', ':', ':', 'return', ':', 'API', 'reply', '.'] | train | https://github.com/eternnoir/pyTelegramBotAPI/blob/47b53b88123097f1b9562a6cd5d4e080b86185d1/telebot/__init__.py#L491-L501 |
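Typical usage of the method above with pyTelegramBotAPI; the token and user id are hypothetical placeholders:

import telebot

bot = telebot.TeleBot('BOT_TOKEN')                         # hypothetical token
photos = bot.get_user_profile_photos(123456789, limit=5)
print(photos.total_count)                                  # total number of profile pictures
for sizes in photos.photos:                                # one list of PhotoSize objects per picture
    print(sizes[-1].file_id)                               # largest available resolution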
4,268 | aheadley/python-crunchyroll | crunchyroll/apis/android_manga.py | AndroidMangaApi._build_request | def _build_request(self, method, url, params=None):
"""Build a function to do an API request
"We have to go deeper" or "It's functions all the way down!"
"""
full_params = self._get_base_params()
if params is not None:
full_params.update(params)
try:
request_func = lambda u, d: \
getattr(self._connector, method.lower())(u, params=d,
headers=self._request_headers)
except AttributeError:
raise ApiException('Invalid request method')
# TODO: need to catch a network error here and raise it as ApiNetworkException
def do_request():
logger.debug('Sending %s request "%s" with params: %r',
method, url, full_params)
try:
resp = request_func(url, full_params)
logger.debug('Received response code: %d', resp.status_code)
except requests.RequestException as err:
raise ApiNetworkException(err)
try:
resp_json = resp.json()
except TypeError:
resp_json = resp.json
method_returns_list = False
try:
resp_json['error']
except TypeError:
logger.warn('Api method did not return map: %s', method)
method_returns_list = True
except KeyError:
logger.warn('Api method did not return map with error key: %s', method)
if method_returns_list is None:
raise ApiBadResponseException(resp.content)
elif method_returns_list:
data = resp_json
else:
try:
if resp_json['error']:
raise ApiError('%s: %s' % (resp_json['code'], resp_json['message']))
except KeyError:
data = resp_json
else:
data = resp_json['data']
self._do_post_request_tasks(data)
self._last_response = resp
return data
return do_request | python | def _build_request(self, method, url, params=None):
"""Build a function to do an API request
"We have to go deeper" or "It's functions all the way down!"
"""
full_params = self._get_base_params()
if params is not None:
full_params.update(params)
try:
request_func = lambda u, d: \
getattr(self._connector, method.lower())(u, params=d,
headers=self._request_headers)
except AttributeError:
raise ApiException('Invalid request method')
# TODO: need to catch a network error here and raise it as ApiNetworkException
def do_request():
logger.debug('Sending %s request "%s" with params: %r',
method, url, full_params)
try:
resp = request_func(url, full_params)
logger.debug('Received response code: %d', resp.status_code)
except requests.RequestException as err:
raise ApiNetworkException(err)
try:
resp_json = resp.json()
except TypeError:
resp_json = resp.json
method_returns_list = False
try:
resp_json['error']
except TypeError:
logger.warn('Api method did not return map: %s', method)
method_returns_list = True
except KeyError:
logger.warn('Api method did not return map with error key: %s', method)
if method_returns_list is None:
raise ApiBadResponseException(resp.content)
elif method_returns_list:
data = resp_json
else:
try:
if resp_json['error']:
raise ApiError('%s: %s' % (resp_json['code'], resp_json['message']))
except KeyError:
data = resp_json
else:
data = resp_json['data']
self._do_post_request_tasks(data)
self._last_response = resp
return data
return do_request | ['def', '_build_request', '(', 'self', ',', 'method', ',', 'url', ',', 'params', '=', 'None', ')', ':', 'full_params', '=', 'self', '.', '_get_base_params', '(', ')', 'if', 'params', 'is', 'not', 'None', ':', 'full_params', '.', 'update', '(', 'params', ')', 'try', ':', 'request_func', '=', 'lambda', 'u', ',', 'd', ':', 'getattr', '(', 'self', '.', '_connector', ',', 'method', '.', 'lower', '(', ')', ')', '(', 'u', ',', 'params', '=', 'd', ',', 'headers', '=', 'self', '.', '_request_headers', ')', 'except', 'AttributeError', ':', 'raise', 'ApiException', '(', "'Invalid request method'", ')', '# TODO: need to catch a network here and raise as ApiNetworkException', 'def', 'do_request', '(', ')', ':', 'logger', '.', 'debug', '(', '\'Sending %s request "%s" with params: %r\'', ',', 'method', ',', 'url', ',', 'full_params', ')', 'try', ':', 'resp', '=', 'request_func', '(', 'url', ',', 'full_params', ')', 'logger', '.', 'debug', '(', "'Received response code: %d'", ',', 'resp', '.', 'status_code', ')', 'except', 'requests', '.', 'RequestException', 'as', 'err', ':', 'raise', 'ApiNetworkException', '(', 'err', ')', 'try', ':', 'resp_json', '=', 'resp', '.', 'json', '(', ')', 'except', 'TypeError', ':', 'resp_json', '=', 'resp', '.', 'json', 'method_returns_list', '=', 'False', 'try', ':', 'resp_json', '[', "'error'", ']', 'except', 'TypeError', ':', 'logger', '.', 'warn', '(', "'Api method did not return map: %s'", ',', 'method', ')', 'method_returns_list', '=', 'True', 'except', 'KeyError', ':', 'logger', '.', 'warn', '(', "'Api method did not return map with error key: %s'", ',', 'method', ')', 'if', 'method_returns_list', 'is', 'None', ':', 'raise', 'ApiBadResponseException', '(', 'resp', '.', 'content', ')', 'elif', 'method_returns_list', ':', 'data', '=', 'resp_json', 'else', ':', 'try', ':', 'if', 'resp_json', '[', "'error'", ']', ':', 'raise', 'ApiError', '(', "'%s: %s'", '%', '(', 'resp_json', '[', "'code'", ']', ',', 'resp_json', '[', "'message'", ']', ')', ')', 'except', 'KeyError', ':', 'data', '=', 'resp_json', 'else', ':', 'data', '=', 'resp_json', '[', "'data'", ']', 'self', '.', '_do_post_request_tasks', '(', 'data', ')', 'self', '.', '_last_response', '=', 'resp', 'return', 'data', 'return', 'do_request'] | Build a function to do an API request
"We have to go deeper" or "It's functions all the way down!" | ['Build', 'a', 'function', 'to', 'do', 'an', 'API', 'request'] | train | https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/android_manga.py#L107-L161 |
4,269 | abalkin/tz | pavement.py | doc_open | def doc_open():
"""Build the HTML docs and open them in a web browser."""
doc_index = os.path.join(DOCS_DIRECTORY, 'build', 'html', 'index.html')
if sys.platform == 'darwin':
# Mac OS X
subprocess.check_call(['open', doc_index])
elif sys.platform == 'win32':
# Windows
subprocess.check_call(['start', doc_index], shell=True)
elif sys.platform == 'linux2':
# All freedesktop-compatible desktops
subprocess.check_call(['xdg-open', doc_index])
else:
print_failure_message(
"Unsupported platform. Please open `{0}' manually.".format(
doc_index)) | python | def doc_open():
"""Build the HTML docs and open them in a web browser."""
doc_index = os.path.join(DOCS_DIRECTORY, 'build', 'html', 'index.html')
if sys.platform == 'darwin':
# Mac OS X
subprocess.check_call(['open', doc_index])
elif sys.platform == 'win32':
# Windows
subprocess.check_call(['start', doc_index], shell=True)
elif sys.platform == 'linux2':
# All freedesktop-compatible desktops
subprocess.check_call(['xdg-open', doc_index])
else:
print_failure_message(
"Unsupported platform. Please open `{0}' manually.".format(
doc_index)) | ['def', 'doc_open', '(', ')', ':', 'doc_index', '=', 'os', '.', 'path', '.', 'join', '(', 'DOCS_DIRECTORY', ',', "'build'", ',', "'html'", ',', "'index.html'", ')', 'if', 'sys', '.', 'platform', '==', "'darwin'", ':', '# Mac OS X', 'subprocess', '.', 'check_call', '(', '[', "'open'", ',', 'doc_index', ']', ')', 'elif', 'sys', '.', 'platform', '==', "'win32'", ':', '# Windows', 'subprocess', '.', 'check_call', '(', '[', "'start'", ',', 'doc_index', ']', ',', 'shell', '=', 'True', ')', 'elif', 'sys', '.', 'platform', '==', "'linux2'", ':', '# All freedesktop-compatible desktops', 'subprocess', '.', 'check_call', '(', '[', "'xdg-open'", ',', 'doc_index', ']', ')', 'else', ':', 'print_failure_message', '(', '"Unsupported platform. Please open `{0}\' manually."', '.', 'format', '(', 'doc_index', ')', ')'] | Build the HTML docs and open them in a web browser. | ['Build', 'the', 'HTML', 'docs', 'and', 'open', 'them', 'in', 'a', 'web', 'browser', '.'] | train | https://github.com/abalkin/tz/blob/f25fca6afbf1abd46fd7aeb978282823c7dab5ab/pavement.py#L219-L234 |
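The same cross-platform "open a file with the default application" behaviour can also be obtained from the standard library; a sketch of that alternative (not what pavement.py does, and the docs path shown is an assumed value for DOCS_DIRECTORY):

import pathlib
import webbrowser

doc_index = pathlib.Path('docs/build/html/index.html').resolve()
webbrowser.open(doc_index.as_uri())  # opens the local file in the default browser on macOS, Windows and Linux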
4,270 | CSchoel/nolds | nolds/measures.py | lyap_e_len | def lyap_e_len(**kwargs):
"""
Helper function that calculates the minimum number of data points required
to use lyap_e.
Note that none of the required parameters may be set to None.
Kwargs:
kwargs(dict):
arguments used for lyap_e (required: emb_dim, matrix_dim, min_nb
and min_tsep)
Returns:
minimum number of data points required to call lyap_e with the given
parameters
"""
m = (kwargs['emb_dim'] - 1) // (kwargs['matrix_dim'] - 1)
# minimum length required to find single orbit vector
min_len = kwargs['emb_dim']
# we need to follow each starting point of an orbit vector for m more steps
min_len += m
# we need min_tsep * 2 + 1 orbit vectors to find neighbors for each
min_len += kwargs['min_tsep'] * 2
# we need at least min_nb neighbors for each orbit vector
min_len += kwargs['min_nb']
return min_len | python | def lyap_e_len(**kwargs):
"""
Helper function that calculates the minimum number of data points required
to use lyap_e.
Note that none of the required parameters may be set to None.
Kwargs:
kwargs(dict):
arguments used for lyap_e (required: emb_dim, matrix_dim, min_nb
and min_tsep)
Returns:
minimum number of data points required to call lyap_e with the given
parameters
"""
m = (kwargs['emb_dim'] - 1) // (kwargs['matrix_dim'] - 1)
# minimum length required to find single orbit vector
min_len = kwargs['emb_dim']
# we need to follow each starting point of an orbit vector for m more steps
min_len += m
# we need min_tsep * 2 + 1 orbit vectors to find neighbors for each
min_len += kwargs['min_tsep'] * 2
# we need at least min_nb neighbors for each orbit vector
min_len += kwargs['min_nb']
return min_len | ['def', 'lyap_e_len', '(', '*', '*', 'kwargs', ')', ':', 'm', '=', '(', 'kwargs', '[', "'emb_dim'", ']', '-', '1', ')', '//', '(', 'kwargs', '[', "'matrix_dim'", ']', '-', '1', ')', '# minimum length required to find single orbit vector', 'min_len', '=', 'kwargs', '[', "'emb_dim'", ']', '# we need to follow each starting point of an orbit vector for m more steps', 'min_len', '+=', 'm', '# we need min_tsep * 2 + 1 orbit vectors to find neighbors for each', 'min_len', '+=', 'kwargs', '[', "'min_tsep'", ']', '*', '2', '# we need at least min_nb neighbors for each orbit vector', 'min_len', '+=', 'kwargs', '[', "'min_nb'", ']', 'return', 'min_len'] | Helper function that calculates the minimum number of data points required
to use lyap_e.
Note that none of the required parameters may be set to None.
Kwargs:
kwargs(dict):
arguments used for lyap_e (required: emb_dim, matrix_dim, min_nb
and min_tsep)
Returns:
minimum number of data points required to call lyap_e with the given
parameters | ['Helper', 'function', 'that', 'calculates', 'the', 'minimum', 'number', 'of', 'data', 'points', 'required', 'to', 'use', 'lyap_e', '.'] | train | https://github.com/CSchoel/nolds/blob/8a5ecc472d67ac08b571bd68967287668ca9058e/nolds/measures.py#L345-L370 |
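A quick arithmetic check of the helper above, using example parameter values (all four keyword arguments are required and must not be None):

from nolds.measures import lyap_e_len  # path per the record's URL

n = lyap_e_len(emb_dim=10, matrix_dim=4, min_tsep=0, min_nb=9)
# m = (10 - 1) // (4 - 1) = 3, so n = 10 + 3 + 2*0 + 9
print(n)  # 22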
4,271 | ronhanson/python-tbx | tbx/file.py | readlinkabs | def readlinkabs(l):
"""
Return an absolute path for the destination
of a symlink
"""
assert (os.path.islink(l))
p = os.readlink(l)
if os.path.isabs(p):
return os.path.abspath(p)
return os.path.abspath(os.path.join(os.path.dirname(l), p)) | python | def readlinkabs(l):
"""
Return an absolute path for the destination
of a symlink
"""
assert (os.path.islink(l))
p = os.readlink(l)
if os.path.isabs(p):
return os.path.abspath(p)
return os.path.abspath(os.path.join(os.path.dirname(l), p)) | ['def', 'readlinkabs', '(', 'l', ')', ':', 'assert', '(', 'os', '.', 'path', '.', 'islink', '(', 'l', ')', ')', 'p', '=', 'os', '.', 'readlink', '(', 'l', ')', 'if', 'os', '.', 'path', '.', 'isabs', '(', 'p', ')', ':', 'return', 'os', '.', 'path', '.', 'abspath', '(', 'p', ')', 'return', 'os', '.', 'path', '.', 'abspath', '(', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'path', '.', 'dirname', '(', 'l', ')', ',', 'p', ')', ')'] | Return an absolute path for the destination
of a symlink | ['Return', 'an', 'absolute', 'path', 'for', 'the', 'destination', 'of', 'a', 'symlink'] | train | https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/file.py#L68-L77 |
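Usage of the helper above on a relative symlink; the paths are hypothetical and the import path is taken from the record's URL:

import os
from tbx.file import readlinkabs  # path per the record's URL

os.symlink('target.txt', '/tmp/example_link')  # link stores a relative target
print(os.readlink('/tmp/example_link'))        # 'target.txt'
print(readlinkabs('/tmp/example_link'))        # '/tmp/target.txt', resolved against the link's directory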
4,272 | kakwa/ldapcherry | ldapcherry/__init__.py | LdapCherry.login | def login(self, login, password, url=None):
"""login page
"""
auth = self._auth(login, password)
cherrypy.session['isadmin'] = auth['isadmin']
cherrypy.session['connected'] = auth['connected']
if auth['connected']:
if auth['isadmin']:
message = \
"login success for user '%(user)s' as administrator" % {
'user': login
}
else:
message = \
"login success for user '%(user)s' as normal user" % {
'user': login
}
cherrypy.log.error(
msg=message,
severity=logging.INFO
)
cherrypy.session[SESSION_KEY] = cherrypy.request.login = login
if url is None:
redirect = "/"
else:
redirect = url
raise cherrypy.HTTPRedirect(redirect)
else:
message = "login failed for user '%(user)s'" % {
'user': login
}
cherrypy.log.error(
msg=message,
severity=logging.WARNING
)
if url is None:
qs = ''
else:
qs = '?url=' + quote_plus(url)
raise cherrypy.HTTPRedirect("/signin" + qs) | python | def login(self, login, password, url=None):
"""login page
"""
auth = self._auth(login, password)
cherrypy.session['isadmin'] = auth['isadmin']
cherrypy.session['connected'] = auth['connected']
if auth['connected']:
if auth['isadmin']:
message = \
"login success for user '%(user)s' as administrator" % {
'user': login
}
else:
message = \
"login success for user '%(user)s' as normal user" % {
'user': login
}
cherrypy.log.error(
msg=message,
severity=logging.INFO
)
cherrypy.session[SESSION_KEY] = cherrypy.request.login = login
if url is None:
redirect = "/"
else:
redirect = url
raise cherrypy.HTTPRedirect(redirect)
else:
message = "login failed for user '%(user)s'" % {
'user': login
}
cherrypy.log.error(
msg=message,
severity=logging.WARNING
)
if url is None:
qs = ''
else:
qs = '?url=' + quote_plus(url)
raise cherrypy.HTTPRedirect("/signin" + qs) | ['def', 'login', '(', 'self', ',', 'login', ',', 'password', ',', 'url', '=', 'None', ')', ':', 'auth', '=', 'self', '.', '_auth', '(', 'login', ',', 'password', ')', 'cherrypy', '.', 'session', '[', "'isadmin'", ']', '=', 'auth', '[', "'isadmin'", ']', 'cherrypy', '.', 'session', '[', "'connected'", ']', '=', 'auth', '[', "'connected'", ']', 'if', 'auth', '[', "'connected'", ']', ':', 'if', 'auth', '[', "'isadmin'", ']', ':', 'message', '=', '"login success for user \'%(user)s\' as administrator"', '%', '{', "'user'", ':', 'login', '}', 'else', ':', 'message', '=', '"login success for user \'%(user)s\' as normal user"', '%', '{', "'user'", ':', 'login', '}', 'cherrypy', '.', 'log', '.', 'error', '(', 'msg', '=', 'message', ',', 'severity', '=', 'logging', '.', 'INFO', ')', 'cherrypy', '.', 'session', '[', 'SESSION_KEY', ']', '=', 'cherrypy', '.', 'request', '.', 'login', '=', 'login', 'if', 'url', 'is', 'None', ':', 'redirect', '=', '"/"', 'else', ':', 'redirect', '=', 'url', 'raise', 'cherrypy', '.', 'HTTPRedirect', '(', 'redirect', ')', 'else', ':', 'message', '=', '"login failed for user \'%(user)s\'"', '%', '{', "'user'", ':', 'login', '}', 'cherrypy', '.', 'log', '.', 'error', '(', 'msg', '=', 'message', ',', 'severity', '=', 'logging', '.', 'WARNING', ')', 'if', 'url', 'is', 'None', ':', 'qs', '=', "''", 'else', ':', 'qs', '=', "'?url='", '+', 'quote_plus', '(', 'url', ')', 'raise', 'cherrypy', '.', 'HTTPRedirect', '(', '"/signin"', '+', 'qs', ')'] | login page | ['login', 'page'] | train | https://github.com/kakwa/ldapcherry/blob/b5e7cb6a44065abc30d164e72981b3713a172dda/ldapcherry/__init__.py#L891-L931 |
4,273 | swisscom/cleanerversion | versions/models.py | VersionedQuerySet.querytime | def querytime(self, value):
"""
Sets self._querytime as well as self.query.querytime.
:param value: None or datetime
:return:
"""
self._querytime = value
self.query.querytime = value | python | def querytime(self, value):
"""
Sets self._querytime as well as self.query.querytime.
:param value: None or datetime
:return:
"""
self._querytime = value
self.query.querytime = value | ['def', 'querytime', '(', 'self', ',', 'value', ')', ':', 'self', '.', '_querytime', '=', 'value', 'self', '.', 'query', '.', 'querytime', '=', 'value'] | Sets self._querytime as well as self.query.querytime.
:param value: None or datetime
:return: | ['Sets', 'self', '.', '_querytime', 'as', 'well', 'as', 'self', '.', 'query', '.', 'querytime', '.', ':', 'param', 'value', ':', 'None', 'or', 'datetime', ':', 'return', ':'] | train | https://github.com/swisscom/cleanerversion/blob/becadbab5d7b474a0e9a596b99e97682402d2f2c/versions/models.py#L475-L482 |
4,274 | mikedh/trimesh | trimesh/voxel.py | local_voxelize | def local_voxelize(mesh, point, pitch, radius, fill=True, **kwargs):
"""
Voxelize a mesh in the region of a cube around a point. When fill=True,
uses proximity.contains to fill the resulting voxels so may be meaningless
for non-watertight meshes. Useful to reduce memory cost for small values of
pitch as opposed to global voxelization.
Parameters
-----------
mesh : trimesh.Trimesh
Source geometry
point : (3, ) float
Point in space to voxelize around
pitch : float
Side length of a single voxel cube
radius : int
Number of voxel cubes to return in each direction.
kwargs : parameters to pass to voxelize_subdivide
Returns
-----------
voxels : (m, m, m) bool
Array of local voxels where m=2*radius+1
origin_position : (3,) float
Position of the voxel grid origin in space
"""
from scipy import ndimage
# make sure point is correct type/shape
point = np.asanyarray(point, dtype=np.float64).reshape(3)
# this is a gotcha- radius sounds a lot like it should be in
# float model space, not int voxel space so check
if not isinstance(radius, int):
raise ValueError('radius needs to be an integer number of cubes!')
# Bounds of region
bounds = np.concatenate((point - (radius + 0.5) * pitch,
point + (radius + 0.5) * pitch))
# faces that intersect axis aligned bounding box
faces = list(mesh.triangles_tree.intersection(bounds))
# didn't hit anything so exit
if len(faces) == 0:
return np.array([], dtype=np.bool), np.zeros(3)
local = mesh.submesh([[f] for f in faces], append=True)
# Translate mesh so point is at 0,0,0
local.apply_translation(-point)
sparse, origin = voxelize_subdivide(local, pitch, **kwargs)
matrix = sparse_to_matrix(sparse)
# Find voxel index for point
center = np.round(-origin / pitch).astype(np.int64)
# pad matrix if necessary
prepad = np.maximum(radius - center, 0)
postpad = np.maximum(center + radius + 1 - matrix.shape, 0)
matrix = np.pad(matrix, np.stack((prepad, postpad), axis=-1),
mode='constant')
center += prepad
# Extract voxels within the bounding box
voxels = matrix[center[0] - radius:center[0] + radius + 1,
center[1] - radius:center[1] + radius + 1,
center[2] - radius:center[2] + radius + 1]
local_origin = point - radius * pitch # origin of local voxels
# Fill internal regions
if fill:
regions, n = ndimage.measurements.label(~voxels)
distance = ndimage.morphology.distance_transform_cdt(~voxels)
representatives = [np.unravel_index((distance * (regions == i)).argmax(),
distance.shape) for i in range(1, n + 1)]
contains = mesh.contains(
np.asarray(representatives) *
pitch +
local_origin)
where = np.where(contains)[0] + 1
# use in1d vs isin for older numpy versions
internal = np.in1d(regions.flatten(), where).reshape(regions.shape)
voxels = np.logical_or(voxels, internal)
return voxels, local_origin | python | def local_voxelize(mesh, point, pitch, radius, fill=True, **kwargs):
"""
Voxelize a mesh in the region of a cube around a point. When fill=True,
uses proximity.contains to fill the resulting voxels so may be meaningless
for non-watertight meshes. Useful to reduce memory cost for small values of
pitch as opposed to global voxelization.
Parameters
-----------
mesh : trimesh.Trimesh
Source geometry
point : (3, ) float
Point in space to voxelize around
pitch : float
Side length of a single voxel cube
radius : int
Number of voxel cubes to return in each direction.
kwargs : parameters to pass to voxelize_subdivide
Returns
-----------
voxels : (m, m, m) bool
Array of local voxels where m=2*radius+1
origin_position : (3,) float
Position of the voxel grid origin in space
"""
from scipy import ndimage
# make sure point is correct type/shape
point = np.asanyarray(point, dtype=np.float64).reshape(3)
# this is a gotcha- radius sounds a lot like it should be in
# float model space, not int voxel space so check
if not isinstance(radius, int):
raise ValueError('radius needs to be an integer number of cubes!')
# Bounds of region
bounds = np.concatenate((point - (radius + 0.5) * pitch,
point + (radius + 0.5) * pitch))
# faces that intersect axis aligned bounding box
faces = list(mesh.triangles_tree.intersection(bounds))
# didn't hit anything so exit
if len(faces) == 0:
return np.array([], dtype=np.bool), np.zeros(3)
local = mesh.submesh([[f] for f in faces], append=True)
# Translate mesh so point is at 0,0,0
local.apply_translation(-point)
sparse, origin = voxelize_subdivide(local, pitch, **kwargs)
matrix = sparse_to_matrix(sparse)
# Find voxel index for point
center = np.round(-origin / pitch).astype(np.int64)
# pad matrix if necessary
prepad = np.maximum(radius - center, 0)
postpad = np.maximum(center + radius + 1 - matrix.shape, 0)
matrix = np.pad(matrix, np.stack((prepad, postpad), axis=-1),
mode='constant')
center += prepad
# Extract voxels within the bounding box
voxels = matrix[center[0] - radius:center[0] + radius + 1,
center[1] - radius:center[1] + radius + 1,
center[2] - radius:center[2] + radius + 1]
local_origin = point - radius * pitch # origin of local voxels
# Fill internal regions
if fill:
regions, n = ndimage.measurements.label(~voxels)
distance = ndimage.morphology.distance_transform_cdt(~voxels)
representatives = [np.unravel_index((distance * (regions == i)).argmax(),
distance.shape) for i in range(1, n + 1)]
contains = mesh.contains(
np.asarray(representatives) *
pitch +
local_origin)
where = np.where(contains)[0] + 1
# use in1d vs isin for older numpy versions
internal = np.in1d(regions.flatten(), where).reshape(regions.shape)
voxels = np.logical_or(voxels, internal)
return voxels, local_origin | ['def', 'local_voxelize', '(', 'mesh', ',', 'point', ',', 'pitch', ',', 'radius', ',', 'fill', '=', 'True', ',', '*', '*', 'kwargs', ')', ':', 'from', 'scipy', 'import', 'ndimage', '# make sure point is correct type/shape', 'point', '=', 'np', '.', 'asanyarray', '(', 'point', ',', 'dtype', '=', 'np', '.', 'float64', ')', '.', 'reshape', '(', '3', ')', '# this is a gotcha- radius sounds a lot like it should be in', '# float model space, not int voxel space so check', 'if', 'not', 'isinstance', '(', 'radius', ',', 'int', ')', ':', 'raise', 'ValueError', '(', "'radius needs to be an integer number of cubes!'", ')', '# Bounds of region', 'bounds', '=', 'np', '.', 'concatenate', '(', '(', 'point', '-', '(', 'radius', '+', '0.5', ')', '*', 'pitch', ',', 'point', '+', '(', 'radius', '+', '0.5', ')', '*', 'pitch', ')', ')', '# faces that intersect axis aligned bounding box', 'faces', '=', 'list', '(', 'mesh', '.', 'triangles_tree', '.', 'intersection', '(', 'bounds', ')', ')', "# didn't hit anything so exit", 'if', 'len', '(', 'faces', ')', '==', '0', ':', 'return', 'np', '.', 'array', '(', '[', ']', ',', 'dtype', '=', 'np', '.', 'bool', ')', ',', 'np', '.', 'zeros', '(', '3', ')', 'local', '=', 'mesh', '.', 'submesh', '(', '[', '[', 'f', ']', 'for', 'f', 'in', 'faces', ']', ',', 'append', '=', 'True', ')', '# Translate mesh so point is at 0,0,0', 'local', '.', 'apply_translation', '(', '-', 'point', ')', 'sparse', ',', 'origin', '=', 'voxelize_subdivide', '(', 'local', ',', 'pitch', ',', '*', '*', 'kwargs', ')', 'matrix', '=', 'sparse_to_matrix', '(', 'sparse', ')', '# Find voxel index for point', 'center', '=', 'np', '.', 'round', '(', '-', 'origin', '/', 'pitch', ')', '.', 'astype', '(', 'np', '.', 'int64', ')', '# pad matrix if necessary', 'prepad', '=', 'np', '.', 'maximum', '(', 'radius', '-', 'center', ',', '0', ')', 'postpad', '=', 'np', '.', 'maximum', '(', 'center', '+', 'radius', '+', '1', '-', 'matrix', '.', 'shape', ',', '0', ')', 'matrix', '=', 'np', '.', 'pad', '(', 'matrix', ',', 'np', '.', 'stack', '(', '(', 'prepad', ',', 'postpad', ')', ',', 'axis', '=', '-', '1', ')', ',', 'mode', '=', "'constant'", ')', 'center', '+=', 'prepad', '# Extract voxels within the bounding box', 'voxels', '=', 'matrix', '[', 'center', '[', '0', ']', '-', 'radius', ':', 'center', '[', '0', ']', '+', 'radius', '+', '1', ',', 'center', '[', '1', ']', '-', 'radius', ':', 'center', '[', '1', ']', '+', 'radius', '+', '1', ',', 'center', '[', '2', ']', '-', 'radius', ':', 'center', '[', '2', ']', '+', 'radius', '+', '1', ']', 'local_origin', '=', 'point', '-', 'radius', '*', 'pitch', '# origin of local voxels', '# Fill internal regions', 'if', 'fill', ':', 'regions', ',', 'n', '=', 'ndimage', '.', 'measurements', '.', 'label', '(', '~', 'voxels', ')', 'distance', '=', 'ndimage', '.', 'morphology', '.', 'distance_transform_cdt', '(', '~', 'voxels', ')', 'representatives', '=', '[', 'np', '.', 'unravel_index', '(', '(', 'distance', '*', '(', 'regions', '==', 'i', ')', ')', '.', 'argmax', '(', ')', ',', 'distance', '.', 'shape', ')', 'for', 'i', 'in', 'range', '(', '1', ',', 'n', '+', '1', ')', ']', 'contains', '=', 'mesh', '.', 'contains', '(', 'np', '.', 'asarray', '(', 'representatives', ')', '*', 'pitch', '+', 'local_origin', ')', 'where', '=', 'np', '.', 'where', '(', 'contains', ')', '[', '0', ']', '+', '1', '# use in1d vs isin for older numpy versions', 'internal', '=', 'np', '.', 'in1d', '(', 'regions', '.', 'flatten', '(', ')', ',', 'where', ')', '.', 'reshape', '(', 
'regions', '.', 'shape', ')', 'voxels', '=', 'np', '.', 'logical_or', '(', 'voxels', ',', 'internal', ')', 'return', 'voxels', ',', 'local_origin'] | Voxelize a mesh in the region of a cube around a point. When fill=True,
uses proximity.contains to fill the resulting voxels so may be meaningless
for non-watertight meshes. Useful to reduce memory cost for small values of
pitch as opposed to global voxelization.
Parameters
-----------
mesh : trimesh.Trimesh
Source geometry
point : (3, ) float
Point in space to voxelize around
pitch : float
Side length of a single voxel cube
radius : int
Number of voxel cubes to return in each direction.
kwargs : parameters to pass to voxelize_subdivide
Returns
-----------
voxels : (m, m, m) bool
Array of local voxels where m=2*radius+1
origin_position : (3,) float
Position of the voxel grid origin in space | ['Voxelize', 'a', 'mesh', 'in', 'the', 'region', 'of', 'a', 'cube', 'around', 'a', 'point', '.', 'When', 'fill', '=', 'True', 'uses', 'proximity', '.', 'contains', 'to', 'fill', 'the', 'resulting', 'voxels', 'so', 'may', 'be', 'meaningless', 'for', 'non', '-', 'watertight', 'meshes', '.', 'Useful', 'to', 'reduce', 'memory', 'cost', 'for', 'small', 'values', 'of', 'pitch', 'as', 'opposed', 'to', 'global', 'voxelization', '.'] | train | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/voxel.py#L425-L513 |
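A minimal usage sketch for the local_voxelize signature documented above; the mesh file name and query point are illustrative, and radius must be an integer count of voxels:
import numpy as np
import trimesh
from trimesh.voxel import local_voxelize  # module path taken from the source URL above

mesh = trimesh.load('model.stl')          # hypothetical watertight mesh file
voxels, origin = local_voxelize(mesh, point=np.zeros(3), pitch=0.1, radius=5, fill=True)
# voxels is an (11, 11, 11) bool array, i.e. 2*radius+1 cubes per axis; origin is the grid origin in model space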
4,275 | NicolasLM/spinach | spinach/brokers/redis.py | RedisBroker.get_jobs_from_queue | def get_jobs_from_queue(self, queue: str, max_jobs: int) -> List[Job]:
"""Get jobs from a queue."""
jobs_json_string = self._run_script(
self._get_jobs_from_queue,
self._to_namespaced(queue),
self._to_namespaced(RUNNING_JOBS_KEY.format(self._id)),
JobStatus.RUNNING.value,
max_jobs
)
jobs = json.loads(jobs_json_string.decode())
jobs = [Job.deserialize(job) for job in jobs]
return jobs | python | def get_jobs_from_queue(self, queue: str, max_jobs: int) -> List[Job]:
"""Get jobs from a queue."""
jobs_json_string = self._run_script(
self._get_jobs_from_queue,
self._to_namespaced(queue),
self._to_namespaced(RUNNING_JOBS_KEY.format(self._id)),
JobStatus.RUNNING.value,
max_jobs
)
jobs = json.loads(jobs_json_string.decode())
jobs = [Job.deserialize(job) for job in jobs]
return jobs | ['def', 'get_jobs_from_queue', '(', 'self', ',', 'queue', ':', 'str', ',', 'max_jobs', ':', 'int', ')', '->', 'List', '[', 'Job', ']', ':', 'jobs_json_string', '=', 'self', '.', '_run_script', '(', 'self', '.', '_get_jobs_from_queue', ',', 'self', '.', '_to_namespaced', '(', 'queue', ')', ',', 'self', '.', '_to_namespaced', '(', 'RUNNING_JOBS_KEY', '.', 'format', '(', 'self', '.', '_id', ')', ')', ',', 'JobStatus', '.', 'RUNNING', '.', 'value', ',', 'max_jobs', ')', 'jobs', '=', 'json', '.', 'loads', '(', 'jobs_json_string', '.', 'decode', '(', ')', ')', 'jobs', '=', '[', 'Job', '.', 'deserialize', '(', 'job', ')', 'for', 'job', 'in', 'jobs', ']', 'return', 'jobs'] | Get jobs from a queue. | ['Get', 'jobs', 'from', 'a', 'queue', '.'] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/brokers/redis.py#L145-L158 |
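A hedged usage sketch; it assumes `broker` is an already-started spinach RedisBroker and that a queue named 'spinach' exists (both names are illustrative):
jobs = broker.get_jobs_from_queue('spinach', max_jobs=10)  # atomically marks up to 10 jobs as RUNNING
for job in jobs:
    print(job)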
4,276 | saltstack/salt | salt/modules/xapi_virt.py | get_macs | def get_macs(vm_):
'''
Return a list of MAC addresses from the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_macs <vm name>
'''
macs = []
nics = get_nics(vm_)
if nics is None:
return None
for nic in nics:
macs.append(nic)
return macs | python | def get_macs(vm_):
'''
Return a list of MAC addresses from the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_macs <vm name>
'''
macs = []
nics = get_nics(vm_)
if nics is None:
return None
for nic in nics:
macs.append(nic)
return macs | ['def', 'get_macs', '(', 'vm_', ')', ':', 'macs', '=', '[', ']', 'nics', '=', 'get_nics', '(', 'vm_', ')', 'if', 'nics', 'is', 'None', ':', 'return', 'None', 'for', 'nic', 'in', 'nics', ':', 'macs', '.', 'append', '(', 'nic', ')', 'return', 'macs'] | Return a list of MAC addresses from the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_macs <vm name> | ['Return', 'a', 'list', 'off', 'MAC', 'addresses', 'from', 'the', 'named', 'vm'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xapi_virt.py#L380-L397 |
4,277 | DataDog/integrations-core | tokumx/datadog_checks/tokumx/vendor/pymongo/cursor.py | Cursor.count | def count(self, with_limit_and_skip=False):
"""Get the size of the results set for this query.
Returns the number of documents in the results set for this query. Does
not take :meth:`limit` and :meth:`skip` into account by default - set
`with_limit_and_skip` to ``True`` if that is the desired behavior.
Raises :class:`~pymongo.errors.OperationFailure` on a database error.
When used with MongoDB >= 2.6, :meth:`~count` uses any :meth:`~hint`
applied to the query. In the following example the hint is passed to
the count command:
collection.find({'field': 'value'}).hint('field_1').count()
The :meth:`count` method obeys the
:attr:`~pymongo.collection.Collection.read_preference` of the
:class:`~pymongo.collection.Collection` instance on which
:meth:`~pymongo.collection.Collection.find` was called.
:Parameters:
- `with_limit_and_skip` (optional): take any :meth:`limit` or
:meth:`skip` that has been applied to this cursor into account when
getting the count
.. note:: The `with_limit_and_skip` parameter requires server
version **>= 1.1.4-**
.. versionchanged:: 2.8
The :meth:`~count` method now supports :meth:`~hint`.
"""
validate_boolean("with_limit_and_skip", with_limit_and_skip)
cmd = SON([("count", self.__collection.name),
("query", self.__spec)])
if self.__max_time_ms is not None:
cmd["maxTimeMS"] = self.__max_time_ms
if self.__comment:
cmd["$comment"] = self.__comment
if self.__hint is not None:
cmd["hint"] = self.__hint
if with_limit_and_skip:
if self.__limit:
cmd["limit"] = self.__limit
if self.__skip:
cmd["skip"] = self.__skip
return self.__collection._count(cmd, self.__collation) | python | def count(self, with_limit_and_skip=False):
"""Get the size of the results set for this query.
Returns the number of documents in the results set for this query. Does
not take :meth:`limit` and :meth:`skip` into account by default - set
`with_limit_and_skip` to ``True`` if that is the desired behavior.
Raises :class:`~pymongo.errors.OperationFailure` on a database error.
When used with MongoDB >= 2.6, :meth:`~count` uses any :meth:`~hint`
applied to the query. In the following example the hint is passed to
the count command:
collection.find({'field': 'value'}).hint('field_1').count()
The :meth:`count` method obeys the
:attr:`~pymongo.collection.Collection.read_preference` of the
:class:`~pymongo.collection.Collection` instance on which
:meth:`~pymongo.collection.Collection.find` was called.
:Parameters:
- `with_limit_and_skip` (optional): take any :meth:`limit` or
:meth:`skip` that has been applied to this cursor into account when
getting the count
.. note:: The `with_limit_and_skip` parameter requires server
version **>= 1.1.4-**
.. versionchanged:: 2.8
The :meth:`~count` method now supports :meth:`~hint`.
"""
validate_boolean("with_limit_and_skip", with_limit_and_skip)
cmd = SON([("count", self.__collection.name),
("query", self.__spec)])
if self.__max_time_ms is not None:
cmd["maxTimeMS"] = self.__max_time_ms
if self.__comment:
cmd["$comment"] = self.__comment
if self.__hint is not None:
cmd["hint"] = self.__hint
if with_limit_and_skip:
if self.__limit:
cmd["limit"] = self.__limit
if self.__skip:
cmd["skip"] = self.__skip
return self.__collection._count(cmd, self.__collation) | ['def', 'count', '(', 'self', ',', 'with_limit_and_skip', '=', 'False', ')', ':', 'validate_boolean', '(', '"with_limit_and_skip"', ',', 'with_limit_and_skip', ')', 'cmd', '=', 'SON', '(', '[', '(', '"count"', ',', 'self', '.', '__collection', '.', 'name', ')', ',', '(', '"query"', ',', 'self', '.', '__spec', ')', ']', ')', 'if', 'self', '.', '__max_time_ms', 'is', 'not', 'None', ':', 'cmd', '[', '"maxTimeMS"', ']', '=', 'self', '.', '__max_time_ms', 'if', 'self', '.', '__comment', ':', 'cmd', '[', '"$comment"', ']', '=', 'self', '.', '__comment', 'if', 'self', '.', '__hint', 'is', 'not', 'None', ':', 'cmd', '[', '"hint"', ']', '=', 'self', '.', '__hint', 'if', 'with_limit_and_skip', ':', 'if', 'self', '.', '__limit', ':', 'cmd', '[', '"limit"', ']', '=', 'self', '.', '__limit', 'if', 'self', '.', '__skip', ':', 'cmd', '[', '"skip"', ']', '=', 'self', '.', '__skip', 'return', 'self', '.', '__collection', '.', '_count', '(', 'cmd', ',', 'self', '.', '__collation', ')'] | Get the size of the results set for this query.
Returns the number of documents in the results set for this query. Does
not take :meth:`limit` and :meth:`skip` into account by default - set
`with_limit_and_skip` to ``True`` if that is the desired behavior.
Raises :class:`~pymongo.errors.OperationFailure` on a database error.
When used with MongoDB >= 2.6, :meth:`~count` uses any :meth:`~hint`
applied to the query. In the following example the hint is passed to
the count command:
collection.find({'field': 'value'}).hint('field_1').count()
The :meth:`count` method obeys the
:attr:`~pymongo.collection.Collection.read_preference` of the
:class:`~pymongo.collection.Collection` instance on which
:meth:`~pymongo.collection.Collection.find` was called.
:Parameters:
- `with_limit_and_skip` (optional): take any :meth:`limit` or
:meth:`skip` that has been applied to this cursor into account when
getting the count
.. note:: The `with_limit_and_skip` parameter requires server
version **>= 1.1.4-**
.. versionchanged:: 2.8
The :meth:`~count` method now supports :meth:`~hint`. | ['Get', 'the', 'size', 'of', 'the', 'results', 'set', 'for', 'this', 'query', '.'] | train | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/cursor.py#L680-L727 |
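A short sketch against the PyMongo 3 cursor API described above; the connection string, database, and collection names are placeholders:
from pymongo import MongoClient

coll = MongoClient('mongodb://localhost:27017')['test_db']['things']
total = coll.find({'field': 'value'}).hint('field_1').count()            # hint is forwarded to the count command
page = coll.find({'field': 'value'}).skip(20).limit(10).count(with_limit_and_skip=True)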
4,278 | rosenbrockc/fortpy | fortpy/isense/rtupdate.py | ModuleUpdater.update_instance_extent | def update_instance_extent(self, instance, module, operation):
"""Updates a new instance that was added to a module to be complete
if the end token is present in any remaining, overlapping operations.
"""
#Essentially, we want to look in the rest of the statements that are
#part of the current operation to see how many more of them pertain
#to the new instance that was added.
#New signatures only result in instances being added if mode is "insert"
#or "replace". In both cases, the important code is in the buffered
#statements, *not* the cached version. Iterate the remaining statements
#in the buffer and look for the end_token for the instance. If we don't
#find it, check for overlap between the operations' index specifiers.
instance.end -= operation.curlength
end_token = instance.end_token
(ibuffer, length) = self._find_end_token(end_token, operation)
cum_length = length
opstack = [operation]
while ibuffer is None and opstack[-1].index + 1 < len(self._operations):
#We didn't find a natural termination to the new instance. Look for
#overlap in the operations
noperation = self._operations[opstack[-1].index + 1]
#We only want to check the next operation if it is a neighbor
#in line numbers in the buffer.
if noperation.ibuffer[0] - opstack[-1].ibuffer[1] == 1:
(ibuffer, length) = self._find_end_token(end_token, noperation)
cum_length += length
opstack.append(noperation)
else:
break
if ibuffer is not None:
instance.incomplete = False
instance.end += cum_length
for op in opstack:
op.bar_extent = True
op.set_element(instance)
else:
#We set the element for the current operation to be the new instance
#for the rest of statements in its set.
operation.set_element(instance) | python | def update_instance_extent(self, instance, module, operation):
"""Updates a new instance that was added to a module to be complete
if the end token is present in any remaining, overlapping operations.
"""
#Essentially, we want to look in the rest of the statements that are
#part of the current operation to see how many more of them pertain
#to the new instance that was added.
#New signatures only result in instances being added if mode is "insert"
#or "replace". In both cases, the important code is in the buffered
#statements, *not* the cached version. Iterate the remaining statements
#in the buffer and look for the end_token for the instance. If we don't
#find it, check for overlap between the operations' index specifiers.
instance.end -= operation.curlength
end_token = instance.end_token
(ibuffer, length) = self._find_end_token(end_token, operation)
cum_length = length
opstack = [operation]
while ibuffer is None and opstack[-1].index + 1 < len(self._operations):
#We didn't find a natural termination to the new instance. Look for
#overlap in the operations
noperation = self._operations[opstack[-1].index + 1]
#We only want to check the next operation if it is a neighbor
#in line numbers in the buffer.
if noperation.ibuffer[0] - opstack[-1].ibuffer[1] == 1:
(ibuffer, length) = self._find_end_token(end_token, noperation)
cum_length += length
opstack.append(noperation)
else:
break
if ibuffer is not None:
instance.incomplete = False
instance.end += cum_length
for op in opstack:
op.bar_extent = True
op.set_element(instance)
else:
#We set the element for the current operation to be the new instance
#for the rest of statements in its set.
operation.set_element(instance) | ['def', 'update_instance_extent', '(', 'self', ',', 'instance', ',', 'module', ',', 'operation', ')', ':', '#Essentially, we want to look in the rest of the statements that are', '#part of the current operation to see how many more of them pertain ', '#to the new instance that was added.', '#New signatures only result in instances being added if mode is "insert"', '#or "replace". In both cases, the important code is in the buffered', '#statements, *not* the cached version. Iterate the remaining statements', "#in the buffer and look for the end_token for the instance. If we don't", "#find it, check for overlap between the operations' index specifiers.", 'instance', '.', 'end', '-=', 'operation', '.', 'curlength', 'end_token', '=', 'instance', '.', 'end_token', '(', 'ibuffer', ',', 'length', ')', '=', 'self', '.', '_find_end_token', '(', 'end_token', ',', 'operation', ')', 'cum_length', '=', 'length', 'opstack', '=', '[', 'operation', ']', 'while', 'ibuffer', 'is', 'None', 'and', 'opstack', '[', '-', '1', ']', '.', 'index', '+', '1', '<', 'len', '(', 'self', '.', '_operations', ')', ':', "#We didn't find a natural termination to the new instance. Look for", '#overlap in the operations', 'noperation', '=', 'self', '.', '_operations', '[', 'opstack', '[', '-', '1', ']', '.', 'index', '+', '1', ']', '#We only want to check the next operation if it is a neighbor', '#in line numbers in the buffer.', 'if', 'noperation', '.', 'ibuffer', '[', '0', ']', '-', 'opstack', '[', '-', '1', ']', '.', 'ibuffer', '[', '1', ']', '==', '1', ':', '(', 'ibuffer', ',', 'length', ')', '=', 'self', '.', '_find_end_token', '(', 'end_token', ',', 'noperation', ')', 'cum_length', '+=', 'length', 'opstack', '.', 'append', '(', 'noperation', ')', 'else', ':', 'break', 'if', 'ibuffer', 'is', 'not', 'None', ':', 'instance', '.', 'incomplete', '=', 'False', 'instance', '.', 'end', '+=', 'cum_length', 'for', 'op', 'in', 'opstack', ':', 'op', '.', 'bar_extent', '=', 'True', 'op', '.', 'set_element', '(', 'instance', ')', 'else', ':', '#We set the element for the current operation to be the new instance', '#for the rest of statements in its set.', 'operation', '.', 'set_element', '(', 'instance', ')'] | Updates a new instance that was added to a module to be complete
if the end token is present in any remaining, overlapping operations. | ['Updates', 'a', 'new', 'instance', 'that', 'was', 'added', 'to', 'a', 'module', 'to', 'be', 'complete', 'if', 'the', 'end', 'token', 'is', 'present', 'in', 'any', 'remaining', 'overlapping', 'operations', '.'] | train | https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/isense/rtupdate.py#L586-L627 |
4,279 | jahuth/litus | spikes.py | SpikeContainer.temporal_firing_rate | def temporal_firing_rate(self,time_dimension=0,resolution=1.0,units=None,
min_t=None,max_t=None,weight_function=None,normalize_time=False,
normalize_n=False,start_units_with_0=True,cell_dimension='N'):
"""
Outputs a time histogram of spikes.
`bins`: number of bins (default is 1ms bins from 0 to t_max)
`weight_function`: if set, computes a weighted histogram, dependent on the (index, time) tuples of each spike
weight_function = lambda x: weight_map.flatten()[array(x[:,0],dtype=int)]
`normalize_time`
`normalize_n`: normalize by the length of time (such that normal output is Hz) and/or number of units (such that output is Hz/unit, determined with unique values in cell_dimension)
Generally does not make sense when using a weight_function other than 'count'.
`start_units_with_0`: starts indices from 0 instead of the actual index
"""
units = self._default_units(units)
if self.data_format == 'spike_times':
converted_dimension,st = self.spike_times.get_converted(0,units)
if min_t is None:
min_t = converted_dimension.min
if max_t is None:
max_t = converted_dimension.max
st = st[(st>=min_t)*(st<max_t)]
bins = converted_dimension.linspace_by_resolution(resolution,end_at_end=True,extra_bins=0)
H,edg = np.histogram(st,bins=bins)
if normalize_time:
H = H/(convert_time(resolution,from_units=units,to_units='s')) # make it Hertz
if normalize_n:
H = H/(len(np.unique(self.spike_times[cell_dimension])))
return H,edg | python | def temporal_firing_rate(self,time_dimension=0,resolution=1.0,units=None,
min_t=None,max_t=None,weight_function=None,normalize_time=False,
normalize_n=False,start_units_with_0=True,cell_dimension='N'):
"""
Outputs a time histogram of spikes.
`bins`: number of bins (default is 1ms bins from 0 to t_max)
`weight_function`: if set, computes a weighted histogram, dependent on the (index, time) tuples of each spike
weight_function = lambda x: weight_map.flatten()[array(x[:,0],dtype=int)]
`normalize_time`
`normalize_n`: normalize by the length of time (such that normal output is Hz) and/or number of units (such that output is Hz/unit, determined with unique values in cell_dimension)
Generally does not make sense when using a weight_function other than 'count'.
`start_units_with_0`: starts indices from 0 instead of the actual index
"""
units = self._default_units(units)
if self.data_format == 'spike_times':
converted_dimension,st = self.spike_times.get_converted(0,units)
if min_t is None:
min_t = converted_dimension.min
if max_t is None:
max_t = converted_dimension.max
st = st[(st>=min_t)*(st<max_t)]
bins = converted_dimension.linspace_by_resolution(resolution,end_at_end=True,extra_bins=0)
H,edg = np.histogram(st,bins=bins)
if normalize_time:
H = H/(convert_time(resolution,from_units=units,to_units='s')) # make it Hertz
if normalize_n:
H = H/(len(np.unique(self.spike_times[cell_dimension])))
return H,edg | ['def', 'temporal_firing_rate', '(', 'self', ',', 'time_dimension', '=', '0', ',', 'resolution', '=', '1.0', ',', 'units', '=', 'None', ',', 'min_t', '=', 'None', ',', 'max_t', '=', 'None', ',', 'weight_function', '=', 'None', ',', 'normalize_time', '=', 'False', ',', 'normalize_n', '=', 'False', ',', 'start_units_with_0', '=', 'True', ',', 'cell_dimension', '=', "'N'", ')', ':', 'units', '=', 'self', '.', '_default_units', '(', 'units', ')', 'if', 'self', '.', 'data_format', '==', "'spike_times'", ':', 'converted_dimension', ',', 'st', '=', 'self', '.', 'spike_times', '.', 'get_converted', '(', '0', ',', 'units', ')', 'if', 'min_t', 'is', 'None', ':', 'min_t', '=', 'converted_dimension', '.', 'min', 'if', 'max_t', 'is', 'None', ':', 'max_t', '=', 'converted_dimension', '.', 'max', 'st', '=', 'st', '[', '(', 'st', '>=', 'min_t', ')', '*', '(', 'st', '<', 'max_t', ')', ']', 'bins', '=', 'converted_dimension', '.', 'linspace_by_resolution', '(', 'resolution', ',', 'end_at_end', '=', 'True', ',', 'extra_bins', '=', '0', ')', 'H', ',', 'edg', '=', 'np', '.', 'histogram', '(', 'st', ',', 'bins', '=', 'bins', ')', 'if', 'normalize_time', ':', 'H', '=', 'H', '/', '(', 'convert_time', '(', 'resolution', ',', 'from_units', '=', 'units', ',', 'to_units', '=', "'s'", ')', ')', '# make it Hertz', 'if', 'normalize_n', ':', 'H', '=', 'H', '/', '(', 'len', '(', 'np', '.', 'unique', '(', 'self', '.', 'spike_times', '[', 'cell_dimension', ']', ')', ')', ')', 'return', 'H', ',', 'edg'] | Outputs a time histogram of spikes.
`bins`: number of bins (default is 1ms bins from 0 to t_max)
`weight_function`: if set, computes a weighted histogram, dependent on the (index, time) tuples of each spike
weight_function = lambda x: weight_map.flatten()[array(x[:,0],dtype=int)]
`normalize_time`
`normalize_n`: normalize by the length of time (such that normal output is Hz) and/or number of units (such that output is Hz/unit, determined with unique values in cell_dimension)
Generally does not make sense when using a weight_function other than 'count'.
`start_units_with_0`: starts indices from 0 instead of the actual index | ['Outputs', 'a', 'time', 'histogram', 'of', 'spikes', '.'] | train | https://github.com/jahuth/litus/blob/712b016ea2dbb1cf0a30bfdbb0a136945a7b7c5e/spikes.py#L1201-L1232
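A hedged sketch of calling the method; it assumes `spikes` is an existing SpikeContainer whose spike times are stored in milliseconds:
H, edges = spikes.temporal_firing_rate(resolution=1.0, units='ms',
                                       normalize_time=True, normalize_n=True)
# H[i] is the firing rate (Hz per unit) in the bin [edges[i], edges[i+1])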
4,280 | saltstack/salt | salt/netapi/rest_wsgi.py | start | def start():
'''
Start simple_server()
'''
from wsgiref.simple_server import make_server
# When started outside of salt-api __opts__ will not be injected
if '__opts__' not in globals():
globals()['__opts__'] = get_opts()
if __virtual__() is False:
raise SystemExit(1)
mod_opts = __opts__.get(__virtualname__, {})
# pylint: disable=C0103
httpd = make_server('localhost', mod_opts['port'], application)
try:
httpd.serve_forever()
except KeyboardInterrupt:
raise SystemExit(0) | python | def start():
'''
Start simple_server()
'''
from wsgiref.simple_server import make_server
# When started outside of salt-api __opts__ will not be injected
if '__opts__' not in globals():
globals()['__opts__'] = get_opts()
if __virtual__() is False:
raise SystemExit(1)
mod_opts = __opts__.get(__virtualname__, {})
# pylint: disable=C0103
httpd = make_server('localhost', mod_opts['port'], application)
try:
httpd.serve_forever()
except KeyboardInterrupt:
raise SystemExit(0) | ['def', 'start', '(', ')', ':', 'from', 'wsgiref', '.', 'simple_server', 'import', 'make_server', '# When started outside of salt-api __opts__ will not be injected', 'if', "'__opts__'", 'not', 'in', 'globals', '(', ')', ':', 'globals', '(', ')', '[', "'__opts__'", ']', '=', 'get_opts', '(', ')', 'if', '__virtual__', '(', ')', 'is', 'False', ':', 'raise', 'SystemExit', '(', '1', ')', 'mod_opts', '=', '__opts__', '.', 'get', '(', '__virtualname__', ',', '{', '}', ')', '# pylint: disable=C0103', 'httpd', '=', 'make_server', '(', "'localhost'", ',', 'mod_opts', '[', "'port'", ']', ',', 'application', ')', 'try', ':', 'httpd', '.', 'serve_forever', '(', ')', 'except', 'KeyboardInterrupt', ':', 'raise', 'SystemExit', '(', '0', ')'] | Start simple_server() | ['Start', 'simple_server', '()'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_wsgi.py#L308-L329 |
4,281 | HumanCellAtlas/cloud-blobstore | cloud_blobstore/s3.py | S3BlobStore.get_user_metadata | def get_user_metadata(
self,
bucket: str,
key: str
) -> typing.Dict[str, str]:
"""
Retrieves the user metadata for a given object in a given bucket. If the platform has any mandatory prefixes or
suffixes for the metadata keys, they should be stripped before being returned.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which metadata is being
retrieved.
:return: a dictionary mapping metadata keys to metadata values.
"""
try:
response = self.get_all_metadata(bucket, key)
metadata = response['Metadata'].copy()
response = self.s3_client.get_object_tagging(
Bucket=bucket,
Key=key,
)
for tag in response['TagSet']:
key, value = tag['Key'], tag['Value']
metadata[key] = value
return metadata
except botocore.exceptions.ClientError as ex:
if str(ex.response['Error']['Code']) == \
str(requests.codes.not_found):
raise BlobNotFoundError(f"Could not find s3://{bucket}/{key}") from ex
raise BlobStoreUnknownError(ex) | python | def get_user_metadata(
self,
bucket: str,
key: str
) -> typing.Dict[str, str]:
"""
Retrieves the user metadata for a given object in a given bucket. If the platform has any mandatory prefixes or
suffixes for the metadata keys, they should be stripped before being returned.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which metadata is being
retrieved.
:return: a dictionary mapping metadata keys to metadata values.
"""
try:
response = self.get_all_metadata(bucket, key)
metadata = response['Metadata'].copy()
response = self.s3_client.get_object_tagging(
Bucket=bucket,
Key=key,
)
for tag in response['TagSet']:
key, value = tag['Key'], tag['Value']
metadata[key] = value
return metadata
except botocore.exceptions.ClientError as ex:
if str(ex.response['Error']['Code']) == \
str(requests.codes.not_found):
raise BlobNotFoundError(f"Could not find s3://{bucket}/{key}") from ex
raise BlobStoreUnknownError(ex) | ['def', 'get_user_metadata', '(', 'self', ',', 'bucket', ':', 'str', ',', 'key', ':', 'str', ')', '->', 'typing', '.', 'Dict', '[', 'str', ',', 'str', ']', ':', 'try', ':', 'response', '=', 'self', '.', 'get_all_metadata', '(', 'bucket', ',', 'key', ')', 'metadata', '=', 'response', '[', "'Metadata'", ']', '.', 'copy', '(', ')', 'response', '=', 'self', '.', 's3_client', '.', 'get_object_tagging', '(', 'Bucket', '=', 'bucket', ',', 'Key', '=', 'key', ',', ')', 'for', 'tag', 'in', 'response', '[', "'TagSet'", ']', ':', 'key', ',', 'value', '=', 'tag', '[', "'Key'", ']', ',', 'tag', '[', "'Value'", ']', 'metadata', '[', 'key', ']', '=', 'value', 'return', 'metadata', 'except', 'botocore', '.', 'exceptions', '.', 'ClientError', 'as', 'ex', ':', 'if', 'str', '(', 'ex', '.', 'response', '[', "'Error'", ']', '[', "'Code'", ']', ')', '==', 'str', '(', 'requests', '.', 'codes', '.', 'not_found', ')', ':', 'raise', 'BlobNotFoundError', '(', 'f"Could not find s3://{bucket}/{key}"', ')', 'from', 'ex', 'raise', 'BlobStoreUnknownError', '(', 'ex', ')'] | Retrieves the user metadata for a given object in a given bucket. If the platform has any mandatory prefixes or
suffixes for the metadata keys, they should be stripped before being returned.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which metadata is being
retrieved.
:return: a dictionary mapping metadata keys to metadata values. | ['Retrieves', 'the', 'user', 'metadata', 'for', 'a', 'given', 'object', 'in', 'a', 'given', 'bucket', '.', 'If', 'the', 'platform', 'has', 'any', 'mandatory', 'prefixes', 'or', 'suffixes', 'for', 'the', 'metadata', 'keys', 'they', 'should', 'be', 'stripped', 'before', 'being', 'returned', '.', ':', 'param', 'bucket', ':', 'the', 'bucket', 'the', 'object', 'resides', 'in', '.', ':', 'param', 'key', ':', 'the', 'key', 'of', 'the', 'object', 'for', 'which', 'metadata', 'is', 'being', 'retrieved', '.', ':', 'return', ':', 'a', 'dictionary', 'mapping', 'metadata', 'keys', 'to', 'metadata', 'values', '.'] | train | https://github.com/HumanCellAtlas/cloud-blobstore/blob/b8a60e8e8c0da0e39dda084cb467a34cd2d1ef0a/cloud_blobstore/s3.py#L326-L356 |
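A usage sketch with placeholder bucket and key names; it assumes `store` is an already-constructed S3BlobStore and that BlobNotFoundError is importable from the package root:
from cloud_blobstore import BlobNotFoundError

try:
    meta = store.get_user_metadata('my-bucket', 'bundles/example.json')
except BlobNotFoundError:
    meta = {}
for k, v in meta.items():   # merged object Metadata plus S3 tags
    print(k, v)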
4,282 | google/grr | grr/server/grr_response_server/flow.py | StartFlow | def StartFlow(client_id=None,
cpu_limit=None,
creator=None,
flow_args=None,
flow_cls=None,
network_bytes_limit=None,
original_flow=None,
output_plugins=None,
start_at=None,
parent_flow_obj=None,
parent_hunt_id=None,
**kwargs):
"""The main factory function for creating and executing a new flow.
Args:
client_id: ID of the client this flow should run on.
cpu_limit: CPU limit in seconds for this flow.
creator: Username that requested this flow.
flow_args: An arg protocol buffer which is an instance of the required
flow's args_type class attribute.
flow_cls: Class of the flow that should be started.
network_bytes_limit: Limit on the network traffic this flow can generate.
original_flow: A FlowReference object in case this flow was copied from
another flow.
output_plugins: An OutputPluginDescriptor object indicating what output
plugins should be used for this flow.
start_at: If specified, flow will be started not immediately, but at a given
time.
parent_flow_obj: A parent flow object. None if this is a top level flow.
parent_hunt_id: String identifying parent hunt. Can't be passed together
with parent_flow_obj.
**kwargs: If args or runner_args are not specified, we construct these
protobufs from these keywords.
Returns:
the flow id of the new flow.
Raises:
ValueError: Unknown or invalid parameters were provided.
"""
if parent_flow_obj is not None and parent_hunt_id is not None:
raise ValueError(
"parent_flow_obj and parent_hunt_id are mutually exclusive.")
# Is the required flow a known flow?
try:
registry.FlowRegistry.FlowClassByName(flow_cls.__name__)
except ValueError:
stats_collector_instance.Get().IncrementCounter(
"grr_flow_invalid_flow_count")
raise ValueError("Unable to locate flow %s" % flow_cls.__name__)
if not client_id:
raise ValueError("Client_id is needed to start a flow.")
# Now parse the flow args into the new object from the keywords.
if flow_args is None:
flow_args = flow_cls.args_type()
FilterArgsFromSemanticProtobuf(flow_args, kwargs)
# At this point we should exhaust all the keyword args. If any are left
# over, we do not know what to do with them so raise.
if kwargs:
raise type_info.UnknownArg("Unknown parameters to StartFlow: %s" % kwargs)
# Check that the flow args are valid.
flow_args.Validate()
rdf_flow = rdf_flow_objects.Flow(
client_id=client_id,
flow_class_name=flow_cls.__name__,
args=flow_args,
create_time=rdfvalue.RDFDatetime.Now(),
creator=creator,
output_plugins=output_plugins,
original_flow=original_flow,
flow_state="RUNNING")
if parent_hunt_id is not None and parent_flow_obj is None:
rdf_flow.flow_id = parent_hunt_id
if IsLegacyHunt(parent_hunt_id):
rdf_flow.flow_id = rdf_flow.flow_id[2:]
else:
rdf_flow.flow_id = RandomFlowId()
# For better performance, only do conflicting IDs check for top-level flows.
if not parent_flow_obj:
try:
data_store.REL_DB.ReadFlowObject(client_id, rdf_flow.flow_id)
raise CanNotStartFlowWithExistingIdError(client_id, rdf_flow.flow_id)
except db.UnknownFlowError:
pass
if parent_flow_obj: # A flow is a nested flow.
parent_rdf_flow = parent_flow_obj.rdf_flow
rdf_flow.long_flow_id = "%s/%s" % (parent_rdf_flow.long_flow_id,
rdf_flow.flow_id)
rdf_flow.parent_flow_id = parent_rdf_flow.flow_id
rdf_flow.parent_hunt_id = parent_rdf_flow.parent_hunt_id
rdf_flow.parent_request_id = parent_flow_obj.GetCurrentOutboundId()
if parent_rdf_flow.creator:
rdf_flow.creator = parent_rdf_flow.creator
elif parent_hunt_id: # A flow is a root-level hunt-induced flow.
rdf_flow.long_flow_id = "%s/%s" % (client_id, rdf_flow.flow_id)
rdf_flow.parent_hunt_id = parent_hunt_id
else: # A flow is a root-level non-hunt flow.
rdf_flow.long_flow_id = "%s/%s" % (client_id, rdf_flow.flow_id)
if output_plugins:
rdf_flow.output_plugins_states = GetOutputPluginStates(
output_plugins,
rdf_flow.long_flow_id,
token=access_control.ACLToken(username=rdf_flow.creator))
if network_bytes_limit is not None:
rdf_flow.network_bytes_limit = network_bytes_limit
if cpu_limit is not None:
rdf_flow.cpu_limit = cpu_limit
logging.info(u"Scheduling %s(%s) on %s (%s)", rdf_flow.long_flow_id,
rdf_flow.flow_class_name, client_id, start_at or "now")
rdf_flow.current_state = "Start"
flow_obj = flow_cls(rdf_flow)
if start_at is None:
# Store an initial version of the flow straight away. This is needed so the
# database doesn't raise consistency errors due to missing parent keys when
# writing logs / errors / results which might happen in Start().
data_store.REL_DB.WriteFlowObject(flow_obj.rdf_flow)
# Just run the first state inline. NOTE: Running synchronously means
# that this runs on the thread that starts the flow. The advantage is
# that the Start method can raise any errors immediately.
flow_obj.Start()
# The flow does not need to actually remain running.
if not flow_obj.outstanding_requests:
flow_obj.RunStateMethod("End")
# Additional check for the correct state in case the End method raised and
# terminated the flow.
if flow_obj.IsRunning():
flow_obj.MarkDone()
else:
flow_obj.CallState("Start", start_time=start_at)
flow_obj.PersistState()
data_store.REL_DB.WriteFlowObject(flow_obj.rdf_flow)
if parent_flow_obj is not None:
# We can optimize here and not write requests/responses to the database
# since we have to do this for the parent flow at some point anyways.
parent_flow_obj.MergeQueuedMessages(flow_obj)
else:
flow_obj.FlushQueuedMessages()
# Publish an audit event, only for top level flows.
# TODO(amoser): split urn field into dedicated strings.
events.Events.PublishEvent(
"Audit",
rdf_events.AuditEvent(
user=creator,
action="RUN_FLOW",
flow_name=rdf_flow.flow_class_name,
urn=rdf_flow.long_flow_id,
client=client_id))
return rdf_flow.flow_id | python | def StartFlow(client_id=None,
cpu_limit=None,
creator=None,
flow_args=None,
flow_cls=None,
network_bytes_limit=None,
original_flow=None,
output_plugins=None,
start_at=None,
parent_flow_obj=None,
parent_hunt_id=None,
**kwargs):
"""The main factory function for creating and executing a new flow.
Args:
client_id: ID of the client this flow should run on.
cpu_limit: CPU limit in seconds for this flow.
creator: Username that requested this flow.
flow_args: An arg protocol buffer which is an instance of the required
flow's args_type class attribute.
flow_cls: Class of the flow that should be started.
network_bytes_limit: Limit on the network traffic this flow can generate.
original_flow: A FlowReference object in case this flow was copied from
another flow.
output_plugins: An OutputPluginDescriptor object indicating what output
plugins should be used for this flow.
start_at: If specified, flow will be started not immediately, but at a given
time.
parent_flow_obj: A parent flow object. None if this is a top level flow.
parent_hunt_id: String identifying parent hunt. Can't be passed together
with parent_flow_obj.
**kwargs: If args or runner_args are not specified, we construct these
protobufs from these keywords.
Returns:
the flow id of the new flow.
Raises:
ValueError: Unknown or invalid parameters were provided.
"""
if parent_flow_obj is not None and parent_hunt_id is not None:
raise ValueError(
"parent_flow_obj and parent_hunt_id are mutually exclusive.")
# Is the required flow a known flow?
try:
registry.FlowRegistry.FlowClassByName(flow_cls.__name__)
except ValueError:
stats_collector_instance.Get().IncrementCounter(
"grr_flow_invalid_flow_count")
raise ValueError("Unable to locate flow %s" % flow_cls.__name__)
if not client_id:
raise ValueError("Client_id is needed to start a flow.")
# Now parse the flow args into the new object from the keywords.
if flow_args is None:
flow_args = flow_cls.args_type()
FilterArgsFromSemanticProtobuf(flow_args, kwargs)
# At this point we should exhaust all the keyword args. If any are left
# over, we do not know what to do with them so raise.
if kwargs:
raise type_info.UnknownArg("Unknown parameters to StartFlow: %s" % kwargs)
# Check that the flow args are valid.
flow_args.Validate()
rdf_flow = rdf_flow_objects.Flow(
client_id=client_id,
flow_class_name=flow_cls.__name__,
args=flow_args,
create_time=rdfvalue.RDFDatetime.Now(),
creator=creator,
output_plugins=output_plugins,
original_flow=original_flow,
flow_state="RUNNING")
if parent_hunt_id is not None and parent_flow_obj is None:
rdf_flow.flow_id = parent_hunt_id
if IsLegacyHunt(parent_hunt_id):
rdf_flow.flow_id = rdf_flow.flow_id[2:]
else:
rdf_flow.flow_id = RandomFlowId()
# For better performance, only do conflicting IDs check for top-level flows.
if not parent_flow_obj:
try:
data_store.REL_DB.ReadFlowObject(client_id, rdf_flow.flow_id)
raise CanNotStartFlowWithExistingIdError(client_id, rdf_flow.flow_id)
except db.UnknownFlowError:
pass
if parent_flow_obj: # A flow is a nested flow.
parent_rdf_flow = parent_flow_obj.rdf_flow
rdf_flow.long_flow_id = "%s/%s" % (parent_rdf_flow.long_flow_id,
rdf_flow.flow_id)
rdf_flow.parent_flow_id = parent_rdf_flow.flow_id
rdf_flow.parent_hunt_id = parent_rdf_flow.parent_hunt_id
rdf_flow.parent_request_id = parent_flow_obj.GetCurrentOutboundId()
if parent_rdf_flow.creator:
rdf_flow.creator = parent_rdf_flow.creator
elif parent_hunt_id: # A flow is a root-level hunt-induced flow.
rdf_flow.long_flow_id = "%s/%s" % (client_id, rdf_flow.flow_id)
rdf_flow.parent_hunt_id = parent_hunt_id
else: # A flow is a root-level non-hunt flow.
rdf_flow.long_flow_id = "%s/%s" % (client_id, rdf_flow.flow_id)
if output_plugins:
rdf_flow.output_plugins_states = GetOutputPluginStates(
output_plugins,
rdf_flow.long_flow_id,
token=access_control.ACLToken(username=rdf_flow.creator))
if network_bytes_limit is not None:
rdf_flow.network_bytes_limit = network_bytes_limit
if cpu_limit is not None:
rdf_flow.cpu_limit = cpu_limit
logging.info(u"Scheduling %s(%s) on %s (%s)", rdf_flow.long_flow_id,
rdf_flow.flow_class_name, client_id, start_at or "now")
rdf_flow.current_state = "Start"
flow_obj = flow_cls(rdf_flow)
if start_at is None:
# Store an initial version of the flow straight away. This is needed so the
# database doesn't raise consistency errors due to missing parent keys when
# writing logs / errors / results which might happen in Start().
data_store.REL_DB.WriteFlowObject(flow_obj.rdf_flow)
# Just run the first state inline. NOTE: Running synchronously means
# that this runs on the thread that starts the flow. The advantage is
# that the Start method can raise any errors immediately.
flow_obj.Start()
# The flow does not need to actually remain running.
if not flow_obj.outstanding_requests:
flow_obj.RunStateMethod("End")
# Additional check for the correct state in case the End method raised and
# terminated the flow.
if flow_obj.IsRunning():
flow_obj.MarkDone()
else:
flow_obj.CallState("Start", start_time=start_at)
flow_obj.PersistState()
data_store.REL_DB.WriteFlowObject(flow_obj.rdf_flow)
if parent_flow_obj is not None:
# We can optimize here and not write requests/responses to the database
# since we have to do this for the parent flow at some point anyways.
parent_flow_obj.MergeQueuedMessages(flow_obj)
else:
flow_obj.FlushQueuedMessages()
# Publish an audit event, only for top level flows.
# TODO(amoser): split urn field into dedicated strings.
events.Events.PublishEvent(
"Audit",
rdf_events.AuditEvent(
user=creator,
action="RUN_FLOW",
flow_name=rdf_flow.flow_class_name,
urn=rdf_flow.long_flow_id,
client=client_id))
return rdf_flow.flow_id | ['def', 'StartFlow', '(', 'client_id', '=', 'None', ',', 'cpu_limit', '=', 'None', ',', 'creator', '=', 'None', ',', 'flow_args', '=', 'None', ',', 'flow_cls', '=', 'None', ',', 'network_bytes_limit', '=', 'None', ',', 'original_flow', '=', 'None', ',', 'output_plugins', '=', 'None', ',', 'start_at', '=', 'None', ',', 'parent_flow_obj', '=', 'None', ',', 'parent_hunt_id', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', 'parent_flow_obj', 'is', 'not', 'None', 'and', 'parent_hunt_id', 'is', 'not', 'None', ':', 'raise', 'ValueError', '(', '"parent_flow_obj and parent_hunt_id are mutually exclusive."', ')', '# Is the required flow a known flow?', 'try', ':', 'registry', '.', 'FlowRegistry', '.', 'FlowClassByName', '(', 'flow_cls', '.', '__name__', ')', 'except', 'ValueError', ':', 'stats_collector_instance', '.', 'Get', '(', ')', '.', 'IncrementCounter', '(', '"grr_flow_invalid_flow_count"', ')', 'raise', 'ValueError', '(', '"Unable to locate flow %s"', '%', 'flow_cls', '.', '__name__', ')', 'if', 'not', 'client_id', ':', 'raise', 'ValueError', '(', '"Client_id is needed to start a flow."', ')', '# Now parse the flow args into the new object from the keywords.', 'if', 'flow_args', 'is', 'None', ':', 'flow_args', '=', 'flow_cls', '.', 'args_type', '(', ')', 'FilterArgsFromSemanticProtobuf', '(', 'flow_args', ',', 'kwargs', ')', '# At this point we should exhaust all the keyword args. If any are left', '# over, we do not know what to do with them so raise.', 'if', 'kwargs', ':', 'raise', 'type_info', '.', 'UnknownArg', '(', '"Unknown parameters to StartFlow: %s"', '%', 'kwargs', ')', '# Check that the flow args are valid.', 'flow_args', '.', 'Validate', '(', ')', 'rdf_flow', '=', 'rdf_flow_objects', '.', 'Flow', '(', 'client_id', '=', 'client_id', ',', 'flow_class_name', '=', 'flow_cls', '.', '__name__', ',', 'args', '=', 'flow_args', ',', 'create_time', '=', 'rdfvalue', '.', 'RDFDatetime', '.', 'Now', '(', ')', ',', 'creator', '=', 'creator', ',', 'output_plugins', '=', 'output_plugins', ',', 'original_flow', '=', 'original_flow', ',', 'flow_state', '=', '"RUNNING"', ')', 'if', 'parent_hunt_id', 'is', 'not', 'None', 'and', 'parent_flow_obj', 'is', 'None', ':', 'rdf_flow', '.', 'flow_id', '=', 'parent_hunt_id', 'if', 'IsLegacyHunt', '(', 'parent_hunt_id', ')', ':', 'rdf_flow', '.', 'flow_id', '=', 'rdf_flow', '.', 'flow_id', '[', '2', ':', ']', 'else', ':', 'rdf_flow', '.', 'flow_id', '=', 'RandomFlowId', '(', ')', '# For better performance, only do conflicting IDs check for top-level flows.', 'if', 'not', 'parent_flow_obj', ':', 'try', ':', 'data_store', '.', 'REL_DB', '.', 'ReadFlowObject', '(', 'client_id', ',', 'rdf_flow', '.', 'flow_id', ')', 'raise', 'CanNotStartFlowWithExistingIdError', '(', 'client_id', ',', 'rdf_flow', '.', 'flow_id', ')', 'except', 'db', '.', 'UnknownFlowError', ':', 'pass', 'if', 'parent_flow_obj', ':', '# A flow is a nested flow.', 'parent_rdf_flow', '=', 'parent_flow_obj', '.', 'rdf_flow', 'rdf_flow', '.', 'long_flow_id', '=', '"%s/%s"', '%', '(', 'parent_rdf_flow', '.', 'long_flow_id', ',', 'rdf_flow', '.', 'flow_id', ')', 'rdf_flow', '.', 'parent_flow_id', '=', 'parent_rdf_flow', '.', 'flow_id', 'rdf_flow', '.', 'parent_hunt_id', '=', 'parent_rdf_flow', '.', 'parent_hunt_id', 'rdf_flow', '.', 'parent_request_id', '=', 'parent_flow_obj', '.', 'GetCurrentOutboundId', '(', ')', 'if', 'parent_rdf_flow', '.', 'creator', ':', 'rdf_flow', '.', 'creator', '=', 'parent_rdf_flow', '.', 'creator', 'elif', 'parent_hunt_id', ':', '# A flow is a 
root-level hunt-induced flow.', 'rdf_flow', '.', 'long_flow_id', '=', '"%s/%s"', '%', '(', 'client_id', ',', 'rdf_flow', '.', 'flow_id', ')', 'rdf_flow', '.', 'parent_hunt_id', '=', 'parent_hunt_id', 'else', ':', '# A flow is a root-level non-hunt flow.', 'rdf_flow', '.', 'long_flow_id', '=', '"%s/%s"', '%', '(', 'client_id', ',', 'rdf_flow', '.', 'flow_id', ')', 'if', 'output_plugins', ':', 'rdf_flow', '.', 'output_plugins_states', '=', 'GetOutputPluginStates', '(', 'output_plugins', ',', 'rdf_flow', '.', 'long_flow_id', ',', 'token', '=', 'access_control', '.', 'ACLToken', '(', 'username', '=', 'rdf_flow', '.', 'creator', ')', ')', 'if', 'network_bytes_limit', 'is', 'not', 'None', ':', 'rdf_flow', '.', 'network_bytes_limit', '=', 'network_bytes_limit', 'if', 'cpu_limit', 'is', 'not', 'None', ':', 'rdf_flow', '.', 'cpu_limit', '=', 'cpu_limit', 'logging', '.', 'info', '(', 'u"Scheduling %s(%s) on %s (%s)"', ',', 'rdf_flow', '.', 'long_flow_id', ',', 'rdf_flow', '.', 'flow_class_name', ',', 'client_id', ',', 'start_at', 'or', '"now"', ')', 'rdf_flow', '.', 'current_state', '=', '"Start"', 'flow_obj', '=', 'flow_cls', '(', 'rdf_flow', ')', 'if', 'start_at', 'is', 'None', ':', '# Store an initial version of the flow straight away. This is needed so the', "# database doesn't raise consistency errors due to missing parent keys when", '# writing logs / errors / results which might happen in Start().', 'data_store', '.', 'REL_DB', '.', 'WriteFlowObject', '(', 'flow_obj', '.', 'rdf_flow', ')', '# Just run the first state inline. NOTE: Running synchronously means', '# that this runs on the thread that starts the flow. The advantage is', '# that that Start method can raise any errors immediately.', 'flow_obj', '.', 'Start', '(', ')', '# The flow does not need to actually remain running.', 'if', 'not', 'flow_obj', '.', 'outstanding_requests', ':', 'flow_obj', '.', 'RunStateMethod', '(', '"End"', ')', '# Additional check for the correct state in case the End method raised and', '# terminated the flow.', 'if', 'flow_obj', '.', 'IsRunning', '(', ')', ':', 'flow_obj', '.', 'MarkDone', '(', ')', 'else', ':', 'flow_obj', '.', 'CallState', '(', '"Start"', ',', 'start_time', '=', 'start_at', ')', 'flow_obj', '.', 'PersistState', '(', ')', 'data_store', '.', 'REL_DB', '.', 'WriteFlowObject', '(', 'flow_obj', '.', 'rdf_flow', ')', 'if', 'parent_flow_obj', 'is', 'not', 'None', ':', '# We can optimize here and not write requests/responses to the database', '# since we have to do this for the parent flow at some point anyways.', 'parent_flow_obj', '.', 'MergeQueuedMessages', '(', 'flow_obj', ')', 'else', ':', 'flow_obj', '.', 'FlushQueuedMessages', '(', ')', '# Publish an audit event, only for top level flows.', '# TODO(amoser): split urn field into dedicated strings.', 'events', '.', 'Events', '.', 'PublishEvent', '(', '"Audit"', ',', 'rdf_events', '.', 'AuditEvent', '(', 'user', '=', 'creator', ',', 'action', '=', '"RUN_FLOW"', ',', 'flow_name', '=', 'rdf_flow', '.', 'flow_class_name', ',', 'urn', '=', 'rdf_flow', '.', 'long_flow_id', ',', 'client', '=', 'client_id', ')', ')', 'return', 'rdf_flow', '.', 'flow_id'] | The main factory function for creating and executing a new flow.
Args:
client_id: ID of the client this flow should run on.
cpu_limit: CPU limit in seconds for this flow.
creator: Username that requested this flow.
flow_args: An arg protocol buffer which is an instance of the required
flow's args_type class attribute.
flow_cls: Class of the flow that should be started.
network_bytes_limit: Limit on the network traffic this flow can generate.
original_flow: A FlowReference object in case this flow was copied from
another flow.
output_plugins: An OutputPluginDescriptor object indicating what output
plugins should be used for this flow.
start_at: If specified, flow will be started not immediately, but at a given
time.
parent_flow_obj: A parent flow object. None if this is a top level flow.
parent_hunt_id: String identifying parent hunt. Can't be passed together
with parent_flow_obj.
**kwargs: If args or runner_args are not specified, we construct these
protobufs from these keywords.
Returns:
the flow id of the new flow.
Raises:
ValueError: Unknown or invalid parameters were provided. | ['The', 'main', 'factory', 'function', 'for', 'creating', 'and', 'executing', 'a', 'new', 'flow', '.'] | train | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/flow.py#L338-L508 |
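A hedged sketch of scheduling a flow server-side; the client id, creator, and flow class are placeholders, and flow_args must be an instance of the flow class's args_type as the docstring requires:
from grr_response_server import flow

flow_id = flow.StartFlow(
    client_id='C.1000000000000000',            # placeholder client id
    flow_cls=SomeRegisteredFlow,               # any class known to the FlowRegistry
    flow_args=SomeRegisteredFlow.args_type(),  # args protobuf for that flow
    creator='analyst',
)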
4,283 | mon/ifstools | ifstools/handlers/TexFolder.py | ImageCanvas.load | def load(self, draw_bbox = False, **kwargs):
''' Makes the canvas.
This could be far speedier if it copied raw pixels, but that would
take far too much time to write vs using Image inbuilts '''
im = Image.new('RGBA', self.img_size)
draw = None
if draw_bbox:
draw = ImageDraw.Draw(im)
for sprite in self.images:
data = sprite.load()
sprite_im = Image.open(BytesIO(data))
size = sprite.imgrect
im.paste(sprite_im, (size[0], size[2]))
if draw_bbox:
draw.rectangle((size[0], size[2], size[1], size[3]), outline='red')
del draw
b = BytesIO()
im.save(b, format = 'PNG')
return b.getvalue() | python | def load(self, draw_bbox = False, **kwargs):
''' Makes the canvas.
This could be far speedier if it copied raw pixels, but that would
take far too much time to write vs using Image inbuilts '''
im = Image.new('RGBA', self.img_size)
draw = None
if draw_bbox:
draw = ImageDraw.Draw(im)
for sprite in self.images:
data = sprite.load()
sprite_im = Image.open(BytesIO(data))
size = sprite.imgrect
im.paste(sprite_im, (size[0], size[2]))
if draw_bbox:
draw.rectangle((size[0], size[2], size[1], size[3]), outline='red')
del draw
b = BytesIO()
im.save(b, format = 'PNG')
return b.getvalue() | ['def', 'load', '(', 'self', ',', 'draw_bbox', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'im', '=', 'Image', '.', 'new', '(', "'RGBA'", ',', 'self', '.', 'img_size', ')', 'draw', '=', 'None', 'if', 'draw_bbox', ':', 'draw', '=', 'ImageDraw', '.', 'Draw', '(', 'im', ')', 'for', 'sprite', 'in', 'self', '.', 'images', ':', 'data', '=', 'sprite', '.', 'load', '(', ')', 'sprite_im', '=', 'Image', '.', 'open', '(', 'BytesIO', '(', 'data', ')', ')', 'size', '=', 'sprite', '.', 'imgrect', 'im', '.', 'paste', '(', 'sprite_im', ',', '(', 'size', '[', '0', ']', ',', 'size', '[', '2', ']', ')', ')', 'if', 'draw_bbox', ':', 'draw', '.', 'rectangle', '(', '(', 'size', '[', '0', ']', ',', 'size', '[', '2', ']', ',', 'size', '[', '1', ']', ',', 'size', '[', '3', ']', ')', ',', 'outline', '=', "'red'", ')', 'del', 'draw', 'b', '=', 'BytesIO', '(', ')', 'im', '.', 'save', '(', 'b', ',', 'format', '=', "'PNG'", ')', 'return', 'b', '.', 'getvalue', '(', ')'] | Makes the canvas.
This could be far speedier if it copied raw pixels, but that would
take far too much time to write vs using Image inbuilts | ['Makes', 'the', 'canvas', '.', 'This', 'could', 'be', 'far', 'speedier', 'if', 'it', 'copied', 'raw', 'pixels', 'but', 'that', 'would', 'take', 'far', 'too', 'much', 'time', 'to', 'write', 'vs', 'using', 'Image', 'inbuilts'] | train | https://github.com/mon/ifstools/blob/ccd9c1c3632aa22cdcc4e064f17e07803b1d27ba/ifstools/handlers/TexFolder.py#L35-L56 |
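A small sketch, assuming `canvas` is an ImageCanvas produced while extracting an IFS texture folder; the output path is illustrative:
png_bytes = canvas.load(draw_bbox=True)        # red rectangles mark each sprite's bounding box
with open('canvas_debug.png', 'wb') as f:
    f.write(png_bytes)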
4,284 | anomaly/prestans | prestans/rest/request.py | Request.body_template | def body_template(self, value):
"""
Must be an instance of a prestans.types.DataCollection subclass; this is
generally set during the RequestHandler lifecycle. Setting this spawns the
parsing process of the body. If the HTTP verb is GET an AssertionError is
thrown. Use with extreme caution.
"""
if self.method == VERB.GET:
raise AssertionError("body_template cannot be set for GET requests")
if value is None:
self.logger.warning("body_template is None, parsing will be ignored")
return
if not isinstance(value, DataCollection):
msg = "body_template must be an instance of %s.%s" % (
DataCollection.__module__,
DataCollection.__name__
)
raise AssertionError(msg)
self._body_template = value
# get a deserializer based on the Content-Type header
# do this here so the handler gets a chance to setup extra serializers
self.set_deserializer_by_mime_type(self.content_type) | python | def body_template(self, value):
"""
Must be an instance of a prestans.types.DataCollection subclass; this is
generally set during the RequestHandler lifecycle. Setting this spawns the
parsing process of the body. If the HTTP verb is GET an AssertionError is
thrown. Use with extreme caution.
"""
if self.method == VERB.GET:
raise AssertionError("body_template cannot be set for GET requests")
if value is None:
self.logger.warning("body_template is None, parsing will be ignored")
return
if not isinstance(value, DataCollection):
msg = "body_template must be an instance of %s.%s" % (
DataCollection.__module__,
DataCollection.__name__
)
raise AssertionError(msg)
self._body_template = value
# get a deserializer based on the Content-Type header
# do this here so the handler gets a chance to setup extra serializers
self.set_deserializer_by_mime_type(self.content_type) | ['def', 'body_template', '(', 'self', ',', 'value', ')', ':', 'if', 'self', '.', 'method', '==', 'VERB', '.', 'GET', ':', 'raise', 'AssertionError', '(', '"body_template cannot be set for GET requests"', ')', 'if', 'value', 'is', 'None', ':', 'self', '.', 'logger', '.', 'warning', '(', '"body_template is None, parsing will be ignored"', ')', 'return', 'if', 'not', 'isinstance', '(', 'value', ',', 'DataCollection', ')', ':', 'msg', '=', '"body_template must be an instance of %s.%s"', '%', '(', 'DataCollection', '.', '__module__', ',', 'DataCollection', '.', '__name__', ')', 'raise', 'AssertionError', '(', 'msg', ')', 'self', '.', '_body_template', '=', 'value', '# get a deserializer based on the Content-Type header', '# do this here so the handler gets a chance to setup extra serializers', 'self', '.', 'set_deserializer_by_mime_type', '(', 'self', '.', 'content_type', ')'] | Must be an instance of a prestans.types.DataCollection subclass; this is
generally set during the RequestHandler lifecycle. Setting this spawns the
parsing process of the body. If the HTTP verb is GET an AssertionError is
thrown. Use with extreme caution. | ['Must', 'be', 'an', 'instance', 'of', 'a', 'prestans', '.', 'types', '.', 'DataCollection', 'subclass', ';', 'this', 'is', 'generally', 'set', 'during', 'the', 'RequestHandler', 'lifecycle', '.', 'Setting', 'this', 'spwans', 'the', 'parsing', 'process', 'of', 'the', 'body', '.', 'If', 'the', 'HTTP', 'verb', 'is', 'GET', 'an', 'AssertionError', 'is', 'thrown', '.', 'Use', 'with', 'extreme', 'caution', '.'] | train | https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/rest/request.py#L116-L142 |
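A sketch of using the setter inside a prestans request handler; MyModel is a hypothetical prestans.types.Model subclass:
# inside a handler method for POST/PUT (a GET request would raise AssertionError here)
self.request.body_template = MyModel()         # triggers deserialization of the request body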
4,285 | gccxml/pygccxml | pygccxml/declarations/pattern_parser.py | parser_t.find_args | def find_args(self, text, start=None):
"""implementation details"""
if start is None:
start = 0
first_occurance = text.find(self.__begin, start)
if first_occurance == -1:
return self.NOT_FOUND
previous_found, found = first_occurance + 1, 0
while True:
found = self.__find_args_separator(text, previous_found)
if found == -1:
return self.NOT_FOUND
elif text[found] == self.__end:
return first_occurance, found
else:
previous_found = found + 1 | python | def find_args(self, text, start=None):
"""implementation details"""
if start is None:
start = 0
first_occurance = text.find(self.__begin, start)
if first_occurance == -1:
return self.NOT_FOUND
previous_found, found = first_occurance + 1, 0
while True:
found = self.__find_args_separator(text, previous_found)
if found == -1:
return self.NOT_FOUND
elif text[found] == self.__end:
return first_occurance, found
else:
previous_found = found + 1 | ['def', 'find_args', '(', 'self', ',', 'text', ',', 'start', '=', 'None', ')', ':', 'if', 'start', 'is', 'None', ':', 'start', '=', '0', 'first_occurance', '=', 'text', '.', 'find', '(', 'self', '.', '__begin', ',', 'start', ')', 'if', 'first_occurance', '==', '-', '1', ':', 'return', 'self', '.', 'NOT_FOUND', 'previous_found', ',', 'found', '=', 'first_occurance', '+', '1', ',', '0', 'while', 'True', ':', 'found', '=', 'self', '.', '__find_args_separator', '(', 'text', ',', 'previous_found', ')', 'if', 'found', '==', '-', '1', ':', 'return', 'self', '.', 'NOT_FOUND', 'elif', 'text', '[', 'found', ']', '==', 'self', '.', '__end', ':', 'return', 'first_occurance', ',', 'found', 'else', ':', 'previous_found', '=', 'found', '+', '1'] | implementation details | ['implementation', 'details'] | train | https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/declarations/pattern_parser.py#L155-L170 |
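A hedged sketch, assuming `parser` is a parser_t configured with '<' and '>' as its begin/end characters:
text = 'std::vector< int > v'
span = parser.find_args(text)
if span != parser.NOT_FOUND:
    begin, end = span                          # indices of '<' and its matching '>'
    print(text[begin:end + 1])                 # '< int >'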
4,286 | pyokagan/pyglreg | glreg.py | Registry.get_supports | def get_supports(self):
"""Returns set of extension support strings referenced in this Registry
:return: set of extension support strings
"""
out = set()
for ext in self.extensions.values():
out.update(ext.get_supports())
return out | python | def get_supports(self):
"""Returns set of extension support strings referenced in this Registry
:return: set of extension support strings
"""
out = set()
for ext in self.extensions.values():
out.update(ext.get_supports())
return out | ['def', 'get_supports', '(', 'self', ')', ':', 'out', '=', 'set', '(', ')', 'for', 'ext', 'in', 'self', '.', 'extensions', '.', 'values', '(', ')', ':', 'out', '.', 'update', '(', 'ext', '.', 'get_supports', '(', ')', ')', 'return', 'out'] | Returns set of extension support strings referenced in this Registry
:return: set of extension support strings | ['Returns', 'set', 'of', 'extension', 'support', 'strings', 'referenced', 'in', 'this', 'Registry'] | train | https://github.com/pyokagan/pyglreg/blob/68fa5a6c6cee8667879840fbbcc7d30f52852915/glreg.py#L592-L600 |
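A short sketch; it assumes `registry` is a glreg.Registry already populated from Khronos' gl.xml:
supports = registry.get_supports()
print(sorted(supports))                        # e.g. ['gl', 'glcore', 'gles2'] (illustrative)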
4,287 | saltstack/salt | salt/modules/boto_iot.py | create_topic_rule | def create_topic_rule(ruleName, sql, actions, description,
ruleDisabled=False,
region=None, key=None, keyid=None, profile=None):
'''
Given a valid config, create a topic rule.
Returns {created: true} if the rule was created and returns
{created: False} if the rule was not created.
CLI Example:
.. code-block:: bash
salt myminion boto_iot.create_topic_rule my_rule "SELECT * FROM 'some/thing'" \\
'[{"lambda":{"functionArn":"arn:::::something"}},{"sns":{\\
"targetArn":"arn:::::something","roleArn":"arn:::::something"}}]'
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.create_topic_rule(ruleName=ruleName,
topicRulePayload={
'sql': sql,
'description': description,
'actions': actions,
'ruleDisabled': ruleDisabled
})
return {'created': True}
except ClientError as e:
return {'created': False, 'error': __utils__['boto3.get_error'](e)} | python | def create_topic_rule(ruleName, sql, actions, description,
ruleDisabled=False,
region=None, key=None, keyid=None, profile=None):
'''
Given a valid config, create a topic rule.
Returns {created: true} if the rule was created and returns
{created: False} if the rule was not created.
CLI Example:
.. code-block:: bash
salt myminion boto_iot.create_topic_rule my_rule "SELECT * FROM 'some/thing'" \\
'[{"lambda":{"functionArn":"arn:::::something"}},{"sns":{\\
"targetArn":"arn:::::something","roleArn":"arn:::::something"}}]'
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.create_topic_rule(ruleName=ruleName,
topicRulePayload={
'sql': sql,
'description': description,
'actions': actions,
'ruleDisabled': ruleDisabled
})
return {'created': True}
except ClientError as e:
return {'created': False, 'error': __utils__['boto3.get_error'](e)} | ['def', 'create_topic_rule', '(', 'ruleName', ',', 'sql', ',', 'actions', ',', 'description', ',', 'ruleDisabled', '=', 'False', ',', 'region', '=', 'None', ',', 'key', '=', 'None', ',', 'keyid', '=', 'None', ',', 'profile', '=', 'None', ')', ':', 'try', ':', 'conn', '=', '_get_conn', '(', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', 'conn', '.', 'create_topic_rule', '(', 'ruleName', '=', 'ruleName', ',', 'topicRulePayload', '=', '{', "'sql'", ':', 'sql', ',', "'description'", ':', 'description', ',', "'actions'", ':', 'actions', ',', "'ruleDisabled'", ':', 'ruleDisabled', '}', ')', 'return', '{', "'created'", ':', 'True', '}', 'except', 'ClientError', 'as', 'e', ':', 'return', '{', "'created'", ':', 'False', ',', "'error'", ':', '__utils__', '[', "'boto3.get_error'", ']', '(', 'e', ')', '}'] | Given a valid config, create a topic rule.
Returns {created: true} if the rule was created and returns
{created: False} if the rule was not created.
CLI Example:
.. code-block:: bash
salt myminion boto_iot.create_topic_rule my_rule "SELECT * FROM 'some/thing'" \\
'[{"lambda":{"functionArn":"arn:::::something"}},{"sns":{\\
"targetArn":"arn:::::something","roleArn":"arn:::::something"}}]' | ['Given', 'a', 'valid', 'config', 'create', 'a', 'topic', 'rule', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_iot.py#L726-L756 |
4,288 | kristianfoerster/melodist | melodist/precipitation.py | aggregate_precipitation | def aggregate_precipitation(vec_data,hourly=True, percentile=50):
"""Aggregates highly resolved precipitation data and creates statistics
Parameters
----------
vec_data : pd.Series
hourly (hourly=True) OR 5-min values
Returns
-------
output : cascade object
representing statistics of the cascade model
"""
cascade_opt = cascade.CascadeStatistics()
cascade_opt.percentile = percentile
# length of input time series
n_in = len(vec_data)
n_out = np.floor(n_in/2)
# alternative:
# 1st step: new time series
vec_time = vec_data.index
vdn0 = []
vtn0 = []
j = 0
for i in range(0, n_in):
if np.mod(i, 2) != 0:
vdn0.append(vec_data.precip.values[i-1] + vec_data.precip.values[i])
vtn0.append(vec_time[i])
j = j+1
vdn = pd.DataFrame(index=vtn0, data={'precip': vdn0})
# length of new time series
n_out = len(vdn)
# series of box types:
vbtype = np.zeros((n_out, ), dtype=np.int)
# fields for empirical probabilities
# counts
nb = np.zeros((2, 4))
nbxx = np.zeros((2, 4))
# class boundaries for histograms
# wclassbounds = np.linspace(0, 1, num=8)
wlower = np.array([0,
0.1429,
0.2857,
0.4286,
0.5714,
0.7143,
0.8571]) # wclassbounds[0:7]
wupper = np.array([0.1429,
0.2857,
0.4286,
0.5714,
0.7143,
0.8571,
1.0]) # wclassbounds[1:8]
# evaluate mean rainfall intensity for wet boxes
# these values should be determined during the aggregation phase!!!!!
# mean volume threshold
meanvol = np.percentile(vdn.precip[vdn.precip > 0.],
cascade_opt.percentile) # np.mean(vdn.precip[vdn.precip>0.])
cascade_opt.threshold = np.array([meanvol])
# 2nd step: classify boxes at the upper level
for i in range(0, n_out):
if vdn.precip.values[i] > 0.: # rain?
if i == 0: # only starting or isolated
if vdn.precip.values[i+1] > 0.:
vbtype[i] = cascade.BoxTypes.starting
else:
vbtype[i] = cascade.BoxTypes.isolated
elif i == n_out-1: # only ending or isolated
if vdn.precip.values[i-1] > 0.:
vbtype[i] = cascade.BoxTypes.ending
else:
vbtype[i] = cascade.BoxTypes.isolated
else: # neither at at the end nor at the beginning
if vdn.precip.values[i-1] == 0. and vdn.precip.values[i+1] == 0.:
vbtype[i] = cascade.BoxTypes.isolated
if vdn.precip.values[i-1] == 0. and vdn.precip.values[i+1] > 0.:
vbtype[i] = cascade.BoxTypes.starting
if vdn.precip.values[i-1] > 0. and vdn.precip.values[i+1] > 0.:
vbtype[i] = cascade.BoxTypes.enclosed
if vdn.precip.values[i-1] > 0. and vdn.precip.values[i+1] == 0.:
vbtype[i] = cascade.BoxTypes.ending
else:
vbtype[i] = cascade.BoxTypes.dry # no rain
# 3rd step: examine branching
j = 0
for i in range(0, n_in):
if np.mod(i, 2) != 0:
if vdn.precip.values[j] > 0:
if vdn.precip.values[j] > meanvol:
belowabove = 1 # above mean
else:
belowabove = 0 # below mean
nb[belowabove, vbtype[j]-1] += 1
if vec_data.precip.values[i-1] > 0 and vec_data.precip.values[i] == 0:
# P(1/0)
cascade_opt.p10[belowabove, vbtype[j]-1] += 1
if vec_data.precip.values[i-1] == 0 and vec_data.precip.values[i] > 0:
# P(0/1)
cascade_opt.p01[belowabove, vbtype[j]-1] += 1
if vec_data.precip.values[i-1] > 0 and vec_data.precip.values[i] > 0:
# P(x/x)
cascade_opt.pxx[belowabove, vbtype[j]-1] += 1
nbxx[belowabove, vbtype[j]-1] += 1
# weights
r1 = vec_data.precip.values[i-1]
r2 = vec_data.precip.values[i]
wxxval = r1 / (r1 + r2)
# Test
if abs(r1+r2-vdn.precip.values[j]) > 1.E-3:
print('i=' + str(i) + ', j=' + str(j) +
', r1=' + str(r1) + ", r2=" + str(r2) +
", Summe=" + str(vdn.precip.values[j]))
print(vec_data.index[i])
print(vdn.index[j])
print('error')
return cascade_opt, vdn
for k in range(0, 7):
if wxxval > wlower[k] and wxxval <= wupper[k]:
cascade_opt.wxx[k, belowabove, vbtype[j]-1] += 1
break
j = j + 1
# 4th step: transform counts to percentages
cascade_opt.p01 = cascade_opt.p01 / nb
cascade_opt.p10 = cascade_opt.p10 / nb
cascade_opt.pxx = cascade_opt.pxx / nb
with np.errstate(divide='ignore', invalid='ignore'): # do not issue warnings here when dividing by zero, this is handled below
for k in range(0, 7):
cascade_opt.wxx[k, :, :] = cascade_opt.wxx[k, :, :] / nbxx[:, :]
# In some cases, the time series are too short for deriving statistics.
if (np.isnan(cascade_opt.p01).any() or
np.isnan(cascade_opt.p10).any() or
np.isnan(cascade_opt.pxx).any()):
print("ERROR (branching probabilities):")
print("Invalid statistics. Default values will be returned. "
"Try to use longer time series or apply statistics "
"derived for another station.")
cascade_opt.fill_with_sample_data()
# For some box types, the corresponding probabilities might yield nan.
# If this happens, nan values will be replaced by 1/7 in order to provide
# valid values for disaggregation.
if np.isnan(cascade_opt.wxx).any():
print("Warning (weighting probabilities):")
print("The derived cascade statistics are not valid as some "
"probabilities are undefined! ", end="")
print("Try to use longer time series that might be more "
"appropriate for deriving statistics. ", end="")
print("As a workaround, default values according to equally "
"distributed probabilities ", end="")
print("will be applied...", end="")
cascade_opt.wxx[np.isnan(cascade_opt.wxx)] = 1.0 / 7.0
wxx = np.zeros((2, 4))
for k in range(0, 7):
wxx[:, :] += cascade_opt.wxx[k, :, :]
if wxx.any() > 1.001 or wxx.any() < 0.999:
print("failed! Using default values!")
cascade_opt.fill_with_sample_data()
else:
print("OK!")
return cascade_opt, vdn | python | def aggregate_precipitation(vec_data,hourly=True, percentile=50):
"""Aggregates highly resolved precipitation data and creates statistics
Parameters
----------
vec_data : pd.Series
hourly (hourly=True) OR 5-min values
Returns
-------
output : cascade object
representing statistics of the cascade model
"""
cascade_opt = cascade.CascadeStatistics()
cascade_opt.percentile = percentile
# length of input time series
n_in = len(vec_data)
n_out = np.floor(n_in/2)
# alternative:
# 1st step: new time series
vec_time = vec_data.index
vdn0 = []
vtn0 = []
j = 0
for i in range(0, n_in):
if np.mod(i, 2) != 0:
vdn0.append(vec_data.precip.values[i-1] + vec_data.precip.values[i])
vtn0.append(vec_time[i])
j = j+1
vdn = pd.DataFrame(index=vtn0, data={'precip': vdn0})
# length of new time series
n_out = len(vdn)
# series of box types:
vbtype = np.zeros((n_out, ), dtype=np.int)
# fields for empirical probabilities
# counts
nb = np.zeros((2, 4))
nbxx = np.zeros((2, 4))
# class boundaries for histograms
# wclassbounds = np.linspace(0, 1, num=8)
wlower = np.array([0,
0.1429,
0.2857,
0.4286,
0.5714,
0.7143,
0.8571]) # wclassbounds[0:7]
wupper = np.array([0.1429,
0.2857,
0.4286,
0.5714,
0.7143,
0.8571,
1.0]) # wclassbounds[1:8]
# evaluate mean rainfall intensity for wet boxes
# these values should be determined during the aggregation phase!!!!!
# mean volume threshold
meanvol = np.percentile(vdn.precip[vdn.precip > 0.],
cascade_opt.percentile) # np.mean(vdn.precip[vdn.precip>0.])
cascade_opt.threshold = np.array([meanvol])
# 2nd step: classify boxes at the upper level
for i in range(0, n_out):
if vdn.precip.values[i] > 0.: # rain?
if i == 0: # only starting or isolated
if vdn.precip.values[i+1] > 0.:
vbtype[i] = cascade.BoxTypes.starting
else:
vbtype[i] = cascade.BoxTypes.isolated
elif i == n_out-1: # only ending or isolated
if vdn.precip.values[i-1] > 0.:
vbtype[i] = cascade.BoxTypes.ending
else:
vbtype[i] = cascade.BoxTypes.isolated
else: # neither at at the end nor at the beginning
if vdn.precip.values[i-1] == 0. and vdn.precip.values[i+1] == 0.:
vbtype[i] = cascade.BoxTypes.isolated
if vdn.precip.values[i-1] == 0. and vdn.precip.values[i+1] > 0.:
vbtype[i] = cascade.BoxTypes.starting
if vdn.precip.values[i-1] > 0. and vdn.precip.values[i+1] > 0.:
vbtype[i] = cascade.BoxTypes.enclosed
if vdn.precip.values[i-1] > 0. and vdn.precip.values[i+1] == 0.:
vbtype[i] = cascade.BoxTypes.ending
else:
vbtype[i] = cascade.BoxTypes.dry # no rain
# 3rd step: examine branching
j = 0
for i in range(0, n_in):
if np.mod(i, 2) != 0:
if vdn.precip.values[j] > 0:
if vdn.precip.values[j] > meanvol:
belowabove = 1 # above mean
else:
belowabove = 0 # below mean
nb[belowabove, vbtype[j]-1] += 1
if vec_data.precip.values[i-1] > 0 and vec_data.precip.values[i] == 0:
# P(1/0)
cascade_opt.p10[belowabove, vbtype[j]-1] += 1
if vec_data.precip.values[i-1] == 0 and vec_data.precip.values[i] > 0:
# P(0/1)
cascade_opt.p01[belowabove, vbtype[j]-1] += 1
if vec_data.precip.values[i-1] > 0 and vec_data.precip.values[i] > 0:
# P(x/x)
cascade_opt.pxx[belowabove, vbtype[j]-1] += 1
nbxx[belowabove, vbtype[j]-1] += 1
# weights
r1 = vec_data.precip.values[i-1]
r2 = vec_data.precip.values[i]
wxxval = r1 / (r1 + r2)
# Test
if abs(r1+r2-vdn.precip.values[j]) > 1.E-3:
print('i=' + str(i) + ', j=' + str(j) +
', r1=' + str(r1) + ", r2=" + str(r2) +
", Summe=" + str(vdn.precip.values[j]))
print(vec_data.index[i])
print(vdn.index[j])
print('error')
return cascade_opt, vdn
for k in range(0, 7):
if wxxval > wlower[k] and wxxval <= wupper[k]:
cascade_opt.wxx[k, belowabove, vbtype[j]-1] += 1
break
j = j + 1
# 4th step: transform counts to percentages
cascade_opt.p01 = cascade_opt.p01 / nb
cascade_opt.p10 = cascade_opt.p10 / nb
cascade_opt.pxx = cascade_opt.pxx / nb
with np.errstate(divide='ignore', invalid='ignore'): # do not issue warnings here when dividing by zero, this is handled below
for k in range(0, 7):
cascade_opt.wxx[k, :, :] = cascade_opt.wxx[k, :, :] / nbxx[:, :]
# In some cases, the time series are too short for deriving statistics.
if (np.isnan(cascade_opt.p01).any() or
np.isnan(cascade_opt.p10).any() or
np.isnan(cascade_opt.pxx).any()):
print("ERROR (branching probabilities):")
print("Invalid statistics. Default values will be returned. "
"Try to use longer time series or apply statistics "
"derived for another station.")
cascade_opt.fill_with_sample_data()
# For some box types, the corresponding probabilities might yield nan.
# If this happens, nan values will be replaced by 1/7 in order to provide
# valid values for disaggregation.
if np.isnan(cascade_opt.wxx).any():
print("Warning (weighting probabilities):")
print("The derived cascade statistics are not valid as some "
"probabilities are undefined! ", end="")
print("Try to use longer time series that might be more "
"appropriate for deriving statistics. ", end="")
print("As a workaround, default values according to equally "
"distributed probabilities ", end="")
print("will be applied...", end="")
cascade_opt.wxx[np.isnan(cascade_opt.wxx)] = 1.0 / 7.0
wxx = np.zeros((2, 4))
for k in range(0, 7):
wxx[:, :] += cascade_opt.wxx[k, :, :]
if wxx.any() > 1.001 or wxx.any() < 0.999:
print("failed! Using default values!")
cascade_opt.fill_with_sample_data()
else:
print("OK!")
return cascade_opt, vdn | ['def', 'aggregate_precipitation', '(', 'vec_data', ',', 'hourly', '=', 'True', ',', 'percentile', '=', '50', ')', ':', 'cascade_opt', '=', 'cascade', '.', 'CascadeStatistics', '(', ')', 'cascade_opt', '.', 'percentile', '=', 'percentile', '# length of input time series', 'n_in', '=', 'len', '(', 'vec_data', ')', 'n_out', '=', 'np', '.', 'floor', '(', 'n_in', '/', '2', ')', '# alternative:', '# 1st step: new time series', 'vec_time', '=', 'vec_data', '.', 'index', 'vdn0', '=', '[', ']', 'vtn0', '=', '[', ']', 'j', '=', '0', 'for', 'i', 'in', 'range', '(', '0', ',', 'n_in', ')', ':', 'if', 'np', '.', 'mod', '(', 'i', ',', '2', ')', '!=', '0', ':', 'vdn0', '.', 'append', '(', 'vec_data', '.', 'precip', '.', 'values', '[', 'i', '-', '1', ']', '+', 'vec_data', '.', 'precip', '.', 'values', '[', 'i', ']', ')', 'vtn0', '.', 'append', '(', 'vec_time', '[', 'i', ']', ')', 'j', '=', 'j', '+', '1', 'vdn', '=', 'pd', '.', 'DataFrame', '(', 'index', '=', 'vtn0', ',', 'data', '=', '{', "'precip'", ':', 'vdn0', '}', ')', '# length of new time series', 'n_out', '=', 'len', '(', 'vdn', ')', '# series of box types:', 'vbtype', '=', 'np', '.', 'zeros', '(', '(', 'n_out', ',', ')', ',', 'dtype', '=', 'np', '.', 'int', ')', '# fields for empirical probabilities', '# counts', 'nb', '=', 'np', '.', 'zeros', '(', '(', '2', ',', '4', ')', ')', 'nbxx', '=', 'np', '.', 'zeros', '(', '(', '2', ',', '4', ')', ')', '# class boundaries for histograms', '# wclassbounds = np.linspace(0, 1, num=8)', 'wlower', '=', 'np', '.', 'array', '(', '[', '0', ',', '0.1429', ',', '0.2857', ',', '0.4286', ',', '0.5714', ',', '0.7143', ',', '0.8571', ']', ')', '# wclassbounds[0:7]', 'wupper', '=', 'np', '.', 'array', '(', '[', '0.1429', ',', '0.2857', ',', '0.4286', ',', '0.5714', ',', '0.7143', ',', '0.8571', ',', '1.0', ']', ')', '# wclassbounds[1:8]', '# evaluate mean rainfall intensity for wet boxes', '# these values should be determined during the aggregation phase!!!!!', '# mean volume threshold', 'meanvol', '=', 'np', '.', 'percentile', '(', 'vdn', '.', 'precip', '[', 'vdn', '.', 'precip', '>', '0.', ']', ',', 'cascade_opt', '.', 'percentile', ')', '# np.mean(vdn.precip[vdn.precip>0.])', 'cascade_opt', '.', 'threshold', '=', 'np', '.', 'array', '(', '[', 'meanvol', ']', ')', '# 2nd step: classify boxes at the upper level', 'for', 'i', 'in', 'range', '(', '0', ',', 'n_out', ')', ':', 'if', 'vdn', '.', 'precip', '.', 'values', '[', 'i', ']', '>', '0.', ':', '# rain?', 'if', 'i', '==', '0', ':', '# only starting or isolated', 'if', 'vdn', '.', 'precip', '.', 'values', '[', 'i', '+', '1', ']', '>', '0.', ':', 'vbtype', '[', 'i', ']', '=', 'cascade', '.', 'BoxTypes', '.', 'starting', 'else', ':', 'vbtype', '[', 'i', ']', '=', 'cascade', '.', 'BoxTypes', '.', 'isolated', 'elif', 'i', '==', 'n_out', '-', '1', ':', '# only ending or isolated', 'if', 'vdn', '.', 'precip', '.', 'values', '[', 'i', '-', '1', ']', '>', '0.', ':', 'vbtype', '[', 'i', ']', '=', 'cascade', '.', 'BoxTypes', '.', 'ending', 'else', ':', 'vbtype', '[', 'i', ']', '=', 'cascade', '.', 'BoxTypes', '.', 'isolated', 'else', ':', '# neither at at the end nor at the beginning', 'if', 'vdn', '.', 'precip', '.', 'values', '[', 'i', '-', '1', ']', '==', '0.', 'and', 'vdn', '.', 'precip', '.', 'values', '[', 'i', '+', '1', ']', '==', '0.', ':', 'vbtype', '[', 'i', ']', '=', 'cascade', '.', 'BoxTypes', '.', 'isolated', 'if', 'vdn', '.', 'precip', '.', 'values', '[', 'i', '-', '1', ']', '==', '0.', 'and', 'vdn', '.', 'precip', '.', 'values', '[', 'i', 
'+', '1', ']', '>', '0.', ':', 'vbtype', '[', 'i', ']', '=', 'cascade', '.', 'BoxTypes', '.', 'starting', 'if', 'vdn', '.', 'precip', '.', 'values', '[', 'i', '-', '1', ']', '>', '0.', 'and', 'vdn', '.', 'precip', '.', 'values', '[', 'i', '+', '1', ']', '>', '0.', ':', 'vbtype', '[', 'i', ']', '=', 'cascade', '.', 'BoxTypes', '.', 'enclosed', 'if', 'vdn', '.', 'precip', '.', 'values', '[', 'i', '-', '1', ']', '>', '0.', 'and', 'vdn', '.', 'precip', '.', 'values', '[', 'i', '+', '1', ']', '==', '0.', ':', 'vbtype', '[', 'i', ']', '=', 'cascade', '.', 'BoxTypes', '.', 'ending', 'else', ':', 'vbtype', '[', 'i', ']', '=', 'cascade', '.', 'BoxTypes', '.', 'dry', '# no rain', '# 3rd step: examine branching', 'j', '=', '0', 'for', 'i', 'in', 'range', '(', '0', ',', 'n_in', ')', ':', 'if', 'np', '.', 'mod', '(', 'i', ',', '2', ')', '!=', '0', ':', 'if', 'vdn', '.', 'precip', '.', 'values', '[', 'j', ']', '>', '0', ':', 'if', 'vdn', '.', 'precip', '.', 'values', '[', 'j', ']', '>', 'meanvol', ':', 'belowabove', '=', '1', '# above mean', 'else', ':', 'belowabove', '=', '0', '# below mean', 'nb', '[', 'belowabove', ',', 'vbtype', '[', 'j', ']', '-', '1', ']', '+=', '1', 'if', 'vec_data', '.', 'precip', '.', 'values', '[', 'i', '-', '1', ']', '>', '0', 'and', 'vec_data', '.', 'precip', '.', 'values', '[', 'i', ']', '==', '0', ':', '# P(1/0)', 'cascade_opt', '.', 'p10', '[', 'belowabove', ',', 'vbtype', '[', 'j', ']', '-', '1', ']', '+=', '1', 'if', 'vec_data', '.', 'precip', '.', 'values', '[', 'i', '-', '1', ']', '==', '0', 'and', 'vec_data', '.', 'precip', '.', 'values', '[', 'i', ']', '>', '0', ':', '# P(0/1)', 'cascade_opt', '.', 'p01', '[', 'belowabove', ',', 'vbtype', '[', 'j', ']', '-', '1', ']', '+=', '1', 'if', 'vec_data', '.', 'precip', '.', 'values', '[', 'i', '-', '1', ']', '>', '0', 'and', 'vec_data', '.', 'precip', '.', 'values', '[', 'i', ']', '>', '0', ':', '# P(x/x)', 'cascade_opt', '.', 'pxx', '[', 'belowabove', ',', 'vbtype', '[', 'j', ']', '-', '1', ']', '+=', '1', 'nbxx', '[', 'belowabove', ',', 'vbtype', '[', 'j', ']', '-', '1', ']', '+=', '1', '# weights', 'r1', '=', 'vec_data', '.', 'precip', '.', 'values', '[', 'i', '-', '1', ']', 'r2', '=', 'vec_data', '.', 'precip', '.', 'values', '[', 'i', ']', 'wxxval', '=', 'r1', '/', '(', 'r1', '+', 'r2', ')', '# Test', 'if', 'abs', '(', 'r1', '+', 'r2', '-', 'vdn', '.', 'precip', '.', 'values', '[', 'j', ']', ')', '>', '1.E-3', ':', 'print', '(', "'i='", '+', 'str', '(', 'i', ')', '+', "', j='", '+', 'str', '(', 'j', ')', '+', "', r1='", '+', 'str', '(', 'r1', ')', '+', '", r2="', '+', 'str', '(', 'r2', ')', '+', '", Summe="', '+', 'str', '(', 'vdn', '.', 'precip', '.', 'values', '[', 'j', ']', ')', ')', 'print', '(', 'vec_data', '.', 'index', '[', 'i', ']', ')', 'print', '(', 'vdn', '.', 'index', '[', 'j', ']', ')', 'print', '(', "'error'", ')', 'return', 'cascade_opt', ',', 'vdn', 'for', 'k', 'in', 'range', '(', '0', ',', '7', ')', ':', 'if', 'wxxval', '>', 'wlower', '[', 'k', ']', 'and', 'wxxval', '<=', 'wupper', '[', 'k', ']', ':', 'cascade_opt', '.', 'wxx', '[', 'k', ',', 'belowabove', ',', 'vbtype', '[', 'j', ']', '-', '1', ']', '+=', '1', 'break', 'j', '=', 'j', '+', '1', '# 4th step: transform counts to percentages', 'cascade_opt', '.', 'p01', '=', 'cascade_opt', '.', 'p01', '/', 'nb', 'cascade_opt', '.', 'p10', '=', 'cascade_opt', '.', 'p10', '/', 'nb', 'cascade_opt', '.', 'pxx', '=', 'cascade_opt', '.', 'pxx', '/', 'nb', 'with', 'np', '.', 'errstate', '(', 'divide', '=', "'ignore'", ',', 'invalid', '=', "'ignore'", ')', ':', 
'# do not issue warnings here when dividing by zero, this is handled below', 'for', 'k', 'in', 'range', '(', '0', ',', '7', ')', ':', 'cascade_opt', '.', 'wxx', '[', 'k', ',', ':', ',', ':', ']', '=', 'cascade_opt', '.', 'wxx', '[', 'k', ',', ':', ',', ':', ']', '/', 'nbxx', '[', ':', ',', ':', ']', '# In some cases, the time series are too short for deriving statistics.', 'if', '(', 'np', '.', 'isnan', '(', 'cascade_opt', '.', 'p01', ')', '.', 'any', '(', ')', 'or', 'np', '.', 'isnan', '(', 'cascade_opt', '.', 'p10', ')', '.', 'any', '(', ')', 'or', 'np', '.', 'isnan', '(', 'cascade_opt', '.', 'pxx', ')', '.', 'any', '(', ')', ')', ':', 'print', '(', '"ERROR (branching probabilities):"', ')', 'print', '(', '"Invalid statistics. Default values will be returned. "', '"Try to use longer time series or apply statistics "', '"derived for another station."', ')', 'cascade_opt', '.', 'fill_with_sample_data', '(', ')', '# For some box types, the corresponding probabilities might yield nan.', '# If this happens, nan values will be replaced by 1/7 in order to provide', '# valid values for disaggregation.', 'if', 'np', '.', 'isnan', '(', 'cascade_opt', '.', 'wxx', ')', '.', 'any', '(', ')', ':', 'print', '(', '"Warning (weighting probabilities):"', ')', 'print', '(', '"The derived cascade statistics are not valid as some "', '"probabilities are undefined! "', ',', 'end', '=', '""', ')', 'print', '(', '"Try to use longer time series that might be more "', '"appropriate for deriving statistics. "', ',', 'end', '=', '""', ')', 'print', '(', '"As a workaround, default values according to equally "', '"distributed probabilities "', ',', 'end', '=', '""', ')', 'print', '(', '"will be applied..."', ',', 'end', '=', '""', ')', 'cascade_opt', '.', 'wxx', '[', 'np', '.', 'isnan', '(', 'cascade_opt', '.', 'wxx', ')', ']', '=', '1.0', '/', '7.0', 'wxx', '=', 'np', '.', 'zeros', '(', '(', '2', ',', '4', ')', ')', 'for', 'k', 'in', 'range', '(', '0', ',', '7', ')', ':', 'wxx', '[', ':', ',', ':', ']', '+=', 'cascade_opt', '.', 'wxx', '[', 'k', ',', ':', ',', ':', ']', 'if', 'wxx', '.', 'any', '(', ')', '>', '1.001', 'or', 'wxx', '.', 'any', '(', ')', '<', '0.999', ':', 'print', '(', '"failed! Using default values!"', ')', 'cascade_opt', '.', 'fill_with_sample_data', '(', ')', 'else', ':', 'print', '(', '"OK!"', ')', 'return', 'cascade_opt', ',', 'vdn'] | Aggregates highly resolved precipitation data and creates statistics
Parameters
----------
vec_data : pd.Series
hourly (hourly=True) OR 5-min values
Returns
-------
output : cascade object
representing statistics of the cascade model | ['Aggregates', 'highly', 'resolved', 'precipitation', 'data', 'and', 'creates', 'statistics'] | train | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/precipitation.py#L403-L586 |
4,289 | androguard/androguard | androguard/decompiler/dad/util.py | build_path | def build_path(graph, node1, node2, path=None):
"""
Build the path from node1 to node2.
The path is composed of all the nodes between node1 and node2,
node1 excluded. Although if there is a loop starting from node1, it will be
included in the path.
"""
if path is None:
path = []
if node1 is node2:
return path
path.append(node2)
for pred in graph.all_preds(node2):
if pred in path:
continue
build_path(graph, node1, pred, path)
return path | python | def build_path(graph, node1, node2, path=None):
"""
Build the path from node1 to node2.
The path is composed of all the nodes between node1 and node2,
node1 excluded. Although if there is a loop starting from node1, it will be
included in the path.
"""
if path is None:
path = []
if node1 is node2:
return path
path.append(node2)
for pred in graph.all_preds(node2):
if pred in path:
continue
build_path(graph, node1, pred, path)
return path | ['def', 'build_path', '(', 'graph', ',', 'node1', ',', 'node2', ',', 'path', '=', 'None', ')', ':', 'if', 'path', 'is', 'None', ':', 'path', '=', '[', ']', 'if', 'node1', 'is', 'node2', ':', 'return', 'path', 'path', '.', 'append', '(', 'node2', ')', 'for', 'pred', 'in', 'graph', '.', 'all_preds', '(', 'node2', ')', ':', 'if', 'pred', 'in', 'path', ':', 'continue', 'build_path', '(', 'graph', ',', 'node1', ',', 'pred', ',', 'path', ')', 'return', 'path'] | Build the path from node1 to node2.
The path is composed of all the nodes between node1 and node2,
node1 excluded. Although if there is a loop starting from node1, it will be
included in the path. | ['Build', 'the', 'path', 'from', 'node1', 'to', 'node2', '.', 'The', 'path', 'is', 'composed', 'of', 'all', 'the', 'nodes', 'between', 'node1', 'and', 'node2', 'node1', 'excluded', '.', 'Although', 'if', 'there', 'is', 'a', 'loop', 'starting', 'from', 'node1', 'it', 'will', 'be', 'included', 'in', 'the', 'path', '.'] | train | https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/decompiler/dad/util.py#L100-L116 |
4,290 | odlgroup/odl | odl/space/base_tensors.py | TensorSpace.complex_space | def complex_space(self):
"""The space corresponding to this space's `complex_dtype`.
Raises
------
ValueError
If `dtype` is not a numeric data type.
"""
if not is_numeric_dtype(self.dtype):
raise ValueError(
'`complex_space` not defined for non-numeric `dtype`')
return self.astype(self.complex_dtype) | python | def complex_space(self):
"""The space corresponding to this space's `complex_dtype`.
Raises
------
ValueError
If `dtype` is not a numeric data type.
"""
if not is_numeric_dtype(self.dtype):
raise ValueError(
'`complex_space` not defined for non-numeric `dtype`')
return self.astype(self.complex_dtype) | ['def', 'complex_space', '(', 'self', ')', ':', 'if', 'not', 'is_numeric_dtype', '(', 'self', '.', 'dtype', ')', ':', 'raise', 'ValueError', '(', "'`complex_space` not defined for non-numeric `dtype`'", ')', 'return', 'self', '.', 'astype', '(', 'self', '.', 'complex_dtype', ')'] | The space corresponding to this space's `complex_dtype`.
Raises
------
ValueError
If `dtype` is not a numeric data type. | ['The', 'space', 'corresponding', 'to', 'this', 'space', 's', 'complex_dtype', '.'] | train | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/base_tensors.py#L193-L204 |
4,291 | JarryShaw/PyPCAPKit | src/protocols/internet/ipv6_route.py | IPv6_Route._read_data_type_none | def _read_data_type_none(self, length):
"""Read IPv6-Route unknown type data.
Structure of IPv6-Route unknown type data [RFC 8200][RFC 5095]:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Next Header | Hdr Ext Len | Routing Type | Segments Left |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
. .
. type-specific data .
. .
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 route.next Next Header
1 8 route.length Header Extensive Length
2 16 route.type Routing Type
3 24 route.seg_left Segments Left
4 32 route.data Type-Specific Data
"""
_data = self._read_fileng(length)
data = dict(
data=_data,
)
return data | python | def _read_data_type_none(self, length):
"""Read IPv6-Route unknown type data.
Structure of IPv6-Route unknown type data [RFC 8200][RFC 5095]:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Next Header | Hdr Ext Len | Routing Type | Segments Left |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
. .
. type-specific data .
. .
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 route.next Next Header
1 8 route.length Header Extensive Length
2 16 route.type Routing Type
3 24 route.seg_left Segments Left
4 32 route.data Type-Specific Data
"""
_data = self._read_fileng(length)
data = dict(
data=_data,
)
return data | ['def', '_read_data_type_none', '(', 'self', ',', 'length', ')', ':', '_data', '=', 'self', '.', '_read_fileng', '(', 'length', ')', 'data', '=', 'dict', '(', 'data', '=', '_data', ',', ')', 'return', 'data'] | Read IPv6-Route unknown type data.
Structure of IPv6-Route unknown type data [RFC 8200][RFC 5095]:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Next Header | Hdr Ext Len | Routing Type | Segments Left |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
. .
. type-specific data .
. .
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 route.next Next Header
1 8 route.length Header Extensive Length
2 16 route.type Routing Type
3 24 route.seg_left Segments Left
4 32 route.data Type-Specific Data | ['Read', 'IPv6', '-', 'Route', 'unknown', 'type', 'data', '.'] | train | https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/internet/ipv6_route.py#L169-L197 |
4,292 | adamrehn/ue4cli | ue4cli/Utility.py | Utility.join | def join(delim, items, quotes=False):
"""
Joins the supplied list of strings after removing any empty strings from the list
"""
transform = lambda s: s
if quotes == True:
transform = lambda s: s if ' ' not in s else '"{}"'.format(s)
stripped = list([transform(i) for i in items if len(i) > 0])
if len(stripped) > 0:
return delim.join(stripped)
return '' | python | def join(delim, items, quotes=False):
"""
Joins the supplied list of strings after removing any empty strings from the list
"""
transform = lambda s: s
if quotes == True:
transform = lambda s: s if ' ' not in s else '"{}"'.format(s)
stripped = list([transform(i) for i in items if len(i) > 0])
if len(stripped) > 0:
return delim.join(stripped)
return '' | ['def', 'join', '(', 'delim', ',', 'items', ',', 'quotes', '=', 'False', ')', ':', 'transform', '=', 'lambda', 's', ':', 's', 'if', 'quotes', '==', 'True', ':', 'transform', '=', 'lambda', 's', ':', 's', 'if', "' '", 'not', 'in', 's', 'else', '\'"{}"\'', '.', 'format', '(', 's', ')', 'stripped', '=', 'list', '(', '[', 'transform', '(', 'i', ')', 'for', 'i', 'in', 'items', 'if', 'len', '(', 'i', ')', '>', '0', ']', ')', 'if', 'len', '(', 'stripped', ')', '>', '0', ':', 'return', 'delim', '.', 'join', '(', 'stripped', ')', 'return', "''"] | Joins the supplied list of strings after removing any empty strings from the list | ['Joins', 'the', 'supplied', 'list', 'of', 'strings', 'after', 'removing', 'any', 'empty', 'strings', 'from', 'the', 'list'] | train | https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/Utility.py#L72-L83 |
4,293 | googleapis/google-cloud-python | storage/google/cloud/storage/_helpers.py | _scalar_property | def _scalar_property(fieldname):
"""Create a property descriptor around the :class:`_PropertyMixin` helpers.
"""
def _getter(self):
"""Scalar property getter."""
return self._properties.get(fieldname)
def _setter(self, value):
"""Scalar property setter."""
self._patch_property(fieldname, value)
return property(_getter, _setter) | python | def _scalar_property(fieldname):
"""Create a property descriptor around the :class:`_PropertyMixin` helpers.
"""
def _getter(self):
"""Scalar property getter."""
return self._properties.get(fieldname)
def _setter(self, value):
"""Scalar property setter."""
self._patch_property(fieldname, value)
return property(_getter, _setter) | ['def', '_scalar_property', '(', 'fieldname', ')', ':', 'def', '_getter', '(', 'self', ')', ':', '"""Scalar property getter."""', 'return', 'self', '.', '_properties', '.', 'get', '(', 'fieldname', ')', 'def', '_setter', '(', 'self', ',', 'value', ')', ':', '"""Scalar property setter."""', 'self', '.', '_patch_property', '(', 'fieldname', ',', 'value', ')', 'return', 'property', '(', '_getter', ',', '_setter', ')'] | Create a property descriptor around the :class:`_PropertyMixin` helpers. | ['Create', 'a', 'property', 'descriptor', 'around', 'the', ':', 'class', ':', '_PropertyMixin', 'helpers', '.'] | train | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/_helpers.py#L216-L228 |
4,294 | JukeboxPipeline/jukebox-core | src/jukeboxcore/gui/widgets/reftrackwidget.py | OptionSelector.setup_ui | def setup_ui(self, ):
"""Setup the ui
:returns: None
:rtype: None
:raises: None
"""
labels = self.reftrack.get_option_labels()
self.browser = ComboBoxBrowser(len(labels), headers=labels)
self.browser_vbox.addWidget(self.browser) | python | def setup_ui(self, ):
"""Setup the ui
:returns: None
:rtype: None
:raises: None
"""
labels = self.reftrack.get_option_labels()
self.browser = ComboBoxBrowser(len(labels), headers=labels)
self.browser_vbox.addWidget(self.browser) | ['def', 'setup_ui', '(', 'self', ',', ')', ':', 'labels', '=', 'self', '.', 'reftrack', '.', 'get_option_labels', '(', ')', 'self', '.', 'browser', '=', 'ComboBoxBrowser', '(', 'len', '(', 'labels', ')', ',', 'headers', '=', 'labels', ')', 'self', '.', 'browser_vbox', '.', 'addWidget', '(', 'self', '.', 'browser', ')'] | Setup the ui
:returns: None
:rtype: None
:raises: None | ['Setup', 'the', 'ui'] | train | https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L39-L48 |
4,295 | nornir-automation/nornir | nornir/plugins/tasks/networking/napalm_cli.py | napalm_cli | def napalm_cli(task: Task, commands: List[str]) -> Result:
"""
Run commands on remote devices using napalm
Arguments:
commands: commands to execute
Returns:
Result object with the following attributes set:
* result (``dict``): result of the commands execution
"""
device = task.host.get_connection("napalm", task.nornir.config)
result = device.cli(commands)
return Result(host=task.host, result=result) | python | def napalm_cli(task: Task, commands: List[str]) -> Result:
"""
Run commands on remote devices using napalm
Arguments:
commands: commands to execute
Returns:
Result object with the following attributes set:
* result (``dict``): result of the commands execution
"""
device = task.host.get_connection("napalm", task.nornir.config)
result = device.cli(commands)
return Result(host=task.host, result=result) | ['def', 'napalm_cli', '(', 'task', ':', 'Task', ',', 'commands', ':', 'List', '[', 'str', ']', ')', '->', 'Result', ':', 'device', '=', 'task', '.', 'host', '.', 'get_connection', '(', '"napalm"', ',', 'task', '.', 'nornir', '.', 'config', ')', 'result', '=', 'device', '.', 'cli', '(', 'commands', ')', 'return', 'Result', '(', 'host', '=', 'task', '.', 'host', ',', 'result', '=', 'result', ')'] | Run commands on remote devices using napalm
Arguments:
commands: commands to execute
Returns:
Result object with the following attributes set:
* result (``dict``): result of the commands execution | ['Run', 'commands', 'on', 'remote', 'devices', 'using', 'napalm'] | train | https://github.com/nornir-automation/nornir/blob/3425c47fd870db896cb80f619bae23bd98d50c74/nornir/plugins/tasks/networking/napalm_cli.py#L6-L19 |
4,296 | google/apitools | apitools/base/py/base_api.py | BaseApiService.__CombineGlobalParams | def __CombineGlobalParams(self, global_params, default_params):
"""Combine the given params with the defaults."""
util.Typecheck(global_params, (type(None), self.__client.params_type))
result = self.__client.params_type()
global_params = global_params or self.__client.params_type()
for field in result.all_fields():
value = global_params.get_assigned_value(field.name)
if value is None:
value = default_params.get_assigned_value(field.name)
if value not in (None, [], ()):
setattr(result, field.name, value)
return result | python | def __CombineGlobalParams(self, global_params, default_params):
"""Combine the given params with the defaults."""
util.Typecheck(global_params, (type(None), self.__client.params_type))
result = self.__client.params_type()
global_params = global_params or self.__client.params_type()
for field in result.all_fields():
value = global_params.get_assigned_value(field.name)
if value is None:
value = default_params.get_assigned_value(field.name)
if value not in (None, [], ()):
setattr(result, field.name, value)
return result | ['def', '__CombineGlobalParams', '(', 'self', ',', 'global_params', ',', 'default_params', ')', ':', 'util', '.', 'Typecheck', '(', 'global_params', ',', '(', 'type', '(', 'None', ')', ',', 'self', '.', '__client', '.', 'params_type', ')', ')', 'result', '=', 'self', '.', '__client', '.', 'params_type', '(', ')', 'global_params', '=', 'global_params', 'or', 'self', '.', '__client', '.', 'params_type', '(', ')', 'for', 'field', 'in', 'result', '.', 'all_fields', '(', ')', ':', 'value', '=', 'global_params', '.', 'get_assigned_value', '(', 'field', '.', 'name', ')', 'if', 'value', 'is', 'None', ':', 'value', '=', 'default_params', '.', 'get_assigned_value', '(', 'field', '.', 'name', ')', 'if', 'value', 'not', 'in', '(', 'None', ',', '[', ']', ',', '(', ')', ')', ':', 'setattr', '(', 'result', ',', 'field', '.', 'name', ',', 'value', ')', 'return', 'result'] | Combine the given params with the defaults. | ['Combine', 'the', 'given', 'params', 'with', 'the', 'defaults', '.'] | train | https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/base_api.py#L511-L522 |
4,297 | romanz/trezor-agent | libagent/gpg/keyring.py | gpg_version | def gpg_version(sp=subprocess):
"""Get a keygrip of the primary GPG key of the specified user."""
args = gpg_command(['--version'])
output = check_output(args=args, sp=sp)
line = output.split(b'\n')[0] # b'gpg (GnuPG) 2.1.11'
line = line.split(b' ')[-1] # b'2.1.11'
line = line.split(b'-')[0] # remove trailing version parts
return line.split(b'v')[-1] | python | def gpg_version(sp=subprocess):
"""Get a keygrip of the primary GPG key of the specified user."""
args = gpg_command(['--version'])
output = check_output(args=args, sp=sp)
line = output.split(b'\n')[0] # b'gpg (GnuPG) 2.1.11'
line = line.split(b' ')[-1] # b'2.1.11'
line = line.split(b'-')[0] # remove trailing version parts
return line.split(b'v')[-1] | ['def', 'gpg_version', '(', 'sp', '=', 'subprocess', ')', ':', 'args', '=', 'gpg_command', '(', '[', "'--version'", ']', ')', 'output', '=', 'check_output', '(', 'args', '=', 'args', ',', 'sp', '=', 'sp', ')', 'line', '=', 'output', '.', 'split', '(', "b'\\n'", ')', '[', '0', ']', "# b'gpg (GnuPG) 2.1.11'", 'line', '=', 'line', '.', 'split', '(', "b' '", ')', '[', '-', '1', ']', "# b'2.1.11'", 'line', '=', 'line', '.', 'split', '(', "b'-'", ')', '[', '0', ']', '# remove trailing version parts', 'return', 'line', '.', 'split', '(', "b'v'", ')', '[', '-', '1', ']'] | Get a keygrip of the primary GPG key of the specified user. | ['Get', 'a', 'keygrip', 'of', 'the', 'primary', 'GPG', 'key', 'of', 'the', 'specified', 'user', '.'] | train | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/keyring.py#L223-L230 |
4,298 | grauwoelfchen/flask-dotenv | flask_dotenv.py | DotEnv.init_app | def init_app(self, app, env_file=None, verbose_mode=False):
"""Imports .env file."""
if self.app is None:
self.app = app
self.verbose_mode = verbose_mode
if env_file is None:
env_file = os.path.join(os.getcwd(), ".env")
if not os.path.exists(env_file):
warnings.warn("can't read {0} - it doesn't exist".format(env_file))
else:
self.__import_vars(env_file) | python | def init_app(self, app, env_file=None, verbose_mode=False):
"""Imports .env file."""
if self.app is None:
self.app = app
self.verbose_mode = verbose_mode
if env_file is None:
env_file = os.path.join(os.getcwd(), ".env")
if not os.path.exists(env_file):
warnings.warn("can't read {0} - it doesn't exist".format(env_file))
else:
self.__import_vars(env_file) | ['def', 'init_app', '(', 'self', ',', 'app', ',', 'env_file', '=', 'None', ',', 'verbose_mode', '=', 'False', ')', ':', 'if', 'self', '.', 'app', 'is', 'None', ':', 'self', '.', 'app', '=', 'app', 'self', '.', 'verbose_mode', '=', 'verbose_mode', 'if', 'env_file', 'is', 'None', ':', 'env_file', '=', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'getcwd', '(', ')', ',', '".env"', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'env_file', ')', ':', 'warnings', '.', 'warn', '(', '"can\'t read {0} - it doesn\'t exist"', '.', 'format', '(', 'env_file', ')', ')', 'else', ':', 'self', '.', '__import_vars', '(', 'env_file', ')'] | Imports .env file. | ['Imports', '.', 'env', 'file', '.'] | train | https://github.com/grauwoelfchen/flask-dotenv/blob/7dc811fff18570c4b6803ce48c3ecca7eebabe51/flask_dotenv.py#L24-L35 |
4,299 | apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py | _ColumnFunctionTransformation._load_version | def _load_version(cls, unpickler, version):
"""
A function to load a previously saved SentenceSplitter instance.
Parameters
----------
unpickler : GLUnpickler
A GLUnpickler file handler.
version : int
Version number maintained by the class writer.
"""
state, _exclude, _features = unpickler.load()
features = state['features']
excluded_features = state['excluded_features']
model = cls.__new__(cls)
model._setup()
model.__proxy__.update(state)
model._exclude = _exclude
model._features = _features
return model | python | def _load_version(cls, unpickler, version):
"""
A function to load a previously saved SentenceSplitter instance.
Parameters
----------
unpickler : GLUnpickler
A GLUnpickler file handler.
version : int
Version number maintained by the class writer.
"""
state, _exclude, _features = unpickler.load()
features = state['features']
excluded_features = state['excluded_features']
model = cls.__new__(cls)
model._setup()
model.__proxy__.update(state)
model._exclude = _exclude
model._features = _features
return model | ['def', '_load_version', '(', 'cls', ',', 'unpickler', ',', 'version', ')', ':', 'state', ',', '_exclude', ',', '_features', '=', 'unpickler', '.', 'load', '(', ')', 'features', '=', 'state', '[', "'features'", ']', 'excluded_features', '=', 'state', '[', "'excluded_features'", ']', 'model', '=', 'cls', '.', '__new__', '(', 'cls', ')', 'model', '.', '_setup', '(', ')', 'model', '.', '__proxy__', '.', 'update', '(', 'state', ')', 'model', '.', '_exclude', '=', '_exclude', 'model', '.', '_features', '=', '_features', 'return', 'model'] | A function to load a previously saved SentenceSplitter instance.
Parameters
----------
unpickler : GLUnpickler
A GLUnpickler file handler.
version : int
Version number maintained by the class writer. | ['A', 'function', 'to', 'load', 'a', 'previously', 'saved', 'SentenceSplitter', 'instance', '.'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L72-L95 |