Dataset column summary (dtype and min/max value or string length per column; in the rows shown below the `language` column always has the value `python` and `split_name` always has the value `train`):

| column | dtype | min | max |
|---|---|---|---|
| Unnamed: 0 | int64 | 0 | 10k |
| repository_name | string | 7 | 54 |
| func_path_in_repository | string | 5 | 223 |
| func_name | string | 1 | 134 |
| whole_func_string | string | 100 | 30.3k |
| language | string (1 class) | n/a | n/a |
| func_code_string | string | 100 | 30.3k |
| func_code_tokens | string | 138 | 33.2k |
| func_documentation_string | string | 1 | 15k |
| func_documentation_tokens | string | 5 | 5.14k |
| split_name | string (1 class) | n/a | n/a |
| func_code_url | string | 91 | 315 |

5,000 | dw/mitogen | ansible_mitogen/process.py | MuxProcess.start | python | train | https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/ansible_mitogen/process.py#L161-L207

def start(cls, _init_logging=True):
    """
    Arrange for the subprocess to be started, if it is not already running.
    The parent process picks a UNIX socket path the child will use prior to
    fork, creates a socketpair used essentially as a semaphore, then blocks
    waiting for the child to indicate the UNIX socket is ready for use.
    :param bool _init_logging:
        For testing, if :data:`False`, don't initialize logging.
    """
    if cls.worker_sock is not None:
        return
    if faulthandler is not None:
        faulthandler.enable()
    mitogen.utils.setup_gil()
    cls.unix_listener_path = mitogen.unix.make_socket_path()
    cls.worker_sock, cls.child_sock = socket.socketpair()
    atexit.register(lambda: clean_shutdown(cls.worker_sock))
    mitogen.core.set_cloexec(cls.worker_sock.fileno())
    mitogen.core.set_cloexec(cls.child_sock.fileno())
    cls.profiling = os.environ.get('MITOGEN_PROFILING') is not None
    if cls.profiling:
        mitogen.core.enable_profiling()
    if _init_logging:
        ansible_mitogen.logging.setup()
    cls.original_env = dict(os.environ)
    cls.child_pid = os.fork()
    if cls.child_pid:
        save_pid('controller')
        ansible_mitogen.logging.set_process_name('top')
        ansible_mitogen.affinity.policy.assign_controller()
        cls.child_sock.close()
        cls.child_sock = None
        mitogen.core.io_op(cls.worker_sock.recv, 1)
    else:
        save_pid('mux')
        ansible_mitogen.logging.set_process_name('mux')
        ansible_mitogen.affinity.policy.assign_muxprocess()
        cls.worker_sock.close()
        cls.worker_sock = None
        self = cls()
        self.worker_main()
"""
Arrange for the subprocess to be started, if it is not already running.
The parent process picks a UNIX socket path the child will use prior to
fork, creates a socketpair used essentially as a semaphore, then blocks
waiting for the child to indicate the UNIX socket is ready for use.
:param bool _init_logging:
For testing, if :data:`False`, don't initialize logging.
"""
if cls.worker_sock is not None:
return
if faulthandler is not None:
faulthandler.enable()
mitogen.utils.setup_gil()
cls.unix_listener_path = mitogen.unix.make_socket_path()
cls.worker_sock, cls.child_sock = socket.socketpair()
atexit.register(lambda: clean_shutdown(cls.worker_sock))
mitogen.core.set_cloexec(cls.worker_sock.fileno())
mitogen.core.set_cloexec(cls.child_sock.fileno())
cls.profiling = os.environ.get('MITOGEN_PROFILING') is not None
if cls.profiling:
mitogen.core.enable_profiling()
if _init_logging:
ansible_mitogen.logging.setup()
cls.original_env = dict(os.environ)
cls.child_pid = os.fork()
if cls.child_pid:
save_pid('controller')
ansible_mitogen.logging.set_process_name('top')
ansible_mitogen.affinity.policy.assign_controller()
cls.child_sock.close()
cls.child_sock = None
mitogen.core.io_op(cls.worker_sock.recv, 1)
else:
save_pid('mux')
ansible_mitogen.logging.set_process_name('mux')
ansible_mitogen.affinity.policy.assign_muxprocess()
cls.worker_sock.close()
cls.worker_sock = None
self = cls()
self.worker_main() | ['def', 'start', '(', 'cls', ',', '_init_logging', '=', 'True', ')', ':', 'if', 'cls', '.', 'worker_sock', 'is', 'not', 'None', ':', 'return', 'if', 'faulthandler', 'is', 'not', 'None', ':', 'faulthandler', '.', 'enable', '(', ')', 'mitogen', '.', 'utils', '.', 'setup_gil', '(', ')', 'cls', '.', 'unix_listener_path', '=', 'mitogen', '.', 'unix', '.', 'make_socket_path', '(', ')', 'cls', '.', 'worker_sock', ',', 'cls', '.', 'child_sock', '=', 'socket', '.', 'socketpair', '(', ')', 'atexit', '.', 'register', '(', 'lambda', ':', 'clean_shutdown', '(', 'cls', '.', 'worker_sock', ')', ')', 'mitogen', '.', 'core', '.', 'set_cloexec', '(', 'cls', '.', 'worker_sock', '.', 'fileno', '(', ')', ')', 'mitogen', '.', 'core', '.', 'set_cloexec', '(', 'cls', '.', 'child_sock', '.', 'fileno', '(', ')', ')', 'cls', '.', 'profiling', '=', 'os', '.', 'environ', '.', 'get', '(', "'MITOGEN_PROFILING'", ')', 'is', 'not', 'None', 'if', 'cls', '.', 'profiling', ':', 'mitogen', '.', 'core', '.', 'enable_profiling', '(', ')', 'if', '_init_logging', ':', 'ansible_mitogen', '.', 'logging', '.', 'setup', '(', ')', 'cls', '.', 'original_env', '=', 'dict', '(', 'os', '.', 'environ', ')', 'cls', '.', 'child_pid', '=', 'os', '.', 'fork', '(', ')', 'if', 'cls', '.', 'child_pid', ':', 'save_pid', '(', "'controller'", ')', 'ansible_mitogen', '.', 'logging', '.', 'set_process_name', '(', "'top'", ')', 'ansible_mitogen', '.', 'affinity', '.', 'policy', '.', 'assign_controller', '(', ')', 'cls', '.', 'child_sock', '.', 'close', '(', ')', 'cls', '.', 'child_sock', '=', 'None', 'mitogen', '.', 'core', '.', 'io_op', '(', 'cls', '.', 'worker_sock', '.', 'recv', ',', '1', ')', 'else', ':', 'save_pid', '(', "'mux'", ')', 'ansible_mitogen', '.', 'logging', '.', 'set_process_name', '(', "'mux'", ')', 'ansible_mitogen', '.', 'affinity', '.', 'policy', '.', 'assign_muxprocess', '(', ')', 'cls', '.', 'worker_sock', '.', 'close', '(', ')', 'cls', '.', 'worker_sock', '=', 'None', 'self', '=', 'cls', '(', ')', 'self', '.', 'worker_main', '(', ')'] | Arrange for the subprocess to be started, if it is not already running.
The parent process picks a UNIX socket path the child will use prior to
fork, creates a socketpair used essentially as a semaphore, then blocks
waiting for the child to indicate the UNIX socket is ready for use.
:param bool _init_logging:
For testing, if :data:`False`, don't initialize logging. | ['Arrange', 'for', 'the', 'subprocess', 'to', 'be', 'started', 'if', 'it', 'is', 'not', 'already', 'running', '.'] | train | https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/ansible_mitogen/process.py#L161-L207 |

5,001 | yvesalexandre/bandicoot | bandicoot/helper/stops.py | dbscan | python | train | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/stops.py#L46-L86

def dbscan(points, eps, minpts):
    """
    Implementation of [DBSCAN]_ (*A density-based algorithm for discovering
    clusters in large spatial databases with noise*). It accepts a list of
    points (lat, lon) and returns the labels associated with the points.

    References
    ----------
    .. [DBSCAN] Ester, M., Kriegel, H. P., Sander, J., & Xu, X. (1996, August).
        A density-based algorithm for discovering clusters in large
        spatial databases with noise. In Kdd (Vol. 96, No. 34, pp. 226-231).
    """
    next_label = 0
    n = len(points)
    labels = [None] * n
    distance_matrix = compute_distance_matrix(points)
    neighbors = [get_neighbors(distance_matrix, i, eps) for i in range(n)]
    for i in range(n):
        if labels[i] is not None:
            continue
        if len(neighbors[i]) < minpts:
            continue
        labels[i] = next_label
        candidates = [i]
        while len(candidates) > 0:
            c = candidates.pop()
            for j in neighbors[c]:
                if labels[j] is None:
                    labels[j] = next_label
                    if len(neighbors[j]) >= minpts:
                        candidates.append(j)
        next_label += 1
    return labels
"""
Implementation of [DBSCAN]_ (*A density-based algorithm for discovering
clusters in large spatial databases with noise*). It accepts a list of
points (lat, lon) and returns the labels associated with the points.
References
----------
.. [DBSCAN] Ester, M., Kriegel, H. P., Sander, J., & Xu, X. (1996, August).
A density-based algorithm for discovering clusters in large
spatial databases with noise. In Kdd (Vol. 96, No. 34, pp. 226-231).
"""
next_label = 0
n = len(points)
labels = [None] * n
distance_matrix = compute_distance_matrix(points)
neighbors = [get_neighbors(distance_matrix, i, eps) for i in range(n)]
for i in range(n):
if labels[i] is not None:
continue
if len(neighbors[i]) < minpts:
continue
labels[i] = next_label
candidates = [i]
while len(candidates) > 0:
c = candidates.pop()
for j in neighbors[c]:
if labels[j] is None:
labels[j] = next_label
if len(neighbors[j]) >= minpts:
candidates.append(j)
next_label += 1
return labels | ['def', 'dbscan', '(', 'points', ',', 'eps', ',', 'minpts', ')', ':', 'next_label', '=', '0', 'n', '=', 'len', '(', 'points', ')', 'labels', '=', '[', 'None', ']', '*', 'n', 'distance_matrix', '=', 'compute_distance_matrix', '(', 'points', ')', 'neighbors', '=', '[', 'get_neighbors', '(', 'distance_matrix', ',', 'i', ',', 'eps', ')', 'for', 'i', 'in', 'range', '(', 'n', ')', ']', 'for', 'i', 'in', 'range', '(', 'n', ')', ':', 'if', 'labels', '[', 'i', ']', 'is', 'not', 'None', ':', 'continue', 'if', 'len', '(', 'neighbors', '[', 'i', ']', ')', '<', 'minpts', ':', 'continue', 'labels', '[', 'i', ']', '=', 'next_label', 'candidates', '=', '[', 'i', ']', 'while', 'len', '(', 'candidates', ')', '>', '0', ':', 'c', '=', 'candidates', '.', 'pop', '(', ')', 'for', 'j', 'in', 'neighbors', '[', 'c', ']', ':', 'if', 'labels', '[', 'j', ']', 'is', 'None', ':', 'labels', '[', 'j', ']', '=', 'next_label', 'if', 'len', '(', 'neighbors', '[', 'j', ']', ')', '>=', 'minpts', ':', 'candidates', '.', 'append', '(', 'j', ')', 'next_label', '+=', '1', 'return', 'labels'] | Implementation of [DBSCAN]_ (*A density-based algorithm for discovering
clusters in large spatial databases with noise*). It accepts a list of
points (lat, lon) and returns the labels associated with the points.
References
----------
.. [DBSCAN] Ester, M., Kriegel, H. P., Sander, J., & Xu, X. (1996, August).
A density-based algorithm for discovering clusters in large
spatial databases with noise. In Kdd (Vol. 96, No. 34, pp. 226-231). | ['Implementation', 'of', '[', 'DBSCAN', ']', '_', '(', '*', 'A', 'density', '-', 'based', 'algorithm', 'for', 'discovering', 'clusters', 'in', 'large', 'spatial', 'databases', 'with', 'noise', '*', ')', '.', 'It', 'accepts', 'a', 'list', 'of', 'points', '(', 'lat', 'lon', ')', 'and', 'returns', 'the', 'labels', 'associated', 'with', 'the', 'points', '.'] | train | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/stops.py#L46-L86 |
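
A minimal usage sketch for the `dbscan` helper above. It assumes the function is importable from `bandicoot.helper.stops`, and that `eps` is expressed in whatever distance units `compute_distance_matrix` produces (kilometres in bandicoot, but treat that as an assumption):

```python
from bandicoot.helper.stops import dbscan

points = [
    (48.8566, 2.3522),   # two nearby positions
    (48.8567, 2.3520),
    (45.7640, 4.8357),   # an isolated position, left unlabelled (None)
]

# eps: neighbourhood radius in the units used by compute_distance_matrix (assumed);
# minpts: minimum neighbourhood size (including the point itself) to seed a cluster.
labels = dbscan(points, eps=100, minpts=2)
print(labels)  # e.g. [0, 0, None]
```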

5,002 | pyroscope/pyrobase | src/pyrobase/bencode.py | bread | python | train | https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/src/pyrobase/bencode.py#L170-L180

def bread(stream):
    """ Decode a file or stream to an object.
    """
    if hasattr(stream, "read"):
        return bdecode(stream.read())
    else:
        handle = open(stream, "rb")
        try:
            return bdecode(handle.read())
        finally:
            handle.close()
""" Decode a file or stream to an object.
"""
if hasattr(stream, "read"):
return bdecode(stream.read())
else:
handle = open(stream, "rb")
try:
return bdecode(handle.read())
finally:
handle.close() | ['def', 'bread', '(', 'stream', ')', ':', 'if', 'hasattr', '(', 'stream', ',', '"read"', ')', ':', 'return', 'bdecode', '(', 'stream', '.', 'read', '(', ')', ')', 'else', ':', 'handle', '=', 'open', '(', 'stream', ',', '"rb"', ')', 'try', ':', 'return', 'bdecode', '(', 'handle', '.', 'read', '(', ')', ')', 'finally', ':', 'handle', '.', 'close', '(', ')'] | Decode a file or stream to an object. | ['Decode', 'a', 'file', 'or', 'stream', 'to', 'an', 'object', '.'] | train | https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/src/pyrobase/bencode.py#L170-L180 |
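
A short usage sketch for `bread`: it accepts either a path or an already-open binary file object (the file name below is hypothetical):

```python
from pyrobase.bencode import bread

# Decode from a path...
meta = bread("example.torrent")
# ...or from an open binary stream.
with open("example.torrent", "rb") as handle:
    meta = bread(handle)

print(sorted(meta.keys()))  # for a torrent, typically includes "announce" and "info"
```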

5,003 | OSSOS/MOP | src/ossos/core/ossos/pipeline/mk_mopheader.py | run | python | train | https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/pipeline/mk_mopheader.py#L39-L100

def run(expnum, ccd, version, dry_run=False, prefix="", force=False, ignore_dependency=False):
    """Run the OSSOS mopheader script.
    """
    message = storage.SUCCESS
    logging.info("Attempting to get status on header for {} {}".format(expnum, ccd))
    if storage.get_status(task, prefix, expnum, version, ccd) and not force:
        logging.info("{} completed successfully for {} {} {} {}".format(task, prefix, expnum, version, ccd))
        return message
    with storage.LoggingManager(task, prefix, expnum, ccd, version, dry_run):
        try:
            logging.info("Building a mopheader ")
            if not storage.get_status(dependency, prefix, expnum, "p", 36) and not ignore_dependency:
                raise IOError("{} not yet run for {}".format(dependency, expnum))
            # confirm destination directory exists.
            destdir = os.path.dirname(storage.dbimages_uri(expnum, ccd, prefix=prefix, version=version, ext='fits'))
            if not dry_run:
                storage.mkdir(destdir)
            # get image from the vospace storage area
            logging.info("Retrieving image from VOSpace")
            filename = storage.get_image(expnum, ccd, version=version, prefix=prefix)
            # launch the stepZjmp program
            logging.info("Launching stepZ on %s %d" % (expnum, ccd))
            expname = os.path.basename(filename).strip('.fits')
            logging.info(util.exec_prog(['stepZjmp',
                                         '-f',
                                         expname]))
            # if this is a dry run then we are finished
            if dry_run:
                return message
            # push the header to the VOSpace
            mopheader_filename = expname + ".mopheader"
            destination = storage.dbimages_uri(expnum, ccd, prefix=prefix, version=version, ext='mopheader')
            source = mopheader_filename
            count = 0
            with open(source, 'r'):
                while True:
                    try:
                        count += 1
                        logging.info("Attempt {} to copy {} -> {}".format(count, source, destination))
                        storage.copy(source, destination)
                        break
                    except Exception as ex:
                        if count > 10:
                            raise ex
            logging.info(message)
        except CalledProcessError as cpe:
            message = str(cpe.output)
            logging.error(message)
        except Exception as e:
            message = str(e)
            logging.error(message)
        if not dry_run:
            storage.set_status(task, prefix, expnum, version=version, ccd=ccd, status=message)
    return message
"""Run the OSSOS mopheader script.
"""
message = storage.SUCCESS
logging.info("Attempting to get status on header for {} {}".format(expnum, ccd))
if storage.get_status(task, prefix, expnum, version, ccd) and not force:
logging.info("{} completed successfully for {} {} {} {}".format(task, prefix, expnum, version, ccd))
return message
with storage.LoggingManager(task, prefix, expnum, ccd, version, dry_run):
try:
logging.info("Building a mopheader ")
if not storage.get_status(dependency, prefix, expnum, "p", 36) and not ignore_dependency:
raise IOError("{} not yet run for {}".format(dependency, expnum))
# confirm destination directory exists.
destdir = os.path.dirname(storage.dbimages_uri(expnum, ccd, prefix=prefix, version=version, ext='fits'))
if not dry_run:
storage.mkdir(destdir)
# get image from the vospace storage area
logging.info("Retrieving image from VOSpace")
filename = storage.get_image(expnum, ccd, version=version, prefix=prefix)
# launch the stepZjmp program
logging.info("Launching stepZ on %s %d" % (expnum, ccd))
expname = os.path.basename(filename).strip('.fits')
logging.info(util.exec_prog(['stepZjmp',
'-f',
expname]))
# if this is a dry run then we are finished
if dry_run:
return message
# push the header to the VOSpace
mopheader_filename = expname+".mopheader"
destination = storage.dbimages_uri(expnum, ccd, prefix=prefix, version=version, ext='mopheader')
source = mopheader_filename
count = 0
with open(source, 'r'):
while True:
try:
count += 1
logging.info("Attempt {} to copy {} -> {}".format(count, source, destination))
storage.copy(source, destination)
break
except Exception as ex:
if count > 10:
raise ex
logging.info(message)
except CalledProcessError as cpe:
message = str(cpe.output)
logging.error(message)
except Exception as e:
message = str(e)
logging.error(message)
if not dry_run:
storage.set_status(task, prefix, expnum, version=version, ccd=ccd, status=message)
return message | ['def', 'run', '(', 'expnum', ',', 'ccd', ',', 'version', ',', 'dry_run', '=', 'False', ',', 'prefix', '=', '""', ',', 'force', '=', 'False', ',', 'ignore_dependency', '=', 'False', ')', ':', 'message', '=', 'storage', '.', 'SUCCESS', 'logging', '.', 'info', '(', '"Attempting to get status on header for {} {}"', '.', 'format', '(', 'expnum', ',', 'ccd', ')', ')', 'if', 'storage', '.', 'get_status', '(', 'task', ',', 'prefix', ',', 'expnum', ',', 'version', ',', 'ccd', ')', 'and', 'not', 'force', ':', 'logging', '.', 'info', '(', '"{} completed successfully for {} {} {} {}"', '.', 'format', '(', 'task', ',', 'prefix', ',', 'expnum', ',', 'version', ',', 'ccd', ')', ')', 'return', 'message', 'with', 'storage', '.', 'LoggingManager', '(', 'task', ',', 'prefix', ',', 'expnum', ',', 'ccd', ',', 'version', ',', 'dry_run', ')', ':', 'try', ':', 'logging', '.', 'info', '(', '"Building a mopheader "', ')', 'if', 'not', 'storage', '.', 'get_status', '(', 'dependency', ',', 'prefix', ',', 'expnum', ',', '"p"', ',', '36', ')', 'and', 'not', 'ignore_dependency', ':', 'raise', 'IOError', '(', '"{} not yet run for {}"', '.', 'format', '(', 'dependency', ',', 'expnum', ')', ')', '# confirm destination directory exists.', 'destdir', '=', 'os', '.', 'path', '.', 'dirname', '(', 'storage', '.', 'dbimages_uri', '(', 'expnum', ',', 'ccd', ',', 'prefix', '=', 'prefix', ',', 'version', '=', 'version', ',', 'ext', '=', "'fits'", ')', ')', 'if', 'not', 'dry_run', ':', 'storage', '.', 'mkdir', '(', 'destdir', ')', '# get image from the vospace storage area', 'logging', '.', 'info', '(', '"Retrieving image from VOSpace"', ')', 'filename', '=', 'storage', '.', 'get_image', '(', 'expnum', ',', 'ccd', ',', 'version', '=', 'version', ',', 'prefix', '=', 'prefix', ')', '# launch the stepZjmp program ', 'logging', '.', 'info', '(', '"Launching stepZ on %s %d"', '%', '(', 'expnum', ',', 'ccd', ')', ')', 'expname', '=', 'os', '.', 'path', '.', 'basename', '(', 'filename', ')', '.', 'strip', '(', "'.fits'", ')', 'logging', '.', 'info', '(', 'util', '.', 'exec_prog', '(', '[', "'stepZjmp'", ',', "'-f'", ',', 'expname', ']', ')', ')', '# if this is a dry run then we are finished', 'if', 'dry_run', ':', 'return', 'message', '# push the header to the VOSpace', 'mopheader_filename', '=', 'expname', '+', '".mopheader"', 'destination', '=', 'storage', '.', 'dbimages_uri', '(', 'expnum', ',', 'ccd', ',', 'prefix', '=', 'prefix', ',', 'version', '=', 'version', ',', 'ext', '=', "'mopheader'", ')', 'source', '=', 'mopheader_filename', 'count', '=', '0', 'with', 'open', '(', 'source', ',', "'r'", ')', ':', 'while', 'True', ':', 'try', ':', 'count', '+=', '1', 'logging', '.', 'info', '(', '"Attempt {} to copy {} -> {}"', '.', 'format', '(', 'count', ',', 'source', ',', 'destination', ')', ')', 'storage', '.', 'copy', '(', 'source', ',', 'destination', ')', 'break', 'except', 'Exception', 'as', 'ex', ':', 'if', 'count', '>', '10', ':', 'raise', 'ex', 'logging', '.', 'info', '(', 'message', ')', 'except', 'CalledProcessError', 'as', 'cpe', ':', 'message', '=', 'str', '(', 'cpe', '.', 'output', ')', 'logging', '.', 'error', '(', 'message', ')', 'except', 'Exception', 'as', 'e', ':', 'message', '=', 'str', '(', 'e', ')', 'logging', '.', 'error', '(', 'message', ')', 'if', 'not', 'dry_run', ':', 'storage', '.', 'set_status', '(', 'task', ',', 'prefix', ',', 'expnum', ',', 'version', '=', 'version', ',', 'ccd', '=', 'ccd', ',', 'status', '=', 'message', ')', 'return', 'message'] | Run the OSSOS mopheader script. 
| ['Run', 'the', 'OSSOS', 'mopheader', 'script', '.'] | train | https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/pipeline/mk_mopheader.py#L39-L100 |

5,004 | tango-controls/pytango | tango/pytango_pprint.py | __struct_params_s | python | train | https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/pytango_pprint.py#L59-L62

def __struct_params_s(obj, separator=', ', f=repr, fmt='%s = %s'):
    """method wrapper for printing all elements of a struct"""
    s = separator.join([__single_param(obj, n, f, fmt) for n in dir(obj) if __inc_param(obj, n)])
    return s
"""method wrapper for printing all elements of a struct"""
s = separator.join([__single_param(obj, n, f, fmt) for n in dir(obj) if __inc_param(obj, n)])
return s | ['def', '__struct_params_s', '(', 'obj', ',', 'separator', '=', "', '", ',', 'f', '=', 'repr', ',', 'fmt', '=', "'%s = %s'", ')', ':', 's', '=', 'separator', '.', 'join', '(', '[', '__single_param', '(', 'obj', ',', 'n', ',', 'f', ',', 'fmt', ')', 'for', 'n', 'in', 'dir', '(', 'obj', ')', 'if', '__inc_param', '(', 'obj', ',', 'n', ')', ']', ')', 'return', 's'] | method wrapper for printing all elements of a struct | ['method', 'wrapper', 'for', 'printing', 'all', 'elements', 'of', 'a', 'struct'] | train | https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/pytango_pprint.py#L59-L62 |

5,005 | pyvisa/pyvisa-py | pyvisa-py/protocols/usbutil.py | find_interfaces | python | train | https://github.com/pyvisa/pyvisa-py/blob/dfbd509409675b59d71bb741cd72c5f256efd4cd/pyvisa-py/protocols/usbutil.py#L200-L214

def find_interfaces(device, **kwargs):
    """
    :param device:
    :return:
    """
    interfaces = []
    try:
        for cfg in device:
            try:
                interfaces.extend(usb_find_desc(cfg, find_all=True, **kwargs))
            except:
                pass
    except:
        pass
    return interfaces
"""
:param device:
:return:
"""
interfaces = []
try:
for cfg in device:
try:
interfaces.extend(usb_find_desc(cfg, find_all=True, **kwargs))
except:
pass
except:
pass
return interfaces | ['def', 'find_interfaces', '(', 'device', ',', '*', '*', 'kwargs', ')', ':', 'interfaces', '=', '[', ']', 'try', ':', 'for', 'cfg', 'in', 'device', ':', 'try', ':', 'interfaces', '.', 'extend', '(', 'usb_find_desc', '(', 'cfg', ',', 'find_all', '=', 'True', ',', '*', '*', 'kwargs', ')', ')', 'except', ':', 'pass', 'except', ':', 'pass', 'return', 'interfaces'] | :param device:
:return: | [':', 'param', 'device', ':', ':', 'return', ':'] | train | https://github.com/pyvisa/pyvisa-py/blob/dfbd509409675b59d71bb741cd72c5f256efd4cd/pyvisa-py/protocols/usbutil.py#L200-L214 |
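
A hedged usage sketch for `find_interfaces`, assuming PyUSB is installed and a device is attached. The keyword filters are forwarded to `usb_find_desc`, so descriptor fields such as `bInterfaceClass` are assumed to be valid match keys; the vendor and product IDs below are hypothetical:

```python
import usb.core

# Find an attached device by (hypothetical) vendor/product ID.
dev = usb.core.find(idVendor=0x0957, idProduct=0x1755)
if dev is not None:
    # Keep only interfaces whose descriptor matches the given fields
    # (0xFE is the application-specific class used by USBTMC instruments).
    interfaces = find_interfaces(dev, bInterfaceClass=0xFE)
    print(interfaces)
```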

5,006 | swevm/scaleio-py | scaleiopy/scaleio.py | ScaleIO.storage_pools | python | train | https://github.com/swevm/scaleio-py/blob/d043a0137cb925987fd5c895a3210968ce1d9028/scaleiopy/scaleio.py#L144-L155

def storage_pools(self):
    """
    Returns a `list` of all the `System` objects to the cluster. Updates every time - no caching.
    :return: a `list` of all the `System` objects known to the cluster.
    :rtype: list
    """
    self.connection._check_login()
    response = self.connection._do_get("{}/{}".format(self.connection._api_url, "types/StoragePool/instances")).json()
    all_storage_pools = []
    for storage_pool_object in response:
        all_storage_pools.append(SIO_Storage_Pool.from_dict(storage_pool_object))
    return all_storage_pools
"""
Returns a `list` of all the `System` objects to the cluster. Updates every time - no caching.
:return: a `list` of all the `System` objects known to the cluster.
:rtype: list
"""
self.connection._check_login()
response = self.connection._do_get("{}/{}".format(self.connection._api_url, "types/StoragePool/instances")).json()
all_storage_pools = []
for storage_pool_object in response:
all_storage_pools.append(SIO_Storage_Pool.from_dict(storage_pool_object))
return all_storage_pools | ['def', 'storage_pools', '(', 'self', ')', ':', 'self', '.', 'connection', '.', '_check_login', '(', ')', 'response', '=', 'self', '.', 'connection', '.', '_do_get', '(', '"{}/{}"', '.', 'format', '(', 'self', '.', 'connection', '.', '_api_url', ',', '"types/StoragePool/instances"', ')', ')', '.', 'json', '(', ')', 'all_storage_pools', '=', '[', ']', 'for', 'storage_pool_object', 'in', 'response', ':', 'all_storage_pools', '.', 'append', '(', 'SIO_Storage_Pool', '.', 'from_dict', '(', 'storage_pool_object', ')', ')', 'return', 'all_storage_pools'] | Returns a `list` of all the `System` objects to the cluster. Updates every time - no caching.
:return: a `list` of all the `System` objects known to the cluster.
:rtype: list | ['Returns', 'a', 'list', 'of', 'all', 'the', 'System', 'objects', 'to', 'the', 'cluster', '.', 'Updates', 'every', 'time', '-', 'no', 'caching', '.', ':', 'return', ':', 'a', 'list', 'of', 'all', 'the', 'System', 'objects', 'known', 'to', 'the', 'cluster', '.', ':', 'rtype', ':', 'list'] | train | https://github.com/swevm/scaleio-py/blob/d043a0137cb925987fd5c895a3210968ce1d9028/scaleiopy/scaleio.py#L144-L155 |

5,007 | androguard/androguard | androguard/core/bytecodes/apk.py | show_Certificate | python | train | https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/apk.py#L2068-L2081

def show_Certificate(cert, short=False):
    """
    Print Fingerprints, Issuer and Subject of an X509 Certificate.
    :param cert: X509 Certificate to print
    :param short: Print in shortform for DN (Default: False)
    :type cert: :class:`asn1crypto.x509.Certificate`
    :type short: Boolean
    """
    print("SHA1 Fingerprint: {}".format(cert.sha1_fingerprint))
    print("SHA256 Fingerprint: {}".format(cert.sha256_fingerprint))
    print("Issuer: {}".format(get_certificate_name_string(cert.issuer.native, short=short)))
    print("Subject: {}".format(get_certificate_name_string(cert.subject.native, short=short)))
"""
Print Fingerprints, Issuer and Subject of an X509 Certificate.
:param cert: X509 Certificate to print
:param short: Print in shortform for DN (Default: False)
:type cert: :class:`asn1crypto.x509.Certificate`
:type short: Boolean
"""
print("SHA1 Fingerprint: {}".format(cert.sha1_fingerprint))
print("SHA256 Fingerprint: {}".format(cert.sha256_fingerprint))
print("Issuer: {}".format(get_certificate_name_string(cert.issuer.native, short=short)))
print("Subject: {}".format(get_certificate_name_string(cert.subject.native, short=short))) | ['def', 'show_Certificate', '(', 'cert', ',', 'short', '=', 'False', ')', ':', 'print', '(', '"SHA1 Fingerprint: {}"', '.', 'format', '(', 'cert', '.', 'sha1_fingerprint', ')', ')', 'print', '(', '"SHA256 Fingerprint: {}"', '.', 'format', '(', 'cert', '.', 'sha256_fingerprint', ')', ')', 'print', '(', '"Issuer: {}"', '.', 'format', '(', 'get_certificate_name_string', '(', 'cert', '.', 'issuer', '.', 'native', ',', 'short', '=', 'short', ')', ')', ')', 'print', '(', '"Subject: {}"', '.', 'format', '(', 'get_certificate_name_string', '(', 'cert', '.', 'subject', '.', 'native', ',', 'short', '=', 'short', ')', ')', ')'] | Print Fingerprints, Issuer and Subject of an X509 Certificate.
:param cert: X509 Certificate to print
:param short: Print in shortform for DN (Default: False)
:type cert: :class:`asn1crypto.x509.Certificate`
:type short: Boolean | ['Print', 'Fingerprints', 'Issuer', 'and', 'Subject', 'of', 'an', 'X509', 'Certificate', '.'] | train | https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/apk.py#L2068-L2081 |
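
A small sketch of calling `show_Certificate` on a DER-encoded certificate loaded with asn1crypto (the file name is hypothetical):

```python
from asn1crypto import x509

with open("cert.der", "rb") as f:  # hypothetical DER-encoded certificate file
    cert = x509.Certificate.load(f.read())

show_Certificate(cert, short=True)  # prints fingerprints, issuer and subject
```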

5,008 | data-8/datascience | datascience/tables.py | _fill_with_zeros | python | train | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2796-L2821

def _fill_with_zeros(partials, rows, zero=None):
    """Find and return values from rows for all partials. In cases where no
    row matches a partial, zero is assumed as value. For a row, the first
    (n-1) fields are assumed to be the partial, and the last field,
    the value, where n is the total number of fields in each row. It is
    assumed that there is a unique row for each partial.
    partials -- single field values or tuples of field values
    rows -- table rows
    zero -- value used when no rows match a particular partial
    """
    assert len(rows) > 0
    if not _is_non_string_iterable(partials):
        # Convert partials to tuple for comparison against row slice later
        partials = [(partial,) for partial in partials]
    # Construct mapping of partials to values in rows
    mapping = {}
    for row in rows:
        mapping[tuple(row[:-1])] = row[-1]
    if zero is None:
        # Try to infer zero from given row values.
        array = np.array(tuple(mapping.values()))
        if len(array.shape) == 1:
            zero = array.dtype.type()
    return np.array([mapping.get(partial, zero) for partial in partials])
"""Find and return values from rows for all partials. In cases where no
row matches a partial, zero is assumed as value. For a row, the first
(n-1) fields are assumed to be the partial, and the last field,
the value, where n is the total number of fields in each row. It is
assumed that there is a unique row for each partial.
partials -- single field values or tuples of field values
rows -- table rows
zero -- value used when no rows match a particular partial
"""
assert len(rows) > 0
if not _is_non_string_iterable(partials):
# Convert partials to tuple for comparison against row slice later
partials = [(partial,) for partial in partials]
# Construct mapping of partials to values in rows
mapping = {}
for row in rows:
mapping[tuple(row[:-1])] = row[-1]
if zero is None:
# Try to infer zero from given row values.
array = np.array(tuple(mapping.values()))
if len(array.shape) == 1:
zero = array.dtype.type()
return np.array([mapping.get(partial, zero) for partial in partials]) | ['def', '_fill_with_zeros', '(', 'partials', ',', 'rows', ',', 'zero', '=', 'None', ')', ':', 'assert', 'len', '(', 'rows', ')', '>', '0', 'if', 'not', '_is_non_string_iterable', '(', 'partials', ')', ':', '# Convert partials to tuple for comparison against row slice later', 'partials', '=', '[', '(', 'partial', ',', ')', 'for', 'partial', 'in', 'partials', ']', '# Construct mapping of partials to values in rows', 'mapping', '=', '{', '}', 'for', 'row', 'in', 'rows', ':', 'mapping', '[', 'tuple', '(', 'row', '[', ':', '-', '1', ']', ')', ']', '=', 'row', '[', '-', '1', ']', 'if', 'zero', 'is', 'None', ':', '# Try to infer zero from given row values.', 'array', '=', 'np', '.', 'array', '(', 'tuple', '(', 'mapping', '.', 'values', '(', ')', ')', ')', 'if', 'len', '(', 'array', '.', 'shape', ')', '==', '1', ':', 'zero', '=', 'array', '.', 'dtype', '.', 'type', '(', ')', 'return', 'np', '.', 'array', '(', '[', 'mapping', '.', 'get', '(', 'partial', ',', 'zero', ')', 'for', 'partial', 'in', 'partials', ']', ')'] | Find and return values from rows for all partials. In cases where no
row matches a partial, zero is assumed as value. For a row, the first
(n-1) fields are assumed to be the partial, and the last field,
the value, where n is the total number of fields in each row. It is
assumed that there is a unique row for each partial.
partials -- single field values or tuples of field values
rows -- table rows
zero -- value used when no rows match a particular partial | ['Find', 'and', 'return', 'values', 'from', 'rows', 'for', 'all', 'partials', '.', 'In', 'cases', 'where', 'no', 'row', 'matches', 'a', 'partial', 'zero', 'is', 'assumed', 'as', 'value', '.', 'For', 'a', 'row', 'the', 'first', '(', 'n', '-', '1', ')', 'fields', 'are', 'assumed', 'to', 'be', 'the', 'partial', 'and', 'the', 'last', 'field', 'the', 'value', 'where', 'n', 'is', 'the', 'total', 'number', 'of', 'fields', 'in', 'each', 'row', '.', 'It', 'is', 'assumed', 'that', 'there', 'is', 'a', 'unique', 'row', 'for', 'each', 'partial', '.', 'partials', '--', 'single', 'field', 'values', 'or', 'tuples', 'of', 'field', 'values', 'rows', '--', 'table', 'rows', 'zero', '--', 'value', 'used', 'when', 'no', 'rows', 'match', 'a', 'particular', 'partial'] | train | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2796-L2821 |

5,009 | ecell/ecell4 | ecell4/util/viz.py | plot_movie | python | train | https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L85-L103

def plot_movie(*args, **kwargs):
    """
    Generate a movie from received instances of World and show them.
    See also plot_movie_with_elegans and plot_movie_with_matplotlib.
    Parameters
    ----------
    worlds : list of World
        Worlds to render.
    interactive : bool, default True
        Choose a visualizer. If False, show the plot with matplotlib.
        If True (only available on IPython Notebook), show it with elegans.
    """
    interactive = kwargs.pop('interactive', False)
    if interactive:
        plot_movie_with_elegans(*args, **kwargs)
    else:
        plot_movie_with_matplotlib(*args, **kwargs)
"""
Generate a movie from received instances of World and show them.
See also plot_movie_with_elegans and plot_movie_with_matplotlib.
Parameters
----------
worlds : list of World
Worlds to render.
interactive : bool, default True
Choose a visualizer. If False, show the plot with matplotlib.
If True (only available on IPython Notebook), show it with elegans.
"""
interactive = kwargs.pop('interactive', False)
if interactive:
plot_movie_with_elegans(*args, **kwargs)
else:
plot_movie_with_matplotlib(*args, **kwargs) | ['def', 'plot_movie', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'interactive', '=', 'kwargs', '.', 'pop', '(', "'interactive'", ',', 'False', ')', 'if', 'interactive', ':', 'plot_movie_with_elegans', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'else', ':', 'plot_movie_with_matplotlib', '(', '*', 'args', ',', '*', '*', 'kwargs', ')'] | Generate a movie from received instances of World and show them.
See also plot_movie_with_elegans and plot_movie_with_matplotlib.
Parameters
----------
worlds : list of World
Worlds to render.
interactive : bool, default True
Choose a visualizer. If False, show the plot with matplotlib.
If True (only available on IPython Notebook), show it with elegans. | ['Generate', 'a', 'movie', 'from', 'received', 'instances', 'of', 'World', 'and', 'show', 'them', '.', 'See', 'also', 'plot_movie_with_elegans', 'and', 'plot_movie_with_matplotlib', '.'] | train | https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L85-L103 |

5,010 | MatterMiners/cobald | cobald/daemon/runners/asyncio_runner.py | AsyncioRunner._start_payloads | python | train | https://github.com/MatterMiners/cobald/blob/264138de4382d1c9b53fabcbc6660e10b33a914d/cobald/daemon/runners/asyncio_runner.py#L42-L49

async def _start_payloads(self):
    """Start all queued payloads"""
    with self._lock:
        for coroutine in self._payloads:
            task = self.event_loop.create_task(coroutine())
            self._tasks.add(task)
        self._payloads.clear()
    await asyncio.sleep(0)
"""Start all queued payloads"""
with self._lock:
for coroutine in self._payloads:
task = self.event_loop.create_task(coroutine())
self._tasks.add(task)
self._payloads.clear()
await asyncio.sleep(0) | ['async', 'def', '_start_payloads', '(', 'self', ')', ':', 'with', 'self', '.', '_lock', ':', 'for', 'coroutine', 'in', 'self', '.', '_payloads', ':', 'task', '=', 'self', '.', 'event_loop', '.', 'create_task', '(', 'coroutine', '(', ')', ')', 'self', '.', '_tasks', '.', 'add', '(', 'task', ')', 'self', '.', '_payloads', '.', 'clear', '(', ')', 'await', 'asyncio', '.', 'sleep', '(', '0', ')'] | Start all queued payloads | ['Start', 'all', 'queued', 'payloads'] | train | https://github.com/MatterMiners/cobald/blob/264138de4382d1c9b53fabcbc6660e10b33a914d/cobald/daemon/runners/asyncio_runner.py#L42-L49 |

5,011 | fprimex/zdesk | zdesk/zdesk_api.py | ZendeskAPI.channels_voice_greeting_create | python | train | https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L634-L637

def channels_voice_greeting_create(self, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/voice-api/greetings#create-greetings"
    api_path = "/api/v2/channels/voice/greetings.json"
    return self.call(api_path, method="POST", data=data, **kwargs)
"https://developer.zendesk.com/rest_api/docs/voice-api/greetings#create-greetings"
api_path = "/api/v2/channels/voice/greetings.json"
return self.call(api_path, method="POST", data=data, **kwargs) | ['def', 'channels_voice_greeting_create', '(', 'self', ',', 'data', ',', '*', '*', 'kwargs', ')', ':', 'api_path', '=', '"/api/v2/channels/voice/greetings.json"', 'return', 'self', '.', 'call', '(', 'api_path', ',', 'method', '=', '"POST"', ',', 'data', '=', 'data', ',', '*', '*', 'kwargs', ')'] | https://developer.zendesk.com/rest_api/docs/voice-api/greetings#create-greetings | ['https', ':', '//', 'developer', '.', 'zendesk', '.', 'com', '/', 'rest_api', '/', 'docs', '/', 'voice', '-', 'api', '/', 'greetings#create', '-', 'greetings'] | train | https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L634-L637 |
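
A hedged usage sketch for this wrapper. The `Zendesk` constructor arguments and the payload fields below are assumptions; the authoritative payload schema is the Zendesk endpoint linked in the docstring above:

```python
from zdesk import Zendesk

# Assumed constructor form -- check zdesk's README for the exact arguments.
zd = Zendesk("https://example.zendesk.com", "agent@example.com", "API_TOKEN", True)

# Hypothetical greeting payload; see the linked Zendesk docs for the real schema.
data = {"greeting": {"name": "Office hours", "category_id": 1}}
result = zd.channels_voice_greeting_create(data=data)
```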

5,012 | Sanji-IO/sanji | sanji/message.py | Message.match | python | train | https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/message.py#L167-L184

def match(self, route):
    """
    Match input route and return new Message instance
    with parsed content
    """
    _resource = trim_resource(self.resource)
    self.method = self.method.lower()
    resource_match = route.resource_regex.search(_resource)
    if resource_match is None:
        return None
    # build params and querystring
    params = resource_match.groupdict()
    querystring = params.pop("querystring", "")
    setattr(self, "param", params)
    setattr(self, "query", parse_querystring(querystring))
    return copy.deepcopy(self)
"""
Match input route and return new Message instance
with parsed content
"""
_resource = trim_resource(self.resource)
self.method = self.method.lower()
resource_match = route.resource_regex.search(_resource)
if resource_match is None:
return None
# build params and querystring
params = resource_match.groupdict()
querystring = params.pop("querystring", "")
setattr(self, "param", params)
setattr(self, "query", parse_querystring(querystring))
return copy.deepcopy(self) | ['def', 'match', '(', 'self', ',', 'route', ')', ':', '_resource', '=', 'trim_resource', '(', 'self', '.', 'resource', ')', 'self', '.', 'method', '=', 'self', '.', 'method', '.', 'lower', '(', ')', 'resource_match', '=', 'route', '.', 'resource_regex', '.', 'search', '(', '_resource', ')', 'if', 'resource_match', 'is', 'None', ':', 'return', 'None', '# build params and querystring', 'params', '=', 'resource_match', '.', 'groupdict', '(', ')', 'querystring', '=', 'params', '.', 'pop', '(', '"querystring"', ',', '""', ')', 'setattr', '(', 'self', ',', '"param"', ',', 'params', ')', 'setattr', '(', 'self', ',', '"query"', ',', 'parse_querystring', '(', 'querystring', ')', ')', 'return', 'copy', '.', 'deepcopy', '(', 'self', ')'] | Match input route and return new Message instance
with parsed content | ['Match', 'input', 'route', 'and', 'return', 'new', 'Message', 'instance', 'with', 'parsed', 'content'] | train | https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/message.py#L167-L184 |

5,013 | saltstack/salt | salt/utils/stringutils.py | check_whitelist_blacklist | python | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/stringutils.py#L393-L454

def check_whitelist_blacklist(value, whitelist=None, blacklist=None):
    '''
    Check a whitelist and/or blacklist to see if the value matches it.

    value
        The item to check the whitelist and/or blacklist against.

    whitelist
        The list of items that are white-listed. If ``value`` is found
        in the whitelist, then the function returns ``True``. Otherwise,
        it returns ``False``.

    blacklist
        The list of items that are black-listed. If ``value`` is found
        in the blacklist, then the function returns ``False``. Otherwise,
        it returns ``True``.

    If both a whitelist and a blacklist are provided, value membership
    in the blacklist will be examined first. If the value is not found
    in the blacklist, then the whitelist is checked. If the value isn't
    found in the whitelist, the function returns ``False``.
    '''
    # Normalize the input so that we have a list
    if blacklist:
        if isinstance(blacklist, six.string_types):
            blacklist = [blacklist]
        if not hasattr(blacklist, '__iter__'):
            raise TypeError(
                'Expecting iterable blacklist, but got {0} ({1})'.format(
                    type(blacklist).__name__, blacklist
                )
            )
    else:
        blacklist = []

    if whitelist:
        if isinstance(whitelist, six.string_types):
            whitelist = [whitelist]
        if not hasattr(whitelist, '__iter__'):
            raise TypeError(
                'Expecting iterable whitelist, but got {0} ({1})'.format(
                    type(whitelist).__name__, whitelist
                )
            )
    else:
        whitelist = []

    _blacklist_match = any(expr_match(value, expr) for expr in blacklist)
    _whitelist_match = any(expr_match(value, expr) for expr in whitelist)

    if blacklist and not whitelist:
        # Blacklist but no whitelist
        return not _blacklist_match
    elif whitelist and not blacklist:
        # Whitelist but no blacklist
        return _whitelist_match
    elif blacklist and whitelist:
        # Both whitelist and blacklist
        return not _blacklist_match and _whitelist_match
    else:
        # No blacklist or whitelist passed
        return True
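
A few illustrative calls (a sketch; `expr_match` in the same module is assumed to accept shell-glob patterns like those below):

```python
# Blacklist only: anything not blacklisted passes.
check_whitelist_blacklist("web1", blacklist=["db*"])    # True
check_whitelist_blacklist("db3", blacklist=["db*"])     # False

# Whitelist only: only whitelisted values pass.
check_whitelist_blacklist("web1", whitelist=["web*"])   # True
check_whitelist_blacklist("mail1", whitelist=["web*"])  # False

# Both: the value must be whitelisted and not blacklisted.
check_whitelist_blacklist("web5", whitelist=["web*"], blacklist=["web5"])  # False
```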

5,014 | iotile/coretools | iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/PathList.py | PathListCache._PathList_key | python | train | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/PathList.py#L177-L192

def _PathList_key(self, pathlist):
    """
    Returns the key for memoization of PathLists.
    Note that we want this to be pretty quick, so we don't completely
    canonicalize all forms of the same list. For example,
    'dir1:$ROOT/dir2' and ['$ROOT/dir1', 'dir'] may logically
    represent the same list if you're executing from $ROOT, but
    we're not going to bother splitting strings into path elements,
    or massaging strings into Nodes, to identify that equivalence.
    We just want to eliminate obvious redundancy from the normal
    case of re-using exactly the same cloned value for a path.
    """
    if SCons.Util.is_Sequence(pathlist):
        pathlist = tuple(SCons.Util.flatten(pathlist))
    return pathlist
"""
Returns the key for memoization of PathLists.
Note that we want this to be pretty quick, so we don't completely
canonicalize all forms of the same list. For example,
'dir1:$ROOT/dir2' and ['$ROOT/dir1', 'dir'] may logically
represent the same list if you're executing from $ROOT, but
we're not going to bother splitting strings into path elements,
or massaging strings into Nodes, to identify that equivalence.
We just want to eliminate obvious redundancy from the normal
case of re-using exactly the same cloned value for a path.
"""
if SCons.Util.is_Sequence(pathlist):
pathlist = tuple(SCons.Util.flatten(pathlist))
return pathlist | ['def', '_PathList_key', '(', 'self', ',', 'pathlist', ')', ':', 'if', 'SCons', '.', 'Util', '.', 'is_Sequence', '(', 'pathlist', ')', ':', 'pathlist', '=', 'tuple', '(', 'SCons', '.', 'Util', '.', 'flatten', '(', 'pathlist', ')', ')', 'return', 'pathlist'] | Returns the key for memoization of PathLists.
Note that we want this to be pretty quick, so we don't completely
canonicalize all forms of the same list. For example,
'dir1:$ROOT/dir2' and ['$ROOT/dir1', 'dir'] may logically
represent the same list if you're executing from $ROOT, but
we're not going to bother splitting strings into path elements,
or massaging strings into Nodes, to identify that equivalence.
We just want to eliminate obvious redundancy from the normal
case of re-using exactly the same cloned value for a path. | ['Returns', 'the', 'key', 'for', 'memoization', 'of', 'PathLists', '.'] | train | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/PathList.py#L177-L192 |

5,015 | bitcraze/crazyflie-lib-python | cflib/utils/callbacks.py | Caller.add_callback | python | train | https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/utils/callbacks.py#L42-L45

def add_callback(self, cb):
    """ Register cb as a new callback. Will not register duplicates. """
    if ((cb in self.callbacks) is False):
        self.callbacks.append(cb)
""" Register cb as a new callback. Will not register duplicates. """
if ((cb in self.callbacks) is False):
self.callbacks.append(cb) | ['def', 'add_callback', '(', 'self', ',', 'cb', ')', ':', 'if', '(', '(', 'cb', 'in', 'self', '.', 'callbacks', ')', 'is', 'False', ')', ':', 'self', '.', 'callbacks', '.', 'append', '(', 'cb', ')'] | Register cb as a new callback. Will not register duplicates. | ['Register', 'cb', 'as', 'a', 'new', 'callback', '.', 'Will', 'not', 'register', 'duplicates', '.'] | train | https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/utils/callbacks.py#L42-L45 |
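
A minimal sketch of registering a callback on a `Caller`. Only `add_callback` is shown above, so the no-argument constructor and the `call()` trigger method are assumptions based on the usual cflib pattern:

```python
from cflib.utils.callbacks import Caller

connected = Caller()

def on_connected(uri):
    print("Connected to", uri)

connected.add_callback(on_connected)
connected.add_callback(on_connected)   # duplicate registration is silently ignored
connected.call("radio://0/80/2M")      # assumed trigger; invokes on_connected once
```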

5,016 | DAI-Lab/Copulas | copulas/bivariate/gumbel.py | Gumbel.probability_density | python | train | https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/bivariate/gumbel.py#L20-L35

def probability_density(self, X):
    """Compute density function for given copula family."""
    self.check_fit()
    U, V = self.split_matrix(X)
    if self.theta == 1:
        return np.multiply(U, V)
    else:
        a = np.power(np.multiply(U, V), -1)
        tmp = np.power(-np.log(U), self.theta) + np.power(-np.log(V), self.theta)
        b = np.power(tmp, -2 + 2.0 / self.theta)
        c = np.power(np.multiply(np.log(U), np.log(V)), self.theta - 1)
        d = 1 + (self.theta - 1) * np.power(tmp, -1.0 / self.theta)
        return self.cumulative_distribution(X) * a * b * c * d
"""Compute density function for given copula family."""
self.check_fit()
U, V = self.split_matrix(X)
if self.theta == 1:
return np.multiply(U, V)
else:
a = np.power(np.multiply(U, V), -1)
tmp = np.power(-np.log(U), self.theta) + np.power(-np.log(V), self.theta)
b = np.power(tmp, -2 + 2.0 / self.theta)
c = np.power(np.multiply(np.log(U), np.log(V)), self.theta - 1)
d = 1 + (self.theta - 1) * np.power(tmp, -1.0 / self.theta)
return self.cumulative_distribution(X) * a * b * c * d | ['def', 'probability_density', '(', 'self', ',', 'X', ')', ':', 'self', '.', 'check_fit', '(', ')', 'U', ',', 'V', '=', 'self', '.', 'split_matrix', '(', 'X', ')', 'if', 'self', '.', 'theta', '==', '1', ':', 'return', 'np', '.', 'multiply', '(', 'U', ',', 'V', ')', 'else', ':', 'a', '=', 'np', '.', 'power', '(', 'np', '.', 'multiply', '(', 'U', ',', 'V', ')', ',', '-', '1', ')', 'tmp', '=', 'np', '.', 'power', '(', '-', 'np', '.', 'log', '(', 'U', ')', ',', 'self', '.', 'theta', ')', '+', 'np', '.', 'power', '(', '-', 'np', '.', 'log', '(', 'V', ')', ',', 'self', '.', 'theta', ')', 'b', '=', 'np', '.', 'power', '(', 'tmp', ',', '-', '2', '+', '2.0', '/', 'self', '.', 'theta', ')', 'c', '=', 'np', '.', 'power', '(', 'np', '.', 'multiply', '(', 'np', '.', 'log', '(', 'U', ')', ',', 'np', '.', 'log', '(', 'V', ')', ')', ',', 'self', '.', 'theta', '-', '1', ')', 'd', '=', '1', '+', '(', 'self', '.', 'theta', '-', '1', ')', '*', 'np', '.', 'power', '(', 'tmp', ',', '-', '1.0', '/', 'self', '.', 'theta', ')', 'return', 'self', '.', 'cumulative_distribution', '(', 'X', ')', '*', 'a', '*', 'b', '*', 'c', '*', 'd'] | Compute density function for given copula family. | ['Compute', 'density', 'function', 'for', 'given', 'copula', 'family', '.'] | train | https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/bivariate/gumbel.py#L20-L35 |
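
For reference, the quantity returned by the `theta != 1` branch above corresponds to the following expression, where C(u, v) is the value computed by `cumulative_distribution` (this is a restatement of the code, not an addition to it):

```latex
c(u, v) = C(u, v) \cdot \frac{1}{u v}
          \cdot t^{-2 + 2/\theta}
          \cdot (\ln u \, \ln v)^{\theta - 1}
          \cdot \left(1 + (\theta - 1)\, t^{-1/\theta}\right),
\qquad t = (-\ln u)^{\theta} + (-\ln v)^{\theta}
```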

5,017 | napalm-automation/napalm-logs | napalm_logs/utils/__init__.py | ClientAuth.reconnect | python | train | https://github.com/napalm-automation/napalm-logs/blob/4b89100a6e4f994aa004f3ea42a06dc803a7ccb0/napalm_logs/utils/__init__.py#L94-L104

def reconnect(self):
    '''
    Try to reconnect and re-authenticate with the server.
    '''
    log.debug('Closing the SSH socket.')
    try:
        self.ssl_skt.close()
    except socket.error:
        log.error('The socket seems to be closed already.')
    log.debug('Re-opening the SSL socket.')
    self.authenticate()

5,018 | ajenhl/tacl | tacl/results.py | Results.add_label_work_count | python | train | https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/results.py#L88-L116

def add_label_work_count(self):
    """Adds to each result row a count of the number of works within the
    label contain that n-gram.
    This counts works that have at least one witness carrying the
    n-gram.
    This correctly handles cases where an n-gram has only zero
    counts for a given work (possible with zero-fill followed by
    filtering by maximum count).
    """
    self._logger.info('Adding label work count')

    def add_label_text_count(df):
        work_maxima = df.groupby(constants.WORK_FIELDNAME,
                                 sort=False).any()
        df.loc[:, constants.LABEL_WORK_COUNT_FIELDNAME] = work_maxima[
            constants.COUNT_FIELDNAME].sum()
        return df

    if self._matches.empty:
        self._matches[constants.LABEL_WORK_COUNT_FIELDNAME] = 0
    else:
        self._matches.loc[:, constants.LABEL_WORK_COUNT_FIELDNAME] = 0
        self._matches = self._matches.groupby(
            [constants.LABEL_FIELDNAME, constants.NGRAM_FIELDNAME],
            sort=False).apply(add_label_text_count)
    self._logger.info('Finished adding label work count')
"""Adds to each result row a count of the number of works within the
label contain that n-gram.
This counts works that have at least one witness carrying the
n-gram.
This correctly handles cases where an n-gram has only zero
counts for a given work (possible with zero-fill followed by
filtering by maximum count).
"""
self._logger.info('Adding label work count')
def add_label_text_count(df):
work_maxima = df.groupby(constants.WORK_FIELDNAME,
sort=False).any()
df.loc[:, constants.LABEL_WORK_COUNT_FIELDNAME] = work_maxima[
constants.COUNT_FIELDNAME].sum()
return df
if self._matches.empty:
self._matches[constants.LABEL_WORK_COUNT_FIELDNAME] = 0
else:
self._matches.loc[:, constants.LABEL_WORK_COUNT_FIELDNAME] = 0
self._matches = self._matches.groupby(
[constants.LABEL_FIELDNAME, constants.NGRAM_FIELDNAME],
sort=False).apply(add_label_text_count)
self._logger.info('Finished adding label work count') | ['def', 'add_label_work_count', '(', 'self', ')', ':', 'self', '.', '_logger', '.', 'info', '(', "'Adding label work count'", ')', 'def', 'add_label_text_count', '(', 'df', ')', ':', 'work_maxima', '=', 'df', '.', 'groupby', '(', 'constants', '.', 'WORK_FIELDNAME', ',', 'sort', '=', 'False', ')', '.', 'any', '(', ')', 'df', '.', 'loc', '[', ':', ',', 'constants', '.', 'LABEL_WORK_COUNT_FIELDNAME', ']', '=', 'work_maxima', '[', 'constants', '.', 'COUNT_FIELDNAME', ']', '.', 'sum', '(', ')', 'return', 'df', 'if', 'self', '.', '_matches', '.', 'empty', ':', 'self', '.', '_matches', '[', 'constants', '.', 'LABEL_WORK_COUNT_FIELDNAME', ']', '=', '0', 'else', ':', 'self', '.', '_matches', '.', 'loc', '[', ':', ',', 'constants', '.', 'LABEL_WORK_COUNT_FIELDNAME', ']', '=', '0', 'self', '.', '_matches', '=', 'self', '.', '_matches', '.', 'groupby', '(', '[', 'constants', '.', 'LABEL_FIELDNAME', ',', 'constants', '.', 'NGRAM_FIELDNAME', ']', ',', 'sort', '=', 'False', ')', '.', 'apply', '(', 'add_label_text_count', ')', 'self', '.', '_logger', '.', 'info', '(', "'Finished adding label work count'", ')'] | Adds to each result row a count of the number of works within the
label contain that n-gram.
This counts works that have at least one witness carrying the
n-gram.
This correctly handles cases where an n-gram has only zero
counts for a given work (possible with zero-fill followed by
filtering by maximum count). | ['Adds', 'to', 'each', 'result', 'row', 'a', 'count', 'of', 'the', 'number', 'of', 'works', 'within', 'the', 'label', 'contain', 'that', 'n', '-', 'gram', '.'] | train | https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/results.py#L88-L116 |

5,019 | beetbox/pylastfp | lastfp/__init__.py | match | python | train | https://github.com/beetbox/pylastfp/blob/55edfad638bb1c849cbbd7406355385e8b1ea3d8/lastfp/__init__.py#L206-L218

def match(apikey, pcmiter, samplerate, duration, channels=2, metadata=None):
    """Given a PCM data stream, perform fingerprinting and look up the
    metadata for the audio. pcmiter must be an iterable of blocks of
    PCM data (buffers). duration is the total length of the track in
    seconds (an integer). metadata may be a dictionary containing
    existing metadata for the file (optional keys: "artist", "album",
    and "title"). Returns a list of track info dictionaries
    describing the candidate metadata returned by Last.fm. Raises a
    subclass of FingerprintError if any step fails.
    """
    fpdata = extract(pcmiter, samplerate, channels)
    fpid = fpid_query(duration, fpdata, metadata)
    return metadata_query(fpid, apikey)
"""Given a PCM data stream, perform fingerprinting and look up the
metadata for the audio. pcmiter must be an iterable of blocks of
PCM data (buffers). duration is the total length of the track in
seconds (an integer). metadata may be a dictionary containing
existing metadata for the file (optional keys: "artist", "album",
and "title"). Returns a list of track info dictionaries
describing the candidate metadata returned by Last.fm. Raises a
subclass of FingerprintError if any step fails.
"""
fpdata = extract(pcmiter, samplerate, channels)
fpid = fpid_query(duration, fpdata, metadata)
return metadata_query(fpid, apikey) | ['def', 'match', '(', 'apikey', ',', 'pcmiter', ',', 'samplerate', ',', 'duration', ',', 'channels', '=', '2', ',', 'metadata', '=', 'None', ')', ':', 'fpdata', '=', 'extract', '(', 'pcmiter', ',', 'samplerate', ',', 'channels', ')', 'fpid', '=', 'fpid_query', '(', 'duration', ',', 'fpdata', ',', 'metadata', ')', 'return', 'metadata_query', '(', 'fpid', ',', 'apikey', ')'] | Given a PCM data stream, perform fingerprinting and look up the
metadata for the audio. pcmiter must be an iterable of blocks of
PCM data (buffers). duration is the total length of the track in
seconds (an integer). metadata may be a dictionary containing
existing metadata for the file (optional keys: "artist", "album",
and "title"). Returns a list of track info dictionaries
describing the candidate metadata returned by Last.fm. Raises a
subclass of FingerprintError if any step fails. | ['Given', 'a', 'PCM', 'data', 'stream', 'perform', 'fingerprinting', 'and', 'look', 'up', 'the', 'metadata', 'for', 'the', 'audio', '.', 'pcmiter', 'must', 'be', 'an', 'iterable', 'of', 'blocks', 'of', 'PCM', 'data', '(', 'buffers', ')', '.', 'duration', 'is', 'the', 'total', 'length', 'of', 'the', 'track', 'in', 'seconds', '(', 'an', 'integer', ')', '.', 'metadata', 'may', 'be', 'a', 'dictionary', 'containing', 'existing', 'metadata', 'for', 'the', 'file', '(', 'optional', 'keys', ':', 'artist', 'album', 'and', 'title', ')', '.', 'Returns', 'a', 'list', 'of', 'track', 'info', 'dictionaries', 'describing', 'the', 'candidate', 'metadata', 'returned', 'by', 'Last', '.', 'fm', '.', 'Raises', 'a', 'subclass', 'of', 'FingerprintError', 'if', 'any', 'step', 'fails', '.'] | train | https://github.com/beetbox/pylastfp/blob/55edfad638bb1c849cbbd7406355385e8b1ea3d8/lastfp/__init__.py#L206-L218 |
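A hedged usage sketch for the `match` entry above. The decoder is deliberately left out because pylastfp only fingerprints PCM it is handed; the API key, metadata, and the `read_pcm` name in the comment are placeholders, not part of the library.

```python
import lastfp  # the pylastfp package installs as `lastfp`

def identify(pcm_blocks, samplerate, duration_seconds, api_key):
    """Return Last.fm's candidate metadata for already-decoded PCM audio.
    pcm_blocks must be an iterable of raw PCM buffers, duration_seconds an int."""
    return lastfp.match(api_key, pcm_blocks, samplerate, duration_seconds,
                        channels=2, metadata={"artist": "Unknown", "title": "Unknown"})

# identify(read_pcm("song.mp3"), 44100, 215, "YOUR-LASTFM-API-KEY")
# where read_pcm is whatever decoder you already use (ffmpeg, GStreamer, ...).
```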
5,020 | estnltk/estnltk | estnltk/wordnet/wn.py | Synset._min_depth | def _min_depth(self):
"""Finds minimum path length from the root.
Notes
-----
Internal method. Do not call directly.
Returns
-------
int
Minimum path length from the root.
"""
if "min_depth" in self.__dict__:
return self.__dict__["min_depth"]
min_depth = 0
hypernyms = self.hypernyms()
if hypernyms:
min_depth = 1 + min(h._min_depth() for h in hypernyms)
self.__dict__["min_depth"] = min_depth
return min_depth | python | def _min_depth(self):
"""Finds minimum path length from the root.
Notes
-----
Internal method. Do not call directly.
Returns
-------
int
Minimum path length from the root.
"""
if "min_depth" in self.__dict__:
return self.__dict__["min_depth"]
min_depth = 0
hypernyms = self.hypernyms()
if hypernyms:
min_depth = 1 + min(h._min_depth() for h in hypernyms)
self.__dict__["min_depth"] = min_depth
return min_depth | ['def', '_min_depth', '(', 'self', ')', ':', 'if', '"min_depth"', 'in', 'self', '.', '__dict__', ':', 'return', 'self', '.', '__dict__', '[', '"min_depth"', ']', 'min_depth', '=', '0', 'hypernyms', '=', 'self', '.', 'hypernyms', '(', ')', 'if', 'hypernyms', ':', 'min_depth', '=', '1', '+', 'min', '(', 'h', '.', '_min_depth', '(', ')', 'for', 'h', 'in', 'hypernyms', ')', 'self', '.', '__dict__', '[', '"min_depth"', ']', '=', 'min_depth', 'return', 'min_depth'] | Finds minimum path length from the root.
Notes
-----
Internal method. Do not call directly.
Returns
-------
int
Minimum path length from the root. | ['Finds', 'minimum', 'path', 'length', 'from', 'the', 'root', '.'] | train | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L451-L473 |
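The `_min_depth` entry above is a memoised recursion: a root has depth 0, otherwise the depth is one more than the shallowest hypernym. The same shape on a plain dictionary taxonomy (illustrative data, not Estonian WordNet):

```python
hypernyms = {          # child -> parents ("is-a" links)
    "entity": [],
    "animal": ["entity"],
    "dog": ["animal"],
    "pet": ["entity"],
    "poodle": ["dog", "pet"],
}
_cache = {}

def min_depth(synset):
    """Shortest hypernym-path length from synset up to a root."""
    if synset in _cache:
        return _cache[synset]
    parents = hypernyms.get(synset, [])
    depth = 1 + min(min_depth(p) for p in parents) if parents else 0
    _cache[synset] = depth
    return depth

print(min_depth("poodle"))  # 2, via the shorter poodle -> pet -> entity path
```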
5,021 | clalancette/pycdlib | pycdlib/rockridge.py | RRPNRecord.new | def new(self, dev_t_high, dev_t_low):
# type: (int, int) -> None
'''
Create a new Rock Ridge POSIX device number record.
Parameters:
dev_t_high - The high-order 32-bits of the device number.
dev_t_low - The low-order 32-bits of the device number.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('PN record already initialized!')
self.dev_t_high = dev_t_high
self.dev_t_low = dev_t_low
self._initialized = True | python | def new(self, dev_t_high, dev_t_low):
# type: (int, int) -> None
'''
Create a new Rock Ridge POSIX device number record.
Parameters:
dev_t_high - The high-order 32-bits of the device number.
dev_t_low - The low-order 32-bits of the device number.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('PN record already initialized!')
self.dev_t_high = dev_t_high
self.dev_t_low = dev_t_low
self._initialized = True | ['def', 'new', '(', 'self', ',', 'dev_t_high', ',', 'dev_t_low', ')', ':', '# type: (int, int) -> None', 'if', 'self', '.', '_initialized', ':', 'raise', 'pycdlibexception', '.', 'PyCdlibInternalError', '(', "'PN record already initialized!'", ')', 'self', '.', 'dev_t_high', '=', 'dev_t_high', 'self', '.', 'dev_t_low', '=', 'dev_t_low', 'self', '.', '_initialized', '=', 'True'] | Create a new Rock Ridge POSIX device number record.
Parameters:
dev_t_high - The high-order 32-bits of the device number.
dev_t_low - The low-order 32-bits of the device number.
Returns:
Nothing. | ['Create', 'a', 'new', 'Rock', 'Ridge', 'POSIX', 'device', 'number', 'record', '.'] | train | https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/rockridge.py#L781-L798 |
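The PN record above stores a POSIX device number as two 32-bit halves. A small sketch of the split/recombine arithmetic a caller would do around `new` (the sample value is arbitrary, not a real device number):

```python
def split_dev_t(dev_t):
    """High and low 32-bit halves of a 64-bit device number."""
    return (dev_t >> 32) & 0xFFFFFFFF, dev_t & 0xFFFFFFFF

def join_dev_t(high, low):
    """Inverse of split_dev_t."""
    return (high << 32) | low

dev = 0x1234ABCD5678EF01
high, low = split_dev_t(dev)
assert join_dev_t(high, low) == dev
print(hex(high), hex(low))  # 0x1234abcd 0x5678ef01
```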
5,022 | flatangle/flatlib | flatlib/dignities/accidental.py | AccidentalDignity.inSignJoy | def inSignJoy(self):
""" Returns if the object is in its sign of joy. """
return props.object.signJoy[self.obj.id] == self.obj.sign | python | def inSignJoy(self):
""" Returns if the object is in its sign of joy. """
return props.object.signJoy[self.obj.id] == self.obj.sign | ['def', 'inSignJoy', '(', 'self', ')', ':', 'return', 'props', '.', 'object', '.', 'signJoy', '[', 'self', '.', 'obj', '.', 'id', ']', '==', 'self', '.', 'obj', '.', 'sign'] | Returns if the object is in its sign of joy. | ['Returns', 'if', 'the', 'object', 'is', 'in', 'its', 'sign', 'of', 'joy', '.'] | train | https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/dignities/accidental.py#L215-L217 |
5,023 | lxc/python2-lxc | lxc/__init__.py | Container.set_config_item | def set_config_item(self, key, value):
"""
Set a config key to a provided value.
The value can be a list for the keys supporting multiple values.
"""
try:
old_value = self.get_config_item(key)
except KeyError:
old_value = None
# Get everything to unicode with python2
if isinstance(value, str):
value = value.decode()
elif isinstance(value, list):
for i in range(len(value)):
if isinstance(value[i], str):
value[i] = value[i].decode()
# Check if it's a list
def set_key(key, value):
self.clear_config_item(key)
if isinstance(value, list):
for entry in value:
if not _lxc.Container.set_config_item(self, key, entry):
return False
else:
_lxc.Container.set_config_item(self, key, value)
set_key(key, value)
new_value = self.get_config_item(key)
# loglevel is special and won't match the string we set
if key == "lxc.loglevel":
new_value = value
if (isinstance(value, unicode) and isinstance(new_value, unicode) and
value == new_value):
return True
elif (isinstance(value, list) and isinstance(new_value, list) and
set(value) == set(new_value)):
return True
elif (isinstance(value, unicode) and isinstance(new_value, list) and
set([value]) == set(new_value)):
return True
elif old_value:
set_key(key, old_value)
return False
else:
self.clear_config_item(key)
return False | python | def set_config_item(self, key, value):
"""
Set a config key to a provided value.
The value can be a list for the keys supporting multiple values.
"""
try:
old_value = self.get_config_item(key)
except KeyError:
old_value = None
# Get everything to unicode with python2
if isinstance(value, str):
value = value.decode()
elif isinstance(value, list):
for i in range(len(value)):
if isinstance(value[i], str):
value[i] = value[i].decode()
# Check if it's a list
def set_key(key, value):
self.clear_config_item(key)
if isinstance(value, list):
for entry in value:
if not _lxc.Container.set_config_item(self, key, entry):
return False
else:
_lxc.Container.set_config_item(self, key, value)
set_key(key, value)
new_value = self.get_config_item(key)
# loglevel is special and won't match the string we set
if key == "lxc.loglevel":
new_value = value
if (isinstance(value, unicode) and isinstance(new_value, unicode) and
value == new_value):
return True
elif (isinstance(value, list) and isinstance(new_value, list) and
set(value) == set(new_value)):
return True
elif (isinstance(value, unicode) and isinstance(new_value, list) and
set([value]) == set(new_value)):
return True
elif old_value:
set_key(key, old_value)
return False
else:
self.clear_config_item(key)
return False | ['def', 'set_config_item', '(', 'self', ',', 'key', ',', 'value', ')', ':', 'try', ':', 'old_value', '=', 'self', '.', 'get_config_item', '(', 'key', ')', 'except', 'KeyError', ':', 'old_value', '=', 'None', '# Get everything to unicode with python2', 'if', 'isinstance', '(', 'value', ',', 'str', ')', ':', 'value', '=', 'value', '.', 'decode', '(', ')', 'elif', 'isinstance', '(', 'value', ',', 'list', ')', ':', 'for', 'i', 'in', 'range', '(', 'len', '(', 'value', ')', ')', ':', 'if', 'isinstance', '(', 'value', '[', 'i', ']', ',', 'str', ')', ':', 'value', '[', 'i', ']', '=', 'value', '[', 'i', ']', '.', 'decode', '(', ')', "# Check if it's a list", 'def', 'set_key', '(', 'key', ',', 'value', ')', ':', 'self', '.', 'clear_config_item', '(', 'key', ')', 'if', 'isinstance', '(', 'value', ',', 'list', ')', ':', 'for', 'entry', 'in', 'value', ':', 'if', 'not', '_lxc', '.', 'Container', '.', 'set_config_item', '(', 'self', ',', 'key', ',', 'entry', ')', ':', 'return', 'False', 'else', ':', '_lxc', '.', 'Container', '.', 'set_config_item', '(', 'self', ',', 'key', ',', 'value', ')', 'set_key', '(', 'key', ',', 'value', ')', 'new_value', '=', 'self', '.', 'get_config_item', '(', 'key', ')', "# loglevel is special and won't match the string we set", 'if', 'key', '==', '"lxc.loglevel"', ':', 'new_value', '=', 'value', 'if', '(', 'isinstance', '(', 'value', ',', 'unicode', ')', 'and', 'isinstance', '(', 'new_value', ',', 'unicode', ')', 'and', 'value', '==', 'new_value', ')', ':', 'return', 'True', 'elif', '(', 'isinstance', '(', 'value', ',', 'list', ')', 'and', 'isinstance', '(', 'new_value', ',', 'list', ')', 'and', 'set', '(', 'value', ')', '==', 'set', '(', 'new_value', ')', ')', ':', 'return', 'True', 'elif', '(', 'isinstance', '(', 'value', ',', 'unicode', ')', 'and', 'isinstance', '(', 'new_value', ',', 'list', ')', 'and', 'set', '(', '[', 'value', ']', ')', '==', 'set', '(', 'new_value', ')', ')', ':', 'return', 'True', 'elif', 'old_value', ':', 'set_key', '(', 'key', ',', 'old_value', ')', 'return', 'False', 'else', ':', 'self', '.', 'clear_config_item', '(', 'key', ')', 'return', 'False'] | Set a config key to a provided value.
The value can be a list for the keys supporting multiple values. | ['Set', 'a', 'config', 'key', 'to', 'a', 'provided', 'value', '.', 'The', 'value', 'can', 'be', 'a', 'list', 'for', 'the', 'keys', 'supporting', 'multiple', 'values', '.'] | train | https://github.com/lxc/python2-lxc/blob/b7ec757d2bea1e5787c3e65b1359b8893491ef90/lxc/__init__.py#L364-L413 |
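The rollback logic above hinges on an acceptance test: the new value "took" only if the readback equals the request, where lists are compared as sets and a single string may match a one-entry list. That comparison in isolation, as a plain-Python restatement rather than the lxc binding:

```python
def values_match(requested, stored):
    """True if a readback from the config matches what was requested."""
    if isinstance(requested, str) and isinstance(stored, str):
        return requested == stored
    if isinstance(requested, list) and isinstance(stored, list):
        return set(requested) == set(stored)          # order does not matter
    if isinstance(requested, str) and isinstance(stored, list):
        return {requested} == set(stored)             # single value vs. one-entry key
    return False

assert values_match("web01", "web01")
assert values_match(["a", "b"], ["b", "a"])
assert not values_match("a", ["a", "b"])
```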
5,024 | earwig/mwparserfromhell | mwparserfromhell/nodes/tag.py | Tag.get | def get(self, name):
"""Get the attribute with the given *name*.
The returned object is a :class:`.Attribute` instance. Raises
:exc:`ValueError` if no attribute has this name. Since multiple
attributes can have the same name, we'll return the last match, since
all but the last are ignored by the MediaWiki parser.
"""
for attr in reversed(self.attributes):
if attr.name == name.strip():
return attr
raise ValueError(name) | python | def get(self, name):
"""Get the attribute with the given *name*.
The returned object is a :class:`.Attribute` instance. Raises
:exc:`ValueError` if no attribute has this name. Since multiple
attributes can have the same name, we'll return the last match, since
all but the last are ignored by the MediaWiki parser.
"""
for attr in reversed(self.attributes):
if attr.name == name.strip():
return attr
raise ValueError(name) | ['def', 'get', '(', 'self', ',', 'name', ')', ':', 'for', 'attr', 'in', 'reversed', '(', 'self', '.', 'attributes', ')', ':', 'if', 'attr', '.', 'name', '==', 'name', '.', 'strip', '(', ')', ':', 'return', 'attr', 'raise', 'ValueError', '(', 'name', ')'] | Get the attribute with the given *name*.
The returned object is a :class:`.Attribute` instance. Raises
:exc:`ValueError` if no attribute has this name. Since multiple
attributes can have the same name, we'll return the last match, since
all but the last are ignored by the MediaWiki parser. | ['Get', 'the', 'attribute', 'with', 'the', 'given', '*', 'name', '*', '.'] | train | https://github.com/earwig/mwparserfromhell/blob/98dc30902d35c714a70aca8e6616f49d71cb24cc/mwparserfromhell/nodes/tag.py#L265-L276 |
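A short usage sketch for `Tag.get` above, assuming mwparserfromhell is installed; it exercises the documented Attribute return value and the ValueError raised for a missing attribute name.

```python
import mwparserfromhell

code = mwparserfromhell.parse('<span class="note" id="n1">text</span>')
tag = code.filter_tags()[0]           # the <span> Tag node
print(tag.get("class").value)         # note
try:
    tag.get("style")
except ValueError:
    print("no 'style' attribute on this tag")
```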
5,025 | IdentityPython/pysaml2 | src/saml2/__init__.py | extension_elements_to_elements | def extension_elements_to_elements(extension_elements, schemas):
""" Create a list of elements each one matching one of the
given extension elements. This is of course dependent on the access
to schemas that describe the extension elements.
:param extension_elements: The list of extension elements
:param schemas: Imported Python modules that represent the different
known schemas used for the extension elements
:return: A list of elements, representing the set of extension elements
that was possible to match against a Class in the given schemas.
The elements returned are the native representation of the elements
according to the schemas.
"""
res = []
if isinstance(schemas, list):
pass
elif isinstance(schemas, dict):
schemas = list(schemas.values())
else:
return res
for extension_element in extension_elements:
for schema in schemas:
inst = extension_element_to_element(extension_element,
schema.ELEMENT_FROM_STRING,
schema.NAMESPACE)
if inst:
res.append(inst)
break
return res | python | def extension_elements_to_elements(extension_elements, schemas):
""" Create a list of elements each one matching one of the
given extension elements. This is of course dependent on the access
to schemas that describe the extension elements.
:param extension_elements: The list of extension elements
:param schemas: Imported Python modules that represent the different
known schemas used for the extension elements
:return: A list of elements, representing the set of extension elements
that was possible to match against a Class in the given schemas.
The elements returned are the native representation of the elements
according to the schemas.
"""
res = []
if isinstance(schemas, list):
pass
elif isinstance(schemas, dict):
schemas = list(schemas.values())
else:
return res
for extension_element in extension_elements:
for schema in schemas:
inst = extension_element_to_element(extension_element,
schema.ELEMENT_FROM_STRING,
schema.NAMESPACE)
if inst:
res.append(inst)
break
return res | ['def', 'extension_elements_to_elements', '(', 'extension_elements', ',', 'schemas', ')', ':', 'res', '=', '[', ']', 'if', 'isinstance', '(', 'schemas', ',', 'list', ')', ':', 'pass', 'elif', 'isinstance', '(', 'schemas', ',', 'dict', ')', ':', 'schemas', '=', 'list', '(', 'schemas', '.', 'values', '(', ')', ')', 'else', ':', 'return', 'res', 'for', 'extension_element', 'in', 'extension_elements', ':', 'for', 'schema', 'in', 'schemas', ':', 'inst', '=', 'extension_element_to_element', '(', 'extension_element', ',', 'schema', '.', 'ELEMENT_FROM_STRING', ',', 'schema', '.', 'NAMESPACE', ')', 'if', 'inst', ':', 'res', '.', 'append', '(', 'inst', ')', 'break', 'return', 'res'] | Create a list of elements each one matching one of the
given extension elements. This is of course dependent on the access
to schemas that describe the extension elements.
:param extension_elements: The list of extension elements
:param schemas: Imported Python modules that represent the different
known schemas used for the extension elements
:return: A list of elements, representing the set of extension elements
that was possible to match against a Class in the given schemas.
The elements returned are the native representation of the elements
according to the schemas. | ['Create', 'a', 'list', 'of', 'elements', 'each', 'one', 'matching', 'one', 'of', 'the', 'given', 'extension', 'elements', '.', 'This', 'is', 'of', 'course', 'dependent', 'on', 'the', 'access', 'to', 'schemas', 'that', 'describe', 'the', 'extension', 'elements', '.'] | train | https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/__init__.py#L982-L1013 |
5,026 | oauthlib/oauthlib | oauthlib/oauth1/rfc5849/request_validator.py | RequestValidator.check_access_token | def check_access_token(self, request_token):
"""Checks that the token contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.access_token_length
return (set(request_token) <= self.safe_characters and
lower <= len(request_token) <= upper) | python | def check_access_token(self, request_token):
"""Checks that the token contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.access_token_length
return (set(request_token) <= self.safe_characters and
lower <= len(request_token) <= upper) | ['def', 'check_access_token', '(', 'self', ',', 'request_token', ')', ':', 'lower', ',', 'upper', '=', 'self', '.', 'access_token_length', 'return', '(', 'set', '(', 'request_token', ')', '<=', 'self', '.', 'safe_characters', 'and', 'lower', '<=', 'len', '(', 'request_token', ')', '<=', 'upper', ')'] | Checks that the token contains only safe characters
and is no shorter than lower and no longer than upper. | ['Checks', 'that', 'the', 'token', 'contains', 'only', 'safe', 'characters', 'and', 'is', 'no', 'shorter', 'than', 'lower', 'and', 'no', 'longer', 'than', 'upper', '.'] | train | https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth1/rfc5849/request_validator.py#L174-L180 |
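The check above only combines a character whitelist with a length window, both of which come from the RequestValidator subclass. A self-contained restatement with placeholder bounds (the real `safe_characters` and `access_token_length` values belong to the validator, not to this sketch):

```python
import string

SAFE_CHARACTERS = set(string.ascii_letters + string.digits)  # placeholder whitelist
TOKEN_LENGTH = (20, 30)                                       # placeholder (lower, upper)

def check_access_token(token):
    lower, upper = TOKEN_LENGTH
    return set(token) <= SAFE_CHARACTERS and lower <= len(token) <= upper

print(check_access_token("a" * 25))      # True
print(check_access_token("too short!"))  # False: the length, the space and '!' all fail
```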
5,027 | onnx/onnxmltools | onnxmltools/convert/coreml/shape_calculators/neural_network/Convolution.py | calculate_convolution_output_shapes | def calculate_convolution_output_shapes(operator):
'''
Allowed input/output patterns are
1. [N, C, H, W] ---> [N, C, H', W']
'''
check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
params = operator.raw_operator.convolution
input_shape = operator.inputs[0].type.shape
operator.outputs[0].type.shape = [0, 0, 0, 0] # Initialize output shape. It will be modified below.
output_shape = operator.outputs[0].type.shape
# Adjust N-axis
output_shape[0] = input_shape[0]
# Adjust C-axis
output_shape[1] = params.outputChannels
# Set up default and non-default parameters
dilations = [1, 1]
if len(params.dilationFactor) > 0:
dilations = [params.dilationFactor[0], params.dilationFactor[1]]
kernel_shape = [3, 3]
if len(params.kernelSize) > 0:
kernel_shape = params.kernelSize
strides = [1, 1]
if len(params.stride) > 0:
strides = params.stride
specified_output_shape = [0, 0] # Only used with convolution transpose
if params.isDeconvolution and len(params.outputShape) > 0:
specified_output_shape = list(int(i) for i in params.outputShape)
pad_mode = params.WhichOneof('ConvolutionPaddingType')
if pad_mode == 'valid' and len(params.valid.paddingAmounts.borderAmounts) > 0:
pad_amounts = params.valid.paddingAmounts.borderAmounts
pad_heads = [pad_amounts[0].startEdgeSize, pad_amounts[1].startEdgeSize]
pad_tails = [pad_amounts[0].endEdgeSize, pad_amounts[1].endEdgeSize]
else:
# Padding amounts are useless for same padding and valid padding uses [0, 0] by default.
pad_heads = [0, 0]
pad_tails = [0, 0]
# Adjust H- and W-axes
for i in range(2):
if params.isDeconvolution:
output_shape[i + 2] = calculate_convolution_transpose_1D_output_shape(
input_shape[i + 2], kernel_shape[i], dilations[i], strides[i],
pad_mode, pad_heads[i], pad_tails[i], specified_output_shape[i])
else:
output_shape[i + 2] = calculate_convolution_and_pooling_1D_output_shape(
input_shape[i + 2], kernel_shape[i], dilations[i], strides[i],
pad_mode, pad_heads[i], pad_tails[i]) | python | def calculate_convolution_output_shapes(operator):
'''
Allowed input/output patterns are
1. [N, C, H, W] ---> [N, C, H', W']
'''
check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
params = operator.raw_operator.convolution
input_shape = operator.inputs[0].type.shape
operator.outputs[0].type.shape = [0, 0, 0, 0] # Initialize output shape. It will be modified below.
output_shape = operator.outputs[0].type.shape
# Adjust N-axis
output_shape[0] = input_shape[0]
# Adjust C-axis
output_shape[1] = params.outputChannels
# Set up default and non-default parameters
dilations = [1, 1]
if len(params.dilationFactor) > 0:
dilations = [params.dilationFactor[0], params.dilationFactor[1]]
kernel_shape = [3, 3]
if len(params.kernelSize) > 0:
kernel_shape = params.kernelSize
strides = [1, 1]
if len(params.stride) > 0:
strides = params.stride
specified_output_shape = [0, 0] # Only used with convolution transpose
if params.isDeconvolution and len(params.outputShape) > 0:
specified_output_shape = list(int(i) for i in params.outputShape)
pad_mode = params.WhichOneof('ConvolutionPaddingType')
if pad_mode == 'valid' and len(params.valid.paddingAmounts.borderAmounts) > 0:
pad_amounts = params.valid.paddingAmounts.borderAmounts
pad_heads = [pad_amounts[0].startEdgeSize, pad_amounts[1].startEdgeSize]
pad_tails = [pad_amounts[0].endEdgeSize, pad_amounts[1].endEdgeSize]
else:
# Padding amounts are useless for same padding and valid padding uses [0, 0] by default.
pad_heads = [0, 0]
pad_tails = [0, 0]
# Adjust H- and W-axes
for i in range(2):
if params.isDeconvolution:
output_shape[i + 2] = calculate_convolution_transpose_1D_output_shape(
input_shape[i + 2], kernel_shape[i], dilations[i], strides[i],
pad_mode, pad_heads[i], pad_tails[i], specified_output_shape[i])
else:
output_shape[i + 2] = calculate_convolution_and_pooling_1D_output_shape(
input_shape[i + 2], kernel_shape[i], dilations[i], strides[i],
pad_mode, pad_heads[i], pad_tails[i]) | ['def', 'calculate_convolution_output_shapes', '(', 'operator', ')', ':', 'check_input_and_output_numbers', '(', 'operator', ',', 'input_count_range', '=', '1', ',', 'output_count_range', '=', '1', ')', 'params', '=', 'operator', '.', 'raw_operator', '.', 'convolution', 'input_shape', '=', 'operator', '.', 'inputs', '[', '0', ']', '.', 'type', '.', 'shape', 'operator', '.', 'outputs', '[', '0', ']', '.', 'type', '.', 'shape', '=', '[', '0', ',', '0', ',', '0', ',', '0', ']', '# Initialize output shape. It will be modified below.', 'output_shape', '=', 'operator', '.', 'outputs', '[', '0', ']', '.', 'type', '.', 'shape', '# Adjust N-axis', 'output_shape', '[', '0', ']', '=', 'input_shape', '[', '0', ']', '# Adjust C-axis', 'output_shape', '[', '1', ']', '=', 'params', '.', 'outputChannels', '# Set up default and non-default parameters', 'dilations', '=', '[', '1', ',', '1', ']', 'if', 'len', '(', 'params', '.', 'dilationFactor', ')', '>', '0', ':', 'dilations', '=', '[', 'params', '.', 'dilationFactor', '[', '0', ']', ',', 'params', '.', 'dilationFactor', '[', '1', ']', ']', 'kernel_shape', '=', '[', '3', ',', '3', ']', 'if', 'len', '(', 'params', '.', 'kernelSize', ')', '>', '0', ':', 'kernel_shape', '=', 'params', '.', 'kernelSize', 'strides', '=', '[', '1', ',', '1', ']', 'if', 'len', '(', 'params', '.', 'stride', ')', '>', '0', ':', 'strides', '=', 'params', '.', 'stride', 'specified_output_shape', '=', '[', '0', ',', '0', ']', '# Only used with convolution transpose', 'if', 'params', '.', 'isDeconvolution', 'and', 'len', '(', 'params', '.', 'outputShape', ')', '>', '0', ':', 'specified_output_shape', '=', 'list', '(', 'int', '(', 'i', ')', 'for', 'i', 'in', 'params', '.', 'outputShape', ')', 'pad_mode', '=', 'params', '.', 'WhichOneof', '(', "'ConvolutionPaddingType'", ')', 'if', 'pad_mode', '==', "'valid'", 'and', 'len', '(', 'params', '.', 'valid', '.', 'paddingAmounts', '.', 'borderAmounts', ')', '>', '0', ':', 'pad_amounts', '=', 'params', '.', 'valid', '.', 'paddingAmounts', '.', 'borderAmounts', 'pad_heads', '=', '[', 'pad_amounts', '[', '0', ']', '.', 'startEdgeSize', ',', 'pad_amounts', '[', '1', ']', '.', 'startEdgeSize', ']', 'pad_tails', '=', '[', 'pad_amounts', '[', '0', ']', '.', 'endEdgeSize', ',', 'pad_amounts', '[', '1', ']', '.', 'endEdgeSize', ']', 'else', ':', '# Padding amounts are useless for same padding and valid padding uses [0, 0] by default.', 'pad_heads', '=', '[', '0', ',', '0', ']', 'pad_tails', '=', '[', '0', ',', '0', ']', '# Adjust H- and W-axes', 'for', 'i', 'in', 'range', '(', '2', ')', ':', 'if', 'params', '.', 'isDeconvolution', ':', 'output_shape', '[', 'i', '+', '2', ']', '=', 'calculate_convolution_transpose_1D_output_shape', '(', 'input_shape', '[', 'i', '+', '2', ']', ',', 'kernel_shape', '[', 'i', ']', ',', 'dilations', '[', 'i', ']', ',', 'strides', '[', 'i', ']', ',', 'pad_mode', ',', 'pad_heads', '[', 'i', ']', ',', 'pad_tails', '[', 'i', ']', ',', 'specified_output_shape', '[', 'i', ']', ')', 'else', ':', 'output_shape', '[', 'i', '+', '2', ']', '=', 'calculate_convolution_and_pooling_1D_output_shape', '(', 'input_shape', '[', 'i', '+', '2', ']', ',', 'kernel_shape', '[', 'i', ']', ',', 'dilations', '[', 'i', ']', ',', 'strides', '[', 'i', ']', ',', 'pad_mode', ',', 'pad_heads', '[', 'i', ']', ',', 'pad_tails', '[', 'i', ']', ')'] | Allowed input/output patterns are
1. [N, C, H, W] ---> [N, C, H', W'] | ['Allowed', 'input', '/', 'output', 'patterns', 'are', '1', '.', '[', 'N', 'C', 'H', 'W', ']', '---', '>', '[', 'N', 'C', 'H', 'W', ']'] | train | https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxmltools/convert/coreml/shape_calculators/neural_network/Convolution.py#L53-L104 |
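The per-axis helpers called above are not included in this record, but for the explicit-padding ("valid") case the usual output-length formula per spatial axis is floor((size + pads - dilated_kernel) / stride) + 1. A worked sketch of that ordinary-convolution case only; "same" padding and the transposed variant follow different formulas:

```python
import math

def conv_out_len(size, kernel, dilation, stride, pad_head, pad_tail):
    effective_kernel = dilation * (kernel - 1) + 1
    return math.floor((size + pad_head + pad_tail - effective_kernel) / stride) + 1

# [N, C, H, W] = [1, 3, 224, 224], 3x3 kernel, stride 2, no padding, 64 output channels
h = conv_out_len(224, 3, 1, 2, 0, 0)
w = conv_out_len(224, 3, 1, 2, 0, 0)
print([1, 64, h, w])   # [1, 64, 111, 111]
```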
5,028 | google/openhtf | openhtf/plugs/user_input.py | ConsolePrompt.run | def run(self):
"""Main logic for this thread to execute."""
if platform.system() == 'Windows':
# Windows doesn't support file-like objects for select(), so fall back
# to raw_input().
response = input(''.join((self._message,
os.linesep,
PROMPT)))
self._answered = True
self._callback(response)
return
# First, display the prompt to the console.
console_output.cli_print(self._message, color=self._color,
end=os.linesep, logger=None)
console_output.cli_print(PROMPT, color=self._color, end='', logger=None)
sys.stdout.flush()
# Before reading, clear any lingering buffered terminal input.
termios.tcflush(sys.stdin, termios.TCIFLUSH)
line = ''
while not self._stop_event.is_set():
inputs, _, _ = select.select([sys.stdin], [], [], 0.001)
if sys.stdin in inputs:
new = os.read(sys.stdin.fileno(), 1024)
if not new:
# Hit EOF!
# They hit ^D (to insert EOF). Tell them to hit ^C if they
# want to actually quit.
print('Hit ^C (Ctrl+c) to exit.')
break
line += new.decode('utf-8')
if '\n' in line:
response = line[:line.find('\n')]
self._answered = True
self._callback(response)
return | python | def run(self):
"""Main logic for this thread to execute."""
if platform.system() == 'Windows':
# Windows doesn't support file-like objects for select(), so fall back
# to raw_input().
response = input(''.join((self._message,
os.linesep,
PROMPT)))
self._answered = True
self._callback(response)
return
# First, display the prompt to the console.
console_output.cli_print(self._message, color=self._color,
end=os.linesep, logger=None)
console_output.cli_print(PROMPT, color=self._color, end='', logger=None)
sys.stdout.flush()
# Before reading, clear any lingering buffered terminal input.
termios.tcflush(sys.stdin, termios.TCIFLUSH)
line = ''
while not self._stop_event.is_set():
inputs, _, _ = select.select([sys.stdin], [], [], 0.001)
if sys.stdin in inputs:
new = os.read(sys.stdin.fileno(), 1024)
if not new:
# Hit EOF!
# They hit ^D (to insert EOF). Tell them to hit ^C if they
# want to actually quit.
print('Hit ^C (Ctrl+c) to exit.')
break
line += new.decode('utf-8')
if '\n' in line:
response = line[:line.find('\n')]
self._answered = True
self._callback(response)
return | ['def', 'run', '(', 'self', ')', ':', 'if', 'platform', '.', 'system', '(', ')', '==', "'Windows'", ':', "# Windows doesn't support file-like objects for select(), so fall back", '# to raw_input().', 'response', '=', 'input', '(', "''", '.', 'join', '(', '(', 'self', '.', '_message', ',', 'os', '.', 'linesep', ',', 'PROMPT', ')', ')', ')', 'self', '.', '_answered', '=', 'True', 'self', '.', '_callback', '(', 'response', ')', 'return', '# First, display the prompt to the console.', 'console_output', '.', 'cli_print', '(', 'self', '.', '_message', ',', 'color', '=', 'self', '.', '_color', ',', 'end', '=', 'os', '.', 'linesep', ',', 'logger', '=', 'None', ')', 'console_output', '.', 'cli_print', '(', 'PROMPT', ',', 'color', '=', 'self', '.', '_color', ',', 'end', '=', "''", ',', 'logger', '=', 'None', ')', 'sys', '.', 'stdout', '.', 'flush', '(', ')', '# Before reading, clear any lingering buffered terminal input.', 'termios', '.', 'tcflush', '(', 'sys', '.', 'stdin', ',', 'termios', '.', 'TCIFLUSH', ')', 'line', '=', "''", 'while', 'not', 'self', '.', '_stop_event', '.', 'is_set', '(', ')', ':', 'inputs', ',', '_', ',', '_', '=', 'select', '.', 'select', '(', '[', 'sys', '.', 'stdin', ']', ',', '[', ']', ',', '[', ']', ',', '0.001', ')', 'if', 'sys', '.', 'stdin', 'in', 'inputs', ':', 'new', '=', 'os', '.', 'read', '(', 'sys', '.', 'stdin', '.', 'fileno', '(', ')', ',', '1024', ')', 'if', 'not', 'new', ':', '# Hit EOF!', '# They hit ^D (to insert EOF). Tell them to hit ^C if they', '# want to actually quit.', 'print', '(', "'Hit ^C (Ctrl+c) to exit.'", ')', 'break', 'line', '+=', 'new', '.', 'decode', '(', "'utf-8'", ')', 'if', "'\\n'", 'in', 'line', ':', 'response', '=', 'line', '[', ':', 'line', '.', 'find', '(', "'\\n'", ')', ']', 'self', '.', '_answered', '=', 'True', 'self', '.', '_callback', '(', 'response', ')', 'return'] | Main logic for this thread to execute. | ['Main', 'logic', 'for', 'this', 'thread', 'to', 'execute', '.'] | train | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/user_input.py#L90-L127 |
5,029 | loli/medpy | medpy/features/intensity.py | centerdistance | def centerdistance(image, voxelspacing = None, mask = slice(None)):
r"""
Takes a simple or multi-spectral image and returns its voxel-wise center distance in
mm. A multi-spectral image must be supplied as a list or tuple of its spectra.
Optionally a binary mask can be supplied to select the voxels for which the feature
should be extracted.
The center distance is the exact euclidean distance in mm of each voxels center to
the central point of the overal image volume.
Note that this feature is independent of the actual image content, but depends
solely on its shape. Therefore always a one-dimensional feature is returned, even if
a multi-spectral image has been supplied.
Parameters
----------
image : array_like or list/tuple of array_like
A single image or a list/tuple of images (for multi-spectral case).
voxelspacing : sequence of floats
The side-length of each voxel.
mask : array_like
A binary mask for the image.
Returns
-------
centerdistance : ndarray
The distance of each voxel to the images center.
See Also
--------
centerdistance_xdminus1
"""
if type(image) == tuple or type(image) == list:
image = image[0]
return _extract_feature(_extract_centerdistance, image, mask, voxelspacing = voxelspacing) | python | def centerdistance(image, voxelspacing = None, mask = slice(None)):
r"""
Takes a simple or multi-spectral image and returns its voxel-wise center distance in
mm. A multi-spectral image must be supplied as a list or tuple of its spectra.
Optionally a binary mask can be supplied to select the voxels for which the feature
should be extracted.
The center distance is the exact euclidean distance in mm of each voxels center to
the central point of the overal image volume.
Note that this feature is independent of the actual image content, but depends
solely on its shape. Therefore always a one-dimensional feature is returned, even if
a multi-spectral image has been supplied.
Parameters
----------
image : array_like or list/tuple of array_like
A single image or a list/tuple of images (for multi-spectral case).
voxelspacing : sequence of floats
The side-length of each voxel.
mask : array_like
A binary mask for the image.
Returns
-------
centerdistance : ndarray
The distance of each voxel to the images center.
See Also
--------
centerdistance_xdminus1
"""
if type(image) == tuple or type(image) == list:
image = image[0]
return _extract_feature(_extract_centerdistance, image, mask, voxelspacing = voxelspacing) | ['def', 'centerdistance', '(', 'image', ',', 'voxelspacing', '=', 'None', ',', 'mask', '=', 'slice', '(', 'None', ')', ')', ':', 'if', 'type', '(', 'image', ')', '==', 'tuple', 'or', 'type', '(', 'image', ')', '==', 'list', ':', 'image', '=', 'image', '[', '0', ']', 'return', '_extract_feature', '(', '_extract_centerdistance', ',', 'image', ',', 'mask', ',', 'voxelspacing', '=', 'voxelspacing', ')'] | r"""
Takes a simple or multi-spectral image and returns its voxel-wise center distance in
mm. A multi-spectral image must be supplied as a list or tuple of its spectra.
Optionally a binary mask can be supplied to select the voxels for which the feature
should be extracted.
The center distance is the exact euclidean distance in mm of each voxels center to
the central point of the overal image volume.
Note that this feature is independent of the actual image content, but depends
solely on its shape. Therefore always a one-dimensional feature is returned, even if
a multi-spectral image has been supplied.
Parameters
----------
image : array_like or list/tuple of array_like
A single image or a list/tuple of images (for multi-spectral case).
voxelspacing : sequence of floats
The side-length of each voxel.
mask : array_like
A binary mask for the image.
Returns
-------
centerdistance : ndarray
The distance of each voxel to the images center.
See Also
--------
centerdistance_xdminus1 | ['r', 'Takes', 'a', 'simple', 'or', 'multi', '-', 'spectral', 'image', 'and', 'returns', 'its', 'voxel', '-', 'wise', 'center', 'distance', 'in', 'mm', '.', 'A', 'multi', '-', 'spectral', 'image', 'must', 'be', 'supplied', 'as', 'a', 'list', 'or', 'tuple', 'of', 'its', 'spectra', '.', 'Optionally', 'a', 'binary', 'mask', 'can', 'be', 'supplied', 'to', 'select', 'the', 'voxels', 'for', 'which', 'the', 'feature', 'should', 'be', 'extracted', '.', 'The', 'center', 'distance', 'is', 'the', 'exact', 'euclidean', 'distance', 'in', 'mm', 'of', 'each', 'voxels', 'center', 'to', 'the', 'central', 'point', 'of', 'the', 'overal', 'image', 'volume', '.', 'Note', 'that', 'this', 'feature', 'is', 'independent', 'of', 'the', 'actual', 'image', 'content', 'but', 'depends', 'solely', 'on', 'its', 'shape', '.', 'Therefore', 'always', 'a', 'one', '-', 'dimensional', 'feature', 'is', 'returned', 'even', 'if', 'a', 'multi', '-', 'spectral', 'image', 'has', 'been', 'supplied', '.'] | train | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/intensity.py#L59-L96 |
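The extraction helper itself is not shown in this record; conceptually the feature is the Euclidean distance, scaled by voxel spacing, from every voxel centre to the volume centre. A small NumPy restatement under that assumption:

```python
import numpy as np

def center_distances(shape, voxelspacing=None):
    spacing = np.asarray(voxelspacing if voxelspacing is not None else [1.0] * len(shape), dtype=float)
    grids = np.indices(shape).astype(float)               # one coordinate grid per axis
    center = (np.asarray(shape, dtype=float) - 1) / 2.0
    bshape = (-1,) + (1,) * len(shape)                    # broadcast spacing/centre over the grid
    offsets = (grids - center.reshape(bshape)) * spacing.reshape(bshape)
    return np.sqrt((offsets ** 2).sum(axis=0))

d = center_distances((3, 3), voxelspacing=(2.0, 1.0))
print(d[1, 1], d[0, 0])   # 0.0 and sqrt(2**2 + 1**2) ~ 2.236
```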
5,030 | yahoo/TensorFlowOnSpark | tensorflowonspark/reservation.py | MessageSocket.receive | def receive(self, sock):
"""Receive a message on ``sock``."""
msg = None
data = b''
recv_done = False
recv_len = -1
while not recv_done:
buf = sock.recv(BUFSIZE)
if buf is None or len(buf) == 0:
raise Exception("socket closed")
if recv_len == -1:
recv_len = struct.unpack('>I', buf[:4])[0]
data += buf[4:]
recv_len -= len(data)
else:
data += buf
recv_len -= len(buf)
recv_done = (recv_len == 0)
msg = pickle.loads(data)
return msg | python | def receive(self, sock):
"""Receive a message on ``sock``."""
msg = None
data = b''
recv_done = False
recv_len = -1
while not recv_done:
buf = sock.recv(BUFSIZE)
if buf is None or len(buf) == 0:
raise Exception("socket closed")
if recv_len == -1:
recv_len = struct.unpack('>I', buf[:4])[0]
data += buf[4:]
recv_len -= len(data)
else:
data += buf
recv_len -= len(buf)
recv_done = (recv_len == 0)
msg = pickle.loads(data)
return msg | ['def', 'receive', '(', 'self', ',', 'sock', ')', ':', 'msg', '=', 'None', 'data', '=', "b''", 'recv_done', '=', 'False', 'recv_len', '=', '-', '1', 'while', 'not', 'recv_done', ':', 'buf', '=', 'sock', '.', 'recv', '(', 'BUFSIZE', ')', 'if', 'buf', 'is', 'None', 'or', 'len', '(', 'buf', ')', '==', '0', ':', 'raise', 'Exception', '(', '"socket closed"', ')', 'if', 'recv_len', '==', '-', '1', ':', 'recv_len', '=', 'struct', '.', 'unpack', '(', "'>I'", ',', 'buf', '[', ':', '4', ']', ')', '[', '0', ']', 'data', '+=', 'buf', '[', '4', ':', ']', 'recv_len', '-=', 'len', '(', 'data', ')', 'else', ':', 'data', '+=', 'buf', 'recv_len', '-=', 'len', '(', 'buf', ')', 'recv_done', '=', '(', 'recv_len', '==', '0', ')', 'msg', '=', 'pickle', '.', 'loads', '(', 'data', ')', 'return', 'msg'] | Receive a message on ``sock``. | ['Receive', 'a', 'message', 'on', 'sock', '.'] | train | https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/tensorflowonspark/reservation.py#L69-L89 |
5,031 | OSSOS/MOP | src/ossos/web/web/auth/gms.py | isMember | def isMember(userid, password, group):
"""Test to see if the given userid/password combo is an authenticated member of group.
userid: CADC Username (str)
password: CADC Password (str)
group: CADC GMS group (str)
"""
try:
certfile = getCert(userid, password)
group_url = getGroupsURL(certfile, group)
logging.debug("group url: %s" % ( group_url))
con = httplib.HTTPSConnection(_SERVER,
443,
key_file=certfile.name,
cert_file=certfile.name,
timeout=600)
con.connect()
con.request("GET", group_url)
resp = con.getresponse()
if resp.status == 200:
return True
except Exception as e:
logging.error(str(e))
#logging.debug(str(resp.status))
return False | python | def isMember(userid, password, group):
"""Test to see if the given userid/password combo is an authenticated member of group.
userid: CADC Username (str)
password: CADC Password (str)
group: CADC GMS group (str)
"""
try:
certfile = getCert(userid, password)
group_url = getGroupsURL(certfile, group)
logging.debug("group url: %s" % ( group_url))
con = httplib.HTTPSConnection(_SERVER,
443,
key_file=certfile.name,
cert_file=certfile.name,
timeout=600)
con.connect()
con.request("GET", group_url)
resp = con.getresponse()
if resp.status == 200:
return True
except Exception as e:
logging.error(str(e))
#logging.debug(str(resp.status))
return False | ['def', 'isMember', '(', 'userid', ',', 'password', ',', 'group', ')', ':', 'try', ':', 'certfile', '=', 'getCert', '(', 'userid', ',', 'password', ')', 'group_url', '=', 'getGroupsURL', '(', 'certfile', ',', 'group', ')', 'logging', '.', 'debug', '(', '"group url: %s"', '%', '(', 'group_url', ')', ')', 'con', '=', 'httplib', '.', 'HTTPSConnection', '(', '_SERVER', ',', '443', ',', 'key_file', '=', 'certfile', '.', 'name', ',', 'cert_file', '=', 'certfile', '.', 'name', ',', 'timeout', '=', '600', ')', 'con', '.', 'connect', '(', ')', 'con', '.', 'request', '(', '"GET"', ',', 'group_url', ')', 'resp', '=', 'con', '.', 'getresponse', '(', ')', 'if', 'resp', '.', 'status', '==', '200', ':', 'return', 'True', 'except', 'Exception', 'as', 'e', ':', 'logging', '.', 'error', '(', 'str', '(', 'e', ')', ')', '#logging.debug(str(resp.status))', 'return', 'False'] | Test to see if the given userid/password combo is an authenticated member of group.
userid: CADC Username (str)
password: CADC Password (str)
group: CADC GMS group (str) | ['Test', 'to', 'see', 'if', 'the', 'given', 'userid', '/', 'password', 'combo', 'is', 'an', 'authenticated', 'member', 'of', 'group', '.'] | train | https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/web/web/auth/gms.py#L123-L153 |
5,032 | scrapinghub/flatson | flatson/flatson.py | Flatson.flatten | def flatten(self, obj):
"""Return a list with the field values
"""
return [self._serialize(f, obj) for f in self.fields] | python | def flatten(self, obj):
"""Return a list with the field values
"""
return [self._serialize(f, obj) for f in self.fields] | ['def', 'flatten', '(', 'self', ',', 'obj', ')', ':', 'return', '[', 'self', '.', '_serialize', '(', 'f', ',', 'obj', ')', 'for', 'f', 'in', 'self', '.', 'fields', ']'] | Return a list with the field values | ['Return', 'a', 'list', 'with', 'the', 'field', 'values'] | train | https://github.com/scrapinghub/flatson/blob/dcbcea32ad6d4df1df85fff8366bce40438d469a/flatson/flatson.py#L131-L134 |
5,033 | huyingxi/Synonyms | synonyms/synonyms.py | nearby | def nearby(word):
'''
Nearby word
'''
w = any2unicode(word)
# read from cache
if w in _cache_nearby: return _cache_nearby[w]
words, scores = [], []
try:
for x in _vectors.neighbours(w):
words.append(x[0])
scores.append(x[1])
except: pass # ignore key error, OOV
# put into cache
_cache_nearby[w] = (words, scores)
return words, scores | python | def nearby(word):
'''
Nearby word
'''
w = any2unicode(word)
# read from cache
if w in _cache_nearby: return _cache_nearby[w]
words, scores = [], []
try:
for x in _vectors.neighbours(w):
words.append(x[0])
scores.append(x[1])
except: pass # ignore key error, OOV
# put into cache
_cache_nearby[w] = (words, scores)
return words, scores | ['def', 'nearby', '(', 'word', ')', ':', 'w', '=', 'any2unicode', '(', 'word', ')', '# read from cache', 'if', 'w', 'in', '_cache_nearby', ':', 'return', '_cache_nearby', '[', 'w', ']', 'words', ',', 'scores', '=', '[', ']', ',', '[', ']', 'try', ':', 'for', 'x', 'in', '_vectors', '.', 'neighbours', '(', 'w', ')', ':', 'words', '.', 'append', '(', 'x', '[', '0', ']', ')', 'scores', '.', 'append', '(', 'x', '[', '1', ']', ')', 'except', ':', 'pass', '# ignore key error, OOV', '# put into cache', '_cache_nearby', '[', 'w', ']', '=', '(', 'words', ',', 'scores', ')', 'return', 'words', ',', 'scores'] | Nearby word | ['Nearby', 'word'] | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/synonyms.py#L290-L306 |
5,034 | DataKitchen/DKCloudCommand | DKCloudCommand/cli/__main__.py | kitchen_delete | def kitchen_delete(backend, kitchen):
"""
Provide the name of the kitchen to delete
"""
click.secho('%s - Deleting kitchen %s' % (get_datetime(), kitchen), fg='green')
master = 'master'
if kitchen.lower() != master.lower():
check_and_print(DKCloudCommandRunner.delete_kitchen(backend.dki, kitchen))
else:
raise click.ClickException('Cannot delete the kitchen called %s' % master) | python | def kitchen_delete(backend, kitchen):
"""
Provide the name of the kitchen to delete
"""
click.secho('%s - Deleting kitchen %s' % (get_datetime(), kitchen), fg='green')
master = 'master'
if kitchen.lower() != master.lower():
check_and_print(DKCloudCommandRunner.delete_kitchen(backend.dki, kitchen))
else:
raise click.ClickException('Cannot delete the kitchen called %s' % master) | ['def', 'kitchen_delete', '(', 'backend', ',', 'kitchen', ')', ':', 'click', '.', 'secho', '(', "'%s - Deleting kitchen %s'", '%', '(', 'get_datetime', '(', ')', ',', 'kitchen', ')', ',', 'fg', '=', "'green'", ')', 'master', '=', "'master'", 'if', 'kitchen', '.', 'lower', '(', ')', '!=', 'master', '.', 'lower', '(', ')', ':', 'check_and_print', '(', 'DKCloudCommandRunner', '.', 'delete_kitchen', '(', 'backend', '.', 'dki', ',', 'kitchen', ')', ')', 'else', ':', 'raise', 'click', '.', 'ClickException', '(', "'Cannot delete the kitchen called %s'", '%', 'master', ')'] | Provide the name of the kitchen to delete | ['Provide', 'the', 'name', 'of', 'the', 'kitchen', 'to', 'delete'] | train | https://github.com/DataKitchen/DKCloudCommand/blob/1cf9cb08ab02f063eef6b5c4b327af142991daa3/DKCloudCommand/cli/__main__.py#L369-L378 |
5,035 | pandas-dev/pandas | pandas/core/arrays/base.py | ExtensionArray.searchsorted | def searchsorted(self, value, side="left", sorter=None):
"""
Find indices where elements should be inserted to maintain order.
.. versionadded:: 0.24.0
Find the indices into a sorted array `self` (a) such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Assuming that `self` is sorted:
====== ================================
`side` returned index `i` satisfies
====== ================================
left ``self[i-1] < value <= self[i]``
right ``self[i-1] <= value < self[i]``
====== ================================
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
Returns
-------
array of ints
Array of insertion points with the same shape as `value`.
See Also
--------
numpy.searchsorted : Similar method from NumPy.
"""
# Note: the base tests provided by pandas only test the basics.
# We do not test
# 1. Values outside the range of the `data_for_sorting` fixture
# 2. Values between the values in the `data_for_sorting` fixture
# 3. Missing values.
arr = self.astype(object)
return arr.searchsorted(value, side=side, sorter=sorter) | python | def searchsorted(self, value, side="left", sorter=None):
"""
Find indices where elements should be inserted to maintain order.
.. versionadded:: 0.24.0
Find the indices into a sorted array `self` (a) such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Assuming that `self` is sorted:
====== ================================
`side` returned index `i` satisfies
====== ================================
left ``self[i-1] < value <= self[i]``
right ``self[i-1] <= value < self[i]``
====== ================================
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
Returns
-------
array of ints
Array of insertion points with the same shape as `value`.
See Also
--------
numpy.searchsorted : Similar method from NumPy.
"""
# Note: the base tests provided by pandas only test the basics.
# We do not test
# 1. Values outside the range of the `data_for_sorting` fixture
# 2. Values between the values in the `data_for_sorting` fixture
# 3. Missing values.
arr = self.astype(object)
return arr.searchsorted(value, side=side, sorter=sorter) | ['def', 'searchsorted', '(', 'self', ',', 'value', ',', 'side', '=', '"left"', ',', 'sorter', '=', 'None', ')', ':', '# Note: the base tests provided by pandas only test the basics.', '# We do not test', '# 1. Values outside the range of the `data_for_sorting` fixture', '# 2. Values between the values in the `data_for_sorting` fixture', '# 3. Missing values.', 'arr', '=', 'self', '.', 'astype', '(', 'object', ')', 'return', 'arr', '.', 'searchsorted', '(', 'value', ',', 'side', '=', 'side', ',', 'sorter', '=', 'sorter', ')'] | Find indices where elements should be inserted to maintain order.
.. versionadded:: 0.24.0
Find the indices into a sorted array `self` (a) such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Assuming that `self` is sorted:
====== ================================
`side` returned index `i` satisfies
====== ================================
left ``self[i-1] < value <= self[i]``
right ``self[i-1] <= value < self[i]``
====== ================================
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
Returns
-------
array of ints
Array of insertion points with the same shape as `value`.
See Also
--------
numpy.searchsorted : Similar method from NumPy. | ['Find', 'indices', 'where', 'elements', 'should', 'be', 'inserted', 'to', 'maintain', 'order', '.'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/base.py#L550-L596 |
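Because the base implementation simply defers to an object-dtype ndarray, the left/right semantics in the table above are exactly NumPy's:

```python
import numpy as np

arr = np.array([1, 2, 2, 3])
print(np.searchsorted(arr, 2, side='left'))    # 1: insert before the existing 2s
print(np.searchsorted(arr, 2, side='right'))   # 3: insert after the existing 2s
print(np.searchsorted(arr, [0, 2, 4]))         # [0 1 4]
```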
5,036 | MrYsLab/pymata-aio | pymata_aio/pymata3.py | PyMata3.i2c_read_request | def i2c_read_request(self, address, register, number_of_bytes, read_type,
cb=None, cb_type=None):
"""
This method issues an i2c read request for a single read,continuous
read or a stop, specified by the read_type.
Because different i2c devices return data at different rates,
if a callback is not specified, the user must first call this method
and then call i2c_read_data after waiting for sufficient time for the
i2c device to respond.
Some devices require that transmission be restarted
(e.g. MMA8452Q accelerometer).
Use I2C_READ | I2C_RESTART_TX for those cases.
:param address: i2c device
:param register: i2c register number
:param number_of_bytes: number of bytes to be returned
:param read_type: Constants.I2C_READ, Constants.I2C_READ_CONTINUOUSLY
or Constants.I2C_STOP_READING.
Constants.I2C_RESTART_TX may be OR'ed when required
:param cb: optional callback reference
:param cb_type: Constants.CB_TYPE_DIRECT = direct call or
Constants.CB_TYPE_ASYNCIO = asyncio coroutine
:returns: No return value
"""
task = asyncio.ensure_future(self.core.i2c_read_request(address, register,
number_of_bytes,
read_type,
cb,
cb_type))
self.loop.run_until_complete(task) | python | def i2c_read_request(self, address, register, number_of_bytes, read_type,
cb=None, cb_type=None):
"""
This method issues an i2c read request for a single read,continuous
read or a stop, specified by the read_type.
Because different i2c devices return data at different rates,
if a callback is not specified, the user must first call this method
and then call i2c_read_data after waiting for sufficient time for the
i2c device to respond.
Some devices require that transmission be restarted
(e.g. MMA8452Q accelerometer).
Use I2C_READ | I2C_RESTART_TX for those cases.
:param address: i2c device
:param register: i2c register number
:param number_of_bytes: number of bytes to be returned
:param read_type: Constants.I2C_READ, Constants.I2C_READ_CONTINUOUSLY
or Constants.I2C_STOP_READING.
Constants.I2C_RESTART_TX may be OR'ed when required
:param cb: optional callback reference
:param cb_type: Constants.CB_TYPE_DIRECT = direct call or
Constants.CB_TYPE_ASYNCIO = asyncio coroutine
:returns: No return value
"""
task = asyncio.ensure_future(self.core.i2c_read_request(address, register,
number_of_bytes,
read_type,
cb,
cb_type))
self.loop.run_until_complete(task) | ['def', 'i2c_read_request', '(', 'self', ',', 'address', ',', 'register', ',', 'number_of_bytes', ',', 'read_type', ',', 'cb', '=', 'None', ',', 'cb_type', '=', 'None', ')', ':', 'task', '=', 'asyncio', '.', 'ensure_future', '(', 'self', '.', 'core', '.', 'i2c_read_request', '(', 'address', ',', 'register', ',', 'number_of_bytes', ',', 'read_type', ',', 'cb', ',', 'cb_type', ')', ')', 'self', '.', 'loop', '.', 'run_until_complete', '(', 'task', ')'] | This method issues an i2c read request for a single read,continuous
read or a stop, specified by the read_type.
Because different i2c devices return data at different rates,
if a callback is not specified, the user must first call this method
and then call i2c_read_data after waiting for sufficient time for the
i2c device to respond.
Some devices require that transmission be restarted
(e.g. MMA8452Q accelerometer).
Use I2C_READ | I2C_RESTART_TX for those cases.
:param address: i2c device
:param register: i2c register number
:param number_of_bytes: number of bytes to be returned
:param read_type: Constants.I2C_READ, Constants.I2C_READ_CONTINUOUSLY
or Constants.I2C_STOP_READING.
Constants.I2C_RESTART_TX may be OR'ed when required
:param cb: optional callback reference
:param cb_type: Constants.CB_TYPE_DIRECT = direct call or
Constants.CB_TYPE_ASYNCIO = asyncio coroutine
:returns: No return value | ['This', 'method', 'issues', 'an', 'i2c', 'read', 'request', 'for', 'a', 'single', 'read', 'continuous', 'read', 'or', 'a', 'stop', 'specified', 'by', 'the', 'read_type', '.', 'Because', 'different', 'i2c', 'devices', 'return', 'data', 'at', 'different', 'rates', 'if', 'a', 'callback', 'is', 'not', 'specified', 'the', 'user', 'must', 'first', 'call', 'this', 'method', 'and', 'then', 'call', 'i2c_read_data', 'after', 'waiting', 'for', 'sufficient', 'time', 'for', 'the', 'i2c', 'device', 'to', 'respond', '.', 'Some', 'devices', 'require', 'that', 'transmission', 'be', 'restarted', '(', 'e', '.', 'g', '.', 'MMA8452Q', 'accelerometer', ')', '.', 'Use', 'I2C_READ', '|', 'I2C_RESTART_TX', 'for', 'those', 'cases', '.'] | train | https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata3.py#L403-L439 |
5,037 | jochym/Elastic | elastic/elastic.py | get_elementary_deformations | def get_elementary_deformations(cryst, n=5, d=2):
'''Generate elementary deformations for elastic tensor calculation.
The deformations are created based on the symmetry of the crystal and
    are limited to the non-equivalent axes of the crystal.
:param cryst: Atoms object, basic structure
:param n: integer, number of deformations per non-equivalent axis
:param d: float, size of the maximum deformation in percent and degrees
:returns: list of deformed structures
'''
# Deformation look-up table
# Perhaps the number of deformations for trigonal
    # system could be reduced to [0,3] but better safe than sorry
deform = {
"Cubic": [[0, 3], regular],
"Hexagonal": [[0, 2, 3, 5], hexagonal],
"Trigonal": [[0, 1, 2, 3, 4, 5], trigonal],
"Tetragonal": [[0, 2, 3, 5], tetragonal],
"Orthorombic": [[0, 1, 2, 3, 4, 5], orthorombic],
"Monoclinic": [[0, 1, 2, 3, 4, 5], monoclinic],
"Triclinic": [[0, 1, 2, 3, 4, 5], triclinic]
}
lattyp, brav, sg_name, sg_nr = get_lattice_type(cryst)
# Decide which deformations should be used
axis, symm = deform[brav]
systems = []
for a in axis:
if a < 3: # tetragonal deformation
for dx in linspace(-d, d, n):
systems.append(
get_cart_deformed_cell(cryst, axis=a, size=dx))
elif a < 6: # sheer deformation (skip the zero angle)
for dx in linspace(d/10.0, d, n):
systems.append(
get_cart_deformed_cell(cryst, axis=a, size=dx))
return systems | python | def get_elementary_deformations(cryst, n=5, d=2):
'''Generate elementary deformations for elastic tensor calculation.
The deformations are created based on the symmetry of the crystal and
    are limited to the non-equivalent axes of the crystal.
:param cryst: Atoms object, basic structure
:param n: integer, number of deformations per non-equivalent axis
:param d: float, size of the maximum deformation in percent and degrees
:returns: list of deformed structures
'''
# Deformation look-up table
# Perhaps the number of deformations for trigonal
    # system could be reduced to [0,3] but better safe than sorry
deform = {
"Cubic": [[0, 3], regular],
"Hexagonal": [[0, 2, 3, 5], hexagonal],
"Trigonal": [[0, 1, 2, 3, 4, 5], trigonal],
"Tetragonal": [[0, 2, 3, 5], tetragonal],
"Orthorombic": [[0, 1, 2, 3, 4, 5], orthorombic],
"Monoclinic": [[0, 1, 2, 3, 4, 5], monoclinic],
"Triclinic": [[0, 1, 2, 3, 4, 5], triclinic]
}
lattyp, brav, sg_name, sg_nr = get_lattice_type(cryst)
# Decide which deformations should be used
axis, symm = deform[brav]
systems = []
for a in axis:
if a < 3: # tetragonal deformation
for dx in linspace(-d, d, n):
systems.append(
get_cart_deformed_cell(cryst, axis=a, size=dx))
elif a < 6: # sheer deformation (skip the zero angle)
for dx in linspace(d/10.0, d, n):
systems.append(
get_cart_deformed_cell(cryst, axis=a, size=dx))
return systems | ['def', 'get_elementary_deformations', '(', 'cryst', ',', 'n', '=', '5', ',', 'd', '=', '2', ')', ':', '# Deformation look-up table', '# Perhaps the number of deformations for trigonal', '# system could be reduced to [0,3] but better safe then sorry', 'deform', '=', '{', '"Cubic"', ':', '[', '[', '0', ',', '3', ']', ',', 'regular', ']', ',', '"Hexagonal"', ':', '[', '[', '0', ',', '2', ',', '3', ',', '5', ']', ',', 'hexagonal', ']', ',', '"Trigonal"', ':', '[', '[', '0', ',', '1', ',', '2', ',', '3', ',', '4', ',', '5', ']', ',', 'trigonal', ']', ',', '"Tetragonal"', ':', '[', '[', '0', ',', '2', ',', '3', ',', '5', ']', ',', 'tetragonal', ']', ',', '"Orthorombic"', ':', '[', '[', '0', ',', '1', ',', '2', ',', '3', ',', '4', ',', '5', ']', ',', 'orthorombic', ']', ',', '"Monoclinic"', ':', '[', '[', '0', ',', '1', ',', '2', ',', '3', ',', '4', ',', '5', ']', ',', 'monoclinic', ']', ',', '"Triclinic"', ':', '[', '[', '0', ',', '1', ',', '2', ',', '3', ',', '4', ',', '5', ']', ',', 'triclinic', ']', '}', 'lattyp', ',', 'brav', ',', 'sg_name', ',', 'sg_nr', '=', 'get_lattice_type', '(', 'cryst', ')', '# Decide which deformations should be used', 'axis', ',', 'symm', '=', 'deform', '[', 'brav', ']', 'systems', '=', '[', ']', 'for', 'a', 'in', 'axis', ':', 'if', 'a', '<', '3', ':', '# tetragonal deformation', 'for', 'dx', 'in', 'linspace', '(', '-', 'd', ',', 'd', ',', 'n', ')', ':', 'systems', '.', 'append', '(', 'get_cart_deformed_cell', '(', 'cryst', ',', 'axis', '=', 'a', ',', 'size', '=', 'dx', ')', ')', 'elif', 'a', '<', '6', ':', '# sheer deformation (skip the zero angle)', 'for', 'dx', 'in', 'linspace', '(', 'd', '/', '10.0', ',', 'd', ',', 'n', ')', ':', 'systems', '.', 'append', '(', 'get_cart_deformed_cell', '(', 'cryst', ',', 'axis', '=', 'a', ',', 'size', '=', 'dx', ')', ')', 'return', 'systems'] | Generate elementary deformations for elastic tensor calculation.
The deformations are created based on the symmetry of the crystal and
are limited to the non-equivalent axes of the crystal.
:param cryst: Atoms object, basic structure
:param n: integer, number of deformations per non-equivalent axis
:param d: float, size of the maximum deformation in percent and degrees
:returns: list of deformed structures | ['Generate', 'elementary', 'deformations', 'for', 'elastic', 'tensor', 'calculation', '.'] | train | https://github.com/jochym/Elastic/blob/8daae37d0c48aab8dfb1de2839dab02314817f95/elastic/elastic.py#L443-L482 |
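A minimal usage sketch for get_elementary_deformations, assuming an ASE Atoms crystal and that the function is importable from the elastic package (the import path and the copper structure below are assumptions, not part of the record above):

from ase.build import bulk
from elastic import get_elementary_deformations

cryst = bulk('Cu', 'fcc', a=3.6, cubic=True)            # assumed example structure
systems = get_elementary_deformations(cryst, n=5, d=2)
# For a cubic lattice only axes 0 (axial) and 3 (shear) are inequivalent,
# so roughly 2 * n = 10 deformed copies of cryst are expected here.
print(len(systems))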
5,038 | twilio/twilio-python | twilio/rest/taskrouter/v1/workspace/workflow/workflow_cumulative_statistics.py | WorkflowCumulativeStatisticsInstance._proxy | def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: WorkflowCumulativeStatisticsContext for this WorkflowCumulativeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsContext
"""
if self._context is None:
self._context = WorkflowCumulativeStatisticsContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
workflow_sid=self._solution['workflow_sid'],
)
return self._context | python | def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: WorkflowCumulativeStatisticsContext for this WorkflowCumulativeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsContext
"""
if self._context is None:
self._context = WorkflowCumulativeStatisticsContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
workflow_sid=self._solution['workflow_sid'],
)
return self._context | ['def', '_proxy', '(', 'self', ')', ':', 'if', 'self', '.', '_context', 'is', 'None', ':', 'self', '.', '_context', '=', 'WorkflowCumulativeStatisticsContext', '(', 'self', '.', '_version', ',', 'workspace_sid', '=', 'self', '.', '_solution', '[', "'workspace_sid'", ']', ',', 'workflow_sid', '=', 'self', '.', '_solution', '[', "'workflow_sid'", ']', ',', ')', 'return', 'self', '.', '_context'] | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: WorkflowCumulativeStatisticsContext for this WorkflowCumulativeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsContext | ['Generate', 'an', 'instance', 'context', 'for', 'the', 'instance', 'the', 'context', 'is', 'capable', 'of', 'performing', 'various', 'actions', '.', 'All', 'instance', 'actions', 'are', 'proxied', 'to', 'the', 'context'] | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/workflow/workflow_cumulative_statistics.py#L229-L243 |
5,039 | agoragames/haigha | haigha/channel.py | Channel._flush_pending_events | def _flush_pending_events(self):
'''
Send pending frames that are in the event queue.
'''
while len(self._pending_events) and \
isinstance(self._pending_events[0], Frame):
self._connection.send_frame(self._pending_events.popleft()) | python | def _flush_pending_events(self):
'''
Send pending frames that are in the event queue.
'''
while len(self._pending_events) and \
isinstance(self._pending_events[0], Frame):
self._connection.send_frame(self._pending_events.popleft()) | ['def', '_flush_pending_events', '(', 'self', ')', ':', 'while', 'len', '(', 'self', '.', '_pending_events', ')', 'and', 'isinstance', '(', 'self', '.', '_pending_events', '[', '0', ']', ',', 'Frame', ')', ':', 'self', '.', '_connection', '.', 'send_frame', '(', 'self', '.', '_pending_events', '.', 'popleft', '(', ')', ')'] | Send pending frames that are in the event queue. | ['Send', 'pending', 'frames', 'that', 'are', 'in', 'the', 'event', 'queue', '.'] | train | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/channel.py#L387-L393 |
5,040 | Erotemic/utool | utool/util_dict.py | order_dict_by | def order_dict_by(dict_, key_order):
r"""
Reorders items in a dictionary according to a custom key order
Args:
dict_ (dict_): a dictionary
key_order (list): custom key order
Returns:
OrderedDict: sorted_dict
CommandLine:
python -m utool.util_dict --exec-order_dict_by
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {1: 1, 2: 2, 3: 3, 4: 4}
>>> key_order = [4, 2, 3, 1]
>>> sorted_dict = order_dict_by(dict_, key_order)
>>> result = ('sorted_dict = %s' % (ut.repr4(sorted_dict, nl=False),))
>>> print(result)
>>> assert result == 'sorted_dict = {4: 4, 2: 2, 3: 3, 1: 1}'
"""
dict_keys = set(dict_.keys())
other_keys = dict_keys - set(key_order)
key_order = it.chain(key_order, other_keys)
sorted_dict = OrderedDict(
(key, dict_[key]) for key in key_order if key in dict_keys
)
return sorted_dict | python | def order_dict_by(dict_, key_order):
r"""
Reorders items in a dictionary according to a custom key order
Args:
dict_ (dict_): a dictionary
key_order (list): custom key order
Returns:
OrderedDict: sorted_dict
CommandLine:
python -m utool.util_dict --exec-order_dict_by
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {1: 1, 2: 2, 3: 3, 4: 4}
>>> key_order = [4, 2, 3, 1]
>>> sorted_dict = order_dict_by(dict_, key_order)
>>> result = ('sorted_dict = %s' % (ut.repr4(sorted_dict, nl=False),))
>>> print(result)
>>> assert result == 'sorted_dict = {4: 4, 2: 2, 3: 3, 1: 1}'
"""
dict_keys = set(dict_.keys())
other_keys = dict_keys - set(key_order)
key_order = it.chain(key_order, other_keys)
sorted_dict = OrderedDict(
(key, dict_[key]) for key in key_order if key in dict_keys
)
return sorted_dict | ['def', 'order_dict_by', '(', 'dict_', ',', 'key_order', ')', ':', 'dict_keys', '=', 'set', '(', 'dict_', '.', 'keys', '(', ')', ')', 'other_keys', '=', 'dict_keys', '-', 'set', '(', 'key_order', ')', 'key_order', '=', 'it', '.', 'chain', '(', 'key_order', ',', 'other_keys', ')', 'sorted_dict', '=', 'OrderedDict', '(', '(', 'key', ',', 'dict_', '[', 'key', ']', ')', 'for', 'key', 'in', 'key_order', 'if', 'key', 'in', 'dict_keys', ')', 'return', 'sorted_dict'] | r"""
Reorders items in a dictionary according to a custom key order
Args:
dict_ (dict_): a dictionary
key_order (list): custom key order
Returns:
OrderedDict: sorted_dict
CommandLine:
python -m utool.util_dict --exec-order_dict_by
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {1: 1, 2: 2, 3: 3, 4: 4}
>>> key_order = [4, 2, 3, 1]
>>> sorted_dict = order_dict_by(dict_, key_order)
>>> result = ('sorted_dict = %s' % (ut.repr4(sorted_dict, nl=False),))
>>> print(result)
>>> assert result == 'sorted_dict = {4: 4, 2: 2, 3: 3, 1: 1}' | ['r', 'Reorders', 'items', 'in', 'a', 'dictionary', 'according', 'to', 'a', 'custom', 'key', 'order'] | train | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dict.py#L2006-L2038 |
5,041 | adafruit/Adafruit_CircuitPython_framebuf | adafruit_framebuf.py | MVLSBFormat.fill_rect | def fill_rect(framebuf, x, y, width, height, color):
"""Draw a rectangle at the given location, size and color. The ``fill_rect`` method draws
both the outline and interior."""
# pylint: disable=too-many-arguments
while height > 0:
index = (y >> 3) * framebuf.stride + x
offset = y & 0x07
for w_w in range(width):
framebuf.buf[index + w_w] = (framebuf.buf[index + w_w] & ~(0x01 << offset)) |\
((color != 0) << offset)
y += 1
height -= 1 | python | def fill_rect(framebuf, x, y, width, height, color):
"""Draw a rectangle at the given location, size and color. The ``fill_rect`` method draws
both the outline and interior."""
# pylint: disable=too-many-arguments
while height > 0:
index = (y >> 3) * framebuf.stride + x
offset = y & 0x07
for w_w in range(width):
framebuf.buf[index + w_w] = (framebuf.buf[index + w_w] & ~(0x01 << offset)) |\
((color != 0) << offset)
y += 1
height -= 1 | ['def', 'fill_rect', '(', 'framebuf', ',', 'x', ',', 'y', ',', 'width', ',', 'height', ',', 'color', ')', ':', '# pylint: disable=too-many-arguments', 'while', 'height', '>', '0', ':', 'index', '=', '(', 'y', '>>', '3', ')', '*', 'framebuf', '.', 'stride', '+', 'x', 'offset', '=', 'y', '&', '0x07', 'for', 'w_w', 'in', 'range', '(', 'width', ')', ':', 'framebuf', '.', 'buf', '[', 'index', '+', 'w_w', ']', '=', '(', 'framebuf', '.', 'buf', '[', 'index', '+', 'w_w', ']', '&', '~', '(', '0x01', '<<', 'offset', ')', ')', '|', '(', '(', 'color', '!=', '0', ')', '<<', 'offset', ')', 'y', '+=', '1', 'height', '-=', '1'] | Draw a rectangle at the given location, size and color. The ``fill_rect`` method draws
both the outline and interior. | ['Draw', 'a', 'rectangle', 'at', 'the', 'given', 'location', 'size', 'and', 'color', '.', 'The', 'fill_rect', 'method', 'draws', 'both', 'the', 'outline', 'and', 'interior', '.'] | train | https://github.com/adafruit/Adafruit_CircuitPython_framebuf/blob/b9f62c4b71efa963150f9c5a0284b61c7add9d02/adafruit_framebuf.py#L120-L131 |
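A short worked example of the MVLSB addressing used in fill_rect: each buffer byte holds a vertical strip of 8 pixels, so pixel (x, y) lands in byte (y >> 3) * stride + x at bit y & 0x07 (the display size and coordinates below are illustrative):

width, height = 128, 64        # assumed display size
stride = width                 # MVLSB stride equals the pixel width
x, y = 10, 21
index = (y >> 3) * stride + x  # 2 * 128 + 10 = 266
offset = y & 0x07              # bit 5 of buffer byte 266
print(index, offset)           # 266 5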
5,042 | twisted/txacme | src/txacme/client.py | JWSClient.head | def head(self, url, *args, **kwargs):
"""
Send HEAD request without checking the response.
Note that ``_check_response`` is not called, as there will be no
response body to check.
:param str url: The URL to make the request to.
"""
with LOG_JWS_HEAD().context():
return DeferredContext(
self._send_request(u'HEAD', url, *args, **kwargs)
).addActionFinish() | python | def head(self, url, *args, **kwargs):
"""
Send HEAD request without checking the response.
Note that ``_check_response`` is not called, as there will be no
response body to check.
:param str url: The URL to make the request to.
"""
with LOG_JWS_HEAD().context():
return DeferredContext(
self._send_request(u'HEAD', url, *args, **kwargs)
).addActionFinish() | ['def', 'head', '(', 'self', ',', 'url', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'with', 'LOG_JWS_HEAD', '(', ')', '.', 'context', '(', ')', ':', 'return', 'DeferredContext', '(', 'self', '.', '_send_request', '(', "u'HEAD'", ',', 'url', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ')', '.', 'addActionFinish', '(', ')'] | Send HEAD request without checking the response.
Note that ``_check_response`` is not called, as there will be no
response body to check.
:param str url: The URL to make the request to. | ['Send', 'HEAD', 'request', 'without', 'checking', 'the', 'response', '.'] | train | https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L774-L786 |
5,043 | RJT1990/pyflux | pyflux/tsm.py | TSM.draw_latent_variables | def draw_latent_variables(self, nsims=5000):
""" Draws latent variables from the model (for Bayesian inference)
Parameters
----------
nsims : int
How many draws to take
Returns
----------
- np.ndarray of draws
"""
if self.latent_variables.estimation_method is None:
raise Exception("No latent variables estimated!")
elif self.latent_variables.estimation_method == 'BBVI':
return np.array([i.q.draw_variable_local(size=nsims) for i in self.latent_variables.z_list])
elif self.latent_variables.estimation_method == "M-H":
chain = np.array([self.latent_variables.z_list[i].sample for i in range(len(self.latent_variables.z_list))])
return chain[:,np.random.choice(chain.shape[1], nsims)]
else:
raise Exception("No latent variables estimated through Bayesian inference") | python | def draw_latent_variables(self, nsims=5000):
""" Draws latent variables from the model (for Bayesian inference)
Parameters
----------
nsims : int
How many draws to take
Returns
----------
- np.ndarray of draws
"""
if self.latent_variables.estimation_method is None:
raise Exception("No latent variables estimated!")
elif self.latent_variables.estimation_method == 'BBVI':
return np.array([i.q.draw_variable_local(size=nsims) for i in self.latent_variables.z_list])
elif self.latent_variables.estimation_method == "M-H":
chain = np.array([self.latent_variables.z_list[i].sample for i in range(len(self.latent_variables.z_list))])
return chain[:,np.random.choice(chain.shape[1], nsims)]
else:
raise Exception("No latent variables estimated through Bayesian inference") | ['def', 'draw_latent_variables', '(', 'self', ',', 'nsims', '=', '5000', ')', ':', 'if', 'self', '.', 'latent_variables', '.', 'estimation_method', 'is', 'None', ':', 'raise', 'Exception', '(', '"No latent variables estimated!"', ')', 'elif', 'self', '.', 'latent_variables', '.', 'estimation_method', '==', "'BBVI'", ':', 'return', 'np', '.', 'array', '(', '[', 'i', '.', 'q', '.', 'draw_variable_local', '(', 'size', '=', 'nsims', ')', 'for', 'i', 'in', 'self', '.', 'latent_variables', '.', 'z_list', ']', ')', 'elif', 'self', '.', 'latent_variables', '.', 'estimation_method', '==', '"M-H"', ':', 'chain', '=', 'np', '.', 'array', '(', '[', 'self', '.', 'latent_variables', '.', 'z_list', '[', 'i', ']', '.', 'sample', 'for', 'i', 'in', 'range', '(', 'len', '(', 'self', '.', 'latent_variables', '.', 'z_list', ')', ')', ']', ')', 'return', 'chain', '[', ':', ',', 'np', '.', 'random', '.', 'choice', '(', 'chain', '.', 'shape', '[', '1', ']', ',', 'nsims', ')', ']', 'else', ':', 'raise', 'Exception', '(', '"No latent variables estimated through Bayesian inference"', ')'] | Draws latent variables from the model (for Bayesian inference)
Parameters
----------
nsims : int
How many draws to take
Returns
----------
- np.ndarray of draws | ['Draws', 'latent', 'variables', 'from', 'the', 'model', '(', 'for', 'Bayesian', 'inference', ')'] | train | https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/tsm.py#L614-L634 |
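A hedged usage sketch for draw_latent_variables: it only works after a Bayesian fit, so a typical call (the ARIMA model, toy data and fit options below are assumptions) looks roughly like:

import numpy as np
import pyflux as pf

data = np.random.randn(200)              # assumed toy series
model = pf.ARIMA(data=data, ar=1, ma=1)
model.fit('BBVI', iterations=2000)       # or 'M-H'; a plain MLE fit would raise here
draws = model.draw_latent_variables(nsims=1000)
print(draws.shape)                       # (number of latent variables, 1000)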
5,044 | cherrypy/cheroot | cheroot/server.py | HTTPServer.interrupt | def interrupt(self, interrupt):
"""Perform the shutdown of this server and save the exception."""
self._interrupt = True
self.stop()
self._interrupt = interrupt | python | def interrupt(self, interrupt):
"""Perform the shutdown of this server and save the exception."""
self._interrupt = True
self.stop()
self._interrupt = interrupt | ['def', 'interrupt', '(', 'self', ',', 'interrupt', ')', ':', 'self', '.', '_interrupt', '=', 'True', 'self', '.', 'stop', '(', ')', 'self', '.', '_interrupt', '=', 'interrupt'] | Perform the shutdown of this server and save the exception. | ['Perform', 'the', 'shutdown', 'of', 'this', 'server', 'and', 'save', 'the', 'exception', '.'] | train | https://github.com/cherrypy/cheroot/blob/2af3b1798d66da697957480d3a8b4831a405770b/cheroot/server.py#L2081-L2085 |
5,045 | tylertreat/BigQuery-Python | bigquery/client.py | BigQueryClient.get_query_results | def get_query_results(self, job_id, offset=None, limit=None,
page_token=None, timeout=0):
"""Execute the query job indicated by the given job id. This is direct
mapping to the BigQuery API
https://cloud.google.com/bigquery/docs/reference/v2/jobs/getQueryResults
Parameters
----------
job_id : str
The job id of the query to check
offset : optional
The index the result set should start at.
limit : int, optional
The maximum number of results to retrieve.
page_token : optional
Page token, returned by previous call, to request the next page of
results.
timeout : float, optional
Timeout in seconds
Returns
-------
out
The query reply
"""
job_collection = self.bigquery.jobs()
return job_collection.getQueryResults(
projectId=self.project_id,
jobId=job_id,
startIndex=offset,
maxResults=limit,
pageToken=page_token,
timeoutMs=timeout * 1000).execute(num_retries=self.num_retries) | python | def get_query_results(self, job_id, offset=None, limit=None,
page_token=None, timeout=0):
"""Execute the query job indicated by the given job id. This is direct
mapping to the BigQuery API
https://cloud.google.com/bigquery/docs/reference/v2/jobs/getQueryResults
Parameters
----------
job_id : str
The job id of the query to check
offset : optional
The index the result set should start at.
limit : int, optional
The maximum number of results to retrieve.
page_token : optional
Page token, returned by previous call, to request the next page of
results.
timeout : float, optional
Timeout in seconds
Returns
-------
out
The query reply
"""
job_collection = self.bigquery.jobs()
return job_collection.getQueryResults(
projectId=self.project_id,
jobId=job_id,
startIndex=offset,
maxResults=limit,
pageToken=page_token,
timeoutMs=timeout * 1000).execute(num_retries=self.num_retries) | ['def', 'get_query_results', '(', 'self', ',', 'job_id', ',', 'offset', '=', 'None', ',', 'limit', '=', 'None', ',', 'page_token', '=', 'None', ',', 'timeout', '=', '0', ')', ':', 'job_collection', '=', 'self', '.', 'bigquery', '.', 'jobs', '(', ')', 'return', 'job_collection', '.', 'getQueryResults', '(', 'projectId', '=', 'self', '.', 'project_id', ',', 'jobId', '=', 'job_id', ',', 'startIndex', '=', 'offset', ',', 'maxResults', '=', 'limit', ',', 'pageToken', '=', 'page_token', ',', 'timeoutMs', '=', 'timeout', '*', '1000', ')', '.', 'execute', '(', 'num_retries', '=', 'self', '.', 'num_retries', ')'] | Execute the query job indicated by the given job id. This is direct
mapping to the BigQuery API
https://cloud.google.com/bigquery/docs/reference/v2/jobs/getQueryResults
Parameters
----------
job_id : str
The job id of the query to check
offset : optional
The index the result set should start at.
limit : int, optional
The maximum number of results to retrieve.
page_token : optional
Page token, returned by previous call, to request the next page of
results.
timeout : float, optional
Timeout in seconds
Returns
-------
out
The query reply | ['Execute', 'the', 'query', 'job', 'indicated', 'by', 'the', 'given', 'job', 'id', '.', 'This', 'is', 'direct', 'mapping', 'to', 'bigquery', 'api', 'https', ':', '//', 'cloud', '.', 'google', '.', 'com', '/', 'bigquery', '/', 'docs', '/', 'reference', '/', 'v2', '/', 'jobs', '/', 'getQueryResults'] | train | https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/client.py#L1629-L1662 |
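A hedged sketch of paging through a finished query with get_query_results; the client construction, job id and reply field names follow the library's BigQuery v2 usage and are assumptions here:

from bigquery import get_client

client = get_client(json_key_file='key.json', readonly=True)   # assumed credentials
job_id = 'job_123'                                              # assumed existing query job
page_token = None
while True:
    reply = client.get_query_results(job_id, limit=500, page_token=page_token, timeout=10)
    for row in reply.get('rows', []):
        print(row)
    page_token = reply.get('pageToken')
    if not page_token:
        break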
5,046 | gtaylor/python-colormath | colormath/color_conversions.py | IPT_to_XYZ | def IPT_to_XYZ(cobj, *args, **kwargs):
"""
Converts IPT to XYZ.
"""
ipt_values = numpy.array(cobj.get_value_tuple())
lms_values = numpy.dot(
numpy.linalg.inv(IPTColor.conversion_matrices['lms_to_ipt']),
ipt_values)
lms_prime = numpy.sign(lms_values) * numpy.abs(lms_values) ** (1 / 0.43)
xyz_values = numpy.dot(
numpy.linalg.inv(IPTColor.conversion_matrices['xyz_to_lms']),
lms_prime)
return XYZColor(*xyz_values, observer='2', illuminant='d65') | python | def IPT_to_XYZ(cobj, *args, **kwargs):
"""
Converts IPT to XYZ.
"""
ipt_values = numpy.array(cobj.get_value_tuple())
lms_values = numpy.dot(
numpy.linalg.inv(IPTColor.conversion_matrices['lms_to_ipt']),
ipt_values)
lms_prime = numpy.sign(lms_values) * numpy.abs(lms_values) ** (1 / 0.43)
xyz_values = numpy.dot(
numpy.linalg.inv(IPTColor.conversion_matrices['xyz_to_lms']),
lms_prime)
return XYZColor(*xyz_values, observer='2', illuminant='d65') | ['def', 'IPT_to_XYZ', '(', 'cobj', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'ipt_values', '=', 'numpy', '.', 'array', '(', 'cobj', '.', 'get_value_tuple', '(', ')', ')', 'lms_values', '=', 'numpy', '.', 'dot', '(', 'numpy', '.', 'linalg', '.', 'inv', '(', 'IPTColor', '.', 'conversion_matrices', '[', "'lms_to_ipt'", ']', ')', ',', 'ipt_values', ')', 'lms_prime', '=', 'numpy', '.', 'sign', '(', 'lms_values', ')', '*', 'numpy', '.', 'abs', '(', 'lms_values', ')', '**', '(', '1', '/', '0.43', ')', 'xyz_values', '=', 'numpy', '.', 'dot', '(', 'numpy', '.', 'linalg', '.', 'inv', '(', 'IPTColor', '.', 'conversion_matrices', '[', "'xyz_to_lms'", ']', ')', ',', 'lms_prime', ')', 'return', 'XYZColor', '(', '*', 'xyz_values', ',', 'observer', '=', "'2'", ',', 'illuminant', '=', "'d65'", ')'] | Converts IPT to XYZ. | ['Converts', 'IPT', 'to', 'XYZ', '.'] | train | https://github.com/gtaylor/python-colormath/blob/1d168613718d2d7d31ec4230524e987ef66823c7/colormath/color_conversions.py#L894-L908 |
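The conversion above simply inverts the forward IPT pipeline: undo the 3x3 LMS-to-IPT matrix, undo the signed 0.43-power companding, then undo the XYZ-to-LMS matrix. A hedged round-trip sketch using the public convert_color helper:

from colormath.color_objects import XYZColor, IPTColor
from colormath.color_conversions import convert_color

xyz = XYZColor(0.25, 0.40, 0.10, observer='2', illuminant='d65')
ipt = convert_color(xyz, IPTColor)    # forward transform
back = convert_color(ipt, XYZColor)   # calls IPT_to_XYZ internally
print(back.get_value_tuple())         # expected to be close to (0.25, 0.40, 0.10)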
5,047 | lemieuxl/pyGenClean | pyGenClean/RelatedSamples/merge_related_samples.py | merge_related_samples | def merge_related_samples(file_name, out_prefix, no_status):
"""Merge related samples.
:param file_name: the name of the input file.
:param out_prefix: the prefix of the output files.
:param no_status: is there a status column in the file?
:type file_name: str
:type out_prefix: str
:type no_status: boolean
In the output file, there is a pair of samples per line. Hence, one can
find related individuals by merging overlapping pairs.
"""
# What we need to save
status = {}
samples_sets = []
open_function = open
if file_name.endswith(".gz"):
open_function = gzip.open
with open_function(file_name, 'rb') as input_file:
header_index = dict([
(col_name, i) for i, col_name in
enumerate(input_file.readline().rstrip("\r\n").split("\t"))
])
for col_name in {"FID1", "IID1", "FID2", "IID2"}:
if col_name not in header_index:
msg = "{}: no column named {}".format(file_name, col_name)
raise ProgramError(msg)
if not no_status:
if "status" not in header_index:
msg = "{}: no column named status".format(file_name)
raise ProgramError(msg)
for line in input_file:
row = line.rstrip("\r\n").split("\t")
sample_1 = (row[header_index["FID1"]], row[header_index["IID1"]])
sample_2 = (row[header_index["FID2"]], row[header_index["IID2"]])
tmp_set = {sample_1, sample_2}
match = False
for i in xrange(len(samples_sets)):
if len(tmp_set & samples_sets[i]) > 0:
# We have a match
samples_sets[i] |= tmp_set
match = True
if not match:
# We did not find a match, so we add
samples_sets.append(tmp_set)
# Check for the status
the_status = "None"
if not no_status:
the_status = row[header_index["status"]]
status[(sample_1, sample_2)] = the_status
# Doing a final check
final_samples_set = []
removed = set()
for i in xrange(len(samples_sets)):
if i in removed:
# We removed this group
continue
group = samples_sets[i]
j = i + 1
while j < len(samples_sets):
if j in removed:
j += 1
continue
if len(group & samples_sets[j]) > 0:
# We have a match, we start from the beginning
group |= samples_sets[j]
removed.add(j)
j = i + 1
continue
j += 1
final_samples_set.append(group)
# Printing the output file
output_file = None
try:
output_file = open(out_prefix + ".merged_related_individuals", 'w')
to_print = ["index", "FID1", "IID1", "FID2", "IID2"]
if not no_status:
to_print.append("status")
print >>output_file, "\t".join(to_print)
except IOError:
msg = "{}: can't write file".format(out_prefix +
".merged_related_individuals")
raise ProgramError(msg)
# Iterating on the groups
chosen_samples = set()
remaining_samples = set()
for i, group in enumerate(final_samples_set):
index = str(i+1)
for sample_1, sample_2 in status.iterkeys():
if (sample_1 in group) and (sample_2 in group):
to_print = [index, sample_1[0], sample_1[1], sample_2[0],
sample_2[1]]
if not no_status:
to_print.append(status[(sample_1, sample_2)])
print >>output_file, "\t".join(to_print)
# Choose a random sample from the group
chosen = random.choice(list(group))
chosen_samples.add(chosen)
remaining_samples |= group - {chosen}
# Printing the files
try:
filename = out_prefix + ".chosen_related_individuals"
with open(filename, "w") as chosen_file:
for sample_id in chosen_samples:
print >>chosen_file, "\t".join(sample_id)
filename = out_prefix + ".discarded_related_individuals"
with open(filename, "w") as discarded_file:
for sample_id in remaining_samples:
print >>discarded_file, "\t".join(sample_id)
except IOError:
msg = "{}: can't write files".format(out_prefix + ".*")
raise ProgramError(msg)
# Closing the output file
output_file.close() | python | def merge_related_samples(file_name, out_prefix, no_status):
"""Merge related samples.
:param file_name: the name of the input file.
:param out_prefix: the prefix of the output files.
:param no_status: is there a status column in the file?
:type file_name: str
:type out_prefix: str
:type no_status: boolean
In the output file, there is a pair of samples per line. Hence, one can
find related individuals by merging overlapping pairs.
"""
# What we need to save
status = {}
samples_sets = []
open_function = open
if file_name.endswith(".gz"):
open_function = gzip.open
with open_function(file_name, 'rb') as input_file:
header_index = dict([
(col_name, i) for i, col_name in
enumerate(input_file.readline().rstrip("\r\n").split("\t"))
])
for col_name in {"FID1", "IID1", "FID2", "IID2"}:
if col_name not in header_index:
msg = "{}: no column named {}".format(file_name, col_name)
raise ProgramError(msg)
if not no_status:
if "status" not in header_index:
msg = "{}: no column named status".format(file_name)
raise ProgramError(msg)
for line in input_file:
row = line.rstrip("\r\n").split("\t")
sample_1 = (row[header_index["FID1"]], row[header_index["IID1"]])
sample_2 = (row[header_index["FID2"]], row[header_index["IID2"]])
tmp_set = {sample_1, sample_2}
match = False
for i in xrange(len(samples_sets)):
if len(tmp_set & samples_sets[i]) > 0:
# We have a match
samples_sets[i] |= tmp_set
match = True
if not match:
# We did not find a match, so we add
samples_sets.append(tmp_set)
# Check for the status
the_status = "None"
if not no_status:
the_status = row[header_index["status"]]
status[(sample_1, sample_2)] = the_status
# Doing a final check
final_samples_set = []
removed = set()
for i in xrange(len(samples_sets)):
if i in removed:
# We removed this group
continue
group = samples_sets[i]
j = i + 1
while j < len(samples_sets):
if j in removed:
j += 1
continue
if len(group & samples_sets[j]) > 0:
# We have a match, we start from the beginning
group |= samples_sets[j]
removed.add(j)
j = i + 1
continue
j += 1
final_samples_set.append(group)
# Printing the output file
output_file = None
try:
output_file = open(out_prefix + ".merged_related_individuals", 'w')
to_print = ["index", "FID1", "IID1", "FID2", "IID2"]
if not no_status:
to_print.append("status")
print >>output_file, "\t".join(to_print)
except IOError:
msg = "{}: can't write file".format(out_prefix +
".merged_related_individuals")
raise ProgramError(msg)
# Iterating on the groups
chosen_samples = set()
remaining_samples = set()
for i, group in enumerate(final_samples_set):
index = str(i+1)
for sample_1, sample_2 in status.iterkeys():
if (sample_1 in group) and (sample_2 in group):
to_print = [index, sample_1[0], sample_1[1], sample_2[0],
sample_2[1]]
if not no_status:
to_print.append(status[(sample_1, sample_2)])
print >>output_file, "\t".join(to_print)
# Choose a random sample from the group
chosen = random.choice(list(group))
chosen_samples.add(chosen)
remaining_samples |= group - {chosen}
# Printing the files
try:
filename = out_prefix + ".chosen_related_individuals"
with open(filename, "w") as chosen_file:
for sample_id in chosen_samples:
print >>chosen_file, "\t".join(sample_id)
filename = out_prefix + ".discarded_related_individuals"
with open(filename, "w") as discarded_file:
for sample_id in remaining_samples:
print >>discarded_file, "\t".join(sample_id)
except IOError:
msg = "{}: can't write files".format(out_prefix + ".*")
raise ProgramError(msg)
# Closing the output file
output_file.close() | ['def', 'merge_related_samples', '(', 'file_name', ',', 'out_prefix', ',', 'no_status', ')', ':', '# What we need to save', 'status', '=', '{', '}', 'samples_sets', '=', '[', ']', 'open_function', '=', 'open', 'if', 'file_name', '.', 'endswith', '(', '".gz"', ')', ':', 'open_function', '=', 'gzip', '.', 'open', 'with', 'open_function', '(', 'file_name', ',', "'rb'", ')', 'as', 'input_file', ':', 'header_index', '=', 'dict', '(', '[', '(', 'col_name', ',', 'i', ')', 'for', 'i', ',', 'col_name', 'in', 'enumerate', '(', 'input_file', '.', 'readline', '(', ')', '.', 'rstrip', '(', '"\\r\\n"', ')', '.', 'split', '(', '"\\t"', ')', ')', ']', ')', 'for', 'col_name', 'in', '{', '"FID1"', ',', '"IID1"', ',', '"FID2"', ',', '"IID2"', '}', ':', 'if', 'col_name', 'not', 'in', 'header_index', ':', 'msg', '=', '"{}: no column named {}"', '.', 'format', '(', 'file_name', ',', 'col_name', ')', 'raise', 'ProgramError', '(', 'msg', ')', 'if', 'not', 'no_status', ':', 'if', '"status"', 'not', 'in', 'header_index', ':', 'msg', '=', '"{}: no column named status"', '.', 'format', '(', 'file_name', ')', 'raise', 'ProgramError', '(', 'msg', ')', 'for', 'line', 'in', 'input_file', ':', 'row', '=', 'line', '.', 'rstrip', '(', '"\\r\\n"', ')', '.', 'split', '(', '"\\t"', ')', 'sample_1', '=', '(', 'row', '[', 'header_index', '[', '"FID1"', ']', ']', ',', 'row', '[', 'header_index', '[', '"IID1"', ']', ']', ')', 'sample_2', '=', '(', 'row', '[', 'header_index', '[', '"FID2"', ']', ']', ',', 'row', '[', 'header_index', '[', '"IID2"', ']', ']', ')', 'tmp_set', '=', '{', 'sample_1', ',', 'sample_2', '}', 'match', '=', 'False', 'for', 'i', 'in', 'xrange', '(', 'len', '(', 'samples_sets', ')', ')', ':', 'if', 'len', '(', 'tmp_set', '&', 'samples_sets', '[', 'i', ']', ')', '>', '0', ':', '# We have a match', 'samples_sets', '[', 'i', ']', '|=', 'tmp_set', 'match', '=', 'True', 'if', 'not', 'match', ':', '# We did not find a match, so we add', 'samples_sets', '.', 'append', '(', 'tmp_set', ')', '# Check for the status', 'the_status', '=', '"None"', 'if', 'not', 'no_status', ':', 'the_status', '=', 'row', '[', 'header_index', '[', '"status"', ']', ']', 'status', '[', '(', 'sample_1', ',', 'sample_2', ')', ']', '=', 'the_status', '# Doing a final check', 'final_samples_set', '=', '[', ']', 'removed', '=', 'set', '(', ')', 'for', 'i', 'in', 'xrange', '(', 'len', '(', 'samples_sets', ')', ')', ':', 'if', 'i', 'in', 'removed', ':', '# We removed this group', 'continue', 'group', '=', 'samples_sets', '[', 'i', ']', 'j', '=', 'i', '+', '1', 'while', 'j', '<', 'len', '(', 'samples_sets', ')', ':', 'if', 'j', 'in', 'removed', ':', 'j', '+=', '1', 'continue', 'if', 'len', '(', 'group', '&', 'samples_sets', '[', 'j', ']', ')', '>', '0', ':', '# We have a match, we start from the beginning', 'group', '|=', 'samples_sets', '[', 'j', ']', 'removed', '.', 'add', '(', 'j', ')', 'j', '=', 'i', '+', '1', 'continue', 'j', '+=', '1', 'final_samples_set', '.', 'append', '(', 'group', ')', '# Printing the output file', 'output_file', '=', 'None', 'try', ':', 'output_file', '=', 'open', '(', 'out_prefix', '+', '".merged_related_individuals"', ',', "'w'", ')', 'to_print', '=', '[', '"index"', ',', '"FID1"', ',', '"IID1"', ',', '"FID2"', ',', '"IID2"', ']', 'if', 'not', 'no_status', ':', 'to_print', '.', 'append', '(', '"status"', ')', 'print', '>>', 'output_file', ',', '"\\t"', '.', 'join', '(', 'to_print', ')', 'except', 'IOError', ':', 'msg', '=', '"{}: can\'t write file"', '.', 'format', '(', 'out_prefix', '+', 
'".merged_related_individuals"', ')', 'raise', 'ProgramError', '(', 'msg', ')', '# Iterating on the groups', 'chosen_samples', '=', 'set', '(', ')', 'remaining_samples', '=', 'set', '(', ')', 'for', 'i', ',', 'group', 'in', 'enumerate', '(', 'final_samples_set', ')', ':', 'index', '=', 'str', '(', 'i', '+', '1', ')', 'for', 'sample_1', ',', 'sample_2', 'in', 'status', '.', 'iterkeys', '(', ')', ':', 'if', '(', 'sample_1', 'in', 'group', ')', 'and', '(', 'sample_2', 'in', 'group', ')', ':', 'to_print', '=', '[', 'index', ',', 'sample_1', '[', '0', ']', ',', 'sample_1', '[', '1', ']', ',', 'sample_2', '[', '0', ']', ',', 'sample_2', '[', '1', ']', ']', 'if', 'not', 'no_status', ':', 'to_print', '.', 'append', '(', 'status', '[', '(', 'sample_1', ',', 'sample_2', ')', ']', ')', 'print', '>>', 'output_file', ',', '"\\t"', '.', 'join', '(', 'to_print', ')', '# Choose a random sample from the group', 'chosen', '=', 'random', '.', 'choice', '(', 'list', '(', 'group', ')', ')', 'chosen_samples', '.', 'add', '(', 'chosen', ')', 'remaining_samples', '|=', 'group', '-', '{', 'chosen', '}', '# Printing the files', 'try', ':', 'filename', '=', 'out_prefix', '+', '".chosen_related_individuals"', 'with', 'open', '(', 'filename', ',', '"w"', ')', 'as', 'chosen_file', ':', 'for', 'sample_id', 'in', 'chosen_samples', ':', 'print', '>>', 'chosen_file', ',', '"\\t"', '.', 'join', '(', 'sample_id', ')', 'filename', '=', 'out_prefix', '+', '".discarded_related_individuals"', 'with', 'open', '(', 'filename', ',', '"w"', ')', 'as', 'discarded_file', ':', 'for', 'sample_id', 'in', 'remaining_samples', ':', 'print', '>>', 'discarded_file', ',', '"\\t"', '.', 'join', '(', 'sample_id', ')', 'except', 'IOError', ':', 'msg', '=', '"{}: can\'t write files"', '.', 'format', '(', 'out_prefix', '+', '".*"', ')', 'raise', 'ProgramError', '(', 'msg', ')', '# Closing the output file', 'output_file', '.', 'close', '(', ')'] | Merge related samples.
:param file_name: the name of the input file.
:param out_prefix: the prefix of the output files.
:param no_status: is there a status column in the file?
:type file_name: str
:type out_prefix: str
:type no_status: boolean
In the output file, there is a pair of samples per line. Hence, one can
find related individuals by merging overlapping pairs. | ['Merge', 'related', 'samples', '.'] | train | https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/RelatedSamples/merge_related_samples.py#L46-L172 |
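The heart of merge_related_samples is transitive merging of (FID, IID) pairs into groups; a standalone illustration of that idea (not the pyGenClean API, and it omits the final re-merge pass the real function performs):

pairs = [(('F1', 'A'), ('F1', 'B')),
         (('F1', 'B'), ('F2', 'C')),
         (('F3', 'D'), ('F3', 'E'))]
groups = []
for s1, s2 in pairs:
    merged = {s1, s2}
    for g in groups:
        if g & merged:
            g |= merged
            break
    else:
        groups.append(merged)
print(groups)  # the first two pairs collapse into one group of three samples; one sample per group is then kept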
5,048 | flatangle/flatlib | flatlib/ephem/tools.py | solarReturnJD | def solarReturnJD(jd, lon, forward=True):
""" Finds the julian date before or after
'jd' when the sun is at longitude 'lon'.
It searches forward by default.
"""
sun = swe.sweObjectLon(const.SUN, jd)
if forward:
dist = angle.distance(sun, lon)
else:
dist = -angle.distance(lon, sun)
while abs(dist) > MAX_ERROR:
jd = jd + dist / 0.9833 # Sun mean motion
sun = swe.sweObjectLon(const.SUN, jd)
dist = angle.closestdistance(sun, lon)
return jd | python | def solarReturnJD(jd, lon, forward=True):
""" Finds the julian date before or after
'jd' when the sun is at longitude 'lon'.
It searches forward by default.
"""
sun = swe.sweObjectLon(const.SUN, jd)
if forward:
dist = angle.distance(sun, lon)
else:
dist = -angle.distance(lon, sun)
while abs(dist) > MAX_ERROR:
jd = jd + dist / 0.9833 # Sun mean motion
sun = swe.sweObjectLon(const.SUN, jd)
dist = angle.closestdistance(sun, lon)
return jd | ['def', 'solarReturnJD', '(', 'jd', ',', 'lon', ',', 'forward', '=', 'True', ')', ':', 'sun', '=', 'swe', '.', 'sweObjectLon', '(', 'const', '.', 'SUN', ',', 'jd', ')', 'if', 'forward', ':', 'dist', '=', 'angle', '.', 'distance', '(', 'sun', ',', 'lon', ')', 'else', ':', 'dist', '=', '-', 'angle', '.', 'distance', '(', 'lon', ',', 'sun', ')', 'while', 'abs', '(', 'dist', ')', '>', 'MAX_ERROR', ':', 'jd', '=', 'jd', '+', 'dist', '/', '0.9833', '# Sun mean motion', 'sun', '=', 'swe', '.', 'sweObjectLon', '(', 'const', '.', 'SUN', ',', 'jd', ')', 'dist', '=', 'angle', '.', 'closestdistance', '(', 'sun', ',', 'lon', ')', 'return', 'jd'] | Finds the julian date before or after
'jd' when the sun is at longitude 'lon'.
It searches forward by default. | ['Finds', 'the', 'julian', 'date', 'before', 'or', 'after', 'jd', 'when', 'the', 'sun', 'is', 'at', 'longitude', 'lon', '.', 'It', 'searches', 'forward', 'by', 'default', '.'] | train | https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/ephem/tools.py#L75-L91 |
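The loop above is a simple fixed-point search: the angular distance from the Sun to the target longitude is converted into days using the approximate solar motion of 0.9833 degrees per day hard-coded in the function, and the step repeats until the distance drops below MAX_ERROR. A hedged usage sketch (the chart date and target longitude are assumptions):

from flatlib.datetime import Datetime
from flatlib.ephem import tools

dt = Datetime('2015/03/13', '17:00', '+00:00')          # assumed natal data
jd = tools.solarReturnJD(dt.jd, 352.5, forward=True)    # next moment the Sun reaches 352.5 deg
print(jd - dt.jd)                                       # days until that solar return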
5,049 | dfujim/bdata | bdata/bdata.py | bdata.beam_kev | def beam_kev(self,get_error=False):
"""
Get the beam energy in keV, based on typical biases:
itw (or ite bias) - bias15 - platform bias
if get_error: fetch error in value, rather than value
"""
# get epics pointer
epics = self.epics
# fetch stds
if get_error:
attr = 'std'
else:
attr = 'mean'
# get initial beam energy in keV
beam = getattr(epics.target_bias,attr)/1000.
# get RB cell voltage
bias15 = getattr(epics.bias15,attr)/1000.
# get platform bias
if self.area == 'BNMR':
platform = getattr(epics.nmr_bias,attr)
elif self.area == 'BNQR':
platform = getattr(epics.nqr_bias,attr)/1000.
else:
raise RuntimeError('Area not recognized')
if get_error:
return np.sqrt(np.sum(np.square((beam,bias15,platform)))) # keV
else:
return beam-bias15-platform | python | def beam_kev(self,get_error=False):
"""
Get the beam energy in keV, based on typical biases:
itw (or ite bias) - bias15 - platform bias
if get_error: fetch error in value, rather than value
"""
# get epics pointer
epics = self.epics
# fetch stds
if get_error:
attr = 'std'
else:
attr = 'mean'
# get initial beam energy in keV
beam = getattr(epics.target_bias,attr)/1000.
# get RB cell voltage
bias15 = getattr(epics.bias15,attr)/1000.
# get platform bias
if self.area == 'BNMR':
platform = getattr(epics.nmr_bias,attr)
elif self.area == 'BNQR':
platform = getattr(epics.nqr_bias,attr)/1000.
else:
raise RuntimeError('Area not recognized')
if get_error:
return np.sqrt(np.sum(np.square((beam,bias15,platform)))) # keV
else:
return beam-bias15-platform | ['def', 'beam_kev', '(', 'self', ',', 'get_error', '=', 'False', ')', ':', '# get epics pointer', 'epics', '=', 'self', '.', 'epics', '# fetch stds', 'if', 'get_error', ':', 'attr', '=', "'std'", 'else', ':', 'attr', '=', "'mean'", '# get inital beam energy in keV', 'beam', '=', 'getattr', '(', 'epics', '.', 'target_bias', ',', 'attr', ')', '/', '1000.', '# get RB cell voltage', 'bias15', '=', 'getattr', '(', 'epics', '.', 'bias15', ',', 'attr', ')', '/', '1000.', '# get platform bias ', 'if', 'self', '.', 'area', '==', "'BNMR'", ':', 'platform', '=', 'getattr', '(', 'epics', '.', 'nmr_bias', ',', 'attr', ')', 'elif', 'self', '.', 'area', '==', "'BNQR'", ':', 'platform', '=', 'getattr', '(', 'epics', '.', 'nqr_bias', ',', 'attr', ')', '/', '1000.', 'else', ':', 'raise', 'RuntimeError', '(', "'Area not recognized'", ')', 'if', 'get_error', ':', 'return', 'np', '.', 'sqrt', '(', 'np', '.', 'sum', '(', 'np', '.', 'square', '(', '(', 'beam', ',', 'bias15', ',', 'platform', ')', ')', ')', ')', '# keV', 'else', ':', 'return', 'beam', '-', 'bias15', '-', 'platform'] | Get the beam energy in kev, based on typical biases:
itw (or ite bias) - bias15 - platform bias
if get_error: fetch error in value, rather than value | ['Get', 'the', 'beam', 'energy', 'in', 'kev', 'based', 'on', 'typical', 'biases', ':', 'itw', '(', 'or', 'ite', 'bias', ')', '-', 'bias15', '-', 'platform', 'bias', 'if', 'get_error', ':', 'fetch', 'error', 'in', 'value', 'rather', 'than', 'value'] | train | https://github.com/dfujim/bdata/blob/86af7b091e5cc167d2b9a3146953da347cc38614/bdata/bdata.py#L1297-L1331 |
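A short worked example of the arithmetic in beam_kev (all readings are made up): with a 28000 V target bias, a 200 V bias15 and a 19500 V BNQR platform bias the implantation energy is 28 - 0.2 - 19.5 = 8.3 keV, and the quoted uncertainty adds the three standard deviations in quadrature:

import numpy as np

target_bias, bias15, platform = 28000.0, 200.0, 19500.0          # volts, assumed readings
energy_kev = target_bias/1000. - bias15/1000. - platform/1000.   # 8.3 keV
stds_kev = np.array([10.0, 1.0, 5.0]) / 1000.                    # assumed standard deviations
error_kev = np.sqrt(np.sum(stds_kev**2))                         # quadrature sum
print(energy_kev, error_kev)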
5,050 | cabalgata/cabalgata-silla-de-montar | cabalgata/silla/factories.py | load_factory | def load_factory(name, directory, configuration=None):
""" Load a factory and have it initialize in a particular directory
:param name: the name of the plugin to load
:param directory: the directory where the factory will reside
:return:
"""
for entry_point in pkg_resources.iter_entry_points(ENTRY_POINT):
if entry_point.name == name:
factory_class = entry_point.load(require=False)
return factory_class(directory, configuration)
raise KeyError | python | def load_factory(name, directory, configuration=None):
""" Load a factory and have it initialize in a particular directory
:param name: the name of the plugin to load
:param directory: the directory where the factory will reside
:return:
"""
for entry_point in pkg_resources.iter_entry_points(ENTRY_POINT):
if entry_point.name == name:
factory_class = entry_point.load(require=False)
return factory_class(directory, configuration)
raise KeyError | ['def', 'load_factory', '(', 'name', ',', 'directory', ',', 'configuration', '=', 'None', ')', ':', 'for', 'entry_point', 'in', 'pkg_resources', '.', 'iter_entry_points', '(', 'ENTRY_POINT', ')', ':', 'if', 'entry_point', '.', 'name', '==', 'name', ':', 'factory_class', '=', 'entry_point', '.', 'load', '(', 'require', '=', 'False', ')', 'return', 'factory_class', '(', 'directory', ',', 'configuration', ')', 'raise', 'KeyError'] | Load a factory and have it initialize in a particular directory
:param name: the name of the plugin to load
:param directory: the directory where the factory will reside
:return: | ['Load', 'a', 'factory', 'and', 'have', 'it', 'initialize', 'in', 'a', 'particular', 'directory', ':', 'param', 'name', ':', 'the', 'name', 'of', 'the', 'plugin', 'to', 'load', ':', 'param', 'directory', ':', 'the', 'directory', 'where', 'the', 'factory', 'will', 'reside', ':', 'return', ':'] | train | https://github.com/cabalgata/cabalgata-silla-de-montar/blob/6f1de56f207e55d788d56636f623c0e3ce1aa750/cabalgata/silla/factories.py#L24-L35 |
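load_factory can only return something if a plugin has registered itself under the entry-point group named by ENTRY_POINT; a hedged sketch of what such a registration might look like in a plugin's setup.py (the group string, project and class names are all assumptions, not taken from the record):

from setuptools import setup

setup(
    name='my-factory-plugin',
    py_modules=['my_factory'],
    entry_points={
        'cabalgata.factories': [                  # assumed value of ENTRY_POINT
            'zookeeper = my_factory:ZooKeeperFactory',
        ],
    },
)
# load_factory('zookeeper', '/tmp/workdir') would then return ZooKeeperFactory('/tmp/workdir', None).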
5,051 | rapidpro/dash | dash/context_processors.py | lang_direction | def lang_direction(request):
"""
Sets lang_direction context variable to whether the language is RTL or LTR
"""
if lang_direction.rtl_langs is None:
lang_direction.rtl_langs = getattr(settings, "RTL_LANGUAGES", set())
return {"lang_direction": "rtl" if request.LANGUAGE_CODE in lang_direction.rtl_langs else "ltr"} | python | def lang_direction(request):
"""
Sets lang_direction context variable to whether the language is RTL or LTR
"""
if lang_direction.rtl_langs is None:
lang_direction.rtl_langs = getattr(settings, "RTL_LANGUAGES", set())
return {"lang_direction": "rtl" if request.LANGUAGE_CODE in lang_direction.rtl_langs else "ltr"} | ['def', 'lang_direction', '(', 'request', ')', ':', 'if', 'lang_direction', '.', 'rtl_langs', 'is', 'None', ':', 'lang_direction', '.', 'rtl_langs', '=', 'getattr', '(', 'settings', ',', '"RTL_LANGUAGES"', ',', 'set', '(', ')', ')', 'return', '{', '"lang_direction"', ':', '"rtl"', 'if', 'request', '.', 'LANGUAGE_CODE', 'in', 'lang_direction', '.', 'rtl_langs', 'else', '"ltr"', '}'] | Sets lang_direction context variable to whether the language is RTL or LTR | ['Sets', 'lang_direction', 'context', 'variable', 'to', 'whether', 'the', 'language', 'is', 'RTL', 'or', 'LTR'] | train | https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/context_processors.py#L4-L11 |
5,052 | fermiPy/fermipy | fermipy/jobs/link.py | Link._set_status_self | def _set_status_self(self, key=JobDetails.topkey, status=JobStatus.unknown):
"""Set the status of this job, both in self.jobs and
in the `JobArchive` if it is present. """
fullkey = JobDetails.make_fullkey(self.full_linkname, key)
if fullkey in self.jobs:
self.jobs[fullkey].status = status
if self._job_archive:
self._job_archive.register_job(self.jobs[fullkey])
else:
self._register_self('dummy.log', key, status) | python | def _set_status_self(self, key=JobDetails.topkey, status=JobStatus.unknown):
"""Set the status of this job, both in self.jobs and
in the `JobArchive` if it is present. """
fullkey = JobDetails.make_fullkey(self.full_linkname, key)
if fullkey in self.jobs:
self.jobs[fullkey].status = status
if self._job_archive:
self._job_archive.register_job(self.jobs[fullkey])
else:
self._register_self('dummy.log', key, status) | ['def', '_set_status_self', '(', 'self', ',', 'key', '=', 'JobDetails', '.', 'topkey', ',', 'status', '=', 'JobStatus', '.', 'unknown', ')', ':', 'fullkey', '=', 'JobDetails', '.', 'make_fullkey', '(', 'self', '.', 'full_linkname', ',', 'key', ')', 'if', 'fullkey', 'in', 'self', '.', 'jobs', ':', 'self', '.', 'jobs', '[', 'fullkey', ']', '.', 'status', '=', 'status', 'if', 'self', '.', '_job_archive', ':', 'self', '.', '_job_archive', '.', 'register_job', '(', 'self', '.', 'jobs', '[', 'fullkey', ']', ')', 'else', ':', 'self', '.', '_register_self', '(', "'dummy.log'", ',', 'key', ',', 'status', ')'] | Set the status of this job, both in self.jobs and
in the `JobArchive` if it is present. | ['Set', 'the', 'status', 'of', 'this', 'job', 'both', 'in', 'self', '.', 'jobs', 'and', 'in', 'the', 'JobArchive', 'if', 'it', 'is', 'present', '.'] | train | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/link.py#L596-L605 |
5,053 | dwavesystems/dwave_networkx | dwave_networkx/algorithms/elimination_ordering.py | _elim_adj | def _elim_adj(adj, n):
"""eliminates a variable, acting on the adj matrix of G,
returning the set of edges that were added.
Parameters
----------
adj: dict
A dict of the form {v: neighbors, ...} where v are
vertices in a graph and neighbors is a set.
Returns
----------
new_edges: set of edges that were added by eliminating v.
"""
neighbors = adj[n]
new_edges = set()
for u, v in itertools.combinations(neighbors, 2):
if v not in adj[u]:
adj[u].add(v)
adj[v].add(u)
new_edges.add((u, v))
new_edges.add((v, u))
for v in neighbors:
adj[v].discard(n)
del adj[n]
return new_edges | python | def _elim_adj(adj, n):
"""eliminates a variable, acting on the adj matrix of G,
returning the set of edges that were added.
Parameters
----------
adj: dict
A dict of the form {v: neighbors, ...} where v are
vertices in a graph and neighbors is a set.
Returns
----------
new_edges: set of edges that were added by eliminating v.
"""
neighbors = adj[n]
new_edges = set()
for u, v in itertools.combinations(neighbors, 2):
if v not in adj[u]:
adj[u].add(v)
adj[v].add(u)
new_edges.add((u, v))
new_edges.add((v, u))
for v in neighbors:
adj[v].discard(n)
del adj[n]
return new_edges | ['def', '_elim_adj', '(', 'adj', ',', 'n', ')', ':', 'neighbors', '=', 'adj', '[', 'n', ']', 'new_edges', '=', 'set', '(', ')', 'for', 'u', ',', 'v', 'in', 'itertools', '.', 'combinations', '(', 'neighbors', ',', '2', ')', ':', 'if', 'v', 'not', 'in', 'adj', '[', 'u', ']', ':', 'adj', '[', 'u', ']', '.', 'add', '(', 'v', ')', 'adj', '[', 'v', ']', '.', 'add', '(', 'u', ')', 'new_edges', '.', 'add', '(', '(', 'u', ',', 'v', ')', ')', 'new_edges', '.', 'add', '(', '(', 'v', ',', 'u', ')', ')', 'for', 'v', 'in', 'neighbors', ':', 'adj', '[', 'v', ']', '.', 'discard', '(', 'n', ')', 'del', 'adj', '[', 'n', ']', 'return', 'new_edges'] | eliminates a variable, acting on the adj matrix of G,
returning the set of edges that were added.
Parameters
----------
adj: dict
A dict of the form {v: neighbors, ...} where v are
vertices in a graph and neighbors is a set.
Returns
----------
new_edges: set of edges that were added by eliminating v. | ['eliminates', 'a', 'variable', 'acting', 'on', 'the', 'adj', 'matrix', 'of', 'G', 'returning', 'set', 'of', 'edges', 'that', 'were', 'added', '.'] | train | https://github.com/dwavesystems/dwave_networkx/blob/9ea1223ddbc7e86db2f90b8b23e250e6642c3d68/dwave_networkx/algorithms/elimination_ordering.py#L378-L404 |
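A small worked example of the elimination step above: removing the middle vertex of the path 1-2-3 connects its two neighbours, so the returned set holds that fill-in edge in both directions (a standalone sketch that assumes _elim_adj from the record is in scope):

adj = {1: {2}, 2: {1, 3}, 3: {2}}   # path graph 1-2-3 as an adjacency dict of sets
new_edges = _elim_adj(adj, 2)       # eliminate vertex 2
print(sorted(new_edges))            # [(1, 3), (3, 1)] -> the fill-in edge
print(adj)                          # {1: {3}, 3: {1}} -> vertex 2 removed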
5,054 | mdavidsaver/p4p | src/p4p/client/cothread.py | Context.monitor | def monitor(self, name, cb, request=None, notify_disconnect=False):
"""Create a subscription.
:param str name: PV name string
:param callable cb: Processing callback
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: In addition to Values, the callback may also be called with instances of Exception.
Specifically: Disconnected , RemoteError, or Cancelled
:returns: a :py:class:`Subscription` instance
The callable will be invoked with one argument which is either.
* A p4p.Value (Subject to :py:ref:`unwrap`)
* A sub-class of Exception (Disconnected , RemoteError, or Cancelled)
"""
R = Subscription(name, cb, notify_disconnect=notify_disconnect)
cb = partial(cothread.Callback, R._event)
R._S = super(Context, self).monitor(name, cb, request)
return R | python | def monitor(self, name, cb, request=None, notify_disconnect=False):
"""Create a subscription.
:param str name: PV name string
:param callable cb: Processing callback
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: In addition to Values, the callback may also be called with instances of Exception.
Specifically: Disconnected , RemoteError, or Cancelled
:returns: a :py:class:`Subscription` instance
The callable will be invoked with one argument which is either.
* A p4p.Value (Subject to :py:ref:`unwrap`)
* A sub-class of Exception (Disconnected , RemoteError, or Cancelled)
"""
R = Subscription(name, cb, notify_disconnect=notify_disconnect)
cb = partial(cothread.Callback, R._event)
R._S = super(Context, self).monitor(name, cb, request)
return R | ['def', 'monitor', '(', 'self', ',', 'name', ',', 'cb', ',', 'request', '=', 'None', ',', 'notify_disconnect', '=', 'False', ')', ':', 'R', '=', 'Subscription', '(', 'name', ',', 'cb', ',', 'notify_disconnect', '=', 'notify_disconnect', ')', 'cb', '=', 'partial', '(', 'cothread', '.', 'Callback', ',', 'R', '.', '_event', ')', 'R', '.', '_S', '=', 'super', '(', 'Context', ',', 'self', ')', '.', 'monitor', '(', 'name', ',', 'cb', ',', 'request', ')', 'return', 'R'] | Create a subscription.
:param str name: PV name string
:param callable cb: Processing callback
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: In addition to Values, the callback may also be called with instances of Exception.
Specifically: Disconnected , RemoteError, or Cancelled
:returns: a :py:class:`Subscription` instance
The callable will be invoked with one argument which is either.
* A p4p.Value (Subject to :py:ref:`unwrap`)
* A sub-class of Exception (Disconnected , RemoteError, or Cancelled) | ['Create', 'a', 'subscription', '.'] | train | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/cothread.py#L224-L243 |
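A hedged usage sketch for the cothread client monitor (the PV name and the surrounding cothread event loop are assumptions; close() on the returned Subscription is assumed to end it):

from p4p.client.cothread import Context

def on_update(value):
    if isinstance(value, Exception):   # Disconnected, RemoteError or Cancelled
        print('connection issue:', value)
    else:
        print('new value:', value)

ctxt = Context('pva')
sub = ctxt.monitor('DEMO:PV', on_update, notify_disconnect=True)
# ... run the cothread main loop; later:
sub.close()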
5,055 | benfred/implicit | setup.py | set_gcc | def set_gcc():
"""Try to use GCC on OSX for OpenMP support."""
# For macports and homebrew
if 'darwin' in platform.platform().lower():
gcc = extract_gcc_binaries()
if gcc is not None:
os.environ["CC"] = gcc
os.environ["CXX"] = gcc
else:
global use_openmp
use_openmp = False
logging.warning('No GCC available. Install gcc from Homebrew '
'using brew install gcc.') | python | def set_gcc():
"""Try to use GCC on OSX for OpenMP support."""
# For macports and homebrew
if 'darwin' in platform.platform().lower():
gcc = extract_gcc_binaries()
if gcc is not None:
os.environ["CC"] = gcc
os.environ["CXX"] = gcc
else:
global use_openmp
use_openmp = False
logging.warning('No GCC available. Install gcc from Homebrew '
'using brew install gcc.') | ['def', 'set_gcc', '(', ')', ':', '# For macports and homebrew', 'if', "'darwin'", 'in', 'platform', '.', 'platform', '(', ')', '.', 'lower', '(', ')', ':', 'gcc', '=', 'extract_gcc_binaries', '(', ')', 'if', 'gcc', 'is', 'not', 'None', ':', 'os', '.', 'environ', '[', '"CC"', ']', '=', 'gcc', 'os', '.', 'environ', '[', '"CXX"', ']', '=', 'gcc', 'else', ':', 'global', 'use_openmp', 'use_openmp', '=', 'False', 'logging', '.', 'warning', '(', "'No GCC available. Install gcc from Homebrew '", "'using brew install gcc.'", ')'] | Try to use GCC on OSX for OpenMP support. | ['Try', 'to', 'use', 'GCC', 'on', 'OSX', 'for', 'OpenMP', 'support', '.'] | train | https://github.com/benfred/implicit/blob/6b16c50d1d514a814f2e5b8cf2a829ff23dbba63/setup.py#L101-L116 |
5,056 | dmwm/DBS | Client/src/python/dbs/apis/dbsClient.py | DbsApi.__callServer | def __callServer(self, method="", params={}, data={}, callmethod='GET', content='application/json'):
"""
A private method to make an HTTP call to the DBS Server
:param method: REST API to call, e.g. 'datasets, blocks, files, ...'.
:type method: str
:param params: Parameters to the API call, e.g. {'dataset':'/PrimaryDS/ProcessedDS/TIER'}.
:type params: dict
:param callmethod: The HTTP method used, by default it is HTTP-GET, possible values are GET, POST and PUT.
:type callmethod: str
:param content: The type of content the server is expected to return. DBS3 only supports application/json
:type content: str
"""
UserID = os.environ['USER']+'@'+socket.gethostname()
try:
UserAgent = "DBSClient/"+os.environ['DBS3_CLIENT_VERSION']+"/"+ self.userAgent
except:
UserAgent = "DBSClient/Unknown"+"/"+ self.userAgent
request_headers = {"Content-Type": content, "Accept": content, "UserID": UserID, "User-Agent":UserAgent }
method_func = getattr(self.rest_api, callmethod.lower())
data = cjson.encode(data)
try:
self.http_response = method_func(self.url, method, params, data, request_headers)
except HTTPError as http_error:
self.__parseForException(http_error)
if content != "application/json":
return self.http_response.body
try:
json_ret=cjson.decode(self.http_response.body)
except cjson.DecodeError:
print("The server output is not a valid json, most probably you have a typo in the url.\n%s.\n" % self.url, file=sys.stderr)
raise dbsClientException("Invalid url", "Possible urls are %s" %self.http_response.body)
return json_ret | python | def __callServer(self, method="", params={}, data={}, callmethod='GET', content='application/json'):
"""
A private method to make an HTTP call to the DBS Server
:param method: REST API to call, e.g. 'datasets, blocks, files, ...'.
:type method: str
:param params: Parameters to the API call, e.g. {'dataset':'/PrimaryDS/ProcessedDS/TIER'}.
:type params: dict
:param callmethod: The HTTP method used, by default it is HTTP-GET, possible values are GET, POST and PUT.
:type callmethod: str
:param content: The type of content the server is expected to return. DBS3 only supports application/json
:type content: str
"""
UserID = os.environ['USER']+'@'+socket.gethostname()
try:
UserAgent = "DBSClient/"+os.environ['DBS3_CLIENT_VERSION']+"/"+ self.userAgent
except:
UserAgent = "DBSClient/Unknown"+"/"+ self.userAgent
request_headers = {"Content-Type": content, "Accept": content, "UserID": UserID, "User-Agent":UserAgent }
method_func = getattr(self.rest_api, callmethod.lower())
data = cjson.encode(data)
try:
self.http_response = method_func(self.url, method, params, data, request_headers)
except HTTPError as http_error:
self.__parseForException(http_error)
if content != "application/json":
return self.http_response.body
try:
json_ret=cjson.decode(self.http_response.body)
except cjson.DecodeError:
print("The server output is not a valid json, most probably you have a typo in the url.\n%s.\n" % self.url, file=sys.stderr)
raise dbsClientException("Invalid url", "Possible urls are %s" %self.http_response.body)
return json_ret | ['def', '__callServer', '(', 'self', ',', 'method', '=', '""', ',', 'params', '=', '{', '}', ',', 'data', '=', '{', '}', ',', 'callmethod', '=', "'GET'", ',', 'content', '=', "'application/json'", ')', ':', 'UserID', '=', 'os', '.', 'environ', '[', "'USER'", ']', '+', "'@'", '+', 'socket', '.', 'gethostname', '(', ')', 'try', ':', 'UserAgent', '=', '"DBSClient/"', '+', 'os', '.', 'environ', '[', "'DBS3_CLIENT_VERSION'", ']', '+', '"/"', '+', 'self', '.', 'userAgent', 'except', ':', 'UserAgent', '=', '"DBSClient/Unknown"', '+', '"/"', '+', 'self', '.', 'userAgent', 'request_headers', '=', '{', '"Content-Type"', ':', 'content', ',', '"Accept"', ':', 'content', ',', '"UserID"', ':', 'UserID', ',', '"User-Agent"', ':', 'UserAgent', '}', 'method_func', '=', 'getattr', '(', 'self', '.', 'rest_api', ',', 'callmethod', '.', 'lower', '(', ')', ')', 'data', '=', 'cjson', '.', 'encode', '(', 'data', ')', 'try', ':', 'self', '.', 'http_response', '=', 'method_func', '(', 'self', '.', 'url', ',', 'method', ',', 'params', ',', 'data', ',', 'request_headers', ')', 'except', 'HTTPError', 'as', 'http_error', ':', 'self', '.', '__parseForException', '(', 'http_error', ')', 'if', 'content', '!=', '"application/json"', ':', 'return', 'self', '.', 'http_response', '.', 'body', 'try', ':', 'json_ret', '=', 'cjson', '.', 'decode', '(', 'self', '.', 'http_response', '.', 'body', ')', 'except', 'cjson', '.', 'DecodeError', ':', 'print', '(', '"The server output is not a valid json, most probably you have a typo in the url.\\n%s.\\n"', '%', 'self', '.', 'url', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'raise', 'dbsClientException', '(', '"Invalid url"', ',', '"Possible urls are %s"', '%', 'self', '.', 'http_response', '.', 'body', ')', 'return', 'json_ret'] | A private method to make HTTP call to the DBS Server
:param method: REST API to call, e.g. 'datasets, blocks, files, ...'.
:type method: str
:param params: Parameters to the API call, e.g. {'dataset':'/PrimaryDS/ProcessedDS/TIER'}.
:type params: dict
:param callmethod: The HTTP method used, by default it is HTTP-GET, possible values are GET, POST and PUT.
:type callmethod: str
:param content: The type of content the server is expected to return. DBS3 only supports application/json
:type content: str | ['A', 'private', 'method', 'to', 'make', 'HTTP', 'call', 'to', 'the', 'DBS', 'Server'] | train | https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Client/src/python/dbs/apis/dbsClient.py#L171-L210 |
5,057 | timothydmorton/isochrones | isochrones/starmodel.py | StarModel.mnest_basename | def mnest_basename(self):
"""Full path to basename
"""
if not hasattr(self, '_mnest_basename'):
s = self.labelstring
if s=='0_0':
s = 'single'
elif s=='0_0-0_1':
s = 'binary'
elif s=='0_0-0_1-0_2':
s = 'triple'
s = '{}-{}'.format(self.ic.name, s)
self._mnest_basename = os.path.join('chains', s+'-')
if os.path.isabs(self._mnest_basename):
return self._mnest_basename
else:
return os.path.join(self.directory, self._mnest_basename) | python | def mnest_basename(self):
"""Full path to basename
"""
if not hasattr(self, '_mnest_basename'):
s = self.labelstring
if s=='0_0':
s = 'single'
elif s=='0_0-0_1':
s = 'binary'
elif s=='0_0-0_1-0_2':
s = 'triple'
s = '{}-{}'.format(self.ic.name, s)
self._mnest_basename = os.path.join('chains', s+'-')
if os.path.isabs(self._mnest_basename):
return self._mnest_basename
else:
return os.path.join(self.directory, self._mnest_basename) | ['def', 'mnest_basename', '(', 'self', ')', ':', 'if', 'not', 'hasattr', '(', 'self', ',', "'_mnest_basename'", ')', ':', 's', '=', 'self', '.', 'labelstring', 'if', 's', '==', "'0_0'", ':', 's', '=', "'single'", 'elif', 's', '==', "'0_0-0_1'", ':', 's', '=', "'binary'", 'elif', 's', '==', "'0_0-0_1-0_2'", ':', 's', '=', "'triple'", 's', '=', "'{}-{}'", '.', 'format', '(', 'self', '.', 'ic', '.', 'name', ',', 's', ')', 'self', '.', '_mnest_basename', '=', 'os', '.', 'path', '.', 'join', '(', "'chains'", ',', 's', '+', "'-'", ')', 'if', 'os', '.', 'path', '.', 'isabs', '(', 'self', '.', '_mnest_basename', ')', ':', 'return', 'self', '.', '_mnest_basename', 'else', ':', 'return', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'directory', ',', 'self', '.', '_mnest_basename', ')'] | Full path to basename | ['Full', 'path', 'to', 'basename'] | train | https://github.com/timothydmorton/isochrones/blob/d84495573044c66db2fd6b959fe69e370757ea14/isochrones/starmodel.py#L559-L577 |
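A minimal standalone sketch of the label-to-basename mapping implemented above; the StarModel object itself is not constructed here, and 'mist' is only an assumed value for self.ic.name.

import os

def example_mnest_basename(labelstring, ic_name, directory):
    # Mirrors the mapping above: '0_0' -> 'single', '0_0-0_1' -> 'binary', '0_0-0_1-0_2' -> 'triple'
    names = {'0_0': 'single', '0_0-0_1': 'binary', '0_0-0_1-0_2': 'triple'}
    label = names.get(labelstring, labelstring)
    basename = os.path.join('chains', '{}-{}-'.format(ic_name, label))
    return basename if os.path.isabs(basename) else os.path.join(directory, basename)

print(example_mnest_basename('0_0', 'mist', '/tmp/run'))  # -> /tmp/run/chains/mist-single-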
5,058 | halcy/Mastodon.py | mastodon/Mastodon.py | Mastodon.account_pin | def account_pin(self, id):
"""
Pin / endorse a user.
Returns a `relationship dict`_ containing the updated relationship to the user.
"""
id = self.__unpack_id(id)
url = '/api/v1/accounts/{0}/pin'.format(str(id))
return self.__api_request('POST', url) | python | def account_pin(self, id):
"""
Pin / endorse a user.
Returns a `relationship dict`_ containing the updated relationship to the user.
"""
id = self.__unpack_id(id)
url = '/api/v1/accounts/{0}/pin'.format(str(id))
return self.__api_request('POST', url) | ['def', 'account_pin', '(', 'self', ',', 'id', ')', ':', 'id', '=', 'self', '.', '__unpack_id', '(', 'id', ')', 'url', '=', "'/api/v1/accounts/{0}/pin'", '.', 'format', '(', 'str', '(', 'id', ')', ')', 'return', 'self', '.', '__api_request', '(', "'POST'", ',', 'url', ')'] | Pin / endorse a user.
Returns a `relationship dict`_ containing the updated relationship to the user. | ['Pin', '/', 'endorse', 'a', 'user', '.', 'Returns', 'a', 'relationship', 'dict', '_', 'containing', 'the', 'updated', 'relationship', 'to', 'the', 'user', '.'] | train | https://github.com/halcy/Mastodon.py/blob/35c43562dd3d34d6ebf7a0f757c09e8fcccc957c/mastodon/Mastodon.py#L1917-L1925 |
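A brief usage sketch of the endorsement call above; the token, base URL and account id are placeholders, and the keyword arguments to the Mastodon constructor are assumed from the library's usual login pattern rather than shown in this snippet.

from mastodon import Mastodon

api = Mastodon(access_token='YOUR_TOKEN', api_base_url='https://mastodon.example')  # hypothetical credentials
relationship = api.account_pin(12345)  # pin / endorse account id 12345
print(relationship)  # updated relationship dict for that account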
5,059 | ARMmbed/icetea | icetea_lib/Result.py | Result.build_git_url | def build_git_url(self):
"""
get build git url.
:return: build git url or None if not found
"""
# pylint: disable=len-as-condition
if len(self.dutinformation) > 0 and (self.dutinformation.get(0).build is not None):
return self.dutinformation.get(0).build.giturl
return None | python | def build_git_url(self):
"""
get build git url.
:return: build git url or None if not found
"""
# pylint: disable=len-as-condition
if len(self.dutinformation) > 0 and (self.dutinformation.get(0).build is not None):
return self.dutinformation.get(0).build.giturl
return None | ['def', 'build_git_url', '(', 'self', ')', ':', '# pylint: disable=len-as-condition', 'if', 'len', '(', 'self', '.', 'dutinformation', ')', '>', '0', 'and', '(', 'self', '.', 'dutinformation', '.', 'get', '(', '0', ')', '.', 'build', 'is', 'not', 'None', ')', ':', 'return', 'self', '.', 'dutinformation', '.', 'get', '(', '0', ')', '.', 'build', '.', 'giturl', 'return', 'None'] | get build git url.
:return: build git url or None if not found | ['get', 'build', 'git', 'url', '.'] | train | https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/Result.py#L214-L223 |
5,060 | satellogic/telluric | telluric/features.py | GeoFeature.from_raster | def from_raster(cls, raster, properties, product='visual'):
"""Initialize a GeoFeature object with a GeoRaster
Parameters
----------
raster : GeoRaster
the raster in the feature
properties : dict
Properties.
product : str
product associated to the raster
"""
footprint = raster.footprint()
assets = raster.to_assets(product=product)
return cls(footprint, properties, assets) | python | def from_raster(cls, raster, properties, product='visual'):
"""Initialize a GeoFeature object with a GeoRaster
Parameters
----------
raster : GeoRaster
the raster in the feature
properties : dict
Properties.
product : str
product associated to the raster
"""
footprint = raster.footprint()
assets = raster.to_assets(product=product)
return cls(footprint, properties, assets) | ['def', 'from_raster', '(', 'cls', ',', 'raster', ',', 'properties', ',', 'product', '=', "'visual'", ')', ':', 'footprint', '=', 'raster', '.', 'footprint', '(', ')', 'assets', '=', 'raster', '.', 'to_assets', '(', 'product', '=', 'product', ')', 'return', 'cls', '(', 'footprint', ',', 'properties', ',', 'assets', ')'] | Initialize a GeoFeature object with a GeoRaster
Parameters
----------
raster : GeoRaster
the raster in the feature
properties : dict
Properties.
product : str
product associated to the raster | ['Initialize', 'a', 'GeoFeature', 'object', 'with', 'a', 'GeoRaster'] | train | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/features.py#L287-L301 |
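A short usage sketch, assuming a raster loaded from a local GeoTIFF with GeoRaster2.open; the file name and property values are placeholders.

from telluric import GeoRaster2, GeoFeature

raster = GeoRaster2.open('tile.tif')                         # hypothetical local raster
feature = GeoFeature.from_raster(raster, {'name': 'tile'})   # footprint geometry + raster asset
print(feature.properties)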
5,061 | spyder-ide/spyder | spyder/plugins/variableexplorer/widgets/collectionseditor.py | BaseTableView.set_data | def set_data(self, data):
"""Set table data"""
if data is not None:
self.model.set_data(data, self.dictfilter)
self.sortByColumn(0, Qt.AscendingOrder) | python | def set_data(self, data):
"""Set table data"""
if data is not None:
self.model.set_data(data, self.dictfilter)
self.sortByColumn(0, Qt.AscendingOrder) | ['def', 'set_data', '(', 'self', ',', 'data', ')', ':', 'if', 'data', 'is', 'not', 'None', ':', 'self', '.', 'model', '.', 'set_data', '(', 'data', ',', 'self', '.', 'dictfilter', ')', 'self', '.', 'sortByColumn', '(', '0', ',', 'Qt', '.', 'AscendingOrder', ')'] | Set table data | ['Set', 'table', 'data'] | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/collectionseditor.py#L920-L924 |
5,062 | KrzyHonk/bpmn-python | bpmn_python/bpmn_diagram_import.py | BpmnDiagramGraphImport.import_parallel_gateway_to_graph | def import_parallel_gateway_to_graph(diagram_graph, process_id, process_attributes, element):
"""
Adds to graph the new element that represents BPMN parallel gateway.
Parallel gateway doesn't have additional attributes. Separate method is used to improve code readability.
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param process_id: string object, representing an ID of process element,
:param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of
imported flow node,
:param element: object representing a BPMN XML 'parallelGateway'.
"""
BpmnDiagramGraphImport.import_gateway_to_graph(diagram_graph, process_id, process_attributes, element) | python | def import_parallel_gateway_to_graph(diagram_graph, process_id, process_attributes, element):
"""
Adds to graph the new element that represents BPMN parallel gateway.
Parallel gateway doesn't have additional attributes. Separate method is used to improve code readability.
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param process_id: string object, representing an ID of process element,
:param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of
imported flow node,
:param element: object representing a BPMN XML 'parallelGateway'.
"""
BpmnDiagramGraphImport.import_gateway_to_graph(diagram_graph, process_id, process_attributes, element) | ['def', 'import_parallel_gateway_to_graph', '(', 'diagram_graph', ',', 'process_id', ',', 'process_attributes', ',', 'element', ')', ':', 'BpmnDiagramGraphImport', '.', 'import_gateway_to_graph', '(', 'diagram_graph', ',', 'process_id', ',', 'process_attributes', ',', 'element', ')'] | Adds to graph the new element that represents BPMN parallel gateway.
Parallel gateway doesn't have additional attributes. Separate method is used to improve code readability.
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param process_id: string object, representing an ID of process element,
:param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of
imported flow node,
:param element: object representing a BPMN XML 'parallelGateway'. | ['Adds', 'to', 'graph', 'the', 'new', 'element', 'that', 'represents', 'BPMN', 'parallel', 'gateway', '.', 'Parallel', 'gateway', 'doesn', 't', 'have', 'additional', 'attributes', '.', 'Separate', 'method', 'is', 'used', 'to', 'improve', 'code', 'readability', '.'] | train | https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/bpmn_diagram_import.py#L550-L561 |
5,063 | abe-winter/pg13-py | pg13/sqex.py | collapse_group_expr | def collapse_group_expr(groupx,cols,ret_row):
"collapses columns matching the group expression. I'm sure this is buggy; look at a real DB's imp of this."
for i,col in enumerate(cols.children):
if col==groupx: ret_row[i]=ret_row[i][0]
return ret_row | python | def collapse_group_expr(groupx,cols,ret_row):
"collapses columns matching the group expression. I'm sure this is buggy; look at a real DB's imp of this."
for i,col in enumerate(cols.children):
if col==groupx: ret_row[i]=ret_row[i][0]
return ret_row | ['def', 'collapse_group_expr', '(', 'groupx', ',', 'cols', ',', 'ret_row', ')', ':', 'for', 'i', ',', 'col', 'in', 'enumerate', '(', 'cols', '.', 'children', ')', ':', 'if', 'col', '==', 'groupx', ':', 'ret_row', '[', 'i', ']', '=', 'ret_row', '[', 'i', ']', '[', '0', ']', 'return', 'ret_row'] | collapses columns matching the group expression. I'm sure this is buggy; look at a real DB's imp of this. | ['collapses', 'columns', 'matching', 'the', 'group', 'expression', '.', 'I', 'm', 'sure', 'this', 'is', 'buggy', ';', 'look', 'at', 'a', 'real', 'DB', 's', 'imp', 'of', 'this', '.'] | train | https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/sqex.py#L209-L213 |
5,064 | szastupov/aiotg | aiotg/bot.py | Bot.send_message | def send_message(self, chat_id, text, **options):
"""
Send a text message to chat
:param int chat_id: ID of the chat to send the message to
:param str text: Text to send
:param options: Additional sendMessage options
(see https://core.telegram.org/bots/api#sendmessage)
"""
return self.api_call("sendMessage", chat_id=chat_id, text=text, **options) | python | def send_message(self, chat_id, text, **options):
"""
Send a text message to chat
:param int chat_id: ID of the chat to send the message to
:param str text: Text to send
:param options: Additional sendMessage options
(see https://core.telegram.org/bots/api#sendmessage)
"""
return self.api_call("sendMessage", chat_id=chat_id, text=text, **options) | ['def', 'send_message', '(', 'self', ',', 'chat_id', ',', 'text', ',', '*', '*', 'options', ')', ':', 'return', 'self', '.', 'api_call', '(', '"sendMessage"', ',', 'chat_id', '=', 'chat_id', ',', 'text', '=', 'text', ',', '*', '*', 'options', ')'] | Send a text message to chat
:param int chat_id: ID of the chat to send the message to
:param str text: Text to send
:param options: Additional sendMessage options
(see https://core.telegram.org/bots/api#sendmessage) | ['Send', 'a', 'text', 'message', 'to', 'chat'] | train | https://github.com/szastupov/aiotg/blob/eed81a6a728c02120f1d730a6e8b8fe50263c010/aiotg/bot.py#L461-L470 |
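A minimal usage sketch; the bot token and chat id are placeholders, and parse_mode is just one of the extra sendMessage options forwarded through **options.

import asyncio
from aiotg import Bot

bot = Bot(api_token='TOKEN')  # hypothetical bot token

async def greet():
    # forwards chat_id, text and parse_mode to the Bot API sendMessage call
    await bot.send_message(123456789, '*hello*', parse_mode='Markdown')

asyncio.get_event_loop().run_until_complete(greet())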
5,065 | isambard-uob/ampal | src/ampal/base_ampal.py | Monomer.close_monomers | def close_monomers(self, group, cutoff=4.0):
"""Returns a list of Monomers from within a cut off distance of the Monomer
Parameters
----------
group: BaseAmpal or Subclass
            Group to be searched for Monomers that are close to this Monomer.
cutoff: float
Distance cut off.
Returns
-------
nearby_residues: [Monomers]
List of Monomers within cut off distance.
"""
nearby_residues = []
for self_atom in self.atoms.values():
nearby_atoms = group.is_within(cutoff, self_atom)
for res_atom in nearby_atoms:
if res_atom.parent not in nearby_residues:
nearby_residues.append(res_atom.parent)
return nearby_residues | python | def close_monomers(self, group, cutoff=4.0):
"""Returns a list of Monomers from within a cut off distance of the Monomer
Parameters
----------
group: BaseAmpal or Subclass
            Group to be searched for Monomers that are close to this Monomer.
cutoff: float
Distance cut off.
Returns
-------
nearby_residues: [Monomers]
List of Monomers within cut off distance.
"""
nearby_residues = []
for self_atom in self.atoms.values():
nearby_atoms = group.is_within(cutoff, self_atom)
for res_atom in nearby_atoms:
if res_atom.parent not in nearby_residues:
nearby_residues.append(res_atom.parent)
return nearby_residues | ['def', 'close_monomers', '(', 'self', ',', 'group', ',', 'cutoff', '=', '4.0', ')', ':', 'nearby_residues', '=', '[', ']', 'for', 'self_atom', 'in', 'self', '.', 'atoms', '.', 'values', '(', ')', ':', 'nearby_atoms', '=', 'group', '.', 'is_within', '(', 'cutoff', ',', 'self_atom', ')', 'for', 'res_atom', 'in', 'nearby_atoms', ':', 'if', 'res_atom', '.', 'parent', 'not', 'in', 'nearby_residues', ':', 'nearby_residues', '.', 'append', '(', 'res_atom', '.', 'parent', ')', 'return', 'nearby_residues'] | Returns a list of Monomers from within a cut off distance of the Monomer
Parameters
----------
group: BaseAmpal or Subclass
            Group to be searched for Monomers that are close to this Monomer.
cutoff: float
Distance cut off.
Returns
-------
nearby_residues: [Monomers]
List of Monomers within cut off distance. | ['Returns', 'a', 'list', 'of', 'Monomers', 'from', 'within', 'a', 'cut', 'off', 'distance', 'of', 'the', 'Monomer'] | train | https://github.com/isambard-uob/ampal/blob/906e2afacb435ffb129b381f262ff8e7bfb324c5/src/ampal/base_ampal.py#L605-L626 |
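A usage sketch assuming a structure loaded with ampal.load_pdb; the PDB file name and the chosen residue index are placeholders.

import ampal

assembly = ampal.load_pdb('3qy1.pdb')      # hypothetical structure file
residue = assembly[0][10]                  # a Monomer from the first Polymer
neighbours = residue.close_monomers(assembly, cutoff=4.0)
print(len(neighbours), 'residues within 4 A of the chosen residue')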
5,066 | eyeseast/propublica-congress | congress/members.py | MembersClient.filter | def filter(self, chamber, congress=CURRENT_CONGRESS, **kwargs):
"""
Takes a chamber and Congress,
OR state and district, returning a list of members
"""
check_chamber(chamber)
kwargs.update(chamber=chamber, congress=congress)
if 'state' in kwargs and 'district' in kwargs:
path = ("members/{chamber}/{state}/{district}/"
"current.json").format(**kwargs)
elif 'state' in kwargs:
path = ("members/{chamber}/{state}/"
"current.json").format(**kwargs)
else:
path = ("{congress}/{chamber}/"
"members.json").format(**kwargs)
return self.fetch(path, parse=lambda r: r['results']) | python | def filter(self, chamber, congress=CURRENT_CONGRESS, **kwargs):
"""
Takes a chamber and Congress,
OR state and district, returning a list of members
"""
check_chamber(chamber)
kwargs.update(chamber=chamber, congress=congress)
if 'state' in kwargs and 'district' in kwargs:
path = ("members/{chamber}/{state}/{district}/"
"current.json").format(**kwargs)
elif 'state' in kwargs:
path = ("members/{chamber}/{state}/"
"current.json").format(**kwargs)
else:
path = ("{congress}/{chamber}/"
"members.json").format(**kwargs)
return self.fetch(path, parse=lambda r: r['results']) | ['def', 'filter', '(', 'self', ',', 'chamber', ',', 'congress', '=', 'CURRENT_CONGRESS', ',', '*', '*', 'kwargs', ')', ':', 'check_chamber', '(', 'chamber', ')', 'kwargs', '.', 'update', '(', 'chamber', '=', 'chamber', ',', 'congress', '=', 'congress', ')', 'if', "'state'", 'in', 'kwargs', 'and', "'district'", 'in', 'kwargs', ':', 'path', '=', '(', '"members/{chamber}/{state}/{district}/"', '"current.json"', ')', '.', 'format', '(', '*', '*', 'kwargs', ')', 'elif', "'state'", 'in', 'kwargs', ':', 'path', '=', '(', '"members/{chamber}/{state}/"', '"current.json"', ')', '.', 'format', '(', '*', '*', 'kwargs', ')', 'else', ':', 'path', '=', '(', '"{congress}/{chamber}/"', '"members.json"', ')', '.', 'format', '(', '*', '*', 'kwargs', ')', 'return', 'self', '.', 'fetch', '(', 'path', ',', 'parse', '=', 'lambda', 'r', ':', 'r', '[', "'results'", ']', ')'] | Takes a chamber and Congress,
OR state and district, returning a list of members | ['Takes', 'a', 'chamber', 'and', 'Congress', 'OR', 'state', 'and', 'district', 'returning', 'a', 'list', 'of', 'members'] | train | https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/members.py#L12-L33 |
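A usage sketch assuming the client is reached through the package's Congress wrapper with a valid ProPublica API key; the key, state and district values are placeholders.

from congress import Congress

client = Congress('YOUR_PROPUBLICA_KEY')                             # hypothetical API key
senators = client.members.filter('senate')                           # congress/chamber branch of the path logic
ca_reps = client.members.filter('house', state='CA', district=12)    # state/district branch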
5,067 | a1ezzz/wasp-general | wasp_general/uri.py | WURI.parse | def parse(cls, uri):
""" Parse URI-string and return WURI object
:param uri: string to parse
:return: WURI
"""
uri_components = urlsplit(uri)
adapter_fn = lambda x: x if x is not None and (isinstance(x, str) is False or len(x)) > 0 else None
return cls(
scheme=adapter_fn(uri_components.scheme),
username=adapter_fn(uri_components.username),
password=adapter_fn(uri_components.password),
hostname=adapter_fn(uri_components.hostname),
port=adapter_fn(uri_components.port),
path=adapter_fn(uri_components.path),
query=adapter_fn(uri_components.query),
fragment=adapter_fn(uri_components.fragment),
) | python | def parse(cls, uri):
""" Parse URI-string and return WURI object
:param uri: string to parse
:return: WURI
"""
uri_components = urlsplit(uri)
adapter_fn = lambda x: x if x is not None and (isinstance(x, str) is False or len(x)) > 0 else None
return cls(
scheme=adapter_fn(uri_components.scheme),
username=adapter_fn(uri_components.username),
password=adapter_fn(uri_components.password),
hostname=adapter_fn(uri_components.hostname),
port=adapter_fn(uri_components.port),
path=adapter_fn(uri_components.path),
query=adapter_fn(uri_components.query),
fragment=adapter_fn(uri_components.fragment),
) | ['def', 'parse', '(', 'cls', ',', 'uri', ')', ':', 'uri_components', '=', 'urlsplit', '(', 'uri', ')', 'adapter_fn', '=', 'lambda', 'x', ':', 'x', 'if', 'x', 'is', 'not', 'None', 'and', '(', 'isinstance', '(', 'x', ',', 'str', ')', 'is', 'False', 'or', 'len', '(', 'x', ')', ')', '>', '0', 'else', 'None', 'return', 'cls', '(', 'scheme', '=', 'adapter_fn', '(', 'uri_components', '.', 'scheme', ')', ',', 'username', '=', 'adapter_fn', '(', 'uri_components', '.', 'username', ')', ',', 'password', '=', 'adapter_fn', '(', 'uri_components', '.', 'password', ')', ',', 'hostname', '=', 'adapter_fn', '(', 'uri_components', '.', 'hostname', ')', ',', 'port', '=', 'adapter_fn', '(', 'uri_components', '.', 'port', ')', ',', 'path', '=', 'adapter_fn', '(', 'uri_components', '.', 'path', ')', ',', 'query', '=', 'adapter_fn', '(', 'uri_components', '.', 'query', ')', ',', 'fragment', '=', 'adapter_fn', '(', 'uri_components', '.', 'fragment', ')', ',', ')'] | Parse URI-string and return WURI object
:param uri: string to parse
:return: WURI | ['Parse', 'URI', '-', 'string', 'and', 'return', 'WURI', 'object'] | train | https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/uri.py#L150-L168 |
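A small sketch of the classmethod above; the URI string is made up, and only the eight components passed to the constructor in the code are assumed to be populated.

from wasp_general.uri import WURI

uri = WURI.parse('scheme://user:secret@example.org:8080/some/path?flag=1#top')
# scheme, username, password, hostname, port, path, query and fragment are parsed;
# components missing from the string are stored as None
print(uri)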
5,068 | peopledoc/workalendar | setup.py | read_relative_file | def read_relative_file(filename):
"""
Return the contents of the given file.
Its path is supposed relative to this module.
"""
path = join(dirname(abspath(__file__)), filename)
with io.open(path, encoding='utf-8') as f:
return f.read() | python | def read_relative_file(filename):
"""
Return the contents of the given file.
Its path is supposed relative to this module.
"""
path = join(dirname(abspath(__file__)), filename)
with io.open(path, encoding='utf-8') as f:
return f.read() | ['def', 'read_relative_file', '(', 'filename', ')', ':', 'path', '=', 'join', '(', 'dirname', '(', 'abspath', '(', '__file__', ')', ')', ',', 'filename', ')', 'with', 'io', '.', 'open', '(', 'path', ',', 'encoding', '=', "'utf-8'", ')', 'as', 'f', ':', 'return', 'f', '.', 'read', '(', ')'] | Return the contents of the given file.
Its path is supposed relative to this module. | ['Return', 'the', 'contents', 'of', 'the', 'given', 'file', '.'] | train | https://github.com/peopledoc/workalendar/blob/d044d5dfc1709ec388db34dab583dd554cc66c4e/setup.py#L8-L16 |
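A typical use of the helper above inside setup.py; the README file name is an assumption for illustration.

long_description = read_relative_file('README.rst')  # reads the file sitting next to setup.py
print(long_description[:80])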
5,069 | KarchinLab/probabilistic2020 | prob2020/python/scores.py | compute_vest_stat | def compute_vest_stat(vest_dict, ref_aa, somatic_aa, codon_pos,
stat_func=np.mean,
default_val=0.0):
"""Compute missense VEST score statistic.
    Note: non-missense mutations are intentionally not filtered out and will take
a default value of zero.
Parameters
----------
vest_dict : dict
dictionary containing vest scores across the gene of interest
ref_aa: list of str
list of reference amino acids
somatic_aa: list of str
somatic mutation aa
codon_pos : list of int
position of codon in protein sequence
stat_func : function, default=np.mean
function that calculates a statistic
default_val : float
default value to return if there are no mutations
Returns
-------
score_stat : float
vest score statistic for provided mutation list
"""
# return default value if VEST scores are missing
if vest_dict is None:
return default_val
# fetch scores
myscores = fetch_vest_scores(vest_dict, ref_aa, somatic_aa, codon_pos)
# calculate mean score
if myscores:
score_stat = stat_func(myscores)
else:
score_stat = default_val
return score_stat | python | def compute_vest_stat(vest_dict, ref_aa, somatic_aa, codon_pos,
stat_func=np.mean,
default_val=0.0):
"""Compute missense VEST score statistic.
    Note: non-missense mutations are intentionally not filtered out and will take
a default value of zero.
Parameters
----------
vest_dict : dict
dictionary containing vest scores across the gene of interest
ref_aa: list of str
list of reference amino acids
somatic_aa: list of str
somatic mutation aa
codon_pos : list of int
position of codon in protein sequence
stat_func : function, default=np.mean
function that calculates a statistic
default_val : float
default value to return if there are no mutations
Returns
-------
score_stat : float
vest score statistic for provided mutation list
"""
# return default value if VEST scores are missing
if vest_dict is None:
return default_val
# fetch scores
myscores = fetch_vest_scores(vest_dict, ref_aa, somatic_aa, codon_pos)
# calculate mean score
if myscores:
score_stat = stat_func(myscores)
else:
score_stat = default_val
return score_stat | ['def', 'compute_vest_stat', '(', 'vest_dict', ',', 'ref_aa', ',', 'somatic_aa', ',', 'codon_pos', ',', 'stat_func', '=', 'np', '.', 'mean', ',', 'default_val', '=', '0.0', ')', ':', '# return default value if VEST scores are missing', 'if', 'vest_dict', 'is', 'None', ':', 'return', 'default_val', '# fetch scores', 'myscores', '=', 'fetch_vest_scores', '(', 'vest_dict', ',', 'ref_aa', ',', 'somatic_aa', ',', 'codon_pos', ')', '# calculate mean score', 'if', 'myscores', ':', 'score_stat', '=', 'stat_func', '(', 'myscores', ')', 'else', ':', 'score_stat', '=', 'default_val', 'return', 'score_stat'] | Compute missense VEST score statistic.
    Note: non-missense mutations are intentionally not filtered out and will take
a default value of zero.
Parameters
----------
vest_dict : dict
dictionary containing vest scores across the gene of interest
ref_aa: list of str
list of reference amino acids
somatic_aa: list of str
somatic mutation aa
codon_pos : list of int
position of codon in protein sequence
stat_func : function, default=np.mean
function that calculates a statistic
default_val : float
default value to return if there are no mutations
Returns
-------
score_stat : float
vest score statistic for provided mutation list | ['Compute', 'missense', 'VEST', 'score', 'statistic', '.'] | train | https://github.com/KarchinLab/probabilistic2020/blob/5d70583b0a7c07cfe32e95f3a70e05df412acb84/prob2020/python/scores.py#L110-L151 |
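Two small calls that follow directly from the code above: a missing score dictionary falls back to default_val, and an alternative statistic can be supplied through stat_func. The amino-acid inputs are hypothetical, and the internal layout expected by fetch_vest_scores is not reproduced here.

import numpy as np

# with no VEST scores available the default value is returned unchanged
assert compute_vest_stat(None, ['R'], ['Q'], [42]) == 0.0

# a different summary statistic can be plugged in (vest_dict layout omitted here):
# score = compute_vest_stat(vest_dict, ['R'], ['Q'], [42], stat_func=np.median)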
5,070 | pypa/pipenv | pipenv/vendor/jinja2/filters.py | do_max | def do_max(environment, value, case_sensitive=False, attribute=None):
"""Return the largest item from the sequence.
.. sourcecode:: jinja
{{ [1, 2, 3]|max }}
-> 3
:param case_sensitive: Treat upper and lower case strings as distinct.
:param attribute: Get the object with the max value of this attribute.
"""
return _min_or_max(environment, value, max, case_sensitive, attribute) | python | def do_max(environment, value, case_sensitive=False, attribute=None):
"""Return the largest item from the sequence.
.. sourcecode:: jinja
{{ [1, 2, 3]|max }}
-> 3
:param case_sensitive: Treat upper and lower case strings as distinct.
:param attribute: Get the object with the max value of this attribute.
"""
return _min_or_max(environment, value, max, case_sensitive, attribute) | ['def', 'do_max', '(', 'environment', ',', 'value', ',', 'case_sensitive', '=', 'False', ',', 'attribute', '=', 'None', ')', ':', 'return', '_min_or_max', '(', 'environment', ',', 'value', ',', 'max', ',', 'case_sensitive', ',', 'attribute', ')'] | Return the largest item from the sequence.
.. sourcecode:: jinja
{{ [1, 2, 3]|max }}
-> 3
:param case_sensitive: Treat upper and lower case strings as distinct.
:param attribute: Get the object with the max value of this attribute. | ['Return', 'the', 'largest', 'item', 'from', 'the', 'sequence', '.'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/filters.py#L341-L352 |
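A quick illustration of the filter from plain Jinja2, covering both the simple sequence case from the docstring and the attribute form.

from jinja2 import Environment

env = Environment()
print(env.from_string('{{ [1, 2, 3]|max }}').render())           # 3
print(env.from_string("{{ users|max(attribute='age') }}").render(
    users=[{'age': 30}, {'age': 41}]))                            # {'age': 41}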
5,071 | googleapis/google-cloud-python | firestore/google/cloud/firestore_v1beta1/_helpers.py | pbs_for_set_with_merge | def pbs_for_set_with_merge(document_path, document_data, merge):
"""Make ``Write`` protobufs for ``set()`` methods.
Args:
document_path (str): A fully-qualified document path.
document_data (dict): Property names and values to use for
replacing a document.
merge (Optional[bool] or Optional[List<apispec>]):
If True, merge all fields; else, merge only the named fields.
Returns:
List[google.cloud.firestore_v1beta1.types.Write]: One
or two ``Write`` protobuf instances for ``set()``.
"""
extractor = DocumentExtractorForMerge(document_data)
extractor.apply_merge(merge)
merge_empty = not document_data
write_pbs = []
if extractor.has_updates or merge_empty:
write_pbs.append(
extractor.get_update_pb(document_path, allow_empty_mask=merge_empty)
)
if extractor.transform_paths:
transform_pb = extractor.get_transform_pb(document_path)
write_pbs.append(transform_pb)
return write_pbs | python | def pbs_for_set_with_merge(document_path, document_data, merge):
"""Make ``Write`` protobufs for ``set()`` methods.
Args:
document_path (str): A fully-qualified document path.
document_data (dict): Property names and values to use for
replacing a document.
merge (Optional[bool] or Optional[List<apispec>]):
If True, merge all fields; else, merge only the named fields.
Returns:
List[google.cloud.firestore_v1beta1.types.Write]: One
or two ``Write`` protobuf instances for ``set()``.
"""
extractor = DocumentExtractorForMerge(document_data)
extractor.apply_merge(merge)
merge_empty = not document_data
write_pbs = []
if extractor.has_updates or merge_empty:
write_pbs.append(
extractor.get_update_pb(document_path, allow_empty_mask=merge_empty)
)
if extractor.transform_paths:
transform_pb = extractor.get_transform_pb(document_path)
write_pbs.append(transform_pb)
return write_pbs | ['def', 'pbs_for_set_with_merge', '(', 'document_path', ',', 'document_data', ',', 'merge', ')', ':', 'extractor', '=', 'DocumentExtractorForMerge', '(', 'document_data', ')', 'extractor', '.', 'apply_merge', '(', 'merge', ')', 'merge_empty', '=', 'not', 'document_data', 'write_pbs', '=', '[', ']', 'if', 'extractor', '.', 'has_updates', 'or', 'merge_empty', ':', 'write_pbs', '.', 'append', '(', 'extractor', '.', 'get_update_pb', '(', 'document_path', ',', 'allow_empty_mask', '=', 'merge_empty', ')', ')', 'if', 'extractor', '.', 'transform_paths', ':', 'transform_pb', '=', 'extractor', '.', 'get_transform_pb', '(', 'document_path', ')', 'write_pbs', '.', 'append', '(', 'transform_pb', ')', 'return', 'write_pbs'] | Make ``Write`` protobufs for ``set()`` methods.
Args:
document_path (str): A fully-qualified document path.
document_data (dict): Property names and values to use for
replacing a document.
merge (Optional[bool] or Optional[List<apispec>]):
If True, merge all fields; else, merge only the named fields.
Returns:
List[google.cloud.firestore_v1beta1.types.Write]: One
or two ``Write`` protobuf instances for ``set()``. | ['Make', 'Write', 'protobufs', 'for', 'set', '()', 'methods', '.'] | train | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/_helpers.py#L722-L752 |
5,072 | cloudera/cm_api | python/src/cm_api/endpoints/services.py | ApiService.get_snapshot_command_history | def get_snapshot_command_history(self, name, limit=20, offset=0, view=None):
"""
Retrieve a list of commands triggered by a snapshot policy.
@param name: The name of the snapshot policy.
@param limit: Maximum number of commands to retrieve.
@param offset: Index of first command to retrieve.
@param view: View to materialize. Valid values are 'full', 'summary', 'export', 'export_redacted'.
@return: List of commands triggered by a snapshot policy.
@since: API v6
"""
params = {
'limit': limit,
'offset': offset,
}
if view:
params['view'] = view
return self._get("snapshots/policies/%s/history" % name, ApiSnapshotCommand, True,
params=params, api_version=6) | python | def get_snapshot_command_history(self, name, limit=20, offset=0, view=None):
"""
Retrieve a list of commands triggered by a snapshot policy.
@param name: The name of the snapshot policy.
@param limit: Maximum number of commands to retrieve.
@param offset: Index of first command to retrieve.
@param view: View to materialize. Valid values are 'full', 'summary', 'export', 'export_redacted'.
@return: List of commands triggered by a snapshot policy.
@since: API v6
"""
params = {
'limit': limit,
'offset': offset,
}
if view:
params['view'] = view
return self._get("snapshots/policies/%s/history" % name, ApiSnapshotCommand, True,
params=params, api_version=6) | ['def', 'get_snapshot_command_history', '(', 'self', ',', 'name', ',', 'limit', '=', '20', ',', 'offset', '=', '0', ',', 'view', '=', 'None', ')', ':', 'params', '=', '{', "'limit'", ':', 'limit', ',', "'offset'", ':', 'offset', ',', '}', 'if', 'view', ':', 'params', '[', "'view'", ']', '=', 'view', 'return', 'self', '.', '_get', '(', '"snapshots/policies/%s/history"', '%', 'name', ',', 'ApiSnapshotCommand', ',', 'True', ',', 'params', '=', 'params', ',', 'api_version', '=', '6', ')'] | Retrieve a list of commands triggered by a snapshot policy.
@param name: The name of the snapshot policy.
@param limit: Maximum number of commands to retrieve.
@param offset: Index of first command to retrieve.
@param view: View to materialize. Valid values are 'full', 'summary', 'export', 'export_redacted'.
@return: List of commands triggered by a snapshot policy.
@since: API v6 | ['Retrieve', 'a', 'list', 'of', 'commands', 'triggered', 'by', 'a', 'snapshot', 'policy', '.'] | train | https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/services.py#L1672-L1691 |
5,073 | mitsei/dlkit | dlkit/records/assessment/basic/drag_and_drop_records.py | DragAndDropAnswerFormRecord.clear_coordinate_conditions | def clear_coordinate_conditions(self):
"""stub"""
if (self.get_zone_conditions_metadata().is_read_only() or
self.get_zone_conditions_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['coordinateConditions'] = \
self._coordinate_conditions_metadata['default_object_values'][0] | python | def clear_coordinate_conditions(self):
"""stub"""
if (self.get_zone_conditions_metadata().is_read_only() or
self.get_zone_conditions_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['coordinateConditions'] = \
self._coordinate_conditions_metadata['default_object_values'][0] | ['def', 'clear_coordinate_conditions', '(', 'self', ')', ':', 'if', '(', 'self', '.', 'get_zone_conditions_metadata', '(', ')', '.', 'is_read_only', '(', ')', 'or', 'self', '.', 'get_zone_conditions_metadata', '(', ')', '.', 'is_required', '(', ')', ')', ':', 'raise', 'NoAccess', '(', ')', 'self', '.', 'my_osid_object_form', '.', '_my_map', '[', "'coordinateConditions'", ']', '=', 'self', '.', '_coordinate_conditions_metadata', '[', "'default_object_values'", ']', '[', '0', ']'] | stub | ['stub'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/basic/drag_and_drop_records.py#L384-L390 |
5,074 | mdsol/rwslib | rwslib/extras/audit_event/parser.py | ODMTargetParser.data | def data(self, data):
"""Called for text between tags"""
if self.state == STATE_SOURCE_ID:
self.context.audit_record.source_id = int(data) # Audit ids can be 64 bits
elif self.state == STATE_DATETIME:
dt = datetime.datetime.strptime(data, "%Y-%m-%dT%H:%M:%S")
self.get_parent_element().datetimestamp = dt
elif self.state == STATE_REASON_FOR_CHANGE:
self.context.audit_record.reason_for_change = data.strip() or None # Convert a result of '' to None.
self.state = STATE_NONE | python | def data(self, data):
"""Called for text between tags"""
if self.state == STATE_SOURCE_ID:
self.context.audit_record.source_id = int(data) # Audit ids can be 64 bits
elif self.state == STATE_DATETIME:
dt = datetime.datetime.strptime(data, "%Y-%m-%dT%H:%M:%S")
self.get_parent_element().datetimestamp = dt
elif self.state == STATE_REASON_FOR_CHANGE:
self.context.audit_record.reason_for_change = data.strip() or None # Convert a result of '' to None.
self.state = STATE_NONE | ['def', 'data', '(', 'self', ',', 'data', ')', ':', 'if', 'self', '.', 'state', '==', 'STATE_SOURCE_ID', ':', 'self', '.', 'context', '.', 'audit_record', '.', 'source_id', '=', 'int', '(', 'data', ')', '# Audit ids can be 64 bits', 'elif', 'self', '.', 'state', '==', 'STATE_DATETIME', ':', 'dt', '=', 'datetime', '.', 'datetime', '.', 'strptime', '(', 'data', ',', '"%Y-%m-%dT%H:%M:%S"', ')', 'self', '.', 'get_parent_element', '(', ')', '.', 'datetimestamp', '=', 'dt', 'elif', 'self', '.', 'state', '==', 'STATE_REASON_FOR_CHANGE', ':', 'self', '.', 'context', '.', 'audit_record', '.', 'reason_for_change', '=', 'data', '.', 'strip', '(', ')', 'or', 'None', "# Convert a result of '' to None.", 'self', '.', 'state', '=', 'STATE_NONE'] | Called for text between tags | ['Called', 'for', 'text', 'between', 'tags'] | train | https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/extras/audit_event/parser.py#L268-L277 |
5,075 | cox-labs/perseuspy | perseuspy/parameters.py | singleChoiceParam | def singleChoiceParam(parameters, name, type_converter = str):
""" single choice parameter value. Returns -1 if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'"""
param = parameters.find(".//SingleChoiceParam[@Name='{name}']".format(name=name))
value = int(param.find('Value').text)
values = param.find('Values')
if value < 0:
return value
return type_converter(values[value].text) | python | def singleChoiceParam(parameters, name, type_converter = str):
""" single choice parameter value. Returns -1 if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'"""
param = parameters.find(".//SingleChoiceParam[@Name='{name}']".format(name=name))
value = int(param.find('Value').text)
values = param.find('Values')
if value < 0:
return value
return type_converter(values[value].text) | ['def', 'singleChoiceParam', '(', 'parameters', ',', 'name', ',', 'type_converter', '=', 'str', ')', ':', 'param', '=', 'parameters', '.', 'find', '(', '".//SingleChoiceParam[@Name=\'{name}\']"', '.', 'format', '(', 'name', '=', 'name', ')', ')', 'value', '=', 'int', '(', 'param', '.', 'find', '(', "'Value'", ')', '.', 'text', ')', 'values', '=', 'param', '.', 'find', '(', "'Values'", ')', 'if', 'value', '<', '0', ':', 'return', 'value', 'return', 'type_converter', '(', 'values', '[', 'value', ']', '.', 'text', ')'] | single choice parameter value. Returns -1 if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str | ['single', 'choice', 'parameter', 'value', '.', 'Returns', '-', '1', 'if', 'no', 'value', 'was', 'chosen', '.', ':', 'param', 'parameters', ':', 'the', 'parameters', 'tree', '.', ':', 'param', 'name', ':', 'the', 'name', 'of', 'the', 'parameter', '.', ':', 'param', 'type_converter', ':', 'function', 'to', 'convert', 'the', 'chosen', 'value', 'to', 'a', 'different', 'type', '(', 'e', '.', 'g', '.', 'str', 'float', 'int', ')', '.', 'default', '=', 'str'] | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/parameters.py#L57-L67 |
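A self-contained sketch using xml.etree to build the element layout the XPath above expects; the parameter name and values in the XML are made up for illustration.

import xml.etree.ElementTree as ET

xml = '''
<Parameters>
  <SingleChoiceParam Name="Quantification">
    <Value>1</Value>
    <Values>
      <Item>LFQ</Item>
      <Item>iBAQ</Item>
    </Values>
  </SingleChoiceParam>
</Parameters>
'''
parameters = ET.fromstring(xml)
print(singleChoiceParam(parameters, 'Quantification'))  # value 1 selects the second entry -> 'iBAQ'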
5,076 | google/pyu2f | pyu2f/convenience/customauthenticator.py | CustomAuthenticator._BuildAuthenticatorResponse | def _BuildAuthenticatorResponse(self, app_id, client_data, plugin_response):
"""Builds the response to return to the caller."""
encoded_client_data = self._Base64Encode(client_data)
signature_data = str(plugin_response['signatureData'])
key_handle = str(plugin_response['keyHandle'])
response = {
'clientData': encoded_client_data,
'signatureData': signature_data,
'applicationId': app_id,
'keyHandle': key_handle,
}
return response | python | def _BuildAuthenticatorResponse(self, app_id, client_data, plugin_response):
"""Builds the response to return to the caller."""
encoded_client_data = self._Base64Encode(client_data)
signature_data = str(plugin_response['signatureData'])
key_handle = str(plugin_response['keyHandle'])
response = {
'clientData': encoded_client_data,
'signatureData': signature_data,
'applicationId': app_id,
'keyHandle': key_handle,
}
return response | ['def', '_BuildAuthenticatorResponse', '(', 'self', ',', 'app_id', ',', 'client_data', ',', 'plugin_response', ')', ':', 'encoded_client_data', '=', 'self', '.', '_Base64Encode', '(', 'client_data', ')', 'signature_data', '=', 'str', '(', 'plugin_response', '[', "'signatureData'", ']', ')', 'key_handle', '=', 'str', '(', 'plugin_response', '[', "'keyHandle'", ']', ')', 'response', '=', '{', "'clientData'", ':', 'encoded_client_data', ',', "'signatureData'", ':', 'signature_data', ',', "'applicationId'", ':', 'app_id', ',', "'keyHandle'", ':', 'key_handle', ',', '}', 'return', 'response'] | Builds the response to return to the caller. | ['Builds', 'the', 'response', 'to', 'return', 'to', 'the', 'caller', '.'] | train | https://github.com/google/pyu2f/blob/8742d798027a21cbde0704aac0e93269bad2c3d0/pyu2f/convenience/customauthenticator.py#L156-L168 |
5,077 | AshleySetter/optoanalysis | optoanalysis/optoanalysis/optoanalysis.py | DataObject.plot_PSD | def plot_PSD(self, xlim=None, units="kHz", show_fig=True, timeStart=None, timeEnd=None, *args, **kwargs):
"""
plot the pulse spectral density.
Parameters
----------
xlim : array_like, optional
The x limits of the plotted PSD [LowerLimit, UpperLimit]
Default value is [0, SampleFreq/2]
units : string, optional
Units of frequency to plot on the x axis - defaults to kHz
show_fig : bool, optional
If True runs plt.show() before returning figure
if False it just returns the figure object.
(the default is True, it shows the figure)
Returns
-------
fig : matplotlib.figure.Figure object
The figure object created
ax : matplotlib.axes.Axes object
The subplot object created
"""
# self.get_PSD()
if timeStart == None and timeEnd == None:
freqs = self.freqs
PSD = self.PSD
else:
freqs, PSD = self.get_PSD(timeStart=timeStart, timeEnd=timeEnd)
unit_prefix = units[:-2]
if xlim == None:
xlim = [0, unit_conversion(self.SampleFreq/2, unit_prefix)]
fig = _plt.figure(figsize=properties['default_fig_size'])
ax = fig.add_subplot(111)
ax.semilogy(unit_conversion(freqs, unit_prefix), PSD, *args, **kwargs)
ax.set_xlabel("Frequency ({})".format(units))
ax.set_xlim(xlim)
ax.grid(which="major")
ax.set_ylabel("$S_{xx}$ ($V^2/Hz$)")
if show_fig == True:
_plt.show()
return fig, ax | python | def plot_PSD(self, xlim=None, units="kHz", show_fig=True, timeStart=None, timeEnd=None, *args, **kwargs):
"""
plot the pulse spectral density.
Parameters
----------
xlim : array_like, optional
The x limits of the plotted PSD [LowerLimit, UpperLimit]
Default value is [0, SampleFreq/2]
units : string, optional
Units of frequency to plot on the x axis - defaults to kHz
show_fig : bool, optional
If True runs plt.show() before returning figure
if False it just returns the figure object.
(the default is True, it shows the figure)
Returns
-------
fig : matplotlib.figure.Figure object
The figure object created
ax : matplotlib.axes.Axes object
The subplot object created
"""
# self.get_PSD()
if timeStart == None and timeEnd == None:
freqs = self.freqs
PSD = self.PSD
else:
freqs, PSD = self.get_PSD(timeStart=timeStart, timeEnd=timeEnd)
unit_prefix = units[:-2]
if xlim == None:
xlim = [0, unit_conversion(self.SampleFreq/2, unit_prefix)]
fig = _plt.figure(figsize=properties['default_fig_size'])
ax = fig.add_subplot(111)
ax.semilogy(unit_conversion(freqs, unit_prefix), PSD, *args, **kwargs)
ax.set_xlabel("Frequency ({})".format(units))
ax.set_xlim(xlim)
ax.grid(which="major")
ax.set_ylabel("$S_{xx}$ ($V^2/Hz$)")
if show_fig == True:
_plt.show()
return fig, ax | ['def', 'plot_PSD', '(', 'self', ',', 'xlim', '=', 'None', ',', 'units', '=', '"kHz"', ',', 'show_fig', '=', 'True', ',', 'timeStart', '=', 'None', ',', 'timeEnd', '=', 'None', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '# self.get_PSD()', 'if', 'timeStart', '==', 'None', 'and', 'timeEnd', '==', 'None', ':', 'freqs', '=', 'self', '.', 'freqs', 'PSD', '=', 'self', '.', 'PSD', 'else', ':', 'freqs', ',', 'PSD', '=', 'self', '.', 'get_PSD', '(', 'timeStart', '=', 'timeStart', ',', 'timeEnd', '=', 'timeEnd', ')', 'unit_prefix', '=', 'units', '[', ':', '-', '2', ']', 'if', 'xlim', '==', 'None', ':', 'xlim', '=', '[', '0', ',', 'unit_conversion', '(', 'self', '.', 'SampleFreq', '/', '2', ',', 'unit_prefix', ')', ']', 'fig', '=', '_plt', '.', 'figure', '(', 'figsize', '=', 'properties', '[', "'default_fig_size'", ']', ')', 'ax', '=', 'fig', '.', 'add_subplot', '(', '111', ')', 'ax', '.', 'semilogy', '(', 'unit_conversion', '(', 'freqs', ',', 'unit_prefix', ')', ',', 'PSD', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'ax', '.', 'set_xlabel', '(', '"Frequency ({})"', '.', 'format', '(', 'units', ')', ')', 'ax', '.', 'set_xlim', '(', 'xlim', ')', 'ax', '.', 'grid', '(', 'which', '=', '"major"', ')', 'ax', '.', 'set_ylabel', '(', '"$S_{xx}$ ($V^2/Hz$)"', ')', 'if', 'show_fig', '==', 'True', ':', '_plt', '.', 'show', '(', ')', 'return', 'fig', ',', 'ax'] | plot the pulse spectral density.
Parameters
----------
xlim : array_like, optional
The x limits of the plotted PSD [LowerLimit, UpperLimit]
Default value is [0, SampleFreq/2]
units : string, optional
Units of frequency to plot on the x axis - defaults to kHz
show_fig : bool, optional
If True runs plt.show() before returning figure
if False it just returns the figure object.
(the default is True, it shows the figure)
Returns
-------
fig : matplotlib.figure.Figure object
The figure object created
ax : matplotlib.axes.Axes object
The subplot object created | ['plot', 'the', 'pulse', 'spectral', 'density', '.'] | train | https://github.com/AshleySetter/optoanalysis/blob/9d390acc834d70024d47b574aea14189a5a5714e/optoanalysis/optoanalysis/optoanalysis.py#L383-L425 |
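A usage sketch; the loader function name and the data file are assumptions, not part of the snippet above.

import optoanalysis

data = optoanalysis.load_data('trapped_particle.trc')   # hypothetical trace file and loader
fig, ax = data.plot_PSD(xlim=[0, 300], units='kHz', show_fig=False)  # keep the figure for further styling
fig.savefig('psd.png')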
5,078 | tkaemming/django-subdomains | subdomains/utils.py | reverse | def reverse(viewname, subdomain=None, scheme=None, args=None, kwargs=None,
current_app=None):
"""
Reverses a URL from the given parameters, in a similar fashion to
:meth:`django.core.urlresolvers.reverse`.
:param viewname: the name of URL
:param subdomain: the subdomain to use for URL reversing
:param scheme: the scheme to use when generating the full URL
:param args: positional arguments used for URL reversing
:param kwargs: named arguments used for URL reversing
:param current_app: hint for the currently executing application
"""
urlconf = settings.SUBDOMAIN_URLCONFS.get(subdomain, settings.ROOT_URLCONF)
domain = get_domain()
if subdomain is not None:
domain = '%s.%s' % (subdomain, domain)
path = simple_reverse(viewname, urlconf=urlconf, args=args, kwargs=kwargs,
current_app=current_app)
return urljoin(domain, path, scheme=scheme) | python | def reverse(viewname, subdomain=None, scheme=None, args=None, kwargs=None,
current_app=None):
"""
Reverses a URL from the given parameters, in a similar fashion to
:meth:`django.core.urlresolvers.reverse`.
:param viewname: the name of URL
:param subdomain: the subdomain to use for URL reversing
:param scheme: the scheme to use when generating the full URL
:param args: positional arguments used for URL reversing
:param kwargs: named arguments used for URL reversing
:param current_app: hint for the currently executing application
"""
urlconf = settings.SUBDOMAIN_URLCONFS.get(subdomain, settings.ROOT_URLCONF)
domain = get_domain()
if subdomain is not None:
domain = '%s.%s' % (subdomain, domain)
path = simple_reverse(viewname, urlconf=urlconf, args=args, kwargs=kwargs,
current_app=current_app)
return urljoin(domain, path, scheme=scheme) | ['def', 'reverse', '(', 'viewname', ',', 'subdomain', '=', 'None', ',', 'scheme', '=', 'None', ',', 'args', '=', 'None', ',', 'kwargs', '=', 'None', ',', 'current_app', '=', 'None', ')', ':', 'urlconf', '=', 'settings', '.', 'SUBDOMAIN_URLCONFS', '.', 'get', '(', 'subdomain', ',', 'settings', '.', 'ROOT_URLCONF', ')', 'domain', '=', 'get_domain', '(', ')', 'if', 'subdomain', 'is', 'not', 'None', ':', 'domain', '=', "'%s.%s'", '%', '(', 'subdomain', ',', 'domain', ')', 'path', '=', 'simple_reverse', '(', 'viewname', ',', 'urlconf', '=', 'urlconf', ',', 'args', '=', 'args', ',', 'kwargs', '=', 'kwargs', ',', 'current_app', '=', 'current_app', ')', 'return', 'urljoin', '(', 'domain', ',', 'path', ',', 'scheme', '=', 'scheme', ')'] | Reverses a URL from the given parameters, in a similar fashion to
:meth:`django.core.urlresolvers.reverse`.
:param viewname: the name of URL
:param subdomain: the subdomain to use for URL reversing
:param scheme: the scheme to use when generating the full URL
:param args: positional arguments used for URL reversing
:param kwargs: named arguments used for URL reversing
:param current_app: hint for the currently executing application | ['Reverses', 'a', 'URL', 'from', 'the', 'given', 'parameters', 'in', 'a', 'similar', 'fashion', 'to', ':', 'meth', ':', 'django', '.', 'core', '.', 'urlresolvers', '.', 'reverse', '.'] | train | https://github.com/tkaemming/django-subdomains/blob/be6cc1c556a2007287ef4e647ea1784cf7690a44/subdomains/utils.py#L41-L62 |
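A short usage sketch; it assumes a configured Django project with SUBDOMAIN_URLCONFS and a Site domain set up, and the view name, subdomain and resulting URL are placeholders.

from subdomains.utils import reverse

# e.g. with SUBDOMAIN_URLCONFS = {None: 'urls.default', 'api': 'urls.api'} in settings
url = reverse('user-detail', subdomain='api', scheme='https', kwargs={'pk': 7})
# -> something like 'https://api.example.com/users/7/' for an assumed example.com domain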
5,079 | allianceauth/allianceauth | allianceauth/thirdparty/navhelper/templatetags/navactive.py | renavactive | def renavactive(request, pattern):
"""
{% renavactive request "^/a_regex" %}
"""
if re.search(pattern, request.path):
return getattr(settings, "NAVHELPER_ACTIVE_CLASS", "active")
return getattr(settings, "NAVHELPER_NOT_ACTIVE_CLASS", "") | python | def renavactive(request, pattern):
"""
{% renavactive request "^/a_regex" %}
"""
if re.search(pattern, request.path):
return getattr(settings, "NAVHELPER_ACTIVE_CLASS", "active")
return getattr(settings, "NAVHELPER_NOT_ACTIVE_CLASS", "") | ['def', 'renavactive', '(', 'request', ',', 'pattern', ')', ':', 'if', 're', '.', 'search', '(', 'pattern', ',', 'request', '.', 'path', ')', ':', 'return', 'getattr', '(', 'settings', ',', '"NAVHELPER_ACTIVE_CLASS"', ',', '"active"', ')', 'return', 'getattr', '(', 'settings', ',', '"NAVHELPER_NOT_ACTIVE_CLASS"', ',', '""', ')'] | {% renavactive request "^/a_regex" %} | ['{', '%', 'renavactive', 'request', '^', '/', 'a_regex', '%', '}'] | train | https://github.com/allianceauth/allianceauth/blob/6585b07e96571a99a4d6dc03cc03f9b8c8f690ca/allianceauth/thirdparty/navhelper/templatetags/navactive.py#L33-L39 |
5,080 | tango-controls/pytango | tango/databaseds/database.py | DataBase.DbGetServerInfo | def DbGetServerInfo(self, argin):
""" Get info about host, mode and level for specified server
:param argin: server name
:type: tango.DevString
:return: server info
:rtype: tango.DevVarStringArray """
self._log.debug("In DbGetServerInfo()")
return self.db.get_server_info(argin) | python | def DbGetServerInfo(self, argin):
""" Get info about host, mode and level for specified server
:param argin: server name
:type: tango.DevString
:return: server info
:rtype: tango.DevVarStringArray """
self._log.debug("In DbGetServerInfo()")
return self.db.get_server_info(argin) | ['def', 'DbGetServerInfo', '(', 'self', ',', 'argin', ')', ':', 'self', '.', '_log', '.', 'debug', '(', '"In DbGetServerInfo()"', ')', 'return', 'self', '.', 'db', '.', 'get_server_info', '(', 'argin', ')'] | Get info about host, mode and level for specified server
:param argin: server name
:type: tango.DevString
:return: server info
:rtype: tango.DevVarStringArray | ['Get', 'info', 'about', 'host', 'mode', 'and', 'level', 'for', 'specified', 'server'] | train | https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/databaseds/database.py#L1128-L1136 |
5,081 | bcbio/bcbio-nextgen | bcbio/pipeline/qcsummary.py | _run_qc_tools | def _run_qc_tools(bam_file, data):
"""Run a set of third party quality control tools, returning QC directory and metrics.
:param bam_file: alignments in bam format
:param data: dict with all configuration information
:returns: dict with output of different tools
"""
from bcbio.qc import (atropos, contamination, coverage, damage, fastqc, kraken,
qsignature, qualimap, samtools, picard, srna, umi, variant,
viral, preseq, chipseq)
tools = {"fastqc": fastqc.run,
"atropos": atropos.run,
"small-rna": srna.run,
"samtools": samtools.run,
"qualimap": qualimap.run,
"qualimap_rnaseq": qualimap.run_rnaseq,
"qsignature": qsignature.run,
"contamination": contamination.run,
"coverage": coverage.run,
"damage": damage.run,
"variants": variant.run,
"peddy": peddy.run_qc,
"kraken": kraken.run,
"picard": picard.run,
"umi": umi.run,
"viral": viral.run,
"preseq": preseq.run,
"chipqc": chipseq.run
}
qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"]))
metrics = {}
qc_out = utils.deepish_copy(dd.get_summary_qc(data))
for program_name in dd.get_algorithm_qc(data):
if not bam_file and program_name != "kraken": # kraken doesn't need bam
continue
if dd.get_phenotype(data) == "germline" and program_name != "variants":
continue
qc_fn = tools[program_name]
cur_qc_dir = os.path.join(qc_dir, program_name)
out = qc_fn(bam_file, data, cur_qc_dir)
qc_files = None
if out and isinstance(out, dict):
# Check for metrics output, two cases:
# 1. output with {"metrics"} and files ("base")
if "metrics" in out:
metrics.update(out.pop("metrics"))
# 2. a dictionary of metrics
elif "base" not in out:
metrics.update(out)
# Check for files only output
if "base" in out:
qc_files = out
elif out and isinstance(out, six.string_types) and os.path.exists(out):
qc_files = {"base": out, "secondary": []}
if not qc_files:
qc_files = _organize_qc_files(program_name, cur_qc_dir)
if qc_files:
qc_out[program_name] = qc_files
metrics["Name"] = dd.get_sample_name(data)
metrics["Quality format"] = dd.get_quality_format(data).lower()
return {"qc": qc_out, "metrics": metrics} | python | def _run_qc_tools(bam_file, data):
"""Run a set of third party quality control tools, returning QC directory and metrics.
:param bam_file: alignments in bam format
:param data: dict with all configuration information
:returns: dict with output of different tools
"""
from bcbio.qc import (atropos, contamination, coverage, damage, fastqc, kraken,
qsignature, qualimap, samtools, picard, srna, umi, variant,
viral, preseq, chipseq)
tools = {"fastqc": fastqc.run,
"atropos": atropos.run,
"small-rna": srna.run,
"samtools": samtools.run,
"qualimap": qualimap.run,
"qualimap_rnaseq": qualimap.run_rnaseq,
"qsignature": qsignature.run,
"contamination": contamination.run,
"coverage": coverage.run,
"damage": damage.run,
"variants": variant.run,
"peddy": peddy.run_qc,
"kraken": kraken.run,
"picard": picard.run,
"umi": umi.run,
"viral": viral.run,
"preseq": preseq.run,
"chipqc": chipseq.run
}
qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"]))
metrics = {}
qc_out = utils.deepish_copy(dd.get_summary_qc(data))
for program_name in dd.get_algorithm_qc(data):
if not bam_file and program_name != "kraken": # kraken doesn't need bam
continue
if dd.get_phenotype(data) == "germline" and program_name != "variants":
continue
qc_fn = tools[program_name]
cur_qc_dir = os.path.join(qc_dir, program_name)
out = qc_fn(bam_file, data, cur_qc_dir)
qc_files = None
if out and isinstance(out, dict):
# Check for metrics output, two cases:
# 1. output with {"metrics"} and files ("base")
if "metrics" in out:
metrics.update(out.pop("metrics"))
# 2. a dictionary of metrics
elif "base" not in out:
metrics.update(out)
# Check for files only output
if "base" in out:
qc_files = out
elif out and isinstance(out, six.string_types) and os.path.exists(out):
qc_files = {"base": out, "secondary": []}
if not qc_files:
qc_files = _organize_qc_files(program_name, cur_qc_dir)
if qc_files:
qc_out[program_name] = qc_files
metrics["Name"] = dd.get_sample_name(data)
metrics["Quality format"] = dd.get_quality_format(data).lower()
return {"qc": qc_out, "metrics": metrics} | ['def', '_run_qc_tools', '(', 'bam_file', ',', 'data', ')', ':', 'from', 'bcbio', '.', 'qc', 'import', '(', 'atropos', ',', 'contamination', ',', 'coverage', ',', 'damage', ',', 'fastqc', ',', 'kraken', ',', 'qsignature', ',', 'qualimap', ',', 'samtools', ',', 'picard', ',', 'srna', ',', 'umi', ',', 'variant', ',', 'viral', ',', 'preseq', ',', 'chipseq', ')', 'tools', '=', '{', '"fastqc"', ':', 'fastqc', '.', 'run', ',', '"atropos"', ':', 'atropos', '.', 'run', ',', '"small-rna"', ':', 'srna', '.', 'run', ',', '"samtools"', ':', 'samtools', '.', 'run', ',', '"qualimap"', ':', 'qualimap', '.', 'run', ',', '"qualimap_rnaseq"', ':', 'qualimap', '.', 'run_rnaseq', ',', '"qsignature"', ':', 'qsignature', '.', 'run', ',', '"contamination"', ':', 'contamination', '.', 'run', ',', '"coverage"', ':', 'coverage', '.', 'run', ',', '"damage"', ':', 'damage', '.', 'run', ',', '"variants"', ':', 'variant', '.', 'run', ',', '"peddy"', ':', 'peddy', '.', 'run_qc', ',', '"kraken"', ':', 'kraken', '.', 'run', ',', '"picard"', ':', 'picard', '.', 'run', ',', '"umi"', ':', 'umi', '.', 'run', ',', '"viral"', ':', 'viral', '.', 'run', ',', '"preseq"', ':', 'preseq', '.', 'run', ',', '"chipqc"', ':', 'chipseq', '.', 'run', '}', 'qc_dir', '=', 'utils', '.', 'safe_makedir', '(', 'os', '.', 'path', '.', 'join', '(', 'data', '[', '"dirs"', ']', '[', '"work"', ']', ',', '"qc"', ',', 'data', '[', '"description"', ']', ')', ')', 'metrics', '=', '{', '}', 'qc_out', '=', 'utils', '.', 'deepish_copy', '(', 'dd', '.', 'get_summary_qc', '(', 'data', ')', ')', 'for', 'program_name', 'in', 'dd', '.', 'get_algorithm_qc', '(', 'data', ')', ':', 'if', 'not', 'bam_file', 'and', 'program_name', '!=', '"kraken"', ':', "# kraken doesn't need bam", 'continue', 'if', 'dd', '.', 'get_phenotype', '(', 'data', ')', '==', '"germline"', 'and', 'program_name', '!=', '"variants"', ':', 'continue', 'qc_fn', '=', 'tools', '[', 'program_name', ']', 'cur_qc_dir', '=', 'os', '.', 'path', '.', 'join', '(', 'qc_dir', ',', 'program_name', ')', 'out', '=', 'qc_fn', '(', 'bam_file', ',', 'data', ',', 'cur_qc_dir', ')', 'qc_files', '=', 'None', 'if', 'out', 'and', 'isinstance', '(', 'out', ',', 'dict', ')', ':', '# Check for metrics output, two cases:', '# 1. output with {"metrics"} and files ("base")', 'if', '"metrics"', 'in', 'out', ':', 'metrics', '.', 'update', '(', 'out', '.', 'pop', '(', '"metrics"', ')', ')', '# 2. a dictionary of metrics', 'elif', '"base"', 'not', 'in', 'out', ':', 'metrics', '.', 'update', '(', 'out', ')', '# Check for files only output', 'if', '"base"', 'in', 'out', ':', 'qc_files', '=', 'out', 'elif', 'out', 'and', 'isinstance', '(', 'out', ',', 'six', '.', 'string_types', ')', 'and', 'os', '.', 'path', '.', 'exists', '(', 'out', ')', ':', 'qc_files', '=', '{', '"base"', ':', 'out', ',', '"secondary"', ':', '[', ']', '}', 'if', 'not', 'qc_files', ':', 'qc_files', '=', '_organize_qc_files', '(', 'program_name', ',', 'cur_qc_dir', ')', 'if', 'qc_files', ':', 'qc_out', '[', 'program_name', ']', '=', 'qc_files', 'metrics', '[', '"Name"', ']', '=', 'dd', '.', 'get_sample_name', '(', 'data', ')', 'metrics', '[', '"Quality format"', ']', '=', 'dd', '.', 'get_quality_format', '(', 'data', ')', '.', 'lower', '(', ')', 'return', '{', '"qc"', ':', 'qc_out', ',', '"metrics"', ':', 'metrics', '}'] | Run a set of third party quality control tools, returning QC directory and metrics.
:param bam_file: alignments in bam format
:param data: dict with all configuration information
:returns: dict with output of different tools | ['Run', 'a', 'set', 'of', 'third', 'party', 'quality', 'control', 'tools', 'returning', 'QC', 'directory', 'and', 'metrics', '.'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L125-L187 |
5,082 | hasgeek/coaster | coaster/views/classview.py | ClassView.is_available | def is_available(self):
"""
Returns `True` if *any* view handler in the class is currently
available via its `is_available` method.
"""
if self.is_always_available:
return True
for viewname in self.__views__:
if getattr(self, viewname).is_available():
return True
return False | python | def is_available(self):
"""
Returns `True` if *any* view handler in the class is currently
available via its `is_available` method.
"""
if self.is_always_available:
return True
for viewname in self.__views__:
if getattr(self, viewname).is_available():
return True
return False | ['def', 'is_available', '(', 'self', ')', ':', 'if', 'self', '.', 'is_always_available', ':', 'return', 'True', 'for', 'viewname', 'in', 'self', '.', '__views__', ':', 'if', 'getattr', '(', 'self', ',', 'viewname', ')', '.', 'is_available', '(', ')', ':', 'return', 'True', 'return', 'False'] | Returns `True` if *any* view handler in the class is currently
available via its `is_available` method. | ['Returns', 'True', 'if', '*', 'any', '*', 'view', 'handler', 'in', 'the', 'class', 'is', 'currently', 'available', 'via', 'its', 'is_available', 'method', '.'] | train | https://github.com/hasgeek/coaster/blob/07f7eb5d5f516e22fa14fdf4dc70e0ae13ee398d/coaster/views/classview.py#L387-L397 |
5,083 | T-002/pycast | pycast/errors/meanabsolutedeviationerror.py | MeanAbsoluteDeviationError.local_error | def local_error(self, originalValue, calculatedValue):
"""Calculates the error between the two given values.
:param list originalValue: List containing the values of the original data.
:param list calculatedValue: List containing the values of the calculated TimeSeries that
corresponds to originalValue.
:return: Returns the error measure of the two given values.
:rtype: numeric
"""
originalValue = originalValue[0]
calculatedValue = calculatedValue[0]
return abs(originalValue - calculatedValue) | python | def local_error(self, originalValue, calculatedValue):
"""Calculates the error between the two given values.
:param list originalValue: List containing the values of the original data.
:param list calculatedValue: List containing the values of the calculated TimeSeries that
corresponds to originalValue.
:return: Returns the error measure of the two given values.
:rtype: numeric
"""
originalValue = originalValue[0]
calculatedValue = calculatedValue[0]
return abs(originalValue - calculatedValue) | ['def', 'local_error', '(', 'self', ',', 'originalValue', ',', 'calculatedValue', ')', ':', 'originalValue', '=', 'originalValue', '[', '0', ']', 'calculatedValue', '=', 'calculatedValue', '[', '0', ']', 'return', 'abs', '(', 'originalValue', '-', 'calculatedValue', ')'] | Calculates the error between the two given values.
:param list originalValue: List containing the values of the original data.
:param list calculatedValue: List containing the values of the calculated TimeSeries that
corresponds to originalValue.
:return: Returns the error measure of the two given values.
:rtype: numeric | ['Calculates', 'the', 'error', 'between', 'the', 'two', 'given', 'values', '.'] | train | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/errors/meanabsolutedeviationerror.py#L53-L66 |
5,084 | jespino/anillo | anillo/app.py | application | def application(handler, adapter_cls=WerkzeugAdapter):
"""Converts an anillo function based handler in a
wsgi compiliant application function.
:param adapter_cls: the wsgi adapter implementation (default: wekrzeug)
:returns: wsgi function
:rtype: callable
"""
adapter = adapter_cls()
def wrapper(environ, start_response):
request = adapter.to_request(environ)
response = handler(request)
response_func = adapter.from_response(response)
return response_func(environ, start_response)
return wrapper | python | def application(handler, adapter_cls=WerkzeugAdapter):
"""Converts an anillo function based handler in a
wsgi compiliant application function.
:param adapter_cls: the wsgi adapter implementation (default: wekrzeug)
:returns: wsgi function
:rtype: callable
"""
adapter = adapter_cls()
def wrapper(environ, start_response):
request = adapter.to_request(environ)
response = handler(request)
response_func = adapter.from_response(response)
return response_func(environ, start_response)
return wrapper | ['def', 'application', '(', 'handler', ',', 'adapter_cls', '=', 'WerkzeugAdapter', ')', ':', 'adapter', '=', 'adapter_cls', '(', ')', 'def', 'wrapper', '(', 'environ', ',', 'start_response', ')', ':', 'request', '=', 'adapter', '.', 'to_request', '(', 'environ', ')', 'response', '=', 'handler', '(', 'request', ')', 'response_func', '=', 'adapter', '.', 'from_response', '(', 'response', ')', 'return', 'response_func', '(', 'environ', ',', 'start_response', ')', 'return', 'wrapper'] | Converts an anillo function based handler in a
wsgi compiliant application function.
:param adapter_cls: the wsgi adapter implementation (default: wekrzeug)
:returns: wsgi function
:rtype: callable | ['Converts', 'an', 'anillo', 'function', 'based', 'handler', 'in', 'a', 'wsgi', 'compiliant', 'application', 'function', '.'] | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/app.py#L4-L20 |
5,085 | JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_link.py | LinkModule.show_link | def show_link(self):
'''show link information'''
for master in self.mpstate.mav_master:
linkdelay = (self.status.highest_msec - master.highest_msec)*1.0e-3
if master.linkerror:
print("link %u down" % (master.linknum+1))
else:
print("link %u OK (%u packets, %.2fs delay, %u lost, %.1f%% loss)" % (master.linknum+1,
self.status.counters['MasterIn'][master.linknum],
linkdelay,
master.mav_loss,
master.packet_loss())) | python | def show_link(self):
'''show link information'''
for master in self.mpstate.mav_master:
linkdelay = (self.status.highest_msec - master.highest_msec)*1.0e-3
if master.linkerror:
print("link %u down" % (master.linknum+1))
else:
print("link %u OK (%u packets, %.2fs delay, %u lost, %.1f%% loss)" % (master.linknum+1,
self.status.counters['MasterIn'][master.linknum],
linkdelay,
master.mav_loss,
master.packet_loss())) | ['def', 'show_link', '(', 'self', ')', ':', 'for', 'master', 'in', 'self', '.', 'mpstate', '.', 'mav_master', ':', 'linkdelay', '=', '(', 'self', '.', 'status', '.', 'highest_msec', '-', 'master', '.', 'highest_msec', ')', '*', '1.0e-3', 'if', 'master', '.', 'linkerror', ':', 'print', '(', '"link %u down"', '%', '(', 'master', '.', 'linknum', '+', '1', ')', ')', 'else', ':', 'print', '(', '"link %u OK (%u packets, %.2fs delay, %u lost, %.1f%% loss)"', '%', '(', 'master', '.', 'linknum', '+', '1', ',', 'self', '.', 'status', '.', 'counters', '[', "'MasterIn'", ']', '[', 'master', '.', 'linknum', ']', ',', 'linkdelay', ',', 'master', '.', 'mav_loss', ',', 'master', '.', 'packet_loss', '(', ')', ')', ')'] | show link information | ['show', 'link', 'information'] | train | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_link.py#L92-L103 |
5,086 | PmagPy/PmagPy | pmagpy/validate_upload3.py | validate_df | def validate_df(df, dm, con=None):
"""
Take in a DataFrame and corresponding data model.
Run all validations for that DataFrame.
Output is the original DataFrame with some new columns
that contain the validation output.
Validation columns start with:
presence_pass_ (checking that req'd columns are present)
type_pass_ (checking that the data is of the correct type)
value_pass_ (checking that the value is within the appropriate range)
group_pass_ (making sure that group validations pass)
"""
# check column validity
required_one = {} # keep track of req'd one in group validations here
cols = df.columns
invalid_cols = [col for col in cols if col not in dm.index]
# go through and run all validations for the data type
for validation_name, validation in dm.iterrows():
value_type = validation['type']
if validation_name in df.columns:
output = df[validation_name].apply(test_type, args=(value_type,))
df["type_pass" + "_" + validation_name + "_" + value_type] = output
#
val_list = validation['validations']
if not val_list or isinstance(val_list, float):
continue
for num, val in enumerate(val_list):
func_name, arg = split_func(val)
if arg == "magic_table_column":
continue
# first validate for presence
if func_name in presence_operations:
func = presence_operations[func_name]
#grade = func(validation_name, df, arg, dm)
grade = func(validation_name, arg, dm, df, con)
pass_col_name = "presence_pass_" + validation_name + "_" + func.__name__
df[pass_col_name] = grade
# then validate for correct values
elif func_name in value_operations:
func = value_operations[func_name]
if validation_name in df.columns:
grade = df.apply(func, args=(validation_name, arg, dm, df, con), axis=1)
col_name = "value_pass_" + validation_name + "_" + func.__name__
if col_name in df.columns:
num_range = list(range(1, 10))
for num in num_range:
if (col_name + str(num)) in df.columns:
continue
else:
col_name = col_name + str(num)
break
df[col_name] = grade.astype(object)
# last, validate at the column group level
elif func_name in group_operations:
func = group_operations[func_name]
missing = func(validation_name, arg, dm, df)
if arg not in required_one:
required_one[arg] = [missing]
else:
required_one[arg].append(missing)
# format the group validation columns
for key, value in list(required_one.items()):
if None in value:
# this means at least one value from the required group is present,
# so the validation passes
continue
else:
# otherwise, all of the values from the required group are missing,
# so the validation fails
df["group_pass_{}".format(key)] = "you must have one column from group {}: {}".format(key, ", ".join(value))
return df | python | def validate_df(df, dm, con=None):
"""
Take in a DataFrame and corresponding data model.
Run all validations for that DataFrame.
Output is the original DataFrame with some new columns
that contain the validation output.
Validation columns start with:
presence_pass_ (checking that req'd columns are present)
type_pass_ (checking that the data is of the correct type)
value_pass_ (checking that the value is within the appropriate range)
group_pass_ (making sure that group validations pass)
"""
# check column validity
required_one = {} # keep track of req'd one in group validations here
cols = df.columns
invalid_cols = [col for col in cols if col not in dm.index]
# go through and run all validations for the data type
for validation_name, validation in dm.iterrows():
value_type = validation['type']
if validation_name in df.columns:
output = df[validation_name].apply(test_type, args=(value_type,))
df["type_pass" + "_" + validation_name + "_" + value_type] = output
#
val_list = validation['validations']
if not val_list or isinstance(val_list, float):
continue
for num, val in enumerate(val_list):
func_name, arg = split_func(val)
if arg == "magic_table_column":
continue
# first validate for presence
if func_name in presence_operations:
func = presence_operations[func_name]
#grade = func(validation_name, df, arg, dm)
grade = func(validation_name, arg, dm, df, con)
pass_col_name = "presence_pass_" + validation_name + "_" + func.__name__
df[pass_col_name] = grade
# then validate for correct values
elif func_name in value_operations:
func = value_operations[func_name]
if validation_name in df.columns:
grade = df.apply(func, args=(validation_name, arg, dm, df, con), axis=1)
col_name = "value_pass_" + validation_name + "_" + func.__name__
if col_name in df.columns:
num_range = list(range(1, 10))
for num in num_range:
if (col_name + str(num)) in df.columns:
continue
else:
col_name = col_name + str(num)
break
df[col_name] = grade.astype(object)
# last, validate at the column group level
elif func_name in group_operations:
func = group_operations[func_name]
missing = func(validation_name, arg, dm, df)
if arg not in required_one:
required_one[arg] = [missing]
else:
required_one[arg].append(missing)
# format the group validation columns
for key, value in list(required_one.items()):
if None in value:
# this means at least one value from the required group is present,
# so the validation passes
continue
else:
# otherwise, all of the values from the required group are missing,
# so the validation fails
df["group_pass_{}".format(key)] = "you must have one column from group {}: {}".format(key, ", ".join(value))
return df | ['def', 'validate_df', '(', 'df', ',', 'dm', ',', 'con', '=', 'None', ')', ':', '# check column validity', 'required_one', '=', '{', '}', "# keep track of req'd one in group validations here", 'cols', '=', 'df', '.', 'columns', 'invalid_cols', '=', '[', 'col', 'for', 'col', 'in', 'cols', 'if', 'col', 'not', 'in', 'dm', '.', 'index', ']', '# go through and run all validations for the data type', 'for', 'validation_name', ',', 'validation', 'in', 'dm', '.', 'iterrows', '(', ')', ':', 'value_type', '=', 'validation', '[', "'type'", ']', 'if', 'validation_name', 'in', 'df', '.', 'columns', ':', 'output', '=', 'df', '[', 'validation_name', ']', '.', 'apply', '(', 'test_type', ',', 'args', '=', '(', 'value_type', ',', ')', ')', 'df', '[', '"type_pass"', '+', '"_"', '+', 'validation_name', '+', '"_"', '+', 'value_type', ']', '=', 'output', '#', 'val_list', '=', 'validation', '[', "'validations'", ']', 'if', 'not', 'val_list', 'or', 'isinstance', '(', 'val_list', ',', 'float', ')', ':', 'continue', 'for', 'num', ',', 'val', 'in', 'enumerate', '(', 'val_list', ')', ':', 'func_name', ',', 'arg', '=', 'split_func', '(', 'val', ')', 'if', 'arg', '==', '"magic_table_column"', ':', 'continue', '# first validate for presence', 'if', 'func_name', 'in', 'presence_operations', ':', 'func', '=', 'presence_operations', '[', 'func_name', ']', '#grade = func(validation_name, df, arg, dm)', 'grade', '=', 'func', '(', 'validation_name', ',', 'arg', ',', 'dm', ',', 'df', ',', 'con', ')', 'pass_col_name', '=', '"presence_pass_"', '+', 'validation_name', '+', '"_"', '+', 'func', '.', '__name__', 'df', '[', 'pass_col_name', ']', '=', 'grade', '# then validate for correct values', 'elif', 'func_name', 'in', 'value_operations', ':', 'func', '=', 'value_operations', '[', 'func_name', ']', 'if', 'validation_name', 'in', 'df', '.', 'columns', ':', 'grade', '=', 'df', '.', 'apply', '(', 'func', ',', 'args', '=', '(', 'validation_name', ',', 'arg', ',', 'dm', ',', 'df', ',', 'con', ')', ',', 'axis', '=', '1', ')', 'col_name', '=', '"value_pass_"', '+', 'validation_name', '+', '"_"', '+', 'func', '.', '__name__', 'if', 'col_name', 'in', 'df', '.', 'columns', ':', 'num_range', '=', 'list', '(', 'range', '(', '1', ',', '10', ')', ')', 'for', 'num', 'in', 'num_range', ':', 'if', '(', 'col_name', '+', 'str', '(', 'num', ')', ')', 'in', 'df', '.', 'columns', ':', 'continue', 'else', ':', 'col_name', '=', 'col_name', '+', 'str', '(', 'num', ')', 'break', 'df', '[', 'col_name', ']', '=', 'grade', '.', 'astype', '(', 'object', ')', '# last, validate at the column group level', 'elif', 'func_name', 'in', 'group_operations', ':', 'func', '=', 'group_operations', '[', 'func_name', ']', 'missing', '=', 'func', '(', 'validation_name', ',', 'arg', ',', 'dm', ',', 'df', ')', 'if', 'arg', 'not', 'in', 'required_one', ':', 'required_one', '[', 'arg', ']', '=', '[', 'missing', ']', 'else', ':', 'required_one', '[', 'arg', ']', '.', 'append', '(', 'missing', ')', '# format the group validation columns', 'for', 'key', ',', 'value', 'in', 'list', '(', 'required_one', '.', 'items', '(', ')', ')', ':', 'if', 'None', 'in', 'value', ':', '# this means at least one value from the required group is present,', '# so the validation passes', 'continue', 'else', ':', '# otherwise, all of the values from the required group are missing,', '# so the validation fails', 'df', '[', '"group_pass_{}"', '.', 'format', '(', 'key', ')', ']', '=', '"you must have one column from group {}: {}"', '.', 'format', '(', 'key', ',', '", "', '.', 'join', '(', 
'value', ')', ')', 'return', 'df'] | Take in a DataFrame and corresponding data model.
Run all validations for that DataFrame.
Output is the original DataFrame with some new columns
that contain the validation output.
Validation columns start with:
presence_pass_ (checking that req'd columns are present)
type_pass_ (checking that the data is of the correct type)
value_pass_ (checking that the value is within the appropriate range)
group_pass_ (making sure that group validations pass) | ['Take', 'in', 'a', 'DataFrame', 'and', 'corresponding', 'data', 'model', '.', 'Run', 'all', 'validations', 'for', 'that', 'DataFrame', '.', 'Output', 'is', 'the', 'original', 'DataFrame', 'with', 'some', 'new', 'columns', 'that', 'contain', 'the', 'validation', 'output', '.', 'Validation', 'columns', 'start', 'with', ':', 'presence_pass_', '(', 'checking', 'that', 'req', 'd', 'columns', 'are', 'present', ')', 'type_pass_', '(', 'checking', 'that', 'the', 'data', 'is', 'of', 'the', 'correct', 'type', ')', 'value_pass_', '(', 'checking', 'that', 'the', 'value', 'is', 'within', 'the', 'appropriate', 'range', ')', 'group_pass_', '(', 'making', 'sure', 'that', 'group', 'validations', 'pass', ')'] | train | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/validate_upload3.py#L298-L369 |
5,087 | woolfson-group/isambard | isambard/ampal/specifications/assembly_specs/nucleic_acid_duplex.py | DNADuplex.from_sequence | def from_sequence(cls, sequence, phos_3_prime=False):
"""Creates a DNA duplex from a nucleotide sequence.
Parameters
----------
sequence: str
Nucleotide sequence.
phos_3_prime: bool, optional
If false the 5' and the 3' phosphor will be omitted.
"""
strand1 = NucleicAcidStrand(sequence, phos_3_prime=phos_3_prime)
duplex = cls(strand1)
return duplex | python | def from_sequence(cls, sequence, phos_3_prime=False):
"""Creates a DNA duplex from a nucleotide sequence.
Parameters
----------
sequence: str
Nucleotide sequence.
phos_3_prime: bool, optional
If false the 5' and the 3' phosphor will be omitted.
"""
strand1 = NucleicAcidStrand(sequence, phos_3_prime=phos_3_prime)
duplex = cls(strand1)
return duplex | ['def', 'from_sequence', '(', 'cls', ',', 'sequence', ',', 'phos_3_prime', '=', 'False', ')', ':', 'strand1', '=', 'NucleicAcidStrand', '(', 'sequence', ',', 'phos_3_prime', '=', 'phos_3_prime', ')', 'duplex', '=', 'cls', '(', 'strand1', ')', 'return', 'duplex'] | Creates a DNA duplex from a nucleotide sequence.
Parameters
----------
sequence: str
Nucleotide sequence.
phos_3_prime: bool, optional
If false the 5' and the 3' phosphor will be omitted. | ['Creates', 'a', 'DNA', 'duplex', 'from', 'a', 'nucleotide', 'sequence', '.'] | train | https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/specifications/assembly_specs/nucleic_acid_duplex.py#L34-L46 |
5,088 | limpyd/redis-limpyd | limpyd/model.py | RedisModel.hdel | def hdel(self, *args):
"""
This command on the model allow deleting many instancehash fields with
only one redis call. You must pass hash names to retrieve as arguments
"""
if args and not any(arg in self._instancehash_fields for arg in args):
raise ValueError("Only InstanceHashField can be used here.")
# Set indexes for indexable fields.
for field_name in args:
field = self.get_field(field_name)
if field.indexable:
field.deindex()
# Return the number of fields really deleted
return self._call_command('hdel', *args) | python | def hdel(self, *args):
"""
This command on the model allow deleting many instancehash fields with
only one redis call. You must pass hash names to retrieve as arguments
"""
if args and not any(arg in self._instancehash_fields for arg in args):
raise ValueError("Only InstanceHashField can be used here.")
# Set indexes for indexable fields.
for field_name in args:
field = self.get_field(field_name)
if field.indexable:
field.deindex()
# Return the number of fields really deleted
return self._call_command('hdel', *args) | ['def', 'hdel', '(', 'self', ',', '*', 'args', ')', ':', 'if', 'args', 'and', 'not', 'any', '(', 'arg', 'in', 'self', '.', '_instancehash_fields', 'for', 'arg', 'in', 'args', ')', ':', 'raise', 'ValueError', '(', '"Only InstanceHashField can be used here."', ')', '# Set indexes for indexable fields.', 'for', 'field_name', 'in', 'args', ':', 'field', '=', 'self', '.', 'get_field', '(', 'field_name', ')', 'if', 'field', '.', 'indexable', ':', 'field', '.', 'deindex', '(', ')', '# Return the number of fields really deleted', 'return', 'self', '.', '_call_command', '(', "'hdel'", ',', '*', 'args', ')'] | This command on the model allow deleting many instancehash fields with
only one redis call. You must pass hash names to retrieve as arguments | ['This', 'command', 'on', 'the', 'model', 'allow', 'deleting', 'many', 'instancehash', 'fields', 'with', 'only', 'one', 'redis', 'call', '.', 'You', 'must', 'pass', 'hash', 'names', 'to', 'retrieve', 'as', 'arguments'] | train | https://github.com/limpyd/redis-limpyd/blob/3c745dde1390a0bd09690b77a089dcc08c6c7e43/limpyd/model.py#L489-L504 |
5,089 | rueckstiess/mtools | mtools/util/pattern.py | json2pattern | def json2pattern(s):
"""
Convert JSON format to a query pattern.
Includes even mongo shell notation without quoted key names.
"""
# make valid JSON by wrapping field names in quotes
s, _ = re.subn(r'([{,])\s*([^,{\s\'"]+)\s*:', ' \\1 "\\2" : ', s)
# handle shell values that are not valid JSON
s = shell2json(s)
# convert to 1 where possible, to get rid of things like new Date(...)
s, n = re.subn(r'([:,\[])\s*([^{}\[\]"]+?)\s*([,}\]])', '\\1 1 \\3', s)
# now convert to dictionary, converting unicode to ascii
try:
doc = json.loads(s, object_hook=_decode_pattern_dict)
return json.dumps(doc, sort_keys=True, separators=(', ', ': '))
except ValueError as ex:
return None | python | def json2pattern(s):
"""
Convert JSON format to a query pattern.
Includes even mongo shell notation without quoted key names.
"""
# make valid JSON by wrapping field names in quotes
s, _ = re.subn(r'([{,])\s*([^,{\s\'"]+)\s*:', ' \\1 "\\2" : ', s)
# handle shell values that are not valid JSON
s = shell2json(s)
# convert to 1 where possible, to get rid of things like new Date(...)
s, n = re.subn(r'([:,\[])\s*([^{}\[\]"]+?)\s*([,}\]])', '\\1 1 \\3', s)
# now convert to dictionary, converting unicode to ascii
try:
doc = json.loads(s, object_hook=_decode_pattern_dict)
return json.dumps(doc, sort_keys=True, separators=(', ', ': '))
except ValueError as ex:
return None | ['def', 'json2pattern', '(', 's', ')', ':', '# make valid JSON by wrapping field names in quotes', 's', ',', '_', '=', 're', '.', 'subn', '(', 'r\'([{,])\\s*([^,{\\s\\\'"]+)\\s*:\'', ',', '\' \\\\1 "\\\\2" : \'', ',', 's', ')', '# handle shell values that are not valid JSON', 's', '=', 'shell2json', '(', 's', ')', '# convert to 1 where possible, to get rid of things like new Date(...)', 's', ',', 'n', '=', 're', '.', 'subn', '(', 'r\'([:,\\[])\\s*([^{}\\[\\]"]+?)\\s*([,}\\]])\'', ',', "'\\\\1 1 \\\\3'", ',', 's', ')', '# now convert to dictionary, converting unicode to ascii', 'try', ':', 'doc', '=', 'json', '.', 'loads', '(', 's', ',', 'object_hook', '=', '_decode_pattern_dict', ')', 'return', 'json', '.', 'dumps', '(', 'doc', ',', 'sort_keys', '=', 'True', ',', 'separators', '=', '(', "', '", ',', "': '", ')', ')', 'except', 'ValueError', 'as', 'ex', ':', 'return', 'None'] | Convert JSON format to a query pattern.
Includes even mongo shell notation without quoted key names. | ['Convert', 'JSON', 'format', 'to', 'a', 'query', 'pattern', '.'] | train | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/pattern.py#L73-L90 |
5,090 | tcalmant/ipopo | pelix/http/basic.py | _RequestHandler.log_request | def log_request(self, code="-", size="-"):
"""
Logs a request to the server
"""
self._service.log(logging.DEBUG, '"%s" %s', self.requestline, code) | python | def log_request(self, code="-", size="-"):
"""
Logs a request to the server
"""
self._service.log(logging.DEBUG, '"%s" %s', self.requestline, code) | ['def', 'log_request', '(', 'self', ',', 'code', '=', '"-"', ',', 'size', '=', '"-"', ')', ':', 'self', '.', '_service', '.', 'log', '(', 'logging', '.', 'DEBUG', ',', '\'"%s" %s\'', ',', 'self', '.', 'requestline', ',', 'code', ')'] | Logs a request to the server | ['Logs', 'a', 'request', 'to', 'the', 'server'] | train | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/http/basic.py#L322-L326 |
5,091 | atztogo/phono3py | phono3py/file_IO.py | write_fc3_to_hdf5 | def write_fc3_to_hdf5(fc3,
filename='fc3.hdf5',
p2s_map=None,
compression=None):
"""Write third-order force constants in hdf5 format.
Parameters
----------
force_constants : ndarray
Force constants
shape=(n_satom, n_satom, n_satom, 3, 3, 3) or
(n_patom, n_satom, n_satom,3,3,3), dtype=double
filename : str
Filename to be used.
p2s_map : ndarray, optional
Primitive atom indices in supercell index system
shape=(n_patom,), dtype=intc
compression : str or int, optional
h5py's lossless compression filters (e.g., "gzip", "lzf").
See the detail at docstring of h5py.Group.create_dataset. Default is
None.
"""
with h5py.File(filename, 'w') as w:
w.create_dataset('fc3', data=fc3, compression=compression)
if p2s_map is not None:
w.create_dataset('p2s_map', data=p2s_map) | python | def write_fc3_to_hdf5(fc3,
filename='fc3.hdf5',
p2s_map=None,
compression=None):
"""Write third-order force constants in hdf5 format.
Parameters
----------
force_constants : ndarray
Force constants
shape=(n_satom, n_satom, n_satom, 3, 3, 3) or
(n_patom, n_satom, n_satom,3,3,3), dtype=double
filename : str
Filename to be used.
p2s_map : ndarray, optional
Primitive atom indices in supercell index system
shape=(n_patom,), dtype=intc
compression : str or int, optional
h5py's lossless compression filters (e.g., "gzip", "lzf").
See the detail at docstring of h5py.Group.create_dataset. Default is
None.
"""
with h5py.File(filename, 'w') as w:
w.create_dataset('fc3', data=fc3, compression=compression)
if p2s_map is not None:
w.create_dataset('p2s_map', data=p2s_map) | ['def', 'write_fc3_to_hdf5', '(', 'fc3', ',', 'filename', '=', "'fc3.hdf5'", ',', 'p2s_map', '=', 'None', ',', 'compression', '=', 'None', ')', ':', 'with', 'h5py', '.', 'File', '(', 'filename', ',', "'w'", ')', 'as', 'w', ':', 'w', '.', 'create_dataset', '(', "'fc3'", ',', 'data', '=', 'fc3', ',', 'compression', '=', 'compression', ')', 'if', 'p2s_map', 'is', 'not', 'None', ':', 'w', '.', 'create_dataset', '(', "'p2s_map'", ',', 'data', '=', 'p2s_map', ')'] | Write third-order force constants in hdf5 format.
Parameters
----------
force_constants : ndarray
Force constants
shape=(n_satom, n_satom, n_satom, 3, 3, 3) or
(n_patom, n_satom, n_satom,3,3,3), dtype=double
filename : str
Filename to be used.
p2s_map : ndarray, optional
Primitive atom indices in supercell index system
shape=(n_patom,), dtype=intc
compression : str or int, optional
h5py's lossless compression filters (e.g., "gzip", "lzf").
See the detail at docstring of h5py.Group.create_dataset. Default is
None. | ['Write', 'third', '-', 'order', 'force', 'constants', 'in', 'hdf5', 'format', '.'] | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/file_IO.py#L184-L211 |
5,092 | tkf/rash | rash/index.py | index_run | def index_run(record_path, keep_json, check_duplicate):
"""
Convert raw JSON records into sqlite3 DB.
Normally RASH launches a daemon that takes care of indexing.
See ``rash daemon --help``.
"""
from .config import ConfigStore
from .indexer import Indexer
cfstore = ConfigStore()
indexer = Indexer(cfstore, check_duplicate, keep_json, record_path)
indexer.index_all() | python | def index_run(record_path, keep_json, check_duplicate):
"""
Convert raw JSON records into sqlite3 DB.
Normally RASH launches a daemon that takes care of indexing.
See ``rash daemon --help``.
"""
from .config import ConfigStore
from .indexer import Indexer
cfstore = ConfigStore()
indexer = Indexer(cfstore, check_duplicate, keep_json, record_path)
indexer.index_all() | ['def', 'index_run', '(', 'record_path', ',', 'keep_json', ',', 'check_duplicate', ')', ':', 'from', '.', 'config', 'import', 'ConfigStore', 'from', '.', 'indexer', 'import', 'Indexer', 'cfstore', '=', 'ConfigStore', '(', ')', 'indexer', '=', 'Indexer', '(', 'cfstore', ',', 'check_duplicate', ',', 'keep_json', ',', 'record_path', ')', 'indexer', '.', 'index_all', '(', ')'] | Convert raw JSON records into sqlite3 DB.
Normally RASH launches a daemon that takes care of indexing.
See ``rash daemon --help``. | ['Convert', 'raw', 'JSON', 'records', 'into', 'sqlite3', 'DB', '.'] | train | https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/index.py#L17-L29 |
5,093 | ThreatConnect-Inc/tcex | tcex/tcex_playbook.py | TcExPlaybook.create_string_array | def create_string_array(self, key, value):
"""Create method of CRUD operation for string array data.
Args:
key (string): The variable to write to the DB.
value (any): The data to write to the DB.
Returns:
(string): Result of DB write.
"""
data = None
if key is not None and value is not None:
if isinstance(value, (list)):
data = self.db.create(key.strip(), json.dumps(value))
else:
# used to save raw value with embedded variables
data = self.db.create(key.strip(), value)
else:
self.tcex.log.warning(u'The key or value field was None.')
return data | python | def create_string_array(self, key, value):
"""Create method of CRUD operation for string array data.
Args:
key (string): The variable to write to the DB.
value (any): The data to write to the DB.
Returns:
(string): Result of DB write.
"""
data = None
if key is not None and value is not None:
if isinstance(value, (list)):
data = self.db.create(key.strip(), json.dumps(value))
else:
# used to save raw value with embedded variables
data = self.db.create(key.strip(), value)
else:
self.tcex.log.warning(u'The key or value field was None.')
return data | ['def', 'create_string_array', '(', 'self', ',', 'key', ',', 'value', ')', ':', 'data', '=', 'None', 'if', 'key', 'is', 'not', 'None', 'and', 'value', 'is', 'not', 'None', ':', 'if', 'isinstance', '(', 'value', ',', '(', 'list', ')', ')', ':', 'data', '=', 'self', '.', 'db', '.', 'create', '(', 'key', '.', 'strip', '(', ')', ',', 'json', '.', 'dumps', '(', 'value', ')', ')', 'else', ':', '# used to save raw value with embedded variables', 'data', '=', 'self', '.', 'db', '.', 'create', '(', 'key', '.', 'strip', '(', ')', ',', 'value', ')', 'else', ':', 'self', '.', 'tcex', '.', 'log', '.', 'warning', '(', "u'The key or value field was None.'", ')', 'return', 'data'] | Create method of CRUD operation for string array data.
Args:
key (string): The variable to write to the DB.
value (any): The data to write to the DB.
Returns:
(string): Result of DB write. | ['Create', 'method', 'of', 'CRUD', 'operation', 'for', 'string', 'array', 'data', '.'] | train | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_playbook.py#L909-L928 |
5,094 | Jaymon/prom | prom/query.py | Query.value | def value(self):
"""convenience method to just get one value or tuple of values for the query"""
field_vals = None
field_names = self.fields_select.names()
fcount = len(field_names)
if fcount:
d = self._query('get_one')
if d:
field_vals = [d.get(fn, None) for fn in field_names]
if fcount == 1:
field_vals = field_vals[0]
else:
raise ValueError("no select fields were set, so cannot return value")
return field_vals | python | def value(self):
"""convenience method to just get one value or tuple of values for the query"""
field_vals = None
field_names = self.fields_select.names()
fcount = len(field_names)
if fcount:
d = self._query('get_one')
if d:
field_vals = [d.get(fn, None) for fn in field_names]
if fcount == 1:
field_vals = field_vals[0]
else:
raise ValueError("no select fields were set, so cannot return value")
return field_vals | ['def', 'value', '(', 'self', ')', ':', 'field_vals', '=', 'None', 'field_names', '=', 'self', '.', 'fields_select', '.', 'names', '(', ')', 'fcount', '=', 'len', '(', 'field_names', ')', 'if', 'fcount', ':', 'd', '=', 'self', '.', '_query', '(', "'get_one'", ')', 'if', 'd', ':', 'field_vals', '=', '[', 'd', '.', 'get', '(', 'fn', ',', 'None', ')', 'for', 'fn', 'in', 'field_names', ']', 'if', 'fcount', '==', '1', ':', 'field_vals', '=', 'field_vals', '[', '0', ']', 'else', ':', 'raise', 'ValueError', '(', '"no select fields were set, so cannot return value"', ')', 'return', 'field_vals'] | convenience method to just get one value or tuple of values for the query | ['convenience', 'method', 'to', 'just', 'get', 'one', 'value', 'or', 'tuple', 'of', 'values', 'for', 'the', 'query'] | train | https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1065-L1080 |
5,095 | Chilipp/psyplot | psyplot/data.py | CFDecoder._get_coord_cell_node_coord | def _get_coord_cell_node_coord(self, coord, coords=None, nans=None,
var=None):
"""
Get the boundaries of an unstructed coordinate
Parameters
----------
coord: xr.Variable
The coordinate whose bounds should be returned
%(CFDecoder.get_cell_node_coord.parameters.no_var|axis)s
Returns
-------
%(CFDecoder.get_cell_node_coord.returns)s
"""
bounds = coord.attrs.get('bounds')
if bounds is not None:
bounds = self.ds.coords.get(bounds)
if bounds is not None:
if coords is not None:
bounds = bounds.sel(**{
key: coords[key]
for key in set(coords).intersection(bounds.dims)})
if nans is not None and var is None:
raise ValueError("Need the variable to deal with NaN!")
elif nans is None:
pass
elif nans == 'skip':
bounds = bounds[~np.isnan(var.values)]
elif nans == 'only':
bounds = bounds[np.isnan(var.values)]
else:
raise ValueError(
"`nans` must be either None, 'skip', or 'only'! "
"Not {0}!".format(str(nans)))
return bounds | python | def _get_coord_cell_node_coord(self, coord, coords=None, nans=None,
var=None):
"""
Get the boundaries of an unstructed coordinate
Parameters
----------
coord: xr.Variable
The coordinate whose bounds should be returned
%(CFDecoder.get_cell_node_coord.parameters.no_var|axis)s
Returns
-------
%(CFDecoder.get_cell_node_coord.returns)s
"""
bounds = coord.attrs.get('bounds')
if bounds is not None:
bounds = self.ds.coords.get(bounds)
if bounds is not None:
if coords is not None:
bounds = bounds.sel(**{
key: coords[key]
for key in set(coords).intersection(bounds.dims)})
if nans is not None and var is None:
raise ValueError("Need the variable to deal with NaN!")
elif nans is None:
pass
elif nans == 'skip':
bounds = bounds[~np.isnan(var.values)]
elif nans == 'only':
bounds = bounds[np.isnan(var.values)]
else:
raise ValueError(
"`nans` must be either None, 'skip', or 'only'! "
"Not {0}!".format(str(nans)))
return bounds | ['def', '_get_coord_cell_node_coord', '(', 'self', ',', 'coord', ',', 'coords', '=', 'None', ',', 'nans', '=', 'None', ',', 'var', '=', 'None', ')', ':', 'bounds', '=', 'coord', '.', 'attrs', '.', 'get', '(', "'bounds'", ')', 'if', 'bounds', 'is', 'not', 'None', ':', 'bounds', '=', 'self', '.', 'ds', '.', 'coords', '.', 'get', '(', 'bounds', ')', 'if', 'bounds', 'is', 'not', 'None', ':', 'if', 'coords', 'is', 'not', 'None', ':', 'bounds', '=', 'bounds', '.', 'sel', '(', '*', '*', '{', 'key', ':', 'coords', '[', 'key', ']', 'for', 'key', 'in', 'set', '(', 'coords', ')', '.', 'intersection', '(', 'bounds', '.', 'dims', ')', '}', ')', 'if', 'nans', 'is', 'not', 'None', 'and', 'var', 'is', 'None', ':', 'raise', 'ValueError', '(', '"Need the variable to deal with NaN!"', ')', 'elif', 'nans', 'is', 'None', ':', 'pass', 'elif', 'nans', '==', "'skip'", ':', 'bounds', '=', 'bounds', '[', '~', 'np', '.', 'isnan', '(', 'var', '.', 'values', ')', ']', 'elif', 'nans', '==', "'only'", ':', 'bounds', '=', 'bounds', '[', 'np', '.', 'isnan', '(', 'var', '.', 'values', ')', ']', 'else', ':', 'raise', 'ValueError', '(', '"`nans` must be either None, \'skip\', or \'only\'! "', '"Not {0}!"', '.', 'format', '(', 'str', '(', 'nans', ')', ')', ')', 'return', 'bounds'] | Get the boundaries of an unstructed coordinate
Parameters
----------
coord: xr.Variable
The coordinate whose bounds should be returned
%(CFDecoder.get_cell_node_coord.parameters.no_var|axis)s
Returns
-------
%(CFDecoder.get_cell_node_coord.returns)s | ['Get', 'the', 'boundaries', 'of', 'an', 'unstructed', 'coordinate'] | train | https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/data.py#L755-L790 |
5,096 | jlesquembre/jlle | jlle/releaser/utils.py | sanity_check | def sanity_check(vcs):
"""Do sanity check before making changes
Check that we are not on a tag and/or do not have local changes.
Returns True when all is fine.
"""
if not vcs.is_clean_checkout():
q = ("This is NOT a clean checkout. You are on a tag or you have "
"local changes.\n"
"Are you sure you want to continue?")
if not ask(q, default=False):
sys.exit(1) | python | def sanity_check(vcs):
"""Do sanity check before making changes
Check that we are not on a tag and/or do not have local changes.
Returns True when all is fine.
"""
if not vcs.is_clean_checkout():
q = ("This is NOT a clean checkout. You are on a tag or you have "
"local changes.\n"
"Are you sure you want to continue?")
if not ask(q, default=False):
sys.exit(1) | ['def', 'sanity_check', '(', 'vcs', ')', ':', 'if', 'not', 'vcs', '.', 'is_clean_checkout', '(', ')', ':', 'q', '=', '(', '"This is NOT a clean checkout. You are on a tag or you have "', '"local changes.\\n"', '"Are you sure you want to continue?"', ')', 'if', 'not', 'ask', '(', 'q', ',', 'default', '=', 'False', ')', ':', 'sys', '.', 'exit', '(', '1', ')'] | Do sanity check before making changes
Check that we are not on a tag and/or do not have local changes.
Returns True when all is fine. | ['Do', 'sanity', 'check', 'before', 'making', 'changes'] | train | https://github.com/jlesquembre/jlle/blob/3645d8f203708355853ef911f4b887ae4d794826/jlle/releaser/utils.py#L223-L235 |
5,097 | NoviceLive/intellicoder | intellicoder/msbuild/builders.py | Builder.make_objs | def make_objs(names, out_dir=''):
"""
Make object file names for cl.exe and link.exe.
"""
objs = [replace_ext(name, '.obj') for name in names]
if out_dir:
objs = [os.path.join(out_dir, obj) for obj in objs]
return objs | python | def make_objs(names, out_dir=''):
"""
Make object file names for cl.exe and link.exe.
"""
objs = [replace_ext(name, '.obj') for name in names]
if out_dir:
objs = [os.path.join(out_dir, obj) for obj in objs]
return objs | ['def', 'make_objs', '(', 'names', ',', 'out_dir', '=', "''", ')', ':', 'objs', '=', '[', 'replace_ext', '(', 'name', ',', "'.obj'", ')', 'for', 'name', 'in', 'names', ']', 'if', 'out_dir', ':', 'objs', '=', '[', 'os', '.', 'path', '.', 'join', '(', 'out_dir', ',', 'obj', ')', 'for', 'obj', 'in', 'objs', ']', 'return', 'objs'] | Make object file names for cl.exe and link.exe. | ['Make', 'object', 'file', 'names', 'for', 'cl', '.', 'exe', 'and', 'link', '.', 'exe', '.'] | train | https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/msbuild/builders.py#L129-L136 |
5,098 | dtmilano/AndroidViewClient | src/com/dtmilano/android/viewclient.py | ViewClient.findViewsContainingPoint | def findViewsContainingPoint(self, (x, y), _filter=None):
'''
Finds the list of Views that contain the point (x, y).
'''
if not _filter:
_filter = lambda v: True
return [v for v in self.views if (v.containsPoint((x,y)) and _filter(v))] | python | def findViewsContainingPoint(self, (x, y), _filter=None):
'''
Finds the list of Views that contain the point (x, y).
'''
if not _filter:
_filter = lambda v: True
return [v for v in self.views if (v.containsPoint((x,y)) and _filter(v))] | ['def', 'findViewsContainingPoint', '(', 'self', ',', '(', 'x', ',', 'y', ')', ',', '_filter', '=', 'None', ')', ':', 'if', 'not', '_filter', ':', '_filter', '=', 'lambda', 'v', ':', 'True', 'return', '[', 'v', 'for', 'v', 'in', 'self', '.', 'views', 'if', '(', 'v', '.', 'containsPoint', '(', '(', 'x', ',', 'y', ')', ')', 'and', '_filter', '(', 'v', ')', ')', ']'] | Finds the list of Views that contain the point (x, y). | ['Finds', 'the', 'list', 'of', 'Views', 'that', 'contain', 'the', 'point', '(', 'x', 'y', ')', '.'] | train | https://github.com/dtmilano/AndroidViewClient/blob/7e6e83fde63af99e5e4ab959712ecf94f9881aa2/src/com/dtmilano/android/viewclient.py#L3781-L3789 |
5,099 | acorg/dark-matter | dark/mutations.py | getAPOBECFrequencies | def getAPOBECFrequencies(dotAlignment, orig, new, pattern):
"""
Gets mutation frequencies if they are in a certain pattern.
@param dotAlignment: result from calling basePlotter
@param orig: A C{str}, naming the original base
@param new: A C{str}, what orig was mutated to
@param pattern: A C{str}m which pattern we're looking for
(must be one of 'cPattern', 'tPattern')
"""
cPattern = ['ACA', 'ACC', 'ACG', 'ACT', 'CCA', 'CCC', 'CCG', 'CCT',
'GCA', 'GCC', 'GCG', 'GCT', 'TCA', 'TCC', 'TCG', 'TCT']
tPattern = ['ATA', 'ATC', 'ATG', 'ATT', 'CTA', 'CTC', 'CTG', 'CTT',
'GTA', 'GTC', 'GTG', 'GTT', 'TTA', 'TTC', 'TTG', 'TTT']
# choose the right pattern
if pattern == 'cPattern':
patterns = cPattern
middleBase = 'C'
else:
patterns = tPattern
middleBase = 'T'
# generate the freqs dict with the right pattern
freqs = defaultdict(int)
for pattern in patterns:
freqs[pattern] = 0
# get the subject sequence from dotAlignment
subject = dotAlignment[0].split('\t')[3]
# exclude the subject from the dotAlignment, so just the queries
# are left over
queries = dotAlignment[1:]
for item in queries:
query = item.split('\t')[1]
index = 0
for queryBase in query:
qBase = query[index]
sBase = subject[index]
if qBase == new and sBase == orig:
try:
plusSb = subject[index + 1]
minusSb = subject[index - 1]
except IndexError:
plusSb = 'end'
motif = '%s%s%s' % (minusSb, middleBase, plusSb)
if motif in freqs:
freqs[motif] += 1
index += 1
return freqs | python | def getAPOBECFrequencies(dotAlignment, orig, new, pattern):
"""
Gets mutation frequencies if they are in a certain pattern.
@param dotAlignment: result from calling basePlotter
@param orig: A C{str}, naming the original base
@param new: A C{str}, what orig was mutated to
@param pattern: A C{str}m which pattern we're looking for
(must be one of 'cPattern', 'tPattern')
"""
cPattern = ['ACA', 'ACC', 'ACG', 'ACT', 'CCA', 'CCC', 'CCG', 'CCT',
'GCA', 'GCC', 'GCG', 'GCT', 'TCA', 'TCC', 'TCG', 'TCT']
tPattern = ['ATA', 'ATC', 'ATG', 'ATT', 'CTA', 'CTC', 'CTG', 'CTT',
'GTA', 'GTC', 'GTG', 'GTT', 'TTA', 'TTC', 'TTG', 'TTT']
# choose the right pattern
if pattern == 'cPattern':
patterns = cPattern
middleBase = 'C'
else:
patterns = tPattern
middleBase = 'T'
# generate the freqs dict with the right pattern
freqs = defaultdict(int)
for pattern in patterns:
freqs[pattern] = 0
# get the subject sequence from dotAlignment
subject = dotAlignment[0].split('\t')[3]
# exclude the subject from the dotAlignment, so just the queries
# are left over
queries = dotAlignment[1:]
for item in queries:
query = item.split('\t')[1]
index = 0
for queryBase in query:
qBase = query[index]
sBase = subject[index]
if qBase == new and sBase == orig:
try:
plusSb = subject[index + 1]
minusSb = subject[index - 1]
except IndexError:
plusSb = 'end'
motif = '%s%s%s' % (minusSb, middleBase, plusSb)
if motif in freqs:
freqs[motif] += 1
index += 1
return freqs | ['def', 'getAPOBECFrequencies', '(', 'dotAlignment', ',', 'orig', ',', 'new', ',', 'pattern', ')', ':', 'cPattern', '=', '[', "'ACA'", ',', "'ACC'", ',', "'ACG'", ',', "'ACT'", ',', "'CCA'", ',', "'CCC'", ',', "'CCG'", ',', "'CCT'", ',', "'GCA'", ',', "'GCC'", ',', "'GCG'", ',', "'GCT'", ',', "'TCA'", ',', "'TCC'", ',', "'TCG'", ',', "'TCT'", ']', 'tPattern', '=', '[', "'ATA'", ',', "'ATC'", ',', "'ATG'", ',', "'ATT'", ',', "'CTA'", ',', "'CTC'", ',', "'CTG'", ',', "'CTT'", ',', "'GTA'", ',', "'GTC'", ',', "'GTG'", ',', "'GTT'", ',', "'TTA'", ',', "'TTC'", ',', "'TTG'", ',', "'TTT'", ']', '# choose the right pattern', 'if', 'pattern', '==', "'cPattern'", ':', 'patterns', '=', 'cPattern', 'middleBase', '=', "'C'", 'else', ':', 'patterns', '=', 'tPattern', 'middleBase', '=', "'T'", '# generate the freqs dict with the right pattern', 'freqs', '=', 'defaultdict', '(', 'int', ')', 'for', 'pattern', 'in', 'patterns', ':', 'freqs', '[', 'pattern', ']', '=', '0', '# get the subject sequence from dotAlignment', 'subject', '=', 'dotAlignment', '[', '0', ']', '.', 'split', '(', "'\\t'", ')', '[', '3', ']', '# exclude the subject from the dotAlignment, so just the queries', '# are left over', 'queries', '=', 'dotAlignment', '[', '1', ':', ']', 'for', 'item', 'in', 'queries', ':', 'query', '=', 'item', '.', 'split', '(', "'\\t'", ')', '[', '1', ']', 'index', '=', '0', 'for', 'queryBase', 'in', 'query', ':', 'qBase', '=', 'query', '[', 'index', ']', 'sBase', '=', 'subject', '[', 'index', ']', 'if', 'qBase', '==', 'new', 'and', 'sBase', '==', 'orig', ':', 'try', ':', 'plusSb', '=', 'subject', '[', 'index', '+', '1', ']', 'minusSb', '=', 'subject', '[', 'index', '-', '1', ']', 'except', 'IndexError', ':', 'plusSb', '=', "'end'", 'motif', '=', "'%s%s%s'", '%', '(', 'minusSb', ',', 'middleBase', ',', 'plusSb', ')', 'if', 'motif', 'in', 'freqs', ':', 'freqs', '[', 'motif', ']', '+=', '1', 'index', '+=', '1', 'return', 'freqs'] | Gets mutation frequencies if they are in a certain pattern.
@param dotAlignment: result from calling basePlotter
@param orig: A C{str}, naming the original base
@param new: A C{str}, what orig was mutated to
@param pattern: A C{str}m which pattern we're looking for
(must be one of 'cPattern', 'tPattern') | ['Gets', 'mutation', 'frequencies', 'if', 'they', 'are', 'in', 'a', 'certain', 'pattern', '.'] | train | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/mutations.py#L181-L228 |