| Unnamed: 0 (int64, 0 to 10k) | repository_name (string, 7 to 54 chars) | func_path_in_repository (string, 5 to 223 chars) | func_name (string, 1 to 134 chars) | whole_func_string (string, 100 to 30.3k chars) | language (1 class) | func_code_string (string, 100 to 30.3k chars) | func_code_tokens (string, 138 to 33.2k chars) | func_documentation_string (string, 1 to 15k chars) | func_documentation_tokens (string, 5 to 5.14k chars) | split_name (1 class) | func_code_url (string, 91 to 315 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|
3,600 | zhanglab/psamm | psamm/fluxanalysis.py | consistency_check | def consistency_check(model, subset, epsilon, tfba, solver):
"""Check that reaction subset of model is consistent using FBA.
Yields all reactions that are *not* flux consistent. A reaction is
consistent if there is at least one flux solution to the model that both
respects the model constraints and also allows the reaction in question to
have non-zero flux.
This can be determined by running FBA on each reaction in turn
and checking whether the flux in the solution is non-zero. Since FBA
only tries to maximize the flux (and the flux can be negative for
reversible reactions), we have to try to both maximize and minimize
the flux. An optimization to this method is implemented such that if
checking one reaction results in flux in another unchecked reaction,
that reaction will immediately be marked flux consistent.
Args:
model: MetabolicModel to check for consistency.
subset: Subset of model reactions to check.
epsilon: The threshold at which the flux is considered non-zero.
tfba: If True enable thermodynamic constraints.
solver: LP solver instance to use.
Returns:
An iterator of flux inconsistent reactions in the subset.
"""
fba = _get_fba_problem(model, tfba, solver)
subset = set(subset)
while len(subset) > 0:
reaction = next(iter(subset))
logger.info('{} left, checking {}...'.format(len(subset), reaction))
fba.maximize(reaction)
subset = set(reaction_id for reaction_id in subset
if abs(fba.get_flux(reaction_id)) <= epsilon)
if reaction not in subset:
continue
elif model.is_reversible(reaction):
fba.maximize({reaction: -1})
subset = set(reaction_id for reaction_id in subset
if abs(fba.get_flux(reaction_id)) <= epsilon)
if reaction not in subset:
continue
logger.info('{} not consistent!'.format(reaction))
yield reaction
subset.remove(reaction) | python | def consistency_check(model, subset, epsilon, tfba, solver):
"""Check that reaction subset of model is consistent using FBA.
Yields all reactions that are *not* flux consistent. A reaction is
consistent if there is at least one flux solution to the model that both
respects the model constraints and also allows the reaction in question to
have non-zero flux.
This can be determined by running FBA on each reaction in turn
and checking whether the flux in the solution is non-zero. Since FBA
only tries to maximize the flux (and the flux can be negative for
reversible reactions), we have to try to both maximize and minimize
the flux. An optimization to this method is implemented such that if
checking one reaction results in flux in another unchecked reaction,
that reaction will immediately be marked flux consistent.
Args:
model: MetabolicModel to check for consistency.
subset: Subset of model reactions to check.
epsilon: The threshold at which the flux is considered non-zero.
tfba: If True enable thermodynamic constraints.
solver: LP solver instance to use.
Returns:
An iterator of flux inconsistent reactions in the subset.
"""
fba = _get_fba_problem(model, tfba, solver)
subset = set(subset)
while len(subset) > 0:
reaction = next(iter(subset))
logger.info('{} left, checking {}...'.format(len(subset), reaction))
fba.maximize(reaction)
subset = set(reaction_id for reaction_id in subset
if abs(fba.get_flux(reaction_id)) <= epsilon)
if reaction not in subset:
continue
elif model.is_reversible(reaction):
fba.maximize({reaction: -1})
subset = set(reaction_id for reaction_id in subset
if abs(fba.get_flux(reaction_id)) <= epsilon)
if reaction not in subset:
continue
logger.info('{} not consistent!'.format(reaction))
yield reaction
subset.remove(reaction) | ['def', 'consistency_check', '(', 'model', ',', 'subset', ',', 'epsilon', ',', 'tfba', ',', 'solver', ')', ':', 'fba', '=', '_get_fba_problem', '(', 'model', ',', 'tfba', ',', 'solver', ')', 'subset', '=', 'set', '(', 'subset', ')', 'while', 'len', '(', 'subset', ')', '>', '0', ':', 'reaction', '=', 'next', '(', 'iter', '(', 'subset', ')', ')', 'logger', '.', 'info', '(', "'{} left, checking {}...'", '.', 'format', '(', 'len', '(', 'subset', ')', ',', 'reaction', ')', ')', 'fba', '.', 'maximize', '(', 'reaction', ')', 'subset', '=', 'set', '(', 'reaction_id', 'for', 'reaction_id', 'in', 'subset', 'if', 'abs', '(', 'fba', '.', 'get_flux', '(', 'reaction_id', ')', ')', '<=', 'epsilon', ')', 'if', 'reaction', 'not', 'in', 'subset', ':', 'continue', 'elif', 'model', '.', 'is_reversible', '(', 'reaction', ')', ':', 'fba', '.', 'maximize', '(', '{', 'reaction', ':', '-', '1', '}', ')', 'subset', '=', 'set', '(', 'reaction_id', 'for', 'reaction_id', 'in', 'subset', 'if', 'abs', '(', 'fba', '.', 'get_flux', '(', 'reaction_id', ')', ')', '<=', 'epsilon', ')', 'if', 'reaction', 'not', 'in', 'subset', ':', 'continue', 'logger', '.', 'info', '(', "'{} not consistent!'", '.', 'format', '(', 'reaction', ')', ')', 'yield', 'reaction', 'subset', '.', 'remove', '(', 'reaction', ')'] | Check that reaction subset of model is consistent using FBA.
Yields all reactions that are *not* flux consistent. A reaction is
consistent if there is at least one flux solution to the model that both
respects the model constraints and also allows the reaction in question to
have non-zero flux.
This can be determined by running FBA on each reaction in turn
and checking whether the flux in the solution is non-zero. Since FBA
only tries to maximize the flux (and the flux can be negative for
reversible reactions), we have to try to both maximize and minimize
the flux. An optimization to this method is implemented such that if
checking one reaction results in flux in another unchecked reaction,
that reaction will immediately be marked flux consistent.
Args:
model: MetabolicModel to check for consistency.
subset: Subset of model reactions to check.
epsilon: The threshold at which the flux is considered non-zero.
tfba: If True enable thermodynamic constraints.
solver: LP solver instance to use.
Returns:
An iterator of flux inconsistent reactions in the subset. | ['Check', 'that', 'reaction', 'subset', 'of', 'model', 'is', 'consistent', 'using', 'FBA', '.'] | train | https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/fluxanalysis.py#L420-L470 |
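A minimal usage sketch for the generator documented above, assuming a psamm `MetabolicModel` and an LP solver instance have already been constructed elsewhere (the `model` and `solver` names are placeholders, not part of this row):

```python
# Hypothetical usage; `model` (a MetabolicModel) and `solver` (an LP solver
# instance) are assumed to exist and are not shown here.
from psamm.fluxanalysis import consistency_check

subset = set(model.reactions)  # assumed: iterable of every reaction id in the model

# consistency_check is a generator: reactions that can never carry a flux with
# magnitude above epsilon (in either direction) are yielded one by one.
inconsistent = list(consistency_check(model, subset, epsilon=1e-5,
                                      tfba=False, solver=solver))
print('Flux-inconsistent reactions:', inconsistent)
```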
3,601 | apple/turicreate | deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py | Template.range | def range(self):
"""Returns the range for N"""
match = self._match(in_comment(
'n[ \t]+in[ \t]*\\[([0-9]+)\\.\\.([0-9]+)\\),[ \t]+'
'step[ \t]+([0-9]+)'
))
return range(
int(match.group(1)),
int(match.group(2)),
int(match.group(3))
) | python | def range(self):
"""Returns the range for N"""
match = self._match(in_comment(
'n[ \t]+in[ \t]*\\[([0-9]+)\\.\\.([0-9]+)\\),[ \t]+'
'step[ \t]+([0-9]+)'
))
return range(
int(match.group(1)),
int(match.group(2)),
int(match.group(3))
) | ['def', 'range', '(', 'self', ')', ':', 'match', '=', 'self', '.', '_match', '(', 'in_comment', '(', "'n[ \\t]+in[ \\t]*\\\\[([0-9]+)\\\\.\\\\.([0-9]+)\\\\),[ \\t]+'", "'step[ \\t]+([0-9]+)'", ')', ')', 'return', 'range', '(', 'int', '(', 'match', '.', 'group', '(', '1', ')', ')', ',', 'int', '(', 'match', '.', 'group', '(', '2', ')', ')', ',', 'int', '(', 'match', '.', 'group', '(', '3', ')', ')', ')'] | Returns the range for N | ['Returns', 'the', 'range', 'for', 'N'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py#L143-L153 |
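Reading the regular expression back, `Template.range` expects a directive of the form `n in [start..stop), step k` embedded in a template comment. Below is a standalone re-statement of that match against an invented template snippet; the comment style accepted by `in_comment()` is an assumption here.

```python
import re

# Invented template header; only the "n in [1..13), step 2" directive matters.
example_template = """
// n in [1..13), step 2
#include <boost/metaparse/string.hpp>
"""

# Same pattern as in Template.range, restated without the in_comment() wrapper.
pattern = r'n[ \t]+in[ \t]*\[([0-9]+)\.\.([0-9]+)\),[ \t]+step[ \t]+([0-9]+)'
match = re.search(pattern, example_template)
start, stop, step = (int(g) for g in match.groups())
print(list(range(start, stop, step)))  # -> [1, 3, 5, 7, 9, 11]
```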
3,602 | ten10solutions/Geist | geist/backends/_x11_common.py | GeistXBase.create_process | def create_process(self, command, shell=True, stdout=None, stderr=None,
env=None):
"""
Execute a process using subprocess.Popen, setting the backend's DISPLAY
"""
env = env if env is not None else dict(os.environ)
env['DISPLAY'] = self.display
return subprocess.Popen(command, shell=shell,
stdout=stdout, stderr=stderr,
env=env) | python | def create_process(self, command, shell=True, stdout=None, stderr=None,
env=None):
"""
Execute a process using subprocess.Popen, setting the backend's DISPLAY
"""
env = env if env is not None else dict(os.environ)
env['DISPLAY'] = self.display
return subprocess.Popen(command, shell=shell,
stdout=stdout, stderr=stderr,
env=env) | ['def', 'create_process', '(', 'self', ',', 'command', ',', 'shell', '=', 'True', ',', 'stdout', '=', 'None', ',', 'stderr', '=', 'None', ',', 'env', '=', 'None', ')', ':', 'env', '=', 'env', 'if', 'env', 'is', 'not', 'None', 'else', 'dict', '(', 'os', '.', 'environ', ')', 'env', '[', "'DISPLAY'", ']', '=', 'self', '.', 'display', 'return', 'subprocess', '.', 'Popen', '(', 'command', ',', 'shell', '=', 'shell', ',', 'stdout', '=', 'stdout', ',', 'stderr', '=', 'stderr', ',', 'env', '=', 'env', ')'] | Execute a process using subprocess.Popen, setting the backend's DISPLAY | ['Execute', 'a', 'process', 'using', 'subprocess', '.', 'Popen', 'setting', 'the', 'backend', 's', 'DISPLAY'] | train | https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/backends/_x11_common.py#L54-L63 |
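Because the helper only injects the backend's DISPLAY into the child environment before delegating to `subprocess.Popen`, usage is the same as Popen. A hypothetical sketch, where `backend` stands for an already-constructed object whose `display` attribute names the target X display (e.g. `':0'`):

```python
import subprocess

# Hypothetical: `backend` is an existing GeistXBase-style object.
proc = backend.create_process('xeyes', stdout=subprocess.PIPE)
out, _ = proc.communicate()  # wait for the child and collect its stdout
```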
3,603 | olitheolix/qtmacs | qtmacs/logging_handler.py | QtmacsLoggingHandler.fetch | def fetch(self, start=None, stop=None):
"""
Fetch log records and return them as a list.
|Args|
* ``start`` (**int**): non-negative index of the first log
record to return.
* ``stop`` (**int**): non-negative index of the last log
record to return.
|Returns|
* **list**: list of log records (see ``logger`` module for
definition of log record).
|Raises|
* **None**
"""
# Set defaults if no explicit indices were provided.
if not start:
start = 0
if not stop:
stop = len(self.log)
# Sanity check: indices must be valid.
if start < 0:
start = 0
if stop > len(self.log):
stop = len(self.log)
# Clear the fetch flag. It will be set again in the emit()
# method once new data arrives.
self.waitForFetch = False
# Return the specified range of log records.
return self.log[start:stop] | python | def fetch(self, start=None, stop=None):
"""
Fetch log records and return them as a list.
|Args|
* ``start`` (**int**): non-negative index of the first log
record to return.
* ``stop`` (**int**): non-negative index of the last log
record to return.
|Returns|
* **list**: list of log records (see ``logger`` module for
definition of log record).
|Raises|
* **None**
"""
# Set defaults if no explicit indices were provided.
if not start:
start = 0
if not stop:
stop = len(self.log)
# Sanity check: indices must be valid.
if start < 0:
start = 0
if stop > len(self.log):
stop = len(self.log)
# Clear the fetch flag. It will be set again in the emit()
# method once new data arrives.
self.waitForFetch = False
# Return the specified range of log records.
return self.log[start:stop] | ['def', 'fetch', '(', 'self', ',', 'start', '=', 'None', ',', 'stop', '=', 'None', ')', ':', '# Set defaults if no explicit indices were provided.', 'if', 'not', 'start', ':', 'start', '=', '0', 'if', 'not', 'stop', ':', 'stop', '=', 'len', '(', 'self', '.', 'log', ')', '# Sanity check: indices must be valid.', 'if', 'start', '<', '0', ':', 'start', '=', '0', 'if', 'stop', '>', 'len', '(', 'self', '.', 'log', ')', ':', 'stop', '=', 'len', '(', 'self', '.', 'log', ')', '# Clear the fetch flag. It will be set again in the emit()', '# method once new data arrives.', 'self', '.', 'waitForFetch', '=', 'False', '# Return the specified range of log records.', 'return', 'self', '.', 'log', '[', 'start', ':', 'stop', ']'] | Fetch log records and return them as a list.
|Args|
* ``start`` (**int**): non-negative index of the first log
record to return.
* ``stop`` (**int**): non-negative index of the last log
record to return.
|Returns|
* **list**: list of log records (see ``logger`` module for
definition of log record).
|Raises|
* **None** | ['Fetch', 'log', 'records', 'and', 'return', 'them', 'as', 'a', 'list', '.'] | train | https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/logging_handler.py#L127-L165 |
3,604 | alexras/bread | bread/utils.py | indent_text | def indent_text(string, indent_level=2):
"""Indent every line of text in a newline-delimited string"""
indented_lines = []
indent_spaces = ' ' * indent_level
for line in string.split('\n'):
indented_lines.append(indent_spaces + line)
return '\n'.join(indented_lines) | python | def indent_text(string, indent_level=2):
"""Indent every line of text in a newline-delimited string"""
indented_lines = []
indent_spaces = ' ' * indent_level
for line in string.split('\n'):
indented_lines.append(indent_spaces + line)
return '\n'.join(indented_lines) | ['def', 'indent_text', '(', 'string', ',', 'indent_level', '=', '2', ')', ':', 'indented_lines', '=', '[', ']', 'indent_spaces', '=', "' '", '*', 'indent_level', 'for', 'line', 'in', 'string', '.', 'split', '(', "'\\n'", ')', ':', 'indented_lines', '.', 'append', '(', 'indent_spaces', '+', 'line', ')', 'return', "'\\n'", '.', 'join', '(', 'indented_lines', ')'] | Indent every line of text in a newline-delimited string | ['Indent', 'every', 'line', 'of', 'text', 'in', 'a', 'newline', '-', 'delimited', 'string'] | train | https://github.com/alexras/bread/blob/2e131380878c07500167fc12685e7bff1df258a4/bread/utils.py#L1-L10 |
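A quick illustration of the helper's behaviour, assuming `indent_text` as defined above is in scope (expected output shown in comments):

```python
text = "first line\nsecond line"

print(indent_text(text))
#   first line
#   second line      <- every line gains the default two-space indent

print(indent_text(text, indent_level=4))
#     first line
#     second line
```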
3,605 | muckamuck/stackility | stackility/CloudStackUtility.py | CloudStackUtility._fill_parameters | def _fill_parameters(self):
"""
Fill in the _parameters dict from the properties file.
Args:
None
Returns:
True
Todo:
Figure out what could go wrong and at least acknowledge the
fact that Murphy was an optimist.
"""
self._parameters = self._config.get('parameters', {})
self._fill_defaults()
for k in self._parameters.keys():
try:
if self._parameters[k].startswith(self.SSM) and self._parameters[k].endswith(']'):
parts = self._parameters[k].split(':')
tmp = parts[1].replace(']', '')
val = self._get_ssm_parameter(tmp)
if val:
self._parameters[k] = val
else:
logging.error('SSM parameter {} not found'.format(tmp))
return False
elif self._parameters[k] == self.ASK:
val = None
a1 = '__x___'
a2 = '__y___'
prompt1 = "Enter value for '{}': ".format(k)
prompt2 = "Confirm value for '{}': ".format(k)
while a1 != a2:
a1 = getpass.getpass(prompt=prompt1)
a2 = getpass.getpass(prompt=prompt2)
if a1 == a2:
val = a1
else:
print('values do not match, try again')
self._parameters[k] = val
except:
pass
return True | python | def _fill_parameters(self):
"""
Fill in the _parameters dict from the properties file.
Args:
None
Returns:
True
Todo:
Figure out what could go wrong and at least acknowledge the
fact that Murphy was an optimist.
"""
self._parameters = self._config.get('parameters', {})
self._fill_defaults()
for k in self._parameters.keys():
try:
if self._parameters[k].startswith(self.SSM) and self._parameters[k].endswith(']'):
parts = self._parameters[k].split(':')
tmp = parts[1].replace(']', '')
val = self._get_ssm_parameter(tmp)
if val:
self._parameters[k] = val
else:
logging.error('SSM parameter {} not found'.format(tmp))
return False
elif self._parameters[k] == self.ASK:
val = None
a1 = '__x___'
a2 = '__y___'
prompt1 = "Enter value for '{}': ".format(k)
prompt2 = "Confirm value for '{}': ".format(k)
while a1 != a2:
a1 = getpass.getpass(prompt=prompt1)
a2 = getpass.getpass(prompt=prompt2)
if a1 == a2:
val = a1
else:
print('values do not match, try again')
self._parameters[k] = val
except:
pass
return True | ['def', '_fill_parameters', '(', 'self', ')', ':', 'self', '.', '_parameters', '=', 'self', '.', '_config', '.', 'get', '(', "'parameters'", ',', '{', '}', ')', 'self', '.', '_fill_defaults', '(', ')', 'for', 'k', 'in', 'self', '.', '_parameters', '.', 'keys', '(', ')', ':', 'try', ':', 'if', 'self', '.', '_parameters', '[', 'k', ']', '.', 'startswith', '(', 'self', '.', 'SSM', ')', 'and', 'self', '.', '_parameters', '[', 'k', ']', '.', 'endswith', '(', "']'", ')', ':', 'parts', '=', 'self', '.', '_parameters', '[', 'k', ']', '.', 'split', '(', "':'", ')', 'tmp', '=', 'parts', '[', '1', ']', '.', 'replace', '(', "']'", ',', "''", ')', 'val', '=', 'self', '.', '_get_ssm_parameter', '(', 'tmp', ')', 'if', 'val', ':', 'self', '.', '_parameters', '[', 'k', ']', '=', 'val', 'else', ':', 'logging', '.', 'error', '(', "'SSM parameter {} not found'", '.', 'format', '(', 'tmp', ')', ')', 'return', 'False', 'elif', 'self', '.', '_parameters', '[', 'k', ']', '==', 'self', '.', 'ASK', ':', 'val', '=', 'None', 'a1', '=', "'__x___'", 'a2', '=', "'__y___'", 'prompt1', '=', '"Enter value for \'{}\': "', '.', 'format', '(', 'k', ')', 'prompt2', '=', '"Confirm value for \'{}\': "', '.', 'format', '(', 'k', ')', 'while', 'a1', '!=', 'a2', ':', 'a1', '=', 'getpass', '.', 'getpass', '(', 'prompt', '=', 'prompt1', ')', 'a2', '=', 'getpass', '.', 'getpass', '(', 'prompt', '=', 'prompt2', ')', 'if', 'a1', '==', 'a2', ':', 'val', '=', 'a1', 'else', ':', 'print', '(', "'values do not match, try again'", ')', 'self', '.', '_parameters', '[', 'k', ']', '=', 'val', 'except', ':', 'pass', 'return', 'True'] | Fill in the _parameters dict from the properties file.
Args:
None
Returns:
True
Todo:
Figure out what could go wrong and at least acknowledge the
fact that Murphy was an optimist. | ['Fill', 'in', 'the', '_parameters', 'dict', 'from', 'the', 'properties', 'file', '.'] | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/CloudStackUtility.py#L453-L498 |
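The branches above recognise two special parameter values: an SSM reference (a value that starts with `self.SSM`, ends with `]`, and carries the parameter name after a colon) and an interactive sentinel compared against `self.ASK`. A hedged sketch of what the loaded `parameters` mapping might look like; the key names and the exact sentinel spellings (`[ssm:...]`, `ask`) are assumptions rather than documented stackility syntax:

```python
# Illustrative only: a possible return value of self._config.get('parameters', {}).
parameters = {
    'InstanceType': 't3.micro',                    # ordinary value, used verbatim
    'DatabasePassword': '[ssm:prod-db-password]',  # assumed SSM syntax; resolved via _get_ssm_parameter()
    'ApiToken': 'ask',                             # assumed sentinel; triggers the double getpass prompt
}
```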
3,606 | fabioz/PyDev.Debugger | _pydevd_bundle/pydevd_console.py | ConsoleMessage.add_console_message | def add_console_message(self, message_type, message):
"""add messages in the console_messages list
"""
for m in message.split("\n"):
if m.strip():
self.console_messages.append((message_type, m)) | python | def add_console_message(self, message_type, message):
"""add messages in the console_messages list
"""
for m in message.split("\n"):
if m.strip():
self.console_messages.append((message_type, m)) | ['def', 'add_console_message', '(', 'self', ',', 'message_type', ',', 'message', ')', ':', 'for', 'm', 'in', 'message', '.', 'split', '(', '"\\n"', ')', ':', 'if', 'm', '.', 'strip', '(', ')', ':', 'self', '.', 'console_messages', '.', 'append', '(', '(', 'message_type', ',', 'm', ')', ')'] | add messages in the console_messages list | ['add', 'messages', 'in', 'the', 'console_messages', 'list'] | train | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydevd_bundle/pydevd_console.py#L31-L36 |
3,607 | ska-sa/katcp-python | katcp/kattypes.py | Parameter.unpack | def unpack(self, value):
"""Unpack the parameter using its kattype.
Parameters
----------
packed_value : str
The unescaped KATCP string to unpack.
Returns
-------
value : object
The unpacked value.
"""
# Wrap errors in FailReplies with information identifying the parameter
try:
return self._kattype.unpack(value, self.major)
except ValueError, message:
raise FailReply("Error in parameter %s (%s): %s" %
(self.position, self.name, message)) | python | def unpack(self, value):
"""Unpack the parameter using its kattype.
Parameters
----------
packed_value : str
The unescaped KATCP string to unpack.
Returns
-------
value : object
The unpacked value.
"""
# Wrap errors in FailReplies with information identifying the parameter
try:
return self._kattype.unpack(value, self.major)
except ValueError, message:
raise FailReply("Error in parameter %s (%s): %s" %
(self.position, self.name, message)) | ['def', 'unpack', '(', 'self', ',', 'value', ')', ':', '# Wrap errors in FailReplies with information identifying the parameter', 'try', ':', 'return', 'self', '.', '_kattype', '.', 'unpack', '(', 'value', ',', 'self', '.', 'major', ')', 'except', 'ValueError', ',', 'message', ':', 'raise', 'FailReply', '(', '"Error in parameter %s (%s): %s"', '%', '(', 'self', '.', 'position', ',', 'self', '.', 'name', ',', 'message', ')', ')'] | Unpack the parameter using its kattype.
Parameters
----------
packed_value : str
The unescaped KATCP string to unpack.
Returns
-------
value : object
The unpacked value. | ['Unpack', 'the', 'parameter', 'using', 'its', 'kattype', '.'] | train | https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/kattypes.py#L586-L605 |
3,608 | bcbio/bcbio-nextgen | bcbio/bam/trim.py | _cutadapt_se_cmd | def _cutadapt_se_cmd(fastq_files, out_files, base_cmd, data):
"""
this has to use the -o option, not redirect to stdout in order for
gzipping to be supported
"""
min_length = dd.get_min_read_length(data)
cmd = base_cmd + " --minimum-length={min_length} ".format(**locals())
fq1 = objectstore.cl_input(fastq_files[0])
of1 = out_files[0]
cmd += " -o {of1_tx} " + str(fq1)
cmd = "%s | tee > {log_tx}" % cmd
return cmd | python | def _cutadapt_se_cmd(fastq_files, out_files, base_cmd, data):
"""
this has to use the -o option, not redirect to stdout in order for
gzipping to be supported
"""
min_length = dd.get_min_read_length(data)
cmd = base_cmd + " --minimum-length={min_length} ".format(**locals())
fq1 = objectstore.cl_input(fastq_files[0])
of1 = out_files[0]
cmd += " -o {of1_tx} " + str(fq1)
cmd = "%s | tee > {log_tx}" % cmd
return cmd | ['def', '_cutadapt_se_cmd', '(', 'fastq_files', ',', 'out_files', ',', 'base_cmd', ',', 'data', ')', ':', 'min_length', '=', 'dd', '.', 'get_min_read_length', '(', 'data', ')', 'cmd', '=', 'base_cmd', '+', '" --minimum-length={min_length} "', '.', 'format', '(', '*', '*', 'locals', '(', ')', ')', 'fq1', '=', 'objectstore', '.', 'cl_input', '(', 'fastq_files', '[', '0', ']', ')', 'of1', '=', 'out_files', '[', '0', ']', 'cmd', '+=', '" -o {of1_tx} "', '+', 'str', '(', 'fq1', ')', 'cmd', '=', '"%s | tee > {log_tx}"', '%', 'cmd', 'return', 'cmd'] | this has to use the -o option, not redirect to stdout in order for
gzipping to be supported | ['this', 'has', 'to', 'use', 'the', '-', 'o', 'option', 'not', 'redirect', 'to', 'stdout', 'in', 'order', 'for', 'gzipping', 'to', 'be', 'supported'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/trim.py#L246-L257 |
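Tracing the concatenation above with invented inputs shows the shape of the command template the function returns; the `{of1_tx}` and `{log_tx}` placeholders are left for the caller to fill in, and `objectstore.cl_input` is assumed to pass a plain local path through unchanged:

```python
# Invented inputs, purely to show the shape of the returned template.
base_cmd = 'cutadapt -a AGATCGGAAGAGC'
min_length = 25
fq1 = 'sample_1.fastq.gz'

cmd = base_cmd + ' --minimum-length={min_length} '.format(**locals())
cmd += ' -o {of1_tx} ' + fq1
cmd = '%s | tee > {log_tx}' % cmd
print(cmd)
# cutadapt -a AGATCGGAAGAGC --minimum-length=25  -o {of1_tx} sample_1.fastq.gz | tee > {log_tx}
```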
3,609 | ewels/MultiQC | multiqc/modules/fastqc/fastqc.py | MultiqcModule.sequence_quality_plot | def sequence_quality_plot (self):
""" Create the HTML for the phred quality score plot """
data = dict()
for s_name in self.fastqc_data:
try:
data[s_name] = {self.avg_bp_from_range(d['base']): d['mean'] for d in self.fastqc_data[s_name]['per_base_sequence_quality']}
except KeyError:
pass
if len(data) == 0:
log.debug('sequence_quality not found in FastQC reports')
return None
pconfig = {
'id': 'fastqc_per_base_sequence_quality_plot',
'title': 'FastQC: Mean Quality Scores',
'ylab': 'Phred Score',
'xlab': 'Position (bp)',
'ymin': 0,
'xDecimals': False,
'tt_label': '<b>Base {point.x}</b>: {point.y:.2f}',
'colors': self.get_status_cols('per_base_sequence_quality'),
'yPlotBands': [
{'from': 28, 'to': 100, 'color': '#c3e6c3'},
{'from': 20, 'to': 28, 'color': '#e6dcc3'},
{'from': 0, 'to': 20, 'color': '#e6c3c3'},
]
}
self.add_section (
name = 'Sequence Quality Histograms',
anchor = 'fastqc_per_base_sequence_quality',
description = 'The mean quality value across each base position in the read.',
helptext = '''
To enable multiple samples to be plotted on the same graph, only the mean quality
scores are plotted (unlike the box plots seen in FastQC reports).
Taken from the [FastQC help](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/2%20Per%20Base%20Sequence%20Quality.html):
_The y-axis on the graph shows the quality scores. The higher the score, the better
the base call. The background of the graph divides the y axis into very good quality
calls (green), calls of reasonable quality (orange), and calls of poor quality (red).
The quality of calls on most platforms will degrade as the run progresses, so it is
common to see base calls falling into the orange area towards the end of a read._
''',
plot = linegraph.plot(data, pconfig)
) | python | def sequence_quality_plot (self):
""" Create the HTML for the phred quality score plot """
data = dict()
for s_name in self.fastqc_data:
try:
data[s_name] = {self.avg_bp_from_range(d['base']): d['mean'] for d in self.fastqc_data[s_name]['per_base_sequence_quality']}
except KeyError:
pass
if len(data) == 0:
log.debug('sequence_quality not found in FastQC reports')
return None
pconfig = {
'id': 'fastqc_per_base_sequence_quality_plot',
'title': 'FastQC: Mean Quality Scores',
'ylab': 'Phred Score',
'xlab': 'Position (bp)',
'ymin': 0,
'xDecimals': False,
'tt_label': '<b>Base {point.x}</b>: {point.y:.2f}',
'colors': self.get_status_cols('per_base_sequence_quality'),
'yPlotBands': [
{'from': 28, 'to': 100, 'color': '#c3e6c3'},
{'from': 20, 'to': 28, 'color': '#e6dcc3'},
{'from': 0, 'to': 20, 'color': '#e6c3c3'},
]
}
self.add_section (
name = 'Sequence Quality Histograms',
anchor = 'fastqc_per_base_sequence_quality',
description = 'The mean quality value across each base position in the read.',
helptext = '''
To enable multiple samples to be plotted on the same graph, only the mean quality
scores are plotted (unlike the box plots seen in FastQC reports).
Taken from the [FastQC help](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/2%20Per%20Base%20Sequence%20Quality.html):
_The y-axis on the graph shows the quality scores. The higher the score, the better
the base call. The background of the graph divides the y axis into very good quality
calls (green), calls of reasonable quality (orange), and calls of poor quality (red).
The quality of calls on most platforms will degrade as the run progresses, so it is
common to see base calls falling into the orange area towards the end of a read._
''',
plot = linegraph.plot(data, pconfig)
) | ['def', 'sequence_quality_plot', '(', 'self', ')', ':', 'data', '=', 'dict', '(', ')', 'for', 's_name', 'in', 'self', '.', 'fastqc_data', ':', 'try', ':', 'data', '[', 's_name', ']', '=', '{', 'self', '.', 'avg_bp_from_range', '(', 'd', '[', "'base'", ']', ')', ':', 'd', '[', "'mean'", ']', 'for', 'd', 'in', 'self', '.', 'fastqc_data', '[', 's_name', ']', '[', "'per_base_sequence_quality'", ']', '}', 'except', 'KeyError', ':', 'pass', 'if', 'len', '(', 'data', ')', '==', '0', ':', 'log', '.', 'debug', '(', "'sequence_quality not found in FastQC reports'", ')', 'return', 'None', 'pconfig', '=', '{', "'id'", ':', "'fastqc_per_base_sequence_quality_plot'", ',', "'title'", ':', "'FastQC: Mean Quality Scores'", ',', "'ylab'", ':', "'Phred Score'", ',', "'xlab'", ':', "'Position (bp)'", ',', "'ymin'", ':', '0', ',', "'xDecimals'", ':', 'False', ',', "'tt_label'", ':', "'<b>Base {point.x}</b>: {point.y:.2f}'", ',', "'colors'", ':', 'self', '.', 'get_status_cols', '(', "'per_base_sequence_quality'", ')', ',', "'yPlotBands'", ':', '[', '{', "'from'", ':', '28', ',', "'to'", ':', '100', ',', "'color'", ':', "'#c3e6c3'", '}', ',', '{', "'from'", ':', '20', ',', "'to'", ':', '28', ',', "'color'", ':', "'#e6dcc3'", '}', ',', '{', "'from'", ':', '0', ',', "'to'", ':', '20', ',', "'color'", ':', "'#e6c3c3'", '}', ',', ']', '}', 'self', '.', 'add_section', '(', 'name', '=', "'Sequence Quality Histograms'", ',', 'anchor', '=', "'fastqc_per_base_sequence_quality'", ',', 'description', '=', "'The mean quality value across each base position in the read.'", ',', 'helptext', '=', "'''\n To enable multiple samples to be plotted on the same graph, only the mean quality\n scores are plotted (unlike the box plots seen in FastQC reports).\n\n Taken from the [FastQC help](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/2%20Per%20Base%20Sequence%20Quality.html):\n\n _The y-axis on the graph shows the quality scores. The higher the score, the better\n the base call. The background of the graph divides the y axis into very good quality\n calls (green), calls of reasonable quality (orange), and calls of poor quality (red).\n The quality of calls on most platforms will degrade as the run progresses, so it is\n common to see base calls falling into the orange area towards the end of a read._\n '''", ',', 'plot', '=', 'linegraph', '.', 'plot', '(', 'data', ',', 'pconfig', ')', ')'] | Create the HTML for the phred quality score plot | ['Create', 'the', 'HTML', 'for', 'the', 'phred', 'quality', 'score', 'plot'] | train | https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/fastqc/fastqc.py#L324-L369 |
3,610 | honzajavorek/redis-collections | redis_collections/base.py | RedisCollection._normalize_index | def _normalize_index(self, index, pipe=None):
"""Convert negative indexes into their positive equivalents."""
pipe = self.redis if pipe is None else pipe
len_self = self.__len__(pipe)
positive_index = index if index >= 0 else len_self + index
return len_self, positive_index | python | def _normalize_index(self, index, pipe=None):
"""Convert negative indexes into their positive equivalents."""
pipe = self.redis if pipe is None else pipe
len_self = self.__len__(pipe)
positive_index = index if index >= 0 else len_self + index
return len_self, positive_index | ['def', '_normalize_index', '(', 'self', ',', 'index', ',', 'pipe', '=', 'None', ')', ':', 'pipe', '=', 'self', '.', 'redis', 'if', 'pipe', 'is', 'None', 'else', 'pipe', 'len_self', '=', 'self', '.', '__len__', '(', 'pipe', ')', 'positive_index', '=', 'index', 'if', 'index', '>=', '0', 'else', 'len_self', '+', 'index', 'return', 'len_self', ',', 'positive_index'] | Convert negative indexes into their positive equivalents. | ['Convert', 'negative', 'indexes', 'into', 'their', 'positive', 'equivalents', '.'] | train | https://github.com/honzajavorek/redis-collections/blob/07ca8efe88fb128f7dc7319dfa6a26cd39b3776b/redis_collections/base.py#L152-L158 |
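A worked restatement of the arithmetic, using a hypothetical collection length of 5:

```python
def normalize_index(index, length):
    """Standalone restatement of the same arithmetic, for illustration only."""
    positive_index = index if index >= 0 else length + index
    return length, positive_index

assert normalize_index(2, 5) == (5, 2)    # non-negative indexes pass through
assert normalize_index(-1, 5) == (5, 4)   # -1 maps to the last element
assert normalize_index(-5, 5) == (5, 0)   # -len maps to the first element
assert normalize_index(-7, 5) == (5, -2)  # out-of-range negatives are not clamped
```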
3,611 | leancloud/python-sdk | leancloud/user.py | User.sign_up | def sign_up(self, username=None, password=None):
"""
Create a new user. A newly created User object should use this method to save its data to the server, rather than the save method.
The user object must contain both the username and password fields.
"""
if username:
self.set('username', username)
if password:
self.set('password', password)
username = self.get('username')
if not username:
raise TypeError('invalid username: {0}'.format(username))
password = self.get('password')
if not password:
raise TypeError('invalid password')
self.save(make_current=True) | python | def sign_up(self, username=None, password=None):
"""
Create a new user. A newly created User object should use this method to save its data to the server, rather than the save method.
The user object must contain both the username and password fields.
"""
if username:
self.set('username', username)
if password:
self.set('password', password)
username = self.get('username')
if not username:
raise TypeError('invalid username: {0}'.format(username))
password = self.get('password')
if not password:
raise TypeError('invalid password')
self.save(make_current=True) | ['def', 'sign_up', '(', 'self', ',', 'username', '=', 'None', ',', 'password', '=', 'None', ')', ':', 'if', 'username', ':', 'self', '.', 'set', '(', "'username'", ',', 'username', ')', 'if', 'password', ':', 'self', '.', 'set', '(', "'password'", ',', 'password', ')', 'username', '=', 'self', '.', 'get', '(', "'username'", ')', 'if', 'not', 'username', ':', 'raise', 'TypeError', '(', "'invalid username: {0}'", '.', 'format', '(', 'username', ')', ')', 'password', '=', 'self', '.', 'get', '(', "'password'", ')', 'if', 'not', 'password', ':', 'raise', 'TypeError', '(', "'invalid password'", ')', 'self', '.', 'save', '(', 'make_current', '=', 'True', ')'] | 创建一个新用户。新创建的 User 对象,应该使用此方法来将数据保存至服务器,而不是使用 save 方法。
用户对象上必须包含 username 和 password 两个字段 | ['创建一个新用户。新创建的', 'User', '对象,应该使用此方法来将数据保存至服务器,而不是使用', 'save', '方法。', '用户对象上必须包含', 'username', '和', 'password', '两个字段'] | train | https://github.com/leancloud/python-sdk/blob/fea3240257ce65e6a32c7312a5cee1f94a51a587/leancloud/user.py#L114-L131 |
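A minimal sign-up sketch, assuming the application credentials have already been configured with `leancloud.init` (the id and key strings below are placeholders):

```python
import leancloud

leancloud.init('APP_ID_PLACEHOLDER', 'APP_KEY_PLACEHOLDER')  # placeholder credentials

user = leancloud.User()
user.sign_up(username='alice', password='a-long-random-password')
# sign_up() saves the new user server-side and makes it the current user
# (save(make_current=True) in the implementation above).
```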
3,612 | hellock/icrawler | icrawler/utils/proxy_pool.py | ProxyPool.load | def load(self, filename):
"""Load proxies from file"""
with open(filename, 'r') as fin:
proxies = json.load(fin)
for protocol in proxies:
for proxy in proxies[protocol]:
self.proxies[protocol][proxy['addr']] = Proxy(
proxy['addr'], proxy['protocol'], proxy['weight'],
proxy['last_checked'])
self.addr_list[protocol].append(proxy['addr']) | python | def load(self, filename):
"""Load proxies from file"""
with open(filename, 'r') as fin:
proxies = json.load(fin)
for protocol in proxies:
for proxy in proxies[protocol]:
self.proxies[protocol][proxy['addr']] = Proxy(
proxy['addr'], proxy['protocol'], proxy['weight'],
proxy['last_checked'])
self.addr_list[protocol].append(proxy['addr']) | ['def', 'load', '(', 'self', ',', 'filename', ')', ':', 'with', 'open', '(', 'filename', ',', "'r'", ')', 'as', 'fin', ':', 'proxies', '=', 'json', '.', 'load', '(', 'fin', ')', 'for', 'protocol', 'in', 'proxies', ':', 'for', 'proxy', 'in', 'proxies', '[', 'protocol', ']', ':', 'self', '.', 'proxies', '[', 'protocol', ']', '[', 'proxy', '[', "'addr'", ']', ']', '=', 'Proxy', '(', 'proxy', '[', "'addr'", ']', ',', 'proxy', '[', "'protocol'", ']', ',', 'proxy', '[', "'weight'", ']', ',', 'proxy', '[', "'last_checked'", ']', ')', 'self', '.', 'addr_list', '[', 'protocol', ']', '.', 'append', '(', 'proxy', '[', "'addr'", ']', ')'] | Load proxies from file | ['Load', 'proxies', 'from', 'file'] | train | https://github.com/hellock/icrawler/blob/38c925758fd3d3e568d3ecc993f77bc0acfa4788/icrawler/utils/proxy_pool.py#L166-L175 |
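The loader expects a JSON file keyed by protocol, each entry being a list of records with `addr`, `protocol`, `weight` and `last_checked` fields. A hand-written example of that layout (addresses, weights and timestamps are invented):

```python
import json

example = {
    "http": [
        {"addr": "10.0.0.1:8080", "protocol": "http",
         "weight": 1.0, "last_checked": 1546300800},
    ],
    "https": [
        {"addr": "10.0.0.2:3128", "protocol": "https",
         "weight": 0.8, "last_checked": 1546300800},
    ],
}

with open('proxies.json', 'w') as fout:
    json.dump(example, fout)
# pool.load('proxies.json') would then rebuild Proxy objects from these records.
```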
3,613 | inasafe/inasafe | safe/impact_function/impact_function.py | ImpactFunction.style | def style(self):
"""Function to apply some styles to the layers."""
LOGGER.info('ANALYSIS : Styling')
classes = generate_classified_legend(
self.analysis_impacted,
self.exposure,
self.hazard,
self.use_rounding,
self.debug_mode)
# Let's style layers which have a geometry and have hazard_class
hazard_class = hazard_class_field['key']
for layer in self._outputs():
without_geometries = [
QgsWkbTypes.NullGeometry,
QgsWkbTypes.UnknownGeometry]
if layer.geometryType() not in without_geometries:
display_not_exposed = False
if layer == self.impact or self.debug_mode:
display_not_exposed = True
if layer.keywords['inasafe_fields'].get(hazard_class):
hazard_class_style(layer, classes, display_not_exposed)
# Let's style the aggregation and analysis layer.
simple_polygon_without_brush(
self.aggregation_summary, aggregation_width, aggregation_color)
simple_polygon_without_brush(
self.analysis_impacted, analysis_width, analysis_color)
# Styling is finished, save them as QML
for layer in self._outputs():
layer.saveDefaultStyle() | python | def style(self):
"""Function to apply some styles to the layers."""
LOGGER.info('ANALYSIS : Styling')
classes = generate_classified_legend(
self.analysis_impacted,
self.exposure,
self.hazard,
self.use_rounding,
self.debug_mode)
# Let's style layers which have a geometry and have hazard_class
hazard_class = hazard_class_field['key']
for layer in self._outputs():
without_geometries = [
QgsWkbTypes.NullGeometry,
QgsWkbTypes.UnknownGeometry]
if layer.geometryType() not in without_geometries:
display_not_exposed = False
if layer == self.impact or self.debug_mode:
display_not_exposed = True
if layer.keywords['inasafe_fields'].get(hazard_class):
hazard_class_style(layer, classes, display_not_exposed)
# Let's style the aggregation and analysis layer.
simple_polygon_without_brush(
self.aggregation_summary, aggregation_width, aggregation_color)
simple_polygon_without_brush(
self.analysis_impacted, analysis_width, analysis_color)
# Styling is finished, save them as QML
for layer in self._outputs():
layer.saveDefaultStyle() | ['def', 'style', '(', 'self', ')', ':', 'LOGGER', '.', 'info', '(', "'ANALYSIS : Styling'", ')', 'classes', '=', 'generate_classified_legend', '(', 'self', '.', 'analysis_impacted', ',', 'self', '.', 'exposure', ',', 'self', '.', 'hazard', ',', 'self', '.', 'use_rounding', ',', 'self', '.', 'debug_mode', ')', "# Let's style layers which have a geometry and have hazard_class", 'hazard_class', '=', 'hazard_class_field', '[', "'key'", ']', 'for', 'layer', 'in', 'self', '.', '_outputs', '(', ')', ':', 'without_geometries', '=', '[', 'QgsWkbTypes', '.', 'NullGeometry', ',', 'QgsWkbTypes', '.', 'UnknownGeometry', ']', 'if', 'layer', '.', 'geometryType', '(', ')', 'not', 'in', 'without_geometries', ':', 'display_not_exposed', '=', 'False', 'if', 'layer', '==', 'self', '.', 'impact', 'or', 'self', '.', 'debug_mode', ':', 'display_not_exposed', '=', 'True', 'if', 'layer', '.', 'keywords', '[', "'inasafe_fields'", ']', '.', 'get', '(', 'hazard_class', ')', ':', 'hazard_class_style', '(', 'layer', ',', 'classes', ',', 'display_not_exposed', ')', "# Let's style the aggregation and analysis layer.", 'simple_polygon_without_brush', '(', 'self', '.', 'aggregation_summary', ',', 'aggregation_width', ',', 'aggregation_color', ')', 'simple_polygon_without_brush', '(', 'self', '.', 'analysis_impacted', ',', 'analysis_width', ',', 'analysis_color', ')', '# Styling is finished, save them as QML', 'for', 'layer', 'in', 'self', '.', '_outputs', '(', ')', ':', 'layer', '.', 'saveDefaultStyle', '(', ')'] | Function to apply some styles to the layers. | ['Function', 'to', 'apply', 'some', 'styles', 'to', 'the', 'layers', '.'] | train | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/impact_function/impact_function.py#L2478-L2510 |
3,614 | saltstack/salt | salt/returners/couchbase_return.py | get_jids | def get_jids():
'''
Return a list of all job ids
'''
cb_ = _get_connection()
_verify_views()
ret = {}
for result in cb_.query(DESIGN_NAME, 'jids', include_docs=True):
ret[result.key] = _format_jid_instance(result.key, result.doc.value['load'])
return ret | python | def get_jids():
'''
Return a list of all job ids
'''
cb_ = _get_connection()
_verify_views()
ret = {}
for result in cb_.query(DESIGN_NAME, 'jids', include_docs=True):
ret[result.key] = _format_jid_instance(result.key, result.doc.value['load'])
return ret | ['def', 'get_jids', '(', ')', ':', 'cb_', '=', '_get_connection', '(', ')', '_verify_views', '(', ')', 'ret', '=', '{', '}', 'for', 'result', 'in', 'cb_', '.', 'query', '(', 'DESIGN_NAME', ',', "'jids'", ',', 'include_docs', '=', 'True', ')', ':', 'ret', '[', 'result', '.', 'key', ']', '=', '_format_jid_instance', '(', 'result', '.', 'key', ',', 'result', '.', 'doc', '.', 'value', '[', "'load'", ']', ')', 'return', 'ret'] | Return a list of all job ids | ['Return', 'a', 'list', 'of', 'all', 'job', 'ids'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/couchbase_return.py#L297-L309 |
3,615 | geographika/mappyfile | mappyfile/validator.py | Validator.validate | def validate(self, value, add_comments=False, schema_name="map"):
"""
verbose - also return the jsonschema error details
"""
validator = self.get_schema_validator(schema_name)
error_messages = []
if isinstance(value, list):
for d in value:
error_messages += self._validate(d, validator, add_comments, schema_name)
else:
error_messages = self._validate(value, validator, add_comments, schema_name)
return error_messages | python | def validate(self, value, add_comments=False, schema_name="map"):
"""
verbose - also return the jsonschema error details
"""
validator = self.get_schema_validator(schema_name)
error_messages = []
if isinstance(value, list):
for d in value:
error_messages += self._validate(d, validator, add_comments, schema_name)
else:
error_messages = self._validate(value, validator, add_comments, schema_name)
return error_messages | ['def', 'validate', '(', 'self', ',', 'value', ',', 'add_comments', '=', 'False', ',', 'schema_name', '=', '"map"', ')', ':', 'validator', '=', 'self', '.', 'get_schema_validator', '(', 'schema_name', ')', 'error_messages', '=', '[', ']', 'if', 'isinstance', '(', 'value', ',', 'list', ')', ':', 'for', 'd', 'in', 'value', ':', 'error_messages', '+=', 'self', '.', '_validate', '(', 'd', ',', 'validator', ',', 'add_comments', ',', 'schema_name', ')', 'else', ':', 'error_messages', '=', 'self', '.', '_validate', '(', 'value', ',', 'validator', ',', 'add_comments', ',', 'schema_name', ')', 'return', 'error_messages'] | verbose - also return the jsonschema error details | ['verbose', '-', 'also', 'return', 'the', 'jsonschema', 'error', 'details'] | train | https://github.com/geographika/mappyfile/blob/aecbc5e66ec06896bc4c5db41313503468829d00/mappyfile/validator.py#L169-L183 |
3,616 | goshuirc/irc | girc/imapping.py | IDict.copy | def copy(self):
"""Return a copy of ourself."""
new_dict = IDict(std=self._std)
new_dict.update(self.store)
return new_dict | python | def copy(self):
"""Return a copy of ourself."""
new_dict = IDict(std=self._std)
new_dict.update(self.store)
return new_dict | ['def', 'copy', '(', 'self', ')', ':', 'new_dict', '=', 'IDict', '(', 'std', '=', 'self', '.', '_std', ')', 'new_dict', '.', 'update', '(', 'self', '.', 'store', ')', 'return', 'new_dict'] | Return a copy of ourself. | ['Return', 'a', 'copy', 'of', 'ourself', '.'] | train | https://github.com/goshuirc/irc/blob/d6a5e3e04d337566c009b087f108cd76f9e122cc/girc/imapping.py#L103-L107 |
3,617 | tonioo/sievelib | sievelib/managesieve.py | Client.setactive | def setactive(self, scriptname):
"""Define the active script
See MANAGESIEVE specifications, section 2.8
If scriptname is empty, the current active script is disabled,
ie. there will be no active script anymore.
:param scriptname: script's name
:rtype: boolean
"""
code, data = self.__send_command(
"SETACTIVE", [scriptname.encode("utf-8")])
if code == "OK":
return True
return False | python | def setactive(self, scriptname):
"""Define the active script
See MANAGESIEVE specifications, section 2.8
If scriptname is empty, the current active script is disabled,
ie. there will be no active script anymore.
:param scriptname: script's name
:rtype: boolean
"""
code, data = self.__send_command(
"SETACTIVE", [scriptname.encode("utf-8")])
if code == "OK":
return True
return False | ['def', 'setactive', '(', 'self', ',', 'scriptname', ')', ':', 'code', ',', 'data', '=', 'self', '.', '__send_command', '(', '"SETACTIVE"', ',', '[', 'scriptname', '.', 'encode', '(', '"utf-8"', ')', ']', ')', 'if', 'code', '==', '"OK"', ':', 'return', 'True', 'return', 'False'] | Define the active script
See MANAGESIEVE specifications, section 2.8
If scriptname is empty, the current active script is disabled,
ie. there will be no active script anymore.
:param scriptname: script's name
:rtype: boolean | ['Define', 'the', 'active', 'script'] | train | https://github.com/tonioo/sievelib/blob/88822d1f1daf30ef3dd9ac74911301b0773ef3c8/sievelib/managesieve.py#L676-L691 |
3,618 | ray-project/ray | python/ray/tune/schedulers/median_stopping_rule.py | MedianStoppingRule.on_trial_result | def on_trial_result(self, trial_runner, trial, result):
"""Callback for early stopping.
This stopping rule stops a running trial if the trial's best objective
value by step `t` is strictly worse than the median of the running
averages of all completed trials' objectives reported up to step `t`.
"""
if trial in self._stopped_trials:
assert not self._hard_stop
return TrialScheduler.CONTINUE # fall back to FIFO
time = result[self._time_attr]
self._results[trial].append(result)
median_result = self._get_median_result(time)
best_result = self._best_result(trial)
if self._verbose:
logger.info("Trial {} best res={} vs median res={} at t={}".format(
trial, best_result, median_result, time))
if best_result < median_result and time > self._grace_period:
if self._verbose:
logger.info("MedianStoppingRule: "
"early stopping {}".format(trial))
self._stopped_trials.add(trial)
if self._hard_stop:
return TrialScheduler.STOP
else:
return TrialScheduler.PAUSE
else:
return TrialScheduler.CONTINUE | python | def on_trial_result(self, trial_runner, trial, result):
"""Callback for early stopping.
This stopping rule stops a running trial if the trial's best objective
value by step `t` is strictly worse than the median of the running
averages of all completed trials' objectives reported up to step `t`.
"""
if trial in self._stopped_trials:
assert not self._hard_stop
return TrialScheduler.CONTINUE # fall back to FIFO
time = result[self._time_attr]
self._results[trial].append(result)
median_result = self._get_median_result(time)
best_result = self._best_result(trial)
if self._verbose:
logger.info("Trial {} best res={} vs median res={} at t={}".format(
trial, best_result, median_result, time))
if best_result < median_result and time > self._grace_period:
if self._verbose:
logger.info("MedianStoppingRule: "
"early stopping {}".format(trial))
self._stopped_trials.add(trial)
if self._hard_stop:
return TrialScheduler.STOP
else:
return TrialScheduler.PAUSE
else:
return TrialScheduler.CONTINUE | ['def', 'on_trial_result', '(', 'self', ',', 'trial_runner', ',', 'trial', ',', 'result', ')', ':', 'if', 'trial', 'in', 'self', '.', '_stopped_trials', ':', 'assert', 'not', 'self', '.', '_hard_stop', 'return', 'TrialScheduler', '.', 'CONTINUE', '# fall back to FIFO', 'time', '=', 'result', '[', 'self', '.', '_time_attr', ']', 'self', '.', '_results', '[', 'trial', ']', '.', 'append', '(', 'result', ')', 'median_result', '=', 'self', '.', '_get_median_result', '(', 'time', ')', 'best_result', '=', 'self', '.', '_best_result', '(', 'trial', ')', 'if', 'self', '.', '_verbose', ':', 'logger', '.', 'info', '(', '"Trial {} best res={} vs median res={} at t={}"', '.', 'format', '(', 'trial', ',', 'best_result', ',', 'median_result', ',', 'time', ')', ')', 'if', 'best_result', '<', 'median_result', 'and', 'time', '>', 'self', '.', '_grace_period', ':', 'if', 'self', '.', '_verbose', ':', 'logger', '.', 'info', '(', '"MedianStoppingRule: "', '"early stopping {}"', '.', 'format', '(', 'trial', ')', ')', 'self', '.', '_stopped_trials', '.', 'add', '(', 'trial', ')', 'if', 'self', '.', '_hard_stop', ':', 'return', 'TrialScheduler', '.', 'STOP', 'else', ':', 'return', 'TrialScheduler', '.', 'PAUSE', 'else', ':', 'return', 'TrialScheduler', '.', 'CONTINUE'] | Callback for early stopping.
This stopping rule stops a running trial if the trial's best objective
value by step `t` is strictly worse than the median of the running
averages of all completed trials' objectives reported up to step `t`. | ['Callback', 'for', 'early', 'stopping', '.'] | train | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/schedulers/median_stopping_rule.py#L56-L85 |
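A small numeric illustration of the decision described in the docstring, with invented objective values (larger is assumed better, as in the rule):

```python
import statistics

# Running averages of completed trials' objectives up to step t (invented numbers).
completed_running_avgs = [0.62, 0.71, 0.55, 0.68]
median_result = statistics.median(completed_running_avgs)  # 0.65

best_result = 0.58        # the current trial's best objective so far
t, grace_period = 3, 1

if best_result < median_result and t > grace_period:
    decision = 'stop (or pause when hard_stop is False)'
else:
    decision = 'continue'
print(median_result, decision)  # 0.65 stop (or pause when hard_stop is False)
```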
3,619 | bxlab/bx-python | lib/bx_extras/stats.py | lgammln | def lgammln(xx):
"""
Returns the gamma function of xx.
Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
(Adapted from: Numerical Recipies in C.)
Usage: lgammln(xx)
"""
coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
0.120858003e-2, -0.536382e-5]
x = xx - 1.0
tmp = x + 5.5
tmp = tmp - (x+0.5)*math.log(tmp)
ser = 1.0
for j in range(len(coeff)):
x = x + 1
ser = ser + coeff[j]/x
return -tmp + math.log(2.50662827465*ser) | python | def lgammln(xx):
"""
Returns the gamma function of xx.
Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
(Adapted from: Numerical Recipies in C.)
Usage: lgammln(xx)
"""
coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
0.120858003e-2, -0.536382e-5]
x = xx - 1.0
tmp = x + 5.5
tmp = tmp - (x+0.5)*math.log(tmp)
ser = 1.0
for j in range(len(coeff)):
x = x + 1
ser = ser + coeff[j]/x
return -tmp + math.log(2.50662827465*ser) | ['def', 'lgammln', '(', 'xx', ')', ':', 'coeff', '=', '[', '76.18009173', ',', '-', '86.50532033', ',', '24.01409822', ',', '-', '1.231739516', ',', '0.120858003e-2', ',', '-', '0.536382e-5', ']', 'x', '=', 'xx', '-', '1.0', 'tmp', '=', 'x', '+', '5.5', 'tmp', '=', 'tmp', '-', '(', 'x', '+', '0.5', ')', '*', 'math', '.', 'log', '(', 'tmp', ')', 'ser', '=', '1.0', 'for', 'j', 'in', 'range', '(', 'len', '(', 'coeff', ')', ')', ':', 'x', '=', 'x', '+', '1', 'ser', '=', 'ser', '+', 'coeff', '[', 'j', ']', '/', 'x', 'return', '-', 'tmp', '+', 'math', '.', 'log', '(', '2.50662827465', '*', 'ser', ')'] | Returns the gamma function of xx.
Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
(Adapted from: Numerical Recipies in C.)
Usage: lgammln(xx) | ['Returns', 'the', 'gamma', 'function', 'of', 'xx', '.', 'Gamma', '(', 'z', ')', '=', 'Integral', '(', '0', 'infinity', ')', 'of', 't^', '(', 'z', '-', '1', ')', 'exp', '(', '-', 't', ')', 'dt', '.', '(', 'Adapted', 'from', ':', 'Numerical', 'Recipies', 'in', 'C', '.', ')'] | train | https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/stats.py#L1470-L1488 |
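The series above is a Lanczos-style approximation to ln Γ(x); a quick sanity check against the standard library, assuming `lgammln` as defined above is in scope:

```python
import math

# Agreement is limited by the truncated six-term coefficients, so use a loose tolerance.
for x in (1.0, 2.5, 10.0):
    approx = lgammln(x)
    exact = math.lgamma(x)
    assert abs(approx - exact) < 1e-5, (x, approx, exact)
```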
3,620 | BerkeleyAutomation/perception | perception/image.py | Image.mask_by_linear_ind | def mask_by_linear_ind(self, linear_inds):
"""Create a new image by zeroing out data at locations not in the
given indices.
Parameters
----------
linear_inds : :obj:`numpy.ndarray` of int
A list of linear coordinates.
Returns
-------
:obj:`Image`
A new Image of the same type, with data not indexed by inds set
to zero.
"""
inds = self.linear_to_ij(linear_inds)
return self.mask_by_ind(inds) | python | def mask_by_linear_ind(self, linear_inds):
"""Create a new image by zeroing out data at locations not in the
given indices.
Parameters
----------
linear_inds : :obj:`numpy.ndarray` of int
A list of linear coordinates.
Returns
-------
:obj:`Image`
A new Image of the same type, with data not indexed by inds set
to zero.
"""
inds = self.linear_to_ij(linear_inds)
return self.mask_by_ind(inds) | ['def', 'mask_by_linear_ind', '(', 'self', ',', 'linear_inds', ')', ':', 'inds', '=', 'self', '.', 'linear_to_ij', '(', 'linear_inds', ')', 'return', 'self', '.', 'mask_by_ind', '(', 'inds', ')'] | Create a new image by zeroing out data at locations not in the
given indices.
Parameters
----------
linear_inds : :obj:`numpy.ndarray` of int
A list of linear coordinates.
Returns
-------
:obj:`Image`
A new Image of the same type, with data not indexed by inds set
to zero. | ['Create', 'a', 'new', 'image', 'by', 'zeroing', 'out', 'data', 'at', 'locations', 'not', 'in', 'the', 'given', 'indices', '.'] | train | https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/image.py#L452-L468 |
3,621 | aewallin/allantools | allantools/noise_kasdin.py | Noise.adev_from_qd | def adev_from_qd(self, tau0=1.0, tau=1.0):
""" prefactor for Allan deviation for noise
type defined by (qd, b, tau0)
Colored noise generated with (qd, b, tau0) parameters will
show an Allan variance of:
AVAR = prefactor * h_a * tau^c
where a = b + 2 is the slope of the frequency PSD.
and h_a is the frequency PSD prefactor S_y(f) = h_a * f^a
The relation between a, b, c is:
a b c(AVAR) c(MVAR)
-----------------------
-2 -4 1 1
-1 -3 0 0
0 -2 -1 -1
+1 -1 -2 -2
+2 0 -2 -3
Coefficients from:
S. T. Dawkins, J. J. McFerran and A. N. Luiten, "Considerations on
the measurement of the stability of oscillators with frequency
counters," in IEEE Transactions on Ultrasonics, Ferroelectrics, and
Frequency Control, vol. 54, no. 5, pp. 918-925, May 2007.
doi: 10.1109/TUFFC.2007.337
"""
g_b = self.phase_psd_from_qd(tau0)
f_h = 0.5/tau0
if self.b == 0:
coeff = 3.0*f_h / (4.0*pow(np.pi, 2)) # E, White PM, tau^-1
elif self.b == -1:
coeff = (1.038+3*np.log(2.0*np.pi*f_h*tau))/(4.0*pow(np.pi, 2))# D, Flicker PM, tau^-1
elif self.b == -2:
coeff = 0.5 # C, white FM, 1/sqrt(tau)
elif self.b == -3:
coeff = 2*np.log(2) # B, flicker FM, constant ADEV
elif self.b == -4:
coeff = 2.0*pow(np.pi, 2)/3.0 # A, RW FM, sqrt(tau)
return np.sqrt(coeff*g_b*pow(2.0*np.pi, 2)) | python | def adev_from_qd(self, tau0=1.0, tau=1.0):
""" prefactor for Allan deviation for noise
type defined by (qd, b, tau0)
Colored noise generated with (qd, b, tau0) parameters will
show an Allan variance of:
AVAR = prefactor * h_a * tau^c
where a = b + 2 is the slope of the frequency PSD.
and h_a is the frequency PSD prefactor S_y(f) = h_a * f^a
The relation between a, b, c is:
a b c(AVAR) c(MVAR)
-----------------------
-2 -4 1 1
-1 -3 0 0
0 -2 -1 -1
+1 -1 -2 -2
+2 0 -2 -3
Coefficients from:
S. T. Dawkins, J. J. McFerran and A. N. Luiten, "Considerations on
the measurement of the stability of oscillators with frequency
counters," in IEEE Transactions on Ultrasonics, Ferroelectrics, and
Frequency Control, vol. 54, no. 5, pp. 918-925, May 2007.
doi: 10.1109/TUFFC.2007.337
"""
g_b = self.phase_psd_from_qd(tau0)
f_h = 0.5/tau0
if self.b == 0:
coeff = 3.0*f_h / (4.0*pow(np.pi, 2)) # E, White PM, tau^-1
elif self.b == -1:
coeff = (1.038+3*np.log(2.0*np.pi*f_h*tau))/(4.0*pow(np.pi, 2))# D, Flicker PM, tau^-1
elif self.b == -2:
coeff = 0.5 # C, white FM, 1/sqrt(tau)
elif self.b == -3:
coeff = 2*np.log(2) # B, flicker FM, constant ADEV
elif self.b == -4:
coeff = 2.0*pow(np.pi, 2)/3.0 # A, RW FM, sqrt(tau)
return np.sqrt(coeff*g_b*pow(2.0*np.pi, 2)) | ['def', 'adev_from_qd', '(', 'self', ',', 'tau0', '=', '1.0', ',', 'tau', '=', '1.0', ')', ':', 'g_b', '=', 'self', '.', 'phase_psd_from_qd', '(', 'tau0', ')', 'f_h', '=', '0.5', '/', 'tau0', 'if', 'self', '.', 'b', '==', '0', ':', 'coeff', '=', '3.0', '*', 'f_h', '/', '(', '4.0', '*', 'pow', '(', 'np', '.', 'pi', ',', '2', ')', ')', '# E, White PM, tau^-1', 'elif', 'self', '.', 'b', '==', '-', '1', ':', 'coeff', '=', '(', '1.038', '+', '3', '*', 'np', '.', 'log', '(', '2.0', '*', 'np', '.', 'pi', '*', 'f_h', '*', 'tau', ')', ')', '/', '(', '4.0', '*', 'pow', '(', 'np', '.', 'pi', ',', '2', ')', ')', '# D, Flicker PM, tau^-1', 'elif', 'self', '.', 'b', '==', '-', '2', ':', 'coeff', '=', '0.5', '# C, white FM, 1/sqrt(tau)', 'elif', 'self', '.', 'b', '==', '-', '3', ':', 'coeff', '=', '2', '*', 'np', '.', 'log', '(', '2', ')', '# B, flicker FM, constant ADEV', 'elif', 'self', '.', 'b', '==', '-', '4', ':', 'coeff', '=', '2.0', '*', 'pow', '(', 'np', '.', 'pi', ',', '2', ')', '/', '3.0', '# A, RW FM, sqrt(tau)', 'return', 'np', '.', 'sqrt', '(', 'coeff', '*', 'g_b', '*', 'pow', '(', '2.0', '*', 'np', '.', 'pi', ',', '2', ')', ')'] | prefactor for Allan deviation for noise
type defined by (qd, b, tau0)
Colored noise generated with (qd, b, tau0) parameters will
show an Allan variance of:
AVAR = prefactor * h_a * tau^c
where a = b + 2 is the slope of the frequency PSD.
and h_a is the frequency PSD prefactor S_y(f) = h_a * f^a
The relation between a, b, c is:
a b c(AVAR) c(MVAR)
-----------------------
-2 -4 1 1
-1 -3 0 0
0 -2 -1 -1
+1 -1 -2 -2
+2 0 -2 -3
Coefficients from:
S. T. Dawkins, J. J. McFerran and A. N. Luiten, "Considerations on
the measurement of the stability of oscillators with frequency
counters," in IEEE Transactions on Ultrasonics, Ferroelectrics, and
Frequency Control, vol. 54, no. 5, pp. 918-925, May 2007.
doi: 10.1109/TUFFC.2007.337 | ['prefactor', 'for', 'Allan', 'deviation', 'for', 'noise', 'type', 'defined', 'by', '(', 'qd', 'b', 'tau0', ')'] | train | https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/noise_kasdin.py#L209-L251 |
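Restating the docstring's table together with the coefficients in the code as one relation, with S_y(f) = h_a f^a and a = b + 2; pairing the code's `coeff` values with the prefactor K_a below is an inference from the docstring, not an addition to it:

```latex
\sigma_y^2(\tau) = K_a \, h_a \, \tau^{c}, \qquad
\begin{array}{lrrl}
\text{noise type} & a & c & K_a \\
\text{white PM}       & +2 & -2 & 3 f_h / (4\pi^2) \\
\text{flicker PM}     & +1 & -2 & \bigl(1.038 + 3\ln(2\pi f_h \tau)\bigr) / (4\pi^2) \\
\text{white FM}       &  0 & -1 & 1/2 \\
\text{flicker FM}     & -1 &  0 & 2\ln 2 \\
\text{random-walk FM} & -2 & +1 & 2\pi^2 / 3
\end{array}
```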
3,622 | ars096/pypci | pypci/pypci.py | write | def write(bar, offset, data):
"""Write data to PCI board.
Parameters
----------
bar : BaseAddressRegister
BAR to write.
offset : int
Address offset in BAR to write.
data : bytes
Data to write.
Returns
-------
None
Examples
--------
>>> b = pypci.lspci(vendor=0x1147, device=3214)
>>> pypci.write(b[0].bar[2], 0x04, b'\x01')
>>> data = struct.pack('<I', 1234567)
>>> pypci.write(b[0].bar[2], 0x00, data)
"""
if type(data) not in [bytes, bytearray]:
msg = 'data should be bytes or bytearray type'
raise TypeError(msg)
size = len(data)
verify_access_range(bar, offset, size)
if bar.type == 'io': return io_write(bar, offset, data)
if bar.type == 'mem': return mem_write(bar, offset, data)
return | python | def write(bar, offset, data):
"""Write data to PCI board.
Parameters
----------
bar : BaseAddressRegister
BAR to write.
offset : int
Address offset in BAR to write.
data : bytes
Data to write.
Returns
-------
None
Examples
--------
>>> b = pypci.lspci(vendor=0x1147, device=3214)
>>> pypci.write(b[0].bar[2], 0x04, b'\x01')
>>> data = struct.pack('<I', 1234567)
>>> pypci.write(b[0].bar[2], 0x00, data)
"""
if type(data) not in [bytes, bytearray]:
msg = 'data should be bytes or bytearray type'
raise TypeError(msg)
size = len(data)
verify_access_range(bar, offset, size)
if bar.type == 'io': return io_write(bar, offset, data)
if bar.type == 'mem': return mem_write(bar, offset, data)
return | ['def', 'write', '(', 'bar', ',', 'offset', ',', 'data', ')', ':', 'if', 'type', '(', 'data', ')', 'not', 'in', '[', 'bytes', ',', 'bytearray', ']', ':', 'msg', '=', "'data should be bytes or bytearray type'", 'raise', 'TypeError', '(', 'msg', ')', 'size', '=', 'len', '(', 'data', ')', 'verify_access_range', '(', 'bar', ',', 'offset', ',', 'size', ')', 'if', 'bar', '.', 'type', '==', "'io'", ':', 'return', 'io_write', '(', 'bar', ',', 'offset', ',', 'data', ')', 'if', 'bar', '.', 'type', '==', "'mem'", ':', 'return', 'mem_write', '(', 'bar', ',', 'offset', ',', 'data', ')', 'return'] | Write data to PCI board.
Parameters
----------
bar : BaseAddressRegister
BAR to write.
offset : int
Address offset in BAR to write.
data : bytes
Data to write.
Returns
-------
None
Examples
--------
>>> b = pypci.lspci(vendor=0x1147, device=3214)
>>> pypci.write(b[0].bar[2], 0x04, b'\x01')
>>> data = struct.pack('<I', 1234567)
>>> pypci.write(b[0].bar[2], 0x00, data) | ['Write', 'data', 'to', 'PCI', 'board', '.', 'Parameters', '----------', 'bar', ':', 'BaseAddressRegister', 'BAR', 'to', 'write', '.', 'offset', ':', 'int', 'Address', 'offset', 'in', 'BAR', 'to', 'write', '.', 'data', ':', 'bytes', 'Data', 'to', 'write', '.', 'Returns', '-------', 'None', 'Examples', '--------', '>>>', 'b', '=', 'pypci', '.', 'lspci', '(', 'vendor', '=', '0x1147', 'device', '=', '3214', ')', '>>>', 'pypci', '.', 'write', '(', 'b', '[', '0', ']', '.', 'bar', '[', '2', ']', '0x04', 'b', '\\', 'x01', ')', '>>>', 'data', '=', 'struct', '.', 'pack', '(', '<I', '1234567', ')', '>>>', 'pypci', '.', 'write', '(', 'b', '[', '0', ']', '.', 'bar', '[', '2', ']', '0x00', 'data', ')'] | train | https://github.com/ars096/pypci/blob/9469fa012e1f88fc6efc3aa6c17cd9732bbf73f6/pypci/pypci.py#L247-L282 |
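A usage sketch building on the docstring examples above; it only runs on a machine that actually has a matching PCI board, and the vendor/device IDs are placeholders taken from the docstring.

import struct
import pypci

# Find the board and pick a BAR (IDs are placeholders from the docstring above).
boards = pypci.lspci(vendor=0x1147, device=3214)
bar = boards[0].bar[2]

# write() only accepts bytes/bytearray, so pack integers first.
pypci.write(bar, 0x04, b'\x01')                      # one byte at offset 0x04
pypci.write(bar, 0x00, struct.pack('<I', 1234567))   # 32-bit little-endian word at 0x00
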
3,623 | watson-developer-cloud/python-sdk | ibm_watson/assistant_v1.py | AssistantV1.update_intent | def update_intent(self,
workspace_id,
intent,
new_intent=None,
new_description=None,
new_examples=None,
**kwargs):
"""
Update intent.
Update an existing intent with new or modified data. You must provide component
objects defining the content of the updated intent.
This operation is limited to 2000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str intent: The intent name.
:param str new_intent: The name of the intent. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, underscore, hyphen, and dot
characters.
- It cannot begin with the reserved prefix `sys-`.
- It must be no longer than 128 characters.
:param str new_description: The description of the intent. This string cannot
contain carriage return, newline, or tab characters, and it must be no longer than
128 characters.
:param list[Example] new_examples: An array of user input examples for the intent.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if intent is None:
raise ValueError('intent must be provided')
if new_examples is not None:
new_examples = [
self._convert_model(x, Example) for x in new_examples
]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('conversation', 'V1', 'update_intent')
headers.update(sdk_headers)
params = {'version': self.version}
data = {
'intent': new_intent,
'description': new_description,
'examples': new_examples
}
url = '/v1/workspaces/{0}/intents/{1}'.format(
*self._encode_path_vars(workspace_id, intent))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response | python | def update_intent(self,
workspace_id,
intent,
new_intent=None,
new_description=None,
new_examples=None,
**kwargs):
"""
Update intent.
Update an existing intent with new or modified data. You must provide component
objects defining the content of the updated intent.
This operation is limited to 2000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str intent: The intent name.
:param str new_intent: The name of the intent. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, underscore, hyphen, and dot
characters.
- It cannot begin with the reserved prefix `sys-`.
- It must be no longer than 128 characters.
:param str new_description: The description of the intent. This string cannot
contain carriage return, newline, or tab characters, and it must be no longer than
128 characters.
:param list[Example] new_examples: An array of user input examples for the intent.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if intent is None:
raise ValueError('intent must be provided')
if new_examples is not None:
new_examples = [
self._convert_model(x, Example) for x in new_examples
]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('conversation', 'V1', 'update_intent')
headers.update(sdk_headers)
params = {'version': self.version}
data = {
'intent': new_intent,
'description': new_description,
'examples': new_examples
}
url = '/v1/workspaces/{0}/intents/{1}'.format(
*self._encode_path_vars(workspace_id, intent))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response | ['def', 'update_intent', '(', 'self', ',', 'workspace_id', ',', 'intent', ',', 'new_intent', '=', 'None', ',', 'new_description', '=', 'None', ',', 'new_examples', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', 'workspace_id', 'is', 'None', ':', 'raise', 'ValueError', '(', "'workspace_id must be provided'", ')', 'if', 'intent', 'is', 'None', ':', 'raise', 'ValueError', '(', "'intent must be provided'", ')', 'if', 'new_examples', 'is', 'not', 'None', ':', 'new_examples', '=', '[', 'self', '.', '_convert_model', '(', 'x', ',', 'Example', ')', 'for', 'x', 'in', 'new_examples', ']', 'headers', '=', '{', '}', 'if', "'headers'", 'in', 'kwargs', ':', 'headers', '.', 'update', '(', 'kwargs', '.', 'get', '(', "'headers'", ')', ')', 'sdk_headers', '=', 'get_sdk_headers', '(', "'conversation'", ',', "'V1'", ',', "'update_intent'", ')', 'headers', '.', 'update', '(', 'sdk_headers', ')', 'params', '=', '{', "'version'", ':', 'self', '.', 'version', '}', 'data', '=', '{', "'intent'", ':', 'new_intent', ',', "'description'", ':', 'new_description', ',', "'examples'", ':', 'new_examples', '}', 'url', '=', "'/v1/workspaces/{0}/intents/{1}'", '.', 'format', '(', '*', 'self', '.', '_encode_path_vars', '(', 'workspace_id', ',', 'intent', ')', ')', 'response', '=', 'self', '.', 'request', '(', 'method', '=', "'POST'", ',', 'url', '=', 'url', ',', 'headers', '=', 'headers', ',', 'params', '=', 'params', ',', 'json', '=', 'data', ',', 'accept_json', '=', 'True', ')', 'return', 'response'] | Update intent.
Update an existing intent with new or modified data. You must provide component
objects defining the content of the updated intent.
This operation is limited to 2000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str intent: The intent name.
:param str new_intent: The name of the intent. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, underscore, hyphen, and dot
characters.
- It cannot begin with the reserved prefix `sys-`.
- It must be no longer than 128 characters.
:param str new_description: The description of the intent. This string cannot
contain carriage return, newline, or tab characters, and it must be no longer than
128 characters.
:param list[Example] new_examples: An array of user input examples for the intent.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse | ['Update', 'intent', '.'] | train | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/assistant_v1.py#L751-L815 |
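A hypothetical call to the method above; it assumes `assistant` is an already-authenticated AssistantV1 client, that the Example model accepts a `text` argument, and that real workspace and intent names are substituted for the placeholders.

from ibm_watson.assistant_v1 import Example

# `assistant` is assumed to be a configured AssistantV1 instance.
response = assistant.update_intent(
    workspace_id='<workspace-id>',                 # placeholder
    intent='greetings',
    new_description='Greets the user.',
    new_examples=[Example(text='good morning')],   # assumption: Example(text=...)
)
result = response.get_result()                      # assumption: DetailedResponse.get_result()
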
3,624 | kayak/pypika | pypika/queries.py | QueryBuilder._group_sql | def _group_sql(self, quote_char=None, groupby_alias=True, **kwargs):
"""
Produces the GROUP BY part of the query. This is a list of fields. The clauses are stored in the query under
self._groupbys as a list fields.
        If a groupby field is used in the select clause,
determined by a matching alias, and the groupby_alias is set True
then the GROUP BY clause will use the alias,
otherwise the entire field will be rendered as SQL.
"""
clauses = []
selected_aliases = {s.alias for s in self._selects}
for field in self._groupbys:
if groupby_alias and field.alias and field.alias in selected_aliases:
clauses.append("{quote}{alias}{quote}".format(
alias=field.alias,
quote=quote_char or '',
))
else:
clauses.append(field.get_sql(quote_char=quote_char, **kwargs))
sql = ' GROUP BY {groupby}'.format(groupby=','.join(clauses))
if self._with_totals:
return sql + ' WITH TOTALS'
return sql | python | def _group_sql(self, quote_char=None, groupby_alias=True, **kwargs):
"""
Produces the GROUP BY part of the query. This is a list of fields. The clauses are stored in the query under
self._groupbys as a list fields.
        If a groupby field is used in the select clause,
determined by a matching alias, and the groupby_alias is set True
then the GROUP BY clause will use the alias,
otherwise the entire field will be rendered as SQL.
"""
clauses = []
selected_aliases = {s.alias for s in self._selects}
for field in self._groupbys:
if groupby_alias and field.alias and field.alias in selected_aliases:
clauses.append("{quote}{alias}{quote}".format(
alias=field.alias,
quote=quote_char or '',
))
else:
clauses.append(field.get_sql(quote_char=quote_char, **kwargs))
sql = ' GROUP BY {groupby}'.format(groupby=','.join(clauses))
if self._with_totals:
return sql + ' WITH TOTALS'
return sql | ['def', '_group_sql', '(', 'self', ',', 'quote_char', '=', 'None', ',', 'groupby_alias', '=', 'True', ',', '*', '*', 'kwargs', ')', ':', 'clauses', '=', '[', ']', 'selected_aliases', '=', '{', 's', '.', 'alias', 'for', 's', 'in', 'self', '.', '_selects', '}', 'for', 'field', 'in', 'self', '.', '_groupbys', ':', 'if', 'groupby_alias', 'and', 'field', '.', 'alias', 'and', 'field', '.', 'alias', 'in', 'selected_aliases', ':', 'clauses', '.', 'append', '(', '"{quote}{alias}{quote}"', '.', 'format', '(', 'alias', '=', 'field', '.', 'alias', ',', 'quote', '=', 'quote_char', 'or', "''", ',', ')', ')', 'else', ':', 'clauses', '.', 'append', '(', 'field', '.', 'get_sql', '(', 'quote_char', '=', 'quote_char', ',', '*', '*', 'kwargs', ')', ')', 'sql', '=', "' GROUP BY {groupby}'", '.', 'format', '(', 'groupby', '=', "','", '.', 'join', '(', 'clauses', ')', ')', 'if', 'self', '.', '_with_totals', ':', 'return', 'sql', '+', "' WITH TOTALS'", 'return', 'sql'] | Produces the GROUP BY part of the query. This is a list of fields. The clauses are stored in the query under
self._groupbys as a list fields.
        If a groupby field is used in the select clause,
determined by a matching alias, and the groupby_alias is set True
then the GROUP BY clause will use the alias,
otherwise the entire field will be rendered as SQL. | ['Produces', 'the', 'GROUP', 'BY', 'part', 'of', 'the', 'query', '.', 'This', 'is', 'a', 'list', 'of', 'fields', '.', 'The', 'clauses', 'are', 'stored', 'in', 'the', 'query', 'under', 'self', '.', '_groupbys', 'as', 'a', 'list', 'fields', '.'] | train | https://github.com/kayak/pypika/blob/bfed26e963b982ecdb9697b61b67d76b493f2115/pypika/queries.py#L914-L938 |
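A small illustration, assuming pypika's public query-builder API (Query, Table, functions, groupby), of a query whose GROUP BY text is produced by the method above; when a grouped field carries an alias that also appears in the SELECT list, the rendered clause is expected to use that alias.

from pypika import Query, Table, functions as fn

orders = Table('orders')
q = (
    Query.from_(orders)
    .select(orders.customer_id.as_('cust'), fn.Sum(orders.amount).as_('total'))
    .groupby(orders.customer_id.as_('cust'))
)

# _group_sql() renders the GROUP BY part of this string, choosing between
# the "cust" alias and the full field SQL per the logic shown above.
print(str(q))
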
3,625 | kmedian/ctmc | ctmc/simulate.py | simulate | def simulate(s0, transmat, steps=1):
"""Simulate the next state
Parameters
----------
s0 : ndarray
Vector with state variables at t=0
transmat : ndarray
The estimated transition/stochastic matrix.
steps : int
(Default: 1) The number of steps to simulate model outputs ahead.
        If steps>1 then a Multi-Step Simulation is triggered.
Returns
-------
out : ndarray
(steps=1) Vector with simulated state variables ().
(steps>1) Matrix with out[:,step] columns (Fortran order) from a
Multi-Step Simulation. The first column is the initial state
vector out[:,0]=s0 for algorithmic reasons.
"""
# Single-Step simulation
if steps == 1:
return np.dot(s0, transmat)
# Multi-Step simulation
out = np.zeros(shape=(steps + 1, len(s0)), order='C')
out[0, :] = s0
for i in range(1, steps + 1):
out[i, :] = np.dot(out[i - 1, :], transmat)
return out | python | def simulate(s0, transmat, steps=1):
"""Simulate the next state
Parameters
----------
s0 : ndarray
Vector with state variables at t=0
transmat : ndarray
The estimated transition/stochastic matrix.
steps : int
(Default: 1) The number of steps to simulate model outputs ahead.
        If steps>1 then a Multi-Step Simulation is triggered.
Returns
-------
out : ndarray
(steps=1) Vector with simulated state variables ().
(steps>1) Matrix with out[:,step] columns (Fortran order) from a
Multi-Step Simulation. The first column is the initial state
vector out[:,0]=s0 for algorithmic reasons.
"""
# Single-Step simulation
if steps == 1:
return np.dot(s0, transmat)
# Multi-Step simulation
out = np.zeros(shape=(steps + 1, len(s0)), order='C')
out[0, :] = s0
for i in range(1, steps + 1):
out[i, :] = np.dot(out[i - 1, :], transmat)
return out | ['def', 'simulate', '(', 's0', ',', 'transmat', ',', 'steps', '=', '1', ')', ':', '# Single-Step simulation', 'if', 'steps', '==', '1', ':', 'return', 'np', '.', 'dot', '(', 's0', ',', 'transmat', ')', '# Multi-Step simulation', 'out', '=', 'np', '.', 'zeros', '(', 'shape', '=', '(', 'steps', '+', '1', ',', 'len', '(', 's0', ')', ')', ',', 'order', '=', "'C'", ')', 'out', '[', '0', ',', ':', ']', '=', 's0', 'for', 'i', 'in', 'range', '(', '1', ',', 'steps', '+', '1', ')', ':', 'out', '[', 'i', ',', ':', ']', '=', 'np', '.', 'dot', '(', 'out', '[', 'i', '-', '1', ',', ':', ']', ',', 'transmat', ')', 'return', 'out'] | Simulate the next state
Parameters
----------
s0 : ndarray
Vector with state variables at t=0
transmat : ndarray
The estimated transition/stochastic matrix.
steps : int
(Default: 1) The number of steps to simulate model outputs ahead.
    If steps>1 then a Multi-Step Simulation is triggered.
Returns
-------
out : ndarray
(steps=1) Vector with simulated state variables ().
(steps>1) Matrix with out[:,step] columns (Fortran order) from a
Multi-Step Simulation. The first column is the initial state
vector out[:,0]=s0 for algorithmic reasons. | ['Simulate', 'the', 'next', 'state'] | train | https://github.com/kmedian/ctmc/blob/e30747f797ce777fd2aaa1b7ee5a77e91d7db5e4/ctmc/simulate.py#L5-L40 |
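A self-contained numpy example mirroring simulate() for a two-state chain; the transition matrix here is made up for illustration.

import numpy as np

transmat = np.array([[0.9, 0.1],
                     [0.5, 0.5]])   # rows sum to 1
s0 = np.array([1.0, 0.0])           # start fully in state 0

# Single step, as simulate(s0, transmat) does:
print(np.dot(s0, transmat))         # [0.9 0.1]

# Multi-step, as simulate(s0, transmat, steps=3) does:
steps = 3
out = np.zeros((steps + 1, len(s0)))
out[0, :] = s0
for i in range(1, steps + 1):
    out[i, :] = np.dot(out[i - 1, :], transmat)
print(out[-1])                      # tends toward the stationary distribution [5/6, 1/6]
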
3,626 | inspirehep/inspire-dojson | inspire_dojson/hepnames/rules.py | name2marc | def name2marc(self, key, value):
"""Populates the ``100`` field.
Also populates the ``400``, ``880``, and ``667`` fields through side
effects.
"""
result = self.get('100', {})
result['a'] = value.get('value')
result['b'] = value.get('numeration')
result['c'] = value.get('title')
result['q'] = value.get('preferred_name')
if 'name_variants' in value:
self['400'] = [{'a': el} for el in value['name_variants']]
if 'native_names' in value:
self['880'] = [{'a': el} for el in value['native_names']]
if 'previous_names' in value:
prev_names = [
{'a': u'Formerly {}'.format(prev_name)}
for prev_name in value['previous_names']
]
self['667'] = prev_names
return result | python | def name2marc(self, key, value):
"""Populates the ``100`` field.
Also populates the ``400``, ``880``, and ``667`` fields through side
effects.
"""
result = self.get('100', {})
result['a'] = value.get('value')
result['b'] = value.get('numeration')
result['c'] = value.get('title')
result['q'] = value.get('preferred_name')
if 'name_variants' in value:
self['400'] = [{'a': el} for el in value['name_variants']]
if 'native_names' in value:
self['880'] = [{'a': el} for el in value['native_names']]
if 'previous_names' in value:
prev_names = [
{'a': u'Formerly {}'.format(prev_name)}
for prev_name in value['previous_names']
]
self['667'] = prev_names
return result | ['def', 'name2marc', '(', 'self', ',', 'key', ',', 'value', ')', ':', 'result', '=', 'self', '.', 'get', '(', "'100'", ',', '{', '}', ')', 'result', '[', "'a'", ']', '=', 'value', '.', 'get', '(', "'value'", ')', 'result', '[', "'b'", ']', '=', 'value', '.', 'get', '(', "'numeration'", ')', 'result', '[', "'c'", ']', '=', 'value', '.', 'get', '(', "'title'", ')', 'result', '[', "'q'", ']', '=', 'value', '.', 'get', '(', "'preferred_name'", ')', 'if', "'name_variants'", 'in', 'value', ':', 'self', '[', "'400'", ']', '=', '[', '{', "'a'", ':', 'el', '}', 'for', 'el', 'in', 'value', '[', "'name_variants'", ']', ']', 'if', "'native_names'", 'in', 'value', ':', 'self', '[', "'880'", ']', '=', '[', '{', "'a'", ':', 'el', '}', 'for', 'el', 'in', 'value', '[', "'native_names'", ']', ']', 'if', "'previous_names'", 'in', 'value', ':', 'prev_names', '=', '[', '{', "'a'", ':', "u'Formerly {}'", '.', 'format', '(', 'prev_name', ')', '}', 'for', 'prev_name', 'in', 'value', '[', "'previous_names'", ']', ']', 'self', '[', "'667'", ']', '=', 'prev_names', 'return', 'result'] | Populates the ``100`` field.
Also populates the ``400``, ``880``, and ``667`` fields through side
effects. | ['Populates', 'the', '100', 'field', '.'] | train | https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hepnames/rules.py#L213-L237 |
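For illustration, the rule body above can be exercised directly with a plain dict standing in for `self` (in real use the dojson engine drives it); the sample record below is made up.

record = {}
value = {
    'value': 'Curie, Marie',
    'preferred_name': 'M. Curie',
    'name_variants': ['Sklodowska-Curie, Marie'],
    'previous_names': ['Sklodowska, Marie'],
}

field_100 = name2marc(record, 'name', value)
# field_100 -> {'a': 'Curie, Marie', 'b': None, 'c': None, 'q': 'M. Curie'}
# record    -> {'400': [{'a': 'Sklodowska-Curie, Marie'}],
#               '667': [{'a': 'Formerly Sklodowska, Marie'}]}
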
3,627 | mitsei/dlkit | dlkit/json_/grading/objects.py | GradebookNode.get_gradebook | def get_gradebook(self):
"""Gets the ``Gradebook`` at this node.
return: (osid.grading.Gradebook) - the gradebook represented by
this node
*compliance: mandatory -- This method must be implemented.*
"""
if self._lookup_session is None:
mgr = get_provider_manager('GRADING', runtime=self._runtime, proxy=self._proxy)
self._lookup_session = mgr.get_gradebook_lookup_session(proxy=getattr(self, "_proxy", None))
return self._lookup_session.get_gradebook(Id(self._my_map['id'])) | python | def get_gradebook(self):
"""Gets the ``Gradebook`` at this node.
return: (osid.grading.Gradebook) - the gradebook represented by
this node
*compliance: mandatory -- This method must be implemented.*
"""
if self._lookup_session is None:
mgr = get_provider_manager('GRADING', runtime=self._runtime, proxy=self._proxy)
self._lookup_session = mgr.get_gradebook_lookup_session(proxy=getattr(self, "_proxy", None))
return self._lookup_session.get_gradebook(Id(self._my_map['id'])) | ['def', 'get_gradebook', '(', 'self', ')', ':', 'if', 'self', '.', '_lookup_session', 'is', 'None', ':', 'mgr', '=', 'get_provider_manager', '(', "'GRADING'", ',', 'runtime', '=', 'self', '.', '_runtime', ',', 'proxy', '=', 'self', '.', '_proxy', ')', 'self', '.', '_lookup_session', '=', 'mgr', '.', 'get_gradebook_lookup_session', '(', 'proxy', '=', 'getattr', '(', 'self', ',', '"_proxy"', ',', 'None', ')', ')', 'return', 'self', '.', '_lookup_session', '.', 'get_gradebook', '(', 'Id', '(', 'self', '.', '_my_map', '[', "'id'", ']', ')', ')'] | Gets the ``Gradebook`` at this node.
return: (osid.grading.Gradebook) - the gradebook represented by
this node
*compliance: mandatory -- This method must be implemented.* | ['Gets', 'the', 'Gradebook', 'at', 'this', 'node', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/objects.py#L2041-L2052 |
3,628 | saltstack/salt | salt/modules/solr.py | signal | def signal(signal=None):
'''
Signals Apache Solr to start, stop, or restart. Obviously this is only
going to work if the minion resides on the solr host. Additionally Solr
doesn't ship with an init script so one must be created.
signal : str (None)
The command to pass to the apache solr init valid values are 'start',
'stop', and 'restart'
CLI Example:
.. code-block:: bash
salt '*' solr.signal restart
'''
valid_signals = ('start', 'stop', 'restart')
# Give a friendly error message for invalid signals
# TODO: Fix this logic to be reusable and used by apache.signal
if signal not in valid_signals:
msg = valid_signals[:-1] + ('or {0}'.format(valid_signals[-1]),)
return '{0} is an invalid signal. Try: one of: {1}'.format(
signal, ', '.join(msg))
cmd = "{0} {1}".format(__opts__['solr.init_script'], signal)
__salt__['cmd.run'](cmd, python_shell=False) | python | def signal(signal=None):
'''
Signals Apache Solr to start, stop, or restart. Obviously this is only
going to work if the minion resides on the solr host. Additionally Solr
doesn't ship with an init script so one must be created.
signal : str (None)
The command to pass to the apache solr init valid values are 'start',
'stop', and 'restart'
CLI Example:
.. code-block:: bash
salt '*' solr.signal restart
'''
valid_signals = ('start', 'stop', 'restart')
# Give a friendly error message for invalid signals
# TODO: Fix this logic to be reusable and used by apache.signal
if signal not in valid_signals:
msg = valid_signals[:-1] + ('or {0}'.format(valid_signals[-1]),)
return '{0} is an invalid signal. Try: one of: {1}'.format(
signal, ', '.join(msg))
cmd = "{0} {1}".format(__opts__['solr.init_script'], signal)
__salt__['cmd.run'](cmd, python_shell=False) | ['def', 'signal', '(', 'signal', '=', 'None', ')', ':', 'valid_signals', '=', '(', "'start'", ',', "'stop'", ',', "'restart'", ')', '# Give a friendly error message for invalid signals', '# TODO: Fix this logic to be reusable and used by apache.signal', 'if', 'signal', 'not', 'in', 'valid_signals', ':', 'msg', '=', 'valid_signals', '[', ':', '-', '1', ']', '+', '(', "'or {0}'", '.', 'format', '(', 'valid_signals', '[', '-', '1', ']', ')', ',', ')', 'return', "'{0} is an invalid signal. Try: one of: {1}'", '.', 'format', '(', 'signal', ',', "', '", '.', 'join', '(', 'msg', ')', ')', 'cmd', '=', '"{0} {1}"', '.', 'format', '(', '__opts__', '[', "'solr.init_script'", ']', ',', 'signal', ')', '__salt__', '[', "'cmd.run'", ']', '(', 'cmd', ',', 'python_shell', '=', 'False', ')'] | Signals Apache Solr to start, stop, or restart. Obviously this is only
going to work if the minion resides on the solr host. Additionally Solr
doesn't ship with an init script so one must be created.
signal : str (None)
The command to pass to the apache solr init valid values are 'start',
'stop', and 'restart'
CLI Example:
.. code-block:: bash
salt '*' solr.signal restart | ['Signals', 'Apache', 'Solr', 'to', 'start', 'stop', 'or', 'restart', '.', 'Obviously', 'this', 'is', 'only', 'going', 'to', 'work', 'if', 'the', 'minion', 'resides', 'on', 'the', 'solr', 'host', '.', 'Additionally', 'Solr', 'doesn', 't', 'ship', 'with', 'an', 'init', 'script', 'so', 'one', 'must', 'be', 'created', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solr.py#L996-L1022 |
3,629 | bcbio/bcbio-nextgen | bcbio/ngsalign/bowtie2.py | align_transcriptome | def align_transcriptome(fastq_file, pair_file, ref_file, data):
"""
bowtie2 with settings for aligning to the transcriptome for eXpress/RSEM/etc
"""
work_bam = dd.get_work_bam(data)
base, ext = os.path.splitext(work_bam)
out_file = base + ".transcriptome" + ext
if utils.file_exists(out_file):
data = dd.set_transcriptome_bam(data, out_file)
return data
bowtie2 = config_utils.get_program("bowtie2", data["config"])
gtf_file = dd.get_gtf_file(data)
gtf_index = index_transcriptome(gtf_file, ref_file, data)
num_cores = data["config"]["algorithm"].get("num_cores", 1)
fastq_cmd = "-1 %s" % fastq_file if pair_file else "-U %s" % fastq_file
pair_cmd = "-2 %s " % pair_file if pair_file else ""
cmd = ("{bowtie2} -p {num_cores} -a -X 600 --rdg 6,5 --rfg 6,5 --score-min L,-.6,-.4 --no-discordant --no-mixed -x {gtf_index} {fastq_cmd} {pair_cmd} ")
with file_transaction(data, out_file) as tx_out_file:
message = "Aligning %s and %s to the transcriptome." % (fastq_file, pair_file)
cmd += "| " + postalign.sam_to_sortbam_cl(data, tx_out_file, name_sort=True)
do.run(cmd.format(**locals()), message)
data = dd.set_transcriptome_bam(data, out_file)
return data | python | def align_transcriptome(fastq_file, pair_file, ref_file, data):
"""
bowtie2 with settings for aligning to the transcriptome for eXpress/RSEM/etc
"""
work_bam = dd.get_work_bam(data)
base, ext = os.path.splitext(work_bam)
out_file = base + ".transcriptome" + ext
if utils.file_exists(out_file):
data = dd.set_transcriptome_bam(data, out_file)
return data
bowtie2 = config_utils.get_program("bowtie2", data["config"])
gtf_file = dd.get_gtf_file(data)
gtf_index = index_transcriptome(gtf_file, ref_file, data)
num_cores = data["config"]["algorithm"].get("num_cores", 1)
fastq_cmd = "-1 %s" % fastq_file if pair_file else "-U %s" % fastq_file
pair_cmd = "-2 %s " % pair_file if pair_file else ""
cmd = ("{bowtie2} -p {num_cores} -a -X 600 --rdg 6,5 --rfg 6,5 --score-min L,-.6,-.4 --no-discordant --no-mixed -x {gtf_index} {fastq_cmd} {pair_cmd} ")
with file_transaction(data, out_file) as tx_out_file:
message = "Aligning %s and %s to the transcriptome." % (fastq_file, pair_file)
cmd += "| " + postalign.sam_to_sortbam_cl(data, tx_out_file, name_sort=True)
do.run(cmd.format(**locals()), message)
data = dd.set_transcriptome_bam(data, out_file)
return data | ['def', 'align_transcriptome', '(', 'fastq_file', ',', 'pair_file', ',', 'ref_file', ',', 'data', ')', ':', 'work_bam', '=', 'dd', '.', 'get_work_bam', '(', 'data', ')', 'base', ',', 'ext', '=', 'os', '.', 'path', '.', 'splitext', '(', 'work_bam', ')', 'out_file', '=', 'base', '+', '".transcriptome"', '+', 'ext', 'if', 'utils', '.', 'file_exists', '(', 'out_file', ')', ':', 'data', '=', 'dd', '.', 'set_transcriptome_bam', '(', 'data', ',', 'out_file', ')', 'return', 'data', 'bowtie2', '=', 'config_utils', '.', 'get_program', '(', '"bowtie2"', ',', 'data', '[', '"config"', ']', ')', 'gtf_file', '=', 'dd', '.', 'get_gtf_file', '(', 'data', ')', 'gtf_index', '=', 'index_transcriptome', '(', 'gtf_file', ',', 'ref_file', ',', 'data', ')', 'num_cores', '=', 'data', '[', '"config"', ']', '[', '"algorithm"', ']', '.', 'get', '(', '"num_cores"', ',', '1', ')', 'fastq_cmd', '=', '"-1 %s"', '%', 'fastq_file', 'if', 'pair_file', 'else', '"-U %s"', '%', 'fastq_file', 'pair_cmd', '=', '"-2 %s "', '%', 'pair_file', 'if', 'pair_file', 'else', '""', 'cmd', '=', '(', '"{bowtie2} -p {num_cores} -a -X 600 --rdg 6,5 --rfg 6,5 --score-min L,-.6,-.4 --no-discordant --no-mixed -x {gtf_index} {fastq_cmd} {pair_cmd} "', ')', 'with', 'file_transaction', '(', 'data', ',', 'out_file', ')', 'as', 'tx_out_file', ':', 'message', '=', '"Aligning %s and %s to the transcriptome."', '%', '(', 'fastq_file', ',', 'pair_file', ')', 'cmd', '+=', '"| "', '+', 'postalign', '.', 'sam_to_sortbam_cl', '(', 'data', ',', 'tx_out_file', ',', 'name_sort', '=', 'True', ')', 'do', '.', 'run', '(', 'cmd', '.', 'format', '(', '*', '*', 'locals', '(', ')', ')', ',', 'message', ')', 'data', '=', 'dd', '.', 'set_transcriptome_bam', '(', 'data', ',', 'out_file', ')', 'return', 'data'] | bowtie2 with settings for aligning to the transcriptome for eXpress/RSEM/etc | ['bowtie2', 'with', 'settings', 'for', 'aligning', 'to', 'the', 'transcriptome', 'for', 'eXpress', '/', 'RSEM', '/', 'etc'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/bowtie2.py#L130-L152 |
3,630 | Fuyukai/asyncwebsockets | asyncwebsockets/server.py | open_websocket_server | async def open_websocket_server(sock, filter=None): # pylint: disable=W0622
"""
A context manager which serves this websocket.
:param filter: an async callback which accepts the connection request
and returns a bool, or an explicit Accept/Reject message.
"""
ws = await create_websocket_server(sock, filter=filter)
try:
yield ws
finally:
await ws.close() | python | async def open_websocket_server(sock, filter=None): # pylint: disable=W0622
"""
A context manager which serves this websocket.
:param filter: an async callback which accepts the connection request
and returns a bool, or an explicit Accept/Reject message.
"""
ws = await create_websocket_server(sock, filter=filter)
try:
yield ws
finally:
await ws.close() | ['async', 'def', 'open_websocket_server', '(', 'sock', ',', 'filter', '=', 'None', ')', ':', '# pylint: disable=W0622', 'ws', '=', 'await', 'create_websocket_server', '(', 'sock', ',', 'filter', '=', 'filter', ')', 'try', ':', 'yield', 'ws', 'finally', ':', 'await', 'ws', '.', 'close', '(', ')'] | A context manager which serves this websocket.
:param filter: an async callback which accepts the connection request
and returns a bool, or an explicit Accept/Reject message. | ['A', 'context', 'manager', 'which', 'serves', 'this', 'websocket', '.'] | train | https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/server.py#L14-L25 |
3,631 | brainiak/brainiak | brainiak/funcalign/sssrm.py | SSSRM.fit | def fit(self, X, y, Z):
"""Compute the Semi-Supervised Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
y : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in Z.
Z : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
for training the MLR classifier.
"""
logger.info('Starting SS-SRM')
# Check that the alpha value is in range (0.0,1.0)
if 0.0 >= self.alpha or self.alpha >= 1.0:
raise ValueError("Alpha parameter should be in range (0.0, 1.0)")
# Check that the regularizer value is positive
if 0.0 >= self.gamma:
raise ValueError("Gamma parameter should be positive.")
# Check the number of subjects
if len(X) <= 1 or len(y) <= 1 or len(Z) <= 1:
raise ValueError("There are not enough subjects in the input "
"data to train the model.")
if not (len(X) == len(y)) or not (len(X) == len(Z)):
raise ValueError("Different number of subjects in data.")
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough samples to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs for alignment
# and if alignment and classification data have the same number of
# voxels per subject. Also check that there labels for all the classif.
# sample
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
assert_all_finite(X[subject])
assert_all_finite(Z[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of alignment samples "
"between subjects.")
if X[subject].shape[0] != Z[subject].shape[0]:
raise ValueError("Different number of voxels between alignment"
" and classification data (subject {0:d})"
".".format(subject))
if Z[subject].shape[1] != y[subject].size:
raise ValueError("Different number of samples and labels in "
"subject {0:d}.".format(subject))
# Map the classes to [0..C-1]
new_y = self._init_classes(y)
# Run SS-SRM
self.w_, self.s_, self.theta_, self.bias_ = self._sssrm(X, Z, new_y)
return self | python | def fit(self, X, y, Z):
"""Compute the Semi-Supervised Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
y : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in Z.
Z : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
for training the MLR classifier.
"""
logger.info('Starting SS-SRM')
# Check that the alpha value is in range (0.0,1.0)
if 0.0 >= self.alpha or self.alpha >= 1.0:
raise ValueError("Alpha parameter should be in range (0.0, 1.0)")
# Check that the regularizer value is positive
if 0.0 >= self.gamma:
raise ValueError("Gamma parameter should be positive.")
# Check the number of subjects
if len(X) <= 1 or len(y) <= 1 or len(Z) <= 1:
raise ValueError("There are not enough subjects in the input "
"data to train the model.")
if not (len(X) == len(y)) or not (len(X) == len(Z)):
raise ValueError("Different number of subjects in data.")
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough samples to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs for alignment
# and if alignment and classification data have the same number of
# voxels per subject. Also check that there labels for all the classif.
# sample
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
assert_all_finite(X[subject])
assert_all_finite(Z[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of alignment samples "
"between subjects.")
if X[subject].shape[0] != Z[subject].shape[0]:
raise ValueError("Different number of voxels between alignment"
" and classification data (subject {0:d})"
".".format(subject))
if Z[subject].shape[1] != y[subject].size:
raise ValueError("Different number of samples and labels in "
"subject {0:d}.".format(subject))
# Map the classes to [0..C-1]
new_y = self._init_classes(y)
# Run SS-SRM
self.w_, self.s_, self.theta_, self.bias_ = self._sssrm(X, Z, new_y)
return self | ['def', 'fit', '(', 'self', ',', 'X', ',', 'y', ',', 'Z', ')', ':', 'logger', '.', 'info', '(', "'Starting SS-SRM'", ')', '# Check that the alpha value is in range (0.0,1.0)', 'if', '0.0', '>=', 'self', '.', 'alpha', 'or', 'self', '.', 'alpha', '>=', '1.0', ':', 'raise', 'ValueError', '(', '"Alpha parameter should be in range (0.0, 1.0)"', ')', '# Check that the regularizer value is positive', 'if', '0.0', '>=', 'self', '.', 'gamma', ':', 'raise', 'ValueError', '(', '"Gamma parameter should be positive."', ')', '# Check the number of subjects', 'if', 'len', '(', 'X', ')', '<=', '1', 'or', 'len', '(', 'y', ')', '<=', '1', 'or', 'len', '(', 'Z', ')', '<=', '1', ':', 'raise', 'ValueError', '(', '"There are not enough subjects in the input "', '"data to train the model."', ')', 'if', 'not', '(', 'len', '(', 'X', ')', '==', 'len', '(', 'y', ')', ')', 'or', 'not', '(', 'len', '(', 'X', ')', '==', 'len', '(', 'Z', ')', ')', ':', 'raise', 'ValueError', '(', '"Different number of subjects in data."', ')', '# Check for input data sizes', 'if', 'X', '[', '0', ']', '.', 'shape', '[', '1', ']', '<', 'self', '.', 'features', ':', 'raise', 'ValueError', '(', '"There are not enough samples to train the model with "', '"{0:d} features."', '.', 'format', '(', 'self', '.', 'features', ')', ')', '# Check if all subjects have same number of TRs for alignment', '# and if alignment and classification data have the same number of', '# voxels per subject. Also check that there labels for all the classif.', '# sample', 'number_trs', '=', 'X', '[', '0', ']', '.', 'shape', '[', '1', ']', 'number_subjects', '=', 'len', '(', 'X', ')', 'for', 'subject', 'in', 'range', '(', 'number_subjects', ')', ':', 'assert_all_finite', '(', 'X', '[', 'subject', ']', ')', 'assert_all_finite', '(', 'Z', '[', 'subject', ']', ')', 'if', 'X', '[', 'subject', ']', '.', 'shape', '[', '1', ']', '!=', 'number_trs', ':', 'raise', 'ValueError', '(', '"Different number of alignment samples "', '"between subjects."', ')', 'if', 'X', '[', 'subject', ']', '.', 'shape', '[', '0', ']', '!=', 'Z', '[', 'subject', ']', '.', 'shape', '[', '0', ']', ':', 'raise', 'ValueError', '(', '"Different number of voxels between alignment"', '" and classification data (subject {0:d})"', '"."', '.', 'format', '(', 'subject', ')', ')', 'if', 'Z', '[', 'subject', ']', '.', 'shape', '[', '1', ']', '!=', 'y', '[', 'subject', ']', '.', 'size', ':', 'raise', 'ValueError', '(', '"Different number of samples and labels in "', '"subject {0:d}."', '.', 'format', '(', 'subject', ')', ')', '# Map the classes to [0..C-1]', 'new_y', '=', 'self', '.', '_init_classes', '(', 'y', ')', '# Run SS-SRM', 'self', '.', 'w_', ',', 'self', '.', 's_', ',', 'self', '.', 'theta_', ',', 'self', '.', 'bias_', '=', 'self', '.', '_sssrm', '(', 'X', ',', 'Z', ',', 'new_y', ')', 'return', 'self'] | Compute the Semi-Supervised Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
y : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in Z.
Z : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
for training the MLR classifier. | ['Compute', 'the', 'Semi', '-', 'Supervised', 'Shared', 'Response', 'Model'] | train | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/sssrm.py#L133-L202 |
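A synthetic-data sketch of fit(); the constructor keyword names are assumptions inferred from the attributes (features, gamma, alpha) checked above, and the random arrays merely satisfy the shape checks rather than representing real fMRI data.

import numpy as np
from brainiak.funcalign.sssrm import SSSRM

voxels, n_align, n_labeled, subjects, classes = 100, 60, 40, 3, 4
X = [np.random.randn(voxels, n_align) for _ in range(subjects)]      # alignment data
Z = [np.random.randn(voxels, n_labeled) for _ in range(subjects)]    # labeled data
y = [np.random.randint(0, classes, n_labeled) for _ in range(subjects)]

model = SSSRM(features=10, gamma=1.0, alpha=0.5)   # assumed keyword names
model.fit(X, y, Z)
print(len(model.w_))                               # one spatial mapping per subject (expected)
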
3,632 | horazont/aioxmpp | aioxmpp/statemachine.py | OrderedStateMachine.wait_for | def wait_for(self, new_state):
"""
Wait for an exact state `new_state` to be reached by the state
machine.
If the state is skipped, that is, if a state which is greater than
`new_state` is written to :attr:`state`, the coroutine raises
:class:`OrderedStateSkipped` exception as it is not possible anymore
that it can return successfully (see :attr:`state`).
"""
if self._state == new_state:
return
if self._state > new_state:
raise OrderedStateSkipped(new_state)
fut = asyncio.Future(loop=self.loop)
self._exact_waiters.append((new_state, fut))
yield from fut | python | def wait_for(self, new_state):
"""
Wait for an exact state `new_state` to be reached by the state
machine.
If the state is skipped, that is, if a state which is greater than
`new_state` is written to :attr:`state`, the coroutine raises
:class:`OrderedStateSkipped` exception as it is not possible anymore
that it can return successfully (see :attr:`state`).
"""
if self._state == new_state:
return
if self._state > new_state:
raise OrderedStateSkipped(new_state)
fut = asyncio.Future(loop=self.loop)
self._exact_waiters.append((new_state, fut))
yield from fut | ['def', 'wait_for', '(', 'self', ',', 'new_state', ')', ':', 'if', 'self', '.', '_state', '==', 'new_state', ':', 'return', 'if', 'self', '.', '_state', '>', 'new_state', ':', 'raise', 'OrderedStateSkipped', '(', 'new_state', ')', 'fut', '=', 'asyncio', '.', 'Future', '(', 'loop', '=', 'self', '.', 'loop', ')', 'self', '.', '_exact_waiters', '.', 'append', '(', '(', 'new_state', ',', 'fut', ')', ')', 'yield', 'from', 'fut'] | Wait for an exact state `new_state` to be reached by the state
machine.
If the state is skipped, that is, if a state which is greater than
`new_state` is written to :attr:`state`, the coroutine raises
:class:`OrderedStateSkipped` exception as it is not possible anymore
that it can return successfully (see :attr:`state`). | ['Wait', 'for', 'an', 'exact', 'state', 'new_state', 'to', 'be', 'reached', 'by', 'the', 'state', 'machine', '.'] | train | https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/statemachine.py#L152-L170 |
3,633 | cwoebker/pen | pen/core.py | cmd_touch_note | def cmd_touch_note(args):
"""Create a note"""
major = args.get(0)
minor = args.get(1)
if major in penStore.data:
if minor is None: # show items in list
for note in penStore.data[major]:
puts(note)
elif minor in penStore.data[major]:
penStore.openNote(major, minor)
else:
penStore.createNote(major, minor)
penStore.openNote(major, minor)
else:
puts("No list of that name.") | python | def cmd_touch_note(args):
"""Create a note"""
major = args.get(0)
minor = args.get(1)
if major in penStore.data:
if minor is None: # show items in list
for note in penStore.data[major]:
puts(note)
elif minor in penStore.data[major]:
penStore.openNote(major, minor)
else:
penStore.createNote(major, minor)
penStore.openNote(major, minor)
else:
puts("No list of that name.") | ['def', 'cmd_touch_note', '(', 'args', ')', ':', 'major', '=', 'args', '.', 'get', '(', '0', ')', 'minor', '=', 'args', '.', 'get', '(', '1', ')', 'if', 'major', 'in', 'penStore', '.', 'data', ':', 'if', 'minor', 'is', 'None', ':', '# show items in list', 'for', 'note', 'in', 'penStore', '.', 'data', '[', 'major', ']', ':', 'puts', '(', 'note', ')', 'elif', 'minor', 'in', 'penStore', '.', 'data', '[', 'major', ']', ':', 'penStore', '.', 'openNote', '(', 'major', ',', 'minor', ')', 'else', ':', 'penStore', '.', 'createNote', '(', 'major', ',', 'minor', ')', 'penStore', '.', 'openNote', '(', 'major', ',', 'minor', ')', 'else', ':', 'puts', '(', '"No list of that name."', ')'] | Create a note | ['Create', 'a', 'note'] | train | https://github.com/cwoebker/pen/blob/996dfcdc018f2fc14a376835a2622fb4a7230a2f/pen/core.py#L58-L72 |
3,634 | jtwhite79/pyemu | pyemu/utils/geostats.py | gslib_2_dataframe | def gslib_2_dataframe(filename,attr_name=None,x_idx=0,y_idx=1):
""" function to read a GSLIB point data file into a pandas.DataFrame
Parameters
----------
filename : (str)
GSLIB file
attr_name : (str)
the column name in the dataframe for the attribute. If None, GSLIB file
can have only 3 columns. attr_name must be in the GSLIB file header
x_idx : (int)
the index of the x-coordinate information in the GSLIB file. Default is
0 (first column)
y_idx : (int)
the index of the y-coordinate information in the GSLIB file.
Default is 1 (second column)
Returns
-------
df : pandas.DataFrame
Raises
------
exception if attr_name is None and GSLIB file has more than 3 columns
Note
----
assigns generic point names ("pt0, pt1, etc)
Example
-------
``>>>import pyemu``
    ``>>>df = pyemu.utils.geostats.gslib_2_dataframe("prop.gslib",attr_name="hk")``
"""
with open(filename,'r') as f:
title = f.readline().strip()
num_attrs = int(f.readline().strip())
attrs = [f.readline().strip() for _ in range(num_attrs)]
if attr_name is not None:
assert attr_name in attrs,"{0} not in attrs:{1}".format(attr_name,','.join(attrs))
else:
assert len(attrs) == 3,"propname is None but more than 3 attrs in gslib file"
attr_name = attrs[2]
assert len(attrs) > x_idx
assert len(attrs) > y_idx
a_idx = attrs.index(attr_name)
x,y,a = [],[],[]
while True:
line = f.readline()
if line == '':
break
raw = line.strip().split()
try:
x.append(float(raw[x_idx]))
y.append(float(raw[y_idx]))
a.append(float(raw[a_idx]))
except Exception as e:
raise Exception("error paring line {0}: {1}".format(line,str(e)))
df = pd.DataFrame({"x":x,"y":y,"value":a})
df.loc[:,"name"] = ["pt{0}".format(i) for i in range(df.shape[0])]
df.index = df.name
return df | python | def gslib_2_dataframe(filename,attr_name=None,x_idx=0,y_idx=1):
""" function to read a GSLIB point data file into a pandas.DataFrame
Parameters
----------
filename : (str)
GSLIB file
attr_name : (str)
the column name in the dataframe for the attribute. If None, GSLIB file
can have only 3 columns. attr_name must be in the GSLIB file header
x_idx : (int)
the index of the x-coordinate information in the GSLIB file. Default is
0 (first column)
y_idx : (int)
the index of the y-coordinate information in the GSLIB file.
Default is 1 (second column)
Returns
-------
df : pandas.DataFrame
Raises
------
exception if attr_name is None and GSLIB file has more than 3 columns
Note
----
assigns generic point names ("pt0, pt1, etc)
Example
-------
``>>>import pyemu``
    ``>>>df = pyemu.utils.geostats.gslib_2_dataframe("prop.gslib",attr_name="hk")``
"""
with open(filename,'r') as f:
title = f.readline().strip()
num_attrs = int(f.readline().strip())
attrs = [f.readline().strip() for _ in range(num_attrs)]
if attr_name is not None:
assert attr_name in attrs,"{0} not in attrs:{1}".format(attr_name,','.join(attrs))
else:
assert len(attrs) == 3,"propname is None but more than 3 attrs in gslib file"
attr_name = attrs[2]
assert len(attrs) > x_idx
assert len(attrs) > y_idx
a_idx = attrs.index(attr_name)
x,y,a = [],[],[]
while True:
line = f.readline()
if line == '':
break
raw = line.strip().split()
try:
x.append(float(raw[x_idx]))
y.append(float(raw[y_idx]))
a.append(float(raw[a_idx]))
except Exception as e:
raise Exception("error paring line {0}: {1}".format(line,str(e)))
df = pd.DataFrame({"x":x,"y":y,"value":a})
df.loc[:,"name"] = ["pt{0}".format(i) for i in range(df.shape[0])]
df.index = df.name
return df | ['def', 'gslib_2_dataframe', '(', 'filename', ',', 'attr_name', '=', 'None', ',', 'x_idx', '=', '0', ',', 'y_idx', '=', '1', ')', ':', 'with', 'open', '(', 'filename', ',', "'r'", ')', 'as', 'f', ':', 'title', '=', 'f', '.', 'readline', '(', ')', '.', 'strip', '(', ')', 'num_attrs', '=', 'int', '(', 'f', '.', 'readline', '(', ')', '.', 'strip', '(', ')', ')', 'attrs', '=', '[', 'f', '.', 'readline', '(', ')', '.', 'strip', '(', ')', 'for', '_', 'in', 'range', '(', 'num_attrs', ')', ']', 'if', 'attr_name', 'is', 'not', 'None', ':', 'assert', 'attr_name', 'in', 'attrs', ',', '"{0} not in attrs:{1}"', '.', 'format', '(', 'attr_name', ',', "','", '.', 'join', '(', 'attrs', ')', ')', 'else', ':', 'assert', 'len', '(', 'attrs', ')', '==', '3', ',', '"propname is None but more than 3 attrs in gslib file"', 'attr_name', '=', 'attrs', '[', '2', ']', 'assert', 'len', '(', 'attrs', ')', '>', 'x_idx', 'assert', 'len', '(', 'attrs', ')', '>', 'y_idx', 'a_idx', '=', 'attrs', '.', 'index', '(', 'attr_name', ')', 'x', ',', 'y', ',', 'a', '=', '[', ']', ',', '[', ']', ',', '[', ']', 'while', 'True', ':', 'line', '=', 'f', '.', 'readline', '(', ')', 'if', 'line', '==', "''", ':', 'break', 'raw', '=', 'line', '.', 'strip', '(', ')', '.', 'split', '(', ')', 'try', ':', 'x', '.', 'append', '(', 'float', '(', 'raw', '[', 'x_idx', ']', ')', ')', 'y', '.', 'append', '(', 'float', '(', 'raw', '[', 'y_idx', ']', ')', ')', 'a', '.', 'append', '(', 'float', '(', 'raw', '[', 'a_idx', ']', ')', ')', 'except', 'Exception', 'as', 'e', ':', 'raise', 'Exception', '(', '"error paring line {0}: {1}"', '.', 'format', '(', 'line', ',', 'str', '(', 'e', ')', ')', ')', 'df', '=', 'pd', '.', 'DataFrame', '(', '{', '"x"', ':', 'x', ',', '"y"', ':', 'y', ',', '"value"', ':', 'a', '}', ')', 'df', '.', 'loc', '[', ':', ',', '"name"', ']', '=', '[', '"pt{0}"', '.', 'format', '(', 'i', ')', 'for', 'i', 'in', 'range', '(', 'df', '.', 'shape', '[', '0', ']', ')', ']', 'df', '.', 'index', '=', 'df', '.', 'name', 'return', 'df'] | function to read a GSLIB point data file into a pandas.DataFrame
Parameters
----------
filename : (str)
GSLIB file
attr_name : (str)
the column name in the dataframe for the attribute. If None, GSLIB file
can have only 3 columns. attr_name must be in the GSLIB file header
x_idx : (int)
the index of the x-coordinate information in the GSLIB file. Default is
0 (first column)
y_idx : (int)
the index of the y-coordinate information in the GSLIB file.
Default is 1 (second column)
Returns
-------
df : pandas.DataFrame
Raises
------
exception if attr_name is None and GSLIB file has more than 3 columns
Note
----
assigns generic point names ("pt0, pt1, etc)
Example
-------
``>>>import pyemu``
    ``>>>df = pyemu.utils.geostats.gslib_2_dataframe("prop.gslib",attr_name="hk")`` | ['function', 'to', 'read', 'a', 'GSLIB', 'point', 'data', 'file', 'into', 'a', 'pandas', '.', 'DataFrame'] | train | https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/utils/geostats.py#L1792-L1856 |
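A round-trip sketch: write a minimal GSLIB point file matching the header layout the parser above expects, then read it back. It assumes pyemu is installed, and the file contents are made up.

from pyemu.utils.geostats import gslib_2_dataframe

lines = [
    "toy gslib file",   # title line
    "3",                # number of attributes
    "x",
    "y",
    "hk",
    "0.0 0.0 1.5",
    "10.0 0.0 2.5",
    "10.0 10.0 3.5",
]
with open("prop.gslib", "w") as f:
    f.write("\n".join(lines) + "\n")

df = gslib_2_dataframe("prop.gslib", attr_name="hk")
print(df[["x", "y", "value"]])
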
3,635 | xav/Grapefruit | grapefruit.py | rgb_to_yiq | def rgb_to_yiq(r, g=None, b=None):
"""Convert the color from RGB to YIQ.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (y, i, q) tuple in the range:
y[0...1],
i[0...1],
q[0...1]
>>> '(%g, %g, %g)' % rgb_to_yiq(1, 0.5, 0)
'(0.592263, 0.458874, -0.0499818)'
"""
if type(r) in [list,tuple]:
r, g, b = r
y = (r * 0.29895808) + (g * 0.58660979) + (b *0.11443213)
i = (r * 0.59590296) - (g * 0.27405705) - (b *0.32184591)
q = (r * 0.21133576) - (g * 0.52263517) + (b *0.31129940)
return (y, i, q) | python | def rgb_to_yiq(r, g=None, b=None):
"""Convert the color from RGB to YIQ.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (y, i, q) tuple in the range:
y[0...1],
i[0...1],
q[0...1]
>>> '(%g, %g, %g)' % rgb_to_yiq(1, 0.5, 0)
'(0.592263, 0.458874, -0.0499818)'
"""
if type(r) in [list,tuple]:
r, g, b = r
y = (r * 0.29895808) + (g * 0.58660979) + (b *0.11443213)
i = (r * 0.59590296) - (g * 0.27405705) - (b *0.32184591)
q = (r * 0.21133576) - (g * 0.52263517) + (b *0.31129940)
return (y, i, q) | ['def', 'rgb_to_yiq', '(', 'r', ',', 'g', '=', 'None', ',', 'b', '=', 'None', ')', ':', 'if', 'type', '(', 'r', ')', 'in', '[', 'list', ',', 'tuple', ']', ':', 'r', ',', 'g', ',', 'b', '=', 'r', 'y', '=', '(', 'r', '*', '0.29895808', ')', '+', '(', 'g', '*', '0.58660979', ')', '+', '(', 'b', '*', '0.11443213', ')', 'i', '=', '(', 'r', '*', '0.59590296', ')', '-', '(', 'g', '*', '0.27405705', ')', '-', '(', 'b', '*', '0.32184591', ')', 'q', '=', '(', 'r', '*', '0.21133576', ')', '-', '(', 'g', '*', '0.52263517', ')', '+', '(', 'b', '*', '0.31129940', ')', 'return', '(', 'y', ',', 'i', ',', 'q', ')'] | Convert the color from RGB to YIQ.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (y, i, q) tuple in the range:
y[0...1],
i[0...1],
q[0...1]
>>> '(%g, %g, %g)' % rgb_to_yiq(1, 0.5, 0)
'(0.592263, 0.458874, -0.0499818)' | ['Convert', 'the', 'color', 'from', 'RGB', 'to', 'YIQ', '.'] | train | https://github.com/xav/Grapefruit/blob/b3d88375be727a3a1ec5839fbc462e0e8e0836e4/grapefruit.py#L430-L457 |
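The conversion above is a fixed 3x3 matrix, so it can be checked without grapefruit; this standalone snippet reuses the same coefficients.

def rgb_to_yiq(r, g, b):
    y = r * 0.29895808 + g * 0.58660979 + b * 0.11443213
    i = r * 0.59590296 - g * 0.27405705 - b * 0.32184591
    q = r * 0.21133576 - g * 0.52263517 + b * 0.31129940
    return y, i, q

print('(%g, %g, %g)' % rgb_to_yiq(1, 0.5, 0))   # (0.592263, 0.458874, -0.0499818)
print(round(rgb_to_yiq(1, 1, 1)[0], 6))         # 1.0, since the luma coefficients sum to 1
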
3,636 | brean/python-pathfinding | pathfinding/core/util.py | expand_path | def expand_path(path):
'''
Given a compressed path, return a new path that has all the segments
in it interpolated.
'''
expanded = []
if len(path) < 2:
return expanded
for i in range(len(path)-1):
expanded += bresenham(path[i], path[i + 1])
expanded += [path[:-1]]
return expanded | python | def expand_path(path):
'''
Given a compressed path, return a new path that has all the segments
in it interpolated.
'''
expanded = []
if len(path) < 2:
return expanded
for i in range(len(path)-1):
expanded += bresenham(path[i], path[i + 1])
expanded += [path[:-1]]
return expanded | ['def', 'expand_path', '(', 'path', ')', ':', 'expanded', '=', '[', ']', 'if', 'len', '(', 'path', ')', '<', '2', ':', 'return', 'expanded', 'for', 'i', 'in', 'range', '(', 'len', '(', 'path', ')', '-', '1', ')', ':', 'expanded', '+=', 'bresenham', '(', 'path', '[', 'i', ']', ',', 'path', '[', 'i', '+', '1', ']', ')', 'expanded', '+=', '[', 'path', '[', ':', '-', '1', ']', ']', 'return', 'expanded'] | Given a compressed path, return a new path that has all the segments
in it interpolated. | ['Given', 'a', 'compressed', 'path', 'return', 'a', 'new', 'path', 'that', 'has', 'all', 'the', 'segments', 'in', 'it', 'interpolated', '.'] | train | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/core/util.py#L97-L108 |
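A quick demonstration, assuming the pathfinding package is installed; expand_path() and its bresenham() helper both live in the pathfinding.core.util module shown above, and the coordinates are arbitrary.

from pathfinding.core.util import expand_path

# A compressed path with two straight segments; expand_path() re-inserts
# the intermediate grid points along each segment via bresenham().
compressed = [(0, 0), (4, 0), (4, 3)]
print(expand_path(compressed))
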
3,637 | MartinThoma/hwrt | hwrt/serve.py | work | def work():
"""Implement a worker for write-math.com."""
global n
cmd = utils.get_project_configuration()
if 'worker_api_key' not in cmd:
return ("You need to define a 'worker_api_key' in your ~/")
chunk_size = 1000
logging.info("Start working with n=%i", n)
for _ in range(chunk_size):
# contact the write-math server and get something to classify
url = "http://www.martin-thoma.de/write-math/api/get_unclassified.php"
response = urlopen(url)
page_source = response.read()
parsed_json = json.loads(page_source)
if parsed_json is False:
return "Nothing left to classify"
raw_data_json = parsed_json['recording']
# Classify
# Check recording
try:
json.loads(raw_data_json)
except ValueError:
return ("Raw Data ID %s; Invalid JSON string: %s" %
(parsed_json['id'], raw_data_json))
# Classify
if use_segmenter_flag:
strokelist = json.loads(raw_data_json)
beam = se.Beam()
for stroke in strokelist:
beam.add_stroke(stroke)
results = beam.get_writemath_results()
else:
results_sym = classify.classify_segmented_recording(raw_data_json)
results = []
strokelist = json.loads(raw_data_json)
segmentation = [list(range(len(strokelist)))]
translate = _get_translate()
for symbol in results_sym:
s = {'id': get_writemath_id(symbol, translate),
'probability': symbol['probability']}
results.append({'probability': symbol['probability'],
'segmentation': segmentation,
'symbols': [s]})
print("\thttp://write-math.com/view/?raw_data_id=%s" %
str(parsed_json['id']))
# Submit classification to write-math.com server
results_json = get_json_result(results, n=n)
headers = {'User-Agent': 'Mozilla/5.0',
'Content-Type': 'application/x-www-form-urlencoded'}
payload = {'recording_id': parsed_json['id'],
'results': results_json,
'api_key': cmd['worker_api_key']}
s = requests.Session()
req = requests.Request('POST', url, headers=headers, data=payload)
prepared = req.prepare()
response = s.send(prepared)
try:
response = json.loads(response.text)
except ValueError:
return "Invalid JSON response: %s" % response.text
if 'error' in response:
logging.info(response)
return str(response)
return "Done - Classified %i recordings" % chunk_size | python | def work():
"""Implement a worker for write-math.com."""
global n
cmd = utils.get_project_configuration()
if 'worker_api_key' not in cmd:
return ("You need to define a 'worker_api_key' in your ~/")
chunk_size = 1000
logging.info("Start working with n=%i", n)
for _ in range(chunk_size):
# contact the write-math server and get something to classify
url = "http://www.martin-thoma.de/write-math/api/get_unclassified.php"
response = urlopen(url)
page_source = response.read()
parsed_json = json.loads(page_source)
if parsed_json is False:
return "Nothing left to classify"
raw_data_json = parsed_json['recording']
# Classify
# Check recording
try:
json.loads(raw_data_json)
except ValueError:
return ("Raw Data ID %s; Invalid JSON string: %s" %
(parsed_json['id'], raw_data_json))
# Classify
if use_segmenter_flag:
strokelist = json.loads(raw_data_json)
beam = se.Beam()
for stroke in strokelist:
beam.add_stroke(stroke)
results = beam.get_writemath_results()
else:
results_sym = classify.classify_segmented_recording(raw_data_json)
results = []
strokelist = json.loads(raw_data_json)
segmentation = [list(range(len(strokelist)))]
translate = _get_translate()
for symbol in results_sym:
s = {'id': get_writemath_id(symbol, translate),
'probability': symbol['probability']}
results.append({'probability': symbol['probability'],
'segmentation': segmentation,
'symbols': [s]})
print("\thttp://write-math.com/view/?raw_data_id=%s" %
str(parsed_json['id']))
# Submit classification to write-math.com server
results_json = get_json_result(results, n=n)
headers = {'User-Agent': 'Mozilla/5.0',
'Content-Type': 'application/x-www-form-urlencoded'}
payload = {'recording_id': parsed_json['id'],
'results': results_json,
'api_key': cmd['worker_api_key']}
s = requests.Session()
req = requests.Request('POST', url, headers=headers, data=payload)
prepared = req.prepare()
response = s.send(prepared)
try:
response = json.loads(response.text)
except ValueError:
return "Invalid JSON response: %s" % response.text
if 'error' in response:
logging.info(response)
return str(response)
return "Done - Classified %i recordings" % chunk_size | ['def', 'work', '(', ')', ':', 'global', 'n', 'cmd', '=', 'utils', '.', 'get_project_configuration', '(', ')', 'if', "'worker_api_key'", 'not', 'in', 'cmd', ':', 'return', '(', '"You need to define a \'worker_api_key\' in your ~/"', ')', 'chunk_size', '=', '1000', 'logging', '.', 'info', '(', '"Start working with n=%i"', ',', 'n', ')', 'for', '_', 'in', 'range', '(', 'chunk_size', ')', ':', '# contact the write-math server and get something to classify', 'url', '=', '"http://www.martin-thoma.de/write-math/api/get_unclassified.php"', 'response', '=', 'urlopen', '(', 'url', ')', 'page_source', '=', 'response', '.', 'read', '(', ')', 'parsed_json', '=', 'json', '.', 'loads', '(', 'page_source', ')', 'if', 'parsed_json', 'is', 'False', ':', 'return', '"Nothing left to classify"', 'raw_data_json', '=', 'parsed_json', '[', "'recording'", ']', '# Classify', '# Check recording', 'try', ':', 'json', '.', 'loads', '(', 'raw_data_json', ')', 'except', 'ValueError', ':', 'return', '(', '"Raw Data ID %s; Invalid JSON string: %s"', '%', '(', 'parsed_json', '[', "'id'", ']', ',', 'raw_data_json', ')', ')', '# Classify', 'if', 'use_segmenter_flag', ':', 'strokelist', '=', 'json', '.', 'loads', '(', 'raw_data_json', ')', 'beam', '=', 'se', '.', 'Beam', '(', ')', 'for', 'stroke', 'in', 'strokelist', ':', 'beam', '.', 'add_stroke', '(', 'stroke', ')', 'results', '=', 'beam', '.', 'get_writemath_results', '(', ')', 'else', ':', 'results_sym', '=', 'classify', '.', 'classify_segmented_recording', '(', 'raw_data_json', ')', 'results', '=', '[', ']', 'strokelist', '=', 'json', '.', 'loads', '(', 'raw_data_json', ')', 'segmentation', '=', '[', 'list', '(', 'range', '(', 'len', '(', 'strokelist', ')', ')', ')', ']', 'translate', '=', '_get_translate', '(', ')', 'for', 'symbol', 'in', 'results_sym', ':', 's', '=', '{', "'id'", ':', 'get_writemath_id', '(', 'symbol', ',', 'translate', ')', ',', "'probability'", ':', 'symbol', '[', "'probability'", ']', '}', 'results', '.', 'append', '(', '{', "'probability'", ':', 'symbol', '[', "'probability'", ']', ',', "'segmentation'", ':', 'segmentation', ',', "'symbols'", ':', '[', 's', ']', '}', ')', 'print', '(', '"\\thttp://write-math.com/view/?raw_data_id=%s"', '%', 'str', '(', 'parsed_json', '[', "'id'", ']', ')', ')', '# Submit classification to write-math.com server', 'results_json', '=', 'get_json_result', '(', 'results', ',', 'n', '=', 'n', ')', 'headers', '=', '{', "'User-Agent'", ':', "'Mozilla/5.0'", ',', "'Content-Type'", ':', "'application/x-www-form-urlencoded'", '}', 'payload', '=', '{', "'recording_id'", ':', 'parsed_json', '[', "'id'", ']', ',', "'results'", ':', 'results_json', ',', "'api_key'", ':', 'cmd', '[', "'worker_api_key'", ']', '}', 's', '=', 'requests', '.', 'Session', '(', ')', 'req', '=', 'requests', '.', 'Request', '(', "'POST'", ',', 'url', ',', 'headers', '=', 'headers', ',', 'data', '=', 'payload', ')', 'prepared', '=', 'req', '.', 'prepare', '(', ')', 'response', '=', 's', '.', 'send', '(', 'prepared', ')', 'try', ':', 'response', '=', 'json', '.', 'loads', '(', 'response', '.', 'text', ')', 'except', 'ValueError', ':', 'return', '"Invalid JSON response: %s"', '%', 'response', '.', 'text', 'if', "'error'", 'in', 'response', ':', 'logging', '.', 'info', '(', 'response', ')', 'return', 'str', '(', 'response', ')', 'return', '"Done - Classified %i recordings"', '%', 'chunk_size'] | Implement a worker for write-math.com. 
| ['Implement', 'a', 'worker', 'for', 'write', '-', 'math', '.', 'com', '.'] | train | https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/serve.py#L260-L332 |
3,638 | MoseleyBioinformaticsLab/mwtab | mwtab/fileio.py | _generate_filenames | def _generate_filenames(sources):
"""Generate filenames.
:param tuple sources: Sequence of strings representing path to file(s).
:return: Path to file(s).
:rtype: :py:class:`str`
"""
for source in sources:
if os.path.isdir(source):
for path, dirlist, filelist in os.walk(source):
for fname in filelist:
if GenericFilePath.is_compressed(fname):
if VERBOSE:
print("Skipping compressed file: {}".format(os.path.abspath(fname)))
continue
else:
yield os.path.join(path, fname)
elif os.path.isfile(source):
yield source
elif source.isdigit():
analysis_id = "AN{}".format(source.zfill(6))
url = MWREST.format(analysis_id)
yield url
elif GenericFilePath.is_url(source):
yield source
else:
raise TypeError("Unknown file source.") | python | def _generate_filenames(sources):
"""Generate filenames.
:param tuple sources: Sequence of strings representing path to file(s).
:return: Path to file(s).
:rtype: :py:class:`str`
"""
for source in sources:
if os.path.isdir(source):
for path, dirlist, filelist in os.walk(source):
for fname in filelist:
if GenericFilePath.is_compressed(fname):
if VERBOSE:
print("Skipping compressed file: {}".format(os.path.abspath(fname)))
continue
else:
yield os.path.join(path, fname)
elif os.path.isfile(source):
yield source
elif source.isdigit():
analysis_id = "AN{}".format(source.zfill(6))
url = MWREST.format(analysis_id)
yield url
elif GenericFilePath.is_url(source):
yield source
else:
raise TypeError("Unknown file source.") | ['def', '_generate_filenames', '(', 'sources', ')', ':', 'for', 'source', 'in', 'sources', ':', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'source', ')', ':', 'for', 'path', ',', 'dirlist', ',', 'filelist', 'in', 'os', '.', 'walk', '(', 'source', ')', ':', 'for', 'fname', 'in', 'filelist', ':', 'if', 'GenericFilePath', '.', 'is_compressed', '(', 'fname', ')', ':', 'if', 'VERBOSE', ':', 'print', '(', '"Skipping compressed file: {}"', '.', 'format', '(', 'os', '.', 'path', '.', 'abspath', '(', 'fname', ')', ')', ')', 'continue', 'else', ':', 'yield', 'os', '.', 'path', '.', 'join', '(', 'path', ',', 'fname', ')', 'elif', 'os', '.', 'path', '.', 'isfile', '(', 'source', ')', ':', 'yield', 'source', 'elif', 'source', '.', 'isdigit', '(', ')', ':', 'analysis_id', '=', '"AN{}"', '.', 'format', '(', 'source', '.', 'zfill', '(', '6', ')', ')', 'url', '=', 'MWREST', '.', 'format', '(', 'analysis_id', ')', 'yield', 'url', 'elif', 'GenericFilePath', '.', 'is_url', '(', 'source', ')', ':', 'yield', 'source', 'else', ':', 'raise', 'TypeError', '(', '"Unknown file source."', ')'] | Generate filenames.
:param tuple sources: Sequence of strings representing path to file(s).
:return: Path to file(s).
:rtype: :py:class:`str` | ['Generate', 'filenames', '.'] | train | https://github.com/MoseleyBioinformaticsLab/mwtab/blob/8c0ae8ab2aa621662f99589ed41e481cf8b7152b/mwtab/fileio.py#L43-L73 |
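A minimal usage sketch for _generate_filenames above; the directory, file name and analysis number are invented for illustration, and MWREST/VERBOSE are module-level constants assumed to be configured.
from mwtab.fileio import _generate_filenames
sources = ("studies_dir", "ST000001_AN000001.txt", "2")
for item in _generate_filenames(sources):
    print(item)  # files found under studies_dir, then the file path, then the REST URL for AN000002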
3,639 | srsudar/eg | eg/config.py | get_expanded_path | def get_expanded_path(path):
"""Expand ~ and variables in a path. If path is not truthy, return None."""
if path:
result = path
result = os.path.expanduser(result)
result = os.path.expandvars(result)
return result
else:
return None | python | def get_expanded_path(path):
"""Expand ~ and variables in a path. If path is not truthy, return None."""
if path:
result = path
result = os.path.expanduser(result)
result = os.path.expandvars(result)
return result
else:
return None | ['def', 'get_expanded_path', '(', 'path', ')', ':', 'if', 'path', ':', 'result', '=', 'path', 'result', '=', 'os', '.', 'path', '.', 'expanduser', '(', 'result', ')', 'result', '=', 'os', '.', 'path', '.', 'expandvars', '(', 'result', ')', 'return', 'result', 'else', ':', 'return', 'None'] | Expand ~ and variables in a path. If path is not truthy, return None. | ['Expand', '~', 'and', 'variables', 'in', 'a', 'path', '.', 'If', 'path', 'is', 'not', 'truthy', 'return', 'None', '.'] | train | https://github.com/srsudar/eg/blob/96142a74f4416b4a7000c85032c070df713b849e/eg/config.py#L345-L353 |
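A short sketch of get_expanded_path above; the environment variable is made up for illustration.
import os
from eg.config import get_expanded_path
os.environ["EG_EXAMPLES"] = "/opt/eg/examples"
print(get_expanded_path("~/notes"))             # "~" expanded to the user's home directory
print(get_expanded_path("$EG_EXAMPLES/find"))   # -> /opt/eg/examples/find
print(get_expanded_path(""))                    # falsy input -> None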
3,640 | spotify/snakebite | snakebite/minicluster.py | MiniCluster.mkdir | def mkdir(self, src, extra_args=[]):
'''Create a directory'''
return self._getStdOutCmd([self._hadoop_cmd, 'fs', '-mkdir'] + extra_args + [self._full_hdfs_path(src)], True) | python | def mkdir(self, src, extra_args=[]):
'''Create a directory'''
return self._getStdOutCmd([self._hadoop_cmd, 'fs', '-mkdir'] + extra_args + [self._full_hdfs_path(src)], True) | ['def', 'mkdir', '(', 'self', ',', 'src', ',', 'extra_args', '=', '[', ']', ')', ':', 'return', 'self', '.', '_getStdOutCmd', '(', '[', 'self', '.', '_hadoop_cmd', ',', "'fs'", ',', "'-mkdir'", ']', '+', 'extra_args', '+', '[', 'self', '.', '_full_hdfs_path', '(', 'src', ')', ']', ',', 'True', ')'] | Create a directory | ['Create', 'a', 'directory'] | train | https://github.com/spotify/snakebite/blob/6a456e6100b0c1be66cc1f7f9d7f50494f369da3/snakebite/minicluster.py#L126-L128 |
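A hypothetical call against MiniCluster.mkdir above; how the MiniCluster instance is constructed is not shown in this row, so the setup line is an assumption.
from snakebite.minicluster import MiniCluster
cluster = MiniCluster("testfiles")                  # assumed constructor arguments
cluster.mkdir("/new_dir", extra_args=["-p"])        # shells out to: hadoop fs -mkdir -p <full hdfs path>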
3,641 | junzis/pyModeS | pyModeS/decoder/adsb.py | nic_v1 | def nic_v1(msg, NICs):
"""Calculate NIC, navigation integrity category, for ADS-B version 1
Args:
        msg (string): 28-character hexadecimal message string
NICs (int or string): NIC supplement
Returns:
int or string: Horizontal Radius of Containment
int or string: Vertical Protection Limit
"""
if typecode(msg) < 5 or typecode(msg) > 22:
raise RuntimeError(
"%s: Not a surface position message (5<TC<8), \
airborne position message (8<TC<19), \
or airborne position with GNSS height (20<TC<22)" % msg
)
tc = typecode(msg)
NIC = uncertainty.TC_NICv1_lookup[tc]
if isinstance(NIC, dict):
NIC = NIC[NICs]
try:
Rc = uncertainty.NICv1[NIC][NICs]['Rc']
VPL = uncertainty.NICv1[NIC][NICs]['VPL']
except KeyError:
Rc, VPL = uncertainty.NA, uncertainty.NA
return Rc, VPL | python | def nic_v1(msg, NICs):
"""Calculate NIC, navigation integrity category, for ADS-B version 1
Args:
        msg (string): 28-character hexadecimal message string
NICs (int or string): NIC supplement
Returns:
int or string: Horizontal Radius of Containment
int or string: Vertical Protection Limit
"""
if typecode(msg) < 5 or typecode(msg) > 22:
raise RuntimeError(
"%s: Not a surface position message (5<TC<8), \
airborne position message (8<TC<19), \
or airborne position with GNSS height (20<TC<22)" % msg
)
tc = typecode(msg)
NIC = uncertainty.TC_NICv1_lookup[tc]
if isinstance(NIC, dict):
NIC = NIC[NICs]
try:
Rc = uncertainty.NICv1[NIC][NICs]['Rc']
VPL = uncertainty.NICv1[NIC][NICs]['VPL']
except KeyError:
Rc, VPL = uncertainty.NA, uncertainty.NA
return Rc, VPL | ['def', 'nic_v1', '(', 'msg', ',', 'NICs', ')', ':', 'if', 'typecode', '(', 'msg', ')', '<', '5', 'or', 'typecode', '(', 'msg', ')', '>', '22', ':', 'raise', 'RuntimeError', '(', '"%s: Not a surface position message (5<TC<8), \\\n airborne position message (8<TC<19), \\\n or airborne position with GNSS height (20<TC<22)"', '%', 'msg', ')', 'tc', '=', 'typecode', '(', 'msg', ')', 'NIC', '=', 'uncertainty', '.', 'TC_NICv1_lookup', '[', 'tc', ']', 'if', 'isinstance', '(', 'NIC', ',', 'dict', ')', ':', 'NIC', '=', 'NIC', '[', 'NICs', ']', 'try', ':', 'Rc', '=', 'uncertainty', '.', 'NICv1', '[', 'NIC', ']', '[', 'NICs', ']', '[', "'Rc'", ']', 'VPL', '=', 'uncertainty', '.', 'NICv1', '[', 'NIC', ']', '[', 'NICs', ']', '[', "'VPL'", ']', 'except', 'KeyError', ':', 'Rc', ',', 'VPL', '=', 'uncertainty', '.', 'NA', ',', 'uncertainty', '.', 'NA', 'return', 'Rc', ',', 'VPL'] | Calculate NIC, navigation integrity category, for ADS-B version 1
Args:
msg (string): 28 bytes hexadecimal message string
NICs (int or string): NIC supplement
Returns:
int or string: Horizontal Radius of Containment
int or string: Vertical Protection Limit | ['Calculate', 'NIC', 'navigation', 'integrity', 'category', 'for', 'ADS', '-', 'B', 'version', '1'] | train | https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/decoder/adsb.py#L278-L308 |
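A usage sketch for nic_v1 above; the hex string is a sample airborne-position message (typecode 11) and NICs=0 is an arbitrary supplement value.
from pyModeS.decoder.adsb import nic_v1
msg = "8D40621D58C382D690C8AC2863A7"
Rc, VPL = nic_v1(msg, NICs=0)
print(Rc, VPL)   # horizontal containment radius and vertical protection limit, or NA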
3,642 | OiNutter/lean | lean/__init__.py | Lean.get_template | def get_template(file):
''' Lookup a template class for the given filename or file
extension. Return nil when no implementation is found.
'''
pattern = str(file).lower()
while len(pattern) and not Lean.is_registered(pattern):
pattern = os.path.basename(pattern)
pattern = re.sub(r'^[^.]*\.?','',pattern)
# Try to find a preferred engine.
preferred_klass = Lean.preferred_mappings[pattern] if Lean.preferred_mappings.has_key(pattern) else None
if preferred_klass:
return preferred_klass
# Fall back to the general list of mappings
klasses = Lean.template_mappings[pattern]
# Try to find an engine which is already loaded
template = None
for klass in klasses:
if hasattr(klass,'is_engine_initialized') and callable(klass.is_engine_initialized):
if klass.is_engine_initialized():
template = klass
break
if template:
return template
# Try each of the classes until one succeeds. If all of them fails,
# we'll raise the error of the first class.
first_failure = None
for klass in klasses:
try:
return klass
            except Exception as e:
if not first_failure:
first_failure = e
if first_failure:
raise Exception(first_failure) | python | def get_template(file):
''' Lookup a template class for the given filename or file
extension. Return nil when no implementation is found.
'''
pattern = str(file).lower()
while len(pattern) and not Lean.is_registered(pattern):
pattern = os.path.basename(pattern)
pattern = re.sub(r'^[^.]*\.?','',pattern)
# Try to find a preferred engine.
preferred_klass = Lean.preferred_mappings[pattern] if Lean.preferred_mappings.has_key(pattern) else None
if preferred_klass:
return preferred_klass
# Fall back to the general list of mappings
klasses = Lean.template_mappings[pattern]
# Try to find an engine which is already loaded
template = None
for klass in klasses:
if hasattr(klass,'is_engine_initialized') and callable(klass.is_engine_initialized):
if klass.is_engine_initialized():
template = klass
break
if template:
return template
# Try each of the classes until one succeeds. If all of them fails,
# we'll raise the error of the first class.
first_failure = None
for klass in klasses:
try:
return klass
            except Exception as e:
if not first_failure:
first_failure = e
if first_failure:
raise Exception(first_failure) | ['def', 'get_template', '(', 'file', ')', ':', 'pattern', '=', 'str', '(', 'file', ')', '.', 'lower', '(', ')', 'while', 'len', '(', 'pattern', ')', 'and', 'not', 'Lean', '.', 'is_registered', '(', 'pattern', ')', ':', 'pattern', '=', 'os', '.', 'path', '.', 'basename', '(', 'pattern', ')', 'pattern', '=', 're', '.', 'sub', '(', "r'^[^.]*\\.?'", ',', "''", ',', 'pattern', ')', '# Try to find a preferred engine.', 'preferred_klass', '=', 'Lean', '.', 'preferred_mappings', '[', 'pattern', ']', 'if', 'Lean', '.', 'preferred_mappings', '.', 'has_key', '(', 'pattern', ')', 'else', 'None', 'if', 'preferred_klass', ':', 'return', 'preferred_klass', '# Fall back to the general list of mappings', 'klasses', '=', 'Lean', '.', 'template_mappings', '[', 'pattern', ']', '# Try to find an engine which is already loaded', 'template', '=', 'None', 'for', 'klass', 'in', 'klasses', ':', 'if', 'hasattr', '(', 'klass', ',', "'is_engine_initialized'", ')', 'and', 'callable', '(', 'klass', '.', 'is_engine_initialized', ')', ':', 'if', 'klass', '.', 'is_engine_initialized', '(', ')', ':', 'template', '=', 'klass', 'break', 'if', 'template', ':', 'return', 'template', '# Try each of the classes until one succeeds. If all of them fails,', "# we'll raise the error of the first class.", 'first_failure', '=', 'None', 'for', 'klass', 'in', 'klasses', ':', 'try', ':', 'return', 'klass', 'except', 'Exception', ',', 'e', ':', 'if', 'not', 'first_failure', ':', 'first_failure', '=', 'e', 'if', 'first_failure', ':', 'raise', 'Exception', '(', 'first_failure', ')'] | Lookup a template class for the given filename or file
extension. Return nil when no implementation is found. | ['Lookup', 'a', 'template', 'class', 'for', 'the', 'given', 'filename', 'or', 'file', 'extension', '.', 'Return', 'nil', 'when', 'no', 'implementation', 'is', 'found', '.'] | train | https://github.com/OiNutter/lean/blob/5d251f923acd44265ed401de14a9ead6752c543f/lean/__init__.py#L61-L103 |
3,643 | user-cont/colin | colin/core/target.py | DockerfileTarget.labels | def labels(self):
"""
Get list of labels from the target instance.
:return: [str]
"""
if self._labels is None:
self._labels = self.instance.labels
return self._labels | python | def labels(self):
"""
Get list of labels from the target instance.
:return: [str]
"""
if self._labels is None:
self._labels = self.instance.labels
return self._labels | ['def', 'labels', '(', 'self', ')', ':', 'if', 'self', '.', '_labels', 'is', 'None', ':', 'self', '.', '_labels', '=', 'self', '.', 'instance', '.', 'labels', 'return', 'self', '.', '_labels'] | Get list of labels from the target instance.
:return: [str] | ['Get', 'list', 'of', 'labels', 'from', 'the', 'target', 'instance', '.'] | train | https://github.com/user-cont/colin/blob/00bb80e6e91522e15361935f813e8cf13d7e76dc/colin/core/target.py#L131-L139 |
3,644 | maxalbert/tohu | tohu/v6/set_special_methods.py | check_that_operator_can_be_applied_to_produces_items | def check_that_operator_can_be_applied_to_produces_items(op, g1, g2):
"""
Helper function to check that the operator `op` can be applied to items produced by g1 and g2.
"""
g1_tmp_copy = g1.spawn()
g2_tmp_copy = g2.spawn()
sample_item_1 = next(g1_tmp_copy)
sample_item_2 = next(g2_tmp_copy)
try:
op(sample_item_1, sample_item_2)
except TypeError:
raise TypeError(f"Operator '{op.__name__}' cannot be applied to items produced by {g1} and {g2} "
f"(which have type {type(sample_item_1)} and {type(sample_item_2)}, respectively)") | python | def check_that_operator_can_be_applied_to_produces_items(op, g1, g2):
"""
Helper function to check that the operator `op` can be applied to items produced by g1 and g2.
"""
g1_tmp_copy = g1.spawn()
g2_tmp_copy = g2.spawn()
sample_item_1 = next(g1_tmp_copy)
sample_item_2 = next(g2_tmp_copy)
try:
op(sample_item_1, sample_item_2)
except TypeError:
raise TypeError(f"Operator '{op.__name__}' cannot be applied to items produced by {g1} and {g2} "
f"(which have type {type(sample_item_1)} and {type(sample_item_2)}, respectively)") | ['def', 'check_that_operator_can_be_applied_to_produces_items', '(', 'op', ',', 'g1', ',', 'g2', ')', ':', 'g1_tmp_copy', '=', 'g1', '.', 'spawn', '(', ')', 'g2_tmp_copy', '=', 'g2', '.', 'spawn', '(', ')', 'sample_item_1', '=', 'next', '(', 'g1_tmp_copy', ')', 'sample_item_2', '=', 'next', '(', 'g2_tmp_copy', ')', 'try', ':', 'op', '(', 'sample_item_1', ',', 'sample_item_2', ')', 'except', 'TypeError', ':', 'raise', 'TypeError', '(', 'f"Operator \'{op.__name__}\' cannot be applied to items produced by {g1} and {g2} "', 'f"(which have type {type(sample_item_1)} and {type(sample_item_2)}, respectively)"', ')'] | Helper function to check that the operator `op` can be applied to items produced by g1 and g2. | ['Helper', 'function', 'to', 'check', 'that', 'the', 'operator', 'op', 'can', 'be', 'applied', 'to', 'items', 'produced', 'by', 'g1', 'and', 'g2', '.'] | train | https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/set_special_methods.py#L16-L28 |
3,645 | hackthefed/govtrack2csv | govtrack2csv/__init__.py | extract_subjects | def extract_subjects(bill):
"""
Return a list subject for legislation.
"""
logger.debug("Extracting Subjects")
subject_map = []
subjects = bill.get('subjects', [])
bill_id = bill.get('bill_id', None)
bill_type = bill.get('bill_type', None)
for sub in subjects:
subject_map.append((bill_id, bill_type, sub))
logger.debug("End Extractioning Subjects")
return subject_map | python | def extract_subjects(bill):
"""
Return a list subject for legislation.
"""
logger.debug("Extracting Subjects")
subject_map = []
subjects = bill.get('subjects', [])
bill_id = bill.get('bill_id', None)
bill_type = bill.get('bill_type', None)
for sub in subjects:
subject_map.append((bill_id, bill_type, sub))
logger.debug("End Extractioning Subjects")
return subject_map | ['def', 'extract_subjects', '(', 'bill', ')', ':', 'logger', '.', 'debug', '(', '"Extracting Subjects"', ')', 'subject_map', '=', '[', ']', 'subjects', '=', 'bill', '.', 'get', '(', "'subjects'", ',', '[', ']', ')', 'bill_id', '=', 'bill', '.', 'get', '(', "'bill_id'", ',', 'None', ')', 'bill_type', '=', 'bill', '.', 'get', '(', "'bill_type'", ',', 'None', ')', 'for', 'sub', 'in', 'subjects', ':', 'subject_map', '.', 'append', '(', '(', 'bill_id', ',', 'bill_type', ',', 'sub', ')', ')', 'logger', '.', 'debug', '(', '"End Extractioning Subjects"', ')', 'return', 'subject_map'] | Return a list subject for legislation. | ['Return', 'a', 'list', 'subject', 'for', 'legislation', '.'] | train | https://github.com/hackthefed/govtrack2csv/blob/db991f5fcd3dfda6e6d51fadd286cba983f493e5/govtrack2csv/__init__.py#L285-L300 |
3,646 | qacafe/cdrouter.py | cdrouter/configs.py | ConfigsService.bulk_delete | def bulk_delete(self, ids=None, filter=None, type=None, all=False): # pylint: disable=redefined-builtin
"""Bulk delete a set of configs.
:param ids: (optional) Int list of config IDs.
:param filter: (optional) String list of filters.
:param type: (optional) `union` or `inter` as string.
:param all: (optional) Apply to all if bool `True`.
"""
return self.service.bulk_delete(self.base, self.RESOURCE,
ids=ids, filter=filter, type=type, all=all) | python | def bulk_delete(self, ids=None, filter=None, type=None, all=False): # pylint: disable=redefined-builtin
"""Bulk delete a set of configs.
:param ids: (optional) Int list of config IDs.
:param filter: (optional) String list of filters.
:param type: (optional) `union` or `inter` as string.
:param all: (optional) Apply to all if bool `True`.
"""
return self.service.bulk_delete(self.base, self.RESOURCE,
ids=ids, filter=filter, type=type, all=all) | ['def', 'bulk_delete', '(', 'self', ',', 'ids', '=', 'None', ',', 'filter', '=', 'None', ',', 'type', '=', 'None', ',', 'all', '=', 'False', ')', ':', '# pylint: disable=redefined-builtin', 'return', 'self', '.', 'service', '.', 'bulk_delete', '(', 'self', '.', 'base', ',', 'self', '.', 'RESOURCE', ',', 'ids', '=', 'ids', ',', 'filter', '=', 'filter', ',', 'type', '=', 'type', ',', 'all', '=', 'all', ')'] | Bulk delete a set of configs.
:param ids: (optional) Int list of config IDs.
:param filter: (optional) String list of filters.
:param type: (optional) `union` or `inter` as string.
:param all: (optional) Apply to all if bool `True`. | ['Bulk', 'delete', 'a', 'set', 'of', 'configs', '.'] | train | https://github.com/qacafe/cdrouter.py/blob/aacf2c6ab0b987250f7b1892f4bba14bb2b7dbe5/cdrouter/configs.py#L381-L390 |
3,647 | ZELLMECHANIK-DRESDEN/dclab | dclab/isoelastics/__init__.py | Isoelastics.get | def get(self, col1, col2, method, channel_width, flow_rate=None,
viscosity=None, add_px_err=False, px_um=None):
"""Get isoelastics
Parameters
----------
col1: str
Name of the first feature of all isoelastics
(e.g. isoel[0][:,0])
col2: str
Name of the second feature of all isoelastics
(e.g. isoel[0][:,1])
method: str
The method used to compute the isoelastics
(must be one of `VALID_METHODS`).
channel_width: float
Channel width in µm
flow_rate: float or `None`
Flow rate through the channel in µl/s. If set to
`None`, the flow rate of the imported data will
be used (only do this if you do not need the
correct values for elastic moduli).
viscosity: float or `None`
Viscosity of the medium in mPa*s. If set to
`None`, the flow rate of the imported data will
be used (only do this if you do not need the
correct values for elastic moduli).
add_px_err: bool
If True, add pixelation errors according to
C. Herold (2017), https://arxiv.org/abs/1704.00572
px_um: float
Pixel size [µm], used for pixelation error computation
See Also
--------
dclab.features.emodulus.convert: conversion in-between
channel sizes and viscosities
dclab.features.emodulus.corrpix_deform_delta: pixelation
error that is applied to the deformation data
"""
if method not in VALID_METHODS:
validstr = ",".join(VALID_METHODS)
raise ValueError("`method` must be one of {}!".format(validstr))
for col in [col1, col2]:
if col not in dfn.scalar_feature_names:
raise ValueError("Not a valid feature name: {}".format(col))
if "isoelastics" not in self._data[method][col2][col1]:
msg = "No isoelastics matching {}, {}, {}".format(col1, col2,
method)
raise KeyError(msg)
isoel = self._data[method][col1][col2]["isoelastics"]
meta = self._data[method][col1][col2]["meta"]
if flow_rate is None:
flow_rate = meta[1]
if viscosity is None:
viscosity = meta[2]
isoel_ret = self.convert(isoel, col1, col2,
channel_width_in=meta[0],
channel_width_out=channel_width,
flow_rate_in=meta[1],
flow_rate_out=flow_rate,
viscosity_in=meta[2],
viscosity_out=viscosity,
inplace=False)
if add_px_err:
self.add_px_err(isoel=isoel_ret,
col1=col1,
col2=col2,
px_um=px_um,
inplace=True)
return isoel_ret | python | def get(self, col1, col2, method, channel_width, flow_rate=None,
viscosity=None, add_px_err=False, px_um=None):
"""Get isoelastics
Parameters
----------
col1: str
Name of the first feature of all isoelastics
(e.g. isoel[0][:,0])
col2: str
Name of the second feature of all isoelastics
(e.g. isoel[0][:,1])
method: str
The method used to compute the isoelastics
(must be one of `VALID_METHODS`).
channel_width: float
Channel width in µm
flow_rate: float or `None`
Flow rate through the channel in µl/s. If set to
`None`, the flow rate of the imported data will
be used (only do this if you do not need the
correct values for elastic moduli).
viscosity: float or `None`
Viscosity of the medium in mPa*s. If set to
`None`, the flow rate of the imported data will
be used (only do this if you do not need the
correct values for elastic moduli).
add_px_err: bool
If True, add pixelation errors according to
C. Herold (2017), https://arxiv.org/abs/1704.00572
px_um: float
Pixel size [µm], used for pixelation error computation
See Also
--------
dclab.features.emodulus.convert: conversion in-between
channel sizes and viscosities
dclab.features.emodulus.corrpix_deform_delta: pixelation
error that is applied to the deformation data
"""
if method not in VALID_METHODS:
validstr = ",".join(VALID_METHODS)
raise ValueError("`method` must be one of {}!".format(validstr))
for col in [col1, col2]:
if col not in dfn.scalar_feature_names:
raise ValueError("Not a valid feature name: {}".format(col))
if "isoelastics" not in self._data[method][col2][col1]:
msg = "No isoelastics matching {}, {}, {}".format(col1, col2,
method)
raise KeyError(msg)
isoel = self._data[method][col1][col2]["isoelastics"]
meta = self._data[method][col1][col2]["meta"]
if flow_rate is None:
flow_rate = meta[1]
if viscosity is None:
viscosity = meta[2]
isoel_ret = self.convert(isoel, col1, col2,
channel_width_in=meta[0],
channel_width_out=channel_width,
flow_rate_in=meta[1],
flow_rate_out=flow_rate,
viscosity_in=meta[2],
viscosity_out=viscosity,
inplace=False)
if add_px_err:
self.add_px_err(isoel=isoel_ret,
col1=col1,
col2=col2,
px_um=px_um,
inplace=True)
return isoel_ret | ['def', 'get', '(', 'self', ',', 'col1', ',', 'col2', ',', 'method', ',', 'channel_width', ',', 'flow_rate', '=', 'None', ',', 'viscosity', '=', 'None', ',', 'add_px_err', '=', 'False', ',', 'px_um', '=', 'None', ')', ':', 'if', 'method', 'not', 'in', 'VALID_METHODS', ':', 'validstr', '=', '","', '.', 'join', '(', 'VALID_METHODS', ')', 'raise', 'ValueError', '(', '"`method` must be one of {}!"', '.', 'format', '(', 'validstr', ')', ')', 'for', 'col', 'in', '[', 'col1', ',', 'col2', ']', ':', 'if', 'col', 'not', 'in', 'dfn', '.', 'scalar_feature_names', ':', 'raise', 'ValueError', '(', '"Not a valid feature name: {}"', '.', 'format', '(', 'col', ')', ')', 'if', '"isoelastics"', 'not', 'in', 'self', '.', '_data', '[', 'method', ']', '[', 'col2', ']', '[', 'col1', ']', ':', 'msg', '=', '"No isoelastics matching {}, {}, {}"', '.', 'format', '(', 'col1', ',', 'col2', ',', 'method', ')', 'raise', 'KeyError', '(', 'msg', ')', 'isoel', '=', 'self', '.', '_data', '[', 'method', ']', '[', 'col1', ']', '[', 'col2', ']', '[', '"isoelastics"', ']', 'meta', '=', 'self', '.', '_data', '[', 'method', ']', '[', 'col1', ']', '[', 'col2', ']', '[', '"meta"', ']', 'if', 'flow_rate', 'is', 'None', ':', 'flow_rate', '=', 'meta', '[', '1', ']', 'if', 'viscosity', 'is', 'None', ':', 'viscosity', '=', 'meta', '[', '2', ']', 'isoel_ret', '=', 'self', '.', 'convert', '(', 'isoel', ',', 'col1', ',', 'col2', ',', 'channel_width_in', '=', 'meta', '[', '0', ']', ',', 'channel_width_out', '=', 'channel_width', ',', 'flow_rate_in', '=', 'meta', '[', '1', ']', ',', 'flow_rate_out', '=', 'flow_rate', ',', 'viscosity_in', '=', 'meta', '[', '2', ']', ',', 'viscosity_out', '=', 'viscosity', ',', 'inplace', '=', 'False', ')', 'if', 'add_px_err', ':', 'self', '.', 'add_px_err', '(', 'isoel', '=', 'isoel_ret', ',', 'col1', '=', 'col1', ',', 'col2', '=', 'col2', ',', 'px_um', '=', 'px_um', ',', 'inplace', '=', 'True', ')', 'return', 'isoel_ret'] | Get isoelastics
Parameters
----------
col1: str
Name of the first feature of all isoelastics
(e.g. isoel[0][:,0])
col2: str
Name of the second feature of all isoelastics
(e.g. isoel[0][:,1])
method: str
The method used to compute the isoelastics
(must be one of `VALID_METHODS`).
channel_width: float
Channel width in µm
flow_rate: float or `None`
Flow rate through the channel in µl/s. If set to
`None`, the flow rate of the imported data will
be used (only do this if you do not need the
correct values for elastic moduli).
viscosity: float or `None`
Viscosity of the medium in mPa*s. If set to
`None`, the flow rate of the imported data will
be used (only do this if you do not need the
correct values for elastic moduli).
add_px_err: bool
If True, add pixelation errors according to
C. Herold (2017), https://arxiv.org/abs/1704.00572
px_um: float
Pixel size [µm], used for pixelation error computation
See Also
--------
dclab.features.emodulus.convert: conversion in-between
channel sizes and viscosities
dclab.features.emodulus.corrpix_deform_delta: pixelation
error that is applied to the deformation data | ['Get', 'isoelastics'] | train | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/isoelastics/__init__.py#L233-L310 |
3,648 | gbiggs/rtctree | rtctree/tree.py | RTCTree.is_zombie | def is_zombie(self, path):
'''Is the node pointed to by @ref path a zombie object?'''
node = self.get_node(path)
if not node:
return False
return node.is_zombie | python | def is_zombie(self, path):
'''Is the node pointed to by @ref path a zombie object?'''
node = self.get_node(path)
if not node:
return False
return node.is_zombie | ['def', 'is_zombie', '(', 'self', ',', 'path', ')', ':', 'node', '=', 'self', '.', 'get_node', '(', 'path', ')', 'if', 'not', 'node', ':', 'return', 'False', 'return', 'node', '.', 'is_zombie'] | Is the node pointed to by @ref path a zombie object? | ['Is', 'the', 'node', 'pointed', 'to', 'by'] | train | https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/tree.py#L240-L245 |
3,649 | rigetti/quantumflow | quantumflow/gates.py | join_gates | def join_gates(*gates: Gate) -> Gate:
"""Direct product of two gates. Qubit count is the sum of each gate's
bit count."""
vectors = [gate.vec for gate in gates]
vec = reduce(outer_product, vectors)
return Gate(vec.tensor, vec.qubits) | python | def join_gates(*gates: Gate) -> Gate:
"""Direct product of two gates. Qubit count is the sum of each gate's
bit count."""
vectors = [gate.vec for gate in gates]
vec = reduce(outer_product, vectors)
return Gate(vec.tensor, vec.qubits) | ['def', 'join_gates', '(', '*', 'gates', ':', 'Gate', ')', '->', 'Gate', ':', 'vectors', '=', '[', 'gate', '.', 'vec', 'for', 'gate', 'in', 'gates', ']', 'vec', '=', 'reduce', '(', 'outer_product', ',', 'vectors', ')', 'return', 'Gate', '(', 'vec', '.', 'tensor', ',', 'vec', '.', 'qubits', ')'] | Direct product of two gates. Qubit count is the sum of each gate's
bit count. | ['Direct', 'product', 'of', 'two', 'gates', '.', 'Qubit', 'count', 'is', 'the', 'sum', 'of', 'each', 'gate', 's', 'bit', 'count', '.'] | train | https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/gates.py#L63-L68 |
3,650 | softlayer/softlayer-python | SoftLayer/CLI/firewall/edit.py | get_formatted_rule | def get_formatted_rule(rule=None):
"""Helper to format the rule into a user friendly format.
:param dict rule: A dict containing one rule of the firewall
:returns: a formatted string that get be pushed into the editor
"""
rule = rule or {}
return ('action: %s\n'
'protocol: %s\n'
'source_ip_address: %s\n'
'source_ip_subnet_mask: %s\n'
'destination_ip_address: %s\n'
'destination_ip_subnet_mask: %s\n'
'destination_port_range_start: %s\n'
'destination_port_range_end: %s\n'
'version: %s\n'
% (rule.get('action', 'permit'),
rule.get('protocol', 'tcp'),
rule.get('sourceIpAddress', 'any'),
rule.get('sourceIpSubnetMask', '255.255.255.255'),
rule.get('destinationIpAddress', 'any'),
rule.get('destinationIpSubnetMask', '255.255.255.255'),
rule.get('destinationPortRangeStart', 1),
rule.get('destinationPortRangeEnd', 1),
rule.get('version', 4))) | python | def get_formatted_rule(rule=None):
"""Helper to format the rule into a user friendly format.
:param dict rule: A dict containing one rule of the firewall
:returns: a formatted string that get be pushed into the editor
"""
rule = rule or {}
return ('action: %s\n'
'protocol: %s\n'
'source_ip_address: %s\n'
'source_ip_subnet_mask: %s\n'
'destination_ip_address: %s\n'
'destination_ip_subnet_mask: %s\n'
'destination_port_range_start: %s\n'
'destination_port_range_end: %s\n'
'version: %s\n'
% (rule.get('action', 'permit'),
rule.get('protocol', 'tcp'),
rule.get('sourceIpAddress', 'any'),
rule.get('sourceIpSubnetMask', '255.255.255.255'),
rule.get('destinationIpAddress', 'any'),
rule.get('destinationIpSubnetMask', '255.255.255.255'),
rule.get('destinationPortRangeStart', 1),
rule.get('destinationPortRangeEnd', 1),
rule.get('version', 4))) | ['def', 'get_formatted_rule', '(', 'rule', '=', 'None', ')', ':', 'rule', '=', 'rule', 'or', '{', '}', 'return', '(', "'action: %s\\n'", "'protocol: %s\\n'", "'source_ip_address: %s\\n'", "'source_ip_subnet_mask: %s\\n'", "'destination_ip_address: %s\\n'", "'destination_ip_subnet_mask: %s\\n'", "'destination_port_range_start: %s\\n'", "'destination_port_range_end: %s\\n'", "'version: %s\\n'", '%', '(', 'rule', '.', 'get', '(', "'action'", ',', "'permit'", ')', ',', 'rule', '.', 'get', '(', "'protocol'", ',', "'tcp'", ')', ',', 'rule', '.', 'get', '(', "'sourceIpAddress'", ',', "'any'", ')', ',', 'rule', '.', 'get', '(', "'sourceIpSubnetMask'", ',', "'255.255.255.255'", ')', ',', 'rule', '.', 'get', '(', "'destinationIpAddress'", ',', "'any'", ')', ',', 'rule', '.', 'get', '(', "'destinationIpSubnetMask'", ',', "'255.255.255.255'", ')', ',', 'rule', '.', 'get', '(', "'destinationPortRangeStart'", ',', '1', ')', ',', 'rule', '.', 'get', '(', "'destinationPortRangeEnd'", ',', '1', ')', ',', 'rule', '.', 'get', '(', "'version'", ',', '4', ')', ')', ')'] | Helper to format the rule into a user friendly format.
:param dict rule: A dict containing one rule of the firewall
:returns: a formatted string that get be pushed into the editor | ['Helper', 'to', 'format', 'the', 'rule', 'into', 'a', 'user', 'friendly', 'format', '.'] | train | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/firewall/edit.py#L108-L132 |
3,651 | watson-developer-cloud/python-sdk | ibm_watson/language_translator_v3.py | LanguageTranslatorV3.translate | def translate(self, text, model_id=None, source=None, target=None,
**kwargs):
"""
Translate.
Translates the input text from the source language to the target language.
:param list[str] text: Input text in UTF-8 encoding. Multiple entries will result
in multiple translations in the response.
:param str model_id: A globally unique string that identifies the underlying model
that is used for translation.
:param str source: Translation source language code.
:param str target: Translation target language code.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if text is None:
raise ValueError('text must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('language_translator', 'V3', 'translate')
headers.update(sdk_headers)
params = {'version': self.version}
data = {
'text': text,
'model_id': model_id,
'source': source,
'target': target
}
url = '/v3/translate'
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response | python | def translate(self, text, model_id=None, source=None, target=None,
**kwargs):
"""
Translate.
Translates the input text from the source language to the target language.
:param list[str] text: Input text in UTF-8 encoding. Multiple entries will result
in multiple translations in the response.
:param str model_id: A globally unique string that identifies the underlying model
that is used for translation.
:param str source: Translation source language code.
:param str target: Translation target language code.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if text is None:
raise ValueError('text must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('language_translator', 'V3', 'translate')
headers.update(sdk_headers)
params = {'version': self.version}
data = {
'text': text,
'model_id': model_id,
'source': source,
'target': target
}
url = '/v3/translate'
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response | ['def', 'translate', '(', 'self', ',', 'text', ',', 'model_id', '=', 'None', ',', 'source', '=', 'None', ',', 'target', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', 'text', 'is', 'None', ':', 'raise', 'ValueError', '(', "'text must be provided'", ')', 'headers', '=', '{', '}', 'if', "'headers'", 'in', 'kwargs', ':', 'headers', '.', 'update', '(', 'kwargs', '.', 'get', '(', "'headers'", ')', ')', 'sdk_headers', '=', 'get_sdk_headers', '(', "'language_translator'", ',', "'V3'", ',', "'translate'", ')', 'headers', '.', 'update', '(', 'sdk_headers', ')', 'params', '=', '{', "'version'", ':', 'self', '.', 'version', '}', 'data', '=', '{', "'text'", ':', 'text', ',', "'model_id'", ':', 'model_id', ',', "'source'", ':', 'source', ',', "'target'", ':', 'target', '}', 'url', '=', "'/v3/translate'", 'response', '=', 'self', '.', 'request', '(', 'method', '=', "'POST'", ',', 'url', '=', 'url', ',', 'headers', '=', 'headers', ',', 'params', '=', 'params', ',', 'json', '=', 'data', ',', 'accept_json', '=', 'True', ')', 'return', 'response'] | Translate.
Translates the input text from the source language to the target language.
:param list[str] text: Input text in UTF-8 encoding. Multiple entries will result
in multiple translations in the response.
:param str model_id: A globally unique string that identifies the underlying model
that is used for translation.
:param str source: Translation source language code.
:param str target: Translation target language code.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse | ['Translate', '.'] | train | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/language_translator_v3.py#L110-L154 |
3,652 | minhhoit/yacms | yacms/twitter/managers.py | TweetManager.get_for | def get_for(self, query_type, value):
"""
Create a query and run it for the given arg if it doesn't exist, and
return the tweets for the query.
"""
from yacms.twitter.models import Query
lookup = {"type": query_type, "value": value}
query, created = Query.objects.get_or_create(**lookup)
if created:
query.run()
elif not query.interested:
query.interested = True
query.save()
return query.tweets.all() | python | def get_for(self, query_type, value):
"""
Create a query and run it for the given arg if it doesn't exist, and
return the tweets for the query.
"""
from yacms.twitter.models import Query
lookup = {"type": query_type, "value": value}
query, created = Query.objects.get_or_create(**lookup)
if created:
query.run()
elif not query.interested:
query.interested = True
query.save()
return query.tweets.all() | ['def', 'get_for', '(', 'self', ',', 'query_type', ',', 'value', ')', ':', 'from', 'yacms', '.', 'twitter', '.', 'models', 'import', 'Query', 'lookup', '=', '{', '"type"', ':', 'query_type', ',', '"value"', ':', 'value', '}', 'query', ',', 'created', '=', 'Query', '.', 'objects', '.', 'get_or_create', '(', '*', '*', 'lookup', ')', 'if', 'created', ':', 'query', '.', 'run', '(', ')', 'elif', 'not', 'query', '.', 'interested', ':', 'query', '.', 'interested', '=', 'True', 'query', '.', 'save', '(', ')', 'return', 'query', '.', 'tweets', '.', 'all', '(', ')'] | Create a query and run it for the given arg if it doesn't exist, and
return the tweets for the query. | ['Create', 'a', 'query', 'and', 'run', 'it', 'for', 'the', 'given', 'arg', 'if', 'it', 'doesn', 't', 'exist', 'and', 'return', 'the', 'tweets', 'for', 'the', 'query', '.'] | train | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/twitter/managers.py#L12-L25 |
3,653 | zarr-developers/zarr | zarr/convenience.py | open | def open(store=None, mode='a', **kwargs):
"""Convenience function to open a group or array using file-mode-like semantics.
Parameters
----------
store : MutableMapping or string, optional
Store or path to directory in file system or name of zip file.
mode : {'r', 'r+', 'a', 'w', 'w-'}, optional
Persistence mode: 'r' means read only (must exist); 'r+' means
read/write (must exist); 'a' means read/write (create if doesn't
exist); 'w' means create (overwrite if exists); 'w-' means create
(fail if exists).
**kwargs
Additional parameters are passed through to :func:`zarr.creation.open_array` or
:func:`zarr.hierarchy.open_group`.
Returns
-------
z : :class:`zarr.core.Array` or :class:`zarr.hierarchy.Group`
Array or group, depending on what exists in the given store.
See Also
--------
zarr.creation.open_array, zarr.hierarchy.open_group
Examples
--------
Storing data in a directory 'data/example.zarr' on the local file system::
>>> import zarr
>>> store = 'data/example.zarr'
>>> zw = zarr.open(store, mode='w', shape=100, dtype='i4') # open new array
>>> zw
<zarr.core.Array (100,) int32>
>>> za = zarr.open(store, mode='a') # open existing array for reading and writing
>>> za
<zarr.core.Array (100,) int32>
>>> zr = zarr.open(store, mode='r') # open existing array read-only
>>> zr
<zarr.core.Array (100,) int32 read-only>
>>> gw = zarr.open(store, mode='w') # open new group, overwriting previous data
>>> gw
<zarr.hierarchy.Group '/'>
>>> ga = zarr.open(store, mode='a') # open existing group for reading and writing
>>> ga
<zarr.hierarchy.Group '/'>
>>> gr = zarr.open(store, mode='r') # open existing group read-only
>>> gr
<zarr.hierarchy.Group '/' read-only>
"""
path = kwargs.get('path', None)
# handle polymorphic store arg
clobber = mode == 'w'
store = normalize_store_arg(store, clobber=clobber)
path = normalize_storage_path(path)
if mode in {'w', 'w-', 'x'}:
if 'shape' in kwargs:
return open_array(store, mode=mode, **kwargs)
else:
return open_group(store, mode=mode, **kwargs)
elif mode == 'a':
if contains_array(store, path):
return open_array(store, mode=mode, **kwargs)
elif contains_group(store, path):
return open_group(store, mode=mode, **kwargs)
elif 'shape' in kwargs:
return open_array(store, mode=mode, **kwargs)
else:
return open_group(store, mode=mode, **kwargs)
else:
if contains_array(store, path):
return open_array(store, mode=mode, **kwargs)
elif contains_group(store, path):
return open_group(store, mode=mode, **kwargs)
else:
err_path_not_found(path) | python | def open(store=None, mode='a', **kwargs):
"""Convenience function to open a group or array using file-mode-like semantics.
Parameters
----------
store : MutableMapping or string, optional
Store or path to directory in file system or name of zip file.
mode : {'r', 'r+', 'a', 'w', 'w-'}, optional
Persistence mode: 'r' means read only (must exist); 'r+' means
read/write (must exist); 'a' means read/write (create if doesn't
exist); 'w' means create (overwrite if exists); 'w-' means create
(fail if exists).
**kwargs
Additional parameters are passed through to :func:`zarr.creation.open_array` or
:func:`zarr.hierarchy.open_group`.
Returns
-------
z : :class:`zarr.core.Array` or :class:`zarr.hierarchy.Group`
Array or group, depending on what exists in the given store.
See Also
--------
zarr.creation.open_array, zarr.hierarchy.open_group
Examples
--------
Storing data in a directory 'data/example.zarr' on the local file system::
>>> import zarr
>>> store = 'data/example.zarr'
>>> zw = zarr.open(store, mode='w', shape=100, dtype='i4') # open new array
>>> zw
<zarr.core.Array (100,) int32>
>>> za = zarr.open(store, mode='a') # open existing array for reading and writing
>>> za
<zarr.core.Array (100,) int32>
>>> zr = zarr.open(store, mode='r') # open existing array read-only
>>> zr
<zarr.core.Array (100,) int32 read-only>
>>> gw = zarr.open(store, mode='w') # open new group, overwriting previous data
>>> gw
<zarr.hierarchy.Group '/'>
>>> ga = zarr.open(store, mode='a') # open existing group for reading and writing
>>> ga
<zarr.hierarchy.Group '/'>
>>> gr = zarr.open(store, mode='r') # open existing group read-only
>>> gr
<zarr.hierarchy.Group '/' read-only>
"""
path = kwargs.get('path', None)
# handle polymorphic store arg
clobber = mode == 'w'
store = normalize_store_arg(store, clobber=clobber)
path = normalize_storage_path(path)
if mode in {'w', 'w-', 'x'}:
if 'shape' in kwargs:
return open_array(store, mode=mode, **kwargs)
else:
return open_group(store, mode=mode, **kwargs)
elif mode == 'a':
if contains_array(store, path):
return open_array(store, mode=mode, **kwargs)
elif contains_group(store, path):
return open_group(store, mode=mode, **kwargs)
elif 'shape' in kwargs:
return open_array(store, mode=mode, **kwargs)
else:
return open_group(store, mode=mode, **kwargs)
else:
if contains_array(store, path):
return open_array(store, mode=mode, **kwargs)
elif contains_group(store, path):
return open_group(store, mode=mode, **kwargs)
else:
err_path_not_found(path) | ['def', 'open', '(', 'store', '=', 'None', ',', 'mode', '=', "'a'", ',', '*', '*', 'kwargs', ')', ':', 'path', '=', 'kwargs', '.', 'get', '(', "'path'", ',', 'None', ')', '# handle polymorphic store arg', 'clobber', '=', 'mode', '==', "'w'", 'store', '=', 'normalize_store_arg', '(', 'store', ',', 'clobber', '=', 'clobber', ')', 'path', '=', 'normalize_storage_path', '(', 'path', ')', 'if', 'mode', 'in', '{', "'w'", ',', "'w-'", ',', "'x'", '}', ':', 'if', "'shape'", 'in', 'kwargs', ':', 'return', 'open_array', '(', 'store', ',', 'mode', '=', 'mode', ',', '*', '*', 'kwargs', ')', 'else', ':', 'return', 'open_group', '(', 'store', ',', 'mode', '=', 'mode', ',', '*', '*', 'kwargs', ')', 'elif', 'mode', '==', "'a'", ':', 'if', 'contains_array', '(', 'store', ',', 'path', ')', ':', 'return', 'open_array', '(', 'store', ',', 'mode', '=', 'mode', ',', '*', '*', 'kwargs', ')', 'elif', 'contains_group', '(', 'store', ',', 'path', ')', ':', 'return', 'open_group', '(', 'store', ',', 'mode', '=', 'mode', ',', '*', '*', 'kwargs', ')', 'elif', "'shape'", 'in', 'kwargs', ':', 'return', 'open_array', '(', 'store', ',', 'mode', '=', 'mode', ',', '*', '*', 'kwargs', ')', 'else', ':', 'return', 'open_group', '(', 'store', ',', 'mode', '=', 'mode', ',', '*', '*', 'kwargs', ')', 'else', ':', 'if', 'contains_array', '(', 'store', ',', 'path', ')', ':', 'return', 'open_array', '(', 'store', ',', 'mode', '=', 'mode', ',', '*', '*', 'kwargs', ')', 'elif', 'contains_group', '(', 'store', ',', 'path', ')', ':', 'return', 'open_group', '(', 'store', ',', 'mode', '=', 'mode', ',', '*', '*', 'kwargs', ')', 'else', ':', 'err_path_not_found', '(', 'path', ')'] | Convenience function to open a group or array using file-mode-like semantics.
Parameters
----------
store : MutableMapping or string, optional
Store or path to directory in file system or name of zip file.
mode : {'r', 'r+', 'a', 'w', 'w-'}, optional
Persistence mode: 'r' means read only (must exist); 'r+' means
read/write (must exist); 'a' means read/write (create if doesn't
exist); 'w' means create (overwrite if exists); 'w-' means create
(fail if exists).
**kwargs
Additional parameters are passed through to :func:`zarr.creation.open_array` or
:func:`zarr.hierarchy.open_group`.
Returns
-------
z : :class:`zarr.core.Array` or :class:`zarr.hierarchy.Group`
Array or group, depending on what exists in the given store.
See Also
--------
zarr.creation.open_array, zarr.hierarchy.open_group
Examples
--------
Storing data in a directory 'data/example.zarr' on the local file system::
>>> import zarr
>>> store = 'data/example.zarr'
>>> zw = zarr.open(store, mode='w', shape=100, dtype='i4') # open new array
>>> zw
<zarr.core.Array (100,) int32>
>>> za = zarr.open(store, mode='a') # open existing array for reading and writing
>>> za
<zarr.core.Array (100,) int32>
>>> zr = zarr.open(store, mode='r') # open existing array read-only
>>> zr
<zarr.core.Array (100,) int32 read-only>
>>> gw = zarr.open(store, mode='w') # open new group, overwriting previous data
>>> gw
<zarr.hierarchy.Group '/'>
>>> ga = zarr.open(store, mode='a') # open existing group for reading and writing
>>> ga
<zarr.hierarchy.Group '/'>
>>> gr = zarr.open(store, mode='r') # open existing group read-only
>>> gr
<zarr.hierarchy.Group '/' read-only> | ['Convenience', 'function', 'to', 'open', 'a', 'group', 'or', 'array', 'using', 'file', '-', 'mode', '-', 'like', 'semantics', '.'] | train | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/convenience.py#L21-L102 |
3,654 | GPflow/GPflow | gpflow/multioutput/conditionals.py | independent_interdomain_conditional | def independent_interdomain_conditional(Kmn, Kmm, Knn, f, *, full_cov=False, full_output_cov=False,
q_sqrt=None, white=False):
"""
The inducing outputs live in the g-space (R^L).
Interdomain conditional calculation.
:param Kmn: M x L x N x P
:param Kmm: L x M x M
:param Knn: N x P or N x N or P x N x N or N x P x N x P
:param f: data matrix, M x L
:param q_sqrt: L x M x M or M x L
:param full_cov: calculate covariance between inputs
:param full_output_cov: calculate covariance between outputs
:param white: use whitened representation
:return:
- mean: N x P
- variance: N x P, N x P x P, P x N x N, N x P x N x P
"""
logger.debug("independent_interdomain_conditional")
M, L, N, P = [tf.shape(Kmn)[i] for i in range(Kmn.shape.ndims)]
Lm = tf.cholesky(Kmm) # L x M x M
# Compute the projection matrix A
Kmn = tf.reshape(tf.transpose(Kmn, (1, 0, 2, 3)), (L, M, N * P))
A = tf.matrix_triangular_solve(Lm, Kmn, lower=True) # L x M x M * L x M x NP -> L x M x NP
Ar = tf.reshape(A, (L, M, N, P))
# compute the covariance due to the conditioning
if full_cov and full_output_cov:
fvar = Knn - tf.tensordot(Ar, Ar, [[0, 1], [0, 1]]) # N x P x N x P
elif full_cov and not full_output_cov:
At = tf.reshape(tf.transpose(Ar), (P, N, M * L)) # P x N x ML
fvar = Knn - tf.matmul(At, At, transpose_b=True) # P x N x N
elif not full_cov and full_output_cov:
At = tf.reshape(tf.transpose(Ar, [2, 3, 1, 0]), (N, P, M * L)) # N x P x ML
fvar = Knn - tf.matmul(At, At, transpose_b=True) # N x P x P
elif not full_cov and not full_output_cov:
fvar = Knn - tf.reshape(tf.reduce_sum(tf.square(A), [0, 1]), (N, P)) # Knn: N x P
# another backsubstitution in the unwhitened case
if not white:
A = tf.matrix_triangular_solve(Lm, Ar) # L x M x M * L x M x NP -> L x M x NP
Ar = tf.reshape(A, (L, M, N, P))
fmean = tf.tensordot(Ar, f, [[1, 0], [0, 1]]) # N x P
if q_sqrt is not None:
if q_sqrt.shape.ndims == 3:
Lf = tf.matrix_band_part(q_sqrt, -1, 0) # L x M x M
LTA = tf.matmul(Lf, A, transpose_a=True) # L x M x M * L x M x NP -> L x M x NP
else: # q_sqrt M x L
LTA = (A * tf.transpose(q_sqrt)[..., None]) # L x M x NP
if full_cov and full_output_cov:
LTAr = tf.reshape(LTA, (L * M, N * P))
fvar = fvar + tf.reshape(tf.matmul(LTAr, LTAr, transpose_a=True), (N, P, N, P))
elif full_cov and not full_output_cov:
LTAr = tf.transpose(tf.reshape(LTA, (L * M, N, P)), [2, 0, 1]) # P x LM x N
fvar = fvar + tf.matmul(LTAr, LTAr, transpose_a=True) # P x N x N
elif not full_cov and full_output_cov:
LTAr = tf.transpose(tf.reshape(LTA, (L * M, N, P)), [1, 0, 2]) # N x LM x P
fvar = fvar + tf.matmul(LTAr, LTAr, transpose_a=True) # N x P x P
elif not full_cov and not full_output_cov:
fvar = fvar + tf.reshape(tf.reduce_sum(tf.square(LTA), (0, 1)), (N, P))
return fmean, fvar | python | def independent_interdomain_conditional(Kmn, Kmm, Knn, f, *, full_cov=False, full_output_cov=False,
q_sqrt=None, white=False):
"""
The inducing outputs live in the g-space (R^L).
Interdomain conditional calculation.
:param Kmn: M x L x N x P
:param Kmm: L x M x M
:param Knn: N x P or N x N or P x N x N or N x P x N x P
:param f: data matrix, M x L
:param q_sqrt: L x M x M or M x L
:param full_cov: calculate covariance between inputs
:param full_output_cov: calculate covariance between outputs
:param white: use whitened representation
:return:
- mean: N x P
- variance: N x P, N x P x P, P x N x N, N x P x N x P
"""
logger.debug("independent_interdomain_conditional")
M, L, N, P = [tf.shape(Kmn)[i] for i in range(Kmn.shape.ndims)]
Lm = tf.cholesky(Kmm) # L x M x M
# Compute the projection matrix A
Kmn = tf.reshape(tf.transpose(Kmn, (1, 0, 2, 3)), (L, M, N * P))
A = tf.matrix_triangular_solve(Lm, Kmn, lower=True) # L x M x M * L x M x NP -> L x M x NP
Ar = tf.reshape(A, (L, M, N, P))
# compute the covariance due to the conditioning
if full_cov and full_output_cov:
fvar = Knn - tf.tensordot(Ar, Ar, [[0, 1], [0, 1]]) # N x P x N x P
elif full_cov and not full_output_cov:
At = tf.reshape(tf.transpose(Ar), (P, N, M * L)) # P x N x ML
fvar = Knn - tf.matmul(At, At, transpose_b=True) # P x N x N
elif not full_cov and full_output_cov:
At = tf.reshape(tf.transpose(Ar, [2, 3, 1, 0]), (N, P, M * L)) # N x P x ML
fvar = Knn - tf.matmul(At, At, transpose_b=True) # N x P x P
elif not full_cov and not full_output_cov:
fvar = Knn - tf.reshape(tf.reduce_sum(tf.square(A), [0, 1]), (N, P)) # Knn: N x P
# another backsubstitution in the unwhitened case
if not white:
A = tf.matrix_triangular_solve(Lm, Ar) # L x M x M * L x M x NP -> L x M x NP
Ar = tf.reshape(A, (L, M, N, P))
fmean = tf.tensordot(Ar, f, [[1, 0], [0, 1]]) # N x P
if q_sqrt is not None:
if q_sqrt.shape.ndims == 3:
Lf = tf.matrix_band_part(q_sqrt, -1, 0) # L x M x M
LTA = tf.matmul(Lf, A, transpose_a=True) # L x M x M * L x M x NP -> L x M x NP
else: # q_sqrt M x L
LTA = (A * tf.transpose(q_sqrt)[..., None]) # L x M x NP
if full_cov and full_output_cov:
LTAr = tf.reshape(LTA, (L * M, N * P))
fvar = fvar + tf.reshape(tf.matmul(LTAr, LTAr, transpose_a=True), (N, P, N, P))
elif full_cov and not full_output_cov:
LTAr = tf.transpose(tf.reshape(LTA, (L * M, N, P)), [2, 0, 1]) # P x LM x N
fvar = fvar + tf.matmul(LTAr, LTAr, transpose_a=True) # P x N x N
elif not full_cov and full_output_cov:
LTAr = tf.transpose(tf.reshape(LTA, (L * M, N, P)), [1, 0, 2]) # N x LM x P
fvar = fvar + tf.matmul(LTAr, LTAr, transpose_a=True) # N x P x P
elif not full_cov and not full_output_cov:
fvar = fvar + tf.reshape(tf.reduce_sum(tf.square(LTA), (0, 1)), (N, P))
return fmean, fvar | ['def', 'independent_interdomain_conditional', '(', 'Kmn', ',', 'Kmm', ',', 'Knn', ',', 'f', ',', '*', ',', 'full_cov', '=', 'False', ',', 'full_output_cov', '=', 'False', ',', 'q_sqrt', '=', 'None', ',', 'white', '=', 'False', ')', ':', 'logger', '.', 'debug', '(', '"independent_interdomain_conditional"', ')', 'M', ',', 'L', ',', 'N', ',', 'P', '=', '[', 'tf', '.', 'shape', '(', 'Kmn', ')', '[', 'i', ']', 'for', 'i', 'in', 'range', '(', 'Kmn', '.', 'shape', '.', 'ndims', ')', ']', 'Lm', '=', 'tf', '.', 'cholesky', '(', 'Kmm', ')', '# L x M x M', '# Compute the projection matrix A', 'Kmn', '=', 'tf', '.', 'reshape', '(', 'tf', '.', 'transpose', '(', 'Kmn', ',', '(', '1', ',', '0', ',', '2', ',', '3', ')', ')', ',', '(', 'L', ',', 'M', ',', 'N', '*', 'P', ')', ')', 'A', '=', 'tf', '.', 'matrix_triangular_solve', '(', 'Lm', ',', 'Kmn', ',', 'lower', '=', 'True', ')', '# L x M x M * L x M x NP -> L x M x NP', 'Ar', '=', 'tf', '.', 'reshape', '(', 'A', ',', '(', 'L', ',', 'M', ',', 'N', ',', 'P', ')', ')', '# compute the covariance due to the conditioning', 'if', 'full_cov', 'and', 'full_output_cov', ':', 'fvar', '=', 'Knn', '-', 'tf', '.', 'tensordot', '(', 'Ar', ',', 'Ar', ',', '[', '[', '0', ',', '1', ']', ',', '[', '0', ',', '1', ']', ']', ')', '# N x P x N x P', 'elif', 'full_cov', 'and', 'not', 'full_output_cov', ':', 'At', '=', 'tf', '.', 'reshape', '(', 'tf', '.', 'transpose', '(', 'Ar', ')', ',', '(', 'P', ',', 'N', ',', 'M', '*', 'L', ')', ')', '# P x N x ML', 'fvar', '=', 'Knn', '-', 'tf', '.', 'matmul', '(', 'At', ',', 'At', ',', 'transpose_b', '=', 'True', ')', '# P x N x N', 'elif', 'not', 'full_cov', 'and', 'full_output_cov', ':', 'At', '=', 'tf', '.', 'reshape', '(', 'tf', '.', 'transpose', '(', 'Ar', ',', '[', '2', ',', '3', ',', '1', ',', '0', ']', ')', ',', '(', 'N', ',', 'P', ',', 'M', '*', 'L', ')', ')', '# N x P x ML', 'fvar', '=', 'Knn', '-', 'tf', '.', 'matmul', '(', 'At', ',', 'At', ',', 'transpose_b', '=', 'True', ')', '# N x P x P', 'elif', 'not', 'full_cov', 'and', 'not', 'full_output_cov', ':', 'fvar', '=', 'Knn', '-', 'tf', '.', 'reshape', '(', 'tf', '.', 'reduce_sum', '(', 'tf', '.', 'square', '(', 'A', ')', ',', '[', '0', ',', '1', ']', ')', ',', '(', 'N', ',', 'P', ')', ')', '# Knn: N x P', '# another backsubstitution in the unwhitened case', 'if', 'not', 'white', ':', 'A', '=', 'tf', '.', 'matrix_triangular_solve', '(', 'Lm', ',', 'Ar', ')', '# L x M x M * L x M x NP -> L x M x NP', 'Ar', '=', 'tf', '.', 'reshape', '(', 'A', ',', '(', 'L', ',', 'M', ',', 'N', ',', 'P', ')', ')', 'fmean', '=', 'tf', '.', 'tensordot', '(', 'Ar', ',', 'f', ',', '[', '[', '1', ',', '0', ']', ',', '[', '0', ',', '1', ']', ']', ')', '# N x P', 'if', 'q_sqrt', 'is', 'not', 'None', ':', 'if', 'q_sqrt', '.', 'shape', '.', 'ndims', '==', '3', ':', 'Lf', '=', 'tf', '.', 'matrix_band_part', '(', 'q_sqrt', ',', '-', '1', ',', '0', ')', '# L x M x M', 'LTA', '=', 'tf', '.', 'matmul', '(', 'Lf', ',', 'A', ',', 'transpose_a', '=', 'True', ')', '# L x M x M * L x M x NP -> L x M x NP', 'else', ':', '# q_sqrt M x L', 'LTA', '=', '(', 'A', '*', 'tf', '.', 'transpose', '(', 'q_sqrt', ')', '[', '...', ',', 'None', ']', ')', '# L x M x NP', 'if', 'full_cov', 'and', 'full_output_cov', ':', 'LTAr', '=', 'tf', '.', 'reshape', '(', 'LTA', ',', '(', 'L', '*', 'M', ',', 'N', '*', 'P', ')', ')', 'fvar', '=', 'fvar', '+', 'tf', '.', 'reshape', '(', 'tf', '.', 'matmul', '(', 'LTAr', ',', 'LTAr', ',', 'transpose_a', '=', 'True', ')', ',', '(', 'N', ',', 'P', ',', 'N', ',', 'P', ')', ')', 
'elif', 'full_cov', 'and', 'not', 'full_output_cov', ':', 'LTAr', '=', 'tf', '.', 'transpose', '(', 'tf', '.', 'reshape', '(', 'LTA', ',', '(', 'L', '*', 'M', ',', 'N', ',', 'P', ')', ')', ',', '[', '2', ',', '0', ',', '1', ']', ')', '# P x LM x N', 'fvar', '=', 'fvar', '+', 'tf', '.', 'matmul', '(', 'LTAr', ',', 'LTAr', ',', 'transpose_a', '=', 'True', ')', '# P x N x N', 'elif', 'not', 'full_cov', 'and', 'full_output_cov', ':', 'LTAr', '=', 'tf', '.', 'transpose', '(', 'tf', '.', 'reshape', '(', 'LTA', ',', '(', 'L', '*', 'M', ',', 'N', ',', 'P', ')', ')', ',', '[', '1', ',', '0', ',', '2', ']', ')', '# N x LM x P', 'fvar', '=', 'fvar', '+', 'tf', '.', 'matmul', '(', 'LTAr', ',', 'LTAr', ',', 'transpose_a', '=', 'True', ')', '# N x P x P', 'elif', 'not', 'full_cov', 'and', 'not', 'full_output_cov', ':', 'fvar', '=', 'fvar', '+', 'tf', '.', 'reshape', '(', 'tf', '.', 'reduce_sum', '(', 'tf', '.', 'square', '(', 'LTA', ')', ',', '(', '0', ',', '1', ')', ')', ',', '(', 'N', ',', 'P', ')', ')', 'return', 'fmean', ',', 'fvar'] | The inducing outputs live in the g-space (R^L).
Interdomain conditional calculation.
:param Kmn: M x L x N x P
:param Kmm: L x M x M
:param Knn: N x P or N x N or P x N x N or N x P x N x P
:param f: data matrix, M x L
:param q_sqrt: L x M x M or M x L
:param full_cov: calculate covariance between inputs
:param full_output_cov: calculate covariance between outputs
:param white: use whitened representation
:return:
- mean: N x P
- variance: N x P, N x P x P, P x N x N, N x P x N x P | ['The', 'inducing', 'outputs', 'live', 'in', 'the', 'g', '-', 'space', '(', 'R^L', ')', '.', 'Interdomain', 'conditional', 'calculation', '.'] | train | https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/multioutput/conditionals.py#L274-L339 |
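A minimal call sketch for independent_interdomain_conditional, assuming GPflow 1.x on TensorFlow 1.x graph mode; the import path is inferred from the func_code_url above, and every tensor value is made-up toy data with the documented shapes.
import numpy as np
import tensorflow as tf
from gpflow.multioutput.conditionals import independent_interdomain_conditional

M, L, N, P = 4, 2, 6, 3
Kmn = tf.constant(0.1 * np.random.rand(M, L, N, P))   # M x L x N x P
Kmm = tf.constant(np.stack([np.eye(M)] * L))          # L x M x M, trivially positive-definite
Knn = tf.constant(np.random.rand(N, P) + 1.0)         # marginal prior variances, N x P
f = tf.constant(np.random.rand(M, L))                 # inducing outputs, M x L
fmean, fvar = independent_interdomain_conditional(
    Kmn, Kmm, Knn, f, full_cov=False, full_output_cov=False, white=True)
with tf.Session() as sess:
    mean, var = sess.run([fmean, fvar])               # both come back as N x P arrays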
3,655 | msmbuilder/msmbuilder | msmbuilder/tpt/hub.py | fraction_visited | def fraction_visited(source, sink, waypoint, msm):
"""
Calculate the fraction of times a walker on `tprob` going from `sources`
to `sinks` will travel through the set of states `waypoints` en route.
Computes the conditional committors q^{ABC^+} and uses them to find the
fraction of paths mentioned above.
Note that in the notation of Dickson et al. this computes h_c(A,B), with
sources = A
sinks = B
waypoint = C
Parameters
----------
source : int
The index of the source state
sink : int
The index of the sink state
waypoint : int
The index of the intermediate state
msm : msmbuilder.MarkovStateModel
MSM to analyze.
Returns
-------
fraction_visited : float
The fraction of times a walker going from `sources` -> `sinks` stops
by `waypoints` on its way.
See Also
--------
msmbuilder.tpt.conditional_committors
Calculate the probability of visiting a waypoint while on a path
between a source and sink.
msmbuilder.tpt.hub_scores : function
Compute the 'hub score', the weighted fraction of visits for an
entire network.
References
----------
.. [1] Dickson & Brooks (2012), J. Chem. Theory Comput., 8, 3044-3052.
"""
for_committors = committors([source], [sink], msm)
cond_committors = conditional_committors(source, sink, waypoint, msm)
if hasattr(msm, 'all_transmats_'):
frac_visited = np.zeros((msm.n_states,))
for i, tprob in enumerate(msm.all_transmats_):
frac_visited[i] = _fraction_visited(source, sink, waypoint,
tprob, for_committors,
cond_committors)
return np.median(frac_visited, axis=0)
return _fraction_visited(source, sink, waypoint, msm.transmat_,
for_committors, cond_committors) | python | def fraction_visited(source, sink, waypoint, msm):
"""
Calculate the fraction of times a walker on `tprob` going from `sources`
to `sinks` will travel through the set of states `waypoints` en route.
Computes the conditional committors q^{ABC^+} and uses them to find the
fraction of paths mentioned above.
Note that in the notation of Dickson et al. this computes h_c(A,B), with
sources = A
sinks = B
waypoint = C
Parameters
----------
source : int
The index of the source state
sink : int
The index of the sink state
waypoint : int
The index of the intermediate state
msm : msmbuilder.MarkovStateModel
MSM to analyze.
Returns
-------
fraction_visited : float
The fraction of times a walker going from `sources` -> `sinks` stops
by `waypoints` on its way.
See Also
--------
msmbuilder.tpt.conditional_committors
Calculate the probability of visiting a waypoint while on a path
between a source and sink.
msmbuilder.tpt.hub_scores : function
Compute the 'hub score', the weighted fraction of visits for an
entire network.
References
----------
.. [1] Dickson & Brooks (2012), J. Chem. Theory Comput., 8, 3044-3052.
"""
for_committors = committors([source], [sink], msm)
cond_committors = conditional_committors(source, sink, waypoint, msm)
if hasattr(msm, 'all_transmats_'):
frac_visited = np.zeros((msm.n_states,))
for i, tprob in enumerate(msm.all_transmats_):
frac_visited[i] = _fraction_visited(source, sink, waypoint,
tprob, for_committors,
cond_committors)
return np.median(frac_visited, axis=0)
return _fraction_visited(source, sink, waypoint, msm.transmat_,
for_committors, cond_committors) | ['def', 'fraction_visited', '(', 'source', ',', 'sink', ',', 'waypoint', ',', 'msm', ')', ':', 'for_committors', '=', 'committors', '(', '[', 'source', ']', ',', '[', 'sink', ']', ',', 'msm', ')', 'cond_committors', '=', 'conditional_committors', '(', 'source', ',', 'sink', ',', 'waypoint', ',', 'msm', ')', 'if', 'hasattr', '(', 'msm', ',', "'all_transmats_'", ')', ':', 'frac_visited', '=', 'np', '.', 'zeros', '(', '(', 'msm', '.', 'n_states', ',', ')', ')', 'for', 'i', ',', 'tprob', 'in', 'enumerate', '(', 'msm', '.', 'all_transmats_', ')', ':', 'frac_visited', '[', 'i', ']', '=', '_fraction_visited', '(', 'source', ',', 'sink', ',', 'waypoint', ',', 'msm', '.', 'transmat_', ',', 'for_committors', ',', 'cond_committors', ')', 'return', 'np', '.', 'median', '(', 'frac_visited', ',', 'axis', '=', '0', ')', 'return', '_fraction_visited', '(', 'source', ',', 'sink', ',', 'waypoint', ',', 'msm', '.', 'transmat_', ',', 'for_committors', ',', 'cond_committors', ')'] | Calculate the fraction of times a walker on `tprob` going from `sources`
to `sinks` will travel through the set of states `waypoints` en route.
Computes the conditional committors q^{ABC^+} and uses them to find the
fraction of paths mentioned above.
Note that in the notation of Dickson et al. this computes h_c(A,B), with
sources = A
sinks = B
waypoint = C
Parameters
----------
source : int
The index of the source state
sink : int
The index of the sink state
waypoint : int
The index of the intermediate state
msm : msmbuilder.MarkovStateModel
MSM to analyze.
Returns
-------
fraction_visited : float
The fraction of times a walker going from `sources` -> `sinks` stops
by `waypoints` on its way.
See Also
--------
msmbuilder.tpt.conditional_committors
Calculate the probability of visiting a waypoint while on a path
between a source and sink.
msmbuilder.tpt.hub_scores : function
Compute the 'hub score', the weighted fraction of visits for an
entire network.
References
----------
.. [1] Dickson & Brooks (2012), J. Chem. Theory Comput., 8, 3044-3052. | ['Calculate', 'the', 'fraction', 'of', 'times', 'a', 'walker', 'on', 'tprob', 'going', 'from', 'sources', 'to', 'sinks', 'will', 'travel', 'through', 'the', 'set', 'of', 'states', 'waypoints', 'en', 'route', '.'] | train | https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/tpt/hub.py#L27-L83 |
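A hedged usage sketch for fraction_visited; it assumes msmbuilder is installed, that MarkovStateModel and fraction_visited are importable from msmbuilder.msm and msmbuilder.tpt, and the discrete trajectory below is toy data only.
import numpy as np
from msmbuilder.msm import MarkovStateModel
from msmbuilder.tpt import fraction_visited

trajectory = [np.random.randint(4, size=2000)]   # toy discretized trajectory over 4 states
msm = MarkovStateModel(lag_time=1)
msm.fit(trajectory)
frac = fraction_visited(source=0, sink=3, waypoint=1, msm=msm)
print('fraction of 0 -> 3 paths visiting state 1:', frac)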
3,656 | striglia/stockfighter | stockfighter/stockfighter.py | Stockfighter.status_for_order | def status_for_order(self, order_id, stock):
"""Status For An Existing Order
https://starfighter.readme.io/docs/status-for-an-existing-order
"""
url_fragment = 'venues/{venue}/stocks/{stock}/orders/{order_id}'.format(
venue=self.venue,
stock=stock,
order_id=order_id,
)
url = urljoin(self.base_url, url_fragment)
return self.session.get(url).json() | python | def status_for_order(self, order_id, stock):
"""Status For An Existing Order
https://starfighter.readme.io/docs/status-for-an-existing-order
"""
url_fragment = 'venues/{venue}/stocks/{stock}/orders/{order_id}'.format(
venue=self.venue,
stock=stock,
order_id=order_id,
)
url = urljoin(self.base_url, url_fragment)
return self.session.get(url).json() | ['def', 'status_for_order', '(', 'self', ',', 'order_id', ',', 'stock', ')', ':', 'url_fragment', '=', "'venues/{venue}/stocks/{stock}/orders/{order_id}'", '.', 'format', '(', 'venue', '=', 'self', '.', 'venue', ',', 'stock', '=', 'stock', ',', 'order_id', '=', 'order_id', ',', ')', 'url', '=', 'urljoin', '(', 'self', '.', 'base_url', ',', 'url_fragment', ')', 'return', 'self', '.', 'session', '.', 'get', '(', 'url', ')', '.', 'json', '(', ')'] | Status For An Existing Order
https://starfighter.readme.io/docs/status-for-an-existing-order | ['Status', 'For', 'An', 'Existing', 'Order'] | train | https://github.com/striglia/stockfighter/blob/df908f5919d6f861601cd00c906a049d04253d47/stockfighter/stockfighter.py#L94-L105 |
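A usage sketch for status_for_order; the venue, account, and order id are placeholders, and the constructor call follows the project README as an assumption (the API key is expected to come from the environment).
from stockfighter import Stockfighter

client = Stockfighter(venue='TESTEX', account='EXB123456')   # placeholder credentials
status = client.status_for_order(order_id=1234, stock='FOOBAR')
print(status.get('open'), status.get('totalFilled'))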
3,657 | bigchaindb/bigchaindb | bigchaindb/backend/connection.py | Connection.connect | def connect(self):
"""Try to connect to the database.
Raises:
:exc:`~ConnectionError`: If the connection to the database
fails.
"""
attempt = 0
for i in self.max_tries_counter:
attempt += 1
try:
self._conn = self._connect()
except ConnectionError as exc:
logger.warning('Attempt %s/%s. Connection to %s:%s failed after %sms.',
attempt, self.max_tries if self.max_tries != 0 else '∞',
self.host, self.port, self.connection_timeout)
if attempt == self.max_tries:
logger.critical('Cannot connect to the Database. Giving up.')
raise ConnectionError() from exc
else:
break | python | def connect(self):
"""Try to connect to the database.
Raises:
:exc:`~ConnectionError`: If the connection to the database
fails.
"""
attempt = 0
for i in self.max_tries_counter:
attempt += 1
try:
self._conn = self._connect()
except ConnectionError as exc:
logger.warning('Attempt %s/%s. Connection to %s:%s failed after %sms.',
attempt, self.max_tries if self.max_tries != 0 else '∞',
self.host, self.port, self.connection_timeout)
if attempt == self.max_tries:
logger.critical('Cannot connect to the Database. Giving up.')
raise ConnectionError() from exc
else:
break | ['def', 'connect', '(', 'self', ')', ':', 'attempt', '=', '0', 'for', 'i', 'in', 'self', '.', 'max_tries_counter', ':', 'attempt', '+=', '1', 'try', ':', 'self', '.', '_conn', '=', 'self', '.', '_connect', '(', ')', 'except', 'ConnectionError', 'as', 'exc', ':', 'logger', '.', 'warning', '(', "'Attempt %s/%s. Connection to %s:%s failed after %sms.'", ',', 'attempt', ',', 'self', '.', 'max_tries', 'if', 'self', '.', 'max_tries', '!=', '0', 'else', "'∞',", '', 'self', '.', 'host', ',', 'self', '.', 'port', ',', 'self', '.', 'connection_timeout', ')', 'if', 'attempt', '==', 'self', '.', 'max_tries', ':', 'logger', '.', 'critical', '(', "'Cannot connect to the Database. Giving up.'", ')', 'raise', 'ConnectionError', '(', ')', 'from', 'exc', 'else', ':', 'break'] | Try to connect to the database.
Raises:
:exc:`~ConnectionError`: If the connection to the database
fails. | ['Try', 'to', 'connect', 'to', 'the', 'database', '.'] | train | https://github.com/bigchaindb/bigchaindb/blob/835fdfcf598918f76139e3b88ee33dd157acaaa7/bigchaindb/backend/connection.py#L148-L169 |
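A sketch of how the retry loop in connect() is exercised by a backend subclass; DummyConnection and the constructor keywords are assumptions made for illustration, since Connection itself leaves _connect() to concrete backends.
from bigchaindb.backend.connection import Connection

class DummyConnection(Connection):
    """Hypothetical backend used only to show the retry behaviour."""
    def _connect(self):
        # a real backend would return a driver connection object here
        return object()

conn = DummyConnection(host='localhost', port=27017, dbname='bigchain')
conn.connect()   # retries up to max_tries, then raises ConnectionError on failure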
3,658 | youversion/crony | crony/crony.py | CommandCenter.log | def log(self, output, exit_status):
"""Log given CompletedProcess and return exit status code."""
if exit_status != 0:
self.logger.error(f'Error running command! Exit status: {exit_status}, {output}')
return exit_status | python | def log(self, output, exit_status):
"""Log given CompletedProcess and return exit status code."""
if exit_status != 0:
self.logger.error(f'Error running command! Exit status: {exit_status}, {output}')
return exit_status | ['def', 'log', '(', 'self', ',', 'output', ',', 'exit_status', ')', ':', 'if', 'exit_status', '!=', '0', ':', 'self', '.', 'logger', '.', 'error', '(', "f'Error running command! Exit status: {exit_status}, {output}'", ')', 'return', 'exit_status'] | Log given CompletedProcess and return exit status code. | ['Log', 'given', 'CompletedProcess', 'and', 'return', 'exit', 'status', 'code', '.'] | train | https://github.com/youversion/crony/blob/c93d14b809a2e878f1b9d6d53d5a04947896583b/crony/crony.py#L129-L134 |
3,659 | iterative/dvc | dvc/repo/__init__.py | Repo.graph | def graph(self, stages=None, from_directory=None):
"""Generate a graph by using the given stages on the given directory
The nodes of the graph are the stage's path relative to the root.
Edges are created when the output of one stage is used as a
dependency in other stage.
The direction of the edges goes from the stage to its dependency:
For example, running the following:
$ dvc run -o A "echo A > A"
$ dvc run -d A -o B "echo B > B"
$ dvc run -d B -o C "echo C > C"
Will create the following graph:
ancestors <--
|
C.dvc -> B.dvc -> A.dvc
| |
| --> descendants
|
------- pipeline ------>
|
v
(weakly connected components)
Args:
stages (list): used to build a graph, if None given, use the ones
on the `from_directory`.
from_directory (str): directory where to look at for stages, if
None is given, use the current working directory
Raises:
OutputDuplicationError: two outputs with the same path
StagePathAsOutputError: stage inside an output directory
OverlappingOutputPathsError: output inside output directory
CyclicGraphError: resulting graph has cycles
"""
import networkx as nx
from dvc.exceptions import (
OutputDuplicationError,
StagePathAsOutputError,
OverlappingOutputPathsError,
)
G = nx.DiGraph()
G_active = nx.DiGraph()
stages = stages or self.stages(from_directory, check_dag=False)
stages = [stage for stage in stages if stage]
outs = []
for stage in stages:
for out in stage.outs:
existing = []
for o in outs:
if o.path == out.path:
existing.append(o.stage)
in_o_dir = out.path.startswith(o.path + o.sep)
in_out_dir = o.path.startswith(out.path + out.sep)
if in_o_dir or in_out_dir:
raise OverlappingOutputPathsError(o, out)
if existing:
stages = [stage.relpath, existing[0].relpath]
raise OutputDuplicationError(out.path, stages)
outs.append(out)
for stage in stages:
path_dir = os.path.dirname(stage.path) + os.sep
for out in outs:
if path_dir.startswith(out.path + os.sep):
raise StagePathAsOutputError(stage.wdir, stage.relpath)
for stage in stages:
node = os.path.relpath(stage.path, self.root_dir)
G.add_node(node, stage=stage)
G_active.add_node(node, stage=stage)
for dep in stage.deps:
for out in outs:
if (
out.path != dep.path
and not dep.path.startswith(out.path + out.sep)
and not out.path.startswith(dep.path + dep.sep)
):
continue
dep_stage = out.stage
dep_node = os.path.relpath(dep_stage.path, self.root_dir)
G.add_node(dep_node, stage=dep_stage)
G.add_edge(node, dep_node)
if not stage.locked:
G_active.add_node(dep_node, stage=dep_stage)
G_active.add_edge(node, dep_node)
self._check_cyclic_graph(G)
return G, G_active | python | def graph(self, stages=None, from_directory=None):
"""Generate a graph by using the given stages on the given directory
The nodes of the graph are the stage's path relative to the root.
Edges are created when the output of one stage is used as a
dependency in other stage.
The direction of the edges goes from the stage to its dependency:
For example, running the following:
$ dvc run -o A "echo A > A"
$ dvc run -d A -o B "echo B > B"
$ dvc run -d B -o C "echo C > C"
Will create the following graph:
ancestors <--
|
C.dvc -> B.dvc -> A.dvc
| |
| --> descendants
|
------- pipeline ------>
|
v
(weakly connected components)
Args:
stages (list): used to build a graph, if None given, use the ones
on the `from_directory`.
from_directory (str): directory where to look at for stages, if
None is given, use the current working directory
Raises:
OutputDuplicationError: two outputs with the same path
StagePathAsOutputError: stage inside an output directory
OverlappingOutputPathsError: output inside output directory
CyclicGraphError: resulting graph has cycles
"""
import networkx as nx
from dvc.exceptions import (
OutputDuplicationError,
StagePathAsOutputError,
OverlappingOutputPathsError,
)
G = nx.DiGraph()
G_active = nx.DiGraph()
stages = stages or self.stages(from_directory, check_dag=False)
stages = [stage for stage in stages if stage]
outs = []
for stage in stages:
for out in stage.outs:
existing = []
for o in outs:
if o.path == out.path:
existing.append(o.stage)
in_o_dir = out.path.startswith(o.path + o.sep)
in_out_dir = o.path.startswith(out.path + out.sep)
if in_o_dir or in_out_dir:
raise OverlappingOutputPathsError(o, out)
if existing:
stages = [stage.relpath, existing[0].relpath]
raise OutputDuplicationError(out.path, stages)
outs.append(out)
for stage in stages:
path_dir = os.path.dirname(stage.path) + os.sep
for out in outs:
if path_dir.startswith(out.path + os.sep):
raise StagePathAsOutputError(stage.wdir, stage.relpath)
for stage in stages:
node = os.path.relpath(stage.path, self.root_dir)
G.add_node(node, stage=stage)
G_active.add_node(node, stage=stage)
for dep in stage.deps:
for out in outs:
if (
out.path != dep.path
and not dep.path.startswith(out.path + out.sep)
and not out.path.startswith(dep.path + dep.sep)
):
continue
dep_stage = out.stage
dep_node = os.path.relpath(dep_stage.path, self.root_dir)
G.add_node(dep_node, stage=dep_stage)
G.add_edge(node, dep_node)
if not stage.locked:
G_active.add_node(dep_node, stage=dep_stage)
G_active.add_edge(node, dep_node)
self._check_cyclic_graph(G)
return G, G_active | ['def', 'graph', '(', 'self', ',', 'stages', '=', 'None', ',', 'from_directory', '=', 'None', ')', ':', 'import', 'networkx', 'as', 'nx', 'from', 'dvc', '.', 'exceptions', 'import', '(', 'OutputDuplicationError', ',', 'StagePathAsOutputError', ',', 'OverlappingOutputPathsError', ',', ')', 'G', '=', 'nx', '.', 'DiGraph', '(', ')', 'G_active', '=', 'nx', '.', 'DiGraph', '(', ')', 'stages', '=', 'stages', 'or', 'self', '.', 'stages', '(', 'from_directory', ',', 'check_dag', '=', 'False', ')', 'stages', '=', '[', 'stage', 'for', 'stage', 'in', 'stages', 'if', 'stage', ']', 'outs', '=', '[', ']', 'for', 'stage', 'in', 'stages', ':', 'for', 'out', 'in', 'stage', '.', 'outs', ':', 'existing', '=', '[', ']', 'for', 'o', 'in', 'outs', ':', 'if', 'o', '.', 'path', '==', 'out', '.', 'path', ':', 'existing', '.', 'append', '(', 'o', '.', 'stage', ')', 'in_o_dir', '=', 'out', '.', 'path', '.', 'startswith', '(', 'o', '.', 'path', '+', 'o', '.', 'sep', ')', 'in_out_dir', '=', 'o', '.', 'path', '.', 'startswith', '(', 'out', '.', 'path', '+', 'out', '.', 'sep', ')', 'if', 'in_o_dir', 'or', 'in_out_dir', ':', 'raise', 'OverlappingOutputPathsError', '(', 'o', ',', 'out', ')', 'if', 'existing', ':', 'stages', '=', '[', 'stage', '.', 'relpath', ',', 'existing', '[', '0', ']', '.', 'relpath', ']', 'raise', 'OutputDuplicationError', '(', 'out', '.', 'path', ',', 'stages', ')', 'outs', '.', 'append', '(', 'out', ')', 'for', 'stage', 'in', 'stages', ':', 'path_dir', '=', 'os', '.', 'path', '.', 'dirname', '(', 'stage', '.', 'path', ')', '+', 'os', '.', 'sep', 'for', 'out', 'in', 'outs', ':', 'if', 'path_dir', '.', 'startswith', '(', 'out', '.', 'path', '+', 'os', '.', 'sep', ')', ':', 'raise', 'StagePathAsOutputError', '(', 'stage', '.', 'wdir', ',', 'stage', '.', 'relpath', ')', 'for', 'stage', 'in', 'stages', ':', 'node', '=', 'os', '.', 'path', '.', 'relpath', '(', 'stage', '.', 'path', ',', 'self', '.', 'root_dir', ')', 'G', '.', 'add_node', '(', 'node', ',', 'stage', '=', 'stage', ')', 'G_active', '.', 'add_node', '(', 'node', ',', 'stage', '=', 'stage', ')', 'for', 'dep', 'in', 'stage', '.', 'deps', ':', 'for', 'out', 'in', 'outs', ':', 'if', '(', 'out', '.', 'path', '!=', 'dep', '.', 'path', 'and', 'not', 'dep', '.', 'path', '.', 'startswith', '(', 'out', '.', 'path', '+', 'out', '.', 'sep', ')', 'and', 'not', 'out', '.', 'path', '.', 'startswith', '(', 'dep', '.', 'path', '+', 'dep', '.', 'sep', ')', ')', ':', 'continue', 'dep_stage', '=', 'out', '.', 'stage', 'dep_node', '=', 'os', '.', 'path', '.', 'relpath', '(', 'dep_stage', '.', 'path', ',', 'self', '.', 'root_dir', ')', 'G', '.', 'add_node', '(', 'dep_node', ',', 'stage', '=', 'dep_stage', ')', 'G', '.', 'add_edge', '(', 'node', ',', 'dep_node', ')', 'if', 'not', 'stage', '.', 'locked', ':', 'G_active', '.', 'add_node', '(', 'dep_node', ',', 'stage', '=', 'dep_stage', ')', 'G_active', '.', 'add_edge', '(', 'node', ',', 'dep_node', ')', 'self', '.', '_check_cyclic_graph', '(', 'G', ')', 'return', 'G', ',', 'G_active'] | Generate a graph by using the given stages on the given directory
The nodes of the graph are the stage's path relative to the root.
Edges are created when the output of one stage is used as a
dependency in other stage.
The direction of the edges goes from the stage to its dependency:
For example, running the following:
$ dvc run -o A "echo A > A"
$ dvc run -d A -o B "echo B > B"
$ dvc run -d B -o C "echo C > C"
Will create the following graph:
ancestors <--
|
C.dvc -> B.dvc -> A.dvc
| |
| --> descendants
|
------- pipeline ------>
|
v
(weakly connected components)
Args:
stages (list): used to build a graph, if None given, use the ones
on the `from_directory`.
from_directory (str): directory where to look at for stages, if
None is given, use the current working directory
Raises:
OutputDuplicationError: two outputs with the same path
StagePathAsOutputError: stage inside an output directory
OverlappingOutputPathsError: output inside output directory
CyclicGraphError: resulting graph has cycles | ['Generate', 'a', 'graph', 'by', 'using', 'the', 'given', 'stages', 'on', 'the', 'given', 'directory'] | train | https://github.com/iterative/dvc/blob/8bb21261e34c9632453e09090de7ebe50e38d341/dvc/repo/__init__.py#L300-L404 |
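A short usage sketch for Repo.graph(), assuming the working directory is inside an initialized DVC project; the printed summaries are illustrative only.
from dvc.repo import Repo

repo = Repo('.')                      # current directory must be inside a DVC repo
G, G_active = repo.graph()
print('stages:', list(G.nodes()))
print('active edges:', list(G_active.edges()))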
3,660 | zagaran/mongolia | mongolia/mongo_connection.py | authenticate_connection | def authenticate_connection(username, password, db=None):
"""
Authenticates the current database connection with the passed username
and password. If the database connection uses all default parameters,
this can be called without connect_to_database. Otherwise, it should
be preceded by a connect_to_database call.
@param username: the username with which you authenticate; must match
a user registered in the database
@param password: the password of that user
@param db: the database the user is authenticated to access. Passing None
(the default) means authenticating against the admin database, which
gives the connection access to all databases
Example: connecting to all databases locally:
connect_to_database()
authenticate_connection("username", "password")
Example: connecting to a particular database of a remote server:
connect_to_database(host="example.com", port="12345")
authenticate_connection("username", "password", db="somedb")
"""
return CONNECTION.authenticate(username, password, db=db) | python | def authenticate_connection(username, password, db=None):
"""
Authenticates the current database connection with the passed username
and password. If the database connection uses all default parameters,
this can be called without connect_to_database. Otherwise, it should
be preceded by a connect_to_database call.
@param username: the username with which you authenticate; must match
a user registered in the database
@param password: the password of that user
@param db: the database the user is authenticated to access. Passing None
(the default) means authenticating against the admin database, which
gives the connection access to all databases
Example: connecting to all databases locally:
connect_to_database()
authenticate_connection("username", "password")
Example: connecting to a particular database of a remote server:
connect_to_database(host="example.com", port="12345")
authenticate_connection("username", "password", db="somedb")
"""
return CONNECTION.authenticate(username, password, db=db) | ['def', 'authenticate_connection', '(', 'username', ',', 'password', ',', 'db', '=', 'None', ')', ':', 'return', 'CONNECTION', '.', 'authenticate', '(', 'username', ',', 'password', ',', 'db', '=', 'db', ')'] | Authenticates the current database connection with the passed username
and password. If the database connection uses all default parameters,
this can be called without connect_to_database. Otherwise, it should
be preceded by a connect_to_database call.
@param username: the username with which you authenticate; must match
a user registered in the database
@param password: the password of that user
@param db: the database the user is authenticated to access. Passing None
(the default) means authenticating against the admin database, which
gives the connection access to all databases
Example: connecting to all databases locally:
connect_to_database()
authenticate_connection("username", "password")
Example: connecting to a particular database of a remote server:
connect_to_database(host="example.com", port="12345")
authenticate_connection("username", "password", db="somedb") | ['Authenticates', 'the', 'current', 'database', 'connection', 'with', 'the', 'passed', 'username', 'and', 'password', '.', 'If', 'the', 'database', 'connection', 'uses', 'all', 'default', 'parameters', 'this', 'can', 'be', 'called', 'without', 'connect_to_database', '.', 'Otherwise', 'it', 'should', 'be', 'preceded', 'by', 'a', 'connect_to_database', 'call', '.'] | train | https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/mongo_connection.py#L109-L132 |
3,661 | halfak/python-jsonable | jsonable/functions.py | to_json | def to_json(value):
"""
Converts a value to a jsonable type.
"""
if type(value) in JSON_TYPES:
return value
elif hasattr(value, "to_json"):
return value.to_json()
elif isinstance(value, list) or isinstance(value, set) or \
isinstance(value, deque) or isinstance(value, tuple):
return [to_json(v) for v in value]
elif isinstance(value, dict):
return {str(k): to_json(v) for k, v in value.items()}
else:
raise TypeError("{0} is not json serializable.".format(type(value))) | python | def to_json(value):
"""
Converts a value to a jsonable type.
"""
if type(value) in JSON_TYPES:
return value
elif hasattr(value, "to_json"):
return value.to_json()
elif isinstance(value, list) or isinstance(value, set) or \
isinstance(value, deque) or isinstance(value, tuple):
return [to_json(v) for v in value]
elif isinstance(value, dict):
return {str(k): to_json(v) for k, v in value.items()}
else:
raise TypeError("{0} is not json serializable.".format(type(value))) | ['def', 'to_json', '(', 'value', ')', ':', 'if', 'type', '(', 'value', ')', 'in', 'JSON_TYPES', ':', 'return', 'value', 'elif', 'hasattr', '(', 'value', ',', '"to_json"', ')', ':', 'return', 'value', '.', 'to_json', '(', ')', 'elif', 'isinstance', '(', 'value', ',', 'list', ')', 'or', 'isinstance', '(', 'value', ',', 'set', ')', 'or', 'isinstance', '(', 'value', ',', 'deque', ')', 'or', 'isinstance', '(', 'value', ',', 'tuple', ')', ':', 'return', '[', 'to_json', '(', 'v', ')', 'for', 'v', 'in', 'value', ']', 'elif', 'isinstance', '(', 'value', ',', 'dict', ')', ':', 'return', '{', 'str', '(', 'k', ')', ':', 'to_json', '(', 'v', ')', 'for', 'k', ',', 'v', 'in', 'value', '.', 'items', '(', ')', '}', 'else', ':', 'raise', 'TypeError', '(', '"{0} is not json serializable."', '.', 'format', '(', 'type', '(', 'value', ')', ')', ')'] | Converts a value to a jsonable type. | ['Converts', 'a', 'value', 'to', 'a', 'jsonable', 'type', '.'] | train | https://github.com/halfak/python-jsonable/blob/70a53aedaca84d078228b3564fdd8f60a586d43f/jsonable/functions.py#L6-L20 |
3,662 | manahl/arctic | arctic/chunkstore/chunkstore.py | ChunkStore.rename | def rename(self, from_symbol, to_symbol, audit=None):
"""
Rename a symbol
Parameters
----------
from_symbol: str
the existing symbol that will be renamed
to_symbol: str
the new symbol name
audit: dict
audit information
"""
sym = self._get_symbol_info(from_symbol)
if not sym:
raise NoDataFoundException('No data found for %s' % (from_symbol))
if self._get_symbol_info(to_symbol) is not None:
raise Exception('Symbol %s already exists' % (to_symbol))
mongo_retry(self._collection.update_many)({SYMBOL: from_symbol},
{'$set': {SYMBOL: to_symbol}})
mongo_retry(self._symbols.update_one)({SYMBOL: from_symbol},
{'$set': {SYMBOL: to_symbol}})
mongo_retry(self._mdata.update_many)({SYMBOL: from_symbol},
{'$set': {SYMBOL: to_symbol}})
mongo_retry(self._audit.update_many)({'symbol': from_symbol},
{'$set': {'symbol': to_symbol}})
if audit is not None:
audit['symbol'] = to_symbol
audit['action'] = 'symbol rename'
audit['old_symbol'] = from_symbol
self._audit.insert_one(audit) | python | def rename(self, from_symbol, to_symbol, audit=None):
"""
Rename a symbol
Parameters
----------
from_symbol: str
the existing symbol that will be renamed
to_symbol: str
the new symbol name
audit: dict
audit information
"""
sym = self._get_symbol_info(from_symbol)
if not sym:
raise NoDataFoundException('No data found for %s' % (from_symbol))
if self._get_symbol_info(to_symbol) is not None:
raise Exception('Symbol %s already exists' % (to_symbol))
mongo_retry(self._collection.update_many)({SYMBOL: from_symbol},
{'$set': {SYMBOL: to_symbol}})
mongo_retry(self._symbols.update_one)({SYMBOL: from_symbol},
{'$set': {SYMBOL: to_symbol}})
mongo_retry(self._mdata.update_many)({SYMBOL: from_symbol},
{'$set': {SYMBOL: to_symbol}})
mongo_retry(self._audit.update_many)({'symbol': from_symbol},
{'$set': {'symbol': to_symbol}})
if audit is not None:
audit['symbol'] = to_symbol
audit['action'] = 'symbol rename'
audit['old_symbol'] = from_symbol
self._audit.insert_one(audit) | ['def', 'rename', '(', 'self', ',', 'from_symbol', ',', 'to_symbol', ',', 'audit', '=', 'None', ')', ':', 'sym', '=', 'self', '.', '_get_symbol_info', '(', 'from_symbol', ')', 'if', 'not', 'sym', ':', 'raise', 'NoDataFoundException', '(', "'No data found for %s'", '%', '(', 'from_symbol', ')', ')', 'if', 'self', '.', '_get_symbol_info', '(', 'to_symbol', ')', 'is', 'not', 'None', ':', 'raise', 'Exception', '(', "'Symbol %s already exists'", '%', '(', 'to_symbol', ')', ')', 'mongo_retry', '(', 'self', '.', '_collection', '.', 'update_many', ')', '(', '{', 'SYMBOL', ':', 'from_symbol', '}', ',', '{', "'$set'", ':', '{', 'SYMBOL', ':', 'to_symbol', '}', '}', ')', 'mongo_retry', '(', 'self', '.', '_symbols', '.', 'update_one', ')', '(', '{', 'SYMBOL', ':', 'from_symbol', '}', ',', '{', "'$set'", ':', '{', 'SYMBOL', ':', 'to_symbol', '}', '}', ')', 'mongo_retry', '(', 'self', '.', '_mdata', '.', 'update_many', ')', '(', '{', 'SYMBOL', ':', 'from_symbol', '}', ',', '{', "'$set'", ':', '{', 'SYMBOL', ':', 'to_symbol', '}', '}', ')', 'mongo_retry', '(', 'self', '.', '_audit', '.', 'update_many', ')', '(', '{', "'symbol'", ':', 'from_symbol', '}', ',', '{', "'$set'", ':', '{', "'symbol'", ':', 'to_symbol', '}', '}', ')', 'if', 'audit', 'is', 'not', 'None', ':', 'audit', '[', "'symbol'", ']', '=', 'to_symbol', 'audit', '[', "'action'", ']', '=', "'symbol rename'", 'audit', '[', "'old_symbol'", ']', '=', 'from_symbol', 'self', '.', '_audit', '.', 'insert_one', '(', 'audit', ')'] | Rename a symbol
Parameters
----------
from_symbol: str
the existing symbol that will be renamed
to_symbol: str
the new symbol name
audit: dict
audit information | ['Rename', 'a', 'symbol'] | train | https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/chunkstore/chunkstore.py#L193-L226 |
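A hedged usage sketch for ChunkStore.rename; it assumes a local MongoDB, an already-initialized chunkstore library, and an existing symbol, with the library name and audit dict as placeholders.
from arctic import Arctic, CHUNK_STORE

store = Arctic('localhost')
store.initialize_library('research.prices', lib_type=CHUNK_STORE)
library = store['research.prices']
# assumes library.write('AAPL', frame, ...) happened earlier
library.rename('AAPL', 'AAPL_US', audit={'user': 'research', 'reason': 'ticker change'})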
3,663 | blueset/ehForwarderBot | ehforwarderbot/coordinator.py | add_middleware | def add_middleware(middleware: EFBMiddleware):
"""
Register a middleware with the coordinator.
Args:
middleware (EFBMiddleware): Middleware to register
"""
global middlewares
if isinstance(middleware, EFBMiddleware):
middlewares.append(middleware)
else:
raise TypeError("Middleware instance is expected") | python | def add_middleware(middleware: EFBMiddleware):
"""
Register a middleware with the coordinator.
Args:
middleware (EFBMiddleware): Middleware to register
"""
global middlewares
if isinstance(middleware, EFBMiddleware):
middlewares.append(middleware)
else:
raise TypeError("Middleware instance is expected") | ['def', 'add_middleware', '(', 'middleware', ':', 'EFBMiddleware', ')', ':', 'global', 'middlewares', 'if', 'isinstance', '(', 'middleware', ',', 'EFBMiddleware', ')', ':', 'middlewares', '.', 'append', '(', 'middleware', ')', 'else', ':', 'raise', 'TypeError', '(', '"Middleware instance is expected"', ')'] | Register a middleware with the coordinator.
Args:
middleware (EFBMiddleware): Middleware to register | ['Register', 'a', 'middleware', 'with', 'the', 'coordinator', '.'] | train | https://github.com/blueset/ehForwarderBot/blob/62e8fcfe77b2993aba91623f538f404a90f59f1d/ehforwarderbot/coordinator.py#L70-L81 |
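A minimal registration sketch for add_middleware; NoisyMiddleware and its class attributes are hypothetical, and the EFBMiddleware import path is an assumption.
from ehforwarderbot import coordinator
from ehforwarderbot.middleware import EFBMiddleware

class NoisyMiddleware(EFBMiddleware):
    """Hypothetical middleware; a real one would override the message hooks."""
    middleware_id = 'example.noisy'
    middleware_name = 'Noisy middleware'

coordinator.add_middleware(NoisyMiddleware())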
3,664 | google/prettytensor | prettytensor/pretty_tensor_methods.py | split | def split(input_layer, split_dim=0, num_splits=2):
"""Splits this Tensor along the split_dim into num_splits Equal chunks.
Examples:
* `[1, 2, 3, 4] -> [1, 2], [3, 4]`
* `[[1, 1], [2, 2], [3, 3], [4, 4]] -> [[1, 1], [2, 2]], [[3, 3], [4, 4]]`
Args:
input_layer: The chainable object, supplied.
split_dim: The dimension to split along. Defaults to batch.
num_splits: The number of splits.
Returns:
A list of PrettyTensors.
Raises:
ValueError: If split_dim is out of range or isn't divided evenly by
num_splits.
"""
shape = input_layer.shape
_check_split_dims(num_splits, split_dim, shape)
splits = tf.split(
value=input_layer, num_or_size_splits=num_splits, axis=split_dim)
return input_layer.with_sequence(splits) | python | def split(input_layer, split_dim=0, num_splits=2):
"""Splits this Tensor along the split_dim into num_splits Equal chunks.
Examples:
* `[1, 2, 3, 4] -> [1, 2], [3, 4]`
* `[[1, 1], [2, 2], [3, 3], [4, 4]] -> [[1, 1], [2, 2]], [[3, 3], [4, 4]]`
Args:
input_layer: The chainable object, supplied.
split_dim: The dimension to split along. Defaults to batch.
num_splits: The number of splits.
Returns:
A list of PrettyTensors.
Raises:
ValueError: If split_dim is out of range or isn't divided evenly by
num_splits.
"""
shape = input_layer.shape
_check_split_dims(num_splits, split_dim, shape)
splits = tf.split(
value=input_layer, num_or_size_splits=num_splits, axis=split_dim)
return input_layer.with_sequence(splits) | ['def', 'split', '(', 'input_layer', ',', 'split_dim', '=', '0', ',', 'num_splits', '=', '2', ')', ':', 'shape', '=', 'input_layer', '.', 'shape', '_check_split_dims', '(', 'num_splits', ',', 'split_dim', ',', 'shape', ')', 'splits', '=', 'tf', '.', 'split', '(', 'value', '=', 'input_layer', ',', 'num_or_size_splits', '=', 'num_splits', ',', 'axis', '=', 'split_dim', ')', 'return', 'input_layer', '.', 'with_sequence', '(', 'splits', ')'] | Splits this Tensor along the split_dim into num_splits Equal chunks.
Examples:
* `[1, 2, 3, 4] -> [1, 2], [3, 4]`
* `[[1, 1], [2, 2], [3, 3], [4, 4]] -> [[1, 1], [2, 2]], [[3, 3], [4, 4]]`
Args:
input_layer: The chainable object, supplied.
split_dim: The dimension to split along. Defaults to batch.
num_splits: The number of splits.
Returns:
A list of PrettyTensors.
Raises:
ValueError: If split_dim is out of range or isn't divided evenly by
num_splits. | ['Splits', 'this', 'Tensor', 'along', 'the', 'split_dim', 'into', 'num_splits', 'Equal', 'chunks', '.'] | train | https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/pretty_tensor_methods.py#L569-L591 |
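A small sketch of split() used through the pretty_tensor wrapper (TF1-style placeholders; the shapes are illustrative).
import tensorflow as tf
import prettytensor as pt

batch = tf.placeholder(tf.float32, shape=[4, 8])
halves = pt.wrap(batch).split(split_dim=0, num_splits=2)
# `halves` behaves as a sequence of two PrettyTensors, each of shape [2, 8]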
3,665 | chrisbouchard/braillegraph | braillegraph/__main__.py | run | def run():
"""Display the arguments as a braille graph on standard output."""
# We override the program name to reflect that this script must be run with
# the python executable.
parser = argparse.ArgumentParser(
prog='python -m braillegraph',
description='Print a braille bar graph of the given integers.'
)
# This flag sets the end string that we'll print. If we pass end=None to
# print(), it will use its default. If we pass end='', it will suppress the
# newline character.
parser.add_argument('-n', '--no-newline', action='store_const',
dest='end', const='', default=None,
help='do not print the trailing newline character')
# Add subparsers for the directions
subparsers = parser.add_subparsers(title='directions')
horizontal_parser = subparsers.add_parser('horizontal',
help='a horizontal graph')
horizontal_parser.set_defaults(
func=lambda args: horizontal_graph(args.integers)
)
horizontal_parser.add_argument('integers', metavar='N', type=int,
nargs='+', help='an integer')
vertical_parser = subparsers.add_parser('vertical',
help='a vertical graph')
vertical_parser.set_defaults(
func=lambda args: vertical_graph(args.integers, sep=args.sep)
)
vertical_parser.add_argument('integers', metavar='N', type=int, nargs='+',
help='an integer')
# The separator for groups of bars (i.e., "lines"). If we pass None,
# vertical_parser will use its default.
vertical_parser.add_argument('-s', '--sep', action='store', default=None,
help='separator for groups of bars')
args = parser.parse_args()
print(args.func(args), end=args.end) | python | def run():
"""Display the arguments as a braille graph on standard output."""
# We override the program name to reflect that this script must be run with
# the python executable.
parser = argparse.ArgumentParser(
prog='python -m braillegraph',
description='Print a braille bar graph of the given integers.'
)
# This flag sets the end string that we'll print. If we pass end=None to
# print(), it will use its default. If we pass end='', it will suppress the
# newline character.
parser.add_argument('-n', '--no-newline', action='store_const',
dest='end', const='', default=None,
help='do not print the trailing newline character')
# Add subparsers for the directions
subparsers = parser.add_subparsers(title='directions')
horizontal_parser = subparsers.add_parser('horizontal',
help='a horizontal graph')
horizontal_parser.set_defaults(
func=lambda args: horizontal_graph(args.integers)
)
horizontal_parser.add_argument('integers', metavar='N', type=int,
nargs='+', help='an integer')
vertical_parser = subparsers.add_parser('vertical',
help='a vertical graph')
vertical_parser.set_defaults(
func=lambda args: vertical_graph(args.integers, sep=args.sep)
)
vertical_parser.add_argument('integers', metavar='N', type=int, nargs='+',
help='an integer')
# The separator for groups of bars (i.e., "lines"). If we pass None,
# vertical_parser will use its default.
vertical_parser.add_argument('-s', '--sep', action='store', default=None,
help='separator for groups of bars')
args = parser.parse_args()
print(args.func(args), end=args.end) | ['def', 'run', '(', ')', ':', '# We override the program name to reflect that this script must be run with', '# the python executable.', 'parser', '=', 'argparse', '.', 'ArgumentParser', '(', 'prog', '=', "'python -m braillegraph'", ',', 'description', '=', "'Print a braille bar graph of the given integers.'", ')', "# This flag sets the end string that we'll print. If we pass end=None to", "# print(), it will use its default. If we pass end='', it will suppress the", '# newline character.', 'parser', '.', 'add_argument', '(', "'-n'", ',', "'--no-newline'", ',', 'action', '=', "'store_const'", ',', 'dest', '=', "'end'", ',', 'const', '=', "''", ',', 'default', '=', 'None', ',', 'help', '=', "'do not print the trailing newline character'", ')', '# Add subparsers for the directions', 'subparsers', '=', 'parser', '.', 'add_subparsers', '(', 'title', '=', "'directions'", ')', 'horizontal_parser', '=', 'subparsers', '.', 'add_parser', '(', "'horizontal'", ',', 'help', '=', "'a horizontal graph'", ')', 'horizontal_parser', '.', 'set_defaults', '(', 'func', '=', 'lambda', 'args', ':', 'horizontal_graph', '(', 'args', '.', 'integers', ')', ')', 'horizontal_parser', '.', 'add_argument', '(', "'integers'", ',', 'metavar', '=', "'N'", ',', 'type', '=', 'int', ',', 'nargs', '=', "'+'", ',', 'help', '=', "'an integer'", ')', 'vertical_parser', '=', 'subparsers', '.', 'add_parser', '(', "'vertical'", ',', 'help', '=', "'a vertical graph'", ')', 'vertical_parser', '.', 'set_defaults', '(', 'func', '=', 'lambda', 'args', ':', 'vertical_graph', '(', 'args', '.', 'integers', ',', 'sep', '=', 'args', '.', 'sep', ')', ')', 'vertical_parser', '.', 'add_argument', '(', "'integers'", ',', 'metavar', '=', "'N'", ',', 'type', '=', 'int', ',', 'nargs', '=', "'+'", ',', 'help', '=', "'an integer'", ')', '# The separator for groups of bars (i.e., "lines"). If we pass None,', '# vertical_parser will use its default.', 'vertical_parser', '.', 'add_argument', '(', "'-s'", ',', "'--sep'", ',', 'action', '=', "'store'", ',', 'default', '=', 'None', ',', 'help', '=', "'separator for groups of bars'", ')', 'args', '=', 'parser', '.', 'parse_args', '(', ')', 'print', '(', 'args', '.', 'func', '(', 'args', ')', ',', 'end', '=', 'args', '.', 'end', ')'] | Display the arguments as a braille graph on standard output. | ['Display', 'the', 'arguments', 'as', 'a', 'braille', 'graph', 'on', 'standard', 'output', '.'] | train | https://github.com/chrisbouchard/braillegraph/blob/744ca8394676579cfb11e5c297c9bd794ab5bd78/braillegraph/__main__.py#L45-L88 |
3,666 | adafruit/Adafruit_Python_CharLCD | Adafruit_CharLCD/Adafruit_CharLCD.py | Adafruit_CharLCD.set_right_to_left | def set_right_to_left(self):
"""Set text direction right to left."""
self.displaymode &= ~LCD_ENTRYLEFT
self.write8(LCD_ENTRYMODESET | self.displaymode) | python | def set_right_to_left(self):
"""Set text direction right to left."""
self.displaymode &= ~LCD_ENTRYLEFT
self.write8(LCD_ENTRYMODESET | self.displaymode) | ['def', 'set_right_to_left', '(', 'self', ')', ':', 'self', '.', 'displaymode', '&=', '~', 'LCD_ENTRYLEFT', 'self', '.', 'write8', '(', 'LCD_ENTRYMODESET', '|', 'self', '.', 'displaymode', ')'] | Set text direction right to left. | ['Set', 'text', 'direction', 'right', 'to', 'left', '.'] | train | https://github.com/adafruit/Adafruit_Python_CharLCD/blob/c126e6b673074c12a03f4bd36afb2fe40272341e/Adafruit_CharLCD/Adafruit_CharLCD.py#L228-L231 |
3,667 | google/openhtf | openhtf/output/servers/dashboard_server.py | DashboardPubSub.publish_if_new | def publish_if_new(cls):
"""If the station map has changed, publish the new information."""
message = cls.make_message()
if message != cls.last_message:
super(DashboardPubSub, cls).publish(message)
cls.last_message = message | python | def publish_if_new(cls):
"""If the station map has changed, publish the new information."""
message = cls.make_message()
if message != cls.last_message:
super(DashboardPubSub, cls).publish(message)
cls.last_message = message | ['def', 'publish_if_new', '(', 'cls', ')', ':', 'message', '=', 'cls', '.', 'make_message', '(', ')', 'if', 'message', '!=', 'cls', '.', 'last_message', ':', 'super', '(', 'DashboardPubSub', ',', 'cls', ')', '.', 'publish', '(', 'message', ')', 'cls', '.', 'last_message', '=', 'message'] | If the station map has changed, publish the new information. | ['If', 'the', 'station', 'map', 'has', 'changed', 'publish', 'the', 'new', 'information', '.'] | train | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/servers/dashboard_server.py#L92-L97 |
3,668 | daviddrysdale/python-phonenumbers | python/phonenumbers/shortnumberinfo.py | _example_short_number_for_cost | def _example_short_number_for_cost(region_code, cost):
"""Gets a valid short number for the specified cost category.
Arguments:
region_code -- the region for which an example short number is needed.
cost -- the cost category of number that is needed.
Returns a valid short number for the specified region and cost
category. Returns an empty string when the metadata does not contain such
information, or the cost is UNKNOWN_COST.
"""
metadata = PhoneMetadata.short_metadata_for_region(region_code)
if metadata is None:
return U_EMPTY_STRING
desc = None
if cost == ShortNumberCost.TOLL_FREE:
desc = metadata.toll_free
elif cost == ShortNumberCost.STANDARD_RATE:
desc = metadata.standard_rate
elif cost == ShortNumberCost.PREMIUM_RATE:
desc = metadata.premium_rate
else:
# ShortNumberCost.UNKNOWN_COST numbers are computed by the process of
# elimination from the other cost categoried.
pass
if desc is not None and desc.example_number is not None:
return desc.example_number
return U_EMPTY_STRING | python | def _example_short_number_for_cost(region_code, cost):
"""Gets a valid short number for the specified cost category.
Arguments:
region_code -- the region for which an example short number is needed.
cost -- the cost category of number that is needed.
Returns a valid short number for the specified region and cost
category. Returns an empty string when the metadata does not contain such
information, or the cost is UNKNOWN_COST.
"""
metadata = PhoneMetadata.short_metadata_for_region(region_code)
if metadata is None:
return U_EMPTY_STRING
desc = None
if cost == ShortNumberCost.TOLL_FREE:
desc = metadata.toll_free
elif cost == ShortNumberCost.STANDARD_RATE:
desc = metadata.standard_rate
elif cost == ShortNumberCost.PREMIUM_RATE:
desc = metadata.premium_rate
else:
# ShortNumberCost.UNKNOWN_COST numbers are computed by the process of
# elimination from the other cost categoried.
pass
if desc is not None and desc.example_number is not None:
return desc.example_number
return U_EMPTY_STRING | ['def', '_example_short_number_for_cost', '(', 'region_code', ',', 'cost', ')', ':', 'metadata', '=', 'PhoneMetadata', '.', 'short_metadata_for_region', '(', 'region_code', ')', 'if', 'metadata', 'is', 'None', ':', 'return', 'U_EMPTY_STRING', 'desc', '=', 'None', 'if', 'cost', '==', 'ShortNumberCost', '.', 'TOLL_FREE', ':', 'desc', '=', 'metadata', '.', 'toll_free', 'elif', 'cost', '==', 'ShortNumberCost', '.', 'STANDARD_RATE', ':', 'desc', '=', 'metadata', '.', 'standard_rate', 'elif', 'cost', '==', 'ShortNumberCost', '.', 'PREMIUM_RATE', ':', 'desc', '=', 'metadata', '.', 'premium_rate', 'else', ':', '# ShortNumberCost.UNKNOWN_COST numbers are computed by the process of', '# elimination from the other cost categoried.', 'pass', 'if', 'desc', 'is', 'not', 'None', 'and', 'desc', '.', 'example_number', 'is', 'not', 'None', ':', 'return', 'desc', '.', 'example_number', 'return', 'U_EMPTY_STRING'] | Gets a valid short number for the specified cost category.
Arguments:
region_code -- the region for which an example short number is needed.
cost -- the cost category of number that is needed.
Returns a valid short number for the specified region and cost
category. Returns an empty string when the metadata does not contain such
information, or the cost is UNKNOWN_COST. | ['Gets', 'a', 'valid', 'short', 'number', 'for', 'the', 'specified', 'cost', 'category', '.'] | train | https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/shortnumberinfo.py#L295-L322 |
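An exploratory sketch only: _example_short_number_for_cost is module-private, so normal code would go through the public short-number helpers, but calling it directly shows the metadata lookup described above.
from phonenumbers import shortnumberinfo

example = shortnumberinfo._example_short_number_for_cost(
    'FR', shortnumberinfo.ShortNumberCost.TOLL_FREE)
print(example or '<no example available in the metadata>')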
3,669 | msfrank/cifparser | cifparser/valuetree.py | ValueTree.append_field | def append_field(self, path, name, value):
"""
Appends the field to the container at the specified path.
:param path: str or Path instance
:param name:
:type name: str
:param value:
:type value: str
"""
path = make_path(path)
container = self.get_container(path)
current = container._values.get(name, None)
if current is None:
container._values[name] = value
elif isinstance(current, ValueTree):
raise TypeError()
elif isinstance(current, list):
container._values[name] = current + [value]
else:
container._values[name] = [current, value] | python | def append_field(self, path, name, value):
"""
Appends the field to the container at the specified path.
:param path: str or Path instance
:param name:
:type name: str
:param value:
:type value: str
"""
path = make_path(path)
container = self.get_container(path)
current = container._values.get(name, None)
if current is None:
container._values[name] = value
elif isinstance(current, ValueTree):
raise TypeError()
elif isinstance(current, list):
container._values[name] = current + [value]
else:
container._values[name] = [current, value] | ['def', 'append_field', '(', 'self', ',', 'path', ',', 'name', ',', 'value', ')', ':', 'path', '=', 'make_path', '(', 'path', ')', 'container', '=', 'self', '.', 'get_container', '(', 'path', ')', 'current', '=', 'container', '.', '_values', '.', 'get', '(', 'name', ',', 'None', ')', 'if', 'current', 'is', 'None', ':', 'container', '.', '_values', '[', 'name', ']', '=', 'value', 'elif', 'isinstance', '(', 'current', ',', 'ValueTree', ')', ':', 'raise', 'TypeError', '(', ')', 'elif', 'isinstance', '(', 'current', ',', 'list', ')', ':', 'container', '.', '_values', '[', 'name', ']', '=', 'current', '+', '[', 'value', ']', 'else', ':', 'container', '.', '_values', '[', 'name', ']', '=', '[', 'current', ',', 'value', ']'] | Appends the field to the container at the specified path.
:param path: str or Path instance
:param name:
:type name: str
:param value:
:type value: str | ['Appends', 'the', 'field', 'to', 'the', 'container', 'at', 'the', 'specified', 'path', '.'] | train | https://github.com/msfrank/cifparser/blob/ecd899ba2e7b990e2cec62b115742d830e7e4384/cifparser/valuetree.py#L177-L197 |
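A sketch of the append semantics (the first write stores a scalar, repeated writes promote the field to a list); the put_container call used to create the intermediate container is an assumed helper, so check the ValueTree API for the exact name.
from cifparser.valuetree import ValueTree

tree = ValueTree()
tree.put_container('server')                        # assumed container-creation helper
tree.append_field('server', 'listen', '127.0.0.1')
tree.append_field('server', 'listen', '10.0.0.1')   # 'listen' now holds both values as a list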
3,670 | gem/oq-engine | openquake/commonlib/logictree.py | BranchSet.filter_source | def filter_source(self, source):
# pylint: disable=R0911,R0912
"""
Apply filters to ``source`` and return ``True`` if uncertainty should
be applied to it.
"""
for key, value in self.filters.items():
if key == 'applyToTectonicRegionType':
if value != source.tectonic_region_type:
return False
elif key == 'applyToSourceType':
if value == 'area':
if not isinstance(source, ohs.AreaSource):
return False
elif value == 'point':
# area source extends point source
if (not isinstance(source, ohs.PointSource)
or isinstance(source, ohs.AreaSource)):
return False
elif value == 'simpleFault':
if not isinstance(source, ohs.SimpleFaultSource):
return False
elif value == 'complexFault':
if not isinstance(source, ohs.ComplexFaultSource):
return False
elif value == 'characteristicFault':
if not isinstance(source, ohs.CharacteristicFaultSource):
return False
else:
raise AssertionError("unknown source type '%s'" % value)
elif key == 'applyToSources':
if source and source.source_id not in value:
return False
else:
raise AssertionError("unknown filter '%s'" % key)
# All filters pass, return True.
return True | python | def filter_source(self, source):
# pylint: disable=R0911,R0912
"""
Apply filters to ``source`` and return ``True`` if uncertainty should
be applied to it.
"""
for key, value in self.filters.items():
if key == 'applyToTectonicRegionType':
if value != source.tectonic_region_type:
return False
elif key == 'applyToSourceType':
if value == 'area':
if not isinstance(source, ohs.AreaSource):
return False
elif value == 'point':
# area source extends point source
if (not isinstance(source, ohs.PointSource)
or isinstance(source, ohs.AreaSource)):
return False
elif value == 'simpleFault':
if not isinstance(source, ohs.SimpleFaultSource):
return False
elif value == 'complexFault':
if not isinstance(source, ohs.ComplexFaultSource):
return False
elif value == 'characteristicFault':
if not isinstance(source, ohs.CharacteristicFaultSource):
return False
else:
raise AssertionError("unknown source type '%s'" % value)
elif key == 'applyToSources':
if source and source.source_id not in value:
return False
else:
raise AssertionError("unknown filter '%s'" % key)
# All filters pass, return True.
return True | ['def', 'filter_source', '(', 'self', ',', 'source', ')', ':', '# pylint: disable=R0911,R0912', 'for', 'key', ',', 'value', 'in', 'self', '.', 'filters', '.', 'items', '(', ')', ':', 'if', 'key', '==', "'applyToTectonicRegionType'", ':', 'if', 'value', '!=', 'source', '.', 'tectonic_region_type', ':', 'return', 'False', 'elif', 'key', '==', "'applyToSourceType'", ':', 'if', 'value', '==', "'area'", ':', 'if', 'not', 'isinstance', '(', 'source', ',', 'ohs', '.', 'AreaSource', ')', ':', 'return', 'False', 'elif', 'value', '==', "'point'", ':', '# area source extends point source', 'if', '(', 'not', 'isinstance', '(', 'source', ',', 'ohs', '.', 'PointSource', ')', 'or', 'isinstance', '(', 'source', ',', 'ohs', '.', 'AreaSource', ')', ')', ':', 'return', 'False', 'elif', 'value', '==', "'simpleFault'", ':', 'if', 'not', 'isinstance', '(', 'source', ',', 'ohs', '.', 'SimpleFaultSource', ')', ':', 'return', 'False', 'elif', 'value', '==', "'complexFault'", ':', 'if', 'not', 'isinstance', '(', 'source', ',', 'ohs', '.', 'ComplexFaultSource', ')', ':', 'return', 'False', 'elif', 'value', '==', "'characteristicFault'", ':', 'if', 'not', 'isinstance', '(', 'source', ',', 'ohs', '.', 'CharacteristicFaultSource', ')', ':', 'return', 'False', 'else', ':', 'raise', 'AssertionError', '(', '"unknown source type \'%s\'"', '%', 'value', ')', 'elif', 'key', '==', "'applyToSources'", ':', 'if', 'source', 'and', 'source', '.', 'source_id', 'not', 'in', 'value', ':', 'return', 'False', 'else', ':', 'raise', 'AssertionError', '(', '"unknown filter \'%s\'"', '%', 'key', ')', '# All filters pass, return True.', 'return', 'True'] | Apply filters to ``source`` and return ``True`` if uncertainty should
be applied to it. | ['Apply', 'filters', 'to', 'source', 'and', 'return', 'True', 'if', 'uncertainty', 'should', 'be', 'applied', 'to', 'it', '.'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/logictree.py#L343-L379 |
3,671 | django-salesforce/django-salesforce | salesforce/utils.py | get_soap_client | def get_soap_client(db_alias, client_class=None):
"""
Create the SOAP client for the current user logged in the db_alias
The default created client is "beatbox.PythonClient", but an
alternative client is possible. (i.e. other subtype of beatbox.XMLClient)
"""
if not beatbox:
raise InterfaceError("To use SOAP API, you'll need to install the Beatbox package.")
if client_class is None:
client_class = beatbox.PythonClient
soap_client = client_class()
# authenticate
connection = connections[db_alias]
# verify the authenticated connection, because Beatbox can not refresh the token
cursor = connection.cursor()
cursor.urls_request()
auth_info = connections[db_alias].sf_session.auth
access_token = auth_info.get_auth()['access_token']
assert access_token[15] == '!'
org_id = access_token[:15]
url = '/services/Soap/u/{version}/{org_id}'.format(version=salesforce.API_VERSION,
org_id=org_id)
soap_client.useSession(access_token, auth_info.instance_url + url)
return soap_client | python | def get_soap_client(db_alias, client_class=None):
"""
Create the SOAP client for the current user logged in the db_alias
The default created client is "beatbox.PythonClient", but an
alternative client is possible. (i.e. other subtype of beatbox.XMLClient)
"""
if not beatbox:
raise InterfaceError("To use SOAP API, you'll need to install the Beatbox package.")
if client_class is None:
client_class = beatbox.PythonClient
soap_client = client_class()
# authenticate
connection = connections[db_alias]
# verify the authenticated connection, because Beatbox can not refresh the token
cursor = connection.cursor()
cursor.urls_request()
auth_info = connections[db_alias].sf_session.auth
access_token = auth_info.get_auth()['access_token']
assert access_token[15] == '!'
org_id = access_token[:15]
url = '/services/Soap/u/{version}/{org_id}'.format(version=salesforce.API_VERSION,
org_id=org_id)
soap_client.useSession(access_token, auth_info.instance_url + url)
return soap_client | ['def', 'get_soap_client', '(', 'db_alias', ',', 'client_class', '=', 'None', ')', ':', 'if', 'not', 'beatbox', ':', 'raise', 'InterfaceError', '(', '"To use SOAP API, you\'ll need to install the Beatbox package."', ')', 'if', 'client_class', 'is', 'None', ':', 'client_class', '=', 'beatbox', '.', 'PythonClient', 'soap_client', '=', 'client_class', '(', ')', '# authenticate', 'connection', '=', 'connections', '[', 'db_alias', ']', '# verify the authenticated connection, because Beatbox can not refresh the token', 'cursor', '=', 'connection', '.', 'cursor', '(', ')', 'cursor', '.', 'urls_request', '(', ')', 'auth_info', '=', 'connections', '[', 'db_alias', ']', '.', 'sf_session', '.', 'auth', 'access_token', '=', 'auth_info', '.', 'get_auth', '(', ')', '[', "'access_token'", ']', 'assert', 'access_token', '[', '15', ']', '==', "'!'", 'org_id', '=', 'access_token', '[', ':', '15', ']', 'url', '=', "'/services/Soap/u/{version}/{org_id}'", '.', 'format', '(', 'version', '=', 'salesforce', '.', 'API_VERSION', ',', 'org_id', '=', 'org_id', ')', 'soap_client', '.', 'useSession', '(', 'access_token', ',', 'auth_info', '.', 'instance_url', '+', 'url', ')', 'return', 'soap_client'] | Create the SOAP client for the current user logged in the db_alias
The default created client is "beatbox.PythonClient", but an
alternative client is possible. (i.e. other subtype of beatbox.XMLClient) | ['Create', 'the', 'SOAP', 'client', 'for', 'the', 'current', 'user', 'logged', 'in', 'the', 'db_alias'] | train | https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/utils.py#L20-L46 |
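A usage sketch for get_soap_client inside a configured Django project; 'salesforce' stands for whatever DATABASES alias points at the django-salesforce backend, and describeGlobal() is just one example Beatbox call.
from salesforce.utils import get_soap_client

soap = get_soap_client('salesforce')   # alias of the Salesforce entry in DATABASES
print(soap.describeGlobal())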
3,672 | rosenbrockc/acorn | acorn/ipython.py | record_markdown | def record_markdown(text, cellid):
"""Records the specified markdown text to the acorn database.
Args:
text (str): the *raw* markdown text entered into the cell in the ipython
notebook.
"""
from acorn.logging.database import record
from time import time
ekey = "nb-{}".format(cellid)
global _cellid_map
if cellid not in _cellid_map:
from acorn.logging.database import active_db
from difflib import SequenceMatcher
from acorn.logging.diff import cascade
taskdb = active_db()
if ekey not in taskdb.entities:
#Compute a new ekey if possible with the most similar markdown cell
#in the database.
possible = [k for k in taskdb.entities if k[0:3] == "nb-"]
maxkey, maxvalue = None, 0.
for pkey in possible:
sequence = [e["c"] for e in taskdb.entities[pkey]]
state = ''.join(cascade(sequence))
matcher = SequenceMatcher(a=state, b=text)
ratio = matcher.quick_ratio()
if ratio > maxvalue and ratio > 0.5:
maxkey, maxvalue = pkey, ratio
#We expect the similarity to be at least 0.5; otherwise we decide
#that it is a new cell.
if maxkey is not None:
ekey = pkey
_cellid_map[cellid] = ekey
ekey = _cellid_map[cellid]
entry = {
"m": "md",
"a": None,
"s": time(),
"r": None,
"c": text,
}
record(ekey, entry, diff=True) | python | def record_markdown(text, cellid):
"""Records the specified markdown text to the acorn database.
Args:
text (str): the *raw* markdown text entered into the cell in the ipython
notebook.
"""
from acorn.logging.database import record
from time import time
ekey = "nb-{}".format(cellid)
global _cellid_map
if cellid not in _cellid_map:
from acorn.logging.database import active_db
from difflib import SequenceMatcher
from acorn.logging.diff import cascade
taskdb = active_db()
if ekey not in taskdb.entities:
#Compute a new ekey if possible with the most similar markdown cell
#in the database.
possible = [k for k in taskdb.entities if k[0:3] == "nb-"]
maxkey, maxvalue = None, 0.
for pkey in possible:
sequence = [e["c"] for e in taskdb.entities[pkey]]
state = ''.join(cascade(sequence))
matcher = SequenceMatcher(a=state, b=text)
ratio = matcher.quick_ratio()
if ratio > maxvalue and ratio > 0.5:
maxkey, maxvalue = pkey, ratio
#We expect the similarity to be at least 0.5; otherwise we decide
#that it is a new cell.
if maxkey is not None:
ekey = pkey
_cellid_map[cellid] = ekey
ekey = _cellid_map[cellid]
entry = {
"m": "md",
"a": None,
"s": time(),
"r": None,
"c": text,
}
record(ekey, entry, diff=True) | ['def', 'record_markdown', '(', 'text', ',', 'cellid', ')', ':', 'from', 'acorn', '.', 'logging', '.', 'database', 'import', 'record', 'from', 'time', 'import', 'time', 'ekey', '=', '"nb-{}"', '.', 'format', '(', 'cellid', ')', 'global', '_cellid_map', 'if', 'cellid', 'not', 'in', '_cellid_map', ':', 'from', 'acorn', '.', 'logging', '.', 'database', 'import', 'active_db', 'from', 'difflib', 'import', 'SequenceMatcher', 'from', 'acorn', '.', 'logging', '.', 'diff', 'import', 'cascade', 'taskdb', '=', 'active_db', '(', ')', 'if', 'ekey', 'not', 'in', 'taskdb', '.', 'entities', ':', '#Compute a new ekey if possible with the most similar markdown cell', '#in the database.', 'possible', '=', '[', 'k', 'for', 'k', 'in', 'taskdb', '.', 'entities', 'if', 'k', '[', '0', ':', '3', ']', '==', '"nb-"', ']', 'maxkey', ',', 'maxvalue', '=', 'None', ',', '0.', 'for', 'pkey', 'in', 'possible', ':', 'sequence', '=', '[', 'e', '[', '"c"', ']', 'for', 'e', 'in', 'taskdb', '.', 'entities', '[', 'pkey', ']', ']', 'state', '=', "''", '.', 'join', '(', 'cascade', '(', 'sequence', ')', ')', 'matcher', '=', 'SequenceMatcher', '(', 'a', '=', 'state', ',', 'b', '=', 'text', ')', 'ratio', '=', 'matcher', '.', 'quick_ratio', '(', ')', 'if', 'ratio', '>', 'maxvalue', 'and', 'ratio', '>', '0.5', ':', 'maxkey', ',', 'maxvalue', '=', 'pkey', ',', 'ratio', '#We expect the similarity to be at least 0.5; otherwise we decide', '#that it is a new cell.', 'if', 'maxkey', 'is', 'not', 'None', ':', 'ekey', '=', 'pkey', '_cellid_map', '[', 'cellid', ']', '=', 'ekey', 'ekey', '=', '_cellid_map', '[', 'cellid', ']', 'entry', '=', '{', '"m"', ':', '"md"', ',', '"a"', ':', 'None', ',', '"s"', ':', 'time', '(', ')', ',', '"r"', ':', 'None', ',', '"c"', ':', 'text', ',', '}', 'record', '(', 'ekey', ',', 'entry', ',', 'diff', '=', 'True', ')'] | Records the specified markdown text to the acorn database.
Args:
text (str): the *raw* markdown text entered into the cell in the ipython
notebook. | ['Records', 'the', 'specified', 'markdown', 'text', 'to', 'the', 'acorn', 'database', '.'] | train | https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/ipython.py#L488-L534 |
3,673 | brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_port_profile.py | brocade_port_profile.port_profile_restrict_flooding_container_restrict_flooding | def port_profile_restrict_flooding_container_restrict_flooding(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
restrict_flooding_container = ET.SubElement(port_profile, "restrict-flooding-container")
restrict_flooding = ET.SubElement(restrict_flooding_container, "restrict-flooding")
callback = kwargs.pop('callback', self._callback)
return callback(config) | python | def port_profile_restrict_flooding_container_restrict_flooding(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
restrict_flooding_container = ET.SubElement(port_profile, "restrict-flooding-container")
restrict_flooding = ET.SubElement(restrict_flooding_container, "restrict-flooding")
callback = kwargs.pop('callback', self._callback)
return callback(config) | ['def', 'port_profile_restrict_flooding_container_restrict_flooding', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'port_profile', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"port-profile"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-port-profile"', ')', 'name_key', '=', 'ET', '.', 'SubElement', '(', 'port_profile', ',', '"name"', ')', 'name_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'name'", ')', 'restrict_flooding_container', '=', 'ET', '.', 'SubElement', '(', 'port_profile', ',', '"restrict-flooding-container"', ')', 'restrict_flooding', '=', 'ET', '.', 'SubElement', '(', 'restrict_flooding_container', ',', '"restrict-flooding"', ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')'] | Auto Generated Code | ['Auto', 'Generated', 'Code'] | train | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_port_profile.py#L660-L671 |
3,674 | yyuu/botornado | botornado/__init__.py | connect_euca | def connect_euca(host=None, aws_access_key_id=None, aws_secret_access_key=None,
port=8773, path='/services/Eucalyptus', is_secure=False,
**kwargs):
"""
Connect to a Eucalyptus service.
:type host: string
:param host: the host name or ip address of the Eucalyptus server
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
:return: A connection to Eucalyptus server
"""
raise BotoClientError('Not Implemented') | python | def connect_euca(host=None, aws_access_key_id=None, aws_secret_access_key=None,
port=8773, path='/services/Eucalyptus', is_secure=False,
**kwargs):
"""
Connect to a Eucalyptus service.
:type host: string
:param host: the host name or ip address of the Eucalyptus server
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
:return: A connection to Eucalyptus server
"""
raise BotoClientError('Not Implemented') | ['def', 'connect_euca', '(', 'host', '=', 'None', ',', 'aws_access_key_id', '=', 'None', ',', 'aws_secret_access_key', '=', 'None', ',', 'port', '=', '8773', ',', 'path', '=', "'/services/Eucalyptus'", ',', 'is_secure', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'raise', 'BotoClientError', '(', "'Not Implemented'", ')'] | Connect to a Eucalyptus service.
:type host: string
:param host: the host name or ip address of the Eucalyptus server
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
:return: A connection to Eucalyptus server | ['Connect', 'to', 'a', 'Eucalyptus', 'service', '.'] | train | https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/botornado/__init__.py#L259-L277 |
3,675 | pycontribs/pyrax | pyrax/cloudmonitoring.py | CloudMonitorNotificationManager.create | def create(self, notification_type, label=None, name=None, details=None):
"""
Defines a notification for handling an alarm.
"""
uri = "/%s" % self.uri_base
body = {"label": label or name,
"type": utils.get_id(notification_type),
"details": details,
}
resp, resp_body = self.api.method_post(uri, body=body)
return self.get(resp.headers["x-object-id"]) | python | def create(self, notification_type, label=None, name=None, details=None):
"""
Defines a notification for handling an alarm.
"""
uri = "/%s" % self.uri_base
body = {"label": label or name,
"type": utils.get_id(notification_type),
"details": details,
}
resp, resp_body = self.api.method_post(uri, body=body)
return self.get(resp.headers["x-object-id"]) | ['def', 'create', '(', 'self', ',', 'notification_type', ',', 'label', '=', 'None', ',', 'name', '=', 'None', ',', 'details', '=', 'None', ')', ':', 'uri', '=', '"/%s"', '%', 'self', '.', 'uri_base', 'body', '=', '{', '"label"', ':', 'label', 'or', 'name', ',', '"type"', ':', 'utils', '.', 'get_id', '(', 'notification_type', ')', ',', '"details"', ':', 'details', ',', '}', 'resp', ',', 'resp_body', '=', 'self', '.', 'api', '.', 'method_post', '(', 'uri', ',', 'body', '=', 'body', ')', 'return', 'self', '.', 'get', '(', 'resp', '.', 'headers', '[', '"x-object-id"', ']', ')'] | Defines a notification for handling an alarm. | ['Defines', 'a', 'notification', 'for', 'handling', 'an', 'alarm', '.'] | train | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudmonitoring.py#L289-L299 |
3,676 | DataDog/integrations-core | sqlserver/datadog_checks/sqlserver/sqlserver.py | SQLServer._conn_string_adodbapi | def _conn_string_adodbapi(self, db_key, instance=None, conn_key=None, db_name=None):
''' Return a connection string to use with adodbapi
'''
if instance:
_, host, username, password, database, _ = self._get_access_info(instance, db_key, db_name)
elif conn_key:
_, host, username, password, database, _ = conn_key.split(":")
p = self._get_adoprovider(instance)
conn_str = 'Provider={};Data Source={};Initial Catalog={};'.format(p, host, database)
if username:
conn_str += 'User ID={};'.format(username)
if password:
conn_str += 'Password={};'.format(password)
if not username and not password:
conn_str += 'Integrated Security=SSPI;'
return conn_str | python | def _conn_string_adodbapi(self, db_key, instance=None, conn_key=None, db_name=None):
''' Return a connection string to use with adodbapi
'''
if instance:
_, host, username, password, database, _ = self._get_access_info(instance, db_key, db_name)
elif conn_key:
_, host, username, password, database, _ = conn_key.split(":")
p = self._get_adoprovider(instance)
conn_str = 'Provider={};Data Source={};Initial Catalog={};'.format(p, host, database)
if username:
conn_str += 'User ID={};'.format(username)
if password:
conn_str += 'Password={};'.format(password)
if not username and not password:
conn_str += 'Integrated Security=SSPI;'
return conn_str | ['def', '_conn_string_adodbapi', '(', 'self', ',', 'db_key', ',', 'instance', '=', 'None', ',', 'conn_key', '=', 'None', ',', 'db_name', '=', 'None', ')', ':', 'if', 'instance', ':', '_', ',', 'host', ',', 'username', ',', 'password', ',', 'database', ',', '_', '=', 'self', '.', '_get_access_info', '(', 'instance', ',', 'db_key', ',', 'db_name', ')', 'elif', 'conn_key', ':', '_', ',', 'host', ',', 'username', ',', 'password', ',', 'database', ',', '_', '=', 'conn_key', '.', 'split', '(', '":"', ')', 'p', '=', 'self', '.', '_get_adoprovider', '(', 'instance', ')', 'conn_str', '=', "'Provider={};Data Source={};Initial Catalog={};'", '.', 'format', '(', 'p', ',', 'host', ',', 'database', ')', 'if', 'username', ':', 'conn_str', '+=', "'User ID={};'", '.', 'format', '(', 'username', ')', 'if', 'password', ':', 'conn_str', '+=', "'Password={};'", '.', 'format', '(', 'password', ')', 'if', 'not', 'username', 'and', 'not', 'password', ':', 'conn_str', '+=', "'Integrated Security=SSPI;'", 'return', 'conn_str'] | Return a connection string to use with adodbapi | ['Return', 'a', 'connection', 'string', 'to', 'use', 'with', 'adodbapi'] | train | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/sqlserver/datadog_checks/sqlserver/sqlserver.py#L391-L408 |
3,677 | mushkevych/scheduler | synergy/scheduler/timetable.py | Timetable.load_tree | def load_tree(self):
""" method iterates thru all objects older than synergy_start_timeperiod parameter in job collections
and loads them into this timetable"""
timeperiod = settings.settings['synergy_start_timeperiod']
yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, timeperiod)
monthly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_MONTHLY, timeperiod)
daily_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_DAILY, timeperiod)
hourly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_HOURLY, timeperiod)
self._build_tree_by_level(QUALIFIER_HOURLY, COLLECTION_JOB_HOURLY, since=hourly_timeperiod)
self._build_tree_by_level(QUALIFIER_DAILY, COLLECTION_JOB_DAILY, since=daily_timeperiod)
self._build_tree_by_level(QUALIFIER_MONTHLY, COLLECTION_JOB_MONTHLY, since=monthly_timeperiod)
self._build_tree_by_level(QUALIFIER_YEARLY, COLLECTION_JOB_YEARLY, since=yearly_timeperiod) | python | def load_tree(self):
""" method iterates thru all objects older than synergy_start_timeperiod parameter in job collections
and loads them into this timetable"""
timeperiod = settings.settings['synergy_start_timeperiod']
yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, timeperiod)
monthly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_MONTHLY, timeperiod)
daily_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_DAILY, timeperiod)
hourly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_HOURLY, timeperiod)
self._build_tree_by_level(QUALIFIER_HOURLY, COLLECTION_JOB_HOURLY, since=hourly_timeperiod)
self._build_tree_by_level(QUALIFIER_DAILY, COLLECTION_JOB_DAILY, since=daily_timeperiod)
self._build_tree_by_level(QUALIFIER_MONTHLY, COLLECTION_JOB_MONTHLY, since=monthly_timeperiod)
self._build_tree_by_level(QUALIFIER_YEARLY, COLLECTION_JOB_YEARLY, since=yearly_timeperiod) | ['def', 'load_tree', '(', 'self', ')', ':', 'timeperiod', '=', 'settings', '.', 'settings', '[', "'synergy_start_timeperiod'", ']', 'yearly_timeperiod', '=', 'time_helper', '.', 'cast_to_time_qualifier', '(', 'QUALIFIER_YEARLY', ',', 'timeperiod', ')', 'monthly_timeperiod', '=', 'time_helper', '.', 'cast_to_time_qualifier', '(', 'QUALIFIER_MONTHLY', ',', 'timeperiod', ')', 'daily_timeperiod', '=', 'time_helper', '.', 'cast_to_time_qualifier', '(', 'QUALIFIER_DAILY', ',', 'timeperiod', ')', 'hourly_timeperiod', '=', 'time_helper', '.', 'cast_to_time_qualifier', '(', 'QUALIFIER_HOURLY', ',', 'timeperiod', ')', 'self', '.', '_build_tree_by_level', '(', 'QUALIFIER_HOURLY', ',', 'COLLECTION_JOB_HOURLY', ',', 'since', '=', 'hourly_timeperiod', ')', 'self', '.', '_build_tree_by_level', '(', 'QUALIFIER_DAILY', ',', 'COLLECTION_JOB_DAILY', ',', 'since', '=', 'daily_timeperiod', ')', 'self', '.', '_build_tree_by_level', '(', 'QUALIFIER_MONTHLY', ',', 'COLLECTION_JOB_MONTHLY', ',', 'since', '=', 'monthly_timeperiod', ')', 'self', '.', '_build_tree_by_level', '(', 'QUALIFIER_YEARLY', ',', 'COLLECTION_JOB_YEARLY', ',', 'since', '=', 'yearly_timeperiod', ')'] | method iterates thru all objects older than synergy_start_timeperiod parameter in job collections
and loads them into this timetable | ['method', 'iterates', 'thru', 'all', 'objects', 'older', 'than', 'synergy_start_timeperiod', 'parameter', 'in', 'job', 'collections', 'and', 'loads', 'them', 'into', 'this', 'timetable'] | train | https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/scheduler/timetable.py#L207-L219 |
3,678 | fake-name/WebRequest | WebRequest/WebRequestClass.py | WebGetRobust.getFileNameMime | def getFileNameMime(self, requestedUrl, *args, **kwargs):
'''
Give a requested page (note: the arguments for this call are forwarded to getpage()),
return the content at the target URL, the filename for the target content, and
the mimetype for the content at the target URL, as a 3-tuple (pgctnt, hName, mime).
The filename specified in the content-disposition header is used, if present. Otherwise,
the last section of the url path segment is treated as the filename.
'''
if 'returnMultiple' in kwargs:
raise Exceptions.ArgumentError("getFileAndName cannot be called with 'returnMultiple'", requestedUrl)
if 'soup' in kwargs and kwargs['soup']:
raise Exceptions.ArgumentError("getFileAndName contradicts the 'soup' directive!", requestedUrl)
kwargs["returnMultiple"] = True
pgctnt, pghandle = self.getpage(requestedUrl, *args, **kwargs)
info = pghandle.info()
if not 'Content-Disposition' in info:
hName = ''
elif not 'filename=' in info['Content-Disposition']:
hName = ''
else:
hName = info['Content-Disposition'].split('filename=')[1]
# Unquote filename if it's quoted.
if ((hName.startswith("'") and hName.endswith("'")) or hName.startswith('"') and hName.endswith('"')) and len(hName) >= 2:
hName = hName[1:-1]
mime = info.get_content_type()
if not hName.strip():
requestedUrl = pghandle.geturl()
hName = urllib.parse.urlsplit(requestedUrl).path.split("/")[-1].strip()
if "/" in hName:
hName = hName.split("/")[-1]
return pgctnt, hName, mime | python | def getFileNameMime(self, requestedUrl, *args, **kwargs):
'''
Give a requested page (note: the arguments for this call are forwarded to getpage()),
return the content at the target URL, the filename for the target content, and
the mimetype for the content at the target URL, as a 3-tuple (pgctnt, hName, mime).
The filename specified in the content-disposition header is used, if present. Otherwise,
the last section of the url path segment is treated as the filename.
'''
if 'returnMultiple' in kwargs:
raise Exceptions.ArgumentError("getFileAndName cannot be called with 'returnMultiple'", requestedUrl)
if 'soup' in kwargs and kwargs['soup']:
raise Exceptions.ArgumentError("getFileAndName contradicts the 'soup' directive!", requestedUrl)
kwargs["returnMultiple"] = True
pgctnt, pghandle = self.getpage(requestedUrl, *args, **kwargs)
info = pghandle.info()
if not 'Content-Disposition' in info:
hName = ''
elif not 'filename=' in info['Content-Disposition']:
hName = ''
else:
hName = info['Content-Disposition'].split('filename=')[1]
# Unquote filename if it's quoted.
if ((hName.startswith("'") and hName.endswith("'")) or hName.startswith('"') and hName.endswith('"')) and len(hName) >= 2:
hName = hName[1:-1]
mime = info.get_content_type()
if not hName.strip():
requestedUrl = pghandle.geturl()
hName = urllib.parse.urlsplit(requestedUrl).path.split("/")[-1].strip()
if "/" in hName:
hName = hName.split("/")[-1]
return pgctnt, hName, mime | ['def', 'getFileNameMime', '(', 'self', ',', 'requestedUrl', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', "'returnMultiple'", 'in', 'kwargs', ':', 'raise', 'Exceptions', '.', 'ArgumentError', '(', '"getFileAndName cannot be called with \'returnMultiple\'"', ',', 'requestedUrl', ')', 'if', "'soup'", 'in', 'kwargs', 'and', 'kwargs', '[', "'soup'", ']', ':', 'raise', 'Exceptions', '.', 'ArgumentError', '(', '"getFileAndName contradicts the \'soup\' directive!"', ',', 'requestedUrl', ')', 'kwargs', '[', '"returnMultiple"', ']', '=', 'True', 'pgctnt', ',', 'pghandle', '=', 'self', '.', 'getpage', '(', 'requestedUrl', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'info', '=', 'pghandle', '.', 'info', '(', ')', 'if', 'not', "'Content-Disposition'", 'in', 'info', ':', 'hName', '=', "''", 'elif', 'not', "'filename='", 'in', 'info', '[', "'Content-Disposition'", ']', ':', 'hName', '=', "''", 'else', ':', 'hName', '=', 'info', '[', "'Content-Disposition'", ']', '.', 'split', '(', "'filename='", ')', '[', '1', ']', "# Unquote filename if it's quoted.", 'if', '(', '(', 'hName', '.', 'startswith', '(', '"\'"', ')', 'and', 'hName', '.', 'endswith', '(', '"\'"', ')', ')', 'or', 'hName', '.', 'startswith', '(', '\'"\'', ')', 'and', 'hName', '.', 'endswith', '(', '\'"\'', ')', ')', 'and', 'len', '(', 'hName', ')', '>=', '2', ':', 'hName', '=', 'hName', '[', '1', ':', '-', '1', ']', 'mime', '=', 'info', '.', 'get_content_type', '(', ')', 'if', 'not', 'hName', '.', 'strip', '(', ')', ':', 'requestedUrl', '=', 'pghandle', '.', 'geturl', '(', ')', 'hName', '=', 'urllib', '.', 'parse', '.', 'urlsplit', '(', 'requestedUrl', ')', '.', 'path', '.', 'split', '(', '"/"', ')', '[', '-', '1', ']', '.', 'strip', '(', ')', 'if', '"/"', 'in', 'hName', ':', 'hName', '=', 'hName', '.', 'split', '(', '"/"', ')', '[', '-', '1', ']', 'return', 'pgctnt', ',', 'hName', ',', 'mime'] | Give a requested page (note: the arguments for this call are forwarded to getpage()),
return the content at the target URL, the filename for the target content, and
the mimetype for the content at the target URL, as a 3-tuple (pgctnt, hName, mime).
The filename specified in the content-disposition header is used, if present. Otherwise,
the last section of the url path segment is treated as the filename. | ['Give', 'a', 'requested', 'page', '(', 'note', ':', 'the', 'arguments', 'for', 'this', 'call', 'are', 'forwarded', 'to', 'getpage', '()', ')', 'return', 'the', 'content', 'at', 'the', 'target', 'URL', 'the', 'filename', 'for', 'the', 'target', 'content', 'and', 'the', 'mimetype', 'for', 'the', 'content', 'at', 'the', 'target', 'URL', 'as', 'a', '3', '-', 'tuple', '(', 'pgctnt', 'hName', 'mime', ')', '.'] | train | https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/WebRequestClass.py#L292-L334 |
3,679 | kajala/django-jutil | jutil/dates.py | this_month | def this_month(today: datetime=None, tz=None):
"""
Returns current month begin (inclusive) and end (exclusive).
:param today: Some date in the month (defaults current datetime)
:param tz: Timezone (defaults pytz UTC)
:return: begin (inclusive), end (exclusive)
"""
if today is None:
today = datetime.utcnow()
begin = datetime(day=1, month=today.month, year=today.year)
end = begin + timedelta(days=32)
end = datetime(day=1, month=end.month, year=end.year)
return localize_time_range(begin, end, tz) | python | def this_month(today: datetime=None, tz=None):
"""
Returns current month begin (inclusive) and end (exclusive).
:param today: Some date in the month (defaults current datetime)
:param tz: Timezone (defaults pytz UTC)
:return: begin (inclusive), end (exclusive)
"""
if today is None:
today = datetime.utcnow()
begin = datetime(day=1, month=today.month, year=today.year)
end = begin + timedelta(days=32)
end = datetime(day=1, month=end.month, year=end.year)
return localize_time_range(begin, end, tz) | ['def', 'this_month', '(', 'today', ':', 'datetime', '=', 'None', ',', 'tz', '=', 'None', ')', ':', 'if', 'today', 'is', 'None', ':', 'today', '=', 'datetime', '.', 'utcnow', '(', ')', 'begin', '=', 'datetime', '(', 'day', '=', '1', ',', 'month', '=', 'today', '.', 'month', ',', 'year', '=', 'today', '.', 'year', ')', 'end', '=', 'begin', '+', 'timedelta', '(', 'days', '=', '32', ')', 'end', '=', 'datetime', '(', 'day', '=', '1', ',', 'month', '=', 'end', '.', 'month', ',', 'year', '=', 'end', '.', 'year', ')', 'return', 'localize_time_range', '(', 'begin', ',', 'end', ',', 'tz', ')'] | Returns current month begin (inclusive) and end (exclusive).
:param today: Some date in the month (defaults current datetime)
:param tz: Timezone (defaults pytz UTC)
:return: begin (inclusive), end (exclusive) | ['Returns', 'current', 'month', 'begin', '(', 'inclusive', ')', 'and', 'end', '(', 'exclusive', ')', '.', ':', 'param', 'today', ':', 'Some', 'date', 'in', 'the', 'month', '(', 'defaults', 'current', 'datetime', ')', ':', 'param', 'tz', ':', 'Timezone', '(', 'defaults', 'pytz', 'UTC', ')', ':', 'return', ':', 'begin', '(', 'inclusive', ')', 'end', '(', 'exclusive', ')'] | train | https://github.com/kajala/django-jutil/blob/2abd93ebad51042744eaeb1ee1074ed0eb55ad0c/jutil/dates.py#L76-L88 |
3,680 | santoshphilip/eppy | eppy/EPlusInterfaceFunctions/mylib2.py | tabfile2doefile | def tabfile2doefile(tabfile, doefile):
"""tabfile2doefile"""
alist = tabfile2list(tabfile)
astr = list2doe(alist)
mylib1.write_str2file(doefile, astr) | python | def tabfile2doefile(tabfile, doefile):
"""tabfile2doefile"""
alist = tabfile2list(tabfile)
astr = list2doe(alist)
mylib1.write_str2file(doefile, astr) | ['def', 'tabfile2doefile', '(', 'tabfile', ',', 'doefile', ')', ':', 'alist', '=', 'tabfile2list', '(', 'tabfile', ')', 'astr', '=', 'list2doe', '(', 'alist', ')', 'mylib1', '.', 'write_str2file', '(', 'doefile', ',', 'astr', ')'] | tabfile2doefile | ['tabfile2doefile'] | train | https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib2.py#L90-L94 |
3,681 | tradenity/python-sdk | tradenity/resources/country.py | Country.get_country_by_id | def get_country_by_id(cls, country_id, **kwargs):
"""Find Country
Return single instance of Country by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_country_by_id(country_id, async=True)
>>> result = thread.get()
:param async bool
:param str country_id: ID of country to return (required)
:return: Country
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_country_by_id_with_http_info(country_id, **kwargs)
else:
(data) = cls._get_country_by_id_with_http_info(country_id, **kwargs)
return data | python | def get_country_by_id(cls, country_id, **kwargs):
"""Find Country
Return single instance of Country by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_country_by_id(country_id, async=True)
>>> result = thread.get()
:param async bool
:param str country_id: ID of country to return (required)
:return: Country
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_country_by_id_with_http_info(country_id, **kwargs)
else:
(data) = cls._get_country_by_id_with_http_info(country_id, **kwargs)
return data | ['def', 'get_country_by_id', '(', 'cls', ',', 'country_id', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'async'", ')', ':', 'return', 'cls', '.', '_get_country_by_id_with_http_info', '(', 'country_id', ',', '*', '*', 'kwargs', ')', 'else', ':', '(', 'data', ')', '=', 'cls', '.', '_get_country_by_id_with_http_info', '(', 'country_id', ',', '*', '*', 'kwargs', ')', 'return', 'data'] | Find Country
Return single instance of Country by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_country_by_id(country_id, async=True)
>>> result = thread.get()
:param async bool
:param str country_id: ID of country to return (required)
:return: Country
If the method is called asynchronously,
returns the request thread. | ['Find', 'Country'] | train | https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/country.py#L599-L619 |
3,682 | lovvskillz/python-discord-webhook | discord_webhook/webhook.py | DiscordEmbed.set_thumbnail | def set_thumbnail(self, **kwargs):
"""
set thumbnail of embed
:keyword url: source url of thumbnail (only supports http(s) and attachments)
:keyword proxy_url: a proxied thumbnail of the image
:keyword height: height of thumbnail
:keyword width: width of thumbnail
"""
self.thumbnail = {
'url': kwargs.get('url'),
'proxy_url': kwargs.get('proxy_url'),
'height': kwargs.get('height'),
'width': kwargs.get('width'),
} | python | def set_thumbnail(self, **kwargs):
"""
set thumbnail of embed
:keyword url: source url of thumbnail (only supports http(s) and attachments)
:keyword proxy_url: a proxied thumbnail of the image
:keyword height: height of thumbnail
:keyword width: width of thumbnail
"""
self.thumbnail = {
'url': kwargs.get('url'),
'proxy_url': kwargs.get('proxy_url'),
'height': kwargs.get('height'),
'width': kwargs.get('width'),
} | ['def', 'set_thumbnail', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'self', '.', 'thumbnail', '=', '{', "'url'", ':', 'kwargs', '.', 'get', '(', "'url'", ')', ',', "'proxy_url'", ':', 'kwargs', '.', 'get', '(', "'proxy_url'", ')', ',', "'height'", ':', 'kwargs', '.', 'get', '(', "'height'", ')', ',', "'width'", ':', 'kwargs', '.', 'get', '(', "'width'", ')', ',', '}'] | set thumbnail of embed
:keyword url: source url of thumbnail (only supports http(s) and attachments)
:keyword proxy_url: a proxied thumbnail of the image
:keyword height: height of thumbnail
:keyword width: width of thumbnail | ['set', 'thumbnail', 'of', 'embed', ':', 'keyword', 'url', ':', 'source', 'url', 'of', 'thumbnail', '(', 'only', 'supports', 'http', '(', 's', ')', 'and', 'attachments', ')', ':', 'keyword', 'proxy_url', ':', 'a', 'proxied', 'thumbnail', 'of', 'the', 'image', ':', 'keyword', 'height', ':', 'height', 'of', 'thumbnail', ':', 'keyword', 'width', ':', 'width', 'of', 'thumbnail'] | train | https://github.com/lovvskillz/python-discord-webhook/blob/5278184078c9da9362b6343c478a92e0904a7f83/discord_webhook/webhook.py#L205-L218 |
3,683 | sailthru/sailthru-python-client | sailthru/sailthru_client.py | SailthruClient.multi_send | def multi_send(self, template, emails, _vars=None, evars=None, schedule_time=None, options=None):
"""
Remotely send an email template to multiple email addresses.
http://docs.sailthru.com/api/send
@param template: template string
@param emails: List with email values or comma separated email string
@param _vars: a key/value hash of the replacement vars to use in the send. Each var may be referenced as {varname} within the template itself
@param options: optional dictionary to include replyto and/or test keys
@param schedule_time: do not send the email immediately, but at some point in the future. Any date recognized by PHP's strtotime function is valid, but be sure to specify timezone or use a UTC time to avoid confusion
"""
_vars = _vars or {}
evars = evars or {}
options = options or {}
data = {'template': template,
'email': ','.join(emails) if isinstance(emails, list) else emails,
'vars': _vars.copy(),
'evars': evars.copy(),
'options': options.copy()}
if schedule_time is not None:
data['schedule_time'] = schedule_time
return self.api_post('send', data) | python | def multi_send(self, template, emails, _vars=None, evars=None, schedule_time=None, options=None):
"""
Remotely send an email template to multiple email addresses.
http://docs.sailthru.com/api/send
@param template: template string
@param emails: List with email values or comma separated email string
@param _vars: a key/value hash of the replacement vars to use in the send. Each var may be referenced as {varname} within the template itself
@param options: optional dictionary to include replyto and/or test keys
@param schedule_time: do not send the email immediately, but at some point in the future. Any date recognized by PHP's strtotime function is valid, but be sure to specify timezone or use a UTC time to avoid confusion
"""
_vars = _vars or {}
evars = evars or {}
options = options or {}
data = {'template': template,
'email': ','.join(emails) if isinstance(emails, list) else emails,
'vars': _vars.copy(),
'evars': evars.copy(),
'options': options.copy()}
if schedule_time is not None:
data['schedule_time'] = schedule_time
return self.api_post('send', data) | ['def', 'multi_send', '(', 'self', ',', 'template', ',', 'emails', ',', '_vars', '=', 'None', ',', 'evars', '=', 'None', ',', 'schedule_time', '=', 'None', ',', 'options', '=', 'None', ')', ':', '_vars', '=', '_vars', 'or', '{', '}', 'evars', '=', 'evars', 'or', '{', '}', 'options', '=', 'options', 'or', '{', '}', 'data', '=', '{', "'template'", ':', 'template', ',', "'email'", ':', "','", '.', 'join', '(', 'emails', ')', 'if', 'isinstance', '(', 'emails', ',', 'list', ')', 'else', 'emails', ',', "'vars'", ':', '_vars', '.', 'copy', '(', ')', ',', "'evars'", ':', 'evars', '.', 'copy', '(', ')', ',', "'options'", ':', 'options', '.', 'copy', '(', ')', '}', 'if', 'schedule_time', 'is', 'not', 'None', ':', 'data', '[', "'schedule_time'", ']', '=', 'schedule_time', 'return', 'self', '.', 'api_post', '(', "'send'", ',', 'data', ')'] | Remotely send an email template to multiple email addresses.
http://docs.sailthru.com/api/send
@param template: template string
@param emails: List with email values or comma separated email string
@param _vars: a key/value hash of the replacement vars to use in the send. Each var may be referenced as {varname} within the template itself
@param options: optional dictionary to include replyto and/or test keys
@param schedule_time: do not send the email immediately, but at some point in the future. Any date recognized by PHP's strtotime function is valid, but be sure to specify timezone or use a UTC time to avoid confusion | ['Remotely', 'send', 'an', 'email', 'template', 'to', 'multiple', 'email', 'addresses', '.', 'http', ':', '//', 'docs', '.', 'sailthru', '.', 'com', '/', 'api', '/', 'send'] | train | https://github.com/sailthru/sailthru-python-client/blob/22aa39ba0c5bddd7b8743e24ada331128c0f4f54/sailthru/sailthru_client.py#L88-L108 |
3,684 | cqparts/cqparts | src/cqparts_template/catalogue/scripts/build.py | _relative_path_to | def _relative_path_to(path_list, filename):
"""Get a neat relative path to files relative to the CWD"""
return os.path.join(
os.path.relpath(os.path.join(*path_list), os.getcwd()),
filename
) | python | def _relative_path_to(path_list, filename):
"""Get a neat relative path to files relative to the CWD"""
return os.path.join(
os.path.relpath(os.path.join(*path_list), os.getcwd()),
filename
) | ['def', '_relative_path_to', '(', 'path_list', ',', 'filename', ')', ':', 'return', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'path', '.', 'relpath', '(', 'os', '.', 'path', '.', 'join', '(', '*', 'path_list', ')', ',', 'os', '.', 'getcwd', '(', ')', ')', ',', 'filename', ')'] | Get a neat relative path to files relative to the CWD | ['Get', 'a', 'neat', 'relative', 'path', 'to', 'files', 'relative', 'to', 'the', 'CWD'] | train | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts_template/catalogue/scripts/build.py#L21-L26 |
3,685 | pyvisa/pyvisa | pyvisa/resources/resource.py | Resource.lock_context | def lock_context(self, timeout='default', requested_key='exclusive'):
"""A context that locks
:param timeout: Absolute time period (in milliseconds) that a resource
waits to get unlocked by the locking session before
returning an error. (Defaults to self.timeout)
:param requested_key: When using default of 'exclusive' the lock
is an exclusive lock.
Otherwise it is the access key for the shared lock or
None to generate a new shared access key.
The returned context is the access_key if applicable.
"""
if requested_key == 'exclusive':
self.lock_excl(timeout)
access_key = None
else:
access_key = self.lock(timeout, requested_key)
try:
yield access_key
finally:
self.unlock() | python | def lock_context(self, timeout='default', requested_key='exclusive'):
"""A context that locks
:param timeout: Absolute time period (in milliseconds) that a resource
waits to get unlocked by the locking session before
returning an error. (Defaults to self.timeout)
:param requested_key: When using default of 'exclusive' the lock
is an exclusive lock.
Otherwise it is the access key for the shared lock or
None to generate a new shared access key.
The returned context is the access_key if applicable.
"""
if requested_key == 'exclusive':
self.lock_excl(timeout)
access_key = None
else:
access_key = self.lock(timeout, requested_key)
try:
yield access_key
finally:
self.unlock() | ['def', 'lock_context', '(', 'self', ',', 'timeout', '=', "'default'", ',', 'requested_key', '=', "'exclusive'", ')', ':', 'if', 'requested_key', '==', "'exclusive'", ':', 'self', '.', 'lock_excl', '(', 'timeout', ')', 'access_key', '=', 'None', 'else', ':', 'access_key', '=', 'self', '.', 'lock', '(', 'timeout', ',', 'requested_key', ')', 'try', ':', 'yield', 'access_key', 'finally', ':', 'self', '.', 'unlock', '(', ')'] | A context that locks
:param timeout: Absolute time period (in milliseconds) that a resource
waits to get unlocked by the locking session before
returning an error. (Defaults to self.timeout)
:param requested_key: When using default of 'exclusive' the lock
is an exclusive lock.
Otherwise it is the access key for the shared lock or
None to generate a new shared access key.
The returned context is the access_key if applicable. | ['A', 'context', 'that', 'locks'] | train | https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/resources/resource.py#L385-L407 |
3,686 | joeyespo/grip | grip/renderers.py | OfflineRenderer.render | def render(self, text, auth=None):
"""
Renders the specified markdown content and embedded styles.
"""
if markdown is None:
import markdown
if UrlizeExtension is None:
from .mdx_urlize import UrlizeExtension
return markdown.markdown(text, extensions=[
'fenced_code',
'codehilite(css_class=highlight)',
'toc',
'tables',
'sane_lists',
UrlizeExtension(),
]) | python | def render(self, text, auth=None):
"""
Renders the specified markdown content and embedded styles.
"""
if markdown is None:
import markdown
if UrlizeExtension is None:
from .mdx_urlize import UrlizeExtension
return markdown.markdown(text, extensions=[
'fenced_code',
'codehilite(css_class=highlight)',
'toc',
'tables',
'sane_lists',
UrlizeExtension(),
]) | ['def', 'render', '(', 'self', ',', 'text', ',', 'auth', '=', 'None', ')', ':', 'if', 'markdown', 'is', 'None', ':', 'import', 'markdown', 'if', 'UrlizeExtension', 'is', 'None', ':', 'from', '.', 'mdx_urlize', 'import', 'UrlizeExtension', 'return', 'markdown', '.', 'markdown', '(', 'text', ',', 'extensions', '=', '[', "'fenced_code'", ',', "'codehilite(css_class=highlight)'", ',', "'toc'", ',', "'tables'", ',', "'sane_lists'", ',', 'UrlizeExtension', '(', ')', ',', ']', ')'] | Renders the specified markdown content and embedded styles. | ['Renders', 'the', 'specified', 'markdown', 'content', 'and', 'embedded', 'styles', '.'] | train | https://github.com/joeyespo/grip/blob/ce933ccc4ca8e0d3718f271c59bd530a4518bf63/grip/renderers.py#L95-L110 |
3,687 | rameshg87/pyremotevbox | pyremotevbox/ZSI/wstools/c14n.py | _utilized | def _utilized(n, node, other_attrs, unsuppressedPrefixes):
'''_utilized(n, node, other_attrs, unsuppressedPrefixes) -> boolean
Return true if that nodespace is utilized within the node'''
if n.startswith('xmlns:'):
n = n[6:]
elif n.startswith('xmlns'):
n = n[5:]
if (n=="" and node.prefix in ["#default", None]) or \
n == node.prefix or n in unsuppressedPrefixes:
return 1
for attr in other_attrs:
if n == attr.prefix: return 1
# For exclusive need to look at attributes
if unsuppressedPrefixes is not None:
for attr in _attrs(node):
if n == attr.prefix: return 1
return 0 | python | def _utilized(n, node, other_attrs, unsuppressedPrefixes):
'''_utilized(n, node, other_attrs, unsuppressedPrefixes) -> boolean
Return true if that nodespace is utilized within the node'''
if n.startswith('xmlns:'):
n = n[6:]
elif n.startswith('xmlns'):
n = n[5:]
if (n=="" and node.prefix in ["#default", None]) or \
n == node.prefix or n in unsuppressedPrefixes:
return 1
for attr in other_attrs:
if n == attr.prefix: return 1
# For exclusive need to look at attributes
if unsuppressedPrefixes is not None:
for attr in _attrs(node):
if n == attr.prefix: return 1
return 0 | ['def', '_utilized', '(', 'n', ',', 'node', ',', 'other_attrs', ',', 'unsuppressedPrefixes', ')', ':', 'if', 'n', '.', 'startswith', '(', "'xmlns:'", ')', ':', 'n', '=', 'n', '[', '6', ':', ']', 'elif', 'n', '.', 'startswith', '(', "'xmlns'", ')', ':', 'n', '=', 'n', '[', '5', ':', ']', 'if', '(', 'n', '==', '""', 'and', 'node', '.', 'prefix', 'in', '[', '"#default"', ',', 'None', ']', ')', 'or', 'n', '==', 'node', '.', 'prefix', 'or', 'n', 'in', 'unsuppressedPrefixes', ':', 'return', '1', 'for', 'attr', 'in', 'other_attrs', ':', 'if', 'n', '==', 'attr', '.', 'prefix', ':', 'return', '1', '# For exclusive need to look at attributes', 'if', 'unsuppressedPrefixes', 'is', 'not', 'None', ':', 'for', 'attr', 'in', '_attrs', '(', 'node', ')', ':', 'if', 'n', '==', 'attr', '.', 'prefix', ':', 'return', '1', 'return', '0'] | _utilized(n, node, other_attrs, unsuppressedPrefixes) -> boolean
Return true if that nodespace is utilized within the node | ['_utilized', '(', 'n', 'node', 'other_attrs', 'unsuppressedPrefixes', ')', '-', '>', 'boolean', 'Return', 'true', 'if', 'that', 'nodespace', 'is', 'utilized', 'within', 'the', 'node'] | train | https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/c14n.py#L91-L108 |
3,688 | EventTeam/beliefs | src/beliefs/cells/colors.py | RGBColorCell.from_name | def from_name(clz, name):
"""
Instantiates the object from a known name
"""
if isinstance(name, list) and "green" in name:
name = "teal"
assert name in COLOR_NAMES, 'Unknown color name'
r, b, g = COLOR_NAMES[name]
return clz(r, b, g) | python | def from_name(clz, name):
"""
Instantiates the object from a known name
"""
if isinstance(name, list) and "green" in name:
name = "teal"
assert name in COLOR_NAMES, 'Unknown color name'
r, b, g = COLOR_NAMES[name]
return clz(r, b, g) | ['def', 'from_name', '(', 'clz', ',', 'name', ')', ':', 'if', 'isinstance', '(', 'name', ',', 'list', ')', 'and', '"green"', 'in', 'name', ':', 'name', '=', '"teal"', 'assert', 'name', 'in', 'COLOR_NAMES', ',', "'Unknown color name'", 'r', ',', 'b', ',', 'g', '=', 'COLOR_NAMES', '[', 'name', ']', 'return', 'clz', '(', 'r', ',', 'b', ',', 'g', ')'] | Instantiates the object from a known name | ['Instantiates', 'the', 'object', 'from', 'a', 'known', 'name'] | train | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/cells/colors.py#L27-L35 |
3,689 | ultrabug/py3status | py3status/command.py | CommandServer.run | def run(self):
"""
Main thread listen to socket and send any commands to the
CommandRunner.
"""
while True:
try:
data = None
# Wait for a connection
if self.debug:
self.py3_wrapper.log("waiting for a connection")
connection, client_address = self.sock.accept()
try:
if self.debug:
self.py3_wrapper.log("connection from")
data = connection.recv(MAX_SIZE)
if data:
data = json.loads(data.decode("utf-8"))
if self.debug:
self.py3_wrapper.log(u"received %s" % data)
self.command_runner.run_command(data)
finally:
# Clean up the connection
connection.close()
except Exception:
if data:
self.py3_wrapper.log("Command error")
self.py3_wrapper.log(data)
self.py3_wrapper.report_exception("command failed") | python | def run(self):
"""
Main thread listen to socket and send any commands to the
CommandRunner.
"""
while True:
try:
data = None
# Wait for a connection
if self.debug:
self.py3_wrapper.log("waiting for a connection")
connection, client_address = self.sock.accept()
try:
if self.debug:
self.py3_wrapper.log("connection from")
data = connection.recv(MAX_SIZE)
if data:
data = json.loads(data.decode("utf-8"))
if self.debug:
self.py3_wrapper.log(u"received %s" % data)
self.command_runner.run_command(data)
finally:
# Clean up the connection
connection.close()
except Exception:
if data:
self.py3_wrapper.log("Command error")
self.py3_wrapper.log(data)
self.py3_wrapper.report_exception("command failed") | ['def', 'run', '(', 'self', ')', ':', 'while', 'True', ':', 'try', ':', 'data', '=', 'None', '# Wait for a connection', 'if', 'self', '.', 'debug', ':', 'self', '.', 'py3_wrapper', '.', 'log', '(', '"waiting for a connection"', ')', 'connection', ',', 'client_address', '=', 'self', '.', 'sock', '.', 'accept', '(', ')', 'try', ':', 'if', 'self', '.', 'debug', ':', 'self', '.', 'py3_wrapper', '.', 'log', '(', '"connection from"', ')', 'data', '=', 'connection', '.', 'recv', '(', 'MAX_SIZE', ')', 'if', 'data', ':', 'data', '=', 'json', '.', 'loads', '(', 'data', '.', 'decode', '(', '"utf-8"', ')', ')', 'if', 'self', '.', 'debug', ':', 'self', '.', 'py3_wrapper', '.', 'log', '(', 'u"received %s"', '%', 'data', ')', 'self', '.', 'command_runner', '.', 'run_command', '(', 'data', ')', 'finally', ':', '# Clean up the connection', 'connection', '.', 'close', '(', ')', 'except', 'Exception', ':', 'if', 'data', ':', 'self', '.', 'py3_wrapper', '.', 'log', '(', '"Command error"', ')', 'self', '.', 'py3_wrapper', '.', 'log', '(', 'data', ')', 'self', '.', 'py3_wrapper', '.', 'report_exception', '(', '"command failed"', ')'] | Main thread listen to socket and send any commands to the
CommandRunner. | ['Main', 'thread', 'listen', 'to', 'socket', 'and', 'send', 'any', 'commands', 'to', 'the', 'CommandRunner', '.'] | train | https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/command.py#L264-L294 |
3,690 | RockFeng0/rtsf | rtsf/p_report.py | HtmlReporter.add_report_data | def add_report_data(list_all=[], module_name="TestModule", **kwargs):
''' add report data to a list
@param list_all: a list which save the report data
@param module_name: test set name or test module name
@param kwargs: such as
case_name: testcase name
status: test result, Pass or Fail
resp_tester: responsible tester who write this case
tester: tester who execute the test
start_at: tester run this case at time
end_at: tester stop this case at time
'''
start_at = kwargs.get("start_at")
case_name = kwargs.get("case_name","TestCase")
raw_case_name = kwargs.get("raw_case_name","TestCase")
exec_date_time = time.localtime(start_at)
execdate = time.strftime("%Y-%m-%d",exec_date_time)
exectime = time.strftime("%H:%M:%S",exec_date_time)
_case_report = {
'resp_tester': kwargs.get("resp_tester","administrator"),
'tester': kwargs.get("tester","administrator"),
'case_name': case_name,
'raw_case_name': raw_case_name,
'status': kwargs.get("status","Pass"),
'exec_date': execdate,
'exec_time': exectime,
'start_at': start_at,
'end_at': kwargs.get("end_at"),
}
for module in list_all:
if module_name != module["Name"]:
continue
for case in module["TestCases"]:
if raw_case_name == case["raw_case_name"]:
case.update(_case_report)
return list_all
module["TestCases"].append(_case_report)
return list_all
list_all.append({"Name": module_name, "TestCases": [_case_report]})
return list_all | python | def add_report_data(list_all=[], module_name="TestModule", **kwargs):
''' add report data to a list
@param list_all: a list which save the report data
@param module_name: test set name or test module name
@param kwargs: such as
case_name: testcase name
status: test result, Pass or Fail
resp_tester: responsible tester who write this case
tester: tester who execute the test
start_at: tester run this case at time
end_at: tester stop this case at time
'''
start_at = kwargs.get("start_at")
case_name = kwargs.get("case_name","TestCase")
raw_case_name = kwargs.get("raw_case_name","TestCase")
exec_date_time = time.localtime(start_at)
execdate = time.strftime("%Y-%m-%d",exec_date_time)
exectime = time.strftime("%H:%M:%S",exec_date_time)
_case_report = {
'resp_tester': kwargs.get("resp_tester","administrator"),
'tester': kwargs.get("tester","administrator"),
'case_name': case_name,
'raw_case_name': raw_case_name,
'status': kwargs.get("status","Pass"),
'exec_date': execdate,
'exec_time': exectime,
'start_at': start_at,
'end_at': kwargs.get("end_at"),
}
for module in list_all:
if module_name != module["Name"]:
continue
for case in module["TestCases"]:
if raw_case_name == case["raw_case_name"]:
case.update(_case_report)
return list_all
module["TestCases"].append(_case_report)
return list_all
list_all.append({"Name": module_name, "TestCases": [_case_report]})
return list_all | ['def', 'add_report_data', '(', 'list_all', '=', '[', ']', ',', 'module_name', '=', '"TestModule"', ',', '*', '*', 'kwargs', ')', ':', 'start_at', '=', 'kwargs', '.', 'get', '(', '"start_at"', ')', 'case_name', '=', 'kwargs', '.', 'get', '(', '"case_name"', ',', '"TestCase"', ')', 'raw_case_name', '=', 'kwargs', '.', 'get', '(', '"raw_case_name"', ',', '"TestCase"', ')', 'exec_date_time', '=', 'time', '.', 'localtime', '(', 'start_at', ')', 'execdate', '=', 'time', '.', 'strftime', '(', '"%Y-%m-%d"', ',', 'exec_date_time', ')', 'exectime', '=', 'time', '.', 'strftime', '(', '"%H:%M:%S"', ',', 'exec_date_time', ')', '_case_report', '=', '{', "'resp_tester'", ':', 'kwargs', '.', 'get', '(', '"resp_tester"', ',', '"administrator"', ')', ',', "'tester'", ':', 'kwargs', '.', 'get', '(', '"tester"', ',', '"administrator"', ')', ',', "'case_name'", ':', 'case_name', ',', "'raw_case_name'", ':', 'raw_case_name', ',', "'status'", ':', 'kwargs', '.', 'get', '(', '"status"', ',', '"Pass"', ')', ',', "'exec_date'", ':', 'execdate', ',', "'exec_time'", ':', 'exectime', ',', "'start_at'", ':', 'start_at', ',', "'end_at'", ':', 'kwargs', '.', 'get', '(', '"end_at"', ')', ',', '}', 'for', 'module', 'in', 'list_all', ':', 'if', 'module_name', '!=', 'module', '[', '"Name"', ']', ':', 'continue', 'for', 'case', 'in', 'module', '[', '"TestCases"', ']', ':', 'if', 'raw_case_name', '==', 'case', '[', '"raw_case_name"', ']', ':', 'case', '.', 'update', '(', '_case_report', ')', 'return', 'list_all', 'module', '[', '"TestCases"', ']', '.', 'append', '(', '_case_report', ')', 'return', 'list_all', 'list_all', '.', 'append', '(', '{', '"Name"', ':', 'module_name', ',', '"TestCases"', ':', '[', '_case_report', ']', '}', ')', 'return', 'list_all'] | add report data to a list
@param list_all: a list which save the report data
@param module_name: test set name or test module name
@param kwargs: such as
case_name: testcase name
status: test result, Pass or Fail
resp_tester: responsible tester who write this case
tester: tester who execute the test
start_at: tester run this case at time
end_at: tester stop this case at time | ['add', 'report', 'data', 'to', 'a', 'list'] | train | https://github.com/RockFeng0/rtsf/blob/fbc0d57edaeca86418af3942472fcc6d3e9ce591/rtsf/p_report.py#L196-L241 |
3,691 | tariqdaouda/rabaDB | rabaDB/Raba.py | RabaListPupa._attachToObject | def _attachToObject(self, anchorObj, relationName) :
"dummy fct for compatibility reasons, a RabaListPupa is attached by default"
#MutableSequence.__getattribute__(self, "develop")()
self.develop()
self._attachToObject(anchorObj, relationName) | python | def _attachToObject(self, anchorObj, relationName) :
"dummy fct for compatibility reasons, a RabaListPupa is attached by default"
#MutableSequence.__getattribute__(self, "develop")()
self.develop()
self._attachToObject(anchorObj, relationName) | ['def', '_attachToObject', '(', 'self', ',', 'anchorObj', ',', 'relationName', ')', ':', '#MutableSequence.__getattribute__(self, "develop")()', 'self', '.', 'develop', '(', ')', 'self', '.', '_attachToObject', '(', 'anchorObj', ',', 'relationName', ')'] | dummy fct for compatibility reasons, a RabaListPupa is attached by default | ['dummy', 'fct', 'for', 'compatibility', 'reasons', 'a', 'RabaListPupa', 'is', 'attached', 'by', 'default'] | train | https://github.com/tariqdaouda/rabaDB/blob/42e0d6ee65149ae4f1e4c380cc695a9e7d2d1bbc/rabaDB/Raba.py#L739-L743 |
3,692 | ga4gh/ga4gh-server | ga4gh/server/backend.py | Backend.runSearchVariantAnnotationSets | def runSearchVariantAnnotationSets(self, request):
"""
Runs the specified SearchVariantAnnotationSetsRequest.
"""
return self.runSearchRequest(
request, protocol.SearchVariantAnnotationSetsRequest,
protocol.SearchVariantAnnotationSetsResponse,
self.variantAnnotationSetsGenerator) | python | def runSearchVariantAnnotationSets(self, request):
"""
Runs the specified SearchVariantAnnotationSetsRequest.
"""
return self.runSearchRequest(
request, protocol.SearchVariantAnnotationSetsRequest,
protocol.SearchVariantAnnotationSetsResponse,
self.variantAnnotationSetsGenerator) | ['def', 'runSearchVariantAnnotationSets', '(', 'self', ',', 'request', ')', ':', 'return', 'self', '.', 'runSearchRequest', '(', 'request', ',', 'protocol', '.', 'SearchVariantAnnotationSetsRequest', ',', 'protocol', '.', 'SearchVariantAnnotationSetsResponse', ',', 'self', '.', 'variantAnnotationSetsGenerator', ')'] | Runs the specified SearchVariantAnnotationSetsRequest. | ['Runs', 'the', 'specified', 'SearchVariantAnnotationSetsRequest', '.'] | train | https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/backend.py#L946-L953 |
3,693 | automl/HpBandSter | hpbandster/examples/example_5_pytorch_worker.py | PyTorchWorker.compute | def compute(self, config, budget, working_directory, *args, **kwargs):
"""
Simple example for a compute function using a feed forward network.
It is trained on the MNIST dataset.
The input parameter "config" (dictionary) contains the sampled configurations passed by the bohb optimizer
"""
# device = torch.device('cpu')
model = MNISTConvNet(num_conv_layers=config['num_conv_layers'],
num_filters_1=config['num_filters_1'],
num_filters_2=config['num_filters_2'] if 'num_filters_2' in config else None,
num_filters_3=config['num_filters_3'] if 'num_filters_3' in config else None,
dropout_rate=config['dropout_rate'],
num_fc_units=config['num_fc_units'],
kernel_size=3
)
criterion = torch.nn.CrossEntropyLoss()
if config['optimizer'] == 'Adam':
optimizer = torch.optim.Adam(model.parameters(), lr=config['lr'])
else:
optimizer = torch.optim.SGD(model.parameters(), lr=config['lr'], momentum=config['sgd_momentum'])
for epoch in range(int(budget)):
loss = 0
model.train()
for i, (x, y) in enumerate(self.train_loader):
optimizer.zero_grad()
output = model(x)
loss = F.nll_loss(output, y)
loss.backward()
optimizer.step()
train_accuracy = self.evaluate_accuracy(model, self.train_loader)
validation_accuracy = self.evaluate_accuracy(model, self.validation_loader)
test_accuracy = self.evaluate_accuracy(model, self.test_loader)
return ({
'loss': 1-validation_accuracy, # remember: HpBandSter always minimizes!
'info': { 'test accuracy': test_accuracy,
'train accuracy': train_accuracy,
'validation accuracy': validation_accuracy,
'number of parameters': model.number_of_parameters(),
}
}) | python | def compute(self, config, budget, working_directory, *args, **kwargs):
"""
Simple example for a compute function using a feed forward network.
It is trained on the MNIST dataset.
The input parameter "config" (dictionary) contains the sampled configurations passed by the bohb optimizer
"""
# device = torch.device('cpu')
model = MNISTConvNet(num_conv_layers=config['num_conv_layers'],
num_filters_1=config['num_filters_1'],
num_filters_2=config['num_filters_2'] if 'num_filters_2' in config else None,
num_filters_3=config['num_filters_3'] if 'num_filters_3' in config else None,
dropout_rate=config['dropout_rate'],
num_fc_units=config['num_fc_units'],
kernel_size=3
)
criterion = torch.nn.CrossEntropyLoss()
if config['optimizer'] == 'Adam':
optimizer = torch.optim.Adam(model.parameters(), lr=config['lr'])
else:
optimizer = torch.optim.SGD(model.parameters(), lr=config['lr'], momentum=config['sgd_momentum'])
for epoch in range(int(budget)):
loss = 0
model.train()
for i, (x, y) in enumerate(self.train_loader):
optimizer.zero_grad()
output = model(x)
loss = F.nll_loss(output, y)
loss.backward()
optimizer.step()
train_accuracy = self.evaluate_accuracy(model, self.train_loader)
validation_accuracy = self.evaluate_accuracy(model, self.validation_loader)
test_accuracy = self.evaluate_accuracy(model, self.test_loader)
return ({
'loss': 1-validation_accuracy, # remember: HpBandSter always minimizes!
'info': { 'test accuracy': test_accuracy,
'train accuracy': train_accuracy,
'validation accuracy': validation_accuracy,
'number of parameters': model.number_of_parameters(),
}
}) | ['def', 'compute', '(', 'self', ',', 'config', ',', 'budget', ',', 'working_directory', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', "# device = torch.device('cpu')", 'model', '=', 'MNISTConvNet', '(', 'num_conv_layers', '=', 'config', '[', "'num_conv_layers'", ']', ',', 'num_filters_1', '=', 'config', '[', "'num_filters_1'", ']', ',', 'num_filters_2', '=', 'config', '[', "'num_filters_2'", ']', 'if', "'num_filters_2'", 'in', 'config', 'else', 'None', ',', 'num_filters_3', '=', 'config', '[', "'num_filters_3'", ']', 'if', "'num_filters_3'", 'in', 'config', 'else', 'None', ',', 'dropout_rate', '=', 'config', '[', "'dropout_rate'", ']', ',', 'num_fc_units', '=', 'config', '[', "'num_fc_units'", ']', ',', 'kernel_size', '=', '3', ')', 'criterion', '=', 'torch', '.', 'nn', '.', 'CrossEntropyLoss', '(', ')', 'if', 'config', '[', "'optimizer'", ']', '==', "'Adam'", ':', 'optimizer', '=', 'torch', '.', 'optim', '.', 'Adam', '(', 'model', '.', 'parameters', '(', ')', ',', 'lr', '=', 'config', '[', "'lr'", ']', ')', 'else', ':', 'optimizer', '=', 'torch', '.', 'optim', '.', 'SGD', '(', 'model', '.', 'parameters', '(', ')', ',', 'lr', '=', 'config', '[', "'lr'", ']', ',', 'momentum', '=', 'config', '[', "'sgd_momentum'", ']', ')', 'for', 'epoch', 'in', 'range', '(', 'int', '(', 'budget', ')', ')', ':', 'loss', '=', '0', 'model', '.', 'train', '(', ')', 'for', 'i', ',', '(', 'x', ',', 'y', ')', 'in', 'enumerate', '(', 'self', '.', 'train_loader', ')', ':', 'optimizer', '.', 'zero_grad', '(', ')', 'output', '=', 'model', '(', 'x', ')', 'loss', '=', 'F', '.', 'nll_loss', '(', 'output', ',', 'y', ')', 'loss', '.', 'backward', '(', ')', 'optimizer', '.', 'step', '(', ')', 'train_accuracy', '=', 'self', '.', 'evaluate_accuracy', '(', 'model', ',', 'self', '.', 'train_loader', ')', 'validation_accuracy', '=', 'self', '.', 'evaluate_accuracy', '(', 'model', ',', 'self', '.', 'validation_loader', ')', 'test_accuracy', '=', 'self', '.', 'evaluate_accuracy', '(', 'model', ',', 'self', '.', 'test_loader', ')', 'return', '(', '{', "'loss'", ':', '1', '-', 'validation_accuracy', ',', '# remember: HpBandSter always minimizes!', "'info'", ':', '{', "'test accuracy'", ':', 'test_accuracy', ',', "'train accuracy'", ':', 'train_accuracy', ',', "'validation accuracy'", ':', 'validation_accuracy', ',', "'number of parameters'", ':', 'model', '.', 'number_of_parameters', '(', ')', ',', '}', '}', ')'] | Simple example for a compute function using a feed forward network.
It is trained on the MNIST dataset.
The input parameter "config" (dictionary) contains the sampled configurations passed by the bohb optimizer | ['Simple', 'example', 'for', 'a', 'compute', 'function', 'using', 'a', 'feed', 'forward', 'network', '.', 'It', 'is', 'trained', 'on', 'the', 'MNIST', 'dataset', '.', 'The', 'input', 'parameter', 'config', '(', 'dictionary', ')', 'contains', 'the', 'sampled', 'configurations', 'passed', 'by', 'the', 'bohb', 'optimizer'] | train | https://github.com/automl/HpBandSter/blob/841db4b827f342e5eb7f725723ea6461ac52d45a/hpbandster/examples/example_5_pytorch_worker.py#L99-L144 |
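The compute function recorded above trains for int(budget) epochs and reports 1 - validation accuracy as the loss, since HpBandSter minimizes. Below is a minimal sketch of invoking such a worker once, outside the optimizer; the worker class name, its constructor arguments, and the sampled configuration values are assumptions for illustration and are not taken from the record.

# Sketch only: PyTorchWorker, its run_id argument, and the config values are assumed.
sample_config = {
    'num_conv_layers': 2,
    'num_filters_1': 8,
    'num_filters_2': 16,
    'dropout_rate': 0.25,
    'num_fc_units': 32,
    'optimizer': 'Adam',
    'lr': 1e-3,
}

worker = PyTorchWorker(run_id='smoke-test')
result = worker.compute(config=sample_config, budget=1, working_directory='.')

# Lower 'loss' is better; the extra metrics ride along in 'info'.
print(result['loss'], result['info']['validation accuracy'])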
3,694 | ArabellaTech/django-basic-cms | basic_cms/templatetags/pages_tags.py | show_slug_with_level | def show_slug_with_level(context, page, lang=None, fallback=True):
"""Display slug with level by language."""
if not lang:
lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)
page = get_page_from_string_or_id(page, lang)
if not page:
return ''
return {'content': page.slug_with_level(lang)} | python | def show_slug_with_level(context, page, lang=None, fallback=True):
"""Display slug with level by language."""
if not lang:
lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)
page = get_page_from_string_or_id(page, lang)
if not page:
return ''
return {'content': page.slug_with_level(lang)} | ['def', 'show_slug_with_level', '(', 'context', ',', 'page', ',', 'lang', '=', 'None', ',', 'fallback', '=', 'True', ')', ':', 'if', 'not', 'lang', ':', 'lang', '=', 'context', '.', 'get', '(', "'lang'", ',', 'pages_settings', '.', 'PAGE_DEFAULT_LANGUAGE', ')', 'page', '=', 'get_page_from_string_or_id', '(', 'page', ',', 'lang', ')', 'if', 'not', 'page', ':', 'return', "''", 'return', '{', "'content'", ':', 'page', '.', 'slug_with_level', '(', 'lang', ')', '}'] | Display slug with level by language. | ['Display', 'slug', 'with', 'level', 'by', 'language', '.'] | train | https://github.com/ArabellaTech/django-basic-cms/blob/863f3c6098606f663994930cd8e7723ad0c07caf/basic_cms/templatetags/pages_tags.py#L174-L183 |
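The tag resolves the page from a string or id and defaults the language to the one stored in the template context. A direct-call sketch is below; the page id and language codes are illustrative, and in a real project the tag would normally be invoked through Django's template engine rather than called by hand.

# Illustrative values only: page 42 rendered with an explicit 'fr' override.
context = {'lang': 'en'}
result = show_slug_with_level(context, page=42, lang='fr')

if result:
    print(result['content'])   # the slug prefixed according to its tree level
else:
    print('page not found')    # the tag returns '' when the lookup fails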
3,695 | tkem/cachetools | cachetools/func.py | lfu_cache | def lfu_cache(maxsize=128, typed=False):
"""Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Least Frequently Used (LFU)
algorithm.
"""
if maxsize is None:
return _cache(_UnboundCache(), typed)
else:
return _cache(LFUCache(maxsize), typed) | python | def lfu_cache(maxsize=128, typed=False):
"""Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Least Frequently Used (LFU)
algorithm.
"""
if maxsize is None:
return _cache(_UnboundCache(), typed)
else:
return _cache(LFUCache(maxsize), typed) | ['def', 'lfu_cache', '(', 'maxsize', '=', '128', ',', 'typed', '=', 'False', ')', ':', 'if', 'maxsize', 'is', 'None', ':', 'return', '_cache', '(', '_UnboundCache', '(', ')', ',', 'typed', ')', 'else', ':', 'return', '_cache', '(', 'LFUCache', '(', 'maxsize', ')', ',', 'typed', ')'] | Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Least Frequently Used (LFU)
algorithm. | ['Decorator', 'to', 'wrap', 'a', 'function', 'with', 'a', 'memoizing', 'callable', 'that', 'saves', 'up', 'to', 'maxsize', 'results', 'based', 'on', 'a', 'Least', 'Frequently', 'Used', '(', 'LFU', ')', 'algorithm', '.'] | train | https://github.com/tkem/cachetools/blob/1b67cddadccb89993e9d2567bac22e57e2b2b373/cachetools/func.py#L96-L105 |
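lfu_cache is the least-frequently-used counterpart to functools.lru_cache; passing maxsize=None falls back to an unbounded cache. A small usage example follows (the decorated function is only for illustration):

from cachetools.func import lfu_cache

@lfu_cache(maxsize=32)
def fib(n):
    # Naive recursion; repeated sub-calls are answered from the cache.
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(30))           # 832040
print(fib.cache_info())  # hit/miss counters, mirroring functools.lru_cache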
3,696 | patrickayoup/md2remark | md2remark/main.py | parse_cl_args | def parse_cl_args(arg_vector):
'''Parses the command line arguments'''
parser = argparse.ArgumentParser(description='Compiles markdown files into html files for remark.js')
parser.add_argument('source', metavar='source', help='the source to compile. If a directory is provided, all markdown files in that directory are compiled. Output is saved in the current working directory under a md2remark_build subdirectory.')
return parser.parse_args(arg_vector) | python | def parse_cl_args(arg_vector):
'''Parses the command line arguments'''
parser = argparse.ArgumentParser(description='Compiles markdown files into html files for remark.js')
parser.add_argument('source', metavar='source', help='the source to compile. If a directory is provided, all markdown files in that directory are compiled. Output is saved in the current working directory under a md2remark_build subdirectory.')
return parser.parse_args(arg_vector) | ['def', 'parse_cl_args', '(', 'arg_vector', ')', ':', 'parser', '=', 'argparse', '.', 'ArgumentParser', '(', 'description', '=', "'Compiles markdown files into html files for remark.js'", ')', 'parser', '.', 'add_argument', '(', "'source'", ',', 'metavar', '=', "'source'", ',', 'help', '=', "'the source to compile. If a directory is provided, all markdown files in that directory are compiled. Output is saved in the current working directory under a md2remark_build subdirectory.'", ')', 'return', 'parser', '.', 'parse_args', '(', 'arg_vector', ')'] | Parses the command line arguments | ['Parses', 'the', 'command', 'line', 'arguments'] | train | https://github.com/patrickayoup/md2remark/blob/04e66462046cd123c5b1810454d949c3a05bc057/md2remark/main.py#L38-L43 |
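Because the argument vector is passed explicitly instead of being read from sys.argv, the parser is easy to exercise in isolation. A short sketch (the file name is illustrative):

# 'slides.md' is an illustrative argument; it may name a file or a directory.
args = parse_cl_args(['slides.md'])
print(args.source)   # -> 'slides.md'

# Calling parse_cl_args([]) would make argparse print usage and exit with code 2.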
3,697 | cebel/pyuniprot | src/pyuniprot/manager/database.py | DbManager.db_import_xml | def db_import_xml(self, url=None, force_download=False, taxids=None, silent=False):
"""Updates the CTD database
1. downloads gzipped XML
2. drops all tables in database
3. creates all tables in database
4. import XML
5. close session
:param Optional[list[int]] taxids: list of NCBI taxonomy identifier
:param str url: iterable of URL strings
:param bool force_download: force method to download
:param bool silent:
"""
log.info('Update UniProt database from {}'.format(url))
self._drop_tables()
xml_gzipped_file_path, version_file_path = self.download(url, force_download)
self._create_tables()
self.import_version(version_file_path)
self.import_xml(xml_gzipped_file_path, taxids, silent)
self.session.close() | python | def db_import_xml(self, url=None, force_download=False, taxids=None, silent=False):
"""Updates the CTD database
1. downloads gzipped XML
2. drops all tables in database
3. creates all tables in database
4. import XML
5. close session
:param Optional[list[int]] taxids: list of NCBI taxonomy identifier
:param str url: iterable of URL strings
:param bool force_download: force method to download
:param bool silent:
"""
log.info('Update UniProt database from {}'.format(url))
self._drop_tables()
xml_gzipped_file_path, version_file_path = self.download(url, force_download)
self._create_tables()
self.import_version(version_file_path)
self.import_xml(xml_gzipped_file_path, taxids, silent)
self.session.close() | ['def', 'db_import_xml', '(', 'self', ',', 'url', '=', 'None', ',', 'force_download', '=', 'False', ',', 'taxids', '=', 'None', ',', 'silent', '=', 'False', ')', ':', 'log', '.', 'info', '(', "'Update UniProt database from {}'", '.', 'format', '(', 'url', ')', ')', 'self', '.', '_drop_tables', '(', ')', 'xml_gzipped_file_path', ',', 'version_file_path', '=', 'self', '.', 'download', '(', 'url', ',', 'force_download', ')', 'self', '.', '_create_tables', '(', ')', 'self', '.', 'import_version', '(', 'version_file_path', ')', 'self', '.', 'import_xml', '(', 'xml_gzipped_file_path', ',', 'taxids', ',', 'silent', ')', 'self', '.', 'session', '.', 'close', '(', ')'] | Updates the CTD database
1. downloads gzipped XML
2. drops all tables in database
3. creates all tables in database
4. import XML
5. close session
:param Optional[list[int]] taxids: list of NCBI taxonomy identifier
:param str url: iterable of URL strings
:param bool force_download: force method to download
:param bool silent: | ['Updates', 'the', 'CTD', 'database', '1', '.', 'downloads', 'gzipped', 'XML', '2', '.', 'drops', 'all', 'tables', 'in', 'database', '3', '.', 'creates', 'all', 'tables', 'in', 'database', '4', '.', 'import', 'XML', '5', '.', 'close', 'session'] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/database.py#L135-L156 |
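The method is a thin orchestration wrapper: drop the tables, download the gzipped XML and version file (unless cached), recreate the tables, import both, and close the session. A hedged sketch of driving it follows; the DbManager constructor argument and the taxonomy filter are assumptions, not taken from the record.

# Sketch only: the connection-string argument to DbManager is assumed here.
manager = DbManager('sqlite:///pyuniprot.db')

# Restrict the import to human entries (NCBI taxonomy id 9606) and rely on
# the default UniProt download URL by leaving url=None.
manager.db_import_xml(force_download=False, taxids=[9606], silent=True)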
3,698 | defunkt/pystache | pystache/locator.py | Locator.find_object | def find_object(self, obj, search_dirs, file_name=None):
"""
Return the path to a template associated with the given object.
"""
if file_name is None:
# TODO: should we define a make_file_name() method?
template_name = self.make_template_name(obj)
file_name = self.make_file_name(template_name)
dir_path = self.get_object_directory(obj)
if dir_path is not None:
search_dirs = [dir_path] + search_dirs
path = self._find_path_required(search_dirs, file_name)
return path | python | def find_object(self, obj, search_dirs, file_name=None):
"""
Return the path to a template associated with the given object.
"""
if file_name is None:
# TODO: should we define a make_file_name() method?
template_name = self.make_template_name(obj)
file_name = self.make_file_name(template_name)
dir_path = self.get_object_directory(obj)
if dir_path is not None:
search_dirs = [dir_path] + search_dirs
path = self._find_path_required(search_dirs, file_name)
return path | ['def', 'find_object', '(', 'self', ',', 'obj', ',', 'search_dirs', ',', 'file_name', '=', 'None', ')', ':', 'if', 'file_name', 'is', 'None', ':', '# TODO: should we define a make_file_name() method?', 'template_name', '=', 'self', '.', 'make_template_name', '(', 'obj', ')', 'file_name', '=', 'self', '.', 'make_file_name', '(', 'template_name', ')', 'dir_path', '=', 'self', '.', 'get_object_directory', '(', 'obj', ')', 'if', 'dir_path', 'is', 'not', 'None', ':', 'search_dirs', '=', '[', 'dir_path', ']', '+', 'search_dirs', 'path', '=', 'self', '.', '_find_path_required', '(', 'search_dirs', ',', 'file_name', ')', 'return', 'path'] | Return the path to a template associated with the given object. | ['Return', 'the', 'path', 'to', 'a', 'template', 'associated', 'with', 'the', 'given', 'object', '.'] | train | https://github.com/defunkt/pystache/blob/17a5dfdcd56eb76af731d141de395a7632a905b8/pystache/locator.py#L154-L171 |
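find_object derives a template file name from the object's class when none is given, prepends the object's own directory to the search path if available, and raises when no matching file is found. A hedged usage sketch; the view class and search directory are illustrative:

from pystache.locator import Locator

class HelloWorld(object):
    # Illustrative view; pystache derives the underscored name 'hello_world' from the class.
    pass

locator = Locator()
# Raises if no 'hello_world.mustache' exists in any of the search directories.
path = locator.find_object(HelloWorld(), search_dirs=['templates'])
print(path)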
3,699 | bharadwajyarlagadda/bingmaps | bingmaps/apiservices/trafficincidents.py | TrafficIncidentsApi.get_data | def get_data(self):
"""Gets data from the given url"""
url = self.build_url()
self.incidents_data = requests.get(url)
if not self.incidents_data.status_code == 200:
raise self.incidents_data.raise_for_status() | python | def get_data(self):
"""Gets data from the given url"""
url = self.build_url()
self.incidents_data = requests.get(url)
if not self.incidents_data.status_code == 200:
raise self.incidents_data.raise_for_status() | ['def', 'get_data', '(', 'self', ')', ':', 'url', '=', 'self', '.', 'build_url', '(', ')', 'self', '.', 'incidents_data', '=', 'requests', '.', 'get', '(', 'url', ')', 'if', 'not', 'self', '.', 'incidents_data', '.', 'status_code', '==', '200', ':', 'raise', 'self', '.', 'incidents_data', '.', 'raise_for_status', '(', ')'] | Gets data from the given url | ['Gets', 'data', 'from', 'the', 'given', 'url'] | train | https://github.com/bharadwajyarlagadda/bingmaps/blob/6bb3cdadfb121aaff96704509cedff2710a62b6d/bingmaps/apiservices/trafficincidents.py#L89-L94 |
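get_data only builds the URL, performs the GET, and raises for any non-200 response, so error handling reduces to requests' raise_for_status. A hedged sketch of the surrounding call pattern; the constructor parameters shown are assumptions about the wrapper, and the key is a placeholder:

# Sketch only: the exact parameter schema accepted by TrafficIncidentsApi is assumed.
data = {
    'mapArea': [37.0, -105.0, 45.0, -94.0],   # south, west, north, east bounds
    'key': 'YOUR_BING_MAPS_KEY',
}

incidents = TrafficIncidentsApi(data)
incidents.get_data()                           # raises on any non-200 response
print(incidents.incidents_data.status_code)    # 200 when the request succeeded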