def main(argv=None): # pragma: no coverage
""" Main entry point when the user runs the `trytravis` command. """
try:
colorama.init()
if argv is None:
argv = sys.argv[1:]
_main(argv)
except RuntimeError as e:
print(colorama.Fore.RED + 'ERROR: ' +
str(e) + colorama.Style.RESET_ALL)
sys.exit(1)
else:
sys.exit(0)
def num_rings(self):
"""The number of rings a device with
the :attr:`~libinput.constant.DeviceCapability.TABLET_PAD`
capability provides.
Returns:
int: The number of rings or 0 if the device has no rings.
Raises:
AttributeError
"""
num = self._libinput.libinput_device_tablet_pad_get_num_rings(
self._handle)
if num < 0:
raise AttributeError('This device is not a tablet pad device')
return num
def open_interpreter(self, fnames):
"""Open interpreter"""
for path in sorted(fnames):
self.sig_open_interpreter.emit(path)
def write_early_data(self, data: bytes) -> int:
"""Returns the number of (encrypted) bytes sent.
"""
if self._is_handshake_completed:
raise IOError('SSL Handshake was completed; cannot send early data.')
# Pass the cleartext data to the SSL engine
self._ssl.write_early_data(data)
# Recover the corresponding encrypted data
final_length = self._flush_ssl_engine()
return final_length
def getsize(store, path=None):
"""Compute size of stored items for a given path. If `store` provides a `getsize`
method, this will be called, otherwise will return -1."""
path = normalize_storage_path(path)
if hasattr(store, 'getsize'):
# pass through
return store.getsize(path)
elif isinstance(store, dict):
# compute from size of values
if path in store:
v = store[path]
size = buffer_size(v)
else:
members = listdir(store, path)
prefix = _path_to_prefix(path)
size = 0
for k in members:
try:
v = store[prefix + k]
except KeyError:
pass
else:
try:
size += buffer_size(v)
except TypeError:
return -1
return size
else:
return -1
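# Hedged usage sketch (illustrative, not from the original source): assuming this
# module's zarr-style helpers (`normalize_storage_path`, `listdir`, `buffer_size`)
# are importable alongside `getsize`, a plain dict can serve as the store.
store = {'a/x': b'\x00' * 100, 'a/y': b'\x00' * 50, 'b': b'\x00' * 10}
assert getsize(store, 'b') == 10     # size of the single value stored at that path
assert getsize(store, 'a') == 150    # sum of buffer sizes of the members under prefix 'a/'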
def param(f):
'''
The @param decorator, usable in an immutable class (see immutable), specifies that the following
function is actually a transformation on an input parameter; the parameter is required, and is
set to the value returned by the decorated function; i.e., if you decorate the
function abc with @param, then imm.abc = x will result in imm's abc attribute being set to the
value of type(imm).abc(x).
'''
(args, varargs, kwargs, dflts) = getargspec_py27like(f)
if varargs is not None or kwargs is not None or dflts:
raise ValueError('Params may not accept variable, variadic keyword, or default arguments')
if len(args) != 1:
raise ValueError('Parameter transformation functions must take exactly one argument')
f._pimms_immutable_data_ = {}
f._pimms_immutable_data_['is_param'] = True
f._pimms_immutable_data_['name'] = f.__name__
f = staticmethod(f)
return f
def submit_sample(self, filepath, filename, tags=['TheHive']):
"""
Uploads a new sample to the VMRay API. The filename is sent base64-encoded.
:param filepath: path to sample
:type filepath: str
:param filename: filename of the original file
:type filename: str
:param tags: List of tags to apply to the sample
:type tags: list(str)
:returns: Dictionary of results
:rtype: dict
"""
apiurl = '/rest/sample/submit?sample_file'
params = {'sample_filename_b64enc': base64.b64encode(filename.encode('utf-8')),
'reanalyze': self.reanalyze}
if tags:
params['tags'] = ','.join(tags)
if os.path.isfile(filepath):
res = self.session.post(url=self.url + apiurl,
files=[('sample_file', open(filepath, mode='rb'))],
params=params)
if res.status_code == 200:
return json.loads(res.text)
else:
raise BadResponseError('Response from VMRay was not HTTP 200.'
' Responsecode: {}; Text: {}'.format(res.status_code, res.text))
else:
raise SampleFileNotFoundError('Given sample file was not found.')
def contains_key(self, key):
"""
Determines whether this multimap contains an entry with the given key.
**Warning: This method uses the __hash__ and __eq__ methods of the binary form of the key,
not the actual implementations of __hash__ and __eq__ defined in the key's class.**
:param key: (object), the specified key.
:return: (bool), ``true`` if this multimap contains an entry for the specified key.
"""
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(multi_map_contains_key_codec, key_data, key=key_data,
thread_id=thread_id())
def chop(self, bits=1):
"""
Chops a BV into consecutive sub-slices. The length of this BV must be a multiple of ``bits``.
:returns: A list of smaller bitvectors, each ``bits`` in length. The first one will be the left-most (i.e.
most significant) bits.
"""
s = len(self)
if s % bits != 0:
raise ValueError("expression length (%d) should be a multiple of 'bits' (%d)" % (len(self), bits))
elif s == bits:
return [ self ]
else:
return list(reversed([ self[(n+1)*bits - 1:n*bits] for n in range(0, s // bits) ]))
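# Hedged usage sketch (illustrative, not from the original source): assuming this is
# the claripy-style BV API, chopping a concrete 32-bit value into bytes keeps the
# most significant slice first.
import claripy
parts = claripy.BVV(0x11223344, 32).chop(bits=8)
# parts[0] is the most significant byte (0x11), parts[3] the least (0x44); len(parts) == 4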
def hook_point(self, hook_name):
"""Generic function to call modules methods if such method is avalaible
:param hook_name: function name to call
:type hook_name: str
:return:None
"""
self.my_daemon.hook_point(hook_name=hook_name, handle=self)
def to_date(value, default=None):
"""Tries to convert the passed in value to Zope's DateTime
:param value: The value to be converted to a valid DateTime
:type value: str, DateTime or datetime
:return: The DateTime representation of the value passed in or default
"""
if isinstance(value, DateTime):
return value
if not value:
if default is None:
return None
return to_date(default)
try:
if isinstance(value, str) and '.' in value:
# https://docs.plone.org/develop/plone/misc/datetime.html#datetime-problems-and-pitfalls
return DateTime(value, datefmt='international')
return DateTime(value)
except (TypeError, ValueError, DateTimeError):
return to_date(default)
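# Hedged usage sketch (illustrative, not from the original source): assumes Zope's
# DateTime is importable in this environment.
to_date('2018-12-24')                          # parsed directly by DateTime
to_date('2018-12-24 10:00:00.5')               # uses datefmt='international' because of the '.'
to_date('not-a-date', default='2018-12-24')    # unparseable input falls back to the default
to_date(None)                                  # no value and no default -> None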
def _raw_sql(self, values):
"""Prepare SQL statement consisting of a sequence of WHEN .. THEN statements."""
if isinstance(self.model._meta.pk, CharField):
when_clauses = " ".join(
[self._when("'{}'".format(x), y) for (x, y) in values]
)
else:
when_clauses = " ".join([self._when(x, y) for (x, y) in values])
table_name = self.model._meta.db_table
primary_key = self.model._meta.pk.column
return 'SELECT CASE {}."{}" {} ELSE 0 END'.format(
table_name, primary_key, when_clauses
)
def build_from_info(cls, info):
"""build a Term instance from a dict
Parameters
----------
cls : class
info : dict
contains all information needed to build the term
Return
------
Term instance
"""
info = deepcopy(info)
if 'term_type' in info:
cls_ = TERMS[info.pop('term_type')]
if issubclass(cls_, MetaTermMixin):
return cls_.build_from_info(info)
else:
cls_ = cls
return cls_(**info)
def _compute_soil_linear_factor(cls, pga_rock, imt):
"""
Compute soil linear factor as explained in paragraph 'Functional
Form', page 1706.
"""
if imt.period >= 1:
return np.ones_like(pga_rock)
else:
sl = np.zeros_like(pga_rock)
pga_between_100_500 = (pga_rock > 100) & (pga_rock < 500)
pga_greater_equal_500 = pga_rock >= 500
is_SA_between_05_1 = 0.5 < imt.period < 1
is_SA_less_equal_05 = imt.period <= 0.5
if is_SA_between_05_1:
sl[pga_between_100_500] = (1 - (1. / imt.period - 1) *
(pga_rock[pga_between_100_500] -
100) / 400)
sl[pga_greater_equal_500] = 1 - (1. / imt.period - 1)
if is_SA_less_equal_05 or imt.period == 0:
sl[pga_between_100_500] = (1 - (pga_rock[pga_between_100_500] -
100) / 400)
sl[pga_rock <= 100] = 1
return sl
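# Hedged illustration (not from the original source) of the piecewise linear factor
# above for a spectral period with 0.5 < T < 1, mirroring the three PGA regimes.
import numpy as np

period = 0.75
pga_rock = np.array([50., 300., 800.])
sl = np.zeros_like(pga_rock)
mid = (pga_rock > 100) & (pga_rock < 500)
sl[mid] = 1 - (1. / period - 1) * (pga_rock[mid] - 100) / 400
sl[pga_rock >= 500] = 1 - (1. / period - 1)
sl[pga_rock <= 100] = 1
# sl -> [1.0, 0.833..., 0.666...]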
def find_root( self, rows ):
"""Attempt to find/create a reasonable root node from list/set of rows
rows -- key: PStatRow mapping
TODO: still need more robustness here, particularly in the case of
threaded programs. Should be tracing back each row to root, breaking
cycles by sorting on cumulative time, and then collecting the traced
roots (or, if they are all on the same root, use that).
"""
maxes = sorted( rows.values(), key = lambda x: x.cumulative )
if not maxes:
raise RuntimeError( """Null results!""" )
root = maxes[-1]
roots = [root]
for key,value in rows.items():
if not value.parents:
log.debug( 'Found node root: %s', value )
if value not in roots:
roots.append( value )
if len(roots) > 1:
root = PStatGroup(
directory='*',
filename='*',
name=_("<profiling run>"),
children= roots,
)
root.finalize()
self.rows[ root.key ] = root
self.roots['functions'] = root
return root
def genestats(args):
"""
%prog genestats gffile
Print summary stats, including:
- Number of genes
- Number of single-exon genes
- Number of multi-exon genes
- Number of distinct exons
- Number of genes with alternative transcript variants
- Number of predicted transcripts
- Mean number of distinct exons per gene
- Mean number of transcripts per gene
- Mean gene locus size (first to last exon)
- Mean transcript size (UTR, CDS)
- Mean exon size
Stats modeled after barley genome paper Table 1.
A physical, genetic and functional sequence assembly of the barley genome
"""
p = OptionParser(genestats.__doc__)
p.add_option("--groupby", default="conf_class",
help="Print separate stats groupby")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gff_file, = args
gb = opts.groupby
g = make_index(gff_file)
tf = "transcript.sizes"
if need_update(gff_file, tf):
fw = open(tf, "w")
for feat in g.features_of_type("mRNA"):
fid = feat.id
conf_class = feat.attributes.get(gb, "all")
tsize = sum((c.stop - c.start + 1) for c in g.children(fid, 1) \
if c.featuretype == "exon")
print("\t".join((fid, str(tsize), conf_class)), file=fw)
fw.close()
tsizes = DictFile(tf, cast=int)
conf_classes = DictFile(tf, valuepos=2)
logging.debug("A total of {0} transcripts populated.".format(len(tsizes)))
genes = []
for feat in g.features_of_type("gene"):
fid = feat.id
transcripts = [c.id for c in g.children(fid, 1) \
if c.featuretype == "mRNA"]
transcript_sizes = [tsizes[x] for x in transcripts]
exons = set((c.chrom, c.start, c.stop) for c in g.children(fid, 2) \
if c.featuretype == "exon")
conf_class = conf_classes[transcripts[0]]
gs = GeneStats(feat, conf_class, transcript_sizes, exons)
genes.append(gs)
r = {} # Report
distinct_groups = set(conf_classes.values())
for g in distinct_groups:
num_genes = num_single_exon_genes = num_multi_exon_genes = 0
num_genes_with_alts = num_transcripts = num_exons = max_transcripts = 0
cum_locus_size = cum_transcript_size = cum_exon_size = 0
for gs in genes:
if gs.conf_class != g:
continue
num_genes += 1
if gs.num_exons == 1:
num_single_exon_genes += 1
else:
num_multi_exon_genes += 1
num_exons += gs.num_exons
if gs.num_transcripts > 1:
num_genes_with_alts += 1
if gs.num_transcripts > max_transcripts:
max_transcripts = gs.num_transcripts
num_transcripts += gs.num_transcripts
cum_locus_size += gs.locus_size
cum_transcript_size += gs.cum_transcript_size
cum_exon_size += gs.cum_exon_size
mean_num_exons = num_exons * 1. / num_genes
mean_num_transcripts = num_transcripts * 1. / num_genes
mean_locus_size = cum_locus_size * 1. / num_genes
mean_transcript_size = cum_transcript_size * 1. / num_transcripts
mean_exon_size = cum_exon_size * 1. / num_exons
r[("Number of genes", g)] = num_genes
r[("Number of single-exon genes", g)] = \
percentage(num_single_exon_genes, num_genes, mode=1)
r[("Number of multi-exon genes", g)] = \
percentage(num_multi_exon_genes, num_genes, mode=1)
r[("Number of distinct exons", g)] = num_exons
r[("Number of genes with alternative transcript variants", g)] = \
percentage(num_genes_with_alts, num_genes, mode=1)
r[("Number of predicted transcripts", g)] = num_transcripts
r[("Mean number of distinct exons per gene", g)] = mean_num_exons
r[("Mean number of transcripts per gene", g)] = mean_num_transcripts
r[("Max number of transcripts per gene", g)] = max_transcripts
r[("Mean gene locus size (first to last exon)", g)] = mean_locus_size
r[("Mean transcript size (UTR, CDS)", g)] = mean_transcript_size
r[("Mean exon size", g)] = mean_exon_size
fw = must_open(opts.outfile, "w")
print(tabulate(r), file=fw)
fw.close()
def check_perplexities(self, perplexities):
"""Check and correct/truncate perplexities.
If a perplexity is too large, it is corrected to the largest allowed
value. It is then inserted into the list of perplexities only if that
value doesn't already exist in the list.
"""
usable_perplexities = []
for perplexity in sorted(perplexities):
if 3 * perplexity > self.n_samples - 1:
new_perplexity = (self.n_samples - 1) / 3
if new_perplexity in usable_perplexities:
log.warning(
"Perplexity value %d is too high. Dropping "
"because the max perplexity is already in the "
"list." % perplexity
)
else:
usable_perplexities.append(new_perplexity)
log.warning(
"Perplexity value %d is too high. Using "
"perplexity %.2f instead" % (perplexity, new_perplexity)
)
else:
usable_perplexities.append(perplexity)
return usable_perplexities
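# Hedged worked example (illustrative, not from the original source): with
# self.n_samples = 100 the largest usable perplexity is (100 - 1) / 3 = 33.
# check_perplexities([30, 50, 500]) keeps 30, clamps 50 down to 33, and drops 500
# because the clamped value 33 is already in the list, returning [30, 33.0].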
def train(self, x = None, y = None, training_frame = None, fold_column = None,
weights_column = None, validation_frame = None, leaderboard_frame = None, blending_frame = None):
"""
Begins an AutoML task, a background task that automatically builds a number of models
with various algorithms and tracks their performance in a leaderboard. At any point
in the process you may use H2O's performance or prediction functions on the resulting
models.
:param x: A list of column names or indices indicating the predictor columns.
:param y: An index or a column name indicating the response column.
:param fold_column: The name or index of the column in training_frame that holds per-row fold
assignments.
:param weights_column: The name or index of the column in training_frame that holds per-row weights.
:param training_frame: The H2OFrame having the columns indicated by x and y (as well as any
additional columns specified by fold_column or weights_column).
:param validation_frame: H2OFrame with validation data. This argument is ignored unless the user sets
nfolds = 0. If cross-validation is turned off, then a validation frame can be specified and used
for early stopping of individual models and early stopping of the grid searches. By default and
when nfolds > 1, cross-validation metrics will be used for early stopping and thus validation_frame will be ignored.
:param leaderboard_frame: H2OFrame with test data for scoring the leaderboard. This is optional and
if this is set to None (the default), then cross-validation metrics will be used to generate the leaderboard
rankings instead.
:param blending_frame: H2OFrame used to train the metalearning algorithm in Stacked Ensembles (instead of relying on cross-validated predicted values).
This is optional, but when provided, it is also recommended to disable cross validation
by setting `nfolds=0` and to provide a leaderboard frame for scoring purposes.
:returns: An H2OAutoML object.
:examples:
>>> # Set up an H2OAutoML object
>>> aml = H2OAutoML(max_runtime_secs=30)
>>> # Launch an AutoML run
>>> aml.train(y=y, training_frame=train)
"""
ncols = training_frame.ncols
names = training_frame.names
#Set project name if None
if self.project_name is None:
self.project_name = "automl_" + training_frame.frame_id
self.build_control["project_name"] = self.project_name
# Minimal required arguments are training_frame and y (response)
if y is None:
raise ValueError('The response column (y) is not set; please set it to the name of the column that you are trying to predict in your data.')
else:
assert_is_type(y,int,str)
if is_type(y, int):
if not (-ncols <= y < ncols):
raise H2OValueError("Column %d does not exist in the training frame" % y)
y = names[y]
else:
if y not in names:
raise H2OValueError("Column %s does not exist in the training frame" % y)
input_spec = {
'response_column': y,
}
if training_frame is None:
raise ValueError('The training frame is not set!')
else:
assert_is_type(training_frame, H2OFrame)
input_spec['training_frame'] = training_frame.frame_id
if fold_column is not None:
assert_is_type(fold_column,int,str)
input_spec['fold_column'] = fold_column
if weights_column is not None:
assert_is_type(weights_column,int,str)
input_spec['weights_column'] = weights_column
if validation_frame is not None:
assert_is_type(validation_frame, H2OFrame)
input_spec['validation_frame'] = validation_frame.frame_id
if leaderboard_frame is not None:
assert_is_type(leaderboard_frame, H2OFrame)
input_spec['leaderboard_frame'] = leaderboard_frame.frame_id
if blending_frame is not None:
assert_is_type(blending_frame, H2OFrame)
input_spec['blending_frame'] = blending_frame.frame_id
if self.sort_metric is not None:
assert_is_type(self.sort_metric, str)
sort_metric = self.sort_metric.lower()
# Changed the API to use "deviance" to be consistent with stopping_metric values
# TO DO: let's change the backend to use "deviance" since we use the term "deviance"
# After that we can take this `if` statement out
if sort_metric == "deviance":
sort_metric = "mean_residual_deviance"
input_spec['sort_metric'] = sort_metric
if x is not None:
assert_is_type(x,list)
xset = set()
if is_type(x, int, str): x = [x]
for xi in x:
if is_type(xi, int):
if not (-ncols <= xi < ncols):
raise H2OValueError("Column %d does not exist in the training frame" % xi)
xset.add(names[xi])
else:
if xi not in names:
raise H2OValueError("Column %s not in the training frame" % xi)
xset.add(xi)
x = list(xset)
ignored_columns = set(names) - {y} - set(x)
if fold_column is not None and fold_column in ignored_columns:
ignored_columns.remove(fold_column)
if weights_column is not None and weights_column in ignored_columns:
ignored_columns.remove(weights_column)
if ignored_columns is not None:
input_spec['ignored_columns'] = list(ignored_columns)
automl_build_params = dict(input_spec = input_spec)
# NOTE: if the user hasn't specified some block of parameters don't send them!
# This lets the back end use the defaults.
automl_build_params['build_control'] = self.build_control
automl_build_params['build_models'] = self.build_models
resp = h2o.api('POST /99/AutoMLBuilder', json=automl_build_params)
if 'job' not in resp:
print("Exception from the back end: ")
print(resp)
return
self._job = H2OJob(resp['job'], "AutoML")
self._job.poll()
self._fetch()
def start_new_log(self):
'''open a new dataflash log, reset state'''
filename = self.new_log_filepath()
self.block_cnt = 0
self.logfile = open(filename, 'w+b')
print("DFLogger: logging started (%s)" % (filename))
self.prev_cnt = 0
self.download = 0
self.prev_download = 0
self.last_idle_status_printed_time = time.time()
self.last_status_time = time.time()
self.missing_blocks = {}
self.acking_blocks = {}
self.blocks_to_ack_and_nack = []
self.missing_found = 0
self.abandoned = 0
def readline(self):
"""Reads (and optionally parses) a single line."""
line = self.file.readline()
if self.grammar and line:
try:
return self.grammar.parseString(line).asDict()
except ParseException:
return self.readline()
else:
return line
def to_netflux(flux):
r"""Compute the netflux from the gross flux.
Parameters
----------
flux : (M, M) ndarray
Matrix of flux values between pairs of states.
Returns
-------
netflux : (M, M) ndarray
Matrix of netflux values between pairs of states.
Notes
-----
The netflux or effective current is defined as
.. math:: f_{ij}^{+}=\max \{ f_{ij}-f_{ji}, 0 \}
:math:`f_{ij}` is the flux for the transition from :math:`A` to
:math:`B`.
References
----------
.. [1] P. Metzner, C. Schuette and E. Vanden-Eijnden.
Transition Path Theory for Markov Jump Processes.
Multiscale Model Simul 7: 1192-1219 (2009)
"""
if issparse(flux):
return sparse.tpt.to_netflux(flux)
elif isdense(flux):
return dense.tpt.to_netflux(flux)
else:
raise _type_not_supported
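# Hedged sketch (illustrative, not the library's own implementation): for a dense
# flux matrix the netflux formula above reduces to an elementwise maximum.
import numpy as np

flux = np.array([[0.0, 0.3, 0.1],
                 [0.1, 0.0, 0.4],
                 [0.0, 0.2, 0.0]])
netflux = np.maximum(flux - flux.T, 0.0)   # f_ij^+ = max{f_ij - f_ji, 0}
# netflux[0, 1] == 0.2 and netflux[1, 0] == 0.0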
def pick(self):
""" picks a value accoriding to the given density """
v = random.uniform(0, self.ub)
d = self.dist
c = self.vc - 1
s = self.vc
while True:
s = s / 2
if s == 0:
break
if v <= d[c][1]:
c -= s
else:
c += s
# we only need this logic when increasing c
while len(d) <= c:
s = s / 2
c -= s
if s == 0:
break
# we may have converged from the left, instead of the right
if c == len(d) or v <= d[c][1]:
c -= 1
return d[c][0]
def find_signature_input_colocation_error(signature_name, inputs):
"""Returns error message for colocation of signature inputs, or None if ok."""
for input_name, tensor in inputs.items():
expected_colocation_groups = [tf.compat.as_bytes("loc:@" + tensor.op.name)]
if tensor.op.colocation_groups() != expected_colocation_groups:
return (
"A tensor x used as input in a signature must not be subject to a "
"tf.colocate_with(y) constraint. (The reverse would be allowed.)\n"
"Details: tensor '%s' appears as input '%s' of signature '%s' "
"but has Tensor.op.colocation_groups() == %s" %
(tensor, input_name, signature_name, tensor.op.colocation_groups()))
return None
def correct_dmdt(d, dmind, dtind, blrange):
""" Dedisperses and resamples data *in place*.
Drops edges, since it assumes that data is read with overlapping chunks in time.
"""
data = numpyview(data_mem, 'complex64', datashape(d))
data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d))
bl0,bl1 = blrange
data_resamp[:, bl0:bl1] = data[:, bl0:bl1]
rtlib.dedisperse_resample(data_resamp, d['freq'], d['inttime'], d['dmarr'][dmind], d['dtarr'][dtind], blrange, verbose=0)
def process_edge_dijkstra(self, current, neighbor, pred, q, component):
'''
API: process_edge_dijkstra(self, current, neighbor, pred, q, component)
Description:
Used by search() method if the algo argument is 'Dijkstra'. Processes
edges along Dijkstra's algorithm. User does not need to call this
method directly.
Input:
current: Name of the current node.
neighbor: Name of the neighbor node.
pred: Predecessor tree.
q: Data structure that holds nodes to be processed in a queue.
component: component number.
Post:
'color' attribute of nodes and edges may change.
'''
if current is None:
self.get_node(neighbor).set_attr('color', 'red')
self.get_node(neighbor).set_attr('label', 0)
q.push(neighbor, 0)
self.display()
self.get_node(neighbor).set_attr('color', 'black')
return
new_estimate = (q.get_priority(current) +
self.get_edge_attr(current, neighbor, 'cost'))
if neighbor not in pred or new_estimate < q.get_priority(neighbor):
pred[neighbor] = current
self.get_node(neighbor).set_attr('color', 'red')
self.get_node(neighbor).set_attr('label', new_estimate)
q.push(neighbor, new_estimate)
self.display()
self.get_node(neighbor).set_attr('color', 'black')
def generateExecutable(self, outpath='.', signed=False):
"""
Generates the executable for this builder in the output path.
:param outpath | <str>
"""
if not (self.runtime() or self.specfile()):
return True
if not self.distributionPath():
return True
if os.path.exists(self.distributionPath()):
shutil.rmtree(self.distributionPath())
if os.path.isfile(self.sourcePath()):
basepath = os.path.normpath(os.path.dirname(self.sourcePath()))
else:
basepath = os.path.normpath(self.sourcePath())
# store the plugin table of contents
self.generatePlugins(basepath)
# generate the specfile if necessary
specfile = self.specfile()
# generate the spec file options
opts = {
'name': self.name(),
'exname': self.executableName(),
'product': self.productName(),
'runtime': self.runtime(),
'srcpath': self.sourcePath(),
'buildpath': self.buildPath(),
'hookpaths': ',\n'.join(wrap_str(self.hookPaths())),
'hiddenimports': ',\n'.join(wrap_str(self.hiddenImports())),
'distpath': self.distributionPath(),
'platform': sys.platform,
'excludes': ',\n'.join(wrap_str(self.executableExcludes()))
}
if not specfile:
datasets = []
for typ, data in self.executableData():
if typ == 'tree':
args = {
'path': data[0],
'prefix': data[1],
'excludes': ','.join(wrap_str(data[2]))
}
datasets.append(templ.SPECTREE.format(**args))
else:
args = {}
args.update(data)
args.setdefault('type', typ)
datasets.append(templ.SPECDATA.format(**args))
opts['datasets'] = '\n'.join(datasets)
opts.update(self._executableOptions)
if self.executableCliName():
opts['cliname'] = self.executableCliName()
opts['collect'] = templ.SPECFILE_CLI.format(**opts)
else:
opts['collect'] = templ.SPECFILE_COLLECT.format(**opts)
if opts['onefile']:
data = templ.SPECFILE_ONEFILE.format(**opts)
else:
data = templ.SPECFILE.format(**opts)
# generate the spec file for building
specfile = os.path.join(self.buildPath(), self.name() + '.spec')
f = open(specfile, 'w')
f.write(data)
f.close()
cmd = os.path.expandvars(self.executableOption('cmd'))
success = cmdexec(cmd.format(spec=specfile)) == 0
if signed:
binfile = os.path.join(opts['distpath'],
opts['product'],
opts['exname'] + '.exe')
self.sign(binfile)
return success
def set_background_corpus(self, background):
'''
Parameters
----------
background
'''
if issubclass(type(background), TermDocMatrixWithoutCategories):
self._background_corpus = pd.DataFrame(background
.get_term_freq_df()
.sum(axis=1),
columns=['background']).reset_index()
self._background_corpus.columns = ['word', 'background']
elif (type(background) == pd.DataFrame
and set(background.columns) == set(['word', 'background'])):
self._background_corpus = background
else:
raise Exception('The argument named background must be a subclass of TermDocMatrix or a ' \
+ 'DataFrame with columns "word" and "background", where "word" ' \
+ 'is the term text, and "background" is its frequency.')
def get_generator(tweet):
"""
Get information about the application that generated the Tweet
Args:
tweet (Tweet): A Tweet object (or a dictionary)
Returns:
dict: keys are 'link' and 'name', the web link and the name
of the application
Example:
>>> from tweet_parser.getter_methods.tweet_generator import get_generator
>>> original_format_dict = {
... "created_at": "Wed May 24 20:17:19 +0000 2017",
... "source": '<a href="http://twitter.com" rel="nofollow">Twitter Web Client</a>'
... }
>>> get_generator(original_format_dict)
{'link': 'http://twitter.com', 'name': 'Twitter Web Client'}
>>> activity_streams_format_dict = {
... "postedTime": "2017-05-24T20:17:19.000Z",
... "generator":
... {"link": "http://twitter.com",
... "displayName": "Twitter Web Client"}
... }
>>> get_generator(activity_streams_format_dict)
{'link': 'http://twitter.com', 'name': 'Twitter Web Client'}
"""
if is_original_format(tweet):
if sys.version_info[0] == 3 and sys.version_info[1] >= 4:
parser = GeneratorHTMLParser(convert_charrefs=True)
else:
parser = GeneratorHTMLParser()
parser.feed(tweet["source"])
return {"link": parser.generator_link,
"name": parser.generator_name}
else:
return {"link": tweet["generator"]["link"],
"name": tweet["generator"]["displayName"]} | Get information about the application that generated the Tweet
Args:
tweet (Tweet): A Tweet object (or a dictionary)
Returns:
dict: keys are 'link' and 'name', the web link and the name
of the application
Example:
>>> from tweet_parser.getter_methods.tweet_generator import get_generator
>>> original_format_dict = {
... "created_at": "Wed May 24 20:17:19 +0000 2017",
... "source": '<a href="http://twitter.com" rel="nofollow">Twitter Web Client</a>'
... }
>>> get_generator(original_format_dict)
{'link': 'http://twitter.com', 'name': 'Twitter Web Client'}
>>> activity_streams_format_dict = {
... "postedTime": "2017-05-24T20:17:19.000Z",
... "generator":
... {"link": "http://twitter.com",
... "displayName": "Twitter Web Client"}
... }
>>> get_generator(activity_streams_format_dict)
{'link': 'http://twitter.com', 'name': 'Twitter Web Client'} |
def _leapfrog_integrator_one_step(
target_log_prob_fn,
independent_chain_ndims,
step_sizes,
current_momentum_parts,
current_state_parts,
current_target_log_prob,
current_target_log_prob_grad_parts,
state_gradients_are_stopped=False,
name=None):
"""Applies `num_leapfrog_steps` of the leapfrog integrator.
Assumes a simple quadratic kinetic energy function: `0.5 ||momentum||**2`.
#### Examples:
##### Simple quadratic potential.
```python
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.mcmc.hmc import _leapfrog_integrator_one_step # pylint: disable=line-too-long
tfd = tfp.distributions
dims = 10
num_iter = int(1e3)
dtype = np.float32
position = tf.placeholder(np.float32)
momentum = tf.placeholder(np.float32)
target_log_prob_fn = tfd.MultivariateNormalDiag(
loc=tf.zeros(dims, dtype)).log_prob
def _leapfrog_one_step(*args):
# Closure representing computation done during each leapfrog step.
return _leapfrog_integrator_one_step(
target_log_prob_fn=target_log_prob_fn,
independent_chain_ndims=0,
step_sizes=[0.1],
current_momentum_parts=args[0],
current_state_parts=args[1],
current_target_log_prob=args[2],
current_target_log_prob_grad_parts=args[3])
# Do leapfrog integration.
[
[next_momentum],
[next_position],
next_target_log_prob,
next_target_log_prob_grad_parts,
] = tf.while_loop(
cond=lambda *args: True,
body=_leapfrog_one_step,
loop_vars=[
[momentum],
[position],
target_log_prob_fn(position),
tf.gradients(target_log_prob_fn(position), position),
],
maximum_iterations=3)
momentum_ = np.random.randn(dims).astype(dtype)
position_ = np.random.randn(dims).astype(dtype)
positions = np.zeros([num_iter, dims], dtype)
with tf.Session() as sess:
for i in xrange(num_iter):
position_, momentum_ = sess.run(
[next_momentum, next_position],
feed_dict={position: position_, momentum: momentum_})
positions[i] = position_
plt.plot(positions[:, 0]); # Sinusoidal.
```
Args:
target_log_prob_fn: Python callable which takes an argument like
`*current_state_parts` and returns its (possibly unnormalized) log-density
under the target distribution.
independent_chain_ndims: Scalar `int` `Tensor` representing the number of
leftmost `Tensor` dimensions which index independent chains.
step_sizes: Python `list` of `Tensor`s representing the step size for the
leapfrog integrator. Must broadcast with the shape of
`current_state_parts`. Larger step sizes lead to faster progress, but
too-large step sizes make rejection exponentially more likely. When
possible, it's often helpful to match per-variable step sizes to the
standard deviations of the target distribution in each variable.
current_momentum_parts: Tensor containing the value(s) of the momentum
variable(s) to update.
current_state_parts: Python `list` of `Tensor`s representing the current
state(s) of the Markov chain(s). The first `independent_chain_ndims` of
the `Tensor`(s) index different chains.
current_target_log_prob: `Tensor` representing the value of
`target_log_prob_fn(*current_state_parts)`. The only reason to specify
this argument is to reduce TF graph size.
current_target_log_prob_grad_parts: Python list of `Tensor`s representing
gradient of `target_log_prob_fn(*current_state_parts`) wrt
`current_state_parts`. Must have same shape as `current_state_parts`. The
only reason to specify this argument is to reduce TF graph size.
state_gradients_are_stopped: Python `bool` indicating that the proposed new
state be run through `tf.stop_gradient`. This is particularly useful when
combining optimization over samples from the HMC chain.
Default value: `False` (i.e., do not apply `stop_gradient`).
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'hmc_leapfrog_integrator').
Returns:
proposed_momentum_parts: Updated value of the momentum.
proposed_state_parts: Tensor or Python list of `Tensor`s representing the
state(s) of the Markov chain(s) at each result step. Has same shape as
input `current_state_parts`.
proposed_target_log_prob: `Tensor` representing the value of
`target_log_prob_fn` at `next_state`.
proposed_target_log_prob_grad_parts: Gradient of `proposed_target_log_prob`
wrt `next_state`.
Raises:
ValueError: if `len(momentum_parts) != len(state_parts)`.
ValueError: if `len(state_parts) != len(step_sizes)`.
ValueError: if `len(state_parts) != len(grads_target_log_prob)`.
TypeError: if `not target_log_prob.dtype.is_floating`.
"""
# Note on per-variable step sizes:
#
# Using per-variable step sizes is equivalent to using the same step
# size for all variables and adding a diagonal mass matrix in the
# kinetic energy term of the Hamiltonian being integrated. This is
# hinted at by Neal (2011) but not derived in detail there.
#
# Let x and v be position and momentum variables respectively.
# Let g(x) be the gradient of `target_log_prob_fn(x)`.
# Let S be a diagonal matrix of per-variable step sizes.
# Let the Hamiltonian H(x, v) = -target_log_prob_fn(x) + 0.5 * ||v||**2.
#
# Using per-variable step sizes gives the updates
# v' = v + 0.5 * matmul(S, g(x))
# x'' = x + matmul(S, v')
# v'' = v' + 0.5 * matmul(S, g(x''))
#
# Let u = matmul(inv(S), v).
# Multiplying v by inv(S) in the updates above gives the transformed dynamics
# u' = matmul(inv(S), v') = matmul(inv(S), v) + 0.5 * g(x)
# = u + 0.5 * g(x)
# x'' = x + matmul(S, v') = x + matmul(S**2, u')
# u'' = matmul(inv(S), v'') = matmul(inv(S), v') + 0.5 * g(x'')
# = u' + 0.5 * g(x'')
#
# These are exactly the leapfrog updates for the Hamiltonian
# H'(x, u) = -target_log_prob_fn(x) + 0.5 * u^T S**2 u
# = -target_log_prob_fn(x) + 0.5 * ||v||**2 = H(x, v).
#
# To summarize:
#
# * Using per-variable step sizes implicitly simulates the dynamics
# of the Hamiltonian H' (which are energy-conserving in H'). We
# keep track of v instead of u, but the underlying dynamics are
# the same if we transform back.
# * The value of the Hamiltonian H'(x, u) is the same as the value
# of the original Hamiltonian H(x, v) after we transform back from
# u to v.
# * Sampling v ~ N(0, I) is equivalent to sampling u ~ N(0, S**-2).
#
# So using per-variable step sizes in HMC will give results that are
# exactly identical to explicitly using a diagonal mass matrix.
with tf.compat.v1.name_scope(name, 'hmc_leapfrog_integrator_one_step', [
independent_chain_ndims, step_sizes, current_momentum_parts,
current_state_parts, current_target_log_prob,
current_target_log_prob_grad_parts
]):
# Step 1: Update momentum.
proposed_momentum_parts = [
v + 0.5 * tf.cast(eps, v.dtype) * g
for v, eps, g
in zip(current_momentum_parts,
step_sizes,
current_target_log_prob_grad_parts)]
# Step 2: Update state.
proposed_state_parts = [
x + tf.cast(eps, v.dtype) * v
for x, eps, v
in zip(current_state_parts,
step_sizes,
proposed_momentum_parts)]
if state_gradients_are_stopped:
proposed_state_parts = [tf.stop_gradient(x) for x in proposed_state_parts]
# Step 3a: Re-evaluate target-log-prob (and grad) at proposed state.
[
proposed_target_log_prob,
proposed_target_log_prob_grad_parts,
] = mcmc_util.maybe_call_fn_and_grads(
target_log_prob_fn,
proposed_state_parts)
if not proposed_target_log_prob.dtype.is_floating:
raise TypeError('`target_log_prob_fn` must produce a `Tensor` '
'with `float` `dtype`.')
if any(g is None for g in proposed_target_log_prob_grad_parts):
raise ValueError(
'Encountered `None` gradient. Does your target `target_log_prob_fn` '
'access all `tf.Variable`s via `tf.get_variable`?\n'
' current_state_parts: {}\n'
' proposed_state_parts: {}\n'
' proposed_target_log_prob_grad_parts: {}'.format(
current_state_parts,
proposed_state_parts,
proposed_target_log_prob_grad_parts))
# Step 3b: Update momentum (again).
proposed_momentum_parts = [
v + 0.5 * tf.cast(eps, v.dtype) * g
for v, eps, g
in zip(proposed_momentum_parts,
step_sizes,
proposed_target_log_prob_grad_parts)]
return [
proposed_momentum_parts,
proposed_state_parts,
proposed_target_log_prob,
proposed_target_log_prob_grad_parts,
]
def find_mutant_amino_acid_interval(
cdna_sequence,
cdna_first_codon_offset,
cdna_variant_start_offset,
cdna_variant_end_offset,
n_ref,
n_amino_acids):
"""
Parameters
----------
cdna_sequence : skbio.DNA or str
cDNA sequence found in RNAseq data
cdna_first_codon_offset : int
Offset into cDNA sequence to first complete codon, lets us skip
past UTR region and incomplete codons.
cdna_variant_start_offset : int
Interbase start offset into cDNA sequence for selecting mutant
nucleotides.
cdna_variant_end_offset : int
Interbase end offset into cDNA sequence for selecting mutant
nucleotides.
n_ref : int
Number of reference nucleotides
n_amino_acids : int
Number of translated amino acids
Returns tuple with three fields:
1) Start offset for interval of mutant amino acids in translated sequence
2) End offset for interval of mutant amino acids in translated sequence
3) Boolean flag indicating whether the variant was a frameshift.
"""
cdna_alt_nucleotides = cdna_sequence[
cdna_variant_start_offset:cdna_variant_end_offset]
n_alt = len(cdna_alt_nucleotides)
# sequence of nucleotides before the variant starting from the first codon
cdna_coding_prefix = cdna_sequence[cdna_first_codon_offset:cdna_variant_start_offset]
# rounding down since a change in the middle of a codon should count
# toward the variant codons
n_coding_nucleotides_before_variant = len(cdna_coding_prefix)
n_complete_prefix_codons = n_coding_nucleotides_before_variant // 3
frame_of_variant_nucleotides = n_coding_nucleotides_before_variant % 3
frameshift = abs(n_ref - n_alt) % 3 != 0
indel = n_ref != n_alt
variant_aa_interval_start = n_complete_prefix_codons
if frameshift:
# if mutation is a frame shift then every amino acid from the
# first affected codon to the stop is considered mutant
#
# TODO: what if the first k amino acids are synonymous with the
# reference sequence?
variant_aa_interval_end = n_amino_acids
else:
n_alt_codons = int(math.ceil(n_alt / 3.0))
if indel:
# We need to adjust the number of affected codons by whether the
# variant is aligned with codon boundaries, since in-frame indels
# may still be split across multiple codons.
#
# Example of in-frame deletion of 3 nucleotides which leaves
# 0 variant codons in the sequence (interval = 1:1)
# ref = CCC|AAA|GGG|TTT
# alt = CCC|GGG|TTT
#
# Example of in-frame deletion of 3 nucleotides which leaves
# 1 variant codon in the sequence (interval = 1:2)
# ref = CCC|AAA|GGG|TTT
# alt = CCC|AGG|TTT
#
# Example of in-frame insertion of 3 nucleotides which
# yields two variant codons:
# ref = CCC|AAA|GGG|TTT
# alt = CTT|TCC|AAA|GGG|TTT
extra_affected_codon = int(frame_of_variant_nucleotides != 0)
variant_aa_interval_end = (
variant_aa_interval_start + n_alt_codons + extra_affected_codon)
else:
# if the variant is a simple substitution then it only affects
# as many codons as are in the alternate sequence
variant_aa_interval_end = variant_aa_interval_start + n_alt_codons
return variant_aa_interval_start, variant_aa_interval_end, frameshift
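# Hedged worked example (illustrative, not from the original source): for a single
# nucleotide substitution (n_ref == n_alt == 1) whose coding prefix from the first
# complete codon to the variant is 7 nt long, n_complete_prefix_codons = 7 // 3 = 2
# and n_alt_codons = ceil(1 / 3) = 1, so the function returns (2, 3, False): only
# the third translated amino acid is treated as mutant and there is no frameshift.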
def on_message(self, websocket, msg):
'''When a new message arrives, it publishes to all listening clients.
'''
if msg:
lines = []
for li in msg.split('\n'):
li = li.strip()
if li:
lines.append(li)
msg = ' '.join(lines)
if msg:
return self.pubsub.publish(self.channel, msg)
def random_string(length, charset):
"""
Return a random string of the given length from the
given character set.
:param int length: The length of string to return
:param str charset: A string of characters to choose from
:returns: A random string
:rtype: str
"""
n = len(charset)
return ''.join(charset[random.randrange(n)] for _ in range(length))
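# Hedged usage sketch (illustrative, not from the original source): assumes `random`
# is imported at module level, as the function above requires.
import string
token = random_string(8, string.ascii_letters + string.digits)    # e.g. 'kF3xQ9aZ'
hex_id = random_string(16, '0123456789abcdef')                    # 16 random hex digits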
def set_post_data(self):
"""
Set the form data so that validation runs on all POST data and newly entered
form data is placed on the form object.
"""
self.form.data = self.post_data_dict
# Specifically adding list field keys to the form so they are included
# in form.cleaned_data after the call to is_valid
for field_key, field in self.form.fields.items():
if has_digit(field_key):
# We have a list field.
base_key = make_key(field_key, exclude_last_string=True)
# Add new key value with field to form fields so validation
# will work correctly
for key in self.post_data_dict.keys():
if base_key in key:
self.form.fields.update({key: field})
def create(**kwargs):
"""
Create and return a specialized contract based on the given secType,
or a general Contract if secType is not given.
"""
secType = kwargs.get('secType', '')
cls = {
'': Contract,
'STK': Stock,
'OPT': Option,
'FUT': Future,
'CONTFUT': ContFuture,
'CASH': Forex,
'IND': Index,
'CFD': CFD,
'BOND': Bond,
'CMDTY': Commodity,
'FOP': FuturesOption,
'FUND': MutualFund,
'WAR': Warrant,
'IOPT': Warrant,
'BAG': Bag,
'NEWS': Contract
}.get(secType, Contract)
if cls is not Contract:
kwargs.pop('secType', '')
return cls(**kwargs)
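# Hedged usage sketch (illustrative, not from the original source): with secType='STK'
# the factory dispatches to Stock and passes the remaining keyword arguments through.
# The field names below are the usual ib_insync contract fields, assumed rather than
# taken from this file.
stock = create(secType='STK', symbol='AAPL', exchange='SMART', currency='USD')
generic = create(conId=270639)   # no secType -> plain Contract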
def Flush(self):
"""Syncs this object with the data store, maintaining object validity."""
if self.locked and self.CheckLease() == 0:
self._RaiseLockError("Flush")
self._WriteAttributes()
self._SyncAttributes()
if self.parent:
self.parent.Flush()
def onDragSelection(self, event):
"""
Set self.df_slice based on user's selection
"""
if self.grid.GetSelectionBlockTopLeft():
#top_left = self.grid.GetSelectionBlockTopLeft()
#bottom_right = self.grid.GetSelectionBlockBottomRight()
# awkward hack to fix wxPhoenix memory problem, (Github issue #221)
bottom_right = eval(repr(self.grid.GetSelectionBlockBottomRight()).replace("GridCellCoordsArray: ", "").replace("GridCellCoords", ""))
top_left = eval(repr(self.grid.GetSelectionBlockTopLeft()).replace("GridCellCoordsArray: ", "").replace("GridCellCoords", ""))
#
top_left = top_left[0]
bottom_right = bottom_right[0]
else:
return
# GetSelectionBlock returns (row, col)
min_col = top_left[1]
max_col = bottom_right[1]
min_row = top_left[0]
max_row = bottom_right[0]
self.df_slice = self.contribution.tables[self.grid_type].df.iloc[min_row:max_row+1, min_col:max_col+1]
def salt_master(project, target, module, args=None, kwargs=None):
"""
Execute a `salt` command in the head node
"""
client = project.cluster.head.ssh_client
cmd = ['salt']
cmd.extend(generate_salt_cmd(target, module, args, kwargs))
cmd.append('--timeout=300')
cmd.append('--state-output=mixed')
cmd = ' '.join(cmd)
output = client.exec_command(cmd, sudo=True)
if output['exit_code'] == 0:
return output['stdout']
else:
return output['stderr']
def create_as_library(cls, url):
"""
Creates a single crawler in library mode. Crawling will start immediately.
:param url:
:return:
"""
site = {
"crawler": "Download",
"url": url
}
cfg_file_path = os.path.dirname(__file__) + os.path.sep + 'config' + os.path.sep + 'config_lib.cfg'
return cls(cfg_file_path, site, 0, False, False, True)
def delete_page_property(self, page_id, page_property):
"""
Delete a page (content) property, e.g. delete a key of the hash.
:param page_id: content_id format
:param page_property: key of property
:return:
"""
url = 'rest/api/content/{page_id}/property/{page_property}'.format(page_id=page_id,
page_property=str(page_property))
return self.delete(path=url)
def calculate_perf_100nsec_timer(previous, current, property_name):
"""
PERF_100NSEC_TIMER
https://technet.microsoft.com/en-us/library/cc728274(v=ws.10).aspx
"""
n0 = previous[property_name]
n1 = current[property_name]
d0 = previous["Timestamp_Sys100NS"]
d1 = current["Timestamp_Sys100NS"]
if n0 is None or n1 is None:
return
return (n1 - n0) / (d1 - d0) * 100
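# Hedged worked example (sample values and property name are illustrative, not from
# the original source): the counter tracks 100ns units of busy time, so
# (190e6 - 130e6) / (1.2e9 - 1.0e9) * 100 = 30.0, i.e. ~30% busy over the interval.
previous = {"PercentProcessorTime": 130000000, "Timestamp_Sys100NS": 1000000000}
current = {"PercentProcessorTime": 190000000, "Timestamp_Sys100NS": 1200000000}
calculate_perf_100nsec_timer(previous, current, "PercentProcessorTime")   # -> 30.0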
def unmarshal(self, v):
"""
Convert the value from Strava API format to a useful Python representation.
If the value does not appear in the choices attribute, we log a warning rather
than raising an exception, since this may be caused by an upstream API change
and we want to fail gracefully.
"""
try:
return self.choices[v]
except KeyError:
self.log.warning("No such choice {0} for field {1}.".format(v, self))
# Just return the value from the API
return v
def unique_identifier(self):
"""
Get the unique identifier by looking through ``mods:identifier``
See `specs <https://ocr-d.github.io/mets#unique-id-for-the-document-processed>`_ for details.
"""
for t in IDENTIFIER_PRIORITY:
found = self._tree.getroot().find('.//mods:identifier[@type="%s"]' % t, NS)
if found is not None:
return found.text
def create_container_definition(container_name, image, port=80, cpu=1.0, memgb=1.5,
environment=None):
'''Makes a python dictionary of container properties.
Args:
container_name: The name of the container.
image (str): Container image string. E.g. nginx.
port (int): TCP port number. E.g. 8080.
cpu (float): Amount of CPU to allocate to container. E.g. 1.0.
memgb (float): Memory in GB to allocate to container. E.g. 1.5.
environment (list): A list of [{'name':'envname', 'value':'envvalue'}].
Sets environment variables in the container.
Returns:
A Python dictionary of container properties, pass a list of these to
create_container_group().
'''
container = {'name': container_name}
container_properties = {'image': image}
container_properties['ports'] = [{'port': port}]
container_properties['resources'] = {
'requests': {'cpu': cpu, 'memoryInGB': memgb}}
container['properties'] = container_properties
if environment is not None:
container_properties['environmentVariables'] = environment
return container | Makes a python dictionary of container properties.
Args:
container_name: The name of the container.
image (str): Container image string. E.g. nginx.
port (int): TCP port number. E.g. 8080.
cpu (float): Amount of CPU to allocate to container. E.g. 1.0.
memgb (float): Memory in GB to allocate to container. E.g. 1.5.
environment (list): A list of [{'name':'envname', 'value':'envvalue'}].
Sets environment variables in the container.
Returns:
A Python dictionary of container properties, pass a list of these to
create_container_group(). |
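A usage sketch of the helper above; the container names, images and environment values are illustrative only.
web = create_container_definition('web', 'nginx', port=80)
worker = create_container_definition('worker', 'myregistry.example/worker:latest',
                                     port=8080, cpu=2.0, memgb=4.0,
                                     environment=[{'name': 'MODE', 'value': 'batch'}])

print(web['properties']['resources'])
# {'requests': {'cpu': 1.0, 'memoryInGB': 1.5}}
containers = [web, worker]  # a list like this is what create_container_group() expects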
def main():
"""
NAME
plotxy_magic.py
DESCRIPTION
Makes simple X,Y plots
INPUT FORMAT
Any MagIC formatted file
SYNTAX
plotxy_magic.py [command line options]
OPTIONS
-h prints this help message
-f FILE to set file name on command rec
-c col1 col2 specify columns names to plot
-sym SYM SIZE specify symbol and size to plot: default is red dots
-S don't plot symbols
-xlab XLAB
-ylab YLAB
-l connect symbols with lines
-b xmin xmax ymin ymax, sets bounds
    # -b [key:max:min,key:max:min,etc.] leave max or min blank for no cutoff
"""
col1,col2=0,1
sym,size = 'ro',20
xlab,ylab='',''
lines=0
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
else:
        print('-f option is a required field')
print(main.__doc__)
sys.exit()
if '-c' in sys.argv:
ind=sys.argv.index('-c')
col1=sys.argv[ind+1]
col2=sys.argv[ind+2]
else:
        print('Column headers are a required field')
print(main.__doc__)
sys.exit()
if '-xlab' in sys.argv:
ind=sys.argv.index('-xlab')
xlab=sys.argv[ind+1]
if '-ylab' in sys.argv:
ind=sys.argv.index('-ylab')
ylab=sys.argv[ind+1]
# if '-b' in sys.argv:
# ind=sys.argv.index('-b')
# bounds=sys.argv[ind+1].split(',')
if '-b' in sys.argv:
ind=sys.argv.index('-b')
xmin=float(sys.argv[ind+1])
xmax=float(sys.argv[ind+2])
ymin=float(sys.argv[ind+3])
ymax=float(sys.argv[ind+4])
if '-sym' in sys.argv:
ind=sys.argv.index('-sym')
sym=sys.argv[ind+1]
size=int(sys.argv[ind+2])
if '-l' in sys.argv: lines=1
if '-S' in sys.argv: sym=''
X,Y=[],[]
data,file_type=pmag.magic_read(file)
print(file_type)
for rec in data:
if col1 not in list(rec.keys()) or col2 not in list(rec.keys()):
print(col1,' and/or ',col2, ' not in file headers')
print('try again')
sys.exit()
if rec[col1]!='' and rec[col2]!='':
skip=0
if '-crit' in sys.argv:
for crit in bounds:
crits=crit.split(':')
crit_key=crits[0]
crit_min=crits[1]
crit_max=crits[2]
if rec[crit_key]=="":
skip=1
else:
if crit_min!="" and float(rec[crit_key])<float(crit_min):skip=1
if crit_max!="" and float(rec[crit_key])>float(crit_min):skip=1
if skip==0:
X.append(float(rec[col1]))
Y.append(float(rec[col2]))
if len(X)==0:
print(col1,' and/or ',col2, ' have no data ')
print('try again')
sys.exit()
else:
print(len(X),' data points')
if sym!='':pylab.scatter(X,Y,c=sym[0],marker=sym[1],s=size)
if xlab!='':pylab.xlabel(xlab)
if ylab!='':pylab.ylabel(ylab)
if lines==1:pylab.plot(X,Y,'k-')
if '-b' in sys.argv:pylab.axis([xmin,xmax,ymin,ymax])
pylab.draw()
ans=input("Press return to quit ")
sys.exit() | NAME
plotxy_magic.py
DESCRIPTION
Makes simple X,Y plots
INPUT FORMAT
Any MagIC formatted file
SYNTAX
plotxy_magic.py [command line options]
OPTIONS
-h prints this help message
-f FILE to set file name on command rec
-c col1 col2 specify columns names to plot
-sym SYM SIZE specify symbol and size to plot: default is red dots
-S don't plot symbols
-xlab XLAB
-ylab YLAB
-l connect symbols with lines
-b xmin xmax ymin ymax, sets bounds
# -b [key:max:min,key:max:min,etc.] leave max or min blank for no cutoff
def store(self):
'''
Write content of the entire cache to disk
'''
if msgpack is None:
log.error('Cache cannot be stored on disk: msgpack is missing')
else:
# TODO Dir hashing?
try:
with salt.utils.files.fopen(self._path, 'wb+') as fp_:
cache = {
"CacheDisk_data": self._dict,
"CacheDisk_cachetime": self._key_cache_time
}
msgpack.dump(cache, fp_, use_bin_type=True)
except (IOError, OSError) as err:
log.error('Error storing cache data to the disk: %s', err) | Write content of the entire cache to disk |
def copy(self):
""" Return copy of self
Returns:
Identifier object
"""
tokens = ([t for t in self.tokens]
if isinstance(self.tokens, list) else self.tokens)
return Identifier(tokens, 0) | Return copy of self
Returns:
Identifier object |
def add_group_members(self, members):
"""Add a new group member to the groups list
:param members: member name
:type members: str
:return: None
"""
if not isinstance(members, list):
members = [members]
if not getattr(self, 'group_members', None):
self.group_members = members
else:
self.group_members.extend(members) | Add a new group member to the groups list
:param members: member name
:type members: str
:return: None |
def _bubbleP(cls, T):
"""Using ancillary equation return the pressure of bubble point"""
c = cls._blend["bubble"]
Tj = cls._blend["Tj"]
Pj = cls._blend["Pj"]
Tita = 1-T/Tj
suma = 0
for i, n in zip(c["i"], c["n"]):
suma += n*Tita**(i/2.)
P = Pj*exp(Tj/T*suma)
return P | Using ancillary equation return the pressure of bubble point |
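In conventional notation the ancillary form coded above is (a sketch: the coefficients n_i and half-integer exponents i/2 come from cls._blend["bubble"], and Tj, Pj are the blend's reducing temperature and pressure):
P_{bubble}(T) = P_j \exp\!\left[\frac{T_j}{T}\sum_i n_i\,\theta^{\,i/2}\right],
\qquad \theta = 1 - \frac{T}{T_j}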
def run(self, agent_host):
"""run the agent on the world"""
total_reward = 0
self.prev_s = None
self.prev_a = None
is_first_action = True
# main loop:
world_state = agent_host.getWorldState()
while world_state.is_mission_running:
current_r = 0
if is_first_action:
# wait until have received a valid observation
while True:
time.sleep(0.1)
world_state = agent_host.getWorldState()
for error in world_state.errors:
self.logger.error("Error: %s" % error.text)
for reward in world_state.rewards:
current_r += reward.getValue()
if world_state.is_mission_running and len(world_state.observations)>0 and not world_state.observations[-1].text=="{}":
total_reward += self.act(world_state, agent_host, current_r)
break
if not world_state.is_mission_running:
break
is_first_action = False
else:
# wait for non-zero reward
while world_state.is_mission_running and current_r == 0:
time.sleep(0.1)
world_state = agent_host.getWorldState()
for error in world_state.errors:
self.logger.error("Error: %s" % error.text)
for reward in world_state.rewards:
current_r += reward.getValue()
# allow time to stabilise after action
while True:
time.sleep(0.1)
world_state = agent_host.getWorldState()
for error in world_state.errors:
self.logger.error("Error: %s" % error.text)
for reward in world_state.rewards:
current_r += reward.getValue()
if world_state.is_mission_running and len(world_state.observations)>0 and not world_state.observations[-1].text=="{}":
total_reward += self.act(world_state, agent_host, current_r)
break
if not world_state.is_mission_running:
break
# process final reward
self.logger.debug("Final reward: %d" % current_r)
total_reward += current_r
# update Q values
if self.prev_s is not None and self.prev_a is not None:
self.updateQTableFromTerminatingState( current_r )
self.drawQ()
return total_reward | run the agent on the world |
def mk_function(metamodel, s_sync):
'''
Create a python function from a BridgePoint function.
'''
action = s_sync.Action_Semantics_internal
label = s_sync.Name
return lambda **kwargs: interpret.run_function(metamodel, label,
action, kwargs) | Create a python function from a BridgePoint function. |
def pad_to_size(text, x, y):
"""
Adds whitespace to text to center it within a frame of the given
dimensions.
"""
input_lines = text.rstrip().split("\n")
longest_input_line = max(map(len, input_lines))
number_of_input_lines = len(input_lines)
x = max(x, longest_input_line)
y = max(y, number_of_input_lines)
output = ""
padding_top = int((y - number_of_input_lines) / 2)
padding_bottom = y - number_of_input_lines - padding_top
padding_left = int((x - longest_input_line) / 2)
output += padding_top * (" " * x + "\n")
for line in input_lines:
output += padding_left * " " + line + " " * (x - padding_left - len(line)) + "\n"
output += padding_bottom * (" " * x + "\n")
return output | Adds whitespace to text to center it within a frame of the given
dimensions. |
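A small usage sketch of the padding helper above.
# Centre a two-line banner inside a 20x5 frame of spaces.
framed = pad_to_size("hello\nworld", 20, 5)
print(framed)
# Produces 5 lines, each exactly 20 characters wide: one padded line on top,
# the two text lines centred horizontally, and two padded lines below.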
def get_breadcrumbs(self):
"""
Breadcrumb format: (('name', 'url'), ...) or None if not used.
"""
if not self.breadcrumbs:
return None
else:
allowed_breadcrumbs = []
for breadcrumb in self.breadcrumbs:
# check permission based on named_url
if breadcrumb[1] is not None and not view_from_url(
breadcrumb[1]
).has_permission(self.request.user):
continue
obj = self if not hasattr(self, "object") else self.object
url = (
None
if not breadcrumb[1]
else reverse_url(breadcrumb[1], obj)
)
allowed_breadcrumbs.append({"name": breadcrumb[0], "url": url})
return allowed_breadcrumbs | Breadcrumb format: (('name', 'url'), ...) or None if not used. |
def update(self):
"""
Handle update events on bokeh server.
"""
if not self._queue:
return
dim, widget_type, attr, old, new = self._queue[-1]
self._queue = []
dim_label = dim.pprint_label
label, widget = self.widgets[dim_label]
if widget_type == 'label':
if isinstance(label, AutocompleteInput):
value = [new]
widget.value = value
else:
widget.value = float(new)
elif label:
lookups = self.lookups.get(dim_label)
if not self.editable:
if lookups:
new = lookups[widget.value][1]
label.text = '<b>%s</b>' % dim.pprint_value_string(new)
elif isinstance(label, AutocompleteInput):
text = lookups[new][1]
label.value = text
else:
label.value = dim.pprint_value(new)
key = []
for dim, (label, widget) in self.widgets.items():
lookups = self.lookups.get(dim)
if label and lookups:
val = lookups[widget.value][0]
else:
val = widget.value
key.append(val)
key = wrap_tuple_streams(tuple(key), self.plot.dimensions,
self.plot.streams)
self.plot.update(key)
self._active = False | Handle update events on bokeh server. |
def is_list_like(obj, allow_sets=True):
"""
Check if the object is list-like.
Objects that are considered list-like are for example Python
lists, tuples, sets, NumPy arrays, and Pandas Series.
Strings and datetime objects, however, are not considered list-like.
Parameters
----------
obj : The object to check
allow_sets : boolean, default True
If this parameter is False, sets will not be considered list-like
.. versionadded:: 0.24.0
Returns
-------
is_list_like : bool
Whether `obj` has list-like properties.
Examples
--------
>>> is_list_like([1, 2, 3])
True
>>> is_list_like({1, 2, 3})
True
>>> is_list_like(datetime(2017, 1, 1))
False
>>> is_list_like("foo")
False
>>> is_list_like(1)
False
>>> is_list_like(np.array([2]))
True
    >>> is_list_like(np.array(2))
False
"""
return (isinstance(obj, abc.Iterable) and
# we do not count strings/unicode/bytes as list-like
not isinstance(obj, (str, bytes)) and
# exclude zero-dimensional numpy arrays, effectively scalars
not (isinstance(obj, np.ndarray) and obj.ndim == 0) and
# exclude sets if allow_sets is False
not (allow_sets is False and isinstance(obj, abc.Set))) | Check if the object is list-like.
Objects that are considered list-like are for example Python
lists, tuples, sets, NumPy arrays, and Pandas Series.
Strings and datetime objects, however, are not considered list-like.
Parameters
----------
obj : The object to check
allow_sets : boolean, default True
If this parameter is False, sets will not be considered list-like
.. versionadded:: 0.24.0
Returns
-------
is_list_like : bool
Whether `obj` has list-like properties.
Examples
--------
>>> is_list_like([1, 2, 3])
True
>>> is_list_like({1, 2, 3})
True
>>> is_list_like(datetime(2017, 1, 1))
False
>>> is_list_like("foo")
False
>>> is_list_like(1)
False
>>> is_list_like(np.array([2]))
True
>>> is_list_like(np.array(2))
False |
def _calculate_Hfr(self, T):
"""
Calculate the enthalpy flow rate of the stream at the specified
temperature.
:param T: Temperature. [°C]
:returns: Enthalpy flow rate. [kWh/h]
"""
if self.isCoal:
return self._calculate_Hfr_coal(T)
Hfr = 0.0
for compound in self.material.compounds:
index = self.material.get_compound_index(compound)
dHfr = thermo.H(compound, T, self._compound_mfrs[index])
Hfr = Hfr + dHfr
return Hfr | Calculate the enthalpy flow rate of the stream at the specified
temperature.
:param T: Temperature. [°C]
:returns: Enthalpy flow rate. [kWh/h] |
def channels(self):
"""
List of channels of this slack team
"""
if not self._channels:
self._channels = self._call_api('channels.list')['channels']
return self._channels | List of channels of this slack team |
def _handle_get(self, request_data):
"""
An OCSP GET request contains the DER-in-base64 encoded OCSP request in the
HTTP request URL.
"""
der = base64.b64decode(request_data)
ocsp_request = self._parse_ocsp_request(der)
return self._build_http_response(ocsp_request) | An OCSP GET request contains the DER-in-base64 encoded OCSP request in the
HTTP request URL. |
def setLength(self, personID, length):
"""setLength(string, double) -> None
Sets the length in m for the given person.
"""
self._connection._sendDoubleCmd(
tc.CMD_SET_PERSON_VARIABLE, tc.VAR_LENGTH, personID, length) | setLength(string, double) -> None
Sets the length in m for the given person. |
def aggregate(input, **params):
"""
Returns aggregate
:param input:
:param params:
:return:
"""
PARAM_CFG_EXTRACT = 'extract'
PARAM_CFG_SUBSTITUTE = 'substitute'
PARAM_CFG_AGGREGATE = 'aggregate'
AGGR_FIELD = 'field'
AGGR_FUNC = 'func'
extract_params = params.get(PARAM_CFG_EXTRACT)
extract_params.update({AccessParams.KEY_TYPE: AccessParams.TYPE_MULTI})
dataset = __extract(input, extract_params)
if PARAM_CFG_SUBSTITUTE in params:
dataset = __substitute(input, dataset, params.get(PARAM_CFG_SUBSTITUTE))
cfg = params.get(PARAM_CFG_AGGREGATE)
res = Aggregator.agg_single_func(dataset, cfg[AGGR_FIELD], cfg[AGGR_FUNC])
return res | Returns aggregate
:param input:
:param params:
:return: |
def metapolicy(self, permitted):
"""
Sets metapolicy to ``permitted``. (only applicable to master
policy files). Acceptable values correspond to those listed in
Section 3(b)(i) of the crossdomain.xml specification, and are
also available as a set of constants defined in this module.
By default, Flash assumes a value of ``master-only`` for all
policies except socket policies, (which assume a default of
``all``) so if this is desired (and, for security, it
typically is), this method does not need to be called.
Note that a metapolicy of ``none`` forbids **all** access,
even if one or more domains, headers or identities have
previously been specified as allowed. As such, setting the
metapolicy to ``none`` will remove all access previously
granted by ``allow_domain``, ``allow_headers`` or
``allow_identity``. Additionally, attempting to grant access
via ``allow_domain``, ``allow_headers`` or ``allow_identity``
will, when the metapolicy is ``none``, raise ``TypeError``.
"""
if permitted not in VALID_SITE_CONTROL:
raise TypeError(SITE_CONTROL_ERROR.format(permitted))
if permitted == SITE_CONTROL_NONE:
# Metapolicy 'none' means no access is permitted.
self.domains = {}
self.header_domains = {}
self.identities = []
self.site_control = permitted | Sets metapolicy to ``permitted``. (only applicable to master
policy files). Acceptable values correspond to those listed in
Section 3(b)(i) of the crossdomain.xml specification, and are
also available as a set of constants defined in this module.
By default, Flash assumes a value of ``master-only`` for all
policies except socket policies, (which assume a default of
``all``) so if this is desired (and, for security, it
typically is), this method does not need to be called.
Note that a metapolicy of ``none`` forbids **all** access,
even if one or more domains, headers or identities have
previously been specified as allowed. As such, setting the
metapolicy to ``none`` will remove all access previously
granted by ``allow_domain``, ``allow_headers`` or
``allow_identity``. Additionally, attempting to grant access
via ``allow_domain``, ``allow_headers`` or ``allow_identity``
will, when the metapolicy is ``none``, raise ``TypeError``. |
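A usage sketch, assuming the enclosing policy-builder class (called Policy here) and the SITE_CONTROL_* constants defined in the same module; none of these names are confirmed beyond what the method above references.
policy = Policy()                       # hypothetical enclosing class
policy.allow_domain('media.example.org')
policy.metapolicy(SITE_CONTROL_NONE)    # 'none' wipes the grant made above
assert policy.domains == {}             # all access is now forbidden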
async def connect(self) -> None:
"""Open a connection to the defined server."""
def protocol_factory() -> Protocol:
return Protocol(client=self)
_, protocol = await self.loop.create_connection(
protocol_factory,
host=self.host,
port=self.port,
ssl=self.ssl
) # type: Tuple[Any, Any]
if self.protocol:
self.protocol.close()
self.protocol = protocol
# TODO: Delete the following code line. It is currently kept in order
# to not break the current existing codebase. Removing it requires a
# heavy change in the test codebase.
protocol.client = self
self.trigger("client_connect") | Open a connection to the defined server. |
def bisect(func, a, b, xtol=1e-6, errorcontrol=True,
testkwargs=dict(), outside='extrapolate',
ascending=None,
disp=False):
"""Find root by bysection search.
If the function evaluation is noisy then use `errorcontrol=True` for adaptive
sampling of the function during the bisection search.
Parameters
----------
func: callable
Function of which the root should be found. If `errorcontrol=True`
then the function should be derived from `AverageBase`.
a, b: float
initial interval
xtol: float
target tolerance for interval size
errorcontrol: boolean
if true, assume that function is derived from `AverageBase`.
testkwargs: only for `errorcontrol=True`
see `AverageBase.test0`
outside: ['extrapolate', 'raise']
How to handle the case where f(a) and f(b) have same sign,
i.e. where the root lies outside of the interval.
If 'raise' throws a `BisectException`.
ascending: allow passing in directly whether function is ascending or not
if ascending=True then it is assumed without check that f(a) < 0 and f(b) > 0
if ascending=False then it is assumed without check that f(a) > 0 and f(b) < 0
Returns
-------
float, root of function
"""
search = True
# check whether function is ascending or not
if ascending is None:
if errorcontrol:
testkwargs.update(dict(type_='smaller', force=True))
fa = func.test0(a, **testkwargs)
fb = func.test0(b, **testkwargs)
else:
fa = func(a) < 0
fb = func(b) < 0
if fa and not fb:
ascending = True
elif fb and not fa:
ascending = False
else:
if disp:
print('Warning: func(a) and func(b) do not have opposing signs -> no search done')
if outside == 'raise':
raise BisectException()
search = False
# refine interval until it has reached size xtol, except if root outside
while (b-a > xtol) and search:
mid = (a+b)/2.0
if ascending:
if ((not errorcontrol) and (func(mid) < 0)) or \
(errorcontrol and func.test0(mid, **testkwargs)):
a = mid
else:
b = mid
else:
if ((not errorcontrol) and (func(mid) < 0)) or \
(errorcontrol and func.test0(mid, **testkwargs)):
b = mid
else:
a = mid
if disp:
print('bisect bounds', a, b)
# interpolate linearly to get zero
if errorcontrol:
ya, yb = func(a)[0], func(b)[0]
else:
ya, yb = func(a), func(b)
m = (yb-ya) / (b-a)
res = a-ya/m
if disp:
print('bisect final value', res)
    return res | Find root by bisection search.
If the function evaluation is noisy then use `errorcontrol=True` for adaptive
sampling of the function during the bisection search.
Parameters
----------
func: callable
Function of which the root should be found. If `errorcontrol=True`
then the function should be derived from `AverageBase`.
a, b: float
initial interval
xtol: float
target tolerance for interval size
errorcontrol: boolean
if true, assume that function is derived from `AverageBase`.
testkwargs: only for `errorcontrol=True`
see `AverageBase.test0`
outside: ['extrapolate', 'raise']
How to handle the case where f(a) and f(b) have same sign,
i.e. where the root lies outside of the interval.
If 'raise' throws a `BisectException`.
ascending: allow passing in directly whether function is ascending or not
if ascending=True then it is assumed without check that f(a) < 0 and f(b) > 0
if ascending=False then it is assumed without check that f(a) > 0 and f(b) < 0
Returns
-------
float, root of function |
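A usage sketch with a deterministic (noise-free) function, so the adaptive error control is switched off.
# Find the positive root of f(x) = x**2 - 2 on [0, 2]; expected ~1.41421356.
root = bisect(lambda x: x ** 2 - 2.0, 0.0, 2.0, xtol=1e-8, errorcontrol=False)
print(root)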
def getAllSavedQueries(self, projectarea_id=None, projectarea_name=None,
creator=None, saved_query_name=None):
"""Get all saved queries created by somebody (optional)
in a certain project area (optional, either `projectarea_id`
or `projectarea_name` is needed if specified)
If `saved_query_name` is specified, only the saved queries match the
name will be fetched.
Note: only if `creator` is added as a member, the saved queries
can be found. Otherwise None will be returned.
        WARNING: currently the RTC server cannot correctly list all the saved
        queries; this seems to be a bug in RTC. Using `runSavedQueryByUrl` is
        recommended to query all the workitems once the query has been saved.
Note: It will run faster when more attributes are specified.
:param projectarea_id: the :class:`rtcclient.project_area.ProjectArea`
id
:param projectarea_name: the
:class:`rtcclient.project_area.ProjectArea` name
:param creator: the creator email address
:param saved_query_name: the saved query name
:return: a :class:`list` that contains the saved queried
:class:`rtcclient.models.SavedQuery` objects
:rtype: list
"""
pa_id = (self.rtc_obj
._pre_get_resource(projectarea_id=projectarea_id,
projectarea_name=projectarea_name))
filter_rule = None
if creator is not None:
fcreator = self.rtc_obj.getOwnedBy(creator).url
filter_rule = [("dc:creator", "@rdf:resource",
fcreator)]
self.log.debug("Add rules for fetching all saved queries: "
"created by %s", creator)
if saved_query_name is not None:
ftitle_rule = ("dc:title", None, saved_query_name)
if filter_rule is None:
filter_rule = [ftitle_rule]
else:
filter_rule.append(ftitle_rule)
self.log.debug("Add rules for fetching all saved queries: "
"saved query title is %s", saved_query_name)
return (self.rtc_obj
._get_paged_resources("SavedQuery",
projectarea_id=pa_id,
page_size="100",
filter_rule=filter_rule)) | Get all saved queries created by somebody (optional)
in a certain project area (optional, either `projectarea_id`
or `projectarea_name` is needed if specified)
If `saved_query_name` is specified, only the saved queries match the
name will be fetched.
Note: only if `creator` is added as a member, the saved queries
can be found. Otherwise None will be returned.
WARNING: currently the RTC server cannot correctly list all the saved
queries; this seems to be a bug in RTC. Using `runSavedQueryByUrl` is
recommended to query all the workitems once the query has been saved.
Note: It will run faster when more attributes are specified.
:param projectarea_id: the :class:`rtcclient.project_area.ProjectArea`
id
:param projectarea_name: the
:class:`rtcclient.project_area.ProjectArea` name
:param creator: the creator email address
:param saved_query_name: the saved query name
:return: a :class:`list` that contains the saved queried
:class:`rtcclient.models.SavedQuery` objects
:rtype: list |
def pre_parse_and_validate_signavio(self, bpmn, filename):
"""
This is the Signavio specific editor hook for pre-parsing and
validation.
        A subclass can override this method to provide additional parsing or
validation. It should call the parent method first.
:param bpmn: an lxml tree of the bpmn content
:param filename: the source file name
This must return the updated bpmn object (or a replacement)
"""
self._check_for_disconnected_boundary_events_signavio(bpmn, filename)
self._fix_call_activities_signavio(bpmn, filename)
return bpmn | This is the Signavio specific editor hook for pre-parsing and
validation.
A subclass can override this method to provide additional parsing or
validation. It should call the parent method first.
:param bpmn: an lxml tree of the bpmn content
:param filename: the source file name
This must return the updated bpmn object (or a replacement) |
def previous_row(self):
"""Move to previous row from currently selected row."""
row = self.currentIndex().row()
rows = self.source_model.rowCount()
if row == 0:
row = rows
self.selectRow(row - 1) | Move to previous row from currently selected row. |
def refresh_modules(self, module_string=None, exact=True):
"""
Update modules.
if module_string is None all modules are refreshed
if module_string then modules with the exact name or those starting
with the given string depending on exact parameter will be refreshed.
If a module is an i3status one then we refresh i3status.
To prevent abuse, we rate limit this function to 100ms for full
refreshes.
"""
if not module_string:
if time.time() > (self.last_refresh_ts + 0.1):
self.last_refresh_ts = time.time()
else:
# rate limiting
return
update_i3status = False
for name, module in self.output_modules.items():
if (
module_string is None
or (exact and name == module_string)
or (not exact and name.startswith(module_string))
):
if module["type"] == "py3status":
if self.config["debug"]:
self.log("refresh py3status module {}".format(name))
module["module"].force_update()
else:
if self.config["debug"]:
self.log("refresh i3status module {}".format(name))
update_i3status = True
if update_i3status:
self.i3status_thread.refresh_i3status() | Update modules.
if module_string is None all modules are refreshed
if module_string then modules with the exact name or those starting
with the given string depending on exact parameter will be refreshed.
If a module is an i3status one then we refresh i3status.
To prevent abuse, we rate limit this function to 100ms for full
refreshes. |
def get_register_func(base_class, nickname):
"""Get registrator function.
Parameters
----------
base_class : type
        base class for classes that will be registered
nickname : str
nickname of base_class for logging
Returns
-------
a registrator function
"""
if base_class not in _REGISTRY:
_REGISTRY[base_class] = {}
registry = _REGISTRY[base_class]
def register(klass, name=None):
"""Register functions"""
assert issubclass(klass, base_class), \
"Can only register subclass of %s"%base_class.__name__
if name is None:
name = klass.__name__
name = name.lower()
if name in registry:
warnings.warn(
"\033[91mNew %s %s.%s registered with name %s is"
"overriding existing %s %s.%s\033[0m"%(
nickname, klass.__module__, klass.__name__, name,
nickname, registry[name].__module__, registry[name].__name__),
UserWarning, stacklevel=2)
registry[name] = klass
return klass
register.__doc__ = "Register %s to the %s factory"%(nickname, nickname)
return register | Get registrator function.
Parameters
----------
base_class : type
    base class for classes that will be registered
nickname : str
nickname of base_class for logging
Returns
-------
a registrator function |
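A usage sketch of the registry factory above; the module-level _REGISTRY dict it refers to is assumed to exist, and the classes below are hypothetical.
class Optimizer(object):
    """Hypothetical base class whose subclasses get registered."""

register_optimizer = get_register_func(Optimizer, 'optimizer')

@register_optimizer          # stored in the registry under the key 'sgd'
class SGD(Optimizer):
    pass

@register_optimizer          # stored under 'adam'; re-using a name warns and overrides
class Adam(Optimizer):
    pass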
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(NetworkCollector, self).get_default_config()
config.update({
'path': 'network',
'interfaces': ['eth', 'bond', 'em', 'p1p', 'eno', 'enp', 'ens',
'enx'],
'byte_unit': ['bit', 'byte'],
'greedy': 'true',
})
return config | Returns the default collector settings |
def add_exception_handler(self, exception_handler):
# type: (AbstractExceptionHandler) -> None
"""Register input to the exception handlers list.
:param exception_handler: Exception Handler instance to be
registered.
:type exception_handler: AbstractExceptionHandler
:return: None
"""
if exception_handler is None:
raise RuntimeConfigException(
"Valid Exception Handler instance to be provided")
if not isinstance(exception_handler, AbstractExceptionHandler):
raise RuntimeConfigException(
"Input should be an ExceptionHandler instance")
self.exception_handlers.append(exception_handler) | Register input to the exception handlers list.
:param exception_handler: Exception Handler instance to be
registered.
:type exception_handler: AbstractExceptionHandler
:return: None |
def _get_asset_urls(self, asset_id):
"""
Get list of asset urls and file names. This method may internally
use AssetRetriever to extract `asset` element types.
@param asset_id: Asset ID.
@type asset_id: str
@return List of dictionaries with asset file names and urls.
@rtype [{
'name': '<filename.ext>'
'url': '<url>'
}]
"""
dom = get_page(self._session, OPENCOURSE_ASSETS_URL,
json=True, id=asset_id)
logging.debug('Parsing JSON for asset_id <%s>.', asset_id)
urls = []
for element in dom['elements']:
typeName = element['typeName']
definition = element['definition']
# Elements of `asset` types look as follows:
#
# {'elements': [{'definition': {'assetId': 'gtSfvscoEeW7RxKvROGwrw',
# 'name': 'Презентация к лекции'},
# 'id': 'phxNlMcoEeWXCQ4nGuQJXw',
# 'typeName': 'asset'}],
# 'linked': None,
# 'paging': None}
#
if typeName == 'asset':
open_course_asset_id = definition['assetId']
for asset in self._asset_retriever([open_course_asset_id],
download=False):
urls.append({'name': asset.name, 'url': asset.url})
# Elements of `url` types look as follows:
#
# {'elements': [{'definition': {'name': 'What motivates you.pptx',
# 'url': 'https://d396qusza40orc.cloudfront.net/learning/Powerpoints/2-4A_What_motivates_you.pptx'},
# 'id': '0hixqpWJEeWQkg5xdHApow',
# 'typeName': 'url'}],
# 'linked': None,
# 'paging': None}
#
elif typeName == 'url':
urls.append({'name': definition['name'].strip(),
'url': definition['url'].strip()})
else:
logging.warning(
'Unknown asset typeName: %s\ndom: %s\n'
'If you think the downloader missed some '
'files, please report the issue here:\n'
'https://github.com/coursera-dl/coursera-dl/issues/new',
typeName, json.dumps(dom, indent=4))
return urls | Get list of asset urls and file names. This method may internally
use AssetRetriever to extract `asset` element types.
@param asset_id: Asset ID.
@type asset_id: str
@return List of dictionaries with asset file names and urls.
@rtype [{
'name': '<filename.ext>'
'url': '<url>'
}] |
async def create_local_did(self, seed: str = None, loc_did: str = None, metadata: dict = None) -> DIDInfo:
"""
Create and store a new local DID for use in pairwise DID relations.
:param seed: seed from which to create (default random)
:param loc_did: local DID value (default None to let indy-sdk generate)
:param metadata: metadata to associate with the local DID
(operation always sets 'since', 'modified' epoch timestamps)
:return: DIDInfo for new local DID
"""
LOGGER.debug('Wallet.create_local_did >>> seed: [SEED] loc_did: %s metadata: %s', loc_did, metadata)
cfg = {}
if seed:
cfg['seed'] = seed
if loc_did:
cfg['did'] = loc_did
if not self.handle:
LOGGER.debug('Wallet.create_local_did <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
try:
(created_did, verkey) = await did.create_and_store_my_did(self.handle, json.dumps(cfg))
except IndyError as x_indy:
if x_indy.error_code == ErrorCode.DidAlreadyExistsError:
LOGGER.debug('Wallet.create_local_did <!< DID %s already present in wallet %s', loc_did, self.name)
raise ExtantRecord('Local DID {} already present in wallet {}'.format(loc_did, self.name))
LOGGER.debug('Wallet.create_local_did <!< indy-sdk raised error %s', x_indy.error_code)
raise
now = int(time())
loc_did_metadata = {**(metadata or {}), 'since': now, 'modified': now}
await did.set_did_metadata(self.handle, created_did, json.dumps(loc_did_metadata))
rv = DIDInfo(created_did, verkey, loc_did_metadata)
LOGGER.debug('Wallet.create_local_did <<< %s', rv)
return rv | Create and store a new local DID for use in pairwise DID relations.
:param seed: seed from which to create (default random)
:param loc_did: local DID value (default None to let indy-sdk generate)
:param metadata: metadata to associate with the local DID
(operation always sets 'since', 'modified' epoch timestamps)
:return: DIDInfo for new local DID |
def service_group(self, service_name):
"""
Args:
service_name: the name of the service in the service registry
Returns:
      the name of the group the service is in, or None if the service was not found
"""
for group in EFConfig.SERVICE_GROUPS:
      if service_name in self.services(group):
return group
return None | Args:
service_name: the name of the service in the service registry
Returns:
  the name of the group the service is in, or None if the service was not found
def summarize(self):
""" G protein annotation summary in a text format
:return: A string summary of the annotation
:rtype: str
"""
data = [
['Sequence ID', self.seqrecord.id],
['G domain', ' '.join(self.gdomain_regions) if self.gdomain_regions else None],
['E-value vs rab db', self.evalue_bh_rabs],
['E-value vs non-rab db', self.evalue_bh_non_rabs],
['RabF motifs', ' '.join(map(str, self.rabf_motifs)) if self.rabf_motifs else None],
['Is Rab?', self.is_rab()]
]
summary = ''
for name, value in data:
summary += '{:25s}{}\n'.format(name, value)
if self.is_rab():
summary += '{:25s}{}\n'.format('Top 5 subfamilies',
', '.join('{:s} ({:.2g})'.format(name, score) for name, score
in self.rab_subfamily_top5))
return summary | G protein annotation summary in a text format
:return: A string summary of the annotation
:rtype: str |
def decode(self, encoded):
""" Decodes an object.
Args:
object_ (object): Encoded object.
Returns:
object: Object decoded.
"""
if self.enforce_reversible:
self.enforce_reversible = False
if self.encode(self.decode(encoded)) != encoded:
raise ValueError('Decoding is not reversible for "%s"' % encoded)
self.enforce_reversible = True
return encoded | Decodes an object.
Args:
object_ (object): Encoded object.
Returns:
object: Object decoded. |
def fetch_pillar(self):
'''
In the event of a cache miss, we need to incur the overhead of caching
a new pillar.
'''
log.debug('Pillar cache getting external pillar with ext: %s', self.ext)
fresh_pillar = Pillar(self.opts,
self.grains,
self.minion_id,
self.saltenv,
ext=self.ext,
functions=self.functions,
pillarenv=self.pillarenv)
return fresh_pillar.compile_pillar() | In the event of a cache miss, we need to incur the overhead of caching
a new pillar. |
def stop_change(self):
"""Stop changing light level manually"""
self.logger.info("Dimmer %s stop_change", self.device_id)
self.hub.direct_command(self.device_id, '18', '00')
success = self.hub.check_success(self.device_id, '18', '00')
if success:
self.logger.info("Dimmer %s stop_change: Light stopped changing successfully",
self.device_id)
self.hub.clear_device_command_cache(self.device_id)
else:
self.logger.error("Dimmer %s stop_change: Light did not stop",
self.device_id)
return success | Stop changing light level manually |
def load(stream, Loader=None):
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
"""
if Loader is None:
load_warning('load')
Loader = FullLoader
loader = Loader(stream)
try:
return loader.get_single_data()
finally:
loader.dispose() | Parse the first YAML document in a stream
and produce the corresponding Python object. |
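A usage sketch in the PyYAML style the function above follows; passing an explicit Loader avoids the deprecation warning emitted by the Loader-is-None branch.
import yaml  # assumes the PyYAML package providing load() and the Loader classes

document = "name: example\nvalues: [1, 2, 3]\n"
data = yaml.load(document, Loader=yaml.SafeLoader)
print(data)  # {'name': 'example', 'values': [1, 2, 3]}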
def extract_tar (archive, compression, cmd, verbosity, interactive, outdir):
"""Extract a TAR archive with the tarfile Python module."""
try:
with tarfile.open(archive) as tfile:
tfile.extractall(path=outdir)
except Exception as err:
msg = "error extracting %s: %s" % (archive, err)
raise util.PatoolError(msg)
return None | Extract a TAR archive with the tarfile Python module. |
def migrate_file(src_id, location_name, post_fixity_check=False):
"""Task to migrate a file instance to a new location.
.. note:: If something goes wrong during the content copy, the destination
file instance is removed.
:param src_id: The :class:`invenio_files_rest.models.FileInstance` ID.
:param location_name: Where to migrate the file.
:param post_fixity_check: Verify checksum after migration.
(Default: ``False``)
"""
location = Location.get_by_name(location_name)
f_src = FileInstance.get(src_id)
# Create destination
f_dst = FileInstance.create()
db.session.commit()
try:
# Copy contents
f_dst.copy_contents(
f_src,
progress_callback=progress_updater,
default_location=location.uri,
)
db.session.commit()
except Exception:
# Remove destination file instance if an error occurred.
db.session.delete(f_dst)
db.session.commit()
raise
# Update all objects pointing to file.
ObjectVersion.relink_all(f_src, f_dst)
db.session.commit()
# Start a fixity check
if post_fixity_check:
verify_checksum.delay(str(f_dst.id)) | Task to migrate a file instance to a new location.
.. note:: If something goes wrong during the content copy, the destination
file instance is removed.
:param src_id: The :class:`invenio_files_rest.models.FileInstance` ID.
:param location_name: Where to migrate the file.
:param post_fixity_check: Verify checksum after migration.
(Default: ``False``) |
def cli(ctx, ftdi_enable, ftdi_disable, serial_enable, serial_disable):
"""Manage FPGA boards drivers."""
exit_code = 0
if ftdi_enable: # pragma: no cover
exit_code = Drivers().ftdi_enable()
elif ftdi_disable: # pragma: no cover
exit_code = Drivers().ftdi_disable()
elif serial_enable: # pragma: no cover
exit_code = Drivers().serial_enable()
elif serial_disable: # pragma: no cover
exit_code = Drivers().serial_disable()
else:
click.secho(ctx.get_help())
ctx.exit(exit_code) | Manage FPGA boards drivers. |
def _got_srv(self, addrs):
"""Handle SRV lookup result.
:Parameters:
- `addrs`: properly sorted list of (hostname, port) tuples
"""
with self.lock:
if not addrs:
self._dst_service = None
if self._dst_port:
self._dst_nameports = [(self._dst_name, self._dst_port)]
else:
self._dst_nameports = []
self._set_state("aborted")
raise DNSError("Could not resolve SRV for service {0!r}"
" on host {1!r} and fallback port number not given"
.format(self._dst_service, self._dst_name))
elif addrs == [(".", 0)]:
self._dst_nameports = []
self._set_state("aborted")
raise DNSError("Service {0!r} not available on host {1!r}"
.format(self._dst_service, self._dst_name))
else:
self._dst_nameports = addrs
self._set_state("resolve-hostname") | Handle SRV lookup result.
:Parameters:
- `addrs`: properly sorted list of (hostname, port) tuples |
def min_ems(self, value: float) -> 'Size':
"""Set the minimum size in ems."""
raise_not_number(value)
self.minimum = '{}em'.format(value)
return self | Set the minimum size in ems. |
def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:
"""Pass the provided series obj through each recoder function sequentially and return the final result.
Args:
table (pd.DataFrame): A dataframe on which to apply recoding logic.
validate (bool): If ``True``, recoded table must pass validation tests.
"""
series = table[self.name]
self._check_series_name(series)
col = self.name
data = series.copy()
for recoder in self.recoders.values():
try:
data = recoder(data)
except (BaseException) as err:
raise RecodingError(col, recoder, err)
if validate:
failed_rows = find_failed_rows(self.validate(data.to_frame()))
if failed_rows.shape[0] > 0:
raise ValidationError(f"Rows that failed to validate for column '{self.name}':\n{failed_rows}")
return data.to_frame() | Pass the provided series obj through each recoder function sequentially and return the final result.
Args:
table (pd.DataFrame): A dataframe on which to apply recoding logic.
validate (bool): If ``True``, recoded table must pass validation tests. |
def _compose_mro(cls, types): # noqa
"""Calculates the method resolution order for a given class *cls*.
Includes relevant abstract base classes (with their respective bases) from
the *types* iterable. Uses a modified C3 linearization algorithm.
"""
bases = set(cls.__mro__)
# Remove entries which are already present in the __mro__ or unrelated.
def is_related(_type):
return ( # :off
_type not in bases and
hasattr(_type, '__mro__') and
issubclass(cls, _type)
) # :on
types = [n for n in types if is_related(n)]
# Remove entries which are strict bases of other entries (they will end up
    # in the MRO anyway).
def is_strict_base(_typ):
for other in types:
if _typ != other and _typ in other.__mro__:
return True
return False
types = [n for n in types if not is_strict_base(n)]
# Subclasses of the ABCs in *types* which are also implemented by
# *cls* can be used to stabilize ABC ordering.
type_set = set(types)
mro = []
for typ in types:
found = []
for sub in typ.__subclasses__():
if sub not in bases and issubclass(cls, sub):
found.append([s for s in sub.__mro__ if s in type_set])
if not found:
mro.append(typ)
continue
# Favor subclasses with the biggest number of useful bases
found.sort(key=len, reverse=True)
for sub in found:
for subcls in sub:
if subcls not in mro:
mro.append(subcls)
return _c3_mro(cls, abcs=mro) | Calculates the method resolution order for a given class *cls*.
Includes relevant abstract base classes (with their respective bases) from
the *types* iterable. Uses a modified C3 linearization algorithm. |
def burn(self):
"""
Process the template with the data and
config which has been set and return the resulting SVG.
Raises ValueError when no data set has
been added to the graph object.
"""
if not self.data:
raise ValueError("No data available")
if hasattr(self, 'calculations'):
self.calculations()
self.start_svg()
self.calculate_graph_dimensions()
self.foreground = etree.Element("g")
self.draw_graph()
self.draw_titles()
self.draw_legend()
self.draw_data()
self.graph.append(self.foreground)
self.render_inline_styles()
return self.render(self.root) | Process the template with the data and
config which has been set and return the resulting SVG.
Raises ValueError when no data set has
been added to the graph object. |
def VerifyStructure(self, parser_mediator, line):
"""Verifies if a line from a text file is in the expected format.
Args:
parser_mediator (ParserMediator): parser mediator.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
"""
try:
structure = self._DPKG_LOG_LINE.parseString(line)
except pyparsing.ParseException as exception:
logger.debug(
'Unable to parse Debian dpkg.log file with error: {0!s}'.format(
exception))
return False
return 'date_time' in structure and 'body' in structure | Verifies if a line from a text file is in the expected format.
Args:
parser_mediator (ParserMediator): parser mediator.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not. |
def get_relationships_by_query(self, relationship_query):
"""Gets a list of ``Relationships`` matching the given relationship query.
arg: relationship_query
(osid.relationship.RelationshipQuery): the relationship
query
return: (osid.relationship.RelationshipList) - the returned
``RelationshipList``
raise: NullArgument - ``relationship_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``relationship_query`` is not of this
service
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceQuerySession.get_resources_by_query
and_list = list()
or_list = list()
for term in relationship_query._query_terms:
if '$in' in relationship_query._query_terms[term] and '$nin' in relationship_query._query_terms[term]:
and_list.append(
{'$or': [{term: {'$in': relationship_query._query_terms[term]['$in']}},
{term: {'$nin': relationship_query._query_terms[term]['$nin']}}]})
else:
and_list.append({term: relationship_query._query_terms[term]})
for term in relationship_query._keyword_terms:
or_list.append({term: relationship_query._keyword_terms[term]})
if or_list:
and_list.append({'$or': or_list})
view_filter = self._view_filter()
if view_filter:
and_list.append(view_filter)
if and_list:
query_terms = {'$and': and_list}
collection = JSONClientValidated('relationship',
collection='Relationship',
runtime=self._runtime)
result = collection.find(query_terms).sort('_id', DESCENDING)
else:
result = []
return objects.RelationshipList(result, runtime=self._runtime, proxy=self._proxy) | Gets a list of ``Relationships`` matching the given relationship query.
arg: relationship_query
(osid.relationship.RelationshipQuery): the relationship
query
return: (osid.relationship.RelationshipList) - the returned
``RelationshipList``
raise: NullArgument - ``relationship_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``relationship_query`` is not of this
service
*compliance: mandatory -- This method must be implemented.* |
def fit(self, matrix, epochs=5, no_threads=2, verbose=False):
"""
Estimate the word embeddings.
Parameters:
- scipy.sparse.coo_matrix matrix: coocurrence matrix
- int epochs: number of training epochs
- int no_threads: number of training threads
- bool verbose: print progress messages if True
"""
shape = matrix.shape
if (len(shape) != 2 or
shape[0] != shape[1]):
raise Exception('Coocurrence matrix must be square')
if not sp.isspmatrix_coo(matrix):
raise Exception('Coocurrence matrix must be in the COO format')
random_state = check_random_state(self.random_state)
self.word_vectors = ((random_state.rand(shape[0],
self.no_components) - 0.5)
/ self.no_components)
self.word_biases = np.zeros(shape[0],
dtype=np.float64)
self.vectors_sum_gradients = np.ones_like(self.word_vectors)
self.biases_sum_gradients = np.ones_like(self.word_biases)
shuffle_indices = np.arange(matrix.nnz, dtype=np.int32)
if verbose:
print('Performing %s training epochs '
'with %s threads' % (epochs, no_threads))
for epoch in range(epochs):
if verbose:
print('Epoch %s' % epoch)
# Shuffle the coocurrence matrix
random_state.shuffle(shuffle_indices)
fit_vectors(self.word_vectors,
self.vectors_sum_gradients,
self.word_biases,
self.biases_sum_gradients,
matrix.row,
matrix.col,
matrix.data,
shuffle_indices,
self.learning_rate,
self.max_count,
self.alpha,
self.max_loss,
int(no_threads))
if not np.isfinite(self.word_vectors).all():
raise Exception('Non-finite values in word vectors. '
'Try reducing the learning rate or the '
'max_loss parameter.') | Estimate the word embeddings.
Parameters:
- scipy.sparse.coo_matrix matrix: coocurrence matrix
- int epochs: number of training epochs
- int no_threads: number of training threads
- bool verbose: print progress messages if True |
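A usage sketch, assuming the method above belongs to a GloVe-style model class (here called Glove); the constructor and its arguments are assumptions, not taken from the snippet.
import numpy as np
import scipy.sparse as sp

# A tiny symmetric co-occurrence matrix over 3 "words", in COO format.
counts = np.array([[0.0, 2.0, 1.0],
                   [2.0, 0.0, 3.0],
                   [1.0, 3.0, 0.0]])
matrix = sp.coo_matrix(counts)

model = Glove(no_components=2, learning_rate=0.05)  # hypothetical constructor
model.fit(matrix, epochs=10, no_threads=1, verbose=True)
print(model.word_vectors.shape)  # (3, 2)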
def get_messages(session, query, limit=10, offset=0):
"""
Get one or more messages
"""
query['limit'] = limit
query['offset'] = offset
# GET /api/messages/0.1/messages
response = make_get_request(session, 'messages', params_data=query)
json_data = response.json()
if response.status_code == 200:
return json_data['result']
else:
raise MessagesNotFoundException(
message=json_data['message'],
error_code=json_data['error_code'],
request_id=json_data['request_id']
) | Get one or more messages |
def __reset_crosshair(self):
"""
redraw the cross-hair on the horizontal slice plot
Parameters
----------
x: int
the x image coordinate
y: int
the y image coordinate
Returns
-------
"""
self.lhor.set_ydata(self.y_coord)
self.lver.set_xdata(self.x_coord) | redraw the cross-hair on the horizontal slice plot
Parameters
----------
x: int
the x image coordinate
y: int
the y image coordinate
Returns
------- |
def remove(self, value):
"""Remove an entry from the catalog """
ret = libxml2mod.xmlACatalogRemove(self._o, value)
return ret | Remove an entry from the catalog |
def get_dimension_type(self, dim):
"""Get the type of the requested dimension.
Type is determined by Dimension.type attribute or common
type of the dimension values, otherwise None.
Args:
dimension: Dimension to look up by name or by index
Returns:
Declared type of values along the dimension
"""
dim = self.get_dimension(dim)
if dim is None:
return None
elif dim.type is not None:
return dim.type
elif dim in self.vdims:
return np.float64
return self.interface.dimension_type(self, dim) | Get the type of the requested dimension.
Type is determined by Dimension.type attribute or common
type of the dimension values, otherwise None.
Args:
dimension: Dimension to look up by name or by index
Returns:
Declared type of values along the dimension |
def _validate_param(param): # pylint: disable=too-many-branches
""" Ensure the filter cast properly according to the operator """
detail = None
if param.oper not in goldman.config.QUERY_FILTERS:
detail = 'The query filter {} is not a supported ' \
'operator. Please change {} & retry your ' \
'request'.format(param.oper, param)
elif param.oper in goldman.config.GEO_FILTERS:
try:
if not isinstance(param.val, list) or len(param.val) <= 2:
raise ValueError
else:
param.val = [float(i) for i in param.val]
except ValueError:
detail = 'The query filter {} requires a list ' \
'of floats for geo evaluation. Please ' \
'modify your request & retry'.format(param)
elif param.oper in goldman.config.ENUM_FILTERS:
if not isinstance(param.val, list):
param.val = [param.val]
param.val = tuple(param.val)
elif isinstance(param.val, list):
detail = 'The query filter {} should not be specified more ' \
'than once or have multiple values. Please modify ' \
'your request & retry'.format(param)
elif param.oper in goldman.config.BOOL_FILTERS:
try:
param.val = str_to_bool(param.val)
except ValueError:
detail = 'The query filter {} requires a boolean ' \
'for evaluation. Please modify your ' \
'request & retry'.format(param)
elif param.oper in goldman.config.DATE_FILTERS:
try:
param.val = str_to_dt(param.val)
except ValueError:
detail = 'The query filter {} supports only an ' \
'epoch or ISO 8601 timestamp. Please ' \
'modify your request & retry'.format(param)
elif param.oper in goldman.config.NUM_FILTERS:
try:
param.val = int(param.val)
except ValueError:
detail = 'The query filter {} requires a number ' \
'for evaluation. Please modify your ' \
'request & retry'.format(param)
if detail:
raise InvalidQueryParams(**{
'detail': detail,
'links': LINK,
'parameter': PARAM,
}) | Ensure the filter cast properly according to the operator |
def get_power_status() -> SystemPowerStatus:
"""Retrieves the power status of the system.
The status indicates whether the system is running on AC or DC power,
whether the battery is currently charging, how much battery life remains,
and if battery saver is on or off.
:raises OSError: if the call to GetSystemPowerStatus fails
:return: the power status
:rtype: SystemPowerStatus
"""
get_system_power_status = ctypes.windll.kernel32.GetSystemPowerStatus
get_system_power_status.argtypes = [ctypes.POINTER(SystemPowerStatus)]
get_system_power_status.restype = wintypes.BOOL
status = SystemPowerStatus()
if not get_system_power_status(ctypes.pointer(status)):
raise ctypes.WinError()
else:
return status | Retrieves the power status of the system.
The status indicates whether the system is running on AC or DC power,
whether the battery is currently charging, how much battery life remains,
and if battery saver is on or off.
:raises OSError: if the call to GetSystemPowerStatus fails
:return: the power status
:rtype: SystemPowerStatus |
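A usage sketch; the attribute names below mirror the Win32 SYSTEM_POWER_STATUS fields and are only assumptions about how the SystemPowerStatus ctypes structure exposes them.
# Windows-only; field names below are hypothetical (assumed to follow
# SYSTEM_POWER_STATUS: ACLineStatus, BatteryLifePercent, ...).
status = get_power_status()
on_ac = status.ac_line_status == 1        # hypothetical field name
percent = status.battery_life_percent     # hypothetical field name
print(on_ac, percent)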
def poll(self):
"""
Check if the pod is still running.
Uses the same interface as subprocess.Popen.poll(): if the pod is
still running, returns None. If the pod has exited, return the
exit code if we can determine it, or 1 if it has exited but we
don't know how. These are the return values JupyterHub expects.
Note that a clean exit will have an exit code of zero, so it is
necessary to check that the returned value is None, rather than
just Falsy, to determine that the pod is still running.
"""
# have to wait for first load of data before we have a valid answer
if not self.pod_reflector.first_load_future.done():
yield self.pod_reflector.first_load_future
data = self.pod_reflector.pods.get(self.pod_name, None)
if data is not None:
if data.status.phase == 'Pending':
return None
ctr_stat = data.status.container_statuses
if ctr_stat is None: # No status, no container (we hope)
# This seems to happen when a pod is idle-culled.
return 1
for c in ctr_stat:
# return exit code if notebook container has terminated
if c.name == 'notebook':
if c.state.terminated:
# call self.stop to delete the pod
if self.delete_stopped_pods:
yield self.stop(now=True)
return c.state.terminated.exit_code
break
# None means pod is running or starting up
return None
# pod doesn't exist or has been deleted
return 1 | Check if the pod is still running.
Uses the same interface as subprocess.Popen.poll(): if the pod is
still running, returns None. If the pod has exited, return the
exit code if we can determine it, or 1 if it has exited but we
don't know how. These are the return values JupyterHub expects.
Note that a clean exit will have an exit code of zero, so it is
necessary to check that the returned value is None, rather than
just Falsy, to determine that the pod is still running. |
def _finish_progress(self):
"""
Mark the progressbar as finished.
:return: None
"""
if self._show_progressbar:
if self._progressbar is None:
self._initialize_progressbar()
if self._progressbar is not None:
self._progressbar.finish()
if self._progress_callback is not None:
self._progress_callback(100.0) | Mark the progressbar as finished.
:return: None |
def validate(self):
"""Checks that at least required params exist"""
required = ['token', 'content']
valid_data = {
'exp_record': (['type', 'format'], 'record',
'Exporting record but content is not record'),
'imp_record': (['type', 'overwriteBehavior', 'data', 'format'],
'record', 'Importing record but content is not record'),
'metadata': (['format'], 'metadata',
'Requesting metadata but content != metadata'),
'exp_file': (['action', 'record', 'field'], 'file',
'Exporting file but content is not file'),
'imp_file': (['action', 'record', 'field'], 'file',
'Importing file but content is not file'),
'del_file': (['action', 'record', 'field'], 'file',
                         'Deleting file but content is not file'),
'exp_event': (['format'], 'event',
'Exporting events but content is not event'),
'exp_arm': (['format'], 'arm',
'Exporting arms but content is not arm'),
'exp_fem': (['format'], 'formEventMapping',
'Exporting form-event mappings but content != formEventMapping'),
'exp_user': (['format'], 'user',
'Exporting users but content is not user'),
'exp_survey_participant_list': (['instrument'], 'participantList',
'Exporting Survey Participant List but content != participantList'),
'version': (['format'], 'version',
'Requesting version but content != version')
}
extra, req_content, err_msg = valid_data[self.type]
required.extend(extra)
required = set(required)
pl_keys = set(self.payload.keys())
# if req is not subset of payload keys, this call is wrong
if not set(required) <= pl_keys:
# what is not in pl_keys?
not_pre = required - pl_keys
raise RCAPIError("Required keys: %s" % ', '.join(not_pre))
# Check content, raise with err_msg if not good
try:
if self.payload['content'] != req_content:
raise RCAPIError(err_msg)
except KeyError:
raise RCAPIError('content not in payload') | Checks that at least required params exist |
def encrypt(s, base64 = False):
"""
    Symmetric encryption function.
"""
e = _cipher().encrypt(s)
    return base64 and b64encode(e) or e | Symmetric encryption function.
def get_parser(parser=None):
"""Get parser for mpu."""
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
if parser is None:
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
subparsers = parser.add_subparsers()
pkg_init_parser = subparsers.add_parser('init')
pkg_init_parser.add_argument("root",
nargs='?',
help="project root - should be empty")
pkg_init_parser.set_defaults(func=run_init)
return parser | Get parser for mpu. |
def setRepoData(self, searchString, category="", extension="", math=False, game=False, searchFiles=False):
"""Call this function with all the settings to use for future operations on a repository, must be called FIRST"""
self.searchString = searchString
self.category = category
self.math = math
self.game = game
self.searchFiles = searchFiles
self.extension = extension | Call this function with all the settings to use for future operations on a repository, must be called FIRST |