code | docstring |
---|---|
def get_url_from_model_core(request, obj):
"""
Returns object URL from model core.
"""
from is_core.site import get_model_core
model_core = get_model_core(obj.__class__)
if model_core and hasattr(model_core, 'ui_patterns'):
edit_pattern = model_core.ui_patterns.get('detail')
return (
edit_pattern.get_url_string(request, obj=obj)
if edit_pattern and edit_pattern.has_permission('get', request, obj=obj) else None
)
else:
return None | Returns object URL from model core. |
def from_code(cls, co):
"""Disassemble a Python code object into a Code object."""
co_code = co.co_code
labels = dict((addr, Label()) for addr in findlabels(co_code))
linestarts = dict(cls._findlinestarts(co))
cellfree = co.co_cellvars + co.co_freevars
code = CodeList()
n = len(co_code)
i = 0
extended_arg = 0
while i < n:
op = Opcode(ord(co_code[i]))
if i in labels:
code.append((labels[i], None))
if i in linestarts:
code.append((SetLineno, linestarts[i]))
i += 1
if op in hascode:
lastop, lastarg = code[-1]
if lastop != LOAD_CONST:
raise ValueError(
"%s should be preceded by LOAD_CONST code" % op)
code[-1] = (LOAD_CONST, Code.from_code(lastarg))
if op not in hasarg:
code.append((op, None))
else:
arg = ord(co_code[i]) + ord(co_code[i+1])*256 + extended_arg
extended_arg = 0
i += 2
if op == opcode.EXTENDED_ARG:
extended_arg = arg << 16
elif op in hasconst:
code.append((op, co.co_consts[arg]))
elif op in hasname:
code.append((op, co.co_names[arg]))
elif op in hasjabs:
code.append((op, labels[arg]))
elif op in hasjrel:
code.append((op, labels[i + arg]))
elif op in haslocal:
code.append((op, co.co_varnames[arg]))
elif op in hascompare:
code.append((op, cmp_op[arg]))
elif op in hasfree:
code.append((op, cellfree[arg]))
else:
code.append((op, arg))
varargs = bool(co.co_flags & CO_VARARGS)
varkwargs = bool(co.co_flags & CO_VARKEYWORDS)
newlocals = bool(co.co_flags & CO_NEWLOCALS)
args = co.co_varnames[:co.co_argcount + varargs + varkwargs]
if co.co_consts and isinstance(co.co_consts[0], basestring):
docstring = co.co_consts[0]
else:
docstring = None
return cls(code = code,
freevars = co.co_freevars,
args = args,
varargs = varargs,
varkwargs = varkwargs,
newlocals = newlocals,
name = co.co_name,
filename = co.co_filename,
firstlineno = co.co_firstlineno,
docstring = docstring,
) | Disassemble a Python code object into a Code object. |
def save_photon_hdf5(self, identity=None, overwrite=True, path=None):
"""Create a smFRET Photon-HDF5 file with current timestamps."""
filepath = self.filepath
if path is not None:
filepath = Path(path, filepath.name)
self.merge_da()
data = self._make_photon_hdf5(identity=identity)
phc.hdf5.save_photon_hdf5(data, h5_fname=str(filepath),
overwrite=overwrite) | Create a smFRET Photon-HDF5 file with current timestamps. |
def quote_datetime(self, value):
"""
Force the quote_datetime to always be stored as a datetime.
:param value: a datetime, or a value (such as a date string) that can be parsed into one
:return: None
"""
if value:
if isinstance(value, type_check):
self._quote_datetime = parse(value)
elif isinstance(value, datetime.datetime):
self._quote_datetime = value | Force the quote_datetime to always be stored as a datetime.
:param value: a datetime, or a value (such as a date string) that can be parsed into one
:return: None |
def parse_ndxlist(output):
"""Parse output from make_ndx to build list of index groups::
groups = parse_ndxlist(output)
output should be the standard output from ``make_ndx``, e.g.::
rc,output,junk = gromacs.make_ndx(..., input=('', 'q'), stdout=False, stderr=True)
(or simply use
rc,output,junk = cbook.make_ndx_captured(...)
which presets input, stdout and stderr; of course input can be overridden.)
:Returns:
The function returns a list of dicts (``groups``) with fields
name
name of the group
nr
number of the group (starts at 0)
natoms
number of atoms in the group
"""
m = NDXLIST.search(output) # make sure we pick up a proper full list
grouplist = m.group('LIST')
return parse_groups(grouplist) | Parse output from make_ndx to build list of index groups::
groups = parse_ndxlist(output)
output should be the standard output from ``make_ndx``, e.g.::
rc,output,junk = gromacs.make_ndx(..., input=('', 'q'), stdout=False, stderr=True)
(or simply use
rc,output,junk = cbook.make_ndx_captured(...)
which presets input, stdout and stderr; of course input can be overridden.)
:Returns:
The function returns a list of dicts (``groups``) with fields
name
name of the group
nr
number of the group (starts at 0)
natoms
number of atoms in the group |
def getBucketIndices(self, input):
""" See method description in base.py """
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
# Encode each sub-field
return [None] * len(self.encoders)
else:
assert isinstance(input, datetime.datetime)
# Get the scalar values for each sub-field
scalars = self.getScalars(input)
# Encode each sub-field
result = []
for i in xrange(len(self.encoders)):
(name, encoder, offset) = self.encoders[i]
result.extend(encoder.getBucketIndices(scalars[i]))
return result | See method description in base.py |
def contents(self, from_date=DEFAULT_DATETIME,
offset=None, max_contents=MAX_CONTENTS):
"""Get the contents of a repository.
This method returns an iterator that manages the pagination
over contents. Take into account that the seconds of `from_date`
parameter will be ignored because the API only works with
hours and minutes.
:param from_date: fetch the contents updated since this date
:param offset: fetch the contents starting from this offset
:param max_contents: maximum number of contents to fetch per request
"""
resource = self.RCONTENTS + '/' + self.MSEARCH
# Set confluence query parameter (cql)
date = from_date.strftime("%Y-%m-%d %H:%M")
cql = self.VCQL % {'date': date}
# Set parameters
params = {
self.PCQL: cql,
self.PLIMIT: max_contents,
self.PEXPAND: self.PANCESTORS
}
if offset:
params[self.PSTART] = offset
for response in self._call(resource, params):
yield response | Get the contents of a repository.
This method returns an iterator that manages the pagination
over contents. Take into account that the seconds of `from_date`
parameter will be ignored because the API only works with
hours and minutes.
:param from_date: fetch the contents updated since this date
:param offset: fetch the contents starting from this offset
:param max_contents: maximum number of contents to fetch per request |
def apply_backspaces_and_linefeeds(text):
"""
Interpret backspaces and linefeeds in text like a terminal would.
Interpret text like a terminal by removing backspace and linefeed
characters and applying them line by line.
If the final line ends with a carriage return, it is kept so that the result can be
concatenated with the next output chunk.
"""
orig_lines = text.split('\n')
orig_lines_len = len(orig_lines)
new_lines = []
for orig_line_idx, orig_line in enumerate(orig_lines):
chars, cursor = [], 0
orig_line_len = len(orig_line)
for orig_char_idx, orig_char in enumerate(orig_line):
if orig_char == '\r' and (orig_char_idx != orig_line_len - 1 or
orig_line_idx != orig_lines_len - 1):
cursor = 0
elif orig_char == '\b':
cursor = max(0, cursor - 1)
else:
if (orig_char == '\r' and
orig_char_idx == orig_line_len - 1 and
orig_line_idx == orig_lines_len - 1):
cursor = len(chars)
if cursor == len(chars):
chars.append(orig_char)
else:
chars[cursor] = orig_char
cursor += 1
new_lines.append(''.join(chars))
return '\n'.join(new_lines) | Interpret backspaces and linefeeds in text like a terminal would.
Interpret text like a terminal by removing backspace and linefeed
characters and applying them line by line.
If the final line ends with a carriage return, it is kept so that the result can be
concatenated with the next output chunk. |
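A quick illustration of the behaviour described above (a minimal sketch; it assumes apply_backspaces_and_linefeeds as defined above is in scope):
# '\r' moves the cursor to the start of the line, so later characters overwrite earlier ones
print(apply_backspaces_and_linefeeds('abc\rx'))    # -> 'xbc'
# '\b' moves the cursor one position back before the next character is written
print(apply_backspaces_and_linefeeds('abc\b\bd'))  # -> 'adc'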
def set_color_zones(self, start_index, end_index, color, duration=0, apply=1, callb=None, rapid=False):
"""Convenience method to set the colour status zone of the device
This method will send a MultiZoneSetColorZones message to the device, and request callb be executed
when an ACK is received. The default callback will simply cache the value.
:param start_index: Index of the start of the zone of interest
:type start_index: int
:param end_index: Index of the end of the zone of interest. By default start_index+7
:type end_index: int
:param apply: Indicates if the colour change is to be applied or memorized. Default: 1
:type apply: int
:param color: The new state, a dictionary of ints with 4 keys: Hue, Saturation, Brightness, Kelvin
:type color: dict
:param duration: The duration, in seconds, of the power state transition.
:type duration: int
:param callb: Callable to be used when the response is received. If not set,
self.resp_set_multizonemultizone will be used.
:type callb: callable
:param rapid: Whether to ask for ack (False) or not (True). Default False
:type rapid: bool
:returns: None
:rtype: None
"""
if len(color) == 4:
args = {
"start_index": start_index,
"end_index": end_index,
"color": color,
"duration": duration,
"apply": apply,
}
mypartial=partial(self.resp_set_multizonemultizone, args=args)
if callb:
mycallb=lambda x,y:(mypartial(y),callb(x,y))
else:
mycallb=lambda x,y:mypartial(y)
if rapid:
self.fire_and_forget(MultiZoneSetColorZones, args, num_repeats=1)
mycallb(self, None)
else:
self.req_with_ack(MultiZoneSetColorZones, args, callb=mycallb) | Convenience method to set the colour status zone of the device
This method will send a MultiZoneSetColorZones message to the device, and request callb be executed
when an ACK is received. The default callback will simply cache the value.
:param start_index: Index of the start of the zone of interest
:type start_index: int
:param end_index: Index of the end of the zone of interest. By default start_index+7
:type end_index: int
:param apply: Indicates if the colour change is to be applied or memorized. Default: 1
:type apply: int
:param color: The new state, a dictionary of ints with 4 keys: Hue, Saturation, Brightness, Kelvin
:type color: dict
:param duration: The duration, in seconds, of the power state transition.
:type duration: int
:param callb: Callable to be used when the response is received. If not set,
self.resp_set_multizonemultizone will be used.
:type callb: callable
:param rapid: Whether to ask for ack (False) or not (True). Default False
:type rapid: bool
:returns: None
:rtype: None |
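A minimal usage sketch for the method above. The names here are assumptions: `light` stands for an already-discovered aiolifx-style multizone device object, and the HSBK values are illustrative 16-bit integers:
# Paint zones 0-7 with a single HSBK colour, without waiting for an ACK (rapid=True)
hsbk = [43634, 65535, 32768, 3500]  # hue, saturation, brightness, kelvin
light.set_color_zones(start_index=0, end_index=7, color=hsbk, duration=2, rapid=True)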
def latencies(self):
"""List[Tuple[:class:`int`, :class:`float`]]: A list of latencies between a HEARTBEAT and a HEARTBEAT_ACK in seconds.
This returns a list of tuples with elements ``(shard_id, latency)``.
"""
return [(shard_id, shard.ws.latency) for shard_id, shard in self.shards.items()] | List[Tuple[:class:`int`, :class:`float`]]: A list of latencies between a HEARTBEAT and a HEARTBEAT_ACK in seconds.
This returns a list of tuples with elements ``(shard_id, latency)``. |
def getargnames(argspecs, with_unbox=False):
"""Resembles list of arg-names as would be seen in a function signature, including
var-args, var-keywords and keyword-only args.
"""
# todo: We can maybe make use of inspect.formatargspec
args = argspecs.args
vargs = argspecs.varargs
try:
kw = argspecs.keywords
except AttributeError:
kw = argspecs.varkw
try:
kwonly = argspecs.kwonlyargs
except AttributeError:
kwonly = None
res = []
if args is not None:
res.extend(args)
if vargs is not None:
res.append('*'+vargs if with_unbox else vargs)
if kwonly is not None:
res.extend(kwonly)
if kw is not None:
res.append('**'+kw if with_unbox else kw)
return res | Returns the list of argument names as they would appear in a function signature, including
var-args, var-keywords and keyword-only args. |
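For example, feeding it an argspec from the standard library's inspect module (a sketch assuming the getargnames function above is in scope; inspect.getfullargspec exposes the varkw and kwonlyargs attributes the function falls back to):
import inspect

def f(a, b=1, *args, kw_only=None, **kwargs):
    pass

spec = inspect.getfullargspec(f)
print(getargnames(spec))                   # ['a', 'b', 'args', 'kw_only', 'kwargs']
print(getargnames(spec, with_unbox=True))  # ['a', 'b', '*args', 'kw_only', '**kwargs']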
def context(fname, node):
"""
Context manager managing exceptions and adding line number of the
current node and name of the current file to the error message.
:param fname: the current file being processed
:param node: the current node being processed
"""
try:
yield node
except Exception:
etype, exc, tb = sys.exc_info()
msg = 'node %s: %s, line %s of %s' % (
striptag(node.tag), exc, getattr(node, 'lineno', '?'), fname)
raise_(etype, msg, tb) | Context manager managing exceptions and adding line number of the
current node and name of the current file to the error message.
:param fname: the current file being processed
:param node: the current node being processed |
def check_X_y(X, y):
"""
tool to ensure input and output data have the same number of samples
Parameters
----------
X : array-like
y : array-like
Returns
-------
None
"""
if len(X) != len(y):
raise ValueError('Inconsistent input and output data shapes. '\
'found X: {} and y: {}'.format(X.shape, y.shape)) | tool to ensure input and output data have the same number of samples
Parameters
----------
X : array-like
y : array-like
Returns
-------
None |
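A tiny sanity check (assumes numpy and the check_X_y function above are in scope):
import numpy as np

check_X_y(np.zeros((10, 3)), np.zeros(10))   # same length: passes silently
try:
    check_X_y(np.zeros((10, 3)), np.zeros(7))
except ValueError as err:
    print(err)                               # reports the mismatched shapes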
def filterOverlappingAlignments(alignments):
"""Filter alignments to be non-overlapping.
"""
l = []
alignments = alignments[:]
sortAlignments(alignments)
alignments.reverse()
for pA1 in alignments:
for pA2 in l:
if pA1.contig1 == pA2.contig1 and getPositiveCoordinateRangeOverlap(pA1.start1+1, pA1.end1, pA2.start1+1, pA2.end1) is not None: #One offset, inclusive coordinates
break
if pA1.contig2 == pA2.contig2 and getPositiveCoordinateRangeOverlap(pA1.start2+1, pA1.end2, pA2.start2+1, pA2.end2) is not None: #One offset, inclusive coordinates
break
if pA1.contig2 == pA2.contig1 and getPositiveCoordinateRangeOverlap(pA1.start2+1, pA1.end2, pA2.start1+1, pA2.end1) is not None: #One offset, inclusive coordinates
break
if pA1.contig1 == pA2.contig2 and getPositiveCoordinateRangeOverlap(pA1.start1+1, pA1.end1, pA2.start2+1, pA2.end2) is not None: #One offset, inclusive coordinates
break
else:
l.append(pA1)
l.reverse()
return l | Filter alignments to be non-overlapping. |
def _gridmake2(x1, x2):
"""
Expands two vectors (or matrices) into a matrix where rows span the
cartesian product of combinations of the input arrays. Each column of the
input arrays will correspond to one column of the output matrix.
Parameters
----------
x1 : np.ndarray
First vector to be expanded.
x2 : np.ndarray
Second vector to be expanded.
Returns
-------
out : np.ndarray
The cartesian product of combinations of the input arrays.
Notes
-----
Based on the original function ``gridmake2`` in CompEcon toolbox by
Miranda and Fackler.
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational Economics
and Finance, MIT Press, 2002.
"""
if x1.ndim == 1 and x2.ndim == 1:
return np.column_stack([np.tile(x1, x2.shape[0]),
np.repeat(x2, x1.shape[0])])
elif x1.ndim > 1 and x2.ndim == 1:
first = np.tile(x1, (x2.shape[0], 1))
second = np.repeat(x2, x1.shape[0])
return np.column_stack([first, second])
else:
raise NotImplementedError("Come back here") | Expands two vectors (or matrices) into a matrix where rows span the
cartesian product of combinations of the input arrays. Each column of the
input arrays will correspond to one column of the output matrix.
Parameters
----------
x1 : np.ndarray
First vector to be expanded.
x2 : np.ndarray
Second vector to be expanded.
Returns
-------
out : np.ndarray
The cartesian product of combinations of the input arrays.
Notes
-----
Based on the original function ``gridmake2`` in CompEcon toolbox by
Miranda and Fackler.
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational Economics
and Finance, MIT Press, 2002. |
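A concrete worked example of the 1-D branch above, using only standard NumPy calls (np.tile cycles x1 once per element of x2, while np.repeat holds each x2 value for len(x1) rows):
import numpy as np

x1 = np.array([1, 2])
x2 = np.array([10, 20, 30])
out = np.column_stack([np.tile(x1, x2.shape[0]), np.repeat(x2, x1.shape[0])])
# out:
# [[ 1 10]
#  [ 2 10]
#  [ 1 20]
#  [ 2 20]
#  [ 1 30]
#  [ 2 30]]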
def ensure_path(path):
# type: (Union[vistir.compat.Path, str]) -> vistir.compat.Path
"""
Given a path (either a string or a Path object), expand variables and return a Path object.
:param path: A string or a :class:`~pathlib.Path` object.
:type path: str or :class:`~pathlib.Path`
:return: A fully expanded Path object.
:rtype: :class:`~pathlib.Path`
"""
if isinstance(path, vistir.compat.Path):
return path
path = vistir.compat.Path(os.path.expandvars(path))
return path.absolute() | Given a path (either a string or a Path object), expand variables and return a Path object.
:param path: A string or a :class:`~pathlib.Path` object.
:type path: str or :class:`~pathlib.Path`
:return: A fully expanded Path object.
:rtype: :class:`~pathlib.Path` |
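A short usage sketch (assumes vistir is installed and the function above is in scope; $HOME is just an illustrative environment variable):
p = ensure_path("$HOME/projects")   # environment variables are expanded
print(p.is_absolute())              # True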
def load(self, dump_fn='', prep_only=0, force_upload=0, from_local=0, name=None, site=None, dest_dir=None, force_host=None):
"""
Restores a database snapshot onto the target database server.
If prep_only=1, commands for preparing the load will be generated,
but not the command to finally load the snapshot.
"""
r = self.database_renderer(name=name, site=site)
# Render the snapshot filename.
r.env.dump_fn = self.get_default_db_fn(fn_template=dump_fn, dest_dir=dest_dir)
from_local = int(from_local)
prep_only = int(prep_only)
missing_local_dump_error = r.format(
"Database dump file {dump_fn} does not exist."
)
# Copy snapshot file to target.
if self.is_local:
r.env.remote_dump_fn = dump_fn
else:
r.env.remote_dump_fn = '/tmp/' + os.path.split(r.env.dump_fn)[-1]
if not prep_only and not self.is_local:
if not self.dryrun:
assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
#r.pc('Uploading PostgreSQL database snapshot...')
# r.put(
# local_path=r.env.dump_fn,
# remote_path=r.env.remote_dump_fn)
#r.local('rsync -rvz --progress --no-p --no-g '
#'--rsh "ssh -o StrictHostKeyChecking=no -i {key_filename}" '
#'{dump_fn} {user}@{host_string}:{remote_dump_fn}')
self.upload_snapshot(name=name, site=site, local_dump_fn=r.env.dump_fn, remote_dump_fn=r.env.remote_dump_fn)
if self.is_local and not prep_only and not self.dryrun:
assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
if force_host:
r.env.db_host = force_host
with settings(warn_only=True):
r.sudo('dropdb --if-exists --no-password --user={db_root_username} --host={db_host} {db_name}', user=r.env.postgres_user)
r.sudo('psql --no-password --user={db_root_username} --host={db_host} -c "CREATE DATABASE {db_name};"', user=r.env.postgres_user)
with settings(warn_only=True):
if r.env.engine == POSTGIS:
r.sudo('psql --user={db_root_username} --no-password --dbname={db_name} --host={db_host} --command="CREATE EXTENSION postgis;"',
user=r.env.postgres_user)
r.sudo('psql --user={db_root_username} --no-password --dbname={db_name} --host={db_host} --command="CREATE EXTENSION postgis_topology;"',
user=r.env.postgres_user)
with settings(warn_only=True):
r.sudo('psql --user={db_root_username} --host={db_host} -c "REASSIGN OWNED BY {db_user} TO {db_root_username};"', user=r.env.postgres_user)
with settings(warn_only=True):
r.sudo('psql --user={db_root_username} --host={db_host} -c "DROP OWNED BY {db_user} CASCADE;"', user=r.env.postgres_user)
r.sudo('psql --user={db_root_username} --host={db_host} -c "DROP USER IF EXISTS {db_user}; '
'CREATE USER {db_user} WITH PASSWORD \'{db_password}\'; '
'GRANT ALL PRIVILEGES ON DATABASE {db_name} to {db_user};"', user=r.env.postgres_user)
for createlang in r.env.createlangs:
r.env.createlang = createlang
r.sudo('createlang -U {db_root_username} --host={db_host} {createlang} {db_name} || true', user=r.env.postgres_user)
if not prep_only:
# Ignore errors needed to work around bug "ERROR: schema "public" already exists", which is thrown in 9.6 even if we use --clean and --if-exists?
with settings(warn_only=True):
r.sudo(r.env.load_command, user=r.env.postgres_user) | Restores a database snapshot onto the target database server.
If prep_only=1, commands for preparing the load will be generated,
but not the command to finally load the snapshot. |
def _query(self, *criterion):
"""
Construct a query for the model.
"""
return self.session.query(
self.model_class
).filter(
*criterion
) | Construct a query for the model. |
def p_arguments(self, p):
"""arguments : LPAREN RPAREN
| LPAREN argument_list RPAREN
"""
if len(p) == 4:
p[0] = self.asttypes.Arguments(p[2])
else:
p[0] = self.asttypes.Arguments([])
p[0].setpos(p) | arguments : LPAREN RPAREN
| LPAREN argument_list RPAREN |
def destroy(self, request, *args, **kwargs):
"""
Run **DELETE** request against */api/price-list-items/<uuid>/* to delete price list item.
Only customer owner and staff can delete price items.
"""
return super(PriceListItemViewSet, self).destroy(request, *args, **kwargs) | Run **DELETE** request against */api/price-list-items/<uuid>/* to delete price list item.
Only customer owner and staff can delete price items. |
def update_all(self, criteria: Q, *args, **kwargs):
"""Update all objects satisfying the criteria """
items = self._filter(criteria, self.conn['data'][self.schema_name])
update_count = 0
for key in items:
item = items[key]
item.update(*args)
item.update(kwargs)
self.conn['data'][self.schema_name][key] = item
update_count += 1
return update_count | Update all objects satisfying the criteria |
def decode(self, descriptor):
""" Produce a list of dictionaries for each dimension in this transcoder """
i = iter(descriptor)
n = len(self._schema)
# Add the name key to our schema
schema = self._schema + ('name',)
# For each dimensions, generator takes n items off iterator
# wrapping the descriptor, making a tuple with the dimension
# name appended
tuple_gen = (tuple(itertools.islice(i, n)) + (d, )
for d in self._dimensions)
# Generate dictionary by mapping schema keys to generated tuples
return [{ k: v for k, v in zip(schema, t) } for t in tuple_gen] | Produce a list of dictionaries for each dimension in this transcoder |
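The islice-based slicing above is easier to follow with concrete values. This standalone sketch reproduces the same mapping with made-up schema and dimension names (they are illustrative, not the transcoder's real fields):
import itertools

schema = ('global_size', 'local_size', 'extents')   # hypothetical per-dimension keys
dimensions = ('x', 'y')                             # hypothetical dimension names
descriptor = [100, 8, 0, 200, 16, 1]                # len(schema) values per dimension

i = iter(descriptor)
n = len(schema)
full_schema = schema + ('name',)
tuple_gen = (tuple(itertools.islice(i, n)) + (d,) for d in dimensions)
print([{k: v for k, v in zip(full_schema, t)} for t in tuple_gen])
# [{'global_size': 100, 'local_size': 8, 'extents': 0, 'name': 'x'},
#  {'global_size': 200, 'local_size': 16, 'extents': 1, 'name': 'y'}]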
def write(self, process_tile, data):
"""
Write data from process tiles into GeoJSON file(s).
Parameters
----------
process_tile : ``BufferedTile``
must be member of process ``TilePyramid``
"""
if data is None or len(data) == 0:
return
if not isinstance(data, (list, types.GeneratorType)):
raise TypeError(
"GeoJSON driver data has to be a list or generator of GeoJSON objects"
)
data = list(data)
if not len(data):
logger.debug("no features to write")
else:
# in case of S3 output, create an boto3 resource
bucket_resource = get_boto3_bucket(self._bucket) if self._bucket else None
# Convert from process_tile to output_tiles
for tile in self.pyramid.intersecting(process_tile):
out_path = self.get_path(tile)
self.prepare_path(tile)
out_tile = BufferedTile(tile, self.pixelbuffer)
write_vector_window(
in_data=data,
out_schema=self.output_params["schema"],
out_tile=out_tile,
out_path=out_path,
bucket_resource=bucket_resource
) | Write data from process tiles into GeoJSON file(s).
Parameters
----------
process_tile : ``BufferedTile``
must be member of process ``TilePyramid`` |
def get_comparable_values_for_ordering(self):
"""Return a tupple of values representing the unicity of the object
"""
return (0 if self.position >= 0 else 1, int(self.position), str(self.name), str(self.description)) | Return a tuple of values representing the uniqueness of the object |
def get_releasenotes(repo_path, from_commit=None, bugtracker_url=''):
"""
Given a repo and optionally a base revision to start from, will return
a text suitable for the release notes announcement, grouping the bugs, the
features and the api-breaking changes.
Args:
repo_path(str): Path to the code git repository.
from_commit(str): Refspec of the commit to start aggregating the
authors from.
bugtracker_url(str): URL to be prepended to any bug ids found in the
commits.
Returns:
str: Release notes text.
"""
repo = dulwich.repo.Repo(repo_path)
tags = get_tags(repo)
refs = get_refs(repo)
maj_version = 0
feat_version = 0
fix_version = 0
start_including = False
release_notes_per_major = OrderedDict()
cur_line = ''
if from_commit is None:
start_including = True
prev_version = (maj_version, feat_version, fix_version)
prev_version_str = '%s.%s.%s' % prev_version
bugs = []
features = []
api_break_changes = []
for commit_sha, children in reversed(
get_children_per_first_parent(repo_path).items()
):
commit = get_repo_object(repo, commit_sha)
maj_version, feat_version, fix_version = get_version(
commit=commit,
tags=tags,
maj_version=maj_version,
feat_version=feat_version,
fix_version=fix_version,
children=children,
)
version = (maj_version, feat_version, fix_version)
version_str = '%s.%s.%s' % version
if (
start_including or commit_sha.startswith(from_commit) or
fuzzy_matches_refs(from_commit, refs.get(commit_sha, []))
):
start_including = True
parent_commit_type = get_commit_type(
commit=commit,
children=children,
tags=tags,
prev_version=prev_version,
)
cur_line = pretty_commit(
commit=commit,
version=version_str,
bugtracker_url=bugtracker_url,
commit_type=parent_commit_type,
)
for child in children:
commit_type = get_commit_type(
commit=commit,
tags=tags,
prev_version=prev_version,
)
cur_line += pretty_commit(
commit=child,
version=None,
commit_type=commit_type,
bugtracker_url=bugtracker_url,
)
if parent_commit_type == 'api_break':
release_notes_per_major[prev_version_str] = (
api_break_changes,
features,
bugs,
)
bugs, features, api_break_changes = [], [], []
api_break_changes.append(cur_line)
elif parent_commit_type == 'feature':
features.append(cur_line)
else:
bugs.append(cur_line)
prev_version = version
prev_version_str = version_str
release_notes_per_major[prev_version_str] = (
api_break_changes,
features,
bugs,
)
releasenotes = ''
for major_version, lines in reversed(release_notes_per_major.items()):
api_break_changes, features, bugs = lines
releasenotes += u'''New changes for version %s
=================================
API Breaking changes
--------------------
%s
New features
------------
%s
Bugfixes and minor changes
--------------------------
%s
''' % (
major_version,
(
'\n'.join(reversed(api_break_changes)) or
'No new API breaking changes\n'
),
'\n'.join(reversed(features)) or 'No new features\n',
'\n'.join(reversed(bugs)) or 'No new bugs\n',
)
return releasenotes.strip() | Given a repo and optionally a base revision to start from, will return
a text suitable for the release notes announcement, grouping the bugs, the
features and the api-breaking changes.
Args:
repo_path(str): Path to the code git repository.
from_commit(str): Refspec of the commit to start aggregating the
authors from.
bugtracker_url(str): URL to be prepended to any bug ids found in the
commits.
Returns:
str: Release notes text. |
def copyFile(src, dest):
"""Copies a source file to a destination whose path may not yet exist.
Keyword arguments:
src -- Source path to a file (string)
dest -- Path for destination file (also a string)
"""
#Src Exists?
try:
if os.path.isfile(src):
dpath, dfile = os.path.split(dest)
if not os.path.isdir(dpath):
os.makedirs(dpath)
if not os.path.exists(dest):
touch(dest)
try:
shutil.copy2(src, dest)
# eg. src and dest are the same file
except shutil.Error as e:
logging.exception('Error: %s' % e)
# eg. source or destination doesn't exist
except IOError as e:
logging.exception('Error: %s' % e.strerror)
except:
logging.exception('Error: src to copy does not exist.') | Copies a source file to a destination whose path may not yet exist.
Keyword arguments:
src -- Source path to a file (string)
dest -- Path for destination file (also a string) |
def cmd_example(self, args):
'''control behaviour of the module'''
if len(args) == 0:
print(self.usage())
elif args[0] == "status":
print(self.status())
elif args[0] == "set":
self.example_settings.command(args[1:])
else:
print(self.usage()) | control behaviour of the module |
def __process_node(self, node: yaml.Node,
expected_type: Type) -> yaml.Node:
"""Processes a node.
This is the main function that implements yatiml's \
functionality. It figures out how to interpret this node \
(recognition), then applies syntactic sugar, and finally \
recurses to the subnodes, if any.
Args:
node: The node to process.
expected_type: The type we expect this node to be.
Returns:
The transformed node, or a transformed copy.
"""
logger.info('Processing node {} expecting type {}'.format(
node, expected_type))
# figure out how to interpret this node
recognized_types, message = self.__recognizer.recognize(
node, expected_type)
if len(recognized_types) != 1:
raise RecognitionError(message)
recognized_type = recognized_types[0]
# remove syntactic sugar
logger.debug('Savorizing node {}'.format(node))
if recognized_type in self._registered_classes.values():
node = self.__savorize(node, recognized_type)
logger.debug('Savorized, now {}'.format(node))
# process subnodes
logger.debug('Recursing into subnodes')
if is_generic_list(recognized_type):
if node.tag != 'tag:yaml.org,2002:seq':
raise RecognitionError('{}{}Expected a {} here'.format(
node.start_mark, os.linesep,
type_to_desc(expected_type)))
for item in node.value:
self.__process_node(item,
generic_type_args(recognized_type)[0])
elif is_generic_dict(recognized_type):
if node.tag != 'tag:yaml.org,2002:map':
raise RecognitionError('{}{}Expected a {} here'.format(
node.start_mark, os.linesep,
type_to_desc(expected_type)))
for _, value_node in node.value:
self.__process_node(value_node,
generic_type_args(recognized_type)[1])
elif recognized_type in self._registered_classes.values():
if (not issubclass(recognized_type, enum.Enum)
and not issubclass(recognized_type, str)
and not issubclass(recognized_type, UserString)):
for attr_name, type_, _ in class_subobjects(recognized_type):
cnode = Node(node)
if cnode.has_attribute(attr_name):
subnode = cnode.get_attribute(attr_name)
new_subnode = self.__process_node(
subnode.yaml_node, type_)
cnode.set_attribute(attr_name, new_subnode)
else:
logger.debug('Not a generic class or a user-defined class, not'
' recursing')
node.tag = self.__type_to_tag(recognized_type)
logger.debug('Finished processing node {}'.format(node))
return node | Processes a node.
This is the main function that implements yatiml's \
functionality. It figures out how to interpret this node \
(recognition), then applies syntactic sugar, and finally \
recurses to the subnodes, if any.
Args:
node: The node to process.
expected_type: The type we expect this node to be.
Returns:
The transformed node, or a transformed copy. |
def astype(self, dtype):
"""
Cast & clone an ANTsImage to a given numpy datatype.
Map:
uint8 : unsigned char
uint32 : unsigned int
float32 : float
float64 : double
"""
if dtype not in _supported_dtypes:
raise ValueError('Datatype %s not supported. Supported types are %s' % (dtype, _supported_dtypes))
pixeltype = _npy_to_itk_map[dtype]
return self.clone(pixeltype) | Cast & clone an ANTsImage to a given numpy datatype.
Map:
uint8 : unsigned char
uint32 : unsigned int
float32 : float
float64 : double |
def _parse(self):
"""
The function for parsing the JSON response to the vars dictionary.
"""
try:
self.vars['status'] = self.json['status']
except (KeyError, ValueError, TypeError):
pass
for v in ['remarks', 'notices']:
try:
self.vars[v] = self.summarize_notices(self.json[v])
except (KeyError, ValueError, TypeError):
pass
try:
self.vars['links'] = self.summarize_links(self.json['links'])
except (KeyError, ValueError, TypeError):
pass
try:
self.vars['events'] = self.summarize_events(self.json['events'])
except (KeyError, ValueError, TypeError):
pass | The function for parsing the JSON response to the vars dictionary. |
def to_schema(self):
"""Return field schema for this field."""
if not self.name or not self.process:
raise ValueError("field is not registered with process")
schema = {
'name': self.name,
'type': self.get_field_type(),
}
if self.required is not None:
schema['required'] = self.required
if self.label is not None:
schema['label'] = self.label
if self.description is not None:
schema['description'] = self.description
if self.default is not None:
schema['default'] = self.default
if self.hidden is not None:
schema['hidden'] = self.hidden
if self.choices is not None:
for choice, label in self.choices:
schema.setdefault('choices', []).append({
'label': label,
'value': choice,
})
return schema | Return field schema for this field. |
def retinotopy_comparison(arg1, arg2, arg3=None,
eccentricity_range=None, polar_angle_range=None, visual_area_mask=None,
weight=Ellipsis, weight_min=None, visual_area=Ellipsis,
method='rmse', distance='scaled', gold=None):
'''
retinotopy_comparison(dataset1, dataset2) yields a pimms itable comparing the two retinotopy
datasets.
retinotopy_error(obj, dataset1, dataset2) is equivalent to retinotopy_comparison(x, y) where x
and y are retinotopy(obj, dataset1) and retinotopy_data(obj, dataset2).
The datasets may be specified in a number of ways, some of which may be incompatible with
certain options. The simplest way to specify a dataset is as a vector of complex numbers, which
are taken to represent positions in the visual field with (a + bi) corresponding to the
coordinate (a deg, b deg) in the visual field. Alternately, an n x 2 or 2 x n matrix will be
interpreted as (polar angle, eccentricity) coordinates, in terms of visual degrees (see the
as_retinotopy function: as_retinotopy(arg, 'visual') yields this input format). Alternately,
the datasets may be mappings such as those returned by the retinotopy_data function; in this case
as_retinotopy is used to extract the visual coordinates (so they need not be specified in visual
coordinates specifically in this case). In this last case, additional properties such as the
variance explained and pRF size can be returned, making it valuable for more sophisticated error
methods or distance metrics.
The returned dataset will always have a row for each row in the two datasets (which must have
the same number of rows). However, many rows may have a weight of 0 even if no weights were
specified in the options; this is because other limitations may have been specified (such as
in the eccentricity_range or visual_areas). The returned dataset will always contain the
following columns:
* 'weight' gives the weight assigned to this particular vertex; the weights will always sum to
1 unless all vertices have 0 weight.
* 'polar_angle_1' and 'polar_angle_2', 'eccentricity_1', 'eccentricity_2', 'x_1', 'x_2', 'y_1',
'y_2', 'z_1', and 'z_2' all give the visual field coordinates in degrees; the z values give
complex numbers equivalent to the x/y values.
* 'radius_1' and 'radius_2' give the radii (sigma parameters) of the pRF gaussians.
* 'polar_angle_error', 'eccentricity_error', and 'center_error' all give the difference
between the visual field points in the two datasets; note that polar_angle_error in
particular is an error measure of rotations around the visual field and not of visual field
position. The 'center_error' is the distance between the centers of the visual field, in
degrees. The 'radius_error' value is also given.
* 'visual_area_1' and 'visual_area_2' specify the visual areas of the individual datasets; if
either of the datasets did not have a visual area, it will be omitted. Additionally, the
property 'visual_area' specifies the visual area suggested for use in analyses; this is
chosen based on the following: (1) if there is a gold standard dataset specified that has
a visual area, use it; (2) if only one of the datasets has a visual area, use it; (3) if
both have a visual area, then use the (varea1 == varea2) * varea1 (the areas that agree are
kept and all others are set to 0); (4) if neither has a visual area, then this property is
omitted. In all cases where a 'visual_area' property is included, those vertices that do not
overlap with the given visual_area_mask option will be set to 0 along with the
corresponding weights.
* A variety of other lazily-calculated error metrics are included.
The following options are accepted:
* eccentricity_range (default: None) specifies the range of eccentricity to include in the
calculation (in degrees). This may be specified as emax or (emin, emax).
* polar_angle_range (default: None) specifies the range of polar angles to include in the
calculation. Like eccentricity range it may be specified as (amin, amax) but amax alone is
not allowed. Additionally the strings 'lh' and 'rvf' are equivalent to (0,180) and the
strings 'rh' and 'lvf' are equivalent to (-180,0).
* weight (default: Ellipsis) specifies the weights to be used in the calculation. This may be
None to specify that no weights should be used, or a property name or an array of weight
values. Alternately, it may be a tuple (w1, w2) of the weights for datasets 1 and 2. If the
argument is Ellipsis, then it will use weights if they are found in the retinotopy dataset;
both datasets may contain weights in which the product is used.
* weight_min (default: None) specifies the minimum weight a vertex must have to be included in
the calculation.
* visual_area (default: Ellipsis) specifies the visual area labels to be used in the
calculation. This may be None to specify that no labels should be used, or a property name
or an array of labels. Alternately, it may be a tuple (l1, l2) of the labels for datasets 1
and 2. If the argument is Ellipsis, then it will use labels if they are found in the
retinotopy dataset; both datasets may contain labels in which the gold standard's labels are
used if there is a gold standard and the overlapping labels are used otherwise.
* visual_area_mask (default: None) specifies a list of visual areas included in the
calculation; this is applied to all datasets with a visual_area key; see the 'visual_area'
columns above and the visual_area option. If None, then no visual areas are filtered;
otherwise, the argument should look like (1,2,3), which would usually specify that areas V1, V2, and
V3, be included.
* gold (default: None) specifies which dataset should be considered the gold standard; this
should be either 1 or 2. If a gold-standard dataset is specified, then it is used in certain
calculations; for example, when scaling an error by eccentricity, the gold-standard's
eccentricity will be used unless there is no gold standard, in which case the mean of the
two values are used.
'''
if arg3 is not None: (obj, dsets) = (arg1, [retinotopy_data(arg1, aa) for aa in (arg2,arg3)])
else: (obj, dsets) = (None, [arg1, arg2])
(gi,gold) = (None,False) if not gold else (gold-1,True)
# we'll build up this result as we go...
result = {}
# they must have a retinotopy representation:
vis = [as_retinotopy(ds, 'visual') for ds in dsets]
ps = (vis[0][0], vis[1][0])
es = (vis[0][1], vis[1][1])
rs = [ds['radius'] if 'radius' in ds else None for ds in dsets]
for ii in (0,1):
s = '_%d' % (ii + 1)
(p,e) = (ps[ii],es[ii])
result['polar_angle' + s] = p
result['eccentricity' + s] = e
if rs[ii] is not None: result['radius' + s] = rs[ii]
p = np.pi/180.0 * (90.0 - p)
(x,y) = (e*np.cos(p), e*np.sin(p))
result['x' + s] = x
result['y' + s] = y
result['z' + s] = x + 1j * y
n = len(ps[0])
# figure out the weight
if pimms.is_vector(weight) and len(weight) == 2:
ws = [(None if w is None else
ds[w] if pimms.is_str(w) and w in ds else
geo.to_property(obj, w))
for (w,ds) in zip(weight, dsets)]
weight = Ellipsis
else:
ws = [next((ds[k] for k in ('weight','variance_explained') if k in ds), None)
for ds in dsets]
if pimms.is_vector(weight, 'real'):
wgt = weight
elif pimms.is_str(weight):
if obj is None: raise ValueError('weight property name but no vertex-set given')
wgt = geo.to_property(obj, weight)
elif weight is Ellipsis:
if gold: wgt = ws[gi]
elif ws[0] is None and ws[1] is None: wgt = None
elif ws[0] is None: wgt = ws[1]
elif ws[1] is None: wgt = ws[0]
else: wgt = ws[0] * ws[1]
else: raise ValueError('Could not parse weight argument')
if wgt is None: wgt = np.ones(n)
if ws[0] is not None: result['weight_1'] = ws[0]
if ws[1] is not None: result['weight_2'] = ws[1]
# figure out the visual areas
if is_tuple(visual_area) and len(visual_area) == 2:
ls = [(None if l is None else
ds[l] if pimms.is_str(l) and l in ds else
geo.to_property(obj, l))
for (l,ds) in zip(visual_area, dsets)]
visual_area = Ellipsis
else:
ls = [next((ds[k] for k in ('visual_area','label') if k in ds), None)
for ds in dsets]
if pimms.is_vector(visual_area):
lbl = visual_area
elif pimms.is_str(visual_area):
if obj is None: raise ValueError('visual_area property name but no vertex-set given')
lbl = geo.to_property(obj, visual_area)
elif visual_area is None:
lbl = None
elif visual_area is Ellipsis:
if gold: lbl = ls[gi]
elif ls[0] is None and ls[1] is None: lbl = None
elif ls[0] is None: lbl = ls[1]
elif ls[1] is None: lbl = ls[0]
else: lbl = ls[0] * (ls[0] == ls[1])
else: raise ValueError('Could not parse visual_area argument')
if ls[0] is not None: result['visual_area_1'] = ls[0]
if ls[1] is not None: result['visual_area_2'] = ls[1]
# Okay, now let's do some filtering; we clear weights as we go
wgt = np.array(wgt)
# Weight must be greater than the min
if weight_min is not None: wgt[wgt < weight_min] = 0
# Visual areas must be in the mask
lbl = None if lbl is None else np.array(lbl)
if lbl is not None and visual_area_mask is not None:
if pimms.is_int(visual_area_mask): visual_area_mask = [visual_area_mask]
oomask = (0 == np.sum([lbl == va for va in visual_area_mask], axis=0))
wgt[oomask] = 0
lbl[oomask] = 0
if lbl is not None: result['visual_area'] = lbl
# eccen must be in range
if eccentricity_range is not None:
er = eccentricity_range
if pimms.is_real(er): er = (0,er)
if gold: wgt[(es[gi] < er[0]) | (es[gi] > er[1])] = 0
else: wgt[(es[0] < er[0]) | (es[0] > er[1]) | (es[1] < er[0]) | (es[1] > er[1])] = 0
# angle must be in range
if polar_angle_range is not None:
pr = polar_angle_range
if pimms.is_str(pr):
pr = pr.lower()
if pr in ['lh', 'rvf']: pr = ( 0, 180)
elif pr in ['rh', 'lvf']: pr = (-180, 0)
else: raise ValueError('unrecognized polar angle range argument: %s' % pr)
if gold: wgt[(ps[gi] < pr[0]) | (ps[gi] > pr[1])] = 0
else: wgt[(ps[0] < pr[0]) | (ps[0] > pr[1]) | (ps[1] < pr[0]) | (ps[1] > pr[1])] = 0
# okay! Now we can add the weight into the result
result['weight'] = wgt * zinv(np.sum(wgt))
# now we add a bunch of calculations we can perform on the data!
# first: metrics of distance
gsecc = es[gi] if gold else np.mean(es, axis=0)
gsang = ps[gi] if gold else np.mean(ps, axis=0)
gsrad = rs[gi] if gold else rs[0] if rs[1] is None else rs[1] if rs[0] is None else \
np.mean(rs, axis=0)
gsecc_inv = zinv(gsecc)
gsrad_inv = None if gsrad is None else zinv(gsrad)
for (tag,resprop) in [('z', 'center'), ('polar_angle', 'polar_angle'),
('eccentricity', 'eccentricity'), ('x', 'x'), ('y', 'y')]:
serr = result[tag + '_1'] - result[tag + '_2']
aerr = np.abs(serr)
result[resprop + '_error'] = serr
result[resprop + '_abs_error'] = aerr
result[resprop + '_scaled_error'] = aerr * gsecc_inv
if gsrad_inv is not None:
result[resprop + '_radii_error'] = aerr * gsrad_inv
return pimms.itable(result) | retinotopy_comparison(dataset1, dataset2) yields a pimms itable comparing the two retinotopy
datasets.
retinotopy_error(obj, dataset1, dataset2) is equivalent to retinotopy_comparison(x, y) where x
and y are retinotopy(obj, dataset1) and retinotopy_data(obj, dataset2).
The datasets may be specified in a number of ways, some of which may be incompatible with
certain options. The simplest way to specify a dataset is as a vector of complex numbers, which
are taken to represent positions in the visual field with (a + bi) corresponding to the
coordinate (a deg, b deg) in the visual field. Alternately, an n x 2 or 2 x n matrix will be
interpreted as (polar angle, eccentricity) coordinates, in terms of visual degrees (see the
as_retinotopy function: as_retinotopy(arg, 'visual') yields this input format). Alternately,
the datasets may be mappings such as those returned by the retinotopy_data function; in this case
as_retinotopy is used to extract the visual coordinates (so they need not be specified in visual
coordinates specifically in this case). In this last case, additional properties such as the
variance explained and pRF size can be returned, making it valuable for more sophisticated error
methods or distance metrics.
The returned dataset will always have a row for each row in the two datasets (which must have
the same number of rows). However, many rows may have a weight of 0 even if no weights were
specified in the options; this is because other limitations may have been specified (such as
in the eccentricity_range or visual_areas). The returned dataset will always contain the
following columns:
* 'weight' gives the weight assigned to this particular vertex; the weights will always sum to
1 unless all vertices have 0 weight.
* 'polar_angle_1' and 'polar_angle_2', 'eccentricity_1', 'eccentricity_2', 'x_1', 'x_2', 'y_1',
'y_2', 'z_1', and 'z_2' all give the visual field coordinates in degrees; the z values give
complex numbers equivalent to the x/y values.
* 'radius_1' and 'radius_2' give the radii (sigma parameters) of the pRF gaussians.
* 'polar_angle_error', 'eccentricity_error', and 'center_error' all give the difference
between the visual field points in the two datasets; note that polar_angle_error in
particular is an error measure of rotations around the visual field and not of visual field
position. The 'center_error' is the distance between the centers of the visual field, in
degrees. The 'radius_error' value is also given.
* 'visual_area_1' and 'visual_area_2' specify the visual areas of the individual datasets; if
either of the datasets did not have a visual area, it will be omitted. Additionally, the
property 'visual_area' specifies the visual area suggested for use in analyses; this is
chosen based on the following: (1) if there is a gold standard dataset specified that has
a visual area, use it; (2) if only one of the datasets has a visual area, use it; (3) if
both have a visual area, then use the (varea1 == varea2) * varea1 (the areas that agree are
kept and all others are set to 0); (4) if neither has a visual area, then this property is
omitted. In all cases where a 'visual_area' property is included, those vertices that do not
overlap with the given visual_area_mask option will be set to 0 along with the
corresponding weights.
* A variety of other lazily-calculated error metrics are included.
The following options are accepted:
* eccentricity_range (default: None) specifies the range of eccentricity to include in the
calculation (in degrees). This may be specified as emax or (emin, emax).
* polar_angle_range (default: None) specifies the range of polar angles to include in the
calculation. Like eccentricity range it may be specified as (amin, amax) but amax alone is
not allowed. Additionally the strings 'lh' and 'rvf' are equivalent to (0,180) and the
strings 'rh' and 'lvf' are equivalent to (-180,0).
* weight (default: Ellipsis) specifies the weights to be used in the calculation. This may be
None to specify that no weights should be used, or a property name or an array of weight
values. Alternately, it may be a tuple (w1, w2) of the weights for datasets 1 and 2. If the
argument is Ellipsis, then it will use weights if they are found in the retinotopy dataset;
both datasets may contain weights in which the product is used.
* weight_min (default: None) specifies the minimum weight a vertex must have to be included in
the calculation.
* visual_area (default: Ellipsis) specifies the visual area labels to be used in the
calculation. This may be None to specify that no labels should be used, or a property name
or an array of labels. Alternately, it may be a tuple (l1, l2) of the labels for datasets 1
and 2. If the argument is Ellipsis, then it will use labels if they are found in the
retinotopy dataset; both datasets may contain labels in which the gold standard's labels are
used if there is a gold standard and the overlapping labels are used otherwise.
* visual_area_mask (default: None) specifies a list of visual areas included in the
calculation; this is applied to all datasets with a visual_area key; see the 'visual_area'
columns above and the visual_area option. If None, then no visual areas are filtered;
otherwise, the argument should look like (1,2,3), which would usually specify that areas V1, V2, and
V3, be included.
* gold (default: None) specifies which dataset should be considered the gold standard; this
should be either 1 or 2. If a gold-standard dataset is specified, then it is used in certain
calculations; for example, when scaling an error by eccentricity, the gold-standard's
eccentricity will be used unless there is no gold standard, in which case the mean of the
two values are used. |
def get_neuroml_from_sonata(sonata_filename, id, generate_lems = True, format='xml'):
"""
Return a NeuroMLDocument with (most of) the contents of the Sonata model
"""
from neuroml.hdf5.NetworkBuilder import NetworkBuilder
neuroml_handler = NetworkBuilder()
sr = SonataReader(filename=sonata_filename, id=id)
sr.parse(neuroml_handler)
nml_doc = neuroml_handler.get_nml_doc()
sr.add_neuroml_components(nml_doc)
if format == 'xml':
nml_file_name = '%s.net.nml'%id
from neuroml.writers import NeuroMLWriter
NeuroMLWriter.write(nml_doc, nml_file_name)
elif format == 'hdf5':
nml_file_name = '%s.net.nml.h5'%id
from neuroml.writers import NeuroMLHdf5Writer
NeuroMLHdf5Writer.write(nml_doc, nml_file_name)
print_v('Written to: %s'%nml_file_name)
if generate_lems:
lems_file_name = sr.generate_lems_file(nml_file_name, nml_doc)
return sr, lems_file_name, nml_file_name, nml_doc
return nml_doc | Return a NeuroMLDocument with (most of) the contents of the Sonata model |
def attr_subresource(raml_resource, route_name):
""" Determine if :raml_resource: is an attribute subresource.
:param raml_resource: Instance of ramlfications.raml.ResourceNode.
:param route_name: Name of the :raml_resource:.
"""
static_parent = get_static_parent(raml_resource, method='POST')
if static_parent is None:
return False
schema = resource_schema(static_parent) or {}
properties = schema.get('properties', {})
if route_name in properties:
db_settings = properties[route_name].get('_db_settings', {})
return db_settings.get('type') in ('dict', 'list')
return False | Determine if :raml_resource: is an attribute subresource.
:param raml_resource: Instance of ramlfications.raml.ResourceNode.
:param route_name: Name of the :raml_resource:. |
def root_sync(args, l, config):
"""Sync with the remote. For more options, use library sync
"""
from requests.exceptions import ConnectionError
all_remote_names = [ r.short_name for r in l.remotes ]
if args.all:
remotes = all_remote_names
else:
remotes = args.refs
prt("Sync with {} remotes or bundles ".format(len(remotes)))
if not remotes:
return
for ref in remotes:
l.commit()
try:
if ref in all_remote_names: # It's a remote name
l.sync_remote(l.remote(ref))
else: # It's a bundle reference
l.checkin_remote_bundle(ref)
except NotFoundError as e:
warn(e)
continue
except ConnectionError as e:
warn(e)
continue | Sync with the remote. For more options, use library sync |
def has_minimum_version(raises=True):
"""
Return whether tmux meets the minimum version requirement (tmux 1.8 or above).
Parameters
----------
raises : bool
raise exception if below minimum version requirement
Returns
-------
bool
True if tmux meets minimum required version.
Raises
------
libtmux.exc.VersionTooLow
tmux version below minimum required for libtmux
Notes
-----
.. versionchanged:: 0.7.0
No longer returns version, returns True or False
.. versionchanged:: 0.1.7
Versions will now remove trailing letters per `Issue 55`_.
.. _Issue 55: https://github.com/tmux-python/tmuxp/issues/55.
"""
if get_version() < LooseVersion(TMUX_MIN_VERSION):
if raises:
raise exc.VersionTooLow(
'libtmux only supports tmux %s and greater. This system'
' has %s installed. Upgrade your tmux to use libtmux.'
% (TMUX_MIN_VERSION, get_version())
)
else:
return False
return True | Return whether tmux meets the minimum version requirement (tmux 1.8 or above).
Parameters
----------
raises : bool
raise exception if below minimum version requirement
Returns
-------
bool
True if tmux meets minimum required version.
Raises
------
libtmux.exc.VersionTooLow
tmux version below minimum required for libtmux
Notes
-----
.. versionchanged:: 0.7.0
No longer returns version, returns True or False
.. versionchanged:: 0.1.7
Versions will now remove trailing letters per `Issue 55`_.
.. _Issue 55: https://github.com/tmux-python/tmuxp/issues/55. |
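A minimal usage sketch (assumes libtmux is installed; the import path shown for has_minimum_version is an assumption and may vary between versions):
from libtmux.common import has_minimum_version   # import path assumed
from libtmux import exc

try:
    has_minimum_version()                 # raises VersionTooLow if tmux < 1.8
except exc.VersionTooLow as err:
    print("tmux too old:", err)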
def fingerprint(self):
"""
Creates a fingerprint that can be compared with a public key to see if
the two form a pair.
This fingerprint is not compatible with fingerprints generated by any
other software.
:return:
A byte string that is a sha256 hash of selected components (based
on the key type)
"""
if self._fingerprint is None:
params = self['private_key_algorithm']['parameters']
key = self['private_key'].parsed
if self.algorithm == 'rsa':
to_hash = '%d:%d' % (
key['modulus'].native,
key['public_exponent'].native,
)
elif self.algorithm == 'dsa':
public_key = self.public_key
to_hash = '%d:%d:%d:%d' % (
params['p'].native,
params['q'].native,
params['g'].native,
public_key.native,
)
elif self.algorithm == 'ec':
public_key = key['public_key'].native
if public_key is None:
public_key = self.public_key.native
if params.name == 'named':
to_hash = '%s:' % params.chosen.native
to_hash = to_hash.encode('utf-8')
to_hash += public_key
elif params.name == 'implicit_ca':
to_hash = public_key
elif params.name == 'specified':
to_hash = '%s:' % params.chosen['field_id']['parameters'].native
to_hash = to_hash.encode('utf-8')
to_hash += b':' + params.chosen['curve']['a'].native
to_hash += b':' + params.chosen['curve']['b'].native
to_hash += public_key
if isinstance(to_hash, str_cls):
to_hash = to_hash.encode('utf-8')
self._fingerprint = hashlib.sha256(to_hash).digest()
return self._fingerprint | Creates a fingerprint that can be compared with a public key to see if
the two form a pair.
This fingerprint is not compatible with fingerprints generated by any
other software.
:return:
A byte string that is a sha256 hash of selected components (based
on the key type) |
def genlmsg_parse(nlh, hdrlen, tb, maxtype, policy):
"""Parse Generic Netlink message including attributes.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/genl.c#L191
Verifies the validity of the Netlink and Generic Netlink headers using genlmsg_valid_hdr() and calls nla_parse() on
the message payload to parse eventual attributes.
Positional arguments:
nlh -- Netlink message header (nlmsghdr class instance).
hdrlen -- length of user header (integer).
tb -- empty dict, to be updated with nlattr class instances to store parsed attributes.
maxtype -- maximum attribute id expected (integer).
policy -- dictionary of nla_policy class instances as values, with nla types as keys.
Returns:
0 on success or a negative error code.
"""
if not genlmsg_valid_hdr(nlh, hdrlen):
return -NLE_MSG_TOOSHORT
ghdr = genlmsghdr(nlmsg_data(nlh))
return int(nla_parse(tb, maxtype, genlmsg_attrdata(ghdr, hdrlen), genlmsg_attrlen(ghdr, hdrlen), policy)) | Parse Generic Netlink message including attributes.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/genl.c#L191
Verifies the validity of the Netlink and Generic Netlink headers using genlmsg_valid_hdr() and calls nla_parse() on
the message payload to parse eventual attributes.
Positional arguments:
nlh -- Netlink message header (nlmsghdr class instance).
hdrlen -- length of user header (integer).
tb -- empty dict, to be updated with nlattr class instances to store parsed attributes.
maxtype -- maximum attribute id expected (integer).
policy -- dictionary of nla_policy class instances as values, with nla types as keys.
Returns:
0 on success or a negative error code. |
def draw_heading(self, writer):
"""
Conditionally redraw screen when ``dirty`` attribute is valued REFRESH.
When Pager attribute ``dirty`` is ``STATE_REFRESH``, cursor is moved
to (0,0), screen is cleared, and heading is displayed.
:param writer: callable writes to output stream, receiving unicode.
:returns: True if class attribute ``dirty`` is ``STATE_REFRESH``.
"""
if self.dirty == self.STATE_REFRESH:
writer(u''.join(
(self.term.home, self.term.clear,
self.screen.msg_intro, '\n',
self.screen.header, '\n',)))
return True | Conditionally redraw screen when ``dirty`` attribute is valued REFRESH.
When Pager attribute ``dirty`` is ``STATE_REFRESH``, cursor is moved
to (0,0), screen is cleared, and heading is displayed.
:param writer: callable writes to output stream, receiving unicode.
:returns: True if class attribute ``dirty`` is ``STATE_REFRESH``. |
def get_background_rms(self):
"""
Calculate the rms of the image. The rms is calculated from the interquartile range (IQR), to
reduce bias from source pixels.
Returns
-------
rms : float
The image rms.
Notes
-----
The rms value is cached after first calculation.
"""
# TODO: return a proper background RMS ignoring the sources
# This is an approximate method suggested by PaulH.
# I have no idea where this magic 1.34896 number comes from...
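# For a normal distribution the interquartile range is 2 * 0.6745 * sigma ~= 1.349 * sigma,
# so dividing the IQR by ~1.349 gives a robust estimate of the standard deviation.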
if self._rms is None:
# Get the pixels values without the NaNs
data = numpy.extract(self.hdu.data > -9999999, self.hdu.data)
p25 = scipy.stats.scoreatpercentile(data, 25)
p75 = scipy.stats.scoreatpercentile(data, 75)
iqr = p75 - p25
self._rms = iqr / 1.34896
return self._rms | Calculate the rms of the image. The rms is calculated from the interquartile range (IQR), to
reduce bias from source pixels.
Returns
-------
rms : float
The image rms.
Notes
-----
The rms value is cached after first calculation. |
def top_referrers(self, domain_only=True):
"""
What domains send us the most traffic?
"""
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(referrer, fn.Count(PageView.id))
.group_by(referrer)
.order_by(fn.Count(PageView.id).desc())
.tuples()) | What domains send us the most traffic? |
def export_recordeddata_to_file(time_min=None, time_max=None, filename=None, active_vars=None, file_extension=None,
append_to_file=False, no_mean_value=False, mean_value_period=5.0,
backgroundprocess_id=None, export_task_id=None, **kwargs):
"""
Read recorded data for the given variables and time range and export it to a file (.h5, .mat or .csv).
"""
if backgroundprocess_id is not None:
tp = BackgroundProcess.objects.get(id=backgroundprocess_id)
tp.message = 'init'
tp.last_update = now()
tp.save()
else:
tp = None
if isinstance(time_max, string_types):
# convert date strings
time_max = mktime(datetime.strptime(time_max, "%d-%b-%Y %H:%M:%S").timetuple())
if isinstance(time_min, string_types):
# convert date strings
time_min = mktime(datetime.strptime(time_min, "%d-%b-%Y %H:%M:%S").timetuple())
# add default time_min
if time_max is None:
time_max = time() # now
if time_min is None:
time_min = time() - 24 * 60 * 60 # last 24 hours
# add default extension if no extension is given
if file_extension is None and filename is None:
file_extension = '.h5'
elif filename is not None:
file_extension = '.' + filename.split('.')[-1]
# validate file type
if file_extension not in ['.h5', '.mat', '.csv']:
if tp is not None:
tp.last_update = now()
tp.message = 'failed wrong file type'
tp.failed = 1
tp.save()
return
#
if filename is None:
if hasattr(settings, 'PYSCADA_EXPORT'):
if 'output_folder' in settings.PYSCADA_EXPORT:
backup_file_path = os.path.expanduser(settings.PYSCADA_EXPORT['output_folder'])
else:
backup_file_path = os.path.expanduser('~/measurement_data_dumps')
else:
backup_file_path = os.path.expanduser('~/measurement_data_dumps')
# add filename prefix
backup_file_name = 'measurement_data'
if hasattr(settings, 'PYSCADA_EXPORT'):
if 'file_prefix' in settings.PYSCADA_EXPORT:
backup_file_name = settings.PYSCADA_EXPORT['file_prefix'] + backup_file_name
# create output dir if not existing
if not os.path.exists(backup_file_path):
os.mkdir(backup_file_path)
# validate time values
db_time_min = RecordedData.objects.first() # todo add RecordedDataOld
if not db_time_min:
if tp is not None:
tp.last_update = now()
tp.message = 'no data to export'
tp.failed = 1
tp.save()
return
time_min = max(db_time_min.time_value(), time_min)
db_time_max = RecordedData.objects.last() # todo add RecordedDataOld
if not db_time_max:
if tp is not None:
tp.last_update = now()
tp.message = 'no data to export'
tp.failed = 1
tp.save()
return
time_max = min(db_time_max.time_value(), time_max)
# filename and suffix
cdstr_from = datetime.fromtimestamp(time_min).strftime("%Y_%m_%d_%H%M")
cdstr_to = datetime.fromtimestamp(time_max).strftime("%Y_%m_%d_%H%M")
if 'filename_suffix' in kwargs:
filename = os.path.join(backup_file_path,
backup_file_name + '_' + cdstr_from + '_' + cdstr_to + '_' + kwargs[
'filename_suffix'])
else:
filename = os.path.join(backup_file_path, backup_file_name + '_' + cdstr_from + '_' + cdstr_to)
# check if file exists
if os.path.exists(filename + file_extension) and not append_to_file:
count = 0
filename_old = filename
while os.path.exists(filename + file_extension):
filename = filename_old + '_%03.0f' % count
count += 1
# append the extension
filename = filename + file_extension
# add Filename to ExportTask
if export_task_id is not None:
job = ExportTask.objects.filter(pk=export_task_id).first()
if job:
job.filename = filename
job.save()
#
if active_vars is None:
active_vars = Variable.objects.filter(active=1, device__active=1)
else:
if type(active_vars) is str:
if active_vars == 'all':
active_vars = Variable.objects.all()
else:
active_vars = Variable.objects.filter(active=1, device__active=1)
else:
active_vars = Variable.objects.filter(pk__in=active_vars, active=1, device__active=1)
if mean_value_period == 0:
no_mean_value = True
mean_value_period = 5.0 # todo get from DB, default is 5 seconds
# calculate time vector
timevalues = arange(math.ceil(time_min / mean_value_period) * mean_value_period,
math.floor(time_max / mean_value_period) * mean_value_period, mean_value_period)
# get Meta from Settings
if hasattr(settings, 'PYSCADA_META'):
if 'description' in settings.PYSCADA_META:
description = settings.PYSCADA_META['description']
else:
description = 'None'
if 'name' in settings.PYSCADA_META:
name = settings.PYSCADA_META['name']
else:
name = 'None'
else:
description = 'None'
name = 'None'
if file_extension in ['.h5', '.mat']:
bf = MatCompatibleH5(filename, version='1.1', description=description, name=name,
creation_date=strftime('%d-%b-%Y %H:%M:%S'))
out_timevalues = [unix_time_stamp_to_matlab_datenum(element) for element in timevalues]
elif file_extension in ['.csv']:
bf = ExcelCompatibleCSV(filename, version='1.1', description=description, name=name,
creation_date=strftime('%d-%b-%Y %H:%M:%S'))
out_timevalues = [unix_time_stamp_to_excel_datenum(element) for element in timevalues]
else:
return
# less than 24
# read everything
bf.write_data('time', float64(out_timevalues),
id=0,
description="global time vector",
value_class=validate_value_class('FLOAT64'),
unit="Days since 0000-1-1 00:00:00",
color='#000000',
short_name='time',
chart_line_thickness=3
)
for var_idx in range(0, active_vars.count(), 10):
if tp is not None:
tp.last_update = now()
tp.message = 'reading values from database (%d)' % var_idx
tp.save()
# query data
var_slice = active_vars[var_idx:var_idx + 10]
data = RecordedData.objects.get_values_in_time_range(
variable_id__in=list(var_slice.values_list('pk', flat=True)),
time_min=time_min,
time_max=time_max,
query_first_value=True)
for var in var_slice:
# write background task info
if tp is not None:
tp.last_update = now()
tp.message = 'writing values for %s (%d) to file' % (var.name, var.pk)
tp.save()
# check if variable is scaled
if var.scaling is None or var.value_class.upper() in ['BOOL', 'BOOLEAN']:
value_class = var.value_class
else:
value_class = 'FLOAT64'
# read unit
if hasattr(var.unit, 'udunit'):
udunit = var.unit.udunit
else:
udunit = 'None'
if var.pk not in data:
# write dummy data
bf.write_data(var.name, _cast_value([0] * len(timevalues), validate_value_class(value_class)),
id=var.pk,
description=var.description,
value_class=validate_value_class(value_class),
unit=udunit,
color=var.chart_line_color_code(),
short_name=var.short_name,
chart_line_thickness=var.chart_line_thickness)
continue
out_data = np.zeros(len(timevalues))
# i: time data index
ii = 0 # source data index
# calculate mean values
last_value = None
max_ii = len(data[var.pk]) - 1
for i in range(len(timevalues)): # iter over time values
if ii >= max_ii + 1:
# if no more data in the data source, break
if last_value is not None:
out_data[i] = last_value
continue
# init mean value vars
tmp = 0.0 # sum
tmp_i = 0.0 # count
if data[var.pk][ii][0] < timevalues[i]:
# skip elements that are before the current time step
while data[var.pk][ii][0] < timevalues[i] and ii < max_ii:
last_value = data[var.pk][ii][1]
ii += 1
if ii >= max_ii:
if last_value is not None:
out_data[i] = last_value
continue
# calc mean value
if timevalues[i] <= data[var.pk][ii][0] < timevalues[i] + mean_value_period:
# there is data in time range
while timevalues[i] <= data[var.pk][ii][0] < timevalues[i] + mean_value_period and ii < max_ii:
# calculate mean value
if no_mean_value:
tmp = data[var.pk][ii][1]
tmp_i = 1
else:
tmp += data[var.pk][ii][1]
tmp_i += 1
last_value = data[var.pk][ii][1]
ii += 1
# calc and store mean value
if tmp_i > 0:
out_data[i] = tmp / tmp_i
else:
out_data[i] = data[var.pk][ii][1]
last_value = data[var.pk][ii][1]
else:
# there is no data in time range, keep last value, not mean value
if last_value is not None:
out_data[i] = last_value
# write data
bf.write_data(var.name, _cast_value(out_data, validate_value_class(value_class)),
id=var.pk,
description=var.description,
value_class=validate_value_class(value_class),
unit=udunit,
color=var.chart_line_color_code(),
short_name=var.short_name,
chart_line_thickness=var.chart_line_thickness)
bf.close_file()
if tp is not None:
tp.last_update = now()
tp.message = 'done'
tp.done = True
tp.save() | Read recorded data for the given variables and time range and export it to a file (.h5, .mat or .csv).
def get_closest(self, lon, lat, depth=0):
"""
Get the closest object to the given longitude and latitude
and its distance.
:param lon: longitude in degrees
:param lat: latitude in degrees
:param depth: depth in km (default 0)
:returns: (object, distance)
"""
xyz = spherical_to_cartesian(lon, lat, depth)
min_dist, idx = self.kdtree.query(xyz)
return self.objects[idx], min_dist | Get the closest object to the given longitude and latitude
and its distance.
:param lon: longitude in degrees
:param lat: latitude in degrees
:param depth: depth in km (default 0)
:returns: (object, distance) |
def add_forwarding_rules(self, forwarding_rules):
"""
Adds new forwarding rules to a LoadBalancer.
Args:
forwarding_rules (obj:`list`): A list of `ForwardingRules` objects
"""
rules_dict = [rule.__dict__ for rule in forwarding_rules]
return self.get_data(
"load_balancers/%s/forwarding_rules/" % self.id,
type=POST,
params={"forwarding_rules": rules_dict}
) | Adds new forwarding rules to a LoadBalancer.
Args:
forwarding_rules (obj:`list`): A list of `ForwardingRules` objects
def move_item(self, item, origin, destination):
"""
Moves an item from one cluster to another cluster.
:param item: the item to be moved.
:param origin: the originating cluster.
:param destination: the target cluster.
"""
if self.equality:
item_index = 0
for i, element in enumerate(origin):
if self.equality(element, item):
item_index = i
break
else:
item_index = origin.index(item)
destination.append(origin.pop(item_index)) | Moves an item from one cluster to another cluster.
:param item: the item to be moved.
:param origin: the originating cluster.
:param destination: the target cluster. |
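A minimal usage sketch, assuming `clusterer` is an instance of this class with no custom `equality` set and the clusters are plain lists (all names below are illustrative):
origin = [1, 2, 3]
destination = [9]
clusterer.move_item(2, origin, destination)
# origin is now [1, 3] and destination is [9, 2]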
def saveSession(self):
"""Save cookies/session."""
if self.cookies_file:
self.r.cookies.save(ignore_discard=True)
with open(self.token_file, 'w') as f:
f.write('%s %s' % (self.token_type, self.access_token)) | Save cookies/session. |
def edit_message_media(
self,
chat_id: Union[int, str],
message_id: int,
media: InputMedia,
reply_markup: "pyrogram.InlineKeyboardMarkup" = None
) -> "pyrogram.Message":
"""Use this method to edit audio, document, photo, or video messages.
If a message is a part of a message album, then it can be edited only to a photo or a video. Otherwise,
message type can be changed arbitrarily. When inline message is edited, new file can't be uploaded.
Use previously uploaded file via its file_id or specify a URL. On success, if the edited message was sent
by the bot, the edited Message is returned, otherwise True is returned.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
message_id (``int``):
Message identifier in the chat specified in chat_id.
media (:obj:`InputMedia`):
One of the InputMedia objects describing an animation, audio, document, photo or video.
reply_markup (:obj:`InlineKeyboardMarkup`, *optional*):
An InlineKeyboardMarkup object.
Returns:
On success, the edited :obj:`Message <pyrogram.Message>` is returned.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
style = self.html if media.parse_mode.lower() == "html" else self.markdown
caption = media.caption
if isinstance(media, InputMediaPhoto):
if os.path.exists(media.media):
media = self.send(
functions.messages.UploadMedia(
peer=self.resolve_peer(chat_id),
media=types.InputMediaUploadedPhoto(
file=self.save_file(media.media)
)
)
)
media = types.InputMediaPhoto(
id=types.InputPhoto(
id=media.photo.id,
access_hash=media.photo.access_hash,
file_reference=b""
)
)
elif media.media.startswith("http"):
media = types.InputMediaPhotoExternal(
url=media.media
)
else:
try:
decoded = utils.decode(media.media)
fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq"
unpacked = struct.unpack(fmt, decoded)
except (AssertionError, binascii.Error, struct.error):
raise FileIdInvalid from None
else:
if unpacked[0] != 2:
media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
if media_type:
raise FileIdInvalid("The file_id belongs to a {}".format(media_type))
else:
raise FileIdInvalid("Unknown media type: {}".format(unpacked[0]))
media = types.InputMediaPhoto(
id=types.InputPhoto(
id=unpacked[2],
access_hash=unpacked[3],
file_reference=b""
)
)
if isinstance(media, InputMediaVideo):
if os.path.exists(media.media):
media = self.send(
functions.messages.UploadMedia(
peer=self.resolve_peer(chat_id),
media=types.InputMediaUploadedDocument(
mime_type=self.guess_mime_type(media.media) or "video/mp4",
thumb=None if media.thumb is None else self.save_file(media.thumb),
file=self.save_file(media.media),
attributes=[
types.DocumentAttributeVideo(
supports_streaming=media.supports_streaming or None,
duration=media.duration,
w=media.width,
h=media.height
),
types.DocumentAttributeFilename(
file_name=os.path.basename(media.media)
)
]
)
)
)
media = types.InputMediaDocument(
id=types.InputDocument(
id=media.document.id,
access_hash=media.document.access_hash,
file_reference=b""
)
)
elif media.media.startswith("http"):
media = types.InputMediaDocumentExternal(
url=media.media
)
else:
try:
decoded = utils.decode(media.media)
fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq"
unpacked = struct.unpack(fmt, decoded)
except (AssertionError, binascii.Error, struct.error):
raise FileIdInvalid from None
else:
if unpacked[0] != 4:
media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
if media_type:
raise FileIdInvalid("The file_id belongs to a {}".format(media_type))
else:
raise FileIdInvalid("Unknown media type: {}".format(unpacked[0]))
media = types.InputMediaDocument(
id=types.InputDocument(
id=unpacked[2],
access_hash=unpacked[3],
file_reference=b""
)
)
if isinstance(media, InputMediaAudio):
if os.path.exists(media.media):
media = self.send(
functions.messages.UploadMedia(
peer=self.resolve_peer(chat_id),
media=types.InputMediaUploadedDocument(
mime_type=self.guess_mime_type(media.media) or "audio/mpeg",
thumb=None if media.thumb is None else self.save_file(media.thumb),
file=self.save_file(media.media),
attributes=[
types.DocumentAttributeAudio(
duration=media.duration,
performer=media.performer,
title=media.title
),
types.DocumentAttributeFilename(
file_name=os.path.basename(media.media)
)
]
)
)
)
media = types.InputMediaDocument(
id=types.InputDocument(
id=media.document.id,
access_hash=media.document.access_hash,
file_reference=b""
)
)
elif media.media.startswith("http"):
media = types.InputMediaDocumentExternal(
url=media.media
)
else:
try:
decoded = utils.decode(media.media)
fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq"
unpacked = struct.unpack(fmt, decoded)
except (AssertionError, binascii.Error, struct.error):
raise FileIdInvalid from None
else:
if unpacked[0] != 9:
media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
if media_type:
raise FileIdInvalid("The file_id belongs to a {}".format(media_type))
else:
raise FileIdInvalid("Unknown media type: {}".format(unpacked[0]))
media = types.InputMediaDocument(
id=types.InputDocument(
id=unpacked[2],
access_hash=unpacked[3],
file_reference=b""
)
)
if isinstance(media, InputMediaAnimation):
if os.path.exists(media.media):
media = self.send(
functions.messages.UploadMedia(
peer=self.resolve_peer(chat_id),
media=types.InputMediaUploadedDocument(
mime_type=self.guess_mime_type(media.media) or "video/mp4",
thumb=None if media.thumb is None else self.save_file(media.thumb),
file=self.save_file(media.media),
attributes=[
types.DocumentAttributeVideo(
supports_streaming=True,
duration=media.duration,
w=media.width,
h=media.height
),
types.DocumentAttributeFilename(
file_name=os.path.basename(media.media)
),
types.DocumentAttributeAnimated()
]
)
)
)
media = types.InputMediaDocument(
id=types.InputDocument(
id=media.document.id,
access_hash=media.document.access_hash,
file_reference=b""
)
)
elif media.media.startswith("http"):
media = types.InputMediaDocumentExternal(
url=media.media
)
else:
try:
decoded = utils.decode(media.media)
fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq"
unpacked = struct.unpack(fmt, decoded)
except (AssertionError, binascii.Error, struct.error):
raise FileIdInvalid from None
else:
if unpacked[0] != 10:
media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
if media_type:
raise FileIdInvalid("The file_id belongs to a {}".format(media_type))
else:
raise FileIdInvalid("Unknown media type: {}".format(unpacked[0]))
media = types.InputMediaDocument(
id=types.InputDocument(
id=unpacked[2],
access_hash=unpacked[3],
file_reference=b""
)
)
if isinstance(media, InputMediaDocument):
if os.path.exists(media.media):
media = self.send(
functions.messages.UploadMedia(
peer=self.resolve_peer(chat_id),
media=types.InputMediaUploadedDocument(
mime_type=self.guess_mime_type(media.media) or "application/zip",
thumb=None if media.thumb is None else self.save_file(media.thumb),
file=self.save_file(media.media),
attributes=[
types.DocumentAttributeFilename(
file_name=os.path.basename(media.media)
)
]
)
)
)
media = types.InputMediaDocument(
id=types.InputDocument(
id=media.document.id,
access_hash=media.document.access_hash,
file_reference=b""
)
)
elif media.media.startswith("http"):
media = types.InputMediaDocumentExternal(
url=media.media
)
else:
try:
decoded = utils.decode(media.media)
fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq"
unpacked = struct.unpack(fmt, decoded)
except (AssertionError, binascii.Error, struct.error):
raise FileIdInvalid from None
else:
if unpacked[0] not in (5, 10):
media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
if media_type:
raise FileIdInvalid("The file_id belongs to a {}".format(media_type))
else:
raise FileIdInvalid("Unknown media type: {}".format(unpacked[0]))
media = types.InputMediaDocument(
id=types.InputDocument(
id=unpacked[2],
access_hash=unpacked[3],
file_reference=b""
)
)
r = self.send(
functions.messages.EditMessage(
peer=self.resolve_peer(chat_id),
id=message_id,
reply_markup=reply_markup.write() if reply_markup else None,
media=media,
**style.parse(caption)
)
)
for i in r.updates:
if isinstance(i, (types.UpdateEditMessage, types.UpdateEditChannelMessage)):
return pyrogram.Message._parse(
self, i.message,
{i.id: i for i in r.users},
{i.id: i for i in r.chats}
) | Use this method to edit audio, document, photo, or video messages.
If a message is a part of a message album, then it can be edited only to a photo or a video. Otherwise,
message type can be changed arbitrarily. When inline message is edited, new file can't be uploaded.
Use previously uploaded file via its file_id or specify a URL. On success, if the edited message was sent
by the bot, the edited Message is returned, otherwise True is returned.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
message_id (``int``):
Message identifier in the chat specified in chat_id.
media (:obj:`InputMedia`):
One of the InputMedia objects describing an animation, audio, document, photo or video.
reply_markup (:obj:`InlineKeyboardMarkup`, *optional*):
An InlineKeyboardMarkup object.
Returns:
On success, the edited :obj:`Message <pyrogram.Message>` is returned.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. |
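A hedged usage sketch (chat id, message id and file path are illustrative; it assumes InputMediaPhoto accepts a local path plus a caption, as described above):
app.edit_message_media(
    chat_id="me",
    message_id=123,
    media=InputMediaPhoto("photos/updated.jpg", caption="updated photo"),
)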
def addsystemhook(self, url):
"""
Add a system hook
:param url: url of the hook
:return: True if success
"""
data = {"url": url}
request = requests.post(
self.hook_url, headers=self.headers, data=data,
verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
if request.status_code == 201:
return True
else:
return False | Add a system hook
:param url: url of the hook
:return: True if success |
def set(self, data, start=None, count=None, stride=None):
"""Write data to the dataset.
Args::
data : array of data to write; can be given as a numpy
array, or as Python sequence (whose elements can be
imbricated sequences)
start : indices where to start writing in the dataset;
default to 0 on all dimensions
count : number of values to write along each dimension;
default to the current length of dataset dimensions
stride : sampling interval along each dimension;
default to 1 on all dimensions
For n-dimensional datasets, those 3 parameters are entered
using lists. For one-dimensional datasets, integers
can also be used.
Note that, to write the whole dataset at once, one has simply
to call the method with the dataset values in parameter
'data', omitting all other parameters.
Returns::
None.
C library equivalent : SDwritedata
The dataset can also be written using the familiar indexing and
slicing notation, like ordinary python sequences.
See "High level variable access".
"""
# Obtain SDS info.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
if isinstance(dim_sizes, type(1)):
dim_sizes = [dim_sizes]
except HDF4Error:
raise HDF4Error('set : cannot execute')
# Validate args.
if start is None:
start = [0] * rank
elif isinstance(start, type(1)):
start = [start]
if count is None:
count = dim_sizes
if count[0] == 0:
count[0] = 1
elif isinstance(count, type(1)):
count = [count]
if stride is None:
stride = [1] * rank
elif isinstance(stride, type(1)):
stride = [stride]
if len(start) != rank or len(count) != rank or len(stride) != rank:
raise HDF4Error('set : start, stride or count '\
'do not match SDS rank')
unlimited = self.isrecord()
for n in range(rank):
ok = 1
if start[n] < 0:
ok = 0
elif n > 0 or not unlimited:
if start[n] + (abs(count[n]) - 1) * stride[n] >= dim_sizes[n]:
ok = 0
if not ok:
raise HDF4Error('set arguments violate '\
'the size (%d) of dimension %d' \
% (dim_sizes[n], n))
# ??? Check support for UINT16
if not data_type in SDC.equivNumericTypes:
raise HDF4Error('set cannot currently deal '\
'with the SDS data type')
_C._SDwritedata_0(self._id, data_type, start, count, data, stride) | Write data to the dataset.
Args::
data : array of data to write; can be given as a numpy
array, or as Python sequence (whose elements can be
imbricated sequences)
start : indices where to start writing in the dataset;
default to 0 on all dimensions
count : number of values to write along each dimension;
default to the current length of dataset dimensions
stride : sampling interval along each dimension;
default to 1 on all dimensions
For n-dimensional datasets, those 3 parameters are entered
using lists. For one-dimensional datasets, integers
can also be used.
Note that, to write the whole dataset at once, one has simply
to call the method with the dataset values in parameter
'data', omitting all other parameters.
Returns::
None.
C library equivalent : SDwritedata
The dataset can also be written using the familiar indexing and
slicing notation, like ordinary python sequences.
See "High level variable access". |
def format_help(self, description):
"""
Format the setting's description into HTML.
"""
for bold in ("``", "*"):
parts = []
if description is None:
description = ""
for i, s in enumerate(description.split(bold)):
parts.append(s if i % 2 == 0 else "<b>%s</b>" % s)
description = "".join(parts)
description = urlize(description, autoescape=False)
return mark_safe(description.replace("\n", "<br>")) | Format the setting's description into HTML. |
def mkdir(self, name=None, folder_id='0'):
'''Create a folder with a specified "name" attribute.
folder_id allows specifying a parent folder.'''
return self( 'folders', method='post', encode='json',
data=dict(name=name, parent=dict(id=folder_id)) ) | Create a folder with a specified "name" attribute.
folder_id allows specifying a parent folder.
def explode_line(argument_line: str) -> typing.Tuple[str, str]:
"""
Returns a tuple containing the parameter name and the description parsed
from the given argument line
"""
parts = tuple(argument_line.split(' ', 1)[-1].split(':', 1))
return parts if len(parts) > 1 else (parts[0], '') | Returns a tuple containing the parameter name and the description parsed
from the given argument line |
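Doctest-style sketch of the expected behaviour (input lines are illustrative; note the leading space of the description is preserved):
# explode_line(':param lon: longitude of the site')  -> ('lon', ' longitude of the site')
# explode_line(':param verbose')                     -> ('verbose', '')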
def obtain_licenses():
"""Obtain the licenses in a dictionary form, keyed by url."""
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
SELECT combined_row.url, row_to_json(combined_row) FROM (
SELECT "code", "version", "name", "url", "is_valid_for_publication"
FROM licenses) AS combined_row""")
licenses = {r[0]: r[1] for r in cursor.fetchall()}
return licenses | Obtain the licenses in a dictionary form, keyed by url. |
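The returned mapping looks roughly like this (the URL and field values are illustrative):
# {'http://creativecommons.org/licenses/by/4.0/':
#      {'code': 'by', 'version': '4.0', 'name': 'Attribution', 'url': 'http://creativecommons.org/licenses/by/4.0/', 'is_valid_for_publication': True}}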
def get_key_codes(keys):
"""
Calculates the list of key codes from a string with key combinations.
Ex: 'CTRL+A' will produce the output (17, 65)
"""
keys = keys.strip().upper().split('+')
codes = list()
for key in keys:
code = ks_settings.KEY_CODES.get(key.strip())
if code:
codes.append(code)
return codes | Calculates the list of key codes from a string with key combinations.
Ex: 'CTRL+A' will produce the output (17, 65) |
def predict(self, Xnew=None, filteronly=False, include_likelihood=True, balance=None, **kw):
"""
Inputs:
------------------
balance: bool
Whether to balance or not the model as a whole
"""
if balance is None:
p_balance = self.balance
else:
p_balance = balance
# Run the Kalman filter to get the state
(m, V) = self._raw_predict(Xnew,filteronly=filteronly, p_balance=p_balance)
# Add the noise variance to the state variance
if include_likelihood:
V += float(self.likelihood.variance)
# Lower and upper bounds
#lower = m - 2*np.sqrt(V)
#upper = m + 2*np.sqrt(V)
# Return mean and variance
return m, V | Inputs:
------------------
balance: bool
Whether to balance or not the model as a whole |
def with_(self, replacement):
"""Provide replacement for string "needles".
:param replacement: Target replacement for needles given in constructor
:return: The :class:`Replacement` object
:raise TypeError: If ``replacement`` is not a string
:raise ReplacementError: If replacement has been already given
"""
ensure_string(replacement)
if is_mapping(self._replacements):
raise ReplacementError("string replacements already provided")
self._replacements = dict.fromkeys(self._replacements, replacement)
return self | Provide replacement for string "needles".
:param replacement: Target replacement for needles given in constructor
:return: The :class:`Replacement` object
:raise TypeError: If ``replacement`` is not a string
:raise ReplacementError: If replacement has been already given |
def process_summary(article):
"""Ensures summaries are not cut off. Also inserts
mathjax script so that math will be rendered"""
summary = article.summary
summary_parsed = BeautifulSoup(summary, 'html.parser')
math = summary_parsed.find_all(class_='math')
if len(math) > 0:
last_math_text = math[-1].get_text()
if len(last_math_text) > 3 and last_math_text[-3:] == '...':
content_parsed = BeautifulSoup(article._content, 'html.parser')
full_text = content_parsed.find_all(class_='math')[len(math)-1].get_text()
math[-1].string = "%s ..." % full_text
summary = summary_parsed.decode()
# clear memoization cache
import functools
if isinstance(article.get_summary, functools.partial):
memoize_instance = article.get_summary.func.__self__
memoize_instance.cache.clear()
article._summary = "%s<script type='text/javascript'>%s</script>" % (summary, process_summary.mathjax_script) | Ensures summaries are not cut off. Also inserts
mathjax script so that math will be rendered |
def add_untagged_ok(self, text: MaybeBytes,
code: Optional[ResponseCode] = None) -> None:
"""Add an untagged ``OK`` response.
See Also:
:meth:`.add_untagged`, :class:`ResponseOk`
Args:
text: The response text.
code: Optional response code.
"""
response = ResponseOk(b'*', text, code)
self.add_untagged(response) | Add an untagged ``OK`` response.
See Also:
:meth:`.add_untagged`, :class:`ResponseOk`
Args:
text: The response text.
code: Optional response code. |
def disco_loop_asm_format(opc, version, co, real_out,
fn_name_map, all_fns):
"""Produces disassembly in a format more conducive to
automatic assembly by producing inner modules before they are
used by outer ones. Since this is recursive, we'll
use more stack space at runtime.
"""
if version < 3.0:
co = code2compat(co)
else:
co = code3compat(co)
co_name = co.co_name
mapped_name = fn_name_map.get(co_name, co_name)
new_consts = []
for c in co.co_consts:
if iscode(c):
if version < 3.0:
c_compat = code2compat(c)
else:
c_compat = code3compat(c)
disco_loop_asm_format(opc, version, c_compat, real_out,
fn_name_map, all_fns)
m = re.match(".* object <(.+)> at", str(c))
if m:
basename = m.group(1)
if basename != 'module':
mapped_name = code_uniquify(basename, c.co_code)
c_compat.co_name = mapped_name
c_compat.freeze()
new_consts.append(c_compat)
else:
new_consts.append(c)
pass
co.co_consts = new_consts
m = re.match("^<(.+)>$", co.co_name)
if m or co_name in all_fns:
if co_name in all_fns:
basename = co_name
else:
basename = m.group(1)
if basename != 'module':
mapped_name = code_uniquify(basename, co.co_code)
co_name = mapped_name
assert mapped_name not in fn_name_map
fn_name_map[mapped_name] = basename
co.co_name = mapped_name
pass
elif co_name in fn_name_map:
# FIXME: better would be a hash of the co_code
mapped_name = code_uniquify(co_name, co.co_code)
fn_name_map[mapped_name] = co_name
co.co_name = mapped_name
pass
co = co.freeze()
all_fns.add(co_name)
if co.co_name != '<module>' or co.co_filename:
real_out.write("\n" + format_code_info(co, version, mapped_name) + "\n")
bytecode = Bytecode(co, opc, dup_lines=True)
real_out.write(bytecode.dis(asm_format=True) + "\n") | Produces disassembly in a format more conducive to
automatic assembly by producing inner modules before they are
used by outer ones. Since this is recursive, we'll
use more stack space at runtime. |
def segments(self, **kwargs):
"""
Segments are yielded when they are available
Segments appear on a time line, for dynamic content they are only available at a certain time
and sometimes for a limited time. For static content they are all available at the same time.
:param kwargs: extra args to pass to the segment template
:return: yields Segments
"""
segmentBase = self.segmentBase or self.walk_back_get_attr("segmentBase")
segmentLists = self.segmentList or self.walk_back_get_attr("segmentList")
segmentTemplate = self.segmentTemplate or self.walk_back_get_attr("segmentTemplate")
if segmentTemplate:
for segment in segmentTemplate.segments(RepresentationID=self.id,
Bandwidth=int(self.bandwidth * 1000),
**kwargs):
yield segment
elif segmentLists:
for segmentList in segmentLists:
for segment in segmentList.segments:
yield segment
else:
yield Segment(self.base_url, 0, True, True) | Segments are yielded when they are available
Segments appear on a time line, for dynamic content they are only available at a certain time
and sometimes for a limited time. For static content they are all available at the same time.
:param kwargs: extra args to pass to the segment template
:return: yields Segments |
def dump(obj, fp, **kw):
r"""Dump python object to file.
>>> import lazyxml
>>> data = {'demo': {'foo': 1, 'bar': 2}}
>>> lazyxml.dump(data, 'dump.xml')
>>> with open('dump-fp.xml', 'w') as fp:
>>> lazyxml.dump(data, fp)
>>> from cStringIO import StringIO
>>> data = {'demo': {'foo': 1, 'bar': 2}}
>>> buffer = StringIO()
>>> lazyxml.dump(data, buffer)
>>> buffer.getvalue()
<?xml version="1.0" encoding="utf-8"?><demo><foo><![CDATA[1]]></foo><bar><![CDATA[2]]></bar></demo>
>>> buffer.close()
.. note::
``kw`` argument have the same meaning as in :func:`dumps`
:param obj: data for dump to xml.
:param fp: a filename or a file or file-like object that supports ``.write()`` to write the xml content
.. versionchanged:: 1.2
Before this, `fp` had to be a filename string. It can now be a file or file-like object that supports ``.write()`` to write the xml content.
"""
xml = dumps(obj, **kw)
if isinstance(fp, basestring):
with open(fp, 'w') as fobj:
fobj.write(xml)
else:
fp.write(xml) | r"""Dump python object to file.
>>> import lazyxml
>>> data = {'demo': {'foo': 1, 'bar': 2}}
>>> lazyxml.dump(data, 'dump.xml')
>>> with open('dump-fp.xml', 'w') as fp:
>>> lazyxml.dump(data, fp)
>>> from cStringIO import StringIO
>>> data = {'demo': {'foo': 1, 'bar': 2}}
>>> buffer = StringIO()
>>> lazyxml.dump(data, buffer)
>>> buffer.getvalue()
<?xml version="1.0" encoding="utf-8"?><demo><foo><![CDATA[1]]></foo><bar><![CDATA[2]]></bar></demo>
>>> buffer.close()
.. note::
``kw`` argument have the same meaning as in :func:`dumps`
:param obj: data for dump to xml.
:param fp: a filename or a file or file-like object that supports ``.write()`` to write the xml content
.. versionchanged:: 1.2
Before this, `fp` had to be a filename string. It can now be a file or file-like object that supports ``.write()`` to write the xml content.
def estimate_gas(
self,
block_identifier,
function: str,
*args,
**kwargs,
) -> typing.Optional[int]:
"""Returns a gas estimate for the function with the given arguments or
None if the function call will fail due to Insufficient funds or
the logic in the called function."""
fn = getattr(self.contract.functions, function)
address = to_checksum_address(self.jsonrpc_client.address)
if self.jsonrpc_client.eth_node is constants.EthClient.GETH:
# Unfortunately geth does not follow the ethereum JSON-RPC spec and
# does not accept a block identifier argument for eth_estimateGas
# parity and py-evm (trinity) do.
#
# Geth only runs estimateGas on the pending block and that's why we
# should also enforce parity, py-evm and others to do the same since
# we can't customize geth.
#
# Spec: https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_estimategas
# Geth Issue: https://github.com/ethereum/go-ethereum/issues/2586
# Relevant web3 PR: https://github.com/ethereum/web3.py/pull/1046
block_identifier = None
try:
return fn(*args, **kwargs).estimateGas(
transaction={'from': address},
block_identifier=block_identifier,
)
except ValueError as err:
action = inspect_client_error(err, self.jsonrpc_client.eth_node)
will_fail = action in (
ClientErrorInspectResult.INSUFFICIENT_FUNDS,
ClientErrorInspectResult.ALWAYS_FAIL,
)
if will_fail:
return None
raise err | Returns a gas estimate for the function with the given arguments or
None if the function call will fail due to Insufficient funds or
the logic in the called function. |
def handle_resourceset(ltext, **kwargs):
'''
A helper that converts sets of resources from a textual format such as Markdown, including absolutizing relative IRIs
'''
fullprop=kwargs.get('fullprop')
rid=kwargs.get('rid')
base=kwargs.get('base', VERSA_BASEIRI)
model=kwargs.get('model')
iris = ltext.strip().split()
for i in iris:
model.add(rid, fullprop, I(iri.absolutize(i, base)))
return None | A helper that converts sets of resources from a textual format such as Markdown, including absolutizing relative IRIs |
def is_lazy_user(user):
""" Return True if the passed user is a lazy user. """
# Anonymous users are not lazy.
if user.is_anonymous:
return False
# Check the user backend. If the lazy signup backend
# authenticated them, then the user is lazy.
backend = getattr(user, 'backend', None)
if backend == 'lazysignup.backends.LazySignupBackend':
return True
# Otherwise, we have to fall back to checking the database.
from lazysignup.models import LazyUser
return bool(LazyUser.objects.filter(user=user).count() > 0) | Return True if the passed user is a lazy user. |
def str_to_datetime(self,format="%Y-%m-%dT%H:%M:%S%ZP"):
"""
Create a new SArray with all the values cast to datetime. The string format is
specified by the 'format' parameter.
Parameters
----------
format : str
The string format of the input SArray. Default format is "%Y-%m-%dT%H:%M:%S%ZP".
If format is "ISO", the the format is "%Y%m%dT%H%M%S%F%q"
Returns
-------
out : SArray[datetime.datetime]
The SArray converted to the type 'datetime'.
Examples
--------
>>> sa = turicreate.SArray(["20-Oct-2011 09:30:10 GMT-05:30"])
>>> sa.str_to_datetime("%d-%b-%Y %H:%M:%S %ZP")
dtype: datetime
Rows: 1
datetime.datetime(2011, 10, 20, 9, 30, 10, tzinfo=GMT(-5.5))
See Also
----------
datetime_to_str
References
----------
[1] boost date time to string conversion guide (http://www.boost.org/doc/libs/1_48_0/doc/html/date_time/date_time_io.html)
"""
if(self.dtype != str):
raise TypeError("str_to_datetime expects SArray of str as input SArray")
with cython_context():
return SArray(_proxy=self.__proxy__.str_to_datetime(format)) | Create a new SArray with all the values cast to datetime. The string format is
specified by the 'format' parameter.
Parameters
----------
format : str
The string format of the input SArray. Default format is "%Y-%m-%dT%H:%M:%S%ZP".
If format is "ISO", the the format is "%Y%m%dT%H%M%S%F%q"
Returns
-------
out : SArray[datetime.datetime]
The SArray converted to the type 'datetime'.
Examples
--------
>>> sa = turicreate.SArray(["20-Oct-2011 09:30:10 GMT-05:30"])
>>> sa.str_to_datetime("%d-%b-%Y %H:%M:%S %ZP")
dtype: datetime
Rows: 1
datetime.datetime(2011, 10, 20, 9, 30, 10, tzinfo=GMT(-5.5))
See Also
----------
datetime_to_str
References
----------
[1] boost date time to string conversion guide (http://www.boost.org/doc/libs/1_48_0/doc/html/date_time/date_time_io.html) |
def roi_pooling(input, rois, pool_height, pool_width):
"""
returns a tensorflow operation for computing the Region of Interest Pooling
@arg input: feature maps on which to perform the pooling operation
@arg rois: list of regions of interest in the format (feature map index, upper left, bottom right)
@arg pool_height: height of the pooling sections
@arg pool_width: width of the pooling sections
"""
# TODO(maciek): ops scope
out = roi_pooling_module.roi_pooling(input, rois, pool_height=pool_height, pool_width=pool_width)
output, argmax_output = out[0], out[1]
return output | returns a tensorflow operation for computing the Region of Interest Pooling
@arg input: feature maps on which to perform the pooling operation
@arg rois: list of regions of interest in the format (feature map index, upper left, bottom right)
@arg pool_height: height of the pooling sections
@arg pool_width: width of the pooling sections
def calculate_size(name, sequence):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += LONG_SIZE_IN_BYTES
return data_size | Calculates the request payload size |
def system_find_databases(input_params={}, always_retry=True, **kwargs):
"""
Invokes the /system/findDatabases API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method%3A-%2Fsystem%2FfindDatabases
"""
return DXHTTPRequest('/system/findDatabases', input_params, always_retry=always_retry, **kwargs) | Invokes the /system/findDatabases API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method%3A-%2Fsystem%2FfindDatabases |
def remove(parent, idx):
"""Remove a value from a dict."""
if isinstance(parent, dict):
del parent[idx]
elif isinstance(parent, list):
del parent[int(idx)]
else:
raise JSONPathError("Invalid path for operation") | Remove a value from a dict or list.
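A small sketch of the three branches (values are illustrative):
doc = {"a": [10, 20, 30]}
remove(doc["a"], "1")   # list branch: index is cast to int, doc["a"] becomes [10, 30]
remove(doc, "a")        # dict branch: key "a" is deleted, doc becomes {}
remove(42, "x")         # neither dict nor list: raises JSONPathError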
def modified(self):
'''
Get human-readable last modification date-time.
:returns: ISO 8601-like date-time string (without timezone)
:rtype: str
'''
try:
dt = datetime.datetime.fromtimestamp(self.stats.st_mtime)
return dt.strftime('%Y.%m.%d %H:%M:%S')
except OSError:
return None | Get human-readable last modification date-time.
:returns: ISO 8601-like date-time string (without timezone)
:rtype: str |
def _init_create_child(self):
"""
Initialize the base class :attr:`create_child` and
:attr:`create_child_args` according to whether we need a PTY or not.
"""
if self._requires_pty():
self.create_child = mitogen.parent.hybrid_tty_create_child
else:
self.create_child = mitogen.parent.create_child
self.create_child_args = {
'stderr_pipe': True,
} | Initialize the base class :attr:`create_child` and
:attr:`create_child_args` according to whether we need a PTY or not. |
def _save_state_and_schedule_next(self, shard_state, tstate, task_directive):
"""Save state and schedule task.
Save shard state to datastore.
Schedule next slice if needed.
Set HTTP response code.
No modification to any shard_state or tstate.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
task_directive: enum _TASK_DIRECTIVE.
Returns:
The task to retry if applicable.
"""
spec = tstate.mapreduce_spec
if task_directive == self._TASK_DIRECTIVE.DROP_TASK:
return
if task_directive in (self._TASK_DIRECTIVE.RETRY_SLICE,
self._TASK_DIRECTIVE.RETRY_TASK):
# Set HTTP code to 500.
return self.retry_task()
elif task_directive == self._TASK_DIRECTIVE.ABORT_SHARD:
logging.info("Aborting shard %d of job '%s'",
shard_state.shard_number, shard_state.mapreduce_id)
task = None
elif task_directive == self._TASK_DIRECTIVE.FAIL_TASK:
logging.critical("Shard %s failed permanently.", shard_state.shard_id)
task = None
elif task_directive == self._TASK_DIRECTIVE.RETRY_SHARD:
logging.warning("Shard %s is going to be attempted for the %s time.",
shard_state.shard_id,
shard_state.retries + 1)
task = self._state_to_task(tstate, shard_state)
elif task_directive == self._TASK_DIRECTIVE.RECOVER_SLICE:
logging.warning("Shard %s slice %s is being recovered.",
shard_state.shard_id,
shard_state.slice_id)
task = self._state_to_task(tstate, shard_state)
else:
assert task_directive == self._TASK_DIRECTIVE.PROCEED_TASK
countdown = self._get_countdown_for_next_slice(spec)
task = self._state_to_task(tstate, shard_state, countdown=countdown)
# Prepare parameters for db transaction and taskqueue.
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
# For test only.
# TODO(user): Remove this.
"default")
config = util.create_datastore_write_config(spec)
@db.transactional(retries=5)
def _tx():
"""The Transaction helper."""
fresh_shard_state = model.ShardState.get_by_shard_id(tstate.shard_id)
if not fresh_shard_state:
raise db.Rollback()
if (not fresh_shard_state.active or
"worker_active_state_collision" in _TEST_INJECTED_FAULTS):
logging.warning("Shard %s is not active. Possible spurious task "
"execution. Dropping this task.", tstate.shard_id)
logging.warning("Datastore's %s", str(fresh_shard_state))
logging.warning("Slice's %s", str(shard_state))
return
fresh_shard_state.copy_from(shard_state)
fresh_shard_state.put(config=config)
# Add task in the same datastore transaction.
# This way we guarantee taskqueue is never behind datastore states.
# Old tasks will be dropped.
# Future task won't run until datastore states catches up.
if fresh_shard_state.active:
# Not adding task transactionally.
# transactional enqueue requires tasks with no name.
self._add_task(task, spec, queue_name)
try:
_tx()
except (datastore_errors.Error,
taskqueue.Error,
runtime.DeadlineExceededError,
apiproxy_errors.Error), e:
logging.warning(
"Can't transactionally continue shard. "
"Will retry slice %s %s for the %s time.",
tstate.shard_id,
tstate.slice_id,
self.task_retry_count() + 1)
self._try_free_lease(shard_state)
raise e | Save state and schedule task.
Save shard state to datastore.
Schedule next slice if needed.
Set HTTP response code.
No modification to any shard_state or tstate.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
task_directive: enum _TASK_DIRECTIVE.
Returns:
The task to retry if applicable. |
def _configure_context(ctx, opts, skip=()):
"""
Configures context of public key operations
@param ctx - context to configure
@param opts - dictionary of options (from kwargs of calling
function)
@param skip - list of options which shouldn't be passed to
context
"""
for oper in opts:
if oper in skip:
continue
if isinstance(oper,chartype):
op = oper.encode("ascii")
else:
op = oper
if isinstance(opts[oper],chartype):
value = opts[oper].encode("ascii")
elif isinstance(opts[oper],bintype):
value = opts[oper]
else:
if pyver == 2:
value = str(opts[oper])
else:
value = str(opts[oper]).encode('ascii')
ret = libcrypto.EVP_PKEY_CTX_ctrl_str(ctx, op, value)
if ret == -2:
raise PKeyError("Parameter %s is not supported by key" % oper)
if ret < 1:
raise PKeyError("Error setting parameter %s" % oper) | Configures context of public key operations
@param ctx - context to configure
@param opts - dictionary of options (from kwargs of calling
function)
@param skip - list of options which shouldn't be passed to
context |
def _lookup_vpc_count_min_max(session=None, **bfilter):
"""Look up count/min/max Nexus VPC Allocs for given switch.
:param session: db session
:param bfilter: filter for mappings query
:returns: VPC count plus min and max values if the query gave a result,
else raise NexusVPCAllocNotFound.
"""
if session is None:
session = bc.get_reader_session()
try:
res = session.query(
func.count(nexus_models_v2.NexusVPCAlloc.vpc_id),
func.min(nexus_models_v2.NexusVPCAlloc.vpc_id),
func.max(nexus_models_v2.NexusVPCAlloc.vpc_id),
).filter(nexus_models_v2.NexusVPCAlloc.switch_ip ==
bfilter['switch_ip']).one()
count = res[0]
sw_min = res[1]
sw_max = res[2]
return count, sw_min, sw_max
except sa_exc.NoResultFound:
pass
raise c_exc.NexusVPCAllocNotFound(**bfilter) | Look up count/min/max Nexus VPC Allocs for given switch.
:param session: db session
:param bfilter: filter for mappings query
:returns: VPC count plus min and max values if the query gave a result,
else raise NexusVPCAllocNotFound. |
def numDomtblout(domtblout, numHits, evalueT, bitT, sort):
"""
parse hmm domain table output
this version is faster but does not work unless the table is sorted
"""
if sort is True:
for hit in numDomtblout_sort(domtblout, numHits, evalueT, bitT):
yield hit
return
header = ['#target name', 'target accession', 'tlen',
'query name', 'query accession', 'qlen',
'full E-value', 'full score', 'full bias',
'domain #', '# domains',
'domain c-Evalue', 'domain i-Evalue', 'domain score', 'domain bias',
'hmm from', 'hmm to', 'seq from', 'seq to', 'env from', 'env to',
'acc', 'target description']
yield header
prev, hits = None, []
for line in domtblout:
if line.startswith('#'):
continue
# parse line and get description
line = line.strip().split()
desc = ' '.join(line[18:])
line = line[0:18]
line.append(desc)
# create ID based on query name and domain number
ID = line[0] + line[9]
# domain c-Evalue and domain score thresholds
line[11], line[13] = float(line[11]), float(line[13])
evalue, bitscore = line[11], line[13]
line[11], line[13] = evalue, bitscore
if ID != prev:
if len(hits) > 0:
for hit in top_hits(hits, numHits, 13, True):
yield hit
hits = []
if evalueT == False and bitT == False:
hits.append(line)
elif evalue <= evalueT and bitT == False:
hits.append(line)
elif evalue <= evalueT and bitscore >= bitT:
hits.append(line)
elif evalueT == False and bitscore >= bitT:
hits.append(line)
prev = ID
for hit in top_hits(hits, numHits, 13, True):
yield hit | parse hmm domain table output
this version is faster but does not work unless the table is sorted |
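A hedged usage sketch (file name and thresholds are illustrative; remember the first yielded row is the header, and the input must be sorted by query unless sort=True):
with open('search.domtblout') as fh:
    for hit in numDomtblout(fh, numHits=1, evalueT=1e-5, bitT=False, sort=False):
        print(hit[0], hit[3], hit[13])  # target name, query name, domain score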
def _get_config_instance(group_or_term, session, **kwargs):
""" Finds appropriate config instance and returns it.
Args:
group_or_term (Group or Term):
session (Sqlalchemy session):
kwargs (dict): kwargs to pass to get_or_create.
Returns:
tuple of (Config, bool):
"""
path = group_or_term._get_path()
cached = group_or_term._top._cached_configs.get(path)
if cached:
config = cached
created = False
else:
# does not exist or not yet cached
config, created = get_or_create(session, Config, **kwargs)
return config, created | Finds appropriate config instance and returns it.
Args:
group_or_term (Group or Term):
session (Sqlalchemy session):
kwargs (dict): kwargs to pass to get_or_create.
Returns:
tuple of (Config, bool): |
def register_master():
"""Register the SDP Master device."""
tango_db = Database()
device = "sip_sdp/elt/master"
device_info = DbDevInfo()
device_info._class = "SDPMasterDevice"
device_info.server = "sdp_master_ds/1"
device_info.name = device
devices = tango_db.get_device_name(device_info.server, device_info._class)
if device not in devices:
LOG.info('Registering device "%s" with device server "%s"',
device_info.name, device_info.server)
tango_db.add_device(device_info) | Register the SDP Master device. |
def prepare_batch(self):
"""
Propagates exception on failure
:return: byte array to put on the blockchain
"""
# validate batch
for _, metadata in self.certificates_to_issue.items():
self.certificate_handler.validate_certificate(metadata)
# sign batch
with FinalizableSigner(self.secret_manager) as signer:
for _, metadata in self.certificates_to_issue.items():
self.certificate_handler.sign_certificate(signer, metadata)
self.merkle_tree.populate(self.get_certificate_generator())
logging.info('here is the op_return_code data: %s', b2h(self.merkle_tree.get_blockchain_data()))
return self.merkle_tree.get_blockchain_data() | Propagates exception on failure
:return: byte array to put on the blockchain |
def P_conditional(self, i, li, j, lj, y):
"""Compute the conditional probability
P_\theta(li | lj, y)
=
Z^{-1} exp(
theta_{i|y} \indpm{ \lambda_i = Y }
+ \theta_{i,j} \indpm{ \lambda_i = \lambda_j }
)
In other words, compute the conditional probability that LF i outputs
li given that LF j output lj, and Y = y, parameterized by
- a class-conditional LF accuracy parameter \theta_{i|y}
- a symmetric LF correlation parameter \theta_{i,j}
"""
Z = np.sum([self._P(i, _li, j, lj, y) for _li in range(self.k + 1)])
return self._P(i, li, j, lj, y) / Z | Compute the conditional probability
P_\theta(li | lj, y)
=
Z^{-1} exp(
theta_{i|y} \indpm{ \lambda_i = Y }
+ \theta_{i,j} \indpm{ \lambda_i = \lambda_j }
)
In other words, compute the conditional probability that LF i outputs
li given that LF j output lj, and Y = y, parameterized by
- a class-conditional LF accuracy parameter \theta_{i|y}
- a symmetric LF correlation parameter \theta_{i,j}
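Restating the docstring formula in LaTeX (this reading treats \indpm as a ±1 indicator and is an interpretation, not verbatim from the source; the denominator matches the code's sum over range(self.k + 1)):
P_\theta(\lambda_i = l_i \mid \lambda_j = l_j, Y = y)
  = \frac{\exp\big(\theta_{i|y}\,\mathbb{1}^{\pm}[l_i = y] + \theta_{i,j}\,\mathbb{1}^{\pm}[l_i = l_j]\big)}
         {\sum_{l_i' = 0}^{k} \exp\big(\theta_{i|y}\,\mathbb{1}^{\pm}[l_i' = y] + \theta_{i,j}\,\mathbb{1}^{\pm}[l_i' = l_j]\big)}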
def encoder_data(self, data):
"""
This method handles the incoming encoder data message and stores
the data in the digital response table.
:param data: Message data from Firmata
:return: No return value.
"""
prev_val = self.digital_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE]
val = int((data[self.MSB] << 7) + data[self.LSB])
# set value so that it shows positive and negative values
if val > 8192:
val -= 16384
pin = data[0]
with self.pymata.data_lock:
self.digital_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE] = val
if prev_val != val:
callback = self.digital_response_table[pin][self.RESPONSE_TABLE_CALLBACK]
if callback is not None:
callback([self.pymata.ENCODER, pin,
self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE]]) | This method handles the incoming encoder data message and stores
the data in the digital response table.
:param data: Message data from Firmata
:return: No return value. |
def process_user_record(cls, info):
"""Type convert the csv record, modifies in place."""
keys = list(info.keys())
# Value conversion
for k in keys:
v = info[k]
if v in ('N/A', 'no_information'):
info[k] = None
elif v == 'false':
info[k] = False
elif v == 'true':
info[k] = True
# Object conversion
for p, t in cls.list_sub_objects:
obj = dict([(k[len(p):], info.pop(k))
for k in keys if k.startswith(p)])
if obj.get('active', False):
info.setdefault(t, []).append(obj)
return info | Type convert the csv record, modifies in place. |
def monkeypatch_method(cls, patch_name):
# This function's code was inspired from the following thread:
# "[Python-Dev] Monkeypatching idioms -- elegant or ugly?"
# by Robert Brewer <fumanchu at aminus.org>
# (Tue Jan 15 19:13:25 CET 2008)
"""
Add the decorated method to the given class; replace as needed.
If the named method already exists on the given class, it will
be replaced, and a reference to the old method is created as
cls._old_<patch_name>_<name>. If the "_old_<patch_name>_<name>" attribute
already exists, KeyError is raised.
"""
def decorator(func):
fname = func.__name__
old_func = getattr(cls, fname, None)
if old_func is not None:
# Add the old func to a list of old funcs.
old_ref = "_old_%s_%s" % (patch_name, fname)
old_attr = getattr(cls, old_ref, None)
if old_attr is None:
setattr(cls, old_ref, old_func)
else:
raise KeyError("%s.%s already exists."
% (cls.__name__, old_ref))
setattr(cls, fname, func)
return func
return decorator | Add the decorated method to the given class; replace as needed.
If the named method already exists on the given class, it will
be replaced, and a reference to the old method is created as
cls._old_<patch_name>_<name>. If the "_old_<patch_name>_<name>" attribute
already exists, KeyError is raised. |
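A minimal usage sketch (class and method names are illustrative):
class Greeter(object):
    def hello(self):
        return "hi"

@monkeypatch_method(Greeter, "mypatch")
def hello(self):
    return "patched hi"

# Greeter().hello() now returns "patched hi"; the original method
# is still reachable as Greeter._old_mypatch_hello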
def to_python(self, omobj):
""" Convert OpenMath object to Python """
# general overrides
if omobj.__class__ in self._omclass_to_py:
return self._omclass_to_py[omobj.__class__](omobj)
# oms
elif isinstance(omobj, om.OMSymbol):
return self._lookup_to_python(omobj.cdbase, omobj.cd, omobj.name)
# oma
elif isinstance(omobj, om.OMApplication):
elem = self.to_python(omobj.elem)
arguments = [self.to_python(x) for x in omobj.arguments]
return elem(*arguments)
raise ValueError('Cannot convert object of class %s to Python.' % omobj.__class__.__name__) | Convert OpenMath object to Python |
def conv2d(self, filter_size, output_channels, stride=1, padding='SAME', bn=True, activation_fn=tf.nn.relu,
b_value=0.0, s_value=1.0, trainable=True):
"""
2D Convolutional Layer.
:param filter_size: int. assumes square filter
:param output_channels: int
:param stride: int
:param padding: 'VALID' or 'SAME'
:param activation_fn: tf.nn function
:param b_value: float
:param s_value: float
"""
self.count['conv'] += 1
scope = 'conv_' + str(self.count['conv'])
with tf.variable_scope(scope):
# Conv function
input_channels = self.input.get_shape()[3]
if filter_size == 0: # outputs a 1x1 feature map; used for FCN
filter_size = self.input.get_shape()[2]
padding = 'VALID'
output_shape = [filter_size, filter_size, input_channels, output_channels]
w = self.weight_variable(name='weights', shape=output_shape, trainable=trainable)
self.input = tf.nn.conv2d(self.input, w, strides=[1, stride, stride, 1], padding=padding)
if bn is True: # batch normalization
self.input = self.batch_norm(self.input)
if b_value is not None: # bias value
b = self.const_variable(name='bias', shape=[output_channels], value=b_value, trainable=trainable)
self.input = tf.add(self.input, b)
if s_value is not None: # scale value
s = self.const_variable(name='scale', shape=[output_channels], value=s_value, trainable=trainable)
self.input = tf.multiply(self.input, s)
if activation_fn is not None: # activation function
self.input = activation_fn(self.input)
print(scope + ' output: ' + str(self.input.get_shape())) | 2D Convolutional Layer.
:param filter_size: int. assumes square filter
:param output_channels: int
:param stride: int
:param padding: 'VALID' or 'SAME'
:param activation_fn: tf.nn function
:param b_value: float
:param s_value: float
:param bn: bool. whether to apply batch normalization
:param trainable: bool. whether the created variables are trainable
def parse_message(self, msg, msg_signature, timestamp, nonce):
"""
Handle a message pushed from the WeChat server
:params msg: encrypted message content
:params msg_signature: message signature
:params timestamp: timestamp
:params nonce: random nonce
"""
content = self.crypto.decrypt_message(msg, msg_signature, timestamp, nonce)
message = xmltodict.parse(to_text(content))['xml']
message_type = message['InfoType'].lower()
message_class = COMPONENT_MESSAGE_TYPES.get(message_type, ComponentUnknownMessage)
msg = message_class(message)
if msg.type == 'component_verify_ticket':
self.session.set(msg.type, msg.verify_ticket)
elif msg.type in ('authorized', 'updateauthorized'):
msg.query_auth_result = self.query_auth(msg.authorization_code)
return msg | Handle a message pushed from the WeChat server
:params msg: encrypted message content
:params msg_signature: message signature
:params timestamp: timestamp
:params nonce: random nonce
def html_visit_inheritance_diagram(
self: NodeVisitor, node: inheritance_diagram
) -> None:
"""
Builds HTML output from an :py:class:`~uqbar.sphinx.inheritance.inheritance_diagram` node.
"""
inheritance_graph = node["graph"]
urls = build_urls(self, node)
graphviz_graph = inheritance_graph.build_graph(urls)
dot_code = format(graphviz_graph, "graphviz")
# TODO: We can perform unflattening here
aspect_ratio = inheritance_graph.aspect_ratio
if aspect_ratio:
aspect_ratio = math.ceil(math.sqrt(aspect_ratio[1] / aspect_ratio[0]))
if aspect_ratio > 1:
process = subprocess.Popen(
["unflatten", "-l", str(aspect_ratio), "-c", str(aspect_ratio), "-f"],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate(dot_code.encode())
dot_code = stdout.decode()
render_dot_html(self, node, dot_code, {}, "inheritance", "inheritance")
raise SkipNode | Builds HTML output from an :py:class:`~uqbar.sphinx.inheritance.inheritance_diagram` node. |
def split_fixed_pattern(path):
"""
Split path into fixed and masked parts
:param path: e.g
https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/*.*.*/vc110/x86/win/boost.*.*.*.tar.gz
:return:
_path_fixed: https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/
_path_pattern: *.*.*/vc110/x86/win/boost.*.*.*.tar.gz
"""
_first_pattern_pos = path.find('*')
_path_separator_pos = path.rfind('/', 0, _first_pattern_pos) + 1
_path_fixed = path[:_path_separator_pos]
_path_pattern = path[_path_separator_pos:]
return _path_fixed, _path_pattern | Split path into fixed and masked parts
:param path: e.g.
https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/*.*.*/vc110/x86/win/boost.*.*.*.tar.gz
:return:
_path_fixed: https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/
_path_pattern: *.*.*/vc110/x86/win/boost.*.*.*.tar.gz |
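The split point is the last '/' before the first '*', so the docstring's own example doubles as a quick check:

url = ('https://repo.example.com/artifactory/libs-cpp-release.snapshot/'
       'boost/1.60-pm/*.*.*/vc110/x86/win/boost.*.*.*.tar.gz')
fixed, pattern = split_fixed_pattern(url)
# fixed   == 'https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/'
# pattern == '*.*.*/vc110/x86/win/boost.*.*.*.tar.gz'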
def merge_errors(self, errors_local, errors_remote):
"""
Merge errors
Recursively traverses error graph to merge remote errors into local
errors to return a new joined graph.
:param errors_local: dict, local errors, will be updated
:param errors_remote: dict, remote errors, provides updates
:return: dict
"""
for prop in errors_remote:
# create if doesn't exist
if prop not in errors_local:
errors_local[prop] = errors_remote[prop]
continue
local = errors_local[prop]
local = local.errors if isinstance(local, Result) else local
remote = errors_remote[prop]
remote = remote.errors if isinstance(remote, Result) else remote
# check compatibility
if not isinstance(local, type(remote)):
msg = 'Type mismatch on property [{}] when merging errors. '
msg += 'Unable to merge [{}] into [{}]'
raise x.UnableToMergeResultsType(msg.format(
prop,
type(errors_remote[prop]),
type(self.errors[prop])
))
mismatch = 'Unable to merge nested entity errors with nested '
mismatch += 'collection errors on property [{}]'
if 'schema' in local and 'collection' in remote:
raise x.UnableToMergeResultsType(mismatch.format(prop))
if 'collection' in local and 'schema' in remote:
raise x.UnableToMergeResultsType(mismatch.format(prop))
# merge simple & state
if type(remote) is list:
errors_local[prop].extend(remote)
continue
# merge direct errors on nested entities and collection
if 'direct' in remote and 'direct' in local:
errors_local[prop]['direct'].extend(remote['direct'])
# merge nested schema errors
if 'schema' in remote and 'schema' in local:
errors_local[prop]['schema'] = self.merge_errors(
errors_local[prop]['schema'],
remote['schema']
)
# merge nested collections errors
if 'collection' in remote and 'collection' in local:
for index, result in remote['collection'].items():
if index not in local['collection']:
errors_local[prop]['collection'][index] = result
else:
merged = self.merge_errors(
errors_local[prop]['collection'][index].errors,
errors_remote[prop]['collection'][index].errors,
)
errors_local[prop]['collection'][index] = merged
# and return
return errors_local | Merge errors
Recursively traverses error graph to merge remote errors into local
errors to return a new joined graph.
:param errors_local: dict, local errors, will be updated
:param errors_remote: dict, remote errors, provides updates
:return: dict |
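For the simplest case -- plain lists of messages on both sides -- the merge is per-property concatenation. A hedged sketch, with `result` standing in for whatever object exposes merge_errors():

# `result` is a placeholder for the object carrying merge_errors().
local = {'name': ['Required field']}
remote = {'name': ['Too short'], 'email': ['Invalid address']}
merged = result.merge_errors(local, remote)
# merged == {'name': ['Required field', 'Too short'], 'email': ['Invalid address']}
# note: errors_local is updated in place, as the docstring states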
def read_fastq(filename):
"""
return a stream of FASTQ entries, handling gzipped and empty files
"""
if not filename:
return itertools.cycle((None,))
if filename == "-":
filename_fh = sys.stdin
elif filename.endswith('gz'):
if is_python3:
filename_fh = gzip.open(filename, mode='rt')
else:
filename_fh = BufferedReader(gzip.open(filename, mode='rt'))
else:
filename_fh = open(filename)
return stream_fastq(filename_fh) | return a stream of FASTQ entries, handling gzipped and empty files |
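A hedged usage sketch; the record type depends on stream_fastq(), which is not shown here, and the empty-filename case yields an endless stream of None that callers handle themselves:

# 'reads.fastq.gz' is an illustrative path; plain files, gzipped files, and '-' (stdin) all work.
for record in read_fastq('reads.fastq.gz'):
    handle(record)  # `handle` is a placeholder for downstream processing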
def imprints2marc(self, key, value):
"""Populate the ``260`` MARC field."""
return {
'a': value.get('place'),
'b': value.get('publisher'),
'c': value.get('date'),
} | Populate the ``260`` MARC field. |
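Because the body only reads `value`, the rule can be exercised directly for illustration; the first two arguments (the dojson rule object and key) do not affect the output:

imprints2marc(None, 'imprints', {'place': 'Geneva', 'publisher': 'CERN', 'date': '2004'})
# -> {'a': 'Geneva', 'b': 'CERN', 'c': '2004'}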
def preprocess(self, dataset, mode, hparams, interleave=True):
"""Runtime preprocessing on the whole dataset.
Return a tf.data.Dataset -- the preprocessed version of the given one.
By default this function calls preprocess_example.
Args:
dataset: the Dataset of already decoded but not yet preprocessed features.
mode: tf.estimator.ModeKeys
hparams: HParams, model hyperparameters
interleave: bool, whether to use parallel_interleave, which is faster
but will alter the order of samples non-deterministically, or flat_map,
which is slower but will preserve the sample order.
Returns:
a Dataset
"""
def _preprocess(example):
examples = self.preprocess_example(example, mode, hparams)
if not isinstance(examples, tf.data.Dataset):
examples = tf.data.Dataset.from_tensors(examples)
return examples
if interleave:
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
_preprocess, sloppy=True, cycle_length=8))
else:
dataset = dataset.flat_map(_preprocess)
return dataset | Runtime preprocessing on the whole dataset.
Return a tf.data.Dataset -- the preprocessed version of the given one.
By default this function calls preprocess_example.
Args:
dataset: the Dataset of already decoded but not yet preprocessed features.
mode: tf.estimator.ModeKeys
hparams: HParams, model hyperparameters
interleave: bool, whether to use parallel_interleave, which is faster
but will alter the order of samples non-deterministically, or flat_map,
which is slower but will preserve the sample order.
Returns:
a Dataset |
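A minimal call sketch, assuming `problem` is an instance of the class defining this method, `raw_dataset` holds decoded-but-unprocessed examples, and `hparams` is the usual HParams object; all three are assumptions for illustration:

import tensorflow as tf

train_ds = problem.preprocess(raw_dataset,
                              mode=tf.estimator.ModeKeys.TRAIN,
                              hparams=hparams,
                              interleave=True)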
def record_participation(self, client, dt=None):
"""Record a user's participation in a test along with a given variation"""
if dt is None:
date = datetime.now()
else:
date = dt
experiment_key = self.experiment.name
pipe = self.redis.pipeline()
pipe.sadd(_key("p:{0}:years".format(experiment_key)), date.strftime('%Y'))
pipe.sadd(_key("p:{0}:months".format(experiment_key)), date.strftime('%Y-%m'))
pipe.sadd(_key("p:{0}:days".format(experiment_key)), date.strftime('%Y-%m-%d'))
pipe.execute()
keys = [
_key("p:{0}:_all:all".format(experiment_key)),
_key("p:{0}:_all:{1}".format(experiment_key, date.strftime('%Y'))),
_key("p:{0}:_all:{1}".format(experiment_key, date.strftime('%Y-%m'))),
_key("p:{0}:_all:{1}".format(experiment_key, date.strftime('%Y-%m-%d'))),
_key("p:{0}:{1}:all".format(experiment_key, self.name)),
_key("p:{0}:{1}:{2}".format(experiment_key, self.name, date.strftime('%Y'))),
_key("p:{0}:{1}:{2}".format(experiment_key, self.name, date.strftime('%Y-%m'))),
_key("p:{0}:{1}:{2}".format(experiment_key, self.name, date.strftime('%Y-%m-%d'))),
]
msetbit(keys=keys, args=([self.experiment.sequential_id(client), 1] * len(keys))) | Record a user's participation in a test along with a given variation |
def _parse_members(self, contents, anexec, params, mode="insert"):
"""Parses the local variables for the contents of the specified executable."""
#First get the variables declared in the body of the executable, these can
#be either locals or parameter declarations.
members = self.vparser.parse(contents, anexec)
#If the name matches one in the parameter list, we can connect them
for param in list(params):
lparam = param.lower()
if lparam in members:
if mode == "insert" and not lparam in anexec.parameters:
anexec.add_parameter(members[lparam])
elif mode == "delete":
anexec.remove_parameter(members[lparam])
#The remaining members that aren't in parameters are the local variables
for key in members:
if mode == "insert":
if not key.lower() in anexec.parameters:
anexec.members[key] = members[key]
elif mode == "delete" and key in anexec.members:
del anexec.members[key]
#Next we need to get hold of the docstrings for these members
if mode == "insert":
memdocs = self.docparser.parse_docs(contents, anexec)
if anexec.name in memdocs:
docs = self.docparser.to_doc(memdocs[anexec.name][0], anexec.name)
self.docparser.process_memberdocs(docs, anexec)
#Also process the embedded types and executables who may have
#docstrings just like regular executables/types do.
self.docparser.process_embedded(memdocs, anexec) | Parses the local variables for the contents of the specified executable. |
def neg_int(i):
""" Simple negative integer validation. """
try:
if isinstance(i, string_types):
i = int(i)
if not isinstance(i, int) or i > 0:
raise Exception()
except:
raise ValueError("Not a negative integer")
return i | Simple negative integer validation. |
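A few quick checks that follow directly from the code; note that 0 is accepted as written, since only values greater than zero are rejected:

neg_int('-12')   # -> -12 (string input is coerced)
neg_int(-3)      # -> -3
neg_int(5)       # raises ValueError: Not a negative integer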
def opener_from_zipfile(zipfile):
"""
Returns a function that will open a file in a zipfile by name.
For Python3 compatibility, the raw file will be converted to text.
"""
def opener(filename):
inner_file = zipfile.open(filename)
if PY3:
from io import TextIOWrapper
return TextIOWrapper(inner_file)
else:
return inner_file
return opener | Returns a function that will open a file in a zipfile by name.
For Python3 compatibility, the raw file will be converted to text. |
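A usage sketch; the archive path and member name are illustrative:

import zipfile

with zipfile.ZipFile('example-1.0.zip') as zf:        # illustrative archive
    opener = opener_from_zipfile(zf)
    with opener('example-1.0/PKG-INFO') as fh:        # illustrative member
        text = fh.read()                              # text, not bytes, on Python 3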
def get_letters( word ):
""" splits the word into a character-list of tamil/english
characters present in the stream """
ta_letters = list()
not_empty = False
WLEN,idx = len(word),0
while (idx < WLEN):
c = word[idx]
#print(idx,hex(ord(c)),len(ta_letters))
if c in uyir_letter_set or c == ayudha_letter:
ta_letters.append(c)
not_empty = True
elif c in grantha_agaram_set:
ta_letters.append(c)
not_empty = True
elif c in accent_symbol_set:
if not not_empty:
# odd situation
ta_letters.append(c)
not_empty = True
else:
#print("Merge/accent")
ta_letters[-1] += c
else:
if ord(c) < 256 or not (is_tamil_unicode(c)):
ta_letters.append( c )
else:
if not_empty:
#print("Merge/??")
ta_letters[-1]+= c
else:
ta_letters.append(c)
not_empty = True
idx = idx + 1
return ta_letters | splits the word into a character-list of tamil/english
characters present in the stream |
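For ASCII input every character becomes its own token; for Tamil input, accent and pulli marks are merged into the preceding base letter. The Tamil result below assumes the usual Open-Tamil letter sets backing the sets referenced above:

get_letters(u'hello')   # -> ['h', 'e', 'l', 'l', 'o']
get_letters(u'தமிழ்')    # -> ['த', 'மி', 'ழ்']  (base letter and accent kept together)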
def dzip(items1, items2, cls=dict):
"""
Zips elementwise pairs between items1 and items2 into a dictionary. Values
from items2 can be broadcast onto items1.
Args:
items1 (Iterable): full sequence
items2 (Iterable): can either be a sequence of one item or a sequence
of equal length to `items1`
cls (Type[dict]): dictionary type to use. Defaults to dict, but could
be ordered dict instead.
Returns:
dict: similar to dict(zip(items1, items2))
Example:
>>> assert dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([], [4]) == {}
"""
try:
len(items1)
except TypeError:
items1 = list(items1)
try:
len(items2)
except TypeError:
items2 = list(items2)
if len(items1) == 0 and len(items2) == 1:
# Corner case:
# allow the first list to be empty and the second list to broadcast a
# value. This means that the equality check wont work for the case
# where items1 and items2 are supposed to correspond, but the length of
# items2 is 1.
items2 = []
if len(items2) == 1 and len(items1) > 1:
items2 = items2 * len(items1)
if len(items1) != len(items2):
raise ValueError('out of alignment len(items1)=%r, len(items2)=%r' % (
len(items1), len(items2)))
return cls(zip(items1, items2)) | Zips elementwise pairs between items1 and items2 into a dictionary. Values
from items2 can be broadcast onto items1.
Args:
items1 (Iterable): full sequence
items2 (Iterable): can either be a sequence of one item or a sequence
of equal length to `items1`
cls (Type[dict]): dictionary type to use. Defaults to dict, but could
be ordered dict instead.
Returns:
dict: similar to dict(zip(items1, items2))
Example:
>>> assert dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([], [4]) == {} |
def render(self, name, value, attrs=None, renderer=None):
"""Include a hidden input to store the serialized upload value."""
location = getattr(value, '_seralized_location', '')
if location and not hasattr(value, 'url'):
value.url = '#'
if hasattr(self, 'get_template_substitution_values'):
# Django 1.8-1.10
self.template_with_initial = (
'%(initial_text)s: %(initial)s %(clear_template)s'
'<br />%(input_text)s: %(input)s')
attrs = attrs or {}
attrs.update({'data-upload-url': self.url})
hidden_name = self.get_hidden_name(name)
kwargs = {}
if django_version >= (1, 11):
kwargs['renderer'] = renderer
parent = super(StickyUploadWidget, self).render(name, value, attrs=attrs, **kwargs)
hidden = forms.HiddenInput().render(hidden_name, location, **kwargs)
return mark_safe(parent + '\n' + hidden) | Include a hidden input to store the serialized upload value. |
def parse_markdown(markdown_content, site_settings):
"""Parse markdown text to html.
:param markdown_content: Markdown text lists #TODO#
"""
markdown_extensions = set_markdown_extensions(site_settings)
html_content = markdown.markdown(
markdown_content,
extensions=markdown_extensions,
)
return html_content | Parse markdown text to html.
:param markdown_content: Markdown text lists #TODO# |
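A hedged sketch; `site_settings` must be whatever set_markdown_extensions() expects, which is not shown here, so it appears only as a placeholder:

html = parse_markdown('# Title\n\nSome *emphasised* text.', site_settings)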
def _to_autoassign(self):
"""Save :class:`~nmrstarlib.plsimulator.PeakList` into AutoAssign-formatted string.
:return: Peak list representation in AutoAssign format.
:rtype: :py:class:`str`
"""
autoassign_str = "#Index\t\t{}\t\tIntensity\t\tWorkbook\n".format(
"\t\t".join([str(i + 1) + "Dim" for i in range(len(self.labels))]))
for peak_idx, peak in enumerate(self):
dimensions_str = "\t\t".join([str(chemshift) for chemshift in peak.chemshifts_list])
autoassign_str += "{}\t\t{}\t\t{}\t\t{}\n".format(peak_idx+1, dimensions_str, 0, self.spectrum_name)
return autoassign_str | Save :class:`~nmrstarlib.plsimulator.PeakList` into AutoAssign-formatted string.
:return: Peak list representation in AutoAssign format.
:rtype: :py:class:`str` |