def _build_robots_txt_checker(cls, session: AppSession):
'''Build robots.txt checker.'''
if session.args.robots:
robots_txt_pool = session.factory.new('RobotsTxtPool')
robots_txt_checker = session.factory.new(
'RobotsTxtChecker',
web_client=session.factory['WebClient'],
robots_txt_pool=robots_txt_pool
)
            return robots_txt_checker
def get_schedule_by_regid_and_term(regid, term,
non_time_schedule_instructors=True,
per_section_prefetch_callback=None,
transcriptable_course="", **kwargs):
"""
Returns a uw_sws.models.ClassSchedule object
for the regid and term passed in.
"""
if "include_instructor_not_on_time_schedule" in kwargs:
include = kwargs["include_instructor_not_on_time_schedule"]
non_time_schedule_instructors = include
params = [
('reg_id', regid),
]
if transcriptable_course != "":
params.append(("transcriptable_course", transcriptable_course,))
params.extend([
('quarter', term.quarter),
('is_active', 'true'),
('year', term.year),
])
url = "{}?{}".format(registration_res_url_prefix, urlencode(params))
return _json_to_schedule(get_resource(url), term, regid,
non_time_schedule_instructors,
                             per_section_prefetch_callback)
def near_sphere(self, x, y, max_distance=None):
""" Return documents near the given point using sphere distances
"""
expr = {
self : {'$nearSphere' : [x, y]}
}
if max_distance is not None:
expr[self]['$maxDistance'] = max_distance
        return QueryExpression(expr)
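# A minimal usage sketch (hypothetical document class and field; assumes a
# MongoAlchemy-style query field to which near_sphere above is bound):
#
# expr = Place.location.near_sphere(-73.97, 40.77, max_distance=0.01)
# # The underlying expression dict is roughly:
# # {'location': {'$nearSphere': [-73.97, 40.77], '$maxDistance': 0.01}}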
def get_factors(self, node=None):
"""
Returns all the factors containing the node. If node is not specified
returns all the factors that have been added till now to the graph.
        Parameters
        ----------
        node: any hashable python object (optional)
            The node whose factors we want. If the node is not specified,
            all the factors added to the graph are returned.
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> student = MarkovModel([('Alice', 'Bob'), ('Bob', 'Charles')])
>>> factor1 = DiscreteFactor(['Alice', 'Bob'], cardinality=[2, 2],
... values=np.random.rand(4))
>>> factor2 = DiscreteFactor(['Bob', 'Charles'], cardinality=[2, 3],
... values=np.ones(6))
>>> student.add_factors(factor1,factor2)
>>> student.get_factors()
[<DiscreteFactor representing phi(Alice:2, Bob:2) at 0x7f8a0e9bf630>,
<DiscreteFactor representing phi(Bob:2, Charles:3) at 0x7f8a0e9bf5f8>]
>>> student.get_factors('Alice')
[<DiscreteFactor representing phi(Alice:2, Bob:2) at 0x7f8a0e9bf630>]
"""
if node:
if node not in self.nodes():
raise ValueError('Node not present in the Undirected Graph')
node_factors = []
for factor in self.factors:
if node in factor.scope():
node_factors.append(factor)
return node_factors
else:
            return self.factors
def _TerminateProcessByPid(self, pid):
"""Terminate a process that's monitored by the engine.
Args:
pid (int): process identifier (PID).
Raises:
KeyError: if the process is not registered with and monitored by the
engine.
"""
self._RaiseIfNotRegistered(pid)
process = self._processes_per_pid[pid]
self._TerminateProcess(process)
    self._StopMonitoringProcess(process)
def chain_input_files(self):
"""Return a list of the input files needed by this chain.
For `Link` sub-classes this will return only those files
that were not created by any internal `Link`
"""
ret_list = []
for key, val in self.file_dict.items():
# For chain input files we only want files that were not marked as output
# (I.e., not produced by some other step in the chain)
if val & FileFlags.in_ch_mask == FileFlags.input_mask:
ret_list.append(key)
        return ret_list
def load_sound(self, loc, title, group):
'''
Used internally when loading sounds. You should probably use
load_objects().
'''
self.sounds.setdefault(group, {})
        self.sounds[group][title] = Sound(loc, self)
def issues(self, from_date=DEFAULT_DATETIME,
offset=None, max_issues=MAX_ISSUES):
"""Get the information of a list of issues.
        :param from_date: retrieve issues that were updated from that date;
            dates are converted to UTC
        :param offset: starting position for the search
        :param max_issues: maximum number of issues to return per query
"""
resource = self.RISSUES + self.CJSON
ts = datetime_to_utc(from_date)
ts = ts.strftime("%Y-%m-%dT%H:%M:%SZ")
# By default, Redmine returns open issues only.
# Parameter 'status_id' is set to get all the statuses.
params = {
self.PSTATUS_ID: '*',
self.PSORT: self.PUPDATED_ON,
self.PUPDATED_ON: '>=' + ts,
self.PLIMIT: max_issues
}
if offset is not None:
params[self.POFFSET] = offset
response = self._call(resource, params)
        return response
def can_update_repositories(self):
"""Tests if this user can update ``Repositories``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known updating a
``Repository`` will result in a ``PermissionDenied``. This is
intended as a hint to an application that may not wish to offer
update operations to unauthorized users.
:return: ``false`` if ``Repository`` modification is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
url_path = construct_url('authorization',
bank_id=self._catalog_idstr)
        return self._get_request(url_path)['objectiveBankHints']['canUpdate']
def identity(ctx, variant_id):
"""Check how well SVs are working in the database
"""
if not variant_id:
LOG.warning("Please provide a variant id")
ctx.abort()
adapter = ctx.obj['adapter']
version = ctx.obj['version']
LOG.info("Search variants {0}".format(adapter))
result = adapter.get_clusters(variant_id)
if result.count() == 0:
LOG.info("No hits for variant %s", variant_id)
return
for res in result:
        click.echo(res)
def match_var(self, tokens, item):
"""Matches a variable."""
setvar, = tokens
if setvar != wildcard:
if setvar in self.names:
self.add_check(self.names[setvar] + " == " + item)
else:
self.add_def(setvar + " = " + item)
                self.names[setvar] = item
def _context_source_file_url(path_or_url):
"""
Returns a URL for a remote or local context CSV file
"""
if path_or_url.startswith('http'):
# Remote CSV. Just return the URL
return path_or_url
if path_or_url.startswith('/'):
# Absolute path
return "file://" + path_or_url
return "file://" + os.path.join(os.path.realpath(os.getcwd()), path_or_url) | Returns a URL for a remote or local context CSV file |
def is_free_chunk(self, chk):
"""Check the chunk is free or not"""
cs = self.get_chunk_status(chk)
if cs & 0x1 != 0:
return True
        return False
def until_some(*args, **kwargs):
"""Return a future that resolves when some of the passed futures resolve.
The futures can be passed as either a sequence of *args* or a dict of
*kwargs* (but not both). Some additional keyword arguments are supported,
as described below. Once a specified number of underlying futures have
resolved, the returned future resolves as well, or a timeout could be
raised if specified.
Parameters
----------
done_at_least : None or int
Number of futures that need to resolve before this resolves or None
to wait for all (default None)
timeout : None or float
Timeout in seconds, or None for no timeout (the default)
Returns
-------
This command returns a tornado Future that resolves with a list of
(index, value) tuples containing the results of all futures that resolved,
with corresponding indices (numbers for *args* futures or keys for *kwargs*
futures).
Raises
------
:class:`tornado.gen.TimeoutError`
If operation times out before the requisite number of futures resolve
"""
done_at_least = kwargs.pop('done_at_least', None)
timeout = kwargs.pop('timeout', None)
# At this point args and kwargs are either empty or contain futures only
if done_at_least is None:
done_at_least = len(args) + len(kwargs)
wait_iterator = tornado.gen.WaitIterator(*args, **kwargs)
maybe_timeout = future_timeout_manager(timeout)
results = []
while not wait_iterator.done():
result = yield maybe_timeout(wait_iterator.next())
results.append((wait_iterator.current_index, result))
if len(results) >= done_at_least:
break
    raise tornado.gen.Return(results)
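# A minimal usage sketch (assumes a tornado coroutine context; until_some and
# future_timeout_manager come from the same module as above):
#
# @tornado.gen.coroutine
# def wait_for_two(f_temp, f_pressure, f_humidity):
#     # Resolve as soon as any two of the three futures complete, or raise
#     # tornado.gen.TimeoutError after 5 seconds.
#     results = yield until_some(f_temp, f_pressure, f_humidity,
#                                done_at_least=2, timeout=5.0)
#     raise tornado.gen.Return(dict(results))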
def description_of(file, name='stdin'):
"""Return a string describing the probable encoding of a file."""
u = UniversalDetector()
for line in file:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '%s: %s with confidence %s' % (name,
result['encoding'],
result['confidence'])
else:
        return '%s: no result' % name
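# Usage sketch (chardet-style UniversalDetector; the file must be opened in
# binary mode so every line fed to the detector is a bytes object):
#
# with open('example.txt', 'rb') as f:
#     print(description_of(f, name='example.txt'))
# # e.g. "example.txt: utf-8 with confidence 0.99"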
def restore(s, t):
"""
s is the source string, it can contain '.'
t is the target, it's smaller than s by the number of '.'s in s
Each char in s is replaced by the corresponding
char in t, jumping over '.'s in s.
>>> restore('ABC.DEF', 'XYZABC')
'XYZ.ABC'
"""
t = (c for c in t)
    return ''.join(next(t) if not is_blacksquare(c) else c for c in s)
def event(self, event):
"""Qt method override."""
if event.type() in (QEvent.Shortcut, QEvent.ShortcutOverride):
return True
else:
            return super(ShortcutEditor, self).event(event)
def selector(C, style):
"""return the selector for the given stylemap style"""
clas = C.classname(style.name)
if style.type == 'paragraph':
# heading outline levels are 0..7 internally, indicating h1..h8
outlineLvl = int((style.properties.get('outlineLvl') or {}).get('val') or 8) + 1
if outlineLvl < 9:
tag = 'h%d' % outlineLvl
else:
tag = 'p'
elif style.type == 'character':
tag = 'span'
elif style.type == 'table':
tag = 'table'
elif style.type == 'numbering':
tag = 'ol'
return "%s.%s" % (tag, clas) | return the selector for the given stylemap style |
def get_queryset(self, **kwargs):
"""
Gets our queryset. This takes care of filtering if there are any
fields to filter by.
"""
queryset = self.derive_queryset(**kwargs)
        return self.order_queryset(queryset)
def in_order(self) -> Iterator["BSP"]:
"""Iterate over this BSP's hierarchy in order.
.. versionadded:: 8.3
"""
if self.children:
yield from self.children[0].in_order()
yield self
yield from self.children[1].in_order()
else:
            yield self
def make_assignment(instr, queue, stack):
"""
Make an ast.Assign node.
"""
value = make_expr(stack)
# Make assignment targets.
# If there are multiple assignments (e.g. 'a = b = c'),
# each LHS expression except the last is preceded by a DUP_TOP instruction.
# Thus, we make targets until we don't see a DUP_TOP, and then make one
# more.
targets = []
while isinstance(instr, instrs.DUP_TOP):
targets.append(make_assign_target(queue.popleft(), queue, stack))
instr = queue.popleft()
targets.append(make_assign_target(instr, queue, stack))
    return ast.Assign(targets=targets, value=value)
def recursive_update(d, u):
"""
Dict recursive update.
Based on Alex Martelli code on stackoverflow
http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth?answertab=votes#tab-top
:param d: dict to update
:param u: dict with new data
:return:
"""
for k, v in u.iteritems():
if isinstance(v, collections.Mapping):
r = recursive_update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
    return d
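# Illustrative call. Note the implementation above is Python 2 style
# (u.iteritems, collections.Mapping); the Python 3 equivalents would be
# u.items() and collections.abc.Mapping.
#
# base = {'db': {'host': 'localhost', 'port': 5432}}
# override = {'db': {'port': 5433}, 'debug': True}
# recursive_update(base, override)
# # -> {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}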
def get_fault(self, reply):
"""
Extract the fault from the specified soap reply. If I{faults} is True,
an exception is raised. Otherwise, the I{unmarshalled} fault L{Object}
is returned. This method is called when the server raises a
I{web fault}.
@param reply: A soap reply message.
@type reply: str
@return: A fault object.
@rtype: tuple ( L{Element}, L{Object} )
"""
reply = self.replyfilter(reply)
sax = Parser()
faultroot = sax.parse(string=reply)
soapenv = faultroot.getChild('Envelope')
soapbody = soapenv.getChild('Body')
fault = soapbody.getChild('Fault')
unmarshaller = self.unmarshaller(False)
p = unmarshaller.process(fault)
if self.options().faults:
raise WebFault(p, faultroot)
        return (faultroot, p.detail)
def batch_message_from_parts(cls, messages):
'''Convert messages, one per batch item, into a batch message. At
least one message must be passed.
'''
# Comma-separate the messages and wrap the lot in square brackets
middle = b', '.join(messages)
if not middle:
raise ProtocolError.empty_batch()
        return b''.join([b'[', middle, b']'])
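# Framing sketch (batch_message_from_parts is a classmethod, so it is called
# on its protocol class; the messages below are made-up JSON-RPC payloads):
#
# parts = [b'{"id": 1, "method": "ping"}', b'{"id": 2, "method": "time"}']
# SomeProtocol.batch_message_from_parts(parts)
# # -> b'[{"id": 1, "method": "ping"}, {"id": 2, "method": "time"}]'
# # An empty sequence raises ProtocolError.empty_batch() instead.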
def _choices(self):
"""
Generate a string of choices as key/value pairs
:return: string
"""
# Generate key/value strings
pairs = []
for key, value in self.choices.items():
pairs.append(str(value) + "=" + str(key))
# Assemble into overall string and escape
        return GPTaskSpec.manifest_escape(";".join(pairs))
def nl_msg_dump(msg, ofd=_LOGGER.debug):
"""Dump message in human readable format to callable.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L970
Positional arguments:
msg -- message to print (nl_msg class instance).
Keyword arguments:
ofd -- function to call with arguments similar to `logging.debug`.
"""
hdr = nlmsg_hdr(msg)
ofd('-------------------------- BEGIN NETLINK MESSAGE ---------------------------')
ofd(' [NETLINK HEADER] %d octets', hdr.SIZEOF)
print_hdr(ofd, msg)
if hdr.nlmsg_type == libnl.linux_private.netlink.NLMSG_ERROR:
dump_error_msg(msg, ofd)
elif nlmsg_len(hdr) > 0:
print_msg(msg, ofd, hdr)
    ofd('--------------------------- END NETLINK MESSAGE ---------------------------')
def from_cone(cls, center,
radius=3*u.arcmin,
magnitudelimit=None,
**kw):
'''
Create a Constellation from a cone search of the sky,
characterized by a positional center and a radius from it.
Parameters
----------
center : SkyCoord object
The center around which the query will be made.
radius : float, with units of angle
The angular radius for the query.
magnitudelimit : float
The maximum magnitude to include in the download.
(This is explicitly thinking UV/optical/IR, would
need to change to flux to be able to include other
wavelengths.)
'''
# make sure the center is a SkyCoord
center = parse_center(center)
criteria = {}
if magnitudelimit is not None:
criteria[cls.defaultfilter + 'mag'] = '<{}'.format(magnitudelimit)
v = Vizier(columns=cls.columns,
column_filters=criteria)
v.ROW_LIMIT = -1
# run the query
print('querying Vizier for {}, centered on {} with radius {}, for G<{}'.format(cls.name, center, radius, magnitudelimit))
table = v.query_region(coordinates=center,
radius=radius,
catalog=cls.catalog)[0]
# store the search parameters in this object
c = cls(cls.standardize_table(table))
c.standardized.meta['catalog'] = cls.catalog
c.standardized.meta['center'] = center
c.standardized.meta['radius'] = radius
c.standardized.meta['magnitudelimit'] = magnitudelimit
#c.center = center
#c.radius = radius
#c.magnitudelimit = magnitudelimit or cls.magnitudelimit
        return c
def __solve(self, lvl, x, b, cycle):
"""Multigrid cycling.
Parameters
----------
lvl : int
Solve problem on level `lvl`
x : numpy array
Initial guess `x` and return correction
b : numpy array
Right-hand side for Ax=b
cycle : {'V','W','F','AMLI'}
            Recursively called cycling function. Defines the type of cycle used:
cycle = 'V', V-cycle
cycle = 'W', W-cycle
cycle = 'F', F-cycle
cycle = 'AMLI', AMLI-cycle
"""
A = self.levels[lvl].A
self.levels[lvl].presmoother(A, x, b)
residual = b - A * x
coarse_b = self.levels[lvl].R * residual
coarse_x = np.zeros_like(coarse_b)
if lvl == len(self.levels) - 2:
coarse_x[:] = self.coarse_solver(self.levels[-1].A, coarse_b)
else:
if cycle == 'V':
self.__solve(lvl + 1, coarse_x, coarse_b, 'V')
elif cycle == 'W':
self.__solve(lvl + 1, coarse_x, coarse_b, cycle)
self.__solve(lvl + 1, coarse_x, coarse_b, cycle)
elif cycle == 'F':
self.__solve(lvl + 1, coarse_x, coarse_b, cycle)
self.__solve(lvl + 1, coarse_x, coarse_b, 'V')
elif cycle == "AMLI":
# Run nAMLI AMLI cycles, which compute "optimal" corrections by
# orthogonalizing the coarse-grid corrections in the A-norm
nAMLI = 2
Ac = self.levels[lvl + 1].A
p = np.zeros((nAMLI, coarse_b.shape[0]), dtype=coarse_b.dtype)
beta = np.zeros((nAMLI, nAMLI), dtype=coarse_b.dtype)
for k in range(nAMLI):
# New search direction --> M^{-1}*residual
p[k, :] = 1
self.__solve(lvl + 1, p[k, :].reshape(coarse_b.shape),
coarse_b, cycle)
# Orthogonalize new search direction to old directions
for j in range(k): # loops from j = 0...(k-1)
beta[k, j] = np.inner(p[j, :].conj(), Ac * p[k, :]) /\
np.inner(p[j, :].conj(), Ac * p[j, :])
p[k, :] -= beta[k, j] * p[j, :]
# Compute step size
Ap = Ac * p[k, :]
alpha = np.inner(p[k, :].conj(), np.ravel(coarse_b)) /\
np.inner(p[k, :].conj(), Ap)
# Update solution
coarse_x += alpha * p[k, :].reshape(coarse_x.shape)
# Update residual
coarse_b -= alpha * Ap.reshape(coarse_b.shape)
else:
raise TypeError('Unrecognized cycle type (%s)' % cycle)
x += self.levels[lvl].P * coarse_x # coarse grid correction
        self.levels[lvl].postsmoother(A, x, b)
def in_period(period, dt=None):
"""
Determines if a datetime is within a certain time period. If the time
is omitted the current time will be used.
    in_period returns True if the datetime is within the time period, False if not.
If the expression is malformed a TimePeriod.InvalidFormat exception
will be raised. (Note that this differs from Time::Period, which
returns -1 if the expression is invalid).
The format for the time period is like Perl's Time::Period module,
which is documented in some detail here:
http://search.cpan.org/~pryan/Period-1.20/Period.pm
Here's the quick and dirty version.
    Each period is composed of one or more sub-periods separated by a comma.
A datetime must match at least one of the sub periods to be considered
in that time period.
Each sub-period is composed of one or more tests, like so:
scale {value}
scale {a-b}
scale {a b c}
The datetime must pass each test for a sub-period for the sub-period to
be considered true.
For example:
Match Mondays
wd {mon}
Match Monday mornings
wd {mon} hr {9-16}
Match Monday morning or Friday afternoon
wd {mon} hr {0-12}, wd {fri} hr {0-12}
Valid scales are:
year
month
week
yday
mday
wday
hour
minute
second
Those can be substituted with their corresponding code:
    yr
mo
wk
yd
md
wd
hr
min
sec
"""
if dt is None:
dt = datetime.now()
# transform whatever crazy format we're given and turn it into
# something like this:
#
# md{1}|hr{midnight-noon},md{2}|hr{noon-midnight}
period = re.sub(r"^\s*|\s*$", '', period)
period = re.sub(r"\s*(?={|$)", '', period)
period = re.sub(r",\s*", ',', period)
period = re.sub(r"\s*-\s*", '-', period)
period = re.sub(r"{\s*", '{', period)
period = re.sub(r"\s*}\s*", '}', period)
period = re.sub(r"}(?=[^,])", '}|', period)
period = period.lower()
if period == '':
return True
sub_periods = re.split(',', period)
# go through each sub-period until one matches (OR logic)
for sp in sub_periods:
if _is_in_sub_period(sp, dt):
return True
    return False
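# A couple of concrete checks against the grammar above (dates are
# hypothetical; 2023-01-02 is a Monday):
#
# from datetime import datetime
# monday_morning = datetime(2023, 1, 2, 10, 30)
# in_period('wd {mon} hr {9-16}', monday_morning)   # -> True
# in_period('wd {sat sun}', monday_morning)         # -> False
# in_period('', monday_morning)                     # empty period -> True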
def column_width(tokens):
"""
Return a suitable column width to display one or more strings.
"""
get_len = tools.display_len if PY3 else len
lens = sorted(map(get_len, tokens or [])) or [0]
width = lens[-1]
# adjust for disproportionately long strings
if width >= 18:
most = lens[int(len(lens) * 0.9)]
if most < width + 6:
return most
    return width
def duplicate(
self,
insert_sheet_index=None,
new_sheet_id=None,
new_sheet_name=None
):
"""Duplicate the sheet.
:param int insert_sheet_index: (optional) The zero-based index
where the new sheet should be inserted.
The index of all sheets after this are
incremented.
:param int new_sheet_id: (optional) The ID of the new sheet.
If not set, an ID is chosen. If set, the ID
must not conflict with any existing sheet ID.
If set, it must be non-negative.
:param str new_sheet_name: (optional) The name of the new sheet.
If empty, a new name is chosen for you.
:returns: a newly created :class:`<gspread.models.Worksheet>`.
.. versionadded:: 3.1.0
"""
return self.spreadsheet.duplicate_sheet(
self.id,
insert_sheet_index,
new_sheet_id,
new_sheet_name
        )
def create_float(self, value: float) -> Float:
"""
Creates a new :class:`ConstantFloat`, adding it to the pool and
returning it.
:param value: The value of the new float.
"""
self.append((4, value))
        return self.get(self.raw_count - 1)
def create_url(self):
"""
Return upload url.
Makes request to tus server to create a new upload url for the required file upload.
"""
headers = self.headers
headers['upload-length'] = str(self.file_size)
headers['upload-metadata'] = ','.join(self.encode_metadata())
resp = requests.post(self.client.url, headers=headers)
url = resp.headers.get("location")
if url is None:
msg = 'Attempt to retrieve create file url with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
        return urljoin(self.client.url, url)
async def on_raw_cap_ls(self, params):
""" Update capability mapping. Request capabilities. """
to_request = set()
for capab in params[0].split():
capab, value = self._capability_normalize(capab)
# Only process new capabilities.
if capab in self._capabilities:
continue
# Check if we support the capability.
attr = 'on_capability_' + pydle.protocol.identifierify(capab) + '_available'
supported = (await getattr(self, attr)(value)) if hasattr(self, attr) else False
if supported:
if isinstance(supported, str):
to_request.add(capab + CAPABILITY_VALUE_DIVIDER + supported)
else:
to_request.add(capab)
else:
self._capabilities[capab] = False
if to_request:
# Request some capabilities.
self._capabilities_requested.update(x.split(CAPABILITY_VALUE_DIVIDER, 1)[0] for x in to_request)
await self.rawmsg('CAP', 'REQ', ' '.join(to_request))
else:
# No capabilities requested, end negotiation.
            await self.rawmsg('CAP', 'END')
def find_connection(self):
'''find an antenna tracker connection if possible'''
if self.connection is not None:
return self.connection
for m in self.mpstate.mav_master:
if 'HEARTBEAT' in m.messages:
if m.messages['HEARTBEAT'].type == mavutil.mavlink.MAV_TYPE_ANTENNA_TRACKER:
return m
        return None
def _explicit_close(napalm_device):
'''
    Will explicitly close the config session with the network device,
    when running in a non-always-alive proxy minion or regular minion.
This helper must be used in configuration-related functions,
as the session is preserved and not closed before making any changes.
'''
if salt.utils.napalm.not_always_alive(__opts__):
# force closing the configuration session
# when running in a non-always-alive proxy
# or regular minion
try:
napalm_device['DRIVER'].close()
except Exception as err:
log.error('Unable to close the temp connection with the device:')
log.error(err)
            log.error('Please report.')
def map_wrap(f):
"""Wrap standard function to easily pass into 'map' processing.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
    return wrapper
def fold(data, prefix='', delimeter='__'):
"""
>>> _dd(fold({'a__a': 4}))
"{'a': {'a': 4}}"
>>> _dd(fold({'a__a': 4, 'a__b': 5}))
"{'a': {'a': 4, 'b': 5}}"
>>> _dd(fold({'a__1': 2, 'a__0': 1, 'a__2': 3}))
"{'a': [1, 2, 3]}"
>>> _dd(fold({'form__a__b': 5, 'form__a__a': 4}, 'form'))
"{'a': {'a': 4, 'b': 5}}"
>>> _dd(fold({'form__a__b': 5, 'form__a__a__0': 4, 'form__a__a__1': 7}, 'form'))
"{'a': {'a': [4, 7], 'b': 5}}"
>>> repr(fold({'form__1__b': 5, 'form__0__a__0': 4, 'form__0__a__1': 7}, 'form'))
"[{'a': [4, 7]}, {'b': 5}]"
"""
if not isinstance(delimeter, (tuple, list)):
delimeter = (delimeter, )
def deep(data):
if len(data) == 1 and len(data[0][0]) < 2:
if data[0][0]:
return {data[0][0][0]: data[0][1]}
return data[0][1]
collect = {}
for key, group in groupby(data, lambda kv: kv[0][0]):
nest_data = [(k[1:], v) for k, v in group]
collect[key] = deep(nest_data)
is_num = all(k.isdigit() for k in collect.keys())
if is_num:
return [i[1] for i in sorted(collect.items())]
return collect
data_ = [
(split(key, delimeter), value)
for key, value in sorted(data.items())
]
result = deep(data_)
    return result[prefix] if prefix else result
def derive_fields(self):
"""
Default implementation
"""
fields = []
if self.fields:
fields.append(self.fields)
        return fields
def foreign(self, value, context=None):
"""Construct a string-like representation for an iterable of string-like objects."""
if self.separator is None:
separator = ' '
else:
separator = self.separator.strip() if self.strip and hasattr(self.separator, 'strip') else self.separator
value = self._clean(value)
try:
value = separator.join(value)
except Exception as e:
raise Concern("{0} caught, failed to convert to string: {1}", e.__class__.__name__, str(e))
        return super().foreign(value)
def Pitzer(T, Tc, omega):
r'''Calculates enthalpy of vaporization at arbitrary temperatures using a
fit by [2]_ to the work of Pitzer [1]_; requires a chemical's critical
temperature and acentric factor.
The enthalpy of vaporization is given by:
.. math::
\frac{\Delta_{vap} H}{RT_c}=7.08(1-T_r)^{0.354}+10.95\omega(1-T_r)^{0.456}
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
omega : float
Acentric factor [-]
Returns
-------
Hvap : float
Enthalpy of vaporization, [J/mol]
Notes
-----
This equation is listed in [3]_, page 2-487 as method #2 for estimating
Hvap. This cites [2]_.
The recommended range is 0.6 to 1 Tr. Users should expect up to 5% error.
T must be under Tc, or an exception is raised.
The original article has been reviewed and found to have a set of tabulated
values which could be used instead of the fit function to provide additional
accuracy.
Examples
--------
Example as in [3]_, p2-487; exp: 37.51 kJ/mol
>>> Pitzer(452, 645.6, 0.35017)
36696.736640106414
References
----------
.. [1] Pitzer, Kenneth S. "The Volumetric and Thermodynamic Properties of
Fluids. I. Theoretical Basis and Virial Coefficients."
Journal of the American Chemical Society 77, no. 13 (July 1, 1955):
3427-33. doi:10.1021/ja01618a001
.. [2] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.
New York: McGraw-Hill Professional, 2000.
.. [3] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
Eighth Edition. McGraw-Hill Professional, 2007.
'''
Tr = T/Tc
    return R*Tc * (7.08*(1. - Tr)**0.354 + 10.95*omega*(1. - Tr)**0.456)
def download_listing(self, file: Optional[IO],
duration_timeout: Optional[float]=None) -> \
ListingResponse:
'''Read file listings.
Args:
file: A file object or asyncio stream.
            duration_timeout: Maximum time in seconds within which the
                entire file must be read.
        Returns:
            A Response populated with the file listings
Be sure to call :meth:`start_file_listing` first.
Coroutine.
'''
if self._session_state != SessionState.directory_request_sent:
raise RuntimeError('File request not sent')
self._session_state = SessionState.file_request_sent
yield from self.download(file=file, rewind=False,
duration_timeout=duration_timeout)
try:
if self._response.body.tell() == 0:
listings = ()
elif self._listing_type == 'mlsd':
self._response.body.seek(0)
machine_listings = wpull.protocol.ftp.util.parse_machine_listing(
self._response.body.read().decode('utf-8',
errors='surrogateescape'),
convert=True, strict=False
)
listings = list(
wpull.protocol.ftp.util.machine_listings_to_file_entries(
machine_listings
))
else:
self._response.body.seek(0)
file = io.TextIOWrapper(self._response.body, encoding='utf-8',
errors='surrogateescape')
listing_parser = ListingParser(file=file)
listings = list(listing_parser.parse_input())
_logger.debug('Listing detected as %s', listing_parser.type)
# We don't want the file to be closed when exiting this function
file.detach()
except (ListingError, ValueError) as error:
raise ProtocolError(*error.args) from error
self._response.files = listings
self._response.body.seek(0)
self._session_state = SessionState.response_received
        return self._response
def meta(self):
"""
Get metadata from the query itself. This is guaranteed to only
return a Python dictionary.
Note that if the query failed, the metadata might not be in JSON
format, in which case there may be additional, non-JSON data
which can be retrieved using the following
::
raw_meta = req.raw.value
:return: A dictionary containing the query metadata
"""
if not self.__meta_received:
raise RuntimeError(
'This property only valid once all rows are received!')
if isinstance(self.raw.value, dict):
return self.raw.value
        return {}
def unmasked(self, depth=0.01):
"""Return the unmasked overfitting metric for a given transit depth."""
return 1 - (np.hstack(self._O2) +
                    np.hstack(self._O3) / depth) / np.hstack(self._O1)
async def async_init(self) -> None:
"""Create a Tile session."""
if not self._client_established:
await self.request(
'put',
'clients/{0}'.format(self.client_uuid),
data={
'app_id': DEFAULT_APP_ID,
'app_version': DEFAULT_APP_VERSION,
'locale': self._locale
})
self._client_established = True
resp = await self.request(
'post',
'clients/{0}/sessions'.format(self.client_uuid),
data={
'email': self._email,
'password': self._password
})
if not self.user_uuid:
self.user_uuid = resp['result']['user']['user_uuid']
self._session_expiry = resp['result']['session_expiration_timestamp']
        self.tiles = Tile(self.request, self.user_uuid)
def _check_data_port_id(self, data_port):
"""Checks the validity of a data port id
        Checks whether the id of the given data port is already used by another data port (input, output, scoped vars)
within the state.
:param rafcon.core.data_port.DataPort data_port: The data port to be checked
:return bool validity, str message: validity is True, when the data port is valid, False else. message gives
more information especially if the data port is not valid
"""
# First check inputs and outputs
valid, message = super(ContainerState, self)._check_data_port_id(data_port)
if not valid:
return False, message
# Container state also has scoped variables
for scoped_variable_id, scoped_variable in self.scoped_variables.items():
if data_port.data_port_id == scoped_variable_id and data_port is not scoped_variable:
return False, "data port id already existing in state"
        return True, message
def doc(self, export='plain'):
"""
Dump help document for setting classes
"""
rows = []
title = '<{:s}> config options'.format(self.__class__.__name__)
table = Tab(export=export, title=title)
for opt in sorted(self.config_descr):
if hasattr(self, opt):
c1 = opt
c2 = self.config_descr[opt]
c3 = self.__dict__.get(opt, '')
c4 = self.get_alt(opt)
rows.append([c1, c2, c3, c4])
else:
print('Setting {:s} has no {:s} option. Correct in config_descr.'.
format(self.__class__.__name__, opt))
table.add_rows(rows, header=False)
table.header(['Option', 'Description', 'Value', 'Alt.'])
        return table.draw()
def _get_default_value_to_cache(self, xblock):
"""
Perform special logic to provide a field's default value for caching.
"""
try:
# pylint: disable=protected-access
return self.from_json(xblock._field_data.default(xblock, self.name))
except KeyError:
if self._default is UNIQUE_ID:
return self._check_or_enforce_type(self._calculate_unique_id(xblock))
else:
                return self.default
def housekeeping(self, **kwargs):
"""Start the housekeeping task.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabHousekeepingError: If the server failed to perform the
request
"""
path = '/projects/%s/housekeeping' % self.get_id()
        self.manager.gitlab.http_post(path, **kwargs)
def setEmergencyDecel(self, typeID, decel):
"""setDecel(string, double) -> None
Sets the maximal physically possible deceleration in m/s^2 of vehicles of this type.
"""
self._connection._sendDoubleCmd(
            tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_EMERGENCY_DECEL, typeID, decel)
def order_quote(self, quote_id, extra):
"""Places an order using a quote
::
extras = {
'hardware': {'hostname': 'test', 'domain': 'testing.com'},
'quantity': 2
}
manager = ordering.OrderingManager(env.client)
result = manager.order_quote(12345, extras)
:param int quote_id: ID for the target quote
:param dictionary extra: Overrides for the defaults of SoftLayer_Container_Product_Order
:param int quantity: Quantity to override default
"""
container = self.generate_order_template(quote_id, extra)
        return self.client.call('SoftLayer_Billing_Order_Quote', 'placeOrder', container, id=quote_id)
def _serialize_value(self, value):
"""
Called by :py:meth:`._serialize` to serialise an individual value.
"""
if isinstance(value, (list, tuple, set)):
return [self._serialize_value(v) for v in value]
elif isinstance(value, dict):
return dict([(k, self._serialize_value(v)) for k, v in value.items()])
elif isinstance(value, ModelBase):
return value._serialize()
elif isinstance(value, datetime.date): # includes datetime.datetime
return value.isoformat()
else:
            return value
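# Behaviour sketch: the method recurses through lists, tuples, sets and dicts,
# delegates to each ModelBase instance's own _serialize(), and turns dates
# into ISO-8601 strings. For example (hypothetical model instance `m`):
#
# m._serialize_value({'when': datetime.date(2020, 1, 31), 'tags': ['a', 'b']})
# # -> {'when': '2020-01-31', 'tags': ['a', 'b']}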
def generate_config_file():
"""
Generate a config file for a ProTECT run on hg19.
:return: None
"""
shutil.copy(os.path.join(os.path.dirname(__file__), 'input_parameters.yaml'),
                os.path.join(os.getcwd(), 'ProTECT_config.yaml'))
def update(data, id, medium, credentials):
"""Updates the [medium] with the given id and data on the user's [medium]List.
    :param data: The data for the [medium] to update.
    :param id: The id of the data to update.
    :param medium: Anime or manga (tokens.Medium.ANIME or tokens.Medium.MANGA).
    :raise ValueError: For bad arguments.
"""
    _op(data, id, medium, tokens.Operations.UPDATE, credentials)
def head(self, url, **kwargs):
r"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
        return self.request('HEAD', url, **kwargs)
def append_panel(panels, size_x, size_y, max_col=12):
"""
Appends a panel to the list of panels. Finds the highest palce at the left for the new panel.
:param panels:
:param size_x:
:param size_y:
:param max_col:
:return: a new panel or None if it is not possible to place a panel with such size_x
"""
bottom_lines = bottoms(panels)
shape = find_shape(bottom_lines, max_col)
lines = longest_lines(shape)
line = find_place(lines, size_x)
if not line:
return
panel = {
'col': line['col'],
'row': line['row'],
'size_x': size_x,
'size_y': size_y,
}
panels.append(panel)
    return panel
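# Usage sketch for the 12-column grid above (the helpers bottoms, find_shape,
# longest_lines and find_place are assumed to come from the same module):
#
# panels = []
# append_panel(panels, size_x=6, size_y=4)   # first panel, placed top-left
# append_panel(panels, size_x=6, size_y=4)   # fits beside the first panel
# append_panel(panels, size_x=13, size_y=2)  # -> None, wider than max_col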
def apply_to_field_if_exists(effect, field_name, fn, default):
"""
Apply function to specified field of effect if it is not None,
otherwise return default.
"""
value = getattr(effect, field_name, None)
if value is None:
return default
else:
        return fn(value)
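# Tiny illustration (hypothetical effect object with a transcript_id field):
#
# apply_to_field_if_exists(effect, 'transcript_id', fn=len, default=0)
# # returns len(effect.transcript_id) when the attribute exists and is not
# # None, and 0 otherwise.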
def list_drafts(self):
"""
        A filterable list view of layers, returning the draft version of each layer.
If the most recent version of a layer or table has been published already,
it won’t be returned here.
"""
target_url = self.client.get_url('LAYER', 'GET', 'multidraft')
        return base.Query(self, target_url)
def dlogpdf_link_dr(self, inv_link_f, y, Y_metadata=None):
"""
Gradient of the log-likelihood function at y given f, w.r.t shape parameter
.. math::
:param inv_link_f: latent variables link(f)
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: includes censoring information in dictionary key 'censored'
        :returns: derivative of likelihood evaluated at points f w.r.t. the shape parameter r
:rtype: float
"""
# c = Y_metadata['censored']
# c = np.zeros((y.shape[0],))
c = np.zeros_like(y)
if Y_metadata is not None and 'censored' in Y_metadata.keys():
c = Y_metadata['censored']
link_f = inv_link_f #FIXME: Change names consistently...
y_link_f = y/link_f
log_y_link_f = np.log(y) - np.log(link_f)
y_link_f_r = y_link_f**self.r
#In terms of link_f
censored = c*(-y_link_f_r*log_y_link_f/(1 + y_link_f_r))
uncensored = (1-c)*(1./self.r + np.log(y) - np.log(link_f) - (2*y_link_f_r*log_y_link_f) / (1 + y_link_f_r))
dlogpdf_dr = censored + uncensored
        return dlogpdf_dr
def workspace_from_url(self, mets_url, dst_dir=None, clobber_mets=False, mets_basename=None, download=False, baseurl=None):
"""
Create a workspace from a METS by URL.
Sets the mets.xml file
Arguments:
mets_url (string): Source mets URL
dst_dir (string, None): Target directory for the workspace
clobber_mets (boolean, False): Whether to overwrite existing mets.xml. By default existing mets.xml will raise an exception.
download (boolean, False): Whether to download all the files
baseurl (string, None): Base URL for resolving relative file locations
Returns:
Workspace
"""
if dst_dir and not dst_dir.startswith('/'):
dst_dir = abspath(dst_dir)
if mets_url is None:
if baseurl is None:
raise Exception("Must pass mets_url and/or baseurl to workspace_from_url")
else:
mets_url = 'file://%s/%s' % (baseurl, mets_basename if mets_basename else 'mets.xml')
if baseurl is None:
baseurl = mets_url.rsplit('/', 1)[0]
log.debug("workspace_from_url\nmets_url='%s'\nbaseurl='%s'\ndst_dir='%s'", mets_url, baseurl, dst_dir)
# resolve to absolute
if '://' not in mets_url:
mets_url = 'file://%s' % abspath(mets_url)
if dst_dir is None:
# if mets_url is a file-url assume working directory is source directory
if mets_url.startswith('file://'):
# if dst_dir was not given and mets_url is a file assume that
# dst_dir should be the directory where the mets.xml resides
dst_dir = dirname(mets_url[len('file://'):])
else:
dst_dir = tempfile.mkdtemp(prefix=TMP_PREFIX)
log.debug("Creating workspace '%s' for METS @ <%s>", dst_dir, mets_url)
# if mets_basename is not given, use the last URL segment of the mets_url
if mets_basename is None:
mets_basename = mets_url \
.rsplit('/', 1)[-1] \
.split('?')[0] \
.split('#')[0]
dst_mets = join(dst_dir, mets_basename)
log.debug("Copying mets url '%s' to '%s'", mets_url, dst_mets)
if 'file://' + dst_mets == mets_url:
log.debug("Target and source mets are identical")
else:
if exists(dst_mets) and not clobber_mets:
raise Exception("File '%s' already exists but clobber_mets is false" % dst_mets)
else:
self.download_to_directory(dst_dir, mets_url, basename=mets_basename)
workspace = Workspace(self, dst_dir, mets_basename=mets_basename, baseurl=baseurl)
if download:
for f in workspace.mets.find_files():
workspace.download_file(f)
        return workspace
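# Usage sketch (hypothetical URL and directory; called on a Resolver-like
# object that provides download_to_directory):
#
# workspace = resolver.workspace_from_url(
#     'https://example.org/ocr/mets.xml', dst_dir='/tmp/ws', download=False)
# # The METS file is copied to /tmp/ws/mets.xml and a Workspace wrapping it
# # is returned; with download=True every file listed in the METS would be
# # fetched as well.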
def add_backends(self, *backends):
"""
See the documentation for __init__() to see an explanation of the
*backends argument.
"""
for backend in backends:
full = self._expand_host(backend)
self.backends[full] = 0
            self.task_counter[full] = 0
def FindUnspentCoins(self, from_addr=None, use_standard=False, watch_only_val=0):
"""
Finds unspent coin objects in the wallet.
Args:
from_addr (UInt160): a bytearray (len 20) representing an address.
use_standard (bool): whether or not to only include standard contracts ( i.e not a smart contract addr ).
watch_only_val (int): a flag ( 0 or 64 ) indicating whether or not to find coins that are in 'watch only' addresses.
Returns:
list: a list of ``neo.Wallet.Coins`` in the wallet that are not spent.
"""
ret = []
for coin in self.GetCoins():
if coin.State & CoinState.Confirmed > 0 and \
coin.State & CoinState.Spent == 0 and \
coin.State & CoinState.Locked == 0 and \
coin.State & CoinState.Frozen == 0 and \
coin.State & CoinState.WatchOnly == watch_only_val:
do_exclude = False
if self._vin_exclude:
for to_exclude in self._vin_exclude:
if coin.Reference.PrevIndex == to_exclude.PrevIndex and \
coin.Reference.PrevHash == to_exclude.PrevHash:
do_exclude = True
if do_exclude:
continue
if from_addr is not None:
if coin.Output.ScriptHash == from_addr:
ret.append(coin)
elif use_standard:
contract = self._contracts[coin.Output.ScriptHash.ToBytes()]
if contract.IsStandard:
ret.append(coin)
else:
ret.append(coin)
        return ret
def write_packed(self, outfile, rows):
"""
Write PNG file to `outfile`.
`rows` should be an iterator that yields each packed row;
a packed row being a sequence of packed bytes.
The rows have a filter byte prefixed and
are then compressed into one or more IDAT chunks.
They are not processed any further,
so if bitdepth is other than 1, 2, 4, 8, 16,
the pixel values should have been scaled
before passing them to this method.
This method does work for interlaced images but it is best avoided.
For interlaced images, the rows should be
presented in the order that they appear in the file.
"""
self.write_preamble(outfile)
# http://www.w3.org/TR/PNG/#11IDAT
if self.compression is not None:
compressor = zlib.compressobj(self.compression)
else:
compressor = zlib.compressobj()
# data accumulates bytes to be compressed for the IDAT chunk;
# it's compressed when sufficiently large.
data = bytearray()
for i, row in enumerate(rows):
# Add "None" filter type.
# Currently, it's essential that this filter type be used
# for every scanline as
# we do not mark the first row of a reduced pass image;
# that means we could accidentally compute
# the wrong filtered scanline if we used
# "up", "average", or "paeth" on such a line.
data.append(0)
data.extend(row)
if len(data) > self.chunk_limit:
# :todo: bytes() only necessary in Python 2
compressed = compressor.compress(bytes(data))
if len(compressed):
write_chunk(outfile, b'IDAT', compressed)
data = bytearray()
compressed = compressor.compress(bytes(data))
flushed = compressor.flush()
if len(compressed) or len(flushed):
write_chunk(outfile, b'IDAT', compressed + flushed)
# http://www.w3.org/TR/PNG/#11IEND
write_chunk(outfile, b'IEND')
        return i + 1
def get_dummy_thread(nsamples, **kwargs):
"""Generate dummy data for a single nested sampling thread.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values.
"""
seed = kwargs.pop('seed', False)
ndim = kwargs.pop('ndim', 2)
logl_start = kwargs.pop('logl_start', -np.inf)
logl_range = kwargs.pop('logl_range', 1)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if seed is not False:
np.random.seed(seed)
thread = {'logl': np.sort(np.random.random(nsamples)) * logl_range,
'nlive_array': np.full(nsamples, 1.),
'theta': np.random.random((nsamples, ndim)),
'thread_labels': np.zeros(nsamples).astype(int)}
if logl_start != -np.inf:
thread['logl'] += logl_start
thread['thread_min_max'] = np.asarray([[logl_start, thread['logl'][-1]]])
return thread | Generate dummy data for a single nested sampling thread.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values. |
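A quick usage sketch, assuming get_dummy_thread and numpy are importable from this module; with a fixed seed the returned dict is reproducible:
thread = get_dummy_thread(5, ndim=3, seed=0, logl_start=-10.0, logl_range=2.0)
print(thread['logl'].shape)        # (5,) -- sorted, scaled by 2.0, shifted by -10.0
print(thread['theta'].shape)       # (5, 3) -- one row of unit-uniform draws per sample
print(thread['nlive_array'])       # [1. 1. 1. 1. 1.]
print(thread['thread_min_max'])    # (1, 2) array: [[-10.0, <largest logl>]]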
def CopyFileInZip(from_zip, from_name, to_zip, to_name=None):
"""Read a file from a ZipFile and write it to a new ZipFile."""
data = from_zip.read(from_name)
if to_name is None:
to_name = from_name
to_zip.writestr(to_name, data) | Read a file from a ZipFile and write it to a new ZipFile. |
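A minimal usage sketch, assuming CopyFileInZip is in scope; both archives are ordinary standard-library ZipFile objects and the archive/member names here are illustrative only:
import zipfile
with zipfile.ZipFile('source.zip', 'r') as src, \
        zipfile.ZipFile('dest.zip', 'w') as dst:
    # Copy 'stops.txt' across under the same name ...
    CopyFileInZip(src, 'stops.txt', dst)
    # ... and once more under a different name.
    CopyFileInZip(src, 'stops.txt', dst, to_name='stops_backup.txt')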
def get_accessibility_packs(self):
"""Get accessibility packs of the graph:
elements within one pack are related in some way; between packs, there is no relation at all.
TODO: Make it work for directional graph too
Because for now, edge must be father->son AND son->father
:return: packs of nodes
:rtype: list
"""
packs = []
# Add the tag for dfs check
for node in list(self.nodes.values()):
node['dfs_loop_status'] = 'DFS_UNCHECKED'
for node_id, node in self.nodes.items():
# Run the dfs only if the node is not already done */
if node['dfs_loop_status'] == 'DFS_UNCHECKED':
packs.append(self.dfs_get_all_childs(node_id))
# Remove the tag
for node in list(self.nodes.values()):
del node['dfs_loop_status']
return packs | Get accessibility packs of the graph:
elements within one pack are related in some way; between packs, there is no relation at all.
TODO: Make it work for directional graph too
Because for now, edge must be father->son AND son->father
:return: packs of nodes
:rtype: list |
def validate(cnpj_number):
"""This function validates a CNPJ number.
This function uses calculation package to calculate both digits
and then validates the number.
:param cnpj_number: a CNPJ number to be validated. Only numbers.
:type cnpj_number: string
:return: Bool -- True for a valid number, False otherwise.
"""
_cnpj = compat.clear_punctuation(cnpj_number)
if (len(_cnpj) != 14 or
len(set(_cnpj)) == 1):
return False
first_part = _cnpj[:12]
second_part = _cnpj[:13]
first_digit = _cnpj[12]
second_digit = _cnpj[13]
if (first_digit == calc.calculate_first_digit(first_part) and
second_digit == calc.calculate_second_digit(second_part)):
return True
return False | This function validates a CNPJ number.
This function uses calculation package to calculate both digits
and then validates the number.
:param cnpj_number: a CNPJ number to be validated. Only numbers.
:type cnpj_number: string
:return: Bool -- True for a valid number, False otherwise. |
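A brief usage sketch, assuming validate is importable from this module; 11222333000181 is a commonly used example CNPJ whose check digits are consistent:
print(validate('11222333000181'))   # True  -- both check digits match
print(validate('11222333000100'))   # False -- wrong check digits
print(validate('11111111111111'))   # False -- a single repeated digit is rejected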
def _expand_target(self):
'''
Figures out if the target is a reachable host without wildcards, expands if any.
:return:
'''
# TODO: Support -L
target = self.opts['tgt']
if isinstance(target, list):
return
hostname = self.opts['tgt'].split('@')[-1]
needs_expansion = '*' not in hostname and \
salt.utils.network.is_reachable_host(hostname) and \
salt.utils.network.is_ip(hostname)
if needs_expansion:
hostname = salt.utils.network.ip_to_host(hostname)
if hostname is None:
# Reverse lookup failed
return
self._get_roster()
for roster_filename in self.__parsed_rosters:
roster_data = self.__parsed_rosters[roster_filename]
if not isinstance(roster_data, bool):
for host_id in roster_data:
if hostname in [host_id, roster_data.get('host')]:
if hostname != self.opts['tgt']:
self.opts['tgt'] = hostname
self.__parsed_rosters[self.ROSTER_UPDATE_FLAG] = False
return | Figures out if the target is a reachable host without wildcards, expands if any.
:return: |
def get_scaled_cutout_wdht_view(shp, x1, y1, x2, y2, new_wd, new_ht):
"""
Like get_scaled_cutout_wdht, but returns the view/slice to extract
from an image instead of the extraction itself.
"""
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
new_wd, new_ht = int(new_wd), int(new_ht)
# calculate dimensions of NON-scaled cutout
old_wd = x2 - x1 + 1
old_ht = y2 - y1 + 1
max_x, max_y = shp[1] - 1, shp[0] - 1
if (new_wd != old_wd) or (new_ht != old_ht):
# Make indexes and scale them
# Is there a more efficient way to do this?
yi = np.mgrid[0:new_ht].reshape(-1, 1)
xi = np.mgrid[0:new_wd].reshape(1, -1)
iscale_x = float(old_wd) / float(new_wd)
iscale_y = float(old_ht) / float(new_ht)
xi = (x1 + xi * iscale_x).clip(0, max_x).astype(np.int, copy=False)
yi = (y1 + yi * iscale_y).clip(0, max_y).astype(np.int, copy=False)
wd, ht = xi.shape[1], yi.shape[0]
# bounds check against shape (to protect future data access)
xi_max, yi_max = xi[0, -1], yi[-1, 0]
assert xi_max <= max_x, ValueError("X index (%d) exceeds shape bounds (%d)" % (xi_max, max_x))
assert yi_max <= max_y, ValueError("Y index (%d) exceeds shape bounds (%d)" % (yi_max, max_y))
view = np.s_[yi, xi]
else:
# simple stepped view will do, because new view is same as old
wd, ht = old_wd, old_ht
view = np.s_[y1:y2 + 1, x1:x2 + 1]
# Calculate actual scale used (vs. desired)
old_wd, old_ht = max(old_wd, 1), max(old_ht, 1)
scale_x = float(wd) / old_wd
scale_y = float(ht) / old_ht
# return view + actual scale factors used
return (view, (scale_x, scale_y)) | Like get_scaled_cutout_wdht, but returns the view/slice to extract
from an image instead of the extraction itself. |
def insertData(self, offset: int, string: str) -> None:
"""Insert ``string`` at offset on this node."""
self._insert_data(offset, string) | Insert ``string`` at offset on this node. |
def __parse_domain_to_employer_line(self, raw_domain, raw_org):
"""Parse domain to employer lines"""
d = re.match(self.DOMAIN_REGEX, raw_domain, re.UNICODE)
if not d:
cause = "invalid domain format: '%s'" % raw_domain
raise InvalidFormatError(cause=cause)
dom = d.group('domain').strip()
o = re.match(self.ORGANIZATION_REGEX, raw_org, re.UNICODE)
if not o:
cause = "invalid organization format: '%s'" % raw_org
raise InvalidFormatError(cause=cause)
org = o.group('organization').strip()
org = self.__encode(org)
dom = self.__encode(dom)
return org, dom | Parse domain to employer lines |
def _ensure_started(self):
"""Marks the API as started and runs all startup handlers"""
if not self.started:
async_handlers = [startup_handler for startup_handler in self.startup_handlers if
introspect.is_coroutine(startup_handler)]
if async_handlers:
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(*[handler(self) for handler in async_handlers], loop=loop))
for startup_handler in self.startup_handlers:
if startup_handler not in async_handlers:
startup_handler(self) | Marks the API as started and runs all startup handlers |
def _seconds_as_string(seconds):
"""
Returns seconds as a human-friendly string, e.g. '1d 4h 47m 41s'
"""
TIME_UNITS = [('s', 60), ('m', 60), ('h', 24), ('d', None)]
unit_strings = []
cur = max(int(seconds), 1)
for suffix, size in TIME_UNITS:
if size is not None:
cur, rest = divmod(cur, size)
else:
rest = cur
if rest > 0:
unit_strings.insert(0, '%d%s' % (rest, suffix))
return ' '.join(unit_strings) | Returns seconds as a human-friendly string, e.g. '1d 4h 47m 41s' |
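A worked example, assuming _seconds_as_string is in scope; 1 day, 4 hours, 47 minutes and 41 seconds is 103661 seconds in total:
total = 1 * 86400 + 4 * 3600 + 47 * 60 + 41   # 103661
print(_seconds_as_string(total))              # '1d 4h 47m 41s'
print(_seconds_as_string(0))                  # '1s' -- values below 1 are clamped to one second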
def detect_client_auth_request(server_handshake_bytes):
"""
Determines if a CertificateRequest message is sent from the server asking
the client for a certificate
:param server_handshake_bytes:
A byte string of the handshake data received from the server
:return:
A boolean - if a client certificate request was found
"""
for record_type, _, record_data in parse_tls_records(server_handshake_bytes):
if record_type != b'\x16':
continue
for message_type, message_data in parse_handshake_messages(record_data):
if message_type == b'\x0d':
return True
return False | Determines if a CertificateRequest message is sent from the server asking
the client for a certificate
:param server_handshake_bytes:
A byte string of the handshake data received from the server
:return:
A boolean - if a client certificate request was found |
def fitTo_t(what: Union[RtlSignal, Value], where_t: HdlType,
extend: bool=True, shrink: bool=True):
"""
Slice signal "what" to fit in "where"
or
arithmetically (for signed by MSB / unsigned, vector with 0) extend
"what" to same width as "where"
little-endian impl.
"""
whatWidth = what._dtype.bit_length()
toWidth = where_t.bit_length()
if toWidth == whatWidth:
return what
elif toWidth < whatWidth:
# slice
if not shrink:
raise BitWidthErr()
return what[toWidth:]
else:
if not extend:
raise BitWidthErr()
w = toWidth - whatWidth
if what._dtype.signed:
# signed extension
msb = what[whatWidth - 1]
ext = reduce(lambda a, b: a._concat(b), [msb for _ in range(w)])
else:
# 0 extend
ext = vec(0, w)
return ext._concat(what) | Slice signal "what" to fit in "where"
or
arithmetically (for signed by MSB / unsigned, vector with 0) extend
"what" to same width as "where"
little-endian impl. |
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if name and name[0] != '<' and name[-1] != '>':
return os.path.basename(name) | Tries to guess the filename of the given object. |
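A small sketch, assuming guess_filename is in scope; any object with a usable name attribute works, while pseudo-names such as '<stdin>' yield None:
import io
import sys
import types
fake = types.SimpleNamespace(name='/tmp/report.csv')
print(guess_filename(fake))               # 'report.csv'
print(guess_filename(sys.stdin))          # None -- name is '<stdin>'
print(guess_filename(io.BytesIO(b'x')))   # None -- no name attribute at all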
def output(self):
"""Output object of driver."""
output_params = dict(
self._raw["output"],
grid=self.output_pyramid.grid,
pixelbuffer=self.output_pyramid.pixelbuffer,
metatiling=self.output_pyramid.metatiling
)
if "path" in output_params:
output_params.update(
path=absolute_path(path=output_params["path"], base_dir=self.config_dir)
)
if "format" not in output_params:
raise MapcheteConfigError("output format not specified")
if output_params["format"] not in available_output_formats():
raise MapcheteConfigError(
"format %s not available in %s" % (
output_params["format"], str(available_output_formats())
)
)
writer = load_output_writer(output_params)
try:
writer.is_valid_with_config(output_params)
except Exception as e:
logger.exception(e)
raise MapcheteConfigError(
"driver %s not compatible with configuration: %s" % (
writer.METADATA["driver_name"], e
)
)
return writer | Output object of driver. |
def classes_(self):
"""
Proxy property to smartly access the classes from the estimator or
stored locally on the score visualizer for visualization.
"""
if self.__classes is None:
try:
return self.estimator.classes_
except AttributeError:
return None
return self.__classes | Proxy property to smartly access the classes from the estimator or
stored locally on the score visualizer for visualization. |
def parseCmdline(rh):
"""
Parse the request command input.
Input:
Request Handle
Output:
Request Handle updated with parsed input.
Return code - 0: ok, non-zero: error
"""
rh.printSysLog("Enter cmdVM.parseCmdline")
if rh.totalParms >= 2:
rh.userid = rh.request[1].upper()
else:
# Userid is missing.
msg = msgs.msg['0010'][1] % modId
rh.printLn("ES", msg)
rh.updateResults(msgs.msg['0010'][0])
rh.printSysLog("Exit cmdVM.parseCmdLine, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
if rh.totalParms == 2:
rh.subfunction = rh.userid
rh.userid = ''
if rh.totalParms >= 3:
rh.subfunction = rh.request[2].upper()
# Verify the subfunction is valid.
if rh.subfunction not in subfuncHandler:
# Subfunction is missing.
subList = ', '.join(sorted(subfuncHandler.keys()))
msg = msgs.msg['0011'][1] % (modId, subList)
rh.printLn("ES", msg)
rh.updateResults(msgs.msg['0011'][0])
# Parse the rest of the command line.
if rh.results['overallRC'] == 0:
rh.argPos = 3 # Begin Parsing at 4th operand
generalUtils.parseCmdline(rh, posOpsList, keyOpsList)
rh.printSysLog("Exit cmdVM.parseCmdLine, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC'] | Parse the request command input.
Input:
Request Handle
Output:
Request Handle updated with parsed input.
Return code - 0: ok, non-zero: error |
def Extract_Checkpoints(self):
'''
Extract the checkpoints and store in self.tracking_data
'''
# Make sure page is available
if self.page is None:
raise Exception("The HTML data was not fetched due to some reasons")
# Check for invalid tracking number
if 'Invalid number / data not currently available' in self.page:
raise ValueError('Invalid number/data not currently available')
# Checkpoints extraction begins here
soup = BeautifulSoup(self.page,'html.parser')
# Assign the current status of the shipment - self.status
current_status = soup.find('span',id='spnCurrentStatusValue').text.strip()
if current_status == 'Supporting Document Returned to Shipper':
self.status = 'R'
elif current_status == 'Delivered':
self.status = 'C'
else: # The shipment is in Transit
self.status = 'T'
# Get all rows of the Checkpoints table (no particular order)
rows = soup.findAll('div',{'class':'fullWidth odd leftFloat bottomGreyBorder'})
rows += soup.findAll('div',{'class':'fullWidth even leftFloat bottomGreyBorder'})
for row in rows:
# Get the data
location = row.find('div',{'class':'leftFloat thirdWidth'}).string.strip()
date_time = row.find('div',{'class':'leftFloat shipmentSummaryLabel'}).string.strip()
status = row.find('div',{'class':'leftFloat shipmentHistoryActivityLabel'}).string.strip()
# Clean it
location = self.remove_non_ascii(location)
date_time_format = "%d-%b-%Y %H:%M"
date_time = parse(self.remove_non_ascii(date_time))
status = self.remove_non_ascii(status)
# Add it to the checkpoint list
self.tracking_data.append({'status':status,'date':date_time,'location':location})
self.tracking_data = sorted(self.tracking_data, key=lambda k: k['date']) | Extract the checkpoints and store in self.tracking_data |
def predict_sequence(self, X, A, pi, inference='smoothing'):
"""
Calculate class probabilities for a sequence of data.
Parameters
----------
X : array
Test data, of dimension N times d (rows are time frames, columns
are data dimensions)
A : class transition matrix, where A[i,j] contains p(y_t=j|y_{t-1}=i)
pi : vector of initial class probabilities
inference : can be 'smoothing' or 'filtering'.
Returns:
-------
y_prob : array
An array of dimension N times n_inlier_classes+1, containing
the probabilities of each row of X being one of the inlier
classes, or the outlier class (last column).
"""
obsll = self.predict_proba(X)
T, S = obsll.shape
alpha = np.zeros((T, S))
alpha[0, :] = pi
for t in range(1, T):
alpha[t, :] = np.dot(alpha[t-1, :], A)
for s in range(S):
alpha[t, s] *= obsll[t, s]
alpha[t, :] = alpha[t, :]/sum(alpha[t, :])
if inference == 'filtering':
return alpha
else:
beta = np.zeros((T, S))
gamma = np.zeros((T, S))
beta[T-1, :] = np.ones(S)
for t in range(T-2, -1, -1):
for i in range(S):
for j in range(S):
beta[t, i] += A[i, j]*obsll[t+1, j]*beta[t+1, j]
beta[t, :] = beta[t, :]/sum(beta[t, :])
for t in range(T):
gamma[t, :] = alpha[t, :]*beta[t, :]
gamma[t, :] = gamma[t, :]/sum(gamma[t, :])
return gamma | Calculate class probabilities for a sequence of data.
Parameters
----------
X : array
Test data, of dimension N times d (rows are time frames, columns
are data dimensions)
A : class transition matrix, where A[i,j] contains p(y_t=j|y_{t-1}=i)
pi : vector of initial class probabilities
inference : can be 'smoothing' or 'filtering'.
Returns:
-------
y_prob : array
An array of dimension N times n_inlier_classes+1, containing
the probabilities of each row of X being one of the inlier
classes, or the outlier class (last column). |
def to_period(self, freq=None):
"""
Cast to PeriodArray/Index at a particular frequency.
Converts DatetimeArray/Index to PeriodArray/Index.
Parameters
----------
freq : str or Offset, optional
One of pandas' :ref:`offset strings <timeseries.offset_aliases>`
or an Offset object. Will be inferred by default.
Returns
-------
PeriodArray/Index
Raises
------
ValueError
When converting a DatetimeArray/Index with non-regular values,
so that a frequency cannot be inferred.
See Also
--------
PeriodIndex: Immutable ndarray holding ordinal values.
DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.
Examples
--------
>>> df = pd.DataFrame({"y": [1, 2, 3]},
... index=pd.to_datetime(["2000-03-31 00:00:00",
... "2000-05-31 00:00:00",
... "2000-08-31 00:00:00"]))
>>> df.index.to_period("M")
PeriodIndex(['2000-03', '2000-05', '2000-08'],
dtype='period[M]', freq='M')
Infer the daily frequency
>>> idx = pd.date_range("2017-01-01", periods=2)
>>> idx.to_period()
PeriodIndex(['2017-01-01', '2017-01-02'],
dtype='period[D]', freq='D')
"""
from pandas.core.arrays import PeriodArray
if self.tz is not None:
warnings.warn("Converting to PeriodArray/Index representation "
"will drop timezone information.", UserWarning)
if freq is None:
freq = self.freqstr or self.inferred_freq
if freq is None:
raise ValueError("You must pass a freq argument as "
"current index has none.")
freq = get_period_alias(freq)
return PeriodArray._from_datetime64(self._data, freq, tz=self.tz) | Cast to PeriodArray/Index at a particular frequency.
Converts DatetimeArray/Index to PeriodArray/Index.
Parameters
----------
freq : str or Offset, optional
One of pandas' :ref:`offset strings <timeseries.offset_aliases>`
or an Offset object. Will be inferred by default.
Returns
-------
PeriodArray/Index
Raises
------
ValueError
When converting a DatetimeArray/Index with non-regular values,
so that a frequency cannot be inferred.
See Also
--------
PeriodIndex: Immutable ndarray holding ordinal values.
DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.
Examples
--------
>>> df = pd.DataFrame({"y": [1, 2, 3]},
... index=pd.to_datetime(["2000-03-31 00:00:00",
... "2000-05-31 00:00:00",
... "2000-08-31 00:00:00"]))
>>> df.index.to_period("M")
PeriodIndex(['2000-03', '2000-05', '2000-08'],
dtype='period[M]', freq='M')
Infer the daily frequency
>>> idx = pd.date_range("2017-01-01", periods=2)
>>> idx.to_period()
PeriodIndex(['2017-01-01', '2017-01-02'],
dtype='period[D]', freq='D') |
def _get_methods_that_calculate_outputs(inputs, outputs, methods):
'''
Given iterables of input variable names, output variable names,
and a methods dictionary, returns the subset of the methods dictionary
that can be calculated, doesn't calculate something we already have,
and only contains equations that might help calculate the outputs from
the inputs.
'''
# Get a list of everything that we can possibly calculate
# This is useful in figuring out whether we can calculate arguments
intermediates = get_calculatable_quantities(inputs, methods)
# Initialize our return dictionary
return_methods = {}
# list so that we can append arguments that need to be output for
# some of the paths
outputs = list(outputs)
# keep track of when to exit the while loop
keep_going = True
while keep_going:
# If there are no updates in a pass, the loop will exit
keep_going = False
for output in outputs:
try:
output_dict = return_methods[output]
except KeyError:
output_dict = {}
for args, func in methods[output].items():
# only check the method if we're not already returning it
if args not in output_dict.keys():
# Initialize a list of intermediates needed to use
# this method, to add to outputs if we find we can
# use it.
needed = []
for arg in args:
if arg in inputs:
# we have this argument
pass
elif arg in outputs:
# we may need to calculate one output using
# another output
pass
elif arg in intermediates:
if arg not in outputs:
# don't need to add to needed if it's already
# been put in outputs
needed.append(arg)
else:
# Can't do this func
break
else: # did not break, can calculate this
output_dict[args] = func
if len(needed) > 0:
# We added an output, so need another loop
outputs.extend(needed)
keep_going = True
if len(output_dict) > 0:
return_methods[output] = output_dict
return return_methods | Given iterables of input variable names, output variable names,
and a methods dictionary, returns the subset of the methods dictionary
that can be calculated, doesn't calculate something we already have,
and only contains equations that might help calculate the outputs from
the inputs. |
def in_virtual_env():
"""
returns True if you are running inside a python virtual environment.
(DOES NOT WORK IF IN IPYTHON AND USING A VIRTUALENV)
sys.prefix gives the location of the virtualenv
Notes:
It seems IPython does not respect virtual environments properly.
TODO: find a solution
http://stackoverflow.com/questions/7335992/ipython-and-virtualenv-ignoring-site-packages
References:
http://stackoverflow.com/questions/1871549/python-determine-if-running-inside-virtualenv
CommandLine:
python -m utool.util_sysreq in_virtual_env
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_sysreq import * # NOQA
>>> import utool as ut
>>> result = in_virtual_env()
>>> print(result)
"""
import sys
has_venv = False
if hasattr(sys, 'real_prefix'):
# For virtualenv module
has_venv = True
elif hasattr(sys, 'base_prefix'):
# For venv module
has_venv = sys.base_prefix != sys.prefix
return has_venv | returns True if you are running inside a python virtual environment.
(DOES NOT WORK IF IN IPYTHON AND USING A VIRTUALENV)
sys.prefix gives the location of the virtualenv
Notes:
It seems IPython does not respect virtual environments properly.
TODO: find a solution
http://stackoverflow.com/questions/7335992/ipython-and-virtualenv-ignoring-site-packages
References:
http://stackoverflow.com/questions/1871549/python-determine-if-running-inside-virtualenv
CommandLine:
python -m utool.util_sysreq in_virtual_env
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_sysreq import * # NOQA
>>> import utool as ut
>>> result = in_virtual_env()
>>> print(result) |
def definition(self, suffix = "", local=False, ctype=None, optionals=True,
customdim=None, modifiers=None):
"""Returns the fortran code string that would define this value element.
:arg suffix: an optional suffix to append to the name of the variable.
Useful for re-using definitions with new names.
:arg local: when True, the parameter definition is re-cast as a local
variable definition that has the "intent" and "optional" modifiers removed.
:arg ctype: if a ctype should be used as the data type of the variable
instead of the original type, specify that string here.
:arg optionals: removes the "optional" modifier from the definition before
generating it.
:arg customdim: if the dimension string needs to be changed, specify the
new one here.
:arg modifiers: specify an additional list of modifiers to add to the
variable definition.
"""
kind = "({})".format(self.kind) if self.kind is not None else ""
cleanmods = [m for m in self.modifiers if m != "" and m != " "
and not (local and ("intent" in m or m == "optional"))
and not (not optionals and m == "optional")]
if modifiers is not None:
cleanmods.extend(modifiers)
if len(cleanmods) > 0:
mods = ", " + ", ".join(cleanmods) + " "
else:
mods = " "
if customdim is not None:
dimension = "({})".format(customdim)
else:
dimension = "({})".format(self.dimension) if self.dimension is not None else ""
if self.default is None:
default = ""
else:
if ">" in self.default: #We have a pointer, don't add an extra space.
default = " ={}".format(self.default) if self.default is not None else ""
else:
default = " = {}".format(self.default) if self.default is not None else ""
name = "{}{}".format(self.name, suffix)
stype = self.dtype if ctype is None else ctype
return "{}{}{}:: {}{}{}".format(stype, kind, mods, name, dimension, default) | Returns the fortran code string that would define this value element.
:arg suffix: an optional suffix to append to the name of the variable.
Useful for re-using definitions with new names.
:arg local: when True, the parameter definition is re-cast as a local
variable definition that has the "intent" and "optional" modifiers removed.
:arg ctype: if a ctype should be used as the data type of the variable
instead of the original type, specify that string here.
:arg optionals: removes the "optional" modifier from the definition before
generating it.
:arg customdim: if the dimension string needs to be changed, specify the
new one here.
:arg modifiers: specify an additional list of modifiers to add to the
variable definition. |
def update_filenames(self):
"""Does nothing currently. May not need this method"""
self.sky_file = os.path.abspath(os.path.join(os.path.join(self.input_path, 'sky_files'),
'sky_' + self.sky_state + '_z' + str(
self.sky_zenith) + '_a' + str(
self.sky_azimuth) + '_' + str(
self.num_bands) + '_' + self.ds_code)) | Does nothing currently. May not need this method |
def headercontent(self, method):
"""
Get the content for the SOAP I{Header} node.
@param method: A service method.
@type method: I{service.Method}
@return: The XML content for the <body/>.
@rtype: [L{Element},...]
"""
content = []
wsse = self.options().wsse
if wsse is not None:
content.append(wsse.xml())
headers = self.options().soapheaders
if not isinstance(headers, (tuple, list, dict)):
headers = (headers,)
elif not headers:
return content
pts = self.headpart_types(method)
if isinstance(headers, (tuple, list)):
n = 0
for header in headers:
if isinstance(header, Element):
content.append(deepcopy(header))
continue
if len(pts) == n:
break
h = self.mkheader(method, pts[n], header)
ns = pts[n][1].namespace("ns0")
h.setPrefix(ns[0], ns[1])
content.append(h)
n += 1
else:
for pt in pts:
header = headers.get(pt[0])
if header is None:
continue
h = self.mkheader(method, pt, header)
ns = pt[1].namespace("ns0")
h.setPrefix(ns[0], ns[1])
content.append(h)
return content | Get the content for the SOAP I{Header} node.
@param method: A service method.
@type method: I{service.Method}
@return: The XML content for the <body/>.
@rtype: [L{Element},...] |
def get_data_filename(filename):
"""Map filename to its actual path.
Parameters
----------
filename : str
Filename to search.
Returns
-------
path : str
Full path to the file in data directory.
"""
global _data_map
if _data_map is None:
_data_map = {}
for root, dirs, files in os.walk(specdir):
for fname in files:
_data_map[fname] = os.path.join(root, fname)
if filename not in _data_map:
raise KeyError(filename + ' not found in ' + specdir)
return _data_map[filename] | Map filename to its actual path.
Parameters
----------
filename : str
Filename to search.
Returns
-------
path : str
Full path to the file in data directory. |
def get_user(uid, channel=14, **kwargs):
'''
Get user from uid and access on channel
:param uid: user number [1:16]
:param channel: number [1:7]
:param kwargs:
- api_host=127.0.0.1
- api_user=admin
- api_pass=example
- api_port=623
- api_kg=None
Return Data
.. code-block:: none
name: (str)
uid: (int)
channel: (int)
access:
- callback (bool)
- link_auth (bool)
- ipmi_msg (bool)
- privilege_level: (str)[callback, user, operator, administrator,
proprietary, no_access]
CLI Examples:
.. code-block:: bash
salt-call ipmi.get_user uid=2
'''
name = get_user_name(uid, **kwargs)
access = get_user_access(uid, channel, **kwargs)
data = {'name': name, 'uid': uid, 'channel': channel, 'access': access['access']}
return data | Get user from uid and access on channel
:param uid: user number [1:16]
:param channel: number [1:7]
:param kwargs:
- api_host=127.0.0.1
- api_user=admin
- api_pass=example
- api_port=623
- api_kg=None
Return Data
.. code-block:: none
name: (str)
uid: (int)
channel: (int)
access:
- callback (bool)
- link_auth (bool)
- ipmi_msg (bool)
- privilege_level: (str)[callback, user, operator, administrator,
proprietary, no_access]
CLI Examples:
.. code-block:: bash
salt-call ipmi.get_user uid=2 |
def from_dict(cls, ctx):
'Instance a new structure from a Python native type.'
ctx = Context(ctx)
s = cls()
ContextFlags = ctx['ContextFlags']
s.ContextFlags = ContextFlags
for key in cls._others:
if key != 'VectorRegister':
setattr(s, key, ctx[key])
else:
w = ctx[key]
v = (M128A * len(w))()
i = 0
for x in w:
y = M128A()
y.High = x >> 64
y.Low = x - (x >> 64)
v[i] = y
i += 1
setattr(s, key, v)
if (ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL:
for key in cls._control:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER:
for key in cls._integer:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS:
for key in cls._segments:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_DEBUG_REGISTERS) == CONTEXT_DEBUG_REGISTERS:
for key in cls._debug:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_MMX_REGISTERS) == CONTEXT_MMX_REGISTERS:
xmm = s.FltSave.xmm
for key in cls._mmx:
y = M128A()
y.High = x >> 64
y.Low = x - (x >> 64)
setattr(xmm, key, y)
return s | Instance a new structure from a Python native type. |
def _page(q, chunk=1000):
""" Quick utility to page a query, 1000 items at a time.
We need this so we don't OOM (out of memory) ourselves loading the world.
"""
offset = 0
while True:
r = False
for elem in q.limit(chunk).offset(offset):
r = True
yield elem
offset += chunk
if not r:
break | Quick utility to page a query, 1000 items at a time.
We need this so we don't OOM (out of memory) ourselves loading the world. |
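A self-contained sketch of the paging behaviour, assuming _page is in scope; FakeQuery below is an illustrative stand-in for a SQLAlchemy query, i.e. anything that supports limit()/offset() and iteration:
class FakeQuery:
    """Toy stand-in for a SQLAlchemy query: limit()/offset() slice a list of rows."""
    def __init__(self, rows, _limit=None, _offset=0):
        self._rows, self._limit, self._offset = rows, _limit, _offset
    def limit(self, n):
        return FakeQuery(self._rows, n, self._offset)
    def offset(self, n):
        return FakeQuery(self._rows, self._limit, n)
    def __iter__(self):
        stop = None if self._limit is None else self._offset + self._limit
        return iter(self._rows[self._offset:stop])
q = FakeQuery(list(range(2500)))
assert list(_page(q, chunk=1000)) == list(range(2500))  # pages of 1000, 1000, 500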
def _process_marked_candidate_indexes(candidate, markers):
"""
Run regexes against candidate's marked indexes to strip
signature candidate.
>>> _process_marked_candidate_indexes([9, 12, 14, 15, 17], 'clddc')
[15, 17]
"""
match = RE_SIGNATURE_CANDIDATE.match(markers[::-1])
return candidate[-match.end('candidate'):] if match else [] | Run regexes against candidate's marked indexes to strip
signature candidate.
>>> _process_marked_candidate_indexes([9, 12, 14, 15, 17], 'clddc')
[15, 17] |
def extract_connected_components(graph, connectivity_type, node_to_id):
"""
Extract the largest connected component from a graph.
Inputs: - graph: An adjacency matrix in scipy sparse matrix format.
- connectivity_type: A string that can be either: "strong" or "weak".
- node_to_id: A map from graph node id to Twitter id, in python dictionary format.
Outputs: - largest_connected_component: An adjacency matrix in scipy sparse matrix format.
- new_node_to_id: A map from graph node id to Twitter id, in python dictionary format.
- old_node_list: List of nodes from the possibly disconnected original graph.
Raises: - RuntimeError: If the input graph is empty.
"""
# Get a networkx graph.
nx_graph = nx.from_scipy_sparse_matrix(graph, create_using=nx.DiGraph())
# Calculate all connected components in graph.
if connectivity_type == "weak":
largest_connected_component_list = nxalgcom.weakly_connected_component_subgraphs(nx_graph)
elif connectivity_type == "strong":
largest_connected_component_list = nxalgcom.strongly_connected_component_subgraphs(nx_graph)
else:
print("Invalid connectivity type input.")
raise RuntimeError
# Handle empty graph.
try:
largest_connected_component = max(largest_connected_component_list, key=len)
except ValueError:
print("Error: Empty graph.")
raise RuntimeError
old_node_list = largest_connected_component.nodes()
node_to_node = dict(zip(np.arange(len(old_node_list)), old_node_list))
largest_connected_component = nx.to_scipy_sparse_matrix(largest_connected_component, dtype=np.float64, format="csr")
# Make node_to_id.
new_node_to_id = {k: node_to_id[v] for k, v in node_to_node.items()}
return largest_connected_component, new_node_to_id, old_node_list | Extract the largest connected component from a graph.
Inputs: - graph: An adjacency matrix in scipy sparse matrix format.
- connectivity_type: A string that can be either: "strong" or "weak".
- node_to_id: A map from graph node id to Twitter id, in python dictionary format.
Outputs: - largest_connected_component: An adjacency matrix in scipy sparse matrix format.
- new_node_to_id: A map from graph node id to Twitter id, in python dictionary format.
- old_node_list: List of nodes from the possibly disconnected original graph.
Raises: - RuntimeError: If the input graph is empty.
def __chopStringDict(self, data):
'''Returns a dictionary of the provided raw service/host check string.'''
r = {}
d = data.split('\t')
for item in d:
item_parts = item.split('::')
if len(item_parts) == 2:
(name, value) = item_parts
else:
name = item_parts[0]
value = item_parts[1]
name = self.__filter(name)
r[name] = value
if "hostperfdata" in r:
r["type"] = "hostcheck"
r["perfdata"] = r["hostperfdata"]
r["checkcommand"] = re.search("(.*?)!\(?.*", r["hostcheckcommand"]).group(1)
r["name"] = "hostcheck"
else:
r["type"] = "servicecheck"
r["perfdata"] = r["serviceperfdata"]
r["checkcommand"] = re.search("((.*)(?=\!)|(.*))", r["servicecheckcommand"]).group(1)
r["name"] = self.__filter(r["servicedesc"])
r["hostname"] = self.replacePeriod(self.__filter(r["hostname"]))
return r | Returns a dictionary of the provided raw service/host check string. |
def load_name(self, name):
"""
Implementation of the LOAD_NAME operation
"""
if name in self.globals_:
return self.globals_[name]
b = self.globals_['__builtins__']
if isinstance(b, dict):
return b[name]
else:
return getattr(b, name) | Implementation of the LOAD_NAME operation |
def view_pool(arg, opts, shell_opts):
""" View a single pool
"""
res = Pool.list({ 'name': arg })
if len(res) == 0:
print("No pool with name '%s' found." % arg)
return
p = res[0]
vrf_rt = None
vrf_name = None
if p.vrf:
vrf_rt = p.vrf.rt
vrf_name = p.vrf.name
print("-- Pool ")
print(" %-26s : %d" % ("ID", p.id))
print(" %-26s : %s" % ("Name", p.name))
print(" %-26s : %s" % ("Description", p.description))
print(" %-26s : %s" % ("Default type", p.default_type))
print(" %-26s : %s / %s" % ("Implied VRF RT / name", vrf_rt, vrf_name))
print(" %-26s : %s / %s" % ("Preflen (v4/v6)", str(p.ipv4_default_prefix_length), str(p.ipv6_default_prefix_length)))
print("-- Extra Attributes")
if p.avps is not None:
for key in sorted(p.avps, key=lambda s: s.lower()):
print(" %-26s : %s" % (key, p.avps[key]))
print("-- Tags")
for tag_name in sorted(p.tags, key=lambda s: s.lower()):
print(" %s" % tag_name)
# statistics
print("-- Statistics")
# IPv4 total / used / free prefixes
if p.member_prefixes_v4 == 0:
print(" IPv4 prefixes Used / Free : N/A (No IPv4 member prefixes)")
elif p.ipv4_default_prefix_length is None:
print(" IPv4 prefixes Used / Free : N/A (IPv4 default prefix length is not set)")
else:
if p.total_prefixes_v4 == 0:
used_percent_v4 = 0
else:
used_percent_v4 = (float(p.used_prefixes_v4)/p.total_prefixes_v4)*100
print(" %-26s : %.0f / %.0f (%.2f%% of %.0f)" % ("IPv4 prefixes Used / Free",
p.used_prefixes_v4, p.free_prefixes_v4, used_percent_v4,
p.total_prefixes_v4))
# IPv6 total / used / free prefixes
if p.member_prefixes_v6 == 0:
print(" IPv6 prefixes Used / Free : N/A (No IPv6 member prefixes)")
elif p.ipv6_default_prefix_length is None:
print(" IPv6 prefixes Used / Free : N/A (IPv6 default prefix length is not set)")
else:
if p.total_prefixes_v6 == 0:
used_percent_v6 = 0
else:
used_percent_v6 = (float(p.used_prefixes_v6)/p.total_prefixes_v6)*100
print(" %-26s : %.4e / %.4e (%.2f%% of %.4e)" % ("IPv6 prefixes Used / Free",
p.used_prefixes_v6, p.free_prefixes_v6, used_percent_v6,
p.total_prefixes_v6))
# IPv4 total / used / free addresses
if p.member_prefixes_v4 == 0:
print(" IPv4 addresses Used / Free : N/A (No IPv4 member prefixes)")
elif p.ipv4_default_prefix_length is None:
print(" IPv4 addresses Used / Free : N/A (IPv4 default prefix length is not set)")
else:
if p.total_addresses_v4 == 0:
used_percent_v4 = 0
else:
used_percent_v4 = (float(p.used_addresses_v4)/p.total_addresses_v4)*100
print(" %-26s : %.0f / %.0f (%.2f%% of %.0f)" % ("IPv4 addresses Used / Free",
p.used_addresses_v4, p.free_addresses_v4, used_percent_v4,
p.total_addresses_v4))
# IPv6 total / used / free addresses
if p.member_prefixes_v6 == 0:
print(" IPv6 addresses Used / Free : N/A (No IPv6 member prefixes)")
elif p.ipv6_default_prefix_length is None:
print(" IPv6 addresses Used / Free : N/A (IPv6 default prefix length is not set)")
else:
if p.total_addresses_v6 == 0:
used_percent_v6 = 0
else:
used_percent_v6 = (float(p.used_addresses_v6)/p.total_addresses_v6)*100
print(" %-26s : %.4e / %.4e (%.2f%% of %.4e)" % ("IPv6 addresses Used / Free",
p.used_addresses_v6, p.free_addresses_v6, used_percent_v6,
p.total_addresses_v6))
print("\n-- Prefixes in pool - v4: %d v6: %d" % (p.member_prefixes_v4,
p.member_prefixes_v6))
res = Prefix.list({ 'pool_id': p.id})
for pref in res:
print(" %s" % pref.display_prefix) | View a single pool |
def _cull(self):
"""Remove calls more than 1 second old from the queue."""
right_now = time.time()
cull_from = -1
for index in range(len(self._call_times)):
if right_now - self._call_times[index].time >= 1.0:
cull_from = index
self._outstanding_calls -= self._call_times[index].num_calls
else:
break
if cull_from > -1:
self._call_times = self._call_times[cull_from + 1:] | Remove calls more than 1 second old from the queue. |
def _nest_variable(v, check_records=False):
"""Nest a variable when moving from scattered back to consolidated.
check_records -- avoid re-nesting a record input if it comes from a previous
step and is already nested, don't need to re-array.
"""
if (check_records and is_cwl_record(v) and len(v["id"].split("/")) > 1 and
v.get("type", {}).get("type") == "array"):
return v
else:
v = copy.deepcopy(v)
v["type"] = {"type": "array", "items": v["type"]}
return v | Nest a variable when moving from scattered back to consolidated.
check_records -- avoid re-nesting a record input if it comes from a previous
step and is already nested, don't need to re-array. |
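A worked example, assuming _nest_variable is in scope; a scalar File input is wrapped into an array type and, thanks to the deepcopy, the input dict is left untouched:
v = {'id': 'align/bam', 'type': 'File'}
nested = _nest_variable(v)
print(nested['type'])   # {'type': 'array', 'items': 'File'}
print(v['type'])        # 'File'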
def xvJacobianFreqs(self,jr,jphi,jz,angler,anglephi,anglez,**kwargs):
"""
NAME:
xvJacobianFreqs
PURPOSE:
return [R,vR,vT,z,vz,phi], the Jacobian d [R,vR,vT,z,vz,phi] / d (J,angle), the Hessian dO/dJ, and frequencies Omega corresponding to a torus at multiple sets of angles
INPUT:
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
angler - radial angle (array [N])
anglephi - azimuthal angle (array [N])
anglez - vertical angle (array [N])
tol= (object-wide value) goal for |dJ|/|J| along the torus
dJ= (object-wide value) action difference when computing derivatives (Hessian or Jacobian)
nosym= (False) if True, don't explicitly symmetrize the Hessian (good to check errors)
OUTPUT:
([R,vR,vT,z,vz,phi], [N,6] array
d[R,vR,vT,z,vz,phi]/d[J,angle], --> (N,6,6) array
dO/dJ, --> (3,3) array
Omegar,Omegaphi,Omegaz, [N] arrays
Autofit error message)
HISTORY:
2016-07-19 - Written - Bovy (UofT)
"""
out= actionAngleTorus_c.actionAngleTorus_jacobian_c(\
self._pot,
jr,jphi,jz,
angler,anglephi,anglez,
tol=kwargs.get('tol',self._tol),
dJ=kwargs.get('dJ',self._dJ))
if out[11] != 0:
warnings.warn("actionAngleTorus' AutoFit exited with non-zero return status %i: %s" % (out[11],_autofit_errvals[out[11]]),
galpyWarning)
# Re-arrange actions,angles to r,phi,z
out[6][:,:,:]= out[6][:,:,[0,2,1,3,5,4]]
out[7][:,:]= out[7][:,[0,2,1]]
out[7][:,:]= out[7][[0,2,1]]
# Re-arrange x,v to R,vR,vT,z,vz,phi
out[6][:,:]= out[6][:,[0,3,5,1,4,2]]
if not kwargs.get('nosym',False):
# explicitly symmetrize
out[7][:]= 0.5*(out[7]+out[7].T)
return (numpy.array(out[:6]).T,out[6],out[7],
out[8],out[9],out[10],out[11]) | NAME:
xvJacobianFreqs
PURPOSE:
return [R,vR,vT,z,vz,phi], the Jacobian d [R,vR,vT,z,vz,phi] / d (J,angle), the Hessian dO/dJ, and frequencies Omega corresponding to a torus at multiple sets of angles
INPUT:
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
angler - radial angle (array [N])
anglephi - azimuthal angle (array [N])
anglez - vertical angle (array [N])
tol= (object-wide value) goal for |dJ|/|J| along the torus
dJ= (object-wide value) action difference when computing derivatives (Hessian or Jacobian)
nosym= (False) if True, don't explicitly symmetrize the Hessian (good to check errors)
OUTPUT:
([R,vR,vT,z,vz,phi], [N,6] array
d[R,vR,vT,z,vz,phi]/d[J,angle], --> (N,6,6) array
dO/dJ, --> (3,3) array
Omegar,Omegaphi,Omegaz, [N] arrays
Autofit error message)
HISTORY:
2016-07-19 - Written - Bovy (UofT) |
def default_metric_definitions(cls, toolkit):
"""Provides default metric definitions based on provided toolkit.
Args:
toolkit(sagemaker.rl.RLToolkit): RL Toolkit to be used for training.
Returns:
list: metric definitions
"""
if toolkit is RLToolkit.COACH:
return [
{'Name': 'reward-training',
'Regex': '^Training>.*Total reward=(.*?),'},
{'Name': 'reward-testing',
'Regex': '^Testing>.*Total reward=(.*?),'}
]
elif toolkit is RLToolkit.RAY:
float_regex = "[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?" # noqa: W605, E501 pylint: disable=anomalous-backslash-in-string
return [
{'Name': 'episode_reward_mean',
'Regex': 'episode_reward_mean: (%s)' % float_regex},
{'Name': 'episode_reward_max',
'Regex': 'episode_reward_max: (%s)' % float_regex}
] | Provides default metric definitions based on provided toolkit.
Args:
toolkit(sagemaker.rl.RLToolkit): RL Toolkit to be used for training.
Returns:
list: metric definitions |