def ko_data(queryset, field_names=None, name=None, safe=False, return_json=False):
"""
Given a QuerySet, return just the serialized representation
based on the knockout_fields as JavaScript.
"""
try:
try:
# Get an initial instance of the QS.
queryset_instance = queryset[0]
except TypeError as e:
# We are being passed an object rather than a QuerySet.
# That's naughty, but we'll survive.
queryset_instance = queryset
queryset = [queryset]
except IndexError as e:
if not isinstance(queryset, list):
# This is an empty QS - get the model directly.
queryset_instance = queryset.model
else:
# We have been given an empty list.
# Return nothing.
return '[]'
modelName = queryset_instance.__class__.__name__
modelNameData = []
if field_names is not None:
fields = field_names
else:
fields = get_fields(queryset_instance)
for obj in queryset:
object_data = get_object_data(obj, fields, safe)
modelNameData.append(object_data)
if name:
modelNameString = name
else:
modelNameString = modelName + "Data"
dthandler = lambda obj: obj.isoformat() if isinstance(obj, (datetime.date, datetime.datetime)) else None
dumped_json = json.dumps(modelNameData, default=dthandler)
if return_json:
return dumped_json
return "var " + modelNameString + " = " + dumped_json + ';'
except Exception as e:
logger.exception(e)
return '[]'
def _matrix_integration(q, h, t):
''' Returns the dp metric for a single horsetail
curve at a given value of the epistemic uncertainties'''
N = len(q)
# correction if CDF has gone out of trapezium range
if h[-1] < 0.9: h[-1] = 1.0
W = np.zeros([N, N])
for i in range(N):
W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)])
dp = (q - t).T.dot(W).dot(q - t)
return dp
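
A quick numeric check of the metric (a sketch that assumes the function above and numpy are in scope; the values are illustrative):

import numpy as np

q = np.linspace(0.0, 1.0, 5)   # quantile values of the horsetail curve
h = np.linspace(0.0, 1.0, 5)   # CDF heights at those quantiles
t = np.zeros(5)                # target curve to compare against
# Trapezoidal-rule weighted squared distance between q and t.
print(_matrix_integration(q, h, t))   # -> 0.34375
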
def set_lock(key, value=None, expiry_time=60):
"""Force to set a distribute lock"""
from uliweb.utils.common import get_uuid
redis = get_redis()
value = value or get_uuid()
return redis.set(key, value, ex=expiry_time, xx=True)
def write_lst(self):
"""
Dump the variable name lst file
:return: succeed flag
"""
ret = False
out = ''
system = self.system
dae = self.system.dae
varname = self.system.varname
template = '{:>6g}, {:>25s}, {:>35s}\n'
# header line
out += template.format(0, 'Time [s]', '$Time\\ [s]$')
# include line flow variables in algebraic variables
nflows = 0
if self.system.tds.config.compute_flows:
nflows = 2 * self.system.Bus.n + \
8 * self.system.Line.n + \
2 * self.system.Area.n_combination
# output variable indices
if system.Recorder.n == 0:
state_idx = list(range(dae.n))
algeb_idx = list(range(dae.n, dae.n + dae.m + nflows))
idx = state_idx + algeb_idx
else:
idx = system.Recorder.varout_idx
# variable names concatenated
uname = varname.unamex + varname.unamey
fname = varname.fnamex + varname.fnamey
for e, i in enumerate(idx):
out += template.format(e + 1, uname[i], fname[i])
try:
with open(self.system.files.lst, 'w') as f:
f.write(out)
ret = True
except IOError:
logger.error('I/O Error while writing the lst file.')
return ret
def get_impls(interfaces):
"""Get impls from their interfaces."""
if interfaces is None:
return None
elif isinstance(interfaces, Mapping):
return {name: interfaces[name]._impl for name in interfaces}
elif isinstance(interfaces, Sequence):
return [interfaces._impl for interfaces in interfaces]
else:
return interfaces._impl
def log_pdf(self, y, mu, weights=None):
"""
computes the log of the pdf or pmf of the values under the current distribution
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
weights : array-like shape (n,) or None, default: None
sample weights
if None, defaults to array of ones
Returns
-------
pdf/pmf : np.array of length n
"""
if weights is None:
weights = np.ones_like(mu)
scale = self.scale / weights
return sp.stats.norm.logpdf(y, loc=mu, scale=scale)
def get_absolute_url(self, endpoint):
"""Get absolute for secret link (using https scheme).
The endpoint is passed to ``url_for`` with ``token`` and ``extra_data``
as keyword arguments. E.g.::
>>> link.extra_data
dict(recid=1)
>>> link.get_absolute_url('record.metadata')
translates into::
>>> url_for('record.metadata', token="...", recid=1, )
"""
copy = deepcopy(self.extra_data)
if 'recid' in copy:
copy['pid_value'] = copy.pop('recid')
return url_for(
endpoint, token=self.token,
_external=True, **(copy or {})
)
def substitute_variables(command, level, name, value, target=None, **kwargs):
"""Substitute variables in command fragments by values e.g. ${level} => 'warning'."""
rule = kwargs.get('rule', {})
rule_value = rule.get('value', '') if rule else ''
substitutes = {
'${level}': str(level),
'${target}': str(target),
'${name}': '"' + str(name) + '"',
'${value}': str(value),
'${limit_value}': str(rule_value),
}
result = command
for pattern, value in substitutes.items():
result = result.replace(pattern, value)
return result
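
A small illustration of the substitution (the command string and rule below are made up; assumes the function above is in scope):

command = 'notify ${name} --level ${level} --value ${value} --limit ${limit_value}'
print(substitute_variables(command, 'warning', 'cpu_load', 0.93, rule={'value': 0.9}))
# -> notify "cpu_load" --level warning --value 0.93 --limit 0.9
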
def catalog(self):
"""Primary registered catalog for the wrapped portal type
"""
if self._catalog is None:
logger.debug("SuperModel::catalog: *Fetch catalog*")
self._catalog = self.get_catalog_for(self.brain)
return self._catalog
def find_previous_siblings(self, *args, **kwargs):
"""
Like :meth:`find_all`, but searches through :attr:`previous_siblings`
"""
op = operator.methodcaller('find_previous_siblings', *args, **kwargs)
return self._wrap_multi(op)
def aggregate_in(Data, On=None, AggFuncDict=None, AggFunc=None, AggList=None,
interspersed=True):
"""
Aggregate a ndarray with structured dtype or recarray
and include original data in the result.
Take aggregate of data set on specified columns, then add the resulting
rows back into data set to make a composite object containing both original
non-aggregate data rows as well as the aggregate rows.
First read comments for :func:`tabular.spreadsheet.aggregate`.
This function returns a numpy ndarray, with the number of rows equaling::
len(Data) + len(A)
where `A` is the the result of::
Data.aggregate(On,AggFuncDict)
`A` represents the aggregate rows; the other rows were the original data
rows.
This function supports _multiple_ aggregation, meaning that one can first
aggregate on one set of factors, then repeat aggregation on the result for
another set of factors, without the results of the first aggregation
interfering the second. To achieve this, the method adds two new columns:
* a column called "__aggregates__" specifying on which factors the rows
that are aggregate rows were aggregated. Rows added by aggregating on
factor `A` (a column in the original data set) will have `A` in the
"__aggregates__" column. When multiple factors `A1`, `A2` , ... are
aggregated on, the notation is a comma-separated list: `A1,A2,...`.
This way, when you call `aggregate_in` again, the function only
aggregates on the columns that have the empty char '' in their
"__aggregates__" column.
* a column called '__color__', specifying Gray-Scale colors for
aggregated rows that will be used by the Data Environment system
browser for colorizing the data. When there are multiple levels of
aggregation, the coarser aggregate groups (e.g. on fewer factors) get
darker gray color then those on finer aggregate groups (e.g. more
factors).
Implemented by the tabarray method
:func:`tabular.tab.tabarray.aggregate_in`.
**Parameters**
**Data** : numpy ndarray with structured dtype or recarray
The data set to aggregate in.
**On** : list of strings, optional
List of column names in `X`.
**AggFuncDict** : dictionary, optional
Dictionary where
* keys are some (all) column names of `X` that are NOT in
`On`
* values are functions that can be applied to lists or
numpy arrays.
This specifies how to aggregate the factors _not_ listed in
`On`, e.g. the so-called `Off` columns.
**AggFunc** : function, optional
Function that can be applied to lists or numpy arrays,
specifying how to aggregate factors not listed in either
`On` or the keys of `AggFuncDict`, e.g. a "default"
aggregation function for the `Off` columns not explicitly
listed in `AggFuncDict`.
**interspersed** : boolean, optional
* If `True`, aggregate rows are interleaved with the data
of which they are aggregates.
* If `False`, all aggregate rows placed at the end of the
array.
**Returns**
**agg** : numpy ndarray with structured dtype
Composite aggregated data set plus original data set.
**See also:**
:func:`tabular.spreadsheet.aggregate`
"""
# See if there's an '__aggregates__ column'.
# If so, strip off all those that are nontrivial.
Data = deletecols(Data,'__color__')
if '__aggregates__' in Data.dtype.names:
X = Data[Data['__aggregates__'] == ''][:]
OldAggregates = Data[Data['__aggregates__'] != ''][:]
AggVars = utils.uniqify(utils.listunion([x.split(',') for x in
OldAggregates['__aggregates__']]))
else:
X = Data
OldAggregates = Data[0:0]
AggVars = []
if On is None:
On = []
NewAggregates = aggregate(X, On, AggFuncDict=AggFuncDict,
AggFunc=AggFunc, AggList=AggList, KeepOthers=True)
on = ','.join(On)
NewAggregates = addcols(NewAggregates,
utils.fromarrays([[on]*len(NewAggregates)],
type=np.ndarray, names=['__aggregates__']))
AggVars = utils.uniqify(AggVars + On)
Aggregates = rowstack([OldAggregates,NewAggregates],mode='nulls')
ANLen = np.array([len(x.split(',')) for x in Aggregates['__aggregates__']])
U = np.array(utils.uniqify(ANLen)); U.sort()
[A,B] = fast.equalspairs(ANLen,U)
Grays = np.array(grayspec(len(U)))
AggColor = utils.fromarrays([Grays[A]], type=np.ndarray,
names = ['__color__'])
Aggregates = addcols(Aggregates,AggColor)
if not interspersed or len(AggVars) == 0:
return rowstack([X,Aggregates],mode='nulls')
else:
s = ANLen.argsort()
Aggregates = Aggregates[s[range(len(Aggregates) - 1, -1, -1)]]
X.sort(order = AggVars)
Diffs = np.append(np.append([0], 1 + (X[AggVars][1:] !=
X[AggVars][:-1]).nonzero()[0]), [len(X)])
DiffAtts = ([[t for t in AggVars if X[t][Diffs[i]] != X[t][Diffs[i+1]]]
for i in range(len(Diffs) - 2)]
if len(Diffs) > 2 else []) + [AggVars]
HH = {}
for l in utils.uniqify(Aggregates['__aggregates__']):
Avars = l.split(',')
HH[l] = fast.recarrayequalspairs(X[Avars][Diffs[:-1]],
Aggregates[Avars])
Order = []
for i in range(len(Diffs)-1):
Order.extend(range(Diffs[i], Diffs[i+1]))
Get = []
for l in HH.keys():
Get += [len(X) + j for j in
HH[l][2][range(HH[l][0][i], HH[l][1][i])] if
len(set(DiffAtts[i]).intersection(
Aggregates['__aggregates__'][j].split(','))) > 0 and
set(Aggregates['__aggregates__'][j].split(',')) ==
set(l.split(','))]
Order.extend(Get)
return rowstack([X, Aggregates], mode='nulls')[Order]
def force_type(cls, response, environ=None):
"""Enforce that the WSGI response is a response object of the current
type. Werkzeug will use the :class:`BaseResponse` internally in many
situations like the exceptions. If you call :meth:`get_response` on an
exception you will get back a regular :class:`BaseResponse` object, even
if you are using a custom subclass.
This method can enforce a given response type, and it will also
convert arbitrary WSGI callables into response objects if an environ
is provided::
# convert a Werkzeug response object into an instance of the
# MyResponseClass subclass.
response = MyResponseClass.force_type(response)
# convert any WSGI application into a response object
response = MyResponseClass.force_type(response, environ)
This is especially useful if you want to post-process responses in
the main dispatcher and use functionality provided by your subclass.
Keep in mind that this will modify response objects in place if
possible!
:param response: a response object or wsgi application.
:param environ: a WSGI environment object.
:return: a response object.
"""
if not isinstance(response, BaseResponse):
if environ is None:
raise TypeError(
"cannot convert WSGI application into response"
" objects without an environ"
)
response = BaseResponse(*_run_wsgi_app(response, environ))
response.__class__ = cls
return response
def update_history(self) -> None:
"""
Update messaging history on disk.
:returns: None
"""
self.log.debug(f"Saving history. History is: \n{self.history}")
jsons = []
for item in self.history:
json_item = item.__dict__
# Convert sub-entries into JSON as well.
json_item["output_records"] = self._parse_output_records(item)
jsons.append(json_item)
if not path.isfile(self.history_filename):
open(self.history_filename, "a+").close()
with open(self.history_filename, "w") as f:
json.dump(jsons, f, default=lambda x: x.__dict__.copy(), sort_keys=True, indent=4)
f.write("\n") | Update messaging history on disk.
:returns: None |
def create(self, session):
"""
caches the session and caches an entry to associate the cached session
with the subject
"""
sessionid = super().create(session) # calls _do_create and verify
self._cache(session, sessionid)
return sessionid
def trim(self):
"""
Trim leading and trailing whitespace.
@return: self
@rtype: L{Element}
"""
if self.hasText():
self.text = self.text.trim()
return self
def get_value(self, subsystem, option):
"""
Read the given value from the given subsystem.
Do not include the subsystem name in the option name.
Only call this method if the given subsystem is available.
"""
assert subsystem in self, 'Subsystem {} is missing'.format(subsystem)
return util.read_file(self.per_subsystem[subsystem], subsystem + '.' + option)
def query_status(self):
'''Query the hub for the status of this command'''
try:
data = self.api_iface._api_get(self.link)
self._update_details(data)
except APIError as e:
print("API error: ")
for key, value in e.data.iteritems():
print(str(key) + ": " + str(value))
def ef_plugin(service_name):
"""
Decorator for ef plugin classes. Any wrapped classes should contain a run() method which executes the plugin code.
Args:
service_name (str): The name of the service being extended.
Example:
@ef_plugin('ef-generate')
class NewRelicPlugin(object):
def run(self):
exec_code()
"""
def class_rebuilder(cls):
class EFPlugin(cls):
"""
Base class of ef-plugins. Defines which service is extended and provides access to the current instance of
EFContext to the plugin.
Args:
context (obj:EFContext): Instance of EFContext created by ef-open command line tool
clients (dict): Dictionary of boto3 clients created by ef_utils.create_aws_clients()
"""
def __init__(self, context, clients):
self.service = service_name
self.context = context
self.clients = clients
self.oInstance = cls()
def __getattribute__(self, s):
"""
This is called whenever any attribute of a EFPlugin object is accessed. This function first tries to
get the attribute off EFPlugin. If it fails then it tries to fetch the attribute from self.oInstance
(an instance of the decorated class).
"""
try:
x = super(EFPlugin, self).__getattribute__(s)
except AttributeError:
pass
else:
return x
return self.oInstance.__getattribute__(s)
return EFPlugin
return class_rebuilder
def clusters(points, radius):
"""
Find clusters of points which have neighbours closer than radius
Parameters
---------
points : (n, d) float
Points of dimension d
radius : float
Max distance between points in a cluster
Returns
----------
groups : (m,) sequence of int
Indices of points in a cluster
"""
from . import graph
tree = cKDTree(points)
# some versions return pairs as a set of tuples
pairs = tree.query_pairs(r=radius, output_type='ndarray')
# group connected components
groups = graph.connected_components(pairs)
return groups
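
The function relies on a module-local graph helper, so as a rough standalone sketch of the same idea, SciPy's connected_components can stand in for it (the points and radius below are illustrative):

import numpy as np
from scipy.spatial import cKDTree
from scipy.sparse import coo_matrix
from scipy.sparse.csgraph import connected_components

points = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0], [9.0, 0.0]])
radius = 0.5
tree = cKDTree(points)
pairs = tree.query_pairs(r=radius, output_type='ndarray')   # (k, 2) array of index pairs
n = len(points)
# Build an adjacency matrix from the neighbour pairs and label connected components.
adj = coo_matrix((np.ones(len(pairs)), (pairs[:, 0], pairs[:, 1])), shape=(n, n))
n_groups, labels = connected_components(adj, directed=False)
print(labels)   # e.g. [0 0 1 1 2]: points 0/1 form one cluster, 2/3 another, 4 is alone
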
def czdivide(a, b, null=0):
'''
czdivide(a, b) returns the quotient a / b as a numpy array object. Like numpy's divide function
or a/b syntax, czdivide will thread over the latest dimension possible. Unlike numpy's divide,
czdivide works with sparse matrices. Additionally, czdivide multiplies a by the zinv of b, so
divide-by-zero entries are replaced with 0 in the result.
The optional argument null (default: 0) may be given to specify that zeros in the array b should
instead be replaced with the given value in the result. Note that if this value is not equal to
0, then any sparse array passed as argument b must be reified.
The czdivide function never raises an error due to divide-by-zero; if you desire this behavior,
use the cdivide function instead.
'''
if null == 0: return a.multiply(zinv(b)) if sps.issparse(a) else a * zinv(b)
elif sps.issparse(b): b = b.toarray()
else: b = np.asarray(b)
z = np.isclose(b, 0)
q = np.logical_not(z)
zi = q / (b + z)
if sps.issparse(a):
r = a.multiply(zi).tocsr()
else:
r = np.asarray(a) * zi
r[np.ones(a.shape, dtype=bool)*z] = null
return r
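
A dense-array example of the null handling (assumes the function above plus the surrounding module's numpy/scipy imports and its zinv helper are available):

import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0])
b = np.array([2.0, 0.0, 3.0, 0.0])
print(czdivide(a, b))               # -> [0.5 0.  1.  0. ]   divide-by-zero entries become 0
print(czdivide(a, b, null=np.nan))  # -> [0.5 nan 1.  nan]   ... or the requested null value
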
def create_message(self, params={}):
"""
Creates a message
http://dev.wheniwork.com/#create/update-message
"""
url = "/2/messages/"
body = params
data = self._post_resource(url, body)
return self.message_from_json(data["message"])
def _manipulate(self, *args, **kwargs):
"""
This is a semi-private method. Its current use is to
manipulate memory file system objects so that
you can create certain conditions, to provoke
errors that otherwise won't occur.
"""
self.connection._manipulate(self, *args, **kwargs)
def create(self, to, from_, parameters=values.unset):
"""
Create a new ExecutionInstance
:param unicode to: The Contact phone number to start a Studio Flow Execution.
:param unicode from_: The Twilio phone number to send messages or initiate calls from during the Flow Execution.
:param dict parameters: JSON data that will be added to your flow's context and can be accessed as variables inside your flow.
:returns: Newly created ExecutionInstance
:rtype: twilio.rest.studio.v1.flow.execution.ExecutionInstance
"""
data = values.of({'To': to, 'From': from_, 'Parameters': serialize.object(parameters), })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return ExecutionInstance(self._version, payload, flow_sid=self._solution['flow_sid'], )
def multipointm(self, points):
"""Creates a MULTIPOINTM shape.
Points is a list of xym values.
If the m (measure) value is not included, it defaults to None (NoData)."""
shapeType = MULTIPOINTM
points = [points] # nest the points inside a list to be compatible with the generic shapeparts method
self._shapeparts(parts=points, shapeType=shapeType)
def parse_issues(raw_page):
"""Parse a JIRA API raw response.
The method parses the API response retrieving the
issues from the received items
:param items: items from where to parse the issues
:returns: a generator of issues
"""
raw_issues = json.loads(raw_page)
issues = raw_issues['issues']
for issue in issues:
yield issue
def main(argv=None):
"""Run a Tensorflow model on the Iris dataset."""
args = parse_arguments(sys.argv if argv is None else argv)
tf.logging.set_verbosity(tf.logging.INFO)
learn_runner.run(
experiment_fn=get_experiment_fn(args),
output_dir=args.job_dir)
def underline(self, msg):
"""Underline the input"""
return click.style(msg, underline=True) if self.colorize else msg
def setup_logger():
"""Return a logger with a default ColoredFormatter."""
formatter = ColoredFormatter(
"%(log_color)s%(levelname)-8s%(reset)s %(blue)s%(message)s",
datefmt=None,
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
)
logger = logging.getLogger('example')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
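
Typical usage (assumes ColoredFormatter has been imported from colorlog, as the function requires):

logger = setup_logger()
logger.debug("starting up")        # printed in cyan
logger.info("connected")           # printed in green
logger.warning("cache is stale")   # printed in yellow
logger.error("request failed")     # printed in red
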
def _extract_field_with_regex(self, field):
""" extract field from response content with regex.
requests.Response body could be json or html text.
Args:
field (str): regex string that matched r".*\(.*\).*"
Returns:
str: matched content.
Raises:
exceptions.ExtractFailure: If no content matched with regex.
Examples:
>>> # self.text: "LB123abcRB789"
>>> field = "LB[\d]*(.*)RB[\d]*"
>>> _extract_field_with_regex(field)
abc
"""
matched = re.search(field, self.text)
if not matched:
err_msg = u"Failed to extract data with regex! => {}\n".format(field)
err_msg += u"response body: {}\n".format(self.text)
logger.log_error(err_msg)
raise exceptions.ExtractFailure(err_msg)
return matched.group(1)
def wrap_targets(self, targets, topological_order=False):
"""Wrap targets and their computed cache keys in VersionedTargets.
If the FingerprintStrategy opted out of providing a fingerprint for a target, that target will not
have an associated VersionedTarget returned.
Returns a list of VersionedTargets, each representing one input target.
"""
def vt_iter():
if topological_order:
target_set = set(targets)
sorted_targets = [t for t in reversed(sort_targets(targets)) if t in target_set]
else:
sorted_targets = sorted(targets)
for target in sorted_targets:
target_key = self._key_for(target)
if target_key is not None:
yield VersionedTarget(self, target, target_key)
return list(vt_iter())
def run_actor(self, actor):
'''Start running the ``actor``.
'''
set_actor(actor)
if not actor.mailbox.address:
address = ('127.0.0.1', 0)
actor._loop.create_task(
actor.mailbox.start_serving(address=address)
)
actor._loop.run_forever()
def add_arg(self, arg):
""" Add an argument
"""
if not isinstance(arg, File):
arg = str(arg)
self._args += [arg]
def split_from_df(self, col:IntsOrStrs=2):
"Split the data from the `col` in the dataframe in `self.inner_df`."
valid_idx = np.where(self.inner_df.iloc[:,df_names_to_idx(col, self.inner_df)])[0]
return self.split_by_idx(valid_idx)
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'key') and self.key is not None:
_dict['key'] = self.key._to_dict()
if hasattr(self, 'value') and self.value is not None:
_dict['value'] = self.value._to_dict()
return _dict
def get_detail(self):
"""
Personal information; the return value is also stored in self.detail.
:return: information of student
:rtype: dict
"""
response = self._post("http://bkjws.sdu.edu.cn/b/grxx/xs/xjxx/detail",
data=None)
if response['result'] == 'success':
self._detail = response['object']
return self._detail
else:
self._unexpected(response)
def lms(args):
"""
%prog lms
ALLMAPS cartoon to illustrate LMS metric.
"""
from random import randint
from jcvi.graphics.chromosome import HorizontalChromosome
p = OptionParser(lms.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="6x6", dpi=300)
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
# Panel A
w, h = .7, .35
ax = fig.add_axes([.15, .6, w, h])
xdata = [x + randint(-3, 3) for x in range(10, 110, 10)]
ydata = [x + randint(-3, 3) for x in range(10, 110, 10)]
ydata[3:7] = ydata[3:7][::-1]
xydata = zip(xdata, ydata)
lis = xydata[:3] + [xydata[4]] + xydata[7:]
lds = xydata[3:7]
xlis, ylis = zip(*lis)
xlds, ylds = zip(*lds)
ax.plot(xlis, ylis, "r-", lw=12, alpha=.3,
solid_capstyle="round", solid_joinstyle="round")
ax.plot(xlds, ylds, "g-", lw=12, alpha=.3,
solid_capstyle="round", solid_joinstyle="round")
ax.plot(xdata, ydata, "k.", mec="k", mfc="w", mew=3, ms=12)
HorizontalChromosome(root, .15, .15 + w, .57, height=.02, lw=2)
root.text(.15 + w / 2, .55, "Chromosome location (bp)", ha="center", va="top")
ax.text(80, 30, "LIS = 7", color="r", ha="center", va="center")
ax.text(80, 20, "LDS = 4", color="g", ha="center", va="center")
ax.text(80, 10, "LMS = $max$(LIS, LDS) = 7", ha="center", va="center")
normalize_lms_axis(ax, xlim=110, ylim=110)
# Panel B
w = .37
p = (0, 45, 75, 110)
ax = fig.add_axes([.1, .12, w, h])
xdata = [x for x in range(10, 110, 10)]
ydata = ydata_orig = [x for x in range(10, 110, 10)]
ydata = ydata[:4] + ydata[7:] + ydata[4:7][::-1]
xydata = zip(xdata, ydata)
lis = xydata[:7]
xlis, ylis = zip(*lis)
ax.plot(xlis, ylis, "r-", lw=12, alpha=.3,
solid_capstyle="round", solid_joinstyle="round")
ax.plot(xdata, ydata, "k.", mec="k", mfc="w", mew=3, ms=12)
ax.vlines(p, 0, 110, colors="beige", lw=3)
normalize_lms_axis(ax, xlim=110, ylim=110)
patch = [.1 + w * x / 110. for x in p]
HorizontalChromosome(root, .1, .1 + w, .09, patch=patch,
height=.02, lw=2)
scaffolds = ("a", "b", "c")
for i, s in enumerate(scaffolds):
xx = (patch[i] + patch[i + 1]) / 2
root.text(xx, .09, s, va="center", ha="center")
root.text(.1 + w / 2, .04, "LMS($a||b||c$) = 7", ha="center")
# Panel C
ax = fig.add_axes([.6, .12, w, h])
patch = [.6 + w * x / 110. for x in p]
ydata = ydata_orig
ax.plot(xdata, ydata, "r-", lw=12, alpha=.3,
solid_capstyle="round", solid_joinstyle="round")
ax.plot(xdata, ydata, "k.", mec="k", mfc="w", mew=3, ms=12)
ax.vlines(p, [0], [110], colors="beige", lw=3)
normalize_lms_axis(ax, xlim=110, ylim=110)
HorizontalChromosome(root, .6, .6 + w, .09, patch=patch,
height=.02, lw=2)
scaffolds = ("a", "-c", "b")
for i, s in enumerate(scaffolds):
xx = (patch[i] + patch[i + 1]) / 2
root.text(xx, .09, s, va="center", ha="center")
root.text(.6 + w / 2, .04, "LMS($a||-c||b$) = 10", ha="center")
labels = ((.05, .95, 'A'), (.05, .48, 'B'), (.55, .48, 'C'))
panel_labels(root, labels)
normalize_axes(root)
pf = "lms"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def read_cell(self, x, y):
"""
Reads the cell at position x+1 and y+1; return value
:param x: line index
:param y: coll index
:return: {header: value}
"""
if isinstance(self.header[y], tuple):
header = self.header[y][0]
else:
header = self.header[y]
x += 1
y += 1
if self.strip:
    self._sheet.cell(x, y).value = self._sheet.cell(x, y).value.strip()
return {header: self._sheet.cell(x, y).value}
def get_app_template(name):
""" Getter function of templates for each applications.
Argument `name` will be interpreted as colon separated, the left value
means application name, right value means a template name.
get_app_template('blog:dashboarb.mako')
It will return a template for dashboard page of `blog` application.
"""
app_name, template_name = name.split(':')
return get_lookups()[app_name].get_template(template_name)
def typeseq(types):
"""
Returns an escape for a terminal text formatting type, or a list of types.
Valid types are:
* 'i' for 'italic'
* 'b' for 'bold'
* 'u' for 'underline'
* 'r' for 'reverse'
"""
ret = ""
for t in types:
ret += termcap.get(fmttypes[t])
return ret
def clear_cache():
"""Remove all cached objects"""
del Cache._keys
for k in list(Cache._cache.keys()):
it = Cache._cache.pop(k)
del it
del Cache._cache
Cache._keys = []
Cache._cache = {}
gc.collect()
def delete_asset(self):
"""
Delete asset from the release.
:rtype: bool
"""
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url
)
return True
def finalize(self):
"""
finalize simulation for consumer
"""
# todo sort self.result by path_num
if self.result:
self.result = sorted(self.result, key=lambda x: x[0])
p, r = map(list, zip(*self.result))
self.result = r
def stream_header(self, f):
"""Stream the block header in the standard way to the file-like object f."""
stream_struct("L##LLL", f, self.version, self.previous_block_hash,
self.merkle_root, self.timestamp, self.difficulty, self.nonce)
def auth(self, user, pwd):
"""
Perform a login with the given Skype username and its password. This emulates a login to Skype for Web on
``api.skype.com``.
Args:
user (str): username of the connecting account
pwd (str): password of the connecting account
Returns:
(str, datetime.datetime) tuple: Skype token, and associated expiry if known
Raises:
.SkypeAuthException: if the login request is rejected
.SkypeApiException: if the login form can't be processed
"""
# Wrap up the credentials ready to send.
pwdHash = base64.b64encode(hashlib.md5((user + "\nskyper\n" + pwd).encode("utf-8")).digest()).decode("utf-8")
json = self.conn("POST", "{0}/login/skypetoken".format(SkypeConnection.API_USER),
json={"username": user, "passwordHash": pwdHash, "scopes": "client"}).json()
if "skypetoken" not in json:
raise SkypeAuthException("Couldn't retrieve Skype token from response")
expiry = None
if "expiresIn" in json:
expiry = datetime.fromtimestamp(int(time.time()) + int(json["expiresIn"]))
return json["skypetoken"], expiry | Perform a login with the given Skype username and its password. This emulates a login to Skype for Web on
``api.skype.com``.
Args:
user (str): username of the connecting account
pwd (str): password of the connecting account
Returns:
(str, datetime.datetime) tuple: Skype token, and associated expiry if known
Raises:
.SkypeAuthException: if the login request is rejected
.SkypeApiException: if the login form can't be processed |
def set_project_pid(project, old_pid, new_pid):
""" Project's PID was changed. """
for datastore in _get_datastores():
datastore.save_project(project)
datastore.set_project_pid(project, old_pid, new_pid)
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
copy=True, raise_if_out_of_image=False, thickness=None):
"""
Draw all bounding boxes onto a given image.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the bounding boxes.
This image should usually have the same shape as
set in BoundingBoxesOnImage.shape.
color : int or list of int or tuple of int or (3,) ndarray, optional
The RGB color of all bounding boxes. If a single int ``C``, then
that is equivalent to ``(C,C,C)``.
alpha : float, optional
Alpha/transparency of the bounding box.
size : int, optional
Thickness in pixels.
copy : bool, optional
Whether to copy the image before drawing the bounding boxes.
raise_if_out_of_image : bool, optional
Whether to raise an exception if any bounding box is outside of the
image.
thickness : None or int, optional
Deprecated.
Returns
-------
image : (H,W,3) ndarray
Image with drawn bounding boxes.
"""
image = np.copy(image) if copy else image
for bb in self.bounding_boxes:
image = bb.draw_on_image(
image,
color=color,
alpha=alpha,
size=size,
copy=False,
raise_if_out_of_image=raise_if_out_of_image,
thickness=thickness
)
return image
def zero_disk(self, disk_xml=None):
""" Collector and publish not zeroed disk metrics
"""
troubled_disks = 0
for filer_disk in disk_xml:
raid_state = filer_disk.find('raid-state').text
if not raid_state == 'spare':
continue
is_zeroed = filer_disk.find('is-zeroed').text
if is_zeroed == 'false':
troubled_disks += 1
self.push('not_zeroed', 'disk', troubled_disks)
def close(self):
""" Stop overwriting display, or update parent. """
if self.parent:
self.parent.update(self.parent.offset + self.offset)
return
self.output.write("\n")
self.output.flush()
def get_version():
"""
Read version from __init__.py
"""
version_regex = re.compile(
'__version__\\s*=\\s*(?P<q>[\'"])(?P<version>\\d+(\\.\\d+)*(-(alpha|beta|rc)(\\.\\d+)?)?)(?P=q)'
)
here = path.abspath(path.dirname(__file__))
init_location = path.join(here, "CHAID/__init__.py")
with open(init_location) as init_file:
for line in init_file:
match = version_regex.search(line)
if not match:
raise Exception(
"Couldn't read version information from '{0}'".format(init_location)
)
return match.group('version')
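
To illustrate what the version pattern accepts, the regex can be exercised on its own (the version string below is made up):

import re

version_regex = re.compile(
    '__version__\\s*=\\s*(?P<q>[\'"])(?P<version>\\d+(\\.\\d+)*(-(alpha|beta|rc)(\\.\\d+)?)?)(?P=q)'
)
match = version_regex.search('__version__ = "5.3.0-rc.1"')
print(match.group('version'))   # -> 5.3.0-rc.1
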
def get_assets_by_query(self, asset_query=None):
"""Gets a list of ``Assets`` matching the given asset query.
arg: asset_query (osid.repository.AssetQuery): the asset
query
return: (osid.repository.AssetList) - the returned ``AssetList``
raise: NullArgument - ``asset_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - the ``asset_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
return AssetList(self._provider_session.get_assets_by_query(asset_query),
self._config_map)
def default(self, obj): # pylint: disable=method-hidden
"""Use the default behavior unless the object to be encoded has a
`strftime` attribute."""
if hasattr(obj, 'strftime'):
return obj.strftime("%Y-%m-%dT%H:%M:%SZ")
elif hasattr(obj, 'get_public_dict'):
return obj.get_public_dict()
else:
return json.JSONEncoder.default(self, obj)
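
A self-contained sketch of how such an encoder method is typically wired into json.dumps (the class name here is hypothetical):

import json
from datetime import datetime

class SketchEncoder(json.JSONEncoder):
    # Same fallback logic as the method above.
    def default(self, obj):
        if hasattr(obj, 'strftime'):
            return obj.strftime("%Y-%m-%dT%H:%M:%SZ")
        elif hasattr(obj, 'get_public_dict'):
            return obj.get_public_dict()
        return json.JSONEncoder.default(self, obj)

print(json.dumps({"created": datetime(2020, 1, 2, 3, 4, 5)}, cls=SketchEncoder))
# -> {"created": "2020-01-02T03:04:05Z"}
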
def install_service(instance, dbhost, dbname, port):
"""Install systemd service configuration"""
_check_root()
log("Installing systemd service")
launcher = os.path.realpath(__file__).replace('manage', 'launcher')
executable = sys.executable + " " + launcher
executable += " --instance " + instance
executable += " --dbname " + dbname + " --dbhost " + dbhost
executable += " --port " + port
executable += " --dolog --logfile /var/log/hfos-" + instance + ".log"
executable += " --logfileverbosity 30 -q"
definitions = {
'instance': instance,
'executable': executable
}
service_name = 'hfos-' + instance + '.service'
write_template_file(os.path.join('dev/templates', service_template),
os.path.join('/etc/systemd/system/', service_name),
definitions)
Popen([
'systemctl',
'enable',
service_name
])
log('Launching service')
Popen([
'systemctl',
'start',
service_name
])
log("Done: Install Service") | Install systemd service configuration |
def synchronise_signals(in_signal_1, in_signal_2):
"""
-----
Brief
-----
This function synchronises the input signals using the full cross correlation function between the signals.
-----------
Description
-----------
Signals acquired with two devices may be dephased. It is possible to synchronise the two signals by multiple
methods. Here, it is implemented a method that uses the calculus of the cross-correlation between those signals and
identifies the correct instant of synchrony.
This function synchronises the two input signals and returns the dephasing between them, and the resulting
synchronised signals.
----------
Parameters
----------
in_signal_1 : list or numpy.array
One of the input signals.
in_signal_2 : list or numpy.array
The other input signal.
Returns
-------
phase : int
The dephasing between signals in data points.
result_signal_1: list or numpy.array
The first signal synchronised.
result_signal_2: list or numpy.array
The second signal synchronised.
"""
mean_1, std_1, mean_2, std_2 = [np.mean(in_signal_1), np.std(in_signal_1), np.mean(in_signal_2),
np.std(in_signal_2)]
signal_1 = in_signal_1 - mean_1
signal_1 /= std_1
signal_2 = in_signal_2 - mean_2
signal_2 /= std_2
# Calculate the full cross-correlation between the two signals.
correlation = np.correlate(signal_1, signal_2, 'full')
# Finding the edge of the correct correlation signal
center = len(correlation) - len(signal_1) if len(signal_1) < len(signal_2) else len(correlation) - len(signal_2)
# Finding the position of the maximum value of the correlation signal
max_position = correlation.argmax()
# Calculating the difference between the center and the position of the maximum value
phase_straight = center - max_position
# Finding the position of the maximum value of the reversed signal (corr[::-1])
max_position_reversed = correlation[::-1].argmax()
# Calculating the difference between the center and the position of the maximum value in the reversed correlation
# signal
phase_reversed = center - max_position_reversed
# Calculate the dephasing between the signals. Maximum value of both results guarantees that we find the true
# dephasing of the signals
phases_aux = [phase_straight, phase_reversed]
phase = np.abs(phases_aux).argmax()
true_phase = np.abs(phases_aux[phase])
if phases_aux[0] < phases_aux[1]:
signal_1 = signal_1[true_phase:]
else:
signal_2 = signal_2[true_phase:]
result_signal_1 = signal_1 * std_1 + mean_1
result_signal_2 = signal_2 * std_2 + mean_2
return true_phase, result_signal_1, result_signal_2
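
A quick synthetic check (assumes the function above and numpy are in scope; the delay of 30 samples is arbitrary):

import numpy as np

t = np.linspace(0, 4 * np.pi, 400)
signal_a = np.sin(t)
signal_b = np.sin(t)[30:]   # the same signal, missing its first 30 samples
phase, synced_a, synced_b = synchronise_signals(signal_a, signal_b)
print(phase)                         # -> 30 for this construction
print(len(synced_a), len(synced_b))  # -> 370 370
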
def _advance_to_next_stage(self, config_ids, losses):
"""
SuccessiveHalving simply continues the best based on the current loss.
"""
ranks = np.argsort(np.argsort(losses))
return(ranks < self.num_configs[self.stage])
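
The double argsort turns raw losses into ranks; for example, keeping the two best of four configurations (the losses and the cutoff of 2, standing in for self.num_configs[self.stage], are illustrative):

import numpy as np

losses = np.array([0.7, 0.2, 0.9, 0.4])
ranks = np.argsort(np.argsort(losses))   # rank of each configuration by loss -> [2 0 3 1]
keep = ranks < 2                         # continue the 2 lowest-loss configurations
print(keep)                              # -> [False  True False  True]
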
def _parse_incval(incunit, incval):
''' Parse a non-day increment value. Should be an integer or a comma-separated integer list. '''
try:
retn = [int(val) for val in incval.split(',')]
except ValueError:
return None
return retn[0] if len(retn) == 1 else retn
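
Behaviour for the three input shapes it handles (the increment units below are only illustrative):

print(_parse_incval('hour', '3'))        # -> 3
print(_parse_incval('hour', '0,6,12'))   # -> [0, 6, 12]
print(_parse_incval('hour', 'abc'))      # -> None (unparseable value)
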
def change_state(self, item, state):
"""
Replace the current state of the item.
i.e. replace the current state tag but keeps the other tags.
:param item: item id
:type item: str
:param state: "checked", "unchecked" or "tristate": new state of the item
:type state: str
"""
tags = self.item(item, "tags")
states = ("checked", "unchecked", "tristate")
new_tags = [t for t in tags if t not in states]
new_tags.append(state)
self.item(item, tags=tuple(new_tags))
def npd_to_pmf(nodal_plane_dist, use_default=False):
"""
Returns the nodal plane distribution as an instance of the PMF class
"""
if isinstance(nodal_plane_dist, PMF):
# Already in PMF format - return
return nodal_plane_dist
else:
if use_default:
return PMF([(1.0, NodalPlane(0.0, 90.0, 0.0))])
else:
raise ValueError('Nodal Plane distribution not defined')
def from_command_line():
"""
Run CGI var to gVCF conversion from the command line.
"""
# Parse options
parser = argparse.ArgumentParser(
description='Convert Complete Genomics var files to gVCF format.')
parser.add_argument(
'-d', '--refseqdir', metavar='REFSEQDIR', required=True,
dest='refseqdir',
help='Directory twobit reference genomes files are stored.')
parser.add_argument(
'-i', '--input', metavar='INPUTVARFILE',
dest='cgivarfile',
help='Path to Complete Genomics var file to convert. If omitted, data '
'can also be piped in as standard input.')
parser.add_argument(
'-o', '--output', metavar='OUTPUTVCFFILE',
dest='vcfoutfile',
help='Path to where to save output VCF file.')
parser.add_argument(
'-D', '--download', action='store_true', dest='downloadrefseq',
help='Download the 2bit file from UCSC to REFSEQDIR, if needed.')
parser.add_argument(
'-v', '--var-only', action='store_true', dest='varonly',
help='Only report variant lines (i.e. VCF, but not gVCF)')
args = parser.parse_args()
# Get local twobit file from its directory. Download and store if needed.
twobit_path, twobit_name = get_reference_genome_file(
args.refseqdir, build='b37')
# Handle input
if sys.stdin.isatty(): # false if data is piped in
var_input = args.cgivarfile
else:
var_input = sys.stdin
# Handle output
if args.vcfoutfile:
convert_to_file(var_input,
args.vcfoutfile,
twobit_path,
twobit_name,
args.varonly)
else:
for line in convert(
cgi_input=var_input,
twobit_ref=twobit_path,
twobit_name=twobit_name,
var_only=args.varonly):
print(line)
def apply_T4(word): # OPTIMIZE
'''An agglutination diphthong that ends in /u, y/ usually contains a
syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us],
[va.ka.ut.taa].'''
WORD = _split_consonants_and_vowels(word)
for k, v in WORD.iteritems():
if len(v) == 2 and v.endswith(('u', 'y')):
if WORD.get(k + 2, 0):
if not WORD.get(k + 3, 0):
if len(WORD[k + 2]) == 1 and is_consonant(WORD[k + 2]):
WORD[k] = v[0] + '.' + v[1]
elif len(WORD[k + 1]) == 1 and WORD.get(k + 3, 0):
if is_consonant(WORD[k + 3][0]):
WORD[k] = v[0] + '.' + v[1]
elif len(WORD[k + 2]) == 2:
WORD[k] = v[0] + '.' + v[1]
word = _compile_dict_into_word(WORD)
return word
def get_exported(self):
"""Get a new dict with the exported variables."""
return dict((k, self.vars[k]) for k in self.exported_vars)
def process(self, block=True):
'''process and display graph'''
self.msg_types = set()
self.multiplier = []
self.field_types = []
# work out msg types we are interested in
self.x = []
self.y = []
self.modes = []
self.axes = []
self.first_only = []
re_caps = re.compile('[A-Z_][A-Z0-9_]+')
for f in self.fields:
caps = set(re.findall(re_caps, f))
self.msg_types = self.msg_types.union(caps)
self.field_types.append(caps)
self.y.append([])
self.x.append([])
self.axes.append(1)
self.first_only.append(False)
if self.labels is not None:
labels = self.labels.split(',')
if len(labels) != len(self.fields)*len(self.mav_list):
    print("Number of labels (%u) must match number of fields (%u)" % (
        len(labels), len(self.fields)*len(self.mav_list)))
return
else:
labels = None
timeshift = self.timeshift
for fi in range(0, len(self.mav_list)):
mlog = self.mav_list[fi]
self.process_mav(mlog, timeshift)
timeshift = 0
for i in range(0, len(self.x)):
if self.first_only[i] and fi != 0:
self.x[i] = []
self.y[i] = []
if labels:
lab = labels[fi*len(self.fields):(fi+1)*len(self.fields)]
else:
lab = self.fields[:]
if self.multi:
col = colors[:]
else:
col = colors[fi*len(self.fields):]
self.plotit(self.x, self.y, lab, colors=col)
for i in range(0, len(self.x)):
self.x[i] = []
self.y[i] = []
pylab.draw()
def disabledPenColor(self):
"""
Returns the disabled pen color for this node.
:return <QColor>
"""
palette = self.palette()
return palette.color(palette.Disabled, palette.NodeForeground)
def convert_to_btc_on(self, amount, currency, date_obj):
"""
Convert X amount to BTC based on given date rate
"""
if isinstance(amount, Decimal):
use_decimal = True
else:
use_decimal = self._force_decimal
start = date_obj.strftime('%Y-%m-%d')
end = date_obj.strftime('%Y-%m-%d')
url = (
'https://api.coindesk.com/v1/bpi/historical/close.json'
'?start={}&end={}¤cy={}'.format(
start, end, currency
)
)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi', {}).get(start, None)
if price:
if use_decimal:
price = Decimal(price)
try:
converted_btc = amount/price
return converted_btc
except TypeError:
raise DecimalFloatMismatchError("convert_to_btc_on requires amount parameter is of type Decimal when force_decimal=True")
raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given Date") | Convert X amount to BTC based on given date rate |
def get_word_at(self, index: int) -> Union[int, BitVec]:
"""Access a word from a specified memory index.
:param index: integer representing the index to access
:return: 32 byte word at the specified index
"""
try:
return symbol_factory.BitVecVal(
util.concrete_int_from_bytes(
bytes([util.get_concrete_int(b) for b in self[index : index + 32]]),
0,
),
256,
)
except TypeError:
result = simplify(
Concat(
[
b if isinstance(b, BitVec) else symbol_factory.BitVecVal(b, 8)
for b in cast(
List[Union[int, BitVec]], self[index : index + 32]
)
]
)
)
assert result.size() == 256
return result
def port_has_listener(address, port):
"""
Returns True if the address:port is open and being listened to,
else False.
@param address: an IP address or hostname
@param port: integer port
Note: calls 'nc' via a subprocess
"""
cmd = ['nc', '-z', address, str(port)]
result = subprocess.call(cmd)
return not(bool(result))
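
Usage sketch (requires the nc binary on PATH; the address and ports are examples only):

print(port_has_listener('127.0.0.1', 22))     # True if an SSH daemon is listening locally
print(port_has_listener('127.0.0.1', 65000))  # almost certainly False
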
def service(
state, host,
*args, **kwargs
):
'''
Manage the state of services. This command checks for the presence of all the
init systems pyinfra can handle and executes the relevant operation. See init
system specific operations for arguments.
'''
if host.fact.which('systemctl'):
yield systemd(state, host, *args, **kwargs)
return
if host.fact.which('initctl'):
yield upstart(state, host, *args, **kwargs)
return
if host.fact.directory('/etc/init.d'):
yield d(state, host, *args, **kwargs)
return
if host.fact.directory('/etc/rc.d'):
yield rc(state, host, *args, **kwargs)
return
raise OperationError((
'No init system found '
'(no systemctl, initctl, /etc/init.d or /etc/rc.d found)'
))
def intword(value, format='%.1f'):
"""Converts a large integer to a friendly text representation. Works best for
numbers over 1 million. For example, 1000000 becomes '1.0 million', 1200000
becomes '1.2 million' and '1200000000' becomes '1.2 billion'. Supports up to
decillion (33 digits) and googol (100 digits). You can pass format to change
the number of decimal or general format of the number portion. This function
returns a string unless the value passed was unable to be coaxed into an int."""
try:
value = int(value)
except (TypeError, ValueError):
return value
if value < powers[0]:
return str(value)
for ordinal, power in enumerate(powers[1:], 1):
if value < power:
chopped = value / float(powers[ordinal - 1])
return (' '.join([format, _(human_powers[ordinal - 1])])) % chopped
return str(value) | Converts a large integer to a friendly text representation. Works best for
numbers over 1 million. For example, 1000000 becomes '1.0 million', 1200000
becomes '1.2 million' and '1200000000' becomes '1.2 billion'. Supports up to
decillion (33 digits) and googol (100 digits). You can pass format to change
the number of decimal or general format of the number portion. This function
returns a string unless the value passed was unable to be coaxed into an int. |
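Illustrative calls mirroring the behaviour described in the docstring above.
intword(1000000)           # -> '1.0 million'
intword(1200000000)        # -> '1.2 billion'
intword(1200000, '%.2f')   # -> '1.20 million' (custom format for the number portion)
intword(45)                # -> '45' (below the first power, returned as a plain string)
intword('not a number')    # returned unchanged, since int() coercion fails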
def execute(self, source, splitting_stream, sinks, interval, meta_data_id, output_plate_values):
"""
Execute the tool over the given time interval.
:param source: The source stream
:param splitting_stream: The stream over which to split
:param sinks: The sink streams
:param interval: The time interval
:param meta_data_id: The meta data id of the output plate
:param output_plate_values: The values of the plate where data is put onto
:type source: Stream
:type sinks: list[Stream] | tuple[Stream]
:type interval: TimeInterval
:type meta_data_id: str
:type output_plate_values: list | tuple
:return: None
"""
if not isinstance(interval, TimeInterval):
raise TypeError('Expected TimeInterval, got {}'.format(type(interval)))
# logging.info(self.message(interval))
calculated_intervals = None
for sink in sinks:
if interval.end > sink.channel.up_to_timestamp:
raise ValueError(
'The stream is not available after {} and cannot be calculated'.format(
sink.channel.up_to_timestamp))
if calculated_intervals is None:
calculated_intervals = sink.calculated_intervals
continue
if sink.calculated_intervals != calculated_intervals:
# TODO: What we actually want to do here is find any parts of the sinks that haven't been calculated,
# and recompute all of the sinks for that time period. This would only happen if computation of one of
# the sinks failed for some reason. For now we will just assume that all sinks have been computed the
# same amount, and we will raise an exception if this is not the case
raise RuntimeError("Partially executed sinks not yet supported")
required_intervals = TimeIntervals([interval]) - calculated_intervals
if not required_intervals.is_empty:
document_count = 0
for interval in required_intervals:
for item in self._execute(
source=source,
splitting_stream=splitting_stream,
interval=interval,
meta_data_id=meta_data_id,
output_plate_values=output_plate_values):
# Join the output meta data with the parent plate meta data
# meta_data = input_plate_value + (item.meta_data,) if input_plate_value else (item.meta_data, )
meta_data = item.meta_data if isinstance(item.meta_data[0], tuple) else (item.meta_data,)
try:
# sink = next(s for s in sinks if set(s.stream_id.meta_data) == set(meta_data))
sink = next(s for s in sinks if all(m in s.stream_id.meta_data for m in meta_data))
sink.writer(item.stream_instance)
document_count += 1
except StopIteration:
logging.warning("A multi-output tool has produced a value {} "
"which does not belong to the output plate".format(meta_data))
continue
except TypeError:
logging.error("A multi-output tool has produced a value {} "
"which cannot be hashed and does not belong to the output plate"
.format(meta_data))
if not document_count:
logging.debug("{} did not produce any data for time interval {} on stream {}".format(
self.name, required_intervals, source))
self.write_to_history(
interval=interval,
tool=self.name,
document_count=document_count
) | Execute the tool over the given time interval.
:param source: The source stream
:param splitting_stream: The stream over which to split
:param sinks: The sink streams
:param interval: The time interval
:param meta_data_id: The meta data id of the output plate
:param output_plate_values: The values of the plate where data is put onto
:type source: Stream
:type sinks: list[Stream] | tuple[Stream]
:type interval: TimeInterval
:type meta_data_id: str
:type output_plate_values: list | tuple
:return: None |
def kill_mprocess(process):
"""kill process
Args:
process - Popen object for process
"""
if process and proc_alive(process):
process.terminate()
process.communicate()
return not proc_alive(process) | kill process
Args:
process - Popen object for process |
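A minimal sketch, assuming proc_alive is available from the same module.
import subprocess

proc = subprocess.Popen(['sleep', '60'])   # placeholder long-running process
stopped = kill_mprocess(proc)              # terminate(), wait via communicate(), then re-check
print(stopped)                             # True once the process is no longer alive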
def get_operator_output_port(self):
"""Get the output port of this exported stream.
Returns:
OperatorOutputPort: Output port of this exported stream.
"""
return OperatorOutputPort(self.rest_client.make_request(self.operatorOutputPort), self.rest_client) | Get the output port of this exported stream.
Returns:
OperatorOutputPort: Output port of this exported stream. |
def pyramid(
input_raster,
output_dir,
pyramid_type=None,
output_format=None,
resampling_method=None,
scale_method=None,
zoom=None,
bounds=None,
overwrite=False,
debug=False
):
"""Create tile pyramid out of input raster."""
bounds = bounds if bounds else None
options = dict(
pyramid_type=pyramid_type,
scale_method=scale_method,
output_format=output_format,
resampling=resampling_method,
zoom=zoom,
bounds=bounds,
overwrite=overwrite
)
raster2pyramid(input_raster, output_dir, options) | Create tile pyramid out of input raster. |
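A hypothetical invocation; the paths are placeholders and the accepted form of the zoom argument depends on raster2pyramid, which is not shown here.
pyramid(
    'input_dem.tif',          # placeholder input raster
    'output_tiles/',          # placeholder output directory
    pyramid_type='mercator',
    resampling_method='bilinear',
    zoom=8,                   # assumed to accept a single zoom level
    overwrite=True,
)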
def _move_end_to_cxn(self, shape, cxn_pt_idx):
"""
Move the end point of this connector to the coordinates of the
connection point of *shape* specified by *cxn_pt_idx*.
"""
x, y, cx, cy = shape.left, shape.top, shape.width, shape.height
self.end_x, self.end_y = {
0: (int(x + cx/2), y),
1: (x, int(y + cy/2)),
2: (int(x + cx/2), y + cy),
3: (x + cx, int(y + cy/2)),
}[cxn_pt_idx] | Move the end point of this connector to the coordinates of the
connection point of *shape* specified by *cxn_pt_idx*. |
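The index-to-position mapping implied by the arithmetic above, written out for readability (derived from the code, not from separate documentation):
# cxn_pt_idx 0 -> top-center     (x + cx/2, y)
# cxn_pt_idx 1 -> middle-left    (x,        y + cy/2)
# cxn_pt_idx 2 -> bottom-center  (x + cx/2, y + cy)
# cxn_pt_idx 3 -> middle-right   (x + cx,   y + cy/2)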
def init0(self, dae):
"""
Set initial voltage and reactive power for PQ.
Overwrites Bus.voltage values
"""
dae.y[self.v] = self.v0
dae.y[self.q] = mul(self.u, self.qg) | Set initial voltage and reactive power for PQ.
Overwrites Bus.voltage values |
def verify_event_source_current(self, event_uuid, resource_name,
service_name, function_arn):
# type: (str, str, str, str) -> bool
"""Check if the uuid matches the resource and function arn provided.
Given a uuid representing an event source mapping for a lambda
function, verify that the associated source arn
and function arn match up to the parameters passed in.
Instead of providing the event source arn, the resource name
is provided along with the service name. For example, if we're
checking an SQS queue event source, the resource name would be
the queue name (e.g. ``myqueue``) and the service would be ``sqs``.
"""
client = self._client('lambda')
try:
attributes = client.get_event_source_mapping(UUID=event_uuid)
actual_arn = attributes['EventSourceArn']
arn_start, actual_name = actual_arn.rsplit(':', 1)
return (
actual_name == resource_name and
arn_start.startswith('arn:aws:%s' % service_name) and
attributes['FunctionArn'] == function_arn
)
except client.exceptions.ResourceNotFoundException:
return False | Check if the uuid matches the resource and function arn provided.
Given a uuid representing an event source mapping for a lambda
function, verify that the associated source arn
and function arn match up to the parameters passed in.
Instead of providing the event source arn, the resource name
is provided along with the service name. For example, if we're
checking an SQS queue event source, the resource name would be
the queue name (e.g. ``myqueue``) and the service would be ``sqs``. |
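A quick illustration of the ARN check performed above, using a made-up SQS ARN.
# Made-up values, only to show what rsplit(':', 1) yields.
actual_arn = 'arn:aws:sqs:us-west-2:123456789012:myqueue'
arn_start, actual_name = actual_arn.rsplit(':', 1)
# arn_start   == 'arn:aws:sqs:us-west-2:123456789012'
# actual_name == 'myqueue'
# The method then requires actual_name == resource_name,
# arn_start.startswith('arn:aws:sqs'), and a matching FunctionArn.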
def _check_split_list_validity(self):
"""
See _temporal_split_list above. This function checks if the current
split lists are still valid.
"""
# FIXME: Currently very primitive, but needs to be fast
if not (hasattr(self,"_splitListsSet") and (self._splitListsSet)):
return False
elif len(self) != self._splitListsLength:
return False
else:
return True | See _temporal_split_list above. This function checks if the current
split lists are still valid. |
def json(self):
"""Get the result of simplejson.loads if possible."""
if 'json' not in self.environ.get('CONTENT_TYPE', ''):
raise BadRequest('Not a JSON request')
try:
return loads(self.data)
except Exception:
raise BadRequest('Unable to read JSON request') | Get the result of simplejson.loads if possible. |
def rec_setattr(obj, attr, value):
"""Set object's attribute. May use dot notation.
>>> class C(object): pass
>>> a = C()
>>> a.b = C()
>>> a.b.c = 4
>>> rec_setattr(a, 'b.c', 2)
>>> a.b.c
2
"""
attrs = attr.split('.')
setattr(reduce(getattr, attrs[:-1], obj), attrs[-1], value) | Set object's attribute. May use dot notation.
>>> class C(object): pass
>>> a = C()
>>> a.b = C()
>>> a.b.c = 4
>>> rec_setattr(a, 'b.c', 2)
>>> a.b.c
2 |
def get_volume(self, datacenter_id, volume_id):
"""
Retrieves a single volume by ID.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param volume_id: The unique ID of the volume.
:type volume_id: ``str``
"""
response = self._perform_request(
'/datacenters/%s/volumes/%s' % (datacenter_id, volume_id))
return response | Retrieves a single volume by ID.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param volume_id: The unique ID of the volume.
:type volume_id: ``str`` |
def get(self, path_segment="", owner=None, app=None, sharing=None, **query):
"""Performs a GET operation on the path segment relative to this endpoint.
This method is named to match the HTTP method. This method makes at least
one roundtrip to the server, one additional round trip for
each 303 status returned, plus at most two additional round
trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
If *owner*, *app*, and *sharing* are omitted, this method takes a
default namespace from the :class:`Service` object for this :class:`Endpoint`.
All other keyword arguments are included in the URL as query parameters.
:raises AuthenticationError: Raised when the ``Service`` is not logged in.
:raises HTTPError: Raised when an error in the request occurs.
:param path_segment: A path segment relative to this endpoint.
:type path_segment: ``string``
:param owner: The owner context of the namespace (optional).
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
:param sharing: The sharing mode for the namespace (optional).
:type sharing: "global", "system", "app", or "user"
:param query: All other keyword arguments, which are used as query
parameters.
:type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
**Example**::
import splunklib.client
s = client.service(...)
apps = s.apps
apps.get() == \\
{'body': ...a response reader object...,
'headers': [('content-length', '26208'),
('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
('server', 'Splunkd'),
('connection', 'close'),
('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
('date', 'Fri, 11 May 2012 16:30:35 GMT'),
('content-type', 'text/xml; charset=utf-8')],
'reason': 'OK',
'status': 200}
apps.get('nonexistant/path') # raises HTTPError
s.logout()
apps.get() # raises AuthenticationError
"""
# self.path to the Endpoint is relative in the SDK, so passing
# owner, app, sharing, etc. along will produce the correct
# namespace in the final request.
if path_segment.startswith('/'):
path = path_segment
else:
path = self.service._abspath(self.path + path_segment, owner=owner,
app=app, sharing=sharing)
# ^-- This was "%s%s" % (self.path, path_segment).
# That doesn't work, because self.path may be UrlEncoded.
return self.service.get(path,
owner=owner, app=app, sharing=sharing,
**query) | Performs a GET operation on the path segment relative to this endpoint.
This method is named to match the HTTP method. This method makes at least
one roundtrip to the server, one additional round trip for
each 303 status returned, plus at most two additional round
trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
If *owner*, *app*, and *sharing* are omitted, this method takes a
default namespace from the :class:`Service` object for this :class:`Endpoint`.
All other keyword arguments are included in the URL as query parameters.
:raises AuthenticationError: Raised when the ``Service`` is not logged in.
:raises HTTPError: Raised when an error in the request occurs.
:param path_segment: A path segment relative to this endpoint.
:type path_segment: ``string``
:param owner: The owner context of the namespace (optional).
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
:param sharing: The sharing mode for the namespace (optional).
:type sharing: "global", "system", "app", or "user"
:param query: All other keyword arguments, which are used as query
parameters.
:type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
**Example**::
import splunklib.client
s = client.service(...)
apps = s.apps
apps.get() == \\
{'body': ...a response reader object...,
'headers': [('content-length', '26208'),
('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
('server', 'Splunkd'),
('connection', 'close'),
('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
('date', 'Fri, 11 May 2012 16:30:35 GMT'),
('content-type', 'text/xml; charset=utf-8')],
'reason': 'OK',
'status': 200}
apps.get('nonexistant/path') # raises HTTPError
s.logout()
apps.get() # raises AuthenticationError |
def _check_edgemap_registers(self, edge_map, keyregs, valregs, valreg=True):
"""Check that wiremap neither fragments nor leaves duplicate registers.
1. There are no fragmented registers. A register in keyregs
is fragmented if not all of its (qu)bits are renamed by edge_map.
2. There are no duplicate registers. A register is duplicate if
it appears in both self and keyregs but not in edge_map.
Args:
edge_map (dict): map from (reg,idx) in keyregs to (reg,idx) in valregs
keyregs (dict): a map from register names to Register objects
valregs (dict): a map from register names to Register objects
valreg (bool): if False the method ignores valregs and does not
add regs for bits in the edge_map image that don't appear in valregs
Returns:
set(Register): the set of regs to add to self
Raises:
DAGCircuitError: if the wiremap fragments, or duplicates exist
"""
# FIXME: some mixing of objects and strings here are awkward (due to
# self.qregs/self.cregs still keying on string.
add_regs = set()
reg_frag_chk = {}
for v in keyregs.values():
reg_frag_chk[v] = {j: False for j in range(len(v))}
for k in edge_map.keys():
if k[0].name in keyregs:
reg_frag_chk[k[0]][k[1]] = True
for k, v in reg_frag_chk.items():
s = set(v.values())
if len(s) == 2:
raise DAGCircuitError("edge_map fragments reg %s" % k)
elif s == set([False]):
if k in self.qregs.values() or k in self.cregs.values():
raise DAGCircuitError("unmapped duplicate reg %s" % k)
else:
# Add registers that appear only in keyregs
add_regs.add(k)
else:
if valreg:
# If mapping to a register not in valregs, add it.
# (k,0) exists in edge_map because edge_map doesn't
# fragment k
if not edge_map[(k, 0)][0].name in valregs:
size = max(map(lambda x: x[1],
filter(lambda x: x[0] == edge_map[(k, 0)][0],
edge_map.values())))
qreg = QuantumRegister(size + 1, edge_map[(k, 0)][0].name)
add_regs.add(qreg)
return add_regs | Check that wiremap neither fragments nor leaves duplicate registers.
1. There are no fragmented registers. A register in keyregs
is fragmented if not all of its (qu)bits are renamed by edge_map.
2. There are no duplicate registers. A register is duplicate if
it appears in both self and keyregs but not in edge_map.
Args:
edge_map (dict): map from (reg,idx) in keyregs to (reg,idx) in valregs
keyregs (dict): a map from register names to Register objects
valregs (dict): a map from register names to Register objects
valreg (bool): if False the method ignores valregs and does not
add regs for bits in the edge_map image that don't appear in valregs
Returns:
set(Register): the set of regs to add to self
Raises:
DAGCircuitError: if the wiremap fragments, or duplicates exist |
def info2lists(info, in_place=False):
"""
Return info with:
1) `packages` dict replaced by a 'packages' list with indexes removed
2) `releases` dict replaced by a 'releases' list with indexes removed
info2list(info2dicts(info)) == info
"""
if 'packages' not in info and 'releases' not in info:
return info
if in_place:
info_lists = info
else:
info_lists = info.copy()
packages = info.get('packages')
if packages:
info_lists['packages'] = list(packages.values())
releases = info.get('releases')
if releases:
info_lists['releases'] = list(releases.values())
return info_lists | Return info with:
1) `packages` dict replaced by a 'packages' list with indexes removed
2) `releases` dict replaced by a 'releases' list with indexes removed
info2list(info2dicts(info)) == info |
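An illustrative transformation with made-up data.
info = {'releases': {'0': {'version': '1.0'}, '1': {'version': '1.1'}}}
info2lists(info)
# -> {'releases': [{'version': '1.0'}, {'version': '1.1'}]}
# The input dict is left untouched unless in_place=True is passed.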
def send_async(
self,
queue_identifier: QueueIdentifier,
message: Message,
):
"""Queue the message for sending to recipient in the queue_identifier
It may be called before transport is started, to initialize message queues
The actual sending is started only when the transport is started
"""
# even if transport is not started, can run to enqueue messages to send when it starts
receiver_address = queue_identifier.recipient
if not is_binary_address(receiver_address):
raise ValueError('Invalid address {}'.format(pex(receiver_address)))
# These are not protocol messages, but transport specific messages
if isinstance(message, (Delivered, Ping, Pong)):
raise ValueError(
'Do not use send_async for {} messages'.format(message.__class__.__name__),
)
self.log.debug(
'Send async',
receiver_address=pex(receiver_address),
message=message,
queue_identifier=queue_identifier,
)
self._send_with_retry(queue_identifier, message) | Queue the message for sending to recipient in the queue_identifier
It may be called before transport is started, to initialize message queues
The actual sending is started only when the transport is started |
def plot_one_day(x, y, xlabel=None, ylabel=None, title=None, ylim=None):
"""时间跨度为一天。
major tick = every hours
minor tick = every 15 minutes
"""
plt.close("all")
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
ax.plot(x, y)
hours = HourLocator(range(24))
hoursFmt = DateFormatter("%H:%M")
minutes = MinuteLocator([30,])
minutesFmt = DateFormatter("%M")
ax.xaxis.set_major_locator(hours)
ax.xaxis.set_major_formatter(hoursFmt)
ax.xaxis.set_minor_locator(minutes)
ax.xaxis.set_minor_formatter(minutesFmt)
ax.autoscale_view()
ax.grid()
plt.setp( ax.xaxis.get_majorticklabels(), rotation=90 )
plt.setp( ax.xaxis.get_minorticklabels(), rotation=90 )
if xlabel:
plt.xlabel(xlabel)
else:
plt.xlabel("Time")
if ylabel:
plt.ylabel(ylabel)
else:
plt.ylabel("Value")
if title:
plt.title(title)
else:
plt.title(str(x[0].date()))
if ylim:
plt.ylim(ylim)
else:
plt.ylim([min(y) - (max(y) - min(y) ) * 0.05,
max(y) + (max(y) - min(y) ) * 0.05])
return plt, ax | The time span is one day.
major tick = every hour
minor tick = every 15 minutes |
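A minimal usage sketch with synthetic one-day data; assumes matplotlib and numpy are installed and that HourLocator, MinuteLocator and DateFormatter are already imported in the module.
import datetime
import numpy as np

start = datetime.datetime(2024, 1, 1)                                   # placeholder date
x = [start + datetime.timedelta(minutes=15 * i) for i in range(96)]     # one day, 15-min steps
y = np.sin(np.linspace(0, 2 * np.pi, 96))
fig_plt, ax = plot_one_day(x, y, ylabel='Load', title='Synthetic one-day signal')
fig_plt.show()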
def critical_path(self, print_cp=True, cp_limit=100):
""" Takes a timing map and returns the critical paths of the system.
:param print_cp: Whether to print the critical path to the terminal
after calculation
:return: a list containing tuples with the 'first' wire as the
first value and the critical paths (which themselves are lists
of nets) as the second
"""
critical_paths = [] # storage of all completed critical paths
wire_src_map, dst_map = self.block.net_connections()
def critical_path_pass(old_critical_path, first_wire):
if isinstance(first_wire, (Input, Const, Register)):
critical_paths.append((first_wire, old_critical_path))
return
if len(critical_paths) >= cp_limit:
raise self._TooManyCPsError()
source = wire_src_map[first_wire]
critical_path = [source]
critical_path.extend(old_critical_path)
arg_max_time = max(self.timing_map[arg_wire] for arg_wire in source.args)
for arg_wire in source.args:
# if the time for both items are the max, both will be on a critical path
if self.timing_map[arg_wire] == arg_max_time:
critical_path_pass(critical_path, arg_wire)
max_time = self.max_length()
try:
for wire_pair in self.timing_map.items():
if wire_pair[1] == max_time:
critical_path_pass([], wire_pair[0])
except self._TooManyCPsError:
print("Critical path count limit reached")
if print_cp:
self.print_critical_paths(critical_paths)
return critical_paths | Takes a timing map and returns the critical paths of the system.
:param print_cp: Whether to print the critical path to the terminal
after calculation
:return: a list containing tuples with the 'first' wire as the
first value and the critical paths (which themselves are lists
of nets) as the second |
def write_length_and_key(fp, value):
"""
Helper to write descriptor key.
"""
written = write_fmt(fp, 'I', 0 if value in _TERMS else len(value))
written += write_bytes(fp, value)
return written | Helper to write descriptor key. |
def get(self, uuid):
"""Workaround: missing get entry point"""
for token in self.list():
if token.get('uuid') == uuid:
return token
raise LinShareException(-1, "Cannot find uuid:" + uuid) | Workaround: missing get entry point |
def set(self, option, value):
"""
Sets an option to a value.
"""
if self.config is None:
self.config = {}
self.config[option] = value | Sets an option to a value. |
def demo(quiet, shell, speed, prompt, commentecho):
"""Run a demo doitlive session."""
run(
DEMO,
shell=shell,
speed=speed,
test_mode=TESTING,
prompt_template=prompt,
quiet=quiet,
commentecho=commentecho,
) | Run a demo doitlive session. |
def get_diplomacy(self):
"""Compute diplomacy."""
if not self._cache['teams']:
self.get_teams()
player_num = 0
computer_num = 0
for player in self._header.scenario.game_settings.player_info:
if player.type == 'human':
player_num += 1
elif player.type == 'computer':
computer_num += 1
total_num = player_num + computer_num
diplomacy = {
'FFA': (len(self._cache['teams']) == total_num) and total_num > 2,
'TG': len(self._cache['teams']) == 2 and total_num > 2,
'1v1': total_num == 2,
}
diplomacy['type'] = 'Other'
team_sizes = sorted([len(team) for team in self._cache['teams']])
diplomacy['team_size'] = 'v'.join([str(size) for size in team_sizes])
if diplomacy['FFA']:
diplomacy['type'] = 'FFA'
diplomacy['team_size'] = 'FFA'
elif diplomacy['TG']:
diplomacy['type'] = 'TG'
elif diplomacy['1v1']:
diplomacy['type'] = '1v1'
return diplomacy | Compute diplomacy. |
def basic_clean_str(string):
"""Tokenization/string cleaning for a datasets."""
string = re.sub(r"\n", " ", string) # '\n' --> ' '
string = re.sub(r"\'s", " \'s", string) # it's --> it 's
string = re.sub(r"\’s", " \'s", string)
string = re.sub(r"\'ve", " have", string) # they've --> they have
string = re.sub(r"\’ve", " have", string)
string = re.sub(r"\'t", " not", string) # can't --> can not
string = re.sub(r"\’t", " not", string)
string = re.sub(r"\'re", " are", string) # they're --> they are
string = re.sub(r"\’re", " are", string)
string = re.sub(r"\'d", "", string) # I'd (I had, I would) --> I
string = re.sub(r"\’d", "", string)
string = re.sub(r"\'ll", " will", string) # I'll --> I will
string = re.sub(r"\’ll", " will", string)
string = re.sub(r"\“", " ", string) # “a” --> “ a ”
string = re.sub(r"\”", " ", string)
string = re.sub(r"\"", " ", string) # "a" --> " a "
string = re.sub(r"\'", " ", string) # they' --> they '
string = re.sub(r"\’", " ", string) # they’ --> they ’
string = re.sub(r"\.", " . ", string) # they. --> they .
string = re.sub(r"\,", " , ", string) # they, --> they ,
string = re.sub(r"\!", " ! ", string)
string = re.sub(r"\-", " ", string) # "low-cost"--> lost cost
string = re.sub(r"\(", " ", string) # (they) --> ( they)
string = re.sub(r"\)", " ", string) # ( they) --> ( they )
string = re.sub(r"\]", " ", string) # they] --> they ]
string = re.sub(r"\[", " ", string) # they[ --> they [
string = re.sub(r"\?", " ", string) # they? --> they ?
string = re.sub(r"\>", " ", string) # they> --> they >
string = re.sub(r"\<", " ", string) # they< --> they <
string = re.sub(r"\=", " ", string) # easier= --> easier =
string = re.sub(r"\;", " ", string) # easier; --> easier ;
string = re.sub(r"\;", " ", string)
string = re.sub(r"\:", " ", string) # easier: --> easier :
string = re.sub(r"\"", " ", string) # easier" --> easier "
string = re.sub(r"\$", " ", string) # $380 --> $ 380
string = re.sub(r"\_", " ", string) # _100 --> _ 100
string = re.sub(r"\s{2,}", " ", string) # Akara is handsome --> Akara is handsome
return string.strip().lower() | Tokenization/string cleaning for datasets. |
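A worked example tracing a few of the substitutions above.
basic_clean_str("They're low-cost (really)!")
# 're -> ' are', then '-', '(' and ')' -> ' ', '!' -> ' ! ',
# repeated spaces collapsed, then strip().lower()
# -> "they are low cost really !"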
def log_to_syslog():
"""
Configure logging to syslog.
"""
# Get root logger
rl = logging.getLogger()
rl.setLevel('INFO')
# Stderr gets critical messages (mostly config/setup issues)
# only when not daemonized
stderr = logging.StreamHandler(stream=sys.stderr)
stderr.setLevel(logging.CRITICAL)
stderr.setFormatter(logging.Formatter(
'%(asctime)s %(name)s: %(levelname)s %(message)s'))
rl.addHandler(stderr)
# All interesting data goes to syslog, using root logger's loglevel
syslog = SysLogHandler(address='/dev/log', facility=SysLogHandler.LOG_MAIL)
syslog.setFormatter(logging.Formatter(
'%(name)s[%(process)d]: %(levelname)s %(message)s'))
rl.addHandler(syslog) | Configure logging to syslog. |
def _recv_ack(self, method_frame):
'''Receive an ack from the broker.'''
if self._ack_listener:
delivery_tag = method_frame.args.read_longlong()
multiple = method_frame.args.read_bit()
if multiple:
while self._last_ack_id < delivery_tag:
self._last_ack_id += 1
self._ack_listener(self._last_ack_id)
else:
self._last_ack_id = delivery_tag
self._ack_listener(self._last_ack_id) | Receive an ack from the broker. |
def allow(self, role, method, resource, with_children=True):
"""Add allowing rules.
:param role: Role of this rule.
:param method: Method to allow in rule, include GET, POST, PUT etc.
:param resource: Resource also view function.
:param with_children: Allow role's children in rule as well
if with_children is `True`
"""
if with_children:
for r in role.get_children():
permission = (r.get_name(), method, resource)
if permission not in self._allowed:
self._allowed.append(permission)
if role == 'anonymous':
permission = (role, method, resource)
else:
permission = (role.get_name(), method, resource)
if permission not in self._allowed:
self._allowed.append(permission) | Add allowing rules.
:param role: Role of this rule.
:param method: Method to allow in rule, include GET, POST, PUT etc.
:param resource: Resource also view function.
:param with_children: Allow role's children in rule as well
if with_children is `True` |
def to_float(option,value):
'''
Converts string values to floats when appropriate
'''
if type(value) is str:
try:
value=float(value)
except ValueError:
pass
return (option,value) | Converts string values to floats when appropriate |
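Illustrative behaviour of the option/value conversion:
to_float('gain', '1.5')   # -> ('gain', 1.5)    string coerced to float
to_float('name', 'abc')   # -> ('name', 'abc')  ValueError swallowed, value kept as-is
to_float('count', 3)      # -> ('count', 3)     non-string values pass through untouched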
def _skip_spaces_and_peek(self):
""" Skips all spaces and comments.
:return: The first character that follows the skipped spaces and comments or
None if the end of the json string has been reached.
"""
while 1:
# skipping spaces
self.skip_chars(self.end, lambda x: x in self.spaces)
c = self.peek()
if not self.params.allow_comments:
return c
if c != '/':
return c
d = self.peek(1)
if d == '/':
self.skip_to(self.pos + 2)
self._skip_singleline_comment()
elif d == '*':
self.skip_to(self.pos + 2)
self._skip_multiline_comment()
else:
return c | Skips all spaces and comments.
:return: The first character that follows the skipped spaces and comments or
None if the end of the json string has been reached. |
def features_properties_null_remove(obj):
"""
Remove any properties of features in the collection that have
entries mapping to a null (i.e., None) value
"""
features = obj['features']
for i in tqdm(range(len(features))):
if 'properties' in features[i]:
properties = features[i]['properties']
features[i]['properties'] = {p:properties[p] for p in properties if properties[p] is not None}
return obj | Remove any properties of features in the collection that have
entries mapping to a null (i.e., None) value |
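A small GeoJSON-style example with made-up data; tqdm must be installed since it drives the progress bar.
fc = {'features': [
    {'properties': {'name': 'Park', 'height': None}},
    {'geometry': None},   # no 'properties' key, left untouched
]}
features_properties_null_remove(fc)
# -> the first feature's properties become {'name': 'Park'}; the None-valued key is dropped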
def main():
"""
NAME
gobing.py
DESCRIPTION
calculates Bingham parameters from dec inc data
INPUT FORMAT
takes dec/inc as first two columns in space delimited file
SYNTAX
gobing.py [options]
OPTIONS
-f FILE to read from FILE
-F, specifies output file name
< filename for reading from standard input
OUTPUT
mean dec, mean inc, Eta, Deta, Ieta, Zeta, Zdec, Zinc, N
"""
if len(sys.argv) > 0:
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-f' in sys.argv: # ask for filename
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
else:
data=sys.stdin.readlines() # read in data from standard input
DIs= [] # set up list for dec inc data
ofile = ""
if '-F' in sys.argv: # set up output file
ind = sys.argv.index('-F')
ofile= sys.argv[ind+1]
out = open(ofile, 'w')
for line in data: # read in the data from standard input
if '\t' in line:
rec=line.split('\t') # split each line on space to get records
else:
rec=line.split() # split each line on space to get records
DIs.append((float(rec[0]),float(rec[1])))
#
bpars=pmag.dobingham(DIs)
output = '%7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %i' % (bpars["dec"],bpars["inc"],bpars["Eta"],bpars["Edec"],bpars["Einc"],bpars["Zeta"],bpars["Zdec"],bpars["Zinc"],bpars["n"])
if ofile == "":
print(output)
else:
out.write(output+'\n') | NAME
gobing.py
DESCRIPTION
calculates Bingham parameters from dec inc data
INPUT FORMAT
takes dec/inc as first two columns in space delimited file
SYNTAX
gobing.py [options]
OPTIONS
-f FILE to read from FILE
-F, specifies output file name
< filename for reading from standard input
OUTPUT
mean dec, mean inc, Eta, Deta, Ieta, Zeta, Zdec, Zinc, N |
def itemmeta(self):
"""Returns metadata for members of the collection.
Makes a single roundtrip to the server, plus two more at most if
the ``autologin`` field of :func:`connect` is set to ``True``.
:return: A :class:`splunklib.data.Record` object containing the metadata.
**Example**::
import splunklib.client as client
import pprint
s = client.connect(...)
pprint.pprint(s.apps.itemmeta())
{'access': {'app': 'search',
'can_change_perms': '1',
'can_list': '1',
'can_share_app': '1',
'can_share_global': '1',
'can_share_user': '1',
'can_write': '1',
'modifiable': '1',
'owner': 'admin',
'perms': {'read': ['*'], 'write': ['admin']},
'removable': '0',
'sharing': 'user'},
'fields': {'optional': ['author',
'configured',
'description',
'label',
'manageable',
'template',
'visible'],
'required': ['name'], 'wildcard': []}}
"""
response = self.get("_new")
content = _load_atom(response, MATCH_ENTRY_CONTENT)
return _parse_atom_metadata(content) | Returns metadata for members of the collection.
Makes a single roundtrip to the server, plus two more at most if
the ``autologin`` field of :func:`connect` is set to ``True``.
:return: A :class:`splunklib.data.Record` object containing the metadata.
**Example**::
import splunklib.client as client
import pprint
s = client.connect(...)
pprint.pprint(s.apps.itemmeta())
{'access': {'app': 'search',
'can_change_perms': '1',
'can_list': '1',
'can_share_app': '1',
'can_share_global': '1',
'can_share_user': '1',
'can_write': '1',
'modifiable': '1',
'owner': 'admin',
'perms': {'read': ['*'], 'write': ['admin']},
'removable': '0',
'sharing': 'user'},
'fields': {'optional': ['author',
'configured',
'description',
'label',
'manageable',
'template',
'visible'],
'required': ['name'], 'wildcard': []}} |
def _censor_with(x, range, value=None):
"""
Censor any values outside of range with ``None``
"""
return [val if range[0] <= val <= range[1] else value
for val in x] | Censor any values outside of range with ``None`` |
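Illustrative behaviour; the replacement value defaults to None but can be overridden.
_censor_with([-5, 3, 12], (0, 10))       # -> [None, 3, None]
_censor_with([-5, 3, 12], (0, 10), 0)    # -> [0, 3, 0]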
def _check_state_value(cls):
"""Check initial state value - if is proper and translate it.
Initial state is required.
"""
state_value = cls.context.get_config('initial_state', None)
state_value = state_value or getattr(
cls.context.new_class, cls.context.state_name, None
)
if not state_value:
raise ValueError(
"Empty state is disallowed, yet no initial state is given!"
)
state_value = (
cls.context
.new_meta['translator']
.translate(state_value)
)
cls.context.state_value = state_value | Check the initial state value - whether it is proper - and translate it.
Initial state is required. |