def connect_ssh(*args, **kwargs):
"""
Create a new connected :class:`SSHClient` instance. All arguments
are passed to :meth:`SSHClient.connect`.
"""
client = SSHClient()
client.connect(*args, **kwargs)
return client
def _iflat_tasks_wti(self, status=None, op="==", nids=None, with_wti=True):
"""
Generator that produces a flat sequence of tasks.
If status is not None, only the tasks with the specified status are selected.
nids is an optional list of node identifiers used to filter the tasks.
Returns:
(task, work_index, task_index) if with_wti is True else task
"""
nids = as_set(nids)
if status is None:
for wi, work in enumerate(self):
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if with_wti:
yield task, wi, ti
else:
yield task
else:
# Get the operator from the string.
op = operator_from_str(op)
# Accept Task.S_FLAG or string.
status = Status.as_status(status)
for wi, work in enumerate(self):
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if op(task.status, status):
if with_wti:
yield task, wi, ti
else:
yield task
def check_network_health(self):
r"""
This method checks the network's topological health by checking for:
(1) Isolated pores
(2) Islands or isolated clusters of pores
(3) Duplicate throats
(4) Bidirectional throats (i.e. symmetrical adjacency matrix)
(5) Headless throats
Returns
-------
A dictionary containing the offending pores or throat numbers under
each named key.
It also returns a list of which pores and throats should be trimmed
from the network to restore health. This list is a suggestion only,
and is based on keeping the largest cluster and trimming the others.
Notes
-----
- Does not yet check for duplicate pores
- Does not yet suggest which throats to remove
- This is just a 'check' and does not 'fix' the problems it finds
"""
health = HealthDict()
health['disconnected_clusters'] = []
health['isolated_pores'] = []
health['trim_pores'] = []
health['duplicate_throats'] = []
health['bidirectional_throats'] = []
health['headless_throats'] = []
health['looped_throats'] = []
# Check for headless throats
hits = sp.where(self['throat.conns'] > self.Np - 1)[0]
if sp.size(hits) > 0:
health['headless_throats'] = sp.unique(hits)
return health
# Check for throats that loop back onto the same pore
P12 = self['throat.conns']
hits = sp.where(P12[:, 0] == P12[:, 1])[0]
if sp.size(hits) > 0:
health['looped_throats'] = hits
# Check for individual isolated pores
Ps = self.num_neighbors(self.pores())
if sp.sum(Ps == 0) > 0:
health['isolated_pores'] = sp.where(Ps == 0)[0]
# Check for separated clusters of pores
temp = []
am = self.create_adjacency_matrix(fmt='coo', triu=True)
Cs = csg.connected_components(am, directed=False)[1]
if sp.unique(Cs).size > 1:
for i in sp.unique(Cs):
temp.append(sp.where(Cs == i)[0])
b = sp.array([len(item) for item in temp])
c = sp.argsort(b)[::-1]
for i in range(0, len(c)):
health['disconnected_clusters'].append(temp[c[i]])
if i > 0:
health['trim_pores'].extend(temp[c[i]])
# Check for duplicate throats
am = self.create_adjacency_matrix(fmt='csr', triu=True).tocoo()
hits = sp.where(am.data > 1)[0]
if len(hits):
mergeTs = []
hits = sp.vstack((am.row[hits], am.col[hits])).T
ihits = hits[:, 0] + 1j*hits[:, 1]
conns = self['throat.conns']
iconns = conns[:, 0] + 1j*conns[:, 1] # Convert to imaginary
for item in ihits:
mergeTs.append(sp.where(iconns == item)[0])
health['duplicate_throats'] = mergeTs
# Check for bidirectional throats
adjmat = self.create_adjacency_matrix(fmt='coo')
num_full = adjmat.sum()
temp = sprs.triu(adjmat, k=1)
num_upper = temp.sum()
if num_full > num_upper:
biTs = sp.where(self['throat.conns'][:, 0] >
self['throat.conns'][:, 1])[0]
health['bidirectional_throats'] = biTs.tolist()
return health
def cli(ctx, timeout, proxy, output, quiet, lyric, again):
"""A command tool to download NetEase-Music's songs."""
ctx.obj = NetEase(timeout, proxy, output, quiet, lyric, again)
def flush_to_index(self):
"""Flush changes in our configuration file to the index"""
assert self._smref is not None
# should always have a file here
assert not isinstance(self._file_or_files, BytesIO)
sm = self._smref()
if sm is not None:
index = self._index
if index is None:
index = sm.repo.index
# END handle index
index.add([sm.k_modules_file], write=self._auto_write)
sm._clear_cache()
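# Aside (added): self._smref above is a weak reference to the parent submodule, so calling
# it returns either the live object or None once it has been garbage collected. Minimal
# stdlib illustration of that pattern (the Submodule class here is purely a stand-in):
import weakref

class Submodule:
    pass

sm = Submodule()
ref = weakref.ref(sm)
print(ref() is sm)    # True while the object is alive
del sm
print(ref())          # None once the object has been collected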
def _getphoto_location(self,pid):
"""Asks fb for photo location information
returns tuple with lat,lon,accuracy
"""
logger.debug('%s - Getting location from fb'%(pid))
lat=None
lon=None
accuracy=None
resp=self.fb.photos_geo_getLocation(photo_id=pid)
if resp.attrib['stat']!='ok':
logger.error("%s - fb: photos_geo_getLocation failed with status: %s",\
resp.attrib['stat']);
return (None,None,None)
for location in resp.find('photo'):
lat=location.attrib['latitude']
lon=location.attrib['longitude']
accuracy=location.attrib['accuracy']
return (lat,lon,accuracy)
def view_page(name=None):
"""Serve a page name.
.. note:: this is a bottle view
* if the view is called with the POST method, write the new page
content to the file, commit the modification and then display the
html rendering of the restructured text file
* if the view is called with the GET method, directly display the html
rendering of the restructured text file
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
OPTIONAL
if no filename is given, first try to find an "index.rst" file in the
directory and serve it. If not found, serve the meta page __index__
Returns:
bottle response object
"""
if request.method == 'POST':
if name is None:
# new file
if len(request.forms.filename) > 0:
name = request.forms.filename
if name is not None:
filename = "{0}.rst".format(name)
file_handle = open(filename, 'w')
file_handle.write(request.forms.content.encode('utf-8'))
file_handle.close()
add_file_to_repo(filename)
commit(filename)
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if name is None:
# we try to find an index file
index_files = glob.glob("./[Ii][Nn][Dd][Ee][Xx].rst")
if len(index_files) == 0:
# not found
# redirect to __index__
return view_meta_index()
else:
name = index_files[0][2:-4]
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
html_body = publish_parts(file_handle.read(),
writer=AttowikiWriter(),
settings=None,
settings_overrides=None)['html_body']
history = commit_history("{0}.rst".format(name))
return template('page',
type="view",
name=name,
extended_name=None,
is_repo=check_repo(),
history=history,
gitref=None,
content=html_body)
else:
return static_file(name, '')
def send(node_name):
""" Send our information to a remote nago instance
Arguments:
node -- node_name or token for the node this data belongs to
"""
my_data = nago.core.get_my_info()
if not node_name:
node_name = nago.settings.get('server')
node = nago.core.get_node(node_name)
json_params = {}
json_params['node_name'] = node_name
json_params['key'] = "node_info"
for k, v in my_data.items():
nago.core.log("sending %s to %s" % (k, node['host_name']), level="notice")
json_params[k] = v
return node.send_command('info', 'post', node_name=node.token, key="node_info", **my_data)
def logger_init(level):
"""
Initialize the logger for this thread.
Sets the log level to ERROR (0), WARNING (1), INFO (2), or DEBUG (3),
depending on the argument `level`.
"""
levellist = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
handler = logging.StreamHandler()
fmt = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
handler.setFormatter(logging.Formatter(fmt))
logger = logging.root
logger.addHandler(handler)
logger.setLevel(levellist[level])
def serialize(self):
"""Serialize the full configuration to a single dictionary. For any
instance that has passed validate() (which happens in __init__), it
matches the Configuration contract.
Note that args are not serialized.
:returns dict: The serialized configuration.
"""
result = self.to_project_config(with_packages=True)
result.update(self.to_profile_info(serialize_credentials=True))
result['cli_vars'] = deepcopy(self.cli_vars)
return result
def traverse(self, id_=None):
"""Traverse groups and yield info dicts for jobs"""
if id_ is None:
id_ = self.group
nodes = r_client.smembers(_children_key(id_))
while nodes:
current_id = nodes.pop()
details = r_client.get(current_id)
if details is None:
# child has expired or been deleted, remove from :children
r_client.srem(_children_key(id_), current_id)
continue
details = self._decode(details)
if details['type'] == 'group':
children = r_client.smembers(_children_key(details['id']))
if children is not None:
nodes.update(children)
yield details
async def connect(
self,
hostname: str = None,
port: int = None,
source_address: DefaultStrType = _default,
timeout: DefaultNumType = _default,
loop: asyncio.AbstractEventLoop = None,
use_tls: bool = None,
validate_certs: bool = None,
client_cert: DefaultStrType = _default,
client_key: DefaultStrType = _default,
tls_context: DefaultSSLContextType = _default,
cert_bundle: DefaultStrType = _default,
) -> SMTPResponse:
"""
Initialize a connection to the server. Options provided to
:meth:`.connect` take precedence over those used to initialize the
class.
:keyword hostname: Server name (or IP) to connect to
:keyword port: Server port. Defaults to 25 if ``use_tls`` is
False, 465 if ``use_tls`` is True.
:keyword source_address: The hostname of the client. Defaults to the
result of :func:`socket.getfqdn`. Note that this call blocks.
:keyword timeout: Default timeout value for the connection, in seconds.
Defaults to 60.
:keyword loop: event loop to run on. If not set, uses
:func:`asyncio.get_event_loop()`.
:keyword use_tls: If True, make the initial connection to the server
over TLS/SSL. Note that if the server supports STARTTLS only, this
should be False.
:keyword validate_certs: Determines if server certificates are
validated. Defaults to True.
:keyword client_cert: Path to client side certificate, for TLS.
:keyword client_key: Path to client side key, for TLS.
:keyword tls_context: An existing :class:`ssl.SSLContext`, for TLS.
Mutually exclusive with ``client_cert``/``client_key``.
:keyword cert_bundle: Path to certificate bundle, for TLS verification.
:raises ValueError: mutually exclusive options provided
"""
await self._connect_lock.acquire()
if hostname is not None:
self.hostname = hostname
if loop is not None:
self.loop = loop
if use_tls is not None:
self.use_tls = use_tls
if validate_certs is not None:
self.validate_certs = validate_certs
if port is not None:
self.port = port
if self.port is None:
self.port = SMTP_TLS_PORT if self.use_tls else SMTP_PORT
if timeout is not _default:
self.timeout = timeout # type: ignore
if source_address is not _default:
self._source_address = source_address # type: ignore
if client_cert is not _default:
self.client_cert = client_cert # type: ignore
if client_key is not _default:
self.client_key = client_key # type: ignore
if tls_context is not _default:
self.tls_context = tls_context # type: ignore
if cert_bundle is not _default:
self.cert_bundle = cert_bundle # type: ignore
if self.tls_context is not None and self.client_cert is not None:
raise ValueError(
"Either a TLS context or a certificate/key must be provided"
)
response = await self._create_connection()
return response
def update(self, other=None, **kwargs):
"""x.update(E, **F) -> None. update x from trie/dict/iterable E or F.
If E has a .keys() method, does: for k in E: x[k] = E[k]
If E lacks .keys() method, does: for (k, v) in E: x[k] = v
In either case, this is followed by: for k in F: x[k] = F[k]"""
if other is None:
other = ()
if hasattr(other, 'keys'):
for key in other:
self._update(key, other[key])
else:
for key,value in other:
self._update(key, value)
for key,value in six.iteritems(kwargs):
self._update(key, value)
def formatted(text, *args, **kwargs):
"""
Args:
text (str | unicode): Text to format
*args: Objects to extract values from (as attributes)
**kwargs: Optional values provided as named args
Returns:
(str): Attributes from this class are expanded if mentioned
"""
if not text or "{" not in text:
return text
strict = kwargs.pop("strict", True)
max_depth = kwargs.pop("max_depth", 3)
objects = list(args) + [kwargs] if kwargs else args[0] if len(args) == 1 else args
if not objects:
return text
definitions = {}
markers = RE_FORMAT_MARKERS.findall(text)
while markers:
key = markers.pop()
if key in definitions:
continue
val = _find_value(key, objects)
if strict and val is None:
return None
val = str(val) if val is not None else "{%s}" % key
markers.extend(m for m in RE_FORMAT_MARKERS.findall(val) if m not in definitions)
definitions[key] = val
if not max_depth or not isinstance(max_depth, int) or max_depth <= 0:
return text
expanded = dict((k, _rformat(k, v, definitions, max_depth)) for k, v in definitions.items())
return text.format(**expanded)
def _isstring(dtype):
"""Given a numpy dtype, determines whether it is a string. Returns True
if the dtype is string or unicode.
"""
return dtype.type == numpy.unicode_ or dtype.type == numpy.string_
def parse(self) -> Statement:
"""Parse a complete YANG module or submodule.
Args:
mtext: YANG module text.
Raises:
EndOfInput: If past the end of input.
ModuleNameMismatch: If parsed module name doesn't match `self.name`.
ModuleRevisionMismatch: If parsed revision date doesn't match `self.rev`.
UnexpectedInput: If top-level statement isn't ``(sub)module``.
"""
self.opt_separator()
start = self.offset
res = self.statement()
if res.keyword not in ["module", "submodule"]:
self.offset = start
raise UnexpectedInput(self, "'module' or 'submodule'")
if self.name is not None and res.argument != self.name:
raise ModuleNameMismatch(res.argument, self.name)
if self.rev:
revst = res.find1("revision")
if revst is None or revst.argument != self.rev:
raise ModuleRevisionMismatch(revst.argument, self.rev)
try:
self.opt_separator()
except EndOfInput:
return res
raise UnexpectedInput(self, "end of input") | Parse a complete YANG module or submodule.
Args:
mtext: YANG module text.
Raises:
EndOfInput: If past the end of input.
ModuleNameMismatch: If parsed module name doesn't match `self.name`.
ModuleRevisionMismatch: If parsed revision date doesn't match `self.rev`.
UnexpectedInput: If top-level statement isn't ``(sub)module``. |
def username(anon, obj, field, val):
"""
Generates a random username
"""
return anon.faker.user_name(field=field)
def max_brightness(self):
"""
Returns the maximum allowable brightness value.
"""
self._max_brightness, value = self.get_cached_attr_int(self._max_brightness, 'max_brightness')
return value
def snyder_opt(self, structure):
"""
Calculates Snyder's optical sound velocity (in SI units)
Args:
structure: pymatgen structure object
Returns: Snyder's optical sound velocity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
num_density = 1e30 * nsites / volume
return 1.66914e-23 * \
(self.long_v(structure) + 2.*self.trans_v(structure))/3. \
/ num_density ** (-2./3.) * (1 - nsites ** (-1./3.))
def set_gss_host(self, gss_host, trust_dns=True, gssapi_requested=True):
"""
Normalize/canonicalize ``self.gss_host`` depending on various factors.
:param str gss_host:
The explicitly requested GSS-oriented hostname to connect to (i.e.
what the host's name is in the Kerberos database.) Defaults to
``self.hostname`` (which will be the 'real' target hostname and/or
host portion of given socket object.)
:param bool trust_dns:
Indicates whether or not DNS is trusted; if true, DNS will be used
to canonicalize the GSS hostname (which again will either be
``gss_host`` or the transport's default hostname.)
(Defaults to True due to backwards compatibility.)
:param bool gssapi_requested:
Whether GSSAPI key exchange or authentication was even requested.
If not, this is a no-op and nothing happens
(and ``self.gss_host`` is not set.)
(Defaults to True due to backwards compatibility.)
:returns: ``None``.
"""
# No GSSAPI in play == nothing to do
if not gssapi_requested:
return
# Obtain the correct host first - did user request a GSS-specific name
# to use that is distinct from the actual SSH target hostname?
if gss_host is None:
gss_host = self.hostname
# Finally, canonicalize via DNS if DNS is trusted.
if trust_dns and gss_host is not None:
gss_host = socket.getfqdn(gss_host)
# And set attribute for reference later.
self.gss_host = gss_host
def rnaseq2ga(quantificationFilename, sqlFilename, localName, rnaType,
dataset=None, featureType="gene",
description="", programs="", featureSetNames="",
readGroupSetNames="", biosampleId=""):
"""
Reads RNA Quantification data in one of several formats and stores the data
in a sqlite database for use by the GA4GH reference server.
Supports the following quantification output types:
Cufflinks, kallisto, RSEM.
"""
readGroupSetName = ""
if readGroupSetNames:
readGroupSetName = readGroupSetNames.strip().split(",")[0]
featureSetIds = ""
readGroupIds = ""
if dataset:
featureSetIdList = []
if featureSetNames:
for annotationName in featureSetNames.split(","):
featureSet = dataset.getFeatureSetByName(annotationName)
featureSetIdList.append(featureSet.getId())
featureSetIds = ",".join(featureSetIdList)
# TODO: multiple readGroupSets
if readGroupSetName:
readGroupSet = dataset.getReadGroupSetByName(readGroupSetName)
readGroupIds = ",".join(
[x.getId() for x in readGroupSet.getReadGroups()])
if rnaType not in SUPPORTED_RNA_INPUT_FORMATS:
raise exceptions.UnsupportedFormatException(rnaType)
rnaDB = RnaSqliteStore(sqlFilename)
if rnaType == "cufflinks":
writer = CufflinksWriter(rnaDB, featureType, dataset=dataset)
elif rnaType == "kallisto":
writer = KallistoWriter(rnaDB, featureType, dataset=dataset)
elif rnaType == "rsem":
writer = RsemWriter(rnaDB, featureType, dataset=dataset)
writeRnaseqTable(rnaDB, [localName], description, featureSetIds,
readGroupId=readGroupIds, programs=programs,
biosampleId=biosampleId)
writeExpressionTable(writer, [(localName, quantificationFilename)])
def get_links(self, recall, timeout):
"""Gets links in page
:param recall: max number of attempts to fetch and parse the page
:param timeout: seconds to wait between attempts
:return: array of out_links
"""
for _ in range(recall):
try: # setting timeout
soup = BeautifulSoup(self.source) # parse source
out_links = []
for tag in soup.findAll(["a", "link"], href=True):
tag["href"] = urljoin(self.url, tag["href"])
out_links.append(tag["href"])
return sorted(out_links) # sort array
except:
time.sleep(timeout)
def disable_signing(self):
'''disable MAVLink2 signing'''
self.mav.signing.secret_key = None
self.mav.signing.sign_outgoing = False
self.mav.signing.allow_unsigned_callback = None
self.mav.signing.link_id = 0
self.mav.signing.timestamp = 0
def view_hmap(token, dstore):
"""
Display the highest 20 points of the mean hazard map. Called as
$ oq show hmap:0.1 # 10% PoE
"""
try:
poe = valid.probability(token.split(':')[1])
except IndexError:
poe = 0.1
mean = dict(extract(dstore, 'hcurves?kind=mean'))['mean']
oq = dstore['oqparam']
hmap = calc.make_hmap_array(mean, oq.imtls, [poe], len(mean))
dt = numpy.dtype([('sid', U32)] + [(imt, F32) for imt in oq.imtls])
array = numpy.zeros(len(hmap), dt)
for i, vals in enumerate(hmap):
array[i] = (i, ) + tuple(vals)
array.sort(order=list(oq.imtls)[0])
return rst_table(array[:20])
def _popup(self):
"""recursively find commutative binary operator
among child formulas and pop them up to the same level"""
res = ()
for child in self.formulas:
if type(child) == type(self):
superchilds = child.formulas
res += superchilds
else:
res += (child, )
return tuple(res)
def load_config(path):
"""
Loads configuration from a path.
Path can be a json file, or a directory containing config.json
and zero or more *.txt files with word lists or phrase lists.
Returns config dict.
Raises InitializationError when something is wrong.
"""
path = os.path.abspath(path)
if os.path.isdir(path):
config, wordlists = _load_data(path)
elif os.path.isfile(path):
config = _load_config(path)
wordlists = {}
else:
raise InitializationError('File or directory not found: {0}'.format(path))
for name, wordlist in wordlists.items():
if name in config:
raise InitializationError("Conflict: list {!r} is defined both in config "
"and in *.txt file. If it's a {!r} list, "
"you should remove it from config."
.format(name, _CONF.TYPE.WORDS))
config[name] = wordlist
return config
def to_json(self):
"""
Serialises the content of the KnowledgeBase as JSON.
:return: TODO
"""
return json.dumps({
"statistics": self.get_statistics()
, "authors": [json.loads(author.to_json()) for author in self.get_authors()]
}, indent=2)
def start(self, *args, **kwargs):
"""Starts the instance.
:raises RuntimeError: has been already started.
:raises TypeError: :meth:`run` is not canonical.
"""
if self.is_running():
raise RuntimeError('Already started')
self._running = self.run(*args, **kwargs)
try:
yielded = next(self._running)
except StopIteration:
raise TypeError('run() must yield just one time')
if yielded is not None:
raise TypeError('run() must yield without value')
def multchoicebox(message='Pick as many items as you like.', title='', choices=['program logic error - no choices specified']):
"""Original doc: Present the user with a list of choices.
allow him to select multiple items and return them in a list.
if the user doesn't choose anything from the list, return the empty list.
return None if he cancelled selection.
"""
return psidialogs.multi_choice(message=message, title=title, choices=choices)
def count(self):
""" Total number of array cells
"""
return functools.reduce(lambda x, y: x * y, (x.count for x in self.bounds))
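# Equivalent one-liner (added): the reduce-based product above is the same idea as
# math.prod over the per-dimension sizes (plain ints stand in for the bound objects here):
import functools
import math

dims = [3, 4, 5]
print(functools.reduce(lambda x, y: x * y, dims))   # 60
print(math.prod(dims))                              # 60, Python 3.8+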
def get_resource_siblings(raml_resource):
""" Get siblings of :raml_resource:.
:param raml_resource: Instance of ramlfications.raml.ResourceNode.
"""
path = raml_resource.path
return [res for res in raml_resource.root.resources
if res.path == path]
def nsx_controller_connection_addr_port(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nsx_controller = ET.SubElement(config, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(nsx_controller, "name")
name_key.text = kwargs.pop('name')
connection_addr = ET.SubElement(nsx_controller, "connection-addr")
port = ET.SubElement(connection_addr, "port")
port.text = kwargs.pop('port')
callback = kwargs.pop('callback', self._callback)
return callback(config)
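# Illustration (added): the auto-generated method above just assembles an XML payload with
# ElementTree. A self-contained sketch of the document it builds (the name and port values
# here are made up):
import xml.etree.ElementTree as ET

config = ET.Element("config")
nsx = ET.SubElement(config, "nsx-controller",
                    xmlns="urn:brocade.com:mgmt:brocade-tunnels")
ET.SubElement(nsx, "name").text = "controller-1"
conn = ET.SubElement(nsx, "connection-addr")
ET.SubElement(conn, "port").text = "6632"
print(ET.tostring(config, encoding="unicode"))   # serialized <config>...</config> document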
def mag_to_fnu(self, band, mag):
"""SDSS *primed* magnitudes to F_ν. The primed magnitudes are the "USNO"
standard-star system defined in Smith+ (2002AJ....123.2121S) and
Fukugita+ (1996AJ....111.1748F). This system is anchored to the AB
magnitude system, and as far as I can tell it is not known to have
measurable offsets from that system. (As of DR10, the *unprimed* SDSS
system is known to have small offsets from AB, but I do not believe
that that necessarily has implications for u'g'r'i'z'.)
However, as far as I can tell the filter responses of the USNO
telescope are not published -- only those of the main SDSS 2.5m
telescope. The whole reason for the existence of both the primed and
unprimed ugriz systems is that their responses do not quite match. For
my current application, which involves a completely different
telescope anyway, the difference shouldn't matter.
"""
# `band` should be 'up', 'gp', 'rp', 'ip', or 'zp'.
if len(band) != 2 or band[1] != 'p':
raise ValueError('band: ' + band)
return abmag_to_fnu_cgs(mag)
def loads(string, triples=False, cls=PENMANCodec, **kwargs):
"""
Deserialize a list of PENMAN-encoded graphs from *string*.
Args:
string: a string containing graph data
triples: if True, read graphs as triples instead of as PENMAN
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
Returns:
a list of Graph objects
"""
codec = cls(**kwargs)
return list(codec.iterdecode(string, triples=triples))
def search_dashboard_deleted_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's deleted dashboards # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_dashboard_deleted_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_dashboard_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_dashboard_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def analyze_text( self, text, **kwargs ):
'''
Analyzes given Text for noun phrase chunks.
As a result of the analysis, a layer NOUN_CHUNKS will be attached to the input
Text object, containing the noun phrases detected from the Text;
Note: for preprocessing the Text, MaltParser is used by default. In order
to obtain a decent performance with MaltParser, it is advisable to analyse
texts at their full extent with this method. Splitting a text into smaller
chunks, such as clauses or sentences, and analysing one-small-chunk-at-time
may be rather demanding in terms of performance, because a file-based
preprocessing is used for obtaining the dependency relations.
Parameters
----------
text: estnltk.text.Text
The input text that should be analysed for noun phrases;
force_parsing : bool
If True, uses the *self.parser* to parse the given *text*, and overrides
the syntactic annotations in *text* with the new layer obtained from the
parser;
(default: False)
syntax_layer : str
Specifies which layer of syntactic annotations should be used as a
basis for NP chunking; If the *syntax_layer* exists within the *text*
(and force_parsing==False), uses the syntactic annotations from
*text[syntax_layer]*;
(default: LAYER_CONLL)
cutPhrases: bool
If True, all phrases exceeding the cutMaxThreshold will be
cut into single word phrases, consisting only of part-of-speech
categories 'S', 'Y', 'H';
(default: True)
cutMaxThreshold: int
Threshold indicating the maximum number of words allowed in a
phrase.
If cutPhrases is set, all phrases exceeding the threshold will be
cut into single word phrases, consisting only of part-of-speech
categories 'S', 'Y', 'H';
Automatic analysis of the Balanced Corpus of Estonian suggests
that 97% of all NP chunks are likely chunks of length 1-3, thus
the default threshold is set to 3;
(default value: 3)
return_type: string
If return_type=="text" (Default),
returns the input Text object;
If return_type=="labels",
returns a list of NP labels (strings), containing a label for
each word token in Text, indicating whether the word is at the
beginning of a phrase ('B'), inside a phrase ('I') or does
not belong to any phrase ('O').
If return_type=="tokens",
returns a list of phrases, where each phrase is a list of
tokens, and each token is a dictionary representing word;
If return_type=="strings",
returns a list of text strings, where each string is phrase's
text;
Regardless the return type, a layer named NOUN_CHUNKS will be added
to the input Text containing noun phrase annotations;
'''
# 0) Parse given arguments
#
# Output specifics
all_return_types = ["text", "labels", "tokens", "strings"]
return_type = all_return_types[0]
cutPhrases = True
cutMaxThreshold = 3
annotate_text = True
# Syntax layer & Parsing specifics
syntax_layer_name = LAYER_CONLL
force_parsing = False
for argName, argVal in kwargs.items():
if argName == 'cutPhrases':
cutPhrases = bool(argVal)
elif argName == 'force_parsing':
force_parsing = bool(argVal)
elif argName == 'syntax_layer':
syntax_layer_name = argVal
elif argName == 'cutMaxThreshold':
cutMaxThreshold = int(argVal)
elif argName == 'return_type':
if argVal.lower() in all_return_types:
return_type = argVal.lower()
else:
raise Exception(' Unexpected return type: ', argVal)
else:
raise Exception(' Unsupported argument given: '+argName)
#
# 1) Acquire the layers of morphological & syntactic annotations:
#
if not syntax_layer_name in text or force_parsing:
# No existing layer found: produce a new layer with the parser
self.parser.parse_text( text )
if isinstance(self.parser, MaltParser):
syntax_layer_name = LAYER_CONLL
elif isinstance(self.parser, VISLCG3Parser):
syntax_layer_name = LAYER_VISLCG3
else:
raise Exception(' (!) Unknown type of syntactic parser: ',self.parser)
if not text.is_tagged(ANALYSIS):
# If missing, add the layer of morphological analyses
text = text.tag_analysis()
# 2) Process text sentence by sentence
all_np_labels = []
for sentence_text in text.split_by( SENTENCES ):
tokens = sentence_text[WORDS]
syntax_layer = sentence_text[syntax_layer_name]
# Find phrases
np_labels = self._find_phrases( tokens, syntax_layer, cutPhrases, cutMaxThreshold )
# Normalize labels
np_labels = [ 'O' if not l in ['B', 'I'] else l for l in np_labels ]
# Collect results
all_np_labels.extend( np_labels )
# 3) Return input text, labels, phrases or phrase texts
if annotate_text:
self.annotateText( text, NOUN_CHUNKS, all_np_labels )
if return_type == "text":
return text
elif return_type == "labels":
return all_np_labels
elif return_type == "tokens":
return self.get_phrases(text, all_np_labels)
else:
return self.get_phrase_texts(text, all_np_labels)
def output_to_table(obj, olist='inputs', oformat='latex', table_ends=False, prefix=""):
"""
Compile the properties to a table.
:param olist: list, Names of the parameters to be in the output table
:param oformat: str, The type of table to be output
:param table_ends: bool, Add ends to the table
:param prefix: str, A string to be added to the start of each parameter name
:return: para, str, table as a string
"""
para = ""
property_list = []
if olist == 'inputs':
property_list = obj.inputs
elif olist == 'all':
for item in obj.__dict__:
if "_" != item[0]:
property_list.append(item)
for item in property_list:
if hasattr(obj, item):
value = getattr(obj, item)
value_str = format_value(value)
if oformat == "latex":
delimeter = " & "
else:
delimeter = ","
para += "{0}{1}{2}\\\\\n".format(prefix + format_name(item), delimeter, value_str)
if table_ends:
para = add_table_ends(para, oformat)
return para
def main():
"""Main entry point"""
parser = OptionParser()
parser.add_option('-a', '--hostname',
help='ClamAV source server hostname',
dest='hostname',
type='str',
default='db.de.clamav.net')
parser.add_option('-r', '--text-record',
help='ClamAV Updates TXT record',
dest='txtrecord',
type='str',
default='current.cvd.clamav.net')
parser.add_option('-w', '--work-directory',
help='Working directory',
dest='workdir',
type='str',
default='/var/spool/clamav-mirror')
parser.add_option('-d', '--mirror-directory',
help='The mirror directory',
dest='mirrordir',
type='str',
default='/srv/www/clamav')
parser.add_option('-u', '--user',
help='Change file owner to this user',
dest='user',
type='str',
default='nginx')
parser.add_option('-g', '--group',
help='Change file group to this group',
dest='group',
type='str',
default='nginx')
parser.add_option('-l', '--locks-directory',
help='Lock files directory',
dest='lockdir',
type='str',
default='/var/lock/subsys')
parser.add_option('-v', '--verbose',
help='Display verbose output',
dest='verbose',
action='store_true',
default=False)
options, _ = parser.parse_args()
try:
lockfile = os.path.join(options.lockdir, 'clamavmirror')
with open(lockfile, 'w+') as lock:
fcntl.lockf(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
work(options)
except IOError:
info("=> Another instance is already running")
sys.exit(254)
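# Aside (added): the lock in main() is a standard "single instance" guard -- an exclusive,
# non-blocking fcntl lock on a well-known file. Minimal Unix-only sketch (lock path assumed):
import fcntl
import sys

try:
    lock = open("/tmp/clamavmirror.lock", "w")
    fcntl.lockf(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
    print("lock acquired, doing work")
except IOError:
    print("=> Another instance is already running")
    sys.exit(254)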
def get_filepath(self, filename):
"""
Creates file path for the file.
:param filename: name of the file
:type filename: str
:return: filename with path on disk
:rtype: str
"""
return os.path.join(self.parent_folder, self.product_id, self.add_file_extension(filename)).replace(':', '.')
async def _connect(self, connection_lost_callbk=None):
"""Asyncio connection to Elk."""
self.connection_lost_callbk = connection_lost_callbk
url = self._config['url']
LOG.info("Connecting to ElkM1 at %s", url)
scheme, dest, param, ssl_context = parse_url(url)
conn = partial(Connection, self.loop, self._connected,
self._disconnected, self._got_data, self._timeout)
try:
if scheme == 'serial':
await serial_asyncio.create_serial_connection(
self.loop, conn, dest, baudrate=param)
else:
await asyncio.wait_for(self.loop.create_connection(
conn, host=dest, port=param, ssl=ssl_context), timeout=30)
except (ValueError, OSError, asyncio.TimeoutError) as err:
LOG.warning("Could not connect to ElkM1 (%s). Retrying in %d seconds",
err, self._connection_retry_timer)
self.loop.call_later(self._connection_retry_timer, self.connect)
self._connection_retry_timer = 2 * self._connection_retry_timer \
if self._connection_retry_timer < 32 else 60
def add_install_defaults(args):
"""Add any saved installation defaults to the upgrade.
"""
# Ensure we install data if we've specified any secondary installation targets
if len(args.genomes) > 0 or len(args.aligners) > 0 or len(args.datatarget) > 0:
args.install_data = True
install_config = _get_install_config()
if install_config is None or not utils.file_exists(install_config):
default_args = {}
else:
with open(install_config) as in_handle:
default_args = yaml.safe_load(in_handle)
# if we are upgrading to development, also upgrade the tools
if args.upgrade in ["development"] and (args.tooldir or "tooldir" in default_args):
args.tools = True
if args.tools and args.tooldir is None:
if "tooldir" in default_args:
args.tooldir = str(default_args["tooldir"])
else:
raise ValueError("Default tool directory not yet saved in config defaults. "
"Specify the '--tooldir=/path/to/tools' to upgrade tools. "
"After a successful upgrade, the '--tools' parameter will "
"work for future upgrades.")
for attr in ["genomes", "aligners"]:
# don't upgrade default genomes if a genome was specified
if attr == "genomes" and len(args.genomes) > 0:
continue
for x in default_args.get(attr, []):
x = str(x)
new_val = getattr(args, attr)
if x not in getattr(args, attr):
new_val.append(x)
setattr(args, attr, new_val)
args = _datatarget_defaults(args, default_args)
if "isolate" in default_args and args.isolate is not True:
args.isolate = default_args["isolate"]
return args
def set_line_join(self, line_join):
"""Set the current :ref:`LINE_JOIN` within the cairo context.
As with the other stroke parameters,
the current line join style is examined by
:meth:`stroke`, :meth:`stroke_extents`, and :meth:`stroke_to_path`,
but does not have any effect during path construction.
The default line join is :obj:`MITER <LINE_JOIN_MITER>`.
:param line_join: A :ref:`LINE_JOIN` string.
"""
cairo.cairo_set_line_join(self._pointer, line_join)
self._check_status()
def get_blank_row(self, filler="-", splitter="+"):
"""Gets blank row
:param filler: Fill empty columns with this char
:param splitter: Separate columns with this char
:return: Pretty formatted blank row (with no meaningful data in it)
"""
return self.get_pretty_row(
["" for _ in self.widths], # blanks
filler, # fill with this
splitter, # split columns with this
)
def filter_by_analysis_period(self, analysis_period):
"""
Filter a Data Collection based on an analysis period.
Args:
analysis period: A Ladybug analysis period
Return:
A new Data Collection with filtered data
"""
self._check_analysis_period(analysis_period)
_filtered_data = self.filter_by_moys(analysis_period.moys)
_filtered_data.header._analysis_period = analysis_period
return _filtered_data
def hardware_custom_profile_kap_custom_profile_xstp_xstp_hello_interval(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
custom_profile = ET.SubElement(hardware, "custom-profile")
kap_custom_profile = ET.SubElement(custom_profile, "kap-custom-profile")
name_key = ET.SubElement(kap_custom_profile, "name")
name_key.text = kwargs.pop('name')
xstp = ET.SubElement(kap_custom_profile, "xstp")
xstp_hello_interval = ET.SubElement(xstp, "xstp_hello_interval")
xstp_hello_interval.text = kwargs.pop('xstp_hello_interval')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def setSignals(self, vehID, signals):
"""setSignals(string, integer) -> None
Sets an integer encoding the state of the vehicle's signals.
"""
self._connection._sendIntCmd(
tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_SIGNALS, vehID, signals)
def pix2vec(nside, ipix, nest=False):
"""Drop-in replacement for healpy `~healpy.pixelfunc.pix2vec`."""
lon, lat = healpix_to_lonlat(ipix, nside, order='nested' if nest else 'ring')
return ang2vec(*_lonlat_to_healpy(lon, lat))
def getSegmentOnCell(self, c, i, segIdx):
"""
Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getSegmentOnCell`.
"""
segList = self.cells4.getNonEmptySegList(c,i)
seg = self.cells4.getSegment(c, i, segList[segIdx])
numSyn = seg.size()
assert numSyn != 0
# Accumulate segment information
result = []
result.append([int(segIdx), bool(seg.isSequenceSegment()),
seg.getPositiveActivations(),
seg.getTotalActivations(), seg.getLastActiveIteration(),
seg.getLastPosDutyCycle(),
seg.getLastPosDutyCycleIteration()])
for s in xrange(numSyn):
sc, si = self.getColCellIdx(seg.getSrcCellIdx(s))
result.append([int(sc), int(si), seg.getPermanence(s)])
return result
def append(self, data: Union[bytes, bytearray, memoryview]) -> None:
"""
Append the given piece of data (should be a buffer-compatible object).
"""
size = len(data)
if size > self._large_buf_threshold:
if not isinstance(data, memoryview):
data = memoryview(data)
self._buffers.append((True, data))
elif size > 0:
if self._buffers:
is_memview, b = self._buffers[-1]
new_buf = is_memview or len(b) >= self._large_buf_threshold
else:
new_buf = True
if new_buf:
self._buffers.append((False, bytearray(data)))
else:
b += data # type: ignore
self._size += size
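# Aside (added): wrapping large chunks in memoryview, as append() does above, avoids copying
# the payload when it is later sliced. Small self-contained illustration:
data = bytearray(b"0123456789")
view = memoryview(data)      # zero-copy view over the buffer
chunk = view[2:6]            # slicing the view still copies nothing
print(bytes(chunk))          # b'2345'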
def preprocess(self):
'''
Performs initial cell conversions to standard types. This will strip units, scale numbers,
and identify numeric data where it's convertible.
'''
self.processed_tables = []
self.flags_by_table = []
self.units_by_table = []
for worksheet, rtable in enumerate(self.raw_tables):
ptable, flags, units = self.preprocess_worksheet(rtable, worksheet)
self.processed_tables.append(ptable)
self.flags_by_table.append(flags)
self.units_by_table.append(units)
return self.processed_tables
def p_sigtypes(self, p):
'sigtypes : sigtypes sigtype'
p[0] = p[1] + (p[2],)
p.set_lineno(0, p.lineno(1))
def document_from_string(self, schema, request_string):
# type: (GraphQLSchema, str) -> GraphQLDocument
"""This method returns a GraphQLQuery (from cache if present)"""
key = self.get_key_for_schema_and_document_string(schema, request_string)
if key not in self.cache_map:
# We return from the fallback
self.cache_map[key] = self.fallback_backend.document_from_string(
schema, request_string
)
# We ensure the main backend response is in the queue
self.get_worker().queue(self.queue_backend, key, schema, request_string)
return self.cache_map[key]
def get_bins_by_resource(self, resource_id):
"""Gets the list of ``Bin`` objects mapped to a ``Resource``.
arg: resource_id (osid.id.Id): ``Id`` of a ``Resource``
return: (osid.resource.BinList) - list of bins
raise: NotFound - ``resource_id`` is not found
raise: NullArgument - ``resource_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_bins_by_resource
mgr = self._get_provider_manager('RESOURCE', local=True)
lookup_session = mgr.get_bin_lookup_session(proxy=self._proxy)
return lookup_session.get_bins_by_ids(
self.get_bin_ids_by_resource(resource_id))
def calc_acceleration(xdata, dt):
"""
Calculates the acceleration from the position
Parameters
----------
xdata : ndarray
Position data
dt : float
time between measurements
Returns
-------
acceleration : ndarray
values of acceleration from position
2 to N.
"""
acceleration = _np.diff(_np.diff(xdata))/dt**2
return acceleration
def _get_layer_converter_fn(layer, add_custom_layers = False):
"""Get the right converter function for Keras
"""
layer_type = type(layer)
if layer_type in _KERAS_LAYER_REGISTRY:
convert_func = _KERAS_LAYER_REGISTRY[layer_type]
if convert_func is _layers2.convert_activation:
act_name = _layers2._get_activation_name_from_keras_layer(layer)
if act_name == 'CUSTOM':
return None
return convert_func
elif add_custom_layers:
return None
else:
raise TypeError("Keras layer of type %s is not supported." % type(layer)) | Get the right converter function for Keras |
def text(what="sentence", *args, **kwargs):
"""An aggregator for all above defined public methods."""
if what == "character":
return character(*args, **kwargs)
elif what == "characters":
return characters(*args, **kwargs)
elif what == "word":
return word(*args, **kwargs)
elif what == "words":
return words(*args, **kwargs)
elif what == "sentence":
return sentence(*args, **kwargs)
elif what == "sentences":
return sentences(*args, **kwargs)
elif what == "paragraph":
return paragraph(*args, **kwargs)
elif what == "paragraphs":
return paragraphs(*args, **kwargs)
elif what == "title":
return title(*args, **kwargs)
else:
raise NameError('No such method')
def reference_to_greatcircle(reference_frame, greatcircle_frame):
"""Convert a reference coordinate to a great circle frame."""
# Define rotation matrices along the position angle vector, and
# relative to the origin.
pole = greatcircle_frame.pole.transform_to(coord.ICRS)
ra0 = greatcircle_frame.ra0
center = greatcircle_frame.center
R_rot = rotation_matrix(greatcircle_frame.rotation, 'z')
if not np.isnan(ra0):
xaxis = np.array([np.cos(ra0), np.sin(ra0), 0.])
zaxis = pole.cartesian.xyz.value
if np.abs(zaxis[2]) >= 1e-15:
xaxis[2] = -(zaxis[0]*xaxis[0] + zaxis[1]*xaxis[1]) / zaxis[2] # what?
else:
xaxis[2] = 0.
xaxis = xaxis / np.sqrt(np.sum(xaxis**2))
yaxis = np.cross(zaxis, xaxis)
R = np.stack((xaxis, yaxis, zaxis))
elif center is not None:
R1 = rotation_matrix(pole.ra, 'z')
R2 = rotation_matrix(90*u.deg - pole.dec, 'y')
Rtmp = matrix_product(R2, R1)
rot = center.cartesian.transform(Rtmp)
rot_lon = rot.represent_as(coord.UnitSphericalRepresentation).lon
R3 = rotation_matrix(rot_lon, 'z')
R = matrix_product(R3, R2, R1)
else:
R1 = rotation_matrix(pole.ra, 'z')
R2 = rotation_matrix(pole.dec, 'y')
R = matrix_product(R2, R1)
return matrix_product(R_rot, R)
def QA_util_code_tolist(code, auto_fill=True):
"""转换code==> list
Arguments:
code {[type]} -- [description]
Keyword Arguments:
auto_fill {bool} -- whether to auto-pad the code to 6 digits (generally used for stocks/indexes/ETFs; not applicable to futures) (default: {True})
Returns:
[list] -- [description]
"""
if isinstance(code, str):
if auto_fill:
return [QA_util_code_tostr(code)]
else:
return [code]
elif isinstance(code, list):
if auto_fill:
return [QA_util_code_tostr(item) for item in code]
else:
return [item for item in code]
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
from _markerlib import compile as compile_marker
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
distvers, mark = self._preparse_requirement(req)
parsed = parse_requirements(distvers).next()
parsed.marker_fn = compile_marker(mark)
reqs.append(parsed)
def reqs_for_extra(extra):
for req in reqs:
if req.marker_fn(override={'extra':extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
extra = safe_extra(extra.strip())
dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm | Recompute this distribution's dependencies. |
def previous(self, cli):
"""
Return the previously focussed :class:`.Buffer` or `None`.
"""
if len(self.focus_stack) > 1:
try:
return self[self.focus_stack[-2]]
except KeyError:
pass | Return the previously focussed :class:`.Buffer` or `None`. |
def foreach(self, f):
"""
Sets the output of the streaming query to be processed using the provided writer ``f``.
This is often used to write the output of a streaming query to arbitrary storage systems.
The processing logic can be specified in two ways.
#. A **function** that takes a row as input.
This is a simple way to express your processing logic. Note that this does
not allow you to deduplicate generated data when failures cause reprocessing of
some input data. That would require you to specify the processing logic in the next
way.
#. An **object** with a ``process`` method and optional ``open`` and ``close`` methods.
The object can have the following methods.
* ``open(partition_id, epoch_id)``: *Optional* method that initializes the processing
(for example, open a connection, start a transaction, etc). Additionally, you can
use the `partition_id` and `epoch_id` to deduplicate regenerated data
(discussed later).
* ``process(row)``: *Non-optional* method that processes each :class:`Row`.
* ``close(error)``: *Optional* method that finalizes and cleans up (for example,
close connection, commit transaction, etc.) after all rows have been processed.
The object will be used by Spark in the following way.
        * A single copy of this object is responsible for all the data generated by a
single task in a query. In other words, one instance is responsible for
processing one partition of the data generated in a distributed manner.
* This object must be serializable because each task will get a fresh
serialized-deserialized copy of the provided object. Hence, it is strongly
recommended that any initialization for writing data (e.g. opening a
connection or starting a transaction) is done after the `open(...)`
method has been called, which signifies that the task is ready to generate data.
        * The lifecycle of the methods is as follows.
For each partition with ``partition_id``:
... For each batch/epoch of streaming data with ``epoch_id``:
....... Method ``open(partitionId, epochId)`` is called.
....... If ``open(...)`` returns true, for each row in the partition and
batch/epoch, method ``process(row)`` is called.
....... Method ``close(errorOrNull)`` is called with error (if any) seen while
processing rows.
Important points to note:
* The `partitionId` and `epochId` can be used to deduplicate generated data when
failures cause reprocessing of some input data. This depends on the execution
mode of the query. If the streaming query is being executed in the micro-batch
mode, then every partition represented by a unique tuple (partition_id, epoch_id)
is guaranteed to have the same data. Hence, (partition_id, epoch_id) can be used
to deduplicate and/or transactionally commit data and achieve exactly-once
guarantees. However, if the streaming query is being executed in the continuous
mode, then this guarantee does not hold and therefore should not be used for
deduplication.
        * The ``close()`` method (if it exists) will be called if the `open()` method exists and
returns successfully (irrespective of the return value), except if the Python
crashes in the middle.
.. note:: Evolving.
>>> # Print every row using a function
>>> def print_row(row):
... print(row)
...
>>> writer = sdf.writeStream.foreach(print_row)
        >>> # Print every row using an object with a process() method
>>> class RowPrinter:
... def open(self, partition_id, epoch_id):
... print("Opened %d, %d" % (partition_id, epoch_id))
... return True
... def process(self, row):
... print(row)
... def close(self, error):
... print("Closed with error: %s" % str(error))
...
>>> writer = sdf.writeStream.foreach(RowPrinter())
"""
from pyspark.rdd import _wrap_function
from pyspark.serializers import PickleSerializer, AutoBatchedSerializer
from pyspark.taskcontext import TaskContext
if callable(f):
# The provided object is a callable function that is supposed to be called on each row.
# Construct a function that takes an iterator and calls the provided function on each
# row.
def func_without_process(_, iterator):
for x in iterator:
f(x)
return iter([])
func = func_without_process
else:
# The provided object is not a callable function. Then it is expected to have a
# 'process(row)' method, and optional 'open(partition_id, epoch_id)' and
# 'close(error)' methods.
if not hasattr(f, 'process'):
raise Exception("Provided object does not have a 'process' method")
if not callable(getattr(f, 'process')):
raise Exception("Attribute 'process' in provided object is not callable")
def doesMethodExist(method_name):
exists = hasattr(f, method_name)
if exists and not callable(getattr(f, method_name)):
raise Exception(
"Attribute '%s' in provided object is not callable" % method_name)
return exists
open_exists = doesMethodExist('open')
close_exists = doesMethodExist('close')
def func_with_open_process_close(partition_id, iterator):
epoch_id = TaskContext.get().getLocalProperty('streaming.sql.batchId')
if epoch_id:
epoch_id = int(epoch_id)
else:
raise Exception("Could not get batch id from TaskContext")
# Check if the data should be processed
should_process = True
if open_exists:
should_process = f.open(partition_id, epoch_id)
error = None
try:
if should_process:
for x in iterator:
f.process(x)
except Exception as ex:
error = ex
finally:
if close_exists:
f.close(error)
if error:
raise error
return iter([])
func = func_with_open_process_close
serializer = AutoBatchedSerializer(PickleSerializer())
wrapped_func = _wrap_function(self._spark._sc, func, serializer, serializer)
jForeachWriter = \
self._spark._sc._jvm.org.apache.spark.sql.execution.python.PythonForeachWriter(
wrapped_func, self._df._jdf.schema())
self._jwrite.foreach(jForeachWriter)
return self | Sets the output of the streaming query to be processed using the provided writer ``f``.
This is often used to write the output of a streaming query to arbitrary storage systems.
The processing logic can be specified in two ways.
#. A **function** that takes a row as input.
This is a simple way to express your processing logic. Note that this does
not allow you to deduplicate generated data when failures cause reprocessing of
some input data. That would require you to specify the processing logic in the next
way.
#. An **object** with a ``process`` method and optional ``open`` and ``close`` methods.
The object can have the following methods.
* ``open(partition_id, epoch_id)``: *Optional* method that initializes the processing
(for example, open a connection, start a transaction, etc). Additionally, you can
use the `partition_id` and `epoch_id` to deduplicate regenerated data
(discussed later).
* ``process(row)``: *Non-optional* method that processes each :class:`Row`.
* ``close(error)``: *Optional* method that finalizes and cleans up (for example,
close connection, commit transaction, etc.) after all rows have been processed.
The object will be used by Spark in the following way.
        * A single copy of this object is responsible for all the data generated by a
single task in a query. In other words, one instance is responsible for
processing one partition of the data generated in a distributed manner.
* This object must be serializable because each task will get a fresh
serialized-deserialized copy of the provided object. Hence, it is strongly
recommended that any initialization for writing data (e.g. opening a
connection or starting a transaction) is done after the `open(...)`
method has been called, which signifies that the task is ready to generate data.
        * The lifecycle of the methods is as follows.
For each partition with ``partition_id``:
... For each batch/epoch of streaming data with ``epoch_id``:
....... Method ``open(partitionId, epochId)`` is called.
....... If ``open(...)`` returns true, for each row in the partition and
batch/epoch, method ``process(row)`` is called.
....... Method ``close(errorOrNull)`` is called with error (if any) seen while
processing rows.
Important points to note:
* The `partitionId` and `epochId` can be used to deduplicate generated data when
failures cause reprocessing of some input data. This depends on the execution
mode of the query. If the streaming query is being executed in the micro-batch
mode, then every partition represented by a unique tuple (partition_id, epoch_id)
is guaranteed to have the same data. Hence, (partition_id, epoch_id) can be used
to deduplicate and/or transactionally commit data and achieve exactly-once
guarantees. However, if the streaming query is being executed in the continuous
mode, then this guarantee does not hold and therefore should not be used for
deduplication.
        * The ``close()`` method (if it exists) will be called if the `open()` method exists and
returns successfully (irrespective of the return value), except if the Python
crashes in the middle.
.. note:: Evolving.
>>> # Print every row using a function
>>> def print_row(row):
... print(row)
...
>>> writer = sdf.writeStream.foreach(print_row)
        >>> # Print every row using an object with a process() method
>>> class RowPrinter:
... def open(self, partition_id, epoch_id):
... print("Opened %d, %d" % (partition_id, epoch_id))
... return True
... def process(self, row):
... print(row)
... def close(self, error):
... print("Closed with error: %s" % str(error))
...
>>> writer = sdf.writeStream.foreach(RowPrinter()) |
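The deduplication idea described in the docstring can be sketched without Spark: a writer keyed on (partition_id, epoch_id) skips batches that were already committed, so a reprocessed partition writes nothing twice. The in-memory `sink` dict and the driver loop below are stand-ins for a real storage system and for Spark's task scheduling, not part of pyspark itself.

sink = {}

class DedupWriter:
    def open(self, partition_id, epoch_id):
        self.key = (partition_id, epoch_id)
        self.rows = []
        self.should_write = self.key not in sink   # skip batches already committed
        return self.should_write

    def process(self, row):
        self.rows.append(row)

    def close(self, error):
        if error is None and self.should_write:
            sink[self.key] = self.rows             # commit the whole batch at once

# Simulate how one partition of one epoch would be driven, then retried.
for attempt in range(2):
    w = DedupWriter()
    if w.open(partition_id=0, epoch_id=7):
        for row in ("a", "b", "c"):
            w.process(row)
    w.close(None)
print(sink)    # {(0, 7): ['a', 'b', 'c']} -- the retry writes nothing a second time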
def mu(self):
"""See docs for `Model` abstract base class."""
mu = self._models[0].mu
assert all([mu == model.mu for model in self._models])
return mu | See docs for `Model` abstract base class. |
def set_url_part(url, **kwargs):
"""Change one or more parts of a URL"""
d = parse_url_to_dict(url)
d.update(kwargs)
return unparse_url_dict(d) | Change one or more parts of a URL |
def add_oxidation_state_by_site_fraction(structure, oxidation_states):
"""
Add oxidation states to a structure by fractional site.
Args:
oxidation_states (list): List of list of oxidation states for each
site fraction for each site.
E.g., [[2, 4], [3], [-2], [-2], [-2]]
"""
try:
for i, site in enumerate(structure):
new_sp = collections.defaultdict(float)
for j, (el, occu) in enumerate(get_z_ordered_elmap(site
.species)):
specie = Specie(el.symbol, oxidation_states[i][j])
new_sp[specie] += occu
structure[i] = new_sp
return structure
except IndexError:
raise ValueError("Oxidation state of all sites must be "
"specified in the list.") | Add oxidation states to a structure by fractional site.
Args:
oxidation_states (list): List of list of oxidation states for each
site fraction for each site.
E.g., [[2, 4], [3], [-2], [-2], [-2]] |
def clear_cached_values(self):
"""Removes all of the cached values and interpolators
"""
self._prof_interp = None
self._prof_y = None
self._prof_z = None
self._marg_interp = None
self._marg_z = None
self._post = None
self._post_interp = None
self._interp = None
self._ret_type = None | Removes all of the cached values and interpolators |
def filter_data(data, kernel, mode='constant', fill_value=0.0,
check_normalization=False):
"""
Convolve a 2D image with a 2D kernel.
The kernel may either be a 2D `~numpy.ndarray` or a
`~astropy.convolution.Kernel2D` object.
Parameters
----------
data : array_like
The 2D array of the image.
kernel : array-like (2D) or `~astropy.convolution.Kernel2D`
The 2D kernel used to filter the input ``data``. Filtering the
``data`` will smooth the noise and maximize detectability of
objects with a shape similar to the kernel.
mode : {'constant', 'reflect', 'nearest', 'mirror', 'wrap'}, optional
The ``mode`` determines how the array borders are handled. For
the ``'constant'`` mode, values outside the array borders are
set to ``fill_value``. The default is ``'constant'``.
fill_value : scalar, optional
Value to fill data values beyond the array borders if ``mode``
is ``'constant'``. The default is ``0.0``.
check_normalization : bool, optional
If `True` then a warning will be issued if the kernel is not
normalized to 1.
"""
from scipy import ndimage
if kernel is not None:
if isinstance(kernel, Kernel2D):
kernel_array = kernel.array
else:
kernel_array = kernel
if check_normalization:
if not np.allclose(np.sum(kernel_array), 1.0):
warnings.warn('The kernel is not normalized.',
AstropyUserWarning)
# NOTE: astropy.convolution.convolve fails with zero-sum
# kernels (used in findstars) (cf. astropy #1647)
# NOTE: if data is int and kernel is float, ndimage.convolve
# will return an int image - here we make the data float so
# that a float image is always returned
return ndimage.convolve(data.astype(float), kernel_array, mode=mode,
cval=fill_value)
else:
return data | Convolve a 2D image with a 2D kernel.
The kernel may either be a 2D `~numpy.ndarray` or a
`~astropy.convolution.Kernel2D` object.
Parameters
----------
data : array_like
The 2D array of the image.
kernel : array-like (2D) or `~astropy.convolution.Kernel2D`
The 2D kernel used to filter the input ``data``. Filtering the
``data`` will smooth the noise and maximize detectability of
objects with a shape similar to the kernel.
mode : {'constant', 'reflect', 'nearest', 'mirror', 'wrap'}, optional
The ``mode`` determines how the array borders are handled. For
the ``'constant'`` mode, values outside the array borders are
set to ``fill_value``. The default is ``'constant'``.
fill_value : scalar, optional
Value to fill data values beyond the array borders if ``mode``
is ``'constant'``. The default is ``0.0``.
check_normalization : bool, optional
If `True` then a warning will be issued if the kernel is not
normalized to 1. |
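A hedged usage sketch of the same `scipy.ndimage.convolve` call used above, smoothing a small made-up image with a normalized 3x3 box kernel.

import numpy as np
from scipy import ndimage

data = np.arange(25).reshape(5, 5)              # small integer "image"
kernel = np.full((3, 3), 1.0 / 9.0)             # normalized box kernel (sums to 1)
smoothed = ndimage.convolve(data.astype(float), kernel, mode='constant', cval=0.0)
print(smoothed.shape, smoothed.dtype)           # (5, 5) float64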
def register_surrogateescape():
"""
Registers the surrogateescape error handler on Python 2 (only)
"""
if six.PY3:
return
try:
codecs.lookup_error(FS_ERRORS)
except LookupError:
codecs.register_error(FS_ERRORS, surrogateescape_handler) | Registers the surrogateescape error handler on Python 2 (only) |
def get_clients(self, limit=None, offset=None):
"""
Returns a list of clients.
"""
data = {}
if limit:
data['limit'] = limit
if offset:
data['offset'] = offset
result = self._request('GET', '/clients', data=json.dumps(data))
return result.json() | Returns a list of clients. |
def hmget(self, name, keys, *args):
"Returns a list of values ordered identically to ``keys``"
args = list_or_args(keys, args)
return self.execute_command('HMGET', name, *args) | Returns a list of values ordered identically to ``keys`` |
def p_subidentifiers(self, p):
"""subidentifiers : subidentifiers subidentifier
| subidentifier"""
n = len(p)
if n == 3:
p[0] = p[1] + [p[2]]
elif n == 2:
p[0] = [p[1]] | subidentifiers : subidentifiers subidentifier
| subidentifier |
def run(path, code=None, params=None, ignore=None, select=None, **meta):
"""Check code with Radon.
:return list: List of errors.
"""
complexity = params.get('complexity', 10)
no_assert = params.get('no_assert', False)
show_closures = params.get('show_closures', False)
visitor = ComplexityVisitor.from_code(code, no_assert=no_assert)
blocks = visitor.blocks
if show_closures:
blocks = add_inner_blocks(blocks)
    return [
        {'lnum': block.lineno, 'col': block.col_offset, 'type': 'R', 'number': 'R709',
         'text': 'R701: %s is too complex %d' % (block.name, block.complexity)}
        for block in blocks if block.complexity > complexity
    ]
:return list: List of errors. |
def obj_to_json(self, file_path=None, indent=2, sort_keys=False,
quote_numbers=True):
"""
This will return a str of a json list.
:param file_path: path to data file, defaults to
self's contents if left alone
:param indent: int if set to 2 will indent to spaces and include
line breaks.
        :param sort_keys: sorts columns as opposed to preserving column order.
:param quote_numbers: bool if True will quote numbers that are strings
:return: string representing the grid formation
of the relevant data
"""
data = [row.obj_to_ordered_dict(self.columns) for row in self]
if not quote_numbers:
for row in data:
for k, v in row.items():
if isinstance(v, (bool, int, float)):
row[k] = str(row[k])
ret = json.dumps(data, indent=indent, sort_keys=sort_keys)
if sys.version_info[0] == 2:
ret = ret.replace(', \n', ',\n')
self._save_file(file_path, ret)
return ret | This will return a str of a json list.
:param file_path: path to data file, defaults to
self's contents if left alone
:param indent: int if set to 2 will indent to spaces and include
line breaks.
        :param sort_keys: sorts columns as opposed to preserving column order.
:param quote_numbers: bool if True will quote numbers that are strings
:return: string representing the grid formation
of the relevant data |
def hr_size(num, suffix='B') -> str:
"""
Human-readable data size
From https://stackoverflow.com/a/1094933
:param num: number of bytes
:param suffix: Optional size specifier
:return: Formatted string
"""
for unit in ' KMGTPEZ':
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit if unit != ' ' else '', suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Y', suffix) | Human-readable data size
From https://stackoverflow.com/a/1094933
:param num: number of bytes
:param suffix: Optional size specifier
:return: Formatted string |
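Assumed usage of the `hr_size` helper defined above; the byte counts are arbitrary examples of the divide-by-1024 cascade.

print(hr_size(512))             # '512.0B'
print(hr_size(2048))            # '2.0KB'
print(hr_size(5 * 1024**3))     # '5.0GB'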
def output(self, stream, disabletransferencoding = None):
"""
Set output stream and send response immediately
"""
if self._sendHeaders:
raise HttpProtocolException('Cannot modify response, headers already sent')
self.outputstream = stream
try:
content_length = len(stream)
except Exception:
pass
else:
self.header(b'Content-Length', str(content_length).encode('ascii'))
if disabletransferencoding is not None:
self.disabledeflate = disabletransferencoding
self._startResponse() | Set output stream and send response immediately |
def encrypt(self, plaintext):
"""Encrypt the given plaintext value"""
if not isinstance(plaintext, int):
raise ValueError('Plaintext must be an integer value')
if not self.in_range.contains(plaintext):
raise OutOfRangeError('Plaintext is not within the input range')
return self.encrypt_recursive(plaintext, self.in_range, self.out_range) | Encrypt the given plaintext value |
def add_access_policy_filter(request, query, column_name):
"""Filter records that do not have ``read`` or better access for one or more of the
active subjects.
Since ``read`` is the lowest access level that a subject can have, this method only
has to filter on the presence of the subject.
"""
q = d1_gmn.app.models.Subject.objects.filter(
subject__in=request.all_subjects_set
).values('permission__sciobj')
filter_arg = '{}__in'.format(column_name)
return query.filter(**{filter_arg: q}) | Filter records that do not have ``read`` or better access for one or more of the
active subjects.
Since ``read`` is the lowest access level that a subject can have, this method only
has to filter on the presence of the subject. |
def xml_endtag (self, name):
"""
Write XML end tag.
"""
self.level -= 1
assert self.level >= 0
self.write(self.indent*self.level)
self.writeln(u"</%s>" % xmlquote(name)) | Write XML end tag. |
def get_angle(self, verify = False):
"""
        Returns the measured angle in degrees in the range 0-360.
"""
LSB = self.bus.read_byte_data(self.address, self.angle_LSB)
MSB = self.bus.read_byte_data(self.address, self.angle_MSB)
DATA = (MSB << 6) + LSB
if not verify:
return (360.0 / 2**14) * DATA
else:
status = self.get_diagnostics()
if not (status['Comp_Low']) and not(status['Comp_High']) and not(status['COF']):
return (360.0 / 2**14) * DATA
else:
            return None | Returns the measured angle in degrees in the range 0-360.
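A worked example of the 14-bit readout arithmetic above; the register values are hypothetical, not taken from a real device.

MSB, LSB = 0x80, 0x00                 # hypothetical register reads
DATA = (MSB << 6) + LSB               # 8192, i.e. half of the 2**14 range
print((360.0 / 2**14) * DATA)         # 180.0 degrees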
def interconnect_link_topologies(self):
"""
Gets the InterconnectLinkTopologies API client.
Returns:
InterconnectLinkTopologies:
"""
if not self.__interconnect_link_topologies:
self.__interconnect_link_topologies = InterconnectLinkTopologies(self.__connection)
return self.__interconnect_link_topologies | Gets the InterconnectLinkTopologies API client.
Returns:
InterconnectLinkTopologies: |
def DiscreteUniform(n=10,LB=1,UB=99,B=100):
"""DiscreteUniform: create random, uniform instance for the bin packing problem."""
s = [0]*n
for i in range(n):
s[i] = random.randint(LB,UB)
return s,B | DiscreteUniform: create random, uniform instance for the bin packing problem. |
def iflatten(seq, isSeq=isSeq):
r"""Like `flatten` but lazy."""
for elt in seq:
if isSeq(elt):
for x in iflatten(elt, isSeq):
yield x
else:
yield elt | r"""Like `flatten` but lazy. |
def _mark_started(self):
"""
Set the state information for a task once it has completely started.
        In particular, the time limit is applied as of this time (i.e. after
        any start delay has been taken).
"""
log = self._params.get('log', self._discard)
now = time.time()
self._started = now
limit = self._config_running.get('time_limit')
try:
limit = float(_fmt_context(self._get(limit, default='0'), self._context))
if limit > 0:
log.debug("Applying task '%s' time limit of %s", self._name, deltafmt(limit))
self._limit = now + limit
except Exception as e:
log.warn("Task '%s' time_limit value '%s' invalid -- %s",
self._name, limit, e, exc_info=log.isEnabledFor(logging.DEBUG)) | Set the state information for a task once it has completely started.
        In particular, the time limit is applied as of this time (i.e. after
        any start delay has been taken).
def get(key, value=None, conf_file=_DEFAULT_CONF):
'''
Get the value for a specific configuration line.
:param str key: The command or stanza block to configure.
:param str value: The command value or command of the block specified by the key parameter.
:param str conf_file: The logrotate configuration file.
:return: The value for a specific configuration line.
:rtype: bool|int|str
CLI Example:
.. code-block:: bash
salt '*' logrotate.get rotate
salt '*' logrotate.get /var/log/wtmp rotate /etc/logrotate.conf
'''
current_conf = _parse_conf(conf_file)
stanza = current_conf.get(key, False)
if value:
if stanza:
return stanza.get(value, False)
_LOG.warning("Block '%s' not present or empty.", key)
return stanza | Get the value for a specific configuration line.
:param str key: The command or stanza block to configure.
:param str value: The command value or command of the block specified by the key parameter.
:param str conf_file: The logrotate configuration file.
:return: The value for a specific configuration line.
:rtype: bool|int|str
CLI Example:
.. code-block:: bash
salt '*' logrotate.get rotate
salt '*' logrotate.get /var/log/wtmp rotate /etc/logrotate.conf |
def parse_string(self, timestr, subfmts):
"""Read time from a single string, using a set of possible formats."""
# Datetime components required for conversion to JD by ERFA, along
# with the default values.
components = ('year', 'mon', 'mday')
defaults = (None, 1, 1, 0)
# Assume that anything following "." on the right side is a
# floating fraction of a second.
        try:
            idot = timestr.rindex('.')
        except ValueError:
            fracday = 0.0
        else:
            timestr, fracday = timestr[:idot], timestr[idot:]
            fracday = float(fracday)
        for _, strptime_fmt_or_regex, _ in subfmts:
            vals = []
            if isinstance(strptime_fmt_or_regex, six.string_types):
                try:
                    # NOTE: time.struct_time is read-only; the fractional day
                    # is converted to hour/minute/second further below rather
                    # than by mutating ``tm`` here.
                    tm = time.strptime(timestr, strptime_fmt_or_regex)
                except ValueError as ex:
                    print(ex)
                    continue
else:
vals = [getattr(tm, 'tm_' + component)
for component in components]
else:
tm = re.match(strptime_fmt_or_regex, timestr)
if tm is None:
continue
tm = tm.groupdict()
vals = [int(tm.get(component, default)) for component, default
in six.moves.zip(components, defaults)]
hrprt = int(24 * fracday)
vals.append(hrprt)
mnprt = int(60 * (24 * fracday - hrprt))
vals.append(mnprt)
scprt = 60 * (60 * (24 * fracday - hrprt) - mnprt)
vals.append(scprt)
return vals
else:
raise ValueError('Time {0} does not match {1} format'
.format(timestr, self.name)) | Read time from a single string, using a set of possible formats. |
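Worked arithmetic for the fractional-day decomposition used at the end of the loop above; the fraction is chosen to be exactly representable in binary so the printed values come out clean.

fracday = 0.765625                                    # 18:22:30 as a fraction of a day
hrprt = int(24 * fracday)                             # 18
mnprt = int(60 * (24 * fracday - hrprt))              # 22
scprt = 60 * (60 * (24 * fracday - hrprt) - mnprt)    # 30.0
print(hrprt, mnprt, scprt)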
def classify_tangent_intersection(
intersection, nodes1, tangent1, nodes2, tangent2
):
"""Helper for func:`classify_intersection` at tangencies.
.. note::
This is a helper used only by :func:`classify_intersection`.
Args:
intersection (.Intersection): An intersection object.
nodes1 (numpy.ndarray): Control points for the first curve at
the intersection.
tangent1 (numpy.ndarray): The tangent vector to the first curve
at the intersection (``2 x 1`` array).
nodes2 (numpy.ndarray): Control points for the second curve at
the intersection.
tangent2 (numpy.ndarray): The tangent vector to the second curve
at the intersection (``2 x 1`` array).
Returns:
IntersectionClassification: The "inside" curve type, based on
the classification enum. Will either be ``opposed`` or one
of the ``tangent`` values.
Raises:
NotImplementedError: If the curves are tangent at the intersection
and have the same curvature.
"""
# Each array is 2 x 1 (i.e. a column vector), we want the vector
# dot product.
dot_prod = np.vdot(tangent1[:, 0], tangent2[:, 0])
# NOTE: When computing curvatures we assume that we don't have lines
# here, because lines that are tangent at an intersection are
# parallel and we don't handle that case.
curvature1 = _curve_helpers.get_curvature(nodes1, tangent1, intersection.s)
curvature2 = _curve_helpers.get_curvature(nodes2, tangent2, intersection.t)
if dot_prod < 0:
# If the tangent vectors are pointing in the opposite direction,
# then the curves are facing opposite directions.
sign1, sign2 = _SIGN([curvature1, curvature2])
if sign1 == sign2:
# If both curvatures are positive, since the curves are
# moving in opposite directions, the tangency isn't part of
# the surface intersection.
if sign1 == 1.0:
return CLASSIFICATION_T.OPPOSED
else:
return CLASSIFICATION_T.TANGENT_BOTH
else:
delta_c = abs(curvature1) - abs(curvature2)
if delta_c == 0.0:
raise NotImplementedError(_SAME_CURVATURE)
elif sign1 == _SIGN(delta_c):
return CLASSIFICATION_T.OPPOSED
else:
return CLASSIFICATION_T.TANGENT_BOTH
else:
if curvature1 > curvature2:
return CLASSIFICATION_T.TANGENT_FIRST
elif curvature1 < curvature2:
return CLASSIFICATION_T.TANGENT_SECOND
else:
            raise NotImplementedError(_SAME_CURVATURE) | Helper for :func:`classify_intersection` at tangencies.
.. note::
This is a helper used only by :func:`classify_intersection`.
Args:
intersection (.Intersection): An intersection object.
nodes1 (numpy.ndarray): Control points for the first curve at
the intersection.
tangent1 (numpy.ndarray): The tangent vector to the first curve
at the intersection (``2 x 1`` array).
nodes2 (numpy.ndarray): Control points for the second curve at
the intersection.
tangent2 (numpy.ndarray): The tangent vector to the second curve
at the intersection (``2 x 1`` array).
Returns:
IntersectionClassification: The "inside" curve type, based on
the classification enum. Will either be ``opposed`` or one
of the ``tangent`` values.
Raises:
NotImplementedError: If the curves are tangent at the intersection
and have the same curvature. |
def get_flight_rules(vis: Number, ceiling: Cloud) -> int:
"""
Returns int based on current flight rules from parsed METAR data
0=VFR, 1=MVFR, 2=IFR, 3=LIFR
Note: Common practice is to report IFR if visibility unavailable
"""
# Parse visibility
if not vis:
return 2
if vis.repr == 'CAVOK' or vis.repr.startswith('P6'):
vis = 10 # type: ignore
elif vis.repr.startswith('M'):
vis = 0 # type: ignore
# Convert meters to miles
elif len(vis.repr) == 4:
vis = vis.value * 0.000621371 # type: ignore
else:
vis = vis.value # type: ignore
# Parse ceiling
cld = ceiling.altitude if ceiling else 99
# Determine flight rules
if (vis <= 5) or (cld <= 30): # type: ignore
if (vis < 3) or (cld < 10): # type: ignore
if (vis < 1) or (cld < 5): # type: ignore
return 3 # LIFR
return 2 # IFR
return 1 # MVFR
return 0 | Returns int based on current flight rules from parsed METAR data
0=VFR, 1=MVFR, 2=IFR, 3=LIFR
Note: Common practice is to report IFR if visibility unavailable |
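The threshold cascade above can be restated on plain numbers (visibility in statute miles, ceiling in hundreds of feet), independent of the Number/Cloud wrapper types; the sample observations are made up.

def flight_rules(vis_sm, ceiling_100ft):
    if vis_sm <= 5 or ceiling_100ft <= 30:
        if vis_sm < 3 or ceiling_100ft < 10:
            if vis_sm < 1 or ceiling_100ft < 5:
                return 3      # LIFR
            return 2          # IFR
        return 1              # MVFR
    return 0                  # VFR

print(flight_rules(10, 250))    # 0 -> VFR
print(flight_rules(4, 40))      # 1 -> MVFR
print(flight_rules(2, 8))       # 2 -> IFR
print(flight_rules(0.5, 3))     # 3 -> LIFR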
def calc_avr_uvr_v1(self):
"""Calculate the flown through area and the wetted perimeter of both
outer embankments.
Note that each outer embankment lies beyond its foreland and that all
        water flowing exactly above an embankment is added to |AVR|.
        The theoretical surface separating water above the foreland from water
above its embankment is not contributing to |UVR|.
Required control parameters:
|HM|
|BNVR|
Required derived parameter:
|HV|
Required flux sequence:
|H|
Calculated flux sequence:
|AVR|
|UVR|
Examples:
Generally, right trapezoids are assumed. Here, for simplicity, both
forelands are assumed to be symmetrical. Their smaller bases (bottoms)
        have a length of 2 meters, their non-vertical legs show an inclination
        of 1 meter per 4 meters, and their height (depth) is 1 meter. Both
        forelands lie 1 meter above the main channel's bottom.
        Generally, triangles are assumed, with the vertical side
        separating the foreland from its outer embankment. Here, for
        simplicity, both forelands are assumed to be symmetrical. Their
        inclinations are 1 meter per 4 meters and their lowest point is
        1 meter above the foreland's bottom and 2 meters above the main
        channel's bottom:
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> hm(1.0)
>>> bnvr(4.0)
>>> derived.hv(1.0)
The first example deals with moderate high flow conditions, where
water flows over the forelands, but not over their outer embankments
(|HM| < |H| < (|HM| + |HV|)):
>>> fluxes.h = 1.5
>>> model.calc_avr_uvr_v1()
>>> fluxes.avr
avr(0.0, 0.0)
>>> fluxes.uvr
uvr(0.0, 0.0)
The second example deals with extreme high flow conditions, where
water flows over the both foreland and their outer embankments
((|HM| + |HV|) < |H|):
>>> fluxes.h = 2.5
>>> model.calc_avr_uvr_v1()
>>> fluxes.avr
avr(0.5, 0.5)
>>> fluxes.uvr
uvr(2.061553, 2.061553)
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
for i in range(2):
if flu.h <= (con.hm+der.hv[i]):
flu.avr[i] = 0.
flu.uvr[i] = 0.
else:
flu.avr[i] = (flu.h-(con.hm+der.hv[i]))**2*con.bnvr[i]/2.
flu.uvr[i] = (flu.h-(con.hm+der.hv[i]))*(1.+con.bnvr[i]**2)**.5 | Calculate the flown through area and the wetted perimeter of both
outer embankments.
Note that each outer embankment lies beyond its foreland and that all
        water flowing exactly above an embankment is added to |AVR|.
        The theoretical surface separating water above the foreland from water
above its embankment is not contributing to |UVR|.
Required control parameters:
|HM|
|BNVR|
Required derived parameter:
|HV|
Required flux sequence:
|H|
Calculated flux sequence:
|AVR|
|UVR|
Examples:
Generally, right trapezoids are assumed. Here, for simplicity, both
forelands are assumed to be symmetrical. Their smaller bases (bottoms)
        have a length of 2 meters, their non-vertical legs show an inclination
        of 1 meter per 4 meters, and their height (depth) is 1 meter. Both
        forelands lie 1 meter above the main channel's bottom.
        Generally, triangles are assumed, with the vertical side
        separating the foreland from its outer embankment. Here, for
        simplicity, both forelands are assumed to be symmetrical. Their
        inclinations are 1 meter per 4 meters and their lowest point is
        1 meter above the foreland's bottom and 2 meters above the main
        channel's bottom:
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> hm(1.0)
>>> bnvr(4.0)
>>> derived.hv(1.0)
The first example deals with moderate high flow conditions, where
water flows over the forelands, but not over their outer embankments
(|HM| < |H| < (|HM| + |HV|)):
>>> fluxes.h = 1.5
>>> model.calc_avr_uvr_v1()
>>> fluxes.avr
avr(0.0, 0.0)
>>> fluxes.uvr
uvr(0.0, 0.0)
The second example deals with extreme high flow conditions, where
        water flows over both the forelands and their outer embankments
((|HM| + |HV|) < |H|):
>>> fluxes.h = 2.5
>>> model.calc_avr_uvr_v1()
>>> fluxes.avr
avr(0.5, 0.5)
>>> fluxes.uvr
uvr(2.061553, 2.061553) |
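A standalone numerical check of the triangle formulas above, using the docstring's extreme-flow example (h=2.5, hm=1.0, hv=1.0, bnvr=4.0).

h, hm, hv, bnvr = 2.5, 1.0, 1.0, 4.0
d = h - (hm + hv)                        # water depth above the embankment toe
avr = d**2 * bnvr / 2.0                  # triangular flow cross-section
uvr = d * (1.0 + bnvr**2) ** 0.5         # wetted length of the sloped bank
print(round(avr, 6), round(uvr, 6))      # 0.5 2.061553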
def energy_ratio_by_chunks(x, param):
"""
Calculates the sum of squares of chunk i out of N chunks expressed as a ratio with the sum of squares over the whole
series.
Takes as input parameters the number num_segments of segments to divide the series into and segment_focus
which is the segment number (starting at zero) to return a feature on.
If the length of the time series is not a multiple of the number of segments, the remaining data points are
distributed on the bins starting from the first. For example, if your time series consists of 8 entries, the
first two bins will contain 3 and the last two values, e.g. `[ 0., 1., 2.], [ 3., 4., 5.]` and `[ 6., 7.]`.
Note that the answer for `num_segments = 1` is a trivial "1" but we handle this scenario
in case somebody calls it. Sum of the ratios should be 1.0.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"num_segments": N, "segment_focus": i} with N, i both ints
:return: the feature values
:return type: list of tuples (index, data)
"""
res_data = []
res_index = []
full_series_energy = np.sum(x ** 2)
for parameter_combination in param:
num_segments = parameter_combination["num_segments"]
segment_focus = parameter_combination["segment_focus"]
assert segment_focus < num_segments
assert num_segments > 0
res_data.append(np.sum(np.array_split(x, num_segments)[segment_focus] ** 2.0)/full_series_energy)
res_index.append("num_segments_{}__segment_focus_{}".format(num_segments, segment_focus))
return list(zip(res_index, res_data)) | Calculates the sum of squares of chunk i out of N chunks expressed as a ratio with the sum of squares over the whole
series.
Takes as input parameters the number num_segments of segments to divide the series into and segment_focus
which is the segment number (starting at zero) to return a feature on.
If the length of the time series is not a multiple of the number of segments, the remaining data points are
distributed on the bins starting from the first. For example, if your time series consists of 8 entries, the
first two bins will contain 3 and the last two values, e.g. `[ 0., 1., 2.], [ 3., 4., 5.]` and `[ 6., 7.]`.
Note that the answer for `num_segments = 1` is a trivial "1" but we handle this scenario
in case somebody calls it. Sum of the ratios should be 1.0.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"num_segments": N, "segment_focus": i} with N, i both ints
:return: the feature values
:return type: list of tuples (index, data) |
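A small check of the chunk-energy ratio above on an 8-point series split into three segments, mirroring the [3, 3, 2] split mentioned in the docstring; it assumes the `energy_ratio_by_chunks` function and the numpy import from the surrounding module.

import numpy as np

x = np.arange(8, dtype=float)
param = [{"num_segments": 3, "segment_focus": i} for i in range(3)]
result = energy_ratio_by_chunks(x, param)
print(result)                                            # ratios 5/140, 50/140, 85/140
print(round(sum(value for _, value in result), 12))      # 1.0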
def connect(self, port=None, baud_rate=115200):
'''
Parameters
----------
port : str or list-like, optional
Port (or list of ports) to try to connect to as a DMF Control
Board.
baud_rate : int, optional
Returns
-------
str
Port DMF control board was connected on.
Raises
------
RuntimeError
If connection could not be established.
IOError
If no ports were specified and Arduino Mega2560 not found on any
port.
'''
if isinstance(port, types.StringTypes):
ports = [port]
else:
ports = port
if not ports:
# No port was specified.
#
# Try ports matching Mega2560 USB vendor/product ID.
ports = serial_ports().index.tolist()
if not ports:
raise IOError("Arduino Mega2560 not found on any port.")
for comport_i in ports:
if self.connected():
self.disconnect()
self.port = None
self._i2c_devices = {}
# Try to connect to control board on available ports.
try:
logger.debug('Try to connect to: %s', comport_i)
# Explicitly cast `comport_i` to string since `Base.connect`
# Boost Python binding does not support unicode strings.
#
# Fixes [issue 8][issue-8].
#
# [issue-8]: https://github.com/wheeler-microfluidics/dmf-control-board-firmware/issues/8
Base.connect(self, str(comport_i), baud_rate)
self.port = comport_i
break
except BadVGND, exception:
logger.warning(exception)
break
except RuntimeError, exception:
continue
else:
raise RuntimeError('Could not connect to control board on any of '
'the following ports: %s' % ports)
name = self.name()
version = self.hardware_version()
firmware = self.software_version()
serial_number_string = ""
try:
serial_number_string = ", S/N %03d" % self.serial_number
except:
# Firmware does not support `serial_number` attribute.
pass
logger.info("Connected to %s v%s (Firmware: %s%s)" %
(name, version, firmware, serial_number_string))
logger.info("Poll control board for series resistors and "
"capacitance values.")
self._read_calibration_data()
try:
self.__aref__ = self._aref()
logger.info("Analog reference = %.2f V" % self.__aref__)
except:
# Firmware does not support `__aref__` attribute.
pass
# Check VGND for both analog channels
expected = 2 ** 10/2
v = {}
channels = [0, 1]
damaged = []
for channel in channels:
try:
v[channel] = np.mean(self.analog_reads(channel, 10))
logger.info("A%d VGND = %.2f V (%.2f%% of Aref)", channel,
self.__aref__ * v[channel] / (2 ** 10), 100.0 *
v[channel] / (2 ** 10))
# Make sure that the VGND is close to the expected value;
# otherwise, the op-amp may be damaged (expected error
# is <= 10%).
if np.abs(v[channel] - expected) / expected > .1:
damaged.append(channel)
except:
# Firmware does not support `__aref__` attribute.
break
# Scan I2C bus to generate list of connected devices.
self._i2c_scan()
if damaged:
# At least one of the analog input channels appears to be damaged.
if len(damaged) == 1:
msg = "Analog channel %d appears" % damaged[0]
else:
msg = "Analog channels %s appear" % damaged
raise BadVGND(msg + " to be damaged. You may need to replace the "
"op-amp on the control board.")
return self.RETURN_OK | Parameters
----------
port : str or list-like, optional
Port (or list of ports) to try to connect to as a DMF Control
Board.
baud_rate : int, optional
Returns
-------
str
Port DMF control board was connected on.
Raises
------
RuntimeError
If connection could not be established.
IOError
If no ports were specified and Arduino Mega2560 not found on any
port. |
def standard_parsing_functions(Block, Tx):
"""
Return the standard parsing functions for a given Block and Tx class.
The return value is expected to be used with the standard_streamer function.
"""
def stream_block(f, block):
assert isinstance(block, Block)
block.stream(f)
def stream_blockheader(f, blockheader):
assert isinstance(blockheader, Block)
blockheader.stream_header(f)
def stream_tx(f, tx):
assert isinstance(tx, Tx)
tx.stream(f)
def parse_int_6(f):
        b = f.read(6) + b'\0\0'               # pad the 6 bytes to 8 for "<Q"
        return struct.unpack("<Q", b)[0]      # struct takes the format string first
    def stream_int_6(f, v):
        f.write(struct.pack("<Q", v)[:6])
more_parsing = [
("A", (PeerAddress.parse, lambda f, peer_addr: peer_addr.stream(f))),
("v", (InvItem.parse, lambda f, inv_item: inv_item.stream(f))),
("T", (Tx.parse, stream_tx)),
("B", (Block.parse, stream_block)),
("z", (Block.parse_as_header, stream_blockheader)),
("1", (lambda f: struct.unpack("B", f.read(1))[0], lambda f, v: f.write(struct.pack("B", v)))),
("6", (parse_int_6, stream_int_6)),
("O", (lambda f: True if f.read(1) else False,
lambda f, v: f.write(b'' if v is None else struct.pack("B", v)))),
]
all_items = list(STREAMER_FUNCTIONS.items())
all_items.extend(more_parsing)
return all_items | Return the standard parsing functions for a given Block and Tx class.
The return value is expected to be used with the standard_streamer function. |
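A round-trip check of the 6-byte little-endian integer helpers as fixed above, on an in-memory stream; the helpers are redefined locally so the snippet runs on its own.

import io
import struct

def stream_int_6(f, v):
    f.write(struct.pack("<Q", v)[:6])     # keep the low 6 of 8 little-endian bytes

def parse_int_6(f):
    return struct.unpack("<Q", f.read(6) + b'\0\0')[0]

buf = io.BytesIO()
stream_int_6(buf, 0x123456789ABC)
buf.seek(0)
print(hex(parse_int_6(buf)))              # 0x123456789abc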
def save(self, index=None, force=False):
"""Save file"""
editorstack = self.get_current_editorstack()
return editorstack.save(index=index, force=force) | Save file |
def pverb(self, *args, **kwargs):
""" Console verbose message to STDOUT """
if not self.verbose:
return
self.pstd(*args, **kwargs) | Console verbose message to STDOUT |
def createPREMISEventXML(eventType, agentIdentifier, eventDetail, eventOutcome,
outcomeDetail=None, eventIdentifier=None,
linkObjectList=[], eventDate=None):
"""
Actually create our PREMIS Event XML
"""
eventXML = etree.Element(PREMIS + "event", nsmap=PREMIS_NSMAP)
eventIDXML = etree.SubElement(eventXML, PREMIS + "eventIdentifier")
eventTypeXML = etree.SubElement(eventXML, PREMIS + "eventType")
eventTypeXML.text = eventType
eventIDTypeXML = etree.SubElement(
eventIDXML, PREMIS + "eventIdentifierType"
)
eventIDTypeXML.text = \
"http://purl.org/net/untl/vocabularies/identifier-qualifiers/#UUID"
eventIDValueXML = etree.SubElement(
eventIDXML, PREMIS + "eventIdentifierValue"
)
if eventIdentifier:
eventIDValueXML.text = eventIdentifier
else:
eventIDValueXML.text = uuid.uuid4().hex
eventDateTimeXML = etree.SubElement(eventXML, PREMIS + "eventDateTime")
if eventDate is None:
eventDateTimeXML.text = xsDateTime_format(datetime.utcnow())
else:
eventDateTimeXML.text = xsDateTime_format(eventDate)
eventDetailXML = etree.SubElement(eventXML, PREMIS + "eventDetail")
eventDetailXML.text = eventDetail
eventOutcomeInfoXML = etree.SubElement(
eventXML, PREMIS + "eventOutcomeInformation"
)
eventOutcomeXML = etree.SubElement(
eventOutcomeInfoXML, PREMIS + "eventOutcome"
)
eventOutcomeXML.text = eventOutcome
if outcomeDetail:
eventOutcomeDetailXML = etree.SubElement(
eventOutcomeInfoXML, PREMIS + "eventOutcomeDetail"
)
eventOutcomeDetailNoteXML = etree.SubElement(
eventOutcomeDetailXML, PREMIS + "eventOutcomeDetailNote"
)
eventOutcomeDetailNoteXML.text = outcomeDetail
# Assuming it's a list of 3-item tuples here [ ( identifier, type, role) ]
linkAgentIDXML = etree.SubElement(
eventXML, PREMIS + "linkingAgentIdentifier")
linkAgentIDTypeXML = etree.SubElement(
linkAgentIDXML, PREMIS + "linkingAgentIdentifierType"
)
linkAgentIDTypeXML.text = \
"http://purl.org/net/untl/vocabularies/identifier-qualifiers/#URL"
linkAgentIDValueXML = etree.SubElement(
linkAgentIDXML, PREMIS + "linkingAgentIdentifierValue"
)
linkAgentIDValueXML.text = agentIdentifier
linkAgentIDRoleXML = etree.SubElement(
linkAgentIDXML, PREMIS + "linkingAgentRole"
)
linkAgentIDRoleXML.text = \
"http://purl.org/net/untl/vocabularies/linkingAgentRoles/#executingProgram"
for linkObject in linkObjectList:
linkObjectIDXML = etree.SubElement(
eventXML, PREMIS + "linkingObjectIdentifier"
)
linkObjectIDTypeXML = etree.SubElement(
linkObjectIDXML, PREMIS + "linkingObjectIdentifierType"
)
linkObjectIDTypeXML.text = linkObject[1]
linkObjectIDValueXML = etree.SubElement(
linkObjectIDXML, PREMIS + "linkingObjectIdentifierValue"
)
linkObjectIDValueXML.text = linkObject[0]
if linkObject[2]:
linkObjectRoleXML = etree.SubElement(
linkObjectIDXML, PREMIS + "linkingObjectRole"
)
linkObjectRoleXML.text = linkObject[2]
return eventXML | Actually create our PREMIS Event XML |
def get_instance(self, payload):
"""
Build an instance of NotificationInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.notification.NotificationInstance
:rtype: twilio.rest.api.v2010.account.notification.NotificationInstance
"""
return NotificationInstance(self._version, payload, account_sid=self._solution['account_sid'], ) | Build an instance of NotificationInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.notification.NotificationInstance
:rtype: twilio.rest.api.v2010.account.notification.NotificationInstance |
def _update_port_locations(self, initial_coordinates):
"""Adjust port locations after particles have moved
Compares the locations of Particles between 'self' and an array of
reference coordinates. Shifts Ports in accordance with how far anchors
have been moved. This conserves the location of Ports with respect to
their anchor Particles, but does not conserve the orientation of Ports
with respect to the molecule as a whole.
Parameters
----------
initial_coordinates : np.ndarray, shape=(n, 3), dtype=float
Reference coordinates to use for comparing how far anchor Particles
have shifted.
"""
particles = list(self.particles())
for port in self.all_ports():
if port.anchor:
idx = particles.index(port.anchor)
shift = particles[idx].pos - initial_coordinates[idx]
port.translate(shift) | Adjust port locations after particles have moved
Compares the locations of Particles between 'self' and an array of
reference coordinates. Shifts Ports in accordance with how far anchors
have been moved. This conserves the location of Ports with respect to
their anchor Particles, but does not conserve the orientation of Ports
with respect to the molecule as a whole.
Parameters
----------
initial_coordinates : np.ndarray, shape=(n, 3), dtype=float
Reference coordinates to use for comparing how far anchor Particles
have shifted. |
def remove_outcome_hook(self, outcome_id):
"""Removes internal transition going to the outcome
"""
for transition_id in list(self.transitions.keys()):
transition = self.transitions[transition_id]
if transition.to_outcome == outcome_id and transition.to_state == self.state_id:
self.remove_transition(transition_id) | Removes internal transition going to the outcome |
def _metric_when_multiplied_with_sig_vec(self, sig):
"""return D^-1 B^T diag(sig) B D as a measure for
C^-1/2 diag(sig) C^1/2
:param sig: a vector "used" as diagonal matrix
:return:
"""
return dot((self.B * self.D**-1.).T * sig, self.B * self.D) | return D^-1 B^T diag(sig) B D as a measure for
C^-1/2 diag(sig) C^1/2
:param sig: a vector "used" as diagonal matrix
:return: |
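A numpy sketch verifying that the broadcasting expression above equals D^-1 B^T diag(sig) B D for an orthogonal B and a positive diagonal scaling D stored as a vector; the random test matrices are illustrative.

import numpy as np

rng = np.random.default_rng(0)
B, _ = np.linalg.qr(rng.standard_normal((4, 4)))   # orthogonal eigenbasis
D = rng.uniform(0.5, 2.0, size=4)                  # diagonal scalings as a vector
sig = rng.uniform(0.1, 1.0, size=4)

lhs = np.dot((B * D**-1.0).T * sig, B * D)
rhs = np.diag(D**-1.0) @ B.T @ np.diag(sig) @ B @ np.diag(D)
print(np.allclose(lhs, rhs))                       # True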
def collect_modules(self):
""" Collect up the list of modules in use """
try:
res = {}
m = sys.modules
for k in m:
# Don't report submodules (e.g. django.x, django.y, django.z)
# Skip modules that begin with underscore
if ('.' in k) or k[0] == '_':
continue
if m[k]:
try:
d = m[k].__dict__
if "version" in d and d["version"]:
res[k] = self.jsonable(d["version"])
elif "__version__" in d and d["__version__"]:
res[k] = self.jsonable(d["__version__"])
else:
res[k] = get_distribution(k).version
except DistributionNotFound:
pass
except Exception:
logger.debug("collect_modules: could not process module: %s" % k)
except Exception:
logger.debug("collect_modules", exc_info=True)
else:
return res | Collect up the list of modules in use |
def cross_validate(self, ax):
'''
Performs the cross-validation step.
'''
# The CDPP to beat
cdpp_opt = self.get_cdpp_arr()
# Loop over all chunks
for b, brkpt in enumerate(self.breakpoints):
log.info("Cross-validating chunk %d/%d..." %
(b + 1, len(self.breakpoints)))
# Mask for current chunk
m = self.get_masked_chunk(b)
# Mask transits and outliers
time = self.time[m]
flux = self.fraw[m]
ferr = self.fraw_err[m]
med = np.nanmedian(self.fraw)
# Setup the GP
gp = GP(self.kernel, self.kernel_params, white=False)
gp.compute(time, ferr)
# The masks
masks = list(Chunks(np.arange(0, len(time)),
len(time) // self.cdivs))
# The pre-computed matrices
pre_v = [self.cv_precompute(mask, b) for mask in masks]
# Initialize with the nPLD solution
log_lam_opt = np.log10(self.lam[b])
scatter_opt = self.validation_scatter(
log_lam_opt, b, masks, pre_v, gp, flux, time, med)
log.info("Iter 0/%d: " % (self.piter) +
"logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam_opt]),
scatter_opt))
# Do `piter` iterations
for p in range(self.piter):
# Perturb the initial condition a bit
log_lam = np.array(
np.log10(self.lam[b])) * \
(1 + self.ppert * np.random.randn(len(self.lam[b])))
scatter = self.validation_scatter(
log_lam, b, masks, pre_v, gp, flux, time, med)
log.info("Initializing at: " +
"logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam]), scatter))
# Call the minimizer
log_lam, scatter, _, _, _, _ = \
fmin_powell(self.validation_scatter, log_lam,
args=(b, masks, pre_v, gp, flux, time, med),
maxfun=self.pmaxf, disp=False,
full_output=True)
# Did it improve the CDPP?
tmp = np.array(self.lam[b])
self.lam[b] = 10 ** log_lam
self.compute()
cdpp = self.get_cdpp_arr()[b]
self.lam[b] = tmp
if cdpp < cdpp_opt[b]:
cdpp_opt[b] = cdpp
log_lam_opt = log_lam
# Log it
log.info("Iter %d/%d: " % (p + 1, self.piter) +
"logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam]), scatter))
# The best solution
log.info("Found minimum: logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam_opt]),
scatter_opt))
self.lam[b] = 10 ** log_lam_opt
# We're just going to plot lambda as a function of chunk number
bs = np.arange(len(self.breakpoints))
color = ['k', 'b', 'r', 'g', 'y']
for n in range(self.pld_order):
ax[0].plot(bs + 1, [np.log10(self.lam[b][n])
for b in bs], '.', color=color[n])
ax[0].plot(bs + 1, [np.log10(self.lam[b][n])
for b in bs], '-', color=color[n], alpha=0.25)
ax[0].set_ylabel(r'$\log\Lambda$', fontsize=5)
ax[0].margins(0.1, 0.1)
ax[0].set_xticks(np.arange(1, len(self.breakpoints) + 1))
ax[0].set_xticklabels([])
# Now plot the CDPP
cdpp_arr = self.get_cdpp_arr()
ax[1].plot(bs + 1, cdpp_arr, 'b.')
ax[1].plot(bs + 1, cdpp_arr, 'b-', alpha=0.25)
ax[1].margins(0.1, 0.1)
ax[1].set_ylabel(r'Scatter (ppm)', fontsize=5)
ax[1].set_xlabel(r'Chunk', fontsize=5)
ax[1].set_xticks(np.arange(1, len(self.breakpoints) + 1)) | Performs the cross-validation step. |
def get_products(self):
"""
List of formulas of potential products. E.g., ['Li','O2','Mn'].
"""
products = set()
for _, _, _, react, _ in self.get_kinks():
products = products.union(set([k.reduced_formula
for k in react.products]))
return list(products) | List of formulas of potential products. E.g., ['Li','O2','Mn']. |