def add_section(self, section_name: str) -> None:
"""Add a section to the :class:`SampleSheet`."""
section_name = self._whitespace_re.sub('_', section_name)
self._sections.append(section_name)
setattr(self, section_name, Section())
def load(self, options):
"""
Load the schema objects for the root nodes.
- de-references schemas
- merge schemas
@param options: An options dictionary.
@type options: L{options.Options}
@return: The merged schema.
@rtype: L{Schema}
"""
if options.autoblend:
self.autoblend()
for child in self.children:
child.build()
for child in self.children:
child.open_imports(options)
for child in self.children:
child.dereference()
log.debug('loaded:\n%s', self)
merged = self.merge()
log.debug('MERGED:\n%s', merged)
return merged
def _set_lookup_prop(self, result_data):
"""Set lookup property based on processed testcases if not configured."""
if self._lookup_prop:
return
if result_data.get("id"):
self._lookup_prop = "id"
elif result_data.get("title"):
self._lookup_prop = "name"
else:
return
logger.debug("Setting lookup method for xunit to `%s`", self._lookup_prop)
def _compute(self, arrays, dates, assets, mask):
"""
For each row in the input, compute a like-shaped array of per-row
ranks.
"""
return masked_rankdata_2d(
arrays[0],
mask,
self.inputs[0].missing_value,
self._method,
self._ascending,
)
def _is_type(self, instance, type):
"""
Check if an ``instance`` is of the provided (JSON Schema) ``type``.
"""
if type not in self._types:
raise UnknownType(type)
type = self._types[type]
# bool inherits from int, so ensure bools aren't reported as integers
if isinstance(instance, bool):
type = _flatten(type)
if int in type and bool not in type:
return False
return isinstance(instance, type)
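The bool special case above exists because bool is a subclass of int in Python, so a plain isinstance check would accept True and False as JSON "integer" values. A minimal standalone sketch of the same idea (independent of the validator class; the function name is illustrative):
def is_json_integer(instance):
    # bool subclasses int, but JSON Schema treats booleans as a separate type
    if isinstance(instance, bool):
        return False
    return isinstance(instance, int)
assert is_json_integer(5)
assert not is_json_integer(True)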
def build(self):
"""Generate a TermDocMatrix from data in parameters.
Returns
----------
term_doc_matrix : TermDocMatrix
The object that this factory class builds.
"""
if self._category_text_iter is None:
raise CategoryTextIterNotSetError()
nlp = self.get_nlp()
category_document_iter = (
(category, self._clean_function(raw_text))
for category, raw_text
in self._category_text_iter
)
term_doc_matrix = self._build_from_category_spacy_doc_iter(
(
(category, nlp(text))
for (category, text)
in category_document_iter
if text.strip() != ''
)
)
return term_doc_matrix
def definition_name(cls):
"""Helper method for creating definition name.
Names will be generated to include the class's package name,
scope (if the class is nested in another definition) and class
name.
By default, the package name for a definition is derived from
its module name. However, this value can be overridden by
placing a 'package' attribute in the module that contains the
definition class. For example:
package = 'some.alternate.package'
class MyMessage(Message):
...
>>> MyMessage.definition_name()
some.alternate.package.MyMessage
Returns:
Dot-separated fully qualified name of definition.
"""
outer_definition_name = cls.outer_definition_name()
if outer_definition_name is None:
return six.text_type(cls.__name__)
return u'%s.%s' % (outer_definition_name, cls.__name__)
def __dfs(self, start, weights, depth_limit):
"""
Modified NetworkX depth-first search (DFS).
"""
adj = self._adj
stack = [(start, depth_limit, iter(sorted(adj[start], key=weights)))]
visited = {start}
disconnected = defaultdict(list)
edges = defaultdict(list)
while stack:
parent, depth_now, children = stack[-1]
try:
child = next(children)
except StopIteration:
stack.pop()
else:
if child not in visited:
edges[parent].append(child)
visited.add(child)
if depth_now > 1:
front = adj[child].keys() - {parent}
if front:
stack.append((child, depth_now - 1, iter(sorted(front, key=weights))))
elif child not in disconnected:
disconnected[parent].append(child)
return visited, edges, disconnected
def bind_kernel(**kwargs):
"""Bind an Engine's Kernel to be used as a full IPython kernel.
This allows a running Engine to be used simultaneously as a full IPython kernel
with the QtConsole or other frontends.
This function returns immediately.
"""
from IPython.zmq.ipkernel import IPKernelApp
from IPython.parallel.apps.ipengineapp import IPEngineApp
# first check for IPKernelApp, in which case this should be a no-op
# because there is already a bound kernel
if IPKernelApp.initialized() and isinstance(IPKernelApp._instance, IPKernelApp):
return
if IPEngineApp.initialized():
try:
app = IPEngineApp.instance()
except MultipleInstanceError:
pass
else:
return app.bind_kernel(**kwargs)
raise RuntimeError("bind_kernel be called from an IPEngineApp instance") | Bind an Engine's Kernel to be used as a full IPython kernel.
This allows a running Engine to be used simultaneously as a full IPython kernel
with the QtConsole or other frontends.
This function returns immediately. |
def predict_mhci_binding(job, peptfile, allele, peplen, univ_options, mhci_options):
"""
Predict binding for each peptide in `peptfile` to `allele` using the IEDB mhci binding
prediction tool.
:param toil.fileStore.FileID peptfile: The input peptide fasta
:param str allele: Allele to predict binding against
:param str peplen: Length of peptides to process
:param dict univ_options: Dict of universal options used by almost all tools
:param dict mhci_options: Options specific to mhci binding prediction
:return: fsID for file containing the predictions
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'peptfile.faa': peptfile}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
peptides = read_peptide_file(os.path.join(os.getcwd(), 'peptfile.faa'))
if not peptides:
return job.fileStore.writeGlobalFile(job.fileStore.getLocalTempFile())
parameters = [mhci_options['pred'],
allele,
peplen,
input_files['peptfile.faa']]
with open('/'.join([work_dir, 'predictions.tsv']), 'w') as predfile:
docker_call(tool='mhci', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=predfile, interactive=True,
tool_version=mhci_options['version'])
output_file = job.fileStore.writeGlobalFile(predfile.name)
job.fileStore.logToMaster('Ran mhci on %s:%s:%s successfully'
% (univ_options['patient'], allele, peplen))
return output_file
def _set_current_page(self, current_page, last_page):
"""
Get the current page for the request.
:param current_page: The current page of results
:type current_page: int
:param last_page: The last page of results
:type last_page: int
:rtype: int
"""
if not current_page:
current_page = self.resolve_current_page()
if current_page > last_page:
if last_page > 0:
return last_page
return 1
if not self._is_valid_page_number(current_page):
return 1
return current_page
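A hedged, self-contained sketch of the same clamping behaviour (the class's resolver and page-validity check are replaced by simple assumptions here):
def clamp_page(current_page, last_page):
    # Past-the-end requests fall back to the last page (or 1 when empty);
    # invalid page numbers fall back to 1.
    if current_page > last_page:
        return last_page if last_page > 0 else 1
    return current_page if current_page >= 1 else 1
assert clamp_page(7, 5) == 5
assert clamp_page(3, 0) == 1
assert clamp_page(-2, 5) == 1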
def on_service_modify(self, svc_ref, old_properties):
"""
Called when a service has been modified in the framework
:param svc_ref: A service reference
:param old_properties: Previous properties values
:return: A tuple (added, (service, reference)) if the dependency has
been changed, else None
"""
with self._lock:
try:
# Look for the service
service = self.services[svc_ref]
except KeyError:
# A previously registered service now matches our filter
return self.on_service_arrival(svc_ref)
else:
# Notify the property modification
self._ipopo_instance.update(
self, service, svc_ref, old_properties
)
return None
def cast_to_python(self, value):
"""Convert JSON definition to UserGroup object"""
# v2.x does not provide a distinction between users and groups at the field selection level, can only return
# UserGroup instances instead of specific User or Group instances
if value is not None:
value = UserGroup(self._swimlane, value)
return value
def to_flat_dict(self, **kwargs):
"""
Convert the :class:`ParameterSet` to a flat dictionary, with keys being
uniquetwigs to access the parameter and values being the :class:`Parameter`
objects themselves.
:return: dict of :class:`Parameter`s
"""
if kwargs:
return self.filter(**kwargs).to_flat_dict()
return {param.uniquetwig: param for param in self._params}
def _file_path(self, uid):
"""Create and return full file path for DayOne entry"""
file_name = '%s.doentry' % (uid)
return os.path.join(self.dayone_journal_path, file_name)
def lambda_not_found_response(*args):
"""
Constructs a Flask Response for when a Lambda function is not found for an endpoint
:return: a Flask Response
"""
response_data = jsonify(ServiceErrorResponses._NO_LAMBDA_INTEGRATION)
return make_response(response_data, ServiceErrorResponses.HTTP_STATUS_CODE_502)
def add_extensions(self, extensions):
"""
Add extensions to the certificate.
:param extensions: The extensions to add.
:type extensions: An iterable of :py:class:`X509Extension` objects.
:return: ``None``
"""
for ext in extensions:
if not isinstance(ext, X509Extension):
raise ValueError("One of the elements is not an X509Extension")
add_result = _lib.X509_add_ext(self._x509, ext._extension, -1)
if not add_result:
_raise_current_error()
def user_events(self, user_obj=None):
"""Fetch all events by a specific user."""
query = dict(user_id=user_obj['_id']) if user_obj else dict()
return self.event_collection.find(query)
def getPrefixDirectories(self, engineRoot, delimiter=' '):
"""
Returns the list of prefix directories for this library, joined using the specified delimiter
"""
return delimiter.join(self.resolveRoot(self.prefixDirs, engineRoot))
def configureCredentials(self, CAFilePath, KeyPath="", CertificatePath=""): # Should be good for MutualAuth and Websocket
"""
**Description**
Used to configure the rootCA, private key and certificate files. Should be called before connect. This is a public
facing API inherited by application level public clients.
**Syntax**
.. code:: python
myShadowClient.configureCredentials("PATH/TO/ROOT_CA", "PATH/TO/PRIVATE_KEY", "PATH/TO/CERTIFICATE")
myJobsClient.configureCredentials("PATH/TO/ROOT_CA", "PATH/TO/PRIVATE_KEY", "PATH/TO/CERTIFICATE")
**Parameters**
*CAFilePath* - Path to read the root CA file. Required for all connection types.
*KeyPath* - Path to read the private key. Required for X.509 certificate based connection.
*CertificatePath* - Path to read the certificate. Required for X.509 certificate based connection.
**Returns**
None
"""
# AWSIoTMQTTClient.configureCredentials
self._AWSIoTMQTTClient.configureCredentials(CAFilePath, KeyPath, CertificatePath)
def proto_IC_ramp_gain(abf=exampleABF):
"""increasing ramps in (?) pA steps."""
standard_inspect(abf)
swhlab.ap.detect(abf)
swhlab.ap.check_AP_raw(abf) #show overlayed first few APs
swhlab.plot.save(abf,tag="01-raw",resize=False)
swhlab.ap.check_AP_deriv(abf) #show overlayed first few APs
swhlab.plot.save(abf,tag="02-deriv")
swhlab.ap.check_AP_phase(abf) #show overlayed first few APs
swhlab.plot.save(abf,tag="03-phase")
swhlab.ap.plot_values(abf,'freq',continuous=True) #plot AP info
pylab.subplot(211)
pylab.axhline(40,color='r',lw=2,ls="--",alpha=.2)
swhlab.plot.save(abf,tag='04-freq')
swhlab.ap.plot_values(abf,'downslope',continuous=True) #plot AP info
pylab.subplot(211)
pylab.axhline(-100,color='r',lw=2,ls="--",alpha=.2)
swhlab.plot.save(abf,tag='04-downslope')
def nlargest(self, n=None):
"""List the n most common elements and their counts.
List is from the most
common to the least. If n is None, list all element counts.
Run time should be O(m log m) where m is len(self)
Args:
n (int): The number of elements to return
"""
if n is None:
return sorted(self.counts(), key=itemgetter(1), reverse=True)
else:
return heapq.nlargest(n, self.counts(), key=itemgetter(1))
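As a usage illustration, the heapq.nlargest call above selects the n pairs with the largest counts; a hedged standalone example with a plain list of (element, count) pairs:
import heapq
from operator import itemgetter
counts = [('a', 3), ('b', 7), ('c', 5)]
top_two = heapq.nlargest(2, counts, key=itemgetter(1))
assert top_two == [('b', 7), ('c', 5)]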
def _fset(self, name):
"""
Build and return the property's *fset* method for the member defined by *name*.
"""
def fset(inst, value):
# the setter uses the wrapped function as well
# to allow for value checks
value = self.fparse(inst, value)
setattr(inst, name, value)
return fset
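A hedged sketch of how such a setter factory is typically wired into a property (the Quantity class and fparse below are illustrative, not the library's actual API):
class Quantity:
    def fparse(self, inst, value):
        # illustrative value check/conversion
        return float(value)
    def _fset(self, name):
        def fset(inst, value):
            value = self.fparse(inst, value)
            setattr(inst, name, value)
        return fset
q = Quantity()
class Sample:
    mass = property(lambda self: self._mass, q._fset('_mass'))
s = Sample()
s.mass = '3.5'
assert s._mass == 3.5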
def reverse(self):
"""Toggles direction of test
:rtype: bool
"""
enabled = self.lib.iperf_get_test_reverse(self._test)
if enabled:
self._reverse = True
else:
self._reverse = False
return self._reverse
def write(self, __text: str) -> None:
"""Write text to the debug stream.
Args:
__text: Text to write
"""
if __text == os.linesep:
self.handle.write(__text)
else:
frame = inspect.currentframe()
if frame is None:
filename = 'unknown'
lineno = 0
else:
outer = frame.f_back
filename = outer.f_code.co_filename.split(os.sep)[-1]
lineno = outer.f_lineno
self.handle.write('[{:>15s}:{:03d}] {}'.format(filename[-15:],
lineno, __text))
def parse_safari (url_data):
"""Parse a Safari bookmark file."""
from ..bookmarks.safari import parse_bookmark_data
for url, name in parse_bookmark_data(url_data.get_content()):
url_data.add_url(url, name=name)
def _add_q(self, q_object):
"""Add a Q-object to the current filter."""
self._criteria = self._criteria._combine(q_object, q_object.connector)
def _self_event(self, event_name, cmd, *pargs, **kwargs):
"""Call self event"""
if hasattr(self, event_name):
getattr(self, event_name)(cmd, *pargs, **kwargs)
def t_BIN_STRING(self, t):
r'\'[01]*\'[bB]'
value = t.value[1:-2]
while value and value[0] == '0' and len(value) % 8:
value = value[1:]
# XXX raise in strict mode
# if len(value) % 8:
# raise error.PySmiLexerError("Number of 0s and 1s have to divide by 8 in binary string %s" % t.value, lineno=t.lineno)
return t
def constructor(self, random, args):
"""Return a candidate solution for an ant colony optimization."""
self._use_ants = True
candidate = []
while len(candidate) < len(self.components):
# Find feasible components
feasible_components = []
if len(candidate) == 0:
feasible_components = self.components
else:
remaining_capacity = self.capacity - sum([c.element for c in candidate])
if self.duplicates:
feasible_components = [c for c in self.components if c.element <= remaining_capacity]
else:
feasible_components = [c for c in self.components if c not in candidate and c.element <= remaining_capacity]
if len(feasible_components) == 0:
break
else:
# Choose a feasible component
if random.random() <= self.bias:
next_component = max(feasible_components)
else:
next_component = selectors.fitness_proportionate_selection(random, feasible_components, {'num_selected': 1})[0]
candidate.append(next_component)
return candidate
def parse_int(value, base_unit=None):
"""
>>> parse_int('1') == 1
True
>>> parse_int(' 0x400 MB ', '16384kB') == 64
True
>>> parse_int('1MB', 'kB') == 1024
True
>>> parse_int('1000 ms', 's') == 1
True
>>> parse_int('1GB', 'MB') is None
True
>>> parse_int(0) == 0
True
"""
convert = {
'kB': {'kB': 1, 'MB': 1024, 'GB': 1024 * 1024, 'TB': 1024 * 1024 * 1024},
'ms': {'ms': 1, 's': 1000, 'min': 1000 * 60, 'h': 1000 * 60 * 60, 'd': 1000 * 60 * 60 * 24},
's': {'ms': -1000, 's': 1, 'min': 60, 'h': 60 * 60, 'd': 60 * 60 * 24},
'min': {'ms': -1000 * 60, 's': -60, 'min': 1, 'h': 60, 'd': 60 * 24}
}
value, unit = strtol(value)
if value is not None:
if not unit:
return value
if base_unit and base_unit not in convert:
base_value, base_unit = strtol(base_unit, False)
else:
base_value = 1
if base_unit in convert and unit in convert[base_unit]:
multiplier = convert[base_unit][unit]
if multiplier < 0:
value /= -multiplier
else:
value *= multiplier
return int(value/base_value)
def match_phase(qpi, model, n0, r0, c0=None, pha_offset=0,
fix_pha_offset=False, nrel=.10, rrel=.05, crel=.05,
stop_dn=.0005, stop_dr=.0010, stop_dc=1, min_iter=3,
max_iter=100, ret_center=False, ret_pha_offset=False,
ret_qpi=False, ret_num_iter=False, ret_interim=False,
verbose=0, verbose_h5path="./match_phase_error.h5"):
"""Fit a scattering model to a quantitative phase image
Parameters
----------
qpi: qpimage.QPImage
QPI data to fit (e.g. experimental data)
model: str
Name of the light-scattering model
(see :const:`qpsphere.models.available`)
n0: float
Initial refractive index of the sphere
r0: float
Initial radius of the sphere [m]
c0: tuple of (float, float)
Initial center position of the sphere in ndarray index
coordinates [px]; if set to `None` (default), the center
of the image is used.
pha_offset: float
Initial phase offset [rad]
fix_pha_offset: bool
If True, do not fit the phase offset `pha_offset`. The phase
offset is determined from the mean of all pixels whose absolute
phase is
- below 1% of the modeled phase and
- within a 5px or 20% border (depending on which is larger)
around the phase image.
nrel: float
Determines the border of the interpolation range for the
refractive index: [n-(n-nmed)*nrel, n+(n-nmed)*nrel]
with nmed=qpi["medium_index"] and, initially, n=n0.
rrel: float
Determines the border of the interpolation range for the
radius: [r*(1-rrel), r*(1+rrel)] with, initially, r=r0.
crel: float
Determines the border of the interpolation range for the
center position: [cxy - dc, cxy + dc] with the center
position (along x or y) cxy, and the interval radius dc
defined by dc=max(lambda, crel * r0) with the vacuum
wavelength lambda=qpi["wavelenght"].
stop_dn: float
Stopping criterion for refractive index
stop_dr: float
Stopping criterion for radius
stop_dc: float
Stopping criterion for lateral offsets
min_iter: int
Minimum number of fitting iterations to perform
max_iter: int
Maximum number of fitting iterations to perform
ret_center: bool
If True, return the fitted center coordinates
ret_pha_offset: bool
If True, return the fitted phase offset
ret_qpi: bool
If True, return the final fit as a data set
ret_num_iter: bool
If True, return the number of iterations
ret_interim: bool
If True, return intermediate parameters of each iteration
verbose: int
Higher values increase verbosity
verbose_h5path: str
Path to hdf5 output file, created when `verbosity >= 2`
Returns
-------
n: float
Fitted refractive index
r: float
Fitted radius [m]
c: tuple of (float, float)
Only returned if `ret_center` is True
Center position of the sphere in ndarray index coordinates [px]
pha_offset: float
Only returned if `ret_pha_offset` is True
Fitted phase offset [rad]
qpi: qpimage.QPImage
Only returned if `ret_qpi` is True
Simulation using `model` with the final fit parameters
num_iter: int
Only returned if `ret_num_iter` is True
Number of iterations performed; negative number is
returned when iteration fails
interim: list
Only returned if `ret_interim` is True
Intermediate fitting parameters
"""
if not isinstance(qpi, qpimage.QPImage):
raise ValueError("`qpi` must be instance of `QPImage`!")
for var in ["medium index", "pixel size", "wavelength"]:
if var not in qpi:
raise ValueError("meta data '{}' not defined in `qpi`!")
if c0 is None:
c0 = [qpi.shape[0] / 2, qpi.shape[1] / 2]
model_kwargs = {"radius": r0,
"sphere_index": n0,
"medium_index": qpi["medium index"],
"wavelength": qpi["wavelength"],
"pixel_size": qpi["pixel size"],
"grid_size": qpi.shape,
"center": c0
}
spi = SpherePhaseInterpolator(model=model,
model_kwargs=model_kwargs,
pha_offset=pha_offset,
nrel=nrel,
rrel=rrel,
verbose=verbose)
# Results recorder to detect stuck iterations
recorder = []
# intermediate results
interim = []
interim.append([0, spi.params])
phase = qpi.pha
range_ipol = 47
range_off = 13
# allow the center offset to vary by 5 % of the radius or 1 wavelength
dc = max(qpi["wavelength"], crel * r0) / qpi["pixel size"] # [px]
if verbose:
print("Starting phase fitting.")
ii = 0
message = None
if "identifier" in qpi:
ident = qpi["identifier"]
else:
ident = str(time.time())
while True:
if verbose >= 2:
export_phase_error_hdf5(h5path=verbose_h5path,
identifier=ident,
index=ii,
phase=phase,
mphase=spi.get_phase(),
model=model,
n0=n0,
r0=r0,
spi_params=spi.params)
ii += 1
# remember old values
r_old = spi.radius
n_old = spi.sphere_index
# 1st step: vary radius
rs = np.linspace(
spi.range_r[0], spi.range_r[1], range_ipol, endpoint=True)
assert np.allclose(np.min(np.abs(rs - spi.radius)), 0)
lsqs = []
for ri in rs:
phasei = spi.get_phase(rintp=ri)
lsqs.append(sq_phase_diff(phase, phasei))
idr = np.argmin(lsqs)
spi.radius = rs[idr]
# 2nd step: vary n_object
ns = np.linspace(
spi.range_n[0], spi.range_n[1], range_ipol, endpoint=True)
assert np.allclose(np.min(np.abs(ns - spi.sphere_index)), 0)
lsqs = []
for ni in ns:
phasei = spi.get_phase(nintp=ni)
lsqs.append(sq_phase_diff(phase, phasei))
idn = np.argmin(lsqs)
spi.sphere_index = ns[idn]
# 3rd step: vary center position
x = np.linspace(-dc, dc, range_off, endpoint=True)
assert np.allclose(np.min(np.abs(x)), 0)
xintp, yintp = np.meshgrid(x, x)
lsqs = []
for xoff, yoff in zip(xintp.flatten(), yintp.flatten()):
phasei = spi.get_phase(delta_offset_x=xoff, delta_offset_y=yoff)
err = sq_phase_diff(phase, phasei)
lsqs.append(err)
idc = np.argmin(lsqs)
deltax = xintp.flatten()[idc]
deltay = yintp.flatten()[idc]
# offsets must be added incrementally, because they are not overridden
# in the 3rd step
spi.posx_offset = spi.posx_offset - deltax
spi.posy_offset = spi.posy_offset - deltay
if not fix_pha_offset:
# Use average phase at image border without sphere
cabphase = spi.get_phase() - spi.pha_offset
# Determine background
cabphase[np.abs(cabphase) > .01 * np.abs(cabphase).max()] = np.nan
cb_border = max(5, min(cabphase.shape) // 5)
cabphase[cb_border:-cb_border, cb_border:-cb_border] = np.nan
phai_offset = np.nanmean(cabphase - phase)
if np.isnan(phai_offset):
phai_offset = 0
spi.pha_offset = - phai_offset
if verbose == 1:
print("Iteration {}: n={:.5e}, r={:.5e}m".format(ii,
spi.sphere_index,
spi.radius))
elif verbose >= 2:
print("Iteration {}: {}".format(ii, spi.params))
interim.append([ii, spi.params])
# update accuracies
if (idn > range_ipol / 2 - range_ipol / 10 and
idn < range_ipol / 2 + range_ipol / 10):
spi.dn /= 2
if verbose >= 2:
print("Halved search interval: spi.dn={:.8f}".format(spi.dn))
if (idr > range_ipol / 2 - range_ipol / 10 and
idr < range_ipol / 2 + range_ipol / 10):
spi.dr /= 2
if verbose >= 2:
print("Halved search interval: spi.dr={:.8f}".format(spi.dr))
if deltax**2 + deltay**2 < dc**2:
dc /= 2
if verbose >= 2:
print("Halved search interval: dc={:.8f}".format(dc))
if ii < min_iter:
if verbose:
print("Keep iterating because `min_iter`={}.".format(min_iter))
continue
elif ii >= max_iter:
ii *= -1
if verbose:
print("Stopping iteration: reached `max_iter`={}".format(
max_iter))
message = "fail, reached maximum number of iterations"
break
if stop_dc:
# check movement of center location and enforce next iteration
curoff = np.sqrt(deltax**2 + deltay**2)
if curoff > stop_dc:
if verbose:
print("Keep iterating because center location moved by "
+ "{} > `stop_dc`={}.".format(curoff, stop_dc))
continue
if (abs(spi.radius - r_old) / spi.radius < stop_dr and
abs(spi.sphere_index - n_old) < stop_dn):
# Radius, refractive index, and center position changed below
# user-defined threshold.
if verbose:
print("Stopping iteration: `stop_dr` and `stop_dn` satisfied")
message = "success, satisfied stopping criteria"
break
thisresult = (spi.sphere_index, spi.radius)
recorder.append(thisresult)
if recorder.count(thisresult) > 2:
ii *= -1
# We have already had this result 2 times and therefore we abort.
# TODO:
# - Select the one with the least error
warnings.warn("Aborting stuck iteration for {}!".format(qpi))
if verbose:
print("Stop iteration: encountered same parameters twice.")
message = "fail, same parameters encountered twice"
break
if verbose >= 2:
infostring = ""
if not abs(spi.sphere_index - n_old) < stop_dn:
infostring += " delta_n = {} > {}".format(
abs(spi.sphere_index - n_old), stop_dn)
if not abs(spi.radius - r_old) / spi.radius < stop_dr:
infostring += " delta_r = {} > {}".format(
abs(spi.radius - r_old) / spi.radius, stop_dr)
print("Keep iterating: {} (no convergence)".format(infostring))
if verbose:
print("Number of iterations: {}".format(ii))
print("Stopping rationale: {}".format(message))
if verbose >= 2:
export_phase_error_hdf5(h5path=verbose_h5path,
identifier=ident,
index=ii,
phase=phase,
mphase=spi.get_phase(),
model=model,
n0=n0,
r0=r0,
spi_params=spi.params)
res = [spi.sphere_index, spi.radius]
if ret_center:
res += [[spi.posx_offset, spi.posy_offset]]
if ret_pha_offset:
res += [spi.pha_offset]
if ret_qpi:
res += [spi.compute_qpi()]
if ret_num_iter:
res += [ii]
if ret_interim:
res += [interim]
return res
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def save_dtrajs(self, prefix='', output_dir='.',
output_format='ascii', extension='.dtraj'):
r"""Saves calculated discrete trajectories. Filenames are taken from
given reader. If data comes from memory dtrajs are written to a default
filename.
Parameters
----------
prefix : str
prepend prefix to filenames.
output_dir : str (optional)
save files to this directory. Defaults to current working directory.
output_format : str
if format is 'ascii' dtrajs will be written as csv files, otherwise
they will be written as NumPy .npy files.
extension : str
file extension to append (eg. '.itraj')
"""
clustering = self._chain[-1]
reader = self._chain[0]
from pyemma.coordinates.clustering.interface import AbstractClustering
assert isinstance(clustering, AbstractClustering)
trajfiles = None
if isinstance(reader, FeatureReader):
trajfiles = reader.filenames
clustering.save_dtrajs(
trajfiles, prefix, output_dir, output_format, extension)
def geoid(self):
""""Return first child of the column, or self that is marked as a geographic identifier"""
if self.valuetype_class.is_geoid():
return self
for c in self.table.columns:
if c.parent == self.name and c.valuetype_class.is_geoid():
return c
def reset_namespace(self):
"""Resets the namespace by removing all names defined by the user"""
self.shellwidget.reset_namespace(warning=self.reset_warning,
message=True)
def export_true_table():
"""Export value, checker function output true table.
Help to organize thought.
klass.__dict__ 指的是在类定义中定义的, 从父类继承而来的不在此中。
"""
tester_list = [
("inspect.isroutine", lambda v: inspect.isroutine(v)),
("inspect.isfunction", lambda v: inspect.isfunction(v)),
("inspect.ismethod", lambda v: inspect.ismethod(v)),
("isinstance.property", lambda v: isinstance(v, property)),
("isinstance.staticmethod", lambda v: isinstance(v, staticmethod)),
("isinstance.classmethod", lambda v: isinstance(v, classmethod)),
]
class_attr_value_paris = [
("attribute", MyClass.attribute),
("property_method", MyClass.property_method),
("regular_method", MyClass.regular_method),
("static_method", MyClass.static_method),
("class_method", MyClass.class_method),
("__dict__['static_method']", Base.__dict__["static_method"]),
("__dict__['class_method']", Base.__dict__["class_method"]),
]
myclass = MyClass()
instance_attr_value_paris = [
("attribute", myclass.attribute),
("property_method", myclass.property_method),
("regular_method", myclass.regular_method),
("static_method", myclass.static_method),
("class_method", MyClass.class_method),
# ("__dict__['static_method']", myclass.__dict__["static_method"]),
# ("__dict__['class_method']", myclass.__dict__["class_method"]),
]
print(inspect.getargspec(MyClass.regular_method))
print(inspect.getargspec(MyClass.static_method))
print(inspect.getargspec(MyClass.class_method))
print(inspect.getargspec(myclass.regular_method))
print(inspect.getargspec(myclass.static_method))
print(inspect.getargspec(myclass.class_method))
# index: checker (method) names; columns: attribute names
def create_true_table_dataframe(index_tester, column_attr):
df = pd.DataFrame()
for attr, value in column_attr:
col = list()
for name, tester in index_tester:
try:
if tester(value):
flag = 1
else:
flag = 0
except:
flag = -99
col.append(flag)
df[attr] = col
df.index = [name for name, _ in index_tester]
return df
version = "%s.%s" % (sys.version_info.major, sys.version_info.minor)
df = create_true_table_dataframe(tester_list, class_attr_value_paris)
df.to_csv("%s_class.csv" % version, index=True)
df = create_true_table_dataframe(tester_list, instance_attr_value_paris)
df.to_csv("%s_instance.csv" % version, index=True) | Export value, checker function output true table.
Help to organize thought.
klass.__dict__ 指的是在类定义中定义的, 从父类继承而来的不在此中。 |
def keep_recent_datasets(max_dataset_history, info=None):
"""Keep track of the most recent recordings.
Parameters
----------
max_dataset_history : int
maximum number of datasets to remember
info : str, optional TODO
path to file
Returns
-------
list of str
paths to most recent datasets (only if you don't specify
new_dataset)
"""
history = settings.value('recent_recordings', [])
if isinstance(history, str):
history = [history]
if info is not None and info.filename is not None:
new_dataset = info.filename
if new_dataset in history:
lg.debug(new_dataset + ' already present, will be replaced')
history.remove(new_dataset)
if len(history) > max_dataset_history:
lg.debug('Removing last dataset ' + history[-1])
history.pop()
lg.debug('Adding ' + new_dataset + ' to list of recent datasets')
history.insert(0, new_dataset)
settings.setValue('recent_recordings', history)
return None
else:
return history
def set_referencepixel(self, pix):
"""Set the reference pixel of the given axis in this coordinate."""
assert len(pix) == len(self._coord["crpix"])
self._coord["crpix"] = pix[::-1] | Set the reference pixel of the given axis in this coordinate. |
def get_default_gateway():
"""
Attempts to read /proc/self/net/route to determine the default gateway in use.
:return: String - the ip address of the default gateway or None if not found/possible/non-existent
"""
try:
# The first line is the header line
# We look for the line where the Destination is 00000000 - that is the default route
# The Gateway IP is encoded backwards in hex.
with open("/proc/self/net/route") as routes:
for line in routes:
parts = line.split('\t')
if '00000000' == parts[1]:
hip = parts[2]
if hip is not None and len(hip) == 8:
# Reverse order, convert hex to int
return "%i.%i.%i.%i" % (int(hip[6:8], 16), int(hip[4:6], 16), int(hip[2:4], 16), int(hip[0:2], 16))
except:
logger.warn("get_default_gateway: ", exc_info=True) | Attempts to read /proc/self/net/route to determine the default gateway in use.
:return: String - the ip address of the default gateway or None if not found/possible/non-existant |
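The gateway column in /proc/net/route is hexadecimal in reversed (little-endian) byte order; a worked example of the decoding used above, with a purely illustrative value:
hip = '0102A8C0'  # example value only
gateway = "%i.%i.%i.%i" % (int(hip[6:8], 16), int(hip[4:6], 16), int(hip[2:4], 16), int(hip[0:2], 16))
assert gateway == '192.168.2.1'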
def stats (self, antnames):
"""XXX may be out of date."""
nbyant = np.zeros (self.nants, dtype=np.int)
sum = np.zeros (self.nants, dtype=np.complex)
sumsq = np.zeros (self.nants)
q = np.abs (self.normvis - 1)
for i in range (self.nsamps):
i1, i2 = self.blidxs[i]
nbyant[i1] += 1
nbyant[i2] += 1
sum[i1] += q[i]
sum[i2] += q[i]
sumsq[i1] += q[i]**2
sumsq[i2] += q[i]**2
avg = sum / nbyant
std = np.sqrt (sumsq / nbyant - avg**2)
navg = 1. / np.median (avg)
nstd = 1. / np.median (std)
for i in range (self.nants):
print (' %2d %10s %3d %f %f %f %f' %
(i, antnames[i], nbyant[i], avg[i], std[i], avg[i] * navg, std[i] * nstd))
def _finish_transaction_with_retry(self, command_name, explict_retry):
"""Run commit or abort with one retry after any retryable error.
:Parameters:
- `command_name`: Either "commitTransaction" or "abortTransaction".
- `explict_retry`: True when this is an explicit commit retry attempt,
i.e. the application called session.commit_transaction() twice.
"""
# This can be refactored with MongoClient._retry_with_session.
try:
return self._finish_transaction(command_name, explict_retry)
except ServerSelectionTimeoutError:
raise
except ConnectionFailure as exc:
try:
return self._finish_transaction(command_name, True)
except ServerSelectionTimeoutError:
# Raise the original error so the application can infer that
# an attempt was made.
raise exc
except OperationFailure as exc:
if exc.code not in _RETRYABLE_ERROR_CODES:
raise
try:
return self._finish_transaction(command_name, True)
except ServerSelectionTimeoutError:
# Raise the original error so the application can infer that
# an attempt was made.
raise exc
def get_label_at_address(self, address, offset = None):
"""
Creates a label from the given memory address.
If the address belongs to the module, the label is made relative to
its base address.
@type address: int
@param address: Memory address.
@type offset: None or int
@param offset: (Optional) Offset value.
@rtype: str
@return: Label pointing to the given address.
"""
# Add the offset to the address.
if offset:
address = address + offset
# Make the label relative to the base address if no match is found.
module = self.get_name()
function = None
offset = address - self.get_base()
# Make the label relative to the entrypoint if no other match is found.
# Skip if the entry point is unknown.
start = self.get_entry_point()
if start and start <= address:
function = "start"
offset = address - start
# Enumerate exported functions and debug symbols,
# then find the closest match, if possible.
try:
symbol = self.get_symbol_at_address(address)
if symbol:
(SymbolName, SymbolAddress, SymbolSize) = symbol
new_offset = address - SymbolAddress
if new_offset <= offset:
function = SymbolName
offset = new_offset
except WindowsError:
pass
# Parse the label and return it.
return _ModuleContainer.parse_label(module, function, offset)
def computeExpectations(self, A_n, output='averages', compute_uncertainty=True, uncertainty_method=None, warning_cutoff=1.0e-10, return_theta=False, useGeneral = False, state_dependent = False):
"""Compute the expectation of an observable of a phase space function.
Compute the expectation of an observable of phase space
function A(x) at all states where potentials are generated,
including states for which no samples were drawn.
We assume observables are not a function of the state. u is not
an observable -- it changes depending on the state. u_k is an
observable; the energy of state k does not depend on the
state. To compute the estimators of the energy at all K
states, use . . .
Parameters
----------
A_n : np.ndarray, float
A_n (N_max np float64 array) - A_n[n] = A(x_n)
output : string, optional
Either output averages, and uncertainties, or output a matrix of differences, with uncertainties.
compute_uncertainty : bool, optional
If False, the uncertainties will not be computed (default: True)
uncertainty_method : string, optional
Choice of method used to compute asymptotic covariance method,
or None to use default See help for computeAsymptoticCovarianceMatrix()
for more information on various methods. (default: None)
warning_cutoff : float, optional
Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10)
return_theta : bool, optional
Whether or not to return the theta matrix. Can be useful for complicated differences.
useGeneral : bool, optional
Whether to use the GeneralExpectations formalism (default: False).
state_dependent : bool, optional
Whether the expectations are state-dependent (default: False).
Returns
-------
A : np.ndarray, float
if output is 'averages'
A_i (K np float64 array) - A_i[i] is the estimate for the expectation of A(x) for state i.
if output is 'differences'
dA : np.ndarray, float
dA_i (K np float64 array) - dA_i[i] is uncertainty estimate (one standard deviation) for A_i[i]
or
dA_ij (K np float64 array) - dA_ij[i,j] is uncertainty estimate (one standard deviation) for the difference in A between i and j
Notes
-----
The reported statistical uncertainty should, in the asymptotic limit,
reflect one standard deviation for the normal distribution of the estimate.
The true expectation should fall within the interval [-dA, +dA] centered on the estimate 68% of the time, and within
the interval [-2 dA, +2 dA] centered on the estimate 95% of the time.
This will break down in cases where the number of samples is not large enough to reach the asymptotic normal limit.
This 'breakdown' can be exacerbated by the computation of observables like indicator functions for histograms that are sparsely populated.
References
----------
See Section IV of [1].
Examples
--------
>>> from pymbar import testsystems
>>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn')
>>> mbar = MBAR(u_kn, N_k)
>>> A_n = x_n
>>> (A_ij, dA_ij) = mbar.computeExpectations(A_n)
>>> A_n = u_kn[0,:]
>>> (A_ij, dA_ij) = mbar.computeExpectations(A_n, output='differences')
"""
dims = len(np.shape(A_n))
# Retrieve N and K for convenience.
N = self.N
K = self.K
if dims == 3:
print("expecting dim=1 or dim=2")
return None
if (useGeneral):
state_list = np.zeros([K,2],int)
if (state_dependent):
for k in range(K):
state_list[k,0] = k
state_list[k,1] = k
A_in = A_n
else:
A_in = np.zeros([1,N], dtype=np.float64)
if dims == 2:
A_n = kn_to_n(A_n, N_k=self.N_k)
A_in[0,:] = A_n
for k in range(K):
state_list[k,0] = 0
state_list[k,1] = k
general_results = self.computeGeneralExpectations(A_in, self.u_kn, state_list,
compute_uncertainty=compute_uncertainty,
uncertainty_method=uncertainty_method,
warning_cutoff=warning_cutoff,
return_theta=return_theta)
returns = []
if output == 'averages':
# Return expectations and uncertainties.
returns.append(general_results[0])
if compute_uncertainty:
indices = np.eye(K,dtype=bool)
returns.append(np.sqrt(general_results[1][indices]))
if output == 'differences':
A_im = np.matrix(general_results[0])
A_ij = A_im - A_im.transpose()
returns.append(np.array(A_ij))
if compute_uncertainty:
return np.sqrt(general_results[1])
if return_theta:
returns.append(general_results[2])
else: # current style
if dims == 2: #convert to 1xN shape
A_n = kn_to_n(A_n, N_k=self.N_k)
# Convert to np array.
A_n = np.array(A_n, np.float64)
# Augment W_nk, N_k, and c_k for q_A(x) for the observable, with one
# extra row/column for each state (Eq. 13 of [1]).
# log of weight matrix
Log_W_nk = np.zeros([N, K * 2], np.float64)
N_k = np.zeros([K * 2], np.int32) # counts
# "free energies" of the new states
f_k = np.zeros([K], np.float64)
# Fill in first half of matrix with existing q_k(x) from states.
Log_W_nk[:, 0:K] = self.Log_W_nk
N_k[0:K] = self.N_k
# Make A_n all positive so we can operate logarithmically for
# robustness
A_i = np.zeros([K], np.float64)
A_min = np.min(A_n)
A_n = A_n - (A_min - 1)
# Compute the remaining rows/columns of W_nk and the rows c_k for the
# observables.
for l in range(K):
# this works because all A_n are now positive;
Log_W_nk[:, K + l] = np.log(A_n) + self.Log_W_nk[:, l]
# we took the min at the beginning.
f_k[l] = -_logsum(Log_W_nk[:, K + l])
Log_W_nk[:, K + l] += f_k[l] # normalize the row
A_i[l] = np.exp(-f_k[l])
if compute_uncertainty or return_theta:
# Compute augmented asymptotic covariance matrix.
Theta_ij = self._computeAsymptoticCovarianceMatrix(
np.exp(Log_W_nk), N_k, method=uncertainty_method)
returns = []
if output == 'averages':
if compute_uncertainty:
# Compute uncertainties.
dA_i = np.zeros([K], np.float64)
# just the diagonals
for k in range(0, K):
dA_i[k] = np.abs(A_i[k]) * np.sqrt(
Theta_ij[K + k, K + k] + Theta_ij[k, k] - 2.0 * Theta_ij[k, K + k])
# add back the minimum now that uncertainties are computed.
A_i += (A_min - 1)
# Return expectations and uncertainties.
returns.append(np.array(A_i))
if compute_uncertainty:
returns.append(np.array(dA_i))
if output == 'differences':
# Return differences of expectations and uncertainties.
# compute expectation differences
A_im = np.matrix(A_i)
A_ij = A_im - A_im.transpose()
returns.append(np.array(A_ij))
# todo - vectorize the differences! Faster and less likely to give errors.
if compute_uncertainty:
dA_ij = np.zeros([K, K], dtype=np.float64)
for i in range(0, K):
for j in range(0, K):
try:
dA_ij[i, j] = np.sqrt(
+ A_i[i] * Theta_ij[i, i] * A_i[i]
- A_i[i] * Theta_ij[i, j] * A_i[j]
- A_i[i] * Theta_ij[i, K + i] * A_i[i]
+ A_i[i] * Theta_ij[i, K + j] * A_i[j]
- A_i[j] * Theta_ij[j, i] * A_i[i]
+ A_i[j] * Theta_ij[j, j] * A_i[j]
+ A_i[j] * Theta_ij[j, K + i] * A_i[i]
- A_i[j] * Theta_ij[j, K + j] * A_i[j]
- A_i[i] * Theta_ij[K + i, i] * A_i[i]
+ A_i[i] * Theta_ij[K + i, j] * A_i[j]
+ A_i[i] * Theta_ij[K + i, K + i] * A_i[i]
- A_i[i] * Theta_ij[K + i, K + j] * A_i[j]
+ A_i[j] * Theta_ij[K + j, i] * A_i[i]
- A_i[j] * Theta_ij[K + j, j] * A_i[j]
- A_i[j] * Theta_ij[K + j, K + i] * A_i[i]
+ A_i[j] * Theta_ij[K + j, K + j] * A_i[j]
)
except:
dA_ij[i, j] = 0.0
returns.append(dA_ij)
if return_theta:
returns.append(Theta_ij)
return returns
def _trim_text(text, max_width):
"""
Trim the text to `max_width`, append dots when the text is too long.
Returns (text, width) tuple.
"""
width = get_cwidth(text)
# When the text is too wide, trim it.
if width > max_width:
# When there are no double width characters, just use slice operation.
if len(text) == width:
trimmed_text = (text[:max(1, max_width-3)] + '...')[:max_width]
return trimmed_text, len(trimmed_text)
# Otherwise, loop until we have the desired width. (Rather
# inefficient, but ok for now.)
else:
trimmed_text = ''
for c in text:
if get_cwidth(trimmed_text + c) <= max_width - 3:
trimmed_text += c
trimmed_text += '...'
return (trimmed_text, get_cwidth(trimmed_text))
else:
return text, width
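A hedged illustration of the simple branch above (no double-width characters, so len(text) equals the display width):
text, max_width = 'hello world', 8
trimmed = (text[:max(1, max_width - 3)] + '...')[:max_width]
assert trimmed == 'hello...'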
def _close_list(self):
"""
Add a closing list tag corresponding to the currently open
list found in current_parent_element.
"""
list_type = self.current_parent_element['attrs']['class']
tag = LIST_TYPES[list_type]
html = '</{t}>'.format(
t=tag
)
self.cleaned_html += html
self.current_parent_element['tag'] = ''
self.current_parent_element['attrs'] = {}
def padding(self, px):
"""
Add padding around four sides of box
:param px: padding value in pixels.
Can be an array in the format of [top right bottom left] or single value.
:return: New padding added box
"""
# if px is not an array, have equal padding all sides
if not isinstance(px, list):
px = [px] * 4
x = max(0, self.x - px[3])
y = max(0, self.y - px[0])
x2 = self.x + self.width + px[1]
y2 = self.y + self.height + px[2]
return Box.from_xy(x, y, x2, y2)
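A hedged numeric illustration of the padding arithmetic (Box and Box.from_xy are assumed from the surrounding library and not redefined here):
# With x=5, y=5, width=20, height=20 and px=10 on all four sides:
#   new top-left     = (max(0, 5 - 10), max(0, 5 - 10)) = (0, 0)
#   new bottom-right = (5 + 20 + 10, 5 + 20 + 10)       = (35, 35)
# i.e. the result corresponds to Box.from_xy(0, 0, 35, 35)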
def prefix_keys(self, prefix, strip_prefix=False):
"""Get all keys that begin with ``prefix``.
:param prefix: Lexical prefix for keys to search.
:type prefix: bytes
:param strip_prefix: True to strip the prefix from yielded items.
:type strip_prefix: bool
:yields: All keys in the store that begin with ``prefix``.
"""
keys = self.keys(key_from=prefix)
start = 0
if strip_prefix:
start = len(prefix)
for key in keys:
if not key.startswith(prefix):
break
yield key[start:]
def _validate_data(self):
"""Verifies that the data points contained in the class are valid.
"""
msg = "Error! Expected {} timestamps, found {}.".format(
len(self._data_points), len(self._timestamps))
if len(self._data_points) != len(self._timestamps):
raise MonsoonError(msg)
def list_buckets(self):
"""
List all buckets owned by the user.
Example:
bucket_list = minio.list_buckets()
for bucket in bucket_list:
print(bucket.name, bucket.created_date)
:return: An iterator of buckets owned by the current user.
"""
method = 'GET'
url = get_target_url(self._endpoint_url)
# Set user agent once before the request.
headers = {'User-Agent': self._user_agent}
# default for all requests.
region = 'us-east-1'
# If a region is set, use it.
if self._region:
region = self._region
# Get signature headers if any.
headers = sign_v4(method, url, region,
headers, self._access_key,
self._secret_key,
self._session_token,
None)
response = self._http.urlopen(method, url,
body=None,
headers=headers)
if self._trace_output_stream:
dump_http(method, url, headers, response,
self._trace_output_stream)
if response.status != 200:
raise ResponseError(response, method).get_exception()
try:
return parse_list_buckets(response.data)
except InvalidXMLError:
if self._endpoint_url.endswith("s3.amazonaws.com") and (not self._access_key or not self._secret_key):
raise AccessDenied(response)
def transform_26_27(inst, new_inst, i, n, offset,
instructions, new_asm):
"""Change JUMP_IF_FALSE and JUMP_IF_TRUE to
POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE"""
if inst.opname in ('JUMP_IF_FALSE', 'JUMP_IF_TRUE'):
i += 1
assert i < n
assert instructions[i].opname == 'POP_TOP'
new_inst.offset = offset
new_inst.opname = (
'POP_JUMP_IF_FALSE' if inst.opname == 'JUMP_IF_FALSE' else 'POP_JUMP_IF_TRUE'
)
new_asm.backpatch[-1].remove(inst)
new_inst.arg = 'L%d' % (inst.offset + inst.arg + 3)
new_asm.backpatch[-1].add(new_inst)
else:
xlate26_27(new_inst)
return xdis.op_size(new_inst.opcode, opcode_27)
def needs_confirmation(self):
"""
Set is_active to False if email confirmation is needed.
"""
if EMAIL_CONFIRMATION:
self.is_active = False
self.save()
return True
else:
return False
def init(self, back=None):
'''
Initialize the backend, only do so if the fs supports an init function
'''
back = self.backends(back)
for fsb in back:
fstr = '{0}.init'.format(fsb)
if fstr in self.servers:
self.servers[fstr]() | Initialize the backend, only do so if the fs supports an init function |
def symbolic_ref(cwd,
ref,
value=None,
opts='',
git_opts='',
user=None,
password=None,
ignore_retcode=False,
output_encoding=None):
'''
.. versionadded:: 2015.8.0
Interface to `git-symbolic-ref(1)`_
cwd
The path to the git checkout
ref
Symbolic ref to read/modify
value
If passed, then the symbolic ref will be set to this value and an empty
string will be returned.
If not passed, then the ref to which ``ref`` points will be returned,
unless ``--delete`` is included in ``opts`` (in which case the symbolic
ref will be deleted).
opts
Any additional options to add to the command line, in a single string
git_opts
Any additional options to add to git command itself (not the
``symbolic-refs`` subcommand), in a single string. This is useful for
passing ``-c`` to run git with temporary changes to the git
configuration.
.. versionadded:: 2017.7.0
.. note::
This is only supported in git 1.7.2 and newer.
user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
ignore_retcode : False
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
.. versionadded:: 2015.8.0
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
.. _`git-symbolic-ref(1)`: http://git-scm.com/docs/git-symbolic-ref
CLI Examples:
.. code-block:: bash
# Get ref to which HEAD is pointing
salt myminion git.symbolic_ref /path/to/repo HEAD
# Set/overwrite symbolic ref 'FOO' to local branch 'foo'
salt myminion git.symbolic_ref /path/to/repo FOO refs/heads/foo
# Delete symbolic ref 'FOO'
salt myminion git.symbolic_ref /path/to/repo FOO opts='--delete'
'''
cwd = _expand_path(cwd, user)
command = ['git'] + _format_git_opts(git_opts)
command.append('symbolic-ref')
opts = _format_opts(opts)
if value is not None and any(x in opts for x in ('-d', '--delete')):
raise SaltInvocationError(
'Value cannot be set for symbolic ref if -d/--delete is included '
'in opts'
)
command.extend(opts)
command.append(ref)
if value:
        command.append(value)
return _git_run(command,
cwd=cwd,
user=user,
password=password,
ignore_retcode=ignore_retcode,
output_encoding=output_encoding)['stdout'] | .. versionadded:: 2015.8.0
Interface to `git-symbolic-ref(1)`_
cwd
The path to the git checkout
ref
Symbolic ref to read/modify
value
If passed, then the symbolic ref will be set to this value and an empty
string will be returned.
If not passed, then the ref to which ``ref`` points will be returned,
unless ``--delete`` is included in ``opts`` (in which case the symbolic
ref will be deleted).
opts
Any additional options to add to the command line, in a single string
git_opts
Any additional options to add to git command itself (not the
``symbolic-refs`` subcommand), in a single string. This is useful for
passing ``-c`` to run git with temporary changes to the git
configuration.
.. versionadded:: 2017.7.0
.. note::
This is only supported in git 1.7.2 and newer.
user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
ignore_retcode : False
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
.. versionadded:: 2015.8.0
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
.. _`git-symbolic-ref(1)`: http://git-scm.com/docs/git-symbolic-ref
CLI Examples:
.. code-block:: bash
# Get ref to which HEAD is pointing
salt myminion git.symbolic_ref /path/to/repo HEAD
# Set/overwrite symbolic ref 'FOO' to local branch 'foo'
salt myminion git.symbolic_ref /path/to/repo FOO refs/heads/foo
# Delete symbolic ref 'FOO'
salt myminion git.symbolic_ref /path/to/repo FOO opts='--delete' |
def setData(self, index, value, role=QtCore.Qt.UserRole):
"""Sets the component at *index* to *value*"""
# item must already exist at provided index
self._stim.overwriteComponent(value, index.row(), index.column())
self.samplerateChanged.emit(self.samplerate()) | Sets the component at *index* to *value* |
def is_venv(directory, executable='python'):
"""
:param directory: base directory of python environment
"""
path=os.path.join(directory, 'bin', executable)
return os.path.isfile(path) | :param directory: base directory of python environment |
def _plotting(self, rank_metric, results, graph_num, outdir,
format, figsize, pheno_pos='', pheno_neg=''):
""" Plotting API.
:param rank_metric: sorted pd.Series with rankings values.
:param results: self.results
:param data: preprocessed expression table
"""
# no values need to be returned
if self._outdir is None: return
#Plotting
top_term = self.res2d.index[:graph_num]
# multi-threading
pool = Pool(self._processes)
for gs in top_term:
hit = results.get(gs)['hits_indices']
NES = 'nes' if self.module != 'ssgsea' else 'es'
term = gs.replace('/','_').replace(":","_")
outfile = '{0}/{1}.{2}.{3}'.format(self.outdir, term, self.module, self.format)
# gseaplot(rank_metric=rank_metric, term=term, hits_indices=hit,
# nes=results.get(gs)[NES], pval=results.get(gs)['pval'],
# fdr=results.get(gs)['fdr'], RES=results.get(gs)['RES'],
# pheno_pos=pheno_pos, pheno_neg=pheno_neg, figsize=figsize,
# ofname=outfile)
pool.apply_async(gseaplot, args=(rank_metric, term, hit, results.get(gs)[NES],
results.get(gs)['pval'],results.get(gs)['fdr'],
results.get(gs)['RES'],
pheno_pos, pheno_neg,
figsize, 'seismic', outfile))
if self.module == 'gsea':
outfile2 = "{0}/{1}.heatmap.{2}".format(self.outdir, term, self.format)
# heatmap(df=self.heatmat.iloc[hit, :], title=term, ofname=outfile2,
# z_score=0, figsize=(self._width, len(hit)/2))
pool.apply_async(heatmap, args=(self.heatmat.iloc[hit, :], 0, term,
(self._width, len(hit)/2+2), 'RdBu_r',
True, True, outfile2))
pool.close()
pool.join() | Plotting API.
:param rank_metric: sorted pd.Series with rankings values.
:param results: self.results
:param data: preprocessed expression table |
def _register_plotter(cls, identifier, module, plotter_name,
plotter_cls=None, summary='', prefer_list=False,
default_slice=None, default_dims={},
show_examples=True,
example_call="filename, name=['my_variable'], ...",
plugin=None):
"""
Register a plotter for making plots
This class method registeres a plot function for the :class:`Project`
class under the name of the given `identifier`
Parameters
----------
%(Project._register_plotter.parameters)s
Other Parameters
----------------
prefer_list: bool
Determines the `prefer_list` parameter in the `from_dataset`
method. If True, the plotter is expected to work with instances of
:class:`psyplot.InteractiveList` instead of
:class:`psyplot.InteractiveArray`.
%(ArrayList.from_dataset.parameters.default_slice)s
default_dims: dict
Default dimensions that shall be used for plotting (e.g.
{'x': slice(None), 'y': slice(None)} for longitude-latitude plots)
show_examples: bool, optional
If True, examples how to access the plotter documentation are
included in class documentation
example_call: str, optional
The arguments and keyword arguments that shall be included in the
example of the generated plot method. This call will then appear as
``>>> psy.plot.%%(identifier)s(%%(example_call)s)`` in the
documentation
plugin: str
The name of the plugin
"""
full_name = '%s.%s' % (module, plotter_name)
if plotter_cls is not None: # plotter has already been imported
docstrings.params['%s.formatoptions' % (full_name)] = \
plotter_cls.show_keys(
indent=4, func=str,
# include links in sphinx doc
include_links=None)
doc_str = ('Possible formatoptions are\n\n'
'%%(%s.formatoptions)s') % full_name
else:
doc_str = ''
summary = summary or (
'Open and plot data via :class:`%s.%s` plotters' % (
module, plotter_name))
if plotter_cls is not None:
_versions.update(get_versions(key=lambda s: s == plugin))
class PlotMethod(cls._plot_method_base_cls):
__doc__ = cls._gen_doc(summary, full_name, identifier,
example_call, doc_str, show_examples)
_default_slice = default_slice
_default_dims = default_dims
_plotter_cls = plotter_cls
_prefer_list = prefer_list
_plugin = plugin
_summary = summary
setattr(cls, identifier, PlotMethod(identifier, module, plotter_name)) | Register a plotter for making plots
This class method registeres a plot function for the :class:`Project`
class under the name of the given `identifier`
Parameters
----------
%(Project._register_plotter.parameters)s
Other Parameters
----------------
prefer_list: bool
Determines the `prefer_list` parameter in the `from_dataset`
method. If True, the plotter is expected to work with instances of
:class:`psyplot.InteractiveList` instead of
:class:`psyplot.InteractiveArray`.
%(ArrayList.from_dataset.parameters.default_slice)s
default_dims: dict
Default dimensions that shall be used for plotting (e.g.
{'x': slice(None), 'y': slice(None)} for longitude-latitude plots)
show_examples: bool, optional
If True, examples how to access the plotter documentation are
included in class documentation
example_call: str, optional
The arguments and keyword arguments that shall be included in the
example of the generated plot method. This call will then appear as
``>>> psy.plot.%%(identifier)s(%%(example_call)s)`` in the
documentation
plugin: str
The name of the plugin |
def authenticate_user(self, response, **kwargs):
"""Handles user authentication with gssapi/kerberos"""
host = urlparse(response.url).hostname
try:
auth_header = self.generate_request_header(response, host)
except KerberosExchangeError:
# GSS Failure, return existing response
return response
log.debug("authenticate_user(): Authorization header: {0}".format(
auth_header))
response.request.headers['Authorization'] = auth_header
# Consume the content so we can reuse the connection for the next
# request.
response.content
response.raw.release_conn()
_r = response.connection.send(response.request, **kwargs)
_r.history.append(response)
log.debug("authenticate_user(): returning {0}".format(_r))
return _r | Handles user authentication with gssapi/kerberos |
def partition_items(count, bin_size):
"""
Given the total number of items, determine the number of items that
can be added to each bin with a limit on the bin size.
So if you want to partition 11 items into groups of 3, you'll want
three of three and one of two.
>>> partition_items(11, 3)
[3, 3, 3, 2]
But if you only have ten items, you'll have two groups of three and
two of two.
>>> partition_items(10, 3)
[3, 3, 2, 2]
"""
num_bins = int(math.ceil(count / float(bin_size)))
bins = [0] * num_bins
for i in range(count):
bins[i % num_bins] += 1
return bins | Given the total number of items, determine the number of items that
can be added to each bin with a limit on the bin size.
So if you want to partition 11 items into groups of 3, you'll want
three of three and one of two.
>>> partition_items(11, 3)
[3, 3, 3, 2]
But if you only have ten items, you'll have two groups of three and
two of two.
>>> partition_items(10, 3)
[3, 3, 2, 2] |
def coef(self):
"""
Return the coefficients which can be applied to the non-standardized data.
Note: standardize = True by default, if set to False then coef() return the coefficients which are fit directly.
"""
tbl = self._model_json["output"]["coefficients_table"]
if tbl is None:
return None
return {name: coef for name, coef in zip(tbl["names"], tbl["coefficients"])} | Return the coefficients which can be applied to the non-standardized data.
Note: standardize = True by default, if set to False then coef() return the coefficients which are fit directly. |
def unified(old, new):
"""
Returns a generator yielding a unified diff between `old` and `new`.
"""
for diff in difflib.ndiff(old.splitlines(), new.splitlines()):
if diff[0] == " ":
yield diff
elif diff[0] == "?":
continue
else:
yield termcolor.colored(diff, "red" if diff[0] == "-" else "green", attrs=["bold"]) | Returns a generator yielding a unified diff between `old` and `new`. |
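A brief usage sketch of the generator above; it assumes ``unified`` and its ``difflib``/``termcolor`` imports are in scope, and the sample strings are made up for illustration.

old = "hello\nworld\n"
new = "hello\nthere\n"
for line in unified(old, new):
    print(line)
# unchanged lines pass through, removals print in red, additions in green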
def brozzler_new_job(argv=None):
'''
Command line utility entry point for queuing a new brozzler job. Takes a
yaml brozzler job configuration file, creates job, sites, and pages objects
in rethinkdb, which brozzler-workers will look at and start crawling.
'''
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
description='brozzler-new-job - queue new job with brozzler',
formatter_class=BetterArgumentDefaultsHelpFormatter)
arg_parser.add_argument(
'job_conf_file', metavar='JOB_CONF_FILE',
help='brozzler job configuration file in yaml')
add_rethinkdb_options(arg_parser)
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
frontier = brozzler.RethinkDbFrontier(rr)
try:
brozzler.new_job_file(frontier, args.job_conf_file)
except brozzler.InvalidJobConf as e:
print('brozzler-new-job: invalid job file:', args.job_conf_file, file=sys.stderr)
print(' ' + yaml.dump(e.errors).rstrip().replace('\n', '\n '), file=sys.stderr)
sys.exit(1) | Command line utility entry point for queuing a new brozzler job. Takes a
yaml brozzler job configuration file, creates job, sites, and pages objects
in rethinkdb, which brozzler-workers will look at and start crawling. |
def create_temp_file(self, **mkstemp_kwargs) -> Tuple[int, str]:
"""
Creates a temp file.
:param mkstemp_kwargs: named arguments to be passed to `tempfile.mkstemp`
:return: tuple where the first element is the file handle and the second is the location of the temp file
"""
kwargs = {**self.default_mkstemp_kwargs, **mkstemp_kwargs}
handle, location = tempfile.mkstemp(**kwargs)
self._temp_files.add(location)
return handle, location | Creates a temp file.
:param mkstemp_kwargs: named arguments to be passed to `tempfile.mkstemp`
:return: tuple where the first element is the file handle and the second is the location of the temp file |
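A self-contained sketch of the same pattern using only the standard library; the ``default_mkstemp_kwargs`` attribute and the tracking set are assumptions modelled on the method above.

import os
import tempfile

class TempFileTracker:
    def __init__(self, **default_mkstemp_kwargs):
        self.default_mkstemp_kwargs = default_mkstemp_kwargs
        self._temp_files = set()

    def create_temp_file(self, **mkstemp_kwargs):
        # Per-call kwargs override the defaults, mirroring the merge above.
        kwargs = {**self.default_mkstemp_kwargs, **mkstemp_kwargs}
        handle, location = tempfile.mkstemp(**kwargs)
        self._temp_files.add(location)
        return handle, location

tracker = TempFileTracker(suffix=".tmp")
fd, path = tracker.create_temp_file(prefix="example-")
os.close(fd)
os.remove(path)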
def pointlist(points, sr):
"""Convert a list of the form [[x, y] ...] to a list of Point instances
with the given x, y coordinates."""
assert all(isinstance(pt, Point) or len(pt) == 2
for pt in points), "Point(s) not in [x, y] form"
return [coord if isinstance(coord, Point)
else Point(coord[0], coord[1], sr)
for coord in points] | Convert a list of the form [[x, y] ...] to a list of Point instances
with the given x, y coordinates. |
def iter_contributors(self, anon=False, number=-1, etag=None):
"""Iterate over the contributors to this repository.
:param bool anon: (optional), True lists anonymous contributors as
well
:param int number: (optional), number of contributors to return.
Default: -1 returns all contributors
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`User <github3.users.User>`\ s
"""
url = self._build_url('contributors', base_url=self._api)
params = {}
if anon:
params = {'anon': True}
return self._iter(int(number), url, User, params, etag) | Iterate over the contributors to this repository.
:param bool anon: (optional), True lists anonymous contributors as
well
:param int number: (optional), number of contributors to return.
Default: -1 returns all contributors
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`User <github3.users.User>`\ s |
def add(self, child):
"""
Adds a typed child object to the conditional derived variable.
@param child: Child object to be added.
"""
if isinstance(child, Case):
self.add_case(child)
else:
raise ModelError('Unsupported child element') | Adds a typed child object to the conditional derived variable.
@param child: Child object to be added. |
def time_delay_from_earth_center(self, right_ascension, declination, t_gps):
"""Return the time delay from the earth center
"""
return self.time_delay_from_location(np.array([0, 0, 0]),
right_ascension,
declination,
t_gps) | Return the time delay from the earth center |
def evals_get(self, service_staff_id, start_date, end_date, session):
        '''taobao.wangwang.eservice.evals.get - Get evaluation details.
        Queries the detailed evaluation records for the given user id. A primary account id can query evaluations of the shop's sub-accounts, a group administrator can query evaluations of accounts within the group, and non-administrator sub-accounts can only query their own evaluations.'''
request = TOPRequest('taobao.wangwang.eservice.evals.get')
request['service_staff_id'] = service_staff_id
request['start_date'] = start_date
request['end_date'] = end_date
self.create(self.execute(request, session))
        return self.staff_eval_details | taobao.wangwang.eservice.evals.get - Get evaluation details.
        Queries the detailed evaluation records for the given user id. A primary account id can query evaluations of the shop's sub-accounts, a group administrator can query evaluations of accounts within the group, and non-administrator sub-accounts can only query their own evaluations.
def _should_fuzz_node(self, fuzz_node, stage):
'''
The matching stage is either the name of the last node, or ClientFuzzer.STAGE_ANY.
:return: True if we are in the correct model node
'''
if stage == ClientFuzzer.STAGE_ANY:
return True
if fuzz_node.name.lower() == stage.lower():
if self._index_in_path == len(self._fuzz_path) - 1:
return True
else:
return False | The matching stage is either the name of the last node, or ClientFuzzer.STAGE_ANY.
:return: True if we are in the correct model node |
def get_state_machine(self):
"""Get a reference of the state_machine the state belongs to
:rtype rafcon.core.state_machine.StateMachine
:return: respective state machine
"""
if self.parent:
if self.is_root_state:
return self.parent
else:
return self.parent.get_state_machine()
return None | Get a reference of the state_machine the state belongs to
:rtype rafcon.core.state_machine.StateMachine
:return: respective state machine |
def has_isotropic_cells(self):
"""``True`` if `grid` is uniform and `cell_sides` are all equal.
Always ``True`` for 1D partitions.
Examples
--------
>>> part = uniform_partition([0, -1], [1, 1], (5, 10))
>>> part.has_isotropic_cells
True
>>> part = uniform_partition([0, -1], [1, 1], (5, 5))
>>> part.has_isotropic_cells
False
"""
return self.is_uniform and np.allclose(self.cell_sides[:-1],
self.cell_sides[1:]) | ``True`` if `grid` is uniform and `cell_sides` are all equal.
Always ``True`` for 1D partitions.
Examples
--------
>>> part = uniform_partition([0, -1], [1, 1], (5, 10))
>>> part.has_isotropic_cells
True
>>> part = uniform_partition([0, -1], [1, 1], (5, 5))
>>> part.has_isotropic_cells
False |
def search_google(self, query, *, max_results=100, **kwargs):
"""Search Google Music for content.
Parameters:
query (str): Search text.
max_results (int, Optional): Maximum number of results per type to retrieve.
Google only accepts values up to 100.
Default: ``100``
kwargs (bool, Optional): Any of ``albums``, ``artists``, ``genres``,
``playlists``, ``podcasts``, ``situations``, ``songs``, ``stations``,
``videos`` set to ``True`` will include that result type in the
returned dict.
Setting none of them will include all result types in the returned dict.
Returns:
dict: A dict of results separated into keys: ``'albums'``, ``'artists'``,
``'genres'``, ``'playlists'``, ```'podcasts'``, ``'situations'``,
``'songs'``, ``'stations'``, ``'videos'``.
Note:
Free account search is restricted so may not contain hits for all result types.
"""
response = self._call(
mc_calls.Query,
query,
max_results=max_results,
**kwargs
)
clusters = response.body.get('clusterDetail', [])
results = defaultdict(list)
for cluster in clusters:
result_type = QueryResultType(cluster['cluster']['type']).name
entries = cluster.get('entries', [])
if len(entries) > 0:
for entry in entries:
item_key = next(
key
for key in entry
if key not in ['cluster', 'score', 'type']
)
results[f"{result_type}s"].append(entry[item_key])
return dict(results) | Search Google Music for content.
Parameters:
query (str): Search text.
max_results (int, Optional): Maximum number of results per type to retrieve.
Google only accepts values up to 100.
Default: ``100``
kwargs (bool, Optional): Any of ``albums``, ``artists``, ``genres``,
``playlists``, ``podcasts``, ``situations``, ``songs``, ``stations``,
``videos`` set to ``True`` will include that result type in the
returned dict.
Setting none of them will include all result types in the returned dict.
Returns:
dict: A dict of results separated into keys: ``'albums'``, ``'artists'``,
``'genres'``, ``'playlists'``, ```'podcasts'``, ``'situations'``,
``'songs'``, ``'stations'``, ``'videos'``.
Note:
Free account search is restricted so may not contain hits for all result types. |
def _approx_eq_(self, other: Any, atol: float) -> bool:
"""Implementation of `SupportsApproximateEquality` protocol."""
if not isinstance(other, type(self)):
return NotImplemented
#self.value = value % period in __init__() creates a Mod
if isinstance(other.value, sympy.Mod):
return self.value == other.value
# Periods must be exactly equal to avoid drift of normalized value when
# original value increases.
if self.period != other.period:
return False
low = min(self.value, other.value)
high = max(self.value, other.value)
# Shift lower value outside of normalization interval in case low and
# high values are at the opposite borders of normalization interval.
if high - low > self.period / 2:
low += self.period
return cirq.protocols.approx_eq(low, high, atol=atol) | Implementation of `SupportsApproximateEquality` protocol. |
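The wrap-around comparison can be illustrated without cirq or sympy; this sketch shifts the lower value by one period when the two values sit at opposite ends of the normalization interval, with tolerance handling simplified to ``math.isclose``.

import math

def periodic_approx_eq(a, b, period, atol=1e-8):
    low, high = min(a, b), max(a, b)
    # A gap of more than half a period means the values are closer across the wrap point.
    if high - low > period / 2:
        low += period
    return math.isclose(low, high, abs_tol=atol)

assert periodic_approx_eq(0.01, 6.27, 2 * math.pi, atol=0.05)      # close across the wrap
assert not periodic_approx_eq(1.0, 3.0, 2 * math.pi, atol=0.05)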
def sort(self):
"""Sorts all results rows.
Sorts by: size (descending), n-gram, count (descending), label,
text name, siglum.
"""
self._matches.sort_values(
by=[constants.SIZE_FIELDNAME, constants.NGRAM_FIELDNAME,
constants.COUNT_FIELDNAME, constants.LABEL_FIELDNAME,
constants.WORK_FIELDNAME, constants.SIGLUM_FIELDNAME],
ascending=[False, True, False, True, True, True], inplace=True) | Sorts all results rows.
Sorts by: size (descending), n-gram, count (descending), label,
text name, siglum. |
def up(path, service_names=None):
'''
Create and start containers defined in the docker-compose.yml file
located in path, service_names is a python list, if omitted create and
start all containers
path
Path where the docker-compose file is stored on the server
service_names
If specified will create and start only the specified services
CLI Example:
.. code-block:: bash
salt myminion dockercompose.up /path/where/docker-compose/stored
salt myminion dockercompose.up /path/where/docker-compose/stored '[janus]'
'''
debug_ret = {}
project = __load_project(path)
if isinstance(project, dict):
return project
else:
try:
result = _get_convergence_plans(project, service_names)
ret = project.up(service_names)
if debug:
for container in ret:
if service_names is None or container.get('Name')[1:] in service_names:
container.inspect_if_not_inspected()
debug_ret[container.get('Name')] = container.inspect()
except Exception as inst:
return __handle_except(inst)
return __standardize_result(True, 'Adding containers via docker-compose', result, debug_ret) | Create and start containers defined in the docker-compose.yml file
located in path, service_names is a python list, if omitted create and
start all containers
path
Path where the docker-compose file is stored on the server
service_names
If specified will create and start only the specified services
CLI Example:
.. code-block:: bash
salt myminion dockercompose.up /path/where/docker-compose/stored
salt myminion dockercompose.up /path/where/docker-compose/stored '[janus]' |
def _run_callback(self, callback):
"""Runs a callback with error handling.
For use in subclasses.
"""
try:
ret = callback()
if ret is not None:
from . import gen
# Functions that return Futures typically swallow all
# exceptions and store them in the Future. If a Future
# makes it out to the IOLoop, ensure its exception (if any)
# gets logged too.
try:
ret = gen.convert_yielded(ret)
except gen.BadYieldError:
# It's not unusual for add_callback to be used with
# methods returning a non-None and non-yieldable
# result, which should just be ignored.
pass
else:
self.add_future(ret, self._discard_future_result)
except Exception:
self.handle_callback_exception(callback) | Runs a callback with error handling.
For use in subclasses. |
def extract_single_dist_for_current_platform(self, reqs, dist_key):
"""Resolve a specific distribution from a set of requirements matching the current platform.
:param list reqs: A list of :class:`PythonRequirement` to resolve.
:param str dist_key: The value of `distribution.key` to match for a `distribution` from the
resolved requirements.
:return: The single :class:`pkg_resources.Distribution` matching `dist_key`.
:raises: :class:`self.SingleDistExtractionError` if no dists or multiple dists matched the given
`dist_key`.
"""
distributions = self._resolve_distributions_by_platform(reqs, platforms=['current'])
try:
matched_dist = assert_single_element(list(
dist
for _, dists in distributions.items()
for dist in dists
if dist.key == dist_key
))
except (StopIteration, ValueError) as e:
raise self.SingleDistExtractionError(
"Exactly one dist was expected to match name {} in requirements {}: {}"
.format(dist_key, reqs, e))
return matched_dist | Resolve a specific distribution from a set of requirements matching the current platform.
:param list reqs: A list of :class:`PythonRequirement` to resolve.
:param str dist_key: The value of `distribution.key` to match for a `distribution` from the
resolved requirements.
:return: The single :class:`pkg_resources.Distribution` matching `dist_key`.
:raises: :class:`self.SingleDistExtractionError` if no dists or multiple dists matched the given
`dist_key`. |
def get_notes():
""" By convention : D is a length of the element, d is a gap """
notes = {'DPhi':{}, 'dPhi':{}}
# Toroidal width (mm, inner outer)
notes['DPhi']['In'] = 26.370
notes['DPhi']['Out'] = 31.929
# Inter tiles distance (mm, uniform)
notes['dl'] = 0.500
# Poloidal/Radial total length (mm)
notes['DL'] = 437.000
# Number of tiles radially
notes['nb'] = 35
notes['nbPhi'] = 19*2*12
# Radial length of a tile (mm, uniform)
notes['Dl'] = 12.000
# Vertical height of tiles (mm, uniform)
notes['DZ'] = 26.000
# Toroidal space between needles (mm, inner outer)
notes['dPhi']['In'] = 0.588
notes['dPhi']['Out'] = 0.612
# (X,Z,Y) polygon of one needle (mm) !!!!!! (X,Z,Y)
# 1 mm should be added towards Z>0 in the direction normal to the divertor's upper surface
notes['sampleXZY'] = [[-759.457, -625.500, -1797.591], # Old start point
[-759.603, -624.572, -1797.936], # Only for pattern
[-772.277, -620.864, -1794.112],
[-761.681, -610.036, -1769.498], # Computed,tube/plane
[-761.895, -620.231, -1764.921],
[-751.095, -609.687, -1741.154],
[-755.613, -580.944, -1751.852],
[-766.413, -591.488, -1775.620], # Edge of plane
[-763.902, -596.129, -1774.659], # Computed,tube/plane
[-774.498, -606.956, -1799.274], # Middle top of tube
[-763.246, -601.395, -1806.563],
[-767.575, -605.891, -1816.813],
[-763.932, -629.068, -1808.186],
[-764.112, -629.255, -1808.613],
[-767.755, -606.078, -1817.240],
[-772.084, -610.573, -1827.490],
[-768.441, -633.750, -1818.863],
[-768.622, -633.938, -1819.290],
[-772.265, -610.760, -1827.917],
[-776.594, -615.256, -1838.167],
[-772.950, -638.433, -1829.540],
[-773.131, -638.620, -1829.967],
[-776.774, -615.443, -1838.594],
[-781.103, -619.938, -1848.844],
[-777.460, -643.115, -1840.217],
[-777.640, -643.303, -1840.644],
[-781.283, -620.126, -1849.271],
[-785.612, -624.621, -1859.520],
[-781.969, -647.798, -1850.894],
[-782.149, -647.985, -1851.321],
[-785.793, -624.808, -1859.948],
[-790.122, -629.303, -1870.197],
[-786.478, -652.481, -1861.571],
[-786.659, -652.668, -1861.998],
[-790.302, -629.491, -1870.624],
[-794.631, -633.986, -1880.874],
[-790.988, -657.163, -1872.248],
[-791.168, -657.351, -1872.675],
[-794.811, -634.173, -1881.301]]
notes['sampleXZY'] = np.array(notes['sampleXZY'])
for kk in notes.keys():
if type(notes[kk]) is dict:
notes[kk]['In'] = notes[kk]['In']*1.e-3
notes[kk]['Out'] = notes[kk]['Out']*1.e-3
elif not 'nb' in kk:
notes[kk] = notes[kk]*1.e-3
return notes | By convention : D is a length of the element, d is a gap |
def display_completions_like_readline(event):
"""
Key binding handler for readline-style tab completion.
This is meant to be as similar as possible to the way how readline displays
completions.
Generate the completions immediately (blocking) and display them above the
prompt in columns.
Usage::
# Call this handler when 'Tab' has been pressed.
registry.add_binding(Keys.ControlI)(display_completions_like_readline)
"""
# Request completions.
b = event.current_buffer
if b.completer is None:
return
complete_event = CompleteEvent(completion_requested=True)
completions = list(b.completer.get_completions(b.document, complete_event))
# Calculate the common suffix.
common_suffix = get_common_complete_suffix(b.document, completions)
# One completion: insert it.
if len(completions) == 1:
b.delete_before_cursor(-completions[0].start_position)
b.insert_text(completions[0].text)
# Multiple completions with common part.
elif common_suffix:
b.insert_text(common_suffix)
# Otherwise: display all completions.
elif completions:
_display_completions_like_readline(event.cli, completions) | Key binding handler for readline-style tab completion.
This is meant to be as similar as possible to the way how readline displays
completions.
Generate the completions immediately (blocking) and display them above the
prompt in columns.
Usage::
# Call this handler when 'Tab' has been pressed.
registry.add_binding(Keys.ControlI)(display_completions_like_readline) |
def wind_shear(shear: str, unit_alt: str = 'ft', unit_wind: str = 'kt', spoken: bool = False) -> str:
"""
Translate wind shear into a readable string
Ex: Wind shear 2000ft from 140 at 30kt
"""
if not shear or 'WS' not in shear or '/' not in shear:
return ''
shear = shear[2:].rstrip(unit_wind.upper()).split('/') # type: ignore
wdir = core.spoken_number(shear[1][:3]) if spoken else shear[1][:3]
return f'Wind shear {int(shear[0])*100}{unit_alt} from {wdir} at {shear[1][3:]}{unit_wind}' | Translate wind shear into a readable string
Ex: Wind shear 2000ft from 140 at 30kt |
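A quick call sketch, assuming the function above is importable; the remark string is a made-up example in standard METAR form.

print(wind_shear('WS020/14030KT'))
# -> Wind shear 2000ft from 140 at 30kt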
def most_likely_alpha(data, xmin, alpharange=(1.5,3.5), n_alpha=201):
"""
Return the most likely alpha for the data given an xmin
"""
alpha_vector = np.linspace(alpharange[0],alpharange[1],n_alpha)
return alpha_vector[discrete_max_likelihood_arg(data, xmin,
alpharange=alpharange,
n_alpha=n_alpha)] | Return the most likely alpha for the data given an xmin |
def output_solution(self, fd, z, z_est, error_sqrsum):
""" Prints comparison of measurements and their estimations.
"""
col_width = 11
sep = ("=" * col_width + " ") * 4 + "\n"
fd.write("State Estimation\n")
fd.write("-" * 16 + "\n")
fd.write(sep)
fd.write("Type".center(col_width) + " ")
fd.write("Name".center(col_width) + " ")
fd.write("Measurement".center(col_width) + " ")
fd.write("Estimation".center(col_width) + " ")
fd.write("\n")
fd.write(sep)
c = 0
for t in [PF, PT, QF, QT, PG, QG, VM, VA]:
for meas in self.measurements:
if meas.type == t:
n = meas.b_or_l.name[:col_width].ljust(col_width)
fd.write(t.ljust(col_width) + " ")
fd.write(n + " ")
fd.write("%11.5f " % z[c])
fd.write("%11.5f\n" % z_est[c])
# fd.write("%s\t%s\t%.3f\t%.3f\n" % (t, n, z[c], z_est[c]))
c += 1
fd.write("\nWeighted sum of error squares = %.4f\n" % error_sqrsum) | Prints comparison of measurements and their estimations. |
def get_conversion_factor(self, new_unit):
"""
Returns a conversion factor between this unit and a new unit.
Compound units are supported, but must have the same powers in each
unit type.
Args:
new_unit: The new unit.
"""
uo_base, ofactor = self.as_base_units
un_base, nfactor = Unit(new_unit).as_base_units
units_new = sorted(un_base.items(),
key=lambda d: _UNAME2UTYPE[d[0]])
units_old = sorted(uo_base.items(),
key=lambda d: _UNAME2UTYPE[d[0]])
factor = ofactor / nfactor
for uo, un in zip(units_old, units_new):
if uo[1] != un[1]:
raise UnitError("Units %s and %s are not compatible!" % (uo, un))
c = ALL_UNITS[_UNAME2UTYPE[uo[0]]]
factor *= (c[uo[0]] / c[un[0]]) ** uo[1]
return factor | Returns a conversion factor between this unit and a new unit.
Compound units are supported, but must have the same powers in each
unit type.
Args:
new_unit: The new unit. |
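A simplified illustration of the factor computation for compound units, using a tiny hand-rolled table in place of pymatgen's ``ALL_UNITS``; the unit names and SI factors below are illustrative only.

# Factor of each unit relative to the base unit of its type (length in m, time in s).
FACTORS = {"m": 1.0, "km": 1000.0, "s": 1.0, "h": 3600.0}
UTYPE = {"m": "length", "km": "length", "s": "time", "h": "time"}

def conversion_factor(old, new):
    # old/new map unit name -> power, e.g. {"km": 1, "h": -1}; powers must match per unit type.
    old_units = sorted(old.items(), key=lambda d: UTYPE[d[0]])
    new_units = sorted(new.items(), key=lambda d: UTYPE[d[0]])
    factor = 1.0
    for (uo, po), (un, pn) in zip(old_units, new_units):
        if po != pn:
            raise ValueError("Units %s and %s are not compatible!" % (uo, un))
        factor *= (FACTORS[uo] / FACTORS[un]) ** po
    return factor

print(conversion_factor({"km": 1, "h": -1}, {"m": 1, "s": -1}))  # km/h -> m/s: 0.2777...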
def _create_flat_pointers(dct, key_stack=()):
# type: (Dict[str, Any], Tuple[str, ...]) -> Generator[Tuple[Tuple[str, ...], Dict[str, Any], str], None, None]
"""Create a flattened dictionary of "key stacks" -> (value container, key)
"""
for k in dct.keys():
current_key = key_stack + (k,)
if isinstance(dct[k], BaseMapping):
for flat_ptr in _create_flat_pointers(dct[k], current_key):
yield flat_ptr
else:
yield (current_key, dct, k) | Create a flattened dictionary of "key stacks" -> (value container, key) |
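A standalone sketch of the same flattener using ``collections.abc.Mapping`` in place of the library's ``BaseMapping`` (an assumption about what that base class covers), followed by a small usage example.

from collections.abc import Mapping

def create_flat_pointers(dct, key_stack=()):
    # Yields (key path, containing dict, leaf key) for every non-mapping leaf value.
    for k in dct:
        current_key = key_stack + (k,)
        if isinstance(dct[k], Mapping):
            yield from create_flat_pointers(dct[k], current_key)
        else:
            yield (current_key, dct, k)

cfg = {"db": {"host": "localhost", "port": 5432}, "debug": True}
for path, container, key in create_flat_pointers(cfg):
    print(path, container[key])
# ('db', 'host') localhost
# ('db', 'port') 5432
# ('debug',) True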
async def set(
self, key, value, ttl=SENTINEL, dumps_fn=None, namespace=None, _cas_token=None, _conn=None
):
"""
Stores the value in the given key with ttl if specified
:param key: str
:param value: obj
:param ttl: int the expiration time in seconds. Due to memcached
restrictions if you want compatibility use int. In case you
need miliseconds, redis and memory support float ttls
:param dumps_fn: callable alternative to use as dumps function
:param namespace: str alternative namespace to use
:param timeout: int or float in seconds specifying maximum timeout
for the operations to last
:returns: True if the value was set
:raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
"""
start = time.monotonic()
dumps = dumps_fn or self._serializer.dumps
ns_key = self.build_key(key, namespace=namespace)
res = await self._set(
ns_key, dumps(value), ttl=self._get_ttl(ttl), _cas_token=_cas_token, _conn=_conn
)
logger.debug("SET %s %d (%.4f)s", ns_key, True, time.monotonic() - start)
return res | Stores the value in the given key with ttl if specified
:param key: str
:param value: obj
:param ttl: int the expiration time in seconds. Due to memcached
restrictions if you want compatibility use int. In case you
need miliseconds, redis and memory support float ttls
:param dumps_fn: callable alternative to use as dumps function
:param namespace: str alternative namespace to use
:param timeout: int or float in seconds specifying maximum timeout
for the operations to last
:returns: True if the value was set
:raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout |
def refresh(self):
""" Refreshes the editor panels (resize and update margins) """
_logger().log(5, 'refresh_panels')
self.resize()
self._update(self.editor.contentsRect(), 0,
force_update_margins=True) | Refreshes the editor panels (resize and update margins) |
def protein_statistics(self):
"""Get a dictionary of basic statistics describing this protein"""
# TODO: can i use get_dict here instead
d = {}
d['id'] = self.id
d['sequences'] = [x.id for x in self.sequences]
d['num_sequences'] = self.num_sequences
if self.representative_sequence:
d['representative_sequence'] = self.representative_sequence.id
d['repseq_gene_name'] = self.representative_sequence.gene_name
d['repseq_uniprot'] = self.representative_sequence.uniprot
d['repseq_description'] = self.representative_sequence.description
d['num_structures'] = self.num_structures
d['experimental_structures'] = [x.id for x in self.get_experimental_structures()]
d['num_experimental_structures'] = self.num_structures_experimental
d['homology_models'] = [x.id for x in self.get_homology_models()]
d['num_homology_models'] = self.num_structures_homology
if self.representative_structure:
d['representative_structure'] = self.representative_structure.id
d['representative_chain'] = self.representative_chain
d['representative_chain_seq_coverage'] = self.representative_chain_seq_coverage
d['repstruct_description'] = self.description
if self.representative_structure.is_experimental:
d['repstruct_resolution'] = self.representative_structure.resolution
d['num_sequence_alignments'] = len(self.sequence_alignments)
d['num_structure_alignments'] = len(self.structure_alignments)
return d | Get a dictionary of basic statistics describing this protein |
def buildElement(element, items, itemName):
""" Create an element for output.
"""
def assertNonnegative(i,name):
if i < 0:
raise RuntimeError("Negative value %s reported for %s" %(i,name) )
else:
return float(i)
itemTimes = []
itemClocks = []
itemMemory = []
for item in items:
itemTimes.append(assertNonnegative(float(item["time"]), "time"))
itemClocks.append(assertNonnegative(float(item["clock"]), "clock"))
itemMemory.append(assertNonnegative(float(item["memory"]), "memory"))
assert len(itemClocks) == len(itemTimes) == len(itemMemory)
itemWaits=[]
for index in range(0,len(itemTimes)):
itemWaits.append(itemTimes[index] - itemClocks[index])
itemWaits.sort()
itemTimes.sort()
itemClocks.sort()
itemMemory.sort()
if len(itemTimes) == 0:
itemTimes.append(0)
itemClocks.append(0)
itemWaits.append(0)
itemMemory.append(0)
element[itemName]=Expando(
total_number=float(len(items)),
total_time=float(sum(itemTimes)),
median_time=float(itemTimes[old_div(len(itemTimes),2)]),
average_time=float(old_div(sum(itemTimes),len(itemTimes))),
min_time=float(min(itemTimes)),
max_time=float(max(itemTimes)),
total_clock=float(sum(itemClocks)),
median_clock=float(itemClocks[old_div(len(itemClocks),2)]),
average_clock=float(old_div(sum(itemClocks),len(itemClocks))),
min_clock=float(min(itemClocks)),
max_clock=float(max(itemClocks)),
total_wait=float(sum(itemWaits)),
median_wait=float(itemWaits[old_div(len(itemWaits),2)]),
average_wait=float(old_div(sum(itemWaits),len(itemWaits))),
min_wait=float(min(itemWaits)),
max_wait=float(max(itemWaits)),
total_memory=float(sum(itemMemory)),
median_memory=float(itemMemory[old_div(len(itemMemory),2)]),
average_memory=float(old_div(sum(itemMemory),len(itemMemory))),
min_memory=float(min(itemMemory)),
max_memory=float(max(itemMemory)),
name=itemName
)
return element[itemName] | Create an element for output. |
def _import_all_troposphere_modules(self):
""" Imports all troposphere modules and returns them """
dirname = os.path.join(os.path.dirname(__file__))
module_names = [
pkg_name
for importer, pkg_name, is_pkg in
pkgutil.walk_packages([dirname], prefix="troposphere.")
if not is_pkg and pkg_name not in self.EXCLUDE_MODULES]
module_names.append('troposphere')
modules = []
for name in module_names:
modules.append(importlib.import_module(name))
def members_predicate(m):
return inspect.isclass(m) and not inspect.isbuiltin(m)
members = []
for module in modules:
members.extend((m[1] for m in inspect.getmembers(
module, members_predicate)))
return set(members) | Imports all troposphere modules and returns them |
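The same walk-and-collect pattern can be sketched against any installed package; the target package name here ("json") is purely illustrative, and the exclude list is omitted.

import importlib
import inspect
import pkgutil

def collect_package_classes(package_name):
    pkg = importlib.import_module(package_name)
    modules = [pkg]
    # Only packages have __path__; walk their submodules recursively.
    if hasattr(pkg, "__path__"):
        for _, name, is_pkg in pkgutil.walk_packages(pkg.__path__, prefix=package_name + "."):
            if not is_pkg:
                modules.append(importlib.import_module(name))
    members = set()
    for module in modules:
        members.update(m for _, m in inspect.getmembers(module, inspect.isclass))
    return members

print(len(collect_package_classes("json")))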
def getFeatureID(self, location):
"""
Returns the feature index associated with the provided location.
In the case of a sphere, it is always the same if the location is valid.
"""
if not self.contains(location):
return self.EMPTY_FEATURE
return self.SPHERICAL_SURFACE | Returns the feature index associated with the provided location.
In the case of a sphere, it is always the same if the location is valid. |
def add_unit(unit,**kwargs):
"""
Add the unit defined into the object "unit" to the DB
If unit["project_id"] is None it means that the unit is global, otherwise is property of a project
If the unit exists emits an exception
A minimal example:
.. code-block:: python
new_unit = dict(
name = 'Teaspoons per second',
abbreviation = 'tsp s^-1',
cf = 0, # Constant conversion factor
lf = 1.47867648e-05, # Linear conversion factor
dimension_id = 2,
description = 'A flow of one teaspoon per second.',
)
add_unit(new_unit)
"""
new_unit = Unit()
new_unit.dimension_id = unit["dimension_id"]
new_unit.name = unit['name']
# Needed to uniform abbr to abbreviation
new_unit.abbreviation = unit['abbreviation']
# Needed to uniform into to description
new_unit.description = unit['description']
new_unit.lf = unit['lf']
new_unit.cf = unit['cf']
if ('project_id' in unit) and (unit['project_id'] is not None):
# Adding dimension to the "user" dimensions list
new_unit.project_id = unit['project_id']
# Save on DB
db.DBSession.add(new_unit)
db.DBSession.flush()
return JSONObject(new_unit) | Add the unit defined into the object "unit" to the DB
If unit["project_id"] is None it means that the unit is global, otherwise is property of a project
If the unit exists emits an exception
A minimal example:
.. code-block:: python
new_unit = dict(
name = 'Teaspoons per second',
abbreviation = 'tsp s^-1',
cf = 0, # Constant conversion factor
lf = 1.47867648e-05, # Linear conversion factor
dimension_id = 2,
description = 'A flow of one teaspoon per second.',
)
add_unit(new_unit) |
def calculate_dates(self, dt):
"""
Given a dt, find that day's close and period start (close - offset).
"""
period_end = self.cal.open_and_close_for_session(
self.cal.minute_to_session_label(dt),
)[1]
# Align the market close time here with the execution time used by the
# simulation clock. This ensures that scheduled functions trigger at
# the correct times.
self._period_end = self.cal.execution_time_from_close(period_end)
self._period_start = self._period_end - self.offset
self._period_close = self._period_end | Given a dt, find that day's close and period start (close - offset). |
def get_zone_info(self, controller, zone, return_variable):
""" Get all relevant info for the zone
When called with return_variable == 4, then the function returns a list with current
volume, source and ON/OFF status.
When called with 0, 1 or 2, it will return an integer with the Power, Source and Volume """
# Define the signature for a response message, used later to find the correct response from the controller.
# FF is the hex we use to signify bytes that need to be ignored when comparing to response message.
# resp_msg_signature = self.create_response_signature("04 02 00 @zz 07 00 00 01 00 0C", zone)
_LOGGER.debug("Begin - controller= %s, zone= %s, get status", controller, zone)
resp_msg_signature = self.create_response_signature("04 02 00 @zz 07", zone)
send_msg = self.create_send_message("F0 @cc 00 7F 00 00 @kk 01 04 02 00 @zz 07 00 00", controller, zone)
try:
self.lock.acquire()
_LOGGER.debug('Acquired lock for zone %s', zone)
self.send_data(send_msg)
_LOGGER.debug("Zone: %s Sent: %s", zone, send_msg)
# Expected response is as per pg 23 of cav6.6_rnet_protocol_v1.01.00.pdf
matching_message = self.get_response_message(resp_msg_signature)
if matching_message is not None:
# Offset of 11 is the position of return data payload is that we require for the signature we are using.
_LOGGER.debug("matching message to use= %s", matching_message)
_LOGGER.debug("matching message length= %s", len(matching_message))
if return_variable == 4:
return_value = [matching_message[11], matching_message[12], matching_message[13]]
else:
return_value = matching_message[return_variable + 11]
else:
return_value = None
_LOGGER.warning("Did not receive expected Russound power state for controller %s and zone %s.", controller, zone)
finally:
self.lock.release()
_LOGGER.debug("Released lock for zone %s", zone)
_LOGGER.debug("End - controller= %s, zone= %s, get status \n", controller, zone)
return return_value | Get all relevant info for the zone
When called with return_variable == 4, then the function returns a list with current
volume, source and ON/OFF status.
When called with 0, 1 or 2, it will return an integer with the Power, Source and Volume |
def expand(self, percentage):
"""
Expands the box co-ordinates by given percentage on four sides. Ignores negative values.
:param percentage: Percentage to expand
:return: New expanded Box
"""
ex_h = math.ceil(self.height * percentage / 100)
ex_w = math.ceil(self.width * percentage / 100)
x = max(0, self.x - ex_w)
y = max(0, self.y - ex_h)
x2 = self.x + self.width + ex_w
y2 = self.y + self.height + ex_h
return Box.from_xy(x, y, x2, y2) | Expands the box co-ordinates by given percentage on four sides. Ignores negative values.
:param percentage: Percentage to expand
:return: New expanded Box |
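A worked example of the expansion arithmetic as a standalone function; the box coordinates are made up and the ``Box.from_xy`` constructor is replaced by a plain tuple.

import math

def expand_box(x, y, w, h, percentage):
    # Grow by `percentage` of each dimension on every side, clamping the origin at 0.
    ex_w = math.ceil(w * percentage / 100)
    ex_h = math.ceil(h * percentage / 100)
    return (max(0, x - ex_w), max(0, y - ex_h), x + w + ex_w, y + h + ex_h)

assert expand_box(10, 10, 100, 50, 10) == (0, 5, 120, 65)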
def Offset(self, vtableOffset):
"""Offset provides access into the Table's vtable.
Deprecated fields are ignored by checking the vtable's length."""
vtable = self.Pos - self.Get(N.SOffsetTFlags, self.Pos)
vtableEnd = self.Get(N.VOffsetTFlags, vtable)
if vtableOffset < vtableEnd:
return self.Get(N.VOffsetTFlags, vtable + vtableOffset)
return 0 | Offset provides access into the Table's vtable.
Deprecated fields are ignored by checking the vtable's length. |
def shorter_name(key):
"""Return a shorter name for an id.
Does this by only taking the last part of the URI,
after the last / and the last #. Also replaces - and . with _.
Parameters
----------
key: str
Some URI
Returns
-------
key_short: str
A shortened, but more ambiguous, identifier
"""
key_short = key
for sep in ['#', '/']:
ind = key_short.rfind(sep)
        # str.rfind returns -1 (not None) when sep is absent; slicing from ind + 1
        # then starts at 0, which keeps the whole string, so no branch is needed.
        key_short = key_short[ind + 1:]
return key_short.replace('-', '_').replace('.', '_') | Return a shorter name for an id.
Does this by only taking the last part of the URI,
after the last / and the last #. Also replaces - and . with _.
Parameters
----------
key: str
Some URI
Returns
-------
key_short: str
A shortened, but more ambiguous, identifier |
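A short usage example of the shortener, assuming the function above is in scope; the URI is invented for illustration.

assert shorter_name("http://example.org/ontology#has-part.v1") == "has_part_v1"
assert shorter_name("plain_id") == "plain_id"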
def find(self, vid=None, pid=None, serial=None, interface=None, \
path=None, release_number=None, manufacturer=None,
product=None, usage=None, usage_page=None):
"""
Attempts to open a device in this `Enumeration` object. Optional
arguments can be provided to filter the resulting list based on various
parameters of the HID devices.
Args:
vid: filters by USB Vendor ID
pid: filters by USB Product ID
serial: filters by USB serial string (.iSerialNumber)
interface: filters by interface number (bInterfaceNumber)
release_number: filters by the USB release number (.bcdDevice)
manufacturer: filters by USB manufacturer string (.iManufacturer)
product: filters by USB product string (.iProduct)
usage: filters by HID usage
usage_page: filters by HID usage_page
path: filters by HID API path.
"""
result = []
for dev in self.device_list:
if vid not in [0, None] and dev.vendor_id != vid:
continue
if pid not in [0, None] and dev.product_id != pid:
continue
if serial and dev.serial_number != serial:
continue
if path and dev.path != path:
continue
if manufacturer and dev.manufacturer_string != manufacturer:
continue
if product and dev.product_string != product:
continue
if release_number != None and dev.release_number != release_number:
continue
if interface != None and dev.interface_number != interface:
continue
if usage != None and dev.usage != usage:
continue
if usage_page != None and dev.usage_page != usage_page:
continue
result.append(dev)
return result | Attempts to open a device in this `Enumeration` object. Optional
arguments can be provided to filter the resulting list based on various
parameters of the HID devices.
Args:
vid: filters by USB Vendor ID
pid: filters by USB Product ID
serial: filters by USB serial string (.iSerialNumber)
interface: filters by interface number (bInterfaceNumber)
release_number: filters by the USB release number (.bcdDevice)
manufacturer: filters by USB manufacturer string (.iManufacturer)
product: filters by USB product string (.iProduct)
usage: filters by HID usage
usage_page: filters by HID usage_page
path: filters by HID API path. |
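A hypothetical call sketch; the ``Enumeration`` class comes from the surrounding HID library and the vendor id 0x046d is only an example.

en = Enumeration()
for dev in en.find(vid=0x046d, interface=0):
    print(dev.product_string, dev.path)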
def derivative(self, point):
r"""Derivative of this operator in ``point``.
``NormOperator().derivative(y)(x) == (y / y.norm()).inner(x)``
This is only applicable in inner product spaces.
Parameters
----------
point : `domain` `element-like`
Point in which to take the derivative.
Returns
-------
derivative : `InnerProductOperator`
Raises
------
ValueError
If ``point.norm() == 0``, in which case the derivative is not well
defined in the Frechet sense.
Notes
-----
The derivative cannot be written in a general sense except in Hilbert
spaces, in which case it is given by
.. math::
(D \|\cdot\|)(y)(x) = \langle y / \|y\|, x \rangle
Examples
--------
>>> r3 = odl.rn(3)
>>> op = NormOperator(r3)
>>> derivative = op.derivative([1, 0, 0])
>>> derivative([1, 0, 0])
1.0
"""
point = self.domain.element(point)
norm = point.norm()
if norm == 0:
raise ValueError('not differentiable in 0')
return InnerProductOperator(point / norm) | r"""Derivative of this operator in ``point``.
``NormOperator().derivative(y)(x) == (y / y.norm()).inner(x)``
This is only applicable in inner product spaces.
Parameters
----------
point : `domain` `element-like`
Point in which to take the derivative.
Returns
-------
derivative : `InnerProductOperator`
Raises
------
ValueError
If ``point.norm() == 0``, in which case the derivative is not well
defined in the Frechet sense.
Notes
-----
The derivative cannot be written in a general sense except in Hilbert
spaces, in which case it is given by
.. math::
(D \|\cdot\|)(y)(x) = \langle y / \|y\|, x \rangle
Examples
--------
>>> r3 = odl.rn(3)
>>> op = NormOperator(r3)
>>> derivative = op.derivative([1, 0, 0])
>>> derivative([1, 0, 0])
1.0 |
def get_next_rngruns(self):
"""
Yield the next RngRun values that can be used in this campaign.
"""
available_runs = [result['params']['RngRun'] for result in
self.get_results()]
yield from DatabaseManager.get_next_values(available_runs) | Yield the next RngRun values that can be used in this campaign. |