def is_valid_python(code, reraise=True, ipy_magic_workaround=False):
"""
References:
http://stackoverflow.com/questions/23576681/python-check-syntax
"""
import ast
import re
try:
if ipy_magic_workaround:
code = '\n'.join(['pass' if re.match(r'\s*%[a-z]*', line) else line for line in code.split('\n')])
ast.parse(code)
except SyntaxError:
if reraise:
import utool as ut
print('Syntax Error')
ut.print_python_code(code)
raise
return False
return True
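A minimal usage sketch for is_valid_python, assuming the function is imported from its utility module; the snippet strings below are illustrative, not from the source:

snippet_ok = "x = 1\nprint(x)"
snippet_bad = "def broken(:\n    pass"

assert is_valid_python(snippet_ok) is True
# with reraise=False the SyntaxError is swallowed and False is returned instead
assert is_valid_python(snippet_bad, reraise=False) is False
# IPython magics such as '%timeit' are not valid Python syntax; the workaround
# replaces those lines with 'pass' before parsing
assert is_valid_python("%timeit x = 1", ipy_magic_workaround=True) is True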
def stream_replicate():
"""Monitor changes in approximately real-time and replicate them"""
stream = primary.stream(SomeDataBlob, "trim_horizon")
next_heartbeat = pendulum.now()
while True:
now = pendulum.now()
if now >= next_heartbeat:
stream.heartbeat()
next_heartbeat = now.add(minutes=10)
record = next(stream)
if record is None:
continue
if record["new"] is not None:
replica.save(record["new"])
else:
replica.delete(record["old"]) | Monitor changes in approximately real-time and replicate them |
def set_polling_override(self, override):
"""Set the sensor polling timer override value in milliseconds.
Due to the time it takes to poll all the sensors on up to 5 IMUs, it's not
possible for the SK8 firmware to define a single fixed rate for reading
new samples without it being artificially low for most configurations.
Instead the firmware tries to define a sensible default value for each
combination of IMUs and sensors that can be enabled (any combination of
1-5 IMUs and 1-3 sensors on each IMU). In most cases this should work well,
but for example if you have multiple SK8s connected through the same dongle
and have multiple IMUs enabled on each, you may find packets start to be
dropped quite frequently.
To mitigate this, you can adjust the period of the timer used by the firmware
to poll for new sensor data (and send data packets to the host device). The
value should be in integer milliseconds, and have a minimum value of 20. Values
below 20 will be treated as a request to disable the override and return to the
default polling period.
The method can be called before or after streaming is activated, and will take
effect immediately.
NOTE1: the value is stored in RAM and will not persist across reboots, although
it should persist for multiple connections.
NOTE2: once set, the override applies to ALL sensor configurations, so for
example if you set it while using 5 IMUs on 2 SK8s, then switch to using
1 IMU on each SK8, you will probably want to disable it again as the
latter configuration should work fine with the default period.
Args:
override (int): polling timer override period in milliseconds. Values
below 20 are treated as 0, and have the effect of disabling the
override in favour of the default periods.
Returns:
True on success, False on error.
"""
polling_override = self.get_characteristic_handle_from_uuid(UUID_POLLING_OVERRIDE)
if polling_override is None:
logger.warning('Failed to find handle for polling override characteristic')
return False
if self.dongle._write_attribute(self.conn_handle, polling_override, struct.pack('B', override)):
return True
return False
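A short, hypothetical usage sketch; `sk8` stands in for an already-connected SK8 device object exposing this method (the variable name is an assumption, not from the source):

if not sk8.set_polling_override(25):   # poll every 25 ms
    print('failed to set polling override')
# later: values below 20 disable the override and restore the default periods
sk8.set_polling_override(0)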
def bls_stats_singleperiod(times, mags, errs, period,
magsarefluxes=False,
sigclip=10.0,
perioddeltapercent=10,
nphasebins=200,
mintransitduration=0.01,
maxtransitduration=0.4,
ingressdurationfraction=0.1,
verbose=True):
'''This calculates the SNR, depth, duration, a refit period, and time of
center-transit for a single period.
The equation used for SNR is::
SNR = (transit model depth / RMS of LC with transit model subtracted)
* sqrt(number of points in transit)
NOTE: you should set the kwargs `sigclip`, `nphasebins`,
`mintransitduration`, `maxtransitduration` to what you used for an initial
BLS run to detect transits in the input light curve to match those input
conditions.
Parameters
----------
times,mags,errs : np.array
These contain the magnitude/flux time-series and any associated errors.
period : float
The period to search around and refit the transits. This will be used to
calculate the start and end periods of a rerun of BLS to calculate the
stats.
magsarefluxes : bool
Set to True if the input measurements in `mags` are actually fluxes and
not magnitudes.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
perioddeltapercent : float
The fraction of the period provided to use to search around this
value. This is a percentage. The period range searched will then be::
[period - (perioddeltapercent/100.0)*period,
period + (perioddeltapercent/100.0)*period]
nphasebins : int
The number of phase bins to use in the BLS run.
mintransitduration : float
The minimum transit duration in phase to consider.
maxtransitduration : float
The maximum transit duration to consider.
ingressdurationfraction : float
The fraction of the transit duration to use to generate an initial value
of the transit ingress duration for the BLS model refit. This will be
fit by this function.
verbose : bool
If True, will indicate progress and any problems encountered.
Returns
-------
dict
A dict of the following form is returned::
{'period': the refit best period,
'epoch': the refit epoch (i.e. mid-transit time),
'snr':the SNR of the transit,
'transitdepth':the depth of the transit,
'transitduration':the duration of the transit,
'nphasebins':the input value of nphasebins,
'transingressbin':the phase bin containing transit ingress,
'transegressbin':the phase bin containing transit egress,
'blsmodel':the full BLS model used along with its parameters,
'subtractedmags':BLS model - phased light curve,
'phasedmags':the phase light curve,
'phases': the phase values}
'''
# get rid of nans first and sigclip
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# make sure there are enough points to calculate a spectrum
if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9:
# get the period interval
startp = period - perioddeltapercent*period/100.0
if startp < 0:
startp = period
endp = period + perioddeltapercent*period/100.0
# rerun BLS in serial mode around the specified period to get the
# transit depth, duration, ingress and egress bins
blsres = bls_serial_pfind(stimes, smags, serrs,
verbose=verbose,
startp=startp,
endp=endp,
nphasebins=nphasebins,
mintransitduration=mintransitduration,
maxtransitduration=maxtransitduration,
magsarefluxes=magsarefluxes,
get_stats=False,
sigclip=None)
if (not blsres or
'blsresult' not in blsres or
blsres['blsresult'] is None):
LOGERROR("BLS failed during a period-search "
"performed around the input best period: %.6f. "
"Can't continue. " % period)
return None
thistransdepth = blsres['blsresult']['transdepth']
thistransduration = blsres['blsresult']['transduration']
thisbestperiod = blsres['bestperiod']
thistransingressbin = blsres['blsresult']['transingressbin']
thistransegressbin = blsres['blsresult']['transegressbin']
thisnphasebins = nphasebins
stats = _get_bls_stats(stimes,
smags,
serrs,
thistransdepth,
thistransduration,
ingressdurationfraction,
nphasebins,
thistransingressbin,
thistransegressbin,
thisbestperiod,
thisnphasebins,
magsarefluxes=magsarefluxes,
verbose=verbose)
return stats
# if there aren't enough points in the mag series, bail out
else:
LOGERROR('no good detections for these times and mags, skipping...')
return None
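A hedged usage sketch, assuming bls_stats_singleperiod is in scope and that `times`, `mags`, `errs` and the detected period come from an earlier BLS run; the keyword values below simply mirror that earlier run and the period is an example value:

stats = bls_stats_singleperiod(
    times, mags, errs,
    period=3.4721,            # best period from the initial BLS search (example value)
    magsarefluxes=False,
    sigclip=10.0,             # match the settings used for the initial run
    nphasebins=200,
    mintransitduration=0.01,
    maxtransitduration=0.4,
)
if stats is not None:
    print(stats['period'], stats['epoch'], stats['snr'], stats['transitdepth'])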
def cluster_coincs_multiifo(stat, time_coincs, timeslide_id, slide, window, argmax=numpy.argmax):
"""Cluster coincident events for each timeslide separately, across
templates, based on the ranking statistic
Parameters
----------
stat: numpy.ndarray
vector of ranking values to maximize
time_coincs: tuple of numpy.ndarrays
trigger times for each ifo, or -1 if an ifo does not participate in a coinc
timeslide_id: numpy.ndarray
vector that determines the timeslide offset
slide: float
length of the timeslides offset interval
window: float
duration of clustering window in seconds
Returns
-------
cindex: numpy.ndarray
The set of indices corresponding to the surviving coincidences
"""
time_coinc_zip = list(zip(*time_coincs))  # list() so len() and iteration both work on Python 3
if len(time_coinc_zip) == 0:
logging.info('No coincident triggers.')
return numpy.array([])
time_avg_num = []
#find number of ifos and mean time over participating ifos for each coinc
for tc in time_coinc_zip:
time_avg_num.append(mean_if_greater_than_zero(tc))
time_avg, num_ifos = zip(*time_avg_num)
time_avg = numpy.array(time_avg)
num_ifos = numpy.array(num_ifos)
# shift all but the pivot ifo by (num_ifos-1) * timeslide_id * slide
# this leads to a mean coinc time located around pivot time
if numpy.isfinite(slide):
nifos_minusone = (num_ifos - numpy.ones_like(num_ifos))
time_avg = time_avg + (nifos_minusone * timeslide_id * slide)/num_ifos
tslide = timeslide_id.astype(numpy.float128)
time_avg = time_avg.astype(numpy.float128)
span = (time_avg.max() - time_avg.min()) + window * 10
time_avg = time_avg + span * tslide
cidx = cluster_over_time(stat, time_avg, window, argmax)
return cidx
def pemp(stat, stat0):
""" Computes empirical values identically to bioconductor/qvalue empPvals """
assert len(stat0) > 0
assert len(stat) > 0
stat = np.array(stat)
stat0 = np.array(stat0)
m = len(stat)
m0 = len(stat0)
statc = np.concatenate((stat, stat0))
v = np.array([True] * m + [False] * m0)
perm = np.argsort(-statc, kind="mergesort") # reversed sort, mergesort is stable
v = v[perm]
u = np.where(v)[0]
p = (u - np.arange(m)) / float(m0)
# ranks can be fractional, we round down to the next integer, ranking returns values starting
# with 1, not 0:
ranks = np.floor(scipy.stats.rankdata(-stat)).astype(int) - 1
p = p[ranks]
p[p <= 1.0 / m0] = 1.0 / m0
return p
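A small self-contained sketch of how pemp is used (assumes the module-level numpy/scipy.stats imports from its source file); the numbers are illustrative:

import numpy as np

rng = np.random.default_rng(0)
stat0 = rng.normal(size=1000)        # statistics simulated under the null hypothesis
stat = np.array([3.2, 0.1, 2.5])     # observed statistics
pvals = pemp(stat, stat0)
# pvals[k] is roughly the fraction of null statistics >= stat[k],
# floored at 1 / len(stat0) so no p-value is exactly zero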
def c2ln(c,l1,l2,n):
"char[n] to two unsigned long???"
# the original C macro advances the byte pointer (c += n); the absolute indexing below makes that unnecessary
l1, l2 = U32(0), U32(0)
f = 0
if n == 8:
l2 = l2 | (U32(c[7]) << 24)
f = 1
if f or (n == 7):
l2 = l2 | (U32(c[6]) << 16)
f = 1
if f or (n == 6):
l2 = l2 | (U32(c[5]) << 8)
f = 1
if f or (n == 5):
l2 = l2 | U32(c[4])
f = 1
if f or (n == 4):
l1 = l1 | (U32(c[3]) << 24)
f = 1
if f or (n == 3):
l1 = l1 | (U32(c[2]) << 16)
f = 1
if f or (n == 2):
l1 = l1 | (U32(c[1]) << 8)
f = 1
if f or (n == 1):
l1 = l1 | U32(c[0])
return (l1, l2)
def get_datasets_list(self, project_id=None):
"""
Method returns full list of BigQuery datasets in the current project
.. seealso::
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
:param project_id: Google Cloud Project for which you
try to get all datasets
:type project_id: str
:return: datasets_list
Example of returned datasets_list: ::
{
"kind":"bigquery#dataset",
"location":"US",
"id":"your-project:dataset_2_test",
"datasetReference":{
"projectId":"your-project",
"datasetId":"dataset_2_test"
}
},
{
"kind":"bigquery#dataset",
"location":"US",
"id":"your-project:dataset_1_test",
"datasetReference":{
"projectId":"your-project",
"datasetId":"dataset_1_test"
}
}
]
"""
dataset_project_id = project_id if project_id else self.project_id
try:
datasets_list = self.service.datasets().list(
projectId=dataset_project_id).execute(num_retries=self.num_retries)['datasets']
self.log.info("Datasets List: %s", datasets_list)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content))
return datasets_list
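A hypothetical usage sketch; `hook` stands in for the BigQuery hook/cursor object that defines this method (the variable name is an assumption):

datasets = hook.get_datasets_list(project_id='your-project')
for dataset in datasets:
    ref = dataset['datasetReference']
    print(ref['projectId'], ref['datasetId'])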
def files_to_pif(files, verbose=0, quality_report=True, inline=True):
'''Given a directory that contains output from
a DFT calculation, parse the data and return
a pif object
Input:
files - [str] list of files from which the parser is allowed to read.
verbose - int, How much status messages to print
Output:
pif - ChemicalSystem, Results and settings of
the DFT calculation in pif format
'''
# Look for the first parser compatible with the directory
found_parser = False
for possible_parser in [PwscfParser, VaspParser]:
try:
parser = possible_parser(files)
found_parser = True
break
except InvalidIngesterException:
# Constructors fail when they cannot find appropriate files
pass
if not found_parser:
raise Exception('Directory is not in correct format for an existing parser')
if verbose > 0:
print("Found a {} directory".format(parser.get_name()))
# Get information about the chemical system
chem = ChemicalSystem()
chem.chemical_formula = parser.get_composition()
# Get software information, to list as method
software = Software(name=parser.get_name(),
version=parser.get_version_number())
# Define the DFT method object
method = Method(name='Density Functional Theory',
software=[software])
# Get the settings (aka. "conditions") of the DFT calculations
conditions = []
for name, func in parser.get_setting_functions().items():
# Get the condition
cond = getattr(parser, func)()
# If the condition is None or False, skip it
if cond is None:
continue
if inline and cond.files is not None:
continue
# Set the name
cond.name = name
# Set the types
conditions.append(cond)
# Get the properties of the system
chem.properties = []
for name, func in parser.get_result_functions().items():
# Get the property
prop = getattr(parser, func)()
# If the property is None, skip it
if prop is None:
continue
if inline and prop.files is not None:
continue
# Add name and other data
prop.name = name
prop.methods = [method,]
prop.data_type='COMPUTATIONAL'
if verbose > 0 and isinstance(prop, Value):
print(name)
if prop.conditions is None:
prop.conditions = conditions
else:
if not isinstance(prop.conditions, list):
prop.conditions = [prop.conditions]
prop.conditions.extend(conditions)
# Add it to the output
chem.properties.append(prop)
# Check to see if we should add the quality report
if quality_report and isinstance(parser, VaspParser):
_add_quality_report(parser, chem)
return chem
def transform(function):
"""Return a processor for a style's "transform" function.
"""
def transform_fn(_, result):
if isinstance(result, Nothing):
return result
lgr.debug("Transforming %r with %r", result, function)
try:
return function(result)
except:
exctype, value, tb = sys.exc_info()
try:
new_exc = StyleFunctionError(function, exctype, value)
# Remove the "During handling ..." since we're
# reraising with the traceback.
new_exc.__cause__ = None
six.reraise(StyleFunctionError, new_exc, tb)
finally:
# Remove circular reference.
# https://docs.python.org/2/library/sys.html#sys.exc_info
del tb
return transform_fn
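A minimal sketch of the returned processor, assuming the surrounding module's imports (sys, six, Nothing, StyleFunctionError, lgr) are available:

upper_proc = transform(str.upper)
print(upper_proc(None, 'ok'))          # -> 'OK'; the first argument is ignored
# a transform function that raises is re-raised as StyleFunctionError, e.g.:
# transform(int)(None, 'not-a-number')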
def vb_get_network_addresses(machine_name=None, machine=None, wait_for_pattern=None):
'''
TODO distinguish between private and public addresses
A valid machine_name or a machine is needed to make this work!
!!!
Guest prerequisite: GuestAddition
!!!
Thanks to Shrikant Havale for the StackOverflow answer http://stackoverflow.com/a/29335390
More information on guest properties: https://www.virtualbox.org/manual/ch04.html#guestadd-guestprops
@param machine_name:
@type machine_name: str
@param machine:
@type machine: IMachine
@return: All the IPv4 addresses we could get
@rtype: str[]
'''
if machine_name:
machine = vb_get_box().findMachine(machine_name)
ip_addresses = []
log.debug("checking for power on:")
if machine.state == _virtualboxManager.constants.MachineState_Running:
log.debug("got power on:")
#wait on an arbitrary named property
#for instance use a dhcp client script to set a property via VBoxControl guestproperty set dhcp_done 1
if wait_for_pattern and not machine.getGuestPropertyValue(wait_for_pattern):
log.debug("waiting for pattern:%s:", wait_for_pattern)
return None
_total_slots = machine.getGuestPropertyValue('/VirtualBox/GuestInfo/Net/Count')
#upon dhcp the net count drops to 0 and it takes some seconds for it to be set again
if not _total_slots:
log.debug("waiting for net count:%s:", wait_for_pattern)
return None
try:
total_slots = int(_total_slots)
for i in range(total_slots):
try:
address = machine.getGuestPropertyValue('/VirtualBox/GuestInfo/Net/{0}/V4/IP'.format(i))
if address:
ip_addresses.append(address)
except Exception as e:
log.debug(str(e))
except ValueError as e:
log.debug(str(e))
return None
log.debug("returning ip_addresses:%s:", ip_addresses)
return ip_addresses
def seq_2_StdStringVector(seq, vec=None):
"""Converts a python sequence<str> object to a :class:`tango.StdStringVector`
:param seq: the sequence of strings
:type seq: sequence<:py:obj:`str`>
:param vec: (optional, default is None) an :class:`tango.StdStringVector`
to be filled. If None is given, a new :class:`tango.StdStringVector`
is created
:return: a :class:`tango.StdStringVector` filled with the same contents as seq
:rtype: :class:`tango.StdStringVector`
"""
if vec is None:
if isinstance(seq, StdStringVector):
return seq
vec = StdStringVector()
if not isinstance(vec, StdStringVector):
raise TypeError('vec must be a tango.StdStringVector')
for e in seq:
vec.append(str(e))
return vec
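A brief sketch, assuming a working PyTango installation that provides StdStringVector; the device names are placeholders:

names = ['motor/axis/1', 'motor/axis/2']
vec = seq_2_StdStringVector(names)            # new StdStringVector with both entries

existing = StdStringVector()
seq_2_StdStringVector(('a', 'b'), existing)   # appends into the vector that was passed in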
def get_molo_comments(parser, token):
"""
Get a limited set of comments for a given object.
Defaults to a limit of 5. Setting the limit to -1 disables limiting.
Set the amount of comments to
usage:
{% get_molo_comments for object as variable_name %}
{% get_molo_comments for object as variable_name limit amount %}
{% get_molo_comments for object as variable_name limit amount child_limit amount %} # noqa
"""
keywords = token.contents.split()
if len(keywords) != 5 and len(keywords) != 7 and len(keywords) != 9:
raise template.TemplateSyntaxError(
"'%s' tag takes exactly 2,4 or 6 arguments" % (keywords[0],))
if keywords[1] != 'for':
raise template.TemplateSyntaxError(
"first argument to '%s' tag must be 'for'" % (keywords[0],))
if keywords[3] != 'as':
raise template.TemplateSyntaxError(
"first argument to '%s' tag must be 'as'" % (keywords[0],))
if len(keywords) > 5 and keywords[5] != 'limit':
raise template.TemplateSyntaxError(
"third argument to '%s' tag must be 'limit'" % (keywords[0],))
if len(keywords) == 7:
return GetMoloCommentsNode(keywords[2], keywords[4], keywords[6])
if len(keywords) > 7 and keywords[7] != 'child_limit':
raise template.TemplateSyntaxError(
"third argument to '%s' tag must be 'child_limit'"
% (keywords[0],))
if len(keywords) > 7:
return GetMoloCommentsNode(keywords[2], keywords[4],
keywords[6], keywords[8])
return GetMoloCommentsNode(keywords[2], keywords[4])
def user(self, login=None):
"""Returns a User object for the specified login name if
provided. If no login name is provided, this will return a User
object for the authenticated user.
:param str login: (optional)
:returns: :class:`User <github3.users.User>`
"""
if login:
url = self._build_url('users', login)
else:
url = self._build_url('user')
json = self._json(self._get(url), 200)
return User(json, self._session) if json else None
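A hypothetical usage sketch with github3.py; `gh` stands in for an authenticated GitHub session object:

me = gh.user()                 # the authenticated user
someone = gh.user('octocat')   # a specific login
if someone is not None:
    print(someone)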
def dump(self):
"""
Dump (return) a dict representation of the GameObject. This is a Python
dict and is NOT serialized. NB: the answer (a DigitWord object) and the
mode (a GameMode object) are converted to python objects of a list and
dict respectively.
:return: python <dict> of the GameObject as detailed above.
"""
return {
"key": self._key,
"status": self._status,
"ttl": self._ttl,
"answer": self._answer.word,
"mode": self._mode.dump(),
"guesses_made": self._guesses_made
}
def get_surface_as_bytes(self, order=None):
"""Returns the surface area as a bytes encoded RGB image buffer.
Subclass should override if there is a more efficient conversion
than from generating a numpy array first.
"""
arr8 = self.get_surface_as_array(order=order)
return arr8.tobytes(order='C')
def keyword(self) -> Tuple[Optional[str], str]:
"""Parse a YANG statement keyword.
Raises:
EndOfInput: If past the end of input.
UnexpectedInput: If no syntactically correct keyword is found.
"""
i1 = self.yang_identifier()
if self.peek() == ":":
self.offset += 1
i2 = self.yang_identifier()
return (i1, i2)
return (None, i1)
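An illustrative sketch only: the method consumes the parser's internal buffer, so the constructor shown here is an assumption about the surrounding parser API, not from the source:

p = YangParser("ietf-inet-types:ipv4-address")              # hypothetical parser over a text buffer
assert p.keyword() == ("ietf-inet-types", "ipv4-address")   # prefixed (extension) keyword

p = YangParser("container foo { ... }")
assert p.keyword() == (None, "container")                   # plain YANG keyword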
def get_episodes(self, series_id, **kwargs):
"""All episodes for a given series.
Paginated with 100 results per page.
.. warning::
authorization token required
The following search arguments currently supported:
* airedSeason
* airedEpisode
* imdbId
* dvdSeason
* dvdEpisode
* absoluteNumber
* page
:param str series_id: id of series as found on thetvdb
:parm kwargs: keyword args to search/filter episodes by (optional)
:returns: series episode records
:rtype: list
"""
params = {'page': 1}
for arg, val in six.iteritems(kwargs):
if arg in EPISODES_BY:
params[arg] = val
return self._exec_request(
'series',
path_args=[series_id, 'episodes', 'query'], params=params)['data']
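A hypothetical usage sketch; `client` stands in for an authenticated TheTVDB API client exposing this method, and the series id is an arbitrary example:

episodes = client.get_episodes('328724', airedSeason=1, page=1)
for episode in episodes:
    print(episode)
# keyword arguments not listed in EPISODES_BY are silently dropped rather than rejected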
def _fourier(self):
""" 1 side Fourier transform and scale by dt all waveforms in catalog """
freq_bin_upper = 2000
freq_bin_lower = 40
fs = self._metadata['fs']
Y_transformed = {}
for key in self.Y_dict.keys():
# normalize by fs, bins have units strain/Hz
fs = self._metadata["fs"]
Y_transformed[key] = (1./fs)*np.fft.fft(self.Y_dict[key])[freq_bin_lower:freq_bin_upper]
self.Y_transformed = Y_transformed
# add in transform metadata
self._metadata["dF"] = 1./self._metadata["T"]
# because we are in the fourier domain, we will need the psd
self.psd = load_psd()[freq_bin_lower:freq_bin_upper]
dF = self._metadata['dF']
self.sigma = convert_psd_to_sigma(self.psd, dF)
def callback(self, request, **kwargs):
"""
Called from the Service when the user accept to activate it
"""
try:
client = self.get_evernote_client()
# finally we save the user auth token
# As we already stored the object ServicesActivated
# from the UserServiceCreateView now we update the same
# object to the database so :
# 1) we get the previous object
us = UserService.objects.get(user=request.user, name=ServicesActivated.objects.get(name='ServiceEvernote'))
# 2) then get the token
us.token = client.get_access_token(request.session['oauth_token'], request.session['oauth_token_secret'],
request.GET.get('oauth_verifier', ''))
# 3) and save everything
us.save()
except KeyError:
return '/'
return 'evernote/callback.html'
def bresenham(x1, y1, x2, y2):
"""
Return a list of points in a bresenham line.
Implementation hastily copied from RogueBasin.
Returns:
List[Tuple[int, int]]: A list of (x, y) points,
including both the start and end-points.
"""
points = []
issteep = abs(y2-y1) > abs(x2-x1)
if issteep:
x1, y1 = y1, x1
x2, y2 = y2, x2
rev = False
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
rev = True
deltax = x2 - x1
deltay = abs(y2-y1)
error = int(deltax / 2)
y = y1
ystep = None
if y1 < y2:
ystep = 1
else:
ystep = -1
for x in range(x1, x2 + 1):
if issteep:
points.append((y, x))
else:
points.append((x, y))
error -= deltay
if error < 0:
y += ystep
error += deltax
# Reverse the list if the coordinates were reversed
if rev:
points.reverse()
return points
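A self-contained usage example; the expected output follows directly from the algorithm above:

points = bresenham(0, 0, 5, 2)
print(points)
# [(0, 0), (1, 0), (2, 1), (3, 1), (4, 2), (5, 2)]  -- both endpoints are included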
def upload_file(self, path=None, stream=None, name=None, **kwargs):
"""
Uploads file to WeedFS
I takes either path or stream and name and upload it
to WeedFS server.
Returns fid of the uploaded file.
:param string path:
:param string stream:
:param string name:
:rtype: string or None
"""
params = "&".join(["%s=%s" % (k, v) for k, v in kwargs.items()])
url = "http://{master_addr}:{master_port}/dir/assign{params}".format(
master_addr=self.master_addr,
master_port=self.master_port,
params="?" + params if params else ''
)
data = json.loads(self.conn.get_data(url))
if data.get("error") is not None:
return None
post_url = "http://{url}/{fid}".format(
url=data['publicUrl' if self.use_public_url else 'url'],
fid=data['fid']
)
if path is not None:
filename = os.path.basename(path)
with open(path, "rb") as file_stream:
res = self.conn.post_file(post_url, filename, file_stream)
# we have file like object and filename
elif stream is not None and name is not None:
res = self.conn.post_file(post_url, name, stream)
else:
raise ValueError(
"If `path` is None then *both* `stream` and `name` must not"
" be None ")
response_data = json.loads(res)
if "size" in response_data:
return data.get('fid')
return None
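A hypothetical usage sketch; `w` stands in for a WeedFS client instance already configured with the master address and port, and the file names are placeholders:

fid = w.upload_file(path='photo.jpg')        # upload straight from a filesystem path
if fid is None:
    print('upload failed')

from io import BytesIO
fid2 = w.upload_file(stream=BytesIO(b'hello'), name='hello.txt')   # upload from a file-like stream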
def _lemmatise_roman_numerals(self, form, pos=False, get_lemma_object=False):
""" Lemmatise un mot f si c'est un nombre romain
:param form: Mot à lemmatiser
:param pos: Récupère la POS
:param get_lemma_object: Retrieve Lemma object instead of string representation of lemma
"""
if estRomain(form):
_lemma = Lemme(
cle=form, graphie_accentuee=form, graphie=form, parent=self, origin=0, pos="a",
modele=self.modele("inv")
)
yield Lemmatiseur.format_result(
form=form,
lemma=_lemma,
with_pos=pos,
raw_obj=get_lemma_object
)
if form.upper() != form:
yield from self._lemmatise_roman_numerals(form.upper(), pos=pos, get_lemma_object=get_lemma_object)
def configs_in(src_dir):
"""Enumerate all configs in src_dir"""
for filename in files_in_dir(src_dir, 'json'):
with open(os.path.join(src_dir, filename), 'rb') as in_f:
yield json.load(in_f)
def compute_bbox_with_margins(margin, x, y):
'Helper function to compute bounding box for the plot'
# set margins
pos = np.asarray((x, y))
minxy, maxxy = pos.min(axis=1), pos.max(axis=1)
xy1 = minxy - margin*(maxxy - minxy)
xy2 = maxxy + margin*(maxxy - minxy)
return tuple(xy1), tuple(xy2)
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.minicard:
if self.use_timer:
start_time = time.process_time()  # time.clock() was removed in Python 3.8
# saving default SIGINT handler
def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL)
self.status = pysolvers.minicard_solve(self.minicard, assumptions)
# recovering default SIGINT handler
def_sigint_handler = signal.signal(signal.SIGINT, def_sigint_handler)
if self.use_timer:
self.call_time = time.process_time() - start_time
self.accu_time += self.call_time
return self.status
def supported_tags(self, interpreter=None, force_manylinux=True):
"""Returns a list of supported PEP425 tags for the current platform."""
if interpreter and not self.is_extended:
# N.B. If we don't get an extended platform specifier, we generate
# all possible ABI permutations to mimic earlier pex version
# behavior and make cross-platform resolution more intuitive.
return _get_supported_for_any_abi(
platform=self.platform,
impl=interpreter.identity.abbr_impl,
version=interpreter.identity.impl_ver,
force_manylinux=force_manylinux
)
else:
return _get_supported(
platform=self.platform,
impl=self.impl,
version=self.version,
abi=self.abi,
force_manylinux=force_manylinux
)
def unqueue(self, timeout=10, should_wait=False):
"""Unqueue all the received ensime responses for a given file."""
start, now = time.time(), time.time()
wait = self.queue.empty() and should_wait
while (not self.queue.empty() or wait) and (now - start) < timeout:
if wait and self.queue.empty():
time.sleep(0.25)
now = time.time()
else:
result = self.queue.get(False)
self.log.debug('unqueue: result received\n%s', result)
if result and result != "nil":
wait = None
# Restart timeout
start, now = time.time(), time.time()
_json = json.loads(result)
# Watch out, it may not have callId
call_id = _json.get("callId")
if _json["payload"]:
self.handle_incoming_response(call_id, _json["payload"])
else:
self.log.debug('unqueue: nil or None received')
if (now - start) >= timeout:
self.log.warning('unqueue: no reply from server for %ss', timeout)
def _check_all_devices_in_sync(self):
'''Wait until all devices have failover status of 'In Sync'.
:raises: UnexpectedClusterState
'''
if len(self._get_devices_by_failover_status('In Sync')) != \
len(self.devices):
msg = "Expected all devices in group to have 'In Sync' status."
raise UnexpectedDeviceGroupState(msg)
def get_top_sentences(self):
''' getter '''
if isinstance(self.__top_sentences, int) is False:
raise TypeError("The type of __top_sentences must be int.")
return self.__top_sentences
def create_fork(self, repo):
"""
:calls: `POST /repos/:owner/:repo/forks <http://developer.github.com/v3/repos/forks>`_
:param repo: :class:`github.Repository.Repository`
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(repo, github.Repository.Repository), repo
url_parameters = {
"org": self.login,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/repos/" + repo.owner.login + "/" + repo.name + "/forks",
parameters=url_parameters
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def thresholdBlocks(self, blocks, recall_weight=1.5): # pragma: nocover
"""
Returns the threshold that maximizes the expected F score, a
weighted average of precision and recall for a sample of
blocked data.
Arguments:
blocks -- Sequence of tuples of records, where each tuple is a
set of records covered by a blocking predicate
recall_weight -- Sets the tradeoff between precision and
recall. I.e. if you care twice as much about
recall as you do precision, set recall_weight
to 2.
"""
candidate_records = itertools.chain.from_iterable(self._blockedPairs(blocks))
probability = core.scoreDuplicates(candidate_records,
self.data_model,
self.classifier,
self.num_cores)['score']
probability = probability.copy()
probability.sort()
probability = probability[::-1]
expected_dupes = numpy.cumsum(probability)
recall = expected_dupes / expected_dupes[-1]
precision = expected_dupes / numpy.arange(1, len(expected_dupes) + 1)
score = recall * precision / (recall + recall_weight ** 2 * precision)
i = numpy.argmax(score)
logger.info('Maximum expected recall and precision')
logger.info('recall: %2.3f', recall[i])
logger.info('precision: %2.3f', precision[i])
logger.info('With threshold: %2.3f', probability[i])
return probability[i]
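A hypothetical usage sketch with a trained dedupe-style matcher; `deduper` and `blocked_data` are assumptions standing in for a fitted model and its blocked record pairs:

threshold = deduper.thresholdBlocks(blocked_data, recall_weight=2.0)  # weight recall twice as much as precision
print('chosen score threshold:', threshold)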
def aggregate(self, query: Optional[dict] = None,
group: Optional[dict] = None,
order_by: Optional[tuple] = None) -> List[IModel]:
"""Get aggregated results
: param query: Rulez based query
: param group: Grouping structure
: param order_by: Tuple of ``(field, order)`` where ``order`` is
``'asc'`` or ``'desc'``
: todo: Grouping structure need to be documented
"""
raise NotImplementedError
def get_season_stats(self, season_key):
"""
Calling Season Stats API.
Arg:
season_key: key of the season
Return:
json data
"""
season_stats_url = self.api_path + "season/" + season_key + "/stats/"
response = self.get_response(season_stats_url)
return response
def import_generators(self, session, debug=False):
"""
Imports renewable (res) and conventional (conv) generators
Args:
session : sqlalchemy.orm.session.Session
Database session
debug: If True, information is printed during process
Notes:
Connection of generators is done later on in NetworkDing0's method connect_generators()
"""
def import_res_generators():
"""Imports renewable (res) generators"""
# build query
generators_sqla = session.query(
self.orm['orm_re_generators'].columns.id,
self.orm['orm_re_generators'].columns.subst_id,
self.orm['orm_re_generators'].columns.la_id,
self.orm['orm_re_generators'].columns.mvlv_subst_id,
self.orm['orm_re_generators'].columns.electrical_capacity,
self.orm['orm_re_generators'].columns.generation_type,
self.orm['orm_re_generators'].columns.generation_subtype,
self.orm['orm_re_generators'].columns.voltage_level,
self.orm['orm_re_generators'].columns.w_id,
func.ST_AsText(func.ST_Transform(
self.orm['orm_re_generators'].columns.rea_geom_new, srid)).label('geom_new'),
func.ST_AsText(func.ST_Transform(
self.orm['orm_re_generators'].columns.geom, srid)).label('geom')
). \
filter(
self.orm['orm_re_generators'].columns.subst_id.in_(list(mv_grid_districts_dict))). \
filter(self.orm['orm_re_generators'].columns.voltage_level.in_([4, 5, 6, 7])). \
filter(self.orm['version_condition_re'])
# read data from db
generators = pd.read_sql_query(generators_sqla.statement,
session.bind,
index_col='id')
# define generators with unknown subtype as 'unknown'
generators.loc[generators[
'generation_subtype'].isnull(),
'generation_subtype'] = 'unknown'
for id_db, row in generators.iterrows():
# treat generators' geom:
# use geom_new (relocated genos from data processing)
# otherwise use original geom from EnergyMap
if row['geom_new']:
geo_data = wkt_loads(row['geom_new'])
elif not row['geom_new']:
geo_data = wkt_loads(row['geom'])
logger.warning(
'Generator {} has no geom_new entry,'
'EnergyMap\'s geom entry will be used.'.format(
id_db))
# if no geom is available at all, skip generator
elif not row['geom']:
#geo_data =
logger.error('Generator {} has no geom entry either'
'and will be skipped.'.format(id_db))
continue
# look up MV grid
mv_grid_district_id = row['subst_id']
mv_grid = mv_grid_districts_dict[mv_grid_district_id].mv_grid
# create generator object
if row['generation_type'] in ['solar', 'wind']:
generator = GeneratorFluctuatingDing0(
id_db=id_db,
mv_grid=mv_grid,
capacity=row['electrical_capacity'],
type=row['generation_type'],
subtype=row['generation_subtype'],
v_level=int(row['voltage_level']),
weather_cell_id=row['w_id'])
else:
generator = GeneratorDing0(
id_db=id_db,
mv_grid=mv_grid,
capacity=row['electrical_capacity'],
type=row['generation_type'],
subtype=row['generation_subtype'],
v_level=int(row['voltage_level']))
# MV generators
if generator.v_level in [4, 5]:
generator.geo_data = geo_data
mv_grid.add_generator(generator)
# LV generators
elif generator.v_level in [6, 7]:
# look up MV-LV substation id
mvlv_subst_id = row['mvlv_subst_id']
# if there's a LVGD id
if mvlv_subst_id and not isnan(mvlv_subst_id):
# assume that given LA exists
try:
# get LVGD
lv_station = lv_stations_dict[mvlv_subst_id]
lv_grid_district = lv_station.grid.grid_district
generator.lv_grid = lv_station.grid
# set geom (use original from db)
generator.geo_data = geo_data
# if LA/LVGD does not exist, choose random LVGD and move generator to station of LVGD
# this occurs due to exclusion of LA with peak load < 1kW
except:
lv_grid_district = random.choice(list(lv_grid_districts_dict.values()))
generator.lv_grid = lv_grid_district.lv_grid
generator.geo_data = lv_grid_district.lv_grid.station().geo_data
logger.warning('Generator {} cannot be assigned to '
'non-existent LV Grid District and was '
'allocated to a random LV Grid District ({}).'.format(
repr(generator), repr(lv_grid_district)))
pass
else:
lv_grid_district = random.choice(list(lv_grid_districts_dict.values()))
generator.lv_grid = lv_grid_district.lv_grid
generator.geo_data = lv_grid_district.lv_grid.station().geo_data
logger.warning('Generator {} has no la_id and was '
'assigned to a random LV Grid District ({}).'.format(
repr(generator), repr(lv_grid_district)))
generator.lv_load_area = lv_grid_district.lv_load_area
lv_grid_district.lv_grid.add_generator(generator)
def import_conv_generators():
"""Imports conventional (conv) generators"""
# build query
generators_sqla = session.query(
self.orm['orm_conv_generators'].columns.id,
self.orm['orm_conv_generators'].columns.subst_id,
self.orm['orm_conv_generators'].columns.name,
self.orm['orm_conv_generators'].columns.capacity,
self.orm['orm_conv_generators'].columns.fuel,
self.orm['orm_conv_generators'].columns.voltage_level,
func.ST_AsText(func.ST_Transform(
self.orm['orm_conv_generators'].columns.geom, srid)).label('geom')). \
filter(
self.orm['orm_conv_generators'].columns.subst_id.in_(list(mv_grid_districts_dict))). \
filter(self.orm['orm_conv_generators'].columns.voltage_level.in_([4, 5, 6])). \
filter(self.orm['version_condition_conv'])
# read data from db
generators = pd.read_sql_query(generators_sqla.statement,
session.bind,
index_col='id')
for id_db, row in generators.iterrows():
# look up MV grid
mv_grid_district_id = row['subst_id']
mv_grid = mv_grid_districts_dict[mv_grid_district_id].mv_grid
# create generator object
generator = GeneratorDing0(id_db=id_db,
name=row['name'],
geo_data=wkt_loads(row['geom']),
mv_grid=mv_grid,
capacity=row['capacity'],
type=row['fuel'],
subtype='unknown',
v_level=int(row['voltage_level']))
# add generators to graph
if generator.v_level in [4, 5]:
mv_grid.add_generator(generator)
# there's only one conv. geno with v_level=6 -> connect to MV grid
elif generator.v_level in [6]:
generator.v_level = 5
mv_grid.add_generator(generator)
# get ding0s' standard CRS (SRID)
srid = str(int(cfg_ding0.get('geo', 'srid')))
# get predefined random seed and initialize random generator
seed = int(cfg_ding0.get('random', 'seed'))
random.seed(a=seed)
# build dicts to map MV grid district and Load Area ids to related objects
mv_grid_districts_dict,\
lv_load_areas_dict,\
lv_grid_districts_dict,\
lv_stations_dict = self.get_mvgd_lvla_lvgd_obj_from_id()
# import renewable generators
import_res_generators()
# import conventional generators
import_conv_generators()
logger.info('=====> Generators imported')
def plot_mixture(mixture, i=0, j=1, center_style=dict(s=0.15),
cmap='nipy_spectral', cutoff=0.0, ellipse_style=dict(alpha=0.3),
solid_edge=True, visualize_weights=False):
'''Plot the (Gaussian) components of the ``mixture`` density as
one-sigma ellipses in the ``(i,j)`` plane.
:param center_style:
If a non-empty ``dict``, plot mean value with the style passed to ``scatter``.
:param cmap:
The color map to which components are mapped in order to
choose their face color. It is unaffected by the
``cutoff``. The meaning depends on ``visualize_weights``.
:param cutoff:
Ignore components whose weight is below the ``cut off``.
:param ellipse_style:
Passed on to define the properties of the ``Ellipse``.
:param solid_edge:
Draw the edge of the ellipse as solid opaque line.
:param visualize_weights:
Colorize the components according to their weights if ``True``.
One can do `plt.colorbar()` after this function and the bar allows to read off the weights.
If ``False``, coloring is based on the component index and the total number of components.
This option makes it easier to track components by assigning them the same color in subsequent calls to this function.
'''
# imports inside the function because then "ImportError" is raised on
# systems without 'matplotlib' only when 'plot_mixture' is called
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
from matplotlib.cm import get_cmap
assert i >= 0 and j >= 0, 'Invalid submatrix specification (%d, %d)' % (i, j)
assert i != j, 'Identical dimension given: i=j=%d' % i
assert mixture.dim >= 2, '1D plot not supported'
cmap = get_cmap(name=cmap)
if visualize_weights:
# colors according to weight
renormalized_component_weights = np.array(mixture.weights)
colors = [cmap(k) for k in renormalized_component_weights]
else:
# colors according to index
colors = [cmap(k) for k in np.linspace(0, _max_color, len(mixture.components))]
mask = mixture.weights >= cutoff
# plot component means
means = np.array([c.mu for c in mixture.components])
x_values = means.T[i]
y_values = means.T[j]
for k, w in enumerate(mixture.weights):
# skip components by hand to retain consistent coloring
if w < cutoff:
continue
cov = mixture.components[k].sigma
submatrix = np.array([[cov[i,i], cov[i,j]], \
[cov[j,i], cov[j,j]]])
# for idea, check
# 'Combining error ellipses' by John E. Davis
correlation = np.array([[1.0, cov[i,j] / np.sqrt(cov[i,i] * cov[j,j])], [0.0, 1.0]])
correlation[1,0] = correlation[0,1]
assert abs(correlation[0,1]) <= 1, 'Invalid component %d with correlation %g' % (k, correlation[0, 1])
ew, ev = np.linalg.eigh(submatrix)
assert ew.min() > 0, 'Nonpositive eigenvalue in component %d: %s' % (k, ew)
# rotation angle of major axis with x-axis
if submatrix[0,0] == submatrix[1,1]:
theta = np.sign(submatrix[0,1]) * np.pi / 4.
else:
theta = 0.5 * np.arctan( 2 * submatrix[0,1] / (submatrix[1,1] - submatrix[0,0]))
# put larger eigen value on y'-axis
height = np.sqrt(ew.max())
width = np.sqrt(ew.min())
# but change orientation of coordinates if the other is larger
if submatrix[0,0] > submatrix[1,1]:
height = np.sqrt(ew.min())
width = np.sqrt(ew.max())
# change sign to rotate in right direction
angle = -theta * 180 / np.pi
# copy keywords but override some
ellipse_style_clone = dict(ellipse_style)
# overwrite facecolor
ellipse_style_clone['facecolor'] = colors[k]
ax = plt.gca()
# need full width/height
e = Ellipse(xy=(x_values[k], y_values[k]),
width=2*width, height=2*height, angle=angle,
**ellipse_style_clone)
ax.add_patch(e)
if solid_edge:
ellipse_style_clone['facecolor'] = 'none'
ellipse_style_clone['edgecolor'] = colors[k]
ellipse_style_clone['alpha'] = 1
ax.add_patch(Ellipse(xy=(x_values[k], y_values[k]),
width=2*width, height=2*height, angle=angle,
**ellipse_style_clone))
if center_style:
plt.scatter(x_values[mask], y_values[mask], **center_style)
if visualize_weights:
# to enable plt.colorbar()
mappable = plt.gci()
mappable.set_array(mixture.weights)
mappable.set_cmap(cmap)
def addToLayout(self, analysis, position=None):
""" Adds the analysis passed in to the worksheet's layout
"""
# TODO Redux
layout = self.getLayout()
container_uid = self.get_container_for(analysis)
if IRequestAnalysis.providedBy(analysis) and \
not IDuplicateAnalysis.providedBy(analysis):
container_uids = map(lambda slot: slot['container_uid'], layout)
if container_uid in container_uids:
position = [int(slot['position']) for slot in layout if
slot['container_uid'] == container_uid][0]
elif not position:
used_positions = [0, ] + [int(slot['position']) for slot in
layout]
position = [pos for pos in range(1, max(used_positions) + 2)
if pos not in used_positions][0]
an_type = self.get_analysis_type(analysis)
self.setLayout(layout + [{'position': position,
'type': an_type,
'container_uid': container_uid,
'analysis_uid': api.get_uid(analysis)}, ])
def reset(name, soft=False, call=None):
'''
To reset a VM using its name
.. note::
If ``soft=True`` then issues a command to the guest operating system
asking it to perform a reboot. Otherwise hypervisor will terminate VM and start it again.
Default is soft=False
For ``soft=True`` vmtools should be installed on guest system.
CLI Example:
.. code-block:: bash
salt-cloud -a reset vmname
salt-cloud -a reset vmname soft=True
'''
if call != 'action':
raise SaltCloudSystemExit(
'The reset action must be called with '
'-a or --action.'
)
vm_properties = [
"name",
"summary.runtime.powerState"
]
vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties)
for vm in vm_list:
if vm["name"] == name:
if vm["summary.runtime.powerState"] == "suspended" or vm["summary.runtime.powerState"] == "poweredOff":
ret = 'cannot reset in suspended/powered off state'
log.info('VM %s %s', name, ret)
return ret
try:
log.info('Resetting VM %s', name)
if soft:
vm["object"].RebootGuest()
else:
task = vm["object"].ResetVM_Task()
salt.utils.vmware.wait_for_task(task, name, 'reset')
except Exception as exc:
log.error(
'Error while resetting VM %s: %s',
name, exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return 'failed to reset'
return 'reset'
def stopped(name, connection=None, username=None, password=None):
'''
Stops a VM by shutting it down nicely.
.. versionadded:: 2016.3.0
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. code-block:: yaml
domain_name:
virt.stopped
'''
return _virt_call(name, 'shutdown', 'stopped', "Machine has been shut down",
connection=connection, username=username, password=password)
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
_create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, fig) | Create a new figure manager instance |
def on_service_arrival(self, svc_ref):
"""
Called when a service has been registered in the framework
:param svc_ref: A service reference
"""
with self._lock:
new_ranking = svc_ref.get_property(SERVICE_RANKING)
if self._current_ranking is not None:
if new_ranking > self._current_ranking:
# New service with better ranking: use it
self._pending_ref = svc_ref
old_ref = self.reference
old_value = self._value
# Clean up like for a departure
self._current_ranking = None
self._value = None
self.reference = None
# Unbind (new binding will be done afterwards)
self._ipopo_instance.unbind(self, old_value, old_ref)
else:
# No ranking yet: inject the service
self.reference = svc_ref
self._value = self._context.get_service(svc_ref)
self._current_ranking = new_ranking
self._pending_ref = None
self._ipopo_instance.bind(self, self._value, self.reference) | Called when a service has been registered in the framework
:param svc_ref: A service reference |
def sg_expand_dims(tensor, opt):
r"""Inserts a new axis.
See tf.expand_dims() in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : Dimension to expand. Default is -1.
name: If provided, it replaces current tensor's name.
Returns:
A `Tensor`.
"""
opt += tf.sg_opt(axis=-1)
return tf.expand_dims(tensor, opt.axis, name=opt.name) | r"""Inserts a new axis.
See tf.expand_dims() in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : Dimension to expand. Default is -1.
name: If provided, it replaces current tensor's name.
Returns:
A `Tensor`. |
async def purgeQueues(self, *args, **kwargs):
"""
Purge the SQS queues
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["purgeQueues"], *args, **kwargs) | Purge the SQS queues
This method is only for debugging the ec2-manager
This method is ``experimental`` |
def updateVersions(region="us-east-1", table="credential-store"):
'''
do a full-table scan of the credential-store,
and update the version format of every credential if it is an integer
'''
dynamodb = boto3.resource('dynamodb', region_name=region)
secrets = dynamodb.Table(table)
response = secrets.scan(ProjectionExpression="#N, version, #K, contents, hmac",
ExpressionAttributeNames={"#N": "name", "#K": "key"})
items = response["Items"]
for old_item in items:
if isInt(old_item['version']):
new_item = copy.copy(old_item)
new_item['version'] = credstash.paddedInt(new_item['version'])
if new_item['version'] != old_item['version']:
secrets.put_item(Item=new_item)
secrets.delete_item(Key={'name': old_item['name'], 'version': old_item['version']})
else:
print "Skipping item: %s, %s" % (old_item['name'], old_item['version']) | do a full-table scan of the credential-store,
and update the version format of every credential if it is an integer |
def extract_params(raw):
"""Extract parameters and return them as a list of 2-tuples.
Will successfully extract parameters from urlencoded query strings,
dicts, or lists of 2-tuples. Empty strings/dicts/lists will return an
empty list of parameters. Any other input will result in a return
value of None.
"""
if isinstance(raw, (bytes, unicode_type)):
try:
params = urldecode(raw)
except ValueError:
params = None
elif hasattr(raw, '__iter__'):
try:
dict(raw)
except ValueError:
params = None
except TypeError:
params = None
else:
params = list(raw.items() if isinstance(raw, dict) else raw)
params = decode_params_utf8(params)
else:
params = None
return params | Extract parameters and return them as a list of 2-tuples.
Will successfully extract parameters from urlencoded query strings,
dicts, or lists of 2-tuples. Empty strings/dicts/lists will return an
empty list of parameters. Any other input will result in a return
value of None. |
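A quick usage sketch of the behaviour described above (assuming this is oauthlib.common.extract_params; the urldecode and decode_params_utf8 helpers it relies on live in that same module):
from oauthlib.common import extract_params

# Illustrative only; each call returns a list of 2-tuples, or None for unsupported input.
print(extract_params("a=1&b=2"))       # [('a', '1'), ('b', '2')]
print(extract_params({"a": "1"}))      # [('a', '1')]
print(extract_params([("a", "1")]))    # [('a', '1')]
print(extract_params(42))              # None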
def get_ticket_results(mgr, ticket_id, update_count=1):
"""Get output about a ticket.
:param integer id: the ticket ID
:param integer update_count: number of entries to retrieve from ticket
:returns: a KeyValue table containing the details of the ticket
"""
ticket = mgr.get_ticket(ticket_id)
table = formatting.KeyValueTable(['name', 'value'])
table.align['name'] = 'r'
table.align['value'] = 'l'
table.add_row(['id', ticket['id']])
table.add_row(['title', ticket['title']])
table.add_row(['priority', PRIORITY_MAP[ticket.get('priority', 0)]])
if ticket.get('assignedUser'):
user = ticket['assignedUser']
table.add_row([
'user',
"%s %s" % (user.get('firstName'), user.get('lastName')),
])
table.add_row(['status', ticket['status']['name']])
table.add_row(['created', ticket.get('createDate')])
table.add_row(['edited', ticket.get('lastEditDate')])
# Only show up to the specified update count
updates = ticket.get('updates', [])
count = min(len(updates), update_count)
count_offset = len(updates) - count + 1 # Display as one-indexed
for i, update in enumerate(updates[-count:]):
wrapped_entry = ""
# Add user details (fields are different between employee and users)
editor = update.get('editor')
if editor:
if editor.get('displayName'):
wrapped_entry += "By %s (Employee)\n" % (editor['displayName'])
if editor.get('firstName'):
wrapped_entry += "By %s %s\n" % (editor.get('firstName'),
editor.get('lastName'))
# NOTE(kmcdonald): Windows new-line characters need to be stripped out
wrapped_entry += click.wrap_text(update['entry'].replace('\r', ''))
table.add_row(['update %s' % (count_offset + i,), wrapped_entry])
return table | Get output about a ticket.
    :param integer ticket_id: the ticket ID
:param integer update_count: number of entries to retrieve from ticket
:returns: a KeyValue table containing the details of the ticket |
def parse_methodcall(self, tup_tree):
"""
::
<!ELEMENT METHODCALL ((LOCALCLASSPATH | LOCALINSTANCEPATH),
PARAMVALUE*)>
<!ATTLIST METHODCALL
%CIMName;>
"""
self.check_node(tup_tree, 'METHODCALL', ('NAME',), (),
('LOCALCLASSPATH', 'LOCALINSTANCEPATH', 'PARAMVALUE'))
path = self.list_of_matching(tup_tree,
('LOCALCLASSPATH', 'LOCALINSTANCEPATH'))
if not path:
raise CIMXMLParseError(
_format("Element {0!A} missing a required child element "
"'LOCALCLASSPATH' or 'LOCALINSTANCEPATH'",
name(tup_tree)),
conn_id=self.conn_id)
if len(path) > 1:
raise CIMXMLParseError(
_format("Element {0!A} has too many child elements {1!A} "
"(allowed is one of 'LOCALCLASSPATH' or "
"'LOCALINSTANCEPATH')", name(tup_tree), path),
conn_id=self.conn_id)
path = path[0]
params = self.list_of_matching(tup_tree, ('PARAMVALUE',))
return (name(tup_tree), attrs(tup_tree), path, params) | ::
<!ELEMENT METHODCALL ((LOCALCLASSPATH | LOCALINSTANCEPATH),
PARAMVALUE*)>
<!ATTLIST METHODCALL
%CIMName;> |
def change_password(self, id_user, user_current_password, password):
"""Change password of User from by the identifier.
:param id_user: Identifier of the User. Integer value and greater than zero.
        :param user_current_password: Current password of the user.
        :param password: New password for the user.
:return: None
:raise UsuarioNaoExisteError: User not registered.
:raise InvalidParameterError: The identifier of User is null and invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_user):
raise InvalidParameterError(
u'The identifier of User is invalid or was not informed.')
if password is None or password == "":
raise InvalidParameterError(
u'A nova senha do usuário é inválida ou não foi informada')
user_map = dict()
user_map['user_id'] = id_user
user_map['password'] = password
code, xml = self.submit(
{'user': user_map}, 'POST', 'user-change-pass/')
return self.response(code, xml) | Change password of User from by the identifier.
:param id_user: Identifier of the User. Integer value and greater than zero.
        :param user_current_password: Current password of the user.
        :param password: New password for the user.
:return: None
:raise UsuarioNaoExisteError: User not registered.
:raise InvalidParameterError: The identifier of User is null and invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response. |
def text(self):
"""
String formed by concatenating the text of each run in the paragraph.
Tabs and line breaks in the XML are mapped to ``\\t`` and ``\\n``
characters respectively.
Assigning text to this property causes all existing paragraph content
to be replaced with a single run containing the assigned text.
A ``\\t`` character in the text is mapped to a ``<w:tab/>`` element
and each ``\\n`` or ``\\r`` character is mapped to a line break.
Paragraph-level formatting, such as style, is preserved. All
run-level formatting, such as bold or italic, is removed.
"""
text = ''
for run in self.runs:
text += run.text
return text | String formed by concatenating the text of each run in the paragraph.
Tabs and line breaks in the XML are mapped to ``\\t`` and ``\\n``
characters respectively.
Assigning text to this property causes all existing paragraph content
to be replaced with a single run containing the assigned text.
A ``\\t`` character in the text is mapped to a ``<w:tab/>`` element
and each ``\\n`` or ``\\r`` character is mapped to a line break.
Paragraph-level formatting, such as style, is preserved. All
run-level formatting, such as bold or italic, is removed. |
def draw_tiling(coord_generator, filename):
"""Given a coordinate generator and a filename, render those coordinates in
a new image and save them to the file."""
im = Image.new('L', size=(CANVAS_WIDTH, CANVAS_HEIGHT))
for shape in coord_generator(CANVAS_WIDTH, CANVAS_HEIGHT):
ImageDraw.Draw(im).polygon(shape, outline='white')
im.save(filename) | Given a coordinate generator and a filename, render those coordinates in
a new image and save them to the file. |
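A minimal coordinate generator that draw_tiling() could consume; illustrative only, and it assumes the Pillow import and the CANVAS_WIDTH/CANVAS_HEIGHT constants used above are defined in the module.
def square_grid(width, height, step=40):
    # Yield one square polygon per grid cell, as a list of (x, y) corner points.
    for x in range(0, width, step):
        for y in range(0, height, step):
            yield [(x, y), (x + step, y), (x + step, y + step), (x, y + step)]

# draw_tiling(square_grid, "grid.png")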
def pumper(html_generator):
"""
Pulls HTML from source generator,
feeds it to the parser and yields
DOM elements.
"""
source = html_generator()
parser = etree.HTMLPullParser(
events=('start', 'end'),
remove_comments=True
)
while True:
for element in parser.read_events():
yield element
try:
parser.feed(next(source))
except StopIteration:
# forces close of any unclosed tags
parser.feed('</html>')
for element in parser.read_events():
yield element
break | Pulls HTML from source generator,
feeds it to the parser and yields
DOM elements. |
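A small usage sketch of the pumper() defined above (assumes lxml is installed; the chunk source below is made up for illustration):
def chunked_html():
    # Pretend these pieces arrive from a network socket in arbitrary chunks.
    yield "<html><body><p>hello</p>"
    yield "<p>wor"
    yield "ld</p></body>"

for event, element in pumper(chunked_html):
    if event == "end" and element.tag == "p":
        print(element.text)    # prints "hello", then "world"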
def get_aggregations(metrics_dict, saved_metrics, adhoc_metrics=[]):
"""
Returns a dictionary of aggregation metric names to aggregation json objects
:param metrics_dict: dictionary of all the metrics
:param saved_metrics: list of saved metric names
:param adhoc_metrics: list of adhoc metric names
:raise SupersetException: if one or more metric names are not aggregations
"""
aggregations = OrderedDict()
invalid_metric_names = []
for metric_name in saved_metrics:
if metric_name in metrics_dict:
metric = metrics_dict[metric_name]
if metric.metric_type == POST_AGG_TYPE:
invalid_metric_names.append(metric_name)
else:
aggregations[metric_name] = metric.json_obj
else:
invalid_metric_names.append(metric_name)
if len(invalid_metric_names) > 0:
raise SupersetException(
_('Metric(s) {} must be aggregations.').format(invalid_metric_names))
for adhoc_metric in adhoc_metrics:
aggregations[adhoc_metric['label']] = {
'fieldName': adhoc_metric['column']['column_name'],
'fieldNames': [adhoc_metric['column']['column_name']],
'type': DruidDatasource.druid_type_from_adhoc_metric(adhoc_metric),
'name': adhoc_metric['label'],
}
return aggregations | Returns a dictionary of aggregation metric names to aggregation json objects
:param metrics_dict: dictionary of all the metrics
:param saved_metrics: list of saved metric names
:param adhoc_metrics: list of adhoc metric names
:raise SupersetException: if one or more metric names are not aggregations |
def replace(self, **kw):
"""Return datetime with new specified fields given as arguments.
    For example, dt.replace(day=4) would return a new datetime_tz object with
    exactly the same values as dt but with the day attribute equal to 4.
Any attribute can be replaced, but tzinfo can not be set to None.
Args:
Any datetime_tz attribute.
Returns:
A datetime_tz object with the attributes replaced.
Raises:
TypeError: If the given replacement is invalid.
"""
if "tzinfo" in kw:
if kw["tzinfo"] is None:
raise TypeError("Can not remove the timezone use asdatetime()")
else:
tzinfo = kw["tzinfo"]
del kw["tzinfo"]
else:
tzinfo = None
is_dst = None
if "is_dst" in kw:
is_dst = kw["is_dst"]
del kw["is_dst"]
else:
# Use our own DST setting..
is_dst = self.is_dst
replaced = self.asdatetime().replace(**kw)
return type(self)(
replaced, tzinfo=tzinfo or self.tzinfo.zone, is_dst=is_dst) | Return datetime with new specified fields given as arguments.
    For example, dt.replace(day=4) would return a new datetime_tz object with
    exactly the same values as dt but with the day attribute equal to 4.
Any attribute can be replaced, but tzinfo can not be set to None.
Args:
Any datetime_tz attribute.
Returns:
A datetime_tz object with the attributes replaced.
Raises:
TypeError: If the given replacement is invalid. |
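A hypothetical usage sketch, assuming the python-datetime-tz package that provides this datetime_tz class:
import datetime_tz

now = datetime_tz.datetime_tz.now()
fourth = now.replace(day=4, hour=0, minute=0)   # new object, same timezone
try:
    now.replace(tzinfo=None)                    # stripping the timezone is refused
except TypeError as err:
    print(err)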
def get_agent(self, agent_id):
"""Fetches the agent for the given agent ID"""
url = 'agents/%s' % agent_id
return Agent(**self._api._get(url)) | Fetches the agent for the given agent ID |
def handle(data_type, data, data_id=None, caller=None):
"""
execute all data handlers on the specified data according to data type
Args:
data_type (str): data type handle
data (dict or list): data
Kwargs:
data_id (str): can be used to differentiate between different data
sets of the same data type. If not specified will default to
the data type
caller (object): if specified, holds the object or function that
is trying to handle data
Returns:
dict or list - data after handlers have been executed on it
"""
if not data_id:
data_id = data_type
    # instantiate handlers for the data type if they haven't been instantiated yet
if data_id not in _handlers:
_handlers[data_id] = dict(
[(h.handle, h) for h in handlers.instantiate_for_data_type(data_type, data_id=data_id)])
for handler in list(_handlers[data_id].values()):
try:
data = handler(data, caller=caller)
except Exception as inst:
vodka.log.error("Data handler '%s' failed with error" % handler)
vodka.log.error(traceback.format_exc())
return data | execute all data handlers on the specified data according to data type
Args:
data_type (str): data type handle
data (dict or list): data
Kwargs:
data_id (str): can be used to differentiate between different data
sets of the same data type. If not specified will default to
the data type
caller (object): if specified, holds the object or function that
is trying to handle data
Returns:
dict or list - data after handlers have been executed on it |
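A self-contained sketch of the same handler-chain idea with plain callables; the vodka handler registry used above is not reproduced here.
_demo_handlers = {
    "ticker": [lambda data, caller=None: dict(data, normalized=True)],
}

def run_handlers(data_type, data, caller=None):
    # Apply each registered handler in turn; every handler returns the new data.
    for handler in _demo_handlers.get(data_type, []):
        data = handler(data, caller=caller)
    return data

print(run_handlers("ticker", {"price": 10}))    # {'price': 10, 'normalized': True}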
def listen(self, timeout=10):
"""
Listen for incoming messages. Timeout is used to check if the server must be switched off.
:param timeout: Socket Timeout in seconds
"""
self._socket.settimeout(float(timeout))
while not self.stopped.isSet():
try:
data, client_address = self._socket.recvfrom(4096)
except socket.timeout:
continue
try:
#Start a new thread not to block other requests
args = ((data, client_address), )
t = threading.Thread(target=self.receive_datagram, args=args)
t.daemon = True
t.start()
except RuntimeError:
logging.exception("Exception with Executor")
logging.debug("closing socket")
self._socket.close() | Listen for incoming messages. Timeout is used to check if the server must be switched off.
:param timeout: Socket Timeout in seconds |
def feedforward(self):
"""
    Soon to be deprecated.
Needed to make the SP implementation compatible
with some older code.
"""
m = self._numInputs
n = self._numColumns
W = np.zeros((n, m))
for i in range(self._numColumns):
self.getPermanence(i, W[i, :])
    return W | Soon to be deprecated.
Needed to make the SP implementation compatible
with some older code. |
def _search_for_user_dn(self):
"""
Searches the directory for a user matching AUTH_LDAP_USER_SEARCH.
Populates self._user_dn and self._user_attrs.
"""
search = self.settings.USER_SEARCH
if search is None:
raise ImproperlyConfigured(
"AUTH_LDAP_USER_SEARCH must be an LDAPSearch instance."
)
results = search.execute(self.connection, {"user": self._username})
if results is not None and len(results) == 1:
(user_dn, self._user_attrs) = next(iter(results))
else:
user_dn = None
return user_dn | Searches the directory for a user matching AUTH_LDAP_USER_SEARCH.
Populates self._user_dn and self._user_attrs. |
def allow(self, comment, content_object, request):
"""
Determine whether a given comment is allowed to be posted on
a given object.
Return ``True`` if the comment should be allowed, ``False
otherwise.
"""
if self.enable_field:
if not getattr(content_object, self.enable_field):
return False
if self.auto_close_field and self.close_after is not None:
close_after_date = getattr(content_object, self.auto_close_field)
if close_after_date is not None and self._get_delta(timezone.now(), close_after_date).days >= self.close_after:
return False
return True | Determine whether a given comment is allowed to be posted on
a given object.
Return ``True`` if the comment should be allowed, ``False
otherwise. |
def _locate_index(self, index):
"""
given index, find out sub-db and sub-index
Parameters
----------
index : int
index of a specific image
Returns
----------
a tuple (sub-db, sub-index)
"""
assert index >= 0 and index < self.num_images, "index out of range"
pos = self.image_set_index[index]
for k, v in enumerate(self.imdbs):
if pos >= v.num_images:
pos -= v.num_images
else:
return (k, pos) | given index, find out sub-db and sub-index
Parameters
----------
index : int
index of a specific image
Returns
----------
a tuple (sub-db, sub-index) |
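The same index translation written out over plain sub-database sizes, as a standalone sketch:
def locate(index, sizes):
    # Walk the sub-databases, subtracting their sizes until the index fits.
    for k, size in enumerate(sizes):
        if index >= size:
            index -= size
        else:
            return (k, index)
    raise IndexError("index out of range")

print(locate(7, [5, 4, 3]))    # (1, 2): global index 7 is item 2 of sub-db 1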
def nlerp_quat(from_quat, to_quat, percent):
"""Return normalized linear interpolation of two quaternions.
    Less computationally expensive than slerp (which is not implemented in this
lib yet), but does not maintain a constant velocity like slerp.
"""
result = lerp_quat(from_quat, to_quat, percent)
result.normalize()
return result | Return normalized linear interpolation of two quaternions.
    Less computationally expensive than slerp (which is not implemented in this
lib yet), but does not maintain a constant velocity like slerp. |
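A conceptual sketch of normalized linear interpolation over plain 4-tuples; the module's lerp_quat helper and quaternion type are assumed elsewhere, so this does not reuse them.
import math

def nlerp(q0, q1, t):
    # Blend component-wise, then renormalize so the result stays a unit quaternion.
    mixed = tuple((1.0 - t) * a + t * b for a, b in zip(q0, q1))
    norm = math.sqrt(sum(c * c for c in mixed))
    return tuple(c / norm for c in mixed)

print(nlerp((1.0, 0.0, 0.0, 0.0), (0.0, 1.0, 0.0, 0.0), 0.5))
# -> (0.707..., 0.707..., 0.0, 0.0), halfway between the two inputs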
def factor_kkt(S_LU, R, d):
""" Factor the U22 block that we can only do after we know D. """
nBatch, nineq = d.size()
neq = S_LU[1].size(1) - nineq
# TODO: There's probably a better way to add a batched diagonal.
global factor_kkt_eye
if factor_kkt_eye is None or factor_kkt_eye.size() != d.size():
# print('Updating batchedEye size.')
factor_kkt_eye = torch.eye(nineq).repeat(
nBatch, 1, 1).type_as(R).byte()
T = R.clone()
T[factor_kkt_eye] += (1. / d).squeeze().view(-1)
T_LU = btrifact_hack(T)
global shown_btrifact_warning
if shown_btrifact_warning or not T.is_cuda:
# TODO: Don't use pivoting in most cases because
# torch.btriunpack is inefficient here:
oldPivotsPacked = S_LU[1][:, -nineq:] - neq
oldPivots, _, _ = torch.btriunpack(
T_LU[0], oldPivotsPacked, unpack_data=False)
newPivotsPacked = T_LU[1]
newPivots, _, _ = torch.btriunpack(
T_LU[0], newPivotsPacked, unpack_data=False)
# Re-pivot the S_LU_21 block.
if neq > 0:
S_LU_21 = S_LU[0][:, -nineq:, :neq]
S_LU[0][:, -nineq:,
:neq] = newPivots.transpose(1, 2).bmm(oldPivots.bmm(S_LU_21))
# Add the new S_LU_22 block pivots.
S_LU[1][:, -nineq:] = newPivotsPacked + neq
# Add the new S_LU_22 block.
S_LU[0][:, -nineq:, -nineq:] = T_LU[0] | Factor the U22 block that we can only do after we know D. |
def send_request(self, request, callback=None, timeout=None, no_response=False): # pragma: no cover
"""
Send a request to the remote server.
:param request: the request to send
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:return: the response
"""
if callback is not None:
thread = threading.Thread(target=self._thread_body, args=(request, callback))
thread.start()
else:
self.protocol.send_message(request)
if no_response:
return
try:
response = self.queue.get(block=True, timeout=timeout)
except Empty:
            # queue.get() timed out: no response arrived within the given timeout
response = None
return response | Send a request to the remote server.
:param request: the request to send
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:return: the response |
def to_serializable(self, use_bytes=False, bias_dtype=np.float32,
bytes_type=bytes):
"""Convert the binary quadratic model to a serializable object.
Args:
use_bytes (bool, optional, default=False):
If True, a compact representation representing the biases as bytes is used.
bias_dtype (numpy.dtype, optional, default=numpy.float32):
If `use_bytes` is True, this numpy dtype will be used to
represent the bias values in the serialized format.
bytes_type (class, optional, default=bytes):
This class will be used to wrap the bytes objects in the
serialization if `use_bytes` is true. Useful for when using
Python 2 and using BSON encoding, which will not accept the raw
`bytes` type, so `bson.Binary` can be used instead.
Returns:
dict: An object that can be serialized.
Examples:
Encode using JSON
>>> import dimod
>>> import json
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> s = json.dumps(bqm.to_serializable())
Encode using BSON_ in python 3.5+
>>> import dimod
>>> import bson
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> doc = bqm.to_serializable(use_bytes=True)
>>> b = bson.BSON.encode(doc) # doctest: +SKIP
Encode using BSON in python 2.7. Because :class:`bytes` is an alias for :class:`str`,
we need to signal to the encoder that it should encode the biases and labels as binary
data.
>>> import dimod
>>> import bson
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> doc = bqm.to_serializable(use_bytes=True, bytes_type=bson.Binary)
>>> b = bson.BSON.encode(doc) # doctest: +SKIP
See also:
:meth:`~.BinaryQuadraticModel.from_serializable`
:func:`json.dumps`, :func:`json.dump` JSON encoding functions
:meth:`bson.BSON.encode` BSON encoding method
.. _BSON: http://bsonspec.org/
"""
from dimod.package_info import __version__
schema_version = "2.0.0"
try:
variables = sorted(self.variables)
except TypeError:
# sorting unlike types in py3
variables = list(self.variables)
num_variables = len(variables)
# when doing byte encoding we can use less space depending on the
# total number of variables
index_dtype = np.uint16 if num_variables <= 2**16 else np.uint32
ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(
dtype=bias_dtype,
index_dtype=index_dtype,
sort_indices=True,
variable_order=variables)
doc = {"basetype": "BinaryQuadraticModel",
"type": type(self).__name__,
"version": {"dimod": __version__,
"bqm_schema": schema_version},
"variable_labels": variables,
"variable_type": self.vartype.name,
"info": self.info,
"offset": float(offset),
"use_bytes": bool(use_bytes)
}
if use_bytes:
doc.update({'linear_biases': array2bytes(ldata, bytes_type=bytes_type),
'quadratic_biases': array2bytes(qdata, bytes_type=bytes_type),
'quadratic_head': array2bytes(irow, bytes_type=bytes_type),
'quadratic_tail': array2bytes(icol, bytes_type=bytes_type)})
else:
doc.update({'linear_biases': ldata.tolist(),
'quadratic_biases': qdata.tolist(),
'quadratic_head': irow.tolist(),
'quadratic_tail': icol.tolist()})
return doc | Convert the binary quadratic model to a serializable object.
Args:
use_bytes (bool, optional, default=False):
If True, a compact representation representing the biases as bytes is used.
bias_dtype (numpy.dtype, optional, default=numpy.float32):
If `use_bytes` is True, this numpy dtype will be used to
represent the bias values in the serialized format.
bytes_type (class, optional, default=bytes):
This class will be used to wrap the bytes objects in the
serialization if `use_bytes` is true. Useful for when using
Python 2 and using BSON encoding, which will not accept the raw
`bytes` type, so `bson.Binary` can be used instead.
Returns:
dict: An object that can be serialized.
Examples:
Encode using JSON
>>> import dimod
>>> import json
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> s = json.dumps(bqm.to_serializable())
Encode using BSON_ in python 3.5+
>>> import dimod
>>> import bson
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> doc = bqm.to_serializable(use_bytes=True)
>>> b = bson.BSON.encode(doc) # doctest: +SKIP
Encode using BSON in python 2.7. Because :class:`bytes` is an alias for :class:`str`,
we need to signal to the encoder that it should encode the biases and labels as binary
data.
>>> import dimod
>>> import bson
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> doc = bqm.to_serializable(use_bytes=True, bytes_type=bson.Binary)
>>> b = bson.BSON.encode(doc) # doctest: +SKIP
See also:
:meth:`~.BinaryQuadraticModel.from_serializable`
:func:`json.dumps`, :func:`json.dump` JSON encoding functions
:meth:`bson.BSON.encode` BSON encoding method
.. _BSON: http://bsonspec.org/ |
def _formatters_default(self):
"""Activate the default formatters."""
formatter_classes = [
PlainTextFormatter,
HTMLFormatter,
SVGFormatter,
PNGFormatter,
JPEGFormatter,
LatexFormatter,
JSONFormatter,
JavascriptFormatter
]
d = {}
for cls in formatter_classes:
f = cls(config=self.config)
d[f.format_type] = f
return d | Activate the default formatters. |
def localize_field(self, value):
"""
Method that must transform the value from object to localized string
"""
if self.default is not None:
if value is None or value == '':
value = self.default
return value or '' | Method that must transform the value from object to localized string |
def list_events(self, source=None, severity=None, text_filter=None,
start=None, stop=None, page_size=500, descending=False):
"""
Reads events between the specified start and stop time.
Events are sorted by generation time, source, then sequence number.
:param str source: The source of the returned events.
:param str severity: The minimum severity level of the returned events.
One of ``INFO``, ``WATCH``, ``WARNING``, ``DISTRESS``,
``CRITICAL`` or ``SEVERE``.
:param str text_filter: Filter the text message of the returned events
:param ~datetime.datetime start: Minimum start date of the returned events (inclusive)
:param ~datetime.datetime stop: Maximum start date of the returned events (exclusive)
:param int page_size: Page size of underlying requests. Higher values imply
less overhead, but risk hitting the maximum message size limit.
:param bool descending: If set to ``True`` events are fetched in reverse
order (most recent first).
:rtype: ~collections.Iterable[.Event]
"""
params = {
'order': 'desc' if descending else 'asc',
}
if source is not None:
params['source'] = source
if page_size is not None:
params['limit'] = page_size
if severity is not None:
params['severity'] = severity
if start is not None:
params['start'] = to_isostring(start)
if stop is not None:
params['stop'] = to_isostring(stop)
if text_filter is not None:
params['q'] = text_filter
return pagination.Iterator(
client=self._client,
path='/archive/{}/events'.format(self._instance),
params=params,
response_class=rest_pb2.ListEventsResponse,
items_key='event',
item_mapper=Event,
) | Reads events between the specified start and stop time.
Events are sorted by generation time, source, then sequence number.
:param str source: The source of the returned events.
:param str severity: The minimum severity level of the returned events.
One of ``INFO``, ``WATCH``, ``WARNING``, ``DISTRESS``,
``CRITICAL`` or ``SEVERE``.
:param str text_filter: Filter the text message of the returned events
:param ~datetime.datetime start: Minimum start date of the returned events (inclusive)
:param ~datetime.datetime stop: Maximum start date of the returned events (exclusive)
:param int page_size: Page size of underlying requests. Higher values imply
less overhead, but risk hitting the maximum message size limit.
:param bool descending: If set to ``True`` events are fetched in reverse
order (most recent first).
:rtype: ~collections.Iterable[.Event] |
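A hypothetical usage sketch; `archive` stands in for a Yamcs archive client object exposing the method above.
from datetime import datetime, timedelta

stop = datetime.utcnow()
start = stop - timedelta(hours=1)
for event in archive.list_events(severity="WARNING", start=start, stop=stop,
                                 descending=True, page_size=200):
    print(event)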
def add_textop_iter(func):
"""Decorator to declare custom *ITER* function as a new textops op
An *ITER* function is a function that will receive the input text as a *LIST* of lines.
One have to iterate over this list and generate a result (it can be a list, a generator,
a dict, a string, an int ...)
Examples:
>>> @add_textop_iter
... def odd(lines, *args,**kwargs):
... for i,line in enumerate(lines):
... if not i % 2:
... yield line
>>> s = '''line 1
... line 2
... line 3'''
>>> s >> odd()
['line 1', 'line 3']
>>> s | odd().tolist()
['line 1', 'line 3']
>>> @add_textop_iter
... def sumsize(lines, *args,**kwargs):
... sum = 0
... for line in lines:
... sum += int(re.search(r'\d+',line).group(0))
... return sum
>>> '''1492 file1
... 1789 file2
... 2015 file3''' | sumsize()
5296
"""
op = type(func.__name__,(WrapOpIter,), {'fn':staticmethod(func)})
setattr(textops.ops,func.__name__,op)
return op | Decorator to declare custom *ITER* function as a new textops op
An *ITER* function is a function that will receive the input text as a *LIST* of lines.
    One has to iterate over this list and generate a result (it can be a list, a generator,
a dict, a string, an int ...)
Examples:
>>> @add_textop_iter
... def odd(lines, *args,**kwargs):
... for i,line in enumerate(lines):
... if not i % 2:
... yield line
>>> s = '''line 1
... line 2
... line 3'''
>>> s >> odd()
['line 1', 'line 3']
>>> s | odd().tolist()
['line 1', 'line 3']
>>> @add_textop_iter
... def sumsize(lines, *args,**kwargs):
... sum = 0
... for line in lines:
... sum += int(re.search(r'\d+',line).group(0))
... return sum
>>> '''1492 file1
... 1789 file2
... 2015 file3''' | sumsize()
5296 |
def get_member_brief(self, member_id=0):
''' a method to retrieve member profile info
:param member_id: [optional] integer with member id from member profile
:return: dictionary with member profile inside [json] key
member_profile = self.objects.profile_brief.schema
'''
# https://www.meetup.com/meetup_api/docs/members/:member_id/#get
title = '%s.get_member_brief' % self.__class__.__name__
# validate inputs
input_fields = {
'member_id': member_id
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct request fields
url = '%s/members/' % self.endpoint
params = {
'member_id': 'self'
}
if member_id:
params['member_id'] = member_id
# send request
response_details = self._get_request(url, params=params)
# construct method output dictionary
profile_details = {
'json': {}
}
for key, value in response_details.items():
if not key == 'json':
profile_details[key] = value
# parse response
if response_details['json']:
if 'results' in response_details['json'].keys():
if response_details['json']['results']:
details = response_details['json']['results'][0]
for key, value in details.items():
if key != 'topics':
profile_details['json'][key] = value
profile_details['json'] = self.objects.profile_brief.ingest(**profile_details['json'])
return profile_details | a method to retrieve member profile info
:param member_id: [optional] integer with member id from member profile
:return: dictionary with member profile inside [json] key
member_profile = self.objects.profile_brief.schema |
def write(self, message, delay_seconds=None):
"""
Add a single message to the queue.
:type message: Message
:param message: The message to be written to the queue
:rtype: :class:`boto.sqs.message.Message`
:return: The :class:`boto.sqs.message.Message` object that was written.
"""
new_msg = self.connection.send_message(self, message.get_body_encoded(), delay_seconds)
message.id = new_msg.id
message.md5 = new_msg.md5
return message | Add a single message to the queue.
:type message: Message
:param message: The message to be written to the queue
:rtype: :class:`boto.sqs.message.Message`
:return: The :class:`boto.sqs.message.Message` object that was written. |
def confd_state_internal_cdb_client_lock(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
internal = ET.SubElement(confd_state, "internal")
cdb = ET.SubElement(internal, "cdb")
client = ET.SubElement(cdb, "client")
lock = ET.SubElement(client, "lock")
lock.text = kwargs.pop('lock')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def _set_gaussian_expected_stats(self, smoothed_mus, smoothed_sigmas, E_xtp1_xtT):
"""
Both meanfield and VBEM require expected statistics of the continuous latent
states, x. This is a helper function to take E[x_t], E[x_t x_t^T] and E[x_{t+1}, x_t^T]
and compute the expected sufficient statistics for the initial distribution,
dynamics distribution, and Gaussian observation distribution.
"""
assert not np.isnan(E_xtp1_xtT).any()
assert not np.isnan(smoothed_mus).any()
assert not np.isnan(smoothed_sigmas).any()
assert smoothed_mus.shape == (self.T, self.D_latent)
assert smoothed_sigmas.shape == (self.T, self.D_latent, self.D_latent)
assert E_xtp1_xtT.shape == (self.T-1, self.D_latent, self.D_latent)
# This is like LDSStates._set_expected_states but doesn't sum over time
T, D_obs = self.T, self.D_emission
E_x_xT = smoothed_sigmas + smoothed_mus[:, :, None] * smoothed_mus[:, None, :]
E_x_uT = smoothed_mus[:, :, None] * self.inputs[:, None, :]
E_u_uT = self.inputs[:, :, None] * self.inputs[:, None, :]
E_xu_xuT = np.concatenate((
np.concatenate((E_x_xT, E_x_uT), axis=2),
np.concatenate((np.transpose(E_x_uT, (0, 2, 1)), E_u_uT), axis=2)),
axis=1)
E_xut_xutT = E_xu_xuT[:-1]
E_xtp1_xtp1T = E_x_xT[1:]
E_xtp1_utT = (smoothed_mus[1:, :, None] * self.inputs[:-1, None, :])
E_xtp1_xutT = np.concatenate((E_xtp1_xtT, E_xtp1_utT), axis=-1)
# Initial state stats
self.E_init_stats = (self.smoothed_mus[0], E_x_xT[0], 1.)
# Dynamics stats
self.E_dynamics_stats = (E_xtp1_xtp1T, E_xtp1_xutT, E_xut_xutT, np.ones(self.T-1))
# Emission stats -- special case diagonal noise
E_yyT = self.data**2 if self.diagonal_noise else self.data[:, :, None] * self.data[:, None, :]
E_yxT = self.data[:, :, None] * self.smoothed_mus[:, None, :]
E_yuT = self.data[:, :, None] * self.inputs[:, None, :]
E_yxuT = np.concatenate((E_yxT, E_yuT), axis=-1)
E_n = np.ones((T, D_obs)) if self.diagonal_noise else np.ones(T)
self.E_emission_stats = (E_yyT, E_yxuT, E_xu_xuT, E_n) | Both meanfield and VBEM require expected statistics of the continuous latent
states, x. This is a helper function to take E[x_t], E[x_t x_t^T] and E[x_{t+1}, x_t^T]
and compute the expected sufficient statistics for the initial distribution,
dynamics distribution, and Gaussian observation distribution. |
def vclose(L, V):
"""
gets the closest vector
"""
lam, X = 0, []
for k in range(3):
lam = lam + V[k] * L[k]
beta = np.sqrt(1. - lam**2)
for k in range(3):
X.append((old_div((V[k] - lam * L[k]), beta)))
return X | gets the closest vector |
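A small worked example using the vclose() above (so its numpy / old_div imports are assumed): for unit-length inputs the result is the unit vector in the plane of L and V that is perpendicular to L.
L_axis = [0.0, 0.0, 1.0]
V_vec = [0.6, 0.0, 0.8]          # unit vector, about 37 degrees away from L_axis
print(vclose(L_axis, V_vec))     # -> [1.0, 0.0, 0.0] (approximately)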
def removeItem( self ):
"""
Removes the item from the menu.
"""
item = self.uiMenuTREE.currentItem()
if ( not item ):
return
opts = QMessageBox.Yes | QMessageBox.No
answer = QMessageBox.question( self,
'Remove Item',
'Are you sure you want to remove this '\
' item?',
opts )
if ( answer == QMessageBox.Yes ):
parent = item.parent()
if ( parent ):
parent.takeChild(parent.indexOfChild(item))
else:
tree = self.uiMenuTREE
tree.takeTopLevelItem(tree.indexOfTopLevelItem(item)) | Removes the item from the menu. |
def setup_logging():
"""Logging config."""
logging.basicConfig(format=("[%(levelname)s\033[0m] "
"\033[1;31m%(module)s\033[0m: "
"%(message)s"),
level=logging.INFO,
stream=sys.stdout)
logging.addLevelName(logging.ERROR, '\033[1;31mE')
logging.addLevelName(logging.INFO, '\033[1;32mI')
logging.addLevelName(logging.WARNING, '\033[1;33mW') | Logging config. |
def _validate_format(req):
'''
Validate jsonrpc compliance of a jsonrpc-dict.
req - the request as a jsonrpc-dict
raises SLOJSONRPCError on validation error
'''
#check for all required keys
for key in SLOJSONRPC._min_keys:
if not key in req:
logging.debug('JSONRPC: Fmt Error: Need key "%s"' % key)
raise SLOJSONRPCError(-32600)
#check all keys if allowed
for key in req.keys():
if not key in SLOJSONRPC._allowed_keys:
logging.debug('JSONRPC: Fmt Error: Not allowed key "%s"' % key)
raise SLOJSONRPCError(-32600)
#needs to be jsonrpc 2.0
if req['jsonrpc'] != '2.0':
logging.debug('JSONRPC: Fmt Error: "jsonrpc" needs to be "2.0"')
raise SLOJSONRPCError(-32600) | Validate jsonrpc compliance of a jsonrpc-dict.
req - the request as a jsonrpc-dict
raises SLOJSONRPCError on validation error |
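For reference, a request dict shaped the way this validator expects; the exact key sets live in SLOJSONRPC._min_keys and _allowed_keys, which are assumed here to follow the JSON-RPC 2.0 spec.
valid_request = {
    "jsonrpc": "2.0",                      # must be exactly "2.0"
    "method": "ping",                      # the method to invoke
    "params": {"host": "example.org"},     # optional parameters
    "id": 1,                               # omit for a notification
}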
def serial_assimilate(self, rootpath):
"""
Assimilate the entire subdirectory structure in rootpath serially.
"""
valid_paths = []
for (parent, subdirs, files) in os.walk(rootpath):
valid_paths.extend(self._drone.get_valid_paths((parent, subdirs,
files)))
data = []
count = 0
total = len(valid_paths)
for path in valid_paths:
newdata = self._drone.assimilate(path)
self._data.append(newdata)
count += 1
logger.info('{}/{} ({:.2f}%) done'.format(count, total,
count / total * 100))
for d in data:
self._data.append(json.loads(d, cls=MontyDecoder)) | Assimilate the entire subdirectory structure in rootpath serially. |
def encipher_shift(plaintext, plain_vocab, shift):
"""Encrypt plain text with a single shift layer.
Args:
plaintext (list of list of Strings): a list of plain text to encrypt.
plain_vocab (list of Integer): unique vocabularies being used.
shift (Integer): number of shift, shift to the right if shift is positive.
Returns:
ciphertext (list of Strings): encrypted plain text.
"""
ciphertext = []
cipher = ShiftEncryptionLayer(plain_vocab, shift)
for _, sentence in enumerate(plaintext):
cipher_sentence = []
for _, character in enumerate(sentence):
encrypted_char = cipher.encrypt_character(character)
cipher_sentence.append(encrypted_char)
ciphertext.append(cipher_sentence)
return ciphertext | Encrypt plain text with a single shift layer.
Args:
plaintext (list of list of Strings): a list of plain text to encrypt.
plain_vocab (list of Integer): unique vocabularies being used.
shift (Integer): number of shift, shift to the right if shift is positive.
Returns:
ciphertext (list of Strings): encrypted plain text. |
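A self-contained illustration of the underlying shift idea over a small character vocabulary; ShiftEncryptionLayer itself is defined elsewhere in the module and not reused here.
vocab = list("abcdefghijklmnopqrstuvwxyz")

def shift_char(ch, shift):
    # Move the character `shift` places to the right, wrapping around the vocabulary.
    return vocab[(vocab.index(ch) + shift) % len(vocab)]

print("".join(shift_char(c, 3) for c in "attack"))    # 'dwwdfn'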
def set_shuffle(self, shuffle):
"""stub"""
if not self.my_osid_object_form._is_valid_boolean(
shuffle):
raise InvalidArgument('shuffle')
self.my_osid_object_form._my_map['shuffle'] = shuffle | stub |
def _evaluate_model_single_file(target_folder, test_file):
"""
Evaluate a model for a single recording.
Parameters
----------
target_folder : string
Folder where the model is
test_file : string
The test file (.hdf5)
"""
logging.info("Create running model...")
model_src = get_latest_model(target_folder, "model")
model_file_pointer = tempfile.NamedTemporaryFile(delete=False)
model_use = model_file_pointer.name
model_file_pointer.close()
logging.info("Adjusted model is in %s.", model_use)
create_adjusted_model_for_percentages(model_src, model_use)
# Run evaluation
project_root = get_project_root()
time_prefix = time.strftime("%Y-%m-%d-%H-%M")
logging.info("Evaluate '%s' with '%s'...", model_src, test_file)
logfilefolder = os.path.join(project_root, "logs/")
if not os.path.exists(logfilefolder):
os.makedirs(logfilefolder)
logfile = os.path.join(project_root,
"logs/%s-error-evaluation.log" %
time_prefix)
with open(logfile, "w") as log, open(model_use, "r") as modl_src_p:
p = subprocess.Popen([get_nntoolkit(), 'run',
'--batch-size', '1',
'-f%0.4f', test_file],
stdin=modl_src_p,
stdout=log)
ret = p.wait()
if ret != 0:
logging.error("nntoolkit finished with ret code %s",
str(ret))
sys.exit()
return (logfile, model_use) | Evaluate a model for a single recording.
Parameters
----------
target_folder : string
Folder where the model is
test_file : string
The test file (.hdf5) |
def _parse(self, msg_dict):
'''
Parse a syslog message and check what OpenConfig object should
be generated.
'''
error_present = False
# log.debug('Matching the message:')
# log.debug(msg_dict)
for message in self.compiled_messages:
# log.debug('Matching using:')
# log.debug(message)
match_on = message['match_on']
if match_on not in msg_dict:
# log.debug('%s is not a valid key in the partially parsed dict', match_on)
continue
if message['tag'] != msg_dict[match_on]:
continue
if '__python_fun__' in message:
return {
'model': message['model'],
'error': message['error'],
'__python_fun__': message['__python_fun__']
}
error_present = True
match = message['line'].search(msg_dict['message'])
if not match:
continue
positions = message.get('positions', {})
values = message.get('values')
ret = {
'model': message['model'],
'mapping': message['mapping'],
'replace': message['replace'],
'error': message['error']
}
for key in values.keys():
# Check if the value needs to be replaced
if key in message['replace']:
result = napalm_logs.utils.cast(match.group(positions.get(key)), message['replace'][key])
else:
result = match.group(positions.get(key))
ret[key] = result
return ret
if error_present is True:
log.info('Configured regex did not match for os: %s tag %s', self._name, msg_dict.get('tag', ''))
else:
log.info('Syslog message not configured for os: %s tag %s', self._name, msg_dict.get('tag', '')) | Parse a syslog message and check what OpenConfig object should
be generated. |
def parse_color(self, color):
'''
color : string, eg: '#rrggbb' or 'none'
(where rr, gg, bb are hex digits from 00 to ff)
returns a triple of unsigned bytes, eg: (0, 128, 255)
'''
if color == 'none':
return None
return (
int(color[1:3], 16),
int(color[3:5], 16),
int(color[5:7], 16)) | color : string, eg: '#rrggbb' or 'none'
(where rr, gg, bb are hex digits from 00 to ff)
returns a triple of unsigned bytes, eg: (0, 128, 255) |
def calculateLocalElasticity(self, bp, frames=None, helical=False, unit='kT'):
r"""Calculate local elastic matrix or stiffness matrix for local DNA segment
.. note:: Here local DNA segment referred to less than 5 base-pair long.
In case of :ref:`base-step-image`: Shift (:math:`Dx`), Slide (:math:`Dy`), Rise (:math:`Dz`),
Tilt (:math:`\tau`), Roll (:math:`\rho`) and Twist (:math:`\omega`), following elastic matrix is calculated.
.. math::
\mathbf{K}_{base-step} = \begin{bmatrix}
K_{Dx} & K_{Dx,Dy} & K_{Dx,Dz} & K_{Dx,\tau} & K_{Dx,\rho} & K_{Dx,\omega} \\
K_{Dx,Dy} & K_{Dy} & K_{Dy,Dz} & K_{Dy,\tau} & K_{Dy,\rho} & K_{Dy,\omega} \\
K_{Dx,Dz} & K_{Dy,Dz} & K_{Dz} & K_{Dz,\tau} & K_{Dz,\rho} & K_{Dz,\omega} \\
K_{Dx,\tau} & K_{Dy,\tau} & K_{Dz,\tau} & K_{\tau} & K_{\tau, \rho} & K_{\tau,\omega} \\
K_{Dx,\rho} & K_{Dy,\rho} & K_{Dz,\rho} & K_{\tau, \rho} & K_{\rho} & K_{\rho,\omega} \\
K_{Dx,\omega} & K_{Dy,\omega} & K_{Dz,\omega} & K_{\tau, \omega} & K_{\rho, \omega} & K_{\omega} \\
\end{bmatrix}
In case of :ref:`helical-base-step-image`: x-displacement (:math:`dx`), y-displacement (:math:`dy`), h-rise (:math:`h`),
inclination (:math:`\eta`), tip (:math:`\theta`) and twist (:math:`\Omega`), following elastic matrix is calculated.
.. math::
\mathbf{K}_{helical-base-step} = \begin{bmatrix}
K_{dx} & K_{dx,dy} & K_{dx,h} & K_{dx,\eta} & K_{dx,\theta} & K_{dx,\Omega} \\
K_{dx,dy} & K_{dy} & K_{dy,h} & K_{dy,\eta} & K_{dy,\theta} & K_{dy,\Omega} \\
K_{dx,h} & K_{dy,h} & K_{h} & K_{h,\eta} & K_{h,\theta} & K_{h,\Omega} \\
K_{dx,\eta} & K_{dy,\eta} & K_{h,\eta} & K_{\eta} & K_{\eta, \theta} & K_{\eta,\Omega} \\
K_{dx,\theta} & K_{dy,\theta} & K_{h,\theta} & K_{\eta, \theta} & K_{\theta} & K_{\theta,\Omega} \\
K_{dx,\Omega} & K_{dy,\Omega} & K_{h,\Omega} & K_{\eta, \Omega} & K_{\theta, \Omega} & K_{\Omega} \\
\end{bmatrix}
Parameters
----------
bp : list
List of two base-steps forming the DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
frames : list
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
helical : bool
If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,
by default, elastic matrix for **base-step** parameters are calculated.
unit : str
Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.
Return
------
mean : numpy.ndarray
Value of parameters at which energy is zero. Minimum point on energy landscape.
if ``helical=False``
.. math::
\begin{bmatrix}
Dx_0 & Dy_0 & Dz_0 & \tau_0 & \rho_0 & \omega_0
\end{bmatrix}
if ``helical=True``
.. math::
\begin{bmatrix}
dx_0 & dy_0 & h_0 & \eta_0 & \theta_0 & \Omega_0
\end{bmatrix}
result : numpy.ndarray
Elastic matrix.
"""
acceptedUnit = ['kT', 'kJ/mol', 'kcal/mol']
if unit not in acceptedUnit:
raise ValueError(" {0} not accepted. Use any of the following: {1} ".format(unit, acceptedUnit))
frames = self._validateFrames(frames)
name = '{0}-{1}-{2}-{3}-local-{4}'.format(bp[0], bp[1], frames[0], frames[1], int(helical))
if bp[1]-bp[0]+1 > 4:
raise ValueError("Selected span {0} is larger than 4, and therefore, not recommended for local elasticity".format(bp[1]-bp[0]+1))
if name not in self.esMatrix:
time, array = self.extractLocalParameters(self.dna, bp, helical=helical, frames=frames)
mean = np.mean(array, axis = 1)
esMatrix = self.getElasticMatrix(array)
self.esMatrix[name] = esMatrix
self.minimumPoint[name] = mean
else:
esMatrix = self.esMatrix[name]
mean = self.minimumPoint[name]
if unit == 'kJ/mol':
result = 2.4946938107879997 * esMatrix # (1.38064852e-23 * 300 * 6.023e23 / 1000 ) kT.NA/1000
elif unit == 'kcal/mol':
result = 0.5962461306854684 * esMatrix # (1.38064852e-23 * 300 * 6.023e23 / 1000 / 4.184) kT.NA/1000
else:
result = esMatrix
return mean, result | r"""Calculate local elastic matrix or stiffness matrix for local DNA segment
.. note:: Here local DNA segment referred to less than 5 base-pair long.
In case of :ref:`base-step-image`: Shift (:math:`Dx`), Slide (:math:`Dy`), Rise (:math:`Dz`),
Tilt (:math:`\tau`), Roll (:math:`\rho`) and Twist (:math:`\omega`), following elastic matrix is calculated.
.. math::
\mathbf{K}_{base-step} = \begin{bmatrix}
K_{Dx} & K_{Dx,Dy} & K_{Dx,Dz} & K_{Dx,\tau} & K_{Dx,\rho} & K_{Dx,\omega} \\
K_{Dx,Dy} & K_{Dy} & K_{Dy,Dz} & K_{Dy,\tau} & K_{Dy,\rho} & K_{Dy,\omega} \\
K_{Dx,Dz} & K_{Dy,Dz} & K_{Dz} & K_{Dz,\tau} & K_{Dz,\rho} & K_{Dz,\omega} \\
K_{Dx,\tau} & K_{Dy,\tau} & K_{Dz,\tau} & K_{\tau} & K_{\tau, \rho} & K_{\tau,\omega} \\
K_{Dx,\rho} & K_{Dy,\rho} & K_{Dz,\rho} & K_{\tau, \rho} & K_{\rho} & K_{\rho,\omega} \\
K_{Dx,\omega} & K_{Dy,\omega} & K_{Dz,\omega} & K_{\tau, \omega} & K_{\rho, \omega} & K_{\omega} \\
\end{bmatrix}
In case of :ref:`helical-base-step-image`: x-displacement (:math:`dx`), y-displacement (:math:`dy`), h-rise (:math:`h`),
inclination (:math:`\eta`), tip (:math:`\theta`) and twist (:math:`\Omega`), following elastic matrix is calculated.
.. math::
\mathbf{K}_{helical-base-step} = \begin{bmatrix}
K_{dx} & K_{dx,dy} & K_{dx,h} & K_{dx,\eta} & K_{dx,\theta} & K_{dx,\Omega} \\
K_{dx,dy} & K_{dy} & K_{dy,h} & K_{dy,\eta} & K_{dy,\theta} & K_{dy,\Omega} \\
K_{dx,h} & K_{dy,h} & K_{h} & K_{h,\eta} & K_{h,\theta} & K_{h,\Omega} \\
K_{dx,\eta} & K_{dy,\eta} & K_{h,\eta} & K_{\eta} & K_{\eta, \theta} & K_{\eta,\Omega} \\
K_{dx,\theta} & K_{dy,\theta} & K_{h,\theta} & K_{\eta, \theta} & K_{\theta} & K_{\theta,\Omega} \\
K_{dx,\Omega} & K_{dy,\Omega} & K_{h,\Omega} & K_{\eta, \Omega} & K_{\theta, \Omega} & K_{\Omega} \\
\end{bmatrix}
Parameters
----------
bp : list
List of two base-steps forming the DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
frames : list
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
helical : bool
If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,
by default, elastic matrix for **base-step** parameters are calculated.
unit : str
Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.
Return
------
mean : numpy.ndarray
Value of parameters at which energy is zero. Minimum point on energy landscape.
if ``helical=False``
.. math::
\begin{bmatrix}
Dx_0 & Dy_0 & Dz_0 & \tau_0 & \rho_0 & \omega_0
\end{bmatrix}
if ``helical=True``
.. math::
\begin{bmatrix}
dx_0 & dy_0 & h_0 & \eta_0 & \theta_0 & \Omega_0
\end{bmatrix}
result : numpy.ndarray
Elastic matrix. |
def _retrieveRemoteCertificate(self, From, port=port):
"""
The entire conversation, starting with TCP handshake and ending at
disconnect, to retrieve a foreign domain's certificate for the first
time.
"""
CS = self.service.certificateStorage
host = str(From.domainAddress())
p = AMP()
p.wrapper = self.wrapper
f = protocol.ClientCreator(reactor, lambda: p)
connD = f.connectTCP(host, port)
def connected(proto):
dhost = From.domainAddress()
iddom = proto.callRemote(Identify, subject=dhost)
def gotCert(identifyBox):
theirCert = identifyBox['certificate']
theirIssuer = theirCert.getIssuer().commonName
theirName = theirCert.getSubject().commonName
if (theirName != str(dhost)):
raise VerifyError(
"%r claimed it was %r in IDENTIFY response"
% (theirName, dhost))
if (theirIssuer != str(dhost)):
raise VerifyError(
"self-signed %r claimed it was issued by "
"%r in IDENTIFY response" % (dhost, theirIssuer))
def storedCert(ignored):
return theirCert
return CS.storeSelfSignedCertificate(
str(dhost), theirCert).addCallback(storedCert)
def nothingify(x):
proto.transport.loseConnection()
return x
return iddom.addCallback(gotCert).addBoth(nothingify)
connD.addCallback(connected)
return connD | The entire conversation, starting with TCP handshake and ending at
disconnect, to retrieve a foreign domain's certificate for the first
time. |
def remove_labels(self, labels, relabel=False):
"""
Remove one or more labels.
Removed labels are assigned a value of zero (i.e., background).
Parameters
----------
labels : int, array-like (1D, int)
The label number(s) to remove.
relabel : bool, optional
If `True`, then the segmentation image will be relabeled
such that the labels are in consecutive order starting from
1.
Examples
--------
>>> from photutils import SegmentationImage
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.remove_labels(labels=[5, 3])
>>> segm.data
array([[1, 1, 0, 0, 4, 4],
[0, 0, 0, 0, 0, 4],
[0, 0, 0, 0, 0, 0],
[7, 0, 0, 0, 0, 0],
[7, 7, 0, 0, 0, 0],
[7, 7, 0, 0, 0, 0]])
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.remove_labels(labels=[5, 3], relabel=True)
>>> segm.data
array([[1, 1, 0, 0, 2, 2],
[0, 0, 0, 0, 0, 2],
[0, 0, 0, 0, 0, 0],
[3, 0, 0, 0, 0, 0],
[3, 3, 0, 0, 0, 0],
[3, 3, 0, 0, 0, 0]])
"""
self.check_labels(labels)
self.reassign_label(labels, new_label=0)
if relabel:
self.relabel_consecutive() | Remove one or more labels.
Removed labels are assigned a value of zero (i.e., background).
Parameters
----------
labels : int, array-like (1D, int)
The label number(s) to remove.
relabel : bool, optional
If `True`, then the segmentation image will be relabeled
such that the labels are in consecutive order starting from
1.
Examples
--------
>>> from photutils import SegmentationImage
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.remove_labels(labels=[5, 3])
>>> segm.data
array([[1, 1, 0, 0, 4, 4],
[0, 0, 0, 0, 0, 4],
[0, 0, 0, 0, 0, 0],
[7, 0, 0, 0, 0, 0],
[7, 7, 0, 0, 0, 0],
[7, 7, 0, 0, 0, 0]])
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.remove_labels(labels=[5, 3], relabel=True)
>>> segm.data
array([[1, 1, 0, 0, 2, 2],
[0, 0, 0, 0, 0, 2],
[0, 0, 0, 0, 0, 0],
[3, 0, 0, 0, 0, 0],
[3, 3, 0, 0, 0, 0],
[3, 3, 0, 0, 0, 0]]) |
def will_set(self, topic, payload=None, qos=0, retain=False):
"""Set a Will to be sent by the broker in case the client disconnects unexpectedly.
This must be called before connect() to have any effect.
topic: The topic that the will message should be published on.
payload: The message to send as a will. If not given, or set to None a
zero length message will be used as the will. Passing an int or float
will result in the payload being converted to a string representing
that number. If you wish to send a true int/float, use struct.pack() to
create the payload you require.
qos: The quality of service level to use for the will.
retain: If set to true, the will message will be set as the "last known
good"/retained message for the topic.
Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has
zero string length.
"""
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.')
if qos<0 or qos>2:
raise ValueError('Invalid QoS level.')
if isinstance(payload, str):
self._will_payload = payload.encode('utf-8')
elif isinstance(payload, bytearray):
self._will_payload = payload
elif isinstance(payload, int) or isinstance(payload, float):
self._will_payload = str(payload)
elif payload is None:
self._will_payload = None
else:
raise TypeError('payload must be a string, bytearray, int, float or None.')
self._will = True
self._will_topic = topic.encode('utf-8')
self._will_qos = qos
self._will_retain = retain | Set a Will to be sent by the broker in case the client disconnects unexpectedly.
This must be called before connect() to have any effect.
topic: The topic that the will message should be published on.
payload: The message to send as a will. If not given, or set to None a
zero length message will be used as the will. Passing an int or float
will result in the payload being converted to a string representing
that number. If you wish to send a true int/float, use struct.pack() to
create the payload you require.
qos: The quality of service level to use for the will.
retain: If set to true, the will message will be set as the "last known
good"/retained message for the topic.
Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has
zero string length. |
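A hypothetical usage sketch; `Client` stands in for whatever class owns this will_set() method, and connect() is assumed to behave like a typical MQTT client.
client = Client()                                   # hypothetical client class
client.will_set("devices/sensor1/status", payload="offline", qos=1, retain=True)
client.connect("broker.example.org")
# If the connection later drops without a clean disconnect, the broker
# publishes the retained "offline" message on the topic on our behalf.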
def netconf_state_statistics_in_bad_rpcs(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
netconf_state = ET.SubElement(config, "netconf-state", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring")
statistics = ET.SubElement(netconf_state, "statistics")
in_bad_rpcs = ET.SubElement(statistics, "in-bad-rpcs")
in_bad_rpcs.text = kwargs.pop('in_bad_rpcs')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def scale(self, x, y=None, z=None):
"Uniform scale, if only sx argument is specified"
if y is None:
y = x
if z is None:
z = x
m = self
for col in range(4):
# Only the top three rows
m[0,col] *= x
m[1,col] *= y
m[2,col] *= z
        return self | Uniform scale, if only the x argument is specified
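The same effect written out over a plain nested list, as a standalone sketch (the class above indexes with m[row, col]; plain lists use m[row][col]):
def scale_matrix(m, x, y=None, z=None):
    y = x if y is None else y
    z = x if z is None else z
    for col in range(4):
        m[0][col] *= x                     # only the top three rows are scaled
        m[1][col] *= y
        m[2][col] *= z
    return m

identity = [[1.0 if r == c else 0.0 for c in range(4)] for r in range(4)]
print(scale_matrix(identity, 2.0))         # diagonal becomes 2.0, 2.0, 2.0, 1.0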
def venv_bin(name=None): # pylint: disable=inconsistent-return-statements
""" Get the directory for virtualenv stubs, or a full executable path
if C{name} is provided.
"""
if not hasattr(sys, "real_prefix"):
easy.error("ERROR: '%s' is not a virtualenv" % (sys.executable,))
sys.exit(1)
for bindir in ("bin", "Scripts"):
bindir = os.path.join(sys.prefix, bindir)
if os.path.exists(bindir):
if name:
bin_ext = os.path.splitext(sys.executable)[1] if sys.platform == 'win32' else ''
return os.path.join(bindir, name + bin_ext)
else:
return bindir
easy.error("ERROR: Scripts directory not found in '%s'" % (sys.prefix,))
sys.exit(1) | Get the directory for virtualenv stubs, or a full executable path
if C{name} is provided. |
def currentPage(self):
"""
Return a sequence of mappings of attribute IDs to column values, to
display to the user.
nextPage/prevPage will strive never to skip items whose column values
have not been returned by this method.
This is best explained by a demonstration. Let's say you have a table
viewing an item with attributes 'a' and 'b', like this:
oid | a | b
----+---+--
0 | 1 | 2
1 | 3 | 4
2 | 5 | 6
3 | 7 | 8
4 | 9 | 0
The table has 2 items per page. You call currentPage and receive a
page which contains items oid 0 and oid 1. item oid 1 is deleted.
If the next thing you do is to call nextPage, the result of currentPage
following that will be items beginning with item oid 2. This is
because although there are no longer enough items to populate a full
page from 0-1, the user has never seen item #2 on a page, so the 'next'
page from the user's point of view contains #2.
If instead, at that same point, the next thing you did was to call
currentPage, *then* nextPage and currentPage again, the first
currentPage results would contain items #0 and #2; the following
currentPage results would contain items #3 and #4. In this case, the
user *has* seen #2 already, so the user expects to see the following
item, not the same item again.
"""
self._updateResults(self._sortAttributeValue(0), equalToStart=True, refresh=True)
return self._currentResults | Return a sequence of mappings of attribute IDs to column values, to
display to the user.
nextPage/prevPage will strive never to skip items whose column values
have not been returned by this method.
This is best explained by a demonstration. Let's say you have a table
viewing an item with attributes 'a' and 'b', like this:
oid | a | b
----+---+--
0 | 1 | 2
1 | 3 | 4
2 | 5 | 6
3 | 7 | 8
4 | 9 | 0
The table has 2 items per page. You call currentPage and receive a
page which contains items oid 0 and oid 1. item oid 1 is deleted.
If the next thing you do is to call nextPage, the result of currentPage
following that will be items beginning with item oid 2. This is
because although there are no longer enough items to populate a full
page from 0-1, the user has never seen item #2 on a page, so the 'next'
page from the user's point of view contains #2.
If instead, at that same point, the next thing you did was to call
currentPage, *then* nextPage and currentPage again, the first
currentPage results would contain items #0 and #2; the following
currentPage results would contain items #3 and #4. In this case, the
user *has* seen #2 already, so the user expects to see the following
item, not the same item again. |
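The paging rule above can be illustrated with a self-contained toy pager; this is not the real implementation, only a sketch of the "never skip an unseen item" behaviour, with rows as (oid, a, b) tuples.

# Toy sketch of the paging semantics above -- not the real implementation.
class ToyPager:
    def __init__(self, rows, page_size=2):
        self.rows = list(rows)        # live rows, sorted by oid
        self.page_size = page_size
        self.first_oid = rows[0][0]   # oid the current page starts at
        self.last_seen = None         # last oid actually shown to the user

    def currentPage(self):
        page = [r for r in self.rows if r[0] >= self.first_oid][:self.page_size]
        if page:
            self.last_seen = page[-1][0]
        return page

    def nextPage(self):
        # advance only past rows the user has actually been shown
        floor = -1 if self.last_seen is None else self.last_seen
        later = [r for r in self.rows if r[0] > floor]
        if later:
            self.first_oid = later[0][0]

rows = [(0, 1, 2), (1, 3, 4), (2, 5, 6), (3, 7, 8), (4, 9, 0)]
p = ToyPager(rows)
p.currentPage()              # [(0, 1, 2), (1, 3, 4)]
p.rows.remove((1, 3, 4))     # item oid 1 is deleted
p.nextPage()
print(p.currentPage())       # [(2, 5, 6), (3, 7, 8)] -- oid 2 was never shown, so it leads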
def repartition(self, npartitions):
"""
Repartition data (Spark only).
Parameters
----------
npartitions : int
            Number of partitions after repartitioning.
"""
if self.mode == 'spark':
return self._constructor(self.values.repartition(npartitions)).__finalize__(self)
else:
notsupported(self.mode) | Repartition data (Spark only).
Parameters
----------
npartitions : int
            Number of partitions after repartitioning.
def minimize_metric(field, metric_func, nm, res, ival, roi=None,
coarse_acc=1, fine_acc=.005,
return_gradient=True, padding=True):
"""Find the focus by minimizing the `metric` of an image
Parameters
----------
field : 2d array
electric field
metric_func : callable
some metric to be minimized
ival : tuple of floats
(minimum, maximum) of interval to search in pixels
nm : float
RI of medium
res : float
wavelength in pixels
roi : rectangular region of interest (x1, y1, x2, y2)
Region of interest of `field` for which the metric will be
minimized. If not given, the entire `field` will be used.
coarse_acc : float
accuracy for determination of global minimum in pixels
    fine_acc : float
        accuracy for fine localization, as a percentage of gradient change
    return_gradient : bool
        return the x and y values of the computed gradient
padding : bool
perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionchanged:: 0.1.4
improved padding value and padding location
"""
if roi is not None:
assert len(roi) == len(field.shape) * \
2, "ROI must match field dimension"
initshape = field.shape
Fshape = len(initshape)
propfunc = fft_propagate
if roi is None:
if Fshape == 2:
roi = (0, 0, field.shape[0], field.shape[1])
else:
roi = (0, field.shape[0])
roi = 1*np.array(roi)
if padding:
# Pad with correct complex number
field = pad.pad_add(field)
if ival[0] > ival[1]:
ival = (ival[1], ival[0])
# set coarse interval
# coarse_acc = int(np.ceil(ival[1]-ival[0]))/100
    N = int(100 / coarse_acc)  # np.linspace needs an integer sample count
zc = np.linspace(ival[0], ival[1], N, endpoint=True)
# compute fft of field
fftfield = np.fft.fftn(field)
# fftplan = fftw3.Plan(fftfield.copy(), None, nthreads = _ncores,
# direction="backward", flags=_fftwflags)
# initiate gradient vector
gradc = np.zeros(zc.shape)
for i in range(len(zc)):
d = zc[i]
# fsp = propfunc(fftfield, d, nm, res, fftplan=fftplan)
fsp = propfunc(fftfield, d, nm, res)
if Fshape == 2:
gradc[i] = metric_func(fsp[roi[0]:roi[2], roi[1]:roi[3]])
else:
gradc[i] = metric_func(fsp[roi[0]:roi[1]])
minid = np.argmin(gradc)
if minid == 0:
zc -= zc[1] - zc[0]
minid += 1
if minid == len(zc) - 1:
zc += zc[1] - zc[0]
minid -= 1
zf = 1*zc
gradf = 1 * gradc
numfine = 10
mingrad = gradc[minid]
while True:
gradf = np.zeros(numfine)
ival = (zf[minid - 1], zf[minid + 1])
zf = np.linspace(ival[0], ival[1], numfine)
for i in range(len(zf)):
d = zf[i]
fsp = propfunc(fftfield, d, nm, res)
if Fshape == 2:
gradf[i] = metric_func(fsp[roi[0]:roi[2], roi[1]:roi[3]])
else:
gradf[i] = metric_func(fsp[roi[0]:roi[1]])
minid = np.argmin(gradf)
if minid == 0:
zf -= zf[1] - zf[0]
minid += 1
if minid == len(zf) - 1:
zf += zf[1] - zf[0]
minid -= 1
if abs(mingrad - gradf[minid]) / 100 < fine_acc:
break
minid = np.argmin(gradf)
fsp = propfunc(fftfield, zf[minid], nm, res)
if padding:
fsp = pad.pad_rem(fsp)
if return_gradient:
return fsp, zf[minid], [(zc, gradc), (zf, gradf)]
return fsp, zf[minid] | Find the focus by minimizing the `metric` of an image
Parameters
----------
field : 2d array
electric field
metric_func : callable
some metric to be minimized
ival : tuple of floats
(minimum, maximum) of interval to search in pixels
nm : float
RI of medium
res : float
wavelength in pixels
roi : rectangular region of interest (x1, y1, x2, y2)
Region of interest of `field` for which the metric will be
minimized. If not given, the entire `field` will be used.
coarse_acc : float
accuracy for determination of global minimum in pixels
    fine_acc : float
        accuracy for fine localization, as a percentage of gradient change
    return_gradient : bool
        return the x and y values of the computed gradient
padding : bool
perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionchanged:: 0.1.4
improved padding value and padding location |
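A hedged usage sketch: the propagation helpers (fft_propagate, the pad module) come from the surrounding library and are assumed importable together with minimize_metric; the field, optical parameters and sharpness metric below are made up for illustration.

# Hypothetical call; the field, parameters and metric are illustrative only.
import numpy as np

rng = np.random.default_rng(0)
field = np.exp(1j * rng.normal(scale=0.1, size=(128, 128)))  # toy complex field

def neg_gradient_sharpness(fld):
    # negated mean gradient magnitude of the intensity, so "sharper" == smaller
    gy, gx = np.gradient(np.abs(fld) ** 2)
    return -np.mean(np.hypot(gx, gy))

refocused, d_opt, (coarse, fine) = minimize_metric(
    field, neg_gradient_sharpness,
    nm=1.333,        # refractive index of the medium
    res=2.0,         # wavelength in pixels
    ival=(-50, 50),  # search interval in pixels
)
print("in-focus propagation distance [px]:", d_opt)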
def register(scheme):
"""
Registers a new scheme to the urlparser.
    :param scheme | <str>
"""
scheme = nstr(scheme)
urlparse.uses_fragment.append(scheme)
urlparse.uses_netloc.append(scheme)
urlparse.uses_params.append(scheme)
urlparse.uses_query.append(scheme)
urlparse.uses_relative.append(scheme) | Registers a new scheme to the urlparser.
    :param scheme | <str>
def user_fields(self, user):
"""
Retrieve the user fields for this user.
:param user: User object or id
"""
return self._query_zendesk(self.endpoint.user_fields, 'user_field', id=user) | Retrieve the user fields for this user.
:param user: User object or id |
def nlargest(self, n=5, keep='first'):
"""
Return the largest `n` elements.
Parameters
----------
n : int, default 5
Return this many descending sorted values.
keep : {'first', 'last', 'all'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
Returns
-------
Series
The `n` largest values in the Series, sorted in decreasing order.
See Also
--------
Series.nsmallest: Get the `n` smallest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
relative to the size of the ``Series`` object.
Examples
--------
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Malta": 434000, "Maldives": 434000,
... "Brunei": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Monserat": 5200}
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Malta 434000
Maldives 434000
Brunei 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Monserat 5200
dtype: int64
The `n` largest elements where ``n=5`` by default.
>>> s.nlargest()
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3``. Default `keep` value is 'first'
so Malta will be kept.
>>> s.nlargest(3)
France 65000000
Italy 59000000
Malta 434000
dtype: int64
The `n` largest elements where ``n=3`` and keeping the last duplicates.
Brunei will be kept since it is the last with value 434000 based on
the index order.
>>> s.nlargest(3, keep='last')
France 65000000
Italy 59000000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has five elements due to the three duplicates.
>>> s.nlargest(3, keep='all')
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest() | Return the largest `n` elements.
Parameters
----------
n : int, default 5
Return this many descending sorted values.
keep : {'first', 'last', 'all'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
Returns
-------
Series
The `n` largest values in the Series, sorted in decreasing order.
See Also
--------
Series.nsmallest: Get the `n` smallest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
relative to the size of the ``Series`` object.
Examples
--------
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Malta": 434000, "Maldives": 434000,
... "Brunei": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Monserat": 5200}
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Malta 434000
Maldives 434000
Brunei 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Monserat 5200
dtype: int64
The `n` largest elements where ``n=5`` by default.
>>> s.nlargest()
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3``. Default `keep` value is 'first'
so Malta will be kept.
>>> s.nlargest(3)
France 65000000
Italy 59000000
Malta 434000
dtype: int64
The `n` largest elements where ``n=3`` and keeping the last duplicates.
Brunei will be kept since it is the last with value 434000 based on
the index order.
>>> s.nlargest(3, keep='last')
France 65000000
Italy 59000000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has five elements due to the three duplicates.
>>> s.nlargest(3, keep='all')
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64 |
def down(self, path, link, repo):
"""Download files
"""
filename = link.split("/")[-1]
if not os.path.isfile(path + filename):
Download(path, link.split(), repo).start() | Download files |
def start_polling(self, interval):
"""
Start polling for term updates and streaming.
"""
interval = float(interval)
self.polling = True
# clear the stored list of terms - we aren't tracking any
self.term_checker.reset()
logger.info("Starting polling for changes to the track list")
while self.polling:
loop_start = time()
self.update_stream()
self.handle_exceptions()
# wait for the interval unless interrupted, compensating for time elapsed in the loop
elapsed = time() - loop_start
sleep(max(0.1, interval - elapsed))
logger.warning("Term poll ceased!") | Start polling for term updates and streaming. |
def listen(self, timeout=10):
"""
Listen for incoming messages. Timeout is used to check if the server must be switched off.
:param timeout: Socket Timeout in seconds
"""
self._socket.settimeout(float(timeout))
while not self.stopped.isSet():
try:
data, client_address = self._socket.recvfrom(4096)
if len(client_address) > 2:
client_address = (client_address[0], client_address[1])
except socket.timeout:
continue
except Exception as e:
                if self._cb_ignore_listen_exception is not None and callable(self._cb_ignore_listen_exception):
if self._cb_ignore_listen_exception(e, self):
continue
raise
try:
serializer = Serializer()
message = serializer.deserialize(data, client_address)
if isinstance(message, int):
logger.error("receive_datagram - BAD REQUEST")
rst = Message()
rst.destination = client_address
rst.type = defines.Types["RST"]
rst.code = message
rst.mid = self._messageLayer.fetch_mid()
self.send_datagram(rst)
continue
logger.debug("receive_datagram - " + str(message))
if isinstance(message, Request):
transaction = self._messageLayer.receive_request(message)
if transaction.request.duplicated and transaction.completed:
logger.debug("message duplicated, transaction completed")
if transaction.response is not None:
self.send_datagram(transaction.response)
continue
elif transaction.request.duplicated and not transaction.completed:
logger.debug("message duplicated, transaction NOT completed")
self._send_ack(transaction)
continue
args = (transaction, )
t = threading.Thread(target=self.receive_request, args=args)
t.start()
# self.receive_datagram(data, client_address)
elif isinstance(message, Response):
logger.error("Received response from %s", message.source)
else: # is Message
transaction = self._messageLayer.receive_empty(message)
if transaction is not None:
with transaction:
self._blockLayer.receive_empty(message, transaction)
self._observeLayer.receive_empty(message, transaction)
except RuntimeError:
logger.exception("Exception with Executor")
self._socket.close() | Listen for incoming messages. Timeout is used to check if the server must be switched off.
:param timeout: Socket Timeout in seconds |
def isel_points(self, dim='points', **indexers):
# type: (...) -> Dataset
"""Returns a new dataset with each array indexed pointwise along the
specified dimension(s).
This method selects pointwise values from each array and is akin to
the NumPy indexing behavior of `arr[[0, 1], [0, 1]]`, except this
method does not require knowing the order of each array's dimensions.
Parameters
----------
dim : str or DataArray or pandas.Index or other list-like object, optional
Name of the dimension to concatenate along. If dim is provided as a
string, it must be a new dimension name, in which case it is added
along axis=0. If dim is provided as a DataArray or Index or
list-like object, its name, which must not be present in the
dataset, is used as the dimension to concatenate along and the
values are added as a coordinate.
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by array-like objects. All indexers must be the same length and
1 dimensional.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the appropriate indexers. With
pointwise indexing, the new Dataset will always be a copy of the
original.
See Also
--------
Dataset.sel
Dataset.isel
Dataset.sel_points
DataArray.isel_points
""" # noqa
        warnings.warn('Dataset.isel_points is deprecated: use Dataset.isel() '
                      'instead.', DeprecationWarning, stacklevel=2)
indexer_dims = set(indexers)
def take(variable, slices):
            # Note: remove this helper function once numpy supports vindex
            # https://github.com/numpy/numpy/pull/6075
if hasattr(variable.data, 'vindex'):
# Special case for dask backed arrays to use vectorised list
# indexing
sel = variable.data.vindex[slices]
else:
# Otherwise assume backend is numpy array with 'fancy' indexing
sel = variable.data[slices]
return sel
def relevant_keys(mapping):
return [k for k, v in mapping.items()
if any(d in indexer_dims for d in v.dims)]
coords = relevant_keys(self.coords)
indexers = [(k, np.asarray(v)) # type: ignore
for k, v in indexers.items()]
indexers_dict = dict(indexers)
non_indexed_dims = set(self.dims) - indexer_dims
non_indexed_coords = set(self.coords) - set(coords)
# All the indexers should be iterables
# Check that indexers are valid dims, integers, and 1D
for k, v in indexers:
if k not in self.dims:
raise ValueError("dimension %s does not exist" % k)
if v.dtype.kind != 'i': # type: ignore
raise TypeError('Indexers must be integers')
if v.ndim != 1: # type: ignore
raise ValueError('Indexers must be 1 dimensional')
# all the indexers should have the same length
lengths = set(len(v) for k, v in indexers)
if len(lengths) > 1:
raise ValueError('All indexers must be the same length')
# Existing dimensions are not valid choices for the dim argument
if isinstance(dim, str):
if dim in self.dims:
# dim is an invalid string
raise ValueError('Existing dimension names are not valid '
'choices for the dim argument in sel_points')
elif hasattr(dim, 'dims'):
# dim is a DataArray or Coordinate
if dim.name in self.dims:
# dim already exists
raise ValueError('Existing dimensions are not valid choices '
'for the dim argument in sel_points')
# Set the new dim_name, and optionally the new dim coordinate
# dim is either an array-like or a string
if not utils.is_scalar(dim):
# dim is array like get name or assign 'points', get as variable
dim_name = 'points' if not hasattr(dim, 'name') else dim.name
dim_coord = as_variable(dim, name=dim_name)
else:
# dim is a string
dim_name = dim
dim_coord = None # type: ignore
reordered = self.transpose(
*(list(indexer_dims) + list(non_indexed_dims)))
variables = OrderedDict() # type: ignore
for name, var in reordered.variables.items():
if name in indexers_dict or any(
d in indexer_dims for d in var.dims):
# slice if var is an indexer or depends on an indexed dim
slc = [indexers_dict[k]
if k in indexers_dict
else slice(None) for k in var.dims]
var_dims = [dim_name] + [d for d in var.dims
if d in non_indexed_dims]
selection = take(var, tuple(slc))
var_subset = type(var)(var_dims, selection, var.attrs)
variables[name] = var_subset
else:
# If not indexed just add it back to variables or coordinates
variables[name] = var
coord_names = (set(coords) & set(variables)) | non_indexed_coords
dset = self._replace_vars_and_dims(variables, coord_names=coord_names)
# Add the dim coord to the new dset. Must be done after creation
        # because _replace_vars_and_dims can only access existing coords,
# not add new ones
if dim_coord is not None:
dset.coords[dim_name] = dim_coord
return dset | Returns a new dataset with each array indexed pointwise along the
specified dimension(s).
This method selects pointwise values from each array and is akin to
the NumPy indexing behavior of `arr[[0, 1], [0, 1]]`, except this
method does not require knowing the order of each array's dimensions.
Parameters
----------
dim : str or DataArray or pandas.Index or other list-like object, optional
Name of the dimension to concatenate along. If dim is provided as a
string, it must be a new dimension name, in which case it is added
along axis=0. If dim is provided as a DataArray or Index or
list-like object, its name, which must not be present in the
dataset, is used as the dimension to concatenate along and the
values are added as a coordinate.
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by array-like objects. All indexers must be the same length and
1 dimensional.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the appropriate indexers. With
pointwise indexing, the new Dataset will always be a copy of the
original.
See Also
--------
Dataset.sel
Dataset.isel
Dataset.sel_points
DataArray.isel_points |
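Since the method warns that it is deprecated, here is a hedged sketch of the replacement it points to: vectorized pointwise selection with Dataset.isel and DataArray indexers that share a new dimension (the dataset and names below are made up).

# Replacement sketch using the non-deprecated xarray API; the data is made up.
import numpy as np
import xarray as xr

ds = xr.Dataset(
    {"t": (("x", "y"), np.arange(12).reshape(3, 4))},
    coords={"x": [10, 20, 30], "y": [1, 2, 3, 4]},
)
# Equivalent of the deprecated ds.isel_points(x=[0, 2], y=[1, 3], dim='points'):
points = ds.isel(
    x=xr.DataArray([0, 2], dims="points"),
    y=xr.DataArray([1, 3], dims="points"),
)
print(points["t"].values)  # values at (x[0], y[1]) and (x[2], y[3]) -> [ 1 11]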
def __coord_mel_hz(n, fmin=0, fmax=11025.0, **_kwargs):
'''Get the frequencies for Mel bins'''
if fmin is None:
fmin = 0
if fmax is None:
fmax = 11025.0
basis = core.mel_frequencies(n, fmin=fmin, fmax=fmax)
basis[1:] -= 0.5 * np.diff(basis)
basis = np.append(np.maximum(0, basis), [fmax])
return basis | Get the frequencies for Mel bins |
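A hedged illustration of the bin edges this helper computes, using librosa's public mel_frequencies (the `core` module above is assumed to be librosa.core); n and fmax are arbitrary.

# Hedged illustration; assumes `core` above is librosa.core.
import numpy as np
import librosa

n, fmin, fmax = 6, 0.0, 8000.0
basis = librosa.mel_frequencies(n, fmin=fmin, fmax=fmax)
basis[1:] -= 0.5 * np.diff(basis)
basis = np.append(np.maximum(0, basis), [fmax])
print(basis)  # n + 1 edges: half-shifted Mel bin centres plus the fmax cap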