def yesterday(date=None):
"""yesterday once more"""
if not date:
return _date - datetime.timedelta(days=1)
else:
current_date = parse(date)
        return current_date - datetime.timedelta(days=1)
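# Hedged usage sketch for yesterday() above. It assumes `parse` is
# dateutil.parser.parse and `_date` is a module-level datetime holding the
# reference "today" -- neither binding is shown in the snippet, so both are
# assumptions made only for this illustration.
import datetime
from dateutil.parser import parse

_date = datetime.datetime(2019, 6, 2)

print(yesterday())              # 2019-06-01 00:00:00, one day before _date
print(yesterday('2019-06-02'))  # 2019-06-01 00:00:00, one day before the parsed date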
def safe_size_check(checked_path, error_detail, max_bytes=500000000):
"""Determines if a particular path is larger than expected. Useful before any recursive remove."""
actual_size = 0
for dirpath, dirnames, filenames in os.walk(checked_path):
for f in filenames:
fp = os.path.join(dirpath, f)
actual_size += os.path.getsize(fp)
assert actual_size <= max_bytes, "Path {} size of {} >= {} bytes. {}".format(
        checked_path, actual_size, max_bytes, error_detail)
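# Hedged usage sketch: guard a recursive delete with safe_size_check() above.
# The cache path, the shutil.rmtree call, and the 100 MB limit are illustrative
# choices, not part of the original snippet.
import os
import shutil

cache_dir = '/tmp/my_app_cache'   # hypothetical path
if os.path.isdir(cache_dir):
    safe_size_check(cache_dir, 'Refusing to delete unexpectedly large cache.',
                    max_bytes=100 * 1024 * 1024)
    shutil.rmtree(cache_dir)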
def valuecounter(table, *field, **kwargs):
"""
Find distinct values for the given field and count the number of
occurrences. Returns a :class:`dict` mapping values to counts. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar'],
... ['a', True],
... ['b'],
... ['b', True],
... ['c', False]]
>>> etl.valuecounter(table, 'foo')
Counter({'b': 2, 'a': 1, 'c': 1})
The `field` argument can be a single field name or index (starting from
zero) or a tuple of field names and/or indexes.
"""
missing = kwargs.get('missing', None)
counter = Counter()
for v in values(table, field, missing=missing):
try:
counter[v] += 1
except IndexError:
pass # short row
    return counter
def groups_roles(self, room_id=None, room_name=None, **kwargs):
"""Lists all user’s roles in the private group."""
if room_id:
return self.__call_api_get('groups.roles', roomId=room_id, kwargs=kwargs)
elif room_name:
return self.__call_api_get('groups.roles', roomName=room_name, kwargs=kwargs)
else:
        raise RocketMissingParamException('roomId or room_name required')
def register(im1, im2, params, exact_params=False, verbose=1):
""" register(im1, im2, params, exact_params=False, verbose=1)
Perform the registration of `im1` to `im2`, using the given
parameters. Returns `(im1_deformed, field)`, where `field` is a
tuple with arrays describing the deformation for each dimension
(x-y-z order, in world units).
Parameters:
* im1 (ndarray or file location):
The moving image (the one to deform).
* im2 (ndarray or file location):
The static (reference) image.
* params (dict or Parameters):
The parameters of the registration. Default parameters can be
obtained using the `get_default_params()` method. Note that any
parameter known to Elastix can be added to the parameter
struct, which enables tuning the registration in great detail.
See `get_default_params()` and the Elastix docs for more info.
* exact_params (bool):
If True, use the exact given parameters. If False (default)
will process the parameters, checking for incompatible
parameters, extending values to lists if a value needs to be
given for each dimension.
* verbose (int):
Verbosity level. If 0, will not print any progress. If 1, will
print the progress only. If 2, will print the full output
produced by the Elastix executable. Note that error messages
produced by Elastix will be printed regardless of the verbose
level.
If `im1` is a list of images, performs a groupwise registration.
In this case the resulting `field` is a list of fields, each
indicating the deformation to the "average" image.
"""
# Clear dir
tempdir = get_tempdir()
_clear_temp_dir()
# Reference image
refIm = im1
if isinstance(im1, (tuple,list)):
refIm = im1[0]
# Check parameters
if not exact_params:
params = _compile_params(params, refIm)
if isinstance(params, Parameters):
params = params.as_dict()
# Groupwise?
if im2 is None:
# todo: also allow using a constraint on the "last dimension"
if not isinstance(im1, (tuple,list)):
raise ValueError('im2 is None, but im1 is not a list.')
#
ims = im1
ndim = ims[0].ndim
# Create new image that is a combination of all images
N = len(ims)
new_shape = (N,) + ims[0].shape
im1 = np.zeros(new_shape, ims[0].dtype)
for i in range(N):
im1[i] = ims[i]
# Set parameters
#params['UseCyclicTransform'] = True # to be chosen by user
params['FixedImageDimension'] = im1.ndim
params['MovingImageDimension'] = im1.ndim
params['FixedImagePyramid'] = 'FixedSmoothingImagePyramid'
params['MovingImagePyramid'] = 'MovingSmoothingImagePyramid'
params['Metric'] = 'VarianceOverLastDimensionMetric'
params['Transform'] = 'BSplineStackTransform'
params['Interpolator'] = 'ReducedDimensionBSplineInterpolator'
params['SampleLastDimensionRandomly'] = True
params['NumSamplesLastDimension'] = 5
params['SubtractMean'] = True
        # No smoothing along that dimension
pyramidsamples = []
for i in range(params['NumberOfResolutions']):
pyramidsamples.extend( [0]+[2**i]*ndim )
pyramidsamples.reverse()
params['ImagePyramidSchedule'] = pyramidsamples
# Get paths of input images
path_im1, path_im2 = _get_image_paths(im1, im2)
# Determine path of parameter file and write params
path_params = _write_parameter_file(params)
# Get path of trafo param file
path_trafo_params = os.path.join(tempdir, 'TransformParameters.0.txt')
# Register
if True:
# Compile command to execute
command = [get_elastix_exes()[0],
'-m', path_im1, '-f', path_im2,
'-out', tempdir, '-p', path_params]
if verbose:
print("Calling Elastix to register images ...")
_system3(command, verbose)
# Try and load result
try:
a = _read_image_data('result.0.mhd')
except IOError as why:
            tmp = "An error occurred during registration: " + str(why)
raise RuntimeError(tmp)
# Find deformation field
if True:
# Compile command to execute
command = [get_elastix_exes()[1],
'-def', 'all', '-out', tempdir, '-tp', path_trafo_params]
_system3(command, verbose)
# Try and load result
try:
b = _read_image_data('deformationField.mhd')
except IOError as why:
            tmp = "An error occurred during transformation: " + str(why)
raise RuntimeError(tmp)
# Get deformation fields (for each image)
if im2 is None:
fields = [b[i] for i in range(b.shape[0])]
else:
fields = [b]
# Pull apart deformation fields in multiple images
for i in range(len(fields)):
field = fields[i]
if field.ndim == 2:
field = [field[:,d] for d in range(1)]
elif field.ndim == 3:
field = [field[:,:,d] for d in range(2)]
elif field.ndim == 4:
field = [field[:,:,:,d] for d in range(3)]
elif field.ndim == 5:
field = [field[:,:,:,:,d] for d in range(4)]
fields[i] = tuple(field)
if im2 is not None:
fields = fields[0] # For pairwise reg, return 1 field, not a list
# Clean and return
_clear_temp_dir()
    return a, fields
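# Hedged usage sketch for register() above, in the spirit of a pyelastix-style
# API. get_default_params() is the helper named in the docstring; the synthetic
# images, the attribute-style parameter tweak, and the assumption that Elastix
# is installed are all illustrative.
import numpy as np

im_fixed = np.zeros((64, 64), dtype='float32')
im_fixed[20:40, 20:40] = 1.0
im_moving = np.roll(im_fixed, 3, axis=1)      # same blob, shifted by 3 px

params = get_default_params()                 # as described in the docstring
params.NumberOfResolutions = 2                # assumed attribute-style access
im_deformed, field = register(im_moving, im_fixed, params, verbose=0)
# `field` is a tuple of per-dimension deformation arrays (x-y-z order).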
def reftrack_uptodate_data(rt, role):
"""Return the data for the uptodate status
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the uptodate status
:rtype: depending on role
:raises: None
"""
uptodate = rt.uptodate()
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
if uptodate:
return "Yes"
else:
return "No"
if role == QtCore.Qt.ForegroundRole:
if uptodate:
return QtGui.QColor(*UPTODATE_RGB)
elif rt.status():
            return QtGui.QColor(*OUTDATED_RGB)
def _process_file(input_file, output_file, apikey):
"""Shrinks input_file to output_file.
This function should be used only inside process_directory.
It takes input_file, tries to shrink it and if shrink was successful
save compressed image to output_file. Otherwise raise exception.
@return compressed: PNGResponse
"""
bytes_ = read_binary(input_file)
compressed = shrink(bytes_, apikey)
if compressed.success and compressed.bytes:
write_binary(output_file, compressed.bytes)
else:
if compressed.errno in FATAL_ERRORS:
raise StopProcessing(compressed)
elif compressed.errno == TinyPNGError.InternalServerError:
raise RetryProcessing(compressed)
    return compressed
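# Hedged usage sketch for _process_file() above: retry once on a transient
# server error, abort on fatal errors. The paths, API key, and single-retry
# policy are illustrative assumptions, not part of the original helper.
try:
    result = _process_file('in/photo.png', 'out/photo.png', apikey='MY_API_KEY')
except RetryProcessing:
    # transient server-side failure: try one more time before giving up
    result = _process_file('in/photo.png', 'out/photo.png', apikey='MY_API_KEY')
except StopProcessing:
    # fatal error reported by the API: stop the whole run
    raise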
def _build_migrated_variables(checkpoint_reader, name_value_fn):
"""Builds the TensorFlow variables of the migrated checkpoint.
Args:
    checkpoint_reader: A `tf.train.NewCheckpointReader` of the checkpoint to
be read from.
name_value_fn: Function taking two arguments, `name` and `value`, which
      returns the pair of new name and value for a variable of that name.
Returns:
Tuple of a dictionary with new variable names as keys and `tf.Variable`s as
values, and a dictionary that maps the old variable names to the new
variable names.
"""
names_to_shapes = checkpoint_reader.get_variable_to_shape_map()
new_name_to_variable = {}
name_to_new_name = {}
for name in names_to_shapes:
value = checkpoint_reader.get_tensor(name)
new_name, new_value = name_value_fn(name, value)
if new_name is None:
continue
name_to_new_name[name] = new_name
new_name_to_variable[new_name] = tf.Variable(new_value)
    return new_name_to_variable, name_to_new_name
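# Hedged usage sketch for _build_migrated_variables() above, using the TF1-style
# tf.train.NewCheckpointReader API the docstring refers to. The checkpoint path
# and the "rename a scope prefix" rule are illustrative assumptions.
import tensorflow as tf

def _rename_rule(name, value):
    if not name.startswith('old_scope/'):
        return None, None          # new name is None -> variable is skipped
    return name.replace('old_scope/', 'new_scope/', 1), value

reader = tf.train.NewCheckpointReader('/path/to/model.ckpt')   # hypothetical path
new_vars, name_map = _build_migrated_variables(reader, _rename_rule)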
def fasper(x, y, ofac, hifac, n_threads, MACC=4):
"""
Given abscissas x (which need not be equally spaced) and ordinates
y, and given a desired oversampling factor ofac (a typical value
    being 4 or larger), this routine creates an array wk1 with a
sequence of nout increasing frequencies (not angular frequencies)
up to hifac times the "average" Nyquist frequency, and creates
an array wk2 with the values of the Lomb normalized periodogram at
those frequencies. The arrays x and y are not altered. This
routine also returns jmax such that wk2(jmax) is the maximum
element in wk2, and prob, an estimate of the significance of that
maximum against the hypothesis of random noise. A small value of prob
indicates that a significant periodic signal is present.
Reference:
Press, W. H. & Rybicki, G. B. 1989
ApJ vol. 338, p. 277-280.
Fast algorithm for spectral analysis of unevenly sampled data
(1989ApJ...338..277P)
Arguments:
X : Abscissas array, (e.g. an array of times).
Y : Ordinates array, (e.g. corresponding counts).
Ofac : Oversampling factor.
Hifac : Hifac * "average" Nyquist frequency = highest frequency
for which values of the Lomb normalized periodogram will
be calculated.
n_threads : number of threads to use.
Returns:
Wk1 : An array of Lomb periodogram frequencies.
Wk2 : An array of corresponding values of the Lomb periodogram.
Nout : Wk1 & Wk2 dimensions (number of calculated frequencies)
Jmax : The array index corresponding to the MAX( Wk2 ).
Prob : False Alarm Probability of the largest Periodogram value
MACC : Number of interpolation points per 1/4 cycle
of highest frequency
History:
02/23/2009, v1.0, MF
        Translation of IDL code (orig. Numerical Recipes)
"""
#Check dimensions of input arrays
n = long(len(x))
if n != len(y):
print('Incompatible arrays.')
return
#print x, y, hifac, ofac
nout = int(0.5*ofac*hifac*n)
nfreqt = long(ofac*hifac*n*MACC) #Size the FFT as next power
nfreq = 64 # of 2 above nfreqt.
while nfreq < nfreqt:
nfreq = 2*nfreq
ndim = long(2*nfreq)
#Compute the mean, variance
ave = y.mean()
##sample variance because the divisor is N-1
var = ((y - y.mean())**2).sum()/(len(y) - 1)
# and range of the data.
xmin = x.min()
xmax = x.max()
xdif = xmax - xmin
#extrapolate the data into the workspaces
if is_pyfftw:
wk1 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex') * 0.
wk2 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex') * 0.
else:
wk1 = zeros(ndim, dtype='complex')
wk2 = zeros(ndim, dtype='complex')
fac = ndim/(xdif*ofac)
fndim = ndim
ck = ((x - xmin)*fac) % fndim
ckk = (2.0*ck) % fndim
for j in range(0, n):
__spread__(y[j] - ave, wk1, ndim, ck[j], MACC)
__spread__(1.0, wk2, ndim, ckk[j], MACC)
#Take the Fast Fourier Transforms.
if is_pyfftw:
fft_wk1 = pyfftw.builders.ifft(wk1, planner_effort='FFTW_ESTIMATE',
threads=n_threads)
wk1 = fft_wk1() * len(wk1)
fft_wk2 = pyfftw.builders.ifft(wk2, planner_effort='FFTW_ESTIMATE',
threads=n_threads)
wk2 = fft_wk2() * len(wk2)
else:
wk1 = ifft(wk1)*len(wk1)
wk2 = ifft(wk2)*len(wk1)
wk1 = wk1[1:nout + 1]
wk2 = wk2[1:nout + 1]
rwk1 = wk1.real
iwk1 = wk1.imag
rwk2 = wk2.real
iwk2 = wk2.imag
df = 1.0/(xdif*ofac)
#Compute the Lomb value for each frequency
hypo2 = 2.0*abs(wk2)
hc2wt = rwk2/hypo2
hs2wt = iwk2/hypo2
cwt = sqrt(0.5 + hc2wt)
swt = sign(hs2wt)*(sqrt(0.5 - hc2wt))
den = 0.5*n + hc2wt*rwk2 + hs2wt*iwk2
cterm = (cwt*rwk1 + swt*iwk1)**2./den
sterm = (cwt*iwk1 - swt*rwk1)**2./(n - den)
wk1 = df*(arange(nout, dtype='float') + 1.)
wk2 = (cterm + sterm)/(2.0*var)
pmax = wk2.max()
jmax = wk2.argmax()
#Significance estimation
#expy = exp(-wk2)
#effm = 2.0*(nout)/ofac
#sig = effm*expy
#ind = (sig > 0.01).nonzero()
#sig[ind] = 1.0-(1.0-expy[ind])**effm
#Estimate significance of largest peak value
expy = exp(-pmax)
effm = 2.0*(nout)/ofac
prob = effm*expy
if prob > 0.01:
prob = 1.0 - (1.0 - expy)**effm
    return wk1, wk2, nout, jmax, prob
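# Hedged usage sketch for fasper() above: a noisy sinusoid sampled at uneven
# times. numpy is assumed to be importable alongside the helpers the body uses
# (zeros, ifft, sqrt, ...), and the body itself uses Python 2-era calls such as
# long(), so this sketch mirrors that environment; the signal parameters are
# arbitrary.
import numpy as np

t = np.sort(100.0 * np.random.rand(300))            # uneven sampling times
flux = np.sin(2.0 * np.pi * t / 7.5) + 0.3 * np.random.randn(t.size)

freqs, power, nout, jmax, prob = fasper(t, flux, ofac=4.0, hifac=1.0, n_threads=1)
print('best period ~ %.2f, false-alarm probability %.3g' % (1.0 / freqs[jmax], prob))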
def clip_polygon(self, points):
"""Create a polygonal clip region. You must call endclip() after
you completed drawing. See also the polygon method."""
self.gsave()
self._path_polygon(points)
self.__clip_stack.append(self.__clip_box)
self.__clip_box = _intersect_box(self.__clip_box, _compute_bounding_box(points))
    self.clip_sub()
def get_sections(self, gradebook_id='', simple=False):
"""Get the sections for a gradebook.
Return a dictionary of types of sections containing a list of that
type for a given gradebook. Specified by a gradebookid.
If simple=True, a list of dictionaries is provided for each
section regardless of type. The dictionary only contains one
key ``SectionName``.
Args:
gradebook_id (str): unique identifier for gradebook, i.e. ``2314``
simple (bool): return a list of section names only
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
dict: Dictionary of section types where each type has a
list of sections
An example return value is:
.. code-block:: python
{
u'recitation':
[
{
u'editable': False,
u'groupId': 1293925,
u'groupingScheme': u'Recitation',
u'members': None,
u'name': u'Unassigned',
u'shortName': u'DefaultGroupNoCollisionPlease1234',
u'staffs': None
},
{
u'editable': True,
u'groupId': 1327565,
u'groupingScheme': u'Recitation',
u'members': None,
u'name': u'r01',
u'shortName': u'r01',
u'staffs': None},
{u'editable': True,
u'groupId': 1327555,
u'groupingScheme': u'Recitation',
u'members': None,
u'name': u'r02',
u'shortName': u'r02',
u'staffs': None
}
]
}
"""
params = dict(includeMembers='false')
section_data = self.get(
'sections/{gradebookId}'.format(
gradebookId=gradebook_id or self.gradebook_id
),
params=params
)
if simple:
sections = self.unravel_sections(section_data['data'])
return [{'SectionName': x['name']} for x in sections]
    return section_data['data']
def send_message(self, message):
"""Send a raw JSON-RPC message.
The *message* argument must be a dictionary containing a valid JSON-RPC
message according to the version passed into the constructor.
"""
if self._error:
raise compat.saved_exc(self._error)
elif self._transport is None:
raise JsonRpcError('not connected')
self._version.check_message(message)
    self._writer.write(serialize(message))
def write(self):
"""Write changes to our file, if there are changes at all
:raise IOError: if this is a read-only writer instance or if we could not obtain
a file lock"""
self._assure_writable("write")
if not self._dirty:
return
if isinstance(self._file_or_files, (list, tuple)):
raise AssertionError("Cannot write back if there is not exactly a single file to write to, have %i files"
% len(self._file_or_files))
# end assert multiple files
if self._has_includes():
        log.debug("Skipping write-back of configuration file as include files were merged in. " +
                  "Set merge_includes=False to prevent this.")
return
# end
fp = self._file_or_files
# we have a physical file on disk, so get a lock
is_file_lock = isinstance(fp, string_types + (FileType, ))
if is_file_lock:
self._lock._obtain_lock()
if not hasattr(fp, "seek"):
with open(self._file_or_files, "wb") as fp:
self._write(fp)
else:
fp.seek(0)
# make sure we do not overwrite into an existing file
if hasattr(fp, 'truncate'):
fp.truncate()
        self._write(fp)
def asDigraph(self):
"""
Generate a L{graphviz.Digraph} that represents this machine's
states and transitions.
@return: L{graphviz.Digraph} object; for more information, please
see the documentation for
U{graphviz<https://graphviz.readthedocs.io/>}
"""
from ._visualize import makeDigraph
return makeDigraph(
self._automaton,
stateAsString=lambda state: state.method.__name__,
inputAsString=lambda input: input.method.__name__,
outputAsString=lambda output: output.method.__name__,
        )
def issue(self, issue_instance_id):
"""Select an issue.
Parameters:
issue_instance_id: int id of the issue instance to select
Note: We are selecting issue instances, even though the command is called
issue.
"""
with self.db.make_session() as session:
selected_issue = (
session.query(IssueInstance)
.filter(IssueInstance.id == issue_instance_id)
.scalar()
)
if selected_issue is None:
self.warning(
f"Issue {issue_instance_id} doesn't exist. "
"Type 'issues' for available issues."
)
return
self.sources = self._get_leaves_issue_instance(
session, issue_instance_id, SharedTextKind.SOURCE
)
self.sinks = self._get_leaves_issue_instance(
session, issue_instance_id, SharedTextKind.SINK
)
self.current_issue_instance_id = int(selected_issue.id)
self.current_frame_id = -1
self.current_trace_frame_index = 1 # first one after the source
print(f"Set issue to {issue_instance_id}.")
if int(selected_issue.run_id) != self.current_run_id:
self.current_run_id = int(selected_issue.run_id)
print(f"Set run to {self.current_run_id}.")
print()
self._generate_trace_from_issue()
        self.show()
def get_permissions(self, user_id):
"""Fetches the permissions object from the graph."""
response = self.request(
"{0}/{1}/permissions".format(self.version, user_id), {}
)["data"]
    return {x["permission"] for x in response if x["status"] == "granted"}
def addSourceId(self, value):
'''Adds SourceId to External_Info
'''
if isinstance(value, Source_Id):
self.source_ids.append(value)
else:
        raise TypeError(
            'source_id Type should be Source_Id, not %s' % type(value))
def remove_user_from_acl(self, name, user):
"""Remove a user from the given acl (both allow and deny)."""
if name not in self._acl:
return False
if user in self._acl[name]['allow']:
self._acl[name]['allow'].remove(user)
if user in self._acl[name]['deny']:
self._acl[name]['deny'].remove(user)
    return True
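# Hedged illustration of the structure remove_user_from_acl() above works on:
# self._acl maps each acl name to separate 'allow' and 'deny' user lists. The
# names below are invented for this sketch.
example_acl = {
    'admin_room': {'allow': ['alice', 'bob'], 'deny': ['mallory']},
}
# After bot.remove_user_from_acl('admin_room', 'bob') the entry would become
#     {'admin_room': {'allow': ['alice'], 'deny': ['mallory']}}
# and a call with an unknown acl name simply returns False.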
def make_data(n,width):
"""make_data: compute matrix distance and time windows."""
x = dict([(i,100*random.random()) for i in range(1,n+1)])
y = dict([(i,100*random.random()) for i in range(1,n+1)])
c = {}
for i in range(1,n+1):
for j in range(1,n+1):
if j != i:
c[i,j] = distance(x[i],y[i],x[j],y[j])
e = {1:0}
l = {1:0}
start = 0
delta = int(76.*math.sqrt(n)/n * width)+1
for i in range(1,n):
j = i+1
start += c[i,j]
e[j] = max(start-delta,0)
l[j] = start + delta
    return c,x,y,e,l
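# Hedged usage sketch for make_data() above. It assumes `random`, `math` and a
# Euclidean distance(x1, y1, x2, y2) helper are available in the module, as the
# function body implies; the helper below and the values of n and width are
# illustrative only.
import math
import random

def distance(x1, y1, x2, y2):
    # plain Euclidean distance, assumed to match the helper used above
    return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)

random.seed(1)
c, x, y, e, l = make_data(n=10, width=5)
print(len(c))        # 90 directed arcs for 10 nodes (i != j)
print(e[1], l[1])    # node 1 has the fixed time window [0, 0]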
def _events(self):
"""Get the monitoring events from the daemon
This is used by the arbiter to get the monitoring events from all its satellites
:return: Events list serialized
:rtype: list
"""
with self.app.events_lock:
res = self.app.get_events()
    return serialize(res, True)
def get_layers_output(self, dataset):
"""Get output from each layer of the network.
:param dataset: input data
:return: list of np array, element i is the output of layer i
"""
layers_out = []
with self.tf_graph.as_default():
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
for l in self.layer_nodes:
layers_out.append(l.eval({self.input_data: dataset,
self.keep_prob: 1}))
if layers_out == []:
raise Exception("This method is not implemented for this model")
else:
        return layers_out
def symbol(self, index):
"""Generates symbol name from index"""
#if index is actually a string, just return it
if isinstance(index, str):
return index
elif (index < 0) or (index >= self.symtab.table_len):
self.error("symbol table index out of range")
sym = self.symtab.table[index]
#local variables are located at negative offset from frame pointer register
if sym.kind == SharedData.KINDS.LOCAL_VAR:
return "-{0}(1:%14)".format(sym.attribute * 4 + 4)
#parameters are located at positive offset from frame pointer register
elif sym.kind == SharedData.KINDS.PARAMETER:
return "{0}(1:%14)".format(8 + sym.attribute * 4)
elif sym.kind == SharedData.KINDS.CONSTANT:
return "${0}".format(sym.name)
else:
        return "{0}".format(sym.name)
def readSB(self, bits):
""" Read a signed int using the specified number of bits """
shift = 32 - bits
    return int32(self.readbits(bits) << shift) >> shift
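# Standalone illustration of the sign-extension trick readSB() above relies on:
# shift the raw bits to the top of a 32-bit word, then arithmetic-shift back so
# the field's high bit becomes the sign. The pure-Python helper below emulates
# the 32-bit wrap that int32() performs in the original code.
def sign_extend(raw, bits):
    shift = 32 - bits
    value = (raw << shift) & 0xFFFFFFFF      # emulate the 32-bit wrap of int32()
    if value & 0x80000000:                   # high bit set -> negative value
        value -= 0x100000000
    return value >> shift                    # Python's >> on ints is arithmetic

print(sign_extend(0b101, 3))   # -3: the high bit of a 3-bit field is the sign
print(sign_extend(0b011, 3))   #  3: high bit clear, value unchanged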
def traverse_imports(names):
"""
Walks over all the names imported in a dotted_as_names node.
"""
pending = [names]
while pending:
node = pending.pop()
if node.type == token.NAME:
yield node.value
elif node.type == syms.dotted_name:
yield "".join([ch.value for ch in node.children])
elif node.type == syms.dotted_as_name:
pending.append(node.children[0])
elif node.type == syms.dotted_as_names:
pending.extend(node.children[::-2])
else:
            raise AssertionError("unknown node type")
def constraint_matrices(model, array_type='dense', include_vars=False,
zero_tol=1e-6):
"""Create a matrix representation of the problem.
This is used for alternative solution approaches that do not use optlang.
The function will construct the equality matrix, inequality matrix and
bounds for the complete problem.
Notes
-----
    To accommodate non-zero equalities the problem will add the variable
"const_one" which is a variable that equals one.
Arguments
---------
model : cobra.Model
The model from which to obtain the LP problem.
array_type : string
The type of array to construct. if 'dense', return a standard
numpy.array, 'dok', or 'lil' will construct a sparse array using
scipy of the corresponding type and 'DataFrame' will give a
pandas `DataFrame` with metabolite indices and reaction columns.
zero_tol : float
The zero tolerance used to judge whether two bounds are the same.
Returns
-------
collections.namedtuple
A named tuple consisting of 6 matrices and 2 vectors:
- "equalities" is a matrix S such that S*vars = b. It includes a row
for each constraint and one column for each variable.
- "b" the right side of the equality equation such that S*vars = b.
- "inequalities" is a matrix M such that lb <= M*vars <= ub.
It contains a row for each inequality and as many columns as
variables.
- "bounds" is a compound matrix [lb ub] containing the lower and
upper bounds for the inequality constraints in M.
- "variable_fixed" is a boolean vector indicating whether the variable
at that index is fixed (lower bound == upper_bound) and
is thus bounded by an equality constraint.
- "variable_bounds" is a compound matrix [lb ub] containing the
lower and upper bounds for all variables.
"""
if array_type not in ('DataFrame', 'dense') and not dok_matrix:
raise ValueError('Sparse matrices require scipy')
array_builder = {
'dense': np.array, 'dok': dok_matrix, 'lil': lil_matrix,
'DataFrame': pd.DataFrame,
}[array_type]
Problem = namedtuple("Problem",
["equalities", "b", "inequalities", "bounds",
"variable_fixed", "variable_bounds"])
equality_rows = []
inequality_rows = []
inequality_bounds = []
b = []
for const in model.constraints:
lb = -np.inf if const.lb is None else const.lb
ub = np.inf if const.ub is None else const.ub
equality = (ub - lb) < zero_tol
coefs = const.get_linear_coefficients(model.variables)
coefs = [coefs[v] for v in model.variables]
if equality:
b.append(lb if abs(lb) > zero_tol else 0.0)
equality_rows.append(coefs)
else:
inequality_rows.append(coefs)
inequality_bounds.append([lb, ub])
var_bounds = np.array([[v.lb, v.ub] for v in model.variables])
fixed = var_bounds[:, 1] - var_bounds[:, 0] < zero_tol
results = Problem(
equalities=array_builder(equality_rows),
b=np.array(b),
inequalities=array_builder(inequality_rows),
bounds=array_builder(inequality_bounds),
variable_fixed=np.array(fixed),
variable_bounds=array_builder(var_bounds))
    return results
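# Hedged usage sketch for constraint_matrices() above with a cobrapy test model.
# cobra.test.create_test_model and the "textbook" model name are assumed to be
# available in the cobra version this helper targets; both are illustrative.
import cobra.test

model = cobra.test.create_test_model("textbook")
prob = constraint_matrices(model, array_type="dense")
print(prob.equalities.shape)        # (#equality constraints, #variables)
print(prob.variable_bounds[:3])     # first few [lb, ub] rows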
def create_from_pytz(cls, tz_info):
"""Create an instance using the result of the timezone() call in
"pytz".
"""
zone_name = tz_info.zone
utc_transition_times_list_raw = getattr(tz_info,
'_utc_transition_times',
None)
utc_transition_times_list = [tuple(utt.timetuple())
for utt
in utc_transition_times_list_raw] \
if utc_transition_times_list_raw is not None \
else None
transition_info_list_raw = getattr(tz_info,
'_transition_info',
None)
transition_info_list = [(utcoffset_td.total_seconds(),
dst_td.total_seconds(),
tzname)
for (utcoffset_td, dst_td, tzname)
in transition_info_list_raw] \
if transition_info_list_raw is not None \
else None
try:
utcoffset_dt = tz_info._utcoffset
except AttributeError:
utcoffset = None
else:
utcoffset = utcoffset_dt.total_seconds()
tzname = getattr(tz_info, '_tzname', None)
parent_class_name = getmro(tz_info.__class__)[1].__name__
return cls(zone_name, parent_class_name, utc_transition_times_list,
               transition_info_list, utcoffset, tzname)
def get_snapshots(self):
"""
Returns a list of all completed snapshots for this volume ID.
"""
ec2 = self.get_ec2_connection()
rs = ec2.get_all_snapshots()
all_vols = [self.volume_id] + self.past_volume_ids
snaps = []
for snapshot in rs:
if snapshot.volume_id in all_vols:
if snapshot.progress == '100%':
snapshot.date = boto.utils.parse_ts(snapshot.start_time)
snapshot.keep = True
snaps.append(snapshot)
snaps.sort(cmp=lambda x,y: cmp(x.date, y.date))
    return snaps
def remote(*args, **kwargs):
"""Define a remote function or an actor class.
This can be used with no arguments to define a remote function or actor as
follows:
.. code-block:: python
@ray.remote
def f():
return 1
@ray.remote
class Foo(object):
def method(self):
return 1
It can also be used with specific keyword arguments:
* **num_return_vals:** This is only for *remote functions*. It specifies
the number of object IDs returned by the remote function invocation.
* **num_cpus:** The quantity of CPU cores to reserve for this task or for
the lifetime of the actor.
* **num_gpus:** The quantity of GPUs to reserve for this task or for the
lifetime of the actor.
* **resources:** The quantity of various custom resources to reserve for
this task or for the lifetime of the actor. This is a dictionary mapping
strings (resource names) to numbers.
* **max_calls:** Only for *remote functions*. This specifies the maximum
number of times that a given worker can execute the given remote function
before it must exit (this can be used to address memory leaks in
third-party libraries or to reclaim resources that cannot easily be
released, e.g., GPU memory that was acquired by TensorFlow). By
default this is infinite.
* **max_reconstructions**: Only for *actors*. This specifies the maximum
number of times that the actor should be reconstructed when it dies
unexpectedly. The minimum valid value is 0 (default), which indicates
that the actor doesn't need to be reconstructed. And the maximum valid
value is ray.ray_constants.INFINITE_RECONSTRUCTIONS.
This can be done as follows:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_return_vals=2)
def f():
return 1, 2
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo(object):
def method(self):
return 1
"""
worker = get_global_worker()
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
# This is the case where the decorator is just @ray.remote.
return make_decorator(worker=worker)(args[0])
# Parse the keyword arguments from the decorator.
error_string = ("The @ray.remote decorator must be applied either "
"with no arguments and no parentheses, for example "
"'@ray.remote', or it must be applied using some of "
"the arguments 'num_return_vals', 'num_cpus', 'num_gpus', "
"'resources', 'max_calls', "
"or 'max_reconstructions', like "
"'@ray.remote(num_return_vals=2, "
"resources={\"CustomResource\": 1})'.")
assert len(args) == 0 and len(kwargs) > 0, error_string
for key in kwargs:
assert key in [
"num_return_vals", "num_cpus", "num_gpus", "resources",
"max_calls", "max_reconstructions"
], error_string
num_cpus = kwargs["num_cpus"] if "num_cpus" in kwargs else None
num_gpus = kwargs["num_gpus"] if "num_gpus" in kwargs else None
resources = kwargs.get("resources")
if not isinstance(resources, dict) and resources is not None:
raise Exception("The 'resources' keyword argument must be a "
"dictionary, but received type {}.".format(
type(resources)))
if resources is not None:
assert "CPU" not in resources, "Use the 'num_cpus' argument."
assert "GPU" not in resources, "Use the 'num_gpus' argument."
# Handle other arguments.
num_return_vals = kwargs.get("num_return_vals")
max_calls = kwargs.get("max_calls")
max_reconstructions = kwargs.get("max_reconstructions")
return make_decorator(
num_return_vals=num_return_vals,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
max_calls=max_calls,
max_reconstructions=max_reconstructions,
        worker=worker)
def initialize():
"""
Initialize the configuration system by installing YAML handlers.
Automatically done on first call to load() specified in this file.
"""
global is_initialized
# Add the custom multi-constructor
yaml.add_multi_constructor('!obj:', multi_constructor)
yaml.add_multi_constructor('!pkl:', multi_constructor_pkl)
yaml.add_multi_constructor('!import:', multi_constructor_import)
yaml.add_multi_constructor('!include:', multi_constructor_include)
def import_constructor(loader, node):
value = loader.construct_scalar(node)
return try_to_import(value)
yaml.add_constructor('!import', import_constructor)
yaml.add_implicit_resolver(
'!import',
re.compile(r'(?:[a-zA-Z_][\w_]+\.)+[a-zA-Z_][\w_]+')
)
    is_initialized = True
def all(self, data={}, **kwargs):
    """
Fetch all Virtual Account entities
Returns:
Dictionary of Virtual Account data
"""
    return super(VirtualAccount, self).all(data, **kwargs)
def receive_response(self, transaction):
"""
        Handles the Blocks option in an incoming response.
:type transaction: Transaction
:param transaction: the transaction that owns the response
:rtype : Transaction
:return: the edited transaction
"""
host, port = transaction.response.source
key_token = hash(str(host) + str(port) + str(transaction.response.token))
if key_token in self._block1_sent and transaction.response.block1 is not None:
item = self._block1_sent[key_token]
transaction.block_transfer = True
if item.m == 0:
transaction.block_transfer = False
del transaction.request.block1
return transaction
n_num, n_m, n_size = transaction.response.block1
if n_num != item.num: # pragma: no cover
logger.warning("Blockwise num acknowledged error, expected " + str(item.num) + " received " +
str(n_num))
return None
if n_size < item.size:
logger.debug("Scale down size, was " + str(item.size) + " become " + str(n_size))
item.size = n_size
request = transaction.request
del request.mid
del request.block1
request.payload = item.payload[item.byte: item.byte+item.size]
item.num += 1
item.byte += item.size
if len(item.payload) <= item.byte:
item.m = 0
else:
item.m = 1
request.block1 = (item.num, item.m, item.size)
elif transaction.response.block2 is not None:
num, m, size = transaction.response.block2
if m == 1:
transaction.block_transfer = True
if key_token in self._block2_sent:
item = self._block2_sent[key_token]
if num != item.num: # pragma: no cover
logger.error("Receive unwanted block")
return self.error(transaction, defines.Codes.REQUEST_ENTITY_INCOMPLETE.number)
if item.content_type is None:
item.content_type = transaction.response.content_type
if item.content_type != transaction.response.content_type: # pragma: no cover
logger.error("Content-type Error")
return self.error(transaction, defines.Codes.UNSUPPORTED_CONTENT_FORMAT.number)
item.byte += size
item.num = num + 1
item.size = size
item.m = m
item.payload += transaction.response.payload
else:
item = BlockItem(size, num + 1, m, size, transaction.response.payload,
transaction.response.content_type)
self._block2_sent[key_token] = item
request = transaction.request
del request.mid
del request.block2
request.block2 = (item.num, 0, item.size)
else:
transaction.block_transfer = False
if key_token in self._block2_sent:
if self._block2_sent[key_token].content_type != transaction.response.content_type: # pragma: no cover
logger.error("Content-type Error")
return self.error(transaction, defines.Codes.UNSUPPORTED_CONTENT_FORMAT.number)
transaction.response.payload = self._block2_sent[key_token].payload + transaction.response.payload
del self._block2_sent[key_token]
else:
transaction.block_transfer = False
    return transaction
def _request(self, *args, **kwargs):
    # type: (Any) -> Response
"""Make requests using configured :class:`requests.Session`.
Any error details will be extracted to an :class:`HTTPError`
which will contain relevant error details when printed."""
self._amend_request_kwargs(kwargs)
_response = self._requests_session.request(*args, **kwargs)
try:
_response.raise_for_status()
except HTTPError as e:
if e.response is not None:
raise_from(ConjureHTTPError(e), e)
raise e
    return _response
def _fail_with_undefined_error(self, *args, **kwargs):
"""Regular callback function for undefined objects that raises an
`UndefinedError` on call.
"""
if self._undefined_hint is None:
if self._undefined_obj is missing:
hint = '%r is undefined' % self._undefined_name
elif not isinstance(self._undefined_name, basestring):
hint = '%s has no element %r' % (
object_type_repr(self._undefined_obj),
self._undefined_name
)
else:
hint = '%r has no attribute %r' % (
object_type_repr(self._undefined_obj),
self._undefined_name
)
else:
hint = self._undefined_hint
    raise self._undefined_exception(hint)
def remove_group(self, group = None):
"""This method removes a group.
        The group argument is the group to remove and must be a v1Group.
"""
if group is None:
raise KPError("Need group to remove a group")
elif type(group) is not v1Group:
raise KPError("group must be v1Group")
children = []
entries = []
if group in self.groups:
# Save all children and entries to
# delete them later
children.extend(group.children)
entries.extend(group.entries)
# Finally remove group
group.parent.children.remove(group)
self.groups.remove(group)
else:
raise KPError("Given group doesn't exist")
self._num_groups -= 1
for i in children:
self.remove_group(i)
for i in entries:
self.remove_entry(i)
    return True
def _FormatExpression(self, frame, expression):
"""Evaluates a single watched expression and formats it into a string form.
If expression evaluation fails, returns error message string.
Args:
frame: Python stack frame in which the expression is evaluated.
expression: string expression to evaluate.
Returns:
Formatted expression value that can be used in the log message.
"""
rc, value = _EvaluateExpression(frame, expression)
if not rc:
message = _FormatMessage(value['description']['format'],
value['description'].get('parameters'))
return '<' + message + '>'
    return self._FormatValue(value)
def hash160(msg_bytes):
'''
byte-like -> bytes
'''
h = hashlib.new('ripemd160')
if 'decred' in riemann.get_current_network_name():
h.update(blake256(msg_bytes))
return h.digest()
h.update(sha256(msg_bytes))
    return h.digest()
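# Standalone check of the hash composition hash160() above performs on
# non-decred networks: RIPEMD160(SHA256(msg)). It assumes the module-level
# sha256 helper matches hashlib's, and that the local OpenSSL build exposes
# ripemd160 through hashlib.new().
import hashlib

def _ref_hash160(msg_bytes):
    inner = hashlib.sha256(msg_bytes).digest()
    return hashlib.new('ripemd160', inner).digest()

print(len(_ref_hash160(b'example')))   # 20-byte digest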
def GetOptionBool(self, section, option):
"""Get the value of an option in the config file.
Args:
section: string, the section of the config file to check.
option: string, the option to retrieve the value of.
Returns:
bool, True if the option is enabled or not set.
"""
return (not self.config.has_option(section, option)
            or self.config.getboolean(section, option))
def get_files_by_path(path):
'''Get a file or set of files from a file path
Return list of files with path
'''
if os.path.isfile(path):
return [path]
if os.path.isdir(path):
return get_morph_files(path)
    raise IOError('Invalid data path %s' % path)
def _get_data_from_rawfile(path_to_data, raw_data_id):
"""Get a HandwrittenData object that has ``raw_data_id`` from a pickle file
``path_to_data``.
:returns: The HandwrittenData object if ``raw_data_id`` is in
path_to_data, otherwise ``None``."""
loaded = pickle.load(open(path_to_data, "rb"))
raw_datasets = loaded['handwriting_datasets']
for raw_dataset in raw_datasets:
if raw_dataset['handwriting'].raw_data_id == raw_data_id:
return raw_dataset['handwriting']
    return None
def components(self, visible=True):
""" Return the component notes of chord
:param bool visible: returns the name of notes if True else list of int
:rtype: list[(str or int)]
:return: component notes of chord
"""
if self._on:
self._quality.append_on_chord(self.on, self.root)
    return self._quality.get_components(root=self._root, visible=visible)
def make_mask(filename, ext, trail_coords, sublen=75, subwidth=200, order=3,
sigma=4, pad=10, plot=False, verbose=False):
"""Create DQ mask for an image for a given satellite trail.
This mask can be added to existing DQ data using :func:`update_dq`.
.. note::
Unlike :func:`detsat`, multiprocessing is not available for
this function.
Parameters
----------
filename : str
FITS image filename.
ext : int, str, or tuple
Extension for science data, as accepted by ``astropy.io.fits``.
trail_coords : ndarray
One of the trails returned by :func:`detsat`.
This must be in the format of ``[[x0, y0], [x1, y1]]``.
sublen : int, optional
Length of strip to use as the fitting window for the trail.
subwidth : int, optional
Width of box to fit trail on.
order : int, optional
The order of the spline interpolation for image rotation.
See :func:`skimage.transform.rotate`.
sigma : float, optional
Sigma of the satellite trail for detection. If points are
a given sigma above the background in the subregion then it is
marked as a satellite. This may need to be lowered for resolved
trails.
pad : int, optional
Amount of extra padding in pixels to give the satellite mask.
plot : bool, optional
Plot the result.
verbose : bool, optional
Print extra information to the terminal, mostly for debugging.
Returns
-------
mask : ndarray
Boolean array marking the satellite trail with `True`.
Raises
------
ImportError
Missing scipy or skimage>=0.11 packages.
IndexError
Invalid subarray indices.
ValueError
Image has no positive values, trail subarray too small, or
trail profile not found.
"""
if not HAS_OPDEP:
raise ImportError('Missing scipy or skimage>=0.11 packages')
if verbose:
t_beg = time.time()
fname = '{0}[{1}]'.format(filename, ext)
image = fits.getdata(filename, ext)
dx = image.max()
if dx <= 0:
raise ValueError('Image has no positive values')
# rescale the image
image = image / dx
# make sure everything is at least 0
image[image < 0] = 0
(x0, y0), (x1, y1) = trail_coords # p0, p1
# Find out how much to rotate the image
rad = np.arctan2(y1 - y0, x1 - x0)
newrad = (np.pi * 2) - rad
deg = np.degrees(rad)
if verbose:
print('Rotation: {0}'.format(deg))
rotate = transform.rotate(image, deg, resize=True, order=order)
if plot and plt is not None:
plt.ion()
mean = np.median(image)
stddev = image.std()
lower = mean - stddev
upper = mean + stddev
fig1, ax1 = plt.subplots()
ax1.imshow(image, vmin=lower, vmax=upper, cmap=plt.cm.gray)
ax1.set_title(fname)
fig2, ax2 = plt.subplots()
ax2.imshow(rotate, vmin=lower, vmax=upper, cmap=plt.cm.gray)
ax2.set_title('{0} rotated by {1} deg'.format(fname, deg))
plt.draw()
# Will do all of this in the loop, but want to make sure there is a
# good point first and that there is indeed a profile to fit.
# get starting point
sx, sy = _rotate_point((x0, y0), newrad, image.shape, rotate.shape)
# start with one subarray around p0
dx = int(subwidth / 2)
ix0, ix1, iy0, iy1 = _get_valid_indices(
rotate.shape, sx - dx, sx + dx, sy - sublen, sy + sublen)
subr = rotate[iy0:iy1, ix0:ix1]
if len(subr) <= sublen:
raise ValueError('Trail subarray size is {0} but expected {1} or '
'larger'.format(len(subr), sublen))
# Flatten the array so we are looking along rows
# Take median of each row, should filter out most outliers
# This list will get appended in the loop
medarr = np.median(subr, axis=1)
flat = [medarr]
# get the outliers
# mean = biweight_location(medarr)
mean = sigma_clipped_stats(medarr)[0]
stddev = biweight_midvariance(medarr)
# only flag things that are sigma from the mean
z = np.where(medarr > (mean + (sigma * stddev)))[0]
if plot and plt is not None:
fig1, ax1 = plt.subplots()
ax1.plot(medarr, 'b.')
ax1.plot(z, medarr[z], 'r.')
ax1.set_xlabel('Index')
ax1.set_ylabel('Value')
ax1.set_title('Median array in flat[0]')
plt.draw()
# Make sure there is something in the first pass before trying to move on
if len(z) < 1:
raise ValueError(
'First look at finding a profile failed. '
'Nothing found at {0} from background! '
'Adjust parameters and try again.'.format(sigma))
# get the bounds of the flagged points
lower = z.min()
upper = z.max()
diff = upper - lower
    # add in a padding value to make sure all of the wings are accounted for
lower = lower - pad
upper = upper + pad
# for plotting see how the profile was made (append to plot above)
if plot and plt is not None:
padind = np.arange(lower, upper)
ax1.plot(padind, medarr[padind], 'yx')
plt.draw()
# start to create a mask
mask = np.zeros(rotate.shape)
lowerx, upperx, lowery, uppery = _get_valid_indices(
mask.shape, np.floor(sx - subwidth), np.ceil(sx + subwidth),
np.floor(sy - sublen + lower), np.ceil(sy - sublen + upper))
mask[lowery:uppery, lowerx:upperx] = 1
done = False
first = True
nextx = upperx # np.ceil(sx + subwidth)
centery = np.ceil(lowery + diff) # np.ceil(sy - sublen + lower + diff)
counter = 0
while not done:
# move to the right of the centerpoint first. do the same
# as above but keep moving right until the edge is hit.
ix0, ix1, iy0, iy1 = _get_valid_indices(
rotate.shape, nextx - dx, nextx + dx,
centery - sublen, centery + sublen)
subr = rotate[iy0:iy1, ix0:ix1]
# determines the edge, if the subr is not good, then the edge was
# hit.
if 0 in subr.shape:
if verbose:
print('Hit edge, subr shape={0}, first={1}'.format(
subr.shape, first))
if first:
first = False
centery = sy
nextx = sx
else:
done = True
continue
medarr = np.median(subr, axis=1)
flat.append(medarr)
# mean = biweight_location(medarr)
mean = sigma_clipped_stats(medarr, sigma=sigma)[0]
# Might give RuntimeWarning
stddev = biweight_midvariance(medarr)
z = np.where(medarr > (mean + (sigma * stddev)))[0]
if len(z) < 1:
if first:
if verbose:
print('No good profile found for counter={0}. Start '
'moving left from starting point.'.format(counter))
centery = sy
nextx = sx
first = False
else:
if verbose:
print('z={0} is less than 1, subr shape={1}, '
'we are done'.format(z, subr.shape))
done = True
continue
# get the bounds of the flagged points
lower = z.min()
upper = z.max()
diff = upper - lower
        # add in a padding value to make sure all of the wings
# are accounted for
lower = np.floor(lower - pad)
upper = np.ceil(upper + pad)
lowerx, upperx, lowery, uppery = _get_valid_indices(
mask.shape,
np.floor(nextx - subwidth),
np.ceil(nextx + subwidth),
np.floor(centery - sublen + lower),
np.ceil(centery - sublen + upper))
mask[lowery:uppery, lowerx:upperx] = 1
# lower_p = (lowerx, lowery)
upper_p = (upperx, uppery)
# lower_t = _rotate_point(
# lower_p, newrad, image.shape, rotate.shape, reverse=True)
upper_t = _rotate_point(
upper_p, newrad, image.shape, rotate.shape, reverse=True)
# lowy = np.floor(lower_t[1])
highy = np.ceil(upper_t[1])
# lowx = np.floor(lower_t[0])
highx = np.ceil(upper_t[0])
# Reset the next subr to be at the center of the profile
if first:
nextx = nextx + dx
centery = lowery + diff # centery - sublen + lower + diff
if (nextx + subwidth) > rotate.shape[1]:
if verbose:
print('Hit rotate edge at counter={0}'.format(counter))
first = False
elif (highy > image.shape[0]) or (highx > image.shape[1]):
if verbose:
print('Hit image edge at counter={0}'.format(counter))
first = False
if not first:
centery = sy
nextx = sx
# Not first, this is the pass the other way.
else:
nextx = nextx - dx
centery = lowery + diff # centery - sublen + lower + diff
if (nextx - subwidth) < 0:
if verbose:
print('Hit rotate edge at counter={0}'.format(counter))
done = True
elif (highy > image.shape[0]) or (highx > image.shape[1]):
if verbose:
print('Hit image edge at counter={0}'.format(counter))
done = True
counter += 1
        # make sure it does not try to go infinitely
if counter > 500:
if verbose:
print('Too many loops, exiting')
done = True
# End while
rot = transform.rotate(mask, -deg, resize=True, order=1)
ix0 = (rot.shape[1] - image.shape[1]) / 2
iy0 = (rot.shape[0] - image.shape[0]) / 2
lowerx, upperx, lowery, uppery = _get_valid_indices(
rot.shape, ix0, image.shape[1] + ix0, iy0, image.shape[0] + iy0)
mask = rot[lowery:uppery, lowerx:upperx]
if mask.shape != image.shape:
warnings.warn(
'Output mask shape is {0} but input image shape is '
'{1}'.format(mask.shape, image.shape), AstropyUserWarning)
# Change to boolean mask
mask = mask.astype(np.bool)
if plot and plt is not None:
# debugging array
test = image.copy()
test[mask] = 0
mean = np.median(test)
stddev = test.std()
lower = mean - stddev
upper = mean + stddev
fig1, ax1 = plt.subplots()
ax1.imshow(test, vmin=lower, vmax=upper, cmap=plt.cm.gray)
ax1.set_title('Masked image')
fig2, ax2 = plt.subplots()
ax2.imshow(mask, cmap=plt.cm.gray)
ax2.set_title('DQ mask')
plt.draw()
if verbose:
t_end = time.time()
print('Run time: {0} s'.format(t_end - t_beg))
return mask | Create DQ mask for an image for a given satellite trail.
This mask can be added to existing DQ data using :func:`update_dq`.
.. note::
Unlike :func:`detsat`, multiprocessing is not available for
this function.
Parameters
----------
filename : str
FITS image filename.
ext : int, str, or tuple
Extension for science data, as accepted by ``astropy.io.fits``.
trail_coords : ndarray
One of the trails returned by :func:`detsat`.
This must be in the format of ``[[x0, y0], [x1, y1]]``.
sublen : int, optional
Length of strip to use as the fitting window for the trail.
subwidth : int, optional
Width of box to fit trail on.
order : int, optional
The order of the spline interpolation for image rotation.
See :func:`skimage.transform.rotate`.
sigma : float, optional
Sigma of the satellite trail for detection. If points are
a given sigma above the background in the subregion then it is
marked as a satellite. This may need to be lowered for resolved
trails.
pad : int, optional
Amount of extra padding in pixels to give the satellite mask.
plot : bool, optional
Plot the result.
verbose : bool, optional
Print extra information to the terminal, mostly for debugging.
Returns
-------
mask : ndarray
Boolean array marking the satellite trail with `True`.
Raises
------
ImportError
Missing scipy or skimage>=0.11 packages.
IndexError
Invalid subarray indices.
ValueError
Image has no positive values, trail subarray too small, or
trail profile not found. |
def network_info(name=None, **kwargs):
'''
Return information about a virtual network, given its name.
:param name: virtual network name
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
If no name is provided, return the info for all defined virtual networks.
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.network_info default
'''
result = {}
conn = __get_conn(**kwargs)
def _net_get_leases(net):
'''
Get all DHCP leases for a network
'''
leases = net.DHCPLeases()
for lease in leases:
if lease['type'] == libvirt.VIR_IP_ADDR_TYPE_IPV4:
lease['type'] = 'ipv4'
elif lease['type'] == libvirt.VIR_IP_ADDR_TYPE_IPV6:
lease['type'] = 'ipv6'
else:
lease['type'] = 'unknown'
return leases
try:
nets = [net for net in conn.listAllNetworks() if name is None or net.name() == name]
result = {net.name(): {
'uuid': net.UUIDString(),
'bridge': net.bridgeName(),
'autostart': net.autostart(),
'active': net.isActive(),
'persistent': net.isPersistent(),
'leases': _net_get_leases(net)} for net in nets}
except libvirt.libvirtError as err:
log.debug('Silenced libvirt error: %s', str(err))
finally:
conn.close()
return result | Return information about a virtual network, given its name.
:param name: virtual network name
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
If no name is provided, return the info for all defined virtual networks.
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.network_info default |
def create_router(self, name, tenant_id, subnet_lst):
"""Create a openstack router and add the interfaces. """
try:
body = {'router': {'name': name, 'tenant_id': tenant_id,
'admin_state_up': True}}
router = self.neutronclient.create_router(body=body)
rout_dict = router.get('router')
rout_id = rout_dict.get('id')
except Exception as exc:
LOG.error("Failed to create router with name %(name)s"
" Exc %(exc)s", {'name': name, 'exc': str(exc)})
return None
ret = self.add_intf_router(rout_id, tenant_id, subnet_lst)
if not ret:
try:
ret = self.neutronclient.delete_router(rout_id)
except Exception as exc:
LOG.error("Failed to delete router %(name)s, Exc %(exc)s",
{'name': name, 'exc': str(exc)})
return None
return rout_id | Create an OpenStack router and add the interfaces.
def _validate_response(url, response):
"""Validates that the response from Google was successful."""
if response['status'] not in [GooglePlaces.RESPONSE_STATUS_OK,
GooglePlaces.RESPONSE_STATUS_ZERO_RESULTS]:
error_detail = ('Request to URL %s failed with response code: %s' %
(url, response['status']))
raise GooglePlacesError(error_detail) | Validates that the response from Google was successful. |
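A minimal usage sketch for _validate_response above, assuming GooglePlacesError and the GooglePlaces status constants come from the same googleplaces module; the URL and response payload are illustrative only:
# Hypothetical response dict; only the 'status' key is inspected.
bad_response = {'status': 'REQUEST_DENIED'}
try:
    _validate_response('https://example.invalid/maps/api/place/search/json', bad_response)
except GooglePlacesError as exc:
    print(exc)  # Request to URL ... failed with response code: REQUEST_DENIED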
def complete_pool_name(arg):
""" Returns list of matching pool names
"""
search_string = '^'
if arg is not None:
search_string += arg
res = Pool.search({
'operator': 'regex_match',
'val1': 'name',
'val2': search_string
})
ret = []
for p in res['result']:
ret.append(p.name)
return ret | Returns list of matching pool names |
def get(cls, user_id, db_session=None):
"""
Fetch row using primary key -
will use existing object in session if already present
:param user_id:
:param db_session:
:return:
"""
db_session = get_db_session(db_session)
return db_session.query(cls.model).get(user_id) | Fetch row using primary key -
will use existing object in session if already present
:param user_id:
:param db_session:
:return: |
def main():
"""Takes crash data via stdin and generates a Socorro signature"""
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
'-v', '--verbose', help='increase output verbosity', action='store_true'
)
args = parser.parse_args()
generator = SignatureGenerator(debug=args.verbose)
crash_data = json.loads(sys.stdin.read())
ret = generator.generate(crash_data)
print(json.dumps(ret, indent=2)) | Takes crash data via stdin and generates a Socorro signature |
def font_size_splitter(font_map):
"""
Split fonts into 4 categories (small, medium, large, xlarge) by the maximum letter length in each font.
:param font_map: input fontmap
:type font_map : dict
:return: split fonts as dict
"""
small_font = []
medium_font = []
large_font = []
xlarge_font = []
fonts = set(font_map.keys()) - set(RANDOM_FILTERED_FONTS)
for font in fonts:
length = max(map(len, font_map[font][0].values()))
if length <= FONT_SMALL_THRESHOLD:
small_font.append(font)
elif length > FONT_SMALL_THRESHOLD and length <= FONT_MEDIUM_THRESHOLD:
medium_font.append(font)
elif length > FONT_MEDIUM_THRESHOLD and length <= FONT_LARGE_THRESHOLD:
large_font.append(font)
else:
xlarge_font.append(font)
return {
"small_list": small_font,
"medium_list": medium_font,
"large_list": large_font,
"xlarge_list": xlarge_font} | Split fonts to 4 category (small,medium,large,xlarge) by maximum length of letter in each font.
:param font_map: input fontmap
:type font_map : dict
:return: split fonts as dict |
def is_promisc(ip, fake_bcast="ff:ff:00:00:00:00", **kargs):
"""Try to guess if target is in Promisc mode. The target is provided by its ip.""" # noqa: E501
responses = srp1(Ether(dst=fake_bcast) / ARP(op="who-has", pdst=ip), type=ETH_P_ARP, iface_hint=ip, timeout=1, verbose=0, **kargs) # noqa: E501
return responses is not None | Try to guess if target is in Promisc mode. The target is provided by its ip. |
def json_decode(data_type, serialized_obj, caller_permissions=None,
alias_validators=None, strict=True, old_style=False):
"""Performs the reverse operation of json_encode.
Args:
data_type (Validator): Validator for serialized_obj.
serialized_obj (str): The JSON string to deserialize.
caller_permissions (list): The list of raw-string caller permissions
with which to serialize.
alias_validators (Optional[Mapping[bv.Validator, Callable[[], None]]]):
Custom validation functions. These must raise bv.ValidationError on
failure.
strict (bool): If strict, then unknown struct fields will raise an
error, and unknown union variants will raise an error even if a
catch all field is specified. strict should only be used by a
recipient of serialized JSON if it's guaranteed that its Stone
specs are at least as recent as the senders it receives messages
from.
Returns:
The returned object depends on the input data_type.
- Boolean -> bool
- Bytes -> bytes
- Float -> float
- Integer -> long
- List -> list
- Map -> dict
- Nullable -> None or its wrapped type.
- String -> unicode (PY2) or str (PY3)
- Struct -> An instance of its definition attribute.
- Timestamp -> datetime.datetime
- Union -> An instance of its definition attribute.
"""
try:
deserialized_obj = json.loads(serialized_obj)
except ValueError:
raise bv.ValidationError('could not decode input as JSON')
else:
return json_compat_obj_decode(
data_type, deserialized_obj, caller_permissions=caller_permissions,
alias_validators=alias_validators, strict=strict, old_style=old_style) | Performs the reverse operation of json_encode.
Args:
data_type (Validator): Validator for serialized_obj.
serialized_obj (str): The JSON string to deserialize.
caller_permissions (list): The list of raw-string caller permissions
with which to serialize.
alias_validators (Optional[Mapping[bv.Validator, Callable[[], None]]]):
Custom validation functions. These must raise bv.ValidationError on
failure.
strict (bool): If strict, then unknown struct fields will raise an
error, and unknown union variants will raise an error even if a
catch all field is specified. strict should only be used by a
recipient of serialized JSON if it's guaranteed that its Stone
specs are at least as recent as the senders it receives messages
from.
Returns:
The returned object depends on the input data_type.
- Boolean -> bool
- Bytes -> bytes
- Float -> float
- Integer -> long
- List -> list
- Map -> dict
- Nullable -> None or its wrapped type.
- String -> unicode (PY2) or str (PY3)
- Struct -> An instance of its definition attribute.
- Timestamp -> datetime.datetime
- Union -> An instance of its definition attribute. |
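A minimal usage sketch for json_decode above, assuming bv is the Stone validators module already imported by this file; the validators and payloads are illustrative:
int_list = bv.List(bv.Int32())
print(json_decode(int_list, '[1, 2, 3]'))             # [1, 2, 3]
print(json_decode(bv.Nullable(bv.String()), 'null'))  # None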
def add_highlight(self, artist, *args, **kwargs):
"""
Create, add, and return a highlighting artist.
This method is should be called with an "unpacked" `Selection`,
possibly with some fields set to None.
It is up to the caller to register the artist with the proper
`Selection` (by calling ``sel.extras.append`` on the result of this
method) in order to ensure cleanup upon deselection.
"""
hl = _pick_info.make_highlight(
artist, *args,
**ChainMap({"highlight_kwargs": self.highlight_kwargs}, kwargs))
if hl:
artist.axes.add_artist(hl)
return hl | Create, add, and return a highlighting artist.
This method should be called with an "unpacked" `Selection`,
possibly with some fields set to None.
It is up to the caller to register the artist with the proper
`Selection` (by calling ``sel.extras.append`` on the result of this
method) in order to ensure cleanup upon deselection. |
def dump(self):
""" raw dump of all records in the b-tree """
print("pagesize=%08x, reccount=%08x, pagecount=%08x" % (self.pagesize, self.reccount, self.pagecount))
self.dumpfree()
self.dumptree(self.firstindex) | raw dump of all records in the b-tree |
def replace_volume_attachment(self, name, body, **kwargs):
"""
replace the specified VolumeAttachment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_volume_attachment(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the VolumeAttachment (required)
:param V1VolumeAttachment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1VolumeAttachment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_volume_attachment_with_http_info(name, body, **kwargs)
else:
(data) = self.replace_volume_attachment_with_http_info(name, body, **kwargs)
return data | replace the specified VolumeAttachment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_volume_attachment(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the VolumeAttachment (required)
:param V1VolumeAttachment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1VolumeAttachment
If the method is called asynchronously,
returns the request thread. |
def get_subgraph(graph,
seed_method: Optional[str] = None,
seed_data: Optional[Any] = None,
expand_nodes: Optional[List[BaseEntity]] = None,
remove_nodes: Optional[List[BaseEntity]] = None,
):
"""Run a pipeline query on graph with multiple sub-graph filters and expanders.
Order of Operations:
1. Seeding by given function name and data
2. Add nodes
3. Remove nodes
:param pybel.BELGraph graph: A BEL graph
:param seed_method: The name of the get_subgraph_by_* function to use
:param seed_data: The argument to pass to the get_subgraph function
:param expand_nodes: Add the neighborhoods around all of these nodes
:param remove_nodes: Remove these nodes and all of their in/out edges
:rtype: Optional[pybel.BELGraph]
"""
# Seed by the given function
if seed_method == SEED_TYPE_INDUCTION:
result = get_subgraph_by_induction(graph, seed_data)
elif seed_method == SEED_TYPE_PATHS:
result = get_subgraph_by_all_shortest_paths(graph, seed_data)
elif seed_method == SEED_TYPE_NEIGHBORS:
result = get_subgraph_by_neighborhood(graph, seed_data)
elif seed_method == SEED_TYPE_DOUBLE_NEIGHBORS:
result = get_subgraph_by_second_neighbors(graph, seed_data)
elif seed_method == SEED_TYPE_UPSTREAM:
result = get_multi_causal_upstream(graph, seed_data)
elif seed_method == SEED_TYPE_DOWNSTREAM:
result = get_multi_causal_downstream(graph, seed_data)
elif seed_method == SEED_TYPE_PUBMED:
result = get_subgraph_by_pubmed(graph, seed_data)
elif seed_method == SEED_TYPE_AUTHOR:
result = get_subgraph_by_authors(graph, seed_data)
elif seed_method == SEED_TYPE_ANNOTATION:
result = get_subgraph_by_annotations(graph, seed_data['annotations'], or_=seed_data.get('or'))
elif seed_method == SEED_TYPE_SAMPLE:
result = get_random_subgraph(
graph,
number_edges=seed_data.get('number_edges'),
seed=seed_data.get('seed')
)
elif not seed_method: # Otherwise, don't seed a sub-graph
result = graph.copy()
log.debug('no seed function - using full network: %s', result.name)
else:
raise ValueError('Invalid seed method: {}'.format(seed_method))
if result is None:
log.debug('query returned no results')
return
log.debug('original graph has (%s nodes / %s edges)', result.number_of_nodes(), result.number_of_edges())
# Expand around the given nodes
if expand_nodes:
expand_nodes_neighborhoods(graph, result, expand_nodes)
log.debug('graph expanded to (%s nodes / %s edges)', result.number_of_nodes(), result.number_of_edges())
# Delete the given nodes
if remove_nodes:
for node in remove_nodes:
if node not in result:
log.debug('%s is not in graph %s', node, graph.name)
continue
result.remove_node(node)
log.debug('graph contracted to (%s nodes / %s edges)', result.number_of_nodes(), result.number_of_edges())
log.debug(
'Subgraph coming from %s (seed type) %s (data) contains %d nodes and %d edges',
seed_method,
seed_data,
result.number_of_nodes(),
result.number_of_edges()
)
return result | Run a pipeline query on graph with multiple sub-graph filters and expanders.
Order of Operations:
1. Seeding by given function name and data
2. Add nodes
3. Remove nodes
:param pybel.BELGraph graph: A BEL graph
:param seed_method: The name of the get_subgraph_by_* function to use
:param seed_data: The argument to pass to the get_subgraph function
:param expand_nodes: Add the neighborhoods around all of these nodes
:param remove_nodes: Remove these nodes and all of their in/out edges
:rtype: Optional[pybel.BELGraph] |
def _edge_list_to_dataframe(ls, src_column_name, dst_column_name):
"""
Convert a list of edges into a dataframe.
"""
assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.'
cols = reduce(set.union, (set(e.attr.keys()) for e in ls))
df = pd.DataFrame({
src_column_name: [e.src_vid for e in ls],
dst_column_name: [e.dst_vid for e in ls]})
for c in cols:
df[c] = [e.attr.get(c) for e in ls]
return df | Convert a list of edges into a dataframe.
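A minimal usage sketch for _edge_list_to_dataframe above; the namedtuple is only a stand-in for the real edge objects, which just need src_vid, dst_vid and attr attributes:
from collections import namedtuple

Edge = namedtuple('Edge', ['src_vid', 'dst_vid', 'attr'])  # illustrative stand-in
edges = [Edge(1, 2, {'weight': 0.5}), Edge(2, 3, {'weight': 1.5})]
df = _edge_list_to_dataframe(edges, '__src_id', '__dst_id')
print(df.columns.tolist())  # typically ['__src_id', '__dst_id', 'weight']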
def get_storage_hash(storage):
"""
Return a hex string hash for a storage object (or string containing
'full.path.ClassName' referring to a storage object).
"""
# If storage is wrapped in a lazy object we need to get the real thing.
if isinstance(storage, LazyObject):
if storage._wrapped is None:
storage._setup()
storage = storage._wrapped
if not isinstance(storage, six.string_types):
storage_cls = storage.__class__
storage = '%s.%s' % (storage_cls.__module__, storage_cls.__name__)
return hashlib.md5(storage.encode('utf8')).hexdigest() | Return a hex string hash for a storage object (or string containing
'full.path.ClassName' referring to a storage object). |
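A small usage sketch for get_storage_hash above; when given a dotted-path string, the result is simply the md5 hex digest of that string:
path = 'django.core.files.storage.FileSystemStorage'
print(get_storage_hash(path))
# same as hashlib.md5(path.encode('utf8')).hexdigest()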
def list(self, id, seq): # pylint: disable=invalid-name,redefined-builtin
"""Get a list of captures.
:param id: Result ID as an int.
:param seq: TestResult sequence ID as an int.
:return: :class:`captures.Capture <captures.Capture>` list
"""
schema = CaptureSchema(exclude=('id', 'seq'))
resp = self.service.list(self._base(id, seq))
return self.service.decode(schema, resp, many=True) | Get a list of captures.
:param id: Result ID as an int.
:param seq: TestResult sequence ID as an int.
:return: :class:`captures.Capture <captures.Capture>` list |
def getColorHSV(name):
"""Retrieve the hue, saturation, value triple of a color name.
Returns:
a triple (degree, percent, percent). If not found (-1, -1, -1) is returned.
"""
try:
x = getColorInfoList()[getColorList().index(name.upper())]
except:
return (-1, -1, -1)
r = x[1] / 255.
g = x[2] / 255.
b = x[3] / 255.
cmax = max(r, g, b)
V = round(cmax * 100, 1)
cmin = min(r, g, b)
delta = cmax - cmin
if delta == 0:
hue = 0
elif cmax == r:
hue = 60. * (((g - b)/delta) % 6)
elif cmax == g:
hue = 60. * (((b - r)/delta) + 2)
else:
hue = 60. * (((r - g)/delta) + 4)
H = int(round(hue))
if cmax == 0:
sat = 0
else:
sat = delta / cmax
S = int(round(sat * 100))
return (H, S, V) | Retrieve the hue, saturation, value triple of a color name.
Returns:
a triple (degree, percent, percent). If not found (-1, -1, -1) is returned. |
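A small usage sketch for getColorHSV above, assuming the color tables behind getColorList/getColorInfoList are available (as in PyMuPDF's fitz.utils):
print(getColorHSV('red'))            # expected roughly (0, 100, 100.0)
print(getColorHSV('no-such-color'))  # (-1, -1, -1)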
def package_locations(self, package_keyname):
"""List datacenter locations for a package keyname
:param str package_keyname: The package for which to get the locations.
:returns: List of locations a package is orderable in
"""
mask = "mask[description, keyname, locations]"
package = self.get_package_by_key(package_keyname, mask='id')
regions = self.package_svc.getRegions(id=package['id'], mask=mask)
return regions | List datacenter locations for a package keyname
:param str package_keyname: The package for which to get the locations.
:returns: List of locations a package is orderable in |
def get_permission_requests(parser, token):
"""
Retrieves all permissions requests associated with the given obj and user
and assigns the result to a context variable.
Syntax::
{% get_permission_requests obj %}
{% for perm in permissions %}
{{ perm }}
{% endfor %}
{% get_permission_requests obj as "my_permissions" %}
{% get_permission_requests obj for request.user as "my_permissions" %}
"""
return PermissionsForObjectNode.handle_token(parser, token,
approved=False,
name='"permission_requests"') | Retrieves all permissions requests associated with the given obj and user
and assigns the result to a context variable.
Syntax::
{% get_permission_requests obj %}
{% for perm in permissions %}
{{ perm }}
{% endfor %}
{% get_permission_requests obj as "my_permissions" %}
{% get_permission_requests obj for request.user as "my_permissions" %} |
def error(self, error):
"""
set the error
"""
# TODO: check length with value?
# TODO: type checks (similar to value)
if self.direction not in ['x', 'y', 'z'] and error is not None:
raise ValueError("error only accepted for x, y, z dimensions")
if isinstance(error, u.Quantity):
error = error.to(self.unit).value
self._error = error | set the error |
def _get_stats_columns(cls, table, relation_type):
"""Given a table, return an iterator of key/value pairs for stats
column names/values.
"""
column_names = cls._get_stats_column_names()
# agate does not handle the array of column names gracefully
clustering_value = None
if table.clustering_fields is not None:
clustering_value = ','.join(table.clustering_fields)
# cast num_bytes/num_rows to str before they get to agate, or else
# agate will incorrectly decide they are booleans.
column_values = (
'Number of bytes',
str(table.num_bytes),
'The number of bytes this table consumes',
relation_type == 'table',
'Number of rows',
str(table.num_rows),
'The number of rows in this table',
relation_type == 'table',
'Location',
table.location,
'The geographic location of this table',
True,
'Partitioning Type',
table.partitioning_type,
'The partitioning type used for this table',
relation_type == 'table',
'Clustering Fields',
clustering_value,
'The clustering fields for this table',
relation_type == 'table',
)
return zip(column_names, column_values) | Given a table, return an iterator of key/value pairs for stats
column names/values. |
def _match_type(self, i):
"""Looks at line 'i' to see if the line matches a module user type def."""
self.col_match = self.RE_TYPE.match(self._source[i])
if self.col_match is not None:
self.section = "types"
self.el_type = CustomType
self.el_name = self.col_match.group("name")
return True
else:
return False | Looks at line 'i' to see if the line matches a module user type def. |
def distinct_words(string_matrix: List[List[str]]) -> Set[str]:
"""
Diagnostic function
:param string_matrix:
:return:
>>> dl = distinct_words([['the', 'quick', 'brown'], ['here', 'lies', 'the', 'fox']])
>>> sorted(dl)
['brown', 'fox', 'here', 'lies', 'quick', 'the']
"""
return set([word
for sentence in string_matrix
for word in sentence]) | Diagnostic function
:param string_matrix:
:return:
>>> dl = distinct_words([['the', 'quick', 'brown'], ['here', 'lies', 'the', 'fox']])
>>> sorted(dl)
['brown', 'fox', 'here', 'lies', 'quick', 'the'] |
def get_slopes(data, s_freq, level='all', smooth=0.05):
"""Get the slopes (average and/or maximum) for each quadrant of a slow
wave, as well as the combination of quadrants 2 and 3.
Parameters
----------
data : ndarray
raw data as vector
s_freq : int
sampling frequency
level : str
if 'average', returns average slopes (uV / s). if 'maximum', returns
the maximum of the slope derivative (uV / s**2). if 'all', returns all.
smooth : float or None
if not None, signal will be smoothed by moving average, with a window
of this duration
Returns
-------
tuple of ndarray
each array is len 5, with q1, q2, q3, q4 and q23. First array is
average slopes and second is maximum slopes.
Notes
-----
This function is made to take automatically detected start and end
times AS WELL AS manually delimited ones. In the latter case, the first
and last zero has to be detected within this function.
"""
data = negative(data) # legacy code
nan_array = empty((5,))
nan_array[:] = nan
idx_trough = data.argmin()
idx_peak = data.argmax()
if idx_trough >= idx_peak:
return nan_array, nan_array
zero_crossings_0 = where(diff(sign(data[:idx_trough])))[0]
zero_crossings_1 = where(diff(sign(data[idx_trough:idx_peak])))[0]
zero_crossings_2 = where(diff(sign(data[idx_peak:])))[0]
if zero_crossings_1.any():
idx_zero_1 = idx_trough + zero_crossings_1[0]
else:
return nan_array, nan_array
if zero_crossings_0.any():
idx_zero_0 = zero_crossings_0[-1]
else:
idx_zero_0 = 0
if zero_crossings_2.any():
idx_zero_2 = idx_peak + zero_crossings_2[0]
else:
idx_zero_2 = len(data) - 1
avgsl = nan_array
if level in ['average', 'all']:
q1 = data[idx_trough] / ((idx_trough - idx_zero_0) / s_freq)
q2 = data[idx_trough] / ((idx_zero_1 - idx_trough) / s_freq)
q3 = data[idx_peak] / ((idx_peak - idx_zero_1) / s_freq)
q4 = data[idx_peak] / ((idx_zero_2 - idx_peak) / s_freq)
q23 = (data[idx_peak] - data[idx_trough]) \
/ ((idx_peak - idx_trough) / s_freq)
avgsl = asarray([q1, q2, q3, q4, q23])
avgsl[isinf(avgsl)] = nan
maxsl = nan_array
if level in ['maximum', 'all']:
if smooth is not None:
win = int(smooth * s_freq)
flat = ones(win)
data = fftconvolve(data, flat / sum(flat), mode='same')
if idx_trough - idx_zero_0 >= win:
maxsl[0] = min(diff(data[idx_zero_0:idx_trough]))
if idx_zero_1 - idx_trough >= win:
maxsl[1] = max(diff(data[idx_trough:idx_zero_1]))
if idx_peak - idx_zero_1 >= win:
maxsl[2] = max(diff(data[idx_zero_1:idx_peak]))
if idx_zero_2 - idx_peak >= win:
maxsl[3] = min(diff(data[idx_peak:idx_zero_2]))
if idx_peak - idx_trough >= win:
maxsl[4] = max(diff(data[idx_trough:idx_peak]))
maxsl[isinf(maxsl)] = nan
return avgsl, maxsl | Get the slopes (average and/or maximum) for each quadrant of a slow
wave, as well as the combination of quadrants 2 and 3.
Parameters
----------
data : ndarray
raw data as vector
s_freq : int
sampling frequency
level : str
if 'average', returns average slopes (uV / s). if 'maximum', returns
the maximum of the slope derivative (uV / s**2). if 'all', returns all.
smooth : float or None
if not None, signal will be smoothed by moving average, with a window
of this duration
Returns
-------
tuple of ndarray
each array is len 5, with q1, q2, q3, q4 and q23. First array is
average slopes and second is maximum slopes.
Notes
-----
This function is made to take automatically detected start and end
times AS WELL AS manually delimited ones. In the latter case, the first
and last zero has to be detected within this function. |
def health(self, index=None, params=None):
"""
Get a very simple status on the health of the cluster.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html>`_
:arg index: Limit the information returned to a specific index
:arg level: Specify the level of detail for returned information,
default 'cluster', valid choices are: 'cluster', 'indices', 'shards'
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg timeout: Explicit operation timeout
:arg wait_for_active_shards: Wait until the specified number of shards
is active
:arg wait_for_events: Wait until all currently queued events with the
given priority are processed, valid choices are: 'immediate',
'urgent', 'high', 'normal', 'low', 'languid'
:arg wait_for_no_relocating_shards: Whether to wait until there are no
relocating shards in the cluster
:arg wait_for_nodes: Wait until the specified number of nodes is
available
:arg wait_for_status: Wait until cluster is in a specific state, default
None, valid choices are: 'green', 'yellow', 'red'
"""
return self.transport.perform_request('GET', _make_path('_cluster',
'health', index), params=params) | Get a very simple status on the health of the cluster.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html>`_
:arg index: Limit the information returned to a specific index
:arg level: Specify the level of detail for returned information,
default 'cluster', valid choices are: 'cluster', 'indices', 'shards'
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg timeout: Explicit operation timeout
:arg wait_for_active_shards: Wait until the specified number of shards
is active
:arg wait_for_events: Wait until all currently queued events with the
given priority are processed, valid choices are: 'immediate',
'urgent', 'high', 'normal', 'low', 'languid'
:arg wait_for_no_relocating_shards: Whether to wait until there are no
relocating shards in the cluster
:arg wait_for_nodes: Wait until the specified number of nodes is
available
:arg wait_for_status: Wait until cluster is in a specific state, default
None, valid choices are: 'green', 'yellow', 'red' |
def should_include_file_in_search(file_name, extensions, exclude_dirs):
""" Whether or not a filename matches a search criteria according to arguments.
Args:
file_name (str): A file path to check.
extensions (list): A list of file extensions file should match.
exclude_dirs (list): A list of directories to exclude from search.
Returns:
A boolean of whether or not the file matches the search criteria.
"""
return (exclude_dirs is None or not any(file_name.startswith(d) for d in exclude_dirs)) and \
any(file_name.endswith(e) for e in extensions) | Whether or not a filename matches the search criteria given by the arguments.
Args:
file_name (str): A file path to check.
extensions (list): A list of file extensions file should match.
exclude_dirs (list): A list of directories to exclude from search.
Returns:
A boolean of whether or not the file matches the search criteria. |
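A small usage sketch for should_include_file_in_search above; note that exclusion is a plain prefix match on the path:
print(should_include_file_in_search('src/app/main.py', ['.py'], ['build']))     # True
print(should_include_file_in_search('build/generated.py', ['.py'], ['build']))  # False (excluded dir)
print(should_include_file_in_search('src/app/main.js', ['.py'], None))          # False (wrong extension)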
def setdim(P, dim=None):
"""
Adjust the dimensions of a polynomial.
Output the results into Poly object
Args:
P (Poly) : Input polynomial
dim (int) : The dimensions of the output polynomial. If omitted,
increase the polynomial by one dimension. If the new dim is
smaller than P's dimensions, variables with cut components are
all cut.
Examples:
>>> x,y = chaospy.variable(2)
>>> P = x*x-x*y
>>> print(chaospy.setdim(P, 1))
q0^2
"""
P = P.copy()
ldim = P.dim
if not dim:
dim = ldim+1
if dim==ldim:
return P
P.dim = dim
if dim>ldim:
key = numpy.zeros(dim, dtype=int)
for lkey in P.keys:
key[:ldim] = lkey
P.A[tuple(key)] = P.A.pop(lkey)
else:
key = numpy.zeros(dim, dtype=int)
for lkey in P.keys:
if not sum(lkey[ldim-1:]) or not sum(lkey):
P.A[lkey[:dim]] = P.A.pop(lkey)
else:
del P.A[lkey]
P.keys = sorted(P.A.keys(), key=sort_key)
return P | Adjust the dimensions of a polynomial.
Output the results into Poly object
Args:
P (Poly) : Input polynomial
dim (int) : The dimensions of the output polynomial. If omitted,
increase the polynomial by one dimension. If the new dim is
smaller than P's dimensions, variables with cut components are
all cut.
Examples:
>>> x,y = chaospy.variable(2)
>>> P = x*x-x*y
>>> print(chaospy.setdim(P, 1))
q0^2 |
def slice_around_gaps (values, maxgap):
"""Given an ordered array of values, generate a set of slices that traverse
all of the values. Within each slice, no gap between adjacent values is
larger than `maxgap`. In other words, these slices break the array into
chunks separated by gaps of size larger than maxgap.
"""
if not (maxgap > 0):
# above test catches NaNs, other weird cases
raise ValueError ('maxgap must be positive; got %r' % maxgap)
values = np.asarray (values)
delta = values[1:] - values[:-1]
if np.any (delta < 0):
raise ValueError ('values must be in nondecreasing order')
whgap = np.where (delta > maxgap)[0] + 1
prev_idx = None
for gap_idx in whgap:
yield slice (prev_idx, gap_idx)
prev_idx = gap_idx
yield slice (prev_idx, None) | Given an ordered array of values, generate a set of slices that traverse
all of the values. Within each slice, no gap between adjacent values is
larger than `maxgap`. In other words, these slices break the array into
chunks separated by gaps of size larger than maxgap. |
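A small usage sketch for slice_around_gaps above, showing how the generated slices split an ordered array at gaps wider than maxgap:
import numpy as np

values = np.array([1.0, 2.0, 3.0, 10.0, 11.0, 20.0])
for sl in slice_around_gaps(values, maxgap=3.0):
    print(values[sl])
# -> [1. 2. 3.], then [10. 11.], then [20.]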
def _check_channel_state_for_update(
self,
channel_identifier: ChannelID,
closer: Address,
update_nonce: Nonce,
block_identifier: BlockSpecification,
) -> Optional[str]:
"""Check the channel state on chain to see if it has been updated.
Compare the nonce, we are about to update the contract with, with the
updated nonce in the onchain state and, if it's the same, return a
message with which the caller should raise a RaidenRecoverableError.
If all is okay return None.
"""
msg = None
closer_details = self._detail_participant(
channel_identifier=channel_identifier,
participant=closer,
partner=self.node_address,
block_identifier=block_identifier,
)
if closer_details.nonce == update_nonce:
msg = (
'updateNonClosingBalanceProof transaction has already '
'been mined and updated the channel successfully.'
)
return msg | Check the channel state on chain to see if it has been updated.
Compare the nonce, we are about to update the contract with, with the
updated nonce in the onchain state and, if it's the same, return a
message with which the caller should raise a RaidenRecoverableError.
If all is okay return None. |
def perform_remote_action(i):
"""
Input: { See 'perform_action' function }
Output: { See 'perform_action' function }
"""
# Import modules compatible with Python 2.x and 3.x
import urllib
try: import urllib.request as urllib2
except: import urllib2 # pragma: no cover
try: from urllib.parse import urlencode
except: from urllib import urlencode # pragma: no cover
rr={'return':0}
# Get action
act=i.get('action','')
# Check output
o=i.get('out','')
if o=='con':
# out('Initiating remote access ...')
# out('')
i['out']='con'
i['quiet']='yes'
if act=='pull':
i['out']='json'
else:
i['out']='json'
# # Clean up input
# if o!='json_file':
# rr['out']='json' # Decided to return json to show that it's remote ...
if 'cid' in i:
del(i['cid']) # already processed
# Get URL
url=i.get('remote_server_url','')
# Process i
if 'remote_server_url' in i: del(i['remote_server_url'])
# Pre process if push file ...
if act=='push':
# Check file
fn=i.get('filename','')
if fn=='':
x=i.get('cids',[])
if len(x)>0:
fn=x[0]
if fn=='':
return {'return':1, 'error':'filename is empty'}
if not os.path.isfile(fn):
return {'return':1, 'error':'file '+fn+' not found'}
rx=convert_file_to_upload_string({'filename':fn})
if rx['return']>0: return rx
i['file_content_base64']=rx['file_content_base64']
# Leave only filename without path
i['filename']=os.path.basename(fn)
# Prepare post variables
r=dumps_json({'dict':i, 'skip_indent':'yes'})
if r['return']>0: return r
s=r['string'].encode('utf8')
post=urlencode({'ck_json':s})
if sys.version_info[0]>2: post=post.encode('utf8')
# If auth
au=i.get('remote_server_user','')
if au!='':
del(i['remote_server_user'])
ap=i.get('remote_server_pass','')
if ap!='':
del(i['remote_server_pass'])
auth = urllib2.HTTPPasswordMgrWithDefaultRealm()
auth.add_password(None, url, au, ap)
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPBasicAuthHandler(auth)))
# Prepare request
request = urllib2.Request(url, post)
# Connect
try:
f=urllib2.urlopen(request)
except Exception as e:
return {'return':1, 'error':'Access to remote CK repository failed ('+format(e)+')'}
# Read from Internet
try:
s=f.read()
f.close()
except Exception as e:
return {'return':1, 'error':'Failed reading stream from remote CK web service ('+format(e)+')'}
# Check output
try: s=s.decode('utf8')
except Exception as e: pass
if o=='con' and act!='pull':
out(s.rstrip())
else:
# Try to convert output to dictionary
r=convert_json_str_to_dict({'str':s, 'skip_quote_replacement':'yes'})
if r['return']>0:
return {'return':1, 'error':'can\'t parse output from remote CK server ('+r['error']+'):\n'+s[:256]+'\n\n...)'}
d=r['dict']
if 'return' in d: d['return']=int(d['return']) # Fix for some strange behavior when 'return' is not integer - should check why ...
if d.get('return',0)>0:
return d
# Post process if pull file ...
if act=='pull':
if o!='json' and o!='json_file':
# Convert encoded file to real file ...
x=d.get('file_content_base64','')
fn=d.get('filename','')
if fn=='': fn=cfg['default_archive_name']
r=convert_upload_string_to_file({'file_content_base64':x, 'filename':fn})
if r['return']>0: return r
if 'file_content_base64' in d: del(d['file_content_base64'])
rr.update(d)
# Restore original output
i['out']=o
return rr | Input: { See 'perform_action' function }
Output: { See 'perform_action' function } |
def get_term_pillar(filter_name,
term_name,
pillar_key='acl',
pillarenv=None,
saltenv=None):
'''
Helper that can be used inside a state SLS,
in order to get the term configuration given its name,
under a certain filter uniquely identified by its name.
filter_name
The name of the filter.
term_name
The name of the term.
pillar_key: ``acl``
The root key of the whole policy config. Default: ``acl``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
'''
return __salt__['capirca.get_term_pillar'](filter_name,
term_name,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv) | Helper that can be used inside a state SLS,
in order to get the term configuration given its name,
under a certain filter uniquely identified by its name.
filter_name
The name of the filter.
term_name
The name of the term.
pillar_key: ``acl``
The root key of the whole policy config. Default: ``acl``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored. |
def check(self):
"""
Checks the values of the windows. If any problems are found,
it flags them by changing the background colour. Only active
windows are checked.
Returns status, flag for whether parameters are viable.
"""
status = True
synced = True
xbin = self.xbin.value()
ybin = self.ybin.value()
nwin = self.nwin.value()
# individual window checks
g = get_root(self).globals
for xsw, ysw, nxw, nyw in \
zip(self.xs[:nwin], self.ys[:nwin],
self.nx[:nwin], self.ny[:nwin]):
xsw.config(bg=g.COL['main'])
ysw.config(bg=g.COL['main'])
nxw.config(bg=g.COL['main'])
nyw.config(bg=g.COL['main'])
status = status if xsw.ok() else False
status = status if ysw.ok() else False
status = status if nxw.ok() else False
status = status if nyw.ok() else False
xs = xsw.value()
ys = ysw.value()
nx = nxw.value()
ny = nyw.value()
# Are unbinned dimensions consistent with binning factors?
if nx is None or nx % xbin != 0:
nxw.config(bg=g.COL['error'])
status = False
elif (nx // xbin) % 4 != 0:
"""
The NGC collects pixel data in chunks before transmission.
As a result, to avoid loss of data from frames, the binned
x-size must be a multiple of 4.
"""
nxw.config(bg=g.COL['error'])
status = False
if ny is None or ny % ybin != 0:
nyw.config(bg=g.COL['error'])
status = False
# Are the windows synchronised? This means that they
# would be consistent with the pixels generated were
# the whole CCD to be binned by the same factors
# If relevant values are not set, we count that as
# "synced" because the purpose of this is to enable
# / disable the sync button and we don't want it to be
# enabled just because xs or ys are not set.
if (xs is not None and ys is not None and nx is not None and
ny is not None):
if (xs < 1025 and ((xs - 1) % xbin != 0 or (ys - 1) % ybin != 0)
or ((xs-1025) % xbin != 0 or (ys - 1) % ybin != 0)):
synced = False
# Range checks
if xs is None or nx is None or xs + nx - 1 > xsw.imax:
xsw.config(bg=g.COL['error'])
status = False
if ys is None or ny is None or ys + ny - 1 > ysw.imax:
ysw.config(bg=g.COL['error'])
status = False
# Overlap checks. Compare each window with the next one, requiring
# no y overlap and that the second is higher than the first
if status:
n1 = 0
for ysw1, nyw1 in zip(self.ys[:nwin-1], self.ny[:nwin-1]):
ys1 = ysw1.value()
ny1 = nyw1.value()
n1 += 1
ysw2 = self.ys[n1]
ys2 = ysw2.value()
if ys2 < ys1 + ny1:
ysw2.config(bg=g.COL['error'])
status = False
if synced:
self.sbutt.config(bg=g.COL['main'])
self.sbutt.disable()
else:
if not self.frozen:
self.sbutt.enable()
self.sbutt.config(bg=g.COL['warn'])
return status | Checks the values of the windows. If any problems are found,
it flags them by changing the background colour. Only active
windows are checked.
Returns status, flag for whether parameters are viable. |
def read_csv(filename, delimiter=",", skip=0, guess_type=True, has_header=True, use_types={}):
"""Read a CSV file
Usage
-----
>>> data = read_csv(filename, delimiter=delimiter, skip=skip,
guess_type=guess_type, has_header=True, use_types={})
# Use specific types
>>> types = {"sepal.length": int, "petal.width": float}
>>> data = read_csv(filename, guess_type=guess_type, use_types=types)
keywords
:has_header:
Determine whether the file has a header or not
"""
with open(filename, 'r') as f:
# Skip the n first lines
if has_header:
header = f.readline().strip().split(delimiter)
else:
header = None
for i in range(skip):
f.readline()
for line in csv.DictReader(f, delimiter=delimiter, fieldnames=header):
if use_types:
yield apply_types(use_types, guess_type, line)
elif guess_type:
yield dmap(determine_type, line)
else:
yield line | Read a CSV file
Usage
-----
>>> data = read_csv(filename, delimiter=delimiter, skip=skip,
guess_type=guess_type, has_header=True, use_types={})
# Use specific types
>>> types = {"sepal.length": int, "petal.width": float}
>>> data = read_csv(filename, guess_type=guess_type, use_types=types)
keywords
:has_header:
Determine whether the file has a header or not |
def osd_page_handler(config=None, identifier=None, prefix=None, **args):
"""Flask handler to produce HTML response for OpenSeadragon view of identifier.
Arguments:
config - Config object for this IIIF handler
identifier - identifier of image/generator
prefix - path prefix
**args - other arguments ignored
"""
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
with open(os.path.join(template_dir, 'testserver_osd.html'), 'r') as f:
template = f.read()
d = dict(prefix=prefix,
identifier=identifier,
api_version=config.api_version,
osd_version='2.0.0',
osd_uri='/openseadragon200/openseadragon.min.js',
osd_images_prefix='/openseadragon200/images',
osd_height=500,
osd_width=500,
info_json_uri='info.json')
return make_response(Template(template).safe_substitute(d)) | Flask handler to produce HTML response for OpenSeadragon view of identifier.
Arguments:
config - Config object for this IIIF handler
identifier - identifier of image/generator
prefix - path prefix
**args - other arguments ignored |
def ref2names2commdct(ref2names, commdct):
"""embed ref2names into commdct"""
for comm in commdct:
for cdct in comm:
try:
refs = cdct['object-list'][0]
validobjects = ref2names[refs]
cdct.update({'validobjects':validobjects})
except KeyError as e:
continue
return commdct | embed ref2names into commdct |
def create(cls, name, division, api=None):
"""
Create a team within a division
:param name: Team name.
:param division: Parent division.
:param api: Api instance.
:return: Team object.
"""
division = Transform.to_division(division)
api = api if api else cls._API
data = {
'name': name,
'division': division
}
extra = {
'resource': cls.__name__,
'query': data
}
logger.info('Creating team', extra=extra)
created_team = api.post(cls._URL['query'], data=data).json()
return Team(api=api, **created_team) | Create a team within a division
:param name: Team name.
:param division: Parent division.
:param api: Api instance.
:return: Team object. |
def deref(self, ctx):
"""
Returns the value this reference is pointing to. This method uses 'ctx' to resolve the reference and return
the referenced value.
If the call was already made, it returns a cached result.
It also makes sure there's no cyclic reference, and if so raises CyclicReferenceError.
"""
if self in ctx.call_nodes:
raise CyclicReferenceError(ctx, self)
if self in ctx.cached_results:
return ctx.cached_results[self]
try:
ctx.call_nodes.add(self)
ctx.call_stack.append(self)
result = self.evaluate(ctx)
ctx.cached_results[self] = result
return result
except:
if ctx.exception_call_stack is None:
ctx.exception_call_stack = list(ctx.call_stack)
raise
finally:
ctx.call_stack.pop()
ctx.call_nodes.remove(self) | Returns the value this reference is pointing to. This method uses 'ctx' to resolve the reference and return
the value this reference references.
If the call was already made, it returns a cached result.
It also makes sure there's no cyclic reference, and if so raises CyclicReferenceError. |
def visit_ellipsis(self, node, parent):
"""visit an Ellipsis node by returning a fresh instance of it"""
return nodes.Ellipsis(
getattr(node, "lineno", None), getattr(node, "col_offset", None), parent
) | visit an Ellipsis node by returning a fresh instance of it |
def program_files(self, executable):
"""
Determine the file paths to be adopted
"""
if self._get_version() == 6:
paths = self.REQUIRED_PATHS_6
elif self._get_version() > 6:
paths = self.REQUIRED_PATHS_7_1
return paths | Determine the file paths to be adopted |
def _match_processes(self, pid, name, cur_process):
"""
Determine whether user-specified "pid/processes" contain this process
:param pid: The user input of pid
:param name: The user input of process name
:param process: current process info
:return: True or Not; (if both pid/process are given, then both of them need to match)
"""
cur_pid, cur_name = self._get_tuple(cur_process.split('/'))
pid_match = False
if not pid:
pid_match = True
elif pid == cur_pid:
pid_match = True
name_match = False
if not name:
name_match = True
elif name == cur_name:
name_match = True
return pid_match and name_match | Determine whether user-specified "pid/processes" contain this process
:param pid: The user input of pid
:param name: The user input of process name
:param cur_process: current process info
:return: True or False (if both pid and process name are given, then both of them need to match) |
def get(self, name, param=None):
"""Retreive a metadata attribute.
:param string name: name of the attribute to retrieve. See `attribs`
:param param: Required parameter for some attributes
"""
if name not in self.attribs:
raise exceptions.SoftLayerError('Unknown metadata attribute.')
call_details = self.attribs[name]
if call_details.get('param_req'):
if not param:
raise exceptions.SoftLayerError(
'Parameter required to get this attribute.')
params = tuple()
if param is not None:
params = (param,)
try:
return self.client.call('Resource_Metadata',
self.attribs[name]['call'],
*params)
except exceptions.SoftLayerAPIError as ex:
if ex.faultCode == 404:
return None
raise ex | Retrieve a metadata attribute.
:param string name: name of the attribute to retrieve. See `attribs`
:param param: Required parameter for some attributes |
def getlocals(back=2):
"""Get the local variables some levels back (-1 is top)."""
import inspect
fr = inspect.currentframe()
try:
while fr and back != 0:
fr1 = fr
fr = fr.f_back
back -= 1
except:
pass
return fr1.f_locals | Get the local variables some levels back (-1 is top). |
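A small usage sketch for getlocals above; with the default back=2 it returns the local variables of the function that called it:
def caller():
    x, y = 1, 'two'
    return getlocals()  # default back=2 -> locals of caller()

print(caller())  # {'x': 1, 'y': 'two'}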
def network(n):
"""Validate a |Network|.
Checks the TPM and connectivity matrix.
"""
tpm(n.tpm)
connectivity_matrix(n.cm)
if n.cm.shape[0] != n.size:
raise ValueError("Connectivity matrix must be NxN, where N is the "
"number of nodes in the network.")
return True | Validate a |Network|.
Checks the TPM and connectivity matrix. |
def validate(self):
"""Validate that the OutputContextField is correctly representable."""
if not isinstance(self.location, Location):
raise TypeError(u'Expected Location location, got: {} {}'.format(
type(self.location).__name__, self.location))
if not self.location.field:
raise ValueError(u'Expected Location object that points to a field, got: '
u'{}'.format(self.location))
if not is_graphql_type(self.field_type):
raise ValueError(u'Invalid value of "field_type": {}'.format(self.field_type))
stripped_field_type = strip_non_null_from_type(self.field_type)
if isinstance(stripped_field_type, GraphQLList):
inner_type = strip_non_null_from_type(stripped_field_type.of_type)
if GraphQLDate.is_same_type(inner_type) or GraphQLDateTime.is_same_type(inner_type):
# This is a compilation error rather than a ValueError as
# it can be caused by an invalid GraphQL query on an otherwise valid schema.
# In other words, it's an error in writing the GraphQL query, rather than
# a programming error within the library.
raise GraphQLCompilationError(
u'Lists of Date or DateTime cannot currently be represented as '
u'OutputContextField objects: {}'.format(self.field_type)) | Validate that the OutputContextField is correctly representable. |
def sign_execute_deposit(deposit_params, key_pair):
"""
Function to execute the deposit request by signing the transaction generated by the create deposit function.
Execution of this function is as follows::
sign_execute_deposit(deposit_details=create_deposit, key_pair=key_pair)
The expected return result for this function is as follows::
{
'signature': '3cc4a5cb7b7d50383e799add2ba35382b6f2f1b2e3b97802....'
}
:param deposit_params: The parameters generated by the create deposit function that now requires signature.
:type deposit_params: dict
:param key_pair: The KeyPair for the wallet being used to sign deposit message.
:type key_pair: KeyPair
:return: Dictionary with the result status of the deposit attempt.
"""
signature = sign_transaction(transaction=deposit_params['transaction'],
private_key_hex=private_key_to_hex(key_pair=key_pair))
return {'signature': signature} | Function to execute the deposit request by signing the transaction generated by the create deposit function.
Execution of this function is as follows::
sign_execute_deposit(deposit_details=create_deposit, key_pair=key_pair)
The expected return result for this function is as follows::
{
'signature': '3cc4a5cb7b7d50383e799add2ba35382b6f2f1b2e3b97802....'
}
:param deposit_params: The parameters generated by the create deposit function that now requires signature.
:type deposit_params: dict
:param key_pair: The KeyPair for the wallet being used to sign deposit message.
:type key_pair: KeyPair
:return: Dictionary with the result status of the deposit attempt. |
def compile_file_into_spirv(filepath, stage, optimization='size',
warnings_as_errors=False):
"""Compile shader file into Spir-V binary.
This function uses shaderc to compile your GLSL file into Spir-V
code.
Args:
filepath (str): Absolute path to your shader file
stage (str): Pipeline stage in ['vert', 'tesc', 'tese', 'geom',
'frag', 'comp']
optimization (str): 'zero' (no optimization) or 'size' (reduce size)
warnings_as_errors (bool): Turn warnings into errors
Returns:
bytes: Compiled Spir-V binary.
Raises:
CompilationError: If compilation fails.
"""
with open(filepath, 'rb') as f:
content = f.read()
return compile_into_spirv(content, stage, filepath,
optimization=optimization,
warnings_as_errors=warnings_as_errors) | Compile shader file into Spir-V binary.
This function uses shaderc to compile your GLSL file into Spir-V
code.
Args:
filepath (str): Absolute path to your shader file
stage (str): Pipeline stage in ['vert', 'tesc', 'tese', 'geom',
'frag', 'comp']
optimization (str): 'zero' (no optimization) or 'size' (reduce size)
warnings_as_errors (bool): Turn warnings into errors
Returns:
bytes: Compiled Spir-V binary.
Raises:
CompilationError: If compilation fails. |
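A minimal usage sketch for compile_file_into_spirv above; the path is hypothetical and shaderc must be available for the compilation to succeed:
# Hypothetical shader path; replace with a real GLSL vertex shader.
spirv = compile_file_into_spirv('/path/to/shader.vert', 'vert')
print(len(spirv), 'bytes of Spir-V')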
def _dstr(degrees, places=1, signed=False):
r"""Convert floating point `degrees` into a sexagesimal string.
>>> _dstr(181.875)
'181deg 52\' 30.0"'
>>> _dstr(181.875, places=3)
'181deg 52\' 30.000"'
>>> _dstr(181.875, signed=True)
'+181deg 52\' 30.0"'
>>> _dstr(float('nan'))
'nan'
"""
if isnan(degrees):
return 'nan'
sgn, d, m, s, etc = _sexagesimalize_to_int(degrees, places)
sign = '-' if sgn < 0.0 else '+' if signed else ''
return '%s%02ddeg %02d\' %02d.%0*d"' % (sign, d, m, s, places, etc) | r"""Convert floating point `degrees` into a sexagesimal string.
>>> _dstr(181.875)
'181deg 52\' 30.0"'
>>> _dstr(181.875, places=3)
'181deg 52\' 30.000"'
>>> _dstr(181.875, signed=True)
'+181deg 52\' 30.0"'
>>> _dstr(float('nan'))
'nan' |
def isSet(self, param):
"""
Checks whether a param is explicitly set by user.
"""
param = self._resolveParam(param)
return param in self._paramMap | Checks whether a param is explicitly set by user. |
def send_event_to_salt(self, result):
'''
This function identifies whether the engine is running on the master
or the minion and sends the data to the master event bus accordingly.
:param result: It's a dictionary which has the final data and topic.
'''
if result['send']:
data = result['data']
topic = result['topic']
# If the engine is run on master, get the event bus and send the
# parsed event.
if __opts__['__role'] == 'master':
event.get_master_event(__opts__,
__opts__['sock_dir']
).fire_event(data, topic)
# If the engine is run on minion, use the fire_master execution
# module to send event on the master bus.
else:
__salt__['event.fire_master'](data=data, tag=topic) | This function identifies whether the engine is running on the master
or the minion and sends the data to the master event bus accordingly.
:param result: It's a dictionary which has the final data and topic. |
def unhex(s):
"""Get the integer value of a hexadecimal number."""
bits = 0
for c in s:
if '0' <= c <= '9':
i = ord('0')
elif 'a' <= c <= 'f':
i = ord('a')-10
elif 'A' <= c <= 'F':
i = ord('A')-10
else:
break
bits = bits*16 + (ord(c) - i)
return bits | Get the integer value of a hexadecimal number. |
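A small usage sketch for unhex above; parsing stops silently at the first non-hexadecimal character:
print(unhex('ff'))    # 255
print(unhex('1A2b'))  # 6699
print(unhex('10z3'))  # 16 (stops at 'z')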
def save_config(self):
""" Save config file
Creates config.restore (back up file)
Returns:
-1: Error saving config
0: Config saved successfully
1: Config not saved (not modified)"""
if not self.opts['dirty_config'][1]:
if logger.isEnabledFor(logging.INFO):
logger.info('Config not saved (not modified)')
return 1
txt ='''# PyRadio Configuration File
# Player selection
# This is the equivalent to the -u , --use-player command line parameter
# Specify the player to use with PyRadio, or the player detection order
# Example:
# player = vlc
# or
# player = vlc,mpv, mplayer
# Default value: mpv,mplayer,vlc
player = {0}
# Default playlist
# This is the playlist to open if none is specified
# You can specify full path to CSV file, or if the playlist is in the
# config directory, playlist name (filename without extension) or
# playlist number (as reported by -ls command line option)
# Default value: stations
default_playlist = {1}
# Default station
# This is the equivalent to the -p , --play command line parameter
# The station number within the default playlist to play
# Value is 1..number of stations, "-1" or "False" means no auto play
# "0" or "Random" means play a random station
# Default value: False
default_station = {2}
# Default encoding
# This is the encoding used by default when reading data provided by
# a station (such as song title, etc.) If reading said data ends up
# in an error, 'utf-8' will be used instead.
#
# A valid encoding list can be found at:
# https://docs.python.org/2.7/library/codecs.html#standard-encodings
# replacing 2.7 with specific version:
# 3.0 up to current python version.
#
# Default value: utf-8
default_encoding = {3}
# Connection timeout
# PyRadio will wait for this number of seconds to get a station/server
# message indicating that playback has actually started.
# If this does not happen (within this number of seconds after the
# connection is initiated), PyRadio will consider the station
# unreachable, and display the "Failed to connect to: [station]"
# message.
#
# Valid values: 5 - 60
# Default value: 10
connection_timeout = {4}
# Default theme
# Hardcoded themes:
# dark (default) (8 colors)
# light (8 colors)
# dark_16_colors (16 colors dark theme alternative)
# light_16_colors (16 colors light theme alternative)
# black_on_white (bow) (256 colors)
# white_on_black (wob) (256 colors)
# Default value = 'dark'
theme = {5}
# Transparency setting
# If False, theme colors will be used.
# If True and a compositor is running, the stations' window
# background will be transparent. If True and a compositor is
# not running, the terminal's background color will be used.
# Valid values: True, true, False, false
# Default value: False
use_transparency = {6}
# Playlist management
#
# Specify whether you will be asked to confirm
# every station deletion action
# Valid values: True, true, False, false
# Default value: True
confirm_station_deletion = {7}
# Specify whether you will be asked to confirm
# playlist reloading, when the playlist has not
# been modified within Pyradio
# Valid values: True, true, False, false
# Default value: True
confirm_playlist_reload = {8}
# Specify whether you will be asked to save a
# modified playlist whenever it needs saving
# Valid values: True, true, False, false
# Default value: False
auto_save_playlist = {9}
'''
copyfile(self.config_file, self.config_file + '.restore')
if self.opts['default_station'][1] is None:
self.opts['default_station'][1] = '-1'
try:
with open(self.config_file, 'w') as cfgfile:
cfgfile.write(txt.format(self.opts['player'][1],
self.opts['default_playlist'][1],
self.opts['default_station'][1],
self.opts['default_encoding'][1],
self.opts['connection_timeout'][1],
self.opts['theme'][1],
self.opts['use_transparency'][1],
self.opts['confirm_station_deletion'][1],
self.opts['confirm_playlist_reload'][1],
self.opts['auto_save_playlist'][1]))
except:
if logger.isEnabledFor(logging.ERROR):
logger.error('Error saving config')
return -1
try:
remove(self.config_file + '.restore')
except:
pass
if logger.isEnabledFor(logging.INFO):
logger.info('Config saved')
self.opts['dirty_config'][1] = False
return 0 | Save config file
Creates config.restore (back up file)
Returns:
-1: Error saving config
0: Config saved successfully
1: Config not saved (not modified) |
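The backup-then-write-then-cleanup pattern used above generalizes to a small helper; this is a minimal sketch only (the safe_write name is hypothetical, and it assumes the target file already exists so the initial backup copy succeeds):
import os
from shutil import copyfile

def safe_write(path, text):
    """Write text to path, keeping a .restore backup until the write succeeds."""
    copyfile(path, path + '.restore')     # back up the current file first
    try:
        with open(path, 'w') as f:
            f.write(text)
    except Exception:
        return -1                         # keep the backup for manual recovery
    try:
        os.remove(path + '.restore')      # write succeeded, drop the backup
    except OSError:
        pass
    return 0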
def check_cgroup_availability_in_thread(options):
"""
Run check_cgroup_availability() in a separate thread to detect the following problem:
If "cgexec --sticky" is used to tell cgrulesengd to not interfere
with our child processes, the sticky flag unfortunately works only
for processes spawned by the main thread, not those spawned by other threads
(and this will happen if "benchexec -N" is used).
"""
thread = _CheckCgroupsThread(options)
thread.start()
thread.join()
if thread.error:
raise thread.error | Run check_cgroup_availability() in a separate thread to detect the following problem:
If "cgexec --sticky" is used to tell cgrulesengd to not interfere
with our child processes, the sticky flag unfortunately works only
for processes spawned by the main thread, not those spawned by other threads
(and this will happen if "benchexec -N" is used). |
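The run-in-a-thread-and-re-raise idiom does not depend on the BenchExec specifics; assuming _CheckCgroupsThread simply stores any exception on self.error, a generic equivalent looks like this (illustrative names only):
import threading

class _FuncThread(threading.Thread):
    """Run func() in a separate thread and remember any exception it raised."""
    def __init__(self, func):
        super().__init__()
        self.func = func
        self.error = None

    def run(self):
        try:
            self.func()
        except BaseException as e:   # remember the error instead of losing it in the worker thread
            self.error = e

def run_in_thread(func):
    thread = _FuncThread(func)
    thread.start()
    thread.join()
    if thread.error:
        raise thread.error           # re-raise in the calling (main) thread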
def run():
"""This client pulls PCAP files for building report.
Returns:
A list with `view_pcap`, `meta` and `filename` objects.
"""
global WORKBENCH
# Grab server args
args = client_helper.grab_server_args()
# Start up workbench connection
WORKBENCH = zerorpc.Client(timeout=300, heartbeat=60)
WORKBENCH.connect('tcp://'+args['server']+':'+args['port'])
data_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), '../data/pcap')
file_list = [os.path.join(data_path, child) for child in \
os.listdir(data_path)]
results = []
for filename in file_list:
# Skip OS generated files
if '.DS_Store' in filename: continue
# Process the pcap file
with open(filename,'rb') as f:
md5 = WORKBENCH.store_sample(f.read(), filename, 'pcap')
result = WORKBENCH.work_request('view_pcap', md5)
result.update(WORKBENCH.work_request('meta', result['view_pcap']['md5']))
result['filename'] = result['meta']['filename'].split('/')[-1]
results.append(result)
return results | This client pulls PCAP files for building a report.
Returns:
A list with `view_pcap`, `meta` and `filename` objects. |
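One small note on the filename handling above: split('/')[-1] takes the last path component, which os.path.basename does portably; a one-line check with a hypothetical path:
import os
assert os.path.basename('/data/pcap/capture1.pcap') == 'capture1.pcap'   # same result as split('/')[-1]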
def __ensure_provisioning_writes(
table_name, table_key, gsi_name, gsi_key, num_consec_write_checks):
""" Ensure that provisioning of writes is correct
:type table_name: str
:param table_name: Name of the DynamoDB table
:type table_key: str
:param table_key: Table configuration option key name
:type gsi_name: str
:param gsi_name: Name of the GSI
:type gsi_key: str
:param gsi_key: Configuration option key name
:type num_consec_write_checks: int
:param num_consec_write_checks: How many consecutive checks have we had
:returns: (bool, int, int)
update_needed, updated_write_units, num_consec_write_checks
"""
if not get_gsi_option(table_key, gsi_key, 'enable_writes_autoscaling'):
logger.info(
'{0} - GSI: {1} - '
'Autoscaling of writes has been disabled'.format(
table_name, gsi_name))
return False, dynamodb.get_provisioned_gsi_write_units(
table_name, gsi_name), 0
update_needed = False
try:
lookback_window_start = get_gsi_option(
table_key, gsi_key, 'lookback_window_start')
lookback_period = get_gsi_option(
table_key, gsi_key, 'lookback_period')
current_write_units = dynamodb.get_provisioned_gsi_write_units(
table_name, gsi_name)
consumed_write_units_percent = \
gsi_stats.get_consumed_write_units_percent(
table_name, gsi_name, lookback_window_start, lookback_period)
throttled_write_count = \
gsi_stats.get_throttled_write_event_count(
table_name, gsi_name, lookback_window_start, lookback_period)
throttled_by_provisioned_write_percent = \
gsi_stats.get_throttled_by_provisioned_write_event_percent(
table_name, gsi_name, lookback_window_start, lookback_period)
throttled_by_consumed_write_percent = \
gsi_stats.get_throttled_by_consumed_write_percent(
table_name, gsi_name, lookback_window_start, lookback_period)
writes_upper_threshold = \
get_gsi_option(table_key, gsi_key, 'writes_upper_threshold')
writes_lower_threshold = \
get_gsi_option(table_key, gsi_key, 'writes_lower_threshold')
throttled_writes_upper_threshold = \
get_gsi_option(
table_key, gsi_key, 'throttled_writes_upper_threshold')
increase_writes_unit = \
get_gsi_option(table_key, gsi_key, 'increase_writes_unit')
increase_writes_with = \
get_gsi_option(table_key, gsi_key, 'increase_writes_with')
decrease_writes_unit = \
get_gsi_option(table_key, gsi_key, 'decrease_writes_unit')
decrease_writes_with = \
get_gsi_option(table_key, gsi_key, 'decrease_writes_with')
min_provisioned_writes = \
get_gsi_option(table_key, gsi_key, 'min_provisioned_writes')
max_provisioned_writes = \
get_gsi_option(table_key, gsi_key, 'max_provisioned_writes')
num_write_checks_before_scale_down = \
get_gsi_option(
table_key, gsi_key, 'num_write_checks_before_scale_down')
num_write_checks_reset_percent = \
get_gsi_option(
table_key, gsi_key, 'num_write_checks_reset_percent')
increase_throttled_by_provisioned_writes_unit = \
get_gsi_option(
table_key,
gsi_key,
'increase_throttled_by_provisioned_writes_unit')
increase_throttled_by_provisioned_writes_scale = \
get_gsi_option(
table_key,
gsi_key,
'increase_throttled_by_provisioned_writes_scale')
increase_throttled_by_consumed_writes_unit = \
get_gsi_option(
table_key,
gsi_key,
'increase_throttled_by_consumed_writes_unit')
increase_throttled_by_consumed_writes_scale = \
get_gsi_option(
table_key,
gsi_key,
'increase_throttled_by_consumed_writes_scale')
increase_consumed_writes_unit = \
get_gsi_option(table_key, gsi_key, 'increase_consumed_writes_unit')
increase_consumed_writes_with = \
get_gsi_option(table_key, gsi_key, 'increase_consumed_writes_with')
increase_consumed_writes_scale = \
get_gsi_option(
table_key, gsi_key, 'increase_consumed_writes_scale')
decrease_consumed_writes_unit = \
get_gsi_option(table_key, gsi_key, 'decrease_consumed_writes_unit')
decrease_consumed_writes_with = \
get_gsi_option(table_key, gsi_key, 'decrease_consumed_writes_with')
decrease_consumed_writes_scale = \
get_gsi_option(
table_key, gsi_key, 'decrease_consumed_writes_scale')
except JSONResponseError:
raise
except BotoServerError:
raise
# Set the updated units to the current write unit value
updated_write_units = current_write_units
# Reset consecutive write count if num_write_checks_reset_percent
# is reached
if num_write_checks_reset_percent:
if consumed_write_units_percent >= num_write_checks_reset_percent:
logger.info(
'{0} - GSI: {1} - Resetting the number of consecutive '
'write checks. Reason: Consumed percent {2} is '
'greater than reset percent: {3}'.format(
table_name,
gsi_name,
consumed_write_units_percent,
num_write_checks_reset_percent))
num_consec_write_checks = 0
# Exit if up scaling has been disabled
if not get_gsi_option(table_key, gsi_key, 'enable_writes_up_scaling'):
logger.debug(
'{0} - GSI: {1} - Up scaling event detected. No action taken as '
'scaling up writes has been disabled in the configuration'.format(
table_name, gsi_name))
else:
# If local/granular values not specified use global values
increase_consumed_writes_unit = \
increase_consumed_writes_unit or increase_writes_unit
increase_throttled_by_provisioned_writes_unit = (
increase_throttled_by_provisioned_writes_unit
or increase_writes_unit)
increase_throttled_by_consumed_writes_unit = \
increase_throttled_by_consumed_writes_unit or increase_writes_unit
increase_consumed_writes_with = \
increase_consumed_writes_with or increase_writes_with
# Initialise variables to store calculated provisioning
throttled_by_provisioned_calculated_provisioning = scale_reader(
increase_throttled_by_provisioned_writes_scale,
throttled_by_provisioned_write_percent)
throttled_by_consumed_calculated_provisioning = scale_reader(
increase_throttled_by_consumed_writes_scale,
throttled_by_consumed_write_percent)
consumed_calculated_provisioning = scale_reader(
increase_consumed_writes_scale,
consumed_write_units_percent)
throttled_count_calculated_provisioning = 0
calculated_provisioning = 0
# Increase needed due to high throttled to provisioned ratio
if throttled_by_provisioned_calculated_provisioning:
if increase_throttled_by_provisioned_writes_unit == 'percent':
throttled_by_provisioned_calculated_provisioning = \
calculators.increase_writes_in_percent(
current_write_units,
throttled_by_provisioned_calculated_provisioning,
get_gsi_option(
table_key,
gsi_key,
'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
throttled_by_provisioned_calculated_provisioning = \
calculators.increase_writes_in_units(
current_write_units,
throttled_by_provisioned_calculated_provisioning,
get_gsi_option(
table_key,
gsi_key,
'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
# Increase needed due to high throttled to consumed ratio
if throttled_by_consumed_calculated_provisioning:
if increase_throttled_by_consumed_writes_unit == 'percent':
throttled_by_consumed_calculated_provisioning = \
calculators.increase_writes_in_percent(
current_write_units,
throttled_by_consumed_calculated_provisioning,
get_gsi_option(
table_key,
gsi_key,
'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
throttled_by_consumed_calculated_provisioning = \
calculators.increase_writes_in_units(
current_write_units,
throttled_by_consumed_calculated_provisioning,
get_gsi_option(
table_key,
gsi_key,
'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
# Increase needed due to high CU consumption
if consumed_calculated_provisioning:
if increase_consumed_writes_unit == 'percent':
consumed_calculated_provisioning = \
calculators.increase_writes_in_percent(
current_write_units,
consumed_calculated_provisioning,
get_gsi_option(
table_key,
gsi_key,
'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
consumed_calculated_provisioning = \
calculators.increase_writes_in_units(
current_write_units,
consumed_calculated_provisioning,
get_gsi_option(
table_key,
gsi_key,
'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
elif (writes_upper_threshold
and consumed_write_units_percent > writes_upper_threshold
and not increase_consumed_writes_scale):
if increase_consumed_writes_unit == 'percent':
consumed_calculated_provisioning = \
calculators.increase_writes_in_percent(
current_write_units,
increase_consumed_writes_with,
get_gsi_option(
table_key,
gsi_key,
'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
consumed_calculated_provisioning = \
calculators.increase_writes_in_units(
current_write_units,
increase_consumed_writes_with,
get_gsi_option(
table_key, gsi_key, 'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
# Increase needed due to high throttling
if (throttled_writes_upper_threshold
and throttled_write_count > throttled_writes_upper_threshold):
if increase_writes_unit == 'percent':
throttled_count_calculated_provisioning = \
calculators.increase_writes_in_percent(
updated_write_units,
increase_writes_with,
get_gsi_option(
table_key, gsi_key, 'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
throttled_count_calculated_provisioning = \
calculators.increase_writes_in_units(
updated_write_units,
increase_writes_with,
get_gsi_option(
table_key, gsi_key, 'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
# Determine which metric requires the most scaling
if (throttled_by_provisioned_calculated_provisioning
> calculated_provisioning):
calculated_provisioning = \
throttled_by_provisioned_calculated_provisioning
scale_reason = (
"due to throttled events by provisioned "
"units threshold being exceeded")
if (throttled_by_consumed_calculated_provisioning
> calculated_provisioning):
calculated_provisioning = \
throttled_by_consumed_calculated_provisioning
scale_reason = (
"due to throttled events by consumed "
"units threshold being exceeded")
if consumed_calculated_provisioning > calculated_provisioning:
calculated_provisioning = consumed_calculated_provisioning
scale_reason = "due to consumed threshold being exceeded"
if throttled_count_calculated_provisioning > calculated_provisioning:
calculated_provisioning = throttled_count_calculated_provisioning
scale_reason = "due to throttled events threshold being exceeded"
if calculated_provisioning > current_write_units:
logger.info(
'{0} - GSI: {1} - Resetting the number of consecutive '
'write checks. Reason: scale up {2}'.format(
table_name, gsi_name, scale_reason))
num_consec_write_checks = 0
update_needed = True
updated_write_units = calculated_provisioning
# Decrease needed due to low CU consumption
if not update_needed:
# If local/granular values not specified use global values
decrease_consumed_writes_unit = \
decrease_consumed_writes_unit or decrease_writes_unit
decrease_consumed_writes_with = \
decrease_consumed_writes_with or decrease_writes_with
# Initialise variables to store calculated provisioning
consumed_calculated_provisioning = scale_reader_decrease(
decrease_consumed_writes_scale,
consumed_write_units_percent)
calculated_provisioning = None
# Exit if down scaling has been disabled
if not get_gsi_option(
table_key, gsi_key, 'enable_writes_down_scaling'):
logger.debug(
'{0} - GSI: {1} - Down scaling event detected. '
'No action taken as scaling '
'down writes has been disabled in the configuration'.format(
table_name, gsi_name))
# Exit if writes == 0% and downscaling has been disabled at 0%
elif (consumed_write_units_percent == 0 and not get_gsi_option(
table_key, gsi_key, 'allow_scaling_down_writes_on_0_percent')):
logger.info(
'{0} - GSI: {1} - Down scaling event detected. '
'No action taken as scaling down writes is not done when'
' usage is at 0%'.format(table_name, gsi_name))
else:
if consumed_calculated_provisioning:
if decrease_consumed_writes_unit == 'percent':
calculated_provisioning = \
calculators.decrease_writes_in_percent(
updated_write_units,
consumed_calculated_provisioning,
get_gsi_option(
table_key, gsi_key, 'min_provisioned_writes'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
calculated_provisioning = \
calculators.decrease_writes_in_units(
updated_write_units,
consumed_calculated_provisioning,
get_gsi_option(
table_key, gsi_key, 'min_provisioned_writes'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
elif (writes_lower_threshold
and consumed_write_units_percent < writes_lower_threshold
and not decrease_consumed_writes_scale):
if decrease_consumed_writes_unit == 'percent':
calculated_provisioning = \
calculators.decrease_writes_in_percent(
updated_write_units,
decrease_consumed_writes_with,
get_gsi_option(
table_key, gsi_key, 'min_provisioned_writes'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
calculated_provisioning = \
calculators.decrease_writes_in_units(
updated_write_units,
decrease_consumed_writes_with,
get_gsi_option(
table_key, gsi_key, 'min_provisioned_writes'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
if (calculated_provisioning
and current_write_units != calculated_provisioning):
num_consec_write_checks += 1
if num_consec_write_checks >= \
num_write_checks_before_scale_down:
update_needed = True
updated_write_units = calculated_provisioning
# Never go over the configured max provisioning
if max_provisioned_writes:
if int(updated_write_units) > int(max_provisioned_writes):
update_needed = True
updated_write_units = int(max_provisioned_writes)
logger.info(
'{0} - GSI: {1} - '
'Will not increase writes over gsi-max-provisioned-writes '
'limit ({2} writes)'.format(
table_name,
gsi_name,
updated_write_units))
# Ensure that we have met the min-provisioning
if min_provisioned_writes:
if int(min_provisioned_writes) > int(updated_write_units):
update_needed = True
updated_write_units = int(min_provisioned_writes)
logger.info(
'{0} - GSI: {1} - Increasing writes to '
'meet gsi-min-provisioned-writes '
'limit ({2} writes)'.format(
table_name,
gsi_name,
updated_write_units))
if calculators.is_consumed_over_proposed(
current_write_units,
updated_write_units,
consumed_write_units_percent):
update_needed = False
updated_write_units = current_write_units
logger.info(
'{0} - GSI: {1} - Consumed is over proposed write units. Will leave '
'table at current setting.'.format(table_name, gsi_name))
logger.info('{0} - GSI: {1} - Consecutive write checks {2}/{3}'.format(
table_name,
gsi_name,
num_consec_write_checks,
num_write_checks_before_scale_down))
return update_needed, updated_write_units, num_consec_write_checks | Ensure that provisioning of writes is correct
:type table_name: str
:param table_name: Name of the DynamoDB table
:type table_key: str
:param table_key: Table configuration option key name
:type gsi_name: str
:param gsi_name: Name of the GSI
:type gsi_key: str
:param gsi_key: Configuration option key name
:type num_consec_write_checks: int
:param num_consec_write_checks: How many consecutive checks have we had
:returns: (bool, int, int)
update_needed, updated_write_units, num_consec_write_checks |
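The scale-up selection in the middle of this function is essentially "take the largest candidate and remember why"; a minimal, self-contained sketch of that pattern (names and numbers are illustrative, not taken from the module):
def pick_scaling_target(candidates, current_units):
    """Return (new_units, reason) for the largest candidate above current_units,
    or (current_units, None) when no candidate requires scaling up.
    candidates maps a human-readable reason to a proposed unit count."""
    best_units, best_reason = current_units, None
    for reason, units in candidates.items():
        if units and units > best_units:
            best_units, best_reason = units, reason
    return best_units, best_reason

units, why = pick_scaling_target(
    {'throttled by provisioned': 120, 'throttled by consumed': 150, 'consumed': 0},
    current_units=100)
assert (units, why) == (150, 'throttled by consumed')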
def im2mat(I):
"""Converts and image to matrix (one pixel per line)"""
return I.reshape((I.shape[0] * I.shape[1], I.shape[2])) | Converts and image to matrix (one pixel per line) |
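A quick shape check with NumPy, assuming I is an H x W x C array as the docstring implies and im2mat from above is in scope:
import numpy as np

I = np.zeros((4, 5, 3))            # a 4x5 RGB image
assert im2mat(I).shape == (20, 3)  # one row per pixel, one column per channel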
def _rds_cluster_tags(model, dbs, session_factory, generator, retry):
"""Augment rds clusters with their respective tags."""
client = local_session(session_factory).client('rds')
def process_tags(db):
try:
db['Tags'] = retry(
client.list_tags_for_resource,
ResourceName=generator(db[model.id]))['TagList']
return db
except client.exceptions.DBClusterNotFoundFault:
return None
# Rds maintains a low api call limit, so this can take some time :-(
return list(filter(None, map(process_tags, dbs))) | Augment rds clusters with their respective tags. |
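The filter(None, map(...)) idiom above keeps only the clusters whose tag lookup succeeded (process_tags returns None for clusters that disappeared mid-listing); in isolation the idiom looks like this, with a toy stand-in function:
def maybe_square(n):
    # Stand-in for process_tags: return None when the item should be dropped.
    return n * n if n % 2 == 0 else None

assert list(filter(None, map(maybe_square, [1, 2, 3, 4]))) == [4, 16]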
def revoke(self, auth, codetype, code, defer=False):
""" Given an activation code, the associated entity is revoked after which the activation
code can no longer be used.
Args:
auth: Takes the owner's cik
codetype: The type of code to revoke (client | share)
code: Code specified by <codetype> (cik | share-activation-code)
"""
return self._call('revoke', auth, [codetype, code], defer) | Given an activation code, the associated entity is revoked, after which the activation
code can no longer be used.
Args:
auth: Takes the owner's cik
codetype: The type of code to revoke (client | share)
code: Code specified by <codetype> (cik | share-activation-code) |
def _get_model_parameters_estimations(self, error_model):
"""
Infer model estimation method from the 'error_model'. Return an object
of type ModelParametersEstimation.
"""
if error_model.dependance == NIDM_INDEPEDENT_ERROR:
if error_model.variance_homo:
estimation_method = STATO_OLS
else:
estimation_method = STATO_WLS
else:
estimation_method = STATO_GLS
mpe = ModelParametersEstimation(estimation_method, self.software.id)
return mpe | Infer model estimation method from the 'error_model'. Return an object
of type ModelParametersEstimation. |
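The method selection is a two-level switch on the error model (independent and homoscedastic errors -> OLS, independent and heteroscedastic -> WLS, dependent -> GLS); a standalone sketch with placeholder labels standing in for the STATO identifiers:
def choose_estimation_method(independent_errors, variance_homogeneous,
                             ols='OLS', wls='WLS', gls='GLS'):
    """Map error-model properties to an estimation method (placeholder labels)."""
    if independent_errors:
        return ols if variance_homogeneous else wls
    return gls

assert choose_estimation_method(True, True) == 'OLS'
assert choose_estimation_method(True, False) == 'WLS'
assert choose_estimation_method(False, False) == 'GLS'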
def order_assets(self, asset_ids, composition_id):
"""Reorders a set of assets in a composition.
arg: asset_ids (osid.id.Id[]): ``Ids`` for a set of
``Assets``
arg: composition_id (osid.id.Id): ``Id`` of the
``Composition``
raise: NotFound - ``composition_id`` not found or, an
``asset_id`` not related to ``composition_id``
raise: NullArgument - ``instruction_ids`` or ``agenda_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
if (not isinstance(composition_id, ABCId) and
composition_id.get_identifier_namespace() != 'repository.Composition'):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
composition_map, collection = self._get_composition_collection(composition_id)
composition_map['assetIds'] = order_ids(asset_ids, composition_map['assetIds'])
collection.save(composition_map) | Reorders a set of assets in a composition.
arg: asset_ids (osid.id.Id[]): ``Ids`` for a set of
``Assets``
arg: composition_id (osid.id.Id): ``Id`` of the
``Composition``
raise: NotFound - ``composition_id`` not found or, an
``asset_id`` not related to ``composition_id``
raise: NullArgument - ``instruction_ids`` or ``agenda_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |