code | signature | docstring | loss_without_docstring | loss_with_docstring | factor |
string | string | string | float64 | float64 | float64 |
---|---|---|---|---|---|
r
elem = strel.copy()
x_dim, y_dim, z_dim = im.shape
x_min = x-r
x_max = x+r+1
y_min = y-r
y_max = y+r+1
z_min = z-r
z_max = z+r+1
if x_min < 0:
x_adj = -x_min
elem = elem[x_adj:, :, :]
x_min = 0
elif x_max > x_dim:
x_adj = x_max - x_dim
elem = elem[:-x_adj, :, :]
if y_min < 0:
y_adj = -y_min
elem = elem[:, y_adj:, :]
y_min = 0
elif y_max > y_dim:
y_adj = y_max - y_dim
elem = elem[:, :-y_adj, :]
if z_min < 0:
z_adj = -z_min
elem = elem[:, :, z_adj:]
z_min = 0
elif z_max > z_dim:
z_adj = z_max - z_dim
elem = elem[:, :, :-z_adj]
ex, ey, ez = elem.shape
im[x_min:x_min+ex, y_min:y_min+ey, z_min:z_min+ez] += elem
return im | def _fit_strel_to_im_3d(im, strel, r, x, y, z) | r"""
Helper function to add a structuring element to a 3D image.
Used by RSA. Ensures that if the center is less than ``r`` voxels from the
edge of the image, the strel is sliced to fit. | 1.487343 | 1.434385 | 1.03692 |
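A minimal numpy sketch of the clipping step this helper performs (standalone, not part of the original source):

import numpy as np

# Sketch of the boundary clipping above: a strel centered on the x = 0 face
# loses its out-of-bounds slab so the image slice and element shapes agree.
r = 2
elem = np.ones((2 * r + 1,) * 3, dtype=int)   # cubic strel, 5x5x5
x_min = 0 - r                                 # center at x = 0 -> x_min = -2
if x_min < 0:
    elem = elem[-x_min:, :, :]                # drop the 2 out-of-bounds layers
    x_min = 0
print(elem.shape)                             # (3, 5, 5)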
r'''
Fill in the edges of the input image.
Used by RSA to ensure that no elements are placed too close to the edge.
'''
edge = sp.ones_like(im)
if len(im.shape) == 2:
sx, sy = im.shape
edge[r:sx-r, r:sy-r] = im[r:sx-r, r:sy-r]
else:
sx, sy, sz = im.shape
edge[r:sx-r, r:sy-r, r:sz-r] = im[r:sx-r, r:sy-r, r:sz-r]
return edge | def _remove_edge(im, r) | r'''
Fill in the edges of the input image.
Used by RSA to ensure that no elements are placed too close to the edge. | 2.965395 | 1.86811 | 1.587377 |
r
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image contains a singleton axis:' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')
im = sp.copy(im)
if im.ndim == 2:
im = (sp.swapaxes(im, 1, 0))
im = im[-1::-1, :]
elif im.ndim == 3:
im = (sp.swapaxes(im, 2, 0))
im = im[:, -1::-1, :]
return im | def align_image_with_openpnm(im) | r"""
Rotates an image to agree with the coordinates used in OpenPNM. It is
unclear why they are not in agreement to start with. This is necessary
for overlaying the image and the network in Paraview.
Parameters
----------
im : ND-array
The image to be rotated. Can be the Boolean image of the pore space or
any other image of interest.
Returns
-------
image : ND-array
Returns a copy of ``im`` rotated accordingly. | 3.741484 | 3.899988 | 0.959358 |
r
def erode(im, strel):
t = fftconvolve(im, strel, mode='same') > (strel.sum() - 0.1)
return t
def dilate(im, strel):
t = fftconvolve(im, strel, mode='same') > 0.1
return t
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image contains a singleton axis:' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')
# Perform erosion and dilation
# The array must be padded with 0's so it works correctly at edges
temp = sp.pad(array=im, pad_width=1, mode='constant', constant_values=0)
if mode.startswith('ero'):
temp = erode(temp, strel)
if mode.startswith('dila'):
temp = dilate(temp, strel)
# Remove padding from resulting image
if im.ndim == 2:
result = temp[1:-1, 1:-1]
elif im.ndim == 3:
result = temp[1:-1, 1:-1, 1:-1]
# Perform opening and closing
if mode.startswith('open'):
temp = fftmorphology(im=im, strel=strel, mode='erosion')
result = fftmorphology(im=temp, strel=strel, mode='dilation')
if mode.startswith('clos'):
temp = fftmorphology(im=im, strel=strel, mode='dilation')
result = fftmorphology(im=temp, strel=strel, mode='erosion')
return result | def fftmorphology(im, strel, mode='opening') | r"""
Perform morphological operations on binary images using an fft approach for
improved performance
Parameters
----------
im : nd-array
The binary image on which to perform the morphological operation
strel : nd-array
The structuring element to use. Must have the same dims as ``im``.
mode : string
The type of operation to perform. Options are 'dilation', 'erosion',
'opening' and 'closing'.
Returns
-------
image : ND-array
A copy of the image with the specified morphological operation applied
using the fft-based methods available in ``scipy.signal.fftconvolve``.
Notes
-----
This function uses ``scipy.signal.fftconvolve`` which *can* be more than
10x faster than the standard binary morphology operation in
``scipy.ndimage``. This speed up may not always be realized, depending
on the scipy distribution used.
Examples
--------
>>> import porespy as ps
>>> from numpy import array_equal
>>> import scipy.ndimage as spim
>>> from skimage.morphology import disk
>>> im = ps.generators.blobs(shape=[100, 100], porosity=0.8)
Check that erosion, dilation, opening, and closing are all the same as
the ``scipy.ndimage`` functions:
>>> result = ps.filters.fftmorphology(im, strel=disk(5), mode='erosion')
>>> temp = spim.binary_erosion(im, structure=disk(5))
>>> array_equal(result, temp)
True
>>> result = ps.filters.fftmorphology(im, strel=disk(5), mode='dilation')
>>> temp = spim.binary_dilation(im, structure=disk(5))
>>> array_equal(result, temp)
True
>>> result = ps.filters.fftmorphology(im, strel=disk(5), mode='opening')
>>> temp = spim.binary_opening(im, structure=disk(5))
>>> array_equal(result, temp)
True
>>> result = ps.filters.fftmorphology(im, strel=disk(5), mode='closing')
>>> temp = spim.binary_closing(im, structure=disk(5))
>>> array_equal(result, temp)
True | 2.6736 | 2.681887 | 0.99691 |
r
# Expand scalar divs
if isinstance(divs, int):
divs = [divs for i in range(im.ndim)]
s = shape_split(im.shape, axis=divs)
return s | def subdivide(im, divs=2) | r"""
Returns slices into an image describing the specified number of sub-arrays.
This function is useful for performing operations on smaller images for
memory or speed. Note that for most typical operations this will NOT work,
since the image borders would cause artifacts (e.g. ``distance_transform``)
Parameters
----------
im : ND-array
The image of the porous media
divs : scalar or array_like
The number of sub-divisions to create in each axis of the image. If a
scalar is given it is assumed this value applies in all dimensions.
Returns
-------
slices : 1D-array
A 1-D array containing slice objects for indexing into ``im`` that
extract the sub-divided arrays.
Notes
-----
This method uses the
`array_split package <https://github.com/array-split/array_split>`_ which
offers the same functionality as the ``split`` method of Numpy's ND-array,
but supports splitting multidimensional arrays in all dimensions.
Examples
--------
>>> import porespy as ps
>>> import matplotlib.pyplot as plt
>>> im = ps.generators.blobs(shape=[200, 200])
>>> s = ps.tools.subdivide(im, divs=[2, 2])
``s`` contains an array with the shape given by ``divs``. To access the
first and last quadrants of ``im`` use:
>>> print(im[tuple(s[0, 0])].shape)
(100, 100)
>>> print(im[tuple(s[1, 1])].shape)
(100, 100)
It can be easier to index the array with the slices by applying ``flatten``
first:
>>> s_flat = s.flatten()
>>> for i in s_flat:
... print(im[i].shape)
(100, 100)
(100, 100)
(100, 100)
(100, 100) | 8.589875 | 12.124369 | 0.70848 |
r
if len(bbox) == 4:
ret = (slice(bbox[0], bbox[2]),
slice(bbox[1], bbox[3]))
else:
ret = (slice(bbox[0], bbox[3]),
slice(bbox[1], bbox[4]),
slice(bbox[2], bbox[5]))
return ret | def bbox_to_slices(bbox) | r"""
Given a tuple containing bounding box coordinates, return a tuple of slice
objects.
A bounding box in the form of a straight list is returned by several
functions in skimage, but these cannot be used to direct index into an
image. This function returns a tuple of slices that can be used directly, such as:
``im[bbox_to_slices([xmin, ymin, xmax, ymax])]``.
Parameters
----------
bbox : tuple of ints
The bounding box indices in the form (``xmin``, ``ymin``, ``zmin``,
``xmax``, ``ymax``, ``zmax``). For a 2D image, simply omit the
``zmin`` and ``zmax`` entries.
Returns
-------
slices : tuple
A tuple of slice objects that can be used to directly index into a
larger image. | 2.110756 | 2.288972 | 0.922142 |
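A short usage sketch, assuming the function is importable as ``porespy.tools.bbox_to_slices``:

import numpy as np
from porespy.tools import bbox_to_slices  # assumed import path

im = np.arange(36).reshape(6, 6)
s = bbox_to_slices((1, 2, 4, 5))   # 2D bbox: (xmin, ymin, xmax, ymax)
print(im[s].shape)                 # (3, 3) -> rows 1..3, columns 2..4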
r
p = sp.ones(shape=im.ndim, dtype=int) * sp.array(pad)
s = sp.ones(shape=im.ndim, dtype=int) * sp.array(size)
slc = []
for dim in range(im.ndim):
lower_im = sp.amax((center[dim] - s[dim] - p[dim], 0))
upper_im = sp.amin((center[dim] + s[dim] + 1 + p[dim], im.shape[dim]))
slc.append(slice(lower_im, upper_im))
return slc | def get_slice(im, center, size, pad=0) | r"""
Given a ``center`` location and ``size`` of a feature, returns the slice
object into the ``im`` that bounds the feature but does not extend beyond
the image boundaries.
Parameters
----------
im : ND-image
The image of the porous media
center : array_like
The coordinates of the center of the feature of interest
size : array_like or scalar
The size of the feature in each direction. If a scalar is supplied,
this implies the same size in all directions.
pad : scalar or array_like
The amount to pad onto each side of the slice. The default is 0. A
scalar value will increase the slice size equally in all directions,
while an array the same shape as ``im.shape`` can be passed to pad
a specified amount in each direction.
Returns
-------
slices : list
A list of slice objects, each indexing into one dimension of the image. | 2.636213 | 2.90515 | 0.907428 |
r
if r == 0:
dt = spim.distance_transform_edt(input=im)
r = int(sp.amax(dt)) * 2
im_padded = sp.pad(array=im, pad_width=r, mode='constant',
constant_values=True)
dt = spim.distance_transform_edt(input=im_padded)
seeds = (dt >= r) + get_border(shape=im_padded.shape)
# Remove seeds not connected to edges
labels = spim.label(seeds)[0]
mask = labels == 1 # Assume label of 1 on edges, assured by adding border
dt = spim.distance_transform_edt(~mask)
outer_region = dt < r
outer_region = extract_subsection(im=outer_region, shape=im.shape)
return outer_region | def find_outer_region(im, r=0) | r"""
Finds regions of the image that are outside of the solid matrix.
This function uses the rolling ball method to define where the outer region
ends and the void space begins.
This function is particularly useful for samples that do not fill the
entire rectangular image, such as cylindrical cores or samples with non-
parallel faces.
Parameters
----------
im : ND-array
Image of the porous material with 1's for void and 0's for solid
r : scalar
The radius of the rolling ball to use. If not specified then a value
is calculated as twice the maximum of the distance transform. The image
size is padded by this amount in all directions, so the image can
become quite large and unwieldy if too large a value is given.
Returns
-------
image : ND-array
A boolean mask the same shape as ``im``, containing True in all voxels
identified as *outside* the sample. | 4.932724 | 4.950382 | 0.996433 |
r
if r is None:
a = list(im.shape)
a.pop(axis)
r = sp.floor(sp.amin(a) / 2)
dim = [range(int(-s / 2), int(s / 2) + s % 2) for s in im.shape]
inds = sp.meshgrid(*dim, indexing='ij')
inds[axis] = inds[axis] * 0
d = sp.sqrt(sp.sum(sp.square(inds), axis=0))
mask = d < r
im_temp = im*mask
return im_temp | def extract_cylinder(im, r=None, axis=0) | r"""
Returns a cylindrical section of the image of specified radius.
This is useful for making square images look like cylindrical cores such
as those obtained from X-ray tomography.
Parameters
----------
im : ND-array
The image of the porous material. Can be any data type.
r : scalar
The radius of the cylinder to extract. If ``None`` is given then the
default is the largest cylinder that can fit inside the specified
plane.
axis : scalar
The axis along which the cylinder will be oriented.
Returns
-------
image : ND-array
A copy of ``im`` with values outside the cylindrical area set to 0 or
``False``. | 3.77168 | 3.89712 | 0.967812 |
r
# Check if shape was given as a fraction
shape = sp.array(shape)
if shape[0] < 1:
shape = sp.array(im.shape) * shape
center = sp.array(im.shape) / 2
s_im = []
for dim in range(im.ndim):
r = shape[dim] / 2
lower_im = sp.amax((center[dim] - r, 0))
upper_im = sp.amin((center[dim] + r, im.shape[dim]))
s_im.append(slice(int(lower_im), int(upper_im)))
return im[tuple(s_im)] | def extract_subsection(im, shape) | r"""
Extracts the middle section of an image
Parameters
----------
im : ND-array
Image from which to extract the subsection
shape : array_like
Can either specify the size of the extracted section or the fractional
size of the image to extract.
Returns
-------
image : ND-array
An ND-array of size given by the ``shape`` argument, taken from the
center of the image.
Examples
--------
>>> import scipy as sp
>>> from porespy.tools import extract_subsection
>>> im = sp.array([[1, 1, 1, 1], [1, 2, 2, 2], [1, 2, 3, 3], [1, 2, 3, 4]])
>>> print(im)
[[1 1 1 1]
[1 2 2 2]
[1 2 3 3]
[1 2 3 4]]
>>> im = extract_subsection(im=im, shape=[2, 2])
>>> print(im)
[[2 2]
[2 3]] | 2.924827 | 3.075852 | 0.9509 |
r
x, y, z = (sp.array(im.shape) / 2).astype(int)
planes = [im[x, :, :], im[:, y, :], im[:, :, z]]
if not squeeze:
imx = planes[0]
planes[0] = sp.reshape(imx, [1, imx.shape[0], imx.shape[1]])
imy = planes[1]
planes[1] = sp.reshape(imy, [imy.shape[0], 1, imy.shape[1]])
imz = planes[2]
planes[2] = sp.reshape(imz, [imz.shape[0], imz.shape[1], 1])
return planes | def get_planes(im, squeeze=True) | r"""
Extracts three planar images from the volumetric image, one for each
principal axis. The planes are taken from the middle of the domain.
Parameters
----------
im : ND-array
The volumetric image from which the 3 planar images are to be obtained
squeeze : boolean, optional
If True (default) the returned images are 2D (i.e. squeezed). If
False, the images are 1 element deep along the axis where the slice
was obtained.
Returns
-------
planes : list
A list of 2D-images | 1.892924 | 2.029765 | 0.932583 |
r
pad = int(pad)
a = []
for i, dim in zip(s, shape):
start = 0
stop = dim
if i.start - pad >= 0:
start = i.start - pad
if i.stop + pad < dim:
stop = i.stop + pad
a.append(slice(start, stop, None))
return tuple(a) | def extend_slice(s, shape, pad=1) | r"""
Adjust slice indices to include additional voxels around the slice.
This function does bounds checking to ensure the indices don't extend
outside the image.
Parameters
----------
s : list of slice objects
A list (or tuple) of N slice objects, where N is the number of
dimensions in the image.
shape : array_like
The shape of the image into which the slice objects apply. This is
used to check the bounds to prevent indexing beyond the image.
pad : int
The number of voxels to expand in each direction.
Returns
-------
slices : list of slice objects
A list of slice objects with the start and stop attributes respectively
decremented and incremented by ``pad``, without extending beyond the image
boundaries.
Examples
--------
>>> from scipy.ndimage import label, find_objects
>>> from porespy.tools import extend_slice
>>> im = sp.array([[1, 0, 0], [1, 0, 0], [0, 0, 1]])
>>> labels = label(im)[0]
>>> s = find_objects(labels)
Using the slices returned by ``find_objects``, set the first label to 3
>>> labels[s[0]] = 3
>>> print(labels)
[[3 0 0]
[3 0 0]
[0 0 2]]
Next extend the slice, and use it to set the values to 4
>>> s_ext = extend_slice(s[0], shape=im.shape, pad=1)
>>> labels[s_ext] = 4
>>> print(labels)
[[4 4 0]
[4 4 0]
[4 4 2]]
As can be seen by the location of the 4s, the slice was extended by 1, and
also handled the extension beyond the boundary correctly. | 2.694649 | 3.490949 | 0.771896 |
r
im = sp.copy(im)
if keep_zeros:
mask = (im == 0)
im[mask] = im.min() - 1
im = im - im.min()
im_flat = im.flatten()
im_vals = sp.unique(im_flat)
im_map = sp.zeros(shape=sp.amax(im_flat) + 1)
im_map[im_vals] = sp.arange(0, sp.size(sp.unique(im_flat)))
im_new = im_map[im_flat]
im_new = sp.reshape(im_new, newshape=sp.shape(im))
im_new = sp.array(im_new, dtype=im_flat.dtype)
return im_new | def make_contiguous(im, keep_zeros=True) | r"""
Take an image with arbitrary greyscale values and adjust them to ensure
all values fall in a contiguous range starting at 0.
This function will handle negative numbers such that the most negative number
will become 0, *unless* ``keep_zeros`` is ``True`` in which case it will
become 1, and all 0's in the original image remain 0.
Parameters
----------
im : array_like
An ND array containing greyscale values
keep_zeros : Boolean
If ``True`` (default) then 0 values remain 0, regardless of how the
other numbers are adjusted. This is mostly relevant when the array
contains negative numbers, and means that -1 will become +1, while
0 values remain 0.
Returns
-------
image : ND-array
An ND-array the same size as ``im`` but with all values in a contiguous
order.
Example
-------
>>> import porespy as ps
>>> import scipy as sp
>>> im = sp.array([[0, 2, 9], [6, 8, 3]])
>>> im = ps.tools.make_contiguous(im)
>>> print(im)
[[0 1 5]
[3 4 2]] | 2.493434 | 2.925436 | 0.852329 |
r
ndims = len(shape)
t = thickness
border = sp.ones(shape, dtype=bool)
if mode == 'faces':
if ndims == 2:
border[t:-t, t:-t] = False
if ndims == 3:
border[t:-t, t:-t, t:-t] = False
elif mode == 'edges':
if ndims == 2:
border[t:-t, t:-t] = False
if ndims == 3:
border[0::, t:-t, t:-t] = False
border[t:-t, 0::, t:-t] = False
border[t:-t, t:-t, 0::] = False
elif mode == 'corners':
if ndims == 2:
border[t:-t, 0::] = False
border[0::, t:-t] = False
if ndims == 3:
border[t:-t, 0::, 0::] = False
border[0::, t:-t, 0::] = False
border[0::, 0::, t:-t] = False
if return_indices:
border = sp.where(border)
return border | def get_border(shape, thickness=1, mode='edges', return_indices=False) | r"""
Creates an array of specified size with corners, edges or faces labelled as
True. This can be used as a mask to manipulate values lying on the
perimeter of an image.
Parameters
----------
shape : array_like
The shape of the array to return. Can be either 2D or 3D.
thickness : scalar (default is 1)
The number of pixels/voxels to place along perimeter.
mode : string
The type of border to create. Options are 'faces', 'edges' (default)
and 'corners'. In 2D 'faces' and 'edges' give the same result.
return_indices : boolean
If ``False`` (default) an image is returned with the border voxels set
to ``True``. If ``True``, then a tuple with the x, y, z (if ``im`` is
3D) indices is returned. This tuple can be used directly to index into
the image, such as ``im[tup] = 2``.
Returns
-------
image : ND-array
An ND-array of specified shape with ``True`` values at the perimeter
and ``False`` elsewhere
Notes
-----
TODO: This function uses brute force to create an image then fill the
edges using location-based logic, and if the user requests
``return_indices`` it finds them using ``np.where``. Since these arrays
are cubic it should be possible to use more elegant and efficient
index-based logic to find the indices, then use them to fill an empty
image with ``True`` using these indices.
Examples
--------
>>> import porespy as ps
>>> import scipy as sp
>>> mask = ps.tools.get_border(shape=[3, 3], mode='corners')
>>> print(mask)
[[ True False True]
[False False False]
[ True False True]]
>>> mask = ps.tools.get_border(shape=[3, 3], mode='edges')
>>> print(mask)
[[ True True True]
[ True False True]
[ True True True]] | 1.625586 | 1.742179 | 0.933077 |
from scipy.spatial import Delaunay, ConvexHull
if isinstance(hull, ConvexHull):
hull = hull.points
hull = Delaunay(hull)
return hull.find_simplex(points) >= 0 | def in_hull(points, hull) | Test if a list of coordinates are inside a given convex hull
Parameters
----------
points : array_like (N x ndims)
The spatial coordinates of the points to check
hull : scipy.spatial.ConvexHull object **OR** array_like
Can be either a convex hull object as returned by
``scipy.spatial.ConvexHull`` or simply the coordinates of the points
that define the convex hull.
Returns
-------
result : 1D-array
A 1D Boolean array of length *N* indicating whether or not each of the
given points in ``points`` lies within the provided ``hull``. | 2.066545 | 2.648057 | 0.7804 |
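A quick sketch of the Delaunay-based test, passing the hull as raw points (assumes ``in_hull`` is importable, e.g. from ``porespy.tools``):

import numpy as np
from porespy.tools import in_hull  # assumed import path

square = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])   # hull given as points
pts = np.array([[0.5, 0.5], [2.0, 2.0]])
print(in_hull(pts, square))   # [ True False]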
r
if scale is None:
scale = [im.min(), im.max()]
im = (im - sp.mean(im)) / sp.std(im)
im = 1 / 2 * sp.special.erfc(-im / sp.sqrt(2))
im = (im - im.min()) / (im.max() - im.min())
im = im * (scale[1] - scale[0]) + scale[0]
return im | def norm_to_uniform(im, scale=None) | r"""
Take an image with normally distributed greyscale values and convert it to
a uniform (i.e. flat) distribution. It's also possible to specify the
lower and upper limits of the uniform distribution.
Parameters
----------
im : ND-image
The image containing the normally distributed scalar field
scale : [low, high]
A list or array indicating the lower and upper bounds for the new
randomly distributed data. The default is ``None``, which uses the
``max`` and ``min`` of the original image as the lower and upper
bounds, but another common option might be [0, 1].
Returns
-------
image : ND-array
A copy of ``im`` with uniformly distributed greyscale values spanning
the specified range, if given. | 2.651742 | 2.949622 | 0.899011 |
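The transform above is the standard-normal CDF written via ``erfc``; a self-contained numpy restatement of the same steps:

import numpy as np
from scipy import special

im = np.random.normal(size=(50, 50))
z = (im - im.mean()) / im.std()              # standardize
u = 0.5 * special.erfc(-z / np.sqrt(2))      # normal CDF -> ~uniform (0, 1)
u = (u - u.min()) / (u.max() - u.min())      # stretch exactly onto [0, 1]
print(round(u.min(), 3), round(u.max(), 3))  # 0.0 1.0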
r
temp = mod.__dir__()
funcs = [i for i in temp if not i.startswith('_')]
funcs.sort()
row = '+' + '-'*colwidth[0] + '+' + '-'*colwidth[1] + '+'
fmt = '{0:1s} {1:' + str(colwidth[0]-2) + 's} {2:1s} {3:' \
+ str(colwidth[1]-2) + 's} {4:1s}'
lines = []
lines.append(row)
lines.append(fmt.format('|', 'Method', '|', 'Description', '|'))
lines.append(row.replace('-', '='))
for i, item in enumerate(funcs):
try:
s = getattr(mod, item).__doc__.strip()
end = s.find('\n')
if end > colwidth[1] - 2:
s = s[:colwidth[1] - 5] + '...'
lines.append(fmt.format('|', item, '|', s[:end], '|'))
lines.append(row)
except AttributeError:
pass
s = '\n'.join(lines)
return s | def functions_to_table(mod, colwidth=[27, 48]) | r"""
Given a module of functions, returns a ReST formatted text string that
outputs a table when printed.
Parameters
----------
mod : module
The module containing the functions to be included in the table, such
as 'porespy.filters'.
colwidth : list of ints
The width of the first and second columns. Note that because of the
vertical lines separating columns and defining the edges of the table,
the total table width will be 3 characters wider than the total sum
of the specified column widths. | 2.535796 | 2.507451 | 1.011304 |
r
im = region
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image contains a singleton axis:' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')
if strel is None:
if region.ndim == 3:
strel = ball(1)
if region.ndim == 2:
strel = disk(1)
pad_width = sp.amax(strel.shape)
if im.ndim == 3:
padded_mask = sp.pad(im, pad_width=pad_width, mode='constant')
padded_mask = spim.convolve(padded_mask * 1.0,
weights=strel) / sp.sum(strel)
else:
padded_mask = sp.reshape(im, (1,) + im.shape)
padded_mask = sp.pad(padded_mask, pad_width=pad_width, mode='constant')
verts, faces, norm, val = marching_cubes_lewiner(padded_mask)
result = namedtuple('mesh', ('verts', 'faces', 'norm', 'val'))
result.verts = verts - pad_width
result.faces = faces
result.norm = norm
result.val = val
return result | def mesh_region(region: bool, strel=None) | r"""
Creates a tri-mesh of the provided region using the marching cubes
algorithm
Parameters
----------
im : ND-array
A boolean image with ``True`` values indicating the region of interest
strel : ND-array
The structuring element to use when blurring the region. The blur is
performed using a simple convolution filter. The point is to create a
greyscale region to allow the marching cubes algorithm some freedom
to conform the mesh to the surface. As the size of ``strel`` increases
the region will become increasingly blurred and inaccurate. The default
is a spherical element with a radius of 1.
Returns
-------
mesh : tuple
A named-tuple containing ``faces``, ``verts``, ``norm``, and ``val``
as returned by the ``skimage.measure.marching_cubes`` function. | 3.207792 | 2.988902 | 1.073234 |
r
rad = int(sp.ceil(radius))
other = sp.ones((2 * rad + 1, 2 * rad + 1), dtype=bool)
other[rad, rad] = False
disk = spim.distance_transform_edt(other) < radius
return disk | def ps_disk(radius) | r"""
Creates a circular disk structuring element for morphological operations
Parameters
----------
radius : float or int
The desired radius of the structuring element
Returns
-------
strel : 2D-array
A 2D numpy bool array of the structuring element | 4.946869 | 5.50753 | 0.898201 |
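Unlike ``skimage.morphology.disk``, the element is built by thresholding a Euclidean distance transform, so fractional radii behave sensibly; a standalone restatement:

import numpy as np
from scipy import ndimage as spim

radius = 2.5
rad = int(np.ceil(radius))
other = np.ones((2 * rad + 1, 2 * rad + 1), dtype=bool)
other[rad, rad] = False                      # distance is measured from here
disk = spim.distance_transform_edt(other) < radius
print(disk.astype(int))                      # 7x7 boolean disk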
r
rad = int(sp.ceil(radius))
other = sp.ones((2 * rad + 1, 2 * rad + 1, 2 * rad + 1), dtype=bool)
other[rad, rad, rad] = False
ball = spim.distance_transform_edt(other) < radius
return ball | def ps_ball(radius) | r"""
Creates a spherical ball structuring element for morphological operations
Parameters
----------
radius : float or int
The desired radius of the structuring element
Returns
-------
strel : 3D-array
A 3D numpy array of the structuring element | 4.341214 | 4.47913 | 0.969209 |
r
shape = im2.shape
for ni in shape:
if ni % 2 == 0:
raise Exception("Structuring element must be odd-voxeled...")
nx, ny, nz = [(ni - 1) // 2 for ni in shape]
cx, cy, cz = c
im1[cx-nx:cx+nx+1, cy-ny:cy+ny+1, cz-nz:cz+nz+1] += im2
return im1 | def overlay(im1, im2, c) | r"""
Overlays ``im2`` onto ``im1``, given the voxel coordinates of the center
of ``im2`` in ``im1``.
Parameters
----------
im1 : ND-array
Original voxelated image
im2 : ND-array
Template voxelated image
c : array_like
[x, y, z] coordinates in ``im1`` where ``im2`` will be centered
Returns
-------
image : ND-array
A modified version of ``im1``, with ``im2`` overlaid at the specified
location | 4.500708 | 4.6383 | 0.970336 |
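Usage sketch, assuming ``overlay`` is importable from ``porespy.tools``; the template must be odd-sized in every axis:

import numpy as np
from porespy.tools import overlay  # assumed import path

im1 = np.zeros((5, 5, 5), dtype=int)
im2 = np.ones((3, 3, 3), dtype=int)     # odd-voxeled in every dimension
im1 = overlay(im1, im2, c=[2, 2, 2])    # centered in the volume
print(im1.sum())                        # 27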
r
c = sp.array(c, dtype=int)
if c.size != im.ndim:
raise Exception('Coordinates do not match dimensionality of image')
bbox = []
[bbox.append(sp.clip(c[i] - r, 0, im.shape[i])) for i in range(im.ndim)]
[bbox.append(sp.clip(c[i] + r, 0, im.shape[i])) for i in range(im.ndim)]
bbox = sp.ravel(bbox)
s = bbox_to_slices(bbox)
temp = im[s]
blank = sp.ones_like(temp)
blank[tuple(c - bbox[0:im.ndim])] = 0
blank = spim.distance_transform_edt(blank) < r
im[s] = blank
return im | def insert_sphere(im, c, r) | r"""
Inserts a sphere of a specified radius into a given image
Parameters
----------
im : array_like
Image into which the sphere should be inserted
c : array_like
The [x, y, z] coordinate indicating the center of the sphere
r : int
The radius of sphere to insert
Returns
-------
image : ND-array
The original image with a sphere inserted at the specified location | 3.307784 | 3.449698 | 0.958862 |
r
if im.ndim != 3:
raise Exception('This function is only implemented for 3D images')
# Converting coordinates to numpy array
xyz0, xyz1 = [sp.array(xyz).astype(int) for xyz in (xyz0, xyz1)]
r = int(r)
L = sp.absolute(xyz0 - xyz1).max() + 1
xyz_line = [sp.linspace(xyz0[i], xyz1[i], L).astype(int) for i in range(3)]
xyz_min = sp.amin(xyz_line, axis=1) - r
xyz_max = sp.amax(xyz_line, axis=1) + r
shape_template = xyz_max - xyz_min + 1
template = sp.zeros(shape=shape_template)
# Shortcut for orthogonal cylinders
if (xyz0 == xyz1).sum() == 2:
unique_dim = [xyz0[i] != xyz1[i] for i in range(3)].index(True)
shape_template[unique_dim] = 1
template_2D = disk(radius=r).reshape(shape_template)
template = sp.repeat(template_2D, repeats=L, axis=unique_dim)
xyz_min[unique_dim] += r
xyz_max[unique_dim] += -r
else:
xyz_line_in_template_coords = [xyz_line[i] - xyz_min[i] for i in range(3)]
template[tuple(xyz_line_in_template_coords)] = 1
template = spim.distance_transform_edt(template == 0) <= r
im[xyz_min[0]:xyz_max[0]+1,
xyz_min[1]:xyz_max[1]+1,
xyz_min[2]:xyz_max[2]+1] += template
return im | def insert_cylinder(im, xyz0, xyz1, r) | r"""
Inserts a cylinder of given radius onto a given image
Parameters
----------
im : array_like
Original voxelated image
xyz0, xyz1 : 3-by-1 array_like
Voxel coordinates of the two end points of the cylinder
r : int
Radius of the cylinder
Returns
-------
im : ND-array
Original voxelated image overlaid with the cylinder
Notes
-----
This function is only implemented for 3D images | 2.687209 | 2.617791 | 1.026518 |
r
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image contains a singleton axis:' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')
f = faces
if f is not None:
if im.ndim == 2:
faces = [(int('left' in f) * 3, int('right' in f) * 3),
(int(('front') in f) * 3 or int(('bottom') in f) * 3,
int(('back') in f) * 3 or int(('top') in f) * 3)]
if im.ndim == 3:
faces = [(int('left' in f) * 3, int('right' in f) * 3),
(int('front' in f) * 3, int('back' in f) * 3),
(int('top' in f) * 3, int('bottom' in f) * 3)]
im = sp.pad(im, pad_width=faces, mode='edge')
else:
im = im
return im | def pad_faces(im, faces) | r"""
Pads the input image at the specified faces. The shape of the padded image
is the same as the output image of the ``add_boundary_regions`` function.
Parameters
----------
im : ND_array
The image that needs to be padded
faces : list of strings
Labels indicating where image needs to be padded. Given a 3D image
of shape ``[x, y, z] = [i, j, k]``, the following conventions are used
to indicate along which axis the padding should be applied:
* 'left' -> ``x = 0``
* 'right' -> ``x = i``
* 'front' -> ``y = 0``
* 'back' -> ``y = j``
* 'bottom' -> ``z = 0``
* 'top' -> ``z = k``
Returns
-------
An image padded at the specified face(s)
See also
--------
add_boundary_regions | 2.875158 | 2.992656 | 0.960738 |
r
# -------------------------------------------------------------------------
# Get alias if provided by user
phases_num = sp.unique(im * 1)
phases_num = sp.trim_zeros(phases_num)
al = {}
for values in phases_num:
al[values] = 'phase{}'.format(values)
if alias is not None:
alias_sort = dict(sorted(alias.items()))
phase_labels = sp.array([*alias_sort])
al = alias
if set(phase_labels) != set(phases_num):
raise Exception('Alias labels do not match the image labels; '
'please provide correct image labels')
return al | def _create_alias_map(im, alias=None) | r"""
Creates an alias mapping between phases in the original image and
identifiable names. This mapping is used during network extraction to label
the interconnections between, and the properties of, each phase.
Parameters
----------
im : ND-array
Image of a porous material where each phase is represented by a unique
integer. Phase integers should start from 1. A Boolean image will extract
only a single network, labeled by its ``True`` values.
alias : dict (Optional)
A dictionary that assigns a unique image label to a specific phase.
For example {1: 'Solid'} will show all structural properties associated
with label 1 as Solid phase properties.
If ``None`` then default labelling will be used i.e {1: 'Phase1',..}.
Returns
-------
A dictionary with numerical phase labels as keys, and readable phase names
as values. If no alias is provided then default labelling is used,
i.e. {1: 'Phase1', ...} | 6.830304 | 5.79519 | 1.178616 |
if hosts_entry and isinstance(hosts_entry, str):
entry = hosts_entry.strip()
if not entry or not entry[0] or entry[0] == "\n":
return 'blank'
if entry[0] == "#":
return 'comment'
entry_chunks = entry.split()
if is_ipv6(entry_chunks[0]):
return 'ipv6'
if is_ipv4(entry_chunks[0]):
return 'ipv4' | def get_entry_type(hosts_entry=None) | Return the type of entry for the given line of the hosts file
:param hosts_entry: A line from the hosts file
:return: 'comment' | 'blank' | 'ipv4' | 'ipv6' | 2.515836 | 2.392822 | 1.05141 |
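A usage sketch, assuming this is the static method ``HostsEntry.get_entry_type`` from the ``python_hosts`` package:

from python_hosts import HostsEntry  # assumed import

print(HostsEntry.get_entry_type('# dev boxes'))          # comment
print(HostsEntry.get_entry_type('\n'))                   # blank
print(HostsEntry.get_entry_type('127.0.0.1 localhost'))  # ipv4
print(HostsEntry.get_entry_type('::1 localhost'))        # ipv6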
line_parts = entry.strip().split()
if is_ipv4(line_parts[0]) and valid_hostnames(line_parts[1:]):
return HostsEntry(entry_type='ipv4',
address=line_parts[0],
names=line_parts[1:])
elif is_ipv6(line_parts[0]) and valid_hostnames(line_parts[1:]):
return HostsEntry(entry_type='ipv6',
address=line_parts[0],
names=line_parts[1:])
else:
return False | def str_to_hostentry(entry) | Transform a line from a hosts file into an instance of HostsEntry
:param entry: A line from the hosts file
:return: An instance of HostsEntry | 2.210713 | 2.097427 | 1.054012 |
if not platform:
platform = sys.platform
if platform.startswith('win'):
result = r"c:\windows\system32\drivers\etc\hosts"
return result
else:
return '/etc/hosts' | def determine_hosts_path(platform=None) | Return the hosts file path based on the supplied
or detected platform.
:param platform: a string used to identify the platform
:return: detected filesystem path of the hosts file | 3.043882 | 3.29111 | 0.92488 |
written_count = 0
comments_written = 0
blanks_written = 0
ipv4_entries_written = 0
ipv6_entries_written = 0
if path:
output_file_path = path
else:
output_file_path = self.hosts_path
try:
with open(output_file_path, 'w') as hosts_file:
for written_count, line in enumerate(self.entries):
if line.entry_type == 'comment':
hosts_file.write(line.comment + "\n")
comments_written += 1
if line.entry_type == 'blank':
hosts_file.write("\n")
blanks_written += 1
if line.entry_type == 'ipv4':
hosts_file.write(
"{0}\t{1}\n".format(
line.address,
' '.join(line.names),
)
)
ipv4_entries_written += 1
if line.entry_type == 'ipv6':
hosts_file.write(
"{0}\t{1}\n".format(
line.address,
' '.join(line.names), ))
ipv6_entries_written += 1
except Exception:
raise UnableToWriteHosts()
return {'total_written': written_count + 1,
'comments_written': comments_written,
'blanks_written': blanks_written,
'ipv4_entries_written': ipv4_entries_written,
'ipv6_entries_written': ipv6_entries_written} | def write(self, path=None) | Write all of the HostsEntry instances back to the hosts file
:param path: override the write path
:return: Dictionary containing counts | 1.977646 | 1.87665 | 1.053817 |
for entry in self.entries:
if entry.entry_type in ('ipv4', 'ipv6'):
if address and address == entry.address:
return True
if names:
for name in names:
if name in entry.names:
return True
elif entry.entry_type == 'comment' and entry.comment == comment:
return True
return False | def exists(self, address=None, names=None, comment=None) | Determine if the supplied address and/or names, or comment, exists in a HostsEntry within Hosts
:param address: An ipv4 or ipv6 address to search for
:param names: A list of names to search for
:param comment: A comment to search for
:return: True if a supplied address, name, or comment is found. Otherwise, False. | 2.470503 | 2.446151 | 1.009955 |
if self.entries:
if address and name:
func = lambda entry: not entry.is_real_entry() or (entry.address != address and name not in entry.names)
elif address:
func = lambda entry: not entry.is_real_entry() or entry.address != address
elif name:
func = lambda entry: not entry.is_real_entry() or name not in entry.names
else:
raise ValueError('No address or name was specified for removal.')
self.entries = list(filter(func, self.entries)) | def remove_all_matching(self, address=None, name=None) | Remove all HostsEntry instances from the Hosts object
where the supplied ip address or name matches
:param address: An ipv4 or ipv6 address
:param name: A host name
:return: None | 2.484693 | 2.554989 | 0.972487 |
file_contents = self.get_hosts_by_url(url=url).decode('utf-8')
file_contents = file_contents.rstrip().replace('^M', '\n')
file_contents = file_contents.rstrip().replace('\r\n', '\n')
lines = file_contents.split('\n')
skipped = 0
import_entries = []
for line in lines:
stripped_entry = line.strip()
if (not stripped_entry) or (stripped_entry.startswith('#')):
skipped += 1
else:
line = line.partition('#')[0]
line = line.rstrip()
import_entry = HostsEntry.str_to_hostentry(line)
if import_entry:
import_entries.append(import_entry)
add_result = self.add(entries=import_entries, force=force)
write_result = self.write()
return {'result': 'success',
'skipped': skipped,
'add_result': add_result,
'write_result': write_result} | def import_url(self, url=None, force=None) | Read a list of host entries from a URL, convert them into instances of HostsEntry and
then append to the list of entries in Hosts
:param url: The URL of where to download a hosts file
:return: Counts reflecting the attempted additions | 2.792755 | 2.548225 | 1.095961 |
skipped = 0
invalid_count = 0
if is_readable(import_file_path):
import_entries = []
with open(import_file_path, 'r') as infile:
for line in infile:
stripped_entry = line.strip()
if (not stripped_entry) or (stripped_entry.startswith('#')):
skipped += 1
else:
line = line.partition('#')[0]
line = line.rstrip()
import_entry = HostsEntry.str_to_hostentry(line)
if import_entry:
import_entries.append(import_entry)
else:
invalid_count += 1
add_result = self.add(entries=import_entries)
write_result = self.write()
return {'result': 'success',
'skipped': skipped,
'invalid_count': invalid_count,
'add_result': add_result,
'write_result': write_result}
else:
return {'result': 'failed',
'message': 'Cannot read: file {0}.'.format(import_file_path)} | def import_file(self, import_file_path=None) | Read a list of host entries from a file, convert them into instances
of HostsEntry and then append to the list of entries in Hosts
:param import_file_path: The path to the file containing the host entries
:return: Counts reflecting the attempted additions | 2.621667 | 2.405975 | 1.089648 |
ipv4_count = 0
ipv6_count = 0
comment_count = 0
invalid_count = 0
duplicate_count = 0
replaced_count = 0
import_entries = []
existing_addresses = [x.address for x in self.entries if x.address]
existing_names = []
for item in self.entries:
if item.names:
existing_names.extend(item.names)
existing_names = dedupe_list(existing_names)
for entry in entries:
if entry.entry_type == 'comment':
entry.comment = entry.comment.strip()
if entry.comment[0] != "#":
entry.comment = "# " + entry.comment
import_entries.append(entry)
elif entry.address in ('0.0.0.0', '127.0.0.1') or allow_address_duplication:
# Allow duplicate entries for addresses used for adblocking
if set(entry.names).intersection(existing_names):
if force:
for name in entry.names:
self.remove_all_matching(name=name)
import_entries.append(entry)
else:
duplicate_count += 1
else:
import_entries.append(entry)
elif entry.address in existing_addresses:
if not force:
duplicate_count += 1
elif force:
self.remove_all_matching(address=entry.address)
replaced_count += 1
import_entries.append(entry)
elif set(entry.names).intersection(existing_names):
if not force:
duplicate_count += 1
else:
for name in entry.names:
self.remove_all_matching(name=name)
replaced_count += 1
import_entries.append(entry)
else:
import_entries.append(entry)
for item in import_entries:
if item.entry_type == 'comment':
comment_count += 1
self.entries.append(item)
elif item.entry_type == 'ipv4':
ipv4_count += 1
self.entries.append(item)
elif item.entry_type == 'ipv6':
ipv6_count += 1
self.entries.append(item)
return {'comment_count': comment_count,
'ipv4_count': ipv4_count,
'ipv6_count': ipv6_count,
'invalid_count': invalid_count,
'duplicate_count': duplicate_count,
'replaced_count': replaced_count} | def add(self, entries=None, force=False, allow_address_duplication=False) | Add instances of HostsEntry to the instance of Hosts.
:param entries: A list of instances of HostsEntry
:param force: Remove matching before adding
:param allow_address_duplication: Allow using multiple entries for same address
:return: The counts of successes and failures | 1.962433 | 1.936411 | 1.013438 |
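An end-to-end sketch, assuming the ``python_hosts`` package; a hypothetical scratch path is used so the real ``/etc/hosts`` is never touched:

from python_hosts import Hosts, HostsEntry  # assumed imports

hosts = Hosts(hosts_path='hosts.scratch')   # hypothetical scratch file
entry = HostsEntry(entry_type='ipv4', address='10.0.0.5',
                   names=['example.local'])
counts = hosts.add(entries=[entry], force=True)
print(counts['ipv4_count'])                 # 1
print(hosts.write(path='hosts.scratch'))    # per-type write counts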
try:
with open(self.hosts_path, 'r') as hosts_file:
hosts_entries = [line for line in hosts_file]
for hosts_entry in hosts_entries:
entry_type = HostsEntry.get_entry_type(hosts_entry)
if entry_type == "comment":
hosts_entry = hosts_entry.replace("\r", "")
hosts_entry = hosts_entry.replace("\n", "")
self.entries.append(HostsEntry(entry_type="comment",
comment=hosts_entry))
elif entry_type == "blank":
self.entries.append(HostsEntry(entry_type="blank"))
elif entry_type in ("ipv4", "ipv6"):
chunked_entry = hosts_entry.split()
stripped_name_list = [name.strip() for name in chunked_entry[1:]]
self.entries.append(
HostsEntry(
entry_type=entry_type,
address=chunked_entry[0].strip(),
names=stripped_name_list))
except IOError:
return {'result': 'failed',
'message': 'Cannot read: {0}.'.format(self.hosts_path)} | def populate_entries(self) | Called by the initialiser of Hosts. This reads the entries from the local hosts file,
converts them into instances of HostsEntry and adds them to the Hosts list of entries.
:return: None | 2.457922 | 2.38126 | 1.032194 |
try:
if socket.inet_pton(socket.AF_INET6, entry):
return True
except socket.error:
return False | def is_ipv6(entry) | Check if the string provided is a valid ipv6 address
:param entry: A string representation of an IP address
:return: True if valid, False if invalid | 2.353022 | 2.509048 | 0.937815 |
for entry in hostname_list:
if len(entry) > 255:
return False
allowed = re.compile(r'(?!-)[A-Z\d-]{1,63}(?<!-)$', re.IGNORECASE)
if not all(allowed.match(x) for x in entry.split(".")):
return False
return True | def valid_hostnames(hostname_list) | Check if the supplied list of strings are valid hostnames
:param hostname_list: A list of strings
:return: True if the strings are valid hostnames, False if not | 1.767279 | 1.830593 | 0.965413 |
if os.path.isfile(path) and os.access(path, os.R_OK):
return True
return False | def is_readable(path=None) | Test if the supplied filesystem path can be read
:param path: A filesystem path
:return: True if the path is a file that can be read. Otherwise, False | 2.511409 | 3.563263 | 0.704806 |
seen = set()
return [x for x in seq if not (x in seen or seen.add(x))] | def dedupe_list(seq) | Utility function to remove duplicates from a list
:param seq: The sequence (list) to deduplicate
:return: A list with original duplicates removed | 2.209254 | 3.035071 | 0.727909 |
sha = hashlib.sha256()
with open(filepath, 'rb') as fp:
while 1:
data = fp.read(blocksize)
if data:
sha.update(data)
else:
break
return sha | def _filehash(filepath, blocksize=4096) | Return the hash object for the file `filepath', processing the file
by chunk of `blocksize'.
:type filepath: str
:param filepath: Path to file
:type blocksize: int
:param blocksize: Size of the chunk when processing the file | 1.916465 | 2.215985 | 0.864837 |
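The same chunked-read pattern can be written with ``iter`` and a sentinel; a standalone sketch independent of the class above:

import hashlib

def filehash(filepath, blocksize=4096):
    # Read fixed-size chunks so large files never sit in memory at once;
    # iter() stops at the b'' sentinel returned on end-of-file.
    sha = hashlib.sha256()
    with open(filepath, 'rb') as fp:
        for chunk in iter(lambda: fp.read(blocksize), b''):
            sha.update(chunk)
    return sha.hexdigest()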
data = {}
data['deleted'] = list(set(dir_cmp['files']) - set(dir_base['files']))
data['created'] = list(set(dir_base['files']) - set(dir_cmp['files']))
data['updated'] = []
data['deleted_dirs'] = list(set(dir_cmp['subdirs']) - set(dir_base['subdirs']))
for f in set(dir_cmp['files']).intersection(set(dir_base['files'])):
if dir_base['index'][f] != dir_cmp['index'][f]:
data['updated'].append(f)
return data | def compute_diff(dir_base, dir_cmp) | Compare `dir_base' and `dir_cmp' and returns a list with
the following keys:
- deleted files `deleted'
- created files `created'
- updated files `updated'
- deleted directories `deleted_dirs' | 2.112083 | 2.010282 | 1.05064 |
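A worked example with hypothetical state dicts in the shape ``compute_diff`` expects; note the naming convention: entries only in ``dir_base`` count as created, entries only in ``dir_cmp`` as deleted:

from dirtools import compute_diff  # assumed import path

dir_base = {'files': ['a.txt', 'b.txt'], 'subdirs': ['docs'],
            'index': {'a.txt': 'h1', 'b.txt': 'h2'}}
dir_cmp = {'files': ['b.txt', 'c.txt'], 'subdirs': [],
           'index': {'b.txt': 'h9', 'c.txt': 'h3'}}
diff = compute_diff(dir_base, dir_cmp)
print(diff['created'])   # ['a.txt']
print(diff['deleted'])   # ['c.txt']
print(diff['updated'])   # ['b.txt'] -- its hash changed between snapshots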
if archive_path is None:
archive = tempfile.NamedTemporaryFile(delete=False)
tar_args = ()
tar_kwargs = {'fileobj': archive}
_return = archive.name
else:
tar_args = (archive_path,)
tar_kwargs = {}
_return = archive_path
tar_kwargs.update({'mode': 'w:gz'})
with closing(tarfile.open(*tar_args, **tar_kwargs)) as tar:
tar.add(self.path, arcname=self.file)
return _return | def compress_to(self, archive_path=None) | Compress the directory with gzip using tarlib.
:type archive_path: str
:param archive_path: Path to the archive, if None, a tempfile is created | 2.716097 | 2.706393 | 1.003586 |
# TODO alternative to filehash => mtime as a faster alternative
shadir = hashlib.sha256()
for f in self.files():
try:
shadir.update(str(index_func(os.path.join(self.path, f))).encode('utf-8'))
except (IOError, OSError):
pass
return shadir.hexdigest() | def hash(self, index_func=os.path.getmtime) | Hash for the entire directory (except excluded files) recursively.
Use mtime instead of sha256 by default for a faster hash.
>>> dir.hash(index_func=dirtools.filehash) | 5.371937 | 4.849203 | 1.107798 |
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for f in files:
if pattern is None or globster.match(f):
if abspath:
yield os.path.join(root, f)
else:
yield self.relpath(os.path.join(root, f)) | def iterfiles(self, pattern=None, abspath=False) | Generator for all the files not excluded recursively.
Return relative path.
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern | 2.484056 | 3.100078 | 0.801288 |
return sorted(self.iterfiles(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse) | def files(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False) | Return a sorted list containing relative path of all files (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative file paths. | 3.089569 | 4.168935 | 0.741093 |
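Typical usage, assuming the ``dirtools`` package's ``Dir`` class that these methods belong to:

from dirtools import Dir  # assumed import

d = Dir('.', exclude_file='.gitignore')   # gitignore-style exclusions
for relpath in d.files('*.py'):           # glob pattern, relative paths
    print(relpath)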
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for d in dirs:
if pattern is None or globster.match(d):
if abspath:
yield os.path.join(root, d)
else:
yield self.relpath(os.path.join(root, d)) | def itersubdirs(self, pattern=None, abspath=False) | Generator for all subdirs (except excluded).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern | 2.564496 | 3.107946 | 0.825142 |
return sorted(self.itersubdirs(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse) | def subdirs(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False) | Return a sorted list containing relative path of all subdirs (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative subdir paths. | 3.524673 | 4.428846 | 0.795844 |
dir_size = 0
for f in self.iterfiles(abspath=True):
dir_size += os.path.getsize(f)
return dir_size | def size(self) | Return directory size in bytes.
:rtype: int
:return: Total directory size in bytes. | 4.25145 | 4.220238 | 1.007396 |
match = self.globster.match(self.relpath(path))
if match:
log.debug("{0} matched {1} for exclusion".format(path, match))
return True
return False | def is_excluded(self, path) | Return True if `path' should be excluded
given patterns in the `exclude_file'. | 5.189888 | 4.954308 | 1.047551 |
projects = []
for d in self.subdirs():
project_file = os.path.join(self.directory, d, file_identifier)
if os.path.isfile(project_file):
projects.append(d)
return projects | def find_projects(self, file_identifier=".project") | Search all directories recursively for subdirs
with `file_identifier' in it.
:type file_identifier: str
:param file_identifier: File identifier, .project by default.
:rtype: list
:return: The list of subdirs with a `file_identifier' in it. | 2.589478 | 3.109457 | 0.832775 |
return os.path.relpath(path, start=self.path) | def relpath(self, path) | Return a relative filepath to path from Dir path. | 4.174272 | 3.20276 | 1.303336 |
data = {}
data['directory'] = self._dir.path
data['files'] = list(self._dir.files())
data['subdirs'] = list(self._dir.subdirs())
data['index'] = self.index()
return data | def compute_state(self) | Generate the index. | 4.082477 | 3.633605 | 1.123533 |
exitcode = 0
running = []
progress = {}
def progress_cb(report):
pid, count, success, *_, stats = report
print('\x1b[%sA' % (1+len(running)))
if pid not in progress:
running.append(pid)
progress[pid] = count, success
for pid in running:
count, success = progress[pid]
if success is None:
if count == 0:
print('\x1b[2K%s: \x1b[31m%s\x1b[0m' % (pid, 'WAITING FOR OUTPUT'))
else:
print('\x1b[2K%s: \x1b[33mRUNNING, processed %s rows\x1b[0m' % (pid, count))
else:
if success:
print('\x1b[2K%s: \x1b[32mSUCCESS, processed %s rows\x1b[0m' % (pid, count))
else:
print('\x1b[2K%s: \x1b[31mFAILURE, processed %s rows\x1b[0m' % (pid, count))
results = run_pipelines(pipeline_id, '.', use_cache,
dirty, force, concurrency,
verbose, progress_cb if not verbose else None,
slave)
if not slave:
logging.info('RESULTS:')
errd = False
for result in results:
stats = user_facing_stats(result.stats)
errd = errd or result.errors or not result.success
logging.info('%s: %s %s%s',
'SUCCESS' if result.success else 'FAILURE',
result.pipeline_id,
repr(stats) if stats is not None else '',
(
'\nERROR log from processor %s:\n+--------\n| ' % result.errors[0] +
'\n| '.join(result.errors[1:]) +
'\n+--------'
) if result.errors else '')
else:
result_obj = []
errd = False
for result in results:
errd = errd or result.errors or not result.success
stats = user_facing_stats(result.stats)
result_obj.append(dict(
success=result.success,
pipeline_id=result.pipeline_id,
stats=result.stats,
errors=result.errors
))
json.dump(result_obj, sys.stderr)
if errd:
exitcode = 1
exit(exitcode) | def run(pipeline_id, verbose, use_cache, dirty, force, concurrency, slave) | Run a pipeline by pipeline-id.
pipeline-id supports the '%' wildcard for any-suffix matching,
'all' for running all pipelines, and
a comma-delimited list of pipeline ids | 2.848778 | 2.896526 | 0.983515 |
@wraps(view_func)
def wrapper(*args, **kwargs):
if app.config.get('BASIC_AUTH_ACTIVE', False):
if basic_auth.authenticate():
return view_func(*args, **kwargs)
else:
return basic_auth.challenge()
else:
return view_func(*args, **kwargs)
return wrapper | def basic_auth_required(view_func) | A decorator that can be used to protect specific views with HTTP basic
access authentication. Conditional on having BASIC_AUTH_USERNAME and
BASIC_AUTH_PASSWORD set as env vars. | 1.935249 | 1.964154 | 0.985284 |
'''An individual pipeline status'''
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
status_color = 'lightgray'
if pipeline_status.pipeline_details:
status_text = pipeline_status.state().lower()
last_execution = pipeline_status.get_last_execution()
success = last_execution.success if last_execution else None
if success is True:
stats = last_execution.stats if last_execution else None
record_count = stats.get('count_of_rows')
if record_count is not None:
status_text += ' (%d records)' % record_count
status_color = 'brightgreen'
elif success is False:
status_color = 'red'
else:
status_text = "not found"
return _make_badge_response('pipeline', status_text, status_color) | def badge(pipeline_id) | An individual pipeline status | 3.664458 | 3.65842 | 1.00165 |
'''Status badge for a collection of pipelines.'''
all_pipeline_ids = sorted(status.all_pipeline_ids())
if not pipeline_path.startswith('./'):
pipeline_path = './' + pipeline_path
# Filter pipeline ids to only include those that start with pipeline_path.
path_pipeline_ids = \
[p for p in all_pipeline_ids if p.startswith(pipeline_path)]
statuses = []
for pipeline_id in path_pipeline_ids:
pipeline_status = status.get(pipeline_id)
if pipeline_status is None:
abort(404)
status_text = pipeline_status.state().lower()
statuses.append(status_text)
status_color = 'lightgray'
status_counter = Counter(statuses)
if status_counter:
if len(status_counter) == 1 and status_counter['succeeded'] > 0:
status_color = 'brightgreen'
elif status_counter['failed'] > 0:
status_color = 'red'
elif status_counter['failed'] == 0:
status_color = 'yellow'
status_text = \
', '.join(['{} {}'.format(v, k)
for k, v in status_counter.items()])
else:
status_text = "not found"
return _make_badge_response('pipelines', status_text, status_color) | def badge_collection(pipeline_path) | Status badge for a collection of pipelines. | 2.67312 | 2.649379 | 1.008961 |
with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency,
thread_name_prefix='T') as executor:
try:
results = []
pending_futures = set()
done_futures = set()
finished_futures = []
progress_thread = None
progress_queue = None
status_manager = status_mgr(root_dir)
if progress_cb is not None:
progress_queue = Queue()
progress_thread = threading.Thread(target=progress_report_handler, args=(progress_cb, progress_queue))
progress_thread.start()
all_specs = specs_to_execute(pipeline_id_pattern, root_dir, status_manager, force, dirty, results)
while True:
done = None
if len(done_futures) > 0:
done = done_futures.pop()
finished_futures.append(done)
done = done.result()[0]
try:
spec = all_specs.send(done)
except StopIteration:
spec = None
if spec is None:
# Wait for all runners to idle...
if len(done_futures) == 0:
if len(pending_futures) > 0:
done_futures, pending_futures = \
concurrent.futures.wait(pending_futures,
return_when=concurrent.futures.FIRST_COMPLETED)
continue
else:
break
else:
continue
if len(spec.validation_errors) > 0:
results.append(
ExecutionResult(spec.pipeline_id,
False,
{},
['init'] + list(map(str, spec.validation_errors)))
)
continue
if slave:
ps = status_manager.get(spec.pipeline_id)
ps.init(spec.pipeline_details,
spec.source_details,
spec.validation_errors,
spec.cache_hash)
eid = gen_execution_id()
if ps.queue_execution(eid, 'manual'):
success, stats, errors = \
execute_pipeline(spec, eid,
use_cache=use_cache)
results.append(ExecutionResult(
spec.pipeline_id,
success,
stats,
errors
))
else:
results.append(
ExecutionResult(spec.pipeline_id,
False,
None,
['Already Running'])
)
else:
f = executor.submit(remote_execute_pipeline,
spec,
root_dir,
use_cache,
verbose_logs,
progress_queue)
pending_futures.add(f)
for f in finished_futures:
ret = f.result()
results.append(ExecutionResult(*ret))
except KeyboardInterrupt:
pass
finally:
if slave:
finalize()
if progress_thread is not None:
progress_queue.put(None)
progress_thread.join()
return results | def run_pipelines(pipeline_id_pattern,
root_dir,
use_cache=True,
dirty=False,
force=False,
concurrency=1,
verbose_logs=True,
progress_cb=None,
slave=False) | Run a pipeline by pipeline-id.
pipeline-id supports the '%' wildcard for any-suffix matching.
Use 'all' or '%' for running all pipelines | 2.964543 | 3.012438 | 0.984101 |
if key in self.map:
return
# compute the right index
size = len(self.items)
if index < 0:
index = size + index if size + index > 0 else 0
else:
index = index if index < size else size
# insert the value
self.items.insert(index, key)
for k, v in self.map.items():
if v >= index:
self.map[k] = v + 1
self.map[key] = index | def insert(self, index, key) | Adds an element at a dedicated position in an OrderedSet.
This implementation is meant for the OrderedSet from the ordered_set
package only. | 2.802553 | 2.773077 | 1.010629 |
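A behavioral sketch, assuming this ``insert`` has been attached to ``ordered_set.OrderedSet`` as the surrounding code intends:

from ordered_set import OrderedSet  # assumed package, insert() patched in

s = OrderedSet(['a', 'b', 'd'])
s.insert(2, 'c')       # every stored index >= 2 shifts up by one
print(list(s))         # ['a', 'b', 'c', 'd']
print(s.index('d'))    # 3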
if not self.items:
raise KeyError('Set is empty')
def remove_index(i):
elem = self.items[i]
del self.items[i]
del self.map[elem]
return elem
if index is None:
elem = remove_index(-1)
else:
size = len(self.items)
if index < 0:
index = size + index
if index < 0:
raise IndexError('assignment index out of range')
elif index >= size:
raise IndexError('assignement index out of range')
elem = remove_index(index)
for k, v in self.map.items():
if v >= index and v > 0:
self.map[k] = v - 1
return elem | def pop(self, index=None) | Removes an element at the tail of the OrderedSet or at a dedicated
position.
This implementation is meant for the OrderedSet from the ordered_set
package only. | 2.756614 | 2.579231 | 1.068774 |
if isinstance(uri, str):
uri = URI(uri)
try:
resource = self.resource_factory[uri.extension](uri)
except KeyError:
resource = self.resource_factory['*'](uri)
self.resources[uri.normalize()] = resource
resource.resource_set = self
resource.decoders.insert(0, self)
return resource | def create_resource(self, uri) | Creates a new Resource.
The created ressource type depends on the used URI.
:param uri: the resource URI
:type uri: URI
:return: a new Resource
:rtype: Resource
.. seealso:: URI, Resource, XMIResource | 3.841715 | 4.096101 | 0.937896 |
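Usage sketch, assuming PyEcore's ``ResourceSet`` (which this method belongs to); no I/O happens until the resource is loaded or saved:

from pyecore.resources import ResourceSet, URI  # assumed imports

rset = ResourceSet()
# Hypothetical path; the '.xmi' extension picks the XMIResource factory.
resource = rset.create_resource(URI('my_model.xmi'))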
superclass = cls.__bases__
if not issubclass(cls, EObject):
sclasslist = list(superclass)
if object in superclass:
index = sclasslist.index(object)
sclasslist.insert(index, EObject)
sclasslist.remove(object)
else:
sclasslist.insert(0, EObject)
superclass = tuple(sclasslist)
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return MetaEClass(cls.__name__, superclass, orig_vars) | def EMetaclass(cls) | Class decorator for creating PyEcore metaclass. | 2.095057 | 2.055397 | 1.019296 |
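A short usage sketch of the decorator, assuming PyEcore's ``EAttribute`` and ``EString`` for the attribute definition:

from pyecore.ecore import EMetaclass, EAttribute, EString  # assumed imports

@EMetaclass
class Person(object):      # plain class; EObject is injected as a base
    name = EAttribute(eType=EString)

p = Person()
p.name = 'Ada'
print(p.name)              # 'Ada'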
for annotation in self.eAnnotations:
if annotation.source == source:
return annotation
return None | def getEAnnotation(self, source) | Return the annotation with a matching source attribute. | 3.422228 | 2.771515 | 1.234786 |
if day < 1:
day = 1
date = datetime(year=date.year, month=date.month, day=day, tzinfo=utc)
return reverse('calendar_week', kwargs={'year': date.isocalendar()[0],
'week': date.isocalendar()[1]}) | def get_week_URL(date, day=0) | Returns the week view URL for a given date.
:param date: A date instance.
:param day: Day number in a month. | 2.863419 | 3.167993 | 0.903859 |
str_time = time.strptime('{0} {1} 1'.format(year, week), '%Y %W %w')
date = timezone.datetime(year=str_time.tm_year, month=str_time.tm_mon,
day=str_time.tm_mday, tzinfo=timezone.utc)
if timezone.datetime(year, 1, 4).isoweekday() > 4:
# ISO 8601 where week 1 is the first week that has at least 4 days in
# the current year
date -= timezone.timedelta(days=7)
return date | def monday_of_week(year, week) | Returns a datetime for the monday of the given week of the given year. | 2.919583 | 2.778164 | 1.050904 |
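On Python 3.8+, the standard library computes the Monday of an ISO week directly; a quick spot check of the ISO numbering this function's adjustment aims for.
from datetime import date

monday = date.fromisocalendar(2020, 1, 1)  # ISO year 2020, week 1, Monday
assert monday == date(2019, 12, 30)        # ISO week 1 of 2020 starts in 2019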
return self.lookup.pop(
(occ.event, occ.original_start, occ.original_end),
occ) | def get_occurrence(self, occ) | Return a persisted occurrences matching the occ and remove it from
lookup since it has already been matched | 10.712315 | 6.73102 | 1.591485 |
return [occ for key, occ in self.lookup.items() if (
(end and occ.start < end) and
occ.end >= start and not occ.cancelled)] | def get_additional_occurrences(self, start, end) | Return persisted occurrences which are now in the period | 9.274688 | 8.409698 | 1.102856 |
# make up to three attempts to dance with the API, use a jittered
# exponential back-off delay
for i in range(3):
try:
full_url = '{b}{u}'.format(b=self.api_url, u=uri)
response = None
if method == 'POST':
response = requests.post(full_url, data=params, files=files, headers=headers,
verify=self.verify_ssl, auth=auth, proxies=self.proxies)
else:
response = requests.get(full_url, params=params, headers=headers,
verify=self.verify_ssl, auth=auth, proxies=self.proxies)
# a 5xx status code means the sandbox is no longer available.
if response.status_code >= 500:
# server error
self.server_available = False
raise SandboxError("server returned {c} status code on {u}, assuming unavailable...".format(
c=response.status_code, u=response.url))
else:
return response
except requests.exceptions.RequestException:
# jittered exponential back-off: sleep up to 0.1, 0.4, 1.6 seconds
time.sleep(random.uniform(0, 4 ** i * 100 / 1000.0))
# if we couldn't reach the API, we assume that the box is down and lower the availability flag.
self.server_available = False
# raise an exception.
msg = "exceeded 3 attempts with sandbox API: {u}, p:{p}, f:{f}".format(u=full_url,
p=params, f=files)
try:
msg += "\n" + response.content.decode('utf-8')
except AttributeError:
pass
raise SandboxError(msg) | def _request(self, uri, method='GET', params=None, files=None, headers=None, auth=None) | Robustness wrapper. Tries up to 3 times to dance with the Sandbox API.
:type uri: str
:param uri: URI to append to base_url.
:type params: dict
:param params: Optional parameters for API.
:type files: dict
:param files: Optional dictionary of files for multipart post.
:type headers: dict
:param headers: Optional headers to send to the API.
:type auth: dict
:param auth: Optional authentication object to send to the API.
:rtype: requests.response.
:return: Response object.
:raises SandboxError: If all attempts failed. | 3.602015 | 3.423135 | 1.052256 |
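The retry pattern in isolation; a minimal sketch (hypothetical fetch function, not the sandbox API itself) using the same jittered exponential delay, whose upper bound grows 0.1 s, 0.4 s, 1.6 s across three attempts.
import random
import time

import requests

def fetch_with_retries(url, attempts=3):
    """Return a response, retrying transient failures with jittered back-off."""
    for i in range(attempts):
        try:
            response = requests.get(url, timeout=10)
            if response.status_code >= 500:
                raise requests.exceptions.RequestException('server error')
            return response
        except requests.exceptions.RequestException:
            # sleep a random time in [0, 4**i * 0.1) seconds
            time.sleep(random.uniform(0, 4 ** i * 100 / 1000.0))
    raise RuntimeError('exceeded %d attempts for %s' % (attempts, url))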
response = self._request("tasks/list")
return json.loads(response.content.decode('utf-8'))['tasks'] | def analyses(self) | Retrieve a list of analyzed samples.
:rtype: list
:return: List of objects referencing each analyzed file. | 7.724145 | 9.070184 | 0.851597 |
# multipart post files.
files = {"file": (filename, handle)}
# ensure the handle is at offset 0.
handle.seek(0)
response = self._request("tasks/create/file", method='POST', files=files)
# return task id; try v1.3 and v2.0 API response formats
try:
return str(json.loads(response.content.decode('utf-8'))["task_id"])
except KeyError:
return str(json.loads(response.content.decode('utf-8'))["task_ids"][0]) | def analyze(self, handle, filename) | Submit a file for analysis.
:type handle: File handle
:param handle: Handle to file to upload for analysis.
:type filename: str
:param filename: File name.
:rtype: str
:return: Task ID as a string | 5.274661 | 6.054318 | 0.871223 |
response = self._request("tasks/view/{id}".format(id=item_id))
if response.status_code == 404:
# probably an unknown task id
return False
try:
content = json.loads(response.content.decode('utf-8'))
status = content['task']["status"]
if status == 'completed' or status == "reported":
return True
except ValueError as e:
raise sandboxapi.SandboxError(e)
return False | def check(self, item_id) | Check if an analysis is complete
:type item_id: int
:param item_id: task_id to check.
:rtype: bool
:return: Boolean indicating if a report is done or not. | 4.45198 | 4.422399 | 1.006689 |
try:
response = self._request("tasks/delete/{id}".format(id=item_id))
if response.status_code == 200:
return True
except sandboxapi.SandboxError:
pass
return False | def delete(self, item_id) | Delete the reports associated with the given item_id.
:type item_id: int
:param item_id: Report ID to delete.
:rtype: bool
:return: True on success, False otherwise. | 5.198609 | 5.379163 | 0.966435 |
# if the availability flag is raised, return True immediately.
# NOTE: subsequent API failures will lower this flag. we do this here
# to ensure we don't keep hitting Cuckoo with requests while
# availability is there.
if self.server_available:
return True
# otherwise, we have to check with the cloud.
else:
try:
response = self._request("cuckoo/status")
# we've got cuckoo.
if response.status_code == 200:
self.server_available = True
return True
except sandboxapi.SandboxError:
pass
self.server_available = False
return False | def is_available(self) | Determine if the Cuckoo Sandbox API servers are alive or in maintenance mode.
:rtype: bool
:return: True if service is available, False otherwise. | 8.351388 | 7.638336 | 1.093352 |
response = self._request("tasks/list")
tasks = json.loads(response.content.decode('utf-8'))["tasks"]
return len([t for t in tasks if t['status'] == 'pending']) | def queue_size(self) | Determine Cuckoo sandbox queue length
There isn't a built-in way to do this like with Joe
:rtype: int
:return: Number of submissions in sandbox queue. | 4.754285 | 4.940178 | 0.962371 |
report_format = report_format.lower()
response = self._request("tasks/report/{id}/{format}".format(id=item_id, format=report_format))
# if response is JSON, return it as an object
if report_format == "json":
try:
return json.loads(response.content.decode('utf-8'))
except ValueError:
pass
# otherwise, return the raw content.
return response.content | def report(self, item_id, report_format="json") | Retrieves the specified report for the analyzed item, referenced by item_id.
Available formats include: json, html, all, dropped, package_files.
:type item_id: int
:param item_id: Task ID number
:type report_format: str
:param report_format: Return format
:rtype: dict
:return: Dictionary representing the JSON parsed data or raw, for other
formats / JSON parsing failure. | 2.952808 | 3.664842 | 0.805712 |
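Putting the Cuckoo methods together: a hedged end-to-end sketch of the submit/poll/report cycle. The host URL and file path are placeholders, and the CuckooAPI constructor is assumed from the sandboxapi package's README.
import time

from sandboxapi import cuckoo

sandbox = cuckoo.CuckooAPI('http://127.0.0.1:8090/')  # placeholder host

with open('suspicious.exe', 'rb') as handle:
    task_id = sandbox.analyze(handle, 'suspicious.exe')

while not sandbox.check(task_id):  # poll until 'completed'/'reported'
    time.sleep(30)

report = sandbox.report(task_id)   # parsed JSON by default
print(sandbox.score(report))       # scalar malscore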
score = 0
try:
# cuckoo-modified format
score = report['malscore']
except KeyError:
# cuckoo-2.0 format
score = report.get('info', {}).get('score', 0)
return score | def score(self, report) | Pass in the report from self.report(), get back an int. | 7.133331 | 6.700836 | 1.064543 |
if params:
params['environment_id'] = self.env_id
else:
params = {
'environment_id': self.env_id,
}
if headers:
headers['api-key'] = self.key
headers['User-Agent'] = 'Falcon Sandbox'
headers['Accept'] = 'application/json'
else:
headers = {
'api-key': self.key,
'User-Agent': 'Falcon Sandbox',
'Accept': 'application/json',
}
return sandboxapi.SandboxAPI._request(self, uri, method, params, files, headers) | def _request(self, uri, method='GET', params=None, files=None, headers=None, auth=None) | Override the parent _request method.
We have to do this here because Falcon Sandbox requires its API key and
environment id on every request. | 2.380741 | 2.305008 | 1.032856
# multipart post files.
files = {"file" : (filename, handle)}
# ensure the handle is at offset 0.
handle.seek(0)
response = self._request("/submit/file", method='POST', files=files)
try:
if response.status_code == 201:
# good response
return response.json()['job_id']
else:
raise sandboxapi.SandboxError("api error in analyze: {r}".format(r=response.content.decode('utf-8')))
except (ValueError, KeyError) as e:
raise sandboxapi.SandboxError("error in analyze: {e}".format(e=e)) | def analyze(self, handle, filename) | Submit a file for analysis.
:type handle: File handle
:param handle: Handle to file to upload for analysis.
:type filename: str
:param filename: File name.
:rtype: str
:return: File hash as a string | 4.494201 | 5.040288 | 0.891656 |
report_format = report_format.lower()
response = self._request("/report/{job_id}/summary".format(job_id=item_id))
if response.status_code == 429:
raise sandboxapi.SandboxError('API rate limit exceeded while fetching report')
# if response is JSON, return it as an object
if report_format == "json":
try:
return json.loads(response.content.decode('utf-8'))
except ValueError:
pass
# otherwise, return the raw content.
return response.content.decode('utf-8') | def report(self, item_id, report_format="json") | Retrieves the specified report for the analyzed item, referenced by item_id.
Available formats include: json, html.
:type item_id: str
:param item_id: File ID number
:type report_format: str
:param report_format: Return format
:rtype: dict
:return: Dictionary representing the JSON parsed data or raw, for other
formats / JSON parsing failure. | 3.544217 | 4.168504 | 0.850237 |
try:
threatlevel = int(report['threat_level'])
threatscore = int(report['threat_score'])
except (KeyError, IndexError, ValueError, TypeError) as e:
raise sandboxapi.SandboxError(e)
# from falcon docs:
# threatlevel is the verdict field with values: 0 = no threat, 1 = suspicious, 2 = malicious
# threatscore is the "heuristic" confidence value of Falcon Sandbox in the verdict and is a value between 0
# and 100. A value above 75/100 is "pretty sure", a value above 90/100 is "very sure".
# the scoring below converts these values to a scalar. modify as needed.
score = 0
if threatlevel == 2 and threatscore >= 90:
score = 10
elif threatlevel == 2 and threatscore >= 75:
score = 9
elif threatlevel == 2:
score = 8
elif threatlevel == 1 and threatscore >= 90:
score = 7
elif threatlevel == 1 and threatscore >= 75:
score = 6
elif threatlevel == 1:
score = 5
elif threatlevel == 0 and threatscore < 75:
score = 1
return score | def score(self, report) | Pass in the report from self.report(), get back an int 0-10. | 3.883609 | 3.810736 | 1.019123 |
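The branches read as a small decision table; a spot check of a few (threat_level, threat_score) pairs against the scalar the code above produces.
# (threat_level, threat_score) -> expected scalar, per the branches above
cases = {
    (2, 95): 10,  # malicious, "very sure"
    (2, 80): 9,   # malicious, "pretty sure"
    (1, 50): 5,   # suspicious, low confidence
    (0, 10): 1,   # no threat, low confidence
    (0, 90): 0,   # no threat at high confidence matches no branch
}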
# multipart post files.
files = {"sample_file": (filename, handle)}
# ensure the handle is at offset 0.
handle.seek(0)
response = self._request("/sample/submit", method='POST', files=files, headers=self.headers)
try:
if response.status_code == 200 and not response.json()['data']['errors']:
# only support single-file submissions; just grab the first one.
return response.json()['data']['samples'][0]['sample_id']
else:
raise sandboxapi.SandboxError("api error in analyze ({u}): {r}".format(u=response.url, r=response.content))
except (ValueError, KeyError, IndexError) as e:
raise sandboxapi.SandboxError("error in analyze: {e}".format(e=e)) | def analyze(self, handle, filename) | Submit a file for analysis.
:type handle: File handle
:param handle: Handle to file to upload for analysis.
:type filename: str
:param filename: File name.
:rtype: str
:return: File ID as a string | 4.736 | 5.325421 | 0.889319 |
response = self._request("/submission/sample/{sample_id}".format(sample_id=item_id), headers=self.headers)
if response.status_code == 404:
# unknown id
return False
try:
finished = False
for submission in response.json()['data']:
finished = finished or submission['submission_finished']
if finished:
return True
except (ValueError, KeyError) as e:
raise sandboxapi.SandboxError(e)
return False | def check(self, item_id) | Check if an analysis is complete.
:type item_id: str
:param item_id: File ID to check.
:rtype: bool
:return: Boolean indicating if a report is done or not. | 4.579677 | 4.896771 | 0.935244 |
if report_format == "html":
return "Report Unavailable"
# grab an analysis id from the submission id.
response = self._request("/analysis/sample/{sample_id}".format(sample_id=item_id),
headers=self.headers)
try:
# the highest score is probably the most interesting.
# vmray uses this internally with sample_highest_vti_score so this seems like a safe assumption.
analysis_id = 0
top_score = -1
for analysis in response.json()['data']:
if analysis['analysis_vti_score'] > top_score:
top_score = analysis['analysis_vti_score']
analysis_id = analysis['analysis_id']
except (ValueError, KeyError) as e:
raise sandboxapi.SandboxError(e)
# assume report format json.
response = self._request("/analysis/{analysis_id}/archive/logs/summary.json".format(analysis_id=analysis_id),
headers=self.headers)
# if response is JSON, return it as an object.
try:
return response.json()
except ValueError:
pass
# otherwise, return the raw content.
return response.content | def report(self, item_id, report_format="json") | Retrieves the specified report for the analyzed item, referenced by item_id.
Available formats include: json.
:type item_id: str
:param item_id: File ID number
:type report_format: str
:param report_format: Return format
:rtype: dict
:return: Dictionary representing the JSON parsed data or raw, for other
formats / JSON parsing failure. | 5.389826 | 5.647145 | 0.954434 |
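The "highest VTI score wins" selection is equivalent to a max() over the analyses; a minimal sketch on sample data shaped like the response fields the code above reads. Note that, unlike the loop, max() raises on an empty list.
analyses = [
    {'analysis_id': 11, 'analysis_vti_score': 20},
    {'analysis_id': 12, 'analysis_vti_score': 85},
]
best = max(analyses, key=lambda a: a['analysis_vti_score'])
assert best['analysis_id'] == 12  # the most interesting analysis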
if headers:
headers['Accept'] = 'application/json'
else:
headers = {
'Accept': 'application/json',
}
if not self.api_token:
# need to log in
response = sandboxapi.SandboxAPI._request(self, '/auth/login', 'POST', headers=headers,
auth=HTTPBasicAuth(self.username, self.password))
if response.status_code != 200:
raise sandboxapi.SandboxError("Can't log in, HTTP Error {e}".format(e=response.status_code))
# we are now logged in, save the token
self.api_token = response.headers.get('X-FeApi-Token')
headers['X-FeApi-Token'] = self.api_token
response = sandboxapi.SandboxAPI._request(self, uri, method, params, files, headers)
# handle session timeout
unauthorized = False
try:
if json.loads(response.content.decode('utf-8'))['fireeyeapis']['httpStatus'] == 401:
unauthorized = True
except (ValueError, KeyError, TypeError):
# non-JSON response, or no such keys.
pass
if response.status_code == 401 or unauthorized:
self.api_token = None
try:
headers.pop('X-FeApi-Token')
except KeyError:
pass
# recurse
return self._request(uri, method, params, files, headers)
return response | def _request(self, uri, method='GET', params=None, files=None, headers=None, auth=None) | Override the parent _request method.
We have to do this here because FireEye requires some extra
authentication steps. On each request we pass the auth headers, and
if the session has expired, we automatically reauthenticate. | 3.004364 | 2.910311 | 1.032317 |
# multipart post files.
files = {"file": (filename, handle)}
# ensure the handle is at offset 0.
handle.seek(0)
# add submission options
data = {
#FIXME: These may need to change, see docs page 36
'options': '{"application":"0","timeout":"500","priority":"0","profiles":["%s"],"analysistype":"0","force":"true","prefetch":"1"}' % self.profile,
}
response = self._request("/submissions", method='POST', params=data, files=files)
try:
if response.status_code == 200:
# good response
try:
return response.json()['ID']
except TypeError:
return response.json()[0]['ID']
else:
raise sandboxapi.SandboxError("api error in analyze ({u}): {r}".format(u=response.url, r=response.content))
except (ValueError, KeyError) as e:
raise sandboxapi.SandboxError("error in analyze: {e}".format(e=e)) | def analyze(self, handle, filename) | Submit a file for analysis.
:type handle: File handle
:param handle: Handle to file to upload for analysis.
:type filename: str
:param filename: File name.
:rtype: str
:return: File ID as a string | 6.066097 | 6.387113 | 0.94974 |
response = self._request("/submissions/status/{file_id}".format(file_id=item_id))
if response.status_code == 404:
# unknown id
return False
try:
status = response.json()['submissionStatus']
if status == 'Done':
return True
except ValueError as e:
raise sandboxapi.SandboxError(e)
return False | def check(self, item_id) | Check if an analysis is complete.
:type item_id: str
:param item_id: File ID to check.
:rtype: bool
:return: Boolean indicating if a report is done or not. | 5.00181 | 5.046407 | 0.991162 |
if report_format == "html":
return "Report Unavailable"
# else we try JSON
response = self._request("/submissions/results/{file_id}?info_level=extended".format(file_id=item_id))
# if response is JSON, return it as an object
try:
return response.json()
except ValueError:
pass
# otherwise, return the raw content.
return response.content | def report(self, item_id, report_format="json") | Retrieves the specified report for the analyzed item, referenced by item_id.
Available formats include: json.
:type item_id: str
:param item_id: File ID number
:type report_format: str
:param report_format: Return format
:rtype: dict
:return: Dictionary representing the JSON parsed data or raw, for other
formats / JSON parsing failure. | 6.432623 | 6.916763 | 0.930005 |
# ensure the handle is at offset 0.
handle.seek(0)
try:
return self.jbx.submit_sample(handle)['webids'][0]
except (jbxapi.JoeException, KeyError, IndexError) as e:
raise sandboxapi.SandboxError("error in analyze: {e}".format(e=e)) | def analyze(self, handle, filename) | Submit a file for analysis.
:type handle: File handle
:param handle: Handle to file to upload for analysis.
:type filename: str
:param filename: File name.
:rtype: str
:return: Task ID as a string | 11.658891 | 13.892957 | 0.839194 |
try:
return self.jbx.info(item_id).get('status').lower() == 'finished'
except jbxapi.JoeException:
return False
:type item_id: str
:param item_id: File ID to check.
:rtype: bool
:return: Boolean indicating if a report is done or not. | 10.704586 | 11.849408 | 0.903386 |
# if the availability flag is raised, return True immediately.
# NOTE: subsequent API failures will lower this flag. we do this here
# to ensure we don't keep hitting Joe with requests while availability
# is there.
if self.server_available:
return True
# otherwise, we have to check with the cloud.
else:
try:
self.server_available = self.jbx.server_online()
return self.server_available
except jbxapi.JoeException:
pass
self.server_available = False
return False | def is_available(self) | Determine if the Joe Sandbox API server is alive.
:rtype: bool
:return: True if service is available, False otherwise. | 10.800068 | 9.846586 | 1.096834 |
if report_format == "json":
report_format = "jsonfixed"
try:
return json.loads(self.jbx.download(item_id, report_format)[1].decode('utf-8'))
except (jbxapi.JoeException, ValueError, IndexError) as e:
raise sandboxapi.SandboxError("error in report fetch: {e}".format(e=e)) | def report(self, item_id, report_format="json") | Retrieves the specified report for the analyzed item, referenced by item_id.
For available report formats, see online Joe Sandbox documentation.
:type item_id: str
:param item_id: File ID number
:type report_format: str
:param report_format: Return format
:rtype: dict
:return: Dictionary representing the JSON parsed data or raw, for other
formats / JSON parsing failure. | 6.618678 | 6.348575 | 1.042545 |
# The cpu/isolated sysfs was added in Linux 4.2
# (commit 59f30abe94bff50636c8cad45207a01fdcb2ee49)
path = sysfs_path('devices/system/cpu/isolated')
isolated = read_first_line(path)
if isolated:
return parse_cpu_list(isolated)
cmdline = read_first_line(proc_path('cmdline'))
if cmdline:
match = re.search(r'\bisolcpus=([^ ]+)', cmdline)
if match:
isolated = match.group(1)
return parse_cpu_list(isolated)
return None | def get_isolated_cpus() | Get the list of isolated CPUs.
Return a sorted list of CPU identifiers, or return None if no CPU is
isolated. | 6.524326 | 6.214969 | 1.049776 |
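The kernel-cmdline fallback in isolation: the same regex applied to a sample /proc/cmdline string (parse_cpu_list is assumed to expand ranges like '4-5' into individual IDs).
import re

cmdline = 'BOOT_IMAGE=/vmlinuz root=/dev/sda1 isolcpus=2,4-5 quiet'
match = re.search(r'\bisolcpus=([^ ]+)', cmdline)
assert match and match.group(1) == '2,4-5'
# parse_cpu_list('2,4-5') would then yield [2, 4, 5]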
inner_loops = kwargs.pop('inner_loops', None)
metadata = kwargs.pop('metadata', None)
self._no_keyword_argument(kwargs)
if not self._check_worker_task():
return None
if args:
func = functools.partial(func, *args)
def task_func(task, loops):
# use fast local variables
local_timer = perf.perf_counter
local_func = func
if loops != 1:
range_it = range(loops)
t0 = local_timer()
for _ in range_it:
local_func()
dt = local_timer() - t0
else:
t0 = local_timer()
local_func()
dt = local_timer() - t0
return dt
task = WorkerProcessTask(self, name, task_func, metadata)
task.inner_loops = inner_loops
return self._main(task) | def bench_func(self, name, func, *args, **kwargs) | Benchmark func(*args). | 4.261943 | 4.290844 | 0.993264 |
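The core timing idea stripped of the task plumbing: a hedged sketch that times a callable over a loop count, using the stdlib perf_counter in place of the perf package and the same local-variable aliasing trick to keep name lookups out of the hot loop.
import time

def time_func(func, loops):
    """Return total seconds taken to call func() `loops` times."""
    local_timer = time.perf_counter  # local alias: cheaper lookups in the loop
    local_func = func
    range_it = range(loops)
    t0 = local_timer()
    for _ in range_it:
        local_func()
    return local_timer() - t0

dt = time_func(lambda: sum(range(100)), loops=1000)
print('%.6f sec total, %.2f us per call' % (dt, dt / 1000 * 1e6))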
"True if all suites have one benchmark with the same name"
if any(len(suite) > 1 for suite in self.suites):
return False
names = self.suites[0].get_benchmark_names()
return all(suite.get_benchmark_names() == names
for suite in self.suites[1:]) | def has_same_unique_benchmark(self) | True if all suites have one benchmark with the same name | 3.693968 | 2.618262 | 1.410848 |
client.capture(
'Message',
message=message,
params=tuple(params),
data={
'site': site,
'logger': logger,
},
) | def send_message(message, params, site, logger) | Send a message to the Sentry server | 5.191024 | 3.942485 | 1.316689 |
parser = argparse.ArgumentParser(description='Send logs to Django Sentry.')
parser.add_argument('--sentryconfig', '-c', default=None,
help='A configuration file (.ini, .yaml) of some '
'Sentry integration to extract the Sentry DSN from')
parser.add_argument('--sentrydsn', '-s', default="",
help='The Sentry DSN string (overrides -c)')
parser.add_argument('--daemonize', '-d', default=False,
action='store_const', const=True,
help='Run this script in background')
parser.add_argument('--follow', '-f', default="all",
help='Which logs to follow, default ALL')
parser.add_argument('--nginxerrorpath', '-n', default=None,
help='Nginx error log path')
return parser.parse_args() | def get_command_line_args() | CLI command line arguments handling | 4.165234 | 4.112759 | 1.012759 |
if args.sentryconfig:
print('Parsing DSN from %s' % args.sentryconfig)
os.environ['SENTRY_DSN'] = parse_sentry_configuration(args.sentryconfig)
if args.sentrydsn:
print('Using the DSN %s' % args.sentrydsn)
os.environ['SENTRY_DSN'] = args.sentrydsn
if args.nginxerrorpath:
print('Using the Nginx error log path %s' % args.nginxerrorpath)
os.environ['NGINX_ERROR_PATH'] = args.nginxerrorpath
from ..conf import settings # noqa; pylint: disable=unused-variable
if args.daemonize:
print('Running process in background')
from ..daemonize import create_daemon
create_daemon() | def process_arguments(args) | Deal with arguments passed on the command line | 3.1998 | 3.110461 | 1.028722 |
filetype = os.path.splitext(filename)[-1][1:].lower()
if filetype == 'ini': # Pyramid, Pylons
config = ConfigParser()
config.read(filename)
ini_key = 'dsn'
ini_sections = ['sentry', 'filter:raven']
for section in ini_sections:
if section in config:
print('- Using value from [{section}]:[{key}]'
.format(section=section, key=ini_key))
try:
return config[section][ini_key]
except KeyError:
print('- Warning: Key "{key}" not found in section '
'[{section}]'.format(section=section, key=ini_key))
raise SystemExit('No DSN found in {file}. Tried sections [{sec_list}]'
.format(
file=filename,
sec_list='], ['.join(ini_sections),
))
elif filetype == 'py': # Django, Flask, Bottle, ...
raise SystemExit('Parsing configuration from pure Python (Django,'
'Flask, Bottle, etc.) not implemented yet.')
else:
raise SystemExit('Configuration file type not supported for parsing: '
'%s' % filetype) | def parse_sentry_configuration(filename) | Parse Sentry DSN out of an application or Sentry configuration file | 3.907422 | 3.753406 | 1.041033 |
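What the .ini branch expects, as a runnable sketch with an in-memory config; the section and key names are taken from the lists above, the DSN value is a placeholder.
from configparser import ConfigParser

config = ConfigParser()
config.read_string("""
[filter:raven]
dsn = https://public@sentry.example.com/1
""")

for section in ['sentry', 'filter:raven']:
    if section in config and 'dsn' in config[section]:
        print(config[section]['dsn'])  # -> https://public@sentry.example.com/1
        break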
try:
follower = tailhead.follow_path(self.filepath)
except (FileNotFoundError, PermissionError) as err:
raise SystemExit("Error: Can't read logfile %s (%s)" %
(self.filepath, err))
for line in follower:
self.message = None
self.params = None
self.site = None
if line is not None:
self.parse(line)
send_message(self.message,
self.params,
self.site,
self.logger) | def follow_tail(self) | Read (tail and follow) the log file, parse entries and send messages
to Sentry using Raven. | 5.320967 | 4.511196 | 1.179503 |
for item in glob(file_glob, recursive=True):
try:
os.remove(item)
print('%s removed ...' % item)
except OSError:
try:
shutil.rmtree(item)
print('%s/ removed ...' % item)
except OSError as err:
print(err) | def rmtree_glob(file_glob) | Platform independent rmtree, which also allows wildcards (globbing) | 2.665666 | 2.719558 | 0.980184 |
with open(join(abspath(dirname(__file__)), filename)) as file:
return file.read() | def read_file(filename) | Read the contents of a file located relative to setup.py | 3.938998 | 3.172203 | 1.241723 |