#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Benjamin Vial
# License: MIT
from dolfin import *
# parameters['allow_extrapolation'] = True
mesh = UnitIntervalMesh(15)
V = FunctionSpace(mesh, "CG", 2)
u = Function(V)
# u_vec = u.vector()
# u_vec[:] = MPI.comm_world.rank + 1
#
# v_vec = Vector(MPI.comm_self, u_vec.local_size())
# u_vec.gather(v_vec, V.dofmap().dofs())
f = interpolate(Expression("x[0]", degree=2), V)
#
mpi_comm = u.function_space().mesh().mpi_comm()
array = u.vector().get_local()
# gather solution from all processes on proc 0
array_gathered = mpi_comm.gather(array, root=0)
# compute coefficients on proc 0
if mpi_comm.Get_rank() == 0:
print(array_gathered)
else:
print(array)
coef = None
# broadcast from proc 0 to other processes
# mpi_comm.Bcast(coef, root=0)
# print(f(0))
# print(f(1))
#
# v_vec(0)
# print("Original vs copied: ", u_vec.get_local(), v_vec.get_local()) # [1, 1], [0, 0, 0]
|
"""Defines Blink cameras."""
from shutil import copyfileobj
import logging
from blinkpy import api
from blinkpy.helpers.constants import TIMEOUT_MEDIA
_LOGGER = logging.getLogger(__name__)
class BlinkCamera:
"""Class to initialize individual camera."""
def __init__(self, sync):
"""Initiailize BlinkCamera."""
self.sync = sync
self.name = None
self.camera_id = None
self.network_id = None
self.thumbnail = None
self.serial = None
self.motion_enabled = None
self.battery_voltage = None
self.clip = None
self.temperature = None
self.temperature_calibrated = None
self.battery_state = None
self.motion_detected = None
self.wifi_strength = None
self.last_record = None
self._cached_image = None
self._cached_video = None
self.camera_type = ""
@property
def attributes(self):
"""Return dictionary of all camera attributes."""
attributes = {
"name": self.name,
"camera_id": self.camera_id,
"serial": self.serial,
"temperature": self.temperature,
"temperature_c": self.temperature_c,
"temperature_calibrated": self.temperature_calibrated,
"battery": self.battery,
"battery_voltage": self.battery_voltage,
"thumbnail": self.thumbnail,
"video": self.clip,
"motion_enabled": self.motion_enabled,
"motion_detected": self.motion_detected,
"wifi_strength": self.wifi_strength,
"network_id": self.sync.network_id,
"sync_module": self.sync.name,
"last_record": self.last_record,
}
return attributes
@property
def battery(self):
"""Return battery as string."""
return self.battery_state
@property
def temperature_c(self):
"""Return temperature in celcius."""
try:
return round((self.temperature - 32) / 9.0 * 5.0, 1)
except TypeError:
return None
@property
def image_from_cache(self):
"""Return the most recently cached image."""
if self._cached_image:
return self._cached_image
return None
@property
def video_from_cache(self):
"""Return the most recently cached video."""
if self._cached_video:
return self._cached_video
return None
@property
def arm(self):
"""Return arm status of camera."""
return self.motion_enabled
@arm.setter
def arm(self, value):
"""Set camera arm status."""
if value:
return api.request_motion_detection_enable(
self.sync.blink, self.network_id, self.camera_id
)
return api.request_motion_detection_disable(
self.sync.blink, self.network_id, self.camera_id
)
def get_media(self, media_type="image"):
"""Download media (image or video)."""
url = self.thumbnail
if media_type.lower() == "video":
url = self.clip
return api.http_get(
self.sync.blink, url=url, stream=True, json=False, timeout=TIMEOUT_MEDIA,
)
def snap_picture(self):
"""Take a picture with camera to create a new thumbnail."""
return api.request_new_image(self.sync.blink, self.network_id, self.camera_id)
def set_motion_detect(self, enable):
"""Set motion detection."""
_LOGGER.warning(
"Method is deprecated as of v0.16.0 and will be removed in a future version. Please use the BlinkCamera.arm property instead."
)
if enable:
return api.request_motion_detection_enable(
self.sync.blink, self.network_id, self.camera_id
)
return api.request_motion_detection_disable(
self.sync.blink, self.network_id, self.camera_id
)
def update(self, config, force_cache=False, **kwargs):
"""Update camera info."""
self.extract_config_info(config)
self.get_sensor_info()
self.update_images(config, force_cache=force_cache)
def extract_config_info(self, config):
"""Extract info from config."""
self.name = config.get("name", "unknown")
self.camera_id = str(config.get("id", "unknown"))
self.network_id = str(config.get("network_id", "unknown"))
self.serial = config.get("serial", None)
self.motion_enabled = config.get("enabled", "unknown")
self.battery_voltage = config.get("battery_voltage", None)
self.battery_state = config.get("battery_state", None)
self.temperature = config.get("temperature", None)
self.wifi_strength = config.get("wifi_strength", None)
def get_sensor_info(self):
"""Retrieve calibrated temperatue from special endpoint."""
resp = api.request_camera_sensors(
self.sync.blink, self.network_id, self.camera_id
)
try:
self.temperature_calibrated = resp["temp"]
except (TypeError, KeyError):
self.temperature_calibrated = self.temperature
_LOGGER.warning("Could not retrieve calibrated temperature.")
def update_images(self, config, force_cache=False):
"""Update images for camera."""
new_thumbnail = None
thumb_addr = None
if config.get("thumbnail", False):
thumb_addr = config["thumbnail"]
else:
_LOGGER.warning("Could not find thumbnail for camera %s", self.name)
if thumb_addr is not None:
new_thumbnail = f"{self.sync.urls.base_url}{thumb_addr}.jpg"
try:
self.motion_detected = self.sync.motion[self.name]
except KeyError:
self.motion_detected = False
clip_addr = None
try:
clip_addr = self.sync.last_record[self.name]["clip"]
self.last_record = self.sync.last_record[self.name]["time"]
self.clip = f"{self.sync.urls.base_url}{clip_addr}"
except KeyError:
pass
# If the thumbnail or clip have changed, update the cache
update_cached_image = False
if new_thumbnail != self.thumbnail or self._cached_image is None:
update_cached_image = True
self.thumbnail = new_thumbnail
update_cached_video = False
if self._cached_video is None or self.motion_detected:
update_cached_video = True
if new_thumbnail is not None and (update_cached_image or force_cache):
self._cached_image = self.get_media()
if clip_addr is not None and (update_cached_video or force_cache):
self._cached_video = self.get_media(media_type="video")
def get_liveview(self):
"""Get livewview rtsps link."""
response = api.request_camera_liveview(
self.sync.blink, self.sync.network_id, self.camera_id
)
return response["server"]
def image_to_file(self, path):
"""
Write image to file.
:param path: Path to write file
"""
_LOGGER.debug("Writing image from %s to %s", self.name, path)
response = self.get_media()
if response.status_code == 200:
with open(path, "wb") as imgfile:
copyfileobj(response.raw, imgfile)
else:
_LOGGER.error(
"Cannot write image to file, response %s", response.status_code
)
def video_to_file(self, path):
"""
Write video to file.
:param path: Path to write file
"""
_LOGGER.debug("Writing video from %s to %s", self.name, path)
response = self.get_media(media_type="video")
if response is None:
_LOGGER.error("No saved video exist for %s.", self.name)
return
with open(path, "wb") as vidfile:
copyfileobj(response.raw, vidfile)
class BlinkCameraMini(BlinkCamera):
"""Define a class for a Blink Mini camera."""
def __init__(self, sync):
"""Initialize a Blink Mini cameras."""
super().__init__(sync)
self.camera_type = "mini"
@property
def arm(self):
"""Return camera arm status."""
return self.sync.arm
@arm.setter
def arm(self, value):
"""Set camera arm status."""
_LOGGER.warning(
"Individual camera motion detection enable/disable for Blink Mini cameras is unsupported at this time."
)
def snap_picture(self):
"""Snap picture for a blink mini camera."""
url = f"{self.sync.urls.base_url}/api/v1/accounts/{self.sync.blink.account_id}/networks/{self.network_id}/owls/{self.camera_id}/thumbnail"
return api.http_post(self.sync.blink, url)
def get_sensor_info(self):
"""Get sensor info for blink mini camera."""
def get_liveview(self):
"""Get liveview link."""
url = f"{self.sync.urls.base_url}/api/v1/accounts/{self.sync.blink.account_id}/networks/{self.network_id}/owls/{self.camera_id}/liveview"
response = api.http_post(self.sync.blink, url)
server = response["server"]
server_split = server.split(":")
server_split[0] = "rtsps:"
link = "".join(server_split)
return link
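# A minimal, hypothetical usage sketch of BlinkCamera (assumes `sync` is an
# already-authenticated sync-module object exposing .blink, .urls, .motion and
# .last_record as this class expects, and that the config dict uses the keys
# read by extract_config_info()):
#
#   camera = BlinkCamera(sync)
#   camera.update({"name": "porch", "id": "1234", "network_id": "5678", "enabled": True})
#   camera.snap_picture()                  # request a fresh thumbnail
#   camera.image_to_file("porch.jpg")      # write the cached thumbnail to disk
#   camera.arm = True                      # enable motion detection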
|
#AUTOGENERATED! DO NOT EDIT! File to edit: dev/01.ML_Landscape.ipynb (unless otherwise specified).
__all__ = [] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
requests_cache.backends.base
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Contains the BaseCache class, which can be used as an in-memory cache backend
or extended to support persistence.
"""
from datetime import datetime
import hashlib
from copy import copy
from alp.request import requests
from alp.request.requests_cache.compat import is_py2
class BaseCache(object):
""" Base class for cache implementations, can be used as in-memory cache.
To extend it you can provide dictionary-like objects for
:attr:`keys_map` and :attr:`responses` or override public methods.
"""
def __init__(self, *args, **kwargs):
#: `key` -> `key_in_responses` mapping
self.keys_map = {}
#: `key_in_cache` -> `response` mapping
self.responses = {}
def save_response(self, key, response):
""" Save response to cache
:param key: key for this response
:param response: response to save
.. note:: Response is reduced before saving (with :meth:`reduce_response`)
to make it picklable
"""
self.responses[key] = self.reduce_response(response), datetime.utcnow()
def add_key_mapping(self, new_key, key_to_response):
"""
Adds mapping of `new_key` to `key_to_response` to make it possible to
associate many keys with single response
:param new_key: new key (e.g. url from redirect)
:param key_to_response: key which can be found in :attr:`responses`
:return:
"""
self.keys_map[new_key] = key_to_response
def get_response_and_time(self, key, default=(None, None)):
""" Retrieves response and timestamp for `key` if it's stored in cache,
otherwise returns `default`
:param key: key of resource
:param default: return this if `key` not found in cache
:returns: tuple (response, datetime)
.. note:: Response is restored after unpickling with :meth:`restore_response`
"""
try:
if key not in self.responses:
key = self.keys_map[key]
response, timestamp = self.responses[key]
except KeyError:
return default
return self.restore_response(response), timestamp
def delete(self, key):
""" Delete `key` from cache. Also deletes all responses from response history
"""
try:
if key in self.responses:
response, _ = self.responses[key]
del self.responses[key]
else:
response, _ = self.responses[self.keys_map[key]]
del self.keys_map[key]
for r in response.history:
del self.keys_map[self.create_key(r.request)]
except KeyError:
pass
def delete_url(self, url):
""" Delete response associated with `url` from cache.
Also deletes all responses from response history. Works only for GET requests
"""
self.delete(self._url_to_key(url))
def clear(self):
""" Clear cache
"""
self.responses.clear()
self.keys_map.clear()
def has_key(self, key):
""" Returns `True` if cache has `key`, `False` otherwise
"""
return key in self.responses or key in self.keys_map
def has_url(self, url):
""" Returns `True` if cache has `url`, `False` otherwise.
Works only for GET request urls
"""
return self.has_key(self._url_to_key(url))
def _url_to_key(self, url):
from requests import Request
return self.create_key(Request('GET', url).prepare())
_response_attrs = ['_content', 'url', 'status_code', 'cookies',
'headers', 'encoding', 'request', 'reason']
def reduce_response(self, response):
""" Reduce response object to make it compatible with ``pickle``
"""
result = _Store()
# prefetch
response.content
for field in self._response_attrs:
setattr(result, field, self._picklable_field(response, field))
result.history = tuple(self.reduce_response(r) for r in response.history)
return result
def _picklable_field(self, response, name):
value = getattr(response, name)
if name == 'request':
value = copy(value)
value.hooks = []
return value
def restore_response(self, response):
""" Restore response object after unpickling
"""
result = requests.Response()
for field in self._response_attrs:
setattr(result, field, getattr(response, field))
result.history = tuple(self.restore_response(r) for r in response.history)
return result
def create_key(self, request):
key = hashlib.sha256()
key.update(_to_bytes(request.method.upper()))
key.update(_to_bytes(request.url))
if request.body:
key.update(_to_bytes(request.body))
return key.hexdigest()
def __str__(self):
return 'keys: %s\nresponses: %s' % (self.keys_map, self.responses)
# used for saving response attributes
class _Store(object):
pass
def _to_bytes(s, encoding='utf-8'):
if is_py2 or isinstance(s, bytes):
return s
return bytes(s, encoding)
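# A minimal usage sketch (assumes a requests-compatible response object, e.g.
# one returned by requests.get, whose .request attribute is the prepared request):
#
#   cache = BaseCache()
#   key = cache.create_key(response.request)          # sha256 over method, url and body
#   cache.save_response(key, response)                # stored reduced, with a UTC timestamp
#   cached_response, stored_at = cache.get_response_and_time(key)
#   cache.delete_url(response.request.url)            # drop it again (GET requests only)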
|
__docformat__ = "restructuredtext"
__version__ = "0.2.7"
__doc__ = """
This <https://github.com/WinVector/wvpy> is a package of example files for teaching data science.
"""
|
import insightconnect_plugin_runtime
from .schema import Component, DeleteUrlListByIdInput, DeleteUrlListByIdOutput, Input
class DeleteUrlListById(insightconnect_plugin_runtime.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="delete_url_list_by_id",
description=Component.DESCRIPTION,
input=DeleteUrlListByIdInput(),
output=DeleteUrlListByIdOutput(),
)
def run(self, params={}):
return self.connection.client.delete_url_list_by_id(params.get(Input.ID))
|
#
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from smvbasetest import SmvBaseTest
from smv import smvPy, SmvPyCsvStringData
import pyspark
from pyspark.context import SparkContext
from pyspark.sql import SQLContext, HiveContext
from pyspark.sql.functions import col, struct
class D1(SmvPyCsvStringData):
def schemaStr(self):
return "a:String;b:Integer"
def dataStr(self):
return "x,10;y,1"
class SmvFrameworkTest(SmvBaseTest):
def test_SmvCsvStringData(self):
fqn = self.__module__ + ".D1"
df = smvPy.runModule(fqn)
expect = self.createDF("a:String;b:Integer", "x,10;y,1")
self.should_be_same(expect, df)
|
import numpy as np
from keras import backend as K
from keras.layers import Input
from keras.layers import Lambda
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import Activation
from keras.layers import Conv1D
from keras.layers import Conv2D
from keras.layers import Conv3D
from keras.layers import SeparableConv2D
from keras.layers import Conv2DTranspose
from keras.layers import BatchNormalization
from keras.layers import SimpleRNN
from keras.layers import LSTM
from keras.layers import TimeDistributed
from keras.layers import multiply
from keras.layers import average
from keras.layers import concatenate
from keras.layers import add
from keras.layers import AveragePooling2D
from keras.layers import MaxPooling2D
from keras.layers import MaxPooling3D
from keras.layers import GlobalMaxPooling1D
from keras.layers import GlobalMaxPooling2D
from keras.layers import GlobalMaxPooling3D
from keras.layers import GlobalAveragePooling1D
from keras.layers import GlobalAveragePooling2D
from keras.layers import ZeroPadding2D
from keras.layers import UpSampling2D
from keras.layers import UpSampling3D
from keras.constraints import unit_norm
from keras.regularizers import l1
from deephar.utils.math import linspace_2d
from deephar.activations import channel_softmax_1d
from deephar.activations import channel_softmax_2d
def conv(x, filters, size, strides=(1, 1), padding='same', name=None):
x = Conv2D(filters, size, strides=strides, padding=padding,
use_bias=False, name=name)(x)
return x
def deconv(x, filters, size, strides=(1, 1), padding='same', name=None):
x = Conv2DTranspose(filters, size, strides=strides, padding=padding,
data_format=K.image_data_format(), use_bias=False, name=name)(x)
return x
def conv_bn(x, filters, size, strides=(1, 1), padding='same', name=None):
if name is not None:
conv_name = name + '_conv'
else:
conv_name = None
x = conv(x, filters, size, strides, padding, conv_name)
x = BatchNormalization(axis=-1, scale=False, name=name)(x)
return x
def conv_act(x, filters, size, strides=(1, 1), padding='same', name=None):
if name is not None:
conv_name = name + '_conv'
else:
conv_name = None
x = conv(x, filters, size, strides, padding, conv_name)
x = Activation('relu', name=name)(x)
return x
def conv_bn_act(x, filters, size, strides=(1, 1), padding='same', name=None):
if name is not None:
conv_name = name + '_conv'
bn_name = name + '_bn'
else:
conv_name = None
bn_name = None
x = conv(x, filters, size, strides, padding, conv_name)
x = BatchNormalization(axis=-1, scale=False, name=bn_name)(x)
x = Activation('relu', name=name)(x)
return x
def bn_act_conv(x, filters, size, strides=(1, 1), padding='same', name=None):
if name is not None:
bn_name = name + '_bn'
act_name = name + '_act'
else:
bn_name = None
act_name = None
x = BatchNormalization(axis=-1, scale=False, name=bn_name)(x)
x = Activation('relu', name=act_name)(x)
x = conv(x, filters, size, strides, padding, name)
return x
def act_conv_bn(x, filters, size, strides=(1, 1), padding='same', name=None):
if name is not None:
conv_name = name + '_conv'
act_name = name + '_act'
else:
conv_name = None
act_name = None
x = Activation('relu', name=act_name)(x)
x = conv(x, filters, size, strides, padding, conv_name)
x = BatchNormalization(axis=-1, scale=False, name=name)(x)
return x
def separable_conv_bn_act(x, filters, size, strides=(1, 1), padding='same',
name=None):
if name is not None:
conv_name = name + '_conv'
bn_name = name + '_bn'
else:
conv_name = None
bn_name = None
x = SeparableConv2D(filters, size, strides=strides, padding=padding,
use_bias=False, name=conv_name)(x)
x = BatchNormalization(axis=-1, scale=False, name=bn_name)(x)
x = Activation('relu', name=name)(x)
return x
def separable_act_conv_bn(x, filters, size, strides=(1, 1), padding='same',
name=None):
if name is not None:
conv_name = name + '_conv'
act_name = name + '_act'
else:
conv_name = None
act_name = None
x = Activation('relu', name=act_name)(x)
x = SeparableConv2D(filters, size, strides=strides, padding=padding,
use_bias=False, name=conv_name)(x)
x = BatchNormalization(axis=-1, scale=False, name=name)(x)
return x
def separable_conv_bn(x, filters, size, strides=(1, 1), padding='same',
name=None):
if name is not None:
conv_name = name + '_conv'
else:
conv_name = None
x = SeparableConv2D(filters, size, strides=strides, padding=padding,
use_bias=False, name=conv_name)(x)
x = BatchNormalization(axis=-1, scale=False, name=name)(x)
return x
def act_conv(x, filters, size, strides=(1, 1), padding='same', name=None):
if name is not None:
act_name = name + '_act'
else:
act_name = None
x = Activation('relu', name=act_name)(x)
x = conv(x, filters, size, strides, padding, name)
return x
def bn_act_conv3d(x, filters, size, strides=(1, 1, 1), padding='same',
name=None):
if name is not None:
bn_name = name + '_bn'
act_name = name + '_act'
else:
bn_name = None
act_name = None
x = BatchNormalization(axis=-1, scale=False, name=bn_name)(x)
x = Activation('relu', name=act_name)(x)
x = Conv3D(filters, size, strides=strides, padding=padding,
use_bias=False, name=name)(x)
return x
def dense(x, filters, name=None):
x = Dense(filters, kernel_regularizer=l1(0.001), name=name)(x)
return x
def bn_act_dense(x, filters, name=None):
if name is not None:
bn_name = name + '_bn'
act_name = name + '_act'
else:
bn_name = None
act_name = None
x = BatchNormalization(axis=-1, scale=False, name=bn_name)(x)
x = Activation('relu', name=act_name)(x)
x = Dense(filters, kernel_regularizer=l1(0.001), name=name)(x)
return x
def act_channel_softmax(x, name=None):
x = Activation(channel_softmax_2d(), name=name)(x)
return x
def act_depth_softmax(x, name=None):
x = Activation(channel_softmax_1d(), name=name)(x)
return x
def aggregate_position_probability(inp):
y,p = inp
p = concatenate([p, p], axis=-1)
yp = p * y
yn = (1 - p) * y
y = concatenate([yp, yn], axis=-1)
return y
def fc_aggregation_block(y, p, name=None):
dim = K.int_shape(y)[-1]
x = Lambda(aggregate_position_probability, name=name)([y, p])
x = Dense(2*dim, use_bias=False, kernel_regularizer=l1(0.0002),
name=name + '_fc1')(x)
x = Activation('relu', name=name + '_act')(x)
x = Dense(dim, kernel_regularizer=l1(0.0002), name=name + '_fc2')(x)
return x
def sparse_fc_mapping(x, input_idxs):
num_units = len(input_idxs)
d = Dense(num_units, use_bias=False)
d.trainable = False
x = d(x)
w = d.get_weights()
w[0].fill(0)
for i in range(num_units):
w[0][input_idxs[i], i] = 1.
d.set_weights(w)
return x
def max_min_pooling(x, strides=(2, 2), padding='same', name=None):
if 'max_min_pool_cnt' not in globals():
global max_min_pool_cnt
max_min_pool_cnt = 0
if name is None:
name = 'MaxMinPooling2D_%d' % max_min_pool_cnt
max_min_pool_cnt += 1
def _max_plus_min(x):
x1 = MaxPooling2D(strides, padding=padding)(x)
x2 = MaxPooling2D(strides, padding=padding)(-x)
return x1 - x2
return Lambda(_max_plus_min, name=name)(x)
def global_max_min_pooling(x, name=None):
if 'global_max_min_pool_cnt' not in globals():
global global_max_min_pool_cnt
global_max_min_pool_cnt = 0
if name is None:
name = 'GlobalMaxMinPooling2D_%d' % global_max_min_pool_cnt
global_max_min_pool_cnt += 1
def _global_max_plus_min(x):
x1 = GlobalMaxPooling2D()(x)
x2 = GlobalMaxPooling2D()(-x)
return x1 - x2
return Lambda(_global_max_plus_min, name=name)(x)
def kl_divergence_regularizer(x, rho=0.01):
def _kl_regularizer(y_pred):
_, rows, cols, _ = K.int_shape(y_pred)
vmax = K.max(y_pred, axis=(1, 2))
vmax = K.expand_dims(vmax, axis=(1))
vmax = K.expand_dims(vmax, axis=(1))
vmax = K.tile(vmax, [1, rows, cols, 1])
y_delta = K.cast(K.greater_equal(y_pred, vmax), 'float32')
return rho * K.sum(y_pred *
(K.log(K.clip(y_pred, K.epsilon(), 1.))
- K.log(K.clip(y_delta, K.epsilon(), 1.))) / (rows * cols)
)
# Build an auxiliary non trainable layer, just to use the activity reg.
num_filters = K.int_shape(x)[-1]
aux_conv = Conv2D(num_filters, (1, 1), use_bias=False,
activity_regularizer=_kl_regularizer)
aux_conv.trainable = False
x = aux_conv(x)
# Set identity weights
w = aux_conv.get_weights()
w[0].fill(0)
for i in range(num_filters):
w[0][0,0,i,i] = 1.
aux_conv.set_weights(w)
return x
def kronecker_prod(h, f, name='Kronecker_prod'):
""" # Inputs: inp[0] (heatmaps) and inp[1] (visual features)
"""
inp = [h, f]
def _combine_heatmaps_visual(inp):
hm = inp[0]
x = inp[1]
nj = K.int_shape(hm)[-1]
nf = K.int_shape(x)[-1]
hm = K.expand_dims(hm, axis=-1)
hm = K.tile(hm, (1, 1, 1, 1, 1, nf))
x = K.expand_dims(x, axis=-2)
x = K.tile(x, (1, 1, 1, 1, nj, 1))
x = hm * x
x = K.sum(x, axis=(2, 3))
return x
return Lambda(_combine_heatmaps_visual, name=name)(inp)
def lin_interpolation_1d(inp):
depth, num_filters = K.int_shape(inp)[1:]
conv = Conv1D(num_filters, depth, use_bias=False)
x = conv(inp)
w = conv.get_weights()
w[0].fill(0)
linspace = np.linspace(0.0, 1.0, num=depth)
for i in range(num_filters):
w[0][:, i, i] = linspace[:]
conv.set_weights(w)
conv.trainable = False
def _transpose(x):
x = K.squeeze(x, axis=-2)
x = K.expand_dims(x, axis=-1)
return x
x = Lambda(_transpose)(x)
return x
def lin_interpolation_2d(inp, dim):
num_rows, num_cols, num_filters = K.int_shape(inp)[1:]
conv = SeparableConv2D(num_filters, (num_rows, num_cols), use_bias=False)
x = conv(inp)
w = conv.get_weights()
w[0].fill(0)
w[1].fill(0)
linspace = linspace_2d(num_rows, num_cols, dim=dim)
for i in range(num_filters):
w[0][:,:, i, 0] = linspace[:,:]
w[1][0, 0, i, i] = 1.
conv.set_weights(w)
conv.trainable = False
x = Lambda(lambda x: K.squeeze(x, axis=1))(x)
x = Lambda(lambda x: K.squeeze(x, axis=1))(x)
x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)
return x
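# A minimal sketch of how these helpers compose into a small block (assumes a
# channels-last Keras backend; layer names are illustrative only):
#
#   inp = Input(shape=(256, 256, 3))
#   x = conv_bn_act(inp, 32, (3, 3), strides=(2, 2), name='stem1')   # Conv2D -> BN -> ReLU
#   x = separable_conv_bn_act(x, 64, (3, 3), name='stem2')           # depthwise-separable variant
#   x = act_channel_softmax(x, name='heatmaps')                      # spatial softmax per channel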
|
# Check whether "hello" appears as a subsequence of the input string.
s = input()
target = "hello"
matched = 0
for ch in s:
    if matched < len(target) and ch == target[matched]:
        matched += 1
print("YES" if matched == len(target) else "NO")
class Errors(object):
def __init__(self, data=None, parameter=None, token=None):
self.data = data
self.parameter = parameter
self.token = token
def check_error(self):
if 'error' in self.data:
if self.data['error'] == 'Authorization required':
raise ValueError('The API key is invalid. Check the API key in the personal account of the Reg.ru Cloud Servers service')
elif 'action' in self.data:
if self.data['action'] is None:
raise KeyError('No task with this id was found')
else:
return self.data
elif 'detail' in self.data:
detail = self.data['detail']
if 'is too short' in detail and 'ptr' in detail:
raise ValueError('The provided domain is invalid')
elif 'does not match' in self.data['detail'] and \
'^(?:([0-9]{1,3}\\\\.){3}[0-9]{1,3})' in detail:
raise ValueError('The provided domain is invalid')
elif 'The server encountered an internal error and was unable to complete your request' in detail:
raise ValueError('An invalid id was probably used')
else:
return self.data
elif 'code' in self.data:
code = self.data['code']
if code == 401:
raise ValueError('The API key is invalid. Check the API key in the personal account of the Reg.ru Cloud Servers service')
elif 'IP_NOT_FOUND' in code:
raise ValueError('No such IP address was found')
elif 'NO_SUCH_REGLET' in code:
raise ValueError('No such server exists')
elif 'NO_SUCH_SNAPSHOT' in code:
raise ValueError('No such snapshot exists')
elif 'NOT_IMPLEMENTED' in code:
raise ValueError('No such id exists')
elif 'VALIDATION_ERROR' in code and 'data' in self.data:
if 'image' in self.data['data'][0]:
raise ValueError('Check the image value')
elif 'size' in self.data['data'][0]:
raise ValueError('Check the size value')
elif 'ssh_key_id' in self.data['data'][0]:
raise ValueError('Check the ssh_keys value')
elif self.data['data'][0] == 'id':
raise KeyError('A server with this id does not exist. Check the server id')
elif self.data['data'][0] == 'name':
raise ValueError('Invalid server name')
else:
return self.data
elif 'SSH_KEY_DOES_NOT_EXIST' in code:
raise KeyError('An SSH key with this id does not exist')
elif 'RESOURCE_LOCKED' in code:
raise RuntimeError('The reglet is locked because a task is already in progress')
elif 'RESOURCE_NOT_FOUND' in code:
raise KeyError('The resource was not found. '
'An invalid resource id was probably specified, or it does not exist')
elif 'ERROR_IP_COUNT' in code:
raise KeyError('Invalid number of IP addresses')
else:
return self.data
else:
return self.data
def check_images(self):
params = ['distribution', 'application', 'snapshot', 'backup']
if self.parameter not in params:
raise KeyError('Missing value. Use one of the following values: '
'distribution, application, snapshot, backup')
def check_actions(self):
params = ['reboot', 'password_reset', 'start', 'stop', 'enable_backups', 'disable_backups', 'resize',
'rebuild', 'restore', 'clone', 'snapshot', 'generate_vnc_link', 'resize_isp_license']
if self.parameter not in params:
raise KeyError(f'No such operation type. Use one of the following values: {*params, }')
def check_ssh_key(self):
if 'detail' in self.data:
detail = self.data['detail']
if 'does not match' in detail:
if 'name' in detail:
raise ValueError('The key name is invalid')
elif 'public_key' in detail:
raise ValueError('The provided SSH key is invalid')
elif 'code' in self.data:
code = self.data['code']
if code == 'SSH_KEY_ALREADY_EXIST':
raise ValueError(f'The provided SSH key already exists: {self.data}')
elif code == 'SSH_KEY_DOES_NOT_EXIST':
raise KeyError(f'An SSH key with this id does not exist: {self.data}')
else:
return Errors(self.data).check_error()
else:
return Errors(self.data).check_error()
|
# The MIT License (MIT)
#
# Copyright (c) 2021 Huimao Chen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import lldb
import HMLLDBHelpers as HM
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand('command script add -f HMFont.printFont pfont -h "Print all font names supported by the device."')
def printFont(debugger, command, exe_ctx, result, internal_dict):
"""
Syntax:
pfont
Examples:
(lldb) pfont
This command is implemented in HMFont.py
"""
command_script = '''
NSMutableString *result = [[NSMutableString alloc] init];
unsigned int fontNamesCount = 0;
NSArray *familyNames = [UIFont familyNames];
for (NSString *familyName in familyNames) {
[result appendFormat:@"familyNames: %@\\n", familyName];
NSArray *fontNames = [UIFont fontNamesForFamilyName:familyName];
for (NSString *fontName in fontNames) {
[result appendFormat:@"\\tfontName: %@\\n", fontName];
fontNamesCount += 1;
}
}
[result insertString:[[NSString alloc] initWithFormat:@"Family names count: %ld, font names count: %u\\n", [familyNames count], fontNamesCount] atIndex:0];
(NSMutableString *)result;
'''
fontNames = HM.evaluateExpressionValue(command_script).GetObjectDescription()
HM.DPrint(fontNames)
|
from math import floor, log2
from typing import (
Any,
Collection,
Dict,
Iterator,
Optional,
Sequence,
Set,
Tuple,
Union,
)
from pystiche import ComplexObject, loss
from pystiche.misc import zip_equal
from .level import PyramidLevel
from .storage import ImageStorage
__all__ = ["ImagePyramid", "OctaveImagePyramid"]
class ImagePyramid(ComplexObject):
r"""Image pyramid for a coarse-to-fine optimization on different levels. If
iterated on yields :class:`~pystiche.pyramid.PyramidLevel` s and handles the
resizing of all set images and guides of ``resize_targets``.
Args:
edge_sizes: Edge sizes for each level.
num_steps: Number of steps for each level. If sequence of ``int`` its length
has to match the length of ``edge_sizes``.
edge: Corresponding edge to the edge size for each level. Can be ``"short"`` or
``"long"``. If sequence of ``str`` its length has to match the length of
``edge_sizes``. Defaults to ``"short"``.
interpolation_mode: Interpolation mode used for the resizing of the images.
Defaults to ``"bilinear"``.
.. note::
For the resizing of guides ``"nearest"`` is used regardless of the
``interpolation_mode``.
resize_targets: Targets for resizing of set images and guides during iteration.
"""
def __init__(
self,
edge_sizes: Sequence[int],
num_steps: Union[Sequence[int], int],
edge: Union[Sequence[str], str] = "short",
interpolation_mode: str = "bilinear",
resize_targets: Collection[loss.Loss] = (),
):
self._levels = self.build_levels(edge_sizes, num_steps, edge)
self.interpolation_mode = interpolation_mode
self._resize_targets = set(resize_targets)
@staticmethod
def build_levels(
edge_sizes: Sequence[int],
num_steps: Union[Sequence[int], int],
edge: Union[Sequence[str], str],
) -> Tuple[PyramidLevel, ...]:
num_levels = len(edge_sizes)
if isinstance(num_steps, int):
num_steps = [num_steps] * num_levels
if isinstance(edge, str):
edge = [edge] * num_levels
return tuple(
PyramidLevel(edge_size, num_steps_, edge_)
for edge_size, num_steps_, edge_ in zip_equal(edge_sizes, num_steps, edge)
)
# TODO: can this be removed?
def add_resize_target(self, loss: loss.Loss) -> None:
self._resize_targets.add(loss)
def __len__(self) -> int:
return len(self._levels)
def __getitem__(self, idx: int) -> PyramidLevel:
return self._levels[idx]
def __iter__(self) -> Iterator[PyramidLevel]:
image_storage = ImageStorage(self._resize_losses())
for level in self._levels:
try:
self._resize(level)
yield level
finally:
image_storage.restore()
def _resize(self, level: PyramidLevel) -> None:
for loss_ in self._resize_losses():
if isinstance(loss_, loss.ComparisonLoss):
if loss_.target_image is not None:
resized_image = level.resize_image(
loss_.target_image, interpolation_mode=self.interpolation_mode
)
resized_guide = (
level.resize_guide(loss_.target_guide)
if loss_.target_guide is not None
else None
)
loss_.set_target_image(resized_image, guide=resized_guide)
if loss_.input_guide is not None:
resized_guide = level.resize_guide(loss_.input_guide)
loss_.set_input_guide(resized_guide)
def _resize_losses(self) -> Set[loss.Loss]:
resize_losses = set()
for target in self._resize_targets:
if isinstance(target, loss.Loss):
resize_losses.add(target)
for loss_ in target._losses():
if not isinstance(loss_, loss.LossContainer):
resize_losses.add(loss_)
return resize_losses
def _properties(self) -> Dict[str, Any]:
dct = super()._properties()
if self.interpolation_mode != "bilinear":
dct["interpolation_mode"] = self.interpolation_mode
return dct
def _named_children(self) -> Iterator[Tuple[str, Any]]:
yield from super()._named_children()
for idx, level in enumerate(self._levels):
yield str(idx), level
class OctaveImagePyramid(ImagePyramid):
r"""Image pyramid that comprises levels spaced by a factor of two.
Args:
max_edge_size: Maximum edge size.
num_steps: Number of steps for each level.
.. note::
If ``num_steps`` is specified as a sequence of ``int`` s, you should also
specify ``num_levels`` so that the lengths match.
num_levels: Optional number of levels. If ``None``, the number is determined by
the number of steps of factor two between ``max_edge_size`` and
``min_edge_size``.
min_edge_size: Minimum edge size for the automatic calculation of
``num_levels``.
image_pyramid_kwargs: Additional options. See
:class:`~pystiche.pyramid.ImagePyramid` for details.
"""
def __init__(
self,
max_edge_size: int,
num_steps: Union[int, Sequence[int]],
num_levels: Optional[int] = None,
min_edge_size: int = 64,
**image_pyramid_kwargs: Any,
) -> None:
if num_levels is None:
num_levels = int(floor(log2(max_edge_size / min_edge_size))) + 1
edge_sizes = [
round(max_edge_size / (2.0 ** ((num_levels - 1) - level)))
for level in range(num_levels)
]
super().__init__(edge_sizes, num_steps, **image_pyramid_kwargs)
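# A minimal usage sketch (assuming this module is importable as pystiche.pyramid):
# with the default min_edge_size=64 and max_edge_size=512, num_levels is
# floor(log2(512 / 64)) + 1 = 4 and the computed edge_sizes are [64, 128, 256, 512],
# i.e. one octave (a factor of two) between consecutive levels.
#
#   pyramid = OctaveImagePyramid(max_edge_size=512, num_steps=200)
#   len(pyramid)  # -> 4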
|
import chainercv
import copy
import numpy as np
def _copy_transform(in_data):
out = []
for elem in in_data:
if isinstance(elem, np.ndarray):
elem = elem.copy()
else:
elem = copy.copy(elem)
out.append(elem)
return tuple(out)
def copy_dataset(dataset):
return chainercv.datasets.TransformDataset(dataset, _copy_transform)
|
"""
Good morning! Here's your coding interview problem for today.
This problem was asked by Google.
Suppose we represent our file system by a string in the following manner:
The string "dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext" represents:
dir
subdir1
subdir2
file.ext
The directory dir contains an empty sub-directory subdir1 and a sub-directory subdir2 containing a file file.ext.
The string "dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext" represents:
dir
subdir1
file1.ext
subsubdir1
subdir2
subsubdir2
file2.ext
The directory dir contains two sub-directories subdir1 and subdir2. subdir1 contains a file file1.ext and an empty second-level sub-directory subsubdir1. subdir2 contains a second-level sub-directory subsubdir2 containing a file file2.ext.
We are interested in finding the longest (number of characters) absolute path to a file within our file system. For example, in the second example above, the longest absolute path is "dir/subdir2/subsubdir2/file2.ext", and its length is 32 (not including the double quotes).
Given a string representing the file system in the above format, return the length of the longest absolute path to a file in the abstracted file system. If there is no file in the system, return 0.
Note:
The name of a file contains at least a period and an extension.
The name of a directory or sub-directory will not contain a period.
https://github.com/r1cc4rdo/daily_coding_problem/blob/master/daily_coding_problem_16_20.py
"""
def coding_problem_17(path_str):
if not path_str:
return 0
dirs, max_len = [None], 0
for token in path_str.split('\n'):
tabs = 0
while token[tabs] == '\t':
tabs += 1
if tabs > len(dirs):
raise RuntimeError('Malformed path string: nesting more than one level at a time.')
if tabs == len(dirs): # go one level deeper
if '.' in dirs[-1]:
raise RuntimeError('Malformed path string: a file cannot contain other entries.')
dirs.append(None) # make room for the new path item
else:
dirs = dirs[:tabs + 1]
dirs[-1] = str.strip(token)
if '.' in dirs[-1]: # path ends with a file
max_len = max(max_len, len('/'.join(dirs)))
return max_len
if __name__ == '__main__':
print(coding_problem_17('dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext'))
|
#! /usr/bin/env python
"""
singleSession.py
===================
This program is used to generate the subject- and session-specific workflows for BRAINSTool processing
Usage:
singleSession.py [--rewrite-datasinks] [--wfrun PLUGIN] [--use-sentinal] [--dry-run] --workphase WORKPHASE --pe ENV --ExperimentConfig FILE SESSIONS...
singleSession.py -v | --version
singleSession.py -h | --help
Arguments:
SESSIONS List of sessions to process. Specifying 'all' processes every session in
the database (specified in the --ExperimentConfig FILE)
Options:
-h, --help Show this help and exit
-v, --version Print the version and exit
--rewrite-datasinks Turn on the Nipype option to overwrite all files in the 'results' directory
--use-sentinal Use the t1_average file as a marker to determine if session needs to be run
--dry-run Do not submit jobs, but print diagnostics about which jobs would be run
--pe ENV The processing environment to use from configuration file
--wfrun PLUGIN The name of the workflow plugin option (default: 'local')
--workphase WORKPHASE The type of processing to be done [atlas-based-reference|subject-based-reference]
--ExperimentConfig FILE The configuration file
Examples:
$ singleSession.py --pe OSX --ExperimentConfig my_baw.config all
$ singleSession.py --use-sentinal --wfrun SGEGraph --pe OSX --ExperimentConfig my_baw.config 00001 00002
$ singleSession.py --use-sentinal --dry-run --wfrun SGEGraph --pe OSX --ExperimentConfig my_baw.config 00001 00002
$ singleSession.py --rewrite-datasinks --pe OSX --ExperimentConfig my_baw.config 00003
"""
import os
import re
def _create_single_session(dataDict, master_config, interpMode, pipeline_name):
"""
Create singleSession workflow on a single session
This is the main function to call when processing a data set with T1 & T2
data. ExperimentBaseDirectoryPrefix is the base of the directory to place results, T1Images & T2Images
are the lists of images to be used in the auto-workup. atlas_fname_wpath is
the path and filename of the atlas to use.
:param dataDict:
:param master_config:
:param interpMode:
:param pipeline_name:
:return:
"""
assert (
"tissue_classify" in master_config["components"]
or "auxlmk" in master_config["components"]
or "denoise" in master_config["components"]
or "landmark" in master_config["components"]
or "segmentation" in master_config["components"]
or "jointfusion_2015_wholebrain" in master_config["components"]
)
from nipype import config, logging
config.update_config(master_config) # Set universal pipeline options
logging.update_logging(config)
from BAW.workflows.baseline import generate_single_session_template_wf
project = dataDict["project"]
subject = dataDict["subject"]
session = dataDict["session"]
blackListFileName = dataDict["T1s"][0] + "_noDenoise"
isBlackList = os.path.isfile(blackListFileName)
pname = "{0}_{1}_{2}".format(master_config["workflow_phase"], subject, session)
onlyT1 = not (len(dataDict["T2s"]) > 0)
hasPDs = len(dataDict["PDs"]) > 0
hasFLs = len(dataDict["FLs"]) > 0
if onlyT1:
print("T1 Only processing starts ...")
else:
print("Multimodal processing starts ...")
doDenoise = False
if "denoise" in master_config["components"]:
if isBlackList:
print(
"""
Denoise is ignored when the session is in Blacklist
There is known issue that Landmark Detection algorithm
may not work well with denoising step
"""
)
doDenoise = False
else:
doDenoise = True
useEMSP = None
if len(dataDict["EMSP"]) > 0:
useEMSP = dataDict["EMSP"][0]
def replace_image_extensions(filename, new_extension):
filename_base = filename
for rmext in [r".gz$", r".nii$", r".hdr$", r".img$", r".dcm$", r".nrrd$", r".nhdr$", r".mhd$"]:
filename_base = re.sub(rmext, "", filename_base)
return filename_base + new_extension
input_sidecar_fcsv_filename = replace_image_extensions(dataDict["T1s"][0], ".fcsv")
if os.path.exists(input_sidecar_fcsv_filename):
useEMSP = input_sidecar_fcsv_filename
sessionWorkflow = generate_single_session_template_wf(
project,
subject,
session,
onlyT1,
hasPDs,
hasFLs,
master_config,
phase=master_config["workflow_phase"],
interpMode=interpMode,
pipeline_name=pipeline_name,
doDenoise=doDenoise,
badT2=dataDict["BadT2"],
useEMSP=useEMSP,
)
sessionWorkflow.base_dir = master_config["cachedir"]
sessionWorkflow_inputsspec = sessionWorkflow.get_node("inputspec")
sessionWorkflow_inputsspec.inputs.T1s = dataDict["T1s"]
sessionWorkflow_inputsspec.inputs.T2s = dataDict["T2s"]
sessionWorkflow_inputsspec.inputs.PDs = dataDict["PDs"]
sessionWorkflow_inputsspec.inputs.FLs = dataDict["FLs"]
if useEMSP is not None:
sessionWorkflow_inputsspec.inputs.EMSP = useEMSP
sessionWorkflow_inputsspec.inputs.OTHERs = dataDict["OTHERs"]
return sessionWorkflow
def create_and_run(
sessions, environment, experiment, pipeline, cluster, useSentinal, dryRun
):
"""
This function...
:param sessions:
:param environment:
:param experiment:
:param pipeline:
:param cluster:
:param useSentinal:
:param dryRun:
:return:
"""
from BAW.baw_exp import open_subject_database
from BAW.utilities.misc import add_dict
from collections import OrderedDict
import sys
from collections import (
OrderedDict,
) # Need OrderedDict internally to ensure consistent ordering
from BAW.workflows.utils import run_workflow
master_config = OrderedDict()
for configDict in [environment, experiment, pipeline, cluster]:
master_config = add_dict(master_config, configDict)
database = open_subject_database(
experiment["cachedir"], ["all"], environment["prefix"], experiment["dbfile"]
)
database.open_connection()
try:
all_sessions = database.get_all_sessions()
if not set(sessions) <= set(all_sessions) and "all" not in sessions:
missing = set(sessions) - set(all_sessions)
assert (
len(missing) == 0
), "Requested sessions are missing from the database: {0}\n\n{1}".format(
missing, all_sessions
)
elif "all" in sessions:
sessions = set(all_sessions)
else:
sessions = set(sessions)
print(("!=" * 40))
print(("Doing sessions {0}".format(sessions)))
print(("!=" * 40))
for session in sessions:
_dict = OrderedDict()
subject = database.get_subj_from_session(session)
t1_list = database.get_filenames_by_scan_type(session, ["T1-15", "T1-30"])
if len(t1_list) == 0:
print(
(
"ERROR: Skipping session {0} for subject {1} due to missing T1's".format(
session, subject
)
)
)
print("REMOVE OR FIX BEFORE CONTINUING")
continue
_dict["session"] = session
_dict["project"] = database.get_proj_from_session(session)
_dict["subject"] = subject
_dict["T1s"] = t1_list
_dict["T2s"] = database.get_filenames_by_scan_type(session, ["T2-15", "T2-30"])
_dict["BadT2"] = False
if _dict["T2s"] == database.get_filenames_by_scan_type(session, ["T2-15"]):
print("This T2 is not going to be used for JointFusion")
print("This T2 is not going to be used for JointFusion")
print("This T2 is not going to be used for JointFusion")
print("This T2 is not going to be used for JointFusion")
print((_dict["T2s"]))
_dict["BadT2"] = True
_dict["PDs"] = database.get_filenames_by_scan_type(session, ["PD-15", "PD-30"])
_dict["FLs"] = database.get_filenames_by_scan_type(session, ["FL-15", "FL-30"])
_dict["EMSP"] = database.get_filenames_by_scan_type(session, ["EMSP"])
_dict["OTHERs"] = database.get_filenames_by_scan_type(
session, ["OTHER-15", "OTHER-30"]
)
sentinal_file_basedir = os.path.join(
master_config["resultdir"],
_dict["project"],
_dict["subject"],
_dict["session"],
)
sentinal_file_list = list()
sentinal_file_list.append(os.path.join(sentinal_file_basedir))
if "denoise" in master_config["components"]:
# # NO SENTINAL FILE
pass
# # Use t1 average sentinal file if specified.
if "landmark" in master_config["components"]:
sentinal_file_list.append(
os.path.join(
sentinal_file_basedir,
"ACPCAlign",
"landmarkInitializer_atlas_to_subject_transform.h5",
)
)
if "tissue_classify" in master_config["components"]:
for tc_file in [
"complete_brainlabels_seg.nii.gz",
"t1_average_BRAINSABC.nii.gz",
]:
sentinal_file_list.append(
os.path.join(sentinal_file_basedir, "TissueClassify", tc_file)
)
if "warp_atlas_to_subject" in master_config["components"]:
warp_atlas_file_list = [
"hncma_atlas.nii.gz",
"l_accumben_ProbabilityMap.nii.gz",
"l_caudate_ProbabilityMap.nii.gz",
"l_globus_ProbabilityMap.nii.gz",
"l_hippocampus_ProbabilityMap.nii.gz",
"l_putamen_ProbabilityMap.nii.gz",
"l_thalamus_ProbabilityMap.nii.gz",
"left_hemisphere_wm.nii.gz",
"phi.nii.gz",
"r_accumben_ProbabilityMap.nii.gz",
"r_caudate_ProbabilityMap.nii.gz",
"r_globus_ProbabilityMap.nii.gz",
"r_hippocampus_ProbabilityMap.nii.gz",
"r_putamen_ProbabilityMap.nii.gz",
"r_thalamus_ProbabilityMap.nii.gz",
"rho.nii.gz",
"right_hemisphere_wm.nii.gz",
"template_WMPM2_labels.nii.gz",
"template_headregion.nii.gz",
"template_leftHemisphere.nii.gz",
"template_nac_labels.nii.gz",
"template_rightHemisphere.nii.gz",
"template_ventricles.nii.gz",
"theta.nii.gz",
]
for ff in warp_atlas_file_list:
sentinal_file_list.append(
os.path.join(sentinal_file_basedir, "WarpedAtlas2Subject", ff)
)
if "jointfusion_2015_wholebrain" in master_config["components"]:
sentinal_file_list.append(
os.path.join(
sentinal_file_basedir,
"TissueClassify",
"JointFusion_HDAtlas20_2015_lobar_label.nii.gz",
)
)
sentinal_file_list.append(
os.path.join(
sentinal_file_basedir, "TissueClassify", "lobeVolumes_JSON.json"
)
)
if master_config["workflow_phase"] == "atlas-based-reference":
atlasDirectory = os.path.join(
master_config["atlascache"], "spatialImages", "rho.nii.gz"
)
sentinal_file_list.append(atlasDirectory)
else:
atlasDirectory = os.path.join(
master_config["previousresult"], subject, "Atlas", "AVG_rho.nii.gz"
)
sentinal_file_list.append(atlasDirectory)
sentinal_file_list.append(
os.path.join(
master_config["previousresult"],
subject,
"Atlas",
"AVG_template_headregion.nii.gz",
)
)
if os.path.exists(atlasDirectory):
print(("LOOKING FOR DIRECTORY {0}".format(atlasDirectory)))
else:
print(("MISSING REQUIRED ATLAS INPUT {0}".format(atlasDirectory)))
print(("SKIPPING: {0} prerequisites missing".format(session)))
continue
## Use different sentinal file if segmentation specified.
from BAW.workflows.baseline import determine_if_segmentation_should_be_done
do_BRAINSCut_Segmentation = determine_if_segmentation_should_be_done(
master_config
)
if do_BRAINSCut_Segmentation:
sentinal_file_list.append(
os.path.join(
sentinal_file_basedir,
"CleanedDenoisedRFSegmentations",
"allLabels_seg.nii.gz",
)
)
def all_paths_exists(list_of_paths):
"""
This function...
:param list_of_paths:
:return:
"""
is_missing = False
for ff in list_of_paths:
if not os.path.exists(ff):
is_missing = True
print(("MISSING: {0}".format(ff)))
return not is_missing
if useSentinal and all_paths_exists(sentinal_file_list):
print(("SKIPPING: {0} exists".format(sentinal_file_list)))
else:
print("PROCESSING INCOMPLETE: at least 1 required file does not exists")
if dryRun == False:
workflow = _create_single_session(
_dict,
master_config,
"Linear",
"singleSession_{0}_{1}".format(
_dict["subject"], _dict["session"]
),
)
print(("Starting session {0}".format(session)))
# HACK Hard-coded to SGEGraph, but --wfrun is ignored completely
run_workflow(
workflow,
plugin=master_config["plugin_name"],
plugin_args=master_config["plugin_args"],
)
else:
print("EXITING WITHOUT WORK DUE TO dryRun flag")
except:
raise
finally:
try:
database.close_connection()
except:
pass
def single_session_main(environment, experiment, pipeline, cluster, **kwds):
"""
This function...
:param environment:
:param experiment:
:param pipeline:
:param cluster:
:param **kwds:
:return:
"""
from BAW.utilities.configFileParser import nipype_options
print("Copying Atlas directory and determining appropriate Nipype options...")
pipeline = nipype_options(
kwds, pipeline, cluster, experiment, environment
) # Generate Nipype options
print("Getting session(s) from database...")
create_and_run(
kwds["SESSIONS"],
environment,
experiment,
pipeline,
cluster,
useSentinal=kwds["--use-sentinal"],
dryRun=kwds["--dry-run"],
)
return 0
# #####################################
# Set up the environment, process command line options, and start processing
#
if __name__ == "__main__":
import sys
import os
from docopt import docopt
from BAW import setup_environment
argv = docopt(__doc__, version="1.1")
print(argv)
print(("=" * 100))
environment, experiment, pipeline, cluster = setup_environment(argv)
exit = single_session_main(environment, experiment, pipeline, cluster, **argv)
sys.exit(exit)
|
# +
from RPA.Browser.Selenium import Selenium
import time
import os
from os import replace
import pandas as pd
browser=Selenium()
url='https://heycarson.com/task-catalog-browse/task-types/development'
if not os.path.exists('Output'):
os.makedirs('Output')
path_dload=(((os.getcwd()).replace('\\','\\\\'))+'\\Output')
demo_link_list=[]
desc_list=[]
task_list=[]
def all_scrap():
for z in range(1,15):
for i in range(1,10):
browser.open_available_browser(url)
browser.maximize_browser_window()
for x in range(1,z):
time.sleep(5)
browser.click_element('//*[@id="page-results"]/div[2]/div/a[4]')
time.sleep(5)
_new_path_='//*[@id="page-results"]/div[1]/div/div['+(str(i))+']/div/div/div[1]/a'
browser.click_element(_new_path_)
time.sleep(5)
print("Page:-",(str(z)),"Title:-",(str(i)),browser.get_text('//*[@id="root"]/div[4]/div/div[1]/div[2]/h1'))
task_list.append(browser.get_text('//*[@id="root"]/div[4]/div/div[1]/div[2]/h1'))
print("Page:-",(str(z)),"Article:-",(str(i)),browser.get_text('//*[@id="root"]/div[4]/div/div[1]/div[5]/div[2]'))
desc_list.append(browser.get_text('//*[@id="root"]/div[4]/div/div[1]/div[5]/div[2]'))
ele_status=browser.get_element_status('//*[@id="root"]/div[4]/div/div[1]/div[4]/div[1]/a')
if ele_status['visible']==True:
browser.click_element('//*[@id="root"]/div[4]/div/div[1]/div[4]/div[1]/a')
r=browser.get_window_handles()
browser.switch_window(r[1])
demo_link=browser.get_location()
print("Demo Link:-",demo_link)
demo_link_list.append(demo_link)
else:
demo_link_list.append('nan')
browser.close_browser()
all_scrap()
df = pd.DataFrame({'Task Name':task_list,'Description':desc_list,'Demo Links':demo_link_list})
df.to_csv('OUTPUT.csv')
# + active=""
#
# -
|
"""Config for mara Google Sheets Downloader
You need to configure oauth2 credentials for either a google service or a google user account.
"""
import typing as t
def gs_service_account_private_key_id()-> t.Optional[str]:
"""Google Service Account private_key_id used to download the Google Sheets"""
return None
def gs_service_account_private_key()-> t.Optional[str]:
"""Google Service Account private_key used to download the Google Sheets"""
return None
def gs_service_account_client_email()-> t.Optional[str]:
"""Google Service Account client_email used to download the Google Sheets"""
return None
def gs_service_account_client_id()-> t.Optional[str]:
"""Google Service Account client_id used to download the Google Sheets"""
return None
def gs_user_account_client_id()-> t.Optional[str]:
"""Google User Account client_id used to download the Google Sheets"""
return None
def gs_user_account_client_secret()-> t.Optional[str]:
"""Google User Account client_secret used to download the Google Sheets"""
return None
def gs_user_account_refresh_token()-> t.Optional[str]:
"""Google User Account refresh_token used to download the Google Sheets"""
return None
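# A hypothetical override sketch (the module path below is an assumption; adapt
# it to wherever this config module lives in your project). These config
# functions are plain callables, so a local setup module can simply replace them:
#
#   import mara_google_sheet_downloader.config as gs_config
#   gs_config.gs_user_account_client_id = lambda: '1234567890.apps.googleusercontent.com'
#   gs_config.gs_user_account_client_secret = lambda: '<client secret>'
#   gs_config.gs_user_account_refresh_token = lambda: '<refresh token>'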
|
# Assesses the readability score of a document, line-by-line.
import argparse
import textstat
def get_scorer(choice):
scorers = {
0: textstat.flesch_reading_ease,
1: textstat.flesch_kincaid_grade,
2: textstat.dale_chall_readability_score,
3: textstat.text_standard
}
return scorers[choice]
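# Usage sketch: get_scorer(1) returns textstat.flesch_kincaid_grade, so
# get_scorer(1)("The cat sat on the mat.") yields a (low) U.S. grade level,
# while get_scorer(3) returns textstat.text_standard, whose default output is a
# string such as "2nd and 3rd grade" rather than a number (so --summary only
# makes sense with the numeric scorers 0-2).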
def main():
ap = argparse.ArgumentParser()
ap.add_argument('--scorer', default=0, type=int, help='Readability Scorer')
ap.add_argument('--input', help='input file to be scored')
ap.add_argument('--output', help='output file for readability scores')
ap.add_argument('--summary', default=0,
help='whether to calculate average readability score for the test set')
args = ap.parse_args()
sentences = [line for line in open(args.input)]
scorer = get_scorer(args.scorer)
scores = list(map(lambda x: scorer(x), sentences))
with open(args.output, 'w') as o:
for score in scores:
o.write(str(score) + '\n')
o.close()
if args.summary:
avg_score = sum(scores) / len(scores)
summary_f = open(args.output + '.summary', 'w')
summary_f.write(str(avg_score))
summary_f.close()
if __name__ == '__main__':
main()
|
from django.contrib import admin
from emails.models import Email
# Register your models here.
admin.site.register(Email)
|
from GenericRequest import GenericRequest
from kol.manager import PatternManager
class LoadClanAdminRequest(GenericRequest):
"Load's the clan administration page."
def __init__(self, session):
super(LoadClanAdminRequest, self).__init__(session)
self.url = session.serverURL + "clan_admin.php"
def parseResponse(self):
# Get the clan name.
namePattern = PatternManager.getOrCompilePattern("clanName")
match = namePattern.search(self.responseText)
self.responseData["clanName"] = match.group(1)
# Get the clan credo.
credoPattern = PatternManager.getOrCompilePattern("clanCredo")
match = credoPattern.search(self.responseText)
self.responseData["clanCredo"] = match.group(1)
# Get the clan website.
websitePattern = PatternManager.getOrCompilePattern("clanWebsite")
match = websitePattern.search(self.responseText)
self.responseData["clanWebsite"] = match.group(1)
# See if the clan is accepting applications.
clanAcceptingAppsPattern = PatternManager.getOrCompilePattern("clanAcceptingApps")
if clanAcceptingAppsPattern.search(self.responseText):
self.responseData["acceptingApps"] = True
else:
self.responseData["acceptingApps"] = False
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.layers import Conv2D, Dense, Flatten
from keras.models import Sequential
# https://www.kaggle.com/crawford/deepsat-sat4
# _____________________________________________________________________________
# - Each sample image is 28x28 pixels and consists of 4 bands - red, green, blue and near infrared.
# - X_train_sat4.csv: 400,000 training images, 28x28 images each with 4 channels
# - y_train_sat4.csv: 400,000 training labels, 1x4 one-hot encoded vectors
# - X_test_sat4.csv: 100,000 test images, 28x28 images each with 4 channels
# - y_test_sat4.csv: 100,000 test labels, 1x4 one-hot encoded vectors
IM_SIZE = (28, 28, 4)
X_TRAIN_PATH = 'X_train_sat4.csv'
Y_TRAIN_PATH = 'y_train_sat4.csv'
X_TEST_PATH = 'X_test_sat4.csv'
Y_TEST_PATH = 'y_test_sat4.csv'
class_names = ['Barren Land', 'Trees', 'Grassland', 'None']
def index(arr):
ind = arr.tolist().index(1)
return ind
# Load data and labels
X_train = pd.read_csv(X_TRAIN_PATH, nrows=10000)
Y_train = pd.read_csv(Y_TRAIN_PATH, nrows=10000)
X_test = pd.read_csv(X_TEST_PATH, nrows=100)
Y_test = pd.read_csv(Y_TEST_PATH, nrows=100)
# Convert pandas to numpy
X_train = X_train.to_numpy()
Y_train = Y_train.to_numpy()
X_test = X_test.to_numpy()
Y_test = Y_test.to_numpy()
# Now we have to reshape each of them from a list of numbers to a 28*28*4 image and normalize dataset
X_train_img = X_train.reshape(-1, *IM_SIZE).astype(np.uint8)/255
X_test_img = X_test.reshape(-1, *IM_SIZE).astype(np.uint8)/255
# Check some picture
plt.figure(figsize=(10, 10))
for i in range(25):
plt.subplot(5, 5, i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(X_train_img[i], cmap=plt.cm.binary)
plt.xlabel(class_names[index(Y_train[i])])
plt.show()
# create model
model = Sequential()
# add model layers
model.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=(28, 28, 4)))
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(Flatten())
model.add(Dense(4, activation='softmax'))
# compile model using accuracy to measure model performance
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# train the model
model.fit(X_train_img,
Y_train,
validation_data=(X_test_img, Y_test),
epochs=3)
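# Report held-out accuracy explicitly (a minimal sketch; evaluate() returns
# [loss, accuracy] because the model was compiled with metrics=['accuracy'])
test_loss, test_acc = model.evaluate(X_test_img, Y_test, verbose=0)
print('Test loss: %.4f, test accuracy: %.4f' % (test_loss, test_acc))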
# we can make prediction for test data
prediction = model.predict(X_test_img)
print(prediction)
print(np.argmax(prediction[0]))
print(index(Y_test[0]))
plt.figure()
plt.imshow(X_test_img[0][..., :3])  # show only the RGB bands
plt.xlabel(class_names[index(Y_test[0])])
plt.colorbar()
plt.grid(False)
plt.show()
|
# -*- coding: utf-8 -*-
"""Generate the Resilient customizations required for algosec_resilient"""
from __future__ import print_function
from resilient_circuits.util import *
def codegen_reload_data():
"""Parameters to codegen used to generate the algosec_resilient package"""
reload_params = {"package": u"algosec_resilient",
"incident_fields": [],
"action_fields": [],
"function_params": [u"algosec_hostname"],
"datatables": [u"algosec_associated_applications", u"algosec_internet_connectivity_queries", u"algosec_isolation_requests"],
"message_destinations": [u"algosec"],
"functions": [u"algosec_check_host_internet_connectivity", u"algosec_isolate_host_from_network", u"algosec_list_associated_applications"],
"phases": [],
"automatic_tasks": [],
"scripts": [],
"workflows": [u"example_algosec_check_host_internet_connectivity", u"example_algosec_isolate_host_from_network", u"example_algosec_list_associated_applications"],
"actions": [u"Example: AlgoSec: Check Host Internet Connectivity", u"Example: AlgoSec: List Associated Applications", u"Isolate from Network (AlgoSec)"]
}
return reload_params
def customization_data(client=None):
"""Produce any customization definitions (types, fields, message destinations, etc)
that should be installed by `resilient-circuits customize`
"""
# This import data contains:
# Function inputs:
# algosec_hostname
# DataTables:
# algosec_associated_applications
# algosec_internet_connectivity_queries
# algosec_isolation_requests
# Message Destinations:
# algosec
# Functions:
# algosec_check_host_internet_connectivity
# algosec_isolate_host_from_network
# algosec_list_associated_applications
# Workflows:
# example_algosec_check_host_internet_connectivity
# example_algosec_isolate_host_from_network
# example_algosec_list_associated_applications
# Rules:
# Example: AlgoSec: Check Host Internet Connectivity
# Example: AlgoSec: List Associated Applications
# Isolate from Network (AlgoSec)
yield ImportDefinition(u"""
eyJzZXJ2ZXJfdmVyc2lvbiI6IHsibWFqb3IiOiAzMSwgIm1pbm9yIjogMCwgImJ1aWxkX251bWJl
ciI6IDQyNTQsICJ2ZXJzaW9uIjogIjMxLjAuNDI1NCJ9LCAiZXhwb3J0X2Zvcm1hdF92ZXJzaW9u
IjogMiwgImlkIjogNzAsICJleHBvcnRfZGF0ZSI6IDE1NDM5NTM2MjkzMDYsICJmaWVsZHMiOiBb
eyJpZCI6IDUyOTAsICJuYW1lIjogImluY190cmFpbmluZyIsICJ0ZXh0IjogIlNpbXVsYXRpb24i
LCAicHJlZml4IjogbnVsbCwgInR5cGVfaWQiOiAwLCAidG9vbHRpcCI6ICJXaGV0aGVyIHRoZSBp
bmNpZGVudCBpcyBhIHNpbXVsYXRpb24gb3IgYSByZWd1bGFyIGluY2lkZW50LiAgVGhpcyBmaWVs
ZCBpcyByZWFkLW9ubHkuIiwgImlucHV0X3R5cGUiOiAiYm9vbGVhbiIsICJoaWRlX25vdGlmaWNh
dGlvbiI6IGZhbHNlLCAiY2hvc2VuIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIi
OiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAiaW50ZXJuYWwiOiBmYWxzZSwgInV1aWQi
OiAiYzNmMGUzZWQtMjFlMS00ZDUzLWFmZmItZmU1Y2EzMzA4Y2NhIiwgIm9wZXJhdGlvbnMiOiBb
XSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidmFsdWVzIjogW10sICJyZWFkX29ubHkiOiB0cnVl
LCAiY2hhbmdlYWJsZSI6IHRydWUsICJyaWNoX3RleHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAi
aW5jaWRlbnQvaW5jX3RyYWluaW5nIiwgInRlbXBsYXRlcyI6IFtdLCAiZGVwcmVjYXRlZCI6IGZh
bHNlfSwgeyJpZCI6IDU3NzMsICJuYW1lIjogImFsZ29zZWNfaG9zdG5hbWUiLCAidGV4dCI6ICJh
bGdvc2VjX2hvc3RuYW1lIiwgInByZWZpeCI6IG51bGwsICJ0eXBlX2lkIjogMTEsICJ0b29sdGlw
IjogIlRoZSBob3N0bmFtZSB1c2VkIHRvIGNhcnJ5IG9wZXJhdGlvbnMgd2l0aCB0aGUgQWxnb1Nl
YyBzZXJ2ZXIiLCAicGxhY2Vob2xkZXIiOiAiSG9zdG5hbWUgKGUuZyAxMC4wLjAuMTIpIiwgImlu
cHV0X3R5cGUiOiAidGV4dCIsICJyZXF1aXJlZCI6ICJhbHdheXMiLCAiaGlkZV9ub3RpZmljYXRp
b24iOiBmYWxzZSwgImNob3NlbiI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjog
ZmFsc2UsICJibGFua19vcHRpb24iOiBmYWxzZSwgImludGVybmFsIjogZmFsc2UsICJ1dWlkIjog
ImZkOWIzNjUwLWY3OWYtNDg2Mi04ZjBkLWEzZGZlY2ExNjYwZiIsICJvcGVyYXRpb25zIjogW10s
ICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInZhbHVlcyI6IFtdLCAicmVhZF9vbmx5IjogZmFsc2Us
ICJjaGFuZ2VhYmxlIjogdHJ1ZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJf
X2Z1bmN0aW9uL2FsZ29zZWNfaG9zdG5hbWUiLCAidGVtcGxhdGVzIjogW10sICJkZXByZWNhdGVk
IjogZmFsc2V9XSwgImluY2lkZW50X3R5cGVzIjogW3sidXBkYXRlX2RhdGUiOiAxNTQzOTUzOTk0
MzE4LCAiY3JlYXRlX2RhdGUiOiAxNTQzOTUzOTk0MzE4LCAidXVpZCI6ICJiZmVlYzJkNC0zNzcw
LTExZTgtYWQzOS00YTAwMDQwNDRhYTAiLCAiZGVzY3JpcHRpb24iOiAiQ3VzdG9taXphdGlvbiBQ
YWNrYWdlcyAoaW50ZXJuYWwpIiwgImV4cG9ydF9rZXkiOiAiQ3VzdG9taXphdGlvbiBQYWNrYWdl
cyAoaW50ZXJuYWwpIiwgIm5hbWUiOiAiQ3VzdG9taXphdGlvbiBQYWNrYWdlcyAoaW50ZXJuYWwp
IiwgImVuYWJsZWQiOiBmYWxzZSwgInN5c3RlbSI6IGZhbHNlLCAicGFyZW50X2lkIjogbnVsbCwg
ImhpZGRlbiI6IGZhbHNlLCAiaWQiOiAwfV0sICJwaGFzZXMiOiBbXSwgImF1dG9tYXRpY190YXNr
cyI6IFtdLCAib3ZlcnJpZGVzIjogW10sICJtZXNzYWdlX2Rlc3RpbmF0aW9ucyI6IFt7Im5hbWUi
OiAiYWxnb3NlYyIsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJhbGdvc2VjIiwgImRlc3RpbmF0aW9u
X3R5cGUiOiAwLCAiZXhwZWN0X2FjayI6IHRydWUsICJ1c2VycyI6IFsiYWxtb2cuY29oZW5AYWxn
b3NlYy5jb20iXSwgInV1aWQiOiAiYjc5OWViMTYtNDUzZi00YmU4LWJmYTgtMmU1NDdlMWU3MmM0
IiwgImV4cG9ydF9rZXkiOiAiYWxnb3NlYyJ9XSwgImFjdGlvbnMiOiBbeyJpZCI6IDg3NCwgIm5h
bWUiOiAiRXhhbXBsZTogQWxnb1NlYzogQ2hlY2sgSG9zdCBJbnRlcm5ldCBDb25uZWN0aXZpdHki
LCAidHlwZSI6IDAsICJvYmplY3RfdHlwZSI6ICJhcnRpZmFjdCIsICJjb25kaXRpb25zIjogW3si
bWV0aG9kIjogIm9iamVjdF9hZGRlZCIsICJmaWVsZF9uYW1lIjogbnVsbCwgInZhbHVlIjogbnVs
bCwgInR5cGUiOiBudWxsLCAiZXZhbHVhdGlvbl9pZCI6IG51bGx9LCB7Im1ldGhvZCI6ICJlcXVh
bHMiLCAiZmllbGRfbmFtZSI6ICJhcnRpZmFjdC50eXBlIiwgInZhbHVlIjogIklQIEFkZHJlc3Mi
LCAidHlwZSI6IG51bGwsICJldmFsdWF0aW9uX2lkIjogbnVsbH1dLCAiYXV0b21hdGlvbnMiOiBb
XSwgIm1lc3NhZ2VfZGVzdGluYXRpb25zIjogW10sICJ3b3JrZmxvd3MiOiBbImV4YW1wbGVfYWxn
b3NlY19jaGVja19ob3N0X2ludGVybmV0X2Nvbm5lY3Rpdml0eSJdLCAidmlld19pdGVtcyI6IFtd
LCAidGltZW91dF9zZWNvbmRzIjogODY0MDAsICJ1dWlkIjogImVlYjhkOTJkLTY3MmUtNGY4YS1i
YjA0LThlZWUyNjVlNTM5OCIsICJleHBvcnRfa2V5IjogIkV4YW1wbGU6IEFsZ29TZWM6IENoZWNr
IEhvc3QgSW50ZXJuZXQgQ29ubmVjdGl2aXR5IiwgImxvZ2ljX3R5cGUiOiAiYWxsIn0sIHsiaWQi
OiA4NzYsICJuYW1lIjogIkV4YW1wbGU6IEFsZ29TZWM6IExpc3QgQXNzb2NpYXRlZCBBcHBsaWNh
dGlvbnMiLCAidHlwZSI6IDAsICJvYmplY3RfdHlwZSI6ICJhcnRpZmFjdCIsICJjb25kaXRpb25z
IjogW3sibWV0aG9kIjogImVxdWFscyIsICJmaWVsZF9uYW1lIjogImFydGlmYWN0LnR5cGUiLCAi
dmFsdWUiOiAiSVAgQWRkcmVzcyIsICJ0eXBlIjogbnVsbCwgImV2YWx1YXRpb25faWQiOiBudWxs
fSwgeyJtZXRob2QiOiAib2JqZWN0X2FkZGVkIiwgImZpZWxkX25hbWUiOiBudWxsLCAidmFsdWUi
OiBudWxsLCAidHlwZSI6IG51bGwsICJldmFsdWF0aW9uX2lkIjogbnVsbH1dLCAiYXV0b21hdGlv
bnMiOiBbXSwgIm1lc3NhZ2VfZGVzdGluYXRpb25zIjogW10sICJ3b3JrZmxvd3MiOiBbImV4YW1w
bGVfYWxnb3NlY19saXN0X2Fzc29jaWF0ZWRfYXBwbGljYXRpb25zIl0sICJ2aWV3X2l0ZW1zIjog
W10sICJ0aW1lb3V0X3NlY29uZHMiOiA4NjQwMCwgInV1aWQiOiAiMzFkNTgyZGYtYTI3OS00NDc3
LWE4NWMtYzhiZWI4NDViNGM2IiwgImV4cG9ydF9rZXkiOiAiRXhhbXBsZTogQWxnb1NlYzogTGlz
dCBBc3NvY2lhdGVkIEFwcGxpY2F0aW9ucyIsICJsb2dpY190eXBlIjogImFsbCJ9LCB7ImlkIjog
ODc5LCAibmFtZSI6ICJJc29sYXRlIGZyb20gTmV0d29yayAoQWxnb1NlYykiLCAidHlwZSI6IDEs
ICJvYmplY3RfdHlwZSI6ICJhcnRpZmFjdCIsICJjb25kaXRpb25zIjogW3sibWV0aG9kIjogImVx
dWFscyIsICJmaWVsZF9uYW1lIjogImFydGlmYWN0LnR5cGUiLCAidmFsdWUiOiAiSVAgQWRkcmVz
cyIsICJ0eXBlIjogbnVsbCwgImV2YWx1YXRpb25faWQiOiBudWxsfV0sICJhdXRvbWF0aW9ucyI6
IFtdLCAibWVzc2FnZV9kZXN0aW5hdGlvbnMiOiBbXSwgIndvcmtmbG93cyI6IFsiZXhhbXBsZV9h
bGdvc2VjX2lzb2xhdGVfaG9zdF9mcm9tX25ldHdvcmsiXSwgInZpZXdfaXRlbXMiOiBbXSwgInRp
bWVvdXRfc2Vjb25kcyI6IDg2NDAwLCAidXVpZCI6ICIxYTdlZmQ4Mi1hNmY5LTRjZTItODE3Yi1j
OTQ2MDA3MjkxMDMiLCAiZXhwb3J0X2tleSI6ICJJc29sYXRlIGZyb20gTmV0d29yayAoQWxnb1Nl
YykiLCAibG9naWNfdHlwZSI6ICJhbGwifV0sICJsYXlvdXRzIjogW10sICJub3RpZmljYXRpb25z
IjogbnVsbCwgInRpbWVmcmFtZXMiOiBudWxsLCAibG9jYWxlIjogbnVsbCwgImluZHVzdHJpZXMi
OiBudWxsLCAicmVndWxhdG9ycyI6IG51bGwsICJnZW9zIjogbnVsbCwgInRhc2tfb3JkZXIiOiBb
XSwgImFjdGlvbl9vcmRlciI6IFtdLCAidHlwZXMiOiBbeyJpZCI6IG51bGwsICJ0eXBlX2lkIjog
OCwgInR5cGVfbmFtZSI6ICJhbGdvc2VjX2Fzc29jaWF0ZWRfYXBwbGljYXRpb25zIiwgImZpZWxk
cyI6IHsiYXJ0aWZhY3RfaXAiOiB7ImlkIjogNTgyNSwgIm5hbWUiOiAiYXJ0aWZhY3RfaXAiLCAi
dGV4dCI6ICJBcnRpZmFjdCBJUCIsICJwcmVmaXgiOiBudWxsLCAidHlwZV9pZCI6IDEwOTQsICJ0
b29sdGlwIjogIiIsICJwbGFjZWhvbGRlciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgInJl
cXVpcmVkIjogImFsd2F5cyIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiY2hvc2VuIjog
ZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6
IGZhbHNlLCAiaW50ZXJuYWwiOiBmYWxzZSwgInV1aWQiOiAiNDAyNDFiNDMtNGIxMS00YjUyLThi
NWUtYjY3YTA0OGU0ZWNkIiwgIm9wZXJhdGlvbnMiOiBbXSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9
LCAidmFsdWVzIjogW10sICJyZWFkX29ubHkiOiBmYWxzZSwgImNoYW5nZWFibGUiOiB0cnVlLCAi
cmljaF90ZXh0IjogZmFsc2UsICJleHBvcnRfa2V5IjogImFsZ29zZWNfYXNzb2NpYXRlZF9hcHBs
aWNhdGlvbnMvYXJ0aWZhY3RfaXAiLCAib3JkZXIiOiAwLCAid2lkdGgiOiAxMTMsICJ0ZW1wbGF0
ZXMiOiBbXSwgImRlcHJlY2F0ZWQiOiBmYWxzZX0sICJhcHBsaWNhdGlvbl9uYW1lIjogeyJpZCI6
IDU4MjYsICJuYW1lIjogImFwcGxpY2F0aW9uX25hbWUiLCAidGV4dCI6ICJBcHBsaWNhdGlvbiBO
YW1lIiwgInByZWZpeCI6IG51bGwsICJ0eXBlX2lkIjogMTA5NCwgInRvb2x0aXAiOiAiVGhlIGFz
c29jaWF0ZWQgYnVzaW5lc3MgYXBwbGljYXRpb24gbmFtZSIsICJwbGFjZWhvbGRlciI6ICJFeGNo
YW5nZSBTZXJ2ZXIiLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgInJlcXVpcmVkIjogImFsd2F5cyIs
ICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiY2hvc2VuIjogdHJ1ZSwgImRlZmF1bHRfY2hv
c2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogdHJ1ZSwgImludGVybmFsIjog
ZmFsc2UsICJ1dWlkIjogIjA1ZTQyN2Y3LWMyMDMtNGI4Yi05YTIwLTYzOGRjM2FlNDlkZCIsICJv
cGVyYXRpb25zIjogW10sICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInZhbHVlcyI6IFtdLCAicmVh
ZF9vbmx5IjogZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAi
ZXhwb3J0X2tleSI6ICJhbGdvc2VjX2Fzc29jaWF0ZWRfYXBwbGljYXRpb25zL2FwcGxpY2F0aW9u
X25hbWUiLCAib3JkZXIiOiAxLCAid2lkdGgiOiAxODQsICJ0ZW1wbGF0ZXMiOiBbXSwgImRlcHJl
Y2F0ZWQiOiBmYWxzZX0sICJidXNpbmVzc2Zsb3dfZGFzaGJvYXJkIjogeyJpZCI6IDU4MjgsICJu
YW1lIjogImJ1c2luZXNzZmxvd19kYXNoYm9hcmQiLCAidGV4dCI6ICJBcHBsaWNhdGlvbiBJbiBC
dXNpbmVzc0Zsb3ciLCAicHJlZml4IjogbnVsbCwgInR5cGVfaWQiOiAxMDk0LCAidG9vbHRpcCI6
ICJVUkwgZm9yIHRoZSBhcHBsaWNhdGlvbidzIGRhc2hib2FyZCBvbiBBbGdvU2VjIEJ1c2luZXNz
RmxvdyIsICJwbGFjZWhvbGRlciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0YXJlYSIsICJyZXF1
aXJlZCI6ICJhbHdheXMiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgImNob3NlbiI6IHRy
dWUsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IHRy
dWUsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6ICI3MDVhMmJkNi1kNTQ3LTQ2MzUtYmFhNi05
MmRlMmMyMjJjM2YiLCAib3BlcmF0aW9ucyI6IFtdLCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ2
YWx1ZXMiOiBbXSwgInJlYWRfb25seSI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6IHRydWUsICJyaWNo
X3RleHQiOiB0cnVlLCAiZXhwb3J0X2tleSI6ICJhbGdvc2VjX2Fzc29jaWF0ZWRfYXBwbGljYXRp
b25zL2J1c2luZXNzZmxvd19kYXNoYm9hcmQiLCAib3JkZXIiOiAzLCAid2lkdGgiOiAzMTksICJ0
ZW1wbGF0ZXMiOiBbXSwgImRlcHJlY2F0ZWQiOiBmYWxzZX0sICJpc19jcml0aWNhbCI6IHsiaWQi
OiA1ODI3LCAibmFtZSI6ICJpc19jcml0aWNhbCIsICJ0ZXh0IjogIklzIENyaXRpY2FsIiwgInBy
ZWZpeCI6IG51bGwsICJ0eXBlX2lkIjogMTA5NCwgInRvb2x0aXAiOiAiSXMgdGhlIGFwcGxpY2F0
aW9uIG1hcmtlZCBhcyBjcml0aWNhbCBpbiBBbGdvU2VjIEJ1c2luZXNzRmxvdyIsICJwbGFjZWhv
bGRlciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJib29sZWFuIiwgInJlcXVpcmVkIjogImFsd2F5cyIs
ICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiY2hvc2VuIjogZmFsc2UsICJkZWZhdWx0X2No
b3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IHRydWUsICJpbnRlcm5hbCI6
IGZhbHNlLCAidXVpZCI6ICI5ZmE2ZDRkNS01MDIzLTRiOTAtODI4Yi05YWFmOTljM2ZlZDIiLCAi
b3BlcmF0aW9ucyI6IFtdLCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ2YWx1ZXMiOiBbXSwgInJl
YWRfb25seSI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6IHRydWUsICJyaWNoX3RleHQiOiBmYWxzZSwg
ImV4cG9ydF9rZXkiOiAiYWxnb3NlY19hc3NvY2lhdGVkX2FwcGxpY2F0aW9ucy9pc19jcml0aWNh
bCIsICJvcmRlciI6IDIsICJ3aWR0aCI6IDM4LCAidGVtcGxhdGVzIjogW10sICJkZXByZWNhdGVk
IjogZmFsc2V9fSwgInByb3BlcnRpZXMiOiB7ImNhbl9jcmVhdGUiOiBmYWxzZSwgImNhbl9kZXN0
cm95IjogZmFsc2UsICJmb3Jfd2hvIjogW119LCAicGFyZW50X3R5cGVzIjogWyJpbmNpZGVudCJd
LCAiZGlzcGxheV9uYW1lIjogIkFzc29jaWF0ZWQgQXBwbGljYXRpb25zIChBbGdvU2VjKSIsICJm
b3Jfbm90aWZpY2F0aW9ucyI6IGZhbHNlLCAiZm9yX2FjdGlvbnMiOiBmYWxzZSwgImZvcl9jdXN0
b21fZmllbGRzIjogZmFsc2UsICJleHBvcnRfa2V5IjogImFsZ29zZWNfYXNzb2NpYXRlZF9hcHBs
aWNhdGlvbnMiLCAidXVpZCI6ICI2ZjYwZjU0YS01NmMzLTRlODctYWQwZS0xZDQxM2ZhZmIxYjYi
LCAiYWN0aW9ucyI6IFtdLCAic2NyaXB0cyI6IFtdfSwgeyJpZCI6IG51bGwsICJ0eXBlX2lkIjog
OCwgInR5cGVfbmFtZSI6ICJhbGdvc2VjX2ludGVybmV0X2Nvbm5lY3Rpdml0eV9xdWVyaWVzIiwg
ImZpZWxkcyI6IHsiYXJ0aWZhY3RfaXAiOiB7ImlkIjogNTgyMywgIm5hbWUiOiAiYXJ0aWZhY3Rf
aXAiLCAidGV4dCI6ICJBcnRpZmFjdCBJUCIsICJwcmVmaXgiOiBudWxsLCAidHlwZV9pZCI6IDEw
OTMsICJ0b29sdGlwIjogIiIsICJwbGFjZWhvbGRlciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0
IiwgInJlcXVpcmVkIjogImFsd2F5cyIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiY2hv
c2VuIjogdHJ1ZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiYmxhbmtfb3B0
aW9uIjogdHJ1ZSwgImludGVybmFsIjogZmFsc2UsICJ1dWlkIjogIjc4NzA4NzA5LWI1MWQtNGEx
Mi05M2U0LWUzYWZmMDg3ZjQ5OCIsICJvcGVyYXRpb25zIjogW10sICJvcGVyYXRpb25fcGVybXMi
OiB7fSwgInZhbHVlcyI6IFtdLCAicmVhZF9vbmx5IjogZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1
ZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJhbGdvc2VjX2ludGVybmV0X2Nv
bm5lY3Rpdml0eV9xdWVyaWVzL2FydGlmYWN0X2lwIiwgIm9yZGVyIjogMCwgIndpZHRoIjogMTE0
LCAidGVtcGxhdGVzIjogW10sICJkZXByZWNhdGVkIjogZmFsc2V9LCAiaXNfaXRfY29ubmVjdGVk
X3RvX3RoZV9pbnRlcm5ldCI6IHsiaWQiOiA1ODI5LCAibmFtZSI6ICJpc19pdF9jb25uZWN0ZWRf
dG9fdGhlX2ludGVybmV0IiwgInRleHQiOiAiQ29ubmVjdGVkIFRvIEludGVybmV0PyIsICJwcmVm
aXgiOiBudWxsLCAidHlwZV9pZCI6IDEwOTMsICJ0b29sdGlwIjogIldoZXRoZXIgb3Igbm90IHRo
ZSBob3N0IGhhcyBhY2Nlc3MgdG8gdGhlIGludGVybmV0IiwgInBsYWNlaG9sZGVyIjogIlllcy9O
byIsICJpbnB1dF90eXBlIjogInRleHQiLCAicmVxdWlyZWQiOiAiYWx3YXlzIiwgImhpZGVfbm90
aWZpY2F0aW9uIjogZmFsc2UsICJjaG9zZW4iOiB0cnVlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2Vy
dmVyIjogZmFsc2UsICJibGFua19vcHRpb24iOiB0cnVlLCAiaW50ZXJuYWwiOiBmYWxzZSwgInV1
aWQiOiAiMDQ5ZWMwZWMtYzBiNC00YzFmLWIwYWItYTU0YzRhYjRmMmE1IiwgIm9wZXJhdGlvbnMi
OiBbXSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidmFsdWVzIjogW10sICJyZWFkX29ubHkiOiBm
YWxzZSwgImNoYW5nZWFibGUiOiB0cnVlLCAicmljaF90ZXh0IjogZmFsc2UsICJleHBvcnRfa2V5
IjogImFsZ29zZWNfaW50ZXJuZXRfY29ubmVjdGl2aXR5X3F1ZXJpZXMvaXNfaXRfY29ubmVjdGVk
X3RvX3RoZV9pbnRlcm5ldCIsICJvcmRlciI6IDEsICJ3aWR0aCI6IDg4LCAidGVtcGxhdGVzIjog
W10sICJkZXByZWNhdGVkIjogZmFsc2V9LCAicXVlcnlfdXJsIjogeyJpZCI6IDU4MzEsICJuYW1l
IjogInF1ZXJ5X3VybCIsICJ0ZXh0IjogIlRyYWZmaWMgU2ltdWxhdGlvbiBEZXRhaWxzIiwgInBy
ZWZpeCI6IG51bGwsICJ0eXBlX2lkIjogMTA5MywgInRvb2x0aXAiOiAiIiwgInBsYWNlaG9sZGVy
IjogIiIsICJpbnB1dF90eXBlIjogInRleHRhcmVhIiwgInJlcXVpcmVkIjogImFsd2F5cyIsICJo
aWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiY2hvc2VuIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nl
bl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAiaW50ZXJuYWwiOiBm
YWxzZSwgInV1aWQiOiAiZjI2M2Q5ZDYtNGUzNC00OWMxLTg4YzYtM2I1NDEwOGEyNmM5IiwgIm9w
ZXJhdGlvbnMiOiBbXSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidmFsdWVzIjogW10sICJyZWFk
X29ubHkiOiBmYWxzZSwgImNoYW5nZWFibGUiOiB0cnVlLCAicmljaF90ZXh0IjogdHJ1ZSwgImV4
cG9ydF9rZXkiOiAiYWxnb3NlY19pbnRlcm5ldF9jb25uZWN0aXZpdHlfcXVlcmllcy9xdWVyeV91
cmwiLCAib3JkZXIiOiAyLCAid2lkdGgiOiA0ODIsICJ0ZW1wbGF0ZXMiOiBbXSwgImRlcHJlY2F0
ZWQiOiBmYWxzZX19LCAicHJvcGVydGllcyI6IHsiY2FuX2NyZWF0ZSI6IGZhbHNlLCAiY2FuX2Rl
c3Ryb3kiOiBmYWxzZSwgImZvcl93aG8iOiBbXX0sICJwYXJlbnRfdHlwZXMiOiBbImluY2lkZW50
Il0sICJkaXNwbGF5X25hbWUiOiAiQ29ubmVjdGl2aXR5IHRvIEludGVybmV0IChBbGdvU2VjKSIs
ICJmb3Jfbm90aWZpY2F0aW9ucyI6IGZhbHNlLCAiZm9yX2FjdGlvbnMiOiBmYWxzZSwgImZvcl9j
dXN0b21fZmllbGRzIjogZmFsc2UsICJleHBvcnRfa2V5IjogImFsZ29zZWNfaW50ZXJuZXRfY29u
bmVjdGl2aXR5X3F1ZXJpZXMiLCAidXVpZCI6ICIzODgzYzA3MS0yMmYyLTQ0YmUtOGJhMC05YzA3
YzZjMGQ1OTIiLCAiYWN0aW9ucyI6IFtdLCAic2NyaXB0cyI6IFtdfSwgeyJpZCI6IG51bGwsICJ0
eXBlX2lkIjogOCwgInR5cGVfbmFtZSI6ICJhbGdvc2VjX2lzb2xhdGlvbl9yZXF1ZXN0cyIsICJm
aWVsZHMiOiB7Imhvc3RuYW1lIjogeyJpZCI6IDU3NzUsICJuYW1lIjogImhvc3RuYW1lIiwgInRl
eHQiOiAiQXJ0aWZhY3QgSVAiLCAicHJlZml4IjogbnVsbCwgInR5cGVfaWQiOiAxMDkxLCAidG9v
bHRpcCI6ICIiLCAicGxhY2Vob2xkZXIiOiAiIiwgImlucHV0X3R5cGUiOiAidGV4dCIsICJoaWRl
X25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiY2hvc2VuIjogdHJ1ZSwgImRlZmF1bHRfY2hvc2VuX2J5
X3NlcnZlciI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogdHJ1ZSwgImludGVybmFsIjogZmFsc2Us
ICJ1dWlkIjogIjlmZGNiYmM1LTlkNTUtNDE1Yy05OWFjLTc4ZjQ3Mjk1ZWU0NSIsICJvcGVyYXRp
b25zIjogW10sICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInZhbHVlcyI6IFtdLCAicmVhZF9vbmx5
IjogZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAiZXhwb3J0
X2tleSI6ICJhbGdvc2VjX2lzb2xhdGlvbl9yZXF1ZXN0cy9ob3N0bmFtZSIsICJvcmRlciI6IDEs
ICJ3aWR0aCI6IDEyMywgInRlbXBsYXRlcyI6IFtdLCAiZGVwcmVjYXRlZCI6IGZhbHNlfSwgImNo
YW5nZV9yZXF1ZXN0X3VybCI6IHsiaWQiOiA1Nzc2LCAibmFtZSI6ICJjaGFuZ2VfcmVxdWVzdF91
cmwiLCAidGV4dCI6ICJDaGFuZ2UgUmVxdWVzdCIsICJwcmVmaXgiOiBudWxsLCAidHlwZV9pZCI6
IDEwOTEsICJ0b29sdGlwIjogIiIsICJwbGFjZWhvbGRlciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJ0
ZXh0YXJlYSIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiY2hvc2VuIjogZmFsc2UsICJk
ZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAi
aW50ZXJuYWwiOiBmYWxzZSwgInV1aWQiOiAiM2FkOTAyZWItNzlhNS00NWQ5LWI4MzYtNTZjMGU1
Y2FlNzA0IiwgIm9wZXJhdGlvbnMiOiBbXSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidmFsdWVz
IjogW10sICJyZWFkX29ubHkiOiBmYWxzZSwgImNoYW5nZWFibGUiOiB0cnVlLCAicmljaF90ZXh0
IjogdHJ1ZSwgImV4cG9ydF9rZXkiOiAiYWxnb3NlY19pc29sYXRpb25fcmVxdWVzdHMvY2hhbmdl
X3JlcXVlc3RfdXJsIiwgIm9yZGVyIjogMiwgIndpZHRoIjogNDY1LCAidGVtcGxhdGVzIjogW10s
ICJkZXByZWNhdGVkIjogZmFsc2V9LCAiaWQiOiB7ImlkIjogNTc3NCwgIm5hbWUiOiAiaWQiLCAi
dGV4dCI6ICJDaGFuZ2UgUmVxdWVzdCBJRCIsICJwcmVmaXgiOiBudWxsLCAidHlwZV9pZCI6IDEw
OTEsICJ0b29sdGlwIjogIiIsICJwbGFjZWhvbGRlciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJudW1i
ZXIiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgImNob3NlbiI6IHRydWUsICJkZWZhdWx0
X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IHRydWUsICJpbnRlcm5h
bCI6IGZhbHNlLCAidXVpZCI6ICJiZWFiNmY4My1hZDQ2LTQ1MTAtODA4Ny0xYzM5N2JiNzkxNmIi
LCAib3BlcmF0aW9ucyI6IFtdLCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ2YWx1ZXMiOiBbXSwg
InJlYWRfb25seSI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6IHRydWUsICJyaWNoX3RleHQiOiBmYWxz
ZSwgImV4cG9ydF9rZXkiOiAiYWxnb3NlY19pc29sYXRpb25fcmVxdWVzdHMvaWQiLCAib3JkZXIi
OiAwLCAid2lkdGgiOiA5MiwgInRlbXBsYXRlcyI6IFtdLCAiZGVwcmVjYXRlZCI6IGZhbHNlfX0s
ICJwcm9wZXJ0aWVzIjogeyJjYW5fY3JlYXRlIjogZmFsc2UsICJjYW5fZGVzdHJveSI6IGZhbHNl
LCAiZm9yX3dobyI6IFtdfSwgInBhcmVudF90eXBlcyI6IFsiaW5jaWRlbnQiXSwgImRpc3BsYXlf
bmFtZSI6ICJJc29sYXRpb24gQ2hhbmdlIFJlcXVlc3RzIChBbGdvU2VjKSIsICJmb3Jfbm90aWZp
Y2F0aW9ucyI6IGZhbHNlLCAiZm9yX2FjdGlvbnMiOiBmYWxzZSwgImZvcl9jdXN0b21fZmllbGRz
IjogZmFsc2UsICJleHBvcnRfa2V5IjogImFsZ29zZWNfaXNvbGF0aW9uX3JlcXVlc3RzIiwgInV1
aWQiOiAiZDgyYzAxYTAtNDdiYS00MmQ1LTg1NmQtMTA2OGM4NTMxMzRiIiwgImFjdGlvbnMiOiBb
XSwgInNjcmlwdHMiOiBbXX1dLCAic2NyaXB0cyI6IFtdLCAiaW5jaWRlbnRfYXJ0aWZhY3RfdHlw
ZXMiOiBbXSwgIndvcmtmbG93cyI6IFt7IndvcmtmbG93X2lkIjogMzAxLCAibmFtZSI6ICJFeGFt
cGxlOiBBbGdvU2VjOiBDaGVjayBIb3N0IEludGVybmV0IENvbm5lY3Rpdml0eSIsICJwcm9ncmFt
bWF0aWNfbmFtZSI6ICJleGFtcGxlX2FsZ29zZWNfY2hlY2tfaG9zdF9pbnRlcm5ldF9jb25uZWN0
aXZpdHkiLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAiZGVzY3JpcHRpb24iOiAiQW4gZXhh
bXBsZSB3b3JrZmxvdyBzaG93aW5nIGhvdyBjaGVjayBpbnRlcm5ldCBjb25uZWN0aW9uIGZvciBh
IGdpdmVuIFwiSVAgQWRkcmVzc1wiIGFydGlmYWN0IHVzaW5nIEFsZ29TZWMgRmlyZUZsb3cuIFRo
ZSBjb25uZWN0aXZpdHkgcXVlcnkgcmVzdWx0cyBhcmUgdXBkYXRlZCBpbiB0aGUgaW5jaWRlbnQn
cyBEYXRhIFRhYmxlIGNhbGxlZCBcIkFsZ29TZWMgSW50ZXJuZXQgQ29ubmVjdGlvblwiIiwgImNy
ZWF0b3JfaWQiOiAiYWxtb2cuY29oZW5AYWxnb3NlYy5jb20iLCAibGFzdF9tb2RpZmllZF9ieSI6
ICJhbG1vZy5jb2hlbkBhbGdvc2VjLmNvbSIsICJsYXN0X21vZGlmaWVkX3RpbWUiOiAxNTQzOTUx
NTM0MjMxLCAiZXhwb3J0X2tleSI6ICJleGFtcGxlX2FsZ29zZWNfY2hlY2tfaG9zdF9pbnRlcm5l
dF9jb25uZWN0aXZpdHkiLCAidXVpZCI6ICIwN2FlOWNjNS04OGE4LTRjM2UtOTA2Ni1iMTc0NTI1
MzU4MTQiLCAiY29udGVudCI6IHsid29ya2Zsb3dfaWQiOiAiZXhhbXBsZV9hbGdvc2VjX2NoZWNr
X2hvc3RfaW50ZXJuZXRfY29ubmVjdGl2aXR5IiwgInhtbCI6ICI8P3htbCB2ZXJzaW9uPVwiMS4w
XCIgZW5jb2Rpbmc9XCJVVEYtOFwiPz48ZGVmaW5pdGlvbnMgeG1sbnM9XCJodHRwOi8vd3d3Lm9t
Zy5vcmcvc3BlYy9CUE1OLzIwMTAwNTI0L01PREVMXCIgeG1sbnM6YnBtbmRpPVwiaHR0cDovL3d3
dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9ESVwiIHhtbG5zOm9tZ2RjPVwiaHR0cDovL3d3
dy5vbWcub3JnL3NwZWMvREQvMjAxMDA1MjQvRENcIiB4bWxuczpvbWdkaT1cImh0dHA6Ly93d3cu
b21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RJXCIgeG1sbnM6cmVzaWxpZW50PVwiaHR0cDovL3Jl
c2lsaWVudC5pYm0uY29tL2JwbW5cIiB4bWxuczp4c2Q9XCJodHRwOi8vd3d3LnczLm9yZy8yMDAx
L1hNTFNjaGVtYVwiIHhtbG5zOnhzaT1cImh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1h
LWluc3RhbmNlXCIgdGFyZ2V0TmFtZXNwYWNlPVwiaHR0cDovL3d3dy5jYW11bmRhLm9yZy90ZXN0
XCI+PHByb2Nlc3MgaWQ9XCJleGFtcGxlX2FsZ29zZWNfY2hlY2tfaG9zdF9pbnRlcm5ldF9jb25u
ZWN0aXZpdHlcIiBpc0V4ZWN1dGFibGU9XCJ0cnVlXCIgbmFtZT1cIkV4YW1wbGU6IEFsZ29TZWM6
IENoZWNrIEhvc3QgSW50ZXJuZXQgQ29ubmVjdGl2aXR5XCI+PGRvY3VtZW50YXRpb24+PCFbQ0RB
VEFbQW4gZXhhbXBsZSB3b3JrZmxvdyBzaG93aW5nIGhvdyBjaGVjayBpbnRlcm5ldCBjb25uZWN0
aW9uIGZvciBhIGdpdmVuIFwiSVAgQWRkcmVzc1wiIGFydGlmYWN0IHVzaW5nIEFsZ29TZWMgRmly
ZUZsb3cuIFRoZSBjb25uZWN0aXZpdHkgcXVlcnkgcmVzdWx0cyBhcmUgdXBkYXRlZCBpbiB0aGUg
aW5jaWRlbnQncyBEYXRhIFRhYmxlIGNhbGxlZCBcIkFsZ29TZWMgSW50ZXJuZXQgQ29ubmVjdGlv
blwiXV0+PC9kb2N1bWVudGF0aW9uPjxzdGFydEV2ZW50IGlkPVwiU3RhcnRFdmVudF8xNTVhc3ht
XCI+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18wMjZvcjlwPC9vdXRnb2luZz48L3N0YXJ0RXZlbnQ+
PHNlcnZpY2VUYXNrIGlkPVwiU2VydmljZVRhc2tfMGZmczk0OFwiIG5hbWU9XCJBbGdvU2VjOiBD
aGVjayBIb3N0IEludGVybmV0IENvbm4uLi5cIiByZXNpbGllbnQ6dHlwZT1cImZ1bmN0aW9uXCI+
PGV4dGVuc2lvbkVsZW1lbnRzPjxyZXNpbGllbnQ6ZnVuY3Rpb24gdXVpZD1cImU2ODA5MmZjLTZk
ODItNGQ2OC1iOWQ2LTA0NTVmYjExZWZiNVwiPntcImlucHV0c1wiOnt9LFwicG9zdF9wcm9jZXNz
aW5nX3NjcmlwdFwiOlwiIyMgIEV4YW1wbGU6IEFsZ29TZWM6IENoZWNrIEhvc3QgSW50ZXJuZXQg
Q29ubmVjdGl2aXR5IC0gcG9zdC1wcm9jZXNzaW5nIHNjcmlwdCAjI1xcblxcbiMgRXhhbXBsZSBv
ZiBleHBlY3RlZCBpbnRlcm5ldCBjb25uZWN0aXZpdHkgcmVzdWx0XFxuXFxcIlxcXCJcXFwiXFxu
eydhcnRpZmFjdF9pcCc6ICcxMC4wLjAuMScsICdpc19pdF9jb25uZWN0ZWRfdG9fdGhlX2ludGVy
bmV0JzogJ1llcycsICdxdWVyeV91cmwnOiAnaHR0cHM6Ly9sb2NhbC5hbGdvc2VjL2ZhL3F1ZXJ5
L3Jlc3VsdHMvIy93b3JrL0FMTF9GSVJFV0FMTFNfcXVlcnktMTU0MzYyMjU2MjIwNi8nfVxcblxc
bm9yXFxuXFxueydhcnRpZmFjdF9pcCc6ICcxMC4wLjAuMScsICdpc19pdF9jb25uZWN0ZWRfdG9f
dGhlX2ludGVybmV0JzogJ05vdCBSb3V0ZWQnLCAncXVlcnlfdXJsJzogJ2h0dHBzOi8vbG9jYWwu
YWxnb3NlYy9mYS9xdWVyeS9yZXN1bHRzLyMvd29yay9BTExfRklSRVdBTExTX3F1ZXJ5LTE1NDM2
MjI1NjIyMDYvJ31cXG5cXFwiXFxcIlxcXCJcXG5cXG4jICBHbG9iYWxzXFxuRklFTERfTkFNRVMg
PSBbXFxuICAgJ2FydGlmYWN0X2lwJyxcXG4gICAnaXNfaXRfY29ubmVjdGVkX3RvX3RoZV9pbnRl
cm5ldCcsXFxuICAgJ3F1ZXJ5X3VybCcsXFxuXVxcblxcbiMgUHJvY2Vzc2luZyBpZiB0aGUgZnVu
Y3Rpb24gaXMgYSBzdWNjZXNzXFxuIyBpZihyZXN1bHRzLnN1Y2Nlc3MpOlxcbmVudHJ5ID0gcmVz
dWx0c1xcbiMgQWRkIFJvd1xcbnJvdyA9IGluY2lkZW50LmFkZFJvdyhcXFwiYWxnb3NlY19pbnRl
cm5ldF9jb25uZWN0aXZpdHlfcXVlcmllc1xcXCIpXFxuXFxuZm9yIGZpZWxkX25hbWUgaW4gRklF
TERfTkFNRVM6XFxuICB0cnk6XFxuICAgIHJvd192YWx1ZSA9IGhlbHBlci5jcmVhdGVSaWNoVGV4
dChzdHIoZW50cnlbZmllbGRfbmFtZV0pKVxcbiAgICByb3dbZmllbGRfbmFtZV0gPSByb3dfdmFs
dWVcXG4gIGV4Y2VwdCBJbmRleEVycm9yOlxcbiAgICByb3dbZmllbGRfbmFtZV0gPSAnTi9BJ1wi
LFwicHJlX3Byb2Nlc3Npbmdfc2NyaXB0XCI6XCJpbnB1dHMuYWxnb3NlY19ob3N0bmFtZSA9IGFy
dGlmYWN0LnZhbHVlXCJ9PC9yZXNpbGllbnQ6ZnVuY3Rpb24+PC9leHRlbnNpb25FbGVtZW50cz48
aW5jb21pbmc+U2VxdWVuY2VGbG93XzAyNm9yOXA8L2luY29taW5nPjxvdXRnb2luZz5TZXF1ZW5j
ZUZsb3dfMHlpbzFwNDwvb3V0Z29pbmc+PC9zZXJ2aWNlVGFzaz48ZW5kRXZlbnQgaWQ9XCJFbmRF
dmVudF8wZHQwOHIxXCI+PGluY29taW5nPlNlcXVlbmNlRmxvd18weWlvMXA0PC9pbmNvbWluZz48
L2VuZEV2ZW50PjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3dfMDI2b3I5cFwiIHNvdXJj
ZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlNlcnZpY2VUYXNrXzBmZnM5
NDhcIi8+PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18weWlvMXA0XCIgc291cmNlUmVm
PVwiU2VydmljZVRhc2tfMGZmczk0OFwiIHRhcmdldFJlZj1cIkVuZEV2ZW50XzBkdDA4cjFcIi8+
PHRleHRBbm5vdGF0aW9uIGlkPVwiVGV4dEFubm90YXRpb25fMWt4eGl5dFwiPjx0ZXh0PlN0YXJ0
IHlvdXIgd29ya2Zsb3cgaGVyZTwvdGV4dD48L3RleHRBbm5vdGF0aW9uPjxhc3NvY2lhdGlvbiBp
ZD1cIkFzc29jaWF0aW9uXzFzZXVqNDhcIiBzb3VyY2VSZWY9XCJTdGFydEV2ZW50XzE1NWFzeG1c
IiB0YXJnZXRSZWY9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCIvPjwvcHJvY2Vzcz48YnBtbmRp
OkJQTU5EaWFncmFtIGlkPVwiQlBNTkRpYWdyYW1fMVwiPjxicG1uZGk6QlBNTlBsYW5lIGJwbW5F
bGVtZW50PVwidW5kZWZpbmVkXCIgaWQ9XCJCUE1OUGxhbmVfMVwiPjxicG1uZGk6QlBNTlNoYXBl
IGJwbW5FbGVtZW50PVwiU3RhcnRFdmVudF8xNTVhc3htXCIgaWQ9XCJTdGFydEV2ZW50XzE1NWFz
eG1fZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjM2XCIgd2lkdGg9XCIzNlwiIHg9XCIxNjJc
IiB5PVwiMTg4XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMFwi
IHdpZHRoPVwiOTBcIiB4PVwiMTU3XCIgeT1cIjIyM1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9i
cG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiVGV4dEFubm90
YXRpb25fMWt4eGl5dFwiIGlkPVwiVGV4dEFubm90YXRpb25fMWt4eGl5dF9kaVwiPjxvbWdkYzpC
b3VuZHMgaGVpZ2h0PVwiMzBcIiB3aWR0aD1cIjEwMFwiIHg9XCI5OVwiIHk9XCIyNTRcIi8+PC9i
cG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJBc3NvY2lhdGlv
bl8xc2V1ajQ4XCIgaWQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4X2RpXCI+PG9tZ2RpOndheXBvaW50
IHg9XCIxNjlcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIyMFwiLz48b21nZGk6d2F5
cG9pbnQgeD1cIjE1M1wiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjU0XCIvPjwvYnBt
bmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiU2VydmljZVRhc2tf
MGZmczk0OFwiIGlkPVwiU2VydmljZVRhc2tfMGZmczk0OF9kaVwiPjxvbWdkYzpCb3VuZHMgaGVp
Z2h0PVwiODBcIiB3aWR0aD1cIjEwMFwiIHg9XCIyNjBcIiB5PVwiMTY2XCIvPjwvYnBtbmRpOkJQ
TU5TaGFwZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIkVuZEV2ZW50XzBkdDA4cjFc
IiBpZD1cIkVuZEV2ZW50XzBkdDA4cjFfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjM2XCIg
d2lkdGg9XCIzNlwiIHg9XCI0MjlcIiB5PVwiMTg4XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdk
YzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBcIiB4PVwiNDQ3XCIgeT1cIjIyN1wiLz48
L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBt
bkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMDI2b3I5cFwiIGlkPVwiU2VxdWVuY2VGbG93XzAyNm9y
OXBfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE5OFwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRc
IiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMjYwXCIgeHNpOnR5cGU9XCJvbWdkYzpQ
b2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9
XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCIyMjlcIiB5PVwiMTg0XCIvPjwvYnBtbmRpOkJQTU5MYWJl
bD48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiU2VxdWVu
Y2VGbG93XzB5aW8xcDRcIiBpZD1cIlNlcXVlbmNlRmxvd18weWlvMXA0X2RpXCI+PG9tZ2RpOndh
eXBvaW50IHg9XCIzNjBcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48b21n
ZGk6d2F5cG9pbnQgeD1cIjQyOVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIv
PjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBc
IiB4PVwiMzk0LjVcIiB5PVwiMTg0XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1O
RWRnZT48L2JwbW5kaTpCUE1OUGxhbmU+PC9icG1uZGk6QlBNTkRpYWdyYW0+PC9kZWZpbml0aW9u
cz4iLCAidmVyc2lvbiI6IDIxfSwgImFjdGlvbnMiOiBbXX0sIHsid29ya2Zsb3dfaWQiOiAzMDIs
ICJuYW1lIjogIkV4YW1wbGU6IEFsZ29TZWM6IExpc3QgQXNzb2NpYXRlZCBBcHBsaWNhdGlvbnMi
LCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBsZV9hbGdvc2VjX2xpc3RfYXNzb2NpYXRlZF9h
cHBsaWNhdGlvbnMiLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAiZGVzY3JpcHRpb24iOiAi
R2l2ZW4gYW4gSVAvSG9zdCBsaXN0IGFsbCB0aGUgYXBwbGljYXRpb25zIGFzc29jaWF0ZWQgd2l0
aCBpdCB0byBiZXR0ZXIgYXNzZXMgdGhlIHJpc2sgb2YgdGhlIGluY2lkZW50LiBJbnNlcnQgdGhl
IHJlc3VsdHMgaW50byB0aGUgXCJBbGdvU2VjIEFzc29jaWF0ZWQgQXBwbGljYXRpb25zXCIgRGF0
YSBUYWJsZSBhbmQgc3BlY2lmeSB0aGUgYXBwbGljYXRpb24ncyBjcml0aWNhbGl0eSBhbmQgYSBs
aW5rIHVybCB0byB0aGUgYXBwbGljYXRpb24gZGFzaGJvYXJkIG9uIEFsZ29TZWMgQnVzaW5lc3NG
bG93LiIsICJjcmVhdG9yX2lkIjogImFsbW9nLmNvaGVuQGFsZ29zZWMuY29tIiwgImxhc3RfbW9k
aWZpZWRfYnkiOiAiYWxtb2cuY29oZW5AYWxnb3NlYy5jb20iLCAibGFzdF9tb2RpZmllZF90aW1l
IjogMTU0Mzk1MTUzNTkzNywgImV4cG9ydF9rZXkiOiAiZXhhbXBsZV9hbGdvc2VjX2xpc3RfYXNz
b2NpYXRlZF9hcHBsaWNhdGlvbnMiLCAidXVpZCI6ICI5NjJhODVjNS02MTFkLTQzZjMtOGIyOS0y
YjQ1ZDliYzhhOTMiLCAiY29udGVudCI6IHsid29ya2Zsb3dfaWQiOiAiZXhhbXBsZV9hbGdvc2Vj
X2xpc3RfYXNzb2NpYXRlZF9hcHBsaWNhdGlvbnMiLCAieG1sIjogIjw/eG1sIHZlcnNpb249XCIx
LjBcIiBlbmNvZGluZz1cIlVURi04XCI/PjxkZWZpbml0aW9ucyB4bWxucz1cImh0dHA6Ly93d3cu
b21nLm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQvTU9ERUxcIiB4bWxuczpicG1uZGk9XCJodHRwOi8v
d3d3Lm9tZy5vcmcvc3BlYy9CUE1OLzIwMTAwNTI0L0RJXCIgeG1sbnM6b21nZGM9XCJodHRwOi8v
d3d3Lm9tZy5vcmcvc3BlYy9ERC8yMDEwMDUyNC9EQ1wiIHhtbG5zOm9tZ2RpPVwiaHR0cDovL3d3
dy5vbWcub3JnL3NwZWMvREQvMjAxMDA1MjQvRElcIiB4bWxuczpyZXNpbGllbnQ9XCJodHRwOi8v
cmVzaWxpZW50LmlibS5jb20vYnBtblwiIHhtbG5zOnhzZD1cImh0dHA6Ly93d3cudzMub3JnLzIw
MDEvWE1MU2NoZW1hXCIgeG1sbnM6eHNpPVwiaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hl
bWEtaW5zdGFuY2VcIiB0YXJnZXROYW1lc3BhY2U9XCJodHRwOi8vd3d3LmNhbXVuZGEub3JnL3Rl
c3RcIj48cHJvY2VzcyBpZD1cImV4YW1wbGVfYWxnb3NlY19saXN0X2Fzc29jaWF0ZWRfYXBwbGlj
YXRpb25zXCIgaXNFeGVjdXRhYmxlPVwidHJ1ZVwiIG5hbWU9XCJFeGFtcGxlOiBBbGdvU2VjOiBM
aXN0IEFzc29jaWF0ZWQgQXBwbGljYXRpb25zXCI+PGRvY3VtZW50YXRpb24+PCFbQ0RBVEFbR2l2
ZW4gYW4gSVAvSG9zdCBsaXN0IGFsbCB0aGUgYXBwbGljYXRpb25zIGFzc29jaWF0ZWQgd2l0aCBp
dCB0byBiZXR0ZXIgYXNzZXMgdGhlIHJpc2sgb2YgdGhlIGluY2lkZW50LiBJbnNlcnQgdGhlIHJl
c3VsdHMgaW50byB0aGUgXCJBbGdvU2VjIEFzc29jaWF0ZWQgQXBwbGljYXRpb25zXCIgRGF0YSBU
YWJsZSBhbmQgc3BlY2lmeSB0aGUgYXBwbGljYXRpb24ncyBjcml0aWNhbGl0eSBhbmQgYSBsaW5r
IHVybCB0byB0aGUgYXBwbGljYXRpb24gZGFzaGJvYXJkIG9uIEFsZ29TZWMgQnVzaW5lc3NGbG93
Ll1dPjwvZG9jdW1lbnRhdGlvbj48c3RhcnRFdmVudCBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwi
PjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMTg4Y25sdTwvb3V0Z29pbmc+PC9zdGFydEV2ZW50Pjxz
ZXJ2aWNlVGFzayBpZD1cIlNlcnZpY2VUYXNrXzFsNWo3cXlcIiBuYW1lPVwiQWxnb1NlYzogTGlz
dCBBc3NvY2lhdGVkIEFwcGxpY2F0Li4uXCIgcmVzaWxpZW50OnR5cGU9XCJmdW5jdGlvblwiPjxl
eHRlbnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1bmN0aW9uIHV1aWQ9XCJiN2M1NDliMi00MDUy
LTRiYzgtYjI1Zi02MTQwYzQzYjFlMWJcIj57XCJpbnB1dHNcIjp7fSxcInBvc3RfcHJvY2Vzc2lu
Z19zY3JpcHRcIjpcIiMjIEV4YW1wbGU6IEFsZ29TZWM6IExpc3QgQXNzb2NpYXRlZCBBcHBsaWNh
dGlvbnMgLSBwb3N0LXByb2Nlc3Npbmcgc2NyaXB0ICMjXFxuXFxuIyBFeGFtcGxlIG9mIGV4cGVj
dGVkIGFzc29jaWF0ZWQgYXBwbGljYXRpb25zIHF1ZXJ5XFxuXFxcIlxcXCJcXFwiXFxue1xcbiAg
J3N1Y2Nlc3MnOiBUcnVlLFxcbiAgJ2VudHJpZXMnOiBbXFxuICAgIHsnYXJ0aWZhY3RfaXAnOiAn
MTAuMC4wLjEnLCAnYXBwbGljYXRpb25fbmFtZSc6ICdIUiBDUk0nLCAnaXNfY3JpdGljYWwnOiBU
cnVlLCAnYnVzaW5lc3NmbG93X2Rhc2hib2FyZCc6ICdodHRwczovLzEwLjAuMC4xMi9CdXNpbmVz
c0Zsb3cvIyFhcHBsaWNhdGlvbi8yOTMvZGFzaGJvYXJkJ30sXFxuICAgIHsnYXJ0aWZhY3RfaXAn
OiAnMTI4LjAuMC4xMicsICdhcHBsaWNhdGlvbl9uYW1lJzogJ0Nyb24gTWFuYWdlcicsICdpc19j
cml0aWNhbCc6IEZhbHNlLCAnYnVzaW5lc3NmbG93X2Rhc2hib2FyZCc6ICdodHRwczovLzEwLjAu
MC4xMi9CdXNpbmVzc0Zsb3cvIyFhcHBsaWNhdGlvbi8yOTgvZGFzaGJvYXJkJ30sXFxuICBdXFxu
fVxcblxcXCJcXFwiXFxcIlxcblxcbiMgIEdsb2JhbHNcXG5GSUVMRF9OQU1FUyA9IFtcXG4gICAn
YXJ0aWZhY3RfaXAnLFxcbiAgICdhcHBsaWNhdGlvbl9uYW1lJyxcXG4gICAnaXNfY3JpdGljYWwn
LFxcbiAgICdidXNpbmVzc2Zsb3dfZGFzaGJvYXJkJyxcXG5dXFxuXFxuIyBQcm9jZXNzaW5nIGlm
IHRoZSBmdW5jdGlvbiBpcyBhIHN1Y2Nlc3NcXG5pZihyZXN1bHRzWydzdWNjZXNzJ10pOlxcbiAg
Zm9yIGVudHJ5IGluIHJlc3VsdHNbJ2VudHJpZXMnXTpcXG4gICAgIyBBZGQgUm93XFxuICAgIHJv
dyA9IGluY2lkZW50LmFkZFJvdyhcXFwiYWxnb3NlY19hc3NvY2lhdGVkX2FwcGxpY2F0aW9uc1xc
XCIpXFxuICAgIFxcbiAgICBmb3IgZmllbGRfbmFtZSBpbiBGSUVMRF9OQU1FUzpcXG4gICAgICB0
cnk6XFxuICAgICAgICBpZiBmaWVsZF9uYW1lID09IFxcXCJpc19jcml0aWNhbFxcXCI6XFxuICAg
ICAgICAgIHJvd1tmaWVsZF9uYW1lXSA9IGJvb2woZW50cnlbZmllbGRfbmFtZV0pXFxuICAgICAg
ICBlbHNlOlxcbiAgICAgICAgICByb3dfdmFsdWUgPSBoZWxwZXIuY3JlYXRlUmljaFRleHQoc3Ry
KGVudHJ5W2ZpZWxkX25hbWVdKSlcXG4gICAgICAgICAgcm93W2ZpZWxkX25hbWVdID0gcm93X3Zh
bHVlXFxuICAgICAgZXhjZXB0IEluZGV4RXJyb3I6XFxuICAgICAgICByb3dbZmllbGRfbmFtZV0g
PSBcXFwiTi9BXFxcIlxcblwiLFwicHJlX3Byb2Nlc3Npbmdfc2NyaXB0XCI6XCJpbnB1dHMuYWxn
b3NlY19ob3N0bmFtZSA9IGFydGlmYWN0LnZhbHVlXCJ9PC9yZXNpbGllbnQ6ZnVuY3Rpb24+PC9l
eHRlbnNpb25FbGVtZW50cz48aW5jb21pbmc+U2VxdWVuY2VGbG93XzE4OGNubHU8L2luY29taW5n
PjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMXhwM2w2aTwvb3V0Z29pbmc+PC9zZXJ2aWNlVGFzaz48
ZW5kRXZlbnQgaWQ9XCJFbmRFdmVudF8wMXRndTFyXCI+PGluY29taW5nPlNlcXVlbmNlRmxvd18x
eHAzbDZpPC9pbmNvbWluZz48L2VuZEV2ZW50PjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZs
b3dfMTg4Y25sdVwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1c
IlNlcnZpY2VUYXNrXzFsNWo3cXlcIi8+PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18x
eHAzbDZpXCIgc291cmNlUmVmPVwiU2VydmljZVRhc2tfMWw1ajdxeVwiIHRhcmdldFJlZj1cIkVu
ZEV2ZW50XzAxdGd1MXJcIi8+PHRleHRBbm5vdGF0aW9uIGlkPVwiVGV4dEFubm90YXRpb25fMWt4
eGl5dFwiPjx0ZXh0PlN0YXJ0IHlvdXIgd29ya2Zsb3cgaGVyZTwvdGV4dD48L3RleHRBbm5vdGF0
aW9uPjxhc3NvY2lhdGlvbiBpZD1cIkFzc29jaWF0aW9uXzFzZXVqNDhcIiBzb3VyY2VSZWY9XCJT
dGFydEV2ZW50XzE1NWFzeG1cIiB0YXJnZXRSZWY9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCIv
PjwvcHJvY2Vzcz48YnBtbmRpOkJQTU5EaWFncmFtIGlkPVwiQlBNTkRpYWdyYW1fMVwiPjxicG1u
ZGk6QlBNTlBsYW5lIGJwbW5FbGVtZW50PVwidW5kZWZpbmVkXCIgaWQ9XCJCUE1OUGxhbmVfMVwi
PjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiU3RhcnRFdmVudF8xNTVhc3htXCIgaWQ9
XCJTdGFydEV2ZW50XzE1NWFzeG1fZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjM2XCIgd2lk
dGg9XCIzNlwiIHg9XCIxNjJcIiB5PVwiMTg4XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpC
b3VuZHMgaGVpZ2h0PVwiMFwiIHdpZHRoPVwiOTBcIiB4PVwiMTU3XCIgeT1cIjIyM1wiLz48L2Jw
bW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5F
bGVtZW50PVwiVGV4dEFubm90YXRpb25fMWt4eGl5dFwiIGlkPVwiVGV4dEFubm90YXRpb25fMWt4
eGl5dF9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzBcIiB3aWR0aD1cIjEwMFwiIHg9XCI5
OVwiIHk9XCIyNTRcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVs
ZW1lbnQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4XCIgaWQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4X2Rp
XCI+PG9tZ2RpOndheXBvaW50IHg9XCIxNjlcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1c
IjIyMFwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjE1M1wiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRc
IiB5PVwiMjU0XCIvPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVt
ZW50PVwiU2VydmljZVRhc2tfMWw1ajdxeVwiIGlkPVwiU2VydmljZVRhc2tfMWw1ajdxeV9kaVwi
PjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiODBcIiB3aWR0aD1cIjEwMFwiIHg9XCIyNjZcIiB5PVwi
MTY2XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1c
IkVuZEV2ZW50XzAxdGd1MXJcIiBpZD1cIkVuZEV2ZW50XzAxdGd1MXJfZGlcIj48b21nZGM6Qm91
bmRzIGhlaWdodD1cIjM2XCIgd2lkdGg9XCIzNlwiIHg9XCI0NDlcIiB5PVwiMTg4XCIvPjxicG1u
ZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBcIiB4PVwi
NDY3XCIgeT1cIjIyN1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxi
cG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMTg4Y25sdVwiIGlkPVwi
U2VxdWVuY2VGbG93XzE4OGNubHVfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE5OFwiIHhzaTp0
eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMjY2XCIg
eHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9t
Z2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCIyMzJcIiB5PVwiMTg0XCIv
PjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5FZGdlIGJw
bW5FbGVtZW50PVwiU2VxdWVuY2VGbG93XzF4cDNsNmlcIiBpZD1cIlNlcXVlbmNlRmxvd18xeHAz
bDZpX2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCIzNjZcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50
XCIgeT1cIjIwNlwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjQ0OVwiIHhzaTp0eXBlPVwib21nZGM6
UG9pbnRcIiB5PVwiMjA2XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0
PVwiMTNcIiB3aWR0aD1cIjBcIiB4PVwiNDA3LjVcIiB5PVwiMTg0XCIvPjwvYnBtbmRpOkJQTU5M
YWJlbD48L2JwbW5kaTpCUE1ORWRnZT48L2JwbW5kaTpCUE1OUGxhbmU+PC9icG1uZGk6QlBNTkRp
YWdyYW0+PC9kZWZpbml0aW9ucz4iLCAidmVyc2lvbiI6IDEyfSwgImFjdGlvbnMiOiBbXX0sIHsi
d29ya2Zsb3dfaWQiOiAyNzksICJuYW1lIjogIkV4YW1wbGU6IEFsZ29TZWM6IElzb2xhdGUgSG9z
dCBGcm9tIE5ldHdvcmsiLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBsZV9hbGdvc2VjX2lz
b2xhdGVfaG9zdF9mcm9tX25ldHdvcmsiLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAiZGVz
Y3JpcHRpb24iOiAiQW4gZXhhbXBsZSB3b3JrZmxvdyBzaG93aW5nIGhvdyB0byBpc29sYXRlIFwi
SVAgQWRkcmVzc1wiIGFydGlmYWN0IHVzaW5nIEFsZ29TZWMgRmlyZUZsb3cuIFRoZSBGaXJlRmxv
dyBpc29sYXRpb24gcmVxdWVzdCBkZXRhaWxzIGFyZSB1cGRhdGVkIGluIHRoZSBpbmNpZGVudCdz
IERhdGEgVGFibGUgY2FsbGVkIFwiQWxnb1NlYyBJc29sYXRpb24gUmVxdWVzdHNcIiIsICJjcmVh
dG9yX2lkIjogImFsbW9nLmNvaGVuQGFsZ29zZWMuY29tIiwgImxhc3RfbW9kaWZpZWRfYnkiOiAi
YWxtb2cuY29oZW5AYWxnb3NlYy5jb20iLCAibGFzdF9tb2RpZmllZF90aW1lIjogMTU0Mzk1MTI4
MjI5MiwgImV4cG9ydF9rZXkiOiAiZXhhbXBsZV9hbGdvc2VjX2lzb2xhdGVfaG9zdF9mcm9tX25l
dHdvcmsiLCAidXVpZCI6ICI1YjgwMmE0Mi1mZWU1LTQ4MGYtYTQ5Yy0yMzBiZjVjZGNhZmIiLCAi
Y29udGVudCI6IHsid29ya2Zsb3dfaWQiOiAiZXhhbXBsZV9hbGdvc2VjX2lzb2xhdGVfaG9zdF9m
cm9tX25ldHdvcmsiLCAieG1sIjogIjw/eG1sIHZlcnNpb249XCIxLjBcIiBlbmNvZGluZz1cIlVU
Ri04XCI/PjxkZWZpbml0aW9ucyB4bWxucz1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0JQTU4v
MjAxMDA1MjQvTU9ERUxcIiB4bWxuczpicG1uZGk9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9C
UE1OLzIwMTAwNTI0L0RJXCIgeG1sbnM6b21nZGM9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9E
RC8yMDEwMDUyNC9EQ1wiIHhtbG5zOm9tZ2RpPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvREQv
MjAxMDA1MjQvRElcIiB4bWxuczpyZXNpbGllbnQ9XCJodHRwOi8vcmVzaWxpZW50LmlibS5jb20v
YnBtblwiIHhtbG5zOnhzZD1cImh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hXCIgeG1s
bnM6eHNpPVwiaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEtaW5zdGFuY2VcIiB0YXJn
ZXROYW1lc3BhY2U9XCJodHRwOi8vd3d3LmNhbXVuZGEub3JnL3Rlc3RcIj48cHJvY2VzcyBpZD1c
ImV4YW1wbGVfYWxnb3NlY19pc29sYXRlX2hvc3RfZnJvbV9uZXR3b3JrXCIgaXNFeGVjdXRhYmxl
PVwidHJ1ZVwiIG5hbWU9XCJFeGFtcGxlOiBBbGdvU2VjOiBJc29sYXRlIEhvc3QgRnJvbSBOZXR3
b3JrXCI+PGRvY3VtZW50YXRpb24+PCFbQ0RBVEFbQW4gZXhhbXBsZSB3b3JrZmxvdyBzaG93aW5n
IGhvdyB0byBpc29sYXRlIFwiSVAgQWRkcmVzc1wiIGFydGlmYWN0IHVzaW5nIEFsZ29TZWMgRmly
ZUZsb3cuIFRoZSBGaXJlRmxvdyBpc29sYXRpb24gcmVxdWVzdCBkZXRhaWxzIGFyZSB1cGRhdGVk
IGluIHRoZSBpbmNpZGVudCdzIERhdGEgVGFibGUgY2FsbGVkIFwiQWxnb1NlYyBJc29sYXRpb24g
UmVxdWVzdHNcIl1dPjwvZG9jdW1lbnRhdGlvbj48c3RhcnRFdmVudCBpZD1cIlN0YXJ0RXZlbnRf
MTU1YXN4bVwiPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMGZza3ljMzwvb3V0Z29pbmc+PC9zdGFy
dEV2ZW50PjxzZXJ2aWNlVGFzayBpZD1cIlNlcnZpY2VUYXNrXzBobWEwMWZcIiBuYW1lPVwiQWxn
b1NlYzogSXNvbGF0ZSBIb3N0IEZyb20gTmV0d29yLi4uXCIgcmVzaWxpZW50OnR5cGU9XCJmdW5j
dGlvblwiPjxleHRlbnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1bmN0aW9uIHV1aWQ9XCJkZjI5
NzllZC0wMTc2LTRjOTgtOWQxZS1lMzNmYjllYjY3MGVcIj57XCJpbnB1dHNcIjp7fSxcInBvc3Rf
cHJvY2Vzc2luZ19zY3JpcHRcIjpcIiMjICBFeGFtcGxlOiBBbGdvU2VjOiBJc29sYXRlIGhvc3Qg
ZnJvbSBuZXR3b3JrIC0gcG9zdC1wcm9jZXNzaW5nIHNjcmlwdCAjI1xcblxcbiMgRXhhbXBsZSBv
ZiBleHBlY3RlZCBpc29sYXRpb24gcmVxdWVzdCByZXN1bHRcXG5cXFwiXFxcIlxcXCJcXG57J2hv
c3RuYW1lJzogJzEwLjAuMC4xJywgJ3VybCc6ICdodHRwczovLzE5Mi4xNjguNTguMTI5L0ZpcmVG
bG93L1RpY2tldC9EaXNwbGF5Lmh0bWw/aWQ9NTI1Myd9XFxuXFxcIlxcXCJcXFwiXFxuXFxuIyAg
R2xvYmFsc1xcbkVOVFJZX1RPX0RBVEFUQUJMRV9NQVAgPSB7XFxuICAgXFxcImlkXFxcIjogXFxc
ImlkXFxcIixcXG4gICBcXFwiaG9zdG5hbWVcXFwiOiBcXFwiaG9zdG5hbWVcXFwiLFxcbiAgIFxc
XCJ1cmxcXFwiOiBcXFwiY2hhbmdlX3JlcXVlc3RfdXJsXFxcIixcXG59XFxuXFxuIyBQcm9jZXNz
aW5nIGlmIHRoZSBmdW5jdGlvbiBpcyBhIHN1Y2Nlc3NcXG4jIGlmKHJlc3VsdHMuc3VjY2Vzcyk6
XFxuZW50cnkgPSByZXN1bHRzXFxuIyBBZGQgUm93XFxucm93ID0gaW5jaWRlbnQuYWRkUm93KFxc
XCJhbGdvc2VjX2lzb2xhdGlvbl9yZXF1ZXN0c1xcXCIpXFxuXFxuZm9yIGZpZWxkX25hbWUgaW4g
RU5UUllfVE9fREFUQVRBQkxFX01BUDpcXG5cXG4gIGlmIGVudHJ5LmdldChmaWVsZF9uYW1lKSBp
cyBOb25lOlxcbiAgICByb3dbRU5UUllfVE9fREFUQVRBQkxFX01BUFtmaWVsZF9uYW1lXV0gPSBc
XFwiTi9BXFxcIlxcblxcbiAgdHJ5OlxcbiAgICBpZiBmaWVsZF9uYW1lID09IFxcXCJpZFxcXCI6
XFxuICAgICAgcm93W0VOVFJZX1RPX0RBVEFUQUJMRV9NQVBbZmllbGRfbmFtZV1dID0gaW50KGVu
dHJ5W2ZpZWxkX25hbWVdKVxcbiAgICBlbHNlOlxcbiAgICAgIHJvd192YWx1ZSA9IGhlbHBlci5j
cmVhdGVSaWNoVGV4dChzdHIoZW50cnlbZmllbGRfbmFtZV0pKVxcbiAgICAgIHJvd1tFTlRSWV9U
T19EQVRBVEFCTEVfTUFQW2ZpZWxkX25hbWVdXSA9IHJvd192YWx1ZVxcbiAgZXhjZXB0IEluZGV4
RXJyb3I6XFxuICAgIHJvd1tFTlRSWV9UT19EQVRBVEFCTEVfTUFQW2ZpZWxkX25hbWVdXSA9IFxc
XCJOL0FcXFwiXCIsXCJwcmVfcHJvY2Vzc2luZ19zY3JpcHRcIjpcImlucHV0cy5hbGdvc2VjX2hv
c3RuYW1lID0gYXJ0aWZhY3QudmFsdWVcIixcInJlc3VsdF9uYW1lXCI6XCJcIn08L3Jlc2lsaWVu
dDpmdW5jdGlvbj48L2V4dGVuc2lvbkVsZW1lbnRzPjxpbmNvbWluZz5TZXF1ZW5jZUZsb3dfMGZz
a3ljMzwvaW5jb21pbmc+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18xMzl5djAwPC9vdXRnb2luZz48
L3NlcnZpY2VUYXNrPjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3dfMGZza3ljM1wiIHNv
dXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlNlcnZpY2VUYXNrXzBo
bWEwMWZcIi8+PGVuZEV2ZW50IGlkPVwiRW5kRXZlbnRfMDVtYjB0ZFwiPjxpbmNvbWluZz5TZXF1
ZW5jZUZsb3dfMTM5eXYwMDwvaW5jb21pbmc+PC9lbmRFdmVudD48c2VxdWVuY2VGbG93IGlkPVwi
U2VxdWVuY2VGbG93XzEzOXl2MDBcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFza18waG1hMDFmXCIg
dGFyZ2V0UmVmPVwiRW5kRXZlbnRfMDVtYjB0ZFwiLz48dGV4dEFubm90YXRpb24gaWQ9XCJUZXh0
QW5ub3RhdGlvbl8xa3h4aXl0XCI+PHRleHQ+U3RhcnQgeW91ciB3b3JrZmxvdyBoZXJlPC90ZXh0
PjwvdGV4dEFubm90YXRpb24+PGFzc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0OFwi
IHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlRleHRBbm5vdGF0
aW9uXzFreHhpeXRcIi8+PC9wcm9jZXNzPjxicG1uZGk6QlBNTkRpYWdyYW0gaWQ9XCJCUE1ORGlh
Z3JhbV8xXCI+PGJwbW5kaTpCUE1OUGxhbmUgYnBtbkVsZW1lbnQ9XCJ1bmRlZmluZWRcIiBpZD1c
IkJQTU5QbGFuZV8xXCI+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTdGFydEV2ZW50
XzE1NWFzeG1cIiBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4bV9kaVwiPjxvbWdkYzpCb3VuZHMgaGVp
Z2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjE2MlwiIHk9XCIxODhcIi8+PGJwbW5kaTpCUE1O
TGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIwXCIgd2lkdGg9XCI5MFwiIHg9XCIxNTdcIiB5
PVwiMjIzXCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpC
UE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCIgaWQ9XCJUZXh0
QW5ub3RhdGlvbl8xa3h4aXl0X2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzMFwiIHdpZHRo
PVwiMTAwXCIgeD1cIjk5XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpC
UE1ORWRnZSBicG1uRWxlbWVudD1cIkFzc29jaWF0aW9uXzFzZXVqNDhcIiBpZD1cIkFzc29jaWF0
aW9uXzFzZXVqNDhfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE2OVwiIHhzaTp0eXBlPVwib21n
ZGM6UG9pbnRcIiB5PVwiMjIwXCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMTUzXCIgeHNpOnR5cGU9
XCJvbWdkYzpQb2ludFwiIHk9XCIyNTRcIi8+PC9icG1uZGk6QlBNTkVkZ2U+PGJwbW5kaTpCUE1O
U2hhcGUgYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFza18waG1hMDFmXCIgaWQ9XCJTZXJ2aWNlVGFz
a18waG1hMDFmX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCI4MFwiIHdpZHRoPVwiMTAwXCIg
eD1cIjI0Ni40MDgzNTI2NjgyMTM0N1wiIHk9XCIxNjUuNjQ3MzMxNzg2NTQyOVwiLz48L2JwbW5k
aTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18w
ZnNreWMzXCIgaWQ9XCJTZXF1ZW5jZUZsb3dfMGZza3ljM19kaVwiPjxvbWdkaTp3YXlwb2ludCB4
PVwiMTk4XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PG9tZ2RpOndheXBv
aW50IHg9XCIyNDZcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48YnBtbmRp
OkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1cIjIy
MlwiIHk9XCIxODQuNVwiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTkVkZ2U+PGJw
bW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJFbmRFdmVudF8wNW1iMHRkXCIgaWQ9XCJFbmRF
dmVudF8wNW1iMHRkX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzNlwiIHdpZHRoPVwiMzZc
IiB4PVwiMzg3LjEyMTE2MDQwOTU1NjNcIiB5PVwiMTg4XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxv
bWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBcIiB4PVwiNDA1LjEyMTE2MDQwOTU1
NjNcIiB5PVwiMjI3XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hhcGU+PGJw
bW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18xMzl5djAwXCIgaWQ9XCJT
ZXF1ZW5jZUZsb3dfMTM5eXYwMF9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMzQ2XCIgeHNpOnR5
cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIzODdcIiB4
c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21n
ZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1cIjM2Ni41XCIgeT1cIjE4NFwi
Lz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTkVkZ2U+PC9icG1uZGk6QlBNTlBsYW5l
PjwvYnBtbmRpOkJQTU5EaWFncmFtPjwvZGVmaW5pdGlvbnM+IiwgInZlcnNpb24iOiAxNn0sICJh
Y3Rpb25zIjogW119XSwgInJvbGVzIjogW10sICJ3b3Jrc3BhY2VzIjogW10sICJmdW5jdGlvbnMi
OiBbeyJpZCI6IDM3NCwgIm5hbWUiOiAiYWxnb3NlY19jaGVja19ob3N0X2ludGVybmV0X2Nvbm5l
Y3Rpdml0eSIsICJkaXNwbGF5X25hbWUiOiAiQWxnb1NlYzogQ2hlY2sgSG9zdCBJbnRlcm5ldCBD
b25uZWN0aXZpdHkiLCAiZGVzY3JpcHRpb24iOiB7ImZvcm1hdCI6ICJ0ZXh0IiwgImNvbnRlbnQi
OiAiR2l2ZW4gYSBob3N0bmFtZSwgcmV0dXJuIHdoZXRoZXIgb3Igbm90IGl0IGhhcyBpbnRlcm5l
dCBhY2Nlc3MuXG5cblRoZSBBbGdvU2VjIGludGVncmF0aW9uIHdpbGwgY2hlY2sgaWYgYSBnaXZl
biBob3N0L0lQIGlzIGhhcyBhY2Nlc3MgdG8gYSBwdWJsaWMga25vd24gaW50ZXJuZXQgbm9kZSBz
dWNoIGFzIDguOC44LjgifSwgImRlc3RpbmF0aW9uX2hhbmRsZSI6ICJhbGdvc2VjIiwgImV4cG9y
dF9rZXkiOiAiYWxnb3NlY19jaGVja19ob3N0X2ludGVybmV0X2Nvbm5lY3Rpdml0eSIsICJ1dWlk
IjogImU2ODA5MmZjLTZkODItNGQ2OC1iOWQ2LTA0NTVmYjExZWZiNSIsICJ2ZXJzaW9uIjogMiwg
ImNyZWF0b3IiOiB7ImlkIjogOTQsICJ0eXBlIjogInVzZXIiLCAibmFtZSI6ICJhbG1vZy5jb2hl
bkBhbGdvc2VjLmNvbSIsICJkaXNwbGF5X25hbWUiOiAiQWxtb2cgQ29oZW4ifSwgImxhc3RfbW9k
aWZpZWRfYnkiOiB7ImlkIjogOTQsICJ0eXBlIjogInVzZXIiLCAibmFtZSI6ICJhbG1vZy5jb2hl
bkBhbGdvc2VjLmNvbSIsICJkaXNwbGF5X25hbWUiOiAiQWxtb2cgQ29oZW4ifSwgImxhc3RfbW9k
aWZpZWRfdGltZSI6IDE1NDM1NTUzMjI5NDgsICJ2aWV3X2l0ZW1zIjogW3sic3RlcF9sYWJlbCI6
IG51bGwsICJzaG93X2lmIjogbnVsbCwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJmaWVsZF90
eXBlIjogIl9fZnVuY3Rpb24iLCAiY29udGVudCI6ICJmZDliMzY1MC1mNzlmLTQ4NjItOGYwZC1h
M2RmZWNhMTY2MGYiLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNlfV0sICJ3b3JrZmxvd3MiOiBb
eyJ3b3JrZmxvd19pZCI6IDMwMSwgIm5hbWUiOiAiRXhhbXBsZTogQWxnb1NlYzogQ2hlY2sgSG9z
dCBJbnRlcm5ldCBDb25uZWN0aXZpdHkiLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBsZV9h
bGdvc2VjX2NoZWNrX2hvc3RfaW50ZXJuZXRfY29ubmVjdGl2aXR5IiwgIm9iamVjdF90eXBlIjog
ImFydGlmYWN0IiwgImRlc2NyaXB0aW9uIjogbnVsbCwgInV1aWQiOiBudWxsLCAiYWN0aW9ucyI6
IFtdfV19LCB7ImlkIjogMzU2LCAibmFtZSI6ICJhbGdvc2VjX2lzb2xhdGVfaG9zdF9mcm9tX25l
dHdvcmsiLCAiZGlzcGxheV9uYW1lIjogIkFsZ29TZWM6IElzb2xhdGUgSG9zdCBGcm9tIE5ldHdv
cmsiLCAiZGVzY3JpcHRpb24iOiB7ImZvcm1hdCI6ICJ0ZXh0IiwgImNvbnRlbnQiOiAiQ3JlYXRl
IGEgdHJhZmZpYyBjaGFuZ2UgcmVxdWVzdCB3aXRoIEFsZ29TZWMncyBGaXJlRmxvdyB0byBpc29s
YXRlIGEgaG9zdCBmcm9tIHRoZSBuZXR3b3JrLiBUaGVuIEFsZ29TZWMncyBBY3RpdmVDaGFuZ2Ug
dGhlbiBhdXRvbWF0aWNhbGx5IGltcGxlbWVudHMgcnVsZSBjaGFuZ2VzIGFjcm9zcyBhbGwgZmly
ZXdhbGxzIGluIHRoZSBuZXR3b3JrIHRvIGlzb2xhdGUgdGhlIGhvc3QgY29tcGxldGVseS4ifSwg
ImRlc3RpbmF0aW9uX2hhbmRsZSI6ICJhbGdvc2VjIiwgImV4cG9ydF9rZXkiOiAiYWxnb3NlY19p
c29sYXRlX2hvc3RfZnJvbV9uZXR3b3JrIiwgInV1aWQiOiAiZGYyOTc5ZWQtMDE3Ni00Yzk4LTlk
MWUtZTMzZmI5ZWI2NzBlIiwgInZlcnNpb24iOiAzLCAiY3JlYXRvciI6IHsiaWQiOiA5NCwgInR5
cGUiOiAidXNlciIsICJuYW1lIjogImFsbW9nLmNvaGVuQGFsZ29zZWMuY29tIiwgImRpc3BsYXlf
bmFtZSI6ICJBbG1vZyBDb2hlbiJ9LCAibGFzdF9tb2RpZmllZF9ieSI6IHsiaWQiOiA5NCwgInR5
cGUiOiAidXNlciIsICJuYW1lIjogImFsbW9nLmNvaGVuQGFsZ29zZWMuY29tIiwgImRpc3BsYXlf
bmFtZSI6ICJBbG1vZyBDb2hlbiJ9LCAibGFzdF9tb2RpZmllZF90aW1lIjogMTU0MzUxMDQ1NjQx
MCwgInZpZXdfaXRlbXMiOiBbeyJzdGVwX2xhYmVsIjogbnVsbCwgInNob3dfaWYiOiBudWxsLCAi
ZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJjb250
ZW50IjogImZkOWIzNjUwLWY3OWYtNDg2Mi04ZjBkLWEzZGZlY2ExNjYwZiIsICJzaG93X2xpbmtf
aGVhZGVyIjogZmFsc2V9XSwgIndvcmtmbG93cyI6IFt7IndvcmtmbG93X2lkIjogMjc5LCAibmFt
ZSI6ICJFeGFtcGxlOiBBbGdvU2VjOiBJc29sYXRlIEhvc3QgRnJvbSBOZXR3b3JrIiwgInByb2dy
YW1tYXRpY19uYW1lIjogImV4YW1wbGVfYWxnb3NlY19pc29sYXRlX2hvc3RfZnJvbV9uZXR3b3Jr
IiwgIm9iamVjdF90eXBlIjogImFydGlmYWN0IiwgImRlc2NyaXB0aW9uIjogbnVsbCwgInV1aWQi
OiBudWxsLCAiYWN0aW9ucyI6IFtdfV19LCB7ImlkIjogMzc2LCAibmFtZSI6ICJhbGdvc2VjX2xp
c3RfYXNzb2NpYXRlZF9hcHBsaWNhdGlvbnMiLCAiZGlzcGxheV9uYW1lIjogIkFsZ29TZWM6IExp
c3QgQXNzb2NpYXRlZCBBcHBsaWNhdGlvbnMiLCAiZGVzY3JpcHRpb24iOiB7ImZvcm1hdCI6ICJ0
ZXh0IiwgImNvbnRlbnQiOiAiR2l2ZW4gYW4gSVAvSG9zdCBsaXN0IGFsbCBhc3NvY2lhdGVkIEJ1
c2luZXNzRmxvdyBhcHBsaWNhdGlvbnMuXG5Qcm92aWRlcyBiZXR0ZXIgYXNzZXNzbWVudCB0aGUg
cmlzayBvZiB0aGUgaW5jaWRlbnQuIFRoZSByZXN1bHRzIGNvbnRhaW4gd2hldGhlciBvciBub3Qg
aXQncyBhIGNyaXRpY2FsIGFwcGxpY2F0aW9uIGFuZCBhIHVybCBsaW5rIHRvIHRoZSBhcHBsaWNh
dGlvbiBvbiB0aGUgQWxnb1NlYyBCdXNpbmVzc0Zsb3cgZGFzaGJvYXJkLiJ9LCAiZGVzdGluYXRp
b25faGFuZGxlIjogImFsZ29zZWMiLCAiZXhwb3J0X2tleSI6ICJhbGdvc2VjX2xpc3RfYXNzb2Np
YXRlZF9hcHBsaWNhdGlvbnMiLCAidXVpZCI6ICJiN2M1NDliMi00MDUyLTRiYzgtYjI1Zi02MTQw
YzQzYjFlMWIiLCAidmVyc2lvbiI6IDMsICJjcmVhdG9yIjogeyJpZCI6IDk0LCAidHlwZSI6ICJ1
c2VyIiwgIm5hbWUiOiAiYWxtb2cuY29oZW5AYWxnb3NlYy5jb20iLCAiZGlzcGxheV9uYW1lIjog
IkFsbW9nIENvaGVuIn0sICJsYXN0X21vZGlmaWVkX2J5IjogeyJpZCI6IDk0LCAidHlwZSI6ICJ1
c2VyIiwgIm5hbWUiOiAiYWxtb2cuY29oZW5AYWxnb3NlYy5jb20iLCAiZGlzcGxheV9uYW1lIjog
IkFsbW9nIENvaGVuIn0sICJsYXN0X21vZGlmaWVkX3RpbWUiOiAxNTQzNTk2ODY0MTE3LCAidmll
d19pdGVtcyI6IFt7InN0ZXBfbGFiZWwiOiBudWxsLCAic2hvd19pZiI6IG51bGwsICJlbGVtZW50
IjogImZpZWxkX3V1aWQiLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgImNvbnRlbnQiOiAi
ZmQ5YjM2NTAtZjc5Zi00ODYyLThmMGQtYTNkZmVjYTE2NjBmIiwgInNob3dfbGlua19oZWFkZXIi
OiBmYWxzZX1dLCAid29ya2Zsb3dzIjogW3sid29ya2Zsb3dfaWQiOiAzMDIsICJuYW1lIjogIkV4
YW1wbGU6IEFsZ29TZWM6IExpc3QgQXNzb2NpYXRlZCBBcHBsaWNhdGlvbnMiLCAicHJvZ3JhbW1h
dGljX25hbWUiOiAiZXhhbXBsZV9hbGdvc2VjX2xpc3RfYXNzb2NpYXRlZF9hcHBsaWNhdGlvbnMi
LCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAiZGVzY3JpcHRpb24iOiBudWxsLCAidXVpZCI6
IG51bGwsICJhY3Rpb25zIjogW119XX1dfQ==
"""
) |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
class ImageMaker:
def __init__(self,img):
plt.axis('off')
plt.imshow(mpimg.imread(img))
def addPoint(self,x,y):
plt.scatter([x],[y])
def saveImage(self):
plt.savefig('/var/www/html/images/test.png')
plt.clf()
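# Usage sketch (the input path is a placeholder; saveImage() always writes to the
# hardcoded path /var/www/html/images/test.png):
#   maker = ImageMaker('/path/to/input.png')
#   maker.addPoint(120, 80)
#   maker.saveImage()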
|
from trinsicokapi.proto.okapi.metadata import MetadataResponse, MetadataRequest
from trinsicokapi.wrapper import _typed_wrap_and_call
def get_metadata() -> MetadataResponse:
response = _typed_wrap_and_call(
"okapi_metadata", MetadataRequest(), MetadataResponse
)
return response
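# Usage sketch (assumes the native okapi library bundled with trinsicokapi is available):
#   metadata = get_metadata()
#   print(metadata)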
|
#!/usr/bin/python3.5
# I don't believe in license.
# You can do whatever you want with this program.
def doWork():
while True:
host = q.get()
resolve( host )
q.task_done()
def resolve( host ):
if t_multiproc['n_current']%5000 == 0:
save(False)
sys.stdout.write( 'progress: %d/%d\r' % (t_multiproc['n_current'],t_multiproc['n_total']) )
t_multiproc['n_current'] = t_multiproc['n_current'] + 1
try:
ip = socket.gethostbyname( host )
t_alive[host] = ip
# print(ip)
except Exception as e:
t_dead.append( host )
# sys.stdout.write( "%s[-] error occurred: %s (%s)%s\n" % (fg('red'),e,host,attr(0)) )
def save(alts):
if alts:
fp = open( 'h_alts', 'w' )
for h in t_alts:
if len(h):
fp.write( "%s\n" % h )
fp.close()
fp = open( 'h_alive', 'w' )
for h in sorted(t_alive.keys()):
if len(h):
# fp.write( "%s:%s\n" % (h,t_alive[h]) )
fp.write( "%s\n" % h )
fp.close()
fp = open( 'h_dead', 'w' )
for h in t_dead:
if len(h):
fp.write( "%s\n" % h )
fp.close()
def occalts( t_array ):
t_occ = []
l = len(t_array)
# print(l)
for i in range(0,l):
for j in range(0,l):
if i == j:
continue
maxmax = t_array[i]
for nn in range(0,maxmax+1):
t_array2 = t_array.copy()
t_array2[i] = nn
max = t_array[j]
print(max)
for n in range(0,max+1):
for pad in range(1,2):
print(pad)
t_array3 = t_array2.copy()
t_array3[j] = str(n).rjust(pad,'0')
print(t_array3)
# print(t_array2)
t_occ.append( t_array3 )
# break
print(t_occ)
print(len(t_occ))
return t_occ
def generateAlts( host, current, minnum, multiplicator ):
index = 0
matches = re.compile( '[0-9]+' ).finditer( host )
temp = list(matches)
n_matches = len(temp)
matches = iter(temp)
# print("\nhost %s" % host)
# print("CURRENT %d" % current)
# print("n_matches %d" % n_matches)
t_alts.append( host )
for m in matches:
# print("INDEX %d" % index)
# print(m.group())
if index > current:
# print("index != current NO SKIP")
n_start = 0
n_end = int( int(m.group()) * multiplicator )
if n_end < minnum:
n_end = minnum
# n_end = int(m.group())
n_end = n_end
# print(n_end)
p_start = m.start()
p_end = m.end()
p_len = p_end - p_start
s_prefix = host[0:p_start]
s_suffix = host[p_end:]
for i in range(n_start,n_end):
new_h = s_prefix + str(i) + s_suffix
generateAlts( new_h, index, minnum, multiplicator )
# else:
# if not host in t_alts:
# print("index = current SKIP")
index = index + 1
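# generateAlts() substitutes every numeric run in a hostname: for 'api2.example.com' with
# minnum=3 and multiplicator=1 (hypothetical values), the '2' is replaced by 0..2, producing
# api0/api1/api2.example.com; each generated name is then recursed on for later numeric runs,
# and the original host is appended as well, so duplicates are possible.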
def getAlts( minnum, multiplicator, host ):
sys.stdout.write( 'progress: %d/%d\r' % (t_multiproc['n_current'],t_multiproc['n_total']) )
t_multiproc['n_current'] = t_multiproc['n_current'] + 1
# for host in t_hosts:
generateAlts( host, -1, minnum, multiplicator )
# print(sorted(t_alts))
# print( len(t_alts) )
# exit()
import os
import sys
import re
import socket
import argparse
from functools import partial
from colored import fg, bg, attr
from threading import Thread
from queue import Queue
from multiprocessing.dummy import Pool
parser = argparse.ArgumentParser()
parser.add_argument( "-o","--host",help="set hosts file list" )
parser.add_argument( "-t","--threads",help="threads, default 10" )
parser.add_argument( "-n","--minnum",help="minimum n, default 10" )
parser.add_argument( "-m","--multi",help="multiplicator, default 1" )
args = parser.parse_args()
if args.minnum:
_minnum = int(args.minnum)
else:
_minnum = 10
if args.multi:
_multiplicator = int(args.multi)
else:
_multiplicator = 1
if args.threads:
_threads = int(args.threads)
else:
_threads = 10
t_hosts = []
if args.host:
if os.path.isfile(args.host):
fp = open( args.host, 'r' )
t_hosts = fp.read().strip().split("\n")
fp.close()
n_host = len(t_hosts)
if not n_host:
parser.error( 'hosts list missing' )
sys.stdout.write( '%s[+] %d hosts loaded: %s%s\n' % (fg('green'),n_host,args.host,attr(0)) )
sys.stdout.write( '[+] generating alts...\n' )
t_alive = {}
t_dead = []
t_alts = []
t_multiproc = {
'n_current': 0,
'n_total': n_host
}
pool = Pool( 20 )
pool.map( partial(getAlts,_minnum,_multiplicator), t_hosts )
pool.close()
pool.join()
# getAlts( t_hosts )
n_alt = len(t_alts)
save(True)
sys.stdout.write( '%s[+] %d alts generated%s\n' % (fg('green'),n_alt,attr(0)) )
sys.stdout.write( '[+] resolving...\n' )
t_multiproc = {
'n_current': 0,
'n_total': n_alt
}
q = Queue( _threads*2 )
for i in range(_threads):
t = Thread( target=doWork )
t.daemon = True
t.start()
try:
for host in t_alts:
q.put( host )
q.join()
except KeyboardInterrupt:
sys.exit(1)
# print( t_alive)
# print( t_dead)
sys.stdout.write( '%s[+] %d hosts alive, %d dead hosts%s\n' % (fg('green'),len(t_alive),len(t_dead),attr(0)) )
save(False)
exit()
|
import datetime as dt
import json
import multiprocessing as mul
import os
from sqlite3 import dbapi2 as sqlite
from unittest import skip, TestCase
import numpy as np
import pandas as pd
from requests.exceptions import ConnectionError
import responses
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.session import Session
from gscap import gps
class TestGPS(TestCase):
"""test class for
"""
@classmethod
def setUpClass(cls):
"""perform at test class initialization
"""
now = dt.datetime(
year=2005, month=5, day=5, hour=12
)
cls.day = dt.datetime(year=now.year, month=now.month, day=now.day)
cls.time = now.time()
cls.lat = 32.3788
cls.lon = -84.90685
cls.zipcode = 31905
cls.gcname = 'sqlite+pysqlite:///test_gps_cache.sqlite'
cls.del_cache()
engine = create_engine(cls.gcname, module=sqlite)
gps.Base.metadata.create_all(engine)
cls.session = sessionmaker(bind=engine)()
cls.cache_kwargs = {'engine': cls.gcname}
gps.CONNECTION_RESET_ATTEMPTS = 1
gps.CONNECTION_WAIT_TIME = 0
cls.home_cluster = cls.gen_cluster(0, 0, list(range(1, 7)) + list(range(18, 24)))
cls.work_cluster = cls.gen_cluster(0.5, 0.5, list(range(8, 12)) + list(range(13, 17)))
home_and_work = pd.concat(
[cls.home_cluster, cls.work_cluster],
sort=False)\
.sort_values(by='ts')\
.reset_index(drop=True)
cls.home_and_work = gps.process_velocities(home_and_work)
@classmethod
def del_cache(cls):
fn = cls.gcname.replace('sqlite+pysqlite:///', '')
if os.path.exists(fn):
os.remove(fn)
@classmethod
def mock_gmap_response(cls):
fn = 'mock_gmap_response'
if not os.path.exists(fn):
fn = os.path.join('tests', fn)
with open(fn, 'r') as f:
mock_response = f.read()
return mock_response
@classmethod
def gen_cluster(cls, lat, lon, hours):
t = []
for d in range(1, 7):
for h in hours:
for m in range(60):
t.append(dict(
ts=dt.datetime(
year=2019,
month=1,
day=d,
hour=h,
minute=m
),
lat=lat+np.random.uniform(-0.0002, 0.0002),
lon=lon+np.random.uniform(-0.0002, 0.0002)
))
return pd.DataFrame(t, index=list(range(len(t))))
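    # gen_cluster emits one synthetic GPS fix per minute, for the given hours of days 1-6
    # of January 2019, jittered by up to 0.0002 degrees around (lat, lon).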
@property
def clusters(self):
fn = 'some_clusters.csv'
if not os.path.exists(fn):
fn = os.path.join('tests', fn)
return pd.read_csv(fn)
@property
def entries(self):
fn = 'some_entries.csv'
if not os.path.exists(fn):
fn = os.path.join('tests', fn)
df = pd.read_csv(fn, parse_dates=[
'time_in', 'midpoint', 'time_out'
])
df.duration = pd.to_timedelta(df.duration)
return df
@property
def gps_records(self):
fn = 'some_gps.csv'
if not os.path.exists(fn):
fn = os.path.join('tests', fn)
return pd.read_csv(fn, parse_dates=['ts'])
@classmethod
def tearDownClass(cls):
"""perform when all tests are complete
"""
cls.del_cache()
def setUp(self):
"""perform before each unittest"""
pass
def tearDown(self):
"""perform after each unittest
"""
t = self.session.query(gps.PlaceRequest).all()
for ti in t:
self.session.delete(ti)
self.session.commit()
responses.reset()
def test_gpsrecords_named_tuple_conversion(self):
d = dt.datetime(year=2019, month=1, day=1)
gpsr = [
gps.GPS(1, 1, d)
]
rec = gps.gpsr_to_records(gpsr)
assert isinstance(rec, pd.DataFrame)
assert len(rec) == 1
assert all(c in rec.columns for c in [
'lat', 'lon', 'ts'
])
rec = rec.iloc[0]
assert rec.lat == 1
assert rec.lon == 1
assert rec.ts == d
def test_records_to_gpsr(self):
r = pd.DataFrame(columns=['lat', 'lon', 'ts'])
d = dt.datetime(year=2019, month=1, day=1)
r.loc[0] = (1, 1, d)
gpsr = gps.records_to_gpsr(r)
assert isinstance(gpsr, list)
assert len(gpsr) == 1
assert isinstance(gpsr[0], gps.GPS)
def test_gps_dbscan_accepts_both_types(self):
r = pd.DataFrame(columns=['lat', 'lon', 'ts'])
d = dt.datetime(year=2019, month=1, day=1)
r.loc[0] = (1, 1, d)
l, c = gps.gps_dbscan(r)
assert isinstance(l, list) and isinstance(c, list)
assert len(l) == 1 and len(c) == 0
r = gps.records_to_gpsr(r)
l, c = gps.gps_dbscan(r)
assert isinstance(l, list) and isinstance(c, list)
assert len(l) == 1 and len(c) == 0
@skip
def test_takeout_parser(self):
fn = 'location_history.json'
results = gps.prep_takeout_data(fn)
self.assertTrue(isinstance(results, pd.DataFrame))
@responses.activate
def test_yelp_call(self):
base = 'https://api.yelp.com/v3/businesses/search'
url = f'{base}?latitude={self.lat}&longitude={self.lon}&radius=50&sort_by=best_match'
responses.add(
responses.GET,
url,
body='{"businesses": [], "total": 0, "region": {"center": {"longitude": -84.90685, "latitude": 32.3788}}}',
status=200
)
t = gps.yelp_call(gps.PlaceRequest(
lat=self.lat,
lon=self.lon,
radius=50,
rankby=gps.YelpRankBy.BEST_MATCH
))
self.assertTrue(isinstance(t, gps.PlaceRequest))
j = json.loads(t.content)
self.assertTrue(isinstance(j, dict))
self.assertTrue('businesses' in j.keys())
def test_parse_yelp_response(self):
self.assertRaises(TypeError, gps.parse_yelp_response, 1)
t = gps.parse_yelp_response('nan')
self.assertTrue({'name', 'rank_order', 'categories', 'major_categories'} == set(t.keys()))
self.assertTrue(t['name'] == 'not found')
self.assertTrue(t['rank_order'] == -1)
self.assertTrue(t['major_categories'] == 'none')
t = gps.parse_yelp_response('}{')
self.assertTrue(t['major_categories'] == 'JSONDecodeError')
responses.reset()
t = json.dumps(dict(
businesses=[
dict(
name='test',
categories=[
dict(alias='3dprinting')
]
)
]))
t = gps.parse_yelp_response(t)
self.assertTrue(t['name'] == 'test')
self.assertTrue(t['major_categories'] == 'personal_services')
@responses.activate
def test_gmap_call(self):
url = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=32.3788%2C-84.90685&maxprice' \
'=None&minprice=None&radius=50&rankby=prominence&key=AIza'
responses.add(
responses.GET,
url=url,
body=self.mock_gmap_response()
)
r = gps.PlaceRequest(
lat=self.lat,
lon=self.lon,
radius=50,
rankby=gps.GmapsRankBy.PROMINENCE
)
t = gps.gmap_call(r)
self.assertTrue(isinstance(t, gps.PlaceRequest))
def test_gmap_response(self):
c = self.mock_gmap_response()
c = gps.parse_gmap_response(c)
self.assertTrue(c['rank_order'] == 0)
self.assertTrue(c['name'] == 'c')
self.assertTrue(c['categories'] == 'campground')
self.assertTrue(c['major_categories'] == 'lodging')
def test_gmapping(self):
t = gps.gmapping('campground')
self.assertTrue(t == {'lodging'})
t = gps.gmapping(pd.Series(['campground']))
self.assertTrue(t == {'lodging'})
t = gps.gmapping('Expecting value: d')
self.assertTrue(t == {'JSON Decode Error'})
t = gps.gmapping('.')
self.assertTrue(t == {'undefined category'})
@responses.activate
def test_process_request(self):
request = gps.PlaceRequest(
lat=self.lat,
lon=self.lon,
source=gps.ApiSource.YELP,
rankby=gps.YelpRankBy.BEST_MATCH,
radius=50
)
progqu, reqque, resque = mul.Queue(), mul.Queue(), mul.Queue()
args = (request, True, False, progqu, reqque, resque)
resque.put(dict(
pid=os.getpid(),
response=request
))
t = gps.process_request(args)
self.assertTrue(t['hits'] == 1 and t['misses'] == 0)
self.assertTrue(isinstance(t['report'], dict))
t = pd.DataFrame(t['report'], index=[0])
self.assertTrue(isinstance(t, pd.DataFrame))
resque.put(dict(
pid=os.getpid(),
response=None
))
args = (request, False, False, progqu, reqque, resque)
self.assertRaises(
ConnectionError, gps.process_request, args
)
base = 'https://api.yelp.com/v3/businesses/search'
url = f'{base}?latitude={self.lat}&longitude={self.lon}&radius=50&sort_by=best_match'
responses.add(
responses.GET,
url,
body='{"businesses": [], "total": 0, "region": {"center": {"longitude": -84.90685, "latitude": 32.3788}}}',
status=200
)
resque.put(dict(
pid=os.getpid(),
response=None
))
args = (request, False, False, progqu, reqque, resque)
t = gps.process_request(args)
self.assertTrue(t['hits'] == 0)
self.assertTrue(t['misses'] == 1)
args = (request, True, False, progqu, reqque, resque)
resque.put(dict(
pid=os.getpid(),
response=None
))
t = gps.process_request(args)
self.assertTrue(t['report']['content'] == '{"error": "not found in cache"}')
def empty_and_close(qu):
while not qu.empty():
qu.get()
qu.close()
empty_and_close(progqu)
empty_and_close(reqque)
empty_and_close(resque)
del progqu, reqque, resque
@responses.activate
def test_request_nearby_places(self):
base = 'https://api.yelp.com/v3/businesses/search'
url = f'{base}?latitude={self.lat}&longitude={self.lon}&radius=50&sort_by=best_match'
responses.add(
responses.GET,
url,
body='{"businesses": [], "total": 0, "region": {"center": {"longitude": -84.90685, "latitude": 32.3788}}}',
status=200
)
# check a single request
request = gps.PlaceRequest(
lat=self.lat,
lon=self.lon,
source=gps.ApiSource.YELP,
rankby=gps.YelpRankBy.BEST_MATCH,
radius=50
)
t = gps.request_nearby_places(request, 1, kwargs=self.cache_kwargs)
self.assertTrue(isinstance(t['request'], pd.DataFrame))
self.assertTrue(t['misses'] == 1 and t['hits'] == 0)
self.assertTrue(len(t['request']) == 1)
# check multiple
request = [request for i in range(2)]
t = gps.request_nearby_places(request, 1, kwargs=self.cache_kwargs)
self.assertTrue(isinstance(t['request'], pd.DataFrame))
self.assertTrue(t['misses'] == 0 and t['hits'] == 2)
self.assertTrue(len(t['request']) == 2)
def test_update_qu(self):
qu = mul.Queue()
gps.update_queue(qu)
t = qu.get()
self.assertTrue(t == 1)
def test_get_from_cache(self):
t = gps.PlaceRequest(
lat=self.lat,
lon=self.lon,
source=gps.ApiSource.YELP,
rankby=gps.YelpRankBy.BEST_MATCH,
radius=50
)
self.session.add(t)
self.session.commit()
t = gps.get_from_cache(t, self.session)
self.assertTrue(t is not None)
self.assertTrue(isinstance(t, gps.PlaceRequest))
def test_put_to_cache(self):
t = gps.PlaceRequest(
lat=self.lat,
lon=self.lon,
source=gps.ApiSource.YELP,
rankby=gps.YelpRankBy.BEST_MATCH,
radius=50
)
gps.put_to_cache(t, self.session)
t = self.session.query(gps.PlaceRequest).all()
self.assertTrue(len(t) == 1)
def test_cache_man(self):
reqqu, resqu = mul.Queue(), mul.Queue()
# test the get
reqqu.put(dict(
pid=os.getpid(),
type='get',
args=[gps.PlaceRequest(
lat=self.lat,
lon=self.lon,
source=gps.ApiSource.YELP,
rankby=gps.YelpRankBy.BEST_MATCH,
radius=50
)]
))
reqqu.put(dict(type='end'))
gps.cache_manager(reqqu, resqu, self.gcname)
t = resqu.get()
self.assertTrue(t['response'] is None)
# test the put
reqqu.put(dict(
pid=os.getpid(),
type='put',
args=[gps.PlaceRequest(
lat=self.lat,
lon=self.lon,
source=gps.ApiSource.YELP,
rankby=gps.YelpRankBy.BEST_MATCH,
radius=50
)]
))
reqqu.put(dict(type='end'))
gps.cache_manager(reqqu, resqu, self.gcname)
t = self.session.query(gps.PlaceRequest).all()
self.assertTrue(len(t) == 1)
self.assertTrue(t[0].content is None)
def test_api_source(self):
self.assertTrue(gps.api_source('Google Places') == gps.ApiSource.GMAPS)
self.assertTrue(gps.api_source('Yelp') == gps.ApiSource.YELP)
self.assertRaises(KeyError, gps.api_source, 'none')
def test_cluster_metrics(self):
t = gps.cluster_metrics(self.clusters, self.entries)
self.assertTrue(
list(t.columns) == [
'username', 'cid', 'name', 'lat', 'lon', 'categories',
'max_duration', 'mean_duration', 'mean_ti_between_visits',
'min_duration', 'std_duration', 'times_entered',
'total_duration'
]
)
self.assertTrue(isinstance(t, pd.DataFrame))
self.assertTrue('xNot' not in t.cid)
def test_process_velocities(self):
t = gps.process_velocities(self.gps_records.iloc[:2])
self.assertTrue(
set(t.columns) == {
'ts', 'lat', 'lon', 'binning', 'displacement',
'time_delta', 'velocity'
}
)
self.assertTrue(t.loc[1].binning == 'stationary')
self.assertTrue(t.loc[1].displacement == 11.1)
self.assertTrue(t.loc[1].time_delta == 60)
self.assertTrue(t.loc[1].velocity == 0.185)
def test_vdiscrete_powered(self):
start = dt.datetime(year=2018, month=1, day=1)
end = start + dt.timedelta(minutes=1)
result = gps.discrete_velocity(
(47.679853, -122.325744, start), (47.673600, -122.364783, end)
)
self.assertTrue(result.get('binning') == 'powered_vehicle')
def test_vdiscrete_walking(self):
start = dt.datetime(year=2018, month=1, day=1)
end = start + dt.timedelta(hours=1)
result = gps.discrete_velocity(
(47.679853, -122.325744, start), (47.673600, -122.364783, end)
)
self.assertTrue(result.get('binning') == 'walking')
def test_vdiscrete_stationary(self):
start = dt.datetime(year=2018, month=1, day=1)
end = start + dt.timedelta(hours=1)
result = gps.discrete_velocity(
(47.679853, -122.325744, start), (47.679853, -122.325744, end)
)
self.assertTrue(result.get('binning') == 'stationary')
def test_vdiscrete_brunch(self):
start = dt.datetime(year=2018, month=1, day=1)
end = start + dt.timedelta(minutes=30)
result = gps.discrete_velocity(
(47.679853, -122.325744, start), (47.673600, -122.364783, end)
)
self.assertTrue(result.get('binning') == 'active')
def test_vdiscrete_high_speed(self):
start = dt.datetime(year=2018, month=1, day=1)
end = start + dt.timedelta(hours=2)
result = gps.discrete_velocity(
(47.679853, -122.325744, start), (40.772849, -111.838413, end)
)
self.assertTrue(result.get('binning') == 'high_speed_transportation')
def test_vdiscrete_anomaly(self):
start = dt.datetime(year=2018, month=1, day=1)
end = start + dt.timedelta(minutes=1)
result = gps.discrete_velocity(
(47.679853, -122.325744, start), (40.772849, -111.838413, end)
)
self.assertTrue(result.get('binning') == 'anomaly')
def test_vdiscrete_throws(self):
self.assertRaises(TypeError, gps.discrete_velocity, (0, 0, 0), (0, 0, dt.datetime.now()))
self.assertRaises(TypeError, gps.discrete_velocity, (0, 0, dt.datetime.now(), (0, 0, 0)))
def test_estimate_home(self):
t = gps.estimate_home_location(self.gps_records)
self.assertTrue(t[0] is None and len(t[1]) == 0)
r = pd.concat([
self.gps_records for i in range(100)
], axis=0, sort=False)
r['ts'] = dt.datetime(
year=2005, month=1, day=1, hour=4, minute=4
)
t = gps.estimate_home_location(r)
self.assertTrue(t[0]['cid'] == 'home')
self.assertTrue(t[0]['lat'] == 40.00015)
self.assertTrue(t[0]['lon'] == -45.0)
def test_estimate_work(self):
t = gps.estimate_work_location(self.gps_records)
self.assertTrue(t[0] is None and len(t[1]) == 0)
r = pd.concat([
self.gps_records for i in range(100)
], axis=0, sort=False)
r['ts'] = dt.datetime(
year=2005, month=1, day=3, hour=12, minute=4
)
t = gps.estimate_work_location(r)
self.assertTrue(t[0]['cid'] == 'work')
self.assertTrue(t[0]['lat'] == 40.00015)
self.assertTrue(t[0]['lon'] == -45.0)
def test_geo_pairwise(self):
x = [(0, 0), (1, 0)]
t = gps.geo_pairwise_distances(x)
self.assertTrue(len(t) == 1)
self.assertTrue(t[0] == 111194.9)
x.append((0, 1))
t = gps.geo_pairwise_distances(x)
self.assertTrue(len(t) == 3)
def test_get_clusters_with_context(self):
records, clusters = gps.get_clusters_with_context(self.home_and_work)
clusters = clusters.cid.unique()
self.assertTrue('work' in clusters)
self.assertTrue('home' in clusters)
def test_get_clusters_with_context_only_home_when_work_out_of_range(self):
work = self.work_cluster.copy()
work.lat = work.lat + 10
y = pd.concat([self.home_cluster, work], sort=False).sort_values(by='ts').reset_index(drop=True)
y = gps.process_velocities(y)
records, clusters = gps.get_clusters_with_context(y)
clusters = clusters.cid.unique()
self.assertTrue('work' not in clusters)
self.assertTrue('home' in clusters)
def test_get_clusters_with_context_only_home_when_not_working(self):
y = self.home_and_work.copy()
y['working'] = False
records, clusters = gps.get_clusters_with_context(y)
clusters = clusters.cid.unique()
self.assertTrue('work' not in clusters)
self.assertTrue('home' in clusters)
|
#!/usr/bin/env python3
import sys
import getopt
import Mail
import subprocess
import socket
config = {
'smtp_host' : "smtp.qq.com",
'smtp_user' : "[email protected]",
'smtp_pass' : "dngcjckjeylmdfbi",
'from_email' : "[email protected]",
'to_email' : ['[email protected]', "[email protected]"],
'ip_cmd' : "/usr/sbin/ip",
'zpool_cmd' : "/usr/sbin/zpool"
}
class ZCheckStat :
def __init__(self):
pass
def usage(self):
print("ZFS health check tool ")
print("-----------------------------------------------")
print("%s -h <pool> " % (sys.argv[0]))
print(" -h this message ")
print(" <pool> zfs pool name")
exit(0)
def check(self, pool):
print("checking "+ pool)
cmd = [config['zpool_cmd'], "status", pool]
output = subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT).decode("UTF-8")
return output
def notify(self, subject, msg):
email = Mail.SMTP(config)
email.subject(subject)
email.content(msg)
email.send()
def parse_output(self, content):
lines = content.split("\n")
for l in lines:
try:
(k, w) = l.split(": ")
k = k.strip()
w = w.strip()
if k == "state":
if w != "ONLINE":
return -1
else:
return 0
except ValueError as e:
continue
return -1
def get_ip_address(self):
cmd = [config['ip_cmd'], "addr"]
output = subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT).decode("UTF-8")
return output
if __name__ == '__main__':
o = ZCheckStat()
if len(sys.argv) < 2:
o.usage()
pool = sys.argv[1]
out = o.check(pool)
if o.parse_output(out) < 0:
hostname = socket.gethostname()
ipaddr = o.get_ip_address()
        o.notify(hostname + " alert email - zpool: " + pool, out + ipaddr) |
import os
from argparse import ArgumentParser
def get_args():
parser = ArgumentParser(description='PyTorch/torchtext SNLI example')
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--d_embed', type=int, default=300)
parser.add_argument('--d_proj', type=int, default=300)
parser.add_argument('--d_hidden', type=int, default=300)
parser.add_argument('--d_mlp', type=int, default=600)
parser.add_argument('--n_mlp_layers', type=int, default=3)
parser.add_argument('--d_tracker', type=int, default=None)
parser.add_argument('--n_layers', type=int, default=1)
parser.add_argument('--log_every', type=int, default=50)
parser.add_argument('--lr', type=float, default=.001)
parser.add_argument('--lr_decay_by', type=float, default=1)
parser.add_argument('--lr_decay_every', type=float, default=1)
parser.add_argument('--dev_every', type=int, default=1000)
parser.add_argument('--save_every', type=int, default=1000)
parser.add_argument('--embed_dropout', type=float, default=0.2)
parser.add_argument('--mlp_dropout', type=float, default=0.2)
parser.add_argument('--rnn_dropout', type=float, default=0.2)
parser.add_argument('--no-bidirectional', action='store_false', dest='birnn')
parser.add_argument('--preserve-case', action='store_false', dest='lower')
parser.add_argument('--no-projection', action='store_false', dest='projection')
parser.add_argument('--train_embed', action='store_false', dest='fix_emb')
parser.add_argument('--predict_transitions', action='store_true', dest='predict')
parser.add_argument('--spinn', action='store_true', dest='spinn')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--save_path', type=str, default='results')
parser.add_argument('--data_cache', type=str, default=os.path.join(os.getcwd(), '.data_cache'))
parser.add_argument('--vector_cache', type=str, default=os.path.join(os.getcwd(), '.vector_cache/input_vectors.pt'))
parser.add_argument('--word_vectors', type=str, default='glove.42B')
parser.add_argument('--resume_snapshot', type=str, default='')
args = parser.parse_args()
return args
|
import bpy
from bpy.props import *
from ... data_structures import DoubleList
from ... base_types import AnimationNode
modeItems = [
("AVERAGE", "Average", "", "FORCE_TURBULENCE", 0),
("SPECTRUM", "Spectrum", "", "RNDCURVE", 1)
]
class EvaluateSoundNode(bpy.types.Node, AnimationNode):
bl_idname = "an_EvaluateSoundNode"
bl_label = "Evaluate Sound"
errorHandlingType = "MESSAGE"
mode: EnumProperty(name = "Mode", default = "AVERAGE",
items = modeItems, update = AnimationNode.refresh)
useCurrentFrame: BoolProperty(name = "Use Current Frame", default = True,
update = AnimationNode.refresh)
def create(self):
self.newInput("Sound", "Sound", "sound",
typeFilter = self.mode, defaultDrawType = "PROPERTY_ONLY")
if not self.useCurrentFrame:
self.newInput("Float", "Frame", "frame")
if self.mode == "AVERAGE":
self.newOutput("Float", "Volume", "volume")
elif self.mode == "SPECTRUM":
self.newOutput("Float List", "Volumes", "volumes")
def draw(self, layout):
layout.prop(self, "mode", text = "")
def drawAdvanced(self, layout):
layout.prop(self, "useCurrentFrame")
def getExecutionCode(self, required):
if self.useCurrentFrame: yield "_frame = self.nodeTree.scene.frame_current_final"
else: yield "_frame = frame"
if self.mode == "AVERAGE":
yield "volume = self.execute_Average(sound, _frame)"
elif self.mode == "SPECTRUM":
yield "volumes = self.execute_Spectrum(sound, _frame)"
def execute_Average(self, sound, frame):
if sound is None: return 0
if sound.type != "AVERAGE":
self.setErrorMessage("Wrong sound type")
return 0
return sound.evaluate(frame)
def execute_Spectrum(self, sound, frame):
if sound is None: return DoubleList()
if sound.type != "SPECTRUM":
self.setErrorMessage("Wrong sound type")
return DoubleList()
return DoubleList.fromValues(sound.evaluate(frame))
|
"""Mix-ins used to add defined behaviors to Tapis CLI commands
"""
import argparse
import copy
import json
import os
import sys
import validators
import docker as dockerpy
from agavepy.agave import Agave
from cliff.command import Command
from cliff.hooks import CommandHook
from cliff.app import App
from tapis_cli import constants
from tapis_cli.display import Verbosity
from tapis_cli.utils import serializable
from tapis_cli import project_ini, templating
__all__ = [
'OptionNotImplemented', 'AppVerboseLevel', 'JsonVerbose',
'ServiceIdentifier', 'UploadJsonFile', 'AgaveURI', 'RemoteFilePath',
'LocalFilePath', 'Username', 'InvalidIdentifier', 'OptionalLocalFilePath',
'InvalidValue', 'URL', 'TapisEntityUUID', 'OptionalTapisEntityUUID',
'UploadJSONTemplate', 'WorkingDirectory', 'WorkingDirectoryOpt',
'WorkingDirectoryArg', 'DownloadDirectoryArg', 'DockerPy'
]
class InvalidValue(ValueError):
pass
class InvalidIdentifier(InvalidValue):
"""Raised when an invalid identifier is encountered
"""
pass
class OptionNotImplemented(ValueError):
"""Raised when an option that is only a placeholder is specified
"""
pass
class ParserExtender(object):
working_dir = '.'
def getwd(self):
return getattr(self, 'working_dir')
def extend_parser(self, parser):
        # When subclassing: DO NOT FORGET TO RETURN PARSER
return parser
def preprocess_args(self, parsed_args):
        # When subclassing: DO NOT FORGET TO RETURN PARSED_ARGS
return parsed_args
def render_extended_parser_value(self, key, value, formatter=None):
return key, value
def validate(self, value, permissive=True):
"""Placeholder to implement validation of a value passed
via a ParserExtender
"""
return True
class AppVerboseLevel(ParserExtender):
"""Configures a Command to access the parent cliff App's verbosity level
    The calling App's verbose_level is made available via the property
    app_verbose_level. In addition, two class attributes, 'VERBOSITY' and
    'EXTRA_VERBOSITY', are defined. These are intended to be values defined
by the `Verbosity` module. 'VERBOSITY' is the default field-display
verbosity for the Command, while `EXTRA_VERBOSITY` is the verbosity level
when a user or process specifies that additional verbosity is needed.
"""
VERBOSITY = None
EXTRA_VERBOSITY = VERBOSITY
@property
def app_verbose_level(self):
"""Exposes the app-scoped verbosity level as a formatter property
"""
vlevel = 1
try:
vlevel = self.app_args.verbose_level
except Exception:
pass
return vlevel
class JsonVerbose(AppVerboseLevel):
"""Configures a Command to use JSON as formatter when verbose is requested
    Overrides the Command.formatter_default property such that passing '-v'
    to the cliff App when running a command will configure the Command to use
    the JSON formatter and to increase its field-display verbosity to the
    level defined by 'EXTRA_VERBOSITY'.
"""
EXTRA_VERBOSITY = Verbosity.RECORD
@property
def formatter_default(self):
"""Overrides formatter_default to return JSON when -v is passed
"""
if self.app_verbose_level > 1:
return 'json'
else:
return 'table'
def verbosify_parsed_args(self, parsed_args):
if self.app_verbose_level > 1:
# raise SystemError(dir(self.app.options))
parsed_args.formatter = 'json'
if self.EXTRA_VERBOSITY is not None:
self.VERBOSITY = self.EXTRA_VERBOSITY
return parsed_args
def preprocess_args(self, parsed_args):
parsed_args = super(JsonVerbose, self).preprocess_args(parsed_args)
if self.app_verbose_level > 1:
parsed_args.formatter = 'json'
if self.EXTRA_VERBOSITY is not None:
self.VERBOSITY = self.EXTRA_VERBOSITY
return parsed_args
class AgaveURI(ParserExtender):
"""Configures a Command to require a mandatory 'agave uri'
positional parameter
"""
def extend_parser(self, parser):
parser.add_argument('agave_uri',
type=str,
metavar='<agave_uri>',
help='Agave files URI (agave://)')
return parser
@classmethod
def parse_url(cls, url):
"""Parse an Agave files resource URI into storageSystem and filePath
"""
# TODO - Move implementation down to agavepy.utils
# Agave URI
if url.startswith('agave://'):
url = url.replace('agave://', '', 1)
parts = url.split('/')
return parts[0], '/' + '/'.join(parts[1:])
# Agave media URL
elif url.startswith('https://'):
url = url.replace('https://', '')
parts = url.split('/')
if parts[1] == 'files' and parts[3] == 'media':
return parts[5], '/'.join(parts[6:])
else:
raise InvalidValue('{0} not a valid Agave URL or URI'.format(url))
def validate(self, url, permissive=False):
try:
self.parse_url(url)
return True
except Exception:
if permissive:
return False
else:
raise
class ServiceIdentifier(ParserExtender):
"""Configures a Command to require a mandatory 'identifier' positional param
Adds a positional parameter to the Command parser. The value for the
parameter's 'metavar' is set by the Command.service_id_type property.
"""
# Stem for naming the identifier
service_id_type = 'Service'
# Leaf for naming the identifier
id_type = 'identifier'
# If True, the argument is optional
optional = False
# argparse destination
dest = id_type
@classmethod
def arg_display(cls, id_value):
return '<{0}_id>'.format(id_value).lower()
@classmethod
def arg_metavar(cls, id_value):
return cls.arg_display(id_value)
@classmethod
def arg_help(cls, id_value):
if not cls.optional:
return '{0} {1}'.format(id_value, cls.id_type)
else:
return 'Optional {0} {1}'.format(id_value, cls.id_type)
def extend_parser(self, parser):
id_value = getattr(self, 'service_id_type')
if id_value is not None:
arg_display = '<{0}_id>'.format(id_value).lower()
if self.optional:
nargs = '?'
else:
nargs = None
if id_value is not None:
parser.add_argument(self.dest,
type=str,
nargs=nargs,
metavar=self.arg_metavar(id_value),
help=self.arg_help(id_value))
return parser
def validate_identifier(self, identifier, permissive=True):
return self.validate(identifier)
def get_identifier(self, parsed_args, validate=False, permissive=False):
identifier = None
try:
identifier = getattr(parsed_args, self.dest)
# identifier = parsed_args.identifier
self.validate_identifier(identifier)
except Exception:
if permissive:
return None
else:
raise
return identifier
class TapisEntityUUID(ServiceIdentifier):
service_id_type = 'Tapis Entity'
id_type = 'UUID'
@classmethod
def arg_display(cls, id_value):
return '<{0}_uuid>'.format(id_value).lower()
class OptionalTapisEntityUUID(TapisEntityUUID):
optional = True
class RemoteFilePath(ParserExtender):
"""Configures a Command to accept an optional file path
"""
def extend_parser(self, parser):
parser.add_argument(
'file_path',
default='.',
nargs='?',
metavar='<file_path>',
help='Optional file path relative to output directory')
return parser
class LocalFilePath(ParserExtender):
"""Configures a Command to accept a local file path
"""
def extend_parser(self, parser):
parser.add_argument('local_file_path',
metavar='<file_path>',
help='Path (relative to working directory)')
return parser
class OptionalLocalFilePath(ParserExtender):
"""Configures a Command to accept a local file path
"""
def extend_parser(self, parser):
parser.add_argument(
'local_file_path',
nargs='?',
metavar='<file_path>',
help='Optional path (relative to working directory)')
return parser
class WorkingDirectory(ParserExtender):
"""Allows the working directory to be set via positional argument.
"""
help_string = 'Working directory'
def extend_parser(self, parser):
parser.add_argument('working_directory',
metavar='<dir>',
default='.',
type=str,
help=self.help_string)
return parser
def set_working_directory(self, parsed_args, working_dir='.'):
wd_value = getattr(parsed_args, 'working_directory', working_dir)
setattr(self, 'working_dir', wd_value)
return self
class WorkingDirectoryOpt(WorkingDirectory):
"""Allows the working directory to be set via optional, terminal argument.
"""
def extend_parser(self, parser):
parser.add_argument('working_directory',
metavar='<dir>',
default='.',
nargs='?',
type=str,
help=self.help_string)
return parser
class WorkingDirectoryArg(WorkingDirectory):
"""Allows the working directory to be set via optional argument.
"""
def extend_parser(self, parser):
parser.add_argument('-W',
dest='working_directory',
metavar='<dir>',
default='.',
type=str,
help=self.help_string)
return parser
class DownloadDirectoryArg(WorkingDirectoryArg):
"""Allows the working directory to be set via optional argument.
"""
help_string = 'Download directory'
def extend_parser(self, parser):
parser.add_argument('-W',
dest='working_directory',
metavar='<dir>',
default='.',
type=str,
help=self.help_string)
return parser
class UploadJsonFile(ParserExtender):
"""Configures a client to accept and load a JSON file
Adds -F and --file to a Command's parser. To load the designated file,
    the handle_file_upload() method must then be called. JSON file contents will
reside in self.json_file_contents.
"""
json_loaded = dict()
validate = True
optional = False
default = None
def extend_parser(self, parser):
if self.default is None:
parser.add_argument('-F',
'--file',
dest='json_file_name',
metavar='<file>',
type=str,
help='JSON payload file')
else:
parser.add_argument('-F',
'--file',
dest='json_file_name',
metavar='<file>',
default=self.default,
type=str,
help='JSON payload file ({})'.format(
self.default))
return parser
def handle_file_upload(self, parsed_args):
document_path, document_source = None, None
if parsed_args.json_file_name == '-':
document_source = sys.stdin
elif parsed_args.json_file_name is None:
if self.optional:
setattr(self, 'json_file_contents', {})
return self.json_file_contents
else:
raise ValueError('JSON file path must be specified')
else:
document_path = os.path.join(self.getwd(),
parsed_args.json_file_name)
document_source = open(document_path, 'rb')
# Load up the data source, validating that it's minimally serializable
# TODO - factor validation into its own method so it can be overridden
try:
payload = json.load(document_source)
if self.validate:
serializable(payload)
setattr(self, 'json_file_contents', payload)
return self.json_file_contents
except Exception as exc:
setattr(self, 'json_file_contents', None)
raise ValueError('{0} was not valid JSON: {1}'.format(
parsed_args.json_file_name, exc))
class UploadJSONTemplate(UploadJsonFile):
def extend_parser(self, parser):
parser = super(UploadJSONTemplate, self).extend_parser(parser)
parser.add_argument('--ini',
dest='ini_file_name',
metavar='<file>',
type=str,
help='Optional .ini file')
return parser
def get_ini_path(self, filename):
return project_ini.config_path(filename, self.getwd())
# OVERRIDES DO NOT SEEM TO BE WORKING
def _all_key_values(self, parsed_args, passed_vals):
t = templating.key_values(passed_vals)
ini_path = self.get_ini_path(parsed_args.ini_file_name)
p = project_ini.key_values(ini_path)
raise SystemError(p)
project_ini.update_config(t, p, add_keys=True)
# tapis dynamic variables
tapis_variables = self.key_values()
# right-merged dictionary
        # dynamic values always override ini-loaded defaults
project_ini.update_config(t, tapis_variables, add_keys=True)
project_ini.update_config(t, {}, add_keys=True)
return t
# OVERRIDES DO NOT SEEM TO BE WORKING
def all_key_values(self, parsed_args, passed_vals):
# Load up ini file
ini_path = self.get_ini_path(parsed_args.ini_file_name)
cfg = project_ini.key_values(ini_path)
# Load up core template vars
tmpl = templating.key_values({})
project_ini.update_config(cfg, tmpl, add_keys=True)
# Extend with API-related dynamic vars
tapis = self.key_values()
project_ini.update_config(cfg, tapis, add_keys=True)
# Finally, layer over passed values. Assumption is that these
# are passed by CLI or other run-time means
project_ini.update_config(cfg, passed_vals, add_keys=True)
return cfg
def _render_json_file_contents(self, passed_vals):
"""Transform the JSON file contents by rendering it as a Jinja template
"""
payload = getattr(self, 'json_file_contents')
txt_payload = json.dumps(payload)
txt_payload = templating.render_template(txt_payload,
passed_vals=passed_vals)
payload = json.loads(txt_payload)
setattr(self, 'json_file_contents', payload)
return self.json_file_contents
def handle_file_upload(self, parsed_args, passed_vals={}):
super(UploadJSONTemplate, self).handle_file_upload(parsed_args)
# payload = getattr(self, 'json_file_contents')
# load variable sets
# ini-based configuration
ini_path = self.get_ini_path(parsed_args.ini_file_name)
config = project_ini.key_values(ini_path, as_dict=True)
# tapis dynamic variables
tapis_variables = self.key_values()
# right-merged dictionary
        # dynamic values always override ini-loaded defaults
project_ini.update_config(config, tapis_variables, add_keys=True)
# Accept run-time overrides
project_ini.update_config(config, passed_vals, add_keys=True)
# render, where merged variables overrides module-provided values
self._render_json_file_contents(passed_vals=config)
return self.json_file_contents
class Username(ParserExtender):
"""Configures a Command to accept an positional username
"""
def extend_parser(self, parser):
parser.add_argument('username',
metavar='<username>',
help='{0} username'.format(constants.PLATFORM))
return parser
class URL(ParserExtender):
"""Configures a Command to require a mandatory 'url' positional parameter
"""
def extend_parser(self, parser):
parser.add_argument('url',
type=str,
metavar='<url>',
help='Valid URL [http(s)://]')
return parser
def validate(self, url, permissive=False):
try:
validators.url(url, public=True)
return True
except Exception:
if permissive:
return False
else:
raise
class DockerPy:
dockerpy = None
def docker_client_from_env(self):
setattr(self, 'dockerpy', dockerpy.from_env())
|
# ------------------------------------------------------------------------------
# Class all model definitions should descend from.
# ------------------------------------------------------------------------------
import os
import torch.nn as nn
import numpy as np
from torchsummary import summary
from mp.utils.pytorch.pytorch_load_restore import load_model_state, save_model_state
class Model(nn.Module):
r"""A model that descends from torch.nn.Model and includes methods to output
a model summary, as well as the input_shape and output_shape fields used in
other models and the logic to restore previous model states from a path.
Args:
input_shape tuple (int): Input shape with the form
(channels, width, height, Opt(depth))
output_shape (Obj): output shape, which takes different forms depending
on the problem
"""
def __init__(self, input_shape=(1, 32, 32), output_shape=2):
super(Model, self).__init__()
self.input_shape = input_shape
self.output_shape = output_shape
def preprocess_input(self, x):
r"""E.g. pretrained features. Override if needed. """
return x
def initialize(self, weights_init_path, device):
r"""Tries to restore a previous model. If no model is found, the initial
weights are saved.
"""
path, name = os.path.split(weights_init_path)
restored = load_model_state(self, path=path, name=name, device=device)
if restored:
print('Initial parameters {} were restored'.format(weights_init_path))
else:
save_model_state(self, path=path, name=name)
print('Initial parameters {} were saved'.format(weights_init_path))
def get_param_list_static(self):
r"""Returns a 1D array of parameter values
"""
model_params_array = []
for _, param in self.state_dict().items():
model_params_array.append(param.reshape(-1).cpu().numpy())
return np.concatenate(model_params_array)
# Method to output model information
def model_summary(self, verbose=False):
r"""Return a Keras-style summary."""
summary_str = str(summary(self, input_data=self.input_shape, verbose=0))
if verbose:
print(summary_str)
return summary_str
# Methods to calculate the feature size
def num_flat_features(self, x):
r"""Flattened view of all dimensions except the batch size.
"""
size = x.size()[1:]
num_features = 1
for s in size:
num_features *= s
return num_features
def flatten(self, x):
r"""Flatten x into 1 dimension."""
return x.view(-1, self.num_flat_features(x))
def size_before_lin(self, shape_input):
r"""Size after linearization.
Returns (int): integer of dense size
"""
return shape_input[0]*shape_input[1]*shape_input[2]
def size_after_conv(self, shape_input, output_channels, kernel):
r"""Gives the number of output neurons after the conv operation.
The first dimension is the channel depth and the other 2 are given by
input volume (size - kernel size + 2*padding)/stride + 1
"""
return (output_channels, shape_input[1]-kernel+1, shape_input[2]-kernel+1)
def size_after_pooling(self, shape_input, shape_pooling):
r"""Maintains the first input dimension, which is the output channels in
the previous conv layer. The others are divided by the shape of the
pooling.
"""
return (shape_input[0], shape_input[1]//shape_pooling[0], shape_input[2]//shape_pooling[1])
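# A minimal usage sketch of the shape helpers above, for illustration only.
# It assumes a toy input of shape (1, 32, 32), a conv layer with 8 output
# channels and a 3x3 kernel (stride 1, no padding), followed by 2x2 pooling;
# the numbers are worked out with the same formulas the methods implement.
if __name__ == '__main__':
    m = Model(input_shape=(1, 32, 32), output_shape=2)
    after_conv = m.size_after_conv((1, 32, 32), output_channels=8, kernel=3)
    # (8, 30, 30): width and height shrink by kernel - 1 with stride 1, no padding
    after_pool = m.size_after_pooling(after_conv, (2, 2))
    # (8, 15, 15): channels kept, spatial dims divided by the pooling shape
    dense_in = m.size_before_lin(after_pool)
    # 8 * 15 * 15 = 1800 input features for the first linear layer
    print(after_conv, after_pool, dense_in)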
|
from . import tests
|
# Python Exercise 086:
# Create a program that declares a 3x3 matrix and
# fills it with values read from the keyboard.
# At the end, display the matrix on screen with the proper formatting.
#matriz = [[2, 3, 4], [7, 5 , 4], [8, 4, 9]]
def l(ll): print(30*ll)
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
l('=')
print('Matrix values to be entered')
for i in range(len(matriz)):
for ii in range(len(matriz)):
        matriz[i][ii] = int(input(f'[{i}|{ii}] number: '))
l('_')
print('Matrix values:')
for i in range(3):
for ii in range(3):
print(f'{matriz[i][ii]:^10} ', end='')
print()
l('-') |
# Copyright 2017 Akretion (http://www.akretion.com).
# @author Sébastien BEAU <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import base64
import os
from urllib import parse
import requests_mock
from odoo.exceptions import AccessError
from odoo.addons.component.tests.common import TransactionComponentCase
class StorageImageCase(TransactionComponentCase):
def setUp(self):
super(StorageImageCase, self).setUp()
# FIXME: remove this, should have explicit permission tests
# Run the test with the demo user in order to check the access right
self.user = self.env.ref("base.user_demo")
self.user.write(
{"groups_id": [(4, self.env.ref("storage_image.group_image_manager").id)]}
)
self.env = self.env(user=self.user)
self.backend = self.env.ref("storage_backend.default_storage_backend")
path = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(path, "static/akretion-logo.png"), "rb") as f:
data = f.read()
self.filesize = len(data)
self.filedata = base64.b64encode(data)
self.filename = "akretion-logo.png"
def _create_storage_image(self):
return self.env["storage.image"].create(
{"name": self.filename, "image_medium_url": self.filedata}
)
def _check_thumbnail(self, image):
self.assertEqual(len(image.thumbnail_ids), 2)
medium, small = image.thumbnail_ids
self.assertEqual(medium.size_x, 128)
self.assertEqual(medium.size_y, 128)
self.assertEqual(small.size_x, 64)
self.assertEqual(small.size_y, 64)
def test_create_and_read_image(self):
image = self._create_storage_image()
self.assertEqual(image.data, self.filedata)
self.assertEqual(image.mimetype, u"image/png")
self.assertEqual(image.extension, u".png")
self.assertEqual(image.filename, u"akretion-logo")
url = parse.urlparse(image.url)
self.assertEqual(
url.path, "/storage.file/akretion-logo-%d.png" % image.file_id.id
)
self.assertEqual(image.file_size, self.filesize)
self.assertEqual(self.backend.id, image.backend_id.id)
def test_create_thumbnail(self):
image = self._create_storage_image()
self.assertIsNotNone(image.image_medium_url)
self.assertIsNotNone(image.image_small_url)
self._check_thumbnail(image)
def test_create_specific_thumbnail(self):
image = self._create_storage_image()
thumbnail = image.get_or_create_thumbnail(100, 100, u"my-image-thumbnail")
self.assertEqual(thumbnail.url_key, u"my-image-thumbnail")
self.assertEqual(thumbnail.relative_path[0:26], u"my-image-thumbnail_100_100")
# Check that method will return the same thumbnail
# Check also that url_key have been slugified
new_thumbnail = image.get_or_create_thumbnail(100, 100, u"My Image Thumbnail")
self.assertEqual(new_thumbnail.id, thumbnail.id)
# Check that method will return a new thumbnail
new_thumbnail = image.get_or_create_thumbnail(
100, 100, u"My New Image Thumbnail"
)
self.assertNotEqual(new_thumbnail.id, thumbnail.id)
def test_name_onchange(self):
image = self.env["storage.image"].new({"name": "Test-of image_name.png"})
image.onchange_name()
self.assertEqual(image.name, u"test-of-image_name.png")
self.assertEqual(image.alt_name, u"Test of image name")
def test_unlink(self):
image = self._create_storage_image()
stfile = image.file_id
thumbnail_files = image.thumbnail_ids.mapped("file_id")
image.unlink()
self.assertEqual(stfile.to_delete, True)
self.assertEqual(stfile.active, False)
for thumbnail_file in thumbnail_files:
self.assertEqual(thumbnail_file.to_delete, True)
self.assertEqual(thumbnail_file.active, False)
def test_no_manager_user_can_not_write(self):
        # Remove access right from the demo user
group_manager = self.env.ref("storage_image.group_image_manager")
self.user = self.env.ref("base.user_demo")
self.user.sudo().write({"groups_id": [(3, group_manager.id)]})
with self.assertRaises(AccessError):
self._create_storage_image()
def test_create_thumbnail_pilbox(self):
self.env["ir.config_parameter"].sudo().create(
{
"key": "storage.image.resize.server",
"value": "http://pilbox:8888?url={url}&w={width}&h={height}"
"&mode=fill&fmt={fmt}",
}
)
self.env["ir.config_parameter"].sudo().create(
{"key": "storage.image.resize.format", "value": "webp"}
)
backend = self.env["storage.backend"].sudo().browse([self.backend.id])
backend.served_by = "external"
backend.base_url = "test"
with requests_mock.mock() as m:
m.get("http://pilbox:8888?", text="data")
image = self._create_storage_image()
self.assertEqual(len(m.request_history), 2)
self.assertEqual(
m.request_history[0].url,
"http://pilbox:8888/?url=test/akretion-logo-%s.png"
"&w=128&h=128&mode=fill&fmt=webp" % image.file_id.id,
)
self.assertEqual(
m.request_history[1].url,
"http://pilbox:8888/?url=test/akretion-logo-%s.png"
"&w=64&h=64&mode=fill&fmt=webp" % image.file_id.id,
)
|
# -*- coding: utf-8 -*-
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import re
from pathlib import Path
import git
from .lib import TestBase, with_rw_directory
class TestClone(TestBase):
@with_rw_directory
def test_checkout_in_non_empty_dir(self, rw_dir):
non_empty_dir = Path(rw_dir)
garbage_file = non_empty_dir / "not-empty"
garbage_file.write_text("Garbage!")
# Verify that cloning into the non-empty dir fails while complaining about
# the target directory not being empty/non-existent
try:
self.rorepo.clone(non_empty_dir)
except git.GitCommandError as exc:
self.assertTrue(
exc.stderr, "GitCommandError's 'stderr' is unexpectedly empty"
)
expr = re.compile(
r"(?is).*\bfatal:\s+destination\s+path\b.*\bexists\b.*\bnot\b.*\bempty\s+directory\b"
)
self.assertTrue(
expr.search(exc.stderr),
'"%s" does not match "%s"' % (expr.pattern, exc.stderr),
)
else:
self.fail("GitCommandError not raised")
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetFhirResult',
'AwaitableGetFhirResult',
'get_fhir',
'get_fhir_output',
]
@pulumi.output_type
class GetFhirResult:
def __init__(__self__, content_type=None, data=None, extensions=None):
if content_type and not isinstance(content_type, str):
raise TypeError("Expected argument 'content_type' to be a str")
pulumi.set(__self__, "content_type", content_type)
if data and not isinstance(data, str):
raise TypeError("Expected argument 'data' to be a str")
pulumi.set(__self__, "data", data)
if extensions and not isinstance(extensions, list):
raise TypeError("Expected argument 'extensions' to be a list")
pulumi.set(__self__, "extensions", extensions)
@property
@pulumi.getter(name="contentType")
def content_type(self) -> str:
"""
The HTTP Content-Type header value specifying the content type of the body.
"""
return pulumi.get(self, "content_type")
@property
@pulumi.getter
def data(self) -> str:
"""
The HTTP request/response body as raw binary.
"""
return pulumi.get(self, "data")
@property
@pulumi.getter
def extensions(self) -> Sequence[Mapping[str, str]]:
"""
Application specific response metadata. Must be set in the first response for streaming APIs.
"""
return pulumi.get(self, "extensions")
class AwaitableGetFhirResult(GetFhirResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetFhirResult(
content_type=self.content_type,
data=self.data,
extensions=self.extensions)
def get_fhir(dataset_id: Optional[str] = None,
fhir_id: Optional[str] = None,
fhir_id1: Optional[str] = None,
fhir_store_id: Optional[str] = None,
location: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFhirResult:
"""
Gets the contents of a FHIR resource. Implements the FHIR standard read interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#read), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#read), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#read)). Also supports the FHIR standard conditional read interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#cread), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#cread), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#cread)) specified by supplying an `If-Modified-Since` header with a date/time value or an `If-None-Match` header with an ETag value. On success, the response body contains a JSON-encoded representation of the resource. Errors generated by the FHIR store contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. For samples that show how to call `read`, see [Getting a FHIR resource](/healthcare/docs/how-tos/fhir-resources#getting_a_fhir_resource).
"""
__args__ = dict()
__args__['datasetId'] = dataset_id
__args__['fhirId'] = fhir_id
__args__['fhirId1'] = fhir_id1
__args__['fhirStoreId'] = fhir_store_id
__args__['location'] = location
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:healthcare/v1:getFhir', __args__, opts=opts, typ=GetFhirResult).value
return AwaitableGetFhirResult(
content_type=__ret__.content_type,
data=__ret__.data,
extensions=__ret__.extensions)
@_utilities.lift_output_func(get_fhir)
def get_fhir_output(dataset_id: Optional[pulumi.Input[str]] = None,
fhir_id: Optional[pulumi.Input[str]] = None,
fhir_id1: Optional[pulumi.Input[str]] = None,
fhir_store_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetFhirResult]:
"""
Gets the contents of a FHIR resource. Implements the FHIR standard read interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#read), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#read), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#read)). Also supports the FHIR standard conditional read interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#cread), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#cread), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#cread)) specified by supplying an `If-Modified-Since` header with a date/time value or an `If-None-Match` header with an ETag value. On success, the response body contains a JSON-encoded representation of the resource. Errors generated by the FHIR store contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. For samples that show how to call `read`, see [Getting a FHIR resource](/healthcare/docs/how-tos/fhir-resources#getting_a_fhir_resource).
"""
...
|
"""
Feature Scaling
- We discussed previously that the scale of the features is an important consideration when building machine
learning models.
Briefly:
Feature magnitude matters because:
- The regression coefficients of linear models are directly influenced by the scale of the variable.
- Variables with bigger magnitude / larger value range dominate over those with smaller magnitude / value range
- Gradient descent converges faster when features are on similar scales
- Feature scaling helps decrease the time to find support vectors for SVMs
- Euclidean distances are sensitive to feature magnitude.
- Some algorithms, like PCA, require the features to be centered at 0.
The machine learning models affected by the feature scale are:
- Linear and Logistic Regression
- Neural Networks
- Support Vector Machines
- KNN
- K-means clustering
- Linear Discriminant Analysis (LDA)
- Principal Component Analysis (PCA)
Feature Scaling
- Feature scaling refers to the methods or techniques used to normalize the range of independent variables in our
data, or in other words, the methods to set the feature value range within a similar scale.
- Feature scaling is generally the last step in the data preprocessing pipeline, performed just before training the
machine learning algorithms.
There are several Feature Scaling techniques, which we will discuss throughout this section:
- Standardisation
- Mean normalisation
- Scaling to minimum and maximum values - MinMaxScaling
- Scaling to maximum value - MaxAbsScaling
- Scaling to quantiles and median - RobustScaling
- Normalization to vector unit length
In this example, we will discuss Scaling to quantiles and median.
=================================================================
Scaling to quantiles and median - RobustScaling
- In this procedure the median is removed from the observations and then they are scaled to the inter-quantile
range (IQR). The IQR is the range between the 1st quartile (25th quantile) and the 3rd quartile (75th quantile).
X_scaled = (X - X_median) / (X.quantile(0.75) - X.quantile(0.25))
- This robust scaling method produces more robust estimates for the center and range of the variable, and is
recommended if the data shows outliers.
In a nutshell, RobustScaling:
- centers the median at 0
- variance varies across variables
- may not preserve the shape of the original distribution
- the minimum and maximum values vary.
- is robust to outliers
In this example
We will perform robust scaling using the Boston House Prices data set that comes with Scikit-learn.
A small toy sketch of the formula is shown right below, before the main example. """
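"""
A small toy sketch of the robust scaling formula above, before the main example:
it applies the formula by hand to a made-up pandas Series and checks the result
against scikit-learn's RobustScaler. The values are invented for illustration only.
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import RobustScaler

toy = pd.DataFrame({'x': [1.0, 2.0, 4.0, 7.0, 100.0]})      # 100 is an outlier
toy_median = toy['x'].median()                               # 4.0
toy_iqr = toy['x'].quantile(0.75) - toy['x'].quantile(0.25)  # 7.0 - 2.0 = 5.0
manual = (toy['x'] - toy_median) / toy_iqr
sklearn_scaled = RobustScaler().fit_transform(toy)[:, 0]
print(np.allclose(manual.values, sklearn_scaled))            # expected: True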
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
# the scaler - for robust scaling
from sklearn.preprocessing import RobustScaler
# load the Boston House price data
boston_dataset = load_boston()
# create a dataframe with the independent variables
data = pd.DataFrame(boston_dataset.data,
columns=boston_dataset.feature_names)
# add target
data['MEDV'] = boston_dataset.target
data.head()
"""
CRIM ZN INDUS CHAS NOX RM AGE DIS RAD TAX PTRATIO B LSTAT MEDV
0 0.00632 18.0 2.31 0.0 0.538 6.575 65.2 4.0900 1.0 296.0 15.3 396.90 4.98 24.0
1 0.02731 0.0 7.07 0.0 0.469 6.421 78.9 4.9671 2.0 242.0 17.8 396.90 9.14 21.6
2 0.02729 0.0 7.07 0.0 0.469 7.185 61.1 4.9671 2.0 242.0 17.8 392.83 4.03 34.7
3 0.03237 0.0 2.18 0.0 0.458 6.998 45.8 6.0622 3.0 222.0 18.7 394.63 2.94 33.4
4 0.06905 0.0 2.18 0.0 0.458 7.147 54.2 6.0622 3.0 222.0 18.7 396.90 5.33 36.2 """
# Information about the boston house prince dataset
# you will find details about the different variables
# the aim is to predict the "Median value of the houses"
# MEDV column in this dataset
# and there are variables with characteristics about
# the homes and the neighborhoods
# print the dataset description
print(boston_dataset.DESCR)
""" .. _boston_dataset:
Boston house prices dataset
---------------------------
**Data Set Characteristics:**
:Number of Instances: 506
:Number of Attributes: 13 numeric/categorical predictive. Median Value (attribute 14) is usually the target.
:Attribute Information (in order):
- CRIM per capita crime rate by town
- ZN proportion of residential land zoned for lots over 25,000 sq.ft.
- INDUS proportion of non-retail business acres per town
- CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
- NOX nitric oxides concentration (parts per 10 million)
- RM average number of rooms per dwelling
- AGE proportion of owner-occupied units built prior to 1940
- DIS weighted distances to five Boston employment centres
- RAD index of accessibility to radial highways
- TAX full-value property-tax rate per $10,000
- PTRATIO pupil-teacher ratio by town
- B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
- LSTAT % lower status of the population
- MEDV Median value of owner-occupied homes in $1000's
:Missing Attribute Values: None
:Creator: Harrison, D. and Rubinfeld, D.L.
This is a copy of UCI ML housing dataset.
https://archive.ics.uci.edu/ml/machine-learning-databases/housing/
This dataset was taken from the StatLib library which is maintained at Carnegie Mellon University.
The Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic
prices and the demand for clean air', J. Environ. Economics & Management,
vol.5, 81-102, 1978. Used in Belsley, Kuh & Welsch, 'Regression diagnostics
...', Wiley, 1980. N.B. Various transformations are used in the table on
pages 244-261 of the latter.
The Boston house-price data has been used in many machine learning papers that address regression
problems.
.. topic:: References
- Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261.
- Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann. """
# let's have a look at the main statistical parameters of the variables
# to get an idea of the feature magnitudes
data.describe()
"""
CRIM ZN INDUS CHAS NOX RM AGE DIS RAD TAX PTRATIO B LSTAT MEDV
count 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000 506.000000
mean 3.613524 11.363636 11.136779 0.069170 0.554695 6.284634 68.574901 3.795043 9.549407 408.237154 18.455534 356.674032 12.653063 22.532806
std 8.601545 23.322453 6.860353 0.253994 0.115878 0.702617 28.148861 2.105710 8.707259 168.537116 2.164946 91.294864 7.141062 9.197104
min 0.006320 0.000000 0.460000 0.000000 0.385000 3.561000 2.900000 1.129600 1.000000 187.000000 12.600000 0.320000 1.730000 5.000000
25% 0.082045 0.000000 5.190000 0.000000 0.449000 5.885500 45.025000 2.100175 4.000000 279.000000 17.400000 375.377500 6.950000 17.025000
50% 0.256510 0.000000 9.690000 0.000000 0.538000 6.208500 77.500000 3.207450 5.000000 330.000000 19.050000 391.440000 11.360000 21.200000
75% 3.677083 12.500000 18.100000 0.000000 0.624000 6.623500 94.075000 5.188425 24.000000 666.000000 20.200000 396.225000 16.955000 25.000000
max 88.976200 100.000000 27.740000 1.000000 0.871000 8.780000 100.000000 12.126500 24.000000 711.000000 22.000000 396.900000 37.970000 50.000000
- The different variables present different value ranges, means, max, min, standard deviations, etc.
- In other words, they show different magnitudes or scales. Note for this demo how the value ranges are quite
different across the variables.
- When performing robust scaling on the data set, we need to first identify the median and inter-quartile range (IQR) of each variable.
- These parameters need to be learned from the train set, stored, and then used to scale test and future data.
- Thus, we will first divide the data set into train and test, as we have done throughout the course.
"""
# let's separate into training and testing set
X_train, X_test, y_train, y_test = train_test_split(data.drop('MEDV', axis=1),
data['MEDV'],
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
# ((354, 13), (152, 13))
""" RobustScaling
- The MaxAbsScaler from scikit-learn re-scales features to their maximum value, so that the new maximum value is 1.
"""
# set up the scaler
scaler = RobustScaler()
# fit the scaler to the train set, it will learn the parameters
scaler.fit(X_train)
# transform train and test sets
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# the scaler stores the median values of the features as learned from train set
scaler.center_
""" array([2.62660e-01, 0.00000e+00, 8.56000e+00, 0.00000e+00, 5.38000e-01,
6.21550e+00, 7.94500e+01, 3.21570e+00, 5.00000e+00, 3.11000e+02,
1.91000e+01, 3.91605e+02, 1.11600e+01]) """
# the scaler stores the IQR values of the features as learned from train set
scaler.scale_
""" array([3.030275e+00, 2.000000e+01, 1.315000e+01, 1.000000e+00,
1.792500e-01, 7.520000e-01, 4.857500e+01, 2.971650e+00,
2.000000e+01, 3.900000e+02, 2.800000e+00, 1.963250e+01,
9.982500e+00]) """
# let's transform the returned NumPy arrays to dataframes for the rest of the example
X_train_scaled = pd.DataFrame(X_train_scaled, columns=X_train.columns)
X_test_scaled = pd.DataFrame(X_test_scaled, columns=X_test.columns)
# let's have a look at the original training dataset: median values
# I use np.round to reduce the number of decimals to 1.
np.round(X_train.median(), 1)
"""
CRIM 0.3
ZN 0.0
INDUS 8.6
CHAS 0.0
NOX 0.5
RM 6.2
AGE 79.4
DIS 3.2
RAD 5.0
TAX 311.0
PTRATIO 19.1
B 391.6
LSTAT 11.2
dtype: float64 """
# let's have a look at the scaled training dataset: median values
# I use np.round to reduce the number of decimals to 1.
np.round(X_train_scaled.median(), 1)
"""
CRIM -0.0
ZN 0.0
INDUS 0.0
CHAS 0.0
NOX 0.0
RM -0.0
AGE 0.0
DIS 0.0
RAD 0.0
TAX 0.0
PTRATIO 0.0
B -0.0
LSTAT 0.0
dtype: float64
- The variables were centered to the median values. A quick sanity check against the formula follows below.
"""
# let's compare the variable distributions before and after scaling
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 5))
# before scaling
ax1.set_title('Before Scaling')
sns.kdeplot(X_train['RM'], ax=ax1)
sns.kdeplot(X_train['LSTAT'], ax=ax1)
sns.kdeplot(X_train['CRIM'], ax=ax1)
# after scaling
ax2.set_title('After Robust Scaling')
sns.kdeplot(X_train_scaled['RM'], ax=ax2)
sns.kdeplot(X_train_scaled['LSTAT'], ax=ax2)
sns.kdeplot(X_train_scaled['CRIM'], ax=ax2)
plt.show()
"""
- The medians of the distributions are centered at zero, but every other parameter may vary across the different variables.
- It does, though, squeeze the value range in the original variables, particularly for those highly skewed, like CRIM.
"""
# let's compare the variable distributions before and after scaling
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 5))
# before scaling
ax1.set_title('Before Scaling')
sns.kdeplot(X_train['AGE'], ax=ax1)
sns.kdeplot(X_train['DIS'], ax=ax1)
sns.kdeplot(X_train['NOX'], ax=ax1)
# after scaling
ax2.set_title('After Robust Scaling')
sns.kdeplot(X_train_scaled['AGE'], ax=ax2)
sns.kdeplot(X_train_scaled['DIS'], ax=ax2)
sns.kdeplot(X_train_scaled['NOX'], ax=ax2)
plt.show()
"""
- Compare this scaling with mean normalisation, to see how each procedure affects the distribution shape; a small sketch
of that comparison follows below.
- That is all for this example. I hope you enjoyed the info, and see you in the next one.
"""
|
#
# Copyright (c) 2021 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import pytest
import logging
from klever.core.vtg.emg.common.c import Function
from klever.core.vtg.emg.common.c.source import Source
from klever.core.vtg.emg.common.process import ProcessCollection
from klever.core.vtg.emg.common.process.serialization import CollectionDecoder
from klever.core.vtg.emg.decomposition.separation import SeparationStrategy
from klever.core.vtg.emg.decomposition.separation.linear import LinearStrategy
from klever.core.vtg.emg.decomposition.modelfactory.selective import SelectiveFactory
MAIN = {
"comment": "Main process.",
"labels": {},
"process": "<root>",
"actions": {
"root": {
"comment": "Some action",
"statements": []
}
}
}
REGISTER = {
"comment": "",
"labels": {"container": {"declaration": "struct validation *var"}},
"process": "[register_p1]",
"actions": {
"register_p1": {"parameters": ["%container%"]}
}
}
DEREGISTER = {
"comment": "",
"labels": {"container": {"declaration": "struct validation *var"}},
"process": "[deregister_p1]",
"actions": {
"deregister_p1": {"parameters": ["%container%"]}
}
}
B1 = {
"comment": "",
"labels": {
"container": {"declaration": "struct validation *var"},
"ret": {"declaration": "int x", "value": "0"}
},
"process": "(!register_p1).{main}",
"actions": {
"main": {
"comment": "",
"process": "<probe>.(<success>.[register_p2] | <fail>.<remove>).{main} | (deregister_p1)"
},
"register_p1": {
"condition": ["$ARG1 != 0"],
"parameters": ['%container%'],
"savepoints": {'s1': {"statements": []}}
},
"probe": {
"comment": "Do probing.",
"statements": ["%ret% = f4(%container%);"]
},
"success": {
"comment": "Successful probing.",
"condition": ["%ret% == 0"]
},
"fail": {
"comment": "Failed probing.",
"condition": ["%ret% != 0"]
},
"deregister_p1": {
"parameters": ['%container%']
},
"remove": {
"comment": "Removing.",
"statements": ["$FREE(%container%);"]
},
"register_p2": {
"parameters": ['%container%']
}
}
}
B2 = {
"comment": "",
"labels": {
"container": {"declaration": "struct validation *var"}
},
"process": "(!register_p2).([read] | [write])",
"actions": {
"register_p2": {
"parameters": ['%container%'],
"savepoints": {'s2': {"statements": []}},
"require": {"c/p1": {"include": ["probe", "success"]}}
},
"read": {"comment": "", "statements": []},
"write": {"comment": "Do write.", "statements": []}
}
}
@pytest.fixture()
def model():
files = ['test.c']
functions = {
'f1': "static int f1(struct test *)",
'f2': "static void f2(struct test *)"
}
source = Source(files, [], dict())
for name, declaration_str in functions.items():
new = Function(name, declaration_str)
new.definition_file = files[0]
source.set_source_function(new, files[0])
spec = {
"name": 'base',
"functions models": {
"f1": REGISTER,
"f2": DEREGISTER,
},
"environment processes": {
"c/p1": B1,
"c/p2": B2
},
"main process": MAIN
}
collection = CollectionDecoder(logging, dict()).parse_event_specification(source,
json.loads(json.dumps(spec)),
ProcessCollection())
return collection
P1 = {
"comment": "",
"labels": {},
"process": "(!register_p1).<init>.(<exit> | <init_failed>)",
"actions": {
"register_p1": {
"parameters": [],
"savepoints": {
'sp_init_first': {"statements": []},
'sp_init_second': {"statements": []},
'sp_init_third': {"statements": []}
}
},
"init": {"comment": ""},
"exit": {"comment": ""},
"init_failed": {"comment": ""}
}
}
REGISTER_P2 = {
"comment": "",
"labels": {},
"process": "[register_p2]",
"actions": {"register_p2": {}}
}
DEREGISTER_P2 = {
"comment": "",
"labels": {},
"process": "[deregister_p2]",
"actions": {"deregister_p2": {}}
}
P2 = {
"comment": "",
"labels": {"ret": {"declaration": "int x"}},
"process": "(!register_p2).{main}",
"actions": {
"main": {
"comment": "Test initialization.",
"process": "<probe>.(<success>.[register_p3].[deregister_p3] | <fail>.<remove>).{main} | (deregister_p2)"
},
"register_p2": {
"parameters": [],
"require": {
"c/p1": {"include": ["init", "exit"]}
}
},
"deregister_p2": {"parameters": []},
"probe": {"comment": ""},
"success": {"comment": "", "condition": ["%ret% == 0"]},
"fail": {"comment": "Failed probing.", "condition": ["%ret% != 0"]},
"remove": {"comment": ""},
"register_p3": {"parameters": []},
"deregister_p3": {"parameters": []}
}
}
P3 = {
"comment": "",
"labels": {},
"process": "(!register_p3).<init>.{scenario1}",
"actions": {
"register_p3": {
"parameters": [],
"savepoints": {
'sp_init_p3': {"statements": [], "comment": "test comment"}
},
"require": {
"c/p2": {"include": ["register_p3", "deregister_p3"]}
}
},
"deregister_p3": {"parameters": []},
"free": {"comment": ""},
"terminate": {"comment": "", "process": "<free>.(deregister_p3)"},
"init": {"comment": ""},
"create": {"comment": ""},
"create_fail": {"comment": ""},
"create2": {"comment": ""},
"create2_fail": {"comment": ""},
"success": {"comment": ""},
"work1": {"comment": ""},
"work2": {"comment": ""},
"register_p4": {"parameters": []},
"deregister_p4": {"parameters": []},
"create_scenario": {
"comment": "",
"process": "<create>.(<success>.({work_scenario} | {p4_scenario}) | <create_fail>.{terminate})"
},
"create2_scenario": {"comment": "", "process": "<create2>.(<create2_fail> | <success>).{terminate}"},
"work_scenario": {"comment": "", "process": "(<work1> | <work2>).{terminate}"},
"p4_scenario": {"comment": "", "process": "[register_p4].[deregister_p4].{terminate}"},
"scenario1": {"comment": "", "process": "{create_scenario} | {create2_scenario}"}
}
}
P4 = {
"comment": "",
"labels": {},
"process": "(!register_p4).<write>.(deregister_p4)",
"actions": {
"register_p4": {
"parameters": [],
"require": {
"c/p3": {"include": ["register_p4"]}
}
},
"deregister_p4": {"parameters": []},
"write": {"comment": ""}
}
}
P5 = {
"comment": "",
"labels": {},
"process": "(!register_p2).(<w1> | <w2>).(deregister_p2)",
"actions": {
"register_p2": {
"parameters": [],
"savepoints": {
'sp_p5': {"statements": []}
}
},
"deregister_p2": {"parameters": []},
"w1": {"comment": ""},
"w2": {"comment": ""}
}
}
P6 = {
"comment": "The process that does not rely on any other.",
"labels": {},
"process": "(!register_unique).(<w1> | <w2>)",
"actions": {
"register_unique": {
"parameters": [],
"savepoints": {
'sp_unique_1': {"statements": []},
'sp_unique_2': {"statements": []}
}
},
"w1": {"comment": ""},
"w2": {"comment": ""}
}
}
@pytest.fixture()
def double_init_model():
files = ['test.c']
functions = {
'f1': "static int f1(struct test *)",
'f2': "static void f2(struct test *)"
}
source = Source(files, [], dict())
for name, declaration_str in functions.items():
new = Function(name, declaration_str)
new.definition_file = files[0]
source.set_source_function(new, files[0])
c1p1 = {
"comment": "Category 1, process 1.",
"process": "(!register_c1p1).<init>.(<ok>.[register_c2p2].[deregister_c2p2] | <fail>)",
"actions": {
"register_c1p1": {
"parameters": [],
"savepoints": {
"s1": {"statements": []}
}
},
"register_c2p2": {"parameters": []},
"deregister_c2p2": {"parameters": []},
"init": {"coment": ""},
"ok": {"coment": ""},
"fail": {"coment": ""}
}
}
c1p2 = {
"comment": "Category 1, process 1.",
"process": "(!register_c1p2).<init>.(<ok> | <fail>)",
"actions": {
"register_c1p2": {
"parameters": [],
"savepoints": {
"basic": {"statements": []}
}
},
"init": {"coment": ""},
"ok": {"coment": ""},
"fail": {"coment": ""}
}
}
c2p1 = {
"comment": "Category 2, process 1.",
"process": "(!register_p1).<probe>.(deregister_p1)",
"labels": {"container": {"declaration": "struct validation *var"}},
"actions": {
"register_p1": {
"parameters": ["%container%"],
"require": {
"c1/p1": {"include": ["ok"]},
"c1/p2": {"include": ["ok"]}
}
},
"deregister_p1": {"parameters": ["%container%"]},
"probe": {"comment": ""},
}
}
c2p2 = {
"comment": "Category 2, process 2.",
"process": "(!register_c2p2).(<v1> | <v2>).(deregister_c2p2)",
"actions": {
"register_c2p2": {
"parameters": [], "require": {"c2/p1": {"include": ["probe"]}}
},
"deregister_c2p2": {"parameters": []},
"v1": {"comment": ""},
"v2": {"comment": ""}
}
}
spec = {
"name": 'test_model',
"functions models": {
"f1": REGISTER,
"f2": DEREGISTER
},
"environment processes": {
"c1/p1": c1p1,
"c1/p2": c1p2,
"c2/p1": c2p1,
"c2/p2": c2p2
}
}
collection = CollectionDecoder(logging, dict()).parse_event_specification(source,
json.loads(json.dumps(spec)),
ProcessCollection())
return collection
@pytest.fixture()
def advanced_model():
files = ['test.c']
functions = {
'f1': "static int f1(struct test *)",
'f2': "static void f2(struct test *)"
}
source = Source(files, [], dict())
for name, declaration_str in functions.items():
new = Function(name, declaration_str)
new.definition_file = files[0]
source.set_source_function(new, files[0])
spec = {
"functions models": {
"f1": REGISTER_P2,
"f2": DEREGISTER_P2,
},
"environment processes": {
"c/p1": P1,
"c/p2": P2,
"c/p3": P3,
"c/p4": P4
}
}
collection = CollectionDecoder(logging, dict()).parse_event_specification(source,
json.loads(json.dumps(spec)),
ProcessCollection())
return collection
@pytest.fixture()
def advanced_model_with_unique():
files = ['test.c']
functions = {
'f1': "static int f1(struct test *)",
'f2': "static void f2(struct test *)"
}
source = Source(files, [], dict())
for name, declaration_str in functions.items():
new = Function(name, declaration_str)
new.definition_file = files[0]
source.set_source_function(new, files[0])
spec = {
"functions models": {
"f1": REGISTER_P2,
"f2": DEREGISTER_P2,
},
"environment processes": {
"c/p1": P1,
"c/p2": P2,
"c/p3": P3,
"c/p4": P4,
"c/p6": P6
}
}
collection = CollectionDecoder(logging, dict()).parse_event_specification(source,
json.loads(json.dumps(spec)),
ProcessCollection())
return collection
@pytest.fixture()
def model_with_independent_process():
files = ['test.c']
functions = {
'f1': "static int f1(struct test *)",
'f2': "static void f2(struct test *)"
}
source = Source(files, [], dict())
for name, declaration_str in functions.items():
new = Function(name, declaration_str)
new.definition_file = files[0]
source.set_source_function(new, files[0])
spec = {
"functions models": {
"f1": REGISTER_P2,
"f2": DEREGISTER_P2,
},
"environment processes": {
"c/p1": P1,
"c/p2": P2,
"c/p5": P5
},
"main process": MAIN
}
collection = CollectionDecoder(logging, dict()).parse_event_specification(source,
json.loads(json.dumps(spec)),
ProcessCollection())
return collection
@pytest.fixture()
def logger():
logger = logging.getLogger(__name__)
    # todo: Uncomment when a log is needed, or configure logging via an ini file
# logger.setLevel(logging.DEBUG)
# handler = logging.StreamHandler(sys.stdout)
# handler.setLevel(logging.DEBUG)
# logger.addHandler(handler)
return logger
def _obtain_model(logger, model, specification):
separation = SelectiveFactory(logger, specification)
scenario_generator = SeparationStrategy(logger, dict())
processes_to_scenarios = {str(process): list(scenario_generator(process)) for process in model.environment.values()}
return processes_to_scenarios, list(separation(processes_to_scenarios, model))
def _obtain_linear_model(logger, model, specification, separate_dispatches=False):
separation = SelectiveFactory(logger, specification)
scenario_generator = LinearStrategy(logger, dict() if not separate_dispatches else
{'add scenarios without dispatches': True})
processes_to_scenarios = {str(process): list(scenario_generator(process)) for process in model.environment.values()}
return processes_to_scenarios, list(separation(processes_to_scenarios, model))
def _to_sorted_attr_str(attrs):
return ", ".join(f"{k}: {attrs[k]}" for k in sorted(attrs.keys()))
def _expect_models_with_attrs(models, attributes):
model_attrs = {_to_sorted_attr_str(m.attributes) for m in models}
attrs = {_to_sorted_attr_str(attrs) for attrs in attributes}
unexpected = model_attrs.difference(attrs)
assert len(unexpected) == 0, f"There are unexpected models: {unexpected}"
missing = attrs.difference(model_attrs)
assert len(missing) == 0, f"There are missing models: {missing}"
def test_default_coverage(logger, advanced_model):
spec = {
"must not contain": {"c/p3": {}},
"must contain": {
"c/p2": {"scenarios only": False}
},
"cover scenarios": {"c/p1": {"savepoints except": []}}
}
processes_to_scenarios, models = _obtain_model(logger, advanced_model, spec)
# Cover all p1 savepoints + base p2 process, expect no p3, p4
p1scenarios = processes_to_scenarios['c/p1']
assert len(p1scenarios) == len(models)
for model in models:
assert 'c/p2' in model.environment
assert 'c/p3' not in model.environment
assert 'c/p4' not in model.environment
def test_inclusion_p2(logger, model):
spec = {
"must contain": {"c/p2": {}},
"cover scenarios": {"c/p2": {}}
}
processes_to_scenarios, models = _obtain_linear_model(logger, model, spec)
    # Cover all c/p2 scenarios
p2scenarios = processes_to_scenarios['c/p2']
assert len(p2scenarios) == len(models)
actions = [m.environment['c/p2'].actions for m in models if 'c/p2' in m.environment] + \
[m.entry.actions for m in models]
for scenario in p2scenarios:
assert scenario.actions in actions
def test_inclusion_p1(logger, model):
spec = {
"must contain": {"c/p1": {}},
"cover scenarios": {"c/p1": {}}
}
processes_to_scenarios, models = _obtain_linear_model(logger, model, spec)
    # Cover all scenarios from c/p1
p1scenarios = processes_to_scenarios['c/p1']
assert len(p1scenarios) == len(models)
actions = [m.environment['c/p1'].actions for m in models if 'c/p1' in m.environment] + \
[m.entry.actions for m in models]
for scenario in p1scenarios:
assert scenario.actions in actions
    # No savepoints from c/p2
c2p2_withsavepoint = [s for s in processes_to_scenarios['c/p2'] if s.savepoint].pop()
for model in models:
if model.entry.actions == c2p2_withsavepoint.actions:
assert False, f"Model {model.attributed_name} has a savepoint from p2"
def test_deletion(logger, model):
spec = {
"must not contain": {"c/p2": {}},
"cover scenarios": {"c/p1": {}}
}
processes_to_scenarios, models = _obtain_linear_model(logger, model, spec)
# Cover all scenarios from p1
p1scenarios = {s for s in processes_to_scenarios['c/p1']}
assert len(p1scenarios) == len(models)
actions = [m.environment['c/p1'].actions for m in models if 'c/p1' in m.environment] + \
[m.entry.actions for m in models]
for scenario in p1scenarios:
assert scenario.actions in actions
# No savepoints from p2
p2_withsavepoint = [s for s in processes_to_scenarios['c/p2'] if s.savepoint].pop()
    assert all(p2_withsavepoint.actions != m.entry.actions for m in models)
# No other actions
for model in models:
assert 'c/p2' not in model.environment
def test_complex_restrictions(logger, model):
spec = {
"must contain": {"c/p2": {"actions": [["read"]]}},
"must not contain": {"c/p1": {"savepoints": ["s1"]},
"c/p2": {"actions": [["write"]]}},
"cover scenarios": {"c/p2": {}}
}
processes_to_scenarios, models = _obtain_linear_model(logger, model, spec)
# Cover only scenarios with read from p2
scenarios_with_read = [s for s in processes_to_scenarios['c/p2'] if 'write' not in s.actions]
assert len(models) == len(scenarios_with_read)
actions = [m.environment['c/p2'].actions for m in models if 'c/p2' in m.environment] + \
[m.entry.actions for m in models if 'c/p2' not in m.environment]
for model_actions in actions:
assert 'write' not in model_actions
assert 'read' in model_actions
# No scenarios with a savepoint p1s1
p1_withsavepoint = [s for s in processes_to_scenarios['c/p1'] if s.savepoint].pop()
    assert all(p1_withsavepoint.actions != m.entry.actions for m in models)
def test_controversial_conditions(logger, model):
spec = {
"must contain": {"c/p2": {}},
"must not contain": {"c/p1": {}},
"cover scenarios": {"c/p1": {}}
}
with pytest.raises(ValueError):
_obtain_linear_model(logger, model, spec)
spec = {
"must contain": {"c/p2": {}},
"must not contain": {"c/p1": {}, "c/p2": {"savepoints": []}},
"cover scenarios": {"c/p2": {}}
}
with pytest.raises(ValueError):
_obtain_linear_model(logger, model, spec)
def test_complex_exclusion(logger, model):
spec = {
"must contain": {"c/p1": {}},
"must not contain": {"c/p1": {"actions": [["probe", "success"]]}},
"cover scenarios": {"c/p1": {}}
}
processes_to_scenarios, models = _obtain_linear_model(logger, model, spec)
relevant_scenarios = [s.actions for s in processes_to_scenarios['c/p1']
if not {"probe", "success"}.issubset(set(s.actions.keys()))]
# Test the number of models
assert len(models) == len(relevant_scenarios)
# Test that there is a p1 model in models
actions = [m.environment['c/p1'].actions for m in models if 'c/p1' in m.environment] + \
[m.entry.actions for m in models if 'c/p1' not in m.environment]
# Test all scenarios of p1 are covered
assert len(actions) == len(relevant_scenarios)
for scenario_actions in relevant_scenarios:
assert scenario_actions in actions
def test_cover_actions(logger, model):
spec = {
"cover scenarios": {"c/p1": {"actions": ["probe"], "savepoints": []}}
}
processes_to_scenarios, models = _obtain_linear_model(logger, model, spec)
assert len(models) == 1
model = models.pop()
if 'c/p1' in model.environment:
actions = model.environment['c/p1'].actions
else:
actions = model.entry.actions
assert "probe" in actions
def test_cover_savepoint(logger, model):
spec = {
"must contain": {"c/p1": {"savepoints": ["s1"]}},
"cover scenarios": {"c/p1": {"savepoints": ["s1"]}}
}
processes_to_scenarios, models = _obtain_linear_model(logger, model, spec)
relevant_scenarios = [s.actions for s in processes_to_scenarios['c/p1'] if s.savepoint]
assert len(relevant_scenarios) == len(models)
for model in models:
assert "c/p1" not in model.environment
assert "s1" in model.entry.actions
def test_cover_except_savepoint(logger, model):
spec = {
"must contain": {"c/p1": {}},
"cover scenarios": {"c/p1": {"savepoints except": ["s1"]}}
}
processes_to_scenarios, models = _obtain_linear_model(logger, model, spec)
relevant_scenarios = [s.actions for s in processes_to_scenarios['c/p1'] if not s.savepoint]
assert len(relevant_scenarios) == len(models)
model_actions = [m.environment['c/p1'].actions for m in models]
for relevant in relevant_scenarios:
assert relevant in model_actions
def test_cover_except_actions(logger, model):
spec = {
"must contain": {"c/p2": {}},
"cover scenarios": {"c/p2": {"actions except": ["read"], "savepoints": []}}
}
processes_to_scenarios, models = _obtain_linear_model(logger, model, spec)
relevant_scenarios = [s.actions for s in processes_to_scenarios['c/p2']
if not s.savepoint and "read" not in s.actions]
assert len(relevant_scenarios) == len(models)
model_actions = [m.environment['c/p2'].actions for m in models if 'c/p2' in m.environment] +\
[m.entry.actions for m in models if 'c/p2' not in m.environment]
for relevant in relevant_scenarios:
assert relevant in model_actions
def test_missing_keys(logger, model):
error_specs = [
{
"must contain": {"c/p3": {}},
"cover scenarios": {"c/p1": {}}
},
{
"cover scenarios": {"c/p3": {}}
},
{
"must not contain": {"c/p3": {}},
"cover scenarios": {"c/p1": {}}
},
{
"cover scenarios": {"c/p1": {"savepoints": ["x"]}}
},
{
"cover scenarios": {"c/p1": {"actions": ["x"]}}
},
{
"cover scenarios": {"c/p1": {"savepoints except": ["x"]}}
},
{
"cover scenarios": {"c/p1": {"actions except": ["x"]}}
},
{
"must contain": {"c/p1": {"actions": [['']]}},
"cover scenarios": {"c/p1": {}}
},
{
"must contain": {"c/p1": {"actions": ['']}},
"cover scenarios": {"c/p1": {}}
},
{
"must contain": {"c/p1": {"savepoints": [['']]}},
"cover scenarios": {"c/p1": {}}
},
{
"must contain": {"c/p1": {"savepoints": ['x']}},
"cover scenarios": {"c/p1": {}}
},
{
"must not contain": {"c/p1": {"actions": ['']}},
"cover scenarios": {"c/p1": {}}
},
{
"must not contain": {"c/p1": {"savepoints": [['']]}},
"cover scenarios": {"c/p1": {}}
},
{
"must not contain": {"c/p1": {"savepoints": ['x']}},
"cover scenarios": {"c/p1": {}}
}
]
for spec in error_specs:
with pytest.raises(AssertionError):
_obtain_linear_model(logger, model, spec)
def test_combinations_with_transitive_dependencies(logger, advanced_model):
spec = {
"must contain": {"c/p3": {}},
"cover scenarios": {"c/p3": {"actions": ["create2", "success"]}}
}
processes_to_scenarios, models = _obtain_linear_model(logger, advanced_model, spec)
p3scenarios = {s for s in processes_to_scenarios['c/p3'] if {"create2", "success"}.issubset(set(s.actions.keys()))}
assert len(p3scenarios) == len(models)
actions = [m.environment['c/p3'].actions for m in models if 'c/p3' in m.environment] + \
[m.entry.actions for m in models]
for scenario in p3scenarios:
assert scenario.actions in actions
def test_savepoints_with_deps(logger, advanced_model):
spec = {
"cover scenarios": {
"c/p1": {"savepoints only": True}
}
}
processes_to_scenarios, models = _obtain_linear_model(logger, advanced_model, spec)
p1scenarios = {s for s in processes_to_scenarios['c/p1'] if s.savepoint}
assert len(models) == len(p1scenarios)
names = [m.attributes['c/p1'] for m in models]
for scenario in p1scenarios:
assert scenario.name in names
def test_savepoints_with_mc_deps(logger, advanced_model):
spec = {
"must contain": {"c/p3": {}},
"cover scenarios": {
"c/p1": {"savepoints only": True},
"c/p3": {"actions": ["create2", "success"], "savepoints": []}
}
}
processes_to_scenarios, models = _obtain_linear_model(logger, advanced_model, spec)
p1scenarios = {s for s in processes_to_scenarios['c/p1'] if s.savepoint and 'exit' in s.actions}
assert len(models) == len(p1scenarios)
names = [m.attributes['c/p1'] for m in models]
for scenario in p1scenarios:
assert scenario.name in names
def test_combinations_with_savepoints_only(logger, advanced_model):
spec = {
"cover scenarios": {
"c/p1": {"savepoints only": True},
"c/p3": {"actions": ["create2", "success"], "savepoints only": True}}
}
processes_to_scenarios, models = _obtain_linear_model(logger, advanced_model, spec)
p1scenarios = {s for s in processes_to_scenarios['c/p1'] if s.savepoint}
p3scenarios = {s for s in processes_to_scenarios['c/p3']
if s.savepoint and {"create2", "success"}.issubset(set(s.actions.keys()))}
assert len(models) == (len(p1scenarios) + len(p3scenarios))
names = [m.attributes['c/p1'] for m in models if m.attributes.get('c/p1')]
for scenario in p1scenarios:
assert scenario.name in names
names = [m.attributes['c/p3'] for m in models if m.attributes.get('c/p3')]
for scenario in p3scenarios:
assert scenario.name in names
def test_combinations_with_extra_dependencies(logger, advanced_model):
spec = {
"cover scenarios": {"c/p2": {}, "c/p3": {"actions": ["create2", "success"], "savepoints only": True}}
}
processes_to_scenarios, models = _obtain_linear_model(logger, advanced_model, spec)
# Cover all scenarios from p1
p3scenarios = {s for s in processes_to_scenarios['c/p3']
if s.savepoint and {"create2", "success"}.issubset(set(s.actions.keys()))}
p2scenarios = {s for s in processes_to_scenarios['c/p2']}
assert len(models) <= (len(p3scenarios) + len(p2scenarios))
names = [m.attributes['c/p3'] for m in models if m.attributes.get('c/p3')]
for scenario in p3scenarios:
assert scenario.name in names
names = [m.attributes['c/p2'] for m in models if m.attributes.get('c/p2')]
for scenario in p2scenarios:
assert scenario.name in names
def test_savepoints_only_with_deps(logger, advanced_model):
spec = {
"cover scenarios": {
"c/p1": {"savepoints only": True},
"c/p3": {"actions": ["create2", "success"]}
}
}
processes_to_scenarios, models = _obtain_linear_model(logger, advanced_model, spec)
p1scenarios = {s for s in processes_to_scenarios['c/p1'] if s.savepoint}
p3scenarios = {s for s in processes_to_scenarios['c/p3']
if s.savepoint and {"create2", "success"}.issubset(set(s.actions.keys()))}
p1scenarios_for_p3 = {s for s in processes_to_scenarios['c/p1'] if s.savepoint and "exit" in s.actions}
assert len(models) <= (len(p1scenarios) + len(p1scenarios_for_p3) + len(p3scenarios))
names = [m.attributes['c/p3'] for m in models if m.attributes.get('c/p3')]
for scenario in p3scenarios:
assert scenario.name in names
    names = [m.attributes['c/p1'] for m in models if m.attributes.get('c/p1')]
for scenario in p1scenarios:
assert scenario.name in names
def test_savepoints_without_base_actions(logger, advanced_model):
spec = {
"cover scenarios": {
"c/p1": {"actions": ["exit"], "savepoints only": True},
"c/p3": {"actions": ["create2", "success"], "savepoints only": True}
}
}
processes_to_scenarios, models = _obtain_linear_model(logger, advanced_model, spec)
p1scenarios = {s for s in processes_to_scenarios['c/p1'] if s.savepoint and
{"exit"}.issubset(set(s.actions.keys()))}
p3scenarios = {s for s in processes_to_scenarios['c/p3']
if s.savepoint and {"create2", "success"}.issubset(set(s.actions.keys()))}
assert len(models) <= (len(p1scenarios) + len(p3scenarios))
names = [m.attributes['c/p3'] for m in models if m.attributes.get('c/p3')]
for scenario in p3scenarios:
assert scenario.name in names
    names = [m.attributes['c/p1'] for m in models if m.attributes.get('c/p1')]
for scenario in p1scenarios:
assert scenario.name in names
def test_all_process_savepoints_and_actions_without_base(logger, advanced_model):
spec = {
"cover scenarios": {
"c/p1": {"savepoints only": True},
"c/p2": {},
"c/p3": {"savepoints only": True},
"c/p4": {}
}
}
processes_to_scenarios, models = _obtain_linear_model(logger, advanced_model, spec, separate_dispatches=True)
# Check attributes
for model in models:
assert len(model.attributes) == 4
s1 = {s for s in processes_to_scenarios['c/p1'] if s.savepoint}
s3 = {s for s in processes_to_scenarios['c/p3'] if s.savepoint}
s2 = set(processes_to_scenarios['c/p2'])
s4 = set(processes_to_scenarios['c/p4'])
names = ['c/p1', 'c/p2', 'c/p3', 'c/p4']
for name, scenarios in zip(names, [s1, s2, s3, s4]):
model_scenarios = {m.attributes[name] for m in models}
assert {s.name for s in scenarios}.issubset(model_scenarios)
def test_advanced_model_with_unique_processes(logger, advanced_model_with_unique):
spec = {
"cover scenarios": {
"c/p6": {"savepoints only": True}
}
}
processes_to_scenarios, models = _obtain_linear_model(logger, advanced_model_with_unique, spec,
separate_dispatches=True)
model_attrs = {_to_sorted_attr_str(m.attributes) for m in models}
expected = [
{"c/p1": "Removed", "c/p2": "Removed", "c/p3": "Removed", "c/p4": "Removed", "c/p6": "sp_unique_2 with w2"},
{"c/p1": "Removed", "c/p2": "Removed", "c/p3": "Removed", "c/p4": "Removed", "c/p6": "sp_unique_2 with w1"},
{"c/p1": "Removed", "c/p2": "Removed", "c/p3": "Removed", "c/p4": "Removed", "c/p6": "sp_unique_1 with w2"},
{"c/p1": "Removed", "c/p2": "Removed", "c/p3": "Removed", "c/p4": "Removed", "c/p6": "sp_unique_1 with w1"}
]
_expect_models_with_attrs(models, expected)
def test_process_without_deps(logger, model_with_independent_process):
spec = {
"must not contain": {"c/p1": {"savepoints": ["sp_init_first", "sp_init_second", "sp_init_third"]}},
"cover scenarios": {
"c/p5": {}
}
}
processes_to_scenarios, models = _obtain_linear_model(logger, model_with_independent_process, spec,
separate_dispatches=True)
p5scenarios = set(processes_to_scenarios['c/p5'])
assert len(models) == len(p5scenarios)
names = [m.attributes['c/p5'] for m in models if m.attributes.get('c/p5')]
for scenario in p5scenarios:
assert scenario.name in names
def test_process_ignoring_free_process(logger, model_with_independent_process):
spec = {
"cover scenarios": {
"c/p1": {"savepoints only": True},
"c/p2": {"actions": ["fail"]}
}
}
processes_to_scenarios, models = _obtain_linear_model(logger, model_with_independent_process, spec,
separate_dispatches=True)
s1 = {s for s in processes_to_scenarios['c/p1'] if s.savepoint}
s3 = {s for s in processes_to_scenarios['c/p2'] if 'fail' in s.actions}
assert len(models) == len(s1)
names = [m.attributes['c/p1'] for m in models if m.attributes.get('c/p1')]
for scenario in s1:
assert scenario.name in names
names = [m.attributes['c/p2'] for m in models if m.attributes.get('c/p2')]
for scenario in s3:
assert scenario.name in names
def test_combine_free_and_dependent_processes(logger, model_with_independent_process):
spec = {
"cover scenarios": {
"c/p5": {},
"c/p2": {"actions": ["fail"]}
}
}
processes_to_scenarios, models = _obtain_linear_model(logger, model_with_independent_process, spec,
separate_dispatches=True)
s5 = {s for s in processes_to_scenarios['c/p5']}
s2 = {s for s in processes_to_scenarios['c/p2'] if 'fail' in s.actions}
assert len(models) == len(s5)
names = [m.attributes['c/p5'] for m in models if m.attributes.get('c/p5')]
for scenario in s5:
assert scenario.name in names
names = [m.attributes['c/p2'] for m in models if m.attributes.get('c/p2')]
for scenario in s2:
assert scenario.name in names
def test_double_sender_model_single_init(logger, double_init_model):
spec = {
"cover scenarios": {
"c1/p1": {"savepoints only": True},
"c2/p2": {}
}
}
processes_to_scenarios, models = _obtain_linear_model(logger, double_init_model, spec)
expected = [
{'c2/p2': 'Removed', 'c2/p1': 'Removed', 'c1/p1': 's1 with fail', 'c1/p2': 'Removed'},
{'c2/p2': 'v1', 'c1/p1': 's1 with ok', 'c2/p1': 'base', 'c1/p2': 'Removed'},
{'c2/p2': 'v2', 'c1/p1': 's1 with ok', 'c2/p1': 'base', 'c1/p2': 'Removed'}
]
_expect_models_with_attrs(models, expected)
def test_double_sender_model(logger, double_init_model):
spec = {
"cover scenarios": {
"c1/p1": {"savepoints only": True},
"c1/p2": {"savepoints only": True},
"c2/p2": {}
}
}
processes_to_scenarios, models = _obtain_linear_model(logger, double_init_model, spec)
expected = [
{'c2/p2': 'Removed', 'c1/p1': 'Removed', 'c2/p1': 'Removed', 'c1/p2': 'basic with fail'},
{'c2/p2': 'Removed', 'c2/p1': 'Removed', 'c1/p1': 'Removed', 'c1/p2': 'basic with ok'},
{'c2/p2': 'Removed', 'c2/p1': 'Removed', 'c1/p1': 's1 with fail', 'c1/p2': 'Removed'},
{'c2/p2': 'v1', 'c1/p1': 's1 with ok', 'c1/p2': 'Removed', 'c2/p1': 'base'},
{'c2/p2': 'v2', 'c1/p1': 's1 with ok', 'c1/p2': 'Removed', 'c2/p1': 'base'}
]
_expect_models_with_attrs(models, expected)
def test_double_sender_model_full_list(logger, double_init_model):
spec = {
"cover scenarios": {
"c1/p1": {"savepoints only": True},
"c1/p2": {"savepoints only": True},
"c2/p1": {},
"c2/p2": {}
}
}
processes_to_scenarios, models = _obtain_linear_model(logger, double_init_model, spec)
expected = [
{'c2/p2': 'Removed', 'c2/p1': 'Removed', 'c1/p1': 'Removed', 'c1/p2': 'basic with fail'},
{'c2/p2': 'Removed', 'c2/p1': 'base', 'c1/p1': 'Removed', 'c1/p2': 'basic with ok'},
{'c2/p2': 'Removed', 'c2/p1': 'Removed', 'c1/p1': 's1 with fail', 'c1/p2': 'Removed'},
{'c2/p2': 'v1', 'c2/p1': 'base', 'c1/p1': 's1 with ok', 'c1/p2': 'Removed'},
{'c2/p2': 'v2', 'c2/p1': 'base', 'c1/p1': 's1 with ok', 'c1/p2': 'Removed'}
]
_expect_models_with_attrs(models, expected)
|
from builtins import object
import os
from vnc_api_test import *
from tcutils.config.vnc_introspect_utils import *
from tcutils.config.svc_mon_introspect_utils import SvcMonInspect
from tcutils.control.cn_introspect_utils import *
from tcutils.agent.vna_introspect_utils import *
from tcutils.collector.opserver_introspect_utils import *
from tcutils.collector.analytics_tests import *
from tcutils.go.go_server_utils import *
from tcutils.collector.policy_generator_tests import PolicyGeneratorClient
from tcutils.kubernetes.k8s_introspect_utils import KubeManagerInspect
from vnc_api.vnc_api import *
from tcutils.vdns.dns_introspect_utils import DnsAgentInspect
from tcutils.util import custom_dict, get_plain_uuid
from openstack import OpenstackAuth, OpenstackOrchestrator
from vcenter import VcenterAuth, VcenterOrchestrator
from vro import VroWorkflows
from common.contrail_test_init import ContrailTestInit
from vcenter_gateway import VcenterGatewayOrch
from tcutils.util import retry
try:
from tcutils.kubernetes.api_client import Client as Kubernetes_client
except ImportError:
pass
try:
from tcutils.kubernetes.openshift_client import Client as Openshift_client
except ImportError:
pass
try:
from webui.ui_login import UILogin
except ImportError:
pass
class ContrailConnections(object):
def __init__(self, inputs=None, logger=None, project_name=None,
username=None, password=None, domain_name=None, input_file=None, domain_obj=None,scope='domain'):
self.inputs = inputs or ContrailTestInit(input_file,
stack_tenant=project_name)
self.project_name = project_name or self.inputs.project_name
self.domain_name = domain_name or self.inputs.domain_name
self.orch_domain_name = domain_name or self.inputs.domain_name
if self.orch_domain_name == 'Default':
self.domain_name = 'default-domain'
self.scope = scope
self.username = username or self.inputs.stack_user
self.password = password or self.inputs.stack_password
self.logger = logger or self.inputs.logger
self.nova_h = None
self.quantum_h = None
self.vnc_lib_fixture = None
self.ironic_h = None
self.swift_h = None
self.api_server_inspects = custom_dict(self.get_api_inspect_handle,
'api_inspect:'+self.project_name+':'+self.username)
self.dnsagent_inspect = custom_dict(self.get_dns_agent_inspect_handle,
'dns_inspect')
self.agent_inspect = custom_dict(self.get_vrouter_agent_inspect_handle,
'agent_inspect')
self.ops_inspects = custom_dict(self.get_opserver_inspect_handle,
'ops_inspect:'+self.project_name+':'+self.username)
self.cn_inspect = custom_dict(self.get_control_node_inspect_handle,
'cn_inspect')
self.k8s_cluster = self.get_k8s_cluster()
self.k8s_client = self.get_k8s_api_client_handle()
# ToDo: msenthil/sandipd rest of init needs to be better handled
self.domain_id = None
if self.inputs.domain_isolation:
#get admin auth to list domains and get domain_id
auth = self.get_auth_h(username = self.inputs.admin_username,
password=self.inputs.admin_password,
project_name=self.inputs.admin_tenant,
domain_name=self.inputs.admin_domain)
self.domain_id = auth.get_domain_id(self.domain_name)
self.auth = self.get_auth_h()
self.vnc_lib = self.get_vnc_lib_h()
self.project_id = self.get_project_id()
if self.inputs.orchestrator == 'openstack':
if self.inputs.verify_thru_gui():
self.ui_login = UILogin(self, self.inputs, project_name, username, password)
self.browser = self.ui_login.browser
self.browser_openstack = self.ui_login.browser_openstack
self.orch = OpenstackOrchestrator(inputs=self.inputs,
vnclib=self.vnc_lib,
logger=self.logger,
auth_h=self.auth
)
self.ironic_h = self.orch.get_ironic_handler()
self.nova_h = self.orch.get_compute_handler()
self.swift_h = self.orch.get_swift_handler()
self.quantum_h = self.orch.get_network_handler()
self.glance_h = self.orch.get_image_handler()
elif self.inputs.orchestrator == 'vcenter':
self.orch = VcenterOrchestrator(user=self.inputs.vcenter_username,
pwd= self.inputs.vcenter_password,
host=self.inputs.vcenter_server,
port=self.inputs.vcenter_port,
dc_name=self.inputs.vcenter_dc,
vnc=self.vnc_lib,
inputs=self.inputs,
logger=self.logger)
if self.inputs.vro_server:
self.vro_orch = VroWorkflows(user=self.inputs.vcenter_username,
pwd= self.inputs.vcenter_password,
host=self.inputs.vcenter_server,
port=self.inputs.vcenter_port,
dc_name=self.inputs.vcenter_dc,
vnc=self.vnc_lib,
inputs=self.inputs,
logger=self.logger)
elif self.inputs.orchestrator == 'kubernetes':
self.orch = None
if self.inputs.vcenter_gw_setup: # vcenter_gateway
self.slave_orch = VcenterGatewayOrch(user=self.inputs.vcenter_username,
pwd=self.inputs.vcenter_password,
host=self.inputs.vcenter_server,
port=int(self.inputs.vcenter_port),
dc_name=self.inputs.vcenter_dc,
vnc=self.vnc_lib,
inputs=self.inputs,
logger=self.logger)
self._kube_manager_inspect = None
# end __init__
def get_project_id(self, project_name=None):
project_name = project_name or self.project_name
auth = self.get_auth_h(project_name=project_name)
if auth:
return auth.get_project_id(project_name or self.project_name,
self.domain_id)
else:
return self.vnc_lib_fixture.project_id if self.vnc_lib_fixture else None
def get_auth_h(self, refresh=False, project_name=None,
username=None, password=None, domain_name=None):
project_name = project_name or self.project_name
username = username or self.username
password = password or self.password
attr = '_auth_'+project_name+'_'+username
if not getattr(env, attr, None) or refresh:
if self.inputs.orchestrator == 'openstack':
env[attr] = OpenstackAuth(username, password,
project_name, self.inputs, self.logger,
domain_name=domain_name or self.orch_domain_name,
scope=self.scope)
elif self.inputs.orchestrator == 'vcenter':
env[attr] = VcenterAuth(username, password,
project_name, self.inputs)
# elif self.inputs.orchestrator == 'kubernetes':
# env[attr] = self.get_k8s_api_client_handle()
return env.get(attr)
def get_vnc_lib_h(self, refresh=False):
attr = '_vnc_lib_fixture_' + self.project_name + '_' + self.username
cfgm_ip = self.inputs.command_server_ip or self.inputs.api_server_ip or \
self.inputs.cfgm_ip
api_server_url = self.go_config_proxy_url
api_server_port = self.inputs.go_server_port if self.inputs.command_server_ip \
else self.inputs.api_server_port
insecure = True if self.inputs.command_server_ip else self.inputs.insecure
use_ssl = False
if self.inputs.command_server_ip:
use_ssl = True
if self.inputs.api_protocol == 'https':
use_ssl = True
if not getattr(env, attr, None) or refresh:
            if self.inputs.orchestrator == 'openstack':
domain = self.orch_domain_name
else:
domain = self.domain_name
env[attr] = VncLibFixture(
username=self.username, password=self.password,
domain=domain, project_name=self.project_name,
inputs=self.inputs,
cfgm_ip=cfgm_ip,
project_id=self.get_project_id(),
api_server_port=api_server_port,
api_server_url=api_server_url,
orchestrator=self.inputs.orchestrator,
certfile = self.inputs.keystonecertfile,
keyfile = self.inputs.keystonekeyfile,
cacert = self.inputs.certbundle,
insecure = insecure,
use_ssl = use_ssl,
logger=self.logger)
env[attr].setUp()
self.vnc_lib_fixture = env[attr]
self.vnc_lib = self.vnc_lib_fixture.get_handle()
return self.vnc_lib
def get_policy_generator_handle(self):
if not self.inputs.policy_generator_ips:
return None
return PolicyGeneratorClient(inputs=self.inputs, logger=self.logger)
def get_go_client_handle(self):
if not self.inputs.command_server_ip:
return None
return GoApiInspect(self.inputs.command_server_ip,
port=self.inputs.go_server_port,
inputs=self.inputs,
logger=self.logger)
def get_api_inspect_handle(self, host):
cfgm_ip = self.inputs.command_server_ip or self.inputs.api_server_ip
if cfgm_ip:
host = cfgm_ip
api_protocol = 'https' if self.inputs.command_server_ip else self.inputs.api_protocol
api_server_port = self.inputs.go_server_port if self.inputs.command_server_ip \
else self.inputs.api_server_port
insecure = True if self.inputs.command_server_ip else self.inputs.insecure
if host not in self.api_server_inspects:
self.api_server_inspects[host] = VNCApiInspect(host,
inputs=self.inputs,
port=api_server_port,
protocol=api_protocol,
base_url=self.go_config_proxy_url,
insecure=insecure,
logger=self.logger)
return self.api_server_inspects[host]
def get_control_node_inspect_handle(self, host):
if host not in self.cn_inspect:
self.cn_inspect[host] = ControlNodeInspect(host,
self.inputs.bgp_port,
logger=self.logger,
args=self.inputs,
protocol=self.inputs.introspect_protocol)
return self.cn_inspect[host]
def get_dns_agent_inspect_handle(self, host):
if host not in self.dnsagent_inspect:
self.dnsagent_inspect[host] = DnsAgentInspect(host,
self.inputs.dns_port,
logger=self.logger,
args=self.inputs,
protocol=self.inputs.introspect_protocol)
return self.dnsagent_inspect[host]
def get_vrouter_agent_inspect_handle(self, host):
if host not in self.agent_inspect:
self.agent_inspect[host] = AgentInspect(host,
port=self.inputs.agent_port,
logger=self.logger,
inputs=self.inputs,
protocol=self.inputs.introspect_protocol)
return self.agent_inspect[host]
def get_opserver_inspect_handle(self, host):
#ToDo: WA till scripts are modified to use ip rather than hostname
ip = host if is_v4(host) else self.inputs.get_host_ip(host)
collector_ip = self.inputs.command_server_ip or self.inputs.analytics_api_ip
if collector_ip:
ip = collector_ip
port = self.inputs.go_server_port if self.inputs.command_server_ip \
else self.inputs.analytics_api_port
protocol = 'https' if self.inputs.command_server_ip else \
self.inputs.analytics_api_protocol
insecure = True if self.inputs.command_server_ip else self.inputs.insecure
if ip not in self.ops_inspects:
self.ops_inspects[ip] = VerificationOpsSrv(ip,
port=port,
protocol=protocol,
base_url=self.go_analytics_proxy_url,
insecure=insecure,
logger=self.logger,
inputs=self.inputs)
return self.ops_inspects[ip]
def get_k8s_cluster(self):
if self.inputs.slave_orchestrator != 'kubernetes':
return None
if not getattr(self, 'k8s_cluster', None):
self.k8s_cluster = None
for clus in self.inputs.k8s_clusters:
if clus['name'] == self.project_name:
self.k8s_cluster = clus
break
return self.k8s_cluster
def get_k8s_api_client_handle(self):
if self.inputs.orchestrator != 'kubernetes' and \
self.inputs.slave_orchestrator != 'kubernetes' and \
self.inputs.additional_orchestrator != 'kubernetes':
return None
if not getattr(self, 'k8s_client', None):
if self.inputs.deployer == 'openshift':
self.k8s_client = Openshift_client(self.inputs.kube_config_file,
self.logger)
elif self.inputs.slave_orchestrator == 'kubernetes' and self.k8s_cluster:
self.k8s_client = Kubernetes_client(
cluster=self.k8s_cluster,
logger=self.logger)
else:
self.k8s_client = Kubernetes_client(
self.inputs.kube_config_file,
self.logger)
return self.k8s_client
# end get_k8s_api_client_handle
def get_svc_mon_h(self, refresh=False):
if not getattr(self, '_svc_mon_inspect', None) or refresh:
for cfgm_ip in self.inputs.cfgm_ips:
#contrail-status would increase run time hence netstat approach
cmd = 'netstat -antp | grep :8088 | grep LISTEN'
if 'LISTEN' in self.inputs.run_cmd_on_server(cfgm_ip, cmd, container='svc-monitor'):
self._svc_mon_inspect = SvcMonInspect(cfgm_ip,
logger=self.logger,
args=self.inputs,
protocol=self.inputs.introspect_protocol)
break
return self._svc_mon_inspect
@retry(delay=3, tries=10)
def _get_kube_manager_h(self, refresh=False):
if self.k8s_cluster:
self._kube_manager_inspect = KubeManagerInspect(
self.k8s_cluster['master_public_ip'],
logger=self.logger,
args=self.inputs,
protocol=self.inputs.introspect_protocol)
return True
for km_ip in self.inputs.kube_manager_ips:
#contrail-status would increase run time hence netstat approach
cmd = 'netstat -antp | grep :%s | grep LISTEN' % self.inputs.k8s_port
if 'LISTEN' in self.inputs.run_cmd_on_server(km_ip, cmd,
container='contrail-kube-manager'):
self._kube_manager_inspect = KubeManagerInspect(km_ip,
logger=self.logger,
args=self.inputs,
protocol=self.inputs.introspect_protocol)
return True
return False
# end get_kube_manager_h
def get_kube_manager_h(self, refresh=False):
if not getattr(self, '_kube_manager_inspect', None) or refresh:
self._kube_manager_inspect = None
self._get_kube_manager_h(refresh=refresh)
msg = "Kubernetes manager service is not up"
assert self._kube_manager_inspect is not None, msg
return self._kube_manager_inspect
@property
def policy_generator_handle(self):
if not getattr(self, '_policygen', None):
self._policygen = self.get_policy_generator_handle()
return self._policygen
@property
def go_api_handle(self):
if not getattr(self, '_go_api_handle', None):
self._go_api_handle = self.get_go_client_handle()
return self._go_api_handle
@property
def go_cluster_id(self):
if not self.go_api_handle:
return None
if not getattr(self, '_go_cluster_id', None):
self._go_cluster_id = self.go_api_handle.get_cluster_id()
return self._go_cluster_id
@property
def go_config_proxy_url(self):
if not self.go_api_handle:
return '/'
if not getattr(self, '_config_proxy_url', None):
self._config_proxy_url = '/proxy/%s/config/'%self.go_cluster_id
return self._config_proxy_url
@property
def go_analytics_proxy_url(self):
if not self.go_api_handle:
return '/'
if not getattr(self, '_analytics_proxy_url', None):
self._analytics_proxy_url = '/proxy/%s/telemetry/'%self.go_cluster_id
return self._analytics_proxy_url
@property
def api_server_inspect(self):
if not getattr(self, '_api_server_inspect', None):
self._api_server_inspect = self.api_server_inspects[
self.inputs.cfgm_ips[0]]
return self._api_server_inspect
@api_server_inspect.setter
def api_server_inspect(self, value):
self._api_server_inspect = value
@property
def ops_inspect(self):
if not getattr(self, '_ops_inspect', None):
self._ops_inspect = self.ops_inspects[self.inputs.collector_ips[0]]
return self._ops_inspect
@ops_inspect.setter
def ops_inspect(self, value):
self._ops_inspect = value
@property
def analytics_obj(self):
if not getattr(self, '_analytics_obj', None):
self._analytics_obj = AnalyticsVerification(self.inputs,
self.cn_inspect, self.agent_inspect,
self.ops_inspects, logger=self.logger)
return self._analytics_obj
@analytics_obj.setter
def analytics_obj(self, value):
self._analytics_obj = value
def update_inspect_handles(self):
self.api_server_inspects.clear()
self.cn_inspect.clear()
self.dnsagent_inspect.clear()
self.agent_inspect.clear()
self.ops_inspects.clear()
self._svc_mon_inspect = None
self._api_server_inspect = None
self._ops_inspect = None
self._analytics_obj = None
self._kube_manager_inspect = None
# end update_inspect_handles
def update_vnc_lib_fixture(self):
self.vnc_lib = self.get_vnc_lib_h(refresh=True)
# end update_vnc_lib_fixture()
def set_vrouter_config_encap(self, encap1=None, encap2=None, encap3=None):
return self.update_vrouter_config_encap(encap1, encap2, encap3, create=True)
# end set_vrouter_config_encap
def update_vrouter_config_encap(self, encap1=None, encap2=None, encap3=None, create=False):
'''Used to change the existing encapsulation priorities to new values'''
if not (encap1 and encap2 and encap3):
return self.delete_vrouter_encap()
try:
# Reading Existing config
current_config = self.vnc_lib.global_vrouter_config_read(
fq_name=['default-global-system-config',
'default-global-vrouter-config'])
except NoIdError as e:
self.logger.exception('No config id found. Creating new one')
if not create:
raise
conf_obj = GlobalVrouterConfig()
self.vnc_lib.global_vrouter_config_create(conf_obj)
encaps_obj = EncapsulationPrioritiesType(
encapsulation=[encap1, encap2, encap3])
confs_obj = GlobalVrouterConfig(encapsulation_priorities=encaps_obj)
result = self.vnc_lib.global_vrouter_config_update(confs_obj)
return result
# end update_vrouter_config_encap
def delete_vrouter_encap(self):
try:
conf_id = self.vnc_lib.get_default_global_vrouter_config_id()
obj = self.vnc_lib.global_vrouter_config_read(id=conf_id)
encap_obj = obj.get_encapsulation_priorities()
if not encap_obj:
return ['', '', '']
encaps = encap_obj.encapsulation
l = len(encaps)
encaps.extend([''] * (3 - l))
obj.set_encapsulation_priorities(None)
self.vnc_lib.global_vrouter_config_update(obj)
return encaps
except NoIdError:
errmsg = "No config id found"
self.logger.info(errmsg)
return (errmsg)
# end delete_vrouter_encap
def read_vrouter_config_encap(self):
result = None
try:
conf_id = self.vnc_lib.get_default_global_vrouter_config_id()
config_parameters = self.vnc_lib.global_vrouter_config_read(id=conf_id)
obj = config_parameters.get_encapsulation_priorities()
if not obj:
return ['', '', '']
else:
return obj.encapsulation
except NoIdError:
errmsg = "No config id found"
self.logger.info(errmsg)
return result
# end read_vrouter_config_encap
def set_vrouter_config_evpn(self, evpn_status=True):
self.obj = self.vnc_lib
# Check if already configured
try:
conf_id = self.obj.get_default_global_vrouter_config_id()
self.obj.global_vrouter_config_delete(id=conf_id)
except Exception:
msg = "No config id found. Configuring new one"
self.logger.info(msg)
pass
if evpn_status == True:
conf_obj = GlobalVrouterConfig(evpn_status=True)
else:
conf_obj = GlobalVrouterConfig(evpn_status=False)
result = self.obj.global_vrouter_config_create(conf_obj)
return result
# end set_vrouter_config_evpn
def update_vrouter_config_evpn(self, evpn_status=True):
self.obj = self.vnc_lib
if evpn_status == True:
conf_obj = GlobalVrouterConfig(evpn_status=True)
else:
conf_obj = GlobalVrouterConfig(evpn_status=False)
result = self.obj.global_vrouter_config_update(conf_obj)
return result
# end update_vrouter_config_evpn
def delete_vrouter_config_evpn(self):
try:
self.obj = self.vnc_lib
conf_id = self.obj.get_default_global_vrouter_config_id()
self.obj.global_vrouter_config_delete(id=conf_id)
except NoIdError:
errmsg = "No config id found"
self.logger.info(errmsg)
# end delete_vrouter_config_evpn
def read_vrouter_config_evpn(self):
result = False
try:
self.obj = self.vnc_lib
conf_id = self.obj.get_default_global_vrouter_config_id()
out = self.obj.global_vrouter_config_read(id=conf_id)
if 'evpn_status' in list(out.__dict__.keys()):
result = out.evpn_status
except NoIdError:
errmsg = "No config id found"
self.logger.info(errmsg)
return result
# end read_vrouter_config_evpn
|
import re
REGEX = r"^([a-z0-9-._]+)@([a-z0-9]+)(\.[a-z]{1,3})+$"
def valid_email(email):
match = re.match(REGEX, email)
if match:
return True
return False
def filter_email(emails):
return [email for email in emails if valid_email(email)]
|
import pytest
from src import Store
def test_invalid_constructor_argument():
with pytest.raises(Exception):
Store(None)
with pytest.raises(Exception):
Store(1)
with pytest.raises(Exception):
Store("AHAHA")
|
from setuptools import find_packages
from distutils.core import setup
from modbus_tcp_server import __version__
setup(version=__version__,
packages=find_packages(include=['modbus_tcp_server', 'modbus_tcp_server.*']),
install_requires=['satella'],
python_requires='!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*',
zip_safe=True,
entry_points={
'console_scripts': [
'modbus-tcp-server = modbus_tcp_server.run:run'
]
}
)
|
import sys
import numpy as np
import seaborn as sns
import pandas
import matplotlib
import matplotlib.pylab as plt
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
file = sys.argv[1]
outbase = sys.argv[2]
with open(file) as f:
    labels = f.readline().split('\t')
    ncols = len(labels)
    print(str(ncols) + ' columns\n')
data = pandas.read_csv(file, sep=r'\s+', index_col=0)
fig = sns.clustermap(data, method='average', metric='euclidean', cmap="vlag", yticklabels=True, xticklabels=True, figsize=(15,15))
fig.ax_heatmap.set_xticklabels(fig.ax_heatmap.get_xmajorticklabels(), fontsize = 5)
fig.ax_heatmap.set_yticklabels(fig.ax_heatmap.get_ymajorticklabels(), fontsize = 5)
outname = outbase + ".png"
fig.savefig(outname, dpi=300)
plt.show()
|
#!/usr/bin/python
######################################################################
# import utils.database # @UnusedImport to ensure database connection
######################################################################
from __future__ import division
from sys import argv
from cronutils import run_tasks, ErrorSentry
from cronutils.error_handler import NullErrorHandler
from raven.exceptions import InvalidDsn
from raven.transport import HTTPTransport
from conf.secure import SENTRY_DSN
def run_cron():
from backend.analytics.status_counts import store_status_counts
from backend.analytics.users_and_messages import send_billing_report_by_cohort
from frontend.pages.cron_dispatch import start_every_hour
from supertools.database_integrity import test_database_integrity
from supertools.server import email_server_summary, email_disk_alerts
from utils.database import do_backup
FIVE_MINUTES = "five_minutes"
HOURLY = "hourly"
FOUR_HOURLY = "four_hourly"
DAILY = "daily"
WEEKLY = "weekly"
MONTHLY = "monthly"
TASKS = {
FIVE_MINUTES: [email_server_summary],
HOURLY: [start_every_hour, email_disk_alerts],
FOUR_HOURLY: [test_database_integrity],
DAILY: [store_status_counts, do_backup],
WEEKLY: [],
MONTHLY: [send_billing_report_by_cohort],
}
TIME_LIMITS = {
FIVE_MINUTES: 180, # 3 minutes
HOURLY: 3600, # 60 minutes
FOUR_HOURLY: 5400, # 1.5 hours
DAILY: 43200, # 12 hours
WEEKLY: 86400, # 1 day
MONTHLY: 86400, # 1 day
}
VALID_ARGS = [FIVE_MINUTES, HOURLY, FOUR_HOURLY, DAILY, WEEKLY, MONTHLY]
if len(argv) <= 1:
raise Exception("Not enough arguments to cron\n")
elif argv[1] in VALID_ARGS:
cron_type = argv[1]
run_tasks(TASKS[cron_type], TIME_LIMITS[cron_type], cron_type)
else:
raise Exception("Invalid argument to cron\n")
if __name__ == "__main__":
try:
error_handler = ErrorSentry(SENTRY_DSN, sentry_client_kwargs={'transport': HTTPTransport})
except InvalidDsn:
error_handler = NullErrorHandler()
print "\nThe sentry DSN provided, '%s', is not valid. Running without sentry.\n" % SENTRY_DSN
with error_handler:
run_cron()
    # when running with Sentry we want to forcibly exit 0 because we do not want cron emails.
if isinstance(error_handler, ErrorSentry):
exit(0)
|
# -*- coding: utf-8 -*-
import socket
import os
import time
import cv2
import math
import serial
import matplotlib.pyplot as plt
from configparser import ConfigParser
from Modules import module_calibrate_camera, module_vision, module_calibrate_color
#Read config.ini file
config_object = ConfigParser()
config_object.read("config.ini")
system = config_object["SYSTEM"]
HOST = str(system['HOST'])
PORT = int(system['PORT'])
DEVICE_NUMBER = int(system['DEVICE_NUMBER'])
arduino = serial.Serial(port='COM4', baudrate=9600, timeout=.1)
def write_read_seriel(x):
arduino.write(bytes(x, 'utf-8'))
time.sleep(0.05)
data = arduino.readline()
return data
def init_system():
cap = init_vision_system(DEVICE_NUMBER)
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Ensure that you can restart your server quickly when it terminates
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Set the client socket's TCP "well-known port" number
sock.bind((HOST, PORT))
# Set the number of clients waiting for connection that can be queued
sock.listen(5)
try:
while True:
newSocket, address = sock.accept()
print("Connected from ", address)
receivedData = newSocket.recv(1024).decode('utf-8')
if not receivedData:
break
robot_name = receivedData
print("Robot Connected: ", robot_name)
newSocket.send("OK".encode('utf-8'))
loop = True
while loop:
_, frame = cap.read()
robot_position = module_vision.robotDetecting(frame, config_object["ROBOT_COLOR"])
obstacle_position = module_vision.obstacleDetecting(frame, config_object["OBSTACLE_COLOR"])
cv2.line(frame, robot_position['center'], obstacle_position['center'], (0, 255, 0), 2)
cv2.imshow("Cenario", frame)
#scenery_points = module_vision.arucoDetecting(frame)
                # compute the Euclidean distance (in pixels) between the robot and the obstacle
                dist_robot_obstacle = math.sqrt(
                    (robot_position['center'][0] - obstacle_position['center'][0]) ** 2 +
                    (robot_position['center'][1] - obstacle_position['center'][1]) ** 2)
                print('The distance between these two points is:', dist_robot_obstacle, 'px')
# x = (robot_position['center'][0], obstacle_position['center'][0])
# y = (robot_position['center'][1], obstacle_position['center'][1])
# plotting the points
# plt.plot(x, y, color='green', linestyle='dashed', linewidth=3, marker='o', markerfacecolor='blue', markersize=12)
# plt.plot(x, y)
# naming the x axis
# plt.xlabel('x - axis')
# naming the y axis
# plt.ylabel('y - axis')
# giving a title to my graph
# plt.title('Distance between the robot and the obstacle')
# function to show the plot
# plt.show()
                # command format: (right wheel speed, left wheel speed, direction)
if dist_robot_obstacle < 200:
newSocket.send("0;0;0\n".encode('utf-8'))
else:
newSocket.send("255;255;1\n".encode('utf-8'))
receivedData = newSocket.recv(1024).decode('utf-8')
print(">>Receive Data : ", receivedData)
if receivedData == "exit":
print(">>Disconnected from", address)
newSocket.close()
loop = False
if cv2.waitKey(1) & 0xFF == ord('q'):
break
finally:
sock.close()
def init_vision_system(device_number):
cap = cv2.VideoCapture(device_number)
if cap.isOpened():
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # float
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float
# print(cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT) # 3, 4
print('width, height:', width, height)
fps = cap.get(cv2.CAP_PROP_FPS)
print('fps:', fps) # float
# print(cv2.CAP_PROP_FPS) # 5
frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
print('frames count:', frame_count) # float
# print(cv2.CAP_PROP_FRAME_COUNT) # 7
return cap
else:
print('Error on open video device', device_number)
def print_menuCentral(): ## Your menu design here
print(24 * "-", "CENTRAL CONTROL SYSTEM", 24 * "-")
print("1. START CENTRAL CONTROL")
print("2. CAMERA CALIBRATION")
print("3. ROBOT COLOR CALIBRATION")
print("4. OBSTACLE COLOR CALIBRATION")
print("5. EXIT")
print(71 * "-")
def main():
#dist_robot_obstacle = math.sqrt((308 - 297) ** 2) + math.sqrt((177 - 103) ** 2)
#catX = 308 - 297
#catY = 177 - 103
#print(dist_robot_obstacle)
while 1:
print_menuCentral()
try:
choice = int(input("Enter your choice [1-3]:"))
except ValueError:
print("Not an integer! Try again.")
continue
if choice == 1:
print(">> Starting Centrel Control")
init_system()
        elif choice == 2:
print(">> Starting Camera Calibration")
module_calibrate_camera.CalibrationCamera()
        elif choice == 3:
print(">> Starting Robot Color Calibration")
module_calibrate_color.CalibrationColor('robot')
        elif choice == 4:
print(">> Starting Obstacle Color Calibration")
module_calibrate_color.CalibrationColor('obstacle')
elif choice == 5:
print(">> Exit")
quit()
else:
print("Wrong option selection. Enter any key to try again..")
if __name__ == '__main__':
main() |
from django.template import Library
register = Library()
@register.filter
def is_false(arg):
return arg is False
|
import zmq
import logging
import sys
from os import path
import pickle
import numpy as np
import time
import threading
from collections import deque
import math
try:
from attitude_config import (init, ATTITUDE_TOPIC, IMU_TOPIC, TIME_STEP)
except ImportError as e:
print(f'failed to import: {e} - exit')
sys.exit(-1)
imu_buffer = deque(maxlen=1)
def read_from_zeromq(socket):
logging.debug(f'in consumer thread')
global imu_buffer
try:
while True:
topic_bin, data_bin = socket.recv_multipart()
logging.debug(f'received {topic_bin}')
imu_buffer.append(data_bin)
except Exception as e:
logging.critical(f"failed: {e}")
sys.exit(-1)
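# The consumer thread above only ever keeps the newest IMU sample: imu_buffer is a
# deque(maxlen=1), so the processing loop in main() always works on the latest
# measurement and older, unprocessed samples are silently dropped.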
def main():
config = init()
logging.debug('msb_attitude.py starting up')
broker_xsub = f'{config["ipc_protocol"]}:{config["broker_xsub"]}'
broker_xpub = f'{config["ipc_protocol"]}:{config["broker_xpub"]}'
ctx = zmq.Context()
socket_broker_xsub = ctx.socket(zmq.PUB)
logging.debug(f'trying to connect to {broker_xsub}')
try:
socket_broker_xsub.connect(broker_xsub)
except Exception as e:
logging.fatal(f'failed to bind to zeromq socket {broker_xsub}: {e}')
sys.exit(-1)
logging.debug(f'successfully connected to broker XSUB socket as a publisher')
socket_broker_xpub = ctx.socket(zmq.SUB)
logging.debug(f'trying to connect to {broker_xpub}')
try:
socket_broker_xpub.connect(broker_xpub)
except Exception as e:
logging.fatal(f'failed to bind to zeromq socket {broker_xpub}: {e}')
sys.exit(-1)
logging.debug(f'successfully connected to broker XPUB socket as a subscriber')
socket_broker_xpub.setsockopt(zmq.SUBSCRIBE, IMU_TOPIC)
logging.debug(f'starting imu consumer thread')
threading.Thread(target=read_from_zeromq, daemon=True, args=[socket_broker_xpub]).start()
t_old = time.time()
t_cur = time.time()
t_int_old = time.time()
t_int_cur = time.time()
dt_int = 0.1
dt_sleep = 0.001
pitch = 0
pitch_corr = 0
roll = 0
roll_corr = 0
try:
while True:
# check if data is available in the deque
if len(imu_buffer) == 0:
logging.debug(f'no imu data in buffer')
time.sleep(0.001)
continue
# calculate dt
t_cur = time.time()
dt = t_cur - t_old
data = pickle.loads(
imu_buffer.pop()
)
imu_time = data[0]
acc = data[2:5]
gyr = data[5:8]
mag = data[8:11]
t_int_cur = imu_time
dt_int = t_int_cur - t_int_old
t_int_old = t_int_cur
if config['print']:
print(f'time : {imu_time} acc : {acc} gyr : {gyr} mag : {mag}')
# remove constant offset from gyro data
# low pass filter gyro data
# temporally integrate rotation
pitch += gyr[0]*dt_int
roll += gyr[1]*dt_int
# Only use accelerometer when it's steady (magnitude is near 1g)
force_magnitude = math.sqrt(acc[0]**2 + acc[1]**2 + acc[2]**2)
if force_magnitude > 0.95 and force_magnitude < 1.05:
logging.debug(f'correcting angles: {force_magnitude}')
pitch_corr = math.atan2(-1*acc[1], -1*acc[2])*(180/math.pi)
logging.debug(f'pitch acc: {pitch_corr}')
pitch = (pitch * 0.9) + (pitch_corr * 0.1)
roll_corr = math.atan2(acc[0], -1*acc[2])*(180/math.pi)
logging.debug(f'roll acc: {roll_corr}')
roll = (roll * 0.9) + (roll_corr * 0.1)
else:
logging.debug(f'exceeding acceleration magnitude: {force_magnitude}')
p = (pitch*180/math.pi)
r = (roll*180/math.pi)
if config['print']:
print(f'pitch: {p} roll: {r}')
# print received data if --print flag was set
# if config['print']:
# print(f'imu: {data}')
# save for next step
socket_broker_xsub.send_multipart(
[
ATTITUDE_TOPIC, # topic
pickle.dumps( # serialize the payload
[imu_time, pitch, roll, pitch_corr, roll_corr]
)
]
)
dt_sleep = (t_cur + TIME_STEP) - time.time()
if dt_sleep > 0:
logging.debug(f'sleeping for {dt_sleep} s')
time.sleep(dt_sleep)
#while (tt := time.time() - t_cur) < TIME_STEP:
# logging.debug(f'sleeping {tt}')
# time.sleep(0.001)
t_old = t_cur
except Exception as e:
logging.fatal(f'received Exception: {e}')
logging.fatal('cleaning up')
socket_broker_xpub.close()
socket_broker_xsub.close()
ctx.terminate()
if __name__ == '__main__':
main()
|
import re
from threading import Lock
from pycor import korutils, parser
from pycor import morpheme as lm
from pycor import speechmodel as sm
# Y_TAGS0 = set(['EFN','ETN','EFQ'])
# Y_TAGS1 = set(['EPT-pp','EPT-f','EPT-guess','EFN','EFI','EC-to','EC-for','EC-but'])
# Y_TAGS2 = set(['EPT-pr','ETM'])
# C_TAGS0 = set(['JKS','JKC','JKP'])
# C_TAGS1 = set(['JKG','JKB-TO','JKB-FM','JX-from','JKB-AS','JKB-WZ','JKB-LK','JC','JX','JKB-TT|AS|BY',
# 'EC-evenif','JKG-as','JKB-CM'])
# C_TAGS2 = set(['JKO','JX-SO'])
# C_POS = set(['NP','NNB'])
def _debug_word(writer, word):
if len(word.particles) == 0:
writer.writerow([word.text, 'X'])
for part in word.particles:
h = part.head.text if part.head else ''
t = part.tail.text if part.tail else ''
writer.writerow([word.text, h, t, part.score, part.tags, part.pos])
class Trainer(parser.SentenceParser) :
def __init__(self, wordsthreshold=100000):
super().__init__()
self.wordsthreshold = wordsthreshold
print("Init Trainer")
self.lock = Lock()
def setwordlimit(self, wordsthreshold):
self.wordsthreshold = wordsthreshold
def buildVocab(self, debugWriter=None) :
if len(self.wordmap.words) < 3:
return
for word in self.wordmap.words.values():
self.scoreword(word, force=True)
collList = list(self.wordmap.collocations.values())
for col in collList:
if col.frequency < 3:
del self.wordmap.collocations[col.text]
snglist, ylist, clist, ambilist = self.classifyWords(self.wordmap.words.values())
print("Single Count:", len(snglist))
print("용언 Count:", len(ylist))
print("체언 Count:", len(clist))
print("Ambiguous Count:", len(ambilist))
print("Heads Count:", len(self.wordmap.heads))
print("Collocations Count:", len(self.wordmap.collocations))
print("Tails Count:", len(self.wordmap.tails))
if debugWriter:
debugWriter.writerow(["---",len(self.wordmap.words),"---"])
for word in self.wordmap.words.values():
_debug_word(debugWriter, word)
self.wordmap.clearwords()
return snglist, ylist, clist, ambilist
def train(self,filepath, debugWriter=None):
sentence_array = self.loadfile(filepath)
# self._doresolver(sentence_array)
self.checkVocab(sentence_array, debugWriter)
# skip the per-document scoring step
# def resolveDocument(self, sentence_array):
# return None
def checkVocab(self, sentence_array, debugWriter=None):
if len(self.wordmap.words) > self.wordsthreshold :
self.buildVocab(debugWriter)
return None
def classifyWords(self,words):
headTagsMap = {}
for word in words:
if word.bestpair:
head = word.bestpair.head
tail = word.bestpair.tail
tags = headTagsMap.get(head)
if not tags:
tags = set()
headTagsMap[head] = tags
tags.update(tail.tags)
# tuples = list(headTagsMap.items())
# for head, tags in tuples:
# self.analyzeHead(head, tags, self.wordmap.heads, headTagsMap)
snglist = []
ylist = []
clist = []
ambilist = []
for head, tags in headTagsMap.items():
self.classify(head, tags, snglist, ylist, clist, ambilist)
return snglist, ylist, clist, ambilist
# def analyzeHead(self,head, tags, headMap, headTagsMap):
# headText = head.text
# length = len(headText)
# if length < 2:
# return
# suffixes = lm.getSuffixes(headText[length-1])
# if suffixes:
# wordTokens = parser.WordTokens(headText)
# wordTokens.prev()
# curindex = wordTokens.curidx
# for suf in suffixes:
# wordTokens.setPos(curindex)
# pairs = suf.procede(wordTokens,None,None,None,None,None,head, tags)
def classify(self,head, tags, snglist, ylist, clist, ambilist):
if 'Y' in head.pos:
ylist.append(head)
if 'C' in head.pos:
clist.append(head)
if 'SNG' in head.pos:
snglist.append(head)
if 'AMBI' in head.pos:
ambilist.append(head)
# def classifyAmbi(self,head, tags, snglist, ylist, clist, ambilist):
# yscore = 0.0
# cscore = 0.0
# for tail in head.tails:
# yscore += len(Y_TAGS0 & tail.tags) * 3
# yscore += len(Y_TAGS1 & tail.tags) * 2
# yscore += len(Y_TAGS2 & tail.tags)
# cscore += len(C_TAGS1 & tail.tags) * 3
# cscore += len(C_TAGS2 & tail.tags) * 2
# if max(yscore,cscore) > 0 and abs(yscore-cscore) / max(yscore,cscore) < 0.3:
# # print(head.text, abs(yscore-cscore), max(yscore,cscore))
# head.addpos('AMBI')
# ambilist.append(head)
# elif yscore > cscore:
# head.addpos('Y')
# ylist.append(head)
# elif cscore > yscore :
# if len(head.pos) > 0 and (head.pos & C_POS) == 0:
# snglist.append(head)
# else:
# head.addpos('C')
# clist.append(head)
# else:
# head.addpos('AMBI')
# ambilist.append(head)
|
import re
import random
from cwbot.modules.BaseChatModule import BaseChatModule
from cwbot.util.shuntingYard import evalInfix
def parseDice(args):
""" Parse a dice expression (e.g., 3d10-1d6) and evaluate the dice
(e.g., to the string 18-2) """
m = re.search(r'(\d*)d(\d+)', args)
while m is not None:
total = 0
qty = 1 if m.group(1) == "" else int(m.group(1))
val = int(m.group(2))
if qty > 100:
# prevent abusive requests
raise ValueError("I can't hold {} dice in my robotic hands!"
.format(qty))
if val == 0:
raise ValueError("A zero-sided die! "
"Quite the existential conundrum.")
if val < 0:
raise ValueError("You want me to roll a die with negative sides?")
for _i in range(qty):
total += random.randint(1, val)
args = args[:m.start()] + " " + str(total) + " " + args[m.end():]
m = re.search(r'(\d*)d(\d+)', args)
return args
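# Illustrative behaviour of parseDice (hypothetical roll results, since each roll is random):
#   parseDice("3d6+2") -> " 11 +2"   (every "XdY" term is replaced by its rolled total)
#   parseDice("d20")   -> " 7 "      (a missing quantity defaults to a single die)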
class DiceModule(BaseChatModule):
"""
A dice-rolling module capable of rolling arbitrary dice sequences and
permuting lists. Also can be used for math, oddly enough.
No configuration options.
"""
requiredCapabilities = ['chat']
_name = "dice"
def _processCommand(self, message, cmd, args):
if cmd in ["roll", "dice"]:
if args.strip() == "":
return self._availableCommands()['roll']
# default manager ignores all text after (); for example
# !roll (10)+1d5 is interpreted as !roll 10. Here we extract
# the full argument from the message by removing the first
# word, and then testing if that works.
s = message.get('text', "")
splitStr = re.split(r'([\s(])', s)
fullArgs = (''.join(word for word in splitStr[1:])).strip()
(replyStr, success) = self.rollDice(fullArgs)
# if success == False, try again with just args. A return of false
# either indicates an evaluation error, or that the input was
# interpreted as a list. Either way, we want to just use args.
if success:
return replyStr
(replyStr2, success2) = self.rollDice(args)
if success2:
return replyStr2
# failed. just return the original failure message.
return replyStr
if cmd in ["order", "permute", "permutation"]:
if args.strip() == "":
return self._availableCommands()['permute']
return self.rollOrder(args)
return None
def getNameList(self, args):
""" Split a list into its elements (e.g., "a,b,c" -> ['a', 'b', 'c'])
"""
nList = re.split(r'\s*,\s*', args)
self.debugLog("found names: {}".format(nList))
return [name for name in nList if len(name) > 0]
def rollMdN(self, args):
""" roll a set of "XdX+XdX-XdX etc...
returns a tuple: (outputString, successBool) """
args = args.replace(" ", "")
returnStr = args
try:
# first, roll the dice
argsRolled = parseDice(args).strip()
# add to result chain
if args != argsRolled:
returnStr += " -> " + argsRolled
except ValueError as e:
# return the error
returnStr += " -> " + e.args[0]
return (returnStr, False)
try:
# now, evaluate the expression
argsEvaluated = evalInfix(argsRolled, self.properties.debug)
# add to result chain
if argsRolled != str(argsEvaluated):
returnStr += " -> {:g}".format(argsEvaluated)
except (ValueError, ZeroDivisionError, OverflowError) as e:
# return error
returnStr += " -> Evaluation error ({})".format(e.args[0])
return (returnStr, False)
return (returnStr, True)
def rollOrder(self, args):
""" permute a list """
names = []
maxLength = 100
try:
# convert !permute 10 to !permute 1,2,3,4,5,6,7,8,9,10
n = int(re.findall(r'^\d+[^,]*$', args)[0])
if n > maxLength:
return "I'm not permuting that many things!"
names = [str(num+1) for num in range(n)]
except (ValueError, IndexError):
names = self.getNameList(args)
self.debugLog("Received names {}".format(names))
if names is None or len(names) == 0:
return ("I couldn't understand your permute request. See !help "
"permute for format options.")
if len(names) == 1:
return ("It doesn't make much sense to randomly permute a list "
"of one item.")
if len(names) > maxLength:
return "I can't keep track of all that!"
random.shuffle(names)
return ', '.join(names)
def rollDice(self, args):
""" returns a tuple: (outputString, diceSuccessBool)
diceSuccessBool is True if the input was interpreted as a
dice expression and was successful. It returns false if there
was an evaluation error, OR if args is interpreted as a list of
names. """
try:
# if expression is !roll N, change to !roll 1dN
m = re.search(r'^(\d+)$', args)
if m is not None:
n = int(m.group(1))
self.debugLog("Rolling a d{}".format(n))
return self.rollMdN("1d{}".format(n))
except (ValueError, IndexError):
pass
names = self.getNameList(args)
if names is None or len(names) == 0:
# if no names available, try evaluating the expression
return self.rollMdN(args)
if len(names) == 1:
# only one name found? try evaluating it
return self.rollMdN(args)
n = random.randint(0, len(names)-1)
self.debugLog("Selecting {} out of {}".format(n, str(names)))
returnStr = "{} (out of {} entries)".format(names[n], len(names))
return (returnStr, False)
def _availableCommands(self):
return {'roll': "!roll: Use '!roll N' to roll a dN "
"(also allowed: !roll MdN or !roll MdN+OdP). "
"Use '!roll name1,name2,...' to select a name. "
"See also !permute.",
'permute': "!permute: Use '!permute N' to generate a "
"permutation from 1 to N, or '!permute "
"name1,name2,...' to assign a list order. "
"See also !roll.",
'dice': None, 'order': None, 'permutation': None}
|
# Configuration Blender
import bpy
bpy.context.user_preferences.edit.use_drag_immediately = True
bpy.context.user_preferences.edit.use_insertkey_xyz_to_rgb = False
bpy.context.user_preferences.inputs.select_mouse = 'LEFT'
bpy.context.user_preferences.inputs.view_zoom_method = 'DOLLY'
bpy.context.user_preferences.inputs.view_zoom_axis = 'HORIZONTAL'
bpy.context.user_preferences.inputs.view_rotate_method = 'TURNTABLE'
bpy.context.user_preferences.inputs.invert_mouse_zoom = True
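# Note: this uses the Blender 2.7x preferences API; from Blender 2.80 onwards
# bpy.context.user_preferences was renamed to bpy.context.preferences.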
|
"""The Azure Storage Block Blob backend for Celery."""
from __future__ import absolute_import, unicode_literals
from kombu.utils import cached_property
from kombu.utils.encoding import bytes_to_str
from celery.exceptions import ImproperlyConfigured
from celery.utils.log import get_logger
from .base import KeyValueStoreBackend
try:
import azure.storage as azurestorage
from azure.common import AzureMissingResourceHttpError
from azure.storage.blob import BlockBlobService
from azure.storage.common.retry import ExponentialRetry
except ImportError: # pragma: no cover
azurestorage = BlockBlobService = ExponentialRetry = \
AzureMissingResourceHttpError = None # noqa
__all__ = ("AzureBlockBlobBackend",)
LOGGER = get_logger(__name__)
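# The backend is configured through a result-backend URL of the form
# "azureblockblob://<connection string>"; _parse_url below simply strips that prefix
# and hands the remaining connection string to BlockBlobService.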
class AzureBlockBlobBackend(KeyValueStoreBackend):
"""Azure Storage Block Blob backend for Celery."""
def __init__(self,
url=None,
container_name=None,
retry_initial_backoff_sec=None,
retry_increment_base=None,
retry_max_attempts=None,
*args,
**kwargs):
super(AzureBlockBlobBackend, self).__init__(*args, **kwargs)
if azurestorage is None:
raise ImproperlyConfigured(
"You need to install the azure-storage library to use the "
"AzureBlockBlob backend")
conf = self.app.conf
self._connection_string = self._parse_url(url)
self._container_name = (
container_name or
conf["azureblockblob_container_name"])
self._retry_initial_backoff_sec = (
retry_initial_backoff_sec or
conf["azureblockblob_retry_initial_backoff_sec"])
self._retry_increment_base = (
retry_increment_base or
conf["azureblockblob_retry_increment_base"])
self._retry_max_attempts = (
retry_max_attempts or
conf["azureblockblob_retry_max_attempts"])
@classmethod
def _parse_url(cls, url, prefix="azureblockblob://"):
connection_string = url[len(prefix):]
if not connection_string:
raise ImproperlyConfigured("Invalid URL")
return connection_string
@cached_property
def _client(self):
"""Return the Azure Storage Block Blob service.
If this is the first call to the property, the client is created and
the container is created if it doesn't yet exist.
"""
client = BlockBlobService(connection_string=self._connection_string)
created = client.create_container(
container_name=self._container_name, fail_on_exist=False)
if created:
LOGGER.info("Created Azure Blob Storage container %s",
self._container_name)
client.retry = ExponentialRetry(
initial_backoff=self._retry_initial_backoff_sec,
increment_base=self._retry_increment_base,
max_attempts=self._retry_max_attempts).retry
return client
def get(self, key):
"""Read the value stored at the given key.
Args:
key: The key for which to read the value.
"""
key = bytes_to_str(key)
LOGGER.debug("Getting Azure Block Blob %s/%s",
self._container_name, key)
try:
return self._client.get_blob_to_text(
self._container_name, key).content
except AzureMissingResourceHttpError:
return None
def set(self, key, value):
"""Store a value for a given key.
Args:
key: The key at which to store the value.
value: The value to store.
"""
key = bytes_to_str(key)
LOGGER.debug("Creating Azure Block Blob at %s/%s",
self._container_name, key)
return self._client.create_blob_from_text(
self._container_name, key, value)
def mget(self, keys):
"""Read all the values for the provided keys.
Args:
keys: The list of keys to read.
"""
return [self.get(key) for key in keys]
def delete(self, key):
"""Delete the value at a given key.
Args:
key: The key of the value to delete.
"""
key = bytes_to_str(key)
LOGGER.debug("Deleting Azure Block Blob at %s/%s",
self._container_name, key)
self._client.delete_blob(self._container_name, key)
|
import logging
from loguru import logger as loguru_logger
import os
import os.path as osp
import sys
from setproctitle import setproctitle
import torch
from mmcv import Config
import cv2
from pytorch_lightning import seed_everything
from pytorch_lightning.lite import LightningLite # import LightningLite
cv2.setNumThreads(0) # pytorch issue 1355: possible deadlock in dataloader
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
cur_dir = osp.dirname(osp.abspath(__file__))
sys.path.insert(0, osp.join(cur_dir, "../../"))
from core.utils.default_args_setup import my_default_argument_parser, my_default_setup
from core.utils.my_setup import setup_for_distributed
from core.utils.my_checkpoint import MyCheckpointer
from core.utils import my_comm as comm
from lib.utils.utils import iprint
from lib.utils.time_utils import get_time_str
from core.gdrn_modeling.dataset_factory import register_datasets_in_cfg
from core.gdrn_modeling.engine import GDRN_Lite
from core.gdrn_modeling.models import GDRN # noqa
logger = logging.getLogger("detectron2")
def setup(args):
"""Create configs and perform basic setups."""
cfg = Config.fromfile(args.config_file)
if args.opts is not None:
cfg.merge_from_dict(args.opts)
############## pre-process some cfg options ######################
# NOTE: check if need to set OUTPUT_DIR automatically
if cfg.OUTPUT_DIR.lower() == "auto":
cfg.OUTPUT_DIR = osp.join(cfg.OUTPUT_ROOT, osp.splitext(args.config_file)[0].split("configs/")[1])
iprint(f"OUTPUT_DIR was automatically set to: {cfg.OUTPUT_DIR}")
if cfg.get("EXP_NAME", "") == "":
setproctitle("{}.{}".format(osp.splitext(osp.basename(args.config_file))[0], get_time_str()))
else:
setproctitle("{}.{}".format(cfg.EXP_NAME, get_time_str()))
if cfg.SOLVER.AMP.ENABLED:
if torch.cuda.get_device_capability() <= (6, 1):
iprint("Disable AMP for older GPUs")
cfg.SOLVER.AMP.ENABLED = False
# NOTE: pop some unwanted configs in detectron2
# ---------------------------------------------------------
cfg.SOLVER.pop("STEPS", None)
cfg.SOLVER.pop("MAX_ITER", None)
# NOTE: get optimizer from string cfg dict
if cfg.SOLVER.OPTIMIZER_CFG != "":
if isinstance(cfg.SOLVER.OPTIMIZER_CFG, str):
optim_cfg = eval(cfg.SOLVER.OPTIMIZER_CFG)
cfg.SOLVER.OPTIMIZER_CFG = optim_cfg
else:
optim_cfg = cfg.SOLVER.OPTIMIZER_CFG
iprint("optimizer_cfg:", optim_cfg)
cfg.SOLVER.OPTIMIZER_NAME = optim_cfg["type"]
cfg.SOLVER.BASE_LR = optim_cfg["lr"]
cfg.SOLVER.MOMENTUM = optim_cfg.get("momentum", 0.9)
cfg.SOLVER.WEIGHT_DECAY = optim_cfg.get("weight_decay", 1e-4)
# -------------------------------------------------------------------------
if cfg.get("DEBUG", False):
iprint("DEBUG")
args.num_gpus = 1
args.num_machines = 1
cfg.DATALOADER.NUM_WORKERS = 0
cfg.TRAIN.PRINT_FREQ = 1
# register datasets
register_datasets_in_cfg(cfg)
exp_id = "{}".format(osp.splitext(osp.basename(args.config_file))[0])
if args.eval_only:
if cfg.TEST.USE_PNP:
# NOTE: need to keep _test at last
exp_id += "{}_test".format(cfg.TEST.PNP_TYPE.upper())
else:
exp_id += "_test"
cfg.EXP_ID = exp_id
cfg.RESUME = args.resume
####################################
return cfg
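# Lite wraps the GDRN training/testing entry points in a LightningLite subclass so the
# same code path runs on a single GPU or across multiple nodes with DDP; main() below
# picks the strategy, device count and precision from the parsed arguments and config.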
class Lite(GDRN_Lite):
def set_my_env(self, args, cfg):
my_default_setup(cfg, args) # will set os.environ["PYTHONHASHSEED"]
seed_everything(int(os.environ["PYTHONHASHSEED"]), workers=True)
setup_for_distributed(is_master=self.is_global_zero)
def run(self, args, cfg):
self.set_my_env(args, cfg)
logger.info(f"Used GDRN module name: {cfg.MODEL.CDPN.NAME}")
model, optimizer = eval(cfg.MODEL.CDPN.NAME).build_model_optimizer(cfg)
logger.info("Model:\n{}".format(model))
# don't forget to call `setup` to prepare for model / optimizer for distributed training.
# the model is moved automatically to the right device.
model, optimizer = self.setup(model, optimizer)
if True:
# sum(p.numel() for p in model.parameters() if p.requires_grad)
params = sum(p.numel() for p in model.parameters()) / 1e6
logger.info("{}M params".format(params))
if args.eval_only:
MyCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
return self.do_test(cfg, model)
self.do_train(cfg, args, model, optimizer, resume=args.resume)
torch.multiprocessing.set_sharing_strategy("file_system")
return self.do_test(cfg, model)
@loguru_logger.catch
def main(args):
cfg = setup(args)
logger.info(f"start to train with {args.num_machines} nodes and {args.num_gpus} GPUs")
if args.num_gpus > 1 and args.strategy is None:
args.strategy = "ddp"
Lite(
accelerator="gpu",
strategy=args.strategy,
devices=args.num_gpus,
num_nodes=args.num_machines,
precision=16 if cfg.SOLVER.AMP.ENABLED else 32,
).run(args, cfg)
if __name__ == "__main__":
import resource
# RuntimeError: received 0 items of ancdata. Issue: pytorch/pytorch#973
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
hard_limit = rlimit[1]
soft_limit = min(500000, hard_limit)
iprint("soft limit: ", soft_limit, "hard limit: ", hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
parser = my_default_argument_parser()
parser.add_argument(
"--strategy",
default=None,
type=str,
help="the strategy for parallel training: dp | ddp | ddp_spawn | deepspeed | ddp_sharded",
)
args = parser.parse_args()
iprint("Command Line Args: {}".format(args))
if args.eval_only:
torch.multiprocessing.set_sharing_strategy("file_system")
main(args)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ldig : Language Detector with Infinite-Gram
# This code is available under the MIT License.
# (c)2011 Nakatani Shuyo / Cybozu Labs Inc.
import os
import sys
import re
import codecs
import json
import gzip
import htmlentitydefs
import numpy
from Microblog_Trec.common import da
reload(sys)
sys.setdefaultencoding('utf-8')
class LangDetector:
def __init__(self, model_dir):
self.features = os.path.join(model_dir, 'features')
self.labels = os.path.join(model_dir, 'labels.json')
self.param = os.path.join(model_dir, 'parameters.npy')
self.doublearray = os.path.join(model_dir, 'doublearray.npz')
def load_da(self):
trie = da.DoubleArray()
trie.load(self.doublearray)
return trie
def load_features(self):
features = []
with codecs.open(self.features, 'rb', 'utf-8') as f:
pre_feature = ""
for n, s in enumerate(f):
m = re.match(r'(.+)\t([0-9]+)', s)
if not m:
sys.exit("irregular feature : '%s' at %d" % (s, n + 1))
if pre_feature >= m.group(1):
sys.exit("unordered feature : '%s' at %d" % (s, n + 1))
pre_feature = m.group(1)
features.append(m.groups())
return features
def load_labels(self):
with open(self.labels, 'rb') as f:
return json.load(f)
def load_params(self):
trie = self.load_da()
param = numpy.load(self.param)
labels = self.load_labels()
return param, labels, trie
# from http://www.programming-magic.com/20080820002254/
reference_regex = re.compile(u'&(#x?[0-9a-f]+|[a-z]+);', re.IGNORECASE)
num16_regex = re.compile(u'#x\d+', re.IGNORECASE)
num10_regex = re.compile(u'#\d+', re.IGNORECASE)
def htmlentity2unicode(text):
result = u''
i = 0
while True:
match = reference_regex.search(text, i)
if match is None:
result += text[i:]
break
result += text[i:match.start()]
i = match.end()
name = match.group(1)
if name in htmlentitydefs.name2codepoint.keys():
result += unichr(htmlentitydefs.name2codepoint[name])
elif num16_regex.match(name):
result += unichr(int(u'0'+name[1:], 16))
elif num10_regex.match(name):
result += unichr(int(name[1:]))
return result
def normalize_twitter(text):
"""normalization for twitter"""
text = re.sub(r'(@|#|https?:\/\/)[^ ]+', '', text)
text = re.sub(r'(^| )[:;x]-?[\(\)dop]($| )', ' ', text) # facemark
text = re.sub(r'(^| )(rt[ :]+)*', ' ', text)
text = re.sub(r'([hj])+([aieo])+(\1+\2+){1,}', r'\1\2\1\2', text, re.IGNORECASE) # laugh
text = re.sub(r' +(via|live on) *$', '', text)
return text
re_ignore_i = re.compile(r'[^I]')
re_turkish_alphabet = re.compile(u'[\u011e\u011f\u0130\u0131]')
vietnamese_norm = {
u'\u0041\u0300':u'\u00C0', u'\u0045\u0300':u'\u00C8', u'\u0049\u0300':u'\u00CC', u'\u004F\u0300':u'\u00D2',
u'\u0055\u0300':u'\u00D9', u'\u0059\u0300':u'\u1EF2', u'\u0061\u0300':u'\u00E0', u'\u0065\u0300':u'\u00E8',
u'\u0069\u0300':u'\u00EC', u'\u006F\u0300':u'\u00F2', u'\u0075\u0300':u'\u00F9', u'\u0079\u0300':u'\u1EF3',
u'\u00C2\u0300':u'\u1EA6', u'\u00CA\u0300':u'\u1EC0', u'\u00D4\u0300':u'\u1ED2', u'\u00E2\u0300':u'\u1EA7',
u'\u00EA\u0300':u'\u1EC1', u'\u00F4\u0300':u'\u1ED3', u'\u0102\u0300':u'\u1EB0', u'\u0103\u0300':u'\u1EB1',
u'\u01A0\u0300':u'\u1EDC', u'\u01A1\u0300':u'\u1EDD', u'\u01AF\u0300':u'\u1EEA', u'\u01B0\u0300':u'\u1EEB',
u'\u0041\u0301':u'\u00C1', u'\u0045\u0301':u'\u00C9', u'\u0049\u0301':u'\u00CD', u'\u004F\u0301':u'\u00D3',
u'\u0055\u0301':u'\u00DA', u'\u0059\u0301':u'\u00DD', u'\u0061\u0301':u'\u00E1', u'\u0065\u0301':u'\u00E9',
u'\u0069\u0301':u'\u00ED', u'\u006F\u0301':u'\u00F3', u'\u0075\u0301':u'\u00FA', u'\u0079\u0301':u'\u00FD',
u'\u00C2\u0301':u'\u1EA4', u'\u00CA\u0301':u'\u1EBE', u'\u00D4\u0301':u'\u1ED0', u'\u00E2\u0301':u'\u1EA5',
u'\u00EA\u0301':u'\u1EBF', u'\u00F4\u0301':u'\u1ED1', u'\u0102\u0301':u'\u1EAE', u'\u0103\u0301':u'\u1EAF',
u'\u01A0\u0301':u'\u1EDA', u'\u01A1\u0301':u'\u1EDB', u'\u01AF\u0301':u'\u1EE8', u'\u01B0\u0301':u'\u1EE9',
u'\u0041\u0303':u'\u00C3', u'\u0045\u0303':u'\u1EBC', u'\u0049\u0303':u'\u0128', u'\u004F\u0303':u'\u00D5',
u'\u0055\u0303':u'\u0168', u'\u0059\u0303':u'\u1EF8', u'\u0061\u0303':u'\u00E3', u'\u0065\u0303':u'\u1EBD',
u'\u0069\u0303':u'\u0129', u'\u006F\u0303':u'\u00F5', u'\u0075\u0303':u'\u0169', u'\u0079\u0303':u'\u1EF9',
u'\u00C2\u0303':u'\u1EAA', u'\u00CA\u0303':u'\u1EC4', u'\u00D4\u0303':u'\u1ED6', u'\u00E2\u0303':u'\u1EAB',
u'\u00EA\u0303':u'\u1EC5', u'\u00F4\u0303':u'\u1ED7', u'\u0102\u0303':u'\u1EB4', u'\u0103\u0303':u'\u1EB5',
u'\u01A0\u0303':u'\u1EE0', u'\u01A1\u0303':u'\u1EE1', u'\u01AF\u0303':u'\u1EEE', u'\u01B0\u0303':u'\u1EEF',
u'\u0041\u0309':u'\u1EA2', u'\u0045\u0309':u'\u1EBA', u'\u0049\u0309':u'\u1EC8', u'\u004F\u0309':u'\u1ECE',
u'\u0055\u0309':u'\u1EE6', u'\u0059\u0309':u'\u1EF6', u'\u0061\u0309':u'\u1EA3', u'\u0065\u0309':u'\u1EBB',
u'\u0069\u0309':u'\u1EC9', u'\u006F\u0309':u'\u1ECF', u'\u0075\u0309':u'\u1EE7', u'\u0079\u0309':u'\u1EF7',
u'\u00C2\u0309':u'\u1EA8', u'\u00CA\u0309':u'\u1EC2', u'\u00D4\u0309':u'\u1ED4', u'\u00E2\u0309':u'\u1EA9',
u'\u00EA\u0309':u'\u1EC3', u'\u00F4\u0309':u'\u1ED5', u'\u0102\u0309':u'\u1EB2', u'\u0103\u0309':u'\u1EB3',
u'\u01A0\u0309':u'\u1EDE', u'\u01A1\u0309':u'\u1EDF', u'\u01AF\u0309':u'\u1EEC', u'\u01B0\u0309':u'\u1EED',
u'\u0041\u0323':u'\u1EA0', u'\u0045\u0323':u'\u1EB8', u'\u0049\u0323':u'\u1ECA', u'\u004F\u0323':u'\u1ECC',
u'\u0055\u0323':u'\u1EE4', u'\u0059\u0323':u'\u1EF4', u'\u0061\u0323':u'\u1EA1', u'\u0065\u0323':u'\u1EB9',
u'\u0069\u0323':u'\u1ECB', u'\u006F\u0323':u'\u1ECD', u'\u0075\u0323':u'\u1EE5', u'\u0079\u0323':u'\u1EF5',
u'\u00C2\u0323':u'\u1EAC', u'\u00CA\u0323':u'\u1EC6', u'\u00D4\u0323':u'\u1ED8', u'\u00E2\u0323':u'\u1EAD',
u'\u00EA\u0323':u'\u1EC7', u'\u00F4\u0323':u'\u1ED9', u'\u0102\u0323':u'\u1EB6', u'\u0103\u0323':u'\u1EB7',
u'\u01A0\u0323':u'\u1EE2', u'\u01A1\u0323':u'\u1EE3', u'\u01AF\u0323':u'\u1EF0', u'\u01B0\u0323':u'\u1EF1',
}
re_vietnamese = re.compile(u'[AEIOUYaeiouy\u00C2\u00CA\u00D4\u00E2\u00EA\u00F4\u0102\u0103\u01A0\u01A1\u01AF\u01B0][\u0300\u0301\u0303\u0309\u0323]')
re_latin_cont = re.compile(u'([a-z\u00e0-\u024f])\\1{2,}')
re_symbol_cont = re.compile(u'([^a-z\u00e0-\u024f])\\1{1,}')
def normalize_text(org):
m = re.match(r'([-A-Za-z]+)\t(.+)', org)
if m:
label, org = m.groups()
else:
label = ""
m = re.search(r'\t([^\t]+)$', org)
if m:
s = m.group(0)
else:
s = org
s = htmlentity2unicode(s)
s = re.sub(u'[\u2010-\u2015]', '-', s)
s = re.sub(u'[0-9]+', '0', s)
s = re.sub(u'[^\u0020-\u007e\u00a1-\u024f\u0300-\u036f\u1e00-\u1eff]+', ' ', s)
s = re.sub(u' +', ' ', s)
# vietnamese normalization
s = re_vietnamese.sub(lambda x:vietnamese_norm[x.group(0)], s)
# lower case with Turkish
s = re_ignore_i.sub(lambda x:x.group(0).lower(), s)
#if re_turkish_alphabet.search(s):
# s = s.replace(u'I', u'\u0131')
#s = s.lower()
# Romanian normalization
s = s.replace(u'\u0219', u'\u015f').replace(u'\u021b', u'\u0163')
s = normalize_twitter(s)
s = re_latin_cont.sub(r'\1\1', s)
s = re_symbol_cont.sub(r'\1', s)
return label, s.strip(), org
# load corpus
def load_corpus(filelist, labels):
idlist = dict((x, []) for x in labels)
corpus = []
for filename in filelist:
f = codecs.open(filename, 'rb', 'utf-8')
for i, s in enumerate(f):
label, text, org_text = normalize_text(s)
if label not in labels:
sys.exit("unknown label '%s' at %d in %s " % (label, i+1, filename))
idlist[label].append(len(corpus))
corpus.append((label, text, org_text))
f.close()
return corpus, idlist
# prediction probability
def predict(param, events):
sum_w = numpy.dot(param[events.keys(),].T, events.values())
exp_w = numpy.exp(sum_w - sum_w.max())
return exp_w / exp_w.sum()
def predict_lang(param, labels, trie, inText):
K = len(labels)
corrects = numpy.zeros(K, dtype=int)
counts = numpy.zeros(K, dtype=int)
label_map = dict((x, i) for i, x in enumerate(labels))
n_available_data = 0
log_likely = 0.0
label, text, org_text = normalize_text(inText)
if label not in label_map:
sys.stderr.write("WARNING : unknown label '%s' in %s (ignore the later same labels)\n" % (label, text))
label_map[label] = -1
label_k = label_map[label]
events = trie.extract_features(u"\u0001" + text + u"\u0001")
y = predict(param, events)
predict_k = y.argmax()
if label_k >= 0:
log_likely -= numpy.log(y[label_k])
n_available_data += 1
counts[label_k] += 1
if label_k == predict_k and y[predict_k] >= 0.6:
corrects[predict_k] += 1
predict_lang = labels[predict_k]
if y[predict_k] < 0.6: predict_lang = ""
#print "%s\t%s\t%s" % (label, predict_lang, org_text)
return predict_lang
def generate_doublearray(file, features):
trie = da.DoubleArray()
trie.initialize(features)
trie.save(file)
re_filtUrl=re.compile(r'(#|@|https?:\/\/)[^ ]+')
def extract_text(line):
line = line.decode('utf-8')
origin_text=""; text = ""; id = ""
if line[0] == '{':
tweet = json.loads(line)
if tweet.has_key('created_at') and tweet['user']['lang'] == 'en':
origin_text = re.sub(r'\t+|\n+', ' ', tweet['text'])
text = re_filtUrl.sub(' ', origin_text).strip()
id = tweet['id_str']
return origin_text, text, id
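# The __main__ block below streams a gzipped file of JSON tweets, keeps the ones whose
# Twitter metadata already claims English, re-checks them with the ldig model and writes
# out only the lines that the detector also classifies as English.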
if __name__ == '__main__':
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
fw = open("originEn_tweets"+sys.argv[1][-11:-3]+".txt", 'w')
model = os.path.join(sys.path[0], 'model.latin')
detector = LangDetector(model)
param, labels, trie = detector.load_params()
count_en = 0; count_total = 0
# for text in fileinput.input():
for line in gzip.open(sys.argv[1], 'rb'):
count_total += 1
origin_text, text, id = extract_text(line)
if text != "":
lang = predict_lang(param, labels, trie, 'en\t'+text)
if lang == 'en':
count_en += 1
#fw.write(id + '\t' + origin_text + "\n")
fw.write(line)
if count_en % 1000 == 0: print "count: ", count_en
print "total count: ", count_total, " en count: ", count_en
|
import matplotlib.pyplot as plt
import numpy as np
focal_length = 100 # focal length in mm
angle_deg = 0 # angle of incidence of the incident beam in degrees
rays = 21 # number of rays
p = 2 * focal_length # parameter of the parabola equation y**2 = 2*p*z
a = 1.1 * focal_length # mirror field
inc_ang = -angle_deg * np.pi / 180 if angle_deg > 0.000001 else 0.000001 * np.pi / 180 # incident ray angle in radians
var = np.arange(-a, a, 0.1)
# mirror equation
def surface(y):
return -y ** 2 / (2 * p)
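# For the parabola y**2 = 2*p*z the focus lies at z = p/2, so with p = 2*focal_length
# the mirror z = -y**2/(2*p) drawn here has its focus at z = -focal_length, which is
# exactly where the focal point marker is plotted below.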
# reflection angle
def refl_ang(y, inc_ang):
return 2 * np.arctan(y / p) - inc_ang
# incident ray vector (y_start, y_end)
# x_vec is vector (x_start, x_end)
def inc_vec(y, inc_ang, x_vec):
return np.tan(-inc_ang) * (x_vec - surface(y)) + y
# reflected ray vector (y_start, y_end)
# x_vec is vector (x_start, x_end)
def refl_vec(y, inc_ang, x_vec):
r = refl_ang(y, inc_ang)
return np.tan(r) * (x_vec - surface(y) + y / np.tan(r))
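# Algebraically, refl_vec is the straight line through the reflection point
# (surface(y), y) with slope tan(r): expanding the expression gives
# tan(r) * (x_vec - surface(y)) + y.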
plt.figure(figsize=(13, 8))
plt.plot(surface(var), var) # mirror surface visualization
plt.plot([-p, 0], [0, 0]) # axis of the mirror
plt.plot([-focal_length], [0], 'o') # focal point
for y in np.linspace(-focal_length, focal_length, rays):
x_vec = np.array([-p, surface(y)])
plt.plot(x_vec, inc_vec(y, inc_ang, x_vec), 'k', lw=1)
r = refl_ang(y, inc_ang)
if (r < np.pi / 2 and r > -np.pi / 2):
plt.plot(x_vec, refl_vec(y, inc_ang, x_vec), 'r', lw=1)
else:
x_vec_out = np.array([surface(y), 0])
plt.plot(x_vec_out, refl_vec(y, inc_ang, x_vec_out), 'r', lw=1)
plt.title("Focal length = {:.1f} mm. Incident angle = {:.1f} deg. Number of rays = {}".format(focal_length, angle_deg, rays))
plt.xlabel("z, mm")
plt.ylabel("r, mm")
plt.ylim(-a, a)
plt.xlim(-p, 0)
plt.grid()
plt.show() |
import psycopg2
from app import database
from app.vendors.prepare import PreparingCursor
def get_db():
try:
connection = database.connect()
cursor = connection.cursor(cursor_factory=PreparingCursor)
return cursor, connection
except Exception as exc:
raise ValueError(f"{exc}")
def zip_column_name(table, rows):
results = []
column = get_columns(table)
for row in rows:
results.append(dict(zip(column, row)))
return results
def get_columns(table):
column = None
cursor, _ = get_db()
try:
query = f"SELECT column_name FROM information_schema.columns WHERE table_schema = 'public' AND table_name='{table}'"
cursor.execute(query)
column = [row[0] for row in cursor.fetchall()]
except (Exception, psycopg2.DatabaseError) as error:
raise ValueError(f"{error}")
return column
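# Note: table and column names (and the SET values in update()) are interpolated
# directly into the SQL text by the helpers below, with only some values bound as
# prepared-statement parameters, so callers are expected to pass trusted input.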
def get_all(table):
results = []
cursor, connection = get_db()
try:
query = f'SELECT * FROM "{table}"'
cursor.prepare(query)
cursor.execute()
rows = cursor.fetchall()
results = zip_column_name(table, rows)
except (psycopg2.DatabaseError, psycopg2.OperationalError) as error:
connection.rollback()
raise ValueError(f"{error}")
else:
connection.commit()
return results
def get_one(table, field=None, value=None):
results = []
cursor, connection = get_db()
column = get_columns(table)
try:
query = f'SELECT * FROM "{table}" WHERE "{field}"=%(value)s'
cursor.prepare(query)
cursor.execute({"value": value})
rows = cursor.fetchone()
if not rows:
return
results = dict(zip(column, list(rows)))
except (psycopg2.DatabaseError, psycopg2.OperationalError) as error:
connection.rollback()
raise ValueError(f"{error}")
else:
connection.commit()
return results
def insert(table, data=None):
cursor, connection = get_db()
rows = []
rows_value = []
# arrange row and values
for row in data:
rows.append(row)
rows_value.append(str(data[row]))
str_placeholer = ["%s"] * len(rows)
try:
rows = ",".join(rows)
str_placeholer = ",".join(str_placeholer)
query = f'INSERT INTO "{table}" ({rows}) VALUES ({str_placeholer}) RETURNING *'
cursor.prepare(query)
cursor.execute((tuple(rows_value)))
except (Exception, psycopg2.DatabaseError) as error:
connection.rollback()
raise ValueError(f"{error}")
else:
connection.commit()
inserted_data_id = cursor.fetchone()[0]
return inserted_data_id
def update(table, data=None):
cursor, connection = get_db()
data_ = data["data"]
rows = []
set_value = []
for row in data_:
rows.append(row)
row_value = str(data_[row])
set_value.append(f"{row}='{row_value}'")
field = list(data["where"].keys())[0] # must be one
field_data = data["where"][field]
try:
set_ = ",".join(set_value)
query = f'UPDATE "{table}" SET {set_} WHERE {field}=%(field_data)s'
cursor.prepare(query)
cursor.execute({"field_data": field_data})
except (Exception, psycopg2.DatabaseError) as error:
connection.rollback()
raise ValueError(f"{error}")
else:
connection.commit()
rows_edited = cursor.rowcount
return rows_edited
def delete(table, field=None, value=None):
cursor, connection = get_db()
rows_deleted = 0
try:
query = f'DELETE FROM "{table}" WHERE {field}=%(value)s'
cursor.prepare(query)
cursor.execute({"value": value})
except (Exception, psycopg2.DatabaseError) as error:
connection.rollback()
raise ValueError(f"{error}")
else:
connection.commit()
rows_deleted = cursor.rowcount
return rows_deleted
def is_unique(table, field=None, value=None):
"""Check if data only appear once."""
cursor, connection = get_db()
query = f'SELECT * FROM "{table}" WHERE "{field}"=%(value)s'
cursor.prepare(query)
cursor.execute({"value": value})
rows = cursor.fetchall()
# an empty result (e.g. on a fresh database) means the value is still unique
return not rows
def plain_get(table, query, value=None):
"""Accept plain SQL to be sent as prepared statement."""
results = []
cursor, connection = get_db()
try:
cursor.prepare(query)
cursor.execute(value)
rows = cursor.fetchall()
results = zip_column_name(table, rows)
except (psycopg2.DatabaseError, psycopg2.OperationalError) as error:
connection.rollback()
raise ValueError(f"{error}")
else:
connection.commit()
return results
|
# xpyBuild - eXtensible Python-based Build System
#
# Copyright (c) 2013 - 2017 Software AG, Darmstadt, Germany and/or its licensors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Id: native.py 301527 2017-02-06 15:31:43Z matj $
#
import os, inspect, re, string, time, logging
from buildcommon import *
from basetarget import BaseTarget
from propertysupport import defineOption
from utils.process import call
from pathsets import PathSet, BasePathSet
from buildcontext import getBuildInitializationContext
from buildexceptions import BuildException
from propertyfunctors import make_functor, Composable
from utils.fileutils import openForWrite, mkdir, deleteFile, getmtime, exists, normLongPath
class __CompilersNotSpecified(object):
def __getattr__(self, attr):
raise Exception('Cannot use native targets until a compiler is configured by setting the native.compilers option')
defineOption('native.compilers', __CompilersNotSpecified())
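# Until 'native.compilers' is overridden with a real toolchain object, the placeholder
# above raises a descriptive error the first time any native target tries to use it.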
defineOption('native.libs', [])
defineOption('native.libpaths', [])
defineOption('native.c.flags', None) # defaults to native.cxx.flags if not set
defineOption('native.cxx.flags', [])
defineOption('native.cxx.path', [])
defineOption('native.include', [])
defineOption('native.link.flags', [])
if isWindows():
defineOption('native.cxx.exenamefn', FilenameStringFormatter("%s.exe"))
defineOption('native.cxx.libnamefn', FilenameStringFormatter("%s.dll"))
defineOption('native.cxx.staticlibnamefn', FilenameStringFormatter("%s.lib"))
defineOption('native.cxx.objnamefn', FilenameStringFormatter("%s.obj"))
else:
defineOption('native.cxx.exenamefn', FilenameStringFormatter("%s"))
defineOption('native.cxx.libnamefn', FilenameStringFormatter("lib%s.so"))
defineOption('native.cxx.staticlibnamefn', FilenameStringFormatter("lib%s.a"))
defineOption('native.cxx.objnamefn', FilenameStringFormatter("%s.o"))
makedeplog = logging.getLogger('MakeDepend')
class CompilerMakeDependsPathSet(BasePathSet):
"""
Use the selected ToolChain to get a list of dependencies from a set of source files
"""
def __init__(self, target, src, flags=None, includes=None):
"""
@param target: the BaseTarget object for which this path set is being calculated
@param src: a PathSet of source file paths
@param flags: additional compiler flags
@param includes: a list of include directory paths
"""
BasePathSet.__init__(self)
self.log = makedeplog
self.target = target
self.sources = src
self.flags = flatten([flags]) or []
self.includes = includes or []
def __repr__(self):
return "MakeDepend(%s, %s)" % (self.sources, self.flags)
def resolveWithDestinations(self, context):
return [(i, os.path.basename(i)) for i in self._resolveUnderlyingDependencies(context)]
def clean(self):
dfile = self.target.workDir+'.makedepend'
deleteFile(dfile)
def _resolveUnderlyingDependencies(self, context):
deplist = None
options = self.target.options # get the merged options
dfile = normLongPath(self.target.workDir+'.makedepend')
testsources = self.sources.resolve(context)
depsources = self.sources._resolveUnderlyingDependencies(context)
needsRebuild = not os.path.exists(dfile)
if needsRebuild:
self.log.info("Rebuilding dependencies for %s because cached dependencies file does not exist (%s)" % (self.target, dfile))
dfiletime = 0 if needsRebuild else getmtime(dfile)
for x in testsources:
if not exists(x):
# can't generate any deps if some source files don't yet exist
self.log.info("Dependency generation %s postponed because source file does not exist: %s" % (self.target, x))
return depsources
elif getmtime(x) > dfiletime:
if not needsRebuild: self.log.info("Rebuilding dependencies for %s because cached dependencies file is older than %s" % (self.target, x))
needsRebuild = True
if not needsRebuild: # read in cached dependencies
deplist = []
with open(dfile) as f:
lines = f.readlines()
header = lines[0].strip()
lines = lines[1:]
for d in lines:
d = d.strip()
if context._isValidTarget(d) or exists(normLongPath(d)):
deplist.append(d)
else:
needsRebuild = True
self.log.warn("Rebuilding dependencies for %s because dependency %s is missing" % (self.target, d))
break
if header != str(self):
self.log.info("Rebuilding dependencies for %s because target options have changed (%s != %s)" % (self.target, header, str(self)))
elif not needsRebuild:
return deplist
# generate them again
startt = time.time()
self.log.info("*** Generating native dependencies for %s" % self.target)
try:
deplist = options['native.compilers'].dependencies.depends(context=context, src=testsources, options=options, flags=flatten(options['native.cxx.flags']+[context.expandPropertyValues(x).split(' ') for x in self.flags]), includes=flatten(self.includes.resolve(context)+[context.expandPropertyValues(x, expandList=True) for x in options['native.include']]))
except BuildException, e:
if len(testsources)==1 and testsources[0] not in str(e):
raise BuildException('Dependency resolution failed for %s: %s'%(testsources[0], e))
raise
deplist += depsources
mkdir(os.path.dirname(dfile))
with openForWrite(dfile, 'wb') as f:
assert not os.linesep in str(self)
f.write(str(self)+os.linesep)
for d in deplist:
f.write(d.encode('UTF-8')+os.linesep)
if time.time()-startt > 5: # this should usually be pretty quick, so may indicate a real build file mistake
self.log.warn('Dependency generation took a long time: %0.1f s to evaluate %s', time.time()-startt, self)
return deplist
class Cpp(BaseTarget):
""" A target that compiles a C++ source file to a .o
"""
def __init__(self, object, source, includes=None, flags=None, dependencies=None, options=None):
"""
@param object: the object file to generate
@param source: a (list of) source files
@param includes: a (list of) include directories
@param flags: a list of additional compiler flags
@param dependencies: a list of additional dependencies that need to be built
before this target
@param options: [DEPRECATED - use .option() instead]
"""
self.source = PathSet(source)
self.includes = PathSet(includes or [])
self.flags = flatten([flags]) or []
self.makedepend = CompilerMakeDependsPathSet(self, self.source, flags=self.flags, includes=self.includes)
BaseTarget.__init__(self, object, [dependencies or [], self.source, self.makedepend])
for k,v in (options or {}).items(): self.option(k, v)
self.tags('native')
def run(self, context):
options = self.options
mkdir(os.path.dirname(self.path))
options['native.compilers'].cxxcompiler.compile(context, output=self.path, options=options, flags=flatten(options['native.cxx.flags']+[context.expandPropertyValues(x).split(' ') for x in self.flags]), src=self.source.resolve(context), includes=flatten(self.includes.resolve(context)+[context.expandPropertyValues(x, expandList=True) for x in options['native.include']]))
def clean(self, context):
self.makedepend.clean()
BaseTarget.clean(self, context)
def getHashableImplicitInputs(self, context):
r = super(Cpp, self).getHashableImplicitInputs(context)
# include input to makedepends, since even without running makedepends
# we know we're out of date if inputs have changed
r.append('depends: '+context.expandPropertyValues(str(self.makedepend)))
return r
class C(BaseTarget):
""" A target that compiles a C source file to a .o
"""
def __init__(self, object, source, includes=None, flags=None, options=None, dependencies=None):
"""
@param object: the object file to generate
@param source: a (list of) source files
@param includes: a (list of) include directories
@param flags: a list of additional compiler flags
@param dependencies: a list of additional dependencies that need to be built
before this target
@param options: [DEPRECATED - use .option() instead]
"""
self.source = PathSet(source)
self.includes = PathSet(includes or [])
self.flags = flags or []
self.makedepend = CompilerMakeDependsPathSet(self, self.source, flags=self.flags, includes=self.includes)
BaseTarget.__init__(self, object, [dependencies or [], self.makedepend])
for k,v in (options or {}).items(): self.option(k, v)
self.tags('native')
def run(self, context):
options = self.options
mkdir(os.path.dirname(self.path))
options['native.compilers'].ccompiler.compile(context, output=self.path,
options=options,
flags=flatten((options['native.c.flags'] or options['native.cxx.flags'])+[context.expandPropertyValues(x).split(' ') for x in self.flags]),
src=self.source.resolve(context),
includes=flatten(self.includes.resolve(context)+[context.expandPropertyValues(x, expandList=True) for x in options['native.include']]))
def clean(self, context):
self.makedepend.clean()
BaseTarget.clean(self, context)
def getHashableImplicitInputs(self, context):
r = super(C, self).getHashableImplicitInputs(context)
# include input to makedepends, since even without running makedepends
# we know we're out of date if inputs have changed
r.append(context.expandPropertyValues(str(self.makedepend)))
return r
class Link(BaseTarget):
""" A target that links object files to binaries
"""
def __init__(self, bin, objects, libs=None, libpaths=None, shared=False, options=None, flags=None, dependencies=None):
"""
@param bin: the output binary
@param objects: a (list of) input object
@param libs: a (list of) libraries linked against (optional) in platform-neutral format.
Can include list properties like '${FOO_LIB_NAMES[]}'.
@param libpaths: a (list of) additional library search directories (optional)
@param shared: if true compiles to a shared object (.dll or .so) (optional, defaults to false)
@param flags: a list of additional linker flags
@param options: [DEPRECATED - use .option() instead]
@param dependencies: a list of additional dependencies (targets or files)
"""
self.objects = PathSet(objects)
self.libs = libs or []
self.libpaths = PathSet(libpaths or [])
self.shared=shared
self.flags = flags or []
BaseTarget.__init__(self, bin, PathSet(self.objects, (dependencies or [])))
for k,v in (options or {}).items(): self.option(k, v)
self.tags('native')
def run(self, context):
options = self.options
mkdir(os.path.dirname(self.path))
options['native.compilers'].linker.link(context, output=self.path,
options=options,
flags=options['native.link.flags']+self.flags,
shared=self.shared,
src=self.objects.resolve(context),
libs=flatten([map(string.strip, context.expandPropertyValues(x, expandList=True)) for x in self.libs+options['native.libs'] if x]),
libdirs=flatten(self.libpaths.resolve(context)+[context.expandPropertyValues(x, expandList=True) for x in options['native.libpaths']]))
def getHashableImplicitInputs(self, context):
r = super(Link, self).getHashableImplicitInputs(context)
options = self.options
r.append('libs: '+context.expandPropertyValues(str(self.libs+options['native.libs'])))
r.append('libpaths: '+context.expandPropertyValues(str(self.libpaths)))
r.append('native.libpaths: %s'%options['native.libpaths'])
r.append('shared: %s, flags=%s'%(self.shared, self.flags))
return r
class Ar(BaseTarget):
""" A target that compiles .a files from collections of .o files
"""
def __init__(self, bin, objects):
"""
@param bin: the output library
@param objects: a (list of) input objects
"""
self.objects = PathSet(objects)
BaseTarget.__init__(self, bin, self.objects)
self.tags('native')
def run(self, context):
options = self.options
mkdir(os.path.dirname(self.path))
options['native.compilers'].archiver.archive(context, output=self.path,
options=options,
src=self.objects.resolve(context))
def getHashableImplicitInputs(self, context):
r = super(Ar, self).getHashableImplicitInputs(context)
r.append('objects: %s'%self.objects)
return r
exename = make_functor(lambda c, i:c.mergeOptions()['native.cxx.exenamefn'](c.expandPropertyValues(i)), name='exename')
objectname = make_functor(lambda c, i:c.mergeOptions()['native.cxx.objnamefn'](c.expandPropertyValues(i)), name='objectname')
libname = make_functor(lambda c, i:c.mergeOptions()['native.cxx.libnamefn'](c.expandPropertyValues(i)), name='libname')
staticlibname = make_functor(lambda c, i:c.mergeOptions()['native.cxx.staticlibnamefn'](c.expandPropertyValues(i)), name='staticlibname')
|
import frappe
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
def execute():
company = frappe.get_all('Company', filters = {'country': 'India'})
if not company:
return
create_custom_field('Delivery Note', {
'fieldname': 'ewaybill',
'label': 'E-Way Bill No.',
'fieldtype': 'Data',
'depends_on': 'eval:(doc.docstatus === 1)',
'allow_on_submit': 1,
'insert_after': 'customer_name_in_arabic',
'translatable': 0,
'owner': 'Administrator'
}) |
class Solution:
def hasAlternatingBits(self, n: int) -> bool:
t = n & 1
n >>= 1
while n:
if (n & 1) == t: return False
t = n & 1
n >>= 1
return True
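# The loop above compares each bit with the previous one; an equivalent purely bitwise
# check (not used here) would be: x = n ^ (n >> 1); return (x & (x + 1)) == 0.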
|
import re
from django import template
from festival.models import Project
from filmfestival.models import Film
register = template.Library()
@register.simple_tag(takes_context=True)
def project_film_by_directors(context, **kwargs):
projects = Project.objects.filter(**kwargs)
context['films'] = Film.objects.filter(project__in=projects, status=Film.SELECTED).order_by('dir_by')
return ''
@register.simple_tag(takes_context=True)
def project_film_by_title(context, **kwargs):
projects = Project.objects.filter(**kwargs)
context['films'] = Film.objects.filter(project__in=projects, status=Film.SELECTED).order_by('title')
return ''
@register.simple_tag(takes_context=True)
def project_film_by_id(context, *ids):
context['films'] = Film.objects.filter(id__in=ids, status=Film.SELECTED).order_by('title')
return ''
|
from collections import defaultdict
from datetime import datetime, timezone
from itertools import chain
import logging
import pickle
from typing import Any, Collection, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import aiomcache
import numpy as np
import pandas as pd
from sqlalchemy import sql
from sqlalchemy.orm import aliased
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.sql import ClauseElement
from athenian.api import metadata
from athenian.api.async_utils import gather, read_sql_query
from athenian.api.cache import cached, short_term_exptime
from athenian.api.controllers.jira import JIRAConfig
from athenian.api.controllers.miners.filters import JIRAFilter, LabelFilter
from athenian.api.controllers.miners.github.label import fetch_labels_to_filter
from athenian.api.controllers.miners.github.logical import split_logical_repositories
from athenian.api.controllers.miners.github.precomputed_prs import triage_by_release_match
from athenian.api.controllers.miners.types import PullRequestFactsMap
from athenian.api.controllers.settings import LogicalRepositorySettings, ReleaseMatch, \
ReleaseSettings
from athenian.api.db import Database, DatabaseLike
from athenian.api.models.metadata.github import NodePullRequest, NodePullRequestJiraIssues, \
NodeRepository, PullRequest
from athenian.api.models.metadata.jira import AthenianIssue, Component, Epic, Issue, Status
from athenian.api.models.precomputed.models import GitHubDonePullRequestFacts
from athenian.api.tracing import sentry_span
async def generate_jira_prs_query(filters: List[ClauseElement],
jira: JIRAFilter,
meta_ids: Optional[Tuple[int, ...]],
mdb: Database,
cache: Optional[aiomcache.Client],
columns=PullRequest,
seed=PullRequest,
on=(PullRequest.node_id, PullRequest.acc_id),
) -> sql.Select:
"""
Produce SQLAlchemy statement to fetch PRs that satisfy JIRA conditions.
:param filters: Extra WHERE conditions.
:param columns: SELECT these columns.
:param seed: JOIN with this object.
:param on: JOIN by these two columns: node ID-like and acc_id-like.
"""
assert jira
if columns is PullRequest:
columns = [PullRequest]
_map = aliased(NodePullRequestJiraIssues, name="m")
meta_ids_cond = (on[1].in_(meta_ids),) if meta_ids is not None else ()
if jira.unmapped:
return sql.select(columns).select_from(sql.outerjoin(
seed, _map, sql.and_(on[0] == _map.node_id, on[1] == _map.node_acc),
)).where(sql.and_(*filters, *meta_ids_cond, _map.node_id.is_(None)))
_issue = aliased(Issue, name="j")
filters.extend((
_issue.acc_id == jira.account,
_issue.project_id.in_(jira.projects),
_issue.is_deleted.is_(False),
*meta_ids_cond,
))
if jira.labels:
components = await _load_components(jira.labels, jira.account, mdb, cache)
_append_label_filters(
jira.labels, components, mdb.url.dialect == "postgresql", filters, model=_issue)
if jira.issue_types:
filters.append(_issue.type.in_(jira.issue_types))
if not jira.epics:
return sql.select(columns).select_from(sql.join(
seed, sql.join(_map, _issue, sql.and_(
_map.jira_acc == _issue.acc_id,
_map.jira_id == _issue.id,
)),
sql.and_(
on[0] == _map.node_id,
on[1] == _map.node_acc,
),
)).where(sql.and_(*filters))
_issue_epic = aliased(Issue, name="e")
filters.append(_issue_epic.key.in_(jira.epics))
return sql.select(columns).select_from(sql.join(
seed, sql.join(
_map, sql.join(_issue, _issue_epic, sql.and_(
_issue.epic_id == _issue_epic.id,
_issue.acc_id == _issue_epic.acc_id,
)),
sql.and_(
_map.jira_id == _issue.id,
_map.jira_acc == _issue.acc_id,
)),
sql.and_(
on[0] == _map.node_id,
on[1] == _map.node_acc,
),
)).where(sql.and_(*filters))
@sentry_span
@cached(
exptime=60 * 60, # 1 hour
serialize=pickle.dumps,
deserialize=pickle.loads,
key=lambda labels, account, **_: (labels, account),
refresh_on_access=True,
)
async def _load_components(labels: LabelFilter,
account: int,
mdb: Database,
cache: Optional[aiomcache.Client],
) -> Dict[str, str]:
all_labels = set()
for label in chain(labels.include, labels.exclude):
for part in label.split(","):
all_labels.add(part.strip())
rows = await mdb.fetch_all(sql.select([Component.id, Component.name]).where(sql.and_(
sql.func.lower(Component.name).in_(all_labels),
Component.acc_id == account,
)))
return {r[1].lower(): r[0] for r in rows}
def _append_label_filters(labels: LabelFilter,
components: Dict[str, str],
postgres: bool,
filters: List[ClauseElement],
model=Issue):
if postgres:
if labels.include:
singles, multiples = LabelFilter.split(labels.include)
or_items = []
if singles:
or_items.append(model.labels.overlap(singles))
or_items.extend(model.labels.contains(m) for m in multiples)
if components:
if singles:
cinc = [components[s] for s in singles if s in components]
if cinc:
or_items.append(model.components.overlap(cinc))
if multiples:
cinc = [[components[c] for c in g if c in components] for g in multiples]
or_items.extend(model.components.contains(g) for g in cinc if g)
filters.append(sql.or_(*or_items))
if labels.exclude:
filters.append(sql.not_(model.labels.overlap(labels.exclude)))
if components:
filters.append(sql.not_(model.components.overlap(
[components[s] for s in labels.exclude if s in components])))
else:
# neither 100% correct nor efficient, but enough for local development
if labels.include:
or_items = []
singles, multiples = LabelFilter.split(labels.include)
or_items.extend(model.labels.like("%%%s%%" % s) for s in singles)
or_items.extend(
sql.and_(*(model.labels.like("%%%s%%" % s) for s in g)) for g in multiples)
if components:
if singles:
or_items.extend(
model.components.like("%%%s%%" % components[s])
for s in singles if s in components)
if multiples:
or_items.extend(
sql.and_(*(model.components.like("%%%s%%" % components[s]) for s in g
if s in components))
for g in multiples)
filters.append(sql.or_(*or_items))
if labels.exclude:
filters.append(sql.not_(sql.or_(*(
model.labels.like("%%%s%%" % s) for s in labels.exclude))))
if components:
filters.append(sql.not_(sql.or_(*(
model.components.like("%%%s%%" % components[s])
for s in labels.exclude if s in components))))
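# Illustrative sketch (an assumption, not part of the original module): with an include
# filter such as {"api", "infra,bug"}, LabelFilter.split() is expected to yield
# singles == ["api"] and multiples == [["infra", "bug"]], so on Postgres the generated
# predicate is roughly
#     j.labels && ARRAY['api'] OR j.labels @> ARRAY['infra', 'bug']
# (overlap for single labels, containment for comma-separated groups), with analogous
# overlap/containment conditions added for the mapped JIRA components.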
ISSUE_PRS_BEGAN = "prs_began"
ISSUE_PRS_RELEASED = "prs_released"
ISSUE_PRS_COUNT = "prs_count"
ISSUE_PR_IDS = "pr_ids"
@sentry_span
@cached(
exptime=short_term_exptime,
serialize=pickle.dumps,
deserialize=pickle.loads,
key=lambda installation_ids, time_from, time_to, exclude_inactive, labels, priorities, types, epics, reporters, assignees, commenters, nested_assignees, release_settings, logical_settings, **kwargs: ( # noqa
installation_ids[0],
",".join(installation_ids[1]),
time_from.timestamp() if time_from else "-",
time_to.timestamp() if time_to else "-",
exclude_inactive,
labels,
",".join(sorted(priorities)),
",".join(sorted(types)),
",".join(sorted(epics) if not isinstance(epics, bool) else ["<flying>"]),
",".join(sorted(reporters)),
",".join(sorted((ass if ass is not None else "<None>") for ass in assignees)),
",".join(sorted(commenters)),
nested_assignees,
",".join(c.name for c in kwargs.get("extra_columns", ())),
release_settings,
logical_settings,
),
)
async def fetch_jira_issues(installation_ids: JIRAConfig,
time_from: Optional[datetime],
time_to: Optional[datetime],
exclude_inactive: bool,
labels: LabelFilter,
priorities: Collection[str],
types: Collection[str],
epics: Union[Collection[str], bool],
reporters: Collection[str],
assignees: Collection[Optional[str]],
commenters: Collection[str],
nested_assignees: bool,
default_branches: Dict[str, str],
release_settings: ReleaseSettings,
logical_settings: LogicalRepositorySettings,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
cache: Optional[aiomcache.Client],
extra_columns: Iterable[InstrumentedAttribute] = (),
) -> pd.DataFrame:
"""
Load JIRA issues following the specified filters.
The aggregation is OR between the participation roles.
:param installation_ids: JIRA installation ID and the enabled project IDs.
:param time_from: Issues should not be resolved before this timestamp.
:param time_to: Issues should be opened before this timestamp.
:param exclude_inactive: Issues must be updated after `time_from`.
:param labels: Issues must satisfy these label conditions.
:param priorities: List of lower-case priorities.
:param types: List of lower-case types.
:param epics: List of required parent epic keys. If empty, disable filtering by epics. \
If false, return only those issues which are without an epic and are not epics \
themselves.
:param reporters: List of lower-case issue reporters.
:param assignees: List of lower-case issue assignees. None means unassigned.
:param commenters: List of lower-case issue commenters.
    :param nested_assignees: If filtering by assignees, also match issues assigned to any of \
        the children's assignees.
:param extra_columns: Additional `Issue` or `AthenianIssue` columns to fetch.
"""
log = logging.getLogger("%s.jira" % metadata.__package__)
issues = await _fetch_issues(
installation_ids, time_from, time_to, exclude_inactive, labels, priorities, types, epics,
reporters, assignees, commenters, nested_assignees, mdb, cache,
extra_columns=extra_columns)
if not exclude_inactive:
# DEV-1899: exclude and report issues with empty AthenianIssue
if (missing_updated := issues[AthenianIssue.updated.name].isnull().values).any():
log.error("JIRA issues are missing in jira.athenian_issue: %s",
", ".join(issues[Issue.key.name].take(np.nonzero(missing_updated)[0])))
issues = issues.take(np.nonzero(~missing_updated)[0])
if len(issues.index) >= 20:
jira_id_cond = NodePullRequestJiraIssues.jira_id.in_any_values(issues.index)
else:
jira_id_cond = NodePullRequestJiraIssues.jira_id.in_(issues.index)
nullable_repository_id = NodePullRequest.repository_id
nullable_repository_id = nullable_repository_id.label(nullable_repository_id.name)
nullable_repository_id.nullable = True
pr_cols = [
NodePullRequestJiraIssues.node_id,
NodePullRequestJiraIssues.jira_id,
NodePullRequest.title,
NodePullRequest.created_at,
nullable_repository_id,
NodeRepository.name_with_owner.label(PullRequest.repository_full_name.name),
]
prs = await read_sql_query(
sql.select(pr_cols)
.select_from(
sql.outerjoin(
sql.outerjoin(NodePullRequestJiraIssues, NodePullRequest, sql.and_(
NodePullRequestJiraIssues.node_acc == NodePullRequest.acc_id,
NodePullRequestJiraIssues.node_id == NodePullRequest.graph_id,
)),
NodeRepository,
sql.and_(
NodePullRequest.acc_id == NodeRepository.acc_id,
NodePullRequest.repository_id == NodeRepository.graph_id,
)))
.where(sql.and_(NodePullRequestJiraIssues.jira_acc == installation_ids[0],
NodePullRequestJiraIssues.node_acc.in_(meta_ids),
jira_id_cond)),
mdb, pr_cols, index=NodePullRequestJiraIssues.node_id.name,
)
# TODO(vmarkovtsev): load the "fresh" released PRs
existing_repos = np.flatnonzero(np.not_equal(
prs[PullRequest.repository_full_name.name].values, None))
if len(existing_repos) < len(prs):
log.error(
"Repositories referenced by github.node_pullrequest do not exist in "
"github.node_repository on GitHub account %s: %s",
meta_ids, np.unique(prs[NodePullRequest.repository_id.name].values[np.setdiff1d(
np.arange(len(prs)), existing_repos, assume_unique=True)]).tolist())
prs = prs.take(existing_repos)
released_prs, labels = await gather(
_fetch_released_prs(prs.index.values, default_branches, release_settings, account, pdb),
fetch_labels_to_filter(prs.index.values, meta_ids, mdb),
)
prs = split_logical_repositories(
prs, labels, logical_settings.all_logical_repos(), logical_settings)
pr_to_issue = {
key: ji for key, ji in zip(
prs.index.values,
prs[NodePullRequestJiraIssues.jira_id.name].values,
)
}
issue_to_index = {iid: i for i, iid in enumerate(issues.index.values)}
pr_node_ids = prs.index.get_level_values(0).values
jira_ids = prs[NodePullRequestJiraIssues.jira_id.name].values
unique_jira_ids, index_map, counts = np.unique(
jira_ids, return_inverse=True, return_counts=True)
split_pr_node_ids = np.split(pr_node_ids[np.argsort(index_map)], np.cumsum(counts[:-1]))
    issue_prs = [[]] * len(issues)  # yes, these are references to the same list
issue_indexes = []
for issue, node_ids in zip(unique_jira_ids, split_pr_node_ids):
issue_index = issue_to_index[issue]
issue_indexes.append(issue_index)
issue_prs[issue_index] = node_ids
prs_count = np.zeros(len(issues), dtype=int)
prs_count[issue_indexes] = counts
nat = np.datetime64("nat")
work_began = np.full(len(issues), nat, "datetime64[ns]")
released = work_began.copy()
for key, pr_created_at in zip(
prs.index.values,
prs[NodePullRequest.created_at.name].values,
):
i = issue_to_index[pr_to_issue[key]]
node_id, repo = key
if pr_created_at is not None:
work_began[i] = np.nanmin(np.array(
[work_began[i], pr_created_at],
dtype=np.datetime64))
if (row := released_prs.get(key)) is not None:
released[i] = np.nanmax(np.array(
[released[i], row[GitHubDonePullRequestFacts.pr_done_at.name]],
dtype=np.datetime64))
continue
if repo not in release_settings.native:
# deleted repository, consider the PR as force push dropped
released[i] = work_began[i]
else:
released[i] = nat
issues[ISSUE_PRS_BEGAN] = work_began
issues[ISSUE_PRS_RELEASED] = released
issues[ISSUE_PRS_COUNT] = prs_count
issues[ISSUE_PR_IDS] = issue_prs
resolved_colname = AthenianIssue.resolved.name
created_colname = Issue.created.name
issues[resolved_colname] = issues[resolved_colname].astype(issues[created_colname].dtype)
if (negative := issues[resolved_colname].values < issues[created_colname].values).any():
log.error("JIRA issues have resolved < created: %s",
issues.index.values[negative].tolist())
issues[resolved_colname].values[negative] = issues[created_colname].values[negative]
return issues
@sentry_span
async def _fetch_released_prs(pr_node_ids: Iterable[int],
default_branches: Dict[str, str],
release_settings: ReleaseSettings,
account: int,
pdb: Database,
) -> Dict[Tuple[int, str], Mapping[str, Any]]:
ghdprf = GitHubDonePullRequestFacts
released_rows = await pdb.fetch_all(
sql.select([ghdprf.pr_node_id,
ghdprf.pr_created_at,
ghdprf.pr_done_at,
ghdprf.repository_full_name,
ghdprf.release_match])
.where(sql.and_(ghdprf.pr_node_id.in_(pr_node_ids),
ghdprf.acc_id == account)))
released_by_repo = defaultdict(lambda: defaultdict(dict))
for r in released_rows:
released_by_repo[
r[ghdprf.repository_full_name.name]][
r[ghdprf.release_match.name]][
r[ghdprf.pr_node_id.name]] = r
released_prs = {}
ambiguous = {ReleaseMatch.tag.name: {}, ReleaseMatch.branch.name: {}}
for repo, matches in released_by_repo.items():
for match, prs in matches.items():
if repo not in release_settings.native:
for node_id, row in prs.items():
key = (node_id, repo)
try:
if released_prs[key][ghdprf.pr_done_at] < row[ghdprf.pr_done_at]:
released_prs[key] = row
except KeyError:
released_prs[key] = row
continue
dump = triage_by_release_match(
repo, match, release_settings, default_branches, released_prs, ambiguous)
if dump is None:
continue
for node_id, row in prs.items():
dump[(node_id, repo)] = row
released_prs.update(ambiguous[ReleaseMatch.tag.name])
for key, row in ambiguous[ReleaseMatch.branch.name].items():
released_prs.setdefault(key, row)
return released_prs
@sentry_span
async def _fetch_issues(ids: JIRAConfig,
time_from: Optional[datetime],
time_to: Optional[datetime],
exclude_inactive: bool,
labels: LabelFilter,
priorities: Collection[str],
types: Collection[str],
epics: Union[Collection[str], bool],
reporters: Collection[str],
assignees: Collection[Optional[str]],
commenters: Collection[str],
nested_assignees: bool,
mdb: Database,
cache: Optional[aiomcache.Client],
extra_columns: Iterable[InstrumentedAttribute] = (),
) -> pd.DataFrame:
postgres = mdb.url.dialect == "postgresql"
columns = [
Issue.id,
Issue.type,
Issue.created,
AthenianIssue.updated,
AthenianIssue.work_began,
AthenianIssue.resolved,
Issue.priority_name,
Issue.epic_id,
Issue.status,
Status.category_name,
Issue.labels,
]
columns.extend(extra_columns)
# this is backed with a DB index
far_away_future = datetime(3000, 1, 1, 0, 0, 0, tzinfo=timezone.utc)
and_filters = [
Issue.acc_id == ids[0],
Issue.project_id.in_(ids[1]),
Issue.is_deleted.is_(False),
]
if time_from is not None:
and_filters.append(sql.func.coalesce(AthenianIssue.resolved, far_away_future) >= time_from)
if time_to is not None:
and_filters.append(Issue.created < time_to)
if exclude_inactive and time_from is not None:
and_filters.append(AthenianIssue.acc_id == ids[0])
and_filters.append(AthenianIssue.updated >= time_from)
if len(priorities):
and_filters.append(sql.func.lower(Issue.priority_name).in_(priorities))
if len(types):
and_filters.append(sql.func.lower(Issue.type).in_(types))
if isinstance(epics, bool):
assert epics is False
epics_major = aliased(Epic, name="epics_major")
epics_parent = aliased(Epic, name="epics_parent")
epics_self = aliased(Epic, name="epics_self")
for alias in (epics_major, epics_parent, epics_self):
and_filters.append(alias.name.is_(None))
elif len(epics):
and_filters.append(Epic.key.in_(epics))
or_filters = []
if labels:
components = await _load_components(labels, ids[0], mdb, cache)
_append_label_filters(
labels, components, mdb.url.dialect == "postgresql", and_filters)
if reporters and (postgres or not commenters):
or_filters.append(sql.func.lower(Issue.reporter_display_name).in_(reporters))
if assignees and (postgres or (not commenters and not nested_assignees)):
if None in assignees:
# NULL IN (NULL) = false
or_filters.append(sql.func.lower(Issue.assignee_display_name).is_(None))
if nested_assignees:
or_filters.append(AthenianIssue.nested_assignee_display_names.has_any(assignees))
else:
or_filters.append(sql.func.lower(Issue.assignee_display_name).in_(assignees))
if commenters:
if postgres:
or_filters.append(Issue.commenters_display_names.overlap(commenters))
else:
if reporters:
columns.append(sql.func.lower(Issue.reporter_display_name).label("_reporter"))
if assignees:
columns.append(sql.func.lower(Issue.assignee_display_name).label("_assignee"))
if nested_assignees and all(
c.name != AthenianIssue.nested_assignee_display_names.name
for c in extra_columns):
columns.append(AthenianIssue.nested_assignee_display_names)
if all(c.name != "commenters" for c in extra_columns):
columns.append(Issue.commenters_display_names.label("commenters"))
if assignees and not postgres:
if nested_assignees and all(
c.name != AthenianIssue.nested_assignee_display_names.name
for c in columns):
columns.append(AthenianIssue.nested_assignee_display_names)
if None in assignees and all(c.name != "_assignee" for c in columns):
columns.append(sql.func.lower(Issue.assignee_display_name).label("_assignee"))
def query_starts():
seeds = [seed := sql.join(Issue, Status, sql.and_(Issue.status_id == Status.id,
Issue.acc_id == Status.acc_id))]
if epics is False:
seeds = [
sql.outerjoin(
sql.outerjoin(
sql.outerjoin(seed, epics_major,
sql.and_(Issue.epic_id == epics_major.id,
Issue.acc_id == epics_major.acc_id)),
epics_parent, sql.and_(Issue.parent_id == epics_parent.id,
Issue.acc_id == epics_parent.acc_id),
),
epics_self, sql.and_(Issue.id == epics_self.id,
Issue.acc_id == epics_self.acc_id),
),
]
elif len(epics):
seeds = [
sql.join(seed, Epic, sql.and_(Issue.epic_id == Epic.id,
Issue.acc_id == Epic.acc_id)),
sql.join(seed, Epic, sql.and_(Issue.parent_id == Epic.id,
Issue.acc_id == Epic.acc_id)),
]
return tuple(sql.select(columns).select_from(sql.outerjoin(
seed, AthenianIssue, sql.and_(Issue.acc_id == AthenianIssue.acc_id,
Issue.id == AthenianIssue.id)))
for seed in seeds)
if or_filters:
if postgres:
query = [start.where(sql.and_(or_filter, *and_filters))
for or_filter in or_filters
for start in query_starts()]
else:
query = [start.where(sql.and_(sql.or_(*or_filters), *and_filters))
for start in query_starts()]
else:
query = [start.where(sql.and_(*and_filters)) for start in query_starts()]
if postgres:
if len(query) == 1:
query = query[0]
else:
query = sql.union(*query)
df = await read_sql_query(query, mdb, columns, index=Issue.id.name)
else:
        # SQLite does not allow reusing the same parameters multiple times
df = pd.concat(await gather(*(read_sql_query(q, mdb, columns, index=Issue.id.name)
for q in query)))
df = _validate_and_clean_issues(df, ids[0])
df.sort_index(inplace=True)
if postgres or (not commenters and (not nested_assignees or not assignees)):
return df
passed = np.full(len(df), False)
if reporters:
passed |= df["_reporter"].isin(reporters).values
if assignees:
if nested_assignees:
assignees = set(assignees)
passed |= df[AthenianIssue.nested_assignee_display_names.name].apply(
lambda obj: bool(obj.keys() & assignees)).values
else:
passed |= df["_assignee"].isin(assignees).values
if None in assignees:
passed |= df["_assignee"].isnull().values
if commenters:
# don't go hardcore vectorized here, we don't have to with SQLite
for i, issue_commenters in enumerate(df["commenters"].values):
if len(np.intersect1d(issue_commenters, commenters)):
passed[i] = True
return df.take(np.nonzero(passed)[0])
def _validate_and_clean_issues(df: pd.DataFrame, acc_id: int) -> pd.DataFrame:
in_progress = df[Status.category_name.name].values == Status.CATEGORY_IN_PROGRESS
done = df[Status.category_name.name].values == Status.CATEGORY_DONE
no_work_began = df[AthenianIssue.work_began.name].isnull().values
no_resolved = df[AthenianIssue.resolved.name].isnull().values
in_progress_no_work_began = in_progress & no_work_began
done_no_work_began = done & no_work_began
done_no_resolved = done & no_resolved
invalid = in_progress_no_work_began | done_no_work_began | done_no_resolved
if not invalid.any():
return df
log = logging.getLogger(f"{metadata.__package__}.validate_and_clean_issues")
issue_ids = df.index.values
if in_progress_no_work_began.any():
log.error("account %d has issues in progress but their `work_began` is null: %s",
acc_id, issue_ids[in_progress_no_work_began].tolist())
if done_no_work_began.any():
log.error("account %d has issues done but their `work_began` is null: %s",
acc_id, issue_ids[done_no_work_began].tolist())
if done_no_resolved.any():
log.error("account %d has issues done but their `resolved` is null: %s",
acc_id, issue_ids[done_no_resolved].tolist())
old_len = len(df)
df = df.take(np.flatnonzero(~invalid))
log.warning("cleaned JIRA issues %d / %d", len(df), old_len)
return df
class PullRequestJiraMapper:
"""Mapper of pull requests to JIRA tickets."""
@classmethod
async def append_pr_jira_mapping(cls,
prs: PullRequestFactsMap,
meta_ids: Tuple[int, ...],
mdb: DatabaseLike) -> None:
"""Load and insert "jira_id" to the PR facts."""
pr_node_ids = defaultdict(list)
for node_id, repo in prs:
pr_node_ids[node_id].append(repo)
jira_map = await cls.load_pr_jira_mapping(pr_node_ids, meta_ids, mdb)
for pr_node_id, jira in jira_map.items():
for repo in pr_node_ids[pr_node_id]:
try:
prs[(pr_node_id, repo)].jira_ids = jira
except KeyError:
# we removed this PR in JIRA filter
continue
@classmethod
@sentry_span
async def load_pr_jira_mapping(cls,
prs: Collection[int],
meta_ids: Tuple[int, ...],
mdb: DatabaseLike,
) -> Dict[int, List[str]]:
"""Fetch the mapping from PR node IDs to JIRA issue IDs."""
nprji = NodePullRequestJiraIssues
if len(prs) >= 100:
node_id_cond = nprji.node_id.in_any_values(prs)
else:
node_id_cond = nprji.node_id.in_(prs)
rows = await mdb.fetch_all(
sql.select([nprji.node_id, Issue.key])
.select_from(sql.outerjoin(nprji, Issue, sql.and_(
nprji.jira_acc == Issue.acc_id,
nprji.jira_id == Issue.id,
)))
.where(sql.and_(node_id_cond,
nprji.node_acc.in_(meta_ids))))
result = defaultdict(list)
for r in rows:
result[r[0]].append(r[1])
return result
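# Illustrative sketch (hypothetical values, not from the original module): the returned
# mapping goes from PR node ID to the keys of every mapped JIRA issue, e.g.
#     {100500: ["DEV-1", "DEV-2"], 100501: ["OPS-7"]}
# PRs that have no row in node_pullrequest_jiraissues simply do not appear in the result.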
def resolve_work_began_and_resolved(issue_work_began: Optional[np.datetime64],
prs_began: Optional[np.datetime64],
issue_resolved: Optional[np.datetime64],
prs_released: Optional[np.datetime64],
) -> Tuple[Optional[np.datetime64], Optional[np.datetime64]]:
"""Compute the final timestamps of when the work started on the issue, and when the issue \
became fully resolved."""
if issue_work_began != issue_work_began or issue_work_began is None:
return None, None
if prs_began != prs_began or prs_began is None:
return issue_work_began, \
issue_resolved \
if (issue_resolved == issue_resolved and issue_resolved is not None) \
else None
work_began = min(prs_began, issue_work_began)
if (prs_released != prs_released or prs_released is None) or \
(issue_resolved != issue_resolved or issue_resolved is None):
return work_began, None
return work_began, max(issue_resolved, prs_released)
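# Illustrative sketch (hypothetical timestamps, not part of the original module): NaT
# values are detected through the `x != x` comparison above, and the final interval is
# widened by mapped PR activity, e.g.
#     resolve_work_began_and_resolved(
#         np.datetime64("2021-03-01"),  # issue work began
#         np.datetime64("2021-02-25"),  # earliest mapped PR created
#         np.datetime64("2021-03-10"),  # issue resolved
#         np.datetime64("2021-03-12"),  # last mapped PR released
#     )
# returns (2021-02-25, 2021-03-12): work begins at the earlier of the two starts, and the
# issue counts as fully resolved only once both the issue and its PRs are done.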
async def fetch_jira_issues_for_prs(pr_nodes: Collection[int],
meta_ids: Tuple[int, ...],
jira_ids: JIRAConfig,
mdb: DatabaseLike,
) -> List[Mapping[str, Any]]:
"""Load brief information about JIRA issues mapped to the given PRs."""
regiss = aliased(Issue, name="regular")
epiciss = aliased(Epic, name="epic")
prmap = aliased(NodePullRequestJiraIssues, name="m")
return await mdb.fetch_all(
sql.select([prmap.node_id.label("node_id"),
regiss.key.label("key"),
regiss.title.label("title"),
regiss.labels.label("labels"),
regiss.type.label("type"),
epiciss.key.label("epic")])
.select_from(sql.outerjoin(
sql.join(regiss, prmap, sql.and_(regiss.id == prmap.jira_id,
regiss.acc_id == prmap.jira_acc)),
epiciss, sql.and_(epiciss.id == regiss.epic_id,
epiciss.acc_id == regiss.acc_id)))
.where(sql.and_(prmap.node_id.in_(pr_nodes),
prmap.node_acc.in_(meta_ids),
regiss.project_id.in_(jira_ids[1]),
regiss.is_deleted.is_(False))))
|
# app settings
EC2_ACCESS_ID = 'A***Q'
EC2_ACCESS_KEY = 'R***I'
YCSB_SIZE = 0
MCROUTER_NOISE = 0
MEMCACHED_OD_SIZE = 1
MEMCACHED_SPOT_SIZE = 0
G_M_MIN = 7.5*1024
G_M_MAX = 7.5*1024
G_C_MIN = 2
G_C_MAX = 2
M_DEFAULT = 7.5*1024
C_DEFAULT = 2
G_M_MIN_2 = 7.5*1024
G_M_MAX_2 = 7.5*1024
G_C_MIN_2 = 2
G_C_MAX_2 = 2
M_DEFAULT_2 = 7.5*1024
C_DEFAULT_2 = 2
|
from django.contrib.auth.tokens import PasswordResetTokenGenerator
class PasswordGenerator(PasswordResetTokenGenerator):
    def _make_hash_value(self, user, timestamp: int):
        return str(user.pk) + str(timestamp) |
# $Id$
#
# Copyright (C) 2007,2008 Greg Landrum
#
# @@ All Rights Reserved @@
#
import os, sys
import io
import unittest
import pickle
from rdkit import RDConfig
from rdkit import DataStructs as ds
def feq(v1, v2, tol=1e-4):
return abs(v1 - v2) < tol
class TestCase(unittest.TestCase):
def setUp(self):
pass
def test1Int(self):
"""
"""
v1 = ds.IntSparseIntVect(5)
self.assertRaises(IndexError, lambda: v1[5])
v1[0] = 1
v1[2] = 2
v1[3] = 3
self.assertTrue(v1 == v1)
self.assertTrue(v1.GetLength() == 5)
v2 = ds.IntSparseIntVect(5)
self.assertTrue(v1 != v2)
v2 |= v1
self.assertTrue(v2 == v1)
v3 = v2 | v1
self.assertTrue(v3 == v1)
onVs = v1.GetNonzeroElements()
self.assertTrue(onVs == {0: 1, 2: 2, 3: 3})
def test2Long(self):
"""
"""
l = 1 << 42
v1 = ds.LongSparseIntVect(l)
self.assertRaises(IndexError, lambda: v1[l])
v1[0] = 1
v1[2] = 2
v1[1 << 35] = 3
self.assertTrue(v1 == v1)
self.assertTrue(v1.GetLength() == l)
v2 = ds.LongSparseIntVect(l)
self.assertTrue(v1 != v2)
v2 |= v1
self.assertTrue(v2 == v1)
v3 = v2 | v1
self.assertTrue(v3 == v1)
onVs = v1.GetNonzeroElements()
self.assertTrue(onVs == {0: 1, 2: 2, 1 << 35: 3})
def test3Pickle1(self):
"""
"""
l = 1 << 42
v1 = ds.LongSparseIntVect(l)
self.assertRaises(IndexError, lambda: v1[l + 1])
v1[0] = 1
v1[2] = 2
v1[1 << 35] = 3
self.assertTrue(v1 == v1)
v2 = pickle.loads(pickle.dumps(v1))
self.assertTrue(v2 == v1)
v3 = ds.LongSparseIntVect(v2.ToBinary())
self.assertTrue(v2 == v3)
self.assertTrue(v1 == v3)
#pickle.dump(v1,file('lsiv.pkl','wb+'))
    with open(os.path.join(RDConfig.RDBaseDir, 'Code/DataStructs/Wrap/testData/lsiv.pkl'),
              'r') as tf:
      buf = tf.read().replace('\r\n', '\n').encode('utf-8')
with io.BytesIO(buf) as f:
v3 = pickle.load(f)
self.assertTrue(v3 == v1)
def test3Pickle2(self):
"""
"""
l = 1 << 21
v1 = ds.IntSparseIntVect(l)
self.assertRaises(IndexError, lambda: v1[l + 1])
v1[0] = 1
v1[2] = 2
v1[1 << 12] = 3
self.assertTrue(v1 == v1)
v2 = pickle.loads(pickle.dumps(v1))
self.assertTrue(v2 == v1)
v3 = ds.IntSparseIntVect(v2.ToBinary())
self.assertTrue(v2 == v3)
self.assertTrue(v1 == v3)
#pickle.dump(v1,file('isiv.pkl','wb+'))
    with open(os.path.join(RDConfig.RDBaseDir, 'Code/DataStructs/Wrap/testData/isiv.pkl'),
              'r') as tf:
      buf = tf.read().replace('\r\n', '\n').encode('utf-8')
with io.BytesIO(buf) as f:
v3 = pickle.load(f)
self.assertTrue(v3 == v1)
def test4Update(self):
"""
"""
v1 = ds.IntSparseIntVect(5)
self.assertRaises(IndexError, lambda: v1[6])
v1[0] = 1
v1[2] = 2
v1[3] = 3
self.assertTrue(v1 == v1)
v2 = ds.IntSparseIntVect(5)
v2.UpdateFromSequence((0, 2, 3, 3, 2, 3))
self.assertTrue(v1 == v2)
def test5Dice(self):
"""
"""
v1 = ds.IntSparseIntVect(5)
v1[4] = 4
v1[0] = 2
v1[3] = 1
self.assertTrue(feq(ds.DiceSimilarity(v1, v1), 1.0))
v1 = ds.IntSparseIntVect(5)
v1[0] = 2
v1[2] = 1
v1[3] = 4
v1[4] = 6
v2 = ds.IntSparseIntVect(5)
v2[1] = 2
v2[2] = 3
v2[3] = 4
v2[4] = 4
self.assertTrue(feq(ds.DiceSimilarity(v1, v2), 18.0 / 26.))
self.assertTrue(feq(ds.DiceSimilarity(v2, v1), 18.0 / 26.))
def test6BulkDice(self):
"""
"""
sz = 10
nToSet = 5
nVs = 6
import random
vs = []
for i in range(nVs):
v = ds.IntSparseIntVect(sz)
for j in range(nToSet):
v[random.randint(0, sz - 1)] = random.randint(1, 10)
vs.append(v)
baseDs = [ds.DiceSimilarity(vs[0], vs[x]) for x in range(1, nVs)]
bulkDs = ds.BulkDiceSimilarity(vs[0], vs[1:])
for i in range(len(baseDs)):
self.assertTrue(feq(baseDs[i], bulkDs[i]))
def test6BulkTversky(self):
"""
"""
sz = 10
nToSet = 5
nVs = 6
import random
vs = []
for i in range(nVs):
v = ds.IntSparseIntVect(sz)
for j in range(nToSet):
v[random.randint(0, sz - 1)] = random.randint(1, 10)
vs.append(v)
baseDs = [ds.TverskySimilarity(vs[0], vs[x], .5, .5) for x in range(1, nVs)]
bulkDs = ds.BulkTverskySimilarity(vs[0], vs[1:], 0.5, 0.5)
diceDs = [ds.DiceSimilarity(vs[0], vs[x]) for x in range(1, nVs)]
for i in range(len(baseDs)):
self.assertTrue(feq(baseDs[i], bulkDs[i]))
self.assertTrue(feq(baseDs[i], diceDs[i]))
bulkDs = ds.BulkTverskySimilarity(vs[0], vs[1:], 1.0, 1.0)
taniDs = [ds.TanimotoSimilarity(vs[0], vs[x]) for x in range(1, nVs)]
for i in range(len(bulkDs)):
self.assertTrue(feq(bulkDs[i], taniDs[i]))
taniDs = ds.BulkTanimotoSimilarity(vs[0], vs[1:])
for i in range(len(bulkDs)):
self.assertTrue(feq(bulkDs[i], taniDs[i]))
if __name__ == '__main__':
unittest.main()
|
# Copyright 2009-2011 by Eric Talevich. All rights reserved.
# Revisions copyright 2009-2013 by Peter Cock. All rights reserved.
# Revisions copyright 2013 Lenna X. Peterson. All rights reserved.
# Revisions copyright 2013 Gokcen Eraslan. All rights reserved.
# Revisions copyright 2020 Joao Rodrigues. All rights reserved.
#
# Converted by Eric Talevich from an older unit test copyright 2002
# by Thomas Hamelryck.
#
# Merged related test files into one, by Joao Rodrigues (2020)
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Unit tests for the Bio.PDB.NACCESS submodule."""
import subprocess
import unittest
import warnings
try:
import numpy
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install NumPy if you want to use Bio.PDB."
) from None
from Bio import MissingExternalDependencyError
from Bio.PDB import PDBParser
from Bio.PDB.NACCESS import NACCESS, process_asa_data, process_rsa_data
class NACCESS_test(unittest.TestCase):
"""Tests for Bio.PDB.NACCESS and output parsing."""
def test_NACCESS_rsa_file(self):
"""Test parsing of pregenerated rsa NACCESS file."""
with open("PDB/1A8O.rsa") as rsa:
naccess = process_rsa_data(rsa)
self.assertEqual(len(naccess), 66)
def test_NACCESS_asa_file(self):
"""Test parsing of pregenerated asa NACCESS file."""
with open("PDB/1A8O.asa") as asa:
naccess = process_asa_data(asa)
self.assertEqual(len(naccess), 524)
def test_NACCESS(self):
"""Test calling NACCESS from Bio.PDB."""
# Check if NACCESS is available
try:
subprocess.check_call(
["naccess", "-q"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
except OSError:
raise self.skipTest("Install naccess if you want to use it from Biopython.")
p = PDBParser()
pdbfile = "PDB/1A8O.pdb"
model = p.get_structure("1A8O", pdbfile)[0]
naccess = NACCESS(model, pdbfile)
self.assertEqual(len(naccess), 66)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
import json
import pytest
import os
import nomad
import uuid
import responses
import tests.common as common
def test_register_job(nomad_setup):
with open("example.json") as fh:
job = json.loads(fh.read())
nomad_setup.job.register_job("example", job)
assert "example" in nomad_setup.job
# integration tests requires nomad Vagrant VM or Binary running
@pytest.mark.skipif(tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 6, 0), reason="Not supported in version")
def test_get_deployment(nomad_setup):
deploymentID = nomad_setup.deployments.get_deployments()[0]["ID"]
assert isinstance(nomad_setup.deployment.get_deployment(deploymentID), dict)
assert deploymentID == nomad_setup.deployment.get_deployment(deploymentID)["ID"]
@pytest.mark.skipif(tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 6, 0), reason="Not supported in version")
def test_get_deployment_allocations(nomad_setup):
deploymentID = nomad_setup.deployments.get_deployments()[0]["ID"]
assert isinstance(nomad_setup.deployment.get_deployment_allocations(deploymentID), list)
assert isinstance(nomad_setup.deployment.get_deployment_allocations(deploymentID)[0], dict)
assert "example" == nomad_setup.deployment.get_deployment_allocations(deploymentID)[0]["JobID"]
@pytest.mark.skipif(tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 6, 0), reason="Not supported in version")
def test_fail_deployment(nomad_setup):
deploymentID = nomad_setup.deployments.get_deployments()[0]["ID"]
try:
nomad_setup.deployment.fail_deployment(deploymentID)
except nomad.api.exceptions.URLNotFoundNomadException as err:
assert err.nomad_resp.text == "can't fail terminal deployment"
@pytest.mark.skipif(tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 6, 0), reason="Not supported in version")
def test_pause_deployment(nomad_setup):
deploymentID = nomad_setup.deployments.get_deployments()[0]["ID"]
try:
nomad_setup.deployment.pause_deployment(deploymentID, False)
except nomad.api.exceptions.BaseNomadException as err:
assert err.nomad_resp.text == "can't resume terminal deployment"
@pytest.mark.skipif(tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 6, 0), reason="Not supported in version")
def test_promote_all_deployment(nomad_setup):
deploymentID = nomad_setup.deployments.get_deployments()[0]["ID"]
try:
nomad_setup.deployment.promote_deployment_all(deploymentID)
except nomad.api.exceptions.BaseNomadException as err:
assert err.nomad_resp.text == "can't promote terminal deployment"
@pytest.mark.skipif(tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 6, 0), reason="Not supported in version")
def test_promote_groups_deployment(nomad_setup):
deploymentID = nomad_setup.deployments.get_deployments()[0]["ID"]
try:
nomad_setup.deployment.promote_deployment_groups(deploymentID)
except nomad.api.exceptions.BaseNomadException as err:
assert err.nomad_resp.text == "can't promote terminal deployment"
@pytest.mark.skipif(tuple(int(i) for i in os.environ.get("NOMAD_VERSION").split(".")) < (0, 6, 0), reason="Not supported in version")
def test_deployment_allocation_health(nomad_setup):
deploymentID = nomad_setup.deployments.get_deployments()[0]["ID"]
allocationID = nomad_setup.deployment.get_deployment(deploymentID)["ID"]
try:
nomad_setup.deployment.deployment_allocation_health(deploymentID, unhealthy_allocations=[allocationID])
except nomad.api.exceptions.BaseNomadException as err:
assert err.nomad_resp.text == "can't set health of allocations for a terminal deployment"
def test_dunder_getitem_exist(nomad_setup):
evalID = nomad_setup.job.get_allocations("example")[0]["EvalID"]
e = nomad_setup.evaluation[evalID]
assert isinstance(e, dict)
def test_dunder_getitem_not_exist(nomad_setup):
with pytest.raises(KeyError):
_ = nomad_setup.deployment[str(uuid.uuid4())]
def test_dunder_contain_exists(nomad_setup):
evalID = nomad_setup.job.get_allocations("example")[0]["EvalID"]
assert evalID in nomad_setup.evaluation
def test_dunder_contain_not_exist(nomad_setup):
assert str(uuid.uuid4()) not in nomad_setup.deployment
def test_dunder_str(nomad_setup):
assert isinstance(str(nomad_setup.deployment), str)
def test_dunder_repr(nomad_setup):
assert isinstance(repr(nomad_setup.deployment), str)
def test_dunder_getattr(nomad_setup):
with pytest.raises(AttributeError):
_ = nomad_setup.deployment.does_not_exist
@responses.activate
#
# fix No data when you are using namespaces #82
#
def test_get_deployment_with_namespace(nomad_setup_with_namespace):
responses.add(
responses.GET,
"http://{ip}:{port}/v1/deployment/a8198d79-cfdb-6593-a999-1e9adabcba2e?namespace={namespace}".format(ip=common.IP, port=common.NOMAD_PORT, namespace=common.NOMAD_NAMESPACE),
status=200,
json={"ID": "70638f62-5c19-193e-30d6-f9d6e689ab8e","JobID": "example", "JobVersion": 1, "JobModifyIndex": 17, "JobSpecModifyIndex": 17, "JobCreateIndex": 7,"Namespace": common.NOMAD_NAMESPACE, "Name": "example.cache[0]"}
)
assert common.NOMAD_NAMESPACE in nomad_setup_with_namespace.deployment.get_deployment("a8198d79-cfdb-6593-a999-1e9adabcba2e")["Namespace"]
|
import sys
from pathlib import Path
from typing import List
from pipx_release import copy_file_replace_line, python_mypy_ok
def fix_version_py(new_version: str) -> bool:
version_code_file = Path("src/pipx/version.py")
new_version_code_file = Path("src/pipx/version.py.new")
new_version_list = new_version.split(".")
copy_file_replace_line(
version_code_file,
new_version_code_file,
line_re=r"^\s*__version_info__\s*=",
new_line=f'__version_info__ = ({", ".join(new_version_list)})',
)
if python_mypy_ok(new_version_code_file):
new_version_code_file.rename(version_code_file)
return True
else:
print(f"Aborting: syntax error in {new_version_code_file}")
return False
def fix_changelog(new_version: str) -> bool:
changelog_file = Path("docs/changelog.md")
new_changelog_file = Path("docs/changelog.new")
copy_file_replace_line(
changelog_file, new_changelog_file, line_re=r"^\s*dev\s*$", new_line=new_version
)
new_changelog_file.rename(changelog_file)
return True
def pre_release(new_version: str) -> int:
if fix_version_py(new_version) and fix_changelog(new_version):
return 0
else:
return 1
def main(argv: List[str]) -> int:
if len(argv) > 1:
new_version = argv[1]
else:
new_version = input("Enter new version: ")
return pre_release(new_version)
if __name__ == "__main__":
try:
status = main(sys.argv)
except KeyboardInterrupt:
print("Stopped by Keyboard Interrupt", file=sys.stderr)
status = 130
sys.exit(status)
|
import logging
from selenium.webdriver.common.keys import Keys
logger = logging.getLogger(__name__)
class TextBoxActionsMixin:
def set_text(self, text, skip_if_none=True, blur_and_focus=False):
"""
clear the text field and type new text
:param text: text that should be set
        :param skip_if_none: true - do nothing if text isn't specified, set text if it is specified
                             false - set text if it is specified, error if text isn't specified
        :param blur_and_focus: true - blur and re-focus the field after the text is set
        """
if text is None and skip_if_none:
return self
logger.info(f"Clear text field '{self.element.name}' and set text '{text}'")
self.element.clear()
self.element.send_keys(text)
if blur_and_focus:
self.blur_and_focus()
return self
def type_text(self, text, force_open_keyboard=False):
"""
type text into the text field
:param text: text that should be typed
:param force_open_keyboard: if True tap the field to open keyboard
"""
logger.info(f"Type text '{text}' into text field '{self.element.name}'")
if force_open_keyboard:
self.element.click()
self.element.send_keys(text)
return self
def clear_text(self):
"""
clear text in the text field
"""
self.element.clear()
return self
def assert_is_text_masked(self, is_masked=True):
"""
        assert that the element text is masked; the test fails if the expected state isn't equal to the actual one
:param is_masked: if true - should be masked, if false - not masked
"""
if is_masked:
expected_state = "true"
else:
expected_state = "false"
actual_state = self.element.get_attribute("password")
if actual_state == expected_state:
logger.info("Correct state of '{0}': masked={1}".format(
self.element.name, actual_state))
else:
logger.log_fail(
"Incorrect state of '{0}': masked={1}. Expected state: masked={2}".format(
self.element.name, actual_state, expected_state))
assert (actual_state == expected_state)
def blur_and_focus(self):
self.element.click()
self.element.send_keys(Keys.TAB)
self.element.click()
@property
def value(self):
return self.element.get_attribute('value')
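# Illustrative usage sketch (an assumption, not part of the original module): the mixin
# expects the consuming page-object class to expose an ``element`` attribute wrapping a
# Selenium/Appium element that also carries a ``name`` used in the log messages.
# A hypothetical field class could combine them like this:
#
#     class SearchField(TextBoxActionsMixin, BaseTextElement):
#         pass
#
#     SearchField(locator).set_text("query", blur_and_focus=True).assert_is_text_masked(False)
#
# (``BaseTextElement`` and ``locator`` are placeholders; the chaining works because the
# mixin methods return ``self``.)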
|
# Generated by Django 3.1.7 on 2021-04-10 11:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('spell', '0007_components'),
]
operations = [
migrations.AddField(
model_name='spell',
name='components',
field=models.ManyToManyField(blank=True, to='spell.Components'),
),
]
|
from framework.util.db import get_db_port
def main():
port = get_db_port()
print(port)
if __name__ == "__main__":
main()
|
"""Tests of easy.py
"""
import unittest
from reversi.board import Board
from reversi.strategies import Random, Greedy, Unselfish, SlowStarter
class TestEasy(unittest.TestCase):
"""easy
"""
def test_random(self):
random = Random()
board = Board()
legal_moves = board.get_legal_moves('black')
self.assertTrue(random.next_move('black', board) in legal_moves)
def test_greedy(self):
greedy = Greedy()
board = Board()
board.put_disc('black', 5, 4)
board.put_disc('white', 3, 5)
board.put_disc('black', 2, 4)
board.put_disc('white', 5, 3)
board.put_disc('black', 3, 6)
board.put_disc('white', 6, 4)
board.put_disc('black', 3, 2)
board.put_disc('white', 2, 6)
board.put_disc('black', 6, 3)
board.put_disc('white', 3, 7)
self.assertTrue(greedy.next_move('black', board) in [(7, 4), (1, 7)])
def test_unselfish(self):
unselfish = Unselfish()
board = Board()
board.put_disc('black', 5, 4)
board.put_disc('white', 3, 5)
board.put_disc('black', 2, 4)
board.put_disc('white', 5, 3)
board.put_disc('black', 3, 6)
board.put_disc('white', 6, 4)
board.put_disc('black', 3, 2)
board.put_disc('white', 2, 6)
board.put_disc('black', 6, 3)
board.put_disc('white', 3, 7)
self.assertTrue(unselfish.next_move('black', board) in [(7, 5), (4, 6)])
def test_slowstarter(self):
slowstarter = SlowStarter()
board = Board()
board.put_disc('black', 5, 4) # 5
board.put_disc('white', 3, 5) # 6
board.put_disc('black', 2, 4) # 7
board.put_disc('white', 5, 3) # 8
board.put_disc('black', 3, 6) # 9
# unselfish
self.assertTrue(slowstarter.next_move('white', board) in [(6, 4), (1, 5), (2, 5), (5, 5), (6, 5), (2, 6)])
board.put_disc('white', 6, 4) # 10
# greedy
self.assertTrue(slowstarter.next_move('black', board) in [(7, 4)])
|
#!/usr/bin/env python
import os
from UrlConfig import UrlConfig
HOST_NAME = '0.0.0.0'
PORT_NUMBER = 443 # This is the bind port
SYSTEM_PROFILER = "/in"
SYSTEM_PROFILER_REDIRECT = "https://linkedin.com"
POSHDIR = "/opt/PoshC2_Python/"
ROOTDIR = "/opt/PoshC2_Project/"
HostnameIP = "https://192.36.15.234"
DomainFrontHeader = "" # example df.azureedge.net
DefaultSleep = "5s"
Jitter = 0.20
KillDate = "08/06/2019"
UserAgent = "Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; Touch; rv:11.0) like Gecko"
urlConfig = UrlConfig("%soldurls.txt" % POSHDIR) # Instantiate UrlConfig object - old urls using a list from a text file
#urlConfig = UrlConfig(wordList="%swordlist.txt" % POSHDIR) # Instantiate UrlConfig object - wordlist random url generator
QuickCommand = urlConfig.fetchQCUrl()
DownloadURI = urlConfig.fetchConnUrl()
Sounds = "No"
ServerPort = "443" # This the port the payload communicates with
NotificationsProjectName = "PoshC2"
EnableNotifications = "No"
DefaultMigrationProcess = "C:\\Windows\\system32\\netsh.exe" # Used in the PoshXX_migrate.exe payloads
# ClockworkSMS - https://www.clockworksms.com
APIKEY = ""
MobileNumber = '"07777777777","07777777777"'
# Pushover - https://pushover.net/
APIToken = ""
APIUser = ""
URLS = urlConfig.fetchUrls()
SocksURLS = urlConfig.fetchSocks()
Referrer = "" # optional
HTTPResponse = """<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html><head>
<title>404 Not Found</title>
</head><body>
<h1>Not Found</h1>
<p>The requested URL was not found on this server.</p>
<hr>
<address>Apache (Debian) Server</address>
</body></html>
"""
HTTPResponses = [
"STATUS 200",
"OK",
"<html><head></head><body>#RANDOMDATA#</body></html>",
"<html><body>#RANDOMDATA#</body></html>",
"""<?xml version="1.0" encoding="UTF-8"?>
<heading>#RANDOMDATA#</heading>
<body>#RANDOMDATA#</body>""",
"<html><head>#RANDOMDATA#</head><body><div>#RANDOMDATA#</div></body></html>"
]
ServerHeader = "Apache"
Insecure = "[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true}"
# DO NOT CHANGE #
FilesDirectory = "%sFiles%s" % (POSHDIR, os.sep)
PayloadsDirectory = "%spayloads%s" % (ROOTDIR, os.sep)
ModulesDirectory = "%sModules%s" % (POSHDIR, os.sep)
DownloadsDirectory = "%sdownloads%s" % (ROOTDIR, os.sep)
ReportsDirectory = "%sreports%s" % (ROOTDIR, os.sep)
Database = "%s%sPowershellC2.SQLite" % (ROOTDIR, os.sep)
# DO NOT CHANGE #
# These rules aren't needed as you'll find them auto-generated within the project folder now.
# Check out <project-name>/rewrite-rules.txt; we left this note here just in case.
|
__all__ = ["utils","sra","mgrast","imicrobe"]
import os, sys, argparse, warnings, shutil
import pandas as pd
from pathlib import Path
from grabseqslib.sra import process_sra, add_sra_subparser
from grabseqslib.imicrobe import process_imicrobe, add_imicrobe_subparser
from grabseqslib.mgrast import process_mgrast, add_mgrast_subparser
def main():
'''
Command-line argument-handling function
'''
# Set up parsers
parser = argparse.ArgumentParser(prog="grabseqs",
description='Download metagenomic sequences from public datasets.')
parser.add_argument('--version', '-v', action='version', version='%(prog)s 0.7.0')
subpa = parser.add_subparsers(help='repositories available')
add_sra_subparser(subpa)
add_imicrobe_subparser(subpa)
add_mgrast_subparser(subpa)
args = parser.parse_args()
# Make output directories if they don't exist
try:
if args.outdir != "":
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
except AttributeError:
        # No subcommand provided (all subcommands have `-o`)
print("Subcommand not specified, run `grabseqs -h` or `grabseqs {repository} -h` for help")
sys.exit(0)
# Figure out which subparser was called
try:
if args.rastid:
repo = "MG-RAST"
except AttributeError:
try:
if args.imicrobeid:
repo = "iMicrobe"
except AttributeError:
repo = "SRA"
# Check deps
zip_func = "gzip"
if shutil.which("pigz"):
zip_func = "pigz"
else:
print("pigz not found, using gzip")
metadata_agg = None
# Download samples
if repo == "SRA":
metadata_agg = process_sra(args, zip_func)
elif repo == "MG-RAST":
metadata_agg = process_mgrast(args, zip_func)
elif repo == "iMicrobe":
metadata_agg = process_imicrobe(args, zip_func)
# Handle metadata
if args.metadata != "":
md_path = Path(args.outdir) / Path(args.metadata)
if not os.path.isfile(md_path):
metadata_agg.to_csv(md_path, index = False)
print("Metadata saved to new file: " + str(md_path))
else:
metadata_i = pd.read_csv(md_path)
metadata_f = metadata_i.append(metadata_agg,sort=True)
metadata_f.to_csv(md_path, index = False)
print("Metadata appended to existing file: " + str(md_path))
|
# Copyright 2018 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
docstring ${module}
"""
# Futures
from __future__ import absolute_import, print_function
# Built-in modules
import copy
import itertools
import os
import types
from tempfile import SpooledTemporaryFile, mkstemp
from unittest import TestCase, main
# Third party modules
import six
from six.moves import range
# Own modules
import microprobe
from microprobe.target import import_definition
if six.PY2:
import subprocess32 as subprocess # @UnresolvedImport @UnusedImport
else:
import subprocess # @Reimport
# Constants
BASEPATH = os.path.join(os.path.dirname(microprobe.__file__), "..", "..")
MP_TESTING_ARCH = os.environ.get("MP_TESTING_ARCH", None)
def copy_func(f, name=None):
return types.FunctionType(f.__code__, copy.copy(f.__globals__),
name or f.__name__,
f.__defaults__, f.__closure__)
def variations(basestr, params):
"""
:param basestr:
:type basestr:
:param params:
:type params:
"""
tvariation = itertools.product(*params)
return [
basestr + " " + " ".join([elem2 for elem2 in elem if elem2 != ""])
for elem in tvariation
]
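# Illustrative sketch (not part of the original test module): for example,
#     variations("cmd", [["-a"], ["", "-b"]])
# expands the Cartesian product of the flag groups into ["cmd -a", "cmd -a -b"],
# appending each combination to the base command and dropping empty-string placeholders.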
def subins(instructions):
"""
:param instructions:
:type instructions:
"""
if MP_TESTING_ARCH is not None:
return instructions
myins = []
for instr in instructions:
if instr.format not in [ins.format for ins in myins]:
myins.append(instr)
continue
# if str(instr.instruction_checks) not in [
# str(ins.instruction_checks) for ins in myins]:
# myins.append(instr)
# continue
# if str(instr.target_checks) not in [
# str(ins.target_checks) for ins in myins]:
# myins.append(instr)
# continue
# if str(instr.operands) not in [
# str(ins.operands) for ins in myins]:
# myins.append(instr)
# continue
return myins
# Classes
class epi(TestCase): # pylint: disable-msg=invalid-name
"""
epi test class
"""
_multiprocess_can_split_ = True
name = "mp_epi"
description = "mp_epi tool tests"
cmd = [os.path.join(BASEPATH, "targets", "generic", "tools", "mp_epi.py")]
target = os.path.join(BASEPATH, "targets")
trials = 3
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
tempfile = mkstemp(prefix="microprobe_%s_" % self.name)
os.close(tempfile[0])
os.unlink(tempfile[1])
self.filename = tempfile[1]
def tearDown(self):
if os.path.isfile(self.filename):
os.unlink(self.filename)
def wrapper(self, target, oformat, instr, extra=None):
"""
Common execution wrapper
"""
self.filename = "%s.%s" % (self.filename, oformat)
test_cmd = self.cmd[:]
test_cmd.extend(["-T", target])
test_cmd.extend(["-P", self.target])
test_cmd.extend(["-O", self.filename])
test_cmd.extend(["-ins", instr])
if extra is not None:
test_cmd.extend(extra.split(' '))
test_cmd = [elem for elem in test_cmd if elem != ""]
print(" ".join(test_cmd))
for trial in range(0, self.trials):
print("Trial %s" % trial)
tfile = SpooledTemporaryFile()
error_code = subprocess.call(
test_cmd,
stdout=tfile,
stderr=subprocess.STDOUT
)
if error_code == 0:
break
if error_code != 0:
tfile.seek(0)
print(tfile.read())
self.assertEqual(error_code, 0)
if oformat == "bin":
print("Checking BIN...")
test_cmd = [os.path.join(BASEPATH, "targets", "generic", "tools",
"mp_bin2objdump.py")]
test_cmd.extend(['-T', target])
test_cmd.extend(['-i', self.filename])
test_cmd.append("-S")
tfile = SpooledTemporaryFile()
error_code = subprocess.call(
test_cmd,
stdout=tfile,
stderr=subprocess.STDOUT
)
if error_code != 0:
tfile.seek(0)
print(tfile.read())
self.assertEqual(error_code, 0)
TEST_TARGETS = []
if MP_TESTING_ARCH is None:
_PARAM1 = ['']
_PARAM2 = ['']
_PARAM3 = ['-B 10']
TEST_TARGETS.append(("riscv_v22-riscv_generic-riscv64_linux_gcc",
"c",
["C.FSDSP_V0", "C.JALR_V0", "C.LDSP_V0",
"C.LWSP_V0", "C.LW_V0", "C.SWSP_V0",
"C.SDSP_V0", "JALR_V0"]))
TEST_TARGETS.append(("riscv_v22-riscv_generic-riscv64_test_p",
"S",
["C.FSDSP_V0", "C.JALR_V0", "C.LDSP_V0",
"C.LWSP_V0", "C.LW_V0", "C.SWSP_V0",
"C.SDSP_V0", "JALR_V0"]))
else:
_PARAM1 = ['', '-dd 1', '-dd 5.5']
_PARAM2 = ['', '-R']
_PARAM3 = ['', '-B 10']
    if MP_TESTING_ARCH == "RISCV":
TEST_TARGETS.append(("riscv_v22-riscv_generic-riscv64_linux_gcc",
"c",
["C.FSDSP_V0", "C.JALR_V0", "C.LDSP_V0",
"C.LWSP_V0", "C.LW_V0", "C.SWSP_V0",
"C.SDSP_V0", "JALR_V0"]))
TEST_TARGETS.append(("riscv_v22-riscv_generic-riscv64_test_p",
"S",
["C.FSDSP_V0", "C.JALR_V0", "C.LDSP_V0",
"C.LWSP_V0", "C.LW_V0", "C.SWSP_V0",
"C.SDSP_V0", "JALR_V0"]))
TEST_FLAGS = []
TEST_FLAGS.extend(
variations("", [_PARAM1, _PARAM2, _PARAM3])
)
_TEST_NUMBER = 1
for _TEST_TARGET in TEST_TARGETS:
_TARGET = import_definition(_TEST_TARGET[0])
for _TEST_INSTR in [
my_instr.name for my_instr in subins(
list(_TARGET.isa.instructions.values()))]:
if _TEST_INSTR in _TEST_TARGET[2]:
continue
for _TEST_FLAG in TEST_FLAGS:
def test_function(self):
""" test_function """
self.wrapper(
_TEST_TARGET[0],
_TEST_TARGET[1],
_TEST_INSTR,
extra=_TEST_FLAG)
func_name = "test_%s_%03d" % (
_TEST_INSTR.replace(".", "x"), _TEST_NUMBER)
func_doc = "epi_test_%s_%03d on %s flags: %s" % (
_TEST_INSTR.replace(".", "x"), _TEST_NUMBER, _TEST_TARGET[0],
_TEST_FLAG)
setattr(epi, func_name, copy_func(test_function, func_name))
if six.PY2:
mfunc = getattr(getattr(epi, func_name), "__func__")
else:
mfunc = getattr(epi, func_name)
setattr(mfunc, "__doc__", func_doc)
mfunc.__name__ = func_name
globals().pop("mfunc")
globals().pop("test_function")
_TEST_NUMBER += 1
TEST_CLASSES = [epi]
if __name__ == '__main__':
main()
|
#! python3
#stopwatch.py - a simple stopwatch program
import time
#display instructions
print('Press ENTER to begin. Afterwards, press ENTER to "click" the stopwatch. Press CTRL-C to quit.')
input() #press enter to begin
print('Started.')
startTime = time.time() #grabs the first lap's start time
lastTime = startTime
lapNum = 1
#TODO Start tracking laptimes
try:
while True:
input()
lapTime = round(time.time() - lastTime, 2)
totalTime = round(time.time() - startTime, 2)
print('Lap #%s: %s (%s)' % (lapNum, totalTime, lapTime), end='')
lapNum += 1
lastTime = time.time() # resets the last laptime
except KeyboardInterrupt:
#Handle the ctrl-c exception to keep its error message from displaying.
print('\nDone!') |
import sys
import numpy as np
import configReader
import scipy.stats as stats
import os, shutil
def initializeIndividual(n, allzero=False, sigma=0.0, bounds=(0,1)):
"""
Initialize the single-population EA. If the allzero flag is on, then
the individual is initialized at the 0^n position. If sigma is 0, then
it is assumed the individual is a binary string. Otherwise, it is assumed
the individual is a real valued vector. When not all zeros, binary strings
are initialized uniformly at random from {0,1}^n, and real vectors are
initialized from [0,1]^n uniformly at random.
"""
# First, setup an all-zero vector
x = np.array([0]*n)
# If not allzero and we're binary, init in {0,1}^n i.i.d.
if (not allzero and sigma <= 0.0):
x = np.random.random_integers(low=0, high=1, size=n)
    # If not allzero and we're numeric, init in [lb,ub]^n
elif (not allzero and sigma > 0.0):
x = np.random.uniform(low=bounds[0], high=bounds[1], size=n)
# Return the initialized vector
return (x)
def isInsideEuclideanBoundary(x, low=0, high=1):
"""
Determine whether the selected point is inside or outside
the square boundary given. By default, this boundary is
the unit square.
"""
inside = True
for xitem in x:
if (xitem < low) or (xitem > high):
inside = False
return (inside)
def inSpecialArea(parent, criteria, variance):
"""
Is the parent inside a special criteria boundary.
"""
special=False
if (getDistance(parent, np.array([criteria] * len(parent))) < variance) or\
(parent[1] < 0) or (parent[2] < 0) or (parent[0] < 0):
special=True
return (special)
def mutateIndividual(x, pm, sigma=0.0, bound=[0,1], useEscapeSphere=False):
"""
Mutate the individual, where pm is the probability of mutating for
binary representation (ignored for numeric), and sigma is the Gaussian
spread for each real-valued gene. If sigma is 0 or lower, we assume
a binary representation. If the bound variable is set, repeat
mutations until child is inside the bound. To turn this off,
set bound=None.
"""
# Get the length and initialize a child vector
n = len(x)
child = np.array([0]*n)
# If we're using a binary representation, then flip bits
# at random according to pm, i.i.d.
if (sigma <= 0.0):
mask = np.random.choice([0, 1], size=n, p=[1-pm,pm])
y = x.copy()
child = y ^ mask
# Otherwise, jitter the gene by sigma according to
# a normal distribution, i.i.d. for each gene. If there
# is a bound, then repeat mutation until a valid child
# is produced.
else:
inside = False
stuckCount = 0
while (not inside):
# Check if we're stuck
if stuckCount > 50:
#raise Exception("The re-mutate loop is stuck ... should not take this many iterations to find a good point.")
inside=True
stuckCount += 1
# Generate a mutation
offsets = np.random.normal(scale=sigma, size=n)
child = x + offsets
# Check if the mutation is inside the bounds
if (bound==None):
inside=True
elif (useEscapeSphere) and (inSpecialArea(x, 0.25, .2)):
inside=True
else:
inside = isInsideEuclideanBoundary(child, bound[0], bound[1])
# Return the child
return (child)
def isBinary(x):
"""
Determine if the array is made up of just 0's and 1's.
"""
binary = True
for arg in x:
if (arg > 0) and (arg < 1):
binary = False
return (binary)
def getDistance(x1, x2):
"""
    Return the distance between two points. Use L2 norm (Euclidean) distance
for real values and Hamming for binary spaces.
"""
distance = sum( (x1-x2)**2 )
# If this is a real-value, use L2, otherwise using Hamming distance
if not (isBinary(x1) and isBinary(x2)):
distance = np.sqrt(distance)
return (distance)
def computeSparseness(x, archive, k):
"""
    Given a potential candidate for the archive, the archive, and k,
estimate the sparseness contribution of x. This is the
average distances over the k closest neighbors in the archive.
"""
# Compute the distance to x from every point in the archive
# and put them in a vector.
distances = []
for xi in archive:
distances.append( getDistance(x,xi) )
# Arrange things so that we can get either
# the k closest values or the size of the
    # archive, whichever is smaller
numElements = min(len(distances), k)
distances.sort()
# Estimate sparseness as the average over that subset, then return
sparseness = float(sum(distances[0:(numElements+1)]))/float(numElements)
return (sparseness)
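# Illustrative sketch (hypothetical values, not part of the original script): for
#     archive = [np.array([0.5, 0.5]), np.array([0.0, 1.0])], x = np.array([0.0, 0.0]), k = 2
# the distances are [sqrt(0.5), 1.0], so the sparseness estimate is
# (sqrt(0.5) + 1.0) / 2 ~= 0.85 -- larger values mean x sits in a less crowded region
# of the archive.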
def getPairwiseSparsenessMetrics(archive, k, n):
"""
Get minimum archive distances according to the sparseness metric,
as well as minimum distance to 1^n.
"""
# Setup the arbitrary "piton" measure (the all 1 binary string)
minDistTo1N = n
maxDistInArchive = 0
OneN = np.array([1.0]*n)
# Initialize the distances
distances = []
# Compute pair-wise distances of all points in the archive.
# Also, keep track of the distance to the all 1 string, 1^n.
for idx in range(len(archive)):
minDistTo1N = min( minDistTo1N, getDistance(archive[idx], OneN) )
tmpDistances = []
for jdx in range(len(archive)):
if (not idx == jdx):
xi = archive[idx]
xj = archive[jdx]
dist = getDistance(xi,xj)
tmpDistances.append( dist )
maxDistInArchive = max( [maxDistInArchive, dist] )
# Use these pairwise distances to estimate the sparseness of
# each point in the archive to the rest of the archive
# (excluding itself).
numElements = min(len(tmpDistances), k)
if numElements > 0:
tmpDistances.sort()
dist = float(sum(tmpDistances[0:(numElements+1)]))/float(numElements)
distances.append( dist)
# Return the sparseness distances for all points in the archive, as well
# as the minimum distances of any point to the "piton" point at the all 1
# string.
return (distances, minDistTo1N, maxDistInArchive)
def estimatePackingEpsilon(archive, sampleSize, maxPacking=sys.float_info.max):
"""
Compute the best epsilon estimate for this archive's epsilon-packing.
The result is the maximum distance between any two points in the archive
divided by 2. See epsilon-Packing definition in KNN literature.
    --> Larger epsilon means that the archive is more efficiently spread out
"""
archiveSize = len(archive)
sampleSize = min(sampleSize, archiveSize-1)
sampleDistances = [0]#[maxPacking]
# Compute epsilon metrics for every point in the archive
for idx in range(archiveSize):
# Shuffle all the indexes to other points in
# the archive, except this point
shuffledIndexes = range(archiveSize)
shuffledIndexes.remove(idx)
np.random.shuffle(shuffledIndexes)
# Compute all distances from idx to sampled
# points from the archive
for jdx in shuffledIndexes[0:sampleSize]:
if (not idx == jdx):
xi = archive[idx]
xj = archive[jdx]
sampleDistances.append( getDistance(xi,xj) )
# Return the estimation metrics for the epsilon-Packing
    # Subset Y of U is an eps-Packing iff D(x,y) > 2eps for all x,y \in Y
return (max(sampleDistances)/2)
def estimateCoverEpsilon(archive, sampleSize, n, sigma=0.0, bounds=(0,1)):
"""
Compute the best epsilon estimate for this archive's epsilon-cover.
    We do this by sampling the whole space and, for each sampled point, finding
    the smallest distance from it to *any* point in the archive.
Our estimate is the maximum of all such distances. We also include the
1^n string as one of those points as a kind of "piton" measure since our
algorithms tend to start at 0^n. See epsilon-cover definition in KNN
literature.
    --> Smaller epsilon means every point of the space is "close" to the archive
"""
archiveSize = len(archive)
sampleSize = min(sampleSize, 2**n)
sampleDistances = []
xi, xj, xk = (None, None, None)
for sampleIdx in range(sampleSize):
# Sample a point from the space
xj = initializeIndividual(n, False, sigma, bounds)
# Find the closest point in archive to a random point
archiveDistances = []
for idx in range(archiveSize):
xi = archive[idx]
archiveDistances.append( getDistance(xi,xj) )
sampleDistances.append( min(archiveDistances) )
# Find the closest points in archive to 1^n
xk = np.array([1]*len(xi))
archiveDistances = []
for idx in range(archiveSize):
xi = archive[idx]
archiveDistances.append( getDistance(xi,xk) )
sampleDistances.append( min(archiveDistances) )
# Our epsilon estimate is the *maximum* of those closest points
epsilon = 0.0
if (len(sampleDistances) > 1):
epsilon = max(sampleDistances)
# Return the estimation metrics for the epsilon-Packing
    # Subset Y of U is an eps-cover if for every x \in U,
# there is some y \in Y where D(x,y) < eps
return (epsilon)
#################################################################
# Example: There exists a 2-net for U={0,1}^4 with archive of #
# size 6. #
# #
# A = {0000, 0011, 1100, 0110, 1001, 1111} #
# |A| = 6 #
# #
# 1) No point in {0,1}^4 is more than 2 away from some point #
# in A. So the archive is a 2-Cover #
# #
# 2) The largest distance between any two points in A is 4, #
# and 4/2 = 2. So the archive is a 2-Packing. #
# #
# Therefore this archive is an 2-*optimal* archive. #
#################################################################
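# A minimal sketch (hypothetical helper, not part of the original code) that
# checks the 2-net example above with plain Hamming distances, following the
# cover/packing criteria stated in the comments.
def _checkTwoNetExample():
    import itertools

    def hamming(u, v):
        return sum(ui != vi for ui, vi in zip(u, v))

    A = [(0, 0, 0, 0), (0, 0, 1, 1), (1, 1, 0, 0),
         (0, 1, 1, 0), (1, 0, 0, 1), (1, 1, 1, 1)]
    # 1) 2-Cover: every point of {0,1}^4 is within distance 2 of some point in A
    isCover = all(min(hamming(x, a) for a in A) <= 2
                  for x in itertools.product((0, 1), repeat=4))
    # 2) 2-Packing: the largest pairwise distance in A is 4, and 4/2 = 2
    maxPairwise = max(hamming(a, b) for a in A for b in A)
    return isCover and (maxPairwise / 2 == 2)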
def archiveReportHeader():
    print("XX: Trial \t Generation \t CoverEpsilon \t PackingEpsilon \t MinArchiveSparseness \t ArchiveSize")
def archiveReport(archive, n, gen, trial, sampleSize, sigma, k, bounds):
"""
Print output for the trial and various epsilon metrics
"""
maxPackingDistance = getDistance(np.array([0]*n), np.array([1]*n))
packingEps = estimatePackingEpsilon(archive, sampleSize, maxPackingDistance)
coverEps = estimateCoverEpsilon(archive, sampleSize, n, sigma)
maxDistInArchive = 0
minSparse = -1
if (len(archive) > 1):
sparsenessVals, minAllOne, maxDistInArchive = getPairwiseSparsenessMetrics(archive, k, n)
minSparse = min(sparsenessVals)
    print("XX:", int(trial), '\t', int(gen), '\t',
          coverEps, '\t', packingEps, '\t',
          minSparse, '\t',
          len(archive))
# Flush standard out so we see the output in a timely fashion
sys.stdout.flush()
def clearVisualizationDir(vizDirName):
"""
Wipe out the directory we're writing for the visualization.
"""
try:
shutil.rmtree(vizDirName)
    except OSError:
        print("Could not remove directory:", vizDirName)
try:
os.mkdir(vizDirName)
    except OSError:
        print("Could not make directory:", vizDirName)
def writeVisualizationFile(vizDirName, gen, archive):
"""
Write a file for reading and visualizing in Paraview
"""
filename = "archive" + str(gen) + ".csv"
dirname = vizDirName.strip()
fullPathFilename = os.path.join(dirname, filename)
f = open(fullPathFilename, "w")
f.write("x, y, z, idx\n")
for idx in range(len(archive)):
lineStr = ""
for arg in archive[idx]:
lineStr += str(arg) + ", "
lineStr += str(idx) + '\n'
f.write(lineStr)
f.close()
def isAlreadyInArchive(archive, x):
"""
Check to see if the candidate is already in the archive.
"""
already = False
for xi in archive:
if tuple(xi) == tuple(x):
already=True
return (already)
def isArchiveOutOfBounds(archive, bounds):
"""
    Check to see if there are any points in the archive that are
outside the specified bounds.
"""
outOfBounds = False
for x in archive:
for arg in x:
if (arg > max(bounds)):
outOfBounds = True
elif (arg < min(bounds)):
outOfBounds = True
return(outOfBounds)
def writeArchive(archive, archiveFilename):
"""
Write the archive out to the specified file.
"""
archiveStrings = []
for x in archive:
outStr = ''
for idx in range(len(x)-1):
outStr += str(x[idx]) + ','
outStr += str(x[-1]) + '\n'
archiveStrings.append( outStr )
f = open(archiveFilename, 'w')
f.writelines(archiveStrings)
f.close()
def unitTest():
print("Testing the snseaBase sparseness calculuations...")
# Create a random archive of four individuals
archive = []
for idx in range(4):
archive.append( np.random.uniform(low=0.0, high=1.0, size=2) )
# Create an individual to which to compare
y = np.array([0.0, 0.0])
py = computeSparseness(y, archive, 3)
# Show the archive
print "Archive:"
for x in archive:
print " ", x
print
print "y =", y
print "Sparseness =", py
print
if __name__ == '__main__':
unitTest()
|
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import ctypes
import math
import numpy
import os
import PIL.Image
import six
import threading
from functools import partial
from xml.etree import ElementTree
from large_image import config
from large_image.cache_util import LRUCache, strhash, methodcache
from large_image.tilesource import etreeToDict
try:
from libtiff import libtiff_ctypes
except ValueError as exc:
# If the python libtiff module doesn't contain a pregenerated module for
# the appropriate version of libtiff, it tries to generate a module from
# the libtiff header file. If it can't find this file (possibly because it
# is in a virtual environment), it raises a ValueError instead of an
# ImportError. We convert this to an ImportError, so that we will print a
# more lucid error message and just fail to load this one tile source
# instead of failing to load the whole plugin.
config.getConfig('logger').warn(
'Failed to import libtiff; try upgrading the python module (%s)' % exc)
raise ImportError(str(exc))
# This suppresses warnings about unknown tags
libtiff_ctypes.suppress_warnings()
def patchLibtiff():
libtiff_ctypes.libtiff.TIFFFieldWithTag.restype = \
ctypes.POINTER(libtiff_ctypes.TIFFFieldInfo)
libtiff_ctypes.libtiff.TIFFFieldWithTag.argtypes = \
(libtiff_ctypes.TIFF, libtiff_ctypes.c_ttag_t)
# BigTIFF 64-bit unsigned integer
libtiff_ctypes.TIFFDataType.TIFF_LONG8 = 16
# BigTIFF 64-bit signed integer
libtiff_ctypes.TIFFDataType.TIFF_SLONG8 = 17
# BigTIFF 64-bit unsigned integer (offset)
libtiff_ctypes.TIFFDataType.TIFF_IFD8 = 18
patchLibtiff()
class TiffException(Exception):
pass
class InvalidOperationTiffException(TiffException):
"""
An exception caused by the user making an invalid request of a TIFF file.
"""
pass
class IOTiffException(TiffException):
"""
An exception caused by an internal failure, due to an invalid file or other
error.
"""
pass
class ValidationTiffException(TiffException):
"""
An exception caused by the TIFF reader not being able to support a given
file.
"""
pass
class TiledTiffDirectory(object):
CoreFunctions = [
'SetDirectory', 'SetSubDirectory', 'GetField',
'LastDirectory', 'GetMode', 'IsTiled', 'IsByteSwapped', 'IsUpSampled',
'IsMSB2LSB', 'NumberOfStrips',
]
def __init__(self, filePath, directoryNum, mustBeTiled=True, subDirectoryNum=0, validate=True):
"""
Create a new reader for a tiled image file directory in a TIFF file.
:param filePath: A path to a TIFF file on disk.
:type filePath: str
:param directoryNum: The number of the TIFF image file directory to
open.
:type directoryNum: int
:param mustBeTiled: if True, only tiled images validate. If False,
only non-tiled images validate. None validates both.
:type mustBeTiled: bool
:param subDirectoryNum: if set, the number of the TIFF subdirectory.
:type subDirectoryNum: int
        :param validate: if False, don't validate that images can be read.
        :type validate: bool
:raises: InvalidOperationTiffException or IOTiffException or
ValidationTiffException
"""
# create local cache to store Jpeg tables and getTileByteCountsType
self.cache = LRUCache(10)
self._mustBeTiled = mustBeTiled
self._tiffFile = None
self._tileLock = threading.RLock()
self._open(filePath, directoryNum, subDirectoryNum)
self._loadMetadata()
config.getConfig('logger').debug(
'TiffDirectory %d:%d Information %r',
directoryNum, subDirectoryNum or 0, self._tiffInfo)
try:
if validate:
self._validate()
except ValidationTiffException:
self._close()
raise
def __del__(self):
self._close()
def _open(self, filePath, directoryNum, subDirectoryNum=0):
"""
        Open a TIFF file and set the current directory to the given IFD number.
:param filePath: A path to a TIFF file on disk.
:type filePath: str
:param directoryNum: The number of the TIFF IFD to be used.
:type directoryNum: int
:param subDirectoryNum: The number of the TIFF sub-IFD to be used.
:type subDirectoryNum: int
:raises: InvalidOperationTiffException or IOTiffException
"""
self._close()
if not os.path.isfile(filePath):
raise InvalidOperationTiffException(
'TIFF file does not exist: %s' % filePath)
try:
bytePath = filePath
if not isinstance(bytePath, six.binary_type):
bytePath = filePath.encode('utf8')
self._tiffFile = libtiff_ctypes.TIFF.open(bytePath)
except TypeError:
raise IOTiffException(
'Could not open TIFF file: %s' % filePath)
# pylibtiff changed the case of some functions between version 0.4 and
# the version that supports libtiff 4.0.6. To support both, ensure
# that the cased functions exist.
for func in self.CoreFunctions:
if (not hasattr(self._tiffFile, func) and
hasattr(self._tiffFile, func.lower())):
setattr(self._tiffFile, func, getattr(
self._tiffFile, func.lower()))
self._setDirectory(directoryNum, subDirectoryNum)
def _setDirectory(self, directoryNum, subDirectoryNum=0):
self._directoryNum = directoryNum
if self._tiffFile.SetDirectory(self._directoryNum) != 1:
self._tiffFile.close()
raise IOTiffException(
'Could not set TIFF directory to %d' % directoryNum)
self._subDirectoryNum = subDirectoryNum
if self._subDirectoryNum:
subifds = self._tiffFile.GetField('subifd')
if (subifds is None or self._subDirectoryNum < 1 or
self._subDirectoryNum > len(subifds)):
raise IOTiffException(
'Could not set TIFF subdirectory to %d' % subDirectoryNum)
subifd = subifds[self._subDirectoryNum - 1]
if self._tiffFile.SetSubDirectory(subifd) != 1:
self._tiffFile.close()
raise IOTiffException(
'Could not set TIFF subdirectory to %d' % subDirectoryNum)
def _close(self):
if self._tiffFile:
self._tiffFile.close()
self._tiffFile = None
def _validate(self): # noqa
"""
Validate that this TIFF file and directory are suitable for reading.
:raises: ValidationTiffException
"""
if not self._mustBeTiled:
if self._mustBeTiled is not None and self._tiffInfo.get('istiled'):
raise ValidationTiffException('Expected a non-tiled TIFF file')
# For any non-supported file, we probably can add a conversion task in
# the create_image.py script, such as flatten or colourspace. These
# should only be done if necessary, which would require the conversion
# job to check output and perform subsequent processing as needed.
if (not self._tiffInfo.get('samplesperpixel') or
(self._tiffInfo.get('samplesperpixel') != 1 and
self._tiffInfo.get('samplesperpixel') < 3)):
raise ValidationTiffException(
'Only RGB and greyscale TIFF files are supported')
if self._tiffInfo.get('bitspersample') not in (8, 16):
raise ValidationTiffException(
'Only 8 and 16 bits-per-sample TIFF files are supported')
if self._tiffInfo.get('sampleformat') not in {
None, # default is still SAMPLEFORMAT_UINT
libtiff_ctypes.SAMPLEFORMAT_UINT}:
raise ValidationTiffException(
'Only unsigned int sampled TIFF files are supported')
if (self._tiffInfo.get('planarconfig') != libtiff_ctypes.PLANARCONFIG_CONTIG and
self._tiffInfo.get('photometric') not in {
libtiff_ctypes.PHOTOMETRIC_MINISBLACK}):
raise ValidationTiffException(
'Only contiguous planar configuration TIFF files are supported')
if self._tiffInfo.get('photometric') not in {
libtiff_ctypes.PHOTOMETRIC_MINISBLACK,
libtiff_ctypes.PHOTOMETRIC_RGB,
libtiff_ctypes.PHOTOMETRIC_YCBCR}:
raise ValidationTiffException(
'Only greyscale (black is 0), RGB, and YCbCr photometric '
'interpretation TIFF files are supported')
if self._tiffInfo.get('orientation') not in {
libtiff_ctypes.ORIENTATION_TOPLEFT,
libtiff_ctypes.ORIENTATION_TOPRIGHT,
libtiff_ctypes.ORIENTATION_BOTRIGHT,
libtiff_ctypes.ORIENTATION_BOTLEFT,
libtiff_ctypes.ORIENTATION_LEFTTOP,
libtiff_ctypes.ORIENTATION_RIGHTTOP,
libtiff_ctypes.ORIENTATION_RIGHTBOT,
libtiff_ctypes.ORIENTATION_LEFTBOT,
None}:
raise ValidationTiffException(
'Unsupported TIFF orientation')
if self._mustBeTiled and (
not self._tiffInfo.get('istiled') or
not self._tiffInfo.get('tilewidth') or
not self._tiffInfo.get('tilelength')):
raise ValidationTiffException('A tiled TIFF is required.')
if self._mustBeTiled is False and (
self._tiffInfo.get('istiled') or
not self._tiffInfo.get('rowsperstrip')):
raise ValidationTiffException('A non-tiled TIFF with strips is required.')
if (self._tiffInfo.get('compression') == libtiff_ctypes.COMPRESSION_JPEG and
self._tiffInfo.get('jpegtablesmode') !=
libtiff_ctypes.JPEGTABLESMODE_QUANT |
libtiff_ctypes.JPEGTABLESMODE_HUFF):
raise ValidationTiffException(
'Only TIFF files with separate Huffman and quantization '
'tables are supported')
if self._tiffInfo.get('compression') == libtiff_ctypes.COMPRESSION_JPEG:
try:
self._getJpegTables()
except IOTiffException:
self._completeJpeg = True
def _loadMetadata(self):
fields = [key.split('_', 1)[1].lower() for key in
dir(libtiff_ctypes.tiff_h) if key.startswith('TIFFTAG_')]
info = {}
for field in fields:
try:
value = self._tiffFile.GetField(field)
if value is not None:
info[field] = value
except TypeError as err:
config.getConfig('logger').debug(
'Loading field "%s" in directory number %d resulted in TypeError - "%s"',
field, self._directoryNum, err)
for func in self.CoreFunctions[3:]:
if hasattr(self._tiffFile, func):
value = getattr(self._tiffFile, func)()
if value:
info[func.lower()] = value
self._tiffInfo = info
self._tileWidth = info.get('tilewidth') or info.get('imagewidth')
self._tileHeight = info.get('tilelength') or info.get('rowsperstrip')
self._imageWidth = info.get('imagewidth')
self._imageHeight = info.get('imagelength')
if not info.get('tilelength'):
self._stripsPerTile = int(max(1, math.ceil(256.0 / self._tileHeight)))
self._stripHeight = self._tileHeight
self._tileHeight = self._stripHeight * self._stripsPerTile
self._stripCount = int(math.ceil(float(self._imageHeight) / self._stripHeight))
if info.get('orientation') in {
libtiff_ctypes.ORIENTATION_LEFTTOP,
libtiff_ctypes.ORIENTATION_RIGHTTOP,
libtiff_ctypes.ORIENTATION_RIGHTBOT,
libtiff_ctypes.ORIENTATION_LEFTBOT}:
self._imageWidth, self._imageHeight = self._imageHeight, self._imageWidth
self._tileWidth, self._tileHeight = self._tileHeight, self._tileWidth
self.parse_image_description(info.get('imagedescription', ''))
# From TIFF specification, tag 0x128, 2 is inches, 3 is centimeters.
units = {2: 25.4, 3: 10}
# If the resolution value is less than a threshold (100), don't use it,
# as it is probably just an inaccurate default. Values like 72dpi and
# 96dpi are common defaults, but so are small metric values, too.
if (not self._pixelInfo.get('mm_x') and info.get('xresolution') and
units.get(info.get('resolutionunit')) and
info.get('xresolution') >= 100):
self._pixelInfo['mm_x'] = units[info['resolutionunit']] / info['xresolution']
if (not self._pixelInfo.get('mm_y') and info.get('yresolution') and
units.get(info.get('resolutionunit')) and
info.get('yresolution') >= 100):
self._pixelInfo['mm_y'] = units[info['resolutionunit']] / info['yresolution']
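        # Worked example (hypothetical values): resolutionunit=2 (inch) and
        # xresolution=40000 pixels per inch give
        # mm_x = 25.4 / 40000 = 0.000635 mm per pixel.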
if not self._pixelInfo.get('width') and self._imageWidth:
self._pixelInfo['width'] = self._imageWidth
if not self._pixelInfo.get('height') and self._imageHeight:
self._pixelInfo['height'] = self._imageHeight
@methodcache(key=partial(strhash, '_getJpegTables'))
def _getJpegTables(self):
"""
Get the common JPEG Huffman-coding and quantization tables.
See http://www.awaresystems.be/imaging/tiff/tifftags/jpegtables.html
for more information.
:return: All Huffman and quantization tables, with JPEG table start
markers.
:rtype: bytes
:raises: Exception
"""
# TIFFTAG_JPEGTABLES uses (uint32*, void**) output arguments
# http://www.remotesensing.org/libtiff/man/TIFFGetField.3tiff.html
tableSize = ctypes.c_uint32()
tableBuffer = ctypes.c_voidp()
# Some versions of pylibtiff set an explicit list of argtypes for
# TIFFGetField. When this is done, we need to adjust them to match
# what is needed for our specific call. Other versions do not set
# argtypes, allowing any types to be passed without validation, in
# which case we do not need to alter the list.
if libtiff_ctypes.libtiff.TIFFGetField.argtypes:
libtiff_ctypes.libtiff.TIFFGetField.argtypes = \
libtiff_ctypes.libtiff.TIFFGetField.argtypes[:2] + \
[ctypes.POINTER(ctypes.c_uint32), ctypes.POINTER(ctypes.c_void_p)]
if libtiff_ctypes.libtiff.TIFFGetField(
self._tiffFile,
libtiff_ctypes.TIFFTAG_JPEGTABLES,
ctypes.byref(tableSize),
ctypes.byref(tableBuffer)) != 1:
raise IOTiffException('Could not get JPEG Huffman / quantization tables')
tableSize = tableSize.value
tableBuffer = ctypes.cast(tableBuffer, ctypes.POINTER(ctypes.c_char))
if tableBuffer[:2] != b'\xff\xd8':
raise IOTiffException(
'Missing JPEG Start Of Image marker in tables')
if tableBuffer[tableSize - 2:tableSize] != b'\xff\xd9':
raise IOTiffException('Missing JPEG End Of Image marker in tables')
if tableBuffer[2:4] not in (b'\xff\xc4', b'\xff\xdb'):
raise IOTiffException(
'Missing JPEG Huffman or Quantization Table marker')
# Strip the Start / End Of Image markers
tableData = tableBuffer[2:tableSize - 2]
return tableData
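    # For reference, getTile() later reassembles a standalone JPEG for a tile
    # as SOI + tables + frame + EOI, i.e.
    #   b'\xff\xd8' + self._getJpegTables() + self._getJpegFrame(tileNum) + b'\xff\xd9'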
def _toTileNum(self, x, y, transpose=False):
"""
Get the internal tile number of a tile, from its row and column index.
:param x: The column index of the desired tile.
:type x: int
:param y: The row index of the desired tile.
:type y: int
:param transpose: If true, transpose width and height
        :type transpose: bool
        :return: The internal tile number of the desired tile.
        :rtype: int
:raises: InvalidOperationTiffException
"""
# TIFFCheckTile and TIFFComputeTile require pixel coordinates
if not transpose:
pixelX = int(x * self._tileWidth)
pixelY = int(y * self._tileHeight)
if x < 0 or y < 0 or pixelX >= self._imageWidth or pixelY >= self._imageHeight:
raise InvalidOperationTiffException(
'Tile x=%d, y=%d does not exist' % (x, y))
else:
pixelX = int(x * self._tileHeight)
pixelY = int(y * self._tileWidth)
if x < 0 or y < 0 or pixelX >= self._imageHeight or pixelY >= self._imageWidth:
raise InvalidOperationTiffException(
'Tile x=%d, y=%d does not exist' % (x, y))
# We had been using TIFFCheckTile, but with z=0 and sample=0, this is
# just a check that x, y is within the image
# if libtiff_ctypes.libtiff.TIFFCheckTile(
# self._tiffFile, pixelX, pixelY, 0, 0) == 0:
# raise InvalidOperationTiffException(
# 'Tile x=%d, y=%d does not exist' % (x, y))
if self._tiffInfo.get('istiled'):
tileNum = libtiff_ctypes.libtiff.TIFFComputeTile(
self._tiffFile, pixelX, pixelY, 0, 0).value
else:
# TIFFComputeStrip with sample=0 is just the row divided by the
# strip height
tileNum = int(pixelY // self._stripHeight)
return tileNum
@methodcache(key=partial(strhash, '_getTileByteCountsType'))
def _getTileByteCountsType(self):
"""
Get data type of the elements in the TIFFTAG_TILEBYTECOUNTS array.
:return: The element type in TIFFTAG_TILEBYTECOUNTS.
:rtype: ctypes.c_uint64 or ctypes.c_uint16
:raises: IOTiffException
"""
tileByteCountsFieldInfo = libtiff_ctypes.libtiff.TIFFFieldWithTag(
self._tiffFile, libtiff_ctypes.TIFFTAG_TILEBYTECOUNTS).contents
tileByteCountsLibtiffType = tileByteCountsFieldInfo.field_type
if tileByteCountsLibtiffType == libtiff_ctypes.TIFFDataType.TIFF_LONG8:
return ctypes.c_uint64
elif tileByteCountsLibtiffType == \
libtiff_ctypes.TIFFDataType.TIFF_SHORT:
return ctypes.c_uint16
else:
raise IOTiffException(
'Invalid type for TIFFTAG_TILEBYTECOUNTS: %s' % tileByteCountsLibtiffType)
def _getJpegFrameSize(self, tileNum):
"""
Get the file size in bytes of the raw encoded JPEG frame for a tile.
:param tileNum: The internal tile number of the desired tile.
:type tileNum: int
:return: The size in bytes of the raw tile data for the desired tile.
:rtype: int
:raises: InvalidOperationTiffException or IOTiffException
"""
# TODO: is it worth it to memoize this?
# TODO: remove this check, for additional speed
totalTileCount = libtiff_ctypes.libtiff.TIFFNumberOfTiles(
self._tiffFile).value
if tileNum >= totalTileCount:
raise InvalidOperationTiffException('Tile number out of range')
# pylibtiff treats the output of TIFFTAG_TILEBYTECOUNTS as a scalar
# uint32; libtiff's documentation specifies that the output will be an
# array of uint32; in reality and per the TIFF spec, the output is an
        # array of either uint64 or uint16, so we need to call the ctypes
# interface directly to get this tag
# http://www.awaresystems.be/imaging/tiff/tifftags/tilebytecounts.html
rawTileSizesType = self._getTileByteCountsType()
rawTileSizes = ctypes.POINTER(rawTileSizesType)()
# Some versions of pylibtiff set an explicit list of argtypes for
# TIFFGetField. When this is done, we need to adjust them to match
# what is needed for our specific call. Other versions do not set
# argtypes, allowing any types to be passed without validation, in
# which case we do not need to alter the list.
if libtiff_ctypes.libtiff.TIFFGetField.argtypes:
libtiff_ctypes.libtiff.TIFFGetField.argtypes = \
libtiff_ctypes.libtiff.TIFFGetField.argtypes[:2] + \
[ctypes.POINTER(ctypes.POINTER(rawTileSizesType))]
if libtiff_ctypes.libtiff.TIFFGetField(
self._tiffFile,
libtiff_ctypes.TIFFTAG_TILEBYTECOUNTS,
ctypes.byref(rawTileSizes)) != 1:
raise IOTiffException('Could not get raw tile size')
# In practice, this will never overflow, and it's simpler to convert the
# long to an int
return int(rawTileSizes[tileNum])
def _getJpegFrame(self, tileNum, entire=False):
"""
Get the raw encoded JPEG image frame from a tile.
:param tileNum: The internal tile number of the desired tile.
:type tileNum: int
:param entire: True to return the entire frame. False to strip off
container information.
:return: The JPEG image frame, including a JPEG Start Of Frame marker.
:rtype: bytes
:raises: InvalidOperationTiffException or IOTiffException
"""
# This raises an InvalidOperationTiffException if the tile doesn't exist
rawTileSize = self._getJpegFrameSize(tileNum)
frameBuffer = ctypes.create_string_buffer(rawTileSize)
bytesRead = libtiff_ctypes.libtiff.TIFFReadRawTile(
self._tiffFile, tileNum,
frameBuffer, rawTileSize).value
if bytesRead == -1:
raise IOTiffException('Failed to read raw tile')
elif bytesRead < rawTileSize:
raise IOTiffException('Buffer underflow when reading tile')
elif bytesRead > rawTileSize:
# It's unlikely that this will ever occur, but incomplete reads will
# be checked for by looking for the JPEG end marker
raise IOTiffException('Buffer overflow when reading tile')
if entire:
return frameBuffer.raw[:]
if frameBuffer.raw[:2] != b'\xff\xd8':
raise IOTiffException('Missing JPEG Start Of Image marker in frame')
if frameBuffer.raw[-2:] != b'\xff\xd9':
raise IOTiffException('Missing JPEG End Of Image marker in frame')
if frameBuffer.raw[2:4] in (b'\xff\xc0', b'\xff\xc2'):
frameStartPos = 2
else:
# VIPS may encode TIFFs with the quantization (but not Huffman)
# tables also at the start of every frame, so locate them for
# removal
# VIPS seems to prefer Baseline DCT, so search for that first
frameStartPos = frameBuffer.raw.find(b'\xff\xc0', 2, -2)
if frameStartPos == -1:
frameStartPos = frameBuffer.raw.find(b'\xff\xc2', 2, -2)
if frameStartPos == -1:
raise IOTiffException('Missing JPEG Start Of Frame marker')
# If the photometric value is RGB and the JPEG component ids are just
# 0, 1, 2, change the component ids to R, G, B to ensure color space
# information is preserved.
if self._tiffInfo.get('photometric') == libtiff_ctypes.PHOTOMETRIC_RGB:
sof = frameBuffer.raw.find(b'\xff\xc0')
if sof == -1:
sof = frameBuffer.raw.find(b'\xff\xc2')
sos = frameBuffer.raw.find(b'\xff\xda')
if (sof >= frameStartPos and sos >= frameStartPos and
frameBuffer[sof + 2:sof + 4] == b'\x00\x11' and
frameBuffer[sof + 10:sof + 19:3] == b'\x00\x01\x02' and
frameBuffer[sos + 5:sos + 11:2] == b'\x00\x01\x02'):
for idx, val in enumerate(b'RGB'):
frameBuffer[sof + 10 + idx * 3] = val
frameBuffer[sos + 5 + idx * 2] = val
# Strip the Start / End Of Image markers
tileData = frameBuffer.raw[frameStartPos:-2]
return tileData
def _getUncompressedTile(self, tileNum):
"""
Get an uncompressed tile or strip.
:param tileNum: The internal tile or strip number of the desired tile
or strip.
:type tileNum: int
        :return: the tile or strip as an 8- or 16-bit-per-channel numpy array.
        :rtype: numpy.ndarray
:raises: IOTiffException
"""
with self._tileLock:
if self._tiffInfo.get('istiled'):
tileSize = libtiff_ctypes.libtiff.TIFFTileSize(self._tiffFile).value
else:
stripSize = libtiff_ctypes.libtiff.TIFFStripSize(
self._tiffFile).value
stripsCount = min(self._stripsPerTile, self._stripCount - tileNum)
tileSize = stripSize * self._stripsPerTile
imageBuffer = ctypes.create_string_buffer(tileSize)
with self._tileLock:
if self._tiffInfo.get('istiled'):
readSize = libtiff_ctypes.libtiff.TIFFReadEncodedTile(
self._tiffFile, tileNum, imageBuffer, tileSize)
else:
readSize = 0
for stripNum in range(stripsCount):
chunkSize = libtiff_ctypes.libtiff.TIFFReadEncodedStrip(
self._tiffFile,
tileNum + stripNum,
ctypes.byref(imageBuffer, stripSize * stripNum),
stripSize).value
if chunkSize <= 0:
raise IOTiffException(
'Read an unexpected number of bytes from an encoded strip')
readSize += chunkSize
if readSize < tileSize:
ctypes.memset(ctypes.byref(imageBuffer, readSize), 0, tileSize - readSize)
readSize = tileSize
if readSize < tileSize:
raise IOTiffException('Read an unexpected number of bytes from an encoded tile')
tw, th = self._tileWidth, self._tileHeight
if self._tiffInfo.get('orientation') in {
libtiff_ctypes.ORIENTATION_LEFTTOP,
libtiff_ctypes.ORIENTATION_RIGHTTOP,
libtiff_ctypes.ORIENTATION_RIGHTBOT,
libtiff_ctypes.ORIENTATION_LEFTBOT}:
tw, th = th, tw
image = numpy.ctypeslib.as_array(
ctypes.cast(imageBuffer, ctypes.POINTER(
ctypes.c_uint16 if self._tiffInfo.get('bitspersample') == 16 else ctypes.c_uint8)),
(th, tw, self._tiffInfo.get('samplesperpixel')))
if (self._tiffInfo.get('samplesperpixel') == 3 and
self._tiffInfo.get('photometric') == libtiff_ctypes.PHOTOMETRIC_YCBCR):
if self._tiffInfo.get('bitspersample') == 16:
image = numpy.floor_divide(image, 256).astype(numpy.uint8)
image = PIL.Image.fromarray(image, 'YCbCr')
image = numpy.array(image.convert('RGB'))
return image
def _getTileRotated(self, x, y):
"""
Get a tile from a rotated TIF. This composites uncompressed tiles as
necessary and then rotates the result.
:param x: The column index of the desired tile.
:param y: The row index of the desired tile.
:return: either a buffer with a JPEG or a PIL image.
"""
x0 = x * self._tileWidth
x1 = x0 + self._tileWidth
y0 = y * self._tileHeight
y1 = y0 + self._tileHeight
iw, ih = self._imageWidth, self._imageHeight
tw, th = self._tileWidth, self._tileHeight
transpose = False
if self._tiffInfo.get('orientation') in {
libtiff_ctypes.ORIENTATION_LEFTTOP,
libtiff_ctypes.ORIENTATION_RIGHTTOP,
libtiff_ctypes.ORIENTATION_RIGHTBOT,
libtiff_ctypes.ORIENTATION_LEFTBOT}:
x0, x1, y0, y1 = y0, y1, x0, x1
iw, ih = ih, iw
tw, th = th, tw
transpose = True
if self._tiffInfo.get('orientation') in {
libtiff_ctypes.ORIENTATION_TOPRIGHT,
libtiff_ctypes.ORIENTATION_BOTRIGHT,
libtiff_ctypes.ORIENTATION_RIGHTTOP,
libtiff_ctypes.ORIENTATION_RIGHTBOT}:
x0, x1 = iw - x1, iw - x0
if self._tiffInfo.get('orientation') in {
libtiff_ctypes.ORIENTATION_BOTRIGHT,
libtiff_ctypes.ORIENTATION_BOTLEFT,
libtiff_ctypes.ORIENTATION_RIGHTBOT,
libtiff_ctypes.ORIENTATION_LEFTBOT}:
y0, y1 = ih - y1, ih - y0
tx0 = x0 // tw
tx1 = (x1 - 1) // tw
ty0 = y0 // th
ty1 = (y1 - 1) // th
tile = None
for ty in range(max(0, ty0), max(0, ty1 + 1)):
for tx in range(max(0, tx0), max(0, tx1 + 1)):
subtile = self._getUncompressedTile(self._toTileNum(tx, ty, transpose))
if tile is None:
tile = numpy.zeros(
(th, tw) if len(subtile.shape) == 2 else
(th, tw, subtile.shape[2]), dtype=subtile.dtype)
stx, sty = tx * tw - x0, ty * th - y0
if (stx >= tw or stx + subtile.shape[1] <= 0 or
sty >= th or sty + subtile.shape[0] <= 0):
continue
if stx < 0:
subtile = subtile[:, -stx:]
stx = 0
if sty < 0:
subtile = subtile[-sty:, :]
sty = 0
subtile = subtile[:min(subtile.shape[0], th - sty),
:min(subtile.shape[1], tw - stx)]
tile[sty:sty + subtile.shape[0], stx:stx + subtile.shape[1]] = subtile
if tile is None:
raise InvalidOperationTiffException(
'Tile x=%d, y=%d does not exist' % (x, y))
if self._tiffInfo.get('orientation') in {
libtiff_ctypes.ORIENTATION_BOTRIGHT,
libtiff_ctypes.ORIENTATION_BOTLEFT,
libtiff_ctypes.ORIENTATION_RIGHTBOT,
libtiff_ctypes.ORIENTATION_LEFTBOT}:
tile = tile[::-1, :]
if self._tiffInfo.get('orientation') in {
libtiff_ctypes.ORIENTATION_TOPRIGHT,
libtiff_ctypes.ORIENTATION_BOTRIGHT,
libtiff_ctypes.ORIENTATION_RIGHTTOP,
libtiff_ctypes.ORIENTATION_RIGHTBOT}:
tile = tile[:, ::-1]
if self._tiffInfo.get('orientation') in {
libtiff_ctypes.ORIENTATION_LEFTTOP,
libtiff_ctypes.ORIENTATION_RIGHTTOP,
libtiff_ctypes.ORIENTATION_RIGHTBOT,
libtiff_ctypes.ORIENTATION_LEFTBOT}:
tile = tile.transpose((1, 0) if len(tile.shape) == 2 else (1, 0, 2))
return tile
@property
def tileWidth(self):
"""
Get the pixel width of tiles.
:return: The tile width in pixels.
:rtype: int
"""
return self._tileWidth
@property
def tileHeight(self):
"""
Get the pixel height of tiles.
:return: The tile height in pixels.
:rtype: int
"""
return self._tileHeight
@property
def imageWidth(self):
return self._imageWidth
@property
def imageHeight(self):
return self._imageHeight
@property
def pixelInfo(self):
return self._pixelInfo
def getTile(self, x, y):
"""
Get the complete JPEG image from a tile.
:param x: The column index of the desired tile.
:type x: int
:param y: The row index of the desired tile.
:type y: int
:return: either a buffer with a JPEG or a PIL image.
:rtype: bytes
:raises: InvalidOperationTiffException or IOTiffException
"""
if self._tiffInfo.get('orientation') not in {
libtiff_ctypes.ORIENTATION_TOPLEFT,
None}:
return self._getTileRotated(x, y)
# This raises an InvalidOperationTiffException if the tile doesn't exist
tileNum = self._toTileNum(x, y)
if (not self._tiffInfo.get('istiled') or
self._tiffInfo.get('compression') not in (
libtiff_ctypes.COMPRESSION_JPEG, 33003, 33005) or
self._tiffInfo.get('bitspersample') != 8):
return self._getUncompressedTile(tileNum)
imageBuffer = six.BytesIO()
if (self._tiffInfo.get('compression') == libtiff_ctypes.COMPRESSION_JPEG and
not getattr(self, '_completeJpeg', False)):
# Write JPEG Start Of Image marker
imageBuffer.write(b'\xff\xd8')
imageBuffer.write(self._getJpegTables())
imageBuffer.write(self._getJpegFrame(tileNum))
# Write JPEG End Of Image marker
imageBuffer.write(b'\xff\xd9')
return imageBuffer.getvalue()
# Get the whole frame, which is in a JPEG or JPEG 2000 format, and
# convert it to a PIL image
imageBuffer.write(self._getJpegFrame(tileNum, True))
image = PIL.Image.open(imageBuffer)
# Converting the image mode ensures that it gets loaded once and is in
# a form we expect. If this isn't done, then PIL can load the image
# multiple times, which sometimes throws an exception in PIL's JPEG
# 2000 module.
image = image.convert('RGB')
return image
def parse_image_description(self, meta=None): # noqa
self._pixelInfo = {}
self._embeddedImages = {}
if not meta:
return
if not isinstance(meta, six.string_types):
meta = meta.decode('utf8', 'ignore')
try:
xml = ElementTree.fromstring(meta)
except Exception:
if 'AppMag = ' in meta:
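                # e.g. an Aperio-style description (illustrative) such as
                #   'Aperio Image Library ...|AppMag = 20|MPP = 0.4990|...'
                # yields a magnification of 20.0.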
try:
self._pixelInfo = {
'magnification': float(meta.split('AppMag = ')[1].split('|')[0].strip())
}
except Exception:
pass
return
try:
image = xml.find(
".//DataObject[@ObjectType='DPScannedImage']")
columns = int(image.find(".//*[@Name='PIM_DP_IMAGE_COLUMNS']").text)
rows = int(image.find(".//*[@Name='PIM_DP_IMAGE_ROWS']").text)
spacing = [float(val.strip('"')) for val in image.find(
".//*[@Name='DICOM_PIXEL_SPACING']").text.split()]
self._pixelInfo = {
'width': columns,
'height': rows,
'mm_x': spacing[0],
'mm_y': spacing[1]
}
except Exception:
pass
# Extract macro and label images
for image in xml.findall(".//*[@ObjectType='DPScannedImage']"):
try:
typestr = image.find(".//*[@Name='PIM_DP_IMAGE_TYPE']").text
datastr = image.find(".//*[@Name='PIM_DP_IMAGE_DATA']").text
except Exception:
continue
if not typestr or not datastr:
continue
typemap = {
'LABELIMAGE': 'label',
'MACROIMAGE': 'macro',
'WSI': 'thumbnail',
}
self._embeddedImages[typemap.get(typestr, typestr.lower())] = datastr
try:
self._description_record = etreeToDict(xml)
except Exception:
pass
return True
|
'''
Class grouping all methods and attributes for an experiment.
'''
import os
import sys
import time
import subprocess
import csv
from copy import deepcopy
from datetime import datetime
from ast import literal_eval
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mplcolors
from matplotlib.ticker import FormatStrFormatter, MaxNLocator
from lmfit import fit_report
from IPython.display import clear_output
import ipysheet
from lib.frontend import notebook as nb
from lib.extraction import PyNexus as PN
from lib.fit import funFit
# Define colors for prints
_RED='\x1b[31;01m'
_BOLD="\x1b[01;06m"
_RESET="\x1b[0m"
class Experiment:
'''
Class for an experiment.
Attributes
----------
all_spectrums : ndarray
2D array of float containing all the spectrums extracted.
baseline : ndarray
1D array of float containing the baseline part of a spectrum fit.
bckg_eVs_inf : float
Lower bound of the fitting range for the background (if is_bckg_subset).
bckg_eVs_sup : float
Upper bound of the fitting range for the background (if is_bckg_subset).
beam_energy : float
Beam energy in eV.
broad_factor : float
Broadening factor of the Compton peak's width as compared to an elastic peak.
compton_part : ndarray
1D array of float containing the Compton part of a spectrum fit.
ct : float
Constant part of the baseline = sl*eVs+ct.
channels : ndarray
1D array of int corresponding to the subset of selected channels.
current_sensorsRelTimestamps : float
Time stamp of the spectrum fitted.
data0D : ndarray
2D array of float containing the values of the sensors recorded during the scan.
elements : list of Elements
List of objects Elements.
elements_fit : list of Elements
List of objects Elements, used in the fitting process.
eV0 : float
Parameter in the conversion of channels to eVs, eVs = channels*gain + eV0.
eVs : ndarray
1D array of float containing the channels converted to eVs.
eVs_fit : ndarray
1D array of float containing the channels converted to eVs, used in the fitting process.
files_dir : str
Directory where the nexus files are.
first_channel : int
Index of the first channel after the extraction.
first_spectrum : int
Index of the first spectrum after the extraction.
gain : float
Parameter in the conversion of channels to eVs, eVs = channels*gain + eV0.
gaussian_part : ndarray
1D array of float containing the gaussian part of a spectrum fit.
highTF0 : float
Tail fraction of the peak, on the high energy side.
highTW0 : float
Tail width of the peak, on the high energy side.
is_bckg_subset : bool
True if fitting the background on a subset of the spectrum.
is_broad_factor : bool
True if broad_factor is a fit parameter.
is_ct : bool
True if ct is a fit parameter.
is_eV0 : bool
True if eV0 is a fit parameter.
is_extract_done : bool
True if an extraction has been done.
    is_fit_done : bool
True if a fit has been done.
is_fit_fail : bool
True if a fit did not converge.
is_gain : bool
True if gain is a fit parameter.
is_highTF0 : bool
True if highTF0 is a fit parameter.
is_highTW0 : bool
True if highTW0 is a fit parameter.
is_last_fit_a_preview : bool
True if the last fit done was a preview (single fit).
is_lowTF0 : bool
True if lowTF0 is a fit parameter.
is_lowTW0 : bool
True if lowTW0 is a fit parameter.
is_noise : bool
True if noise is a fit parameter.
is_SDD_elem_0 : bool
True if the SDD element 0 is used.
is_SDD_elem_1 : bool
True if the SDD element 1 is used.
is_SDD_elem_2 : bool
True if the SDD element 2 is used.
is_SDD_elem_3 : bool
True if the SDD element 3 is used.
is_SDD_elem_4 : bool
True if the SDD element 4 is used.
is_SF0 : bool
True if SF0 is a fit parameter.
is_sheet_loaded : bool
True if the ipysheet has been loaded.
is_sl : bool
True if sl is a fit parameter.
is_spectrum_empty : bool
True if the spectrum is considered as empty.
is_TF0 : bool
True if TF0 is a fit parameter.
is_TW0 : bool
True if TW0 is a fit parameter.
last_channel : int
Index of the last channel after the extraction.
last_non_zero_spectrum : int
        Index of the last non-zero spectrum before extraction (all spectrums after are empty).
last_spectrum : int
Index of the last spectrum after the extraction.
list_extract_params_files : list of str
List of csv files with extraction params.
list_fit_params_files : list of str
List of csv files with fit params.
list_isfit : list of str
List of params names which are active fit parameters.
list_isfit_str : str
list_isfit converted to a string, with a comma as a separator.
list_files_filenames : list of str
List of files in the 'files' directory.
list_peaks_params_files : list of str
List of csv files with peaks params.
logs : str
String to be displayed in the logs window.
logs_lvl : int
Depth level of the logs.
lowTF0 : float
Tail fraction of the peak, on the low energy side.
lowTW0 : float
Tail width of the peak, on the low energy side.
min_strength : float
Minimum strength of peak to be displayed in the peak selection widget.
name : str
Name of the scan.
nb_allspectrums : int
Number of spectrums taken during the scan.
nb_spectrums : int
Number of spectrums extracted.
noise : float
Width of the peak in keV, before contribution from the detector.
notebook_name : str
Name of the notebook (necessary for saving in pdf).
filename : str
Name of the nexus file.
path_to_db : str
Relative path to the xraylib database.
path_to_extract_params : str
Relative path to the csv file with loaded extraction parameters.
path_to_extract_params_default : str
Relative path to the csv file with default extraction parameters.
path_to_extract_params_save : str
Relative path to the csv file with the saved extraction parameters.
path_to_fit_curve_results : str
Relative path to the csv file with the fit curve of a given spectrum.
path_to_fit_folder : str
Relative path to the folder containing the fit curves.
path_to_fit_log_results : str
Relative path to the log file with logs from the fit.
path_to_fit_params : str
Relative path to the csv file with loaded fit parameters.
path_to_fit_params_default : str
Relative path to the csv file with default fit parameters.
path_to_fit_params_results : str
Relative path to the csv file with the parameters resulting from the fit.
path_to_fit_params_save : str
Relative path to the csv file with the saved fit parameters.
path_to_file : str
Relative path to the scan.
path_to_peaks_params_default : str
Relative path to the csv file with default peaks parameters.
path_to_peaks_params_save : str
Relative path to the csv file with the saved peaks parameters.
peaks_params : ndarray
2D array of str containing the peak parameters.
peaks_params_filled : ndarray
2D array of str containing the peak parameters + extra lines filled with ''.
peaks_params_to_add : ndarray
2D array of str containing the peak parameters to import from the database.
report : ndarray
1D array of str containing the text for the report.
result : object lmfit.MinimizerResult
Result of the fit.
save_dir : str
Directory where the data will be saved.
SCF_Si : ndarray
2D array of float containing the energy, f1, f2 from CXRO for Si.
SDD_elems_available : list of str
Indices of SDD elems available (e.g. ['0','1','2','3']).
SDD_elems_chosen_int : list of int
Indices of SDD selected (e.g. [0, 1, 2])
selected_element : str
Name of the element chosen in the database.
selected_line : str
Name of the line chosen in the database.
sensorsRelTimestamps : ndarray
1D array of float containing the time stamp of all the spectrum in the nexus file.
session_id : str
Session ID based on time.
SF0 : float
Shelf fraction of the peak.
shelf_part : ndarray
1D array of float containing the shelf part of a spectrum fit.
sl : float
Linear part of the baseline = sl*eVs+ct.
spectrum_model : ndarray
1D array of float containing the spectrum fit.
spectrum_to_fit : ndarray
1D array of float containing the spectrum to fit.
spectrums : ndarray
2D array of float corresponding to the subset of selected spectrums.
spectrums_sum : ndarray
1D array of float corresponding to the sum of all spectrums over time.
stamps0D : ndarray
2D array of list containing the stamps of the sensors recorded during the scan.
tail_part : ndarray
1D array of float containing the tail part of a spectrum fit.
TF0 : float
Tail fraction of the peak.
transition_names : list of str
List with all possible transition names from xraylib database.
TW0 : float
Tail width of the peak.
Methods
-------
__init__(notebook_name, save_dir, files_dir):
Constructor.
add_str_to_logs(wid, str_to_add):
Add a string to the logs and update the logs window.
check_and_init():
Check if files and folders exist, then create the interactive cell.
create_fit_params_results(wid, path_to_fit_params_results):
Create the csv file for the results of the fit and write its header.
export_nb_to_pdf(wid):
Export the notebook to pdf using a command line through the OS.
generate_report(wid):
Generate the text for the report.
get_and_save_all_params_in_files(wid):
Get the parameters from the widgets and save them in files.
plot_extraction(wid, is_range_spectrums_empty):
Plot the extraction with the current set of parameters.
plot_peaks(wid):
Plot the peaks with the current set of parameters.
plot_single_fit(wid, title):
Plot the fit (data, fitting curve, residual).
plot_all_fit_results(fit_index, path_to_result_folder):
Function to call each individual plotting subfunctions when printing the report.
plot_fit_areas_from_file(path_to_fit_params_results):
Plot the result areas of a fit after loading it from file.
plot_fit_curve_from_file(path_to_fit_folder, fit_index):
Plot the result curve of a fit after loading it from file.
plot_fit_parameters_from_file(path_to_fit_params_save, path_to_fit_params_results):
Plot the result parameters of a fit after loading it from file.
plot_fit_positions_from_file(path_to_fit_params_results):
Plot the result peaks positions of a fit after loading it from file.
run_single_fit(wid):
Run the fit.
save_extract_params_in_file(wid, path_to_extract_params):
Save extraction parameters in a csv file.
save_fit_curves_results_in_file(wid, path_to_fit_curve_results):
Save fit curve in a csv file.
save_fit_logs_in_file(wid, path_to_fit_log_results, spectrum_index)
Save fit logs in a txt file.
save_fit_params_in_file(wid, path_to_fit_params):
Save fit parameters in a csv file.
save_fit_params_results_in_file(wid, path_to_fit_params_results, spectrum_index):
Save fit results in a csv file.
save_peaks_params_in_file(wid, path_to_peaks_params):
Save peaks parameters in a csv file.
set_elements(wid):
Extract elements/lines/peaks from the sheet.
set_extract_params_from_file(wid, path_to_extract_params):
Load extraction parameters from a csv file.
set_extract_params_from_widgets(wid):
Set the extraction values of expt from the current values of the widgets.
set_fit_params_from_file(wid, path_to_fit_params):
Load fit parameters from a csv file.
set_fit_params_from_widgets(wid):
Set the fit values of expt from the current values of the widgets.
set_list_extract_params_files(wid):
Set the list of csv files containing extraction parameters.
set_list_fit_params_files(wid):
Set the list of csv files containing fit parameters.
set_list_files(wid):
Set the list of scans available.
set_list_peaks_params_files(wid):
Set the list of csv files containing peaks parameters.
set_paths(wid):
Set the path to the different saving files.
set_peaks_params_from_file(wid, path_to_peaks_params):
Load peaks parameters from a csv file.
set_peaks_params_from_sheet(wid, sheet):
Set the peaks values of expt from the current values of the sheet.
set_peaks_params_from_widgets(wid):
Set the peak values of expt from the current values of the widgets.
set_peaks_relative_strength(wid):
Set the relative strength of each peak relative to the most intense one within the line.
set_result_params_to_fit_output(wid):
Update elements_fit and eVs_fit with the result of the fit, and compute the fitting curve with its sub-parts.
set_result_params_to_nan(wid):
Update the results of the fit (params and curves) with NaNs.
set_scan_info(wid):
Extract SDD elems available and number of points in the scan.
set_session_id(wid):
Set the session ID based on time.
validate_sheet(wid):
Validate the sheet in the peak tab.
Classes
-------
Element:
Class containing lines/peaks parameters relative to an element.
'''
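    # Typical construction (illustrative values; the widget object 'wid' passed
    # to most methods comes from lib.frontend):
    #   expt = Experiment('JupyFluo.ipynb', './save/', './files/', logs_lvl=0)
    #   expt.check_and_init()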
def __init__(self, notebook_name, save_dir, files_dir, logs_lvl):
'''
Constructor.
Parameters
----------
notebook_name : str
The name of the notebook (necessary for saving in pdf).
save_dir : str
The directory where the data will be saved.
files_dir : str
The directory where the nexus files are.
logs_lvl : int
Depth level of the logs.
'''
self.notebook_name = notebook_name
self.save_dir = save_dir
self.files_dir = files_dir
self.path_to_extract_params_default = 'lib/params_default/extract_params_default.csv'
self.path_to_peaks_params_default = 'lib/params_default/peaks_params_default.csv'
self.path_to_fit_params_default = 'lib/params_default/fit_params_default.csv'
self.path_to_db = 'lib/frontend/xraylib_lines.pro'
self.logs = ''
self.logs_lvl = logs_lvl
# Construct a list with all possible transition names from xraylib database
# transition_name = ['KL1', 'KL2', 'KL3', 'KM1', ...]
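        # Each useful database row is assumed to look like 'KL3_LINE = -3',
        # from which only the part before '_' is kept ('KL3').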
self.transition_names = []
with open(self.path_to_db, "r") as f:
csvreader = csv.reader(f)
for row in csvreader:
if row!=[]:
if (row[0][0]!=';' and row[0]!='end'):
self.transition_names.append(row[0].split(' = ')[0].split('_')[0])
self.all_spectrums = np.array([])
self.baseline = np.array([])
self.bckg_eVs_inf = 0.
self.bckg_eVs_sup = 1.
self.beam_energy = 0.
self.broad_factor = 0.
self.compton_part = np.array([])
self.ct = 0.
self.channels = np.array([])
self.current_sensorsRelTimestamps = np.array([])
self.data0D = np.array([])
self.elements = None
self.elements_fit = None
self.eV0 = 0.
self.eVs = np.array([])
self.eVs_fit = np.array([])
self.first_channel = 0
self.first_spectrum = 0
self.gain = 0.
self.gaussian_part = np.array([])
self.highTF0 = 0.
self.highTW0 = 0.
self.is_bckg_subset = False
self.is_broad_factor = False
self.is_ct = False
self.is_eV0 = False
self.is_extract_done = False
self.is_fit_done = False
self.is_fit_fail = False
self.is_gain = False
self.is_highTF0 = False
self.is_highTW0 = False
self.is_last_fit_a_preview = False
self.is_lowTF0 = False
self.is_lowTW0 = False
self.is_noise = False
self.is_SDD_elem_0 = False
self.is_SDD_elem_1 = False
self.is_SDD_elem_2 = False
self.is_SDD_elem_3 = False
self.is_SDD_elem_4 = False
self.is_SF0 = False
self.is_sheet_loaded = False
self.is_sl = False
self.is_spectrum_empty = False
self.is_TF0 = False
self.is_TW0 = False
self.last_channel = 0
self.last_non_zero_spectrum = 0
self.last_spectrum = 0
self.list_extract_params_files = []
self.list_fit_params_files = []
self.list_isfit = []
self.list_isfit_str = ''
self.list_files_filenames = []
self.list_peaks_params_files = []
self.lowTF0 = 0.
self.lowTW0 = 0.
self.min_strength = 0.
self.name = ''
self.nb_allspectrums = 0
self.nb_spectrums = 0
self.noise = 0.
self.filename = ''
self.path_to_extract_params = ''
self.path_to_extract_params_save = ''
self.path_to_fit_curve_results = ''
self.path_to_fit_folder = ''
self.path_to_fit_log_results = ''
self.path_to_fit_params = ''
self.path_to_fit_params_results = ''
self.path_to_fit_params_save = ''
self.path_to_file = ''
self.path_to_peaks_params_save = ''
self.peaks_params = np.array([])
self.peaks_params_filled = np.array([])
self.peaks_params_to_add = np.array([])
self.report = np.array([])
self.result = None
self.SCF_Si = np.genfromtxt('lib/fit/f-Si') #Requires the file f-si from CXRO.
self.SDD_elems_available = []
self.SDD_elems_chosen_int = []
self.selected_element = ''
self.selected_line = ''
self.sensorsRelTimestamps = np.array([])
self.session_id = ''
self.SF0 = 0.
self.shelf_part = np.array([])
self.sl = 0.
self.spectrum_model = np.array([])
self.spectrum_to_fit = np.array([])
self.spectrums = np.array([])
self.spectrums_sum = np.array([])
self.stamps0D = np.array([])
self.tail_part = np.array([])
self.TF0 = 0.
self.TW0 = 0.
def check_and_init(self):
'''
Check if files and folders exist, then create the interactive cell.
Raises
------
SystemExit('Save directory not found.')
when save directory not found
SystemExit('Files directory not found.')
when files directory not found
SystemExit('Notebook file not found.')
when notebook file not found
'''
if os.path.exists(self.save_dir):
print("Results will be saved in the directory:\n%s"%self.save_dir)
else:
print(_RED+'Careful, the directory for saving the data was not found.'+_RESET)
print('Save directory indicated in the first cell: %s'%self.save_dir)
sys.exit('Save directory not found.')
if os.path.exists(self.files_dir):
print("Scans (nexus files) should be in the directory:\n%s"%self.files_dir)
else:
print(_RED+"Careful, the directory where the scans are stored was not found."+_RESET)
print('Files directory indicated in the first cell: %s'%self.files_dir)
sys.exit('Files directory not found.')
if not os.path.exists(self.notebook_name):
print(_RED+"Careful, assign the correct notebook name to self.notebook_name."+_RESET)
            print('Notebook name indicated in the first cell: %s'%self.notebook_name)
sys.exit('Notebook file not found.')
# Set the tokens
self.is_extract_done = False
self.is_sheet_loaded = False
# Create the interactive cell
nb.create_cell(code='FE.start_session(expt, wid)', position ='at_bottom', celltype='code', is_print=False)
def export_nb_to_pdf(self, wid):
'''
Export the notebook to pdf using a command line through the OS.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
Returns
-------
bool
            export_done, True if the export succeeded without error/warning
'''
# Save the current state of the notebook (including the widgets)
nb.save()
# Export the pdf
t0 = time.time()
rc = 1
while rc>0:
if (time.time()-t0) > 100:
# Timeout before PDF export is considered as failed
export_done = False
break
time.sleep(3)
command = 'jupyter nbconvert '
command+= self.notebook_name
command+= ' --to pdf '
command+= ' --TagRemovePreprocessor.remove_cell_tags \'notPrint\' ' # Remove the widgets from the PDF
command+= ' --no-input ' # Remove the code cells
rc = subprocess.call(command,shell=True)
if rc==0:
export_done = True
# Update logs
if self.logs_lvl>=1:
str_to_add = 'Export to pdf.'
self.add_str_to_logs(wid, str_to_add)
return export_done
def set_scan_info(self, wid):
'''
Extract SDD elems available and number of points in the scan.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
if '.nxs' in self.filename:
nexus = PN.PyNexusFile(self.path_to_file)
# Extract list of detector elements available
stamps = nexus.extractStamps()
SDD_elems_available = []
for stamp in stamps:
if (stamp[1] is not None and "fluospectrum0" in stamp[1].lower()):
SDD_elems_available.append(stamp[1].lower()[-1])
# Extract number of spectrums taken during the scan
nb_allspectrums = int(nexus.get_nbpts())
self.SDD_elems_available = SDD_elems_available
self.nb_allspectrums = nb_allspectrums
# Update logs
if self.logs_lvl>=1:
str_to_add = 'Extract information from nexus file.'
self.add_str_to_logs(wid, str_to_add)
if '.dat' in self.filename:
# Extract list of detector elements available
SDD_elems_available = []
nb_allspectrums = 0
for index_element in [0,1,2,3,4]:
path_to_mat = self.path_to_file[:-4]+'_fluospectrum0'+str(index_element)+'.mat'
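                # e.g. a scan file 'scan_0042.dat' (illustrative name) maps to
                # 'scan_0042_fluospectrum00.mat' for SDD element 0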
if os.path.isfile(path_to_mat):
SDD_elems_available.append(str(index_element))
# Extract number of spectrums taken during the scan
nb_allspectrums = np.shape(np.genfromtxt(path_to_mat))[0]
self.SDD_elems_available = SDD_elems_available
self.nb_allspectrums = nb_allspectrums
# Update logs
if self.logs_lvl>=1:
str_to_add = 'Extract information from mat file.'
self.add_str_to_logs(wid, str_to_add)
def plot_extraction(self, wid, is_range_spectrums_empty=False):
'''
Plot the extraction with the current set of parameters.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
is_range_spectrums_empty : bool, optional
True if the selected range of spectrums appears to be empty.
'''
# Plot all the spectrums (stopping at the last non-zero one)
fig = plt.figure(figsize=(12,6))
ax1 = fig.add_subplot(111)
ax1.set_title('All the spectrums in the file')
ax1.set(xlabel = 'spectrum index', ylabel = 'channel')
ax1.set_xlim(left = -1, right = self.last_non_zero_spectrum+1)
ax1.axvline(self.first_spectrum, linestyle = '--', color = 'k', linewidth = 3,\
label = 'Selected spectrum range')
ax1.axvline(self.last_spectrum, linestyle = '--', color = 'k', linewidth = 3)
ax1.imshow(self.all_spectrums.transpose(), cmap = 'viridis', aspect = 'auto', norm=mplcolors.LogNorm())
plt.legend(fontsize=12)
# Plot the whole channel range
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
ax1.set_title('Selected channel range on the sum of all spectrums')
ax1.set(xlabel = 'channel', ylabel = 'counts')
ax1.axvline(self.first_channel, linestyle = '--', color = 'r', linewidth = 3, label = 'Selected channel range')
ax1.axvline(self.last_channel, linestyle = '--', color = 'r', linewidth = 3)
ax1.plot(np.arange(2048), self.all_spectrums.sum(axis = 0), 'k.-')
ax1.legend()
plt.setp(ax1.get_xticklabels(), visible=False)
ax2 = fig.add_subplot(212)
ax2.set(xlabel = 'channel', ylabel = 'counts')
ax2.axvline(self.first_channel, linestyle = '--', color = 'r', linewidth = 3)
ax2.axvline(self.last_channel, linestyle = '--', color = 'r', linewidth = 3)
ax2.plot(np.arange(2048), self.all_spectrums.sum(axis = 0), 'k.-')
ax2.set_yscale('log')
ax2.set_ylim(bottom = 1)
yticks = ax1.yaxis.get_major_ticks()
yticks[-1].label1.set_visible(False)
plt.subplots_adjust(hspace=.0)
if not is_range_spectrums_empty:
#Plot the selected spectrum range
fig = plt.figure(figsize=(12,6))
ax1 = fig.add_subplot(111)
ax1.set_title('Zoom on subset of spectrums [%g:%g]'%(self.first_spectrum,self.last_spectrum))
ax1.set(xlabel = 'spectrum index', ylabel = 'channel')
ax1.imshow(self.spectrums.transpose(), cmap = 'viridis', aspect = 'auto', norm=mplcolors.LogNorm(),
interpolation='none',
extent=[self.first_spectrum-0.5,self.last_spectrum+0.5,
self.last_channel+0.5,self.first_channel-0.5])
ax1.xaxis.set_major_locator(MaxNLocator(integer=True))
ax1.yaxis.set_major_locator(MaxNLocator(integer=True))
#Plot the selected channel range
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
ax1.set_title('Zoom on subset of channels [%g:%g]'%(self.first_channel,self.last_channel))
ax1.set(xlabel = 'channel', ylabel = 'counts')
ax1.plot(self.channels, self.spectrums[0], 'r-', label = 'Spectrum %g'%self.first_spectrum)
ax1.plot(self.channels, self.spectrums[-1], 'b-', label = 'Spectrum %g'%self.last_spectrum)
ax1.legend(fontsize=12)
plt.setp(ax1.get_xticklabels(), visible=False)
ax2 = fig.add_subplot(212)
ax2.set(xlabel = 'channel', ylabel = 'counts')
ax2.plot(self.channels, self.spectrums[0], 'r-')
ax2.plot(self.channels, self.spectrums[-1], 'b-')
ax2.set_yscale('log')
ax2.set_ylim(bottom = 1)
yticks = ax1.yaxis.get_major_ticks()
yticks[-1].label1.set_visible(False)
ax2.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.subplots_adjust(hspace=.0)
plt.show()
# Update logs
if self.logs_lvl>=1:
str_to_add = 'Plot of the extraction.'
self.add_str_to_logs(wid, str_to_add)
def set_extract_params_from_file(self, wid, path_to_extract_params):
'''
Load extraction parameters from a csv file.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
path_to_extract_params : str
Path to the csv file.
'''
if not os.path.exists(path_to_extract_params):
str_to_add = 'The file %s was not found.'%path_to_extract_params
else:
with open(path_to_extract_params, "r") as f:
reader = csv.DictReader(f, delimiter=';',dialect='excel')
for row in reader:
self.is_SDD_elem_0 = literal_eval(row['#is_SDD_elem_0'])
self.is_SDD_elem_1 = literal_eval(row['#is_SDD_elem_1'])
self.is_SDD_elem_2 = literal_eval(row['#is_SDD_elem_2'])
self.is_SDD_elem_3 = literal_eval(row['#is_SDD_elem_3'])
self.is_SDD_elem_4 = literal_eval(row['#is_SDD_elem_4'])
self.first_channel = int(row['#first_channel'])
self.last_channel = int(row['#last_channel'])
self.first_spectrum = int(row['#first_spectrum'])
self.last_spectrum = int(row['#last_spectrum'])
str_to_add = 'Extraction parameters imported from:\n%s'%path_to_extract_params
# Update logs
if self.logs_lvl>=0:
self.add_str_to_logs(wid, str_to_add)
def set_fit_params_from_file(self, wid, path_to_fit_params):
'''
Load fit parameters from a csv file.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
path_to_fit_params : str
Path to the csv file.
'''
if not os.path.exists(path_to_fit_params):
str_to_add = 'The file %s was not found.'%path_to_fit_params
else:
with open(path_to_fit_params, "r") as f:
reader = csv.DictReader(f, delimiter=';',dialect='excel')
for row in reader:
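                    # Numeric values may use a comma as the decimal separator
                    # (e.g. '1,5'), so normalize to a dot before float().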
self.list_isfit_str = str(row['#list_isfit_str'])
self.gain = float(row['#gain'].replace(',', '.'))
self.eV0 = float(row['#eV0'].replace(',', '.'))
self.sl = float(row['#sl'].replace(',', '.'))
self.ct = float(row['#ct'].replace(',', '.'))
self.noise = float(row['#noise'].replace(',', '.'))
self.SF0 = float(row['#SF0'].replace(',', '.'))
self.TF0 = float(row['#TF0'].replace(',', '.'))
self.TW0 = float(row['#TW0'].replace(',', '.'))
self.broad_factor = float(row['#broad_factor'].replace(',', '.'))
self.lowTF0 = float(row['#lowTF0'].replace(',', '.'))
self.highTF0 = float(row['#highTF0'].replace(',', '.'))
self.lowTW0 = float(row['#lowTW0'].replace(',', '.'))
self.highTW0 = float(row['#highTW0'].replace(',', '.'))
self.is_bckg_subset = literal_eval(row['#is_bckg_subset'])
self.bckg_eVs_inf = float(row['#bckg_eVs_inf'].replace(',', '.'))
self.bckg_eVs_sup = float(row['#bckg_eVs_sup'].replace(',', '.'))
# convert list_isfit_str into a list
self.list_isfit = self.list_isfit_str.split(',')
str_to_add = 'Fit parameters imported from:\n%s'%path_to_fit_params
# Update logs
if self.logs_lvl>=0:
self.add_str_to_logs(wid, str_to_add)
def set_peaks_params_from_file(self, wid, path_to_peaks_params):
'''
Load peaks parameters from a csv file.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
path_to_peaks_params : str
Path to the csv file.
'''
if not os.path.exists(path_to_peaks_params):
# Update logs
str_to_add = 'The file %s was not found.'%path_to_peaks_params
self.add_str_to_logs(wid, str_to_add)
else:
peaks_params = np.array([])
with open(path_to_peaks_params, "r") as f:
csvreader = csv.reader(f, delimiter=';',dialect='excel')
# First line is the header
peaks_header = next(csvreader)
nb_columns = len(peaks_header)
for row in csvreader:
peaks_params = np.append(peaks_params, row)
peaks_params = np.reshape(peaks_params, (len(peaks_params)//nb_columns,nb_columns))
# Extract in peaks_params only the params which will go in the sheet
self.peaks_params = peaks_params[:,:6]
# Get the other general parameters
self.beam_energy = float(peaks_params[0,6].replace(',', '.'))
self.min_strength = float(peaks_params[0,7].replace(',', '.'))
self.gain = float(peaks_params[0,8].replace(',', '.'))
self.eV0 = float(peaks_params[0,9].replace(',', '.'))
# Update logs
if self.logs_lvl>=0:
str_to_add = 'Peaks parameters loaded from the file:\n%s'%path_to_peaks_params
self.add_str_to_logs(wid, str_to_add)
def save_peaks_params_in_file(self, wid, path_to_peaks_params):
'''
Save peaks parameters in a csv file.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
path_to_peaks_params : str
Path to the csv file.
'''
# Create a folder for saving all data related to the scan
if not os.path.exists(self.save_dir+self.name):
os.mkdir(self.save_dir+self.name)
# Create a subfolder corresponding to the current session
if not os.path.exists(self.save_dir+self.name+'/'+self.session_id):
os.mkdir(self.save_dir+self.name+'/'+self.session_id)
# Write to the csv file
header = ['#Peak name', '#Transition name', '#Position (eV)', '#Strength',
'#Fit position?', '#Fit peak?', '#Beam energy (eV)', '#Min strength',
'#Gain', '#eV0']
with open(path_to_peaks_params, "w", newline='') as f:
writer = csv.writer(f,delimiter=';',dialect='excel')
writer.writerow(header)
for row in self.peaks_params:
writer.writerow(np.append(
row,
[self.beam_energy, self.min_strength, self.gain, self.eV0]
)
)
# Update logs
if self.logs_lvl>=0:
str_to_add = 'Current peaks parameters saved in:\n%s'%path_to_peaks_params
self.add_str_to_logs(wid, str_to_add)
def save_fit_params_in_file(self, wid, path_to_fit_params):
'''
Save fit parameters in a csv file.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
path_to_fit_params : str
Path to the csv file.
'''
# Create a folder for saving all data related to the scan
if not os.path.exists(self.save_dir+self.name):
os.mkdir(self.save_dir+self.name)
# Create a subfolder corresponding to the current session
if not os.path.exists(self.save_dir+self.name+'/'+self.session_id):
os.mkdir(self.save_dir+self.name+'/'+self.session_id)
# Write to the csv file
with open(path_to_fit_params, "w", newline='') as f:
writer = csv.writer(f,delimiter=';',dialect='excel')
header = np.array([
'#list_isfit_str',
'#gain',
'#eV0',
'#sl',
'#ct',
'#noise',
'#SF0',
'#TF0',
'#TW0',
'#broad_factor',
'#lowTF0',
'#highTF0',
'#lowTW0',
'#highTW0',
'#is_bckg_subset',
'#bckg_eVs_inf',
'#bckg_eVs_sup'
])
writer.writerow(header)
writer.writerow([
self.list_isfit_str,
self.gain,
self.eV0,
self.sl,
self.ct,
self.noise,
self.SF0,
self.TF0,
self.TW0,
self.broad_factor,
self.lowTF0,
self.highTF0,
self.lowTW0,
self.highTW0,
self.is_bckg_subset,
self.bckg_eVs_inf,
self.bckg_eVs_sup
])
# Update logs
if self.logs_lvl>=0:
str_to_add = 'Current fit parameters saved in:\n%s'%path_to_fit_params
self.add_str_to_logs(wid, str_to_add)
def set_extract_params_from_widgets(self, wid):
'''
Set the extraction values of expt from the current values of the widgets.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
self.is_SDD_elem_0 = wid.is_SDD_elem_0.value
self.is_SDD_elem_1 = wid.is_SDD_elem_1.value
self.is_SDD_elem_2 = wid.is_SDD_elem_2.value
self.is_SDD_elem_3 = wid.is_SDD_elem_3.value
self.is_SDD_elem_4 = wid.is_SDD_elem_4.value
self.first_channel = wid.first_channel.value
self.last_channel = wid.last_channel.value
self.first_spectrum = wid.first_spectrum.value
self.last_spectrum = wid.last_spectrum.value
# Update logs
if self.logs_lvl>=1:
str_to_add = 'expt updated with extraction params from the widgets.'
self.add_str_to_logs(wid, str_to_add)
def set_fit_params_from_widgets(self, wid):
'''
Set the fit values of expt from the current values of the widgets.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
self.gain = wid.gain.value
self.eV0 = wid.eV0.value
self.sl = wid.sl.value
self.ct = wid.ct.value
self.noise = wid.noise.value
self.SF0 = wid.SF0.value
self.TF0 = wid.TF0.value
self.TW0 = wid.TW0.value
self.broad_factor = wid.broad_factor.value
self.lowTF0 = wid.lowTF0.value
self.highTF0 = wid.highTF0.value
self.lowTW0 = wid.lowTW0.value
self.highTW0 = wid.highTW0.value
self.is_gain = wid.is_gain.value
self.is_eV0 = wid.is_eV0.value
self.is_sl = wid.is_sl.value
self.is_ct = wid.is_ct.value
self.is_noise = wid.is_noise.value
self.is_SF0 = wid.is_SF0.value
self.is_TF0 = wid.is_TF0.value
self.is_TW0 = wid.is_TW0.value
self.is_broad_factor = wid.is_broad_factor.value
self.is_lowTF0 = wid.is_lowTF0.value
self.is_highTF0 = wid.is_highTF0.value
self.is_lowTW0 = wid.is_lowTW0.value
self.is_highTW0 = wid.is_highTW0.value
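        # Build the list of fitted parameter names: multiplying a name string by a bool
        # keeps the name when the corresponding checkbox is True and yields '' when it is False.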
self.list_isfit = ['gain'*self.is_gain, 'eV0'*self.is_eV0,
'sl'*self.is_sl, 'ct'*self.is_ct, 'noise'*self.is_noise,
'SF0'*self.is_SF0, 'TF0'*self.is_TF0,
'TW0'*self.is_TW0, 'broad_factor'*self.is_broad_factor,
'lowTF0'*self.is_lowTF0, 'highTF0'*self.is_highTF0,
'lowTW0'*self.is_lowTW0, 'highTW0'*self.is_highTW0]
        # Remove the empty strings left by unticked parameters
while "" in self.list_isfit:
self.list_isfit.remove("")
self.list_isfit_str = ','.join(self.list_isfit)
self.is_bckg_subset = wid.is_bckg_subset.value
self.bckg_eVs_inf = wid.bckg_eVs_inf.value
self.bckg_eVs_sup = wid.bckg_eVs_sup.value
# Update logs
if self.logs_lvl>=1:
str_to_add = 'expt updated with fit params from the widgets.'
self.add_str_to_logs(wid, str_to_add)
def save_extract_params_in_file(self, wid, path_to_extract_params):
'''
Save extraction parameters in a csv file.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
path_to_extract_params : str
Path to the csv file.
'''
# Create a folder for saving all data related to the scan
if not os.path.exists(self.save_dir+self.name):
os.mkdir(self.save_dir+self.name)
# Create a subfolder corresponding to the current session
if not os.path.exists(self.save_dir+self.name+'/'+self.session_id):
os.mkdir(self.save_dir+self.name+'/'+self.session_id)
# Write to the csv file
with open(path_to_extract_params, "w", newline='') as f:
writer = csv.writer(f,delimiter=';',dialect='excel')
header = np.array([
'#is_SDD_elem_0',
'#is_SDD_elem_1',
'#is_SDD_elem_2',
'#is_SDD_elem_3',
'#is_SDD_elem_4',
'#first_channel',
'#last_channel',
'#first_spectrum',
'#last_spectrum',
])
writer.writerow(header)
writer.writerow([
self.is_SDD_elem_0,
self.is_SDD_elem_1,
self.is_SDD_elem_2,
self.is_SDD_elem_3,
self.is_SDD_elem_4,
self.first_channel,
self.last_channel,
self.first_spectrum,
self.last_spectrum
])
# Update logs
if self.logs_lvl>=0:
str_to_add = 'Current extraction parameters saved in:\n%s'%path_to_extract_params
self.add_str_to_logs(wid, str_to_add)
def set_peaks_params_from_widgets(self, wid):
'''
Set the peak values of expt from the current values of the widgets.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
self.beam_energy = wid.beam_energy.value
self.min_strength = wid.min_strength.value
self.gain = wid.gain.value
self.eV0 = wid.eV0.value
# Update logs
if self.logs_lvl>=1:
str_to_add = 'expt updated with peaks params from the widgets.'
self.add_str_to_logs(wid, str_to_add)
def set_peaks_params_from_sheet(self, wid, sheet):
'''
Set the peaks values of expt from the current values of the sheet.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
sheet : object Sheet
Object from the class Sheet (module ipysheet)
'''
# Get the peaks from the sheet
self.peaks_params_filled = ipysheet.numpy_loader.to_array(ipysheet.easy.current())
# Remove the empty lines
self.peaks_params = self.peaks_params_filled[np.where(self.peaks_params_filled[:,0]!='')]
# Update logs
if self.logs_lvl>=1:
str_to_add = 'expt updated with peaks params from the sheet.'
self.add_str_to_logs(wid, str_to_add)
def validate_sheet(self, wid):
'''
Validate the sheet in the peak tab.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
# Update expt with peaks params from sheet and widgets
self.set_peaks_params_from_sheet(wid, wid.sheet)
self.set_peaks_params_from_widgets(wid)
# Create the elements/lines/peaks
self.set_elements(wid)
# Set the strength of each peak relative to the maximum strength within the same line
self.set_peaks_relative_strength(wid)
# Convert channels to eVs
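        # Linear energy calibration: E(eV) = gain*channel + eV0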
self.eVs = self.gain*self.channels + self.eV0
# Update logs
if self.logs_lvl>=1:
str_to_add = 'Sheet validated.'
self.add_str_to_logs(wid, str_to_add)
def set_session_id(self, wid):
'''
Set the session ID based on time.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
self.session_id = datetime.now().strftime('%Y%m%d_%H%M%S')
# Update logs
if self.logs_lvl>=0:
str_to_add = 'Set the session id to %s.'%(self.session_id)
self.add_str_to_logs(wid, str_to_add)
def set_paths(self, wid):
'''
Set the paths to the different saving files.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
self.path_to_fit_params_results = self.save_dir+self.name+'/'+self.session_id+'/fit_results.csv'
self.path_to_extract_params_save = self.save_dir+self.name+'/'+self.session_id+'/'+'extract_params.csv'
self.path_to_peaks_params_save = self.save_dir+self.name+'/'+self.session_id+'/'+'peaks_params.csv'
self.path_to_fit_params_save = self.save_dir+self.name+'/'+self.session_id+'/'+'fit_params.csv'
self.path_to_fit_folder = self.save_dir+self.name+'/'+self.session_id+'/fit_curves/'
self.path_to_fit_log_results = self.save_dir+self.name+'/'+self.session_id+'/fit_results.log'
# Update logs
if self.logs_lvl>=1:
str_to_add = 'Set the paths to saving files.'
self.add_str_to_logs(wid, str_to_add)
def set_list_files(self, wid):
'''
Set the list of scans available.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
self.list_files_filenames = [file for file in sorted(os.listdir(self.files_dir))
if ('.nxs' in file or '.dat' in file)][::-1]
# Update logs
if self.logs_lvl>=1:
str_to_add = 'Set the list of files.'
self.add_str_to_logs(wid, str_to_add)
def set_list_extract_params_files(self, wid):
'''
Set the list of csv files containing extraction parameters.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
self.list_extract_params_files = ['Default extraction parameters']
tmp_files = []
for root, _, files in os.walk(self.save_dir, topdown=True):
for name in files:
if 'extract_params.csv' in name:
path_to_csv = os.path.join(root.split('/')[-2],root.split('/')[-1])
tmp_files.append(path_to_csv)
tmp_files.sort(reverse=True)
self.list_extract_params_files += tmp_files
# Update logs
if self.logs_lvl>=1:
str_to_add = 'Set the list of files with extraction parameters.'
self.add_str_to_logs(wid, str_to_add)
def set_list_peaks_params_files(self, wid):
'''
Set the list of csv files containing peaks parameters.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
self.list_peaks_params_files = ['Default peaks parameters']
tmp_files = []
for root, _, files in os.walk(self.save_dir, topdown=True):
for name in files:
if 'peaks_params.csv' in name:
path_to_csv = os.path.join(root.split('/')[-2],root.split('/')[-1])
tmp_files.append(path_to_csv)
tmp_files.sort(reverse=True)
self.list_peaks_params_files += tmp_files
# Update logs
if self.logs_lvl>=1:
str_to_add = 'Set the list of files with peaks parameters.'
self.add_str_to_logs(wid, str_to_add)
def set_list_fit_params_files(self, wid):
'''
Set the list of csv files containing fit parameters.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
self.list_fit_params_files = ['Default fit parameters']
tmp_files = []
for root, _, files in os.walk(self.save_dir, topdown=True):
for name in files:
if 'fit_params.csv' in name:
path_to_csv = os.path.join(root.split('/')[-2],root.split('/')[-1])
tmp_files.append(path_to_csv)
tmp_files.sort(reverse=True)
self.list_fit_params_files += tmp_files
# Update logs
if self.logs_lvl>=1:
str_to_add = 'Set the list of files with fit parameters.'
self.add_str_to_logs(wid, str_to_add)
def set_elements(self, wid):
'''
Extract elements/lines/peaks from the sheet.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
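        # The hierarchy built below is Element -> Line (K/L/M or a custom name) -> Peak,
        # using the nested classes Element, Line and Peak defined further down in this class.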
# List of objects Element
self.elements = []
# Go line by line in the sheet
for row in self.peaks_params:
# Treat only the peaks which are fitted
if row[5]!='no' and row[0]!='':
# Set the name of the element
current_element_name = row[0]
# Set the name of the line by appending the name of the current element to
# the first character of the transition name (K, L, M)
# Or the full name if it is not a transition (Elastic peak, Compton peak ...)
if row[1][0] in ['K', 'L', 'M']:
current_line_name = current_element_name+'_'+row[1][0]
else:
current_line_name = current_element_name+'_'+row[1]
# Check if the element has already been created
is_new_element = True
for element in self.elements:
if current_element_name == element.name:
is_new_element = False
# Add this element to the list if it did not exist
if is_new_element:
current_element = self.Element(current_element_name)
self.elements = np.append(self.elements, current_element)
# Check if the line has already been created for this element
is_new_line = True
for line in current_element.lines:
if current_line_name == line.name:
is_new_line = False
# Add this line to the list if it did not exist for this element
if is_new_line:
current_line = current_element.Line(current_line_name)
current_element.lines = np.append(current_element.lines, current_line)
# Create the peak and add it to the line
current_peak = current_line.Peak(
peakName = 'peak'+'_'+row[0]+'_'+row[1],
peakPosition = float(row[2]),
peakStrength = float(row[3]),
peakIs_fitpos = bool(row[4]=='yes'))
current_line.peaks = np.append(current_line.peaks, current_peak)
# Update logs
if self.logs_lvl>=1:
str_to_add = 'Extract elements from sheet.'
self.add_str_to_logs(wid, str_to_add)
def set_peaks_relative_strength(self, wid):
'''
        Set the strength of each peak relative to the most intense one within its line.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
for element in self.elements:
for line in element.lines:
max_strength = np.max([peak.strength for peak in line.peaks])
# Normalize the strengths with the most intense one
for peak in line.peaks:
peak.relative_strength = peak.strength/max_strength
# Update logs
if self.logs_lvl>=1:
str_to_add = 'Set the peaks relative strength.'
self.add_str_to_logs(wid, str_to_add)
class Element:
'''
Class containing lines/peaks parameters relative to an element.
Attributes
----------
lines : list of Line
List of objects Line.
name : str
Name of the element (i.e. 'Si', 'P', 'Cl', ...).
Methods
-------
__init__(elementName):
Constructor.
Classes
-------
Line:
Class for a fluorescence line (i.e. K, L, M).
'''
def __init__(self, elementName):
'''
Constructor.
Parameters
----------
elementName : str
Name of the element (i.e. Si, P, Cl, ...).
'''
self.lines = []
self.name = elementName
class Line:
'''
Class containing peaks parameters relative to a fluorescence line of an element.
Attributes
----------
name : str
Name of the line (i.e. 'Si_K', 'Ar_L', 'K_M', or user defined).
peaks : list of Peak
                List of objects Peak.
area : float
Area of the line.
peak_series : ndarray
1D array containing the part of the spectrum corresponding to the given line, during a fit.
Methods
-------
__init__(lineName):
Constructor.
Classes
-------
Peak:
Class for a single peak.
'''
def __init__(self, lineName):
'''
Constructor.
Parameters
----------
lineName : str
Name of the line (i.e. 'Si_K', 'Ar_L', 'K_M', or user defined).
'''
self.peaks = []
self.name = lineName
self.area = 1.
class Peak:
'''
Class containing all the information relative to a single peak.
Attributes
----------
is_fit_pos : bool
True if the position of the peak should be a fit param.
name : str
Name of peak ('peak' + element name + line name).
position : float
Position of the peak in eV before fit.
relative_strength : float
Strength of the peak relative to the most intense one of its line.
strength : float
Non-normalized strength of the peak.
Methods
-------
__init__():
Constructor.
'''
def __init__(self, peakName, peakPosition, peakStrength, peakIs_fitpos):
'''
Constructor.
Parameters
----------
peakName : str
Name of the peak ('peak' + '_' + element name + '_' + line name).
peakPosition : float
Position of the peak in eV.
peakStrength : float
Non-normalized strength of the peak.
peakIs_fitpos : bool
True if the position of the peak should be a fit param.
'''
self.name = peakName
self.position = peakPosition
self.strength = peakStrength
self.is_fitpos = peakIs_fitpos
def plot_peaks(self, wid):
'''
Plot the peaks with the current set of parameters.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
# Plot the whole spectrum twice (lin and log y-scale)
fig = plt.figure(figsize=(15,8))
gs = fig.add_gridspec(2, hspace=0)
axs = gs.subplots(sharex=True, sharey=False)
for ax in axs:
ax.minorticks_on()
# Iterator to have one color/linestyle per element, in the two plots
colors = iter(['#006BA4', '#FF800E', '#ABABAB', '#595959', 'k', '#C85200', 'b', '#A2C8EC', '#FFBC79']*200)
linestyles = iter(['--', '-.', '-', ':']*400)
# Plot the sum of all spectrums, and the position of the peaks
ax.plot(self.eVs, self.spectrums_sum, 'k.')
ax.set(xlabel = 'E (eV)', ylabel = 'counts')
for element in self.elements:
color = next(colors)
linestyle = next(linestyles)
for line in element.lines:
for peak in line.peaks:
ax.axvline(x = peak.position, color = color,
linestyle = linestyle, label = element.name)
axs[1].set(xlabel = 'E (eV)', ylabel = 'counts')
axs[1].set_ylim(bottom = 1)
axs[1].yaxis.set_label_position("right")
axs[1].yaxis.tick_right()
axs[1].set_yscale('log')
# Avoid having multiple times the same label in legend
handles, labels = axs[0].get_legend_handles_labels()
by_label = dict(zip(labels, handles))
axs[0].legend(by_label.values(), by_label.keys(), bbox_to_anchor=(0., 1.02, 1., .102), loc='lower center',
ncol=8, borderaxespad=0.)
plt.show()
# Update logs
if self.logs_lvl>=1:
str_to_add = 'Plot the peaks.'
self.add_str_to_logs(wid, str_to_add)
def plot_single_fit(self, wid=None, title='', is_clear_output=True):
'''
Plot the fit (data, fitting curve, residual).
Parameters
----------
wid : object myWidgets, optional
Object from the class myWidgets.
title : str, optional
Title of the plot.
is_clear_output : bool, optional
Clear the previous output or not (used for refreshing during a series of fits).
'''
# If gain or eV0 were fitted, the eVs need to be updated
        if 'gain' in self.list_isfit or 'eV0' in self.list_isfit:
eVs = self.eVs_fit
else:
eVs = self.eVs
# Plot the whole spectrum twice (lin and log y-scale)
if is_clear_output:
clear_output(wait=True)
fig = plt.figure(figsize=(15,10))
fig.suptitle(title, fontsize=14)
fig.subplots_adjust(top=0.95)
gs = fig.add_gridspec(3, hspace=0, height_ratios=[0.4,0.4,0.2])
axs = gs.subplots(sharex=True, sharey=False)
for ax in axs[0:2]:
ax.minorticks_on()
# Plot the sum of all spectrums, and the position of the peaks
ax.plot(eVs, self.spectrum_to_fit, 'k.', label='Data')
ax.plot(eVs, self.spectrum_model, 'r-', label='Fit')
if wid is not None:
if wid.show_gaussian.value:
ax.plot(eVs, self.gaussian_part, 'g--', label='Gaussian part')
if wid.show_shelf.value:
ax.plot(eVs, self.shelf_part, 'r-.', label = 'Shelf part')
if wid.show_tail.value:
ax.plot(eVs, self.tail_part, 'm--', label = 'Tail part')
if wid.show_bckg.value:
ax.plot(eVs, self.baseline, 'y:', label = 'Background')
if (wid.show_compton.value and np.shape(self.compton_part)):
ax.plot(eVs, self.compton_part, 'c--', label = 'Compton part')
ax.set(xlabel = 'E (eV)', ylabel = 'counts')
axs[0].legend(fontsize=12)
axs[1].set(xlabel = 'E (eV)', ylabel = 'counts')
axs[1].set_ylim(bottom = 1)
axs[1].set_yscale('log')
axs[1].yaxis.set_label_position("right")
axs[1].yaxis.tick_right()
axs[2].set(xlabel = 'E (eV)', ylabel = 'residuals')
axs[2].plot(eVs, self.spectrum_model-self.spectrum_to_fit, 'k-')
plt.show()
# Update logs
if (self.logs_lvl>=1 and wid is not None):
str_to_add = 'Plot the fit results.'
self.add_str_to_logs(wid, str_to_add)
def set_result_params_to_nan(self, wid):
'''
Update the results of the fit (params and curves) with NaNs.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
for element in self.elements_fit:
for line in element.lines:
line.area = np.nan
line.stderr_area = np.nan
for peak in line.peaks:
peak.position = np.nan
peak.stderr_position = np.nan
for param_name in self.list_isfit:
self.result.params[param_name].value = np.nan
self.result.params[param_name].stderr = np.nan
self.spectrum_model = np.nan*self.eVs
self.gaussian_part = np.nan*self.eVs
self.shelf_part = np.nan*self.eVs
self.tail_part = np.nan*self.eVs
self.baseline = np.nan*self.eVs
self.compton_part = np.nan*self.eVs
self.result.residual = np.nan*self.eVs
# In case of non convergence or empty spectrum, keep the original eVs
self.eVs_fit = self.eVs
# Update logs
if self.logs_lvl>=1:
str_to_add = 'Results set to nans.'
self.add_str_to_logs(wid, str_to_add)
def set_result_params_to_fit_output(self, wid):
'''
Update elements_fit and eVs_fit with the result of the fit, and compute the fitting curve with its sub-parts.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
# Extract result from the fit
for element in self.elements_fit:
for line in element.lines:
                # It can happen that the fit converges but does not return stderr
if line.area.stderr is None:
line.stderr_area = np.nan
else:
line.stderr_area = line.area.stderr
line.area = line.area.value
for peak in line.peaks:
if peak.is_fitpos:
if peak.position.stderr is None:
peak.stderr_position = np.nan
else:
peak.stderr_position = peak.position.stderr
peak.position = peak.position.value
# Compute new eVs if gain and/or eV0 were fitted
self.eVs_fit = self.result.params['gain'].value*self.channels + self.result.params['eV0'].value
# Compute the fitting curve and its sub-parts
self.spectrum_model, self.gaussian_part, self.shelf_part, self.tail_part, self.baseline, self.compton_part =\
funFit.funSpectrum(self.elements_fit, self.eVs_fit, self.SCF_Si, self.result.params['ct'].value,\
self.result.params['sl'].value, self.result.params['noise'].value,\
self.result.params['SF0'].value, self.result.params['TF0'].value, \
self.result.params['TW0'].value, self.result.params['broad_factor'].value,\
self.result.params['lowTF0'].value, self.result.params['highTF0'].value,\
self.result.params['lowTW0'].value, self.result.params['highTW0'].value)
# Update logs
if self.logs_lvl>=1:
str_to_add = 'Results updated with fit output.'
self.add_str_to_logs(wid, str_to_add)
def run_single_fit(self, wid):
'''
Run the fit.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
# Initialize tokens
self.is_spectrum_empty = False
self.is_fit_fail = False
self.is_last_fit_a_preview = True
# Update logs
if self.logs_lvl>=1:
str_to_add = 'Run single fit.'
self.add_str_to_logs(wid, str_to_add)
# We do not want to write on self during the fit
# But funFitSpectrum will write on elements[x].lines[x].area
# So we use a copy of self.elements for the fit
self.elements_fit = deepcopy(self.elements)
# Do the fit of the spectrum
self.result = funFit.funFitSpectrum(self.spectrum_to_fit, self.list_isfit, self.elements_fit, self.channels,
self.SCF_Si, self.gain, self.eV0, self.ct, self.sl, self.noise,
self.SF0, self.TF0, self.TW0, self.broad_factor,
self.lowTF0, self.highTF0, self.lowTW0, self.highTW0,
self.is_bckg_subset, self.bckg_eVs_inf, self.bckg_eVs_sup)
# Check if the fit succeeded
self.is_fit_fail = not self.result.success
# Check if the spectrum was empty
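        # (a total of fewer than 10 counts is treated as an empty spectrum)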
if np.sum(self.spectrum_to_fit)<10.:
self.is_spectrum_empty = True
if (self.is_spectrum_empty or self.is_fit_fail):
# Put nans in every fit parameters and in the resulting curves
self.set_result_params_to_nan(wid)
else:
# Set the self.XXX_fit params to the fit output
# Set the spectrum_model and sub-curves to the fit output
self.set_result_params_to_fit_output(wid)
def save_fit_curves_results_in_file(self, wid, path_to_fit_curve_results):
'''
Save fit curve in a csv file.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
path_to_fit_curve_results : str
Path to the csv file.
'''
# Create a subfolder for the fit results
if not os.path.exists(self.save_dir+self.name+'/'+self.session_id+'/fit_curves/'):
os.mkdir(self.save_dir+self.name+'/'+self.session_id+'/fit_curves/')
# Write to the csv file
with open(path_to_fit_curve_results, "w", newline='') as f:
writer = csv.writer(f,delimiter=';',dialect='excel')
header = np.array([
'#sensorsRelTimestamps',
'#eVs',
'#data',
'#fit'
])
writer.writerow(header)
for i in range(len(self.eVs)):
writer.writerow([
self.current_sensorsRelTimestamps,
np.round(self.eVs_fit[i],2),
np.round(self.spectrum_to_fit[i],2),
np.round(self.spectrum_model[i],2)
])
# Update logs
if self.logs_lvl>=1:
str_to_add = 'Fit curve saved in:\n%s'%path_to_fit_curve_results
self.add_str_to_logs(wid, str_to_add)
def create_fit_params_results(self, wid, path_to_fit_params_results):
'''
Create the csv file for the results of the fit and write its header.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
path_to_fit_params_results : str
Path to the csv file.
'''
# Prepare the header of the csv file
header = np.array([])
# Data stamps
for stamp0D in self.stamps0D:
if stamp0D[1] is None:
header =np.append(header, '#'+stamp0D[0])
else:
header =np.append(header, '#'+stamp0D[1])
# Stamps from the fit
header = np.append(header, '#spectrum_index')
for element in self.elements:
for line in element.lines:
header = np.append(header, '#'+'area_'+line.name)
header = np.append(header, '#stderr_'+'area_'+line.name)
for peak in line.peaks:
if peak.is_fitpos:
header = np.append(header, '#'+'pos_'+peak.name)
header = np.append(header, '#stderr_'+'pos_'+peak.name)
for param_name in self.list_isfit:
header = np.append(header, '#'+param_name)
header = np.append(header, '#stderr_'+param_name)
with open(path_to_fit_params_results, "w", newline='') as f:
writer = csv.writer(f,delimiter=';')
writer.writerow(header)
# Update logs
if self.logs_lvl>=1:
str_to_add = 'File for fit parameters created here:\n%s'%path_to_fit_params_results
self.add_str_to_logs(wid, str_to_add)
def save_fit_params_results_in_file(self, wid, path_to_fit_params_results, spectrum_index):
'''
Save fit results in a csv file.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
path_to_fit_params_results : str
Path to the csv file.
spectrum_index : int
Index of the spectrum.
'''
# Array to be written
tbw = np.array([], dtype='float')
# Put the data0D
for data in self.data0D:
tbw = np.append(tbw, data[spectrum_index])
tbw = np.append(tbw, spectrum_index)
for element in self.elements_fit:
for line in element.lines:
tbw = np.append(tbw, np.round(line.area, 4))
tbw = np.append(tbw, np.round(line.stderr_area, 4))
for peak in line.peaks:
if peak.is_fitpos:
tbw = np.append(tbw, np.round(peak.position, 4))
tbw = np.append(tbw, np.round(peak.stderr_position, 4))
for param_name in self.list_isfit:
tbw = np.append(tbw, np.round(self.result.params[param_name].value, 4))
            # It can happen that the fit converges but does not return stderr
if self.result.params[param_name].stderr is None:
tbw = np.append(tbw, np.nan)
else:
tbw = np.append(tbw, np.round(self.result.params[param_name].stderr, 4))
with open(path_to_fit_params_results, "a+", newline='') as f:
writer = csv.writer(f,delimiter=';')
writer.writerow(tbw)
# Update logs
if self.logs_lvl>=0:
str_to_add = 'Fit results saved in:\n%s'%path_to_fit_params_results
self.add_str_to_logs(wid, str_to_add)
def save_fit_logs_in_file(self, wid, path_to_fit_log_results, spectrum_index):
'''
Save fit logs in a txt file.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
path_to_fit_log_results : str
Path to the log file.
spectrum_index : int
Index of the spectrum.
'''
with open(path_to_fit_log_results, "a+", newline='') as f:
f.write('Fit of spectrum '+str(spectrum_index)+'\n')
if self.is_fit_fail:
f.write('#FIT DID NOT CONVERGE\n')
f.write(fit_report(self.result))
f.write('\n')
f.write('\n')
# Update logs
if self.logs_lvl>=0:
str_to_add = 'Fit logs saved in:\n%s'%path_to_fit_log_results
self.add_str_to_logs(wid, str_to_add)
def get_and_save_all_params_in_files(self, wid):
'''
Get the parameters from the widgets and save them in files.
        Do not update the extraction parameters here; they should be set when extracting the scan.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
# Set the paths of the different saving files
self.set_paths(wid)
############################
# Extraction parameters
# Do not update the extract params from the widgets,
# in case the user changed the parameters in the widget AFTER extracting the scan
# (this would cause a mismatch between the info from the widget and the actual size of the spectrums)
# Save
self.save_extract_params_in_file(wid, self.path_to_extract_params_save)
############################
# Peaks parameters
if self.is_sheet_loaded:
# Validate the sheet (update expt with widget params)
self.validate_sheet(wid)
# Save
self.save_peaks_params_in_file(wid, self.path_to_peaks_params_save)
# Update list of available params files and the widget
self.set_list_peaks_params_files(wid)
wid.select_peaks_params_file.options = self.list_peaks_params_files
# Change the default option of the file selection window to the most recent one
wid.select_peaks_params_file.value = \
[x for x in self.list_peaks_params_files if self.session_id in x][0]
############################
# Fit parameters
# Update expt from the widget params
self.set_fit_params_from_widgets(wid)
# Save
self.save_fit_params_in_file(wid, self.path_to_fit_params_save)
# Update list of available params files and the widget
self.set_list_fit_params_files(wid)
wid.select_fit_params_file.options = self.list_fit_params_files
# Change the default option of the file selection window to the most recent one
wid.select_fit_params_file.value = \
[x for x in self.list_fit_params_files if self.session_id in x][0]
# Update logs
if self.logs_lvl>=1:
            str_to_add = 'Parameters saved in files.'
self.add_str_to_logs(wid, str_to_add)
def plot_fit_curve_from_file(self, path_to_fit_folder, fit_index):
'''
Plot the result curve of a fit after loading it from file.
We do not use the attributes of the current expt because this function should be callable even
after the kernel has been restarted.
Parameters
----------
path_to_fit_folder : str
Relative path to the folder containing the fit curves.
fit_index : int
Index of the fitted spectrum to display.
'''
# Import the fitting curves from file
fit_curve_results = np.genfromtxt(path_to_fit_folder+\
'spectrum_'+str(fit_index)+'.csv', delimiter=';', names=True)
self.eVs = fit_curve_results['eVs']
self.eVs_fit = fit_curve_results['eVs']
self.spectrum_to_fit = fit_curve_results['data']
self.spectrum_model = fit_curve_results['fit']
# Plot the result
self.plot_single_fit(title='Fit of spectrum '+str(fit_index), is_clear_output=False)
def plot_fit_areas_from_file(self, path_to_fit_params_results):
'''
Plot the result areas of a fit after loading it from file.
We do not use the attributes of the current expt because this function should be callable even
after the kernel has been restarted.
Parameters
----------
path_to_fit_params_results : str
Relative path to the csv file with the parameters resulting from the fit.
'''
# Import results and params
fit_results = np.genfromtxt(path_to_fit_params_results, delimiter=';', names=True)
# ['area_Cl_K', 'area_X_xx', 'area_Ar_K', 'area_Compton_Co']
names_area_line = [x for x in fit_results.dtype.names if x.startswith('area_')]
# ['Cl_K', 'X_xx', 'Ar_K', 'Compton_Co']
names_line = [x.split('area_')[1] for x in names_area_line]
# Total number of plots to print
nb_plots = len(names_line)
# Divide the plots in figures of nb_rows plots
# To avoid overflowing the pdf with all plots on one page
nb_rows = 6
if nb_plots > 0:
# Token to print the title only once
is_title = True
for k in range(0, nb_plots, nb_rows):
# Print all the packs of nb_rows plots
if (nb_plots-k)//nb_rows>0:
# Create a new figure for each pack of nb_rows plots
if k%nb_rows == 0:
fig, ax = plt.subplots(figsize=(15,4*nb_rows), nrows=nb_rows)
# ax is a float when there is only one row
if nb_plots == 1:
ax = [ax]
# Plot all the plots of the corresponding pack
for i in range(nb_rows):
ax[i].yaxis.set_major_formatter(FormatStrFormatter('%g'))
ax[i].xaxis.set_major_locator(MaxNLocator(integer=True))
if is_title:
ax[i].set_title('AREAS OF FLUORESCENCE LINES', pad = 15, y=1.)
ax[i].title.set_fontsize(16)
ax[i].title.set_fontweight('bold')
is_title = False
name_area_line = names_area_line[k+i]
name_line = names_line[k+i]
ax[i].plot(fit_results['spectrum_index'],
fit_results[name_area_line],
'r.-')
ax[i].set_ylabel('Area %s [%s]'%(name_line.split('_')[0],name_line.split('_')[1]))
plt.xlabel('Spectrum index')
plt.show()
# Plot the last pack with a number of elements < nb_rows
else:
# Number of plots remaining
nb_plots_left = nb_plots%nb_rows
fig, ax = plt.subplots(figsize=(15,4*nb_plots_left), nrows=nb_plots_left)
if nb_plots_left == 1:
ax = [ax]
for i in range(nb_plots_left):
ax[i].yaxis.set_major_formatter(FormatStrFormatter('%g'))
ax[i].xaxis.set_major_locator(MaxNLocator(integer=True))
if is_title:
ax[i].set_title('AREAS OF FLUORESCENCE LINES', pad = 15, y=1.)
ax[i].title.set_fontsize(16)
ax[i].title.set_fontweight('bold')
is_title = False
name_area_line = names_area_line[k+i]
name_line = names_line[k+i]
ax[i].plot(fit_results['spectrum_index'],
fit_results[name_area_line],
'r.-')
ax[i].set_ylabel('Area %s [%s]'%(name_line.split('_')[0],name_line.split('_')[1]))
plt.xlabel('Spectrum index')
plt.show()
def plot_fit_parameters_from_file(self, path_to_fit_params_save, path_to_fit_params_results):
'''
Plot the result parameters of a fit after loading it from file.
We do not use the attributes of the current expt because this function should be callable even
after the kernel has been restarted.
Parameters
----------
path_to_fit_params_save : str
Relative path to the csv file with the saved fit parameters.
path_to_fit_params_results : str
Relative path to the csv file with the parameters resulting from the fit.
'''
# Import results and params
fit_params = np.genfromtxt(path_to_fit_params_save, delimiter=';', names=True, dtype=None, encoding=None)
fit_results = np.genfromtxt(path_to_fit_params_results, delimiter=';', names=True)
# Construct the list of fitted parameters ('False' if empty)
list_isfit_str = str(fit_params['list_isfit_str']).split(',')
if list_isfit_str[0]!='False':
# Total number of plots to print
nb_plots = len(list_isfit_str)
else:
nb_plots = 0
# Divide the plots in figures of nb_rows plots
# To avoid overflowing the pdf with all plots on one page
nb_rows = 6
if nb_plots > 0:
# Token to print the title only once
is_title = True
for k in range(0, nb_plots, nb_rows):
# Print all the packs of nb_rows plots
if (nb_plots-k)//nb_rows>0:
# Create a new figure for each pack of nb_rows plots
if k%nb_rows == 0:
fig, ax = plt.subplots(figsize=(15,4*nb_rows), nrows=nb_rows)
# ax is a float when there is only one row
if nb_plots == 1:
ax = [ax]
# Plot all the plots of the corresponding pack
for i in range(nb_rows):
ax[i].yaxis.set_major_formatter(FormatStrFormatter('%g'))
ax[i].xaxis.set_major_locator(MaxNLocator(integer=True))
if is_title:
ax[i].set_title('OTHER FIT PARAMETERS', pad = 15, y=1.)
ax[i].title.set_fontsize(16)
ax[i].title.set_fontweight('bold')
is_title = False
name_param = list_isfit_str[k+i]
ax[i].plot(fit_results['spectrum_index'],
fit_results[name_param],
'r.-')
ax[i].set_ylabel(name_param)
                    # Add the xlabel only on the last plot of the pack
plt.xlabel('Spectrum index')
plt.show()
# Plot the last pack with a number of elements < nb_rows
else:
# Number of plots remaining
nb_plots_left = nb_plots%nb_rows
fig, ax = plt.subplots(figsize=(15,4*nb_plots_left), nrows=nb_plots_left)
if nb_plots_left == 1:
ax = [ax]
for i in range(nb_plots_left):
ax[i].yaxis.set_major_formatter(FormatStrFormatter('%g'))
ax[i].xaxis.set_major_locator(MaxNLocator(integer=True))
if is_title:
ax[i].set_title('OTHER FIT PARAMETERS', pad = 15, y=1.)
ax[i].title.set_fontsize(16)
ax[i].title.set_fontweight('bold')
is_title = False
name_param = list_isfit_str[k+i]
ax[i].plot(fit_results['spectrum_index'],
fit_results[name_param],
'r.-')
ax[i].set_ylabel(name_param)
plt.xlabel('Spectrum index')
plt.show()
def plot_fit_positions_from_file(self, path_to_fit_params_results):
'''
Plot the result peaks positions of a fit after loading it from file.
We do not use the attributes of the current expt because this function should be callable even
after the kernel has been restarted.
Parameters
----------
path_to_fit_params_results : str
Relative path to the csv file with the parameters resulting from the fit.
'''
# Import results
fit_results = np.genfromtxt(path_to_fit_params_results, delimiter=';', names=True)
# ['pos_peak_X_xx', 'pos_peak_Ar_KM3']
names_pos_peak = [x for x in fit_results.dtype.names if x.startswith('pos_')]
# ['X_xx', 'Ar_KM3']
names_peak = [x.split('pos_peak_')[1] for x in names_pos_peak]
# Total number of plots to print
nb_plots = len(names_peak)
# Divide the plots in figures of nb_rows plots
# To avoid overflowing the pdf with all plots on one page
nb_rows = 6
if nb_plots > 0:
# Token to print the title only once
is_title = True
for k in range(0, nb_plots, nb_rows):
# Print all the packs of nb_rows plots
if (nb_plots-k)//nb_rows>0:
# Create a new figure for each pack of nb_rows plots
if k%nb_rows == 0:
fig, ax = plt.subplots(figsize=(15,4*nb_rows), nrows=nb_rows)
# ax is a float when there is only one row
if nb_plots == 1:
ax = [ax]
# Plot all the plots of the corresponding pack
for i in range(nb_rows):
ax[i].yaxis.set_major_formatter(FormatStrFormatter('%g'))
ax[i].xaxis.set_major_locator(MaxNLocator(integer=True))
if is_title:
ax[i].set_title('POSITIONS OF FLUORESCENCE PEAKS', pad = 15, y=1.)
ax[i].title.set_fontsize(16)
ax[i].title.set_fontweight('bold')
is_title = False
name_pos_peak = names_pos_peak[k+i]
name_peak = names_peak[k+i]
ax[i].plot(fit_results['spectrum_index'],
fit_results[name_pos_peak],
'r.-')
ax[i].set_ylabel('Position %s [%s] (eV)'%(name_peak.split('_')[0],name_peak.split('_')[1]))
plt.xlabel('Spectrum index')
plt.show()
# Plot the last pack with a number of elements < nb_rows
else:
# Number of plots remaining
nb_plots_left = nb_plots%nb_rows
fig, ax = plt.subplots(figsize=(15,4*nb_plots_left), nrows=nb_plots_left)
if nb_plots_left == 1:
ax = [ax]
for i in range(nb_plots_left):
ax[i].yaxis.set_major_formatter(FormatStrFormatter('%g'))
ax[i].xaxis.set_major_locator(MaxNLocator(integer=True))
if is_title:
                            ax[i].set_title('POSITIONS OF FLUORESCENCE PEAKS', pad = 15, y=1.)
ax[i].title.set_fontsize(16)
ax[i].title.set_fontweight('bold')
is_title = False
name_pos_peak = names_pos_peak[k+i]
name_peak = names_peak[k+i]
ax[i].plot(fit_results['spectrum_index'],
fit_results[name_pos_peak],
'r.-')
ax[i].set_ylabel('Position %s [%s] (eV)'%(name_peak.split('_')[0],name_peak.split('_')[1]))
plt.xlabel('Spectrum index')
plt.show()
def plot_all_fit_results(self, fit_index, path_to_result_folder):
'''
Function to call each individual plotting subfunctions when printing the report.
We do not use the attributes of the current expt because this function should be callable even
after the kernel has been restarted.
Parameters
----------
fit_index : int
Index of the fitted spectrum to display.
path_to_result_folder : str
Relative path to the folder where the results are stored.
'''
# Reconstruct the different paths
# e.g. path_to_result_folder = 'save/SIRIUS_Fluo_2020_02_16_02289/20211102_135304/'
path_to_fit_folder = path_to_result_folder+'fit_curves/'
path_to_fit_params_results = path_to_result_folder+'fit_results.csv'
path_to_fit_params_save = path_to_result_folder+'fit_params.csv'
# Plot the fit resulting curve in the tab
self.plot_fit_curve_from_file(path_to_fit_folder, fit_index)
# Plot the time series of areas in the tab
self.plot_fit_areas_from_file(path_to_fit_params_results)
# Plot the time series of peaks positions in the tab
self.plot_fit_positions_from_file(path_to_fit_params_results)
# Plot the time series of fit parameters in the tab
self.plot_fit_parameters_from_file(path_to_fit_params_save,
path_to_fit_params_results)
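    # Minimal usage sketch (hypothetical index and instance name `expt`; the folder must
    # come from a previous fitting session of this tool):
    #   expt.plot_all_fit_results(
    #       fit_index=0,
    #       path_to_result_folder='save/SIRIUS_Fluo_2020_02_16_02289/20211102_135304/')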
def extract_avg_from_fit(self, wid):
'''
Get averages on fitted parameters and set the corresponding values in the widgets.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
if self.is_last_fit_a_preview:
# Get the results directly from the fit output
fit_params_results = self.result.params
str_to_add = 'Widgets updated with averages from last fit preview.'
else:
# Get the results from file
fit_params_results = np.genfromtxt(self.path_to_fit_params_results, delimiter=';', names=True)
str_to_add = 'Widgets updated with averages from last series of fits.'
if 'gain' in self.list_isfit:
wid.gain.value = np.round(np.nanmean(fit_params_results['gain']),4)
if 'eV0' in self.list_isfit:
wid.eV0.value = np.round(np.nanmean(fit_params_results['eV0']),4)
if 'ct' in self.list_isfit:
wid.ct.value = np.round(np.nanmean(fit_params_results['ct']),4)
if 'sl' in self.list_isfit:
wid.sl.value = np.round(np.nanmean(fit_params_results['sl']),4)
if 'noise' in self.list_isfit:
wid.noise.value = np.round(np.nanmean(fit_params_results['noise']),4)
if 'SF0' in self.list_isfit:
wid.SF0.value = np.round(np.nanmean(fit_params_results['SF0']),4)
if 'TF0' in self.list_isfit:
wid.TF0.value = np.round(np.nanmean(fit_params_results['TF0']),4)
if 'TW0' in self.list_isfit:
wid.TW0.value = np.round(np.nanmean(fit_params_results['TW0']),4)
if 'broad_factor' in self.list_isfit:
wid.broad_factor.value = np.round(np.nanmean(fit_params_results['broad_factor']),4)
if 'lowTF0' in self.list_isfit:
wid.lowTF0.value = np.round(np.nanmean(fit_params_results['lowTF0']),4)
if 'highTF0' in self.list_isfit:
wid.highTF0.value = np.round(np.nanmean(fit_params_results['highTF0']),4)
if 'lowTW0' in self.list_isfit:
wid.lowTW0.value = np.round(np.nanmean(fit_params_results['lowTW0']),4)
if 'highTW0' in self.list_isfit:
wid.highTW0.value = np.round(np.nanmean(fit_params_results['highTW0']),4)
# Update logs
if self.logs_lvl>=0:
self.add_str_to_logs(wid, str_to_add)
def add_str_to_logs(self, wid, str_to_add):
'''
Add a string to the logs and update the logs window.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
str_to_add : str
String to add to the logs.
'''
# Add the current date
date_to_add = _BOLD+datetime.now().strftime('%d/%m/%Y, %H:%M:%S')+_RESET
        # Prepend new entries (most recent first), since there is no way to scroll
        # automatically to the bottom of the output widget
self.logs = date_to_add+ '\n'+ str_to_add + '\n' + self.logs
with wid.out_logs:
wid.out_logs.clear_output()
print(self.logs)
def generate_report(self, wid):
'''
Generate the text for the report.
Parameters
----------
wid : object myWidgets
Object from the class myWidgets.
'''
self.report = ['# '+self.name]
self.report.append('break')
self.report.append('## Session ID: '+self.session_id)
self.report.append('Fit results for file ```%s```'%self.filename)
self.report.append('Parameters and results saved in: \n```%s```'%\
(self.save_dir+self.name+'/'+self.session_id+'/'))
self.report.append('break')
self.report.append('**Extraction parameters**')
self.report.append('Spectrum interval = [%g, %g]'%(self.first_spectrum,self.last_spectrum))
self.report.append('Channel interval = [%g, %g]'%(self.first_channel,self.last_channel))
self.report.append('SDD elements used = %s'%(str(self.SDD_elems_chosen_int)))
self.report.append('break')
self.report.append('**Fit parameters**')
self.report.append('List of fitted parameters: %s'%str(self.list_isfit))
self.report.append('beam energy = %g'%self.beam_energy +'; min. strength = %g'%self.min_strength)
self.report.append('**Params for conversion to eVs**')
self.report.append('gain = %g'%self.gain +'; eV0 = %g'%self.eV0)
self.report.append('**Params for linear background**')
self.report.append('slope = %g'%self.sl+'; constant = %g'%self.ct)
if self.is_bckg_subset:
self.report.append('Fit the background on the subset [%g eV, %g eV]'%(self.bckg_eVs_inf, self.bckg_eVs_sup))
self.report.append('**Params for elastic peaks**')
self.report.append('noise = %g'%self.noise
+'; tail fraction (low energy side) = %g'%self.TF0)
self.report.append('tail width (low energy side) = %g'%self.TW0+'; shelf fraction = %g'%self.SF0)
self.report.append('**Params for Compton peaks**')
self.report.append('broadening factor = %g'%self.broad_factor+'; tail fraction (low energy side) = %g'%self.lowTF0\
+'; tail fraction (high energy side) = %g'%self.highTF0)
self.report.append('tail width (low energy side) = %g'%self.lowTW0+'; tail width (high energy side) = %g'%self.highTW0)
# Update logs
if self.logs_lvl>=0:
str_to_add = 'Report generated.'
self.add_str_to_logs(wid, str_to_add)
|
from django.contrib import admin
from fsdviz.common.utils import fill_color_widget
from .models import (
LifeStage,
Condition,
StockingMethod,
StockingEvent,
Hatchery,
YearlingEquivalent,
)
admin.site.empty_value_display = "(None)"
@admin.register(LifeStage)
class LifeStageModelAdmin(admin.ModelAdmin):
list_display = ("abbrev", "description", "fill_color")
def fill_color(self, obj):
return fill_color_widget(obj.color)
@admin.register(YearlingEquivalent)
class YearlingEquivalentModelAdmin(admin.ModelAdmin):
list_display = ("species", "lifestage", "yreq_factor", "comment")
list_filter = ("species", "lifestage")
search_fields = ("species", "lifestage")
ordering = ("species__common_name", "yreq_factor")
def get_readonly_fields(self, request, obj=None):
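        # Lock the natural key (species, lifestage) when editing an existing record;
        # leave every field editable when creating a new one.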
if obj:
return ("species", "lifestage")
else:
return []
@admin.register(Condition)
class ConditionModelAdmin(admin.ModelAdmin):
list_display = ("condition", "description")
@admin.register(StockingMethod)
class StockingMethodModelAdmin(admin.ModelAdmin):
list_display = ("stk_meth", "description", "fill_color")
search_fields = ("stk_meth", "description")
def fill_color(self, obj):
return fill_color_widget(obj.color)
@admin.register(Hatchery)
class HatcheryModelAdmin(admin.ModelAdmin):
list_display = ("hatchery_name", "abbrev", "hatchery_type", "agency", "active")
list_select_related = ("agency",)
list_filter = (
"active",
"hatchery_type",
"agency",
)
search_fields = ["hatchery_name"]
@admin.register(StockingEvent)
class StockingEventModelAdmin(admin.ModelAdmin):
list_display = (
"species",
"agency",
"lake",
"stateprov",
"year_class",
"agemonth",
"lifestage",
"date",
"site",
"no_stocked",
)
list_select_related = (
"species",
"agency",
"jurisdiction__lake",
"stocking_method",
"lifestage",
"jurisdiction__stateprov",
)
list_filter = ("jurisdiction__lake", "lifestage", "species", "agency", "year")
search_fields = ["site", "stock_id"]
exclude = ["marks", "upload_event", "clip_code"]
fields = [
"stock_id",
"agency_stock_id",
"species",
"strain_raw",
"agency",
"hatchery",
"jurisdiction",
"site",
"st_site",
"grid_10",
"dd_lat",
"dd_lon",
"latlong_flag",
"geom",
"date",
"year",
"month",
"day",
"stocking_method",
"lifestage",
"agemonth",
"length",
"weight",
"year_class",
"fin_clips",
"fish_tags",
"physchem_marks",
"condition",
"no_stocked",
"yreq_stocked",
"notes",
]
date_hierarchy = "date"
view_on_site = True
autocomplete_fields = (
"grid_10",
"species",
"agency",
"stocking_method",
"hatchery",
"fin_clips",
"physchem_marks",
"fish_tags",
)
|
from ec2mc.utils import handle_ip
from ec2mc.utils.base_classes import CommandBase
from ec2mc.utils.find import find_instances
from ec2mc.validate import validate_perms
class CheckServers(CommandBase):
def main(self, cmd_args):
"""check instance status(es)
Args:
cmd_args (namedtuple): See find_instances:add_argparse_args
"""
instances = find_instances.main(cmd_args)
for instance in instances:
print("")
print(f"Checking {instance['name']} ({instance['id']})...")
instance_state, instance_ip = find_instances.get_state_and_ip(
instance['region'], instance['id'])
print(f" Instance is currently {instance_state}.")
if instance_state == "running":
print(f" Instance IP: {instance_ip}")
handle_ip.main(instance, instance_ip)
@classmethod
def add_documentation(cls, argparse_obj):
cmd_parser = super().add_documentation(argparse_obj)
find_instances.add_argparse_args(cmd_parser)
def blocked_actions(self, _):
return validate_perms.blocked(actions=["ec2:DescribeInstances"])
|
#!/usr/local/bin/python3.6
# -*- coding:utf-8 -*-
# ========================================
# Description :
#   Framework bootstrap layer
#   Global parameter definitions, config parameter definitions, default parameter generation, path parameter generation
# Created : 2020.10.14
# Author : Chalk Yu
# ========================================
from __future__ import absolute_import
from types import MethodType, FunctionType
import sys
import os
import inspect
import traceback
import configparser
import logging
import logging.handlers
from dataclasses import dataclass
from .screws import PathPlant, Store, ConfStore
from .tool import Str2Bool, Str2Int, VarGet, FormatMsg
from .definition import __cache__
from .logger_factory import LoggerFactory
# Read initialization parameters from the conf file
class ConfArgs():
def __init__(self, *args, **kwargs):
        # Initialize the config parser
self.__configparser__ = configparser.ConfigParser()
        # Whether multi-environment switching is enabled
self.__ENV_ON = kwargs.get("ENV_ON") or __cache__.env_on
        # Environment type, used to switch between environments
self.__ENV_TYPE = kwargs.get("ENV_TYPE") or __cache__.env_type
        # Configuration directory
self.__ENV_DIR = PathPlant.transAbspath(
kwargs.get('ENV_DIR') or __cache__.env_dir)
if self.__ENV_ON:
self.__CONF_PATH = os.path.join(
self.__ENV_DIR, f'{self.__ENV_TYPE}.ini')
else:
            # Path to the configuration file
self.__CONF_PATH = PathPlant.transAbspath(
kwargs.get('CONF_PATH') or None)
        # Load the configuration
self.loadConf()
def loadConf(self):
if self.__CONF_PATH != None:
self.__configparser__.read(self.__CONF_PATH, encoding="utf-8")
return self.__configparser__
def getArg(self, arg, namespace=__cache__.model_name):
sec = self.__configparser__._sections
if self.__configparser__.has_section(namespace):
conf = sec.get(namespace)
return conf.get(arg.lower())
else:
return None
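    # Hypothetical example of the ini layout getArg()/getArgs() read, where the section
    # name is the namespace and keys are looked up lowercased:
    #   [model_name]
    #   log_on = True
    #   db_driver = postgres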
def getArgs(self, namespace=__cache__.model_name):
sec = self.__configparser__._sections
if self.__configparser__.has_section(namespace):
args = sec.get(namespace)
if args:
return dict(args)
else:
return {}
else:
return {}
def getArgsBySuffix(self, suffix, arg):
namespace = f'{__cache__.conf_section_prefix}_{suffix}'
return self.getArg(arg, namespace=namespace)
def getSections(self, prefix=None):
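        # Return every section name, or only those whose name starts with '<prefix>_' when a prefix is given.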
sec = self.__configparser__._sections
secList = list(sec.keys())
if prefix == None:
return secList
else:
return list(filter(lambda x: (f'{prefix}_' in x) and x.index(f'{prefix}_') == 0, secList))
    # Configuration directory
@property
def ENV_DIR(self):
return self.__ENV_DIR
@ENV_DIR.setter
def ENV_DIR(self, value):
self.__ENV_DIR = PathPlant.transAbspath(value)
        # pass # read/write property
    # Environment type, used to switch between environments
@property
def ENV_TYPE(self):
return self.__ENV_TYPE
@ENV_TYPE.setter
def ENV_TYPE(self, value):
self.__ENV_TYPE = value
        # pass # read-only property
    # Whether multi-environment switching is enabled
@property
def ENV_ON(self):
return self.__ENV_ON
@ENV_ON.setter
def ENV_ON(self, value):
self.__ENV_ON = value
        # pass # read-only property
# Initialization parameters
class GuideArgs(ConfArgs):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# print("GuideArgs", kwargs)
        # Create the parameter dictionary
self.__Args__ = dict()
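        # Each option below is resolved with the precedence: explicit kwarg > value from the
        # conf file > cached default (VarGet is assumed to return its first non-None argument).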
        # Directory where SQL files are stored
self.__Args__['SQL_PATH'] = PathPlant.transAbspath(kwargs.get(
'SQL_PATH') or self.getArg('SQL_PATH') or None)
        # Directory where log files are stored
self.__Args__['LOG_PATH'] = PathPlant.transAbspath(kwargs.get(
'LOG_PATH') or self.getArg('LOG_PATH') or None)
        # Whether to write log files
self.__Args__['LOG_ON'] = VarGet(kwargs.get(
'LOG_ON'), self.getArg('LOG_ON'), __cache__.log_on)
        # Whether debug mode is enabled
self.__Args__['DEBUG'] = VarGet(kwargs.get(
'DEBUG'), self.getArg('DEBUG'), __cache__.debug)
self.__Args__['DEBUG'] = Str2Bool(self.__Args__['DEBUG'])
        # Maximum size of a single log file (MB)
self.__Args__['LOG_MAX_SIZE'] = VarGet(kwargs.get(
'LOG_MAX_SIZE'), self.getArg('LOG_MAX_SIZE'), __cache__.log_max_size)
self.__Args__['LOG_MAX_SIZE'] = Str2Int(self.__Args__['LOG_MAX_SIZE'])
        # Maximum number of log file backups
self.__Args__['LOG_BACKUP_CNT'] = VarGet(kwargs.get(
'LOG_BACKUP_CNT'), self.getArg('LOG_BACKUP_CNT'), __cache__.log_backup_cnt)
self.__Args__['LOG_BACKUP_CNT'] = Str2Int(
self.__Args__['LOG_BACKUP_CNT'])
        # Whether to track SQL files in subfolders
self.__Args__['TRACK_SQL_FILE'] = VarGet(kwargs.get(
'TRACK_SQL_FILE'), self.getArg('TRACK_SQL_FILE'), __cache__.track_sql_file)
self.__Args__['TRACK_SQL_FILE'] = Str2Bool(
self.__Args__['TRACK_SQL_FILE'])
        # Name of the model folder, defaults to 'model'
self.__Args__['MODEL_FOLDER_NAME'] = VarGet(kwargs.get(
'MODEL_FOLDER_NAME'), self.getArg('MODEL_FOLDER_NAME'), __cache__.model_folder_name)
        # Whether to use a Bean to fetch data
self.__Args__['USE_BEAN'] = VarGet(kwargs.get(
'USE_BEAN'), self.getArg('USE_BEAN'), __cache__.use_bean)
        # Whether to roll back automatically on exceptions
self.__Args__['ALLOW_ROLLBACK'] = VarGet(kwargs.get(
"ALLOW_ROLLBACK"), self.getArg('ALLOW_ROLLBACK'), __cache__.allow_rollback)
        # Whether to commit automatically
self.__Args__['AUTO_COMMIT'] = VarGet(kwargs.get(
"AUTO_COMMIT"), self.getArg('AUTO_COMMIT'), __cache__.auto_commit)
        # SQL template type
self.__Args__['SQL_TEMPLATE_TYPE'] = VarGet(kwargs.get(
"SQL_TEMPLATE_TYPE"), self.getArg('SQL_TEMPLATE_TYPE'), __cache__.sql_template_type)
        # Maximum recognizable SQL length
self.__Args__['MAX_SQL_SIZE'] = VarGet(kwargs.get(
"MAX_SQL_SIZE"), self.getArg('MAX_SQL_SIZE'), __cache__.max_sql_size)
        # Default quotation type
self.__Args__['QUOTATION'] = VarGet(kwargs.get(
"QUOTATION"), self.getArg('QUOTATION'), __cache__.quotation)
        # Database driver
self.__Args__['DB_DRIVER'] = VarGet(kwargs.get(
"DB_DRIVER"), self.getArg('DB_DRIVER'), None)
        # Database credentials
self.__Args__['DB_DATABASE'] = VarGet(kwargs.get(
"DB_DATABASE"), self.getArg('DB_DATABASE'), None)
self.__Args__['DB_USER'] = VarGet(kwargs.get(
"DB_USER"), self.getArg('DB_USER'), None)
self.__Args__['DB_PASSWORD'] = VarGet(kwargs.get(
"DB_PASSWORD"), self.getArg('DB_PASSWORD'), None)
self.__Args__['DB_HOST'] = VarGet(kwargs.get(
"DB_HOST"), self.getArg('DB_HOST'), __cache__.db_host)
        # Default port number setting
__default_port = None
if self.__Args__['DB_DRIVER'] == "postgres":
__default_port = 5432
elif self.__Args__['DB_DRIVER'] == "mysql":
__default_port = 3306
        # Set the port
self.__Args__['DB_PORT'] = VarGet(kwargs.get(
"DB_PORT"), self.getArg('DB_PORT'), __default_port, None)
        # Path to the SQLite db file
self.__Args__['SQLITE_PATH'] = VarGet(kwargs.get(
"SQLITE_PATH"), self.getArg('SQLITE_PATH'), None)
        # Database encoding
self.__Args__['ENCODING'] = VarGet(kwargs.get(
"ENCODING"), self.getArg('ENCODING'), __cache__.encoding)
        # Extra sqlalchemy arguments
self.__Args__['SQLALCHEMY_ARGS'] = Store(VarGet(kwargs.get("SQLALCHEMY_ARGS") if type(
kwargs.get("SQLALCHEMY_ARGS")) == dict else None, __cache__.sqlalchemy_args))
        # External override hooks for the loader
# connect()
self.__Args__['RW_CONNECT'] = kwargs.get("RW_CONNECT") if type(
kwargs.get("RW_CONNECT")) == FunctionType else None
# execute()
self.__Args__['RW_EXECUTE'] = kwargs.get("RW_EXECUTE") if type(
kwargs.get("RW_EXECUTE")) == FunctionType else None
# close()
self.__Args__['RW_CLOSE'] = kwargs.get("RW_CLOSE") if type(
kwargs.get("RW_CLOSE")) == FunctionType else None
# commit()
self.__Args__['RW_COMMIT'] = kwargs.get("RW_COMMIT") if type(
kwargs.get("RW_COMMIT")) == FunctionType else None
# rollback()
self.__Args__['RW_ROLLBACK'] = kwargs.get("RW_ROLLBACK") if type(
kwargs.get("RW_ROLLBACK")) == FunctionType else None
# inject()
self.__Args__['RW_INJECT'] = kwargs.get("RW_INJECT") if type(
kwargs.get("RW_INJECT")) == FunctionType else None
# 每次都重新读取sql文件,不进缓存
self.__Args__['HARD_LOAD_SQL'] = VarGet(kwargs.get(
"HARD_LOAD_SQL"), self.getArg('HARD_LOAD_SQL'), __cache__.hard_load_sql)
# 缓存数据库连接,保持数据库连接对象,数据库关闭失效
self.__Args__['CACHE_CONNECT'] = VarGet(kwargs.get(
"CACHE_CONNECT"), self.getArg('CACHE_CONNECT'), __cache__.cache_connect)
# 缓存DB预设
self.__Args__['DB_CONF'] = VarGet(kwargs.get(
"DB_CONF"), self.getArg('DB_CONF'), None)
    # Accessor for the argument dictionary
    @property
    def _Args_(self):
        return self.__Args__
    # Directory where SQL files are stored
    @property
    def SQL_PATH(self):
        return self.__Args__['SQL_PATH']
    @SQL_PATH.setter
    def SQL_PATH(self, value):
        self.__Args__['SQL_PATH'] = PathPlant.transAbspath(value)
        # pass # read/write property
    # Directory where log files are stored
    @property
    def LOG_PATH(self):
        return self.__Args__['LOG_PATH']
    @LOG_PATH.setter
    def LOG_PATH(self, value):
        self.__Args__['LOG_PATH'] = PathPlant.transAbspath(value)
        # pass # read/write property
    # Whether to write a log file
    @property
    def LOG_ON(self):
        return self.__Args__['LOG_ON']
    @LOG_ON.setter
    def LOG_ON(self, value):
        self.__Args__['LOG_ON'] = value
        # pass # read/write property
    # Whether debug mode is enabled
    @property
    def DEBUG(self):
        return self.__Args__['DEBUG']
    @DEBUG.setter
    def DEBUG(self, value):
        self.__Args__['DEBUG'] = value
        # pass # read/write property
    # Maximum size of a single log file (MB)
    @property
    def LOG_MAX_SIZE(self):
        return self.__Args__['LOG_MAX_SIZE']
    @LOG_MAX_SIZE.setter
    def LOG_MAX_SIZE(self, value):
        self.__Args__['LOG_MAX_SIZE'] = value
        # pass # read/write property
    # Maximum number of log backups
    @property
    def LOG_BACKUP_CNT(self):
        # print(self.__Args__)
        return self.__Args__['LOG_BACKUP_CNT']
    @LOG_BACKUP_CNT.setter
    def LOG_BACKUP_CNT(self, value):
        self.__Args__['LOG_BACKUP_CNT'] = value
        # pass # read/write property
    # Whether to track SQL files in subfolders
    @property
    def TRACK_SQL_FILE(self):
        return self.__Args__['TRACK_SQL_FILE']
    @TRACK_SQL_FILE.setter
    def TRACK_SQL_FILE(self, value):
        # self.__Args__['TRACK_SQL_FILE'] = value
        pass # read-only property
    # Name of the model folder; defaults to "model"
    @property
    def MODEL_FOLDER_NAME(self):
        return self.__Args__['MODEL_FOLDER_NAME']
    @MODEL_FOLDER_NAME.setter
    def MODEL_FOLDER_NAME(self, value):
        # self.__Args__['MODEL_FOLDER_NAME'] = value
        pass # read-only property
    # Whether to use a Bean to fetch data
    @property
    def USE_BEAN(self):
        return self.__Args__['USE_BEAN']
    @USE_BEAN.setter
    def USE_BEAN(self, value):
        # self.__Args__['USE_BEAN'] = value
        pass # read-only property
    # Whether to roll back automatically on exceptions
    @property
    def ALLOW_ROLLBACK(self):
        return self.__Args__['ALLOW_ROLLBACK']
    @ALLOW_ROLLBACK.setter
    def ALLOW_ROLLBACK(self, value):
        # self.__Args__['ALLOW_ROLLBACK'] = value
        pass # read-only property
    # Whether to commit automatically
    @property
    def AUTO_COMMIT(self):
        return self.__Args__['AUTO_COMMIT']
    @AUTO_COMMIT.setter
    def AUTO_COMMIT(self, value):
        # self.__Args__['AUTO_COMMIT'] = value
        pass # read-only property
    # SQL template type
    @property
    def SQL_TEMPLATE_TYPE(self):
        return self.__Args__['SQL_TEMPLATE_TYPE']
    @SQL_TEMPLATE_TYPE.setter
    def SQL_TEMPLATE_TYPE(self, value):
        # self.__Args__['SQL_TEMPLATE_TYPE'] = value
        pass # read-only property
    # Maximum recognizable SQL length
    @property
    def MAX_SQL_SIZE(self):
        return self.__Args__['MAX_SQL_SIZE']
    @MAX_SQL_SIZE.setter
    def MAX_SQL_SIZE(self, value):
        # self.__Args__['MAX_SQL_SIZE'] = value
        pass # read-only property
    # Default quotation type
    @property
    def QUOTATION(self):
        return self.__Args__['QUOTATION']
    @QUOTATION.setter
    def QUOTATION(self, value):
        # self.__Args__['QUOTATION'] = value
        pass # read-only property
    # Database driver
    @property
    def DB_DRIVER(self):
        return self.__Args__['DB_DRIVER']
    @DB_DRIVER.setter
    def DB_DRIVER(self, value):
        self.__Args__['DB_DRIVER'] = value
        # pass # read-only property
    # Database credentials
    @property
    def DB_DATABASE(self):
        return self.__Args__['DB_DATABASE']
    @DB_DATABASE.setter
    def DB_DATABASE(self, value):
        self.__Args__['DB_DATABASE'] = value
        # pass # read-only property
    @property
    def DB_USER(self):
        return self.__Args__['DB_USER']
    @DB_USER.setter
    def DB_USER(self, value):
        self.__Args__['DB_USER'] = value
        # pass # read-only property
    @property
    def DB_PASSWORD(self):
        return self.__Args__['DB_PASSWORD']
    @DB_PASSWORD.setter
    def DB_PASSWORD(self, value):
        self.__Args__['DB_PASSWORD'] = value
        # pass # read-only property
    @property
    def DB_HOST(self):
        return self.__Args__['DB_HOST']
    @DB_HOST.setter
    def DB_HOST(self, value):
        self.__Args__['DB_HOST'] = value
        # pass # read-only property
    @property
    def DB_PORT(self):
        return self.__Args__['DB_PORT']
    @DB_PORT.setter
    def DB_PORT(self, value):
        self.__Args__['DB_PORT'] = value
        # pass # read-only property
    # Path to the SQLite database file
    @property
    def SQLITE_PATH(self):
        return self.__Args__['SQLITE_PATH']
    @SQLITE_PATH.setter
    def SQLITE_PATH(self, value):
        self.__Args__['SQLITE_PATH'] = value
        # pass # read-only property
    # Database encoding
    @property
    def ENCODING(self):
        return self.__Args__['ENCODING']
    @ENCODING.setter
    def ENCODING(self, value):
        self.__Args__['ENCODING'] = value
        # pass # read-only property
    # Extra SQLAlchemy arguments
    @property
    def SQLALCHEMY_ARGS(self):
        return self.__Args__['SQLALCHEMY_ARGS']
    @SQLALCHEMY_ARGS.setter
    def SQLALCHEMY_ARGS(self, value):
        self.__Args__['SQLALCHEMY_ARGS'] = value
        # pass # read-only property
    # Externally overridable loader methods
    @property
    def RW_CONNECT(self):
        return self.__Args__['RW_CONNECT']
    @RW_CONNECT.setter
    def RW_CONNECT(self, value):
        # self.__Args__['RW_CONNECT'] = value
        pass # read-only property
    @property
    def RW_EXECUTE(self):
        return self.__Args__['RW_EXECUTE']
    @RW_EXECUTE.setter
    def RW_EXECUTE(self, value):
        # self.__Args__['RW_EXECUTE'] = value
        pass # read-only property
    @property
    def RW_CLOSE(self):
        return self.__Args__['RW_CLOSE']
    @RW_CLOSE.setter
    def RW_CLOSE(self, value):
        # self.__Args__['RW_CLOSE'] = value
        pass # read-only property
    @property
    def RW_COMMIT(self):
        return self.__Args__['RW_COMMIT']
    @RW_COMMIT.setter
    def RW_COMMIT(self, value):
        # self.__Args__['RW_COMMIT'] = value
        pass # read-only property
    @property
    def RW_ROLLBACK(self):
        return self.__Args__['RW_ROLLBACK']
    @RW_ROLLBACK.setter
    def RW_ROLLBACK(self, value):
        # self.__Args__['RW_ROLLBACK'] = value
        pass # read-only property
    @property
    def RW_INJECT(self):
        return self.__Args__['RW_INJECT']
    @RW_INJECT.setter
    def RW_INJECT(self, value):
        # self.__Args__['RW_INJECT'] = value
        pass # read-only property
    # Re-read SQL files on every call instead of caching them
    @property
    def HARD_LOAD_SQL(self):
        return self.__Args__['HARD_LOAD_SQL']
    @HARD_LOAD_SQL.setter
    def HARD_LOAD_SQL(self, value):
        # self.__Args__['HARD_LOAD_SQL'] = value
        pass # read-only property
    # Cache the database connection object; the cache becomes invalid once the database closes it
    @property
    def CACHE_CONNECT(self):
        return self.__Args__['CACHE_CONNECT']
    @CACHE_CONNECT.setter
    def CACHE_CONNECT(self, value):
        # self.__Args__['CACHE_CONNECT'] = value
        pass # read-only property
    # Multi-DB presets
    @property
    def DB_CONF(self):
        return self.__Args__['DB_CONF']
    @DB_CONF.setter
    def DB_CONF(self, value):
        # self.__Args__['DB_CONF'] = value
        pass # read-only property
class InitGuide(GuideArgs, PathPlant):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Module name
        self.module_name = __cache__.model_name if len(args) == 0 else args[0]
        # Create the cache object; each instance gets its own cache
        self.cache = ConfStore(
            # whether all sql data has been cached
            all_sqls_cached=False,
            # call counter for the @entry decorator; only one is allowed at present
            entry_cnt=0,
            # # sql file paths read in a single pass
            # folder_structure=list(),
            # # sql data cached in a single pass
            # sql_str_dict=dict(),
            # cached DB connection headers
            # conn_headstr = list()
        )
        # Dictionary of cached DB connections
        self.conn_cache = dict()
        # Resolve the sql, log and env folder paths
        self.resolvePath()
    # Fetch DB information from the configuration
    def getDBInfoFromConf(self, dbModelName):
        return {
            'DB_DRIVER': None if dbModelName == None else self.getArgsBySuffix(dbModelName, 'DB_DRIVER'),
            'DB_DATABASE': None if dbModelName == None else self.getArgsBySuffix(dbModelName, 'DB_DATABASE'),
            'DB_USER': None if dbModelName == None else self.getArgsBySuffix(dbModelName, 'DB_USER'),
            'DB_PASSWORD': None if dbModelName == None else self.getArgsBySuffix(dbModelName, 'DB_PASSWORD'),
            'DB_HOST': None if dbModelName == None else self.getArgsBySuffix(dbModelName, 'DB_HOST'),
            'DB_PORT': None if dbModelName == None else self.getArgsBySuffix(dbModelName, 'DB_PORT'),
            'ALLOW_ROLLBACK': None if dbModelName == None else self.getArgsBySuffix(dbModelName, 'ALLOW_ROLLBACK'),
            'AUTO_COMMIT': None if dbModelName == None else self.getArgsBySuffix(dbModelName, 'AUTO_COMMIT'),
            'ENCODING': None if dbModelName == None else self.getArgsBySuffix(dbModelName, 'ENCODING'),
        }
    # Get the database connection information
    def getAccessInfo(self, dbModelName):
        if self.DB_CONF == None:
            DB_CONF = self.getDBInfoFromConf(dbModelName)
        else:
            if self.DB_CONF.get(dbModelName) == None:
                DB_CONF = self.getDBInfoFromConf(dbModelName)
            else:
                DB_CONF = self.DB_CONF.get(dbModelName)
        return DB_CONF
    # Get the list of database connection headers
    def getAccessHeadStr(self):
        # From the constructor arguments
        conn_headstr = []
        for model_name in self.DB_CONF:
            conn_headstr.append(model_name)
        # From the configuration file
        for model_name in self.getSections(__cache__.conf_section_prefix):
            conn_headstr.append(model_name)
        # Remove duplicates
        conn_headstr = list(set(conn_headstr))
        self.cache.create('conn_headstr', conn_headstr)
        return conn_headstr
    # Resolve folder paths
    def resolvePath(self, sql_on=False):
        if sql_on:
            # Resolve the sql folder
            if self.SQL_PATH == None:
                self.SQL_PATH = os.path.realpath('sql')
            # self.initFolder(self.SQL_PATH)
            # Write the path to the cache
            __cache__.modify('sql_dir', self.SQL_PATH)
        # Resolve the log folder
        if self.LOG_ON:
            if self.LOG_PATH == None:
                self.LOG_PATH = os.path.realpath('log')
            # self.initFolder(self.LOG_PATH)
            # Write the path to the cache
            __cache__.modify('log_dir', self.LOG_PATH)
        # Resolve the env folder
        if self.ENV_ON:
            if self.ENV_DIR == None:
                self.ENV_DIR = os.path.realpath('env')
            # self.initFolder(self.ENV_DIR)
            # Write the path to the cache
            __cache__.modify('env_dir', self.ENV_DIR)
    # Normalize the sql file extension
    def resolveSqlExtension(self, sqlname):
        return sqlname if sqlname[-4:] == '.sql' else sqlname + '.sql'
    # Look up the sql path by sql file name
    def getSqlPath(self, sqlname):
        # Resolve the extension
        _sqlname = self.resolveSqlExtension(sqlname)
        # Cache the list of sql files
        if self.cache.folder_structure == None:
            self.cacheSqlPaths()
        #
        for sqlpath in self.cache.folder_structure.PATH_LIST:
            if os.path.basename(sqlpath) == _sqlname:
                return sqlpath
        return None
    # Cache the sql file paths
    def cacheSqlPaths(self):
        if not self.cache.folder_structure:
            folder_structure = self.deepenFolder(self.SQL_PATH)
            self.cache.create('folder_structure', folder_structure)
    # Cache the sql strings
    def cacheSqlString(self, sql_path_list):
        sql_str_dict = {}
        for sqlPath in sql_path_list:
            with open(sqlPath, "r", encoding='utf-8') as fs_sql:
                sqlStr = fs_sql.read()
            sql_str_dict[sqlPath] = sqlStr
        self.cache.create('sql_str_dict', sql_str_dict)
    # Resolve the sql file path
    def resolveSqlPath(self, func_name, model_path):
        if self.TRACK_SQL_FILE and self.MODEL_FOLDER_NAME != None:
            # Split off the module path; model_path includes the file name
            preModelPath, relModelPath = self.splitFolder(
                model_path, self.MODEL_FOLDER_NAME, includeModel=False)
            # Full path of the sql file; [:-3] strips the .py extension
            sql_fullpath = os.path.join(
                self.SQL_PATH, relModelPath[:-3], func_name + '.sql')
            return sql_fullpath
        else:
            sql_fullpath = self.getSqlPath(func_name)
            return sql_fullpath
    # Resolve the sql file path from sql_name
    def resolveSqlPathSn(self, sql_name):
        sql_fullpath = os.path.join(
            self.SQL_PATH, self.resolveSqlExtension(sql_name))
        return sql_fullpath
    # Eagerly cache the sql files
    def cacheSqlFiles(self):
        # Read all sql file paths
        self.cacheSqlPaths()
        # Read all sql strings
        self.cacheSqlString(self.cache.folder_structure.PATH_LIST)
        # Mark all sql as cached
        self.cache.modify('all_sqls_cached', True)
    # Eagerly cache the db connections
    def cacheDbConn(self):
        for model_name in self.getAccessHeadStr():
            dbConf = self.getAccessInfo(model_name)
            if self.RW_CONNECT:
                self.conn_cache[model_name] = self.RW_CONNECT(dbConf=dbConf)
            else:
                self.conn_cache[model_name] = self.connect(dbConf=dbConf)
    # Explicitly run the path template wizard (creates the folders)
    def runGuide(self):
        self.initFolder(self.SQL_PATH)
        self.initFolder(self.LOG_PATH)
        self.initFolder(self.ENV_DIR)
    # Console output
    def __print(self, level, message, *msgs):
        # Get the current frame object, i.e. the frame of this logging call
        cur_frame = inspect.currentframe()
        # Get the previous frame object, i.e. the caller
        # bac_frame = cur_frame.f_back
        bac_frame = cur_frame
        # Convert every extra message to a string
        __msgs = tuple(map(lambda m: str(m), msgs))
        # Join them with newlines
        msgStr = '\n'.join(__msgs)
        # message is a plain string
        if type(message) == str:
            # Merge them
            message = message + '\n' + msgStr
            self.__logger.print(level, message, cur_frame=bac_frame)
        else:
            # Exception message
            self.__logger.print(level, msgStr, cur_frame=bac_frame)
            self.__logger.print(level, message, cur_frame=bac_frame)
        # self.__logger.print(level, message, cur_frame=bac_frame)
    # Log file output
    def __logging(self, level, message, *msgs):
        # Get the current frame object, i.e. the frame of this logging call
        cur_frame = inspect.currentframe()
        # Get the previous frame object, i.e. the caller
        # bac_frame = cur_frame.f_back
        bac_frame = cur_frame
        # Convert every extra message to a string
        __msgs = tuple(map(lambda m: str(m), msgs))
        # Join them with newlines
        msgStr = '\n'.join(__msgs)
        # message is a plain string
        if type(message) == str:
            # Merge them
            message = message + '\n' + msgStr
            self.__logger.logging(level, message, cur_frame=bac_frame)
        else:
            # Exception message
            self.__logger.logging(level, msgStr, cur_frame=bac_frame)
            self.__logger.logging(level, message, cur_frame=bac_frame)
    # Print to the console and, when enabled, to the log file
    def logging(self, level, message, *msgs):
        self.__print(level, message, *msgs)
        if self.LOG_ON:
            self.__logging(level, message, *msgs)
    # Initialize logging
    def initLogging(self):
        logPath = None
        if self.LOG_PATH:
            logPath = os.path.join(self.LOG_PATH, self.module_name + ".log")
        #
        self.__logger = LoggerFactory(
            path=logPath,
            console=False,
            debug=self.DEBUG,
            maxMb=self.LOG_MAX_SIZE,
            backupCount=self.LOG_BACKUP_CNT,
            logOn=self.LOG_ON)
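# --- Hedged illustration (not part of the original module) ---
# Every option in GuideArgs.__init__ is resolved as: explicit keyword argument,
# else the config-file value via getArg(), else a cached default. Assuming that
# VarGet simply returns the first of its arguments that is not None, the pattern
# boils down to:
#
#     def _first_not_none(*candidates):
#         for value in candidates:
#             if value is not None:
#                 return value
#         return None
#
#     # e.g. DEBUG = _first_not_none(kwargs.get('DEBUG'), conf_value, cached_default)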
|
from pathlib import Path
from typing import Optional
import typer
from experiment_utils.utils.log_utils import (filter_runs, get_runs,
print_runs, rename_runs)
from rich import print
def show_runs(
dir_name: Optional[str] = typer.Argument(None),
rename: bool = typer.Option(
        False, "-R", help="Rename run directories to include their accuracy; if a threshold is set, only the filtered runs are renamed.",
),
print_parent: bool = typer.Option(False, "-P", help="Print parent name"),
threshold: float = typer.Option(
0, "-t", help="Print only runs with accuracy more than `threshold`"
),
limit: int = typer.Option(0, "-l", help="Print only `limit` lines."),
last: bool = typer.Option(False),
):
"""Print results."""
if dir_name is None:
dir_name = Path.cwd()
else:
dir_name = Path(dir_name)
print(f"log dir: {dir_name}")
runs = get_runs(dir_name, sort=not last)
len_runs = len(runs)
if len_runs == 0:
typer.echo(f"No logs in dir: {dir_name}")
raise typer.Exit()
if last:
limit = limit or 20
print_runs(runs[-limit:], header="last dirs", limit=limit, print_num=True)
raise typer.Exit()
if threshold:
runs = filter_runs(runs, threshold)
thresholded = f", {len(runs)} with acc > {threshold:.2%}"
if len(runs) == 0:
typer.echo(f"{len_runs} runs, no run with threshold {threshold}")
raise typer.Exit()
else:
thresholded = ""
print_runs(
runs,
header=f"{len_runs} log dirs{thresholded}",
limit=limit,
print_parent=print_parent,
)
if rename:
rename_runs(runs, threshold)
if __name__ == "__main__":
typer.run(show_runs)
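# Hedged usage sketch (flag spellings taken from the options above; the script
# name itself is an assumption):
#   python show_runs.py ./logs -t 0.9 -l 10 -P
# would list up to 10 runs found under ./logs with accuracy above 90%, also
# printing each run's parent directory name.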
|
import vrdata
db1 = vrdata.connect('db1')
selected = db1['metadata'].find_one()
print(selected)
|
#============================================
__author__ = "Sachin Mehta"
__license__ = "MIT"
__maintainer__ = "Sachin Mehta"
#============================================
import numpy as np
def cropVolume(img, data=False):
    '''
    Helper function to remove the redundant black area from a 3D volume
    :param img: 3D volume (a nibabel image, or a plain array if data=True)
    :param data: Nib allows you to access the 3D volume data using get_data(). If you have already
           called it before calling this function, pass data=True; otherwise leave it as False
    :return: returns the crop positions across the 3 axes (channel, width and height)
    '''
if not data:
img = img.get_data()
sum_array = []
for ch in range(img.shape[2]):
values, indexes = np.where(img[:, :, ch] > 0)
sum_val = sum(values)
sum_array.append(sum_val)
ch_s = np.nonzero(sum_array)[0][0]
ch_e = np.nonzero(sum_array)[0][-1]
sum_array = []
for width in range(img.shape[0]):
values, indexes = np.where(img[width, :, :] > 0)
sum_val = sum(values)
sum_array.append(sum_val)
wi_s = np.nonzero(sum_array)[0][0]
wi_e = np.nonzero(sum_array)[0][-1]
sum_array = []
for width in range(img.shape[1]):
values, indexes = np.where(img[:, width, :] > 0)
sum_val = sum(values)
sum_array.append(sum_val)
hi_s = np.nonzero(sum_array)[0][0]
hi_e = np.nonzero(sum_array)[0][-1]
return ch_s, ch_e, wi_s, wi_e, hi_s, hi_e |
from distutils.core import setup
setup(name='compiler01',
version='0.0.1',
      description='Learning compiler theory',
author='tor4z',
author_email='[email protected]',
# install_requires=[],
packages=['lex']
)
|
import numpy as np
from sklearn import metrics
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, GaussianNoise
from tensorflow.keras.callbacks import EarlyStopping  # keep all Keras imports on tf.keras to avoid mixing backends
import gc
import warnings
warnings.filterwarnings('ignore')
class SimpleUncompleteAutoencoder:
def __init__(self, df):
self.df = df
self.input_dim = self.df.shape[1]
def Modeling(self, train, dense_dim, batchsize = None, validation_size = None):
if batchsize == None:
raise AssertionError("Batchsize must be defined.")
self.train = train
self.dense_dim = dense_dim
model = Sequential()
model.add(Dense(self.dense_dim, input_dim = self.input_dim, activation = 'relu'))
model.add(Dense(self.input_dim))
model.compile(loss = 'mean_squared_error', optimizer = 'adam')
print(model.summary())
self.model = model
self.model.fit(train, train, batch_size = batchsize, validation_split = validation_size,
verbose = 1, epochs = 50, callbacks = [EarlyStopping(monitor = 'val_loss', patience = 3)])
gc.collect()
def Prediction(self, test_data, data_type = None):
self.test_data = test_data
if data_type == None:
raise AssertionError('Data type must be defined.')
elif data_type == 'Insample':
pred = self.model.predict(self.test_data)
score = np.sqrt(metrics.mean_squared_error(pred, self.test_data))
print("Insample Normal Score (RMSE) : {}".format(score))
return pred
elif data_type == 'OutOfSample':
pred = self.model.predict(self.test_data)
score = np.sqrt(metrics.mean_squared_error(pred, self.test_data))
print('Out of Sample Normal Score (RMSE) : {}'.format(score))
return pred
elif data_type == 'Attack':
pred = self.model.predict(self.test_data)
score = np.sqrt(metrics.mean_squared_error(pred, self.test_data))
print('Attack Underway Score (RMSE) : {}'.format(score))
return pred
class SimpleStackedAutoencoder:
def __init__(self, df):
self.df = df
self.input_dim = self.df.shape[1]
def Modeling(self, train, hidden_dim = None, coding_dim = None, batchsize = None, validation_size = None):
if hidden_dim == None:
raise AssertionError("Hidden Layer Dimension must be defined.")
if coding_dim == None:
raise AssertionError("Coding Layer Dimension must be defined.")
if batchsize == None:
raise AssertionError("Batchsize must be defined.")
self.train = train
self.hidden_dim = hidden_dim
self.coding_dim = coding_dim
model = Sequential()
model.add(Dense(self.hidden_dim, input_dim = self.input_dim, activation = 'relu'))
model.add(Dense(self.coding_dim, activation = 'relu'))
model.add(Dense(self.hidden_dim, activation = 'relu'))
model.add(Dense(self.input_dim))
model.compile(loss = 'mean_squared_error', optimizer = 'adam')
print(model.summary())
self.model = model
self.model.fit(train, train, batch_size = batchsize, validation_split = validation_size,
verbose = 1, epochs = 50, callbacks = [EarlyStopping(monitor = 'val_loss', patience = 3)])
gc.collect()
def Prediction(self, test_data, data_type):
self.test_data = test_data
if data_type == None:
raise AssertionError('Data Type must be defined.')
elif data_type == 'Insample':
pred = self.model.predict(self.test_data)
score = np.sqrt(metrics.mean_squared_error(pred, self.test_data))
print("Insample Normal Score (RMSE) : {}".format(score))
return pred
elif data_type == 'OutOfSample':
pred = self.model.predict(self.test_data)
score = np.sqrt(metrics.mean_squared_error(pred, self.test_data))
print("Out of Sample Normal Score (RMSE) : {}".format(score))
return pred
elif data_type =='Attack':
pred = self.model.predict(self.test_data)
score = np.sqrt(metrics.mean_squared_error(pred, self.test_data))
print("Attack Underway Score (RMSE) : {}".format(score))
return pred
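# --- Hedged usage sketch (not part of the original module) ---
# Typical workflow for the classes above, assuming a numeric feature matrix:
# fit on "normal" data only, then compare reconstruction RMSE on held-out and
# attack data. Shapes and dimensions below are made up for illustration.
#
#     import numpy as np
#     normal = np.random.rand(1000, 20)
#     attack = np.random.rand(200, 20) * 3
#     ae = SimpleUncompleteAutoencoder(normal)
#     ae.Modeling(normal, dense_dim=8, batchsize=64, validation_size=0.1)
#     ae.Prediction(normal, data_type='Insample')
#     ae.Prediction(attack, data_type='Attack')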
class SimpleDenosingAutoencoder:
def __init__(self, df):
self.df = df
self.input_dim = self.df.shape[1]
def Modeling(self, train, hidden_dim = None, coding_dim = None, batchsize = None, validation_size = None, denosing_type = None, std = None):
if hidden_dim == None:
raise AssertionError("Hidden Layer Dimension must be defined.")
if coding_dim == None:
raise AssertionError("Coding Layer Dimension must be defined.")
if batchsize == None:
raise AssertionError("Batchsize must be defined.")
if denosing_type == None:
raise AssertionError("Denosing Type must be Defined. ('Gaussian' or 'Dropout')")
if denosing_type != None:
if denosing_type == "Dropout":
self.train = train
self.hidden_dim = hidden_dim
self.coding_dim = coding_dim
model = Sequential()
model.add(Dense(self.hidden_dim, input_dim = self.input_dim, activation = 'relu'))
model.add(Dropout(0.2))
model.add(Dense(self.coding_dim, activation = 'relu'))
model.add(Dense(self.hidden_dim, activation = 'relu'))
model.add(Dense(self.input_dim))
model.compile(loss = 'mean_squared_error', optimizer = 'adam')
print(model.summary())
self.model = model
self.model.fit(train, train, batch_size = batchsize, validation_split = validation_size,
verbose = 1, epochs = 50, callbacks = [EarlyStopping(monitor = 'val_loss', patience = 3)])
gc.collect()
elif denosing_type == 'Gaussian':
if std == None:
raise AssertionError('Gaussian Noise std must be defined.')
self.train = train
self.hidden_dim = hidden_dim
self.coding_dim = coding_dim
model = Sequential()
model.add(Dense(self.hidden_dim, input_dim = self.input_dim, activation = 'relu'))
model.add(GaussianNoise(std))
model.add(Dense(self.coding_dim, activation = 'relu'))
model.add(Dense(self.hidden_dim, activation = 'relu'))
model.add(Dense(self.input_dim))
model.compile(loss = 'mean_squared_error', optimizer = 'adam')
print(model.summary())
self.model = model
self.model.fit(train, train, batch_size = batchsize, validation_split = validation_size,
verbose = 1, epochs = 50, callbacks = [EarlyStopping(monitor = 'val_loss', patience = 3)])
gc.collect()
def Prediction(self, test_data, data_type):
self.test_data = test_data
if data_type == None:
raise AssertionError('Data Type must be defined.')
elif data_type == 'Insample':
pred = self.model.predict(self.test_data)
score = np.sqrt(metrics.mean_squared_error(pred, self.test_data))
print("Insample Normal Score (RMSE) : {}".format(score))
return pred
elif data_type == 'OutOfSample':
pred = self.model.predict(self.test_data)
score = np.sqrt(metrics.mean_squared_error(pred, self.test_data))
print("Out of Sample Normal Score (RMSE) : {}".format(score))
return pred
elif data_type =='Attack':
pred = self.model.predict(self.test_data)
score = np.sqrt(metrics.mean_squared_error(pred, self.test_data))
print("Attack Underway Score (RMSE) : {}".format(score))
return pred |
# Run this script as a "standalone" script (terminology from the Django
# documentation) that uses the Django ORM to get data from the database.
# This requires django.setup(), which requires the settings for this project.
# Appending the root directory to the system path also prevents errors when
# importing the models from the app.
if __name__ == '__main__':
import sys
import os
import django
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir))
sys.path.append(parent_dir)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "metadataset.settings")
django.setup()
from django.db import transaction
from django.urls import reverse
from publications.models import Attribute, EAV, Publication, Subject
domain = "http://127.0.0.1:8000"
# domain = "https://www.metadataset.com"
"""
Specify a subject, attribute, new unit, new note, and convert_unit() function.
"""
# Specify the subject. If this is a subject with children (e.g., "invasive
# species") then all of its children must have the same attributes as it does.
subject = "invasive species"
# Specify the attribute. This must be a numeric attribute (i.e. an attribute
# with "value_as_number", not "value_as_factor").
attribute = "3.04 Time since management intervention"
# Specify the new unit for this attribute.
new_unit = "days"
# Specify the new note for this attribute (or specify "" to keep the old note).
new_note = "Number of days after intervention for which results were monitored"
# Specify the equation for converting from the old unit to the new unit.
def convert_unit(old_value):
# Specify a condition to test for NA values (e.g., "-999"):
if (old_value < 0): # Example for "-999" for NA and positive values for not NA
new_value = None # Or -999 to keep the old NA value, for example.
else:
# Specify the equation:
new_value = old_value * 365 # Example for converting from years to days
new_value = round(new_value)
return(new_value)
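    # Hedged worked example for convert_unit() above: convert_unit(2) returns 730
    # (2 years -> 730 days), while convert_unit(-999) returns None, and rows whose
    # new value is None are deleted in the loop below.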
with transaction.atomic():
subject = Subject.objects.get(subject=subject)
attributes = Attribute.objects.get(pk=subject.attribute.pk).get_children()
attribute = attributes.get(attribute=attribute)
subjects = subject.get_descendants(include_self=True)
for subject in subjects:
publications = Publication.objects.filter(subject=subject)
eavs = EAV.objects.filter(attribute=attribute, publication_index__in=publications)
for eav in eavs:
path = reverse('publication', args=(), kwargs={
'subject': subject.slug,
'publication_pk': eav.publication_index.pk
})
user = eav.user
print(user)
print(domain + path)
print(eav.value_as_number)
eav.value_as_number = convert_unit(eav.value_as_number)
print(eav.value_as_number)
eav.save()
# Optional: delete this instance if it is None (see convert_unit()).
if (eav.value_as_number is None):
eav.delete()
print("Old attribute unit:", attribute.unit)
print("Old attribute note:", attribute.note)
attribute.unit = new_unit
if (new_note != ""):
attribute.note = new_note
attribute.save()
print("New attribute unit:", attribute.unit)
print("New attribute note:", attribute.note)
|
from netapp.netapp_object import NetAppObject
class NdmpPasswordInfo(NetAppObject):
"""
Information about generate password
When returned as part of the output, all elements of this typedef
are reported, unless limited by a set of desired attributes
specified by the caller.
<p>
When used as input to specify desired attributes to return,
omitting a given element indicates that it shall not be returned
in the output. In contrast, by providing an element (even with
no value) the caller ensures that a value for that element will
be returned, given that the value can be retrieved.
<p>
When used as input to specify queries, any element can be omitted
in which case the resulting set of objects is not constrained by
any specific value of that attribute.
"""
_user_name = None
@property
def user_name(self):
"""
The user for which the password is to be generated.
Attributes: key, non-creatable, non-modifiable
"""
return self._user_name
@user_name.setter
def user_name(self, val):
if val != None:
self.validate('user_name', val)
self._user_name = val
_password = None
@property
def password(self):
"""
The generated NDMP password for the given user. The
command fails if such a user does not exist in the
Vserver context.
Attributes: non-creatable, non-modifiable
"""
return self._password
@password.setter
def password(self, val):
if val != None:
self.validate('password', val)
self._password = val
_vserver_name = None
@property
def vserver_name(self):
"""
The Vserver on which the user password is to be generated
for the given user.
Attributes: key, non-creatable, non-modifiable
"""
return self._vserver_name
@vserver_name.setter
def vserver_name(self, val):
if val != None:
self.validate('vserver_name', val)
self._vserver_name = val
@staticmethod
def get_api_name():
return "ndmp-password-info"
@staticmethod
def get_desired_attrs():
return [
'user-name',
'password',
'vserver-name',
]
def describe_properties(self):
return {
'user_name': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'password': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'vserver_name': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
|
""" distribubot."""
from .version import version as __version__
__all__ = [
'utils',
'distribubot'
] |
import numpy as np
import matplotlib.pyplot as plt
def plot_saved_scores(causality_pipeline):
model = causality_pipeline.stages[3].model
saved_scores = model.decoder.saved
positives, negatives = [
[score for label, _pc, score in saved_scores if label == desired_label]
for desired_label in [True, False]]
plt.xlim([-0.18, 1])
plt.gca().get_yaxis().set_visible(False)
classifier_names = ['Weighted', 'Global', 'Most-frequent', 'Per-connective']
for i, classifier_name in zip(range(4), classifier_names):
offset = .3 * (i + 1)
plt.text(-0.1, -offset - 0.045, classifier_name)
pos = [p[i] for p in positives if not np.isnan(p[i])]
plt.plot(pos, np.zeros_like(pos) - offset, marker='D', color='blue',
fillstyle='none')
neg = [n[i] for n in negatives if not np.isnan(n[i])]
plt.plot(neg, np.zeros_like(neg) - offset - 0.08, marker='o',
color='red', fillstyle='none')
plt.show(block=False)
|
# License: BSD 3-Clause
from .functions import (
attributes_arff_from_df,
check_datasets_active,
create_dataset,
get_dataset,
get_datasets,
list_datasets,
status_update,
list_qualities,
)
from .dataset import OpenMLDataset
from .data_feature import OpenMLDataFeature
__all__ = [
"attributes_arff_from_df",
"check_datasets_active",
"create_dataset",
"get_dataset",
"get_datasets",
"list_datasets",
"OpenMLDataset",
"OpenMLDataFeature",
"status_update",
"list_qualities",
]
|
from django.apps import AppConfig
class WebsheetsConfig(AppConfig):
name = 'websheets'
|
import numpy as np
import matplotlib.pyplot as plt
import numpy.random as rn
from numpy import array as ary
from numpy import sqrt
from numpy.linalg import svd, eig, eigvals, inv, pinv
def set_offdiag(mat, triu, inplace=True):
'''sets the off-diagonal elements of a symmetric matrix when the top triangle's values are given.'''
triu = ary(triu).flatten()
indices = ary(np.triu_indices_from(mat, k=1)).T
if inplace:
for ij, ord in zip(indices, triu):
i,j = ij
mat[i,j] = ord
mat[j,i] = ord
return mat
else:
matcopy = mat.copy()
for ij, ord in zip(indices, triu):
i,j = ij
matcopy[i,j] = ord
matcopy[j,i] = ord
return matcopy
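# Hedged worked example: for a 3x3 zero matrix the upper triangle is filled
# row by row and mirrored, so
#     set_offdiag(np.zeros((3, 3)), [1, 2, 3])
# returns [[0, 1, 2], [1, 0, 3], [2, 3, 0]].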
if __name__=="__main__":
main_diag = [sqrt(1),.01]
covar_mat = np.diag(ary(main_diag, dtype=float))
set_offdiag(covar_mat, [0], inplace=True)
eigval, eigvec = eig(covar_mat)
print("eigval=", eigval)
print("eigvec=\n", eigvec)
xy = rn.multivariate_normal([0,0], covar_mat, size=1000)
x, y = xy.T
ax = plt.subplot()
ax.scatter(x,y)
ax.set_aspect(1) # equal aspect ratio
plt.show()
plt.clf() |
from itertools import takewhile
from math import sin, pi
# The takewhile function in the itertools module will yield elements from an
# iterable as long as a specific criterion (the predicate) is True.
#
# As soon as the predicate is False, iteration is stopped - even if subsequent
# elements would have had a True predicate. So this is not a filter: it simply
# iterates over the iterable for as long as the predicate remains True.
def sine_wave(n):
start = 0
max_ = 2 * pi
step = (max_ - start) / (n-1)
for _ in range(n):
yield round(sin(start), 2)
start += step
print(list(sine_wave(15)))
# [0.0, 0.43, 0.78, 0.97, 0.97, 0.78, 0.43, 0.0, -0.43, -0.78, -0.97, -0.97, -0.78, -0.43, -0.0]
print(list(takewhile(lambda x: 0 <= x <= 0.9, sine_wave(15))))
# [0.0, 0.43, 0.78]
print(list(filter(lambda x: 0 <= x <= 0.9, sine_wave(15))))
# [0.0, 0.43, 0.78, 0.78, 0.43, 0.0, -0.0]
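# A smaller illustration of the same contrast: takewhile stops at the first
# failing element, while filter keeps scanning the whole iterable.
print(list(takewhile(lambda x: x < 3, [1, 2, 5, 1, 2])))
# [1, 2]
print(list(filter(lambda x: x < 3, [1, 2, 5, 1, 2])))
# [1, 2, 1, 2]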
|
#!/usr/bin/env python
import sys
import subprocess
import os
import shutil
FNULL = open(os.devnull, "w")
def help():
print("Usage: cfgmngr [ACTION] [OPTION]")
print("Actions:")
print(" set-repo [URL] - add remote git repository to store your configs")
print(" repo - show current git repository")
print(" add-file [FILE PATH] - add file to storage")
print(" rm-file [FILE NAME] - remove file from storage")
print(" files - show your stored files")
    print(" pull - pull your config files from the remote repository and replace your current files with the downloaded ones")
print(" push - push your saved config files to remote repository")
configDir = "/home/"+os.getlogin()+"/.config/cfgmngr/"
if not os.path.exists(configDir):
os.makedirs(configDir)
if not os.path.exists(configDir + "files/"):
os.makedirs(configDir + "files/")
if not os.path.exists(configDir + "backup/"):
os.makedirs(configDir + "backup/")
if not os.path.exists(configDir + "files/locations"):
open(configDir + "files/locations", "w").close()
def test_repo():
print("Testing repository")
cmd = "cd ~/.config/cfgmngr/files && git ls-remote"
res = subprocess.call(cmd, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if res == 0: print("Fine")
return res == 0
def repo_err():
    print("Can't connect to repository")
def set_repo(repo):
cmd = "cd ~/.config/cfgmngr/files && git remote rm origin; git init; git remote add origin "+repo
res = subprocess.call(cmd, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if res != 0:
repo_err()
return
def get_repo():
cmd = "cd "+configDir+"files/ && git remote -v"
subprocess.call(cmd, shell=True)
def check_exists(fileName):
try:
file = open(os.getcwd() +"/"+ fileName)
except IOError:
return False
else:
file.close()
return True
def remove_username(path):
if(not path.startswith("/home/")):
return path
return "/home/$USER$/"+path[len(os.getlogin())+7:]
def add_username(path):
return path.replace("$USER$", os.getlogin())
def save_file(fileName):
locations = open(configDir + "files/locations", "a")
locations.write(os.path.basename(fileName)+"\n")
locations.write(remove_username(os.getcwd()+"/"+fileName)+"\n")
locations.close()
print("File saved")
def unsave_file(fileName):
    locations = open(configDir + "files/locations")
    lines = locations.read().split('\n')
    locations.close()
    newLines = ""
    for i in range(1, len(lines), 2):
        if lines[i-1] != fileName:
            # keep the name/path pair, preserving the one-entry-per-line format
            newLines += lines[i-1] + "\n" + lines[i] + "\n"
    locations = open(configDir + "files/locations", "w")
locations.write(newLines)
locations.close()
def rm_file(fileName):
unsave_file(fileName)
cmd = "cd "+configDir+"files/ && rm "+fileName
subprocess.call(cmd, shell=True)
def add_file(fileName):
if check_exists(fileName):
save_file(fileName)
else:
print("There is no file like this")
def copy_file(path, name, toConfig):
if toConfig:
shutil.copy(path, configDir+"files/"+name)
else:
shutil.copy(configDir+"files/"+name, path)
def backup_file(path, name):
if os.path.exists(path):
shutil.copy(path, configDir+"backup/"+name)
def push():
print("Packing files")
    locations = open(configDir + "files/locations")
lines = locations.read().split('\n')
locations.close()
for i in range(1, len(lines), 2):
copy_file(add_username(lines[i]), lines[i-1], True)
print("Uploading files")
cmd = "cd "+configDir+"files/"+" && git add . && git commit -m \"test\"; git push -u origin master"
res = subprocess.call(cmd, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if res != 0:
repo_err()
print("Done")
def pull():
print("Downloading files")
cmd = "cd "+configDir+"files/ && git fetch --all; git reset --hard origin/master"
res = subprocess.call(cmd, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if res != 0:
repo_err()
return
print("Moving files")
    locations = open(configDir + "files/locations")
lines = locations.read().split('\n')
locations.close()
for i in range(1, len(lines), 2):
backup_file(add_username(lines[i]), lines[i-1])
copy_file(add_username(lines[i]), lines[i-1], False)
print("Done")
def show_files():
    locations = open(configDir + "files/locations")
lines = locations.read().split('\n')
locations.close()
for i in range(1, len(lines), 2):
print(lines[i-1] + ": "+lines[i])
if len(lines) == 1 or len(lines) == 0:
print("You have no saved files")
if len(sys.argv) == 3:
if sys.argv[1] == "set-repo":
set_repo(sys.argv[2])
elif sys.argv[1] == "add-file":
add_file(sys.argv[2])
elif sys.argv[1] == "rm-file":
rm_file(sys.argv[2])
else: help()
elif len(sys.argv) == 2:
if sys.argv[1] == "repo":
get_repo()
elif sys.argv[1] == "push":
if not test_repo():
repo_err()
exit()
push()
elif sys.argv[1] == "pull":
if not test_repo():
repo_err()
exit()
pull()
elif sys.argv[1] == "files":
show_files()
else: help()
else: help()
|
#!/usr/bin/env python3
# Solution to Project Euler problem 3
import math
def get_primes(limit):
return [idx for idx,b in enumerate(sieve(limit)) if b]
def sieve(limit):
is_prime = [False, True] * ((limit // 2) + (limit % 2))
is_prime[0], is_prime[1], is_prime[2] = False, False, True
i = 3
while i * i < limit:
if is_prime[i]:
for num in range(i*i, limit, i):
is_prime[num] = False
i += 2
return is_prime
def get_factors(num):
    primes = get_primes(int(math.sqrt(num) + 1))
    factors = []
    for prime in primes:
        while num % prime == 0:
            factors.append(prime)
            num //= prime
    if num > 1:
        # whatever remains after dividing out the small primes is itself a prime factor
        factors.append(num)
    return factors
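# Worked example: get_factors(13195) returns [5, 7, 13, 29]; the factors come out
# in ascending order, so the largest prime factor is always the last element.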
def solve():
return get_factors(600851475143)[-1]
if __name__ == "__main__":
print(solve())
|
import sys, unittest, glfw
sys.path.insert(0, '..')
from OpenGL.GL import *
from engine.base.shader import Shader
from engine.base.program import *
import helper
class ProgramTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.window = helper.initAndGetWindow()
@classmethod
def tearDownClass(cls):
glfw.terminate()
def testLinking(self):
try:
program = Program()
program.attachShader(Shader('resources/shaders/test_vert.vs', GL_VERTEX_SHADER))
program.attachShader(Shader('resources/shaders/test_frag.fs', GL_FRAGMENT_SHADER))
program.link()
self.assertEqual(program.getId(), 1)
except RuntimeError:
self.assertTrue(False)
def testLinked(self):
try:
program = getLinkedProgram('resources/shaders/test_vert.vs', 'resources/shaders/test_frag.fs')
self.assertEqual(program.getId(), 1)
except RuntimeError:
self.assertTrue(False)
def testErrorCompile(self):
try:
program = Program()
program.attachShader(Shader('resources/shaders/test_vert.vs', GL_VERTEX_SHADER))
program.attachShader(Shader('resources/shaders/error.fs', GL_FRAGMENT_SHADER))
program.link()
self.assertTrue(False)
except RuntimeError:
self.assertTrue(True)
if __name__ == '__main__':
unittest.main() |
o, b, l = map(float, input().split())
if o < b and o < l:
print("Otavio")
elif b < o and b < l:
print("Bruno")
elif l < o and l < b:
print("Ian")
else:
print("Empate") |
from django.http import HttpResponse, HttpResponseNotFound
from biostar.apps.posts.models import Tag
from biostar.apps.users.models import Profile
from django.shortcuts import redirect
from energyuse.eserver import views as eviews
from django.contrib import messages
def subscribe(request,topic):
context = {}
if request.user.is_authenticated():
context['is_subscribed'] = Profile.objects.filter(user=request.user, tags__name=topic).exists()
if context['is_subscribed']:
profile = Profile.objects.get(user=request.user)
profile.add_tags(profile.watched_tags.replace(', ' + topic, ''))
profile.save()
messages.info(request, "You are now unfollowing: <code>%s</code>." % topic)
else:
profile = Profile.objects.get(user=request.user)
profile.add_tags(profile.watched_tags + ', ' + topic)
profile.save()
messages.info(request, "You are now following: <code>%s</code>." % topic)
return redirect("topic-list", topic=topic)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2017, National University of Ireland and The James Hutton Institute
# Author: Nicholas Waters
#
# This code is part of the riboSeed package, and is governed by its licence.
# Please see the LICENSE file that should have been included as part of
# this package.
"""
Created on Sun Jul 24 19:33:37 2016
See README.md for more info and usage
"""
import argparse
import sys
import time
import random
import os
import shutil
import multiprocessing
import subprocess
import traceback
import pysam
import math
from riboSeed import __version__
from .classes import SeedGenome, LociMapping, Exes, NgsLib
from bisect import bisect
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
# plotting with mpl is deprecated till I can figure out why
# it wont work in screen sessions ( see bioconda issue #6451)
# try:
# import numpy as np
# import matplotlib as mpl
# from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
# from matplotlib.figure import Figure
# import matplotlib.patches as patches
# PLOT = True
# except Exception as e: # most likely an ImportError, but Im not taking chances
# print(e)
# print("\nlooks like you have some issue with matplotlib. " +
# "Classic matplotlib, amirite? Plotting is disabled\n")
# PLOT = False
from .shared_methods import parse_clustered_loci_file, \
combine_contigs, file_len, get_number_mapped, \
keep_only_first_contig, get_fasta_lengths, \
extract_coords_from_locus, add_gb_seqrecords_to_cluster_list, \
set_up_logging, check_version_from_cmd
# GLOBALS
SAMTOOLS_MIN_VERSION = '1.3.1'
def get_args(test_args=None): # pragma: no cover
"""
"""
parser = argparse.ArgumentParser(
prog="ribo seed",
        description="Given cluster file of rDNA regions from riboSelect and " +
        "either paired-end or single-end reads, assembles the mapped reads " +
        "into pseudocontig 'seeds', and uses those with the reads to run " +
"de fere novo and de novo assembly with SPAdes",
add_help=False) # to allow for custom help
parser.add_argument("clustered_loci_txt", action="store",
help="output from riboSelect")
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument("-r", "--reference_genbank",
dest='reference_genbank',
action="store", default='', type=str,
help="genbank reference, used to estimate " +
"insert sizes, and compare with QUAST",
required=True)
requiredNamed.add_argument("-o", "--output", dest='output', action="store",
help="output directory; " +
"default: %(default)s", default=os.getcwd(),
type=str, required=True)
# had to make this faux "optional" parse so that the named required ones
    # above get listed above the other args when displaying the help message
# read libraries
optional = parser.add_argument_group('optional arguments')
optional.add_argument("-F", "--fastq1", dest='fastq1', action="store",
help="forward fastq reads, can be compressed",
type=str, default=None)
optional.add_argument("-R", "--fastq2", dest='fastq2', action="store",
help="reverse fastq reads, can be compressed",
type=str, default=None)
optional.add_argument("-S1", "--fastqS1", dest='fastqS1',
action="store",
help="single fastq reads", type=str, default=None)
# parameters for run
optional.add_argument("-l", "--flanking_length",
help="length of flanking regions, in bp; " +
"default: %(default)s",
default=1000, type=int, dest="flanking")
optional.add_argument("-j", "--just_seed", dest='just_seed',
action="store_true",
default=False,
help="Don't do an assembly, just generate the long" +
" read 'seeds'; default: %(default)s")
optional.add_argument("-e", "--experiment_name", dest='experiment_name',
action="store",
help="prefix for results files; " +
"default: %(default)s",
default="riboSeed", type=str)
optional.add_argument("--mapper", dest='mapper',
action="store", choices=["smalt", "bwa"],
help="available mappers: smalt and bwa; " +
"default: %(default)s",
default='bwa', type=str)
optional.add_argument("-k", "--kmers", dest='kmers', action="store",
default="21,33,55,77,99,127", type=str,
                          help="kmers used for final assembly" +
                          ", separated by commas, such as " +
                          "21,33,55,77,99,127. Can be set to 'auto', where " +
"SPAdes chooses. We ensure kmers are not " +
"too big or too close to read length" +
"; default: %(default)s")
optional.add_argument("-p", "--pre_kmers", dest='pre_kmers',
action="store",
default="21,33,55,77,99", type=str,
                          help="kmers used during seeding assemblies, " +
                          "separated by commas" +
"; default: %(default)s")
optional.add_argument("--force_kmers", dest="force_kmers",
action="store_true",
default=False,
help="skip checking to see if kmerchoice is " +
"appropriate to read length. Sometimes kmers " +
"longer than reads can help in the final assembly," +
" as the long reads generated by riboSeed contain " +
"kmers longer than the read length")
optional.add_argument("-s", "--score_min", dest='score_min',
action="store",
default=None, type=int,
                          help="If using smalt, this sets the '-m' param; " +
                          "default with smalt is inferred from " +
                          "read length. If using BWA, reads mapping with AS " +
"score lower than this will be rejected" +
"; default with BWA is half of read length")
optional.add_argument("-a", "--min_assembly_len", dest='min_assembly_len',
action="store",
default=6000, type=int,
help="if initial SPAdes assembly largest contig " +
"is not at least as long as --min_assembly_len, " +
"reject. Set this to the length of the seed " +
"sequence; if it is not achieved, seeding across " +
"regions will likely fail; default: %(default)s")
optional.add_argument("--include_shorts", dest='include_short_contigs',
action="store_true",
default=False,
help="if assembled contig is smaller than " +
"--min_assembly_len, contig will still be included" +
" in assembly; default: inferred")
optional.add_argument("--damn_the_torpedos", dest='damn_the_torpedos',
action="store_true",
default=False,
help="Ignore certain errors, full speed ahead!")
optional.add_argument("--subtract", dest='subtract',
action="store_true",
default=False,
                          help="if --subtract reads already used in previous " +
"round of subassembly will not be included in " +
"subsequent rounds. This can lead to problems " +
"with multiple mapping and inflated coverage.")
optional.add_argument("--linear",
                          help="if genome is known to not be circular and " +
                          "a region of interest (including flanking bits) " +
                          "extends past chromosome end, this extends the " +
                          "sequence past chromosome origin forward by " +
"--padding; " +
"default: %(default)s",
default=False, dest="linear", action="store_true")
optional.add_argument("-d", "--min_flank_depth",
help="a subassembly will not be performed if this " +
"minimum depth is not achieved on both the 3' and" +
"5' end of the pseudocontig. " +
"default: %(default)s",
default=0, dest="min_flank_depth", type=float)
optional.add_argument("--subassembler", dest='subassembler',
action="store", type=str,
default="spades",
choices=["spades", "skesa"],
help="assembler to use for subassembly scheme. " +
"SPAdes is used by default, but Skesa is a new " +
"addition that seems to work for subassembly " +
"and is faster")
optional.add_argument("--ref_as_contig", dest='ref_as_contig',
action="store", type=str,
default="infer",
choices=["ignore", "infer", "trusted", "untrusted"],
help="ignore: reference will not be used in " +
"subassembly. trusted: SPAdes will use the seed" +
" sequences as a --trusted-contig; untrusted: " +
"SPAdes will treat as --untrusted-contig. " +
"infer: if mapping percentage " +
"over 80%%, 'trusted'; else 'untrusted'." +
" See SPAdes docs for details. default: infer")
optional.add_argument("--additional_libs",
                          help="include this string (usually additional " +
                          "library, but could be other SPAdes args) " +
                          "these libraries in final assembly " +
                          "in addition to the reads supplied as -F and -R. " +
                          "They must be supplied according to SPAdes arg " +
                          "naming scheme. Use at own risk. " +
"default: %(default)s",
dest="additional_libs", type=str)
optional.add_argument("--clean_temps", dest='clean_temps',
default=False, action="store_true",
                          help="if --clean_temps, mapping files will be " +
                          "removed once they are no longer needed during " +
"the mapping iterations to save space; " +
"default: %(default)s")
optional.add_argument("--enable-spades-error-corection",
dest='err_correct', action="store_true",
help="Default behaviour should be to skip read " +
"error correction: http://cab.spbu.ru/benchmarking-tools-for-de-novo-microbial-assembly/ . " +
"This re-enables it" +
"default: %(default)s")
optional.add_argument("--skip_control", dest='skip_control',
action="store_true",
default=False,
help="if --skip_control, no de novo " +
"assembly will be done; default: %(default)s")
optional.add_argument("-i", "--iterations", dest='iterations',
action="store",
default=3, type=int,
help="if iterations>1, multiple seedings will " +
"occur after subassembly of seed regions; " +
"if setting --target_len, seedings will continue " +
"until --iterations are completed or --target_len"
" is matched or exceeded; " +
"default: %(default)s")
optional.add_argument("-v", "--verbosity", dest='verbosity',
action="store",
default=2, type=int, choices=[1, 2, 3, 4, 5],
help="Logger writes debug to file in output dir; " +
"this sets verbosity level sent to stderr. " +
" 1 = debug(), 2 = info(), 3 = warning(), " +
"4 = error() and 5 = critical(); " +
"default: %(default)s")
optional.add_argument("--target_len", dest='target_len', action="store",
default=None, type=float,
help="if set, iterations will continue until " +
"contigs reach this length, or max iterations (" +
"set by --iterations) have been completed. Set as " +
"fraction of original seed length by giving a " +
"decimal between 0 and 5, or set as an absolute " +
"number of base pairs by giving an integer greater" +
" than 50. Not used by default")
optional.add_argument("-z", "--serialize", dest='serialize',
action="store_true",
default=False,
help="if --serialize, runs seeding and assembly " +
"without multiprocessing. This is recommended for " +
"machines with less than 8GB RAM: %(default)s")
optional.add_argument("--consensus", dest='initial_consensus',
action="store_true",
default=False,
                          help="if --initial_consensus, " +
                          "generate an mpileup-based consensus instead of " +
"doing a proper spades subassembly")
optional.add_argument("--smalt_scoring", dest='smalt_scoring',
action="store",
default="match=1,subst=-4,gapopen=-4,gapext=-3",
help="if mapping with SMALT, " +
"submit custom smalt scoring via smalt -S " +
"scorespec option; default: %(default)s")
optional.add_argument("--mapper_args", dest='mapper_args',
action="store",
default="-L 0,0 -U 0 -a",
help="submit custom parameters to mapper. " +
"And by mapper, I mean bwa, cause we dont support " +
"this option for SMALT, sorry. " +
"This requires knowledge of your chosen mapper's " +
"optional arguments. Proceed with caution! " +
"default: %(default)s")
# # TODO Make these check a config file
optional.add_argument("--spades_exe", dest="spades_exe",
action="store", default="spades.py",
help="Path to SPAdes executable; " +
"default: %(default)s")
optional.add_argument("--samtools_exe", dest="samtools_exe",
action="store", default="samtools",
help="Path to samtools executable; " +
"default: %(default)s")
optional.add_argument("--skesa_exe", dest="skesa_exe",
action="store", default="skesa",
help="Path to skesa executable; " +
"default: %(default)s")
optional.add_argument("--smalt_exe", dest="smalt_exe",
action="store", default="smalt",
help="Path to smalt executable;" +
" default: %(default)s")
optional.add_argument("--bwa_exe", dest="bwa_exe",
action="store", default="bwa",
help="Path to BWA executable;" +
" default: %(default)s")
optional.add_argument("--quast_exe", dest="quast_exe",
action="store", default="quast",
help="Path to quast executable; " +
"default: %(default)s")
optional.add_argument("--bcftools_exe", dest="bcftools_exe",
action="store", default="bcftools",
help="Path to bcftools executable; " +
"default: %(default)s")
optional.add_argument("-c", "--cores", dest='cores', action="store",
default=None, type=int,
help="cores to be used" +
"; default: %(default)s")
optional.add_argument("-t", "--threads", dest='threads',
action="store",
default=1, type=int,
choices=[1, 2, 4],
help="if your cores are hyperthreaded, set number" +
" threads to the number of threads per processer." +
"If unsure, see 'cat /proc/cpuinfo' under 'cpu " +
"cores', or 'lscpu' under 'Thread(s) per core'." +
": %(default)s")
optional.add_argument("-m", "--memory", dest='memory', action="store",
default=8, type=int,
help="system memory available" +
"; default: %(default)s")
optional.add_argument('--version', action='version',
version='riboSeed {version}'.format(
version=__version__))
# # had to make this explicitly to call it a faux optional arg
optional.add_argument("-h", "--help",
action="help", default=argparse.SUPPRESS,
help="Displays this help message")
if test_args is None:
args = parser.parse_args(sys.argv[2:])
else:
args = parser.parse_args(test_args)
return args
def last_exception():
""" Returns last exception as a string, or use in logging.
stolen verbatim from pyani
"""
exc_type, exc_value, exc_traceback = sys.exc_info()
return ''.join(traceback.format_exception(exc_type, exc_value,
exc_traceback))
def get_rec_from_generator(recordID, gen, method=None):
""" given a record ID and and SeqIO generator return sequence of
genbank record that has the loci, and call method to refresh generator
If on different sequences, return error
"""
for record in gen:
if recordID == record.id:
if method is not None:
method()
return record
else:
pass
# if none found, raise error
raise ValueError("no record found matching record id %s!" % recordID)
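# --- Hedged illustration (not part of riboSeed) ---
# Any iterator of SeqRecord objects behaves like a SeqIO generator for this
# purpose, e.g.:
#     recs = iter([SeqRecord(Seq("ATGC"), id="chr1"),
#                  SeqRecord(Seq("GGCC"), id="chr2")])
#     get_rec_from_generator("chr2", recs)  # returns the chr2 record;
#                                           # raises ValueError if no id matches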
def get_smalt_full_install_cmds(smalt_exe, logger=None): # pragma: no cover
""" TODO replace this with swg tests for bambamc installation
In the meantime, this looks for the files included with riboSeed
(a bam file, reference, index, and fastq file), and generates the cmds
to run a little test mapping
"""
smalttestdir = os.path.join(os.path.dirname(os.path.dirname(__file__)),
"sample_data",
"smalt_test", "")
assert logger is not None, "Must Use Logging"
logger.debug("looking for smalt test dir: {0}".format(
smalttestdir))
if not os.path.exists(smalttestdir):
        raise FileNotFoundError(
            "Cannot find smalt_test dir containing " +
            "files to verify bambamc install! It should be here: \n%s",
smalttestdir)
ref = os.path.join(smalttestdir, "ref_to_test_bambamc.fasta")
index = os.path.join(smalttestdir, "test_index")
test_bam = os.path.join(smalttestdir, "test_mapping.bam")
test_reads = os.path.join(smalttestdir, "reads_to_test_bambamc.fastq")
testindexcmd = str("{0} index {1} {2}".format(smalt_exe, index, ref))
testmapcmd = str("{0} map -f bam -o {1} {2} {3}".format(smalt_exe,
test_bam,
index,
test_reads))
return([testindexcmd, testmapcmd])
def test_smalt_bam_install(
        cmds, logger=None): # pragma: no cover, cause pragma, no care
    """ using test data that comes with the package, ensure that
    the bambamc library was properly installed with the SMALT installation
"""
assert logger is not None, "must use logger"
    logger.info("testing installation of SMALT and bambamc")
smalttestdir = os.path.join(os.path.dirname(os.path.dirname(__file__)),
"sample_data",
"smalt_test", "")
test_index = os.path.join(smalttestdir, "test_index")
test_bam = os.path.join(smalttestdir, "test_mapping.bam")
for i in cmds:
try:
logger.debug(i)
subprocess.run([i],
shell=sys.platform != "win32",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True)
except:
logger.error(
"Error running test to check bambamc lib is " +
"installed! See github.com/gt1/bambamc " +
"and the smalt install guide for more details." +
"https://sourceforge.net/projects/smalt/files/")
sys.exit(1)
# remove the temp files
os.remove(test_bam)
os.remove(str(test_index + ".sma"))
os.remove(str(test_index + ".smi"))
def check_fastqs_len_equal(file1, file2):
""" using file_len from pyutilsnrw, check that the fastqs contain
    the same number of lines, i.e. that the pairing looks proper.
"""
if file_len(file1) != file_len(file2):
raise ValueError(
"Input Fastq's are of unequal length! Try " +
"fixing with this script: " +
"github.com/enormandeau/Scripts/fastqCombinePairedEnd.py")
def nonify_empty_lib_files(ngsLib, logger=None):
# sometimes, if no singletons are found, we get an empty file.
    # this should weed out any empty read files before mapping, etc.
logger.info("checking for empty read files")
EMPTIES = 0
for f in ["readF", "readR", "readS0"]:
# ignore if lib is None, as those wont be used anyway
if getattr(ngsLib, f) is None:
logger.debug("%s is set to None, and will be ignored", f)
EMPTIES = EMPTIES + 1
continue
if not os.path.exists(getattr(ngsLib, f)):
logger.warning("read file %s not found and can not be used " +
"for mapping!", f)
EMPTIES = EMPTIES + 1
# set to None so mapper will ignore
setattr(ngsLib, f, None)
continue
# if lib is not none but file is of size 0
logger.debug("size of %s: %f", getattr(ngsLib, f),
os.path.getsize(getattr(ngsLib, f)))
if not os.path.getsize(getattr(ngsLib, f)) > 0:
logger.warning("read file %s is empty and will not be used " +
"for mapping!", f)
EMPTIES = EMPTIES + 1
# set to None so mapper will ignore
setattr(ngsLib, f, None)
if EMPTIES == 3:
raise ValueError("None of the read files hold data!")
def map_to_genome_ref_smalt(mapping_ob, ngsLib, cores,
samtools_exe, smalt_exe,
genome_fasta,
score_minimum=None,
scoring="match=1,subst=-4,gapopen=-4,gapext=-3",
step=3, k=5, logger=None): # pragma: no cover
"""run smalt based on pased args
#TODO rework this to read libtype of ngslib object
requires at least paired end input, but can handle an additional library
of singleton reads. Will not work on just singletons
"""
logger.info("Mapping reads to reference genome with SMALT")
# check min score
    assert score_minimum is not None, "must assign score outside map function!"
score_min = score_minimum
logger.debug(str("using a score min of " +
"{0}").format(score_min))
# index the reference
cmdindex = str("{0} index -k {1} -s {2} {3} {3}").format(
smalt_exe, k, step, genome_fasta)
# map paired end reads to reference index
smaltcommands = [cmdindex]
if "pe" in ngsLib.libtype:
cmdmap = str('{0} map -l pe -S {1} ' +
'-m {2} -n {3} -g {4} -f bam -o {5} {6} {7} ' +
'{8}').format(smalt_exe, scoring,
score_min, cores, ngsLib.smalt_dist_path,
mapping_ob.pe_map_bam, genome_fasta,
ngsLib.readF,
ngsLib.readR)
smaltcommands.append(cmdmap)
else:
with open(mapping_ob.pe_map_bam, 'w') as tempfile:
tempfile.write("@HD riboseed_dummy_file")
pass
# if singletons are present, map those too. Index is already made
if ngsLib.readS0 is not None: # and not ignore_singletons:
        # because errors are thrown if there is no file, this writes a placeholder first
cmdmapS = str(
"{0} map -S {1} -m {2} -n {3} -g {4} -f bam -o {5} " +
"{6} {7}").format(smalt_exe, scoring, score_min, cores,
ngsLib.smalt_dist_path, mapping_ob.s_map_bam,
genome_fasta, ngsLib.readS0)
with open(mapping_ob.s_map_bam, 'w') as tempfile:
tempfile.write("@HD riboseed_dummy_file")
# merge together the singleton and pe reads
cmdmergeS = '{0} merge -f {3} {1} {2}'.format(
samtools_exe, mapping_ob.pe_map_bam,
mapping_ob.s_map_bam, mapping_ob.mapped_bam)
smaltcommands.extend([cmdmapS, cmdmergeS])
else:
# if not already none, set to None when ignoring singleton
ngsLib.readS0 = None
        # 'merge', but really this just converts
cmdmerge = str("{0} view -bh {1} >" +
"{2}").format(samtools_exe, mapping_ob.pe_map_bam, mapping_ob.mapped_bam)
smaltcommands.extend([cmdmerge])
logger.info("running SMALT:")
logger.debug("with the following SMALT commands:")
for i in smaltcommands:
logger.debug(i)
subprocess.run(i, shell=sys.platform != "win32",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, check=True)
    # report singleton reads mapped
if ngsLib.readS0 is not None:
logger.info(str("Singleton mapped reads: " +
get_number_mapped(mapping_ob.s_map_bam,
samtools_exe=samtools_exe)))
# report paired reads mapped
if "pe" in ngsLib.libtype:
logger.info(str("PE mapped reads: " +
get_number_mapped(mapping_ob.pe_map_bam,
samtools_exe=samtools_exe)))
    combined_map_string = get_number_mapped(mapping_ob.mapped_bam,
                                            samtools_exe=samtools_exe)
logger.info(str("Combined mapped reads: " + combined_map_string))
# extract overall percentage as a float
map_percentage = float(combined_map_string.split("(")[1].split("%")[0])
# apparently there have been no errors, so mapping success!
ngsLib.mapping_success = True
return map_percentage
def index_sort_BAM(inbam):
try:
pysam.index(inbam)
except pysam.utils.SamtoolsError:
sorted_bam = os.path.join(os.path.dirname(inbam), "temp_sorted.bam")
pysam.sort("-o", sorted_bam, inbam)
inbam = sorted_bam
try:
pysam.index(inbam)
except pysam.utils.SamtoolsError:
raise ValueError("Your bam file is corrupted! No good")
return inbam
def filter_bam_AS(inbam, outsam, score, logger=None):
""" This is needed because bwa cannot filter based n alignment score
for paired reads.
https://sourceforge.net/p/bio-bwa/mailman/message/31968535/
Given a bam file from bwa (has "AS" tags), write out
reads with AS higher than --score to outsam
read count from https://www.biostars.org/p/1890/
"""
notag = 0
written = 0
score_list = []
inbam = index_sort_BAM(inbam)
bam = pysam.AlignmentFile(inbam, "rb")
osam = pysam.Samfile(outsam, 'wh', template=bam)
for read in bam.fetch():
if read.has_tag('AS'):
score_list.append(read.get_tag('AS'))
if read.get_tag('AS') >= score:
osam.write(read)
written = written + 1
else:
pass
else:
notag = notag + 1
pass
bam.close()
logger.debug("Reads after filtering: %i", written)
# if no reads pass the filtering score
if written == 0:
raise ValueError("No reads pass the filtering score! This commonly happens with " +
"short (<65bp) reads. Try rerunning with a decreased --score_min" +
" argument|")
if notag != 0:
logger.debug("Reads lacking alignment score: %i", notag)
return score_list
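# A minimal usage sketch (kept as a comment so it is not executed on import):
# assuming a hypothetical BAM from bwa at "mapped_unfiltered.bam", filter_bam_AS
# would write the reads whose AS tag is >= 75 to "mapped_filtered.sam" and
# return the list of all observed AS scores for later plotting:
#
#   scores = filter_bam_AS(inbam="mapped_unfiltered.bam",
#                          outsam="mapped_filtered.sam",
#                          score=75, logger=logger)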
def get_bam_AS(inbam, logger=None):
""" Return the mappign scores for downstream QC plotting.
"""
assert logger is not None, "must use logging"
score_list = []
count = 0
try:
pysam.index(inbam)
except pysam.utils.SamtoolsError:
raise ValueError("It looks like your bam file is unsorted! No good")
bam = pysam.AlignmentFile(inbam, "rb")
for read in bam.fetch():
count = count + 1
if read.has_tag('AS'):
score_list.append(read.get_tag('AS'))
else:
pass
bam.close()
if len(score_list) != count:
logger.warning("%i reads did not have AS tags",
count - len(score_list))
return score_list
def convert_sam_to_bam(samtools_exe, bam, sam, reverse=False, logger=None):
"""
    because pysam doesn't like to write bams in an iterator, which makes sense
"""
assert logger is not None, "must use logging"
logger.debug("Converting with the following command:")
if not reverse:
cmd = "{0} view -o {1} -bS {2}".format(samtools_exe, bam, sam)
else:
cmd = "{0} view -o {2} -h {1}".format(samtools_exe, bam, sam)
logger.debug(cmd)
subprocess.run([cmd],
shell=sys.platform != "win32",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True)
def make_bwa_map_cmds(mapping_ob, ngsLib, cores,
samtools_exe, bwa_exe, genome_fasta,
add_args='-L 0,0 -U 0 -a', logger=None):
""" make bwa sys commands. maps PE and S reads separately,
then combines them into a X_mapped.bam file
return a list of commands to run.
"""
# index the reference
cmdindex = str("{0} index {1}").format(
bwa_exe, genome_fasta)
# map paired end reads to reference index.
bwacommands = [cmdindex]
if "pe" in ngsLib.libtype:
cmdmap = str('{0} mem -t {1} {2} -k 15 ' +
'{3} {4} {5} | {6} view -bh - | ' +
'{6} sort -o ' +
'{7} - ').format(bwa_exe, # 0
cores, # 1
add_args, # 2
genome_fasta, # 3
ngsLib.readF, # 4
ngsLib.readR, # 5
samtools_exe, # 6
mapping_ob.pe_map_bam) # 7)
bwacommands.append(cmdmap)
else:
assert ngsLib.readS0 is not None, \
str("No readS0 attribute found, cannot run mapping with " +
"any reads in .readS0 or .readF and .readR")
# if singletons are present, map those too. Index is already made
if ngsLib.readS0 is not None: # and not ignore_singletons:
cmdmapS = str(
'{0} mem -t {1} {2} -k 15 ' +
'{3} {4} | {5} view -bh - | ' +
'{5} sort -o {6} - ').format(bwa_exe, # 0
cores, # 1
add_args, # 2
genome_fasta, # 3
ngsLib.readS0, # 4
samtools_exe, # 5
mapping_ob.s_map_bam) # 5)
# merge together the singleton and pe reads, if there are any
if "s_1" == ngsLib.libtype:
cmdmergeS = str(
"{0} view -bh {1} > {2}"
).format(samtools_exe, mapping_ob.s_map_bam, mapping_ob.mapped_bam_unfiltered)
else:
assert ngsLib.libtype == "pe_s", "error parsing libtype"
cmdmergeS = '{0} merge -f {3} {1} {2}'.format(
samtools_exe, mapping_ob.pe_map_bam,
mapping_ob.s_map_bam, mapping_ob.mapped_bam_unfiltered)
bwacommands.extend([cmdmapS, cmdmergeS])
else:
# if not already none, set to None when ignoring singleton
ngsLib.readS0 = None
cmdmerge = str("{0} view -bh {1} > " +
"{2}").format(samtools_exe, mapping_ob.pe_map_bam,
mapping_ob.mapped_bam_unfiltered)
bwacommands.extend([cmdmerge])
return bwacommands
def map_to_genome_ref_bwa(mapping_ob, ngsLib, cores,
samtools_exe, bwa_exe, genome_fasta,
score_minimum=None,
add_args='-L 0,0 -U 0 -a', logger=None):
""" Map to bam. maps PE and S reads separately,
then combines them into a X_mapped.bam file
    TODO: break up into execution and command generation
"""
logger.info("Mapping reads to reference genome with BWA")
    bwacommands = make_bwa_map_cmds(mapping_ob, ngsLib, cores,
                                    samtools_exe, bwa_exe, genome_fasta,
                                    add_args=add_args, logger=logger)
logger.info("running BWA:")
logger.debug("with the following BWA commands:")
for i in bwacommands:
logger.debug(i)
subprocess.run(i, shell=sys.platform != "win32",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, check=True)
    # report singleton reads mapped
if ngsLib.readS0 is not None:
logger.info(str("Singleton mapped reads: " +
get_number_mapped(mapping_ob.s_map_bam,
samtools_exe=samtools_exe)))
# report paired reads mapped
if "pe" in ngsLib.libtype:
logger.info(str("PE mapped reads: " +
get_number_mapped(mapping_ob.pe_map_bam,
samtools_exe=samtools_exe)))
combined_map_string = get_number_mapped(mapping_ob.mapped_bam_unfiltered,
samtools_exe=samtools_exe)
logger.info(str("Combined mapped reads: " + combined_map_string))
# extract overall percentage as a float
try:
map_perc_string = combined_map_string.split("(")[1].split("%")[0]
map_percentage = float(map_perc_string)
except ValueError:
logger.error("Error mapping reads with bwa; line: %s:", map_perc_string)
if map_perc_string == "N/A : N/A)":
raise ValueError(
"Error during mapping reads to reference; please examine " +
"the sam/bam files. This could be to using a extremely " +
"divergent reference or (more likely) using a library that "+
" is not FR oriented. Consider running as a single library")
# check min score
if score_minimum is not None:
score_min = score_minimum
else:
logger.debug(
"no bwa mapping score min provided; default is 1/2 read " +
"length or 50, whichever is greater.")
# hard minimum of 50
score_min = max(int(round(float(ngsLib.readlen) / 2.0)), 50)
logger.debug("using a score minimum of %i", score_min)
logger.debug("filtering mapped reads with an AS score minimum of %i",
score_min)
score_list = filter_bam_AS(inbam=mapping_ob.mapped_bam_unfiltered,
outsam=mapping_ob.mapped_sam,
score=score_min, logger=logger)
convert_sam_to_bam(
bam=mapping_ob.mapped_bam,
sam=mapping_ob.mapped_sam,
samtools_exe=samtools_exe,
reverse=False,
logger=logger)
logger.info(str("Mapped reads after filtering: " +
get_number_mapped(mapping_ob.mapped_bam,
samtools_exe=samtools_exe)))
# apparently there have been no errors, so mapping success!
ngsLib.mapping_success = True
return (map_percentage, score_list, score_min)
def convert_bam_to_fastqs_cmd(mapping_ob, ref_fasta, samtools_exe,
which='mapped', source_ext="_sam",
single=False, logger=None):
"""generate a cmd to convert a bam file to fastq, using samtools
"""
assert which in ['mapped', 'unmapped'], \
"only valid options are mapped and unmapped"
read_path_dict = {'readF': None, 'readR': None, 'readS': None}
logger.debug("preparing to convert extracted reads to make these files:")
for key, value in read_path_dict.items():
read_path_dict[key] = str(os.path.splitext(
mapping_ob.mapped_bam)[0] + "_" + which + key + '.fastq')
assert None not in read_path_dict.values(), \
"Could not properly construct fastq names!"
# if converting mapped reads, get them from the bam file
if which == 'mapped':
source_ext = '_bam'
    # else, leave the default source ext (sam)
else:
pass
if not single:
samfastq = "{0} fastq {1} -1 {2} -2 {3} -s {4}".format(
samtools_exe,
getattr(mapping_ob, str(which + source_ext)),
read_path_dict['readF'],
read_path_dict['readR'],
read_path_dict['readS'])
for key in ['readF', 'readR', 'readS']:
logger.debug(read_path_dict[key])
else:
# This option outputs all the reads in a single fastq
# its needed for low coverage mappings when the F and R
# file may end up empty. Since default behaviour is to
# treat F and R as single libraries anyway, this works
samfastq = "{0} fastq {1} > {2} ".format(
samtools_exe,
getattr(mapping_ob, str(which + source_ext)),
read_path_dict['readS'])
logger.debug(read_path_dict['readS'])
        # Flag the others to be ignored
# read_path_dict['readF'] = None
# read_path_dict['readR'] = None
return(samfastq, NgsLib(name=which, master=False,
logger=logger,
readF=read_path_dict['readF'],
readR=read_path_dict['readR'],
readS0=read_path_dict['readS'],
ref_fasta=ref_fasta))
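# Illustrative sketch (comment only, not executed): for a hypothetical mapping
# object whose mapped_bam is "cluster1_mapped.bam", the paired-end branch above
# builds a samtools command along these lines and pairs it with a fresh NgsLib
# pointing at the three new fastq files:
#
#   samtools fastq cluster1_mapped.bam \
#       -1 cluster1_mapped_mappedreadF.fastq \
#       -2 cluster1_mapped_mappedreadR.fastq \
#       -s cluster1_mapped_mappedreadS.fastq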
def fiddle_with_spades_exe(spades_exe, logger=None):
""" so heres the deal. SPAdes 3.9 can be run with python3.5 and below.
version 3.10 can be run with 3.6 and below. If the version of spades
and the version of execultion doest jive, this will try to correct it.
return (wait for it, this is harebrained) the python executable needed!
"""
assert logger is not None, "must use logging"
assert sys.version_info[0] != 2 and sys.version_info[1] >= 5, \
"how did we get here? cannot use riboSeed with anything less than 3.5"
# spades will throw an error if run with the wrong version of python. So
# we will assume the error means that we are running with an incompatible
# version of python
# SPAdes 3.11.0 : all
# SPAdes 3.10.0 : python3.6 and below
# SPAdes 3.9.0 : python3.5 and below
logger.debug("Making sure python's version is compatible with SPAdes")
logger.debug("Python executable: %s", sys.executable)
logger.debug("SPAdes executable: %s", spades_exe)
try:
        spades_version = check_version_from_cmd(
exe=sys.executable + " " + spades_exe,
cmd='--version', line=1, where='stderr',
pattern=r"\s*v(?P<version>[^(]+)",
min_version="3.9.0", logger=logger)
SPADES_VERSION_SUCCESS = True
except Exception as e:
SPADES_VERSION_SUCCESS = False
logger.debug("failed initial attempt to get spades version")
logger.debug(e)
if not SPADES_VERSION_SUCCESS:
# if python 3.5 or 3.6, we should try to use python3.5 explicitly
# if the user has it
try:
            spades_version = check_version_from_cmd(
exe=shutil.which("python3.5") + " " + spades_exe,
cmd='--version', line=1, where='stderr',
pattern=r"\s*v(?P<version>[^(]+)",
min_version="3.9.0", logger=logger)
SPADES_VERSION_SUCCESS = True
return shutil.which("python3.5")
except Exception as e:
SPADES_VERSION_SUCCESS = False
logger.error(e)
logger.error("There is an apparent mismatch between python" +
" and spades. Check to see if your spades " +
"version is compatible with your python version")
sys.exit(1)
logger.debug("SPAdes version: %s", spades_verison)
return sys.executable
def generate_spades_cmd(
mapping_ob, ngs_ob, ref_as_contig, python_exe, as_paired=True,
addLibs="", prelim=False, k="21,33,55,77,99", spades_exe="spades.py",
single_lib=False, logger=None, check_libs=False, check_exe=True):
"""return spades command so we can multiprocess the assemblies
wrapper for common spades setting for long illumina reads
ref_as_contig should be either None, 'trusted', or 'untrusted'
prelim flag is True, only assembly is run, and without coverage corrections
"""
assert logger is not None, "Must Use Logging"
    # make the kmer argument empty if using "auto", or set it to something
    # like "-k 55,77,99"
    if k != 'auto':
kmers = "-k " + k
else:
kmers = ""
# prepare reference, if being used
if ref_as_contig is not None:
alt_contig = "--{0}-contigs {1}".format(
ref_as_contig, mapping_ob.ref_fasta)
else:
alt_contig = ''
libs = []
if single_lib:
singles = "--pe1-s {0}".format(ngs_ob.readS0)
pairs = ""
libs.append(ngs_ob.readS0)
elif as_paired and ngs_ob.readS0 is not None: # for lib with both
singles = "--pe1-s {0}".format(ngs_ob.readS0)
pairs = "--pe1-1 {0} --pe1-2 {1} ".format(
ngs_ob.readF, ngs_ob.readR)
libs.append(ngs_ob.readS0)
libs.append(ngs_ob.readF)
libs.append(ngs_ob.readR)
elif as_paired and ngs_ob.readS0 is None: # for lib with just PE
singles = ""
pairs = "--pe1-1 {0} --pe1-2 {1}".format(
ngs_ob.readF, ngs_ob.readR)
libs.append(ngs_ob.readF)
libs.append(ngs_ob.readR)
# for libraries treating paired ends as two single-end libs
elif not as_paired and ngs_ob.readS0 is None:
singles = ''
pairs = "--pe1-s {0} --pe2-s {1}".format(
ngs_ob.readF, ngs_ob.readR)
libs.append(ngs_ob.readF)
libs.append(ngs_ob.readR)
else: # for 3 single end libraries
singles = "--pe3-s {0} ".format(ngs_ob.readS0)
pairs = str("--pe1-s {0} --pe2-s {1} ".format(
ngs_ob.readF, ngs_ob.readR))
libs.append(ngs_ob.readS0)
libs.append(ngs_ob.readF)
libs.append(ngs_ob.readR)
reads = str(pairs + singles)
if prelim:
cmd = str(
"{0} --only-assembler --cov-cutoff off --sc --careful {1} " +
"{2} {3} {4} -o {5}"
).format(spades_exe, kmers, reads, alt_contig, addLibs,
mapping_ob.assembly_subdir)
else:
cmd = "{0} --careful {1} {2} {3} {4} -o {5}".format(
spades_exe, kmers, reads, alt_contig, addLibs,
mapping_ob.assembly_subdir)
if check_libs:
spades_cmd = make_assembler_empty_check(liblist=libs, cmd=cmd,
logger=logger)
else:
spades_cmd = cmd
return python_exe + " " + spades_cmd
def generate_skesa_cmd(
mapping_ob, ngs_ob, ref_as_contig, python_exe, as_paired=True,
addLibs="", prelim=False, k="21,33,55,77,99", exe="skesa",
single_lib=False, logger=None, check_libs=False, check_exe=True):
"""return spades command so we can multiprocess the assemblies
wrapper for common spades setting for long illumina reads
ref_as_contig should be either None, 'trusted', or 'untrusted'
prelim flag is True, only assembly is run, and without coverage corrections
"""
assert logger is not None, "Must Use Logging"
# make use default kmers, for now
# if k is not 'auto':
# kmers = "-k " + k
# else:
# kmers = ""
# # prepare reference, if being used
if ref_as_contig is not None:
alt_contig = "--fasta {0} ".format(mapping_ob.ref_fasta)
else:
alt_contig = ''
libs = []
if single_lib:
singles = "--fastq {0}".format(ngs_ob.readS0)
pairs = ""
libs.append(ngs_ob.readS0)
elif as_paired and ngs_ob.readS0 is not None: # for lib with both
singles = "--fastq {0}".format(
ngs_ob.readS0)
pairs = " {0} {1}".format(
ngs_ob.readF,
ngs_ob.readR)
libs.append(ngs_ob.readS0)
libs.append(ngs_ob.readF)
libs.append(ngs_ob.readR)
elif as_paired and ngs_ob.readS0 is None: # for lib with just PE
singles = "--fastq"
pairs = " {0} {1}".format(
ngs_ob.readF,
ngs_ob.readR)
libs.append(ngs_ob.readF)
libs.append(ngs_ob.readR)
# for libraries treating paired ends as two single-end libs
elif not as_paired and ngs_ob.readS0 is None:
singles = '--fastq'
pairs = " {0} {1}".format(
ngs_ob.readF,
ngs_ob.readR)
libs.append(ngs_ob.readF)
libs.append(ngs_ob.readR)
else: # for 3 single end libraries
singles = "--fastq {0}".format(
ngs_ob.readS0)
pairs = " {0} {1}".format(
ngs_ob.readF,
ngs_ob.readR)
# singles = "--pe3-s {0} ".format(ngs_ob.readS0)
# pairs = str("--pe1-s {0} --pe2-s {1} ".format(
# ngs_ob.readF, ngs_ob.readR))
libs.append(ngs_ob.readS0)
libs.append(ngs_ob.readF)
libs.append(ngs_ob.readR)
reads = str(singles + pairs)
# os.makedirs(mapping_ob.assembly_subdir)
# naming convention so we can reuse spades parsing methods
contigs_path = os.path.join(mapping_ob.assembly_subdir,
"contigs.fasta")
paired_string = "--use_paired_ends" if as_paired else ""
if True: # if prelim:
        # which is always, for now -- skesa is too conservative
# for final assemblies
cmd = str(
"{exe} {reads}{addLibs} " + # watch the spaces
"--contigs_out {contigs_path} {paired_string}").format(**locals())
if check_libs:
cmd = make_assembler_empty_check(liblist=libs, cmd=cmd,
logger=logger)
return cmd
def make_assembler_empty_check(liblist, cmd, logger):
""" returns shell/spades cmd as string. All this does is make it a
conditional shell cmd that depends on the presense of the file
needed for assembly. It is needed so we can bin all
the cmds with multiprocessing.
"""
logger.debug("constructing shell file check for subprocess cmd")
prefix = "if "
for i, lib in enumerate(liblist):
if i != 0:
prefix = prefix + "&& "
check = "[ -s {0} ] ".format(lib) # unix test for empty
prefix = prefix + check
suffix = str("; then {0} ; else echo 'input lib not found, " +
"skipping this assembler call' ; fi").format(cmd)
return str(prefix + suffix)
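# Example of the kind of string this returns (illustrative only; the paths are
# made up). For liblist=["readF.fq", "readR.fq"] and cmd="spades.py ...", the
# wrapped command would look roughly like:
#
#   if [ -s readF.fq ] && [ -s readR.fq ] ; then spades.py ... ; \
#       else echo 'input lib not found, skipping this assembler call' ; fi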
def exclude_subassembly_based_on_coverage(clu, iteration, logger=None):
""" if using coverage_exclusion, then return 0 if passing or 2 if not.
if not using this, return None
deal with those excluded from assembly by lack of coverage depth
ie (coverage_exclusion=True)
"""
assert logger is not None, "must use logging"
if clu.coverage_exclusion is not None:
assert clu.coverage_exclusion, \
"this should only be set by the partition_mapping method."
logger.warning("THIS FEATURE IS NOT WELL TESTED! USE WITH TREPIDATION")
# if this is the first iteration, return two
# Otherwise, UPDATED: continue with warning
if iteration > 0:
clu.mappings[iteration].assembled_contig = \
clu.mappings[iteration - 1].assembled_contig
logger.warning(" the coverage is worryingly low, but we " +
"will continue.")
return 0
else:
logger.info("the coverage is worryingly low, and as this " +
"is the first iteration, we must discard this contig")
return 2
else:
return None
def evaluate_spades_success(clu, mapping_ob, proceed_to_target, target_len,
include_short_contigs, min_assembly_len,
flank=1000,
min_delta=10,
keep_best_contig=True,
seqname='', logger=None):
"""return success codes:
0 = include contigs, all good
    DEPRECATED! 1 = include contigs, but don't keep iterating
2 = exclude contigs, and keep from iterating
    3 = exclude contigs, an error occurred
"""
# DANGEROUS_CONTIG_LENGTH_THRESHOLD_FACTOR = 6
prelog = "{0}-{1}-iter-{2}:".format("SEED_cluster", clu.index,
mapping_ob.iteration)
assert logger is not None, "Must Use Logging"
if seqname == '':
seqname = os.path.splitext(os.path.basename(mapping_ob.ref_fasta))[0]
    # check if the cluster has been marked for exclusion during partition_mapping()
cov_exclude_result = exclude_subassembly_based_on_coverage(
clu=clu, iteration=mapping_ob.iteration, logger=logger)
if cov_exclude_result is not None:
return cov_exclude_result
mapping_ob.assembled_contig = os.path.join(
mapping_ob.assembly_subdir, "contigs.fasta")
logger.debug("checking for the following file: \n{0}".format(
mapping_ob.assembled_contig))
# check for a spades failure
if not (os.path.isfile(mapping_ob.assembled_contig) and
os.path.getsize(mapping_ob.assembled_contig) > 0):
logger.warning(
"%s No output from SPAdes this time around! return code 3",
prelog)
return 3
# by default, we keep only the longest, bestest, most fantastic-est contig
if keep_best_contig:
logger.debug("reserving first contig")
try:
keep_only_first_contig(
os.path.join(mapping_ob.assembly_subdir, "contigs.fasta"),
newname=seqname)
except Exception as f:
logger.error(f)
raise f
# -------------------------- --------------------------- #
logger.info("%s analyzing mapping", prelog)
seed_len = get_fasta_lengths(clu.mappings[0].ref_fasta)[0]
# seed_len = get_fasta_lengths(mapping_ob.ref_fasta)[0]
# set proceed_to_target params
if proceed_to_target:
if 5 > target_len > 0:
target_seed_len = int(target_len * seed_len)
elif target_len > 50:
target_seed_len = int(target_len)
else:
logger.error("%s invalid target length provided; must be given " +
"as fraction of total length or as an absolute " +
"number of base pairs greater than 50", prelog)
sys.exit(1)
else:
pass
# compare lengths of reference and freshly assembled contig
contig_len = get_fasta_lengths(mapping_ob.assembled_contig)[0]
ref_len = get_fasta_lengths(mapping_ob.ref_fasta)[0]
contig_length_diff = contig_len - ref_len
logger.info("%s Seed length: %i", prelog, seed_len)
if proceed_to_target:
logger.info("Target length: {0}".format(target_seed_len))
logger.info("%s Length of this iteration's longest contig: %i",
prelog, contig_len)
if mapping_ob.iteration != 0:
logger.info("%s Length of previous longest contig: %i",
prelog, ref_len)
logger.info("%s The new contig differs from the previous " +
"iteration by %i bases", prelog, contig_length_diff)
else:
logger.info("%s The new contig differs from the reference " +
"seed by %i bases", prelog, contig_length_diff)
if contig_len > (ref_len + (2 * flank)):
logger.warning(
"Contig length is exceedingly long! We set the threshold of " +
"twice the flanking length as the maximum allowed long-read " +
"length. This may indicate bad mapping parameters, so the " +
"long-read will be discarded. Return code 2")
return 2
# This cuts failing assemblies short
if min_assembly_len > contig_len:
logger.warning("The first iteration's assembly's best contig " +
"is not greater than length set by " +
"--min_assembly_len. Assembly will likely fail if " +
"the contig does not meet the length of the seed")
# if mapping_ob.iteration > 0:
if include_short_contigs:
logger.warning("Continuing to , but if this occurs for more " +
"than one seed, we reccommend you abort and " +
"retry with longer seeds, a different ref, " +
"or re-examine the riboSnag clustering")
logger.warning("Return code 0")
return 0
else:
logger.warning("Return code 2")
return 2
elif proceed_to_target and contig_len >= target_seed_len:
logger.info("target length threshold! has been reached; " +
"skipping future iterations. return code 0")
return 0
# if not first time through, ensure adequate change between iterations to
# avoid problems with trying to assemble a very small number of reads
elif min_delta > abs(contig_length_diff) and mapping_ob.iteration != 0:
logger.warning(str(
"The length of the assembled contig didn't change more " +
"more than {0}bp between rounds of iteration. Continuing " +
"will likely cause error; skipping future iterations. " +
"return code 0").format(min_delta))
return 0
else:
logger.debug("return code 0")
return 0
def parse_subassembly_return_code(cluster, logger=None):
""" given a return code from the above spades success function,
set object attributes as needed
    20170531 deprecated return code 1: as we have moved to using the
whole library for mapping, not just the unmapped reads, it is more
important to keep in even unchanging contigs to allow for
competition during mapping
----------------------
return success codes:
0 = include contigs, all good
    1 = include contigs, but don't keep iterating
2 = exclude contigs, and keep from iterating
    3 = exclude contigs, an error occurred
"""
assert logger is not None, "must use logging"
if cluster.assembly_success == 3:
# TODO other error handling; make a "failed" counter?
cluster.continue_iterating = False
cluster.keep_contigs = False
elif cluster.assembly_success == 2:
cluster.continue_iterating = False
cluster.keep_contigs = False
elif cluster.assembly_success == 1:
raise ValueError("return code 1 depreciated! a warning can be " +
"issued for short asssemblies, but they must " +
"remain in the pseudogenome")
elif cluster.assembly_success == 0:
cluster.continue_iterating = True
cluster.keep_contigs = True
else:
raise ValueError("Error evaluating spades results return!")
def make_quick_quast_table(pathlist, write=False, writedir=None, logger=None):
""" given paths to two or more quast reports, this generates dictionary
where the key is the field in the report and the value is a list of
    the values for each report. Handy for passing to the logger function.
This skips any fields not in first report, for better or worse...
"""
assert logger is not None, "Must Use Logging"
assert isinstance(pathlist, list) is True,\
"paths for quast reports must be in a list!"
filelist = pathlist
logger.debug("Quast reports to combine: %s", str(filelist))
mainDict = {}
counter = 0
for i in filelist:
if counter == 0:
try:
with open(i, "r") as handle:
for dex, line in enumerate(handle):
row, val = line.strip().split("\t")
if dex in [0]:
continue # skip header
else:
mainDict[row] = [val]
except Exception:
raise ValueError("error parsing %s" % i)
else:
report_list = []
try:
with open(i, "r") as handle:
for dex, line in enumerate(handle):
row, val = line.strip().split("\t")
report_list.append([row, val])
# logger.debug("report list: %s", str(report_list))
for k, v in mainDict.items():
if k in [x[0] for x in report_list]:
mainDict[k].append(
str([x[1] for x in
report_list if x[0] == k][0]))
else:
mainDict[k].append("XX")
except Exception as e:
logger.warning("error parsing %s", i)
raise e
counter = counter + 1
if write:
if writedir is None:
logger.warning("no output dir, cannot write!")
return mainDict
try:
with open(os.path.join(writedir, "combined_quast_report.tsv"),
"w") as outfile:
for k, v in sorted(mainDict.items()):
# logger.debug("{0}\t{1}\n".format(k, str("\t".join(v))))
outfile.write("{0}\t{1}\n".format(
str(k), str("\t".join(v))))
except Exception as e:
raise e
return mainDict
def check_kmer_vs_reads(k, readlen, min_diff=2, logger=None):
assert logger is not None, "must use logging! "
    # ignore this if we are letting spades choose k automatically
    if k == 'auto':
logger.debug("no need to check k, we let spades set k")
return k
try:
klist = [int(x) for x in k.split(",")]
except Exception as e:
logger.error("error splitting kmers by comma!")
logger.error(e)
logger.error(last_exception())
raise ValueError
logger.debug(klist)
new_ks = []
for i in klist:
if i > readlen:
logger.warning("removing %d from list of kmers: exceeds read length",
i)
elif readlen - i <= min_diff:
logger.warning("removing %d from list of kmers: too close " +
"to read length", i)
elif i % 2 == 0:
logger.warning("removing %d from list of kmers: must be odd", i)
else:
new_ks.append(i)
return ",".join([str(x) for x in new_ks])
def make_samtools_depth_cmds(exe, bam, chrom, start, end, region=None, prep=False):
""" this just makes the commands to get the depth from samtools.
    If prep, the bam file gets sorted and indexed first
"""
prep_cmds = []
# cmd = "samtools depth ./iter_1_s_mappi.bam -r scannedScaffolds:5000-6000"
sorted_bam = os.path.join(
os.path.dirname(bam),
str(os.path.splitext(os.path.basename(bam))[0] + "_sorted.bam"))
# sort that bam, just in case
prep_cmds.append(str("{0} sort {1} > {2}").format(exe, bam, sorted_bam))
# index that bam!
prep_cmds.append(str("{0} index {1}").format(exe, sorted_bam))
if prep:
bamfile = sorted_bam
else:
bamfile = bam
# extract the depth stats for a region
if region is None:
depth_cmd = str("{0} depth -r {2}:{3}-{4} {1}").format(
exe, bamfile, chrom, start, end)
else:
depth_cmd = str("{0} depth -r {2} {1}").format(
exe, bamfile, region)
return (prep_cmds, depth_cmd)
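# Sketch of the commands this builds (illustrative paths only). With
# exe="samtools", bam="iter_1_mapping.bam", chrom="scannedScaffolds",
# start=5000, end=6000, region=None and prep=True, the prep commands sort and
# index the bam, and the depth command becomes:
#
#   samtools depth -r scannedScaffolds:5000-6000 iter_1_mapping_sorted.bam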
def parse_samtools_depth_results(result, logger=None):
""" parses out the subprocess results from samtools depth
"""
assert logger is not None, "must use logging"
try:
splits = result.stdout.decode("utf-8").split("\n")[0].split("\t")
if len(splits) != 3:
logger.warning("unable to split the results from samtools depth")
else:
pass
except Exception as e:
raise e
covs = [int(x.split("\t")[2]) for
x in result.stdout.decode("utf-8").split("\n")[0: -1]]
if len(covs) == 0:
if result.returncode != 0:
logger.warning("Error parsing samtools depth results! " +
"Here are the results:")
logger.warning(result)
logger.warning("This isn't always fatal, so we will continue. " +
"but take a look with IGB or similar so there " +
"aren't any suprises down the road.")
return [[""], 0]
average = float(sum(covs)) / float(len(covs))
return [covs, average]
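# The stdout parsed above is the usual three-column samtools depth output
# (reference, position, depth); for example, these two lines (values invented
# for illustration) would yield covs=[12, 15] and an average of 13.5:
#
#   scannedScaffolds    5000    12
#   scannedScaffolds    5001    15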
def get_samtools_depths(samtools_exe, bam, chrom, start, end,
prep=False, region=None, logger=None):
""" Use samtools depth and awk to get the average coverage depth of a
particular region
"""
prep_cmds, depth_cmd = make_samtools_depth_cmds(
exe=samtools_exe, bam=bam, chrom=chrom,
start=start, end=end, region=region, prep=prep)
logger.debug("running the following commands to get coverage:")
if prep:
for i in prep_cmds: # index and sort
logger.debug(i)
subprocess.run(i,
shell=sys.platform != "win32",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True)
else:
pass
# get the results from the depth call
logger.debug(depth_cmd)
result = subprocess.run(depth_cmd,
shell=sys.platform != "win32",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=False)
covs, ave = parse_samtools_depth_results(result, logger)
return [covs, ave]
def prepare_next_mapping(cluster, seedGenome, flank,
logger=None):
"""use within partition mapping funtion;
makes LociMapping, get region coords, write extracted region,
"""
mapping_subdir = os.path.join(
seedGenome.output_root, cluster.cluster_dir_name,
"{0}_cluster_{1}_mapping_iteration_{2}".format(
cluster.sequence_id, cluster.index, seedGenome.this_iteration))
assembly_subdir = os.path.join(
seedGenome.output_root, cluster.cluster_dir_name,
"{0}_cluster_{1}_assembly_iteration_{2}".format(
cluster.sequence_id, cluster.index, seedGenome.this_iteration))
mapping0 = LociMapping(
name="{0}_cluster_{1}".format(
cluster.sequence_id, cluster.index),
iteration=seedGenome.this_iteration,
assembly_subdir_needed=True,
mapping_subdir=mapping_subdir,
assembly_subdir=assembly_subdir)
# if first time through, get the global start and end coords.
if cluster.global_start_coord is None or cluster.global_end_coord is None:
if seedGenome.this_iteration != 0:
raise ValueError(
"global start and end should be defined previously! Exiting")
if sorted([x.start_coord for x in cluster.loci_list]) != \
[x.start_coord for x in cluster.loci_list]:
logger.warning("Coords are not in increasing order; " +
"you've been warned")
start_list = sorted([x.start_coord for x in cluster.loci_list])
logger.debug("Start_list: {0}".format(start_list))
logger.debug("Finding coords to gather reads from the following loci:")
for i in cluster.loci_list:
logger.debug("%s cluster %i -- locus %i -- %s (%i, %i)(%i) %s",
i.sequence_id, cluster.index,
i.index, i.locus_tag,
i.start_coord, i.end_coord, i.strand,
i.product)
# This works as long as coords are never in reverse order
cluster.global_start_coord = min([x.start_coord for
x in cluster.loci_list]) - flank
# if start is negative, just use 1, the beginning of the sequence
if cluster.global_start_coord < 1:
logger.warning(
"Caution! Cannot retrieve full flanking region, as " +
"the 5' flanking region extends past start of " +
"sequence. If this is a problem, try using a smaller " +
"--flanking region, and/or if appropriate, run with " +
"--linear.")
cluster.global_start_coord = 1
cluster.global_end_coord = max([x.end_coord for
x in cluster.loci_list]) + flank
# logger.debug("rec len: %i", len(cluster.seq_record.seq))
if cluster.global_end_coord > len(cluster.seq_record):
logger.warning(
"Caution! Cannot retrieve full flanking region, as " +
"the 5' flanking region extends past start of " +
"sequence. If this is a problem, try using a smaller " +
"--flanking region, and/or if appropriate, run with " +
"--linear.")
cluster.global_end_coord = len(cluster.seq_record)
logger.debug("global start and end: %s %s",
cluster.global_start_coord,
cluster.global_end_coord)
    # if not the first time through, fuhgetaboudit.
    # Ie, the coords have been reassigned by the make_faux_genome function.
    # WE WON'T USE A FLANKING REGION BECAUSE NO FLANKING READS ARE AVAILABLE!
    # meaning, the overhang is gained from the bits that overhang the end of
    # the mapping. Because both SMALT and BWA use soft-clipping by default, we
    # recover and use the clipped regions
else:
logger.info("using coords from previous iterations 'genome'")
logger.debug("Coordinates for %s cluster %i: [%i - %i]",
cluster.seq_record.id,
cluster.index,
cluster.global_start_coord,
cluster.global_end_coord)
cluster.extractedSeqRecord = SeqRecord(
cluster.seq_record.seq[
cluster.global_start_coord:
cluster.global_end_coord])
mapping0.ref_fasta = os.path.join(mapping0.mapping_subdir,
"extracted_seed_sequence.fasta")
with open(mapping0.ref_fasta, "w") as writepath:
SeqIO.write(cluster.extractedSeqRecord, writepath, 'fasta')
cluster.mappings.append(mapping0)
def make_mapped_partition_cmds(cluster, mapping_ob, seedGenome, samtools_exe):
""" returns cmds and region
"""
# Prepare for partitioning
partition_cmds = []
# sort our source bam
sort_cmd = str("{0} sort {1} > {2}").format(
samtools_exe,
seedGenome.iter_mapping_list[seedGenome.this_iteration].mapped_bam,
seedGenome.iter_mapping_list[
seedGenome.this_iteration].sorted_mapped_bam)
# index it
index_cmd = str("{0} index {1}").format(
samtools_exe, seedGenome.iter_mapping_list[
seedGenome.this_iteration].sorted_mapped_bam)
partition_cmds.extend([sort_cmd, index_cmd])
# define the region to extract
region_to_extract = "{0}:{1}-{2}".format(
cluster.sequence_id, cluster.global_start_coord,
cluster.global_end_coord)
    # make a subset of the reads in that region
view_cmd = str("{0} view -o {1} {2} {3}").format(
samtools_exe, mapping_ob.mapped_bam,
seedGenome.iter_mapping_list[
seedGenome.this_iteration].sorted_mapped_bam,
region_to_extract)
partition_cmds.append(view_cmd)
return (partition_cmds, region_to_extract)
def make_unmapped_partition_cmds(mapped_regions, samtools_exe, seedGenome):
""" given a list of regions (formatted for samtools view, etc) make a
list of mapped reads (file path stored under mapped_ids_txt), and
use the cgrep voodoo to make a sam file from the full library without
the mapped reads. returns a cmd as a string
"""
    # starting at the second iteration, copy the previous iteration's
    # mapped_ids_txt as a starting point so we can track the reads better.
unmapped_cmds = []
if seedGenome.this_iteration > 0:
shutil.copyfile(
seedGenome.iter_mapping_list[
seedGenome.this_iteration - 1].mapped_ids_txt,
seedGenome.iter_mapping_list[
seedGenome.this_iteration].mapped_ids_txt)
# moved to filter_bam_as function
make_mapped_sam = "{0} view -o {1} -h {2}".format(
samtools_exe,
seedGenome.iter_mapping_list[seedGenome.this_iteration].mapped_sam,
seedGenome.iter_mapping_list[seedGenome.this_iteration].mapped_bam)
unmapped_cmds.append(make_mapped_sam)
# for each region, add read names in that region to
# a list (taken from previous iteration if there has been one)
for region in mapped_regions:
unmapped_cmds.append(
"{0} view {1} {2} | cut -f1 >> {3}".format(
samtools_exe,
seedGenome.iter_mapping_list[
seedGenome.this_iteration].sorted_mapped_bam,
region,
seedGenome.iter_mapping_list[
seedGenome.this_iteration].mapped_ids_txt))
uniquify_list = "sort -u {0} -o {0}".format(
seedGenome.iter_mapping_list[seedGenome.this_iteration].mapped_ids_txt)
unmapped_cmds.append(uniquify_list)
return unmapped_cmds
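# Illustrative shape of one of the per-region commands returned above (the
# paths and region are invented): append the names of reads mapping to a
# region to the running mapped_ids_txt file, then de-duplicate it in place:
#
#   samtools view iter_0_sorted_mapped.bam scannedScaffolds:4000-7000 \
#       | cut -f1 >> iter_0_mapped_ids.txt
#   sort -u iter_0_mapped_ids.txt -o iter_0_mapped_ids.txt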
def pysam_extract_reads(sam, textfile, unmapped_sam, logger=None):
""" This replaces the "LC_ALL " grep call from the above function. On macs,
there is no speedup gained.
"""
qfile = textfile # contains read names of mapped reads
sfile = sam # mapping of all reads to genome
ofile = unmapped_sam # destination sam of all reads not in regions
nunmapped = 0
total = 0
# Load query fixed strings as a set
with open(qfile, 'r') as qfh:
queries = {q.strip() for q in qfh.readlines() if len(q.strip())}
# Subset reads
samfile = pysam.AlignmentFile(sfile, 'r')
osam = pysam.Samfile(ofile, 'wh', template=samfile)
for read in samfile.fetch():
total = total + 1
if read.qname in queries:
continue
else:
# write out read names not in textfile
nunmapped = nunmapped + 1
osam.write(read)
if logger:
logger.info("Wrote %i unmapped reads of the %i total from %s to %s",
nunmapped, total, sam, ofile)
osam.close()
def partition_mapping(seedGenome, samtools_exe, flank, min_flank_depth,
cluster_list=None, logger=None):
""" Extract interesting stuff based on coords, not a binary
mapped/not_mapped condition
    Also, mark clusters whose flanking coverage depth falls below
    min_flank_depth for exclusion
"""
mapped_regions = []
logger.info("processing mapping for iteration %i",
seedGenome.this_iteration)
for cluster in cluster_list:
prepare_next_mapping(cluster=cluster, seedGenome=seedGenome,
flank=flank, logger=logger)
mapped_regions = []
all_depths = [] # each entry is a tuple (idx, start_ave, end_ave)
filtered_cluster_list = []
for cluster in cluster_list:
mapped_partition_cmds, reg_to_extract = make_mapped_partition_cmds(
cluster=cluster, mapping_ob=cluster.mappings[-1],
seedGenome=seedGenome, samtools_exe=samtools_exe)
for cmd in mapped_partition_cmds:
logger.debug(cmd)
subprocess.run([cmd],
shell=sys.platform != "win32",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True)
start_depths, start_ave_depth = get_samtools_depths(
bam=seedGenome.iter_mapping_list[
seedGenome.this_iteration].sorted_mapped_bam,
chrom=cluster.sequence_id,
start=cluster.global_start_coord,
end=cluster.global_start_coord + flank,
region=None,
prep=False,
samtools_exe=samtools_exe,
logger=logger)
end_depths, end_ave_depth = get_samtools_depths(
bam=seedGenome.iter_mapping_list[
seedGenome.this_iteration].sorted_mapped_bam,
chrom=cluster.sequence_id,
start=cluster.global_end_coord - flank,
end=cluster.global_end_coord,
region=None,
prep=False,
samtools_exe=samtools_exe,
logger=logger)
logger.info("Coverage for cluster " +
"%i:\n\t5' %ibp-region: %.2f \n\t3' %ibp-region: %.2f",
cluster.index,
flank,
start_ave_depth,
flank,
end_ave_depth)
if start_ave_depth < min_flank_depth:
logger.warning(str("cluster {0} has insufficient 5' flanking " +
"coverage depth for subassembly, and will be " +
"removed").format(cluster.index))
cluster.coverage_exclusion = True
elif end_ave_depth < min_flank_depth:
logger.warning(str("cluster {0} has insufficient 3' flanking " +
"coverage depth for subassembly, and will be " +
"removed").format(cluster.index))
cluster.coverage_exclusion = True
else:
mapped_regions.append(reg_to_extract)
filtered_cluster_list.append(cluster)
        # regardless, report stats here
all_depths.append((cluster.index, start_ave_depth, end_ave_depth))
logger.info("mapped regions for iteration %i:\n%s",
seedGenome.this_iteration,
"\n".join([x for x in mapped_regions]))
# make commands to extract all the reads NOT mapping to the rDNA regions
unmapped_partition_cmds = make_unmapped_partition_cmds(
mapped_regions=mapped_regions, samtools_exe=samtools_exe,
seedGenome=seedGenome)
for cmd in unmapped_partition_cmds:
logger.debug(cmd)
subprocess.run([cmd],
shell=sys.platform != "win32",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True)
logger.info("using pysam to extract a subset of reads ")
    # this may look weird: for iteration 0, we extract from the mapping.
# for each one after that, we extract from the previous mapping.
unmapped_reads_index = 0 if seedGenome.this_iteration == 0 \
else seedGenome.this_iteration - 1
pysam_extract_reads(
sam=seedGenome.iter_mapping_list[unmapped_reads_index].mapped_sam,
textfile=seedGenome.iter_mapping_list[
seedGenome.this_iteration].mapped_ids_txt,
unmapped_sam=seedGenome.iter_mapping_list[
seedGenome.this_iteration].unmapped_sam, logger=logger)
# sam_score_list = get_sam_AS
return (all_depths, filtered_cluster_list)
def add_coords_to_clusters(seedGenome, logger=None):
""" given a genbank file and some locus tags, add the coordinates, etc,
to the entry in the seed Genome
"""
for cluster in seedGenome.loci_clusters: # for each cluster of loci
# get seq record that cluster is from
try:
cluster.seq_record = get_rec_from_generator(
recordID=cluster.sequence_id,
gen=seedGenome.seq_records,
method=seedGenome.refresh_seq_rec_generator)
except Exception as e:
raise e
try: # make coord list
extract_coords_from_locus(
cluster=cluster, feature=cluster.feat_of_interest,
logger=logger)
except Exception as e:
raise e
logger.debug("Here are the detected region,coords, strand, product, " +
"locus tag, subfeatures and sequence id of the results:")
logger.debug(str(cluster.__dict__))
def bool_run_quast(quast_exe, logger):
if sys.version_info.minor == 6:
logger.warning("QUAST only supports python3.5 and below. We are " +
"skipping QUAST evalutation")
return False
if quast_exe is None:
logger.warning("QUAST executable not provided or available. We are " +
"skipping QUAST evalutation")
return False
try:
quast_version = check_version_from_cmd(
exe=sys.executable + " " + quast_exe,
cmd='--version', line=1, where='stdout',
pattern=r".* v(?P<version>[^\n,]+)",
min_version="4.0", logger=logger,
coerce_two_digit=False)
if quast_version == "4.5":
logger.warning("Due to bugs in QUAST 4.5, we will not run QUAST")
return False
except Exception as e:
logger.error(e)
logger.warning("Error occured while trying to check QUAST version." +
"We are skipping QUAST evalutation")
return False
return True
def make_quast_command(exes, output_root, ref, assembly_subdir, name,
logger=None):
assert logger is not None, "must use logging"
quast_cmd = str("{0} {1} {2} {3} -o {4}").format(
exes.python,
exes.quast,
ref,
os.path.join(assembly_subdir, "contigs.fasta"),
os.path.join(output_root, str("quast_" + name)))
return quast_cmd
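# Example of the resulting command string (illustrative paths, assuming
# exes.python and exes.quast point at a python interpreter and quast.py, and
# that the caller passes ref already prefixed with "-R"):
#
#   python3.5 quast.py -R reference.fasta \
#       final_de_fere_novo_assembly/contigs.fasta -o output_root/quast_de_fere_novo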
def get_final_assemblies_cmds(seedGenome, exes,
ref_as_contig,
additional_libs,
err_correct,
cores,
memory,
serialize,
skip_control=True,
kmers="21,33,55,77,99", logger=None):
"""make cmds for runnning of SPAdes and QUAST final assembly and analysis.
if skip_control, just do the de fere novo assembly. otherwise, do bother
returns list of listed cmds
([[spades_cmd, quast_cmd], [spades_cmd2, quast_cmd2]])
"""
logger.info("\n\nStarting Final Assemblies\n\n")
quast_reports = []
cmd_list = []
final_list = ["de_fere_novo"]
if not skip_control:
final_list.append("de_novo")
for j in final_list:
final_mapping = LociMapping(
iteration=0,
name=j,
mapping_subdir=os.path.join(
seedGenome.output_root,
"final_{0}_mapping".format(j)),
assembly_subdir_needed=True,
assembly_subdir=os.path.join(
seedGenome.output_root,
"final_{0}_assembly".format(j)))
# logger.info("\n\nRunning %s SPAdes \n" % j)
if j == "de_novo":
final_mapping.ref_fasta = ''
assembly_ref_as_contig = None
else:
assert j == "de_fere_novo", \
"Only valid cases are de novo and de fere novo!"
final_mapping.ref_fasta = seedGenome.assembled_seeds
assembly_ref_as_contig = ref_as_contig
# remove unneeded dir
os.rmdir(final_mapping.mapping_subdir)
logger.info("Getting commands for %s SPAdes" % j)
if True: # args.subassembler == "spades":
spades_cmd = generate_spades_cmd(
single_lib=seedGenome.master_ngs_ob.libtype == "s_1",
check_libs=False,
python_exe=exes.python,
mapping_ob=final_mapping, ngs_ob=seedGenome.master_ngs_ob,
ref_as_contig=assembly_ref_as_contig, as_paired=True, prelim=False,
k=kmers, spades_exe=exes.spades, logger=logger)
modest_spades_cmd = make_modest_assembler_cmd(
assembler="spades",
cmd=spades_cmd, cores=cores, memory=memory, split=len(final_list),
serialize=serialize, logger=logger)
## add additional cmdline args for assembly
if additional_libs is not None:
modest_spades_cmd = "{0} {1}".format(
modest_spades_cmd, additional_libs)
if not err_correct:
modest_spades_cmd = "{0} --only-assembler".format(
modest_spades_cmd)
subassembly_cmd = modest_spades_cmd
ref = str("-R %s" % seedGenome.ref_fasta)
quast_cmd = make_quast_command(
exes=exes, output_root=seedGenome.output_root, ref=ref,
assembly_subdir=final_mapping.assembly_subdir, name=j,
logger=logger)
if bool_run_quast(exes.quast, logger):
quast_reports.append(
os.path.join(seedGenome.output_root,
str("quast_" + j), "report.tsv"))
cmd_list.append([modest_spades_cmd, quast_cmd])
else:
quast_reports = None
cmd_list.append([modest_spades_cmd, None])
return(cmd_list, quast_reports)
def check_spades_extra_library_input(inp):
""" check user supplied args against SPAdes args.
This is pretty brittle, cause its experts only for now.
will raise an error if any funny business is detected
It will not however check your syntax, ensure you have correct pairs, etc.
You are in the deep end now.
"""
scary_chars = [";", "|", "&"]
for char in scary_chars:
if char in inp:
raise ValueError("'%s' cannot be part of lib input" %char)
single_valid = "--s"
simple_valid = ["--sanger", "--pacbio", "--nanopore"]
comp_valid_prefixes = ["--pe", "--mp", "--hqmp", "--nxmate"]
comp_valid_suffixes = ["1", "12", "2", "s", "ff", "fr", "rf"]
splitcmds = inp.split(" ")
cmds_dict = {}
MARKER = True
# attempt to separate out args into key value pairs.
try:
for i, cmd in enumerate(splitcmds):
if MARKER:
cmds_dict[cmd] = splitcmds[i + 1]
MARKER = not MARKER
except IndexError:
raise IndexError("All args must have exactly 1 value; no flags!")
for cmd, value in cmds_dict.items():
VALID = False
# check if its a single library (coded as --s#)
if cmd[:-1] == single_valid:
            # does it have a valid numeric library identifier?
for i in range(1, 10):
if cmd == single_valid + str(i):
VALID = True
# check if its a simple arg
elif cmd in simple_valid:
VALID = True
# check if its a compound arg
else:
# does it have a valid prefix?
for pref in comp_valid_prefixes:
if cmd.startswith(pref):
                    # does it have a valid numeric library identifier?
for i in range(1, 10):
if cmd.startswith(pref + str(i)):
# does it have a valid suffix?
for suff in comp_valid_suffixes:
if cmd == (pref + str(i) + "-" + suff):
VALID = True
break
if not VALID:
raise ValueError("Invalid spades arg %s" % cmd)
def make_faux_genome(cluster_list, seedGenome, iteration,
output_root, nbuff, logger=None):
""" stictch together viable assembled contigs. perhaps more importnatly,
this also re-write thes coords relative to the new "genome"
returns path to new faux_genome
"""
logger.info("preparing extracted region genome for next round of mapping")
logger.debug("using %i sequences", len(cluster_list))
nbuffer = "N" * nbuff
faux_genome = ""
counter = 0
new_seq_name = seedGenome.name
if not cluster_list:
return 1
for clu in cluster_list:
if not clu.keep_contigs or not clu.continue_iterating:
pass
else:
clu.global_start_coord = len(faux_genome) + nbuff
with open(clu.mappings[-1].assembled_contig, 'r') as con:
contig_rec = list(SeqIO.parse(con, 'fasta'))[0]
faux_genome = str(faux_genome + nbuffer + contig_rec.seq)
clu.global_end_coord = len(faux_genome)
# lastly, set cluster name to new sequence name
clu.sequence_id = new_seq_name
counter = counter + 1
if counter == 0:
logger.warning("No viable contigs for faux genome construction!")
return 1
else:
logger.info("combined %s records as genome for next round of mapping",
counter)
record = SeqRecord(Seq(str(faux_genome + nbuffer),
IUPAC.IUPACAmbiguousDNA()),
id=new_seq_name)
outpath = os.path.join(output_root,
"iter_{0}_buffered_genome.fasta".format(iteration))
with open(outpath, 'w') as outf:
SeqIO.write(record, outf, 'fasta')
return (outpath, len(record))
def decide_proceed_to_target(target_len, logger=None):
assert logger is not None, "Must use logging!"
if target_len is not None:
if not target_len > 0 or not isinstance(target_len, float):
logger.error("--target_len is set to invalid value! Must be a " +
"decimal greater than zero, ie where 1.1 would be " +
"110% of the original sequence length.")
raise ValueError
elif 50 > target_len > 5:
logger.error("We dont reccommend seeding to lengths greater than" +
"5x original seed length. Try between 0.5 and 1.5." +
" If you are setting a target number of bases, it " +
" must be greater than 50")
raise ValueError
else:
proceed_to_target = True
else:
proceed_to_target = False
return proceed_to_target
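# Behaviour sketch (values chosen for illustration): target_len=None returns
# False (no length-based early stop); target_len=1.1 returns True and is later
# treated as 110% of the seed length; target_len=200.0 returns True and is
# treated as an absolute 200bp target; target_len=10.0 raises ValueError.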
def subprocess_run_list(cmdlist, hard=False, logger=None):
""" This just allows for sequential cmds with multiprocessing.
It prevents the errors when future commands are looking for and not finding
a needed file.
    Logger can't be used with multiprocessing
returns 0 if all is well, otherwise returns 1
if hard == True, quits instead of returning 1
"""
for cmd in cmdlist:
if cmd is not None:
try:
subprocess.run([cmd],
shell=sys.platform != "win32",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True)
except Exception as e:
if logger:
logger.error(e)
if hard:
sys.exit(1)
else:
return 1
return 0
def copy_to_handy_dir(outdir, pre, ref_gb, seedGenome,
skip_control=False,
hard=False, logger=None):
""" copy the resulting contigs and reference to dir for mauving
"""
assert logger is not None, "Must use logging"
os.makedirs(outdir)
files_to_copy = [
os.path.join(seedGenome.output_root,
"final_de_fere_novo_assembly", "contigs.fasta"),
os.path.join(seedGenome.output_root,
"final_de_novo_assembly", "contigs.fasta"),
ref_gb]
new_names = [pre + "_de_fere_novo_contigs.fasta",
pre + "_de_novo_contigs.fasta",
pre + ".gb"]
if skip_control:
new_names = [new_names[i] for i in [0, 2]]
files_to_copy = [files_to_copy[i] for i in [0, 2]]
for idx, f in enumerate(files_to_copy):
logger.debug("copying %s to %s as %s",
f,
os.path.join(outdir, os.path.basename(f)),
new_names[idx])
try:
shutil.copyfile(
f,
os.path.join(outdir, new_names[idx]))
        except Exception:
logger.warning("unable to copy %s to %s.",
f, os.path.join(outdir, os.path.basename(f)))
logger.error(last_exception())
if hard:
raise FileNotFoundError
def printPlot(data, line=None, ymax=30, xmax=60, tick=.2,
title="test", fill=False, pathIfWrite=None, logger=None):
""" ascii not what your program can do for you...
Plot a graph of alignment score by read count, using the logger info
output. Optionally write this to a file too.
Args:
data (list): list of ints representing alignment scores
line (int): where to draw a cutoff line
ymax (int): how tall is our graph
xmax (int): how wide is our graph
        fill (bool): whether to fill the area under the line
pathIfWrite (str): path to file to write graph to, if not None
Raises:
None
Returns:
None
"""
assert logger is not None, "must use logging"
data = sorted(data, reverse=True)
xaxis = "|"
yaxis = "_"
avbin = []
scaledy = []
# ylab = str(max(data))
ylab = str(len(data))
sli = math.ceil(len(data) / ymax)
# get rough averages for a window
for i in range(0, ymax + 1):
avbin.append(int(sum(data[i * sli: (i + 1) * sli]) / sli))
avmax = max(avbin)
logger.debug("scaling to max of %i", avmax)
for j in avbin:
scaledy.append(int((j / avmax) * xmax))
if line is not None:
scaled_line = int((line / avmax) * xmax)
lineidx = len(scaledy) - bisect(sorted(scaledy), scaled_line)
plotlines = []
fillchar = " " if not fill else "X"
for idx, j in enumerate(scaledy):
if idx == 0:
plotlines.append(" " * int(xmax * .25) + title)
plotlines.append(" " * 9 + "0" + " " * (xmax - 2) + ylab)
plotlines.append(" " * 10 + xaxis + (yaxis * xmax) + "|")
if line is not None:
if lineidx == idx:
plotlines.append(" " * 10 + xaxis +
"*" * (xmax - 4) + " " + str(line))
fillchar = " "
if idx % int(tick * ymax) == 0:
# plot ticks at increments
ticlab = str(int((j / xmax) * avmax))
plotlines.append(ticlab.rjust(10, " ") + xaxis + fillchar *
j + "O")
else:
plotlines.append(" " * 10 + xaxis + fillchar * j + "O")
logger.info("\n" + "\n".join(plotlines))
if pathIfWrite is not None:
with open(pathIfWrite, 'w') as file_handler:
for item in plotlines:
file_handler.write("{0}\n".format(item))
def set_ref_as_contig(ref_arg, map_percentage, final=False, logger=None):
""" sets the ref_as_contig arg for spades.
    # DEPRECATED COMMENT
    # Note this is used to set initial subassembly and final spades assembly.
    # Intermediate runs will always use seeds as "trusted", because we do not
    # reuse reads
    this allows the user to submit one of 4 options:
    infer: set to "trusted" if the unfiltered mapping percentage is over 80%,
        otherwise "untrusted"
    ignore: do not use regions from reference in initial subassembly
    trusted/untrusted: pass the value through to SPAdes as-is
"""
assert logger is not None, "must use logging"
if ref_arg == "infer":
if map_percentage > 80:
ref_as_contig = "trusted"
else:
ref_as_contig = "untrusted"
logger.info(
str("unfiltered mapping percentage is %f2 so " +
"'ref_as_contigs' is set to %s"),
map_percentage, ref_as_contig)
elif ref_arg == "ignore":
ref_as_contig = None
else:
assert ref_arg in [None, "trusted", "untrusted"], \
"error parsing initial ref_as_contig: %s" % ref_arg
ref_as_contig = ref_arg
if final and ref_as_contig is None:
# if final assembly, --ref-as-contig cannot be none, so we infer
return set_ref_as_contig(ref_arg="infer",
map_percentage=map_percentage,
final=final, logger=logger)
return ref_as_contig
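# Quick illustration (numbers invented): with ref_arg="infer" and a 92%
# unfiltered mapping percentage this returns "trusted"; at 60% it returns
# "untrusted"; ref_arg="ignore" returns None unless final=True, in which case
# the choice is re-inferred from the mapping percentage.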
def report_region_depths(inp, logger):
assert logger is not None, "must use logging"
report_list = []
nclusters = len(inp[0])
for i in range(0, nclusters):
report_list.append("Cluster %i:" % i)
for itidx, iteration in enumerate(inp):
for cluster in iteration:
if cluster[0] == i:
report_list.append(
"\tIter %i -- 5' coverage: %.2f 3' coverage %.2f" % (
itidx, cluster[1], cluster[2]))
return report_list
def make_modest_assembler_cmd(cmd, cores, memory, split=0, assembler="spades",
serialize=False, logger=None):
""" adjust assembler commands to use set amounts of cores and memory
returns the command,
if spades, split on "--careful", cause why would you run
SPAdes with "--reckless"?
if skesa, just append to the end of the cmd
    if you need to split resources between, say, two commands, use split=2
"""
assert logger is not None, "Must use logging"
if serialize:
logger.debug("Allocating assembler %dgb of memory", memory)
mem_each = memory
cores_each = cores
else:
if split:
if split > cores or split > memory:
logger.error("cannot split cores or memory resources into " +
"values less than 1!")
sys.exit(1)
mem_each = int(memory / split)
if mem_each < 1:
logger.warning(
"you must have at least 1gb memory allocated " +
"to each spades call! allocating minimum")
mem_each = 1
cores_each = int(cores / split)
logger.info("Running spades with %d cores and %d gb memory",
cores_each, mem_each)
else:
            # make sure spades doesn't hog processors or ram
mem_each = int(memory / cores) # should be floor
if mem_each < 1:
logger.warning("you must have at least 1gb memory allocated " +
"to each spades call! allocating minimum")
mem_each = 1
cores_each = 1
logger.info(
"Allocating SPAdes %dgb of memory for each of %d cores",
mem_each, cores)
if assembler == "spades":
cmdA, cmdB = cmd.split("--careful")
return "{0}-t {1} -m {2} --careful{3}".format(
cmdA,
cores_each,
mem_each,
cmdB)
else:
return "{0} --cores {1} --memory {2} ".format(
cmd,
cores_each,
mem_each)
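# Shape of the rewrite this performs (illustrative command): a spades call
# such as "spades.py --careful -k 21,33 ... -o out" with cores=4, memory=8 and
# split=2 becomes "spades.py -t 2 -m 4 --careful -k 21,33 ... -o out", i.e.
# the thread and memory caps are spliced in just before "--careful".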
def define_score_minimum(args, readlen, iteration, logger):
""" given the args, define your scoring minimum for mapping
for smalt, each round of mapping gets more stringent. For BWA,
the mapping minimum is deferred to BWA's default settings.
    This will probably be deprecated soon when we finally get rid of the
smalt options
"""
assert logger is not None, "must use logging"
if not args.score_min:
# This makes it such that score minimum is now more stringent
# with each mapping. Too campy? probably.
if args.mapper == 'smalt':
scaling_factor = 1.0 - (
1.0 / (2.0 + float(iteration)))
score_minimum = int(readlen * scaling_factor)
logger.info(
"Mapping with min_score of %f2 (%f2 of read length, %f2)",
scaling_factor, score_minimum, readlen)
else:
assert args.mapper == 'bwa', "must be either smalt or bwa"
logger.debug("using the default minimum score for BWA")
score_minimum = None
else:
if readlen < args.score_min:
raise ValueError(
"--min_score must be smaller than read length {0}".format(
readlen))
score_minimum = args.score_min
logger.info(
"Mapping with min_score of %f2 (read length: %f2)",
score_minimum, readlen)
return score_minimum
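# A hedged, worked example of the smalt scaling above (comments only):
#   iteration 0: scaling_factor = 1 - 1/(2+0) = 0.50 -> readlen 150 gives 75
#   iteration 1: scaling_factor = 1 - 1/(2+1) ~ 0.67 -> readlen 150 gives ~100
# With --mapper bwa and no --score_min, score_minimum stays None and BWA's
# defaults are used; an explicit --score_min larger than readlen raises
# ValueError.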
def check_genbank_for_fasta(gb, logger=None):
""" Ensure user has not provided a fasta by accident
Yes, it's a dumb function, but it was necessary, given the number of times
I would get partway through debugging some error only to realize I
had used a genbank that was actually a fasta
"""
assert logger is not None, "must use logging"
with open(gb) as ingb:
for line in ingb:
if line.startswith(">"):
logger.error("This genbank file looks like a fasta! Exiting")
raise ValueError("genbank file appears to be a fasta")
else:
break
def get_fasta_consensus_from_BAM(samtools_exe, bcftools_exe, # vcfutils_exe,
outfasta, ref, bam, region=None, logger=None):
""" run system commands to get consensus fastq from fastq
"""
cmd_list, consensus_fa = make_get_consensus_cmds(
samtools_exe=samtools_exe,
bcftools_exe=bcftools_exe,
# vcfutils_exe=vcfutils_exe,
ref=ref, bam=bam,
region=region,
outfasta=outfasta,
old_method=True,
logger=logger)
for cmd in cmd_list: # may have more cmds here in future
logger.debug(cmd)
subprocess.run([cmd],
shell=sys.platform != "win32",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True)
# consensus_fa = convert_fastq_to_fasta(
# fastq=consensus_fq, outfasta=outfasta,
# only_ATCG=True, logger=logger)
return consensus_fa
def convert_fastq_to_fasta(fastq, outfasta, # only_first=True,
only_ATCG=True, logger=None):
""" This converts fastq to fasta, as well as converting non ATCG bases
to N's. Currently only works on single entry fastqs.
If you need to convert more, learn how to use awk.
"""
assert logger is not None, "must use logging"
counter = 0
with open(outfasta, "w") as ofile:
with open(fastq, "r") as ifile:
for read in SeqIO.parse(ifile, "fastq"):
if counter > 0: # and only_first:
raise ValueError("only single-entry fastqs are supported")
if only_ATCG:
new_record = read
new_seq = ""
for i in range(0, len(read.seq)):
if read.seq[i] not in ["A", "T", "C", "G"]:
new_seq = new_seq + "N"
else:
new_seq = new_seq + read.seq[i]
new_record.seq = Seq(new_seq, IUPAC.ambiguous_dna)
SeqIO.write(new_record, ofile, "fasta")
else:
SeqIO.write(read, ofile, "fasta")
counter = counter + 1
return outfasta
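# A hedged, illustrative note (comments only): given a single-record fastq
# whose read is "ACGTNRA", the fasta written with only_ATCG=True contains
# "ACGTNNA" (the ambiguous R becomes an N); a second record in the fastq
# raises ValueError.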
def make_get_consensus_cmds(samtools_exe, bcftools_exe,# vcfutils_exe,
ref, bam, outfasta=None, old_method=False,
region=None, logger=None):
""" use samtoolsa nd vcfutils to get a consesnus fastq from a BAM file
the old_method referes to how bcftools calls consensuses since 1.2
"""
sorted_bam = os.path.splitext(bam)[0] + "_sorted.bam"
if outfasta is None:
consensus_fa = os.path.splitext(bam)[0] + "_consensus.fa"
else:
consensus_fa = outfasta
faidx_cmd = "{0} faidx {1}".format(samtools_exe, ref)
if old_method:
call = "-c"
else:
call = "-m"
if region is None:
this_region = ""
else:
assert isinstance(region, str), \
"region must be a string in the form of 'chr:start-end'"
this_region = "-r {0} ".format(region)
temp_zvcf = os.path.join(os.path.dirname(sorted_bam), "consensus.vcf.gz")
sort_cmd = "{0} sort {1} > {2}".format(samtools_exe, bam, sorted_bam)
index_cmd = "{0} index {1}".format(samtools_exe, sorted_bam)
# -A means ignore strange pairing (which we need, as we are subsetting a region already)
# -E means recalculate scores, for improved accuracy
# -u for uncompressed output
# -a for all positions, even those with low coverage
vcf_cmd = str("{0} mpileup -d8000 -EA -uf {1} {5}{2} | " +
"{3} call --ploidy 1 {4} -Oz -o {6} -").format(
samtools_exe, #0
ref, #1
sorted_bam, #2
bcftools_exe, #3
call, #4
this_region, #5
temp_zvcf) #6
tabix_cmd = "{0} {1}".format("tabix", temp_zvcf)
consensus_cmd = "{0} faidx {1} {2} | {3} consensus {4} > {5}".format(
samtools_exe, #0
ref, #1
#2, note this is not "this_region", as we do not need the -r flag here
region if region is not None else "",
bcftools_exe, #3
temp_zvcf, #4
consensus_fa)
return ([faidx_cmd, sort_cmd, index_cmd, vcf_cmd,
tabix_cmd, consensus_cmd], consensus_fa)
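# A hedged, illustrative sketch of the commands built above (comments only;
# "ref.fa", "map.bam" and the region string are hypothetical paths):
#   make_get_consensus_cmds("samtools", "bcftools", ref="ref.fa", bam="map.bam",
#                           region="chr1:100-200", old_method=True, logger=logger)
# returns a command list roughly equivalent to
#   samtools faidx ref.fa
#   samtools sort map.bam > map_sorted.bam
#   samtools index map_sorted.bam
#   samtools mpileup -d8000 -EA -uf ref.fa -r chr1:100-200 map_sorted.bam | \
#       bcftools call --ploidy 1 -c -Oz -o consensus.vcf.gz -
#   tabix consensus.vcf.gz
#   samtools faidx ref.fa chr1:100-200 | bcftools consensus consensus.vcf.gz > map_consensus.fa
# along with the consensus fasta path ("map_consensus.fa" here).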
def main(args, logger=None):
# allow user to give relative paths
output_root = os.path.abspath(os.path.expanduser(args.output))
try:
os.makedirs(output_root, exist_ok=False)
except OSError:
print("Output directory already exists; exiting...")
sys.exit(1)
t0 = time.time()
log_path = os.path.join(output_root, "riboSeed.log")
if logger is None:
logger = set_up_logging(verbosity=args.verbosity,
outfile=log_path,
name=__name__)
# # log version of riboSeed, commandline options, and all settings
logger.info("riboSeed pipeline package version: %s",
__version__)
logger.info("Usage:\n%s\n", " ".join([x for x in sys.argv]))
logger.debug("All settings used:")
for k, v in sorted(vars(args).items()):
logger.debug("%s: %s", k, str(v))
logger.debug("current PATH:")
try:
logger.debug(os.environ['PATH'])
except KeyError:
logger.error("no PATH variable found in system environment.")
sys.exit(1)
if args.cores is None:
args.cores = multiprocessing.cpu_count()
logger.info("Using %i core(s)", args.cores)
logger.info("Using %iGB memory", args.memory)
logger.info("Using %i thread(s)", args.threads)
logger.info("checking for installations of all required external tools")
logger.debug("creating an Exes object")
try:
sys_exes = Exes(python=sys.executable,
samtools=args.samtools_exe,
spades=args.spades_exe,
bwa=args.bwa_exe,
skesa=args.skesa_exe,
smalt=args.smalt_exe,
quast=args.quast_exe,
bcftools=args.bcftools_exe,
method=args.mapper)
sys_exes.python = sys_exes.check_spades_python_version(logger=logger)
except Exception as e:
logger.error(e)
sys.exit(1)
logger.debug("All required system executables found!")
logger.debug(str(sys_exes.__dict__))
try:
samtools_version = check_version_from_cmd(
exe=sys_exes.samtools, cmd='', line=3, where='stderr',
pattern=r"\s*Version: (?P<version>[^(]+)",
min_version=SAMTOOLS_MIN_VERSION, logger=logger)
except Exception as e:
logger.info(e)
logger.info("perhaps conda is giving a build warning?")
try:
samtools_version = check_version_from_cmd(
exe=sys_exes.samtools, cmd='', line=4, where='stderr',
pattern=r"\s*Version: (?P<version>[^(]+)",
min_version=SAMTOOLS_MIN_VERSION, logger=logger)
except Exception as f:
logger.error(f)
sys.exit(1)
if args.additional_libs:
logger.debug("Checking additional args for final assembly ahead of time")
check_spades_extra_library_input(args.additional_libs)
logger.debug("samtools version: %s", samtools_version)
if samtools_version.startswith("1.5"):
logger.error("Cannot use samtools 1.5! see samtools github issue #726")
sys.exit(1)
# check bambamc is installed proper if using smalt
if args.mapper == "smalt":
logger.info("SMALT is the selected mapper")
test_smalt_cmds = get_smalt_full_install_cmds(smalt_exe=sys_exes.smalt,
logger=logger)
test_smalt_bam_install(cmds=test_smalt_cmds, logger=logger)
else:
logger.info("BWA is the selected mapper")
# if --Initial_consensus, ensure our optional programs are in working order
if args.initial_consensus:
if any([x is None for x in [sys_exes.bcftools]]):
logger.error("Must have availible executables for both bcftools " +
"and vcfutils if using `--inital_consensus option! " +
"Exiting (1)")
sys.exit(1)
try:
check_genbank_for_fasta(gb=args.reference_genbank, logger=logger)
except ValueError:
sys.exit(1)
# if the target_len is set. set needed params
try:
proceed_to_target = decide_proceed_to_target(
target_len=args.target_len, logger=logger)
except ValueError:
logger.error("Exiting")
sys.exit(1)
# check and warn user about potential RAM issues
if args.memory < 6 or int(args.memory / args.cores) < 6:
logger.warning("Danger! We recommend that you have a minimum of " +
"6GB memory (or 6GB memory per core is using " +
"multiprocessing) available. If you have less " +
"than that per core, SPAdes may run out of memory.")
logger.warning("You can continue as configured if needed, and if a " +
"SPAdes error occurs, you can still use the long " +
"reads generated by riboSeed in a standalone assembly")
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
# make seedGenome object
logger.debug("constructing the seedGenome object")
seedGenome = SeedGenome(
name=os.path.basename(os.path.splitext(args.reference_genbank)[0]),
# this needs to be zero indexed to access mappings by iter
this_iteration=0,
iter_mapping_list=[],
max_iterations=args.iterations,
clustered_loci_txt=args.clustered_loci_txt,
output_root=output_root,
unmapped_mapping_list=[],
genbank_path=args.reference_genbank,
logger=logger)
seedGenome.iter_mapping_list[0].ref_fasta = seedGenome.ref_fasta
# add ngslib object for user supplied NGS data
logger.debug("adding the sequencing libraries to the seedGenome")
logger.debug(args.fastqS1)
try:
seedGenome.master_ngs_ob = NgsLib(
name="master",
master=True,
make_dist=args.mapper == "smalt",
readF=args.fastq1,
readR=args.fastq2,
readS0=args.fastqS1,
logger=logger,
mapper_exe=sys_exes.mapper,
ref_fasta=seedGenome.ref_fasta)
except Exception as e:
logger.error(e)
logger.error(last_exception())
sys.exit(1)
if args.force_kmers:
logger.info("Skipping kmer check, using kmers provided")
checked_k = args.kmers
checked_prek = args.pre_kmers
else:
checked_k = check_kmer_vs_reads(
k=args.kmers,
readlen=seedGenome.master_ngs_ob.readlen,
min_diff=2, logger=logger)
checked_prek = check_kmer_vs_reads(
k=args.pre_kmers,
readlen=seedGenome.master_ngs_ob.readlen,
min_diff=2, logger=logger)
logger.debug("Using the following kmer values pre and final assemblies:")
logger.debug(checked_k)
logger.debug(checked_prek)
if "pe" in seedGenome.master_ngs_ob.libtype:
# check equal length fastq. This doesn't actually check proper pairs
logger.debug("Checking that the fastq pair have equal number of reads")
try:
check_fastqs_len_equal(file1=args.fastq1, file2=args.fastq2)
except Exception as e:
logger.error(e)
# not just value error, whatever file_len throws
logger.error(last_exception())
sys.exit(1)
# read in riboSelect clusters, make a lociCluster ob for each,
# which get placed in seedGenome.loci_clusters
try:
seedGenome.loci_clusters = parse_clustered_loci_file(
filepath=seedGenome.clustered_loci_txt,
gb_filepath=seedGenome.genbank_path,
output_root=output_root,
circular=args.linear is False,
logger=logger)
seedGenome.loci_clusters = add_gb_seqrecords_to_cluster_list(
cluster_list=seedGenome.loci_clusters,
gb_filepath=seedGenome.genbank_path)
except Exception as e:
logger.error(e)
logger.error(last_exception())
sys.exit(1)
# add coordinates to lociCluster.loci_list
try:
add_coords_to_clusters(seedGenome=seedGenome,
logger=logger)
except Exception as e:
logger.error(e)
logger.error(last_exception())
sys.exit(1)
try:
logger.info("padding genbank by %i", args.flanking * 3)
logger.debug("old ref_fasta: %s", seedGenome.ref_fasta)
seedGenome.pad_genbank(pad=args.flanking * 3,
circular=args.linear is False, logger=logger)
logger.debug("new ref_fasta: %s", seedGenome.ref_fasta)
except Exception as e:
logger.error(e)
logger.error(last_exception())
sys.exit(1)
# make first iteration look like future iterations:
# this should also ensure the mapper uses the padded version
seedGenome.next_reference_path = seedGenome.ref_fasta
#
for cluster in seedGenome.loci_clusters:
cluster.master_ngs_ob = seedGenome.master_ngs_ob
# ---------------------------------------------------------------------------
# Performance summary lists
mapping_percentages = []
region_depths = []
# this gets set during the first mapping
score_minimum = 0
# now, we need to assemble each mapping object
# this should exclude any failures
while seedGenome.this_iteration < args.iterations:
logger.info("processing iteration %i", seedGenome.this_iteration)
logger.debug("with new reference: %s", seedGenome.next_reference_path)
clusters_to_process = [x for x in seedGenome.loci_clusters if
x.continue_iterating and
x.keep_contigs]
if len(clusters_to_process) == 0:
logger.error("No clusters had sufficient mapping! Exiting")
sys.exit(1)
if len(clusters_to_process) < len(seedGenome.loci_clusters):
logger.warning(
"clusters excluded from this iteration \n%s",
" ".join([str(x.index) for x in
seedGenome.loci_clusters if
x.index not in [y.index for
y in clusters_to_process]]))
# For each (non-initial) iteration
if seedGenome.this_iteration != 0:
if seedGenome.this_iteration != 1:
# clear out old .sam files to save space
if args.clean_temps:
logger.info("removing uneeded files from previous mappings")
# delete the read files from the last mapping
# dont do this on first iteration cause those be the reads!
# and if they aren't backed up you are up a creek and
# probably very upset with me.
seedGenome.purge_old_files(all_iters=False, logger=logger)
# set each cluster's seq_record from seedGenome.next_reference_path
with open(seedGenome.next_reference_path, 'r') as nextref:
next_seqrec = list(SeqIO.parse(nextref, 'fasta'))[0] # next?
for clu in clusters_to_process:
clu.seq_record = next_seqrec
# print qualities of mapped reads
for clu in clusters_to_process:
logger.debug("getting mapping scores for cluster %i from %s",
clu.index, clu.mappings[-1].mapped_bam)
mapped_scores = get_bam_AS(
inbam=clu.mappings[-1].mapped_bam,
logger=logger)
if len(mapped_scores) > 200000:
logger.debug("Downsampling our plotting data to 20k points")
mapped_scores = random.sample(mapped_scores, 200000)
printPlot(data=mapped_scores, line=score_minimum,
ymax=18, xmax=60, tick=.2, fill=True,
pathIfWrite=None,
title=str("Average alignment Scores for cluster " +
"%i\n " % clu.index),
logger=logger)
logger.info(str("-" * 72))
# make new ngslib from unmapped reads
convert_cmd, unmapped_ngsLib = convert_bam_to_fastqs_cmd(
mapping_ob=seedGenome.iter_mapping_list[
seedGenome.this_iteration - 1],
samtools_exe=sys_exes.samtools, single=True,
# ref fasta is used to make index cmd
ref_fasta=seedGenome.next_reference_path,
which='unmapped', logger=logger)
# unless subtract arg is used, use all reads each mapping
if not args.subtract:
unmapped_ngsLib = seedGenome.master_ngs_ob
unmapped_ngsLib.readlen = seedGenome.master_ngs_ob.readlen
unmapped_ngsLib.smalt_dist_path = \
seedGenome.master_ngs_ob.smalt_dist_path
logger.debug("converting unmapped bam into reads:")
seedGenome.master_ngs_ob.ref_fasta = seedGenome.next_reference_path
# don't worry about wasting time making these libraries if
# not subtracting previously mapped reads
if args.subtract:
for cmd in [convert_cmd]: # may have more cmds here in future
logger.debug(cmd)
subprocess.run([cmd],
shell=sys.platform != "win32",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True)
else:
# start with whole lib if first time through
unmapped_ngsLib = seedGenome.master_ngs_ob
score_minimum = define_score_minimum(
args=args, iteration=seedGenome.this_iteration,
readlen=unmapped_ngsLib.readlen, logger=logger)
try:
nonify_empty_lib_files(unmapped_ngsLib, logger=logger)
except ValueError:
logger.error(
"No reads mapped for this iteration. This could be to an " +
"error from samtools or elevated mapping stringency.")
if seedGenome.this_iteration != 0:
logger.warning(" proceeding to final assemblies")
break
else:
logger.error(" Exiting!")
sys.exit(1)
# Run commands to map to the genome
# the exe argument is Exes.mapper because that is what is checked
# during object instantiation
if args.mapper == "smalt":
# # get rid of bwa mapper default args
# if args.mapper_args == '-L 0,0 -U 0':
# args.mapper_args =
map_percent = map_to_genome_ref_smalt(
mapping_ob=seedGenome.iter_mapping_list[
seedGenome.this_iteration],
ngsLib=unmapped_ngsLib,
cores=(args.cores * args.threads),
samtools_exe=sys_exes.samtools,
genome_fasta=seedGenome.next_reference_path,
smalt_exe=sys_exes.mapper,
score_minimum=score_minimum,
step=3, k=5,
scoring="match=1,subst=-4,gapopen=-4,gapext=-3",
logger=logger)
else:
assert args.mapper == "bwa", "must be either bwa or smalt"
map_percent, score_list, score_minimum = map_to_genome_ref_bwa(
mapping_ob=seedGenome.iter_mapping_list[
seedGenome.this_iteration],
ngsLib=unmapped_ngsLib,
cores=(args.cores * args.threads),
genome_fasta=seedGenome.next_reference_path,
samtools_exe=sys_exes.samtools,
bwa_exe=sys_exes.mapper,
score_minimum=score_minimum,
# add_args='-L 0,0 -U 0',
add_args=args.mapper_args,
logger=logger)
mapping_percentages.append([seedGenome.this_iteration, map_percent])
##
# Here we put the logic in for a wee little preliminary assembly to test
##
# if things go really bad on the first mapping, get out while you can
if args.mapper == "bwa" and len(score_list) == 0:
logger.error(
"No reads mapped for this iteration. This could be to an " +
"error from samtools, bwa mem, a bad reference, " +
" or elevated mapping stringency. ")
if seedGenome.this_iteration != 0:
logger.warning(" proceeding to final assemblies")
break
else:
logger.error("Exiting!")
sys.exit(1)
# on first time through, infer ref_as_contig if not
# provided via commandline
if seedGenome.this_iteration == 0:
# do info for smalt mapping
if args.mapper == "bwa":
fig_dir = os.path.join(output_root, "figs")
os.makedirs(fig_dir)
# either use defined min or use the same heuristic as mapping
# if PLOT:
# plotAsScores(
# score_list,
# score_min=score_minimum if score_minimum is not None else
# int(round(float(seedGenome.master_ngs_ob.readlen) / 2.0)),
# outdir=fig_dir, logger=logger)
printPlot(data=score_list, line=score_minimum,
ymax=30, xmax=60,
pathIfWrite=os.path.join(
fig_dir, "initial_mapping.txt"),
tick=.2, fill=True,
title="Average alignment Scores (y) by sorted " +
"read index (x)",
logger=logger)
subassembly_ref_as_contig = set_ref_as_contig(
ref_arg=args.ref_as_contig,
map_percentage=map_percent, logger=logger)
else:
subassembly_ref_as_contig = args.ref_as_contig
else:
pass
try:
# again, this is [(idx, start_depth, end_depth)]
# clusters_post_partition are the clusters passing the minimum
# depth on the flanking regions.
iter_depths, clusters_to_subassemble = partition_mapping(
seedGenome=seedGenome,
logger=logger,
samtools_exe=sys_exes.samtools,
flank=args.flanking,
min_flank_depth=args.min_flank_depth,
cluster_list=clusters_to_process)
except Exception as e:
logger.error("Error while partitioning reads from iteration %i",
seedGenome.this_iteration)
logger.error(last_exception())
logger.error(e)
sys.exit(1)
logger.debug(iter_depths)
region_depths.append(iter_depths)
extract_convert_assemble_cmds = []
# generate spades cmds (cannot be multiprocessed because of python's
# inability to pass objects to multiprocessing)
# subassembly_ref_as_contig must be 'trusted' here because of the multimapping/
# coverage issues
for cluster in clusters_to_subassemble:
if args.initial_consensus and seedGenome.this_iteration == 0:
consensus_fasta = get_fasta_consensus_from_BAM(
samtools_exe=sys_exes.samtools,
bcftools_exe=sys_exes.bcftools,
region="{0}:{1}-{2}".format(
cluster.sequence_id,
# cluster.global_start_coord - args.flanking,
# cluster.global_end_coord + args.flanking),
cluster.global_start_coord,
cluster.global_end_coord),
# region=None,
outfasta=os.path.join(
cluster.mappings[-1].mapping_subdir,
"consensus.fasta"),
ref=seedGenome.next_reference_path,
bam=seedGenome.iter_mapping_list[0].sorted_mapped_bam,
# bam=cluster.mappings[-1].mapped_bam,
logger=logger)
# assign our consensus sequence to replace the segment of
# reference fasta used in initial subassembly
cluster.mappings[-1].ref_fasta = consensus_fasta
cmdlist = []
logger.debug("generating commands to convert bam to fastqs " +
"and assemble long reads")
convert_cmds, new_ngslib = convert_bam_to_fastqs_cmd(
mapping_ob=cluster.mappings[-1], which='mapped',
single=True,
samtools_exe=sys_exes.samtools,
ref_fasta=cluster.mappings[-1].ref_fasta, logger=logger)
cmdlist.append(convert_cmds)
if args.subassembler == "spades":
assembler_cmd = generate_spades_cmd(
mapping_ob=cluster.mappings[-1],
ngs_ob=new_ngslib, single_lib=True,
ref_as_contig=subassembly_ref_as_contig,
# only check if files exist if using a subset of reads
check_libs=args.subtract,
python_exe=sys_exes.python,
as_paired=False,
prelim=True,
k=checked_prek,
spades_exe=sys_exes.spades, logger=logger)
else:
assert args.subassembler == "skesa", \
"Only valid subassemblers are skesa and spades!"
assembler_cmd = generate_skesa_cmd(
mapping_ob=cluster.mappings[-1],
ngs_ob=new_ngslib, single_lib=True,
ref_as_contig=subassembly_ref_as_contig,
# only check if files exist if using a subset of reads
check_libs=args.subtract,
python_exe=sys_exes.python,
as_paired=False,
prelim=True,
k=checked_prek,
exe=sys_exes.skesa, logger=logger)
modest_assembler_cmd = make_modest_assembler_cmd(
assembler=args.subassembler,
cmd=assembler_cmd, cores=args.cores, memory=args.memory,
serialize=args.serialize, logger=logger)
cmdlist.append(modest_assembler_cmd)
cluster.mappings[-1].mapped_ngslib = new_ngslib
extract_convert_assemble_cmds.append(cmdlist)
# run all those commands!
logger.debug(
"\n running %i cmds: \n %s",
len([j for i in extract_convert_assemble_cmds for j in i]),
"\n".join([j for i in extract_convert_assemble_cmds for j in i]))
if args.serialize:
subassembly_return_sum = 0
logger.info("running without multiprocessing!")
for cmd in [j for i in extract_convert_assemble_cmds for j in i]:
logger.debug(cmd)
result = subprocess.run([cmd],
shell=sys.platform != "win32",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=False)
logger.debug(result.returncode)
subassembly_return_sum = \
subassembly_return_sum + result.returncode
else:
pool = multiprocessing.Pool(processes=args.cores)
results = [
pool.apply_async(subprocess_run_list,
(cmds,),
{"logger": None,
"hard": False})
for cmds in extract_convert_assemble_cmds]
pool.close()
pool.join()
subassembly_return_sum = sum([r.get() for r in results])
# check return codes
logger.info("Sum of return codes (should be 0):")
logger.info(subassembly_return_sum)
if subassembly_return_sum != 0:
logger.error(
"%d error(s) occurred when converting reads and subassembling with SPAdes!",
subassembly_return_sum)
if args.damn_the_torpedos or subassembly_return_sum < 2:
logger.error(
"Check the SPAdes and samtools logs to diagnose, especially if " +
"this occurs with more than one subassembly. Continuing")
else:
sys.exit(1)
# evaluate mapping (cant be multiprocessed)
for cluster in clusters_to_process:
cluster.assembly_success = evaluate_spades_success(
clu=cluster,
mapping_ob=cluster.mappings[-1],
include_short_contigs=args.include_short_contigs,
keep_best_contig=True,
min_delta=10,
flank=args.flanking,
seqname='', logger=logger,
min_assembly_len=args.min_assembly_len,
proceed_to_target=proceed_to_target,
target_len=args.target_len)
parse_subassembly_return_code(
cluster=cluster,
logger=logger)
clusters_for_pseudogenome = [
x for x in seedGenome.loci_clusters if
x.continue_iterating and x.keep_contigs]
if len(clusters_for_pseudogenome) != 0:
faux_genome_path, faux_genome_len = make_faux_genome(
seedGenome=seedGenome,
iteration=seedGenome.this_iteration,
output_root=seedGenome.output_root,
nbuff=1000,
cluster_list=[x for x in clusters_for_pseudogenome if
x.continue_iterating],
logger=logger)
logger.info("Length of buffered 'genome' for mapping: %i",
faux_genome_len)
else:
faux_genome_path = 1
seedGenome.this_iteration = args.iterations + 1
seedGenome.this_iteration = seedGenome.this_iteration + 1
seedGenome.next_reference_path = faux_genome_path
if seedGenome.this_iteration >= args.iterations:
logger.info("moving on to final assemblies!")
else:
logger.info("Moving on to iteration: %i",
seedGenome.this_iteration)
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
# done with the iterations! Lets free up some space
if args.clean_temps:
seedGenome.purge_old_files(all_iters=True, logger=logger)
# And add the remaining final contigs to the directory for combination
contigs_moved_before_list_iter = \
[x.final_contig_path for x in seedGenome.loci_clusters
if not x.keep_contigs]
logger.debug("Contigs moved prior to final iteration:")
logger.debug(contigs_moved_before_list_iter)
if len(contigs_moved_before_list_iter) == len(seedGenome.loci_clusters):
logger.info("all contigs already copied to long_reads dir")
else:
# this assumes that all the usable clusters not in
# clusters_for_pseudogenome have already been copied over
for clu in [x for x in clusters_for_pseudogenome if x.keep_contigs]:
try:
shutil.copyfile(clu.mappings[-1].assembled_contig,
os.path.join(
seedGenome.final_long_reads_dir,
"{0}_cluster_{1}_iter_{2}.fasta".format(
clu.sequence_id,
clu.index,
clu.mappings[-1].iteration)))
except Exception as e:
logger.error(e)
logger.error(last_exception())
sys.exit(1)
for dirpath, dirnames, files in os.walk(seedGenome.final_long_reads_dir):
if not files:
logger.error(
"No pseudocontigs found in the long reads output " +
"directory; it appears " +
"that the subassemblies did not yield pseudocontigs " +
"of sufficient quality. Exiting with code 0")
sys.exit(0)
logger.info("combining contigs from %s", seedGenome.final_long_reads_dir)
seedGenome.assembled_seeds = combine_contigs(
contigs_dir=seedGenome.final_long_reads_dir,
contigs_name="riboSeedContigs",
logger=logger)
logger.info("Combined Seed Contigs: %s", seedGenome.assembled_seeds)
logger.info("Time taken to run seeding: %.2fm" % ((time.time() - t0) / 60))
report = report_region_depths(inp=region_depths, logger=logger)
logger.info("Average depths of mapping for each cluster, by iteration:")
logger.info("\n" + "\n".join(report))
logger.info("Average mapping percentage for initial mapping: %d",
mapping_percentages[0][1])
logger.info("Average mapping percentage of reads to pseudogenome in " +
"subsequent mappings:\n" +
"\n".join(["iter %d: %d%%" % (i[0], i[1]) for \
i in mapping_percentages[1:]]))
if args.just_seed:
logger.info("Done with riboSeed: %s", time.asctime())
logger.info("Skipping final assembly")
logger.info("Combined Contig Seeds (for validation or alternate " +
"assembly): %s", seedGenome.assembled_seeds)
logger.info("Time taken: %.2fm" % ((time.time() - t0) / 60))
return 0
final_ref_as_contig = set_ref_as_contig(
ref_arg=subassembly_ref_as_contig,
# initial mapping percentage is used if inferring
map_percentage=mapping_percentages[0][1], final=True, logger=logger)
spades_quast_cmds, quast_reports = get_final_assemblies_cmds(
seedGenome=seedGenome, exes=sys_exes,
cores=args.cores,
memory=args.memory,
err_correct=args.err_correct,
additional_libs=args.additional_libs,
serialize=args.serialize,
ref_as_contig=final_ref_as_contig,
skip_control=args.skip_control, kmers=checked_k, logger=logger)
# run final assembly(-ies)
if args.serialize:
logger.info("running without multiprocessing!")
# unpack nested spades quast list, ignoring Nones
# hate me yet?
spades_results = []
for cmd in [j for i in spades_quast_cmds for j in i if j is not None]:
logger.debug(cmd)
spades_result = subprocess.run(
[cmd],
shell=sys.platform != "win32",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True)
spades_results.append(spades_result)
spades_results_sum = sum([r.returncode for r in spades_results])
else:
# split the processors based on how many spades_cmds are on the list
# don't correct for threads, as SPAdes defaults to lots of threads
split_cores = int(args.cores / (len(spades_quast_cmds) / 2))
if split_cores < 1:
split_cores = 1
pool = multiprocessing.Pool(processes=split_cores)
logger.debug("running the following commands:")
logger.debug("\n".join([j for i in spades_quast_cmds for
j in i if j is not None]))
spades_results = [
pool.apply_async(subprocess_run_list,
(cmds,),
{"logger": None,
"hard": False})
for cmds in spades_quast_cmds]
pool.close()
pool.join()
logger.debug(spades_results)
logger.info("Sum of return codes (should be 0):")
spades_results_sum = sum([r.get() for r in spades_results])
if spades_results_sum == 0:
logger.info(spades_results_sum)
else:
logger.warning(spades_results_sum)
if spades_results_sum != 0:
logger.error("%d error(s) occurred when running SPAdes!",
spades_results_sum)
if not args.damn_the_torpedos:
logger.error("Check the spades logs to diagnose. Exiting (1)")
sys.exit(1)
if not args.skip_control and quast_reports is not None:
logger.debug("writing combined quast reports")
logger.info("Comparing de novo and de fere novo assemblies:")
try:
quast_comp = make_quick_quast_table(
quast_reports,
write=True,
writedir=seedGenome.output_root,
logger=logger)
for k, v in sorted(quast_comp.items()):
logger.info("%s: %s", k, " ".join(v))
except Exception as e:
logger.error("Error writing out combined quast report")
logger.error(e)
# make dir for easy downloading from cluster
copy_to_handy_dir(outdir=os.path.join(output_root, "mauve"),
pre=args.experiment_name,
ref_gb=args.reference_genbank,
skip_control=args.skip_control,
seedGenome=seedGenome,
hard=False, logger=logger)
logger.info("Done with riboSeed: %s", time.asctime())
logger.info("riboSeed Assembly: %s", seedGenome.output_root)
logger.info("Combined Contig Seeds (for validation or alternate " +
"assembly): %s", seedGenome.assembled_seeds)
logger.info("Time taken: %.2fm" % ((time.time() - t0) / 60))
return 0
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'selector.ui'
#
# Created: Sat Jul 13 16:18:03 2019
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(334, 252)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.tabWidget = QtGui.QTabWidget(self.centralwidget)
self.tabWidget.setObjectName("tabWidget")
self.colorTab = QtGui.QWidget()
self.colorTab.setObjectName("colorTab")
self.verticalLayout = QtGui.QVBoxLayout(self.colorTab)
self.verticalLayout.setObjectName("verticalLayout")
self.redCheck = QtGui.QCheckBox(self.colorTab)
self.redCheck.setObjectName("redCheck")
self.verticalLayout.addWidget(self.redCheck)
self.blueCheck = QtGui.QCheckBox(self.colorTab)
self.blueCheck.setObjectName("blueCheck")
self.verticalLayout.addWidget(self.blueCheck)
self.blackCheck = QtGui.QCheckBox(self.colorTab)
self.blackCheck.setObjectName("blackCheck")
self.verticalLayout.addWidget(self.blackCheck)
self.greenCheck = QtGui.QCheckBox(self.colorTab)
self.greenCheck.setObjectName("greenCheck")
self.verticalLayout.addWidget(self.greenCheck)
self.whiteCheck = QtGui.QCheckBox(self.colorTab)
self.whiteCheck.setObjectName("whiteCheck")
self.verticalLayout.addWidget(self.whiteCheck)
self.pinkCheck = QtGui.QCheckBox(self.colorTab)
self.pinkCheck.setObjectName("pinkCheck")
self.verticalLayout.addWidget(self.pinkCheck)
self.updateButton = QtGui.QPushButton(self.colorTab)
self.updateButton.setText("Refresh")
self.updateButton.setObjectName("updateButton")
self.verticalLayout.addWidget(self.updateButton)
self.tabWidget.addTab(self.colorTab, "")
self.settingsTab = QtGui.QWidget()
self.settingsTab.setObjectName("settingsTab")
self.verticalLayout_2 = QtGui.QVBoxLayout(self.settingsTab)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.groupBox = QtGui.QGroupBox(self.settingsTab)
self.groupBox.setObjectName("groupBox")
self.verticalLayout_3 = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.pathToFrames = QtGui.QLineEdit(self.groupBox)
self.pathToFrames.setEnabled(True)
self.pathToFrames.setPlaceholderText("")
self.pathToFrames.setObjectName("pathToFrames")
self.verticalLayout_3.addWidget(self.pathToFrames)
self.verticalLayout_2.addWidget(self.groupBox)
self.groupBox_2 = QtGui.QGroupBox(self.settingsTab)
self.groupBox_2.setObjectName("groupBox_2")
self.verticalLayout_4 = QtGui.QVBoxLayout(self.groupBox_2)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.frameName = QtGui.QLineEdit(self.groupBox_2)
self.frameName.setObjectName("frameName")
self.verticalLayout_4.addWidget(self.frameName)
self.verticalLayout_2.addWidget(self.groupBox_2)
self.tabWidget.addTab(self.settingsTab, "")
self.horizontalLayout.addWidget(self.tabWidget)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Magic Frame Selector", None, QtGui.QApplication.UnicodeUTF8))
self.redCheck.setText(QtGui.QApplication.translate("MainWindow", "Red", None, QtGui.QApplication.UnicodeUTF8))
self.blueCheck.setText(QtGui.QApplication.translate("MainWindow", "Blue", None, QtGui.QApplication.UnicodeUTF8))
self.blackCheck.setText(QtGui.QApplication.translate("MainWindow", "Black", None, QtGui.QApplication.UnicodeUTF8))
self.greenCheck.setText(QtGui.QApplication.translate("MainWindow", "Green", None, QtGui.QApplication.UnicodeUTF8))
self.whiteCheck.setText(QtGui.QApplication.translate("MainWindow", "White", None, QtGui.QApplication.UnicodeUTF8))
self.pinkCheck.setText(QtGui.QApplication.translate("MainWindow", "Pink", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.colorTab), QtGui.QApplication.translate("MainWindow", "Color Selector", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("MainWindow", "Filepath to Frame Images", None, QtGui.QApplication.UnicodeUTF8))
self.pathToFrames.setText(QtGui.QApplication.translate("MainWindow", "/home/jeff/Dropbox/Jeff/StreamStuff/FrameSelector/Frames", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_2.setTitle(QtGui.QApplication.translate("MainWindow", "Current Frame Name", None, QtGui.QApplication.UnicodeUTF8))
self.frameName.setText(QtGui.QApplication.translate("MainWindow", "CurrentFrame.png", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.settingsTab), QtGui.QApplication.translate("MainWindow", "Settings", None, QtGui.QApplication.UnicodeUTF8))
|