from .cmd import getNamespaces
from .permissions import isForbiddenAllNamespace
def list():
    '''Return list of tuples of namespaces: [(value,label),(value,label),...]'''
    if isForbiddenAllNamespace() == True:
        namespaces = []
    else:
        namespaces = [("all-namespaces","All namespaces")]
    allNamespaces = getNamespaces()
    for ns in allNamespaces:
        namespaces.append((ns,ns))
    return namespaces |
__all__ = ['sslaction', 'sslcert', 'sslcertchain', 'sslcertchain_binding', 'sslcertchain_sslcertkey_binding', 'sslcertfile', 'sslcertkey', 'sslcertkey_binding', 'sslcertkey_crldistribution_binding', 'sslcertkey_service_binding', 'sslcertkey_sslocspresponder_binding', 'sslcertkey_sslvserver_binding', 'sslcertlink', 'sslcertreq', 'sslcipher', 'sslcipher_binding', 'sslcipher_individualcipher_binding', 'sslcipher_service_binding', 'sslcipher_servicegroup_binding', 'sslcipher_sslciphersuite_binding', 'sslcipher_sslprofile_binding', 'sslcipher_sslvserver_binding', 'sslciphersuite', 'sslcrl', 'sslcrl_binding', 'sslcrl_serialnumber_binding', 'sslcrlfile', 'ssldhfile', 'ssldhparam', 'ssldsakey', 'ssldtlsprofile', 'sslfips', 'sslfipskey', 'sslfipssimsource', 'sslfipssimtarget', 'sslglobal_binding', 'sslglobal_sslpolicy_binding', 'sslhsmkey', 'sslkeyfile', 'sslocspresponder', 'sslparameter', 'sslpkcs12', 'sslpkcs8', 'sslpolicy', 'sslpolicy_binding', 'sslpolicy_csvserver_binding', 'sslpolicy_lbvserver_binding', 'sslpolicy_sslglobal_binding', 'sslpolicy_sslpolicylabel_binding', 'sslpolicy_sslservice_binding', 'sslpolicy_sslvserver_binding', 'sslpolicylabel', 'sslpolicylabel_binding', 'sslpolicylabel_sslpolicy_binding', 'sslprofile', 'sslprofile_binding', 'sslprofile_ecccurve_binding', 'sslprofile_sslcipher_binding', 'sslprofile_sslciphersuite_binding', 'sslprofile_sslvserver_binding', 'sslrsakey', 'sslservice', 'sslservice_binding', 'sslservice_ecccurve_binding', 'sslservice_sslcertkey_binding', 'sslservice_sslcipher_binding', 'sslservice_sslciphersuite_binding', 'sslservice_sslpolicy_binding', 'sslservicegroup', 'sslservicegroup_binding', 'sslservicegroup_ecccurve_binding', 'sslservicegroup_sslcertkey_binding', 'sslservicegroup_sslcipher_binding', 'sslservicegroup_sslciphersuite_binding', 'sslvserver', 'sslvserver_binding', 'sslvserver_ecccurve_binding', 'sslvserver_sslcertkey_binding', 'sslvserver_sslcipher_binding', 'sslvserver_sslciphersuite_binding', 'sslvserver_sslpolicy_binding', 'sslwrapkey'] |
#!/usr/bin/env python3
from distutils.core import setup, Extension
setup(
    name='pybeep',
    version='0.1',
    ext_modules=[Extension("pybeep", ["_pybeep.c", "pybeep.c"])],
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX :: Linux"
    ],
    url="https://github.com/jas14/pybeep",
)
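# A minimal build sketch (assuming a C toolchain and the Python development headers are
# installed; on newer Pythons, where distutils has been removed, setuptools provides the
# same setup/Extension API as a drop-in replacement):
#   python setup.py build_ext --inplace   # compile _pybeep.c / pybeep.c in place
#   python setup.py install               # or install into the current environment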
|
"""
This program runs two different logistic regression implementations on two
different datasets of the format [float,float,boolean]: one implementation
lives in this file and the other comes from the sklearn library. The program
then compares how well the two implementations predict the given outcome
for each input tuple in the datasets.
@author Per Harald Borgen
"""
import math
import numpy as np
import pandas as pd
from pandas import DataFrame
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in later releases
from numpy import loadtxt, where
from pylab import scatter, show, legend, xlabel, ylabel
# scale larger positive and negative values to between -1 and 1, depending on
# the largest value in the data
min_max_scaler = preprocessing.MinMaxScaler(feature_range=(-1,1))
df = pd.read_csv("data.csv", header=0)
# clean up data
df.columns = ["grade1","grade2","label"]
x = df["label"].map(lambda x: float(x.rstrip(';')))
# format the input data into two arrays, one of independent variables
# and one of the dependent variable
X = df[["grade1","grade2"]]
X = np.array(X)
X = min_max_scaler.fit_transform(X)
Y = df["label"].map(lambda x: float(x.rstrip(';')))
Y = np.array(Y)
# if want to create a new clean dataset
##X = pd.DataFrame.from_records(X,columns=['grade1','grade2'])
##X.insert(2,'label',Y)
##X.to_csv('data2.csv')
# creating testing and training set
X_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size=0.33)
# train scikit learn model
clf = LogisticRegression()
clf.fit(X_train,Y_train)
print('score Scikit learn: ', clf.score(X_test,Y_test))
# visualize data, uncomment "show()" to run it
pos = where(Y == 1)
neg = where(Y == 0)
scatter(X[pos, 0], X[pos, 1], marker='o', c='b')
scatter(X[neg, 0], X[neg, 1], marker='x', c='r')
xlabel('Exam 1 score')
ylabel('Exam 2 score')
legend(['Not Admitted', 'Admitted'])
show()
##The sigmoid function squashes the hypothesis into a probability, so the cost function penalizes the algorithm proportionally for worse estimations
def Sigmoid(z):
    G_of_Z = float(1.0 / float((1.0 + math.exp(-1.0*z))))
    return G_of_Z
##The hypothesis is the linear combination of all the known factors x[i] and their current estimated coefficients theta[i]
##This hypothesis will be used to calculate each instance of the Cost Function
def Hypothesis(theta, x):
    z = 0
    for i in range(len(theta)):
        z += x[i]*theta[i]
    return Sigmoid(z)
##For each member of the dataset, the result (Y) determines which variation of the cost function is used
##The Y = 0 variant punishes high probability estimations, while the Y = 1 variant punishes low ones
##The "punishment" makes the change in the gradient of ThetaCurrent - Average(CostFunction(Dataset)) greater
def Cost_Function(X,Y,theta,m):
    sumOfErrors = 0
    for i in range(m):
        xi = X[i]
        hi = Hypothesis(theta,xi)
        if Y[i] == 1:
            error = Y[i] * math.log(hi)
        elif Y[i] == 0:
            error = (1-Y[i]) * math.log(1-hi)
        sumOfErrors += error
    const = -1.0/m  # float division so the average is not truncated
    J = const * sumOfErrors
    print('cost is ', J)
    return J
##This function creates the gradient component for each Theta value
##Each component is the partial derivative of the average cost with respect to that theta,
##already scaled by the "learning speed factor" alpha, so the update step is simply theta[j] minus this value
##For each Theta there is a cost term calculated for each member of the dataset
def Cost_Function_Derivative(X,Y,theta,j,m,alpha):
    sumErrors = 0
    for i in range(m):
        xi = X[i]
        xij = xi[j]
        hi = Hypothesis(theta,X[i])
        error = (hi - Y[i])*xij
        sumErrors += error
    m = len(Y)
    constant = float(alpha)/float(m)
    J = constant * sumErrors
    return J
##For each theta we compute its partial differential
##The gradient, i.e. the vector from the current point in Theta-space (each theta value is its own dimension) towards a more accurate point,
##is the vector whose components are these partial differentials, one per theta value
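##In symbols, the update the loop below performs for every component j (a sketch, matching Cost_Function_Derivative above):
##  theta_j := theta_j - (alpha/m) * sum_i (h_theta(x_i) - y_i) * x_ij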
def Gradient_Descent(X,Y,theta,m,alpha):
    new_theta = []
    constant = alpha/m  # unused here; the alpha/m scaling is applied inside Cost_Function_Derivative
    for j in range(len(theta)):
        CFDerivative = Cost_Function_Derivative(X,Y,theta,j,m,alpha)
        new_theta_value = theta[j] - CFDerivative
        new_theta.append(new_theta_value)
    return new_theta
##The high level function for the LR algorithm: for a number of steps (num_iters) it computes gradients that move
##the Theta values (the coefficients of the known factors) from the current estimate (new_theta) closer to their "optimum estimation",
##i.e. the set of values that best represents the system as a linear combination model
def Logistic_Regression(X,Y,alpha,theta,num_iters):
    m = len(Y)
    for x in range(num_iters):
        new_theta = Gradient_Descent(X,Y,theta,m,alpha)
        theta = new_theta
        if x % 100 == 0:
            #periodically report the cost so the progress of the gradient steps can be monitored
            Cost_Function(X,Y,theta,m)
            print('theta ', theta)
            print('cost is ', Cost_Function(X,Y,theta,m))
    Declare_Winner(theta)
##This method compares the accuracy of the model generated by the scikit library with the model generated by this implementation
def Declare_Winner(theta):
    score = 0
    winner = ""
    #first scikit's LR is scored: for each independent var in the test set its prediction is compared against the dependent var;
    #if the prediction matches the measured value it counts as a point for the scikit version of LR
    scikit_score = clf.score(X_test,Y_test)
    length = len(X_test)
    for i in range(length):
        prediction = round(Hypothesis(X_test[i],theta))
        answer = Y_test[i]
        if prediction == answer:
            score += 1
    #the same process is repeated for the implementation in this module and the scores are compared to find the higher match-rate
    my_score = float(score) / float(length)
    if my_score > scikit_score:
        print('You won!')
    elif my_score == scikit_score:
        print("It's a tie!")
    else:
        print('Scikit won.. :(')
    print('Your score: ', my_score)
    print("Scikit's score: ", scikit_score)
# These are the initial guesses for theta as well as the learning rate of the algorithm
# A learning rate that is too low will not close in on the most accurate values within a reasonable number of iterations
# An alpha that is too high might overshoot the accurate values or cause erratic guesses
# Each iteration increases model accuracy but with diminishing returns,
# and costs a significant constant factor times O(n)*|Theta|, where n is the dataset length
initial_theta = [0,0]
alpha = 0.1
iterations = 1000
Logistic_Regression(X,Y,alpha,initial_theta,iterations)
|
import os.path as osp
import numpy as np
import pickle
import random
from pathlib import Path
from functools import reduce
from typing import Tuple, List
from tqdm import tqdm
from pyquaternion import Quaternion
from nuscenes import NuScenes
from nuscenes.utils import splits
from nuscenes.utils.data_classes import LidarPointCloud
from nuscenes.utils.geometry_utils import transform_matrix
from nuscenes.utils.data_classes import Box
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.evaluate import NuScenesEval
general_to_detection = {
"human.pedestrian.adult": "pedestrian",
"human.pedestrian.child": "pedestrian",
"human.pedestrian.wheelchair": "ignore",
"human.pedestrian.stroller": "ignore",
"human.pedestrian.personal_mobility": "ignore",
"human.pedestrian.police_officer": "pedestrian",
"human.pedestrian.construction_worker": "pedestrian",
"animal": "ignore",
"vehicle.car": "car",
"vehicle.motorcycle": "motorcycle",
"vehicle.bicycle": "bicycle",
"vehicle.bus.bendy": "bus",
"vehicle.bus.rigid": "bus",
"vehicle.truck": "truck",
"vehicle.construction": "construction_vehicle",
"vehicle.emergency.ambulance": "ignore",
"vehicle.emergency.police": "ignore",
"vehicle.trailer": "trailer",
"movable_object.barrier": "barrier",
"movable_object.trafficcone": "traffic_cone",
"movable_object.pushable_pullable": "ignore",
"movable_object.debris": "ignore",
"static_object.bicycle_rack": "ignore",
}
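# Example: collapsing a raw nuScenes category into the 10-class detection label set
# (a quick illustration of the mapping above; keys not present would raise a KeyError):
#   general_to_detection["vehicle.bus.rigid"]        # -> "bus"
#   general_to_detection["human.pedestrian.adult"]   # -> "pedestrian"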
cls_attr_dist = {
"barrier": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 0,
"vehicle.parked": 0,
"vehicle.stopped": 0,
},
"bicycle": {
"cycle.with_rider": 2791,
"cycle.without_rider": 8946,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 0,
"vehicle.parked": 0,
"vehicle.stopped": 0,
},
"bus": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 9092,
"vehicle.parked": 3294,
"vehicle.stopped": 3881,
},
"car": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 114304,
"vehicle.parked": 330133,
"vehicle.stopped": 46898,
},
"construction_vehicle": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 882,
"vehicle.parked": 11549,
"vehicle.stopped": 2102,
},
"ignore": {
"cycle.with_rider": 307,
"cycle.without_rider": 73,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 165,
"vehicle.parked": 400,
"vehicle.stopped": 102,
},
"motorcycle": {
"cycle.with_rider": 4233,
"cycle.without_rider": 8326,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 0,
"vehicle.parked": 0,
"vehicle.stopped": 0,
},
"pedestrian": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 157444,
"pedestrian.sitting_lying_down": 13939,
"pedestrian.standing": 46530,
"vehicle.moving": 0,
"vehicle.parked": 0,
"vehicle.stopped": 0,
},
"traffic_cone": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 0,
"vehicle.parked": 0,
"vehicle.stopped": 0,
},
"trailer": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 3421,
"vehicle.parked": 19224,
"vehicle.stopped": 1895,
},
"truck": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 21339,
"vehicle.parked": 55626,
"vehicle.stopped": 11097,
},
}
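# The per-class attribute counts above are typically used to pick the most frequent
# attribute for each predicted class when detections are serialized into the nuScenes
# submission format (an assumption based on how similar nuScenes pipelines use this table).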
def box_velocity(
nusc, sample_annotation_token: str, max_time_diff: float = 1.5
) -> np.ndarray:
"""
Estimate the velocity for an annotation.
If possible, we compute the centered difference between the previous and next frame.
Otherwise we use the difference between the current and previous/next frame.
If the velocity cannot be estimated, values are set to np.nan.
:param sample_annotation_token: Unique sample_annotation identifier.
:param max_time_diff: Max allowed time diff between consecutive samples that are used to estimate velocities.
:return: <np.float: 3>. Velocity in x/y/z direction in m/s.
"""
current = nusc.get("sample_annotation", sample_annotation_token)
has_prev = current["prev"] != ""
has_next = current["next"] != ""
# Cannot estimate velocity for a single annotation.
if not has_prev and not has_next:
return np.array([np.nan, np.nan, np.nan])
if has_prev:
first = nusc.get("sample_annotation", current["prev"])
else:
first = current
if has_next:
last = nusc.get("sample_annotation", current["next"])
else:
last = current
pos_last = np.array(last["translation"])
pos_first = np.array(first["translation"])
pos_diff = pos_last - pos_first
time_last = 1e-6 * nusc.get("sample", last["sample_token"])["timestamp"]
time_first = 1e-6 * nusc.get("sample", first["sample_token"])["timestamp"]
time_diff = time_last - time_first
if has_next and has_prev:
# If doing centered difference, allow for up to double the max_time_diff.
max_time_diff *= 2
if time_diff > max_time_diff:
# If time_diff is too big, don't return an estimate.
return np.array([np.nan, np.nan, np.nan])
else:
return pos_diff / time_diff
def remove_close(points, radius: float) -> np.ndarray:
    """
    Removes points that are too close to the origin (within the given radius).
    :param radius: Radius below which points are removed.
    """
    x_filt = np.abs(points[0, :]) < radius
    y_filt = np.abs(points[1, :]) < radius
    not_close = np.logical_not(np.logical_and(x_filt, y_filt))
    points = points[:, not_close]
    return points
def _second_det_to_nusc_box(detection):
box3d = detection["box3d_lidar"].detach().cpu().numpy()
scores = detection["scores"].detach().cpu().numpy()
labels = detection["label_preds"].detach().cpu().numpy()
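    # The line below flips the heading angle (sign flip plus a 90 degree offset); it
    # mirrors the "-rots - np.pi / 2" applied when ground-truth boxes are assembled in
    # _fill_trainval_infos, so predictions and ground truth presumably share the yaw
    # convention expected by the nuScenes Box class.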
box3d[:, -1] = -box3d[:, -1] - np.pi / 2
box_list = []
for i in range(box3d.shape[0]):
quat = Quaternion(axis=[0, 0, 1], radians=box3d[i, -1])
velocity = (*box3d[i, 6:8], 0.0)
box = Box(
box3d[i, :3],
box3d[i, 3:6],
quat,
label=labels[i],
score=scores[i],
velocity=velocity,
)
box_list.append(box)
return box_list
def _lidar_nusc_box_to_global(nusc, boxes, sample_token):
    try:
        s_record = nusc.get("sample", sample_token)
        sample_data_token = s_record["data"]["LIDAR_TOP"]
    except Exception:
        # Fall back to treating the given token as a sample_data token directly.
        sample_data_token = sample_token
    sd_record = nusc.get("sample_data", sample_data_token)
    cs_record = nusc.get("calibrated_sensor", sd_record["calibrated_sensor_token"])
    sensor_record = nusc.get("sensor", cs_record["sensor_token"])
    pose_record = nusc.get("ego_pose", sd_record["ego_pose_token"])
    data_path = nusc.get_sample_data_path(sample_data_token)
    box_list = []
    for box in boxes:
        # Move box to ego vehicle coord system
        box.rotate(Quaternion(cs_record["rotation"]))
        box.translate(np.array(cs_record["translation"]))
        # Move box to global coord system
        box.rotate(Quaternion(pose_record["rotation"]))
        box.translate(np.array(pose_record["translation"]))
        box_list.append(box)
    return box_list
def _get_available_scenes(nusc):
available_scenes = []
print("total scene num:", len(nusc.scene))
for scene in nusc.scene:
scene_token = scene["token"]
scene_rec = nusc.get("scene", scene_token)
sample_rec = nusc.get("sample", scene_rec["first_sample_token"])
sd_rec = nusc.get("sample_data", sample_rec["data"]["LIDAR_TOP"])
has_more_frames = True
scene_not_exist = False
while has_more_frames:
lidar_path, boxes, _ = nusc.get_sample_data(sd_rec["token"])
if not Path(lidar_path).exists():
scene_not_exist = True
break
else:
break
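            # NOTE: both branches of the check above break out of the loop, so only the
            # first LIDAR_TOP frame of each scene is tested for existence; the sweep
            # traversal code below is therefore never reached.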
if not sd_rec["next"] == "":
sd_rec = nusc.get("sample_data", sd_rec["next"])
else:
has_more_frames = False
if scene_not_exist:
continue
available_scenes.append(scene)
print("exist scene num:", len(available_scenes))
return available_scenes
def get_sample_data(
nusc, sample_data_token: str, selected_anntokens: List[str] = None
) -> Tuple[str, List[Box], np.array]:
"""
Returns the data path as well as all annotations related to that sample_data.
Note that the boxes are transformed into the current sensor's coordinate frame.
:param sample_data_token: Sample_data token.
:param selected_anntokens: If provided only return the selected annotation.
:return: (data_path, boxes, camera_intrinsic <np.array: 3, 3>)
"""
# Retrieve sensor & pose records
sd_record = nusc.get("sample_data", sample_data_token)
cs_record = nusc.get("calibrated_sensor", sd_record["calibrated_sensor_token"])
sensor_record = nusc.get("sensor", cs_record["sensor_token"])
pose_record = nusc.get("ego_pose", sd_record["ego_pose_token"])
data_path = nusc.get_sample_data_path(sample_data_token)
if sensor_record["modality"] == "camera":
cam_intrinsic = np.array(cs_record["camera_intrinsic"])
imsize = (sd_record["width"], sd_record["height"])
else:
cam_intrinsic = None
imsize = None
# Retrieve all sample annotations and map to sensor coordinate system.
if selected_anntokens is not None:
boxes = list(map(nusc.get_box, selected_anntokens))
else:
boxes = nusc.get_boxes(sample_data_token)
# Make list of Box objects including coord system transforms.
box_list = []
for box in boxes:
# Move box to ego vehicle coord system
box.translate(-np.array(pose_record["translation"]))
box.rotate(Quaternion(pose_record["rotation"]).inverse)
# Move box to sensor coord system
box.translate(-np.array(cs_record["translation"]))
box.rotate(Quaternion(cs_record["rotation"]).inverse)
box_list.append(box)
return data_path, box_list, cam_intrinsic
def get_sample_ground_plane(root_path, version):
nusc = NuScenes(version=version, dataroot=root_path, verbose=True)
rets = {}
for sample in tqdm(nusc.sample):
chan = "LIDAR_TOP"
sd_token = sample["data"][chan]
sd_rec = nusc.get("sample_data", sd_token)
lidar_path, _, _ = get_sample_data(nusc, sd_token)
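        # NOTE: read_file and fit_plane_LSE_RANSAC are not defined or imported in this
        # module; they are assumed to be provided elsewhere in the surrounding package.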
points = read_file(lidar_path)
points = np.concatenate((points[:, :3], np.ones((points.shape[0], 1))), axis=1)
plane, inliers, outliers = fit_plane_LSE_RANSAC(
points, return_outlier_list=True
)
xx = points[:, 0]
yy = points[:, 1]
zz = (-plane[0] * xx - plane[1] * yy - plane[3]) / plane[2]
rets.update({sd_token: {"plane": plane, "height": zz,}})
with open(nusc.root_path / "infos_trainval_ground_plane.pkl", "wb") as f:
pickle.dump(rets, f)
def _fill_trainval_infos(nusc, train_scenes, val_scenes, test=False, nsweeps=10):
from nuscenes.utils.geometry_utils import transform_matrix
train_nusc_infos = []
val_nusc_infos = []
ref_chan = "LIDAR_TOP" # The radar channel from which we track back n sweeps to aggregate the point cloud.
chan = "LIDAR_TOP" # The reference channel of the current sample_rec that the point clouds are mapped to.
for sample in tqdm(nusc.sample):
""" Manual save info["sweeps"] """
# Get reference pose and timestamp
# ref_chan == "LIDAR_TOP"
ref_sd_token = sample["data"][ref_chan]
ref_sd_rec = nusc.get("sample_data", ref_sd_token)
ref_cs_rec = nusc.get(
"calibrated_sensor", ref_sd_rec["calibrated_sensor_token"]
)
ref_pose_rec = nusc.get("ego_pose", ref_sd_rec["ego_pose_token"])
ref_time = 1e-6 * ref_sd_rec["timestamp"]
ref_lidar_path, ref_boxes, _ = get_sample_data(nusc, ref_sd_token)
ref_cam_front_token = sample["data"]["CAM_FRONT"]
ref_cam_path, _, ref_cam_intrinsic = nusc.get_sample_data(ref_cam_front_token)
# Homogeneous transform from ego car frame to reference frame
ref_from_car = transform_matrix(
ref_cs_rec["translation"], Quaternion(ref_cs_rec["rotation"]), inverse=True
)
# Homogeneous transformation matrix from global to _current_ ego car frame
car_from_global = transform_matrix(
ref_pose_rec["translation"],
Quaternion(ref_pose_rec["rotation"]),
inverse=True,
)
info = {
"lidar_path": ref_lidar_path,
"cam_front_path": ref_cam_path,
"cam_intrinsic": ref_cam_intrinsic,
"token": sample["token"],
"sweeps": [],
"ref_from_car": ref_from_car,
"car_from_global": car_from_global,
"timestamp": ref_time,
}
sample_data_token = sample["data"][chan]
curr_sd_rec = nusc.get("sample_data", sample_data_token)
sweeps = []
while len(sweeps) < nsweeps - 1:
if curr_sd_rec["prev"] == "":
if len(sweeps) == 0:
sweep = {
"lidar_path": ref_lidar_path,
"sample_data_token": curr_sd_rec["token"],
"transform_matrix": None,
"time_lag": curr_sd_rec["timestamp"] * 0,
# time_lag: 0,
}
sweeps.append(sweep)
else:
sweeps.append(sweeps[-1])
else:
curr_sd_rec = nusc.get("sample_data", curr_sd_rec["prev"])
# Get past pose
current_pose_rec = nusc.get("ego_pose", curr_sd_rec["ego_pose_token"])
global_from_car = transform_matrix(
current_pose_rec["translation"],
Quaternion(current_pose_rec["rotation"]),
inverse=False,
)
# Homogeneous transformation matrix from sensor coordinate frame to ego car frame.
current_cs_rec = nusc.get(
"calibrated_sensor", curr_sd_rec["calibrated_sensor_token"]
)
car_from_current = transform_matrix(
current_cs_rec["translation"],
Quaternion(current_cs_rec["rotation"]),
inverse=False,
)
tm = reduce(
np.dot,
[ref_from_car, car_from_global, global_from_car, car_from_current],
)
lidar_path = nusc.get_sample_data_path(curr_sd_rec["token"])
time_lag = ref_time - 1e-6 * curr_sd_rec["timestamp"]
sweep = {
"lidar_path": lidar_path,
"sample_data_token": curr_sd_rec["token"],
"transform_matrix": tm,
"global_from_car": global_from_car,
"car_from_current": car_from_current,
"time_lag": time_lag,
}
sweeps.append(sweep)
info["sweeps"] = sweeps
assert (
len(info["sweeps"]) == nsweeps - 1
), f"sweep {curr_sd_rec['token']} only has {len(info['sweeps'])} sweeps, you should duplicate to sweep num {nsweeps-1}"
""" read from api """
# sd_record = nusc.get('sample_data', sample['data']['LIDAR_TOP'])
#
# # Get boxes in lidar frame.
# lidar_path, boxes, cam_intrinsic = nusc.get_sample_data(
# sample['data']['LIDAR_TOP'])
#
# # Get aggregated point cloud in lidar frame.
# sample_rec = nusc.get('sample', sd_record['sample_token'])
# chan = sd_record['channel']
# ref_chan = 'LIDAR_TOP'
# pc, times = LidarPointCloud.from_file_multisweep(nusc,
# sample_rec,
# chan,
# ref_chan,
# nsweeps=nsweeps)
# lidar_path = osp.join(nusc.dataroot, "sample_10sweeps/LIDAR_TOP",
# sample['data']['LIDAR_TOP'] + ".bin")
# pc.points.astype('float32').tofile(open(lidar_path, "wb"))
#
# info = {
# "lidar_path": lidar_path,
# "token": sample["token"],
# # "timestamp": times,
# }
if not test:
annotations = [
nusc.get("sample_annotation", token) for token in sample["anns"]
]
locs = np.array([b.center for b in ref_boxes]).reshape(-1, 3)
dims = np.array([b.wlh for b in ref_boxes]).reshape(-1, 3)
# rots = np.array([b.orientation.yaw_pitch_roll[0] for b in ref_boxes]).reshape(-1, 1)
velocity = np.array([b.velocity for b in ref_boxes]).reshape(-1, 3)
rots = np.array([quaternion_yaw(b.orientation) for b in ref_boxes]).reshape(
-1, 1
)
names = np.array([b.name for b in ref_boxes])
tokens = np.array([b.token for b in ref_boxes])
gt_boxes = np.concatenate(
[locs, dims, velocity[:, :2], -rots - np.pi / 2], axis=1
)
# gt_boxes = np.concatenate([locs, dims, rots], axis=1)
assert len(annotations) == len(gt_boxes) == len(velocity)
info["gt_boxes"] = gt_boxes
info["gt_boxes_velocity"] = velocity
info["gt_names"] = np.array([general_to_detection[name] for name in names])
info["gt_boxes_token"] = tokens
if sample["scene_token"] in train_scenes:
train_nusc_infos.append(info)
else:
val_nusc_infos.append(info)
return train_nusc_infos, val_nusc_infos
def quaternion_yaw(q: Quaternion) -> float:
"""
Calculate the yaw angle from a quaternion.
Note that this only works for a quaternion that represents a box in lidar or global coordinate frame.
It does not work for a box in the camera frame.
:param q: Quaternion of interest.
:return: Yaw angle in radians.
"""
# Project into xy plane.
v = np.dot(q.rotation_matrix, np.array([1, 0, 0]))
# Measure yaw using arctan.
yaw = np.arctan2(v[1], v[0])
return yaw
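# Quick sanity check (a sketch): a 90 degree rotation about +z should give a yaw of
# roughly +pi/2, e.g. quaternion_yaw(Quaternion(axis=[0, 0, 1], radians=np.pi / 2)) ~= 1.5708.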
def create_nuscenes_infos_test(root_path, version="v1.0-trainval", nsweeps=10):
nusc = NuScenes(version=version, dataroot=root_path, verbose=True)
available_vers = ["v1.0-trainval", "v1.0-test", "v1.0-mini"]
assert version in available_vers
if version == "v1.0-trainval":
train_scenes = splits.train
# random.shuffle(train_scenes)
# train_scenes = train_scenes[:int(len(train_scenes)*0.2)]
val_scenes = splits.val
elif version == "v1.0-test":
train_scenes = splits.test
val_scenes = []
elif version == "v1.0-mini":
train_scenes = splits.mini_train
val_scenes = splits.mini_val
else:
raise ValueError("unknown")
test = "test" in version
root_path = Path(root_path)
# filter exist scenes. you may only download part of dataset.
available_scenes = _get_available_scenes(nusc)
available_scene_names = [s["name"] for s in available_scenes]
train_scenes = list(filter(lambda x: x in available_scene_names, train_scenes))
val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes))
train_scenes = set(
[
available_scenes[available_scene_names.index(s)]["token"]
for s in train_scenes
]
)
val_scenes = set(
[available_scenes[available_scene_names.index(s)]["token"] for s in val_scenes]
)
if test:
print(f"test scene: {len(train_scenes)}")
else:
print(f"train scene: {len(train_scenes)}, val scene: {len(val_scenes)}")
train_nusc_infos, val_nusc_infos = _fill_trainval_infos(
nusc, train_scenes, val_scenes, test, nsweeps=nsweeps
)
if test:
print(f"test sample: {len(train_nusc_infos)}")
with open(
root_path / "infos_test_{:02d}sweeps_withvelo.pkl".format(nsweeps), "wb"
) as f:
pickle.dump(train_nusc_infos, f)
else:
print(
f"train sample: {len(train_nusc_infos)}, val sample: {len(val_nusc_infos)}"
)
with open(
root_path / "infos_train_{:02d}sweeps_withvelo.pkl".format(nsweeps), "wb"
) as f:
pickle.dump(train_nusc_infos, f)
with open(
root_path / "infos_val_{:02d}sweeps_withvelo.pkl".format(nsweeps), "wb"
) as f:
pickle.dump(val_nusc_infos, f)
def create_nuscenes_infos(root_path, version="v1.0-trainval", nsweeps=10):
nusc = NuScenes(version=version, dataroot=root_path, verbose=True)
available_vers = ["v1.0-trainval", "v1.0-test", "v1.0-mini"]
assert version in available_vers
if version == "v1.0-trainval":
train_scenes = splits.train
# random.shuffle(train_scenes)
# train_scenes = train_scenes[:int(len(train_scenes)*0.2)]
val_scenes = splits.val
elif version == "v1.0-test":
train_scenes = splits.test
val_scenes = []
elif version == "v1.0-mini":
train_scenes = splits.mini_train
val_scenes = splits.mini_val
else:
raise ValueError("unknown")
test = "test" in version
root_path = Path(root_path)
# filter exist scenes. you may only download part of dataset.
available_scenes = _get_available_scenes(nusc)
available_scene_names = [s["name"] for s in available_scenes]
train_scenes = list(filter(lambda x: x in available_scene_names, train_scenes))
val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes))
train_scenes = set(
[
available_scenes[available_scene_names.index(s)]["token"]
for s in train_scenes
]
)
val_scenes = set(
[available_scenes[available_scene_names.index(s)]["token"] for s in val_scenes]
)
if test:
print(f"test scene: {len(train_scenes)}")
else:
print(f"train scene: {len(train_scenes)}, val scene: {len(val_scenes)}")
train_nusc_infos, val_nusc_infos = _fill_trainval_infos(
nusc, train_scenes, val_scenes, test, nsweeps=nsweeps
)
if test:
print(f"test sample: {len(train_nusc_infos)}")
with open(
root_path / "infos_test_{:02d}sweeps_withvelo.pkl".format(nsweeps), "wb"
) as f:
pickle.dump(train_nusc_infos, f)
else:
print(
f"train sample: {len(train_nusc_infos)}, val sample: {len(val_nusc_infos)}"
)
with open(
root_path / "infos_train_{:02d}sweeps_withvelo.pkl".format(nsweeps), "wb"
) as f:
pickle.dump(train_nusc_infos, f)
with open(
root_path / "infos_val_{:02d}sweeps_withvelo.pkl".format(nsweeps), "wb"
) as f:
pickle.dump(val_nusc_infos, f)
def get_box_mean(info_path, class_name="vehicle.car"):
with open(info_path, "rb") as f:
nusc_infos = pickle.load(f)
gt_boxes_list = []
for info in nusc_infos:
mask = np.array([s == class_name for s in info["gt_names"]], dtype=np.bool_)
gt_boxes_list.append(info["gt_boxes"][mask].reshape(-1, 7))
gt_boxes_list = np.concatenate(gt_boxes_list, axis=0)
print(gt_boxes_list.mean(0))
def eval_main(nusc, eval_version, res_path, eval_set, output_dir):
# nusc = NuScenes(version=version, dataroot=str(root_path), verbose=True)
cfg = config_factory(eval_version)
nusc_eval = NuScenesEval(
nusc,
config=cfg,
result_path=res_path,
eval_set=eval_set,
output_dir=output_dir,
verbose=True,
)
metrics_summary = nusc_eval.main(plot_examples=10,)
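    # NOTE: metrics_summary is assigned but never returned or used; callers that need
    # the metric values would have to return it from here (or read whatever NuScenesEval
    # writes to output_dir).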
|
# -*- coding: utf-8 -*-
"""Parser for Windows NT shell items."""
import pyfwsi
from plaso import dependencies
from plaso.events import shell_item_events
from plaso.lib import eventdata
from plaso.winnt import shell_folder_ids
dependencies.CheckModuleVersion(u'pyfwsi')
class ShellItemsParser(object):
  """Parser for Windows NT shell items."""
  NAME = u'shell_items'
def __init__(self, origin):
"""Initializes the parser.
Args:
origin: A string containing the origin of the event (event source).
"""
super(ShellItemsParser, self).__init__()
self._origin = origin
self._path_segments = []
def _ParseShellItem(self, parser_mediator, shell_item):
"""Parses a shell item.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
shell_item: the shell item (instance of pyfwsi.item).
"""
path_segment = self._ParseShellItemPathSegment(shell_item)
self._path_segments.append(path_segment)
shell_item_path = self.CopyToPath()
if isinstance(shell_item, pyfwsi.file_entry):
long_name = u''
localized_name = u''
file_reference = u''
for extension_block in shell_item.extension_blocks:
if isinstance(extension_block, pyfwsi.file_entry_extension):
long_name = extension_block.long_name
localized_name = extension_block.localized_name
file_reference = extension_block.file_reference
if file_reference:
file_reference = u'{0:d}-{1:d}'.format(
file_reference & 0xffffffffffff, file_reference >> 48)
fat_date_time = extension_block.get_creation_time_as_integer()
if fat_date_time:
event_object = shell_item_events.ShellItemFileEntryEvent(
fat_date_time, eventdata.EventTimestamp.CREATION_TIME,
shell_item.name, long_name, localized_name, file_reference,
shell_item_path, self._origin)
parser_mediator.ProduceEvent(event_object)
fat_date_time = extension_block.get_access_time_as_integer()
if fat_date_time:
event_object = shell_item_events.ShellItemFileEntryEvent(
fat_date_time, eventdata.EventTimestamp.ACCESS_TIME,
shell_item.name, long_name, localized_name, file_reference,
shell_item_path, self._origin)
parser_mediator.ProduceEvent(event_object)
fat_date_time = shell_item.get_modification_time_as_integer()
if fat_date_time:
event_object = shell_item_events.ShellItemFileEntryEvent(
fat_date_time, eventdata.EventTimestamp.MODIFICATION_TIME,
shell_item.name, long_name, localized_name, file_reference,
shell_item_path, self._origin)
parser_mediator.ProduceEvent(event_object)
def _ParseShellItemPathSegment(self, shell_item):
"""Parses a shell item path segment.
Args:
shell_item: the shell item (instance of pyfwsi.item).
Returns:
A string containing the shell item path segment.
"""
path_segment = None
if isinstance(shell_item, pyfwsi.root_folder):
description = shell_folder_ids.DESCRIPTIONS.get(
shell_item.shell_folder_identifier, None)
if description:
path_segment = description
else:
path_segment = u'{{{0:s}}}'.format(shell_item.shell_folder_identifier)
path_segment = u'<{0:s}>'.format(path_segment)
elif isinstance(shell_item, pyfwsi.volume):
if shell_item.name:
path_segment = shell_item.name
elif shell_item.identifier:
path_segment = u'{{{0:s}}}'.format(shell_item.identifier)
elif isinstance(shell_item, pyfwsi.file_entry):
long_name = u''
for extension_block in shell_item.extension_blocks:
if isinstance(extension_block, pyfwsi.file_entry_extension):
long_name = extension_block.long_name
if long_name:
path_segment = long_name
elif shell_item.name:
path_segment = shell_item.name
elif isinstance(shell_item, pyfwsi.network_location):
if shell_item.location:
path_segment = shell_item.location
if path_segment is None and shell_item.class_type == 0x00:
# TODO: check for signature 0x23febbee
pass
if path_segment is None:
path_segment = u'<UNKNOWN: 0x{0:02x}>'.format(shell_item.class_type)
return path_segment
def CopyToPath(self):
"""Copies the shell items to a path.
Returns:
A Unicode string containing the converted shell item list path or None.
"""
number_of_path_segments = len(self._path_segments)
if number_of_path_segments == 0:
return
strings = [self._path_segments[0]]
number_of_path_segments -= 1
for path_segment in self._path_segments[1:]:
# Remove a trailing \ except for the last path segment.
if path_segment.endswith(u'\\') and number_of_path_segments > 1:
path_segment = path_segment[:-1]
if ((path_segment.startswith(u'<') and path_segment.endswith(u'>')) or
len(strings) == 1):
strings.append(u' {0:s}'.format(path_segment))
elif path_segment.startswith(u'\\'):
strings.append(u'{0:s}'.format(path_segment))
else:
strings.append(u'\\{0:s}'.format(path_segment))
number_of_path_segments -= 1
return u''.join(strings)
def GetUpperPathSegment(self):
"""Retrieves the upper shell item path segment.
Returns:
A Unicode string containing the shell item path segment or 'N/A'.
"""
if not self._path_segments:
return u'N/A'
return self._path_segments[-1]
def Parse(
self, parser_mediator, byte_stream, parent_path_segments,
codepage=u'cp1252'):
"""Parses the shell items from the byte stream.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
byte_stream: a string holding the shell items data.
parent_path_segments: list containing the parent shell item path segments.
codepage: Optional byte stream codepage. The default is cp1252.
"""
if parent_path_segments and isinstance(parent_path_segments, list):
self._path_segments = list(parent_path_segments)
else:
self._path_segments = []
shell_item_list = pyfwsi.item_list()
shell_item_list.copy_from_byte_stream(byte_stream, ascii_codepage=codepage)
for shell_item in shell_item_list.items:
self._ParseShellItem(parser_mediator, shell_item)
def UpdateChainAndParse(
self, parser_mediator, byte_stream, parent_path_segments,
codepage=u'cp1252'):
"""Wrapper for Parse() to synchronize the parser chain.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
byte_stream: a string holding the shell items data.
parent_path_segments: list containing the parent shell item path segments.
codepage: Optional byte stream codepage. The default is cp1252.
"""
parser_mediator.AppendToParserChain(self)
try:
self.Parse(
parser_mediator, byte_stream, parent_path_segments,
codepage=codepage)
finally:
parser_mediator.PopFromParserChain()
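# A minimal usage sketch (hypothetical inputs; in practice a plaso parser plugin supplies
# the ParserMediator and the raw shell item byte stream):
#   parser = ShellItemsParser(u'some_origin')
#   parser.UpdateChainAndParse(mediator, shell_items_data, [], codepage=u'cp1252')
#   path = parser.CopyToPath()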
|
"""Helper module
This module provides some helper checkers
that make sure the configuration is complete
before using any of the functionalities
"""
from CAM2RequestsCLI import host, port
from functools import wraps
from click import UsageError
def requires_url(f):
    '''Asserts the configuration of the URL'''
    @wraps(f)
    def decorated(*args, **kwargs):
        if host != '' and port != '':
            return f(*args, **kwargs)
        else:
            raise UsageError('CAM2RESTfulAPI URL is not set. Use the `url` command to set it')
    return decorated
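# A minimal usage sketch (hypothetical command; assumes host and port come from the CLI config):
#   @requires_url
#   def status():
#       ...  # safe to issue requests against the configured CAM2RESTfulAPI URL here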
|
from pyleaves.leavesdb.db_manager import create_db, build_db_from_json
import argparse
import os
from os.path import join, abspath
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--json_path', default=r'pyleaves/leavesdb/resources/full_dataset_frozen.json', type=str, help='Db json file')
    parser.add_argument('--output_folder', default=r'pyleaves/leavesdb/resources', type=str, help='Folder where to save the output')
    args = parser.parse_args()
    json_path = abspath(args.json_path)
    db_dir = abspath(args.output_folder)
    print('Creating folder to save db')
    os.makedirs(db_dir, exist_ok=True)
    db_path = join(db_dir, 'leavesdb.db')
    build_db_from_json(frozen_json_filepaths=[json_path],
                       db_path=db_path) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author : weiz
# @FILE : testImgTemplate.py
# @Time : 2020/6/19 14:39
# @Description : OCR service test program; three ways of calling the service, as shown below
import requests
import json
import numpy as np
def test_1():
#url = 'http://192.168.0.99:80/ocr_service'
url = 'http://192.168.1.43:8888/ocr_service'
# data = json.dumps({'base64Image': '/9j/4AAQSkZJRgABAQEAYABgAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wgARCAEWAgQDASIAAhEBAxEB/8QAGQABAAMBAQAAAAAAAAAAAAAAAAMEBQIG/8QAFAEBAAAAAAAAAAAAAAAAAAAAAP/aAAwDAQACEAMQAAAB9+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAj5JkcJaZdguMsajN5NQhJmbyaijGaQAAAAAAAAAAAAAAAAAAAAAAAAMWvrdGXDrQmPatCjR9dTIedDkr1dIV8/YolPL9NRPRAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKMZpM2M1lOM0FWQmV65oI6peR8kznkkR8kyPkmU4zQR8kyrGXkMwAAAAAAAAAAAAAAAAAAB4+H1kJhw7Az+tIZ/WlMZMetGR0daM5r6XRl863Jm09bk5r63J1n61cpr3JTz/SRlfQq2gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/8QAIxAAAgICAgMAAgMAAAAAAAAAAgMBBBITBRQAEVAVkCEzgP/aAAgBAQABBQL9KOY7AZBlmMMW8GA7kKyD7VfNt9KJ76tjL9VZ/kaezxTIclt+qrweRplJW1CY3kF9LLN0KwLkEwwUxMs5HPZm8blwyltRk+n2RhgPCLDxJYVEM6zst9Yme2gw2VshrfRJZi1apgjBkNFDUyXHpbKqhgMriXVqukJXJtUs4ZrNjDWYn1w7c1H9XTZFw8e0B/3825XT5HIUyG1dXUKvfVZc6ytBd1M+TZQLTZAEThFncV7zHYm2l5ZjsBkGUz6EDFi8x2AyDLMdimQ5J2QWYXFMkzFcCyCNllCSC0tjVsho/G5CGZVhbDuVYcLo2MnWYZu/gJbLNppUT3TMBHvSzsdlUmzwt/bQNrdeqo0hXT2CMSdXH024BiCvaGMZOVNkhVfLllEuOw5sSyi2ArU/6PjaIjwa/tdmhDSRTaq0+rLmRUxIqSzaxZm/rQXhVPUWEbGChqZYjZYUuQZK5N0LObD67iLTaEoXMtJZm8QEPEJ11JXJuUs4YeeCF6a6Fypf7Zf/xAAUEQEAAAAAAAAAAAAAAAAAAACQ/9oACAEDAQE/ARS//8QAFBEBAAAAAAAAAAAAAAAAAAAAkP/aAAgBAgEBPwEUv//EADwQAAIBAwEEBgcGBAcAAAAAAAECEQADEiETIjFRBDJBYZHwFEJxgaHB0SNQUmKx4TOQsvEQNENyc4CS/9oACAEBAAY/Av5KOE70TVxRO42J8AfnQSd4gkeffVptRtVyWauI9wBra5RI17vb9aCC8hY8FBk1Di6NYnZNHnSimN+QAf4LfSgjX7c5YnfG77aKekWtADOYj/BLqzDqGE1vX7chsSMxI1imHpFoYmNXGtMn2hK8cbTH9BSkbSGiDsmjXhrH3let2Liu4nf47GY0+BPup+jopWbn2Tfgi2N75e+iEVmv3FxgXCsgefEiujnE43N4EdKuNpEzB7OHjXSsdrHownDGPX4z8qui09sC5fx3kn/TB591SWW0A0Av0lreY7dK2lgI+6MLA6WxxHsjvpJV1VW6+yJ14QNPj4d111FxgYym22S8tO0cfPB7p6VfA/CoTwGlKnpd5Tb3CoCaR7qXPqZLiB609kd0BprojbZ2a8A9xcFjq8eHs+FdI2S3Cdt6rR6i/mWrC3Bc3tkUk6Rmv5jzHYPvK5eSGZlVcSY4E9vvrO4we5wkCAB3VnbCsx0lz1B3Ci9oozN1wwifZy49/wCpq4btm1LHTFBp3+3z7bBOGa3M3xEDqlYHw8KW7rKqV8Y+lWsiS6WwnHTsmPAVLxgvVXv5nz+zXbhGbALu8IE/Wpuxip3VB495o3LMZHrKTAb96HSAAGxKtp1uH0o9GW/bFrDZibWscOdXmS7aVXbKChPYBzHKrYFyyccNdmQSFI7+7/v+we9bDj1MwDQPpVnXm4pA4O/Mbyjh7SOdbJQZxy6yn9CaAedROgntA+YpevDGAdm3GY5aa1sjeti5+HLWramd9sR4E/KsDM45cJnn8qAO1EmNbLD5VhO9E0AhbVchKESPfWE70TVxRO42J8AfnRME9wpXQyrCRWE70TVxRO42J8AfnWE70TSXVmHUMJoqVuz3WmPyoBdqdY/gt9KljAkDx0p01lOfnzFY3b1tDyZooIguGfX2Zx8ayE9Yr4GPufFLboMs5njBktOfD2/CnD2rlxLm9GXWXTnc+vxo3MHtFAwV8gJ9kOOVZqjXjhpF0EwY7DcMV0mSZOwxCxpvnnVu03pUBldh9nxL6T7+VXwFXZ+lWpOWvqdkVZAN+ReKtN5vwE8/ZXS4y/zFsbpg+pV0EXBHSrcLcbIjqd5rANalrbFDhBXVe2uiqqCxuEApcllXSQZXnAoJlZ3kYo2z6uq99dJi9Z/ia/ZH8K/mrMdGsm4bicV47woWrvQOirkpYFdeEfl76uW0DdIjjs3hrXdM8wavOOjdLP2mn23cPza095L7q8YoIXiezhzirFpL90qtzZFHC/gJHAeytnbg3Dz4KOZro+0jF0Uh/wAx5+09vki9gj72KqXjGTE8OOtPuIl5FU6PIcGdDp3Vpd6UkaQliR/TXRybvSiBbG5sNOH+2m/5bn9Z+53wZkZ+LzJ+NFbsTllNslff3eedI9shbgeS7EzEHTjPuoXWuhxgV9bu5k8qfqlW2eh/KxJraWxbRl6iAbseT7vGWuFrkl1frmNI7PdVppGCHLvmCPn8Kvi5qty4HEGOAH0oJbMKbgd8iWOkfSKt3FALpoMjoO+O3gKL2ijM3XDCJ9nLj3/qat3cyMARA7eH0q+xjffIf+QPlQd4xTqAc+fn+21cjdBVY5Hn4Dzwd7N0IzhVMjsBM/rVvG9ZxTTBbZUY+NbS5GnUHL9/PtVmIwQ5LHGYjz+2pxHEyas2nglFUe8UHeMU6gHPn5/s12
4RmwC7vCBP1o4Rl31btTOChZoqY67N4sT/ADZv/8QAJxABAAICAgICAgMAAwEAAAAAAREhADFBUWGBcZGhwVCx8BCAkPH/2gAIAQEAAT8h/wDFH0KibPHf6k7MHiVx3oehih0yjYRP9MGbRJ3Ux1MceHrAVVd6U0J6a8O8mhHwaTpguIuerw6IkqiBVmLLan6vBbUjzzxbj/XkvnQbALyrUfKYJCrDOmaGd1+T/g5AQbQk5LkOBkHYTxz8OchoBlBkuy4+RxGD8RkYHaGkwVk8OalFkLk/khg1WIpB2m4XgURgOSexQcHYILujuGODLeEswxygolqnGK/zrAI6L6Z/W0r24+2/GQ6cczG5R8IwlU4tuk7W/A2bGI26fBEMxMwuitQubMryz5huY7DElXAoYZFtIIlY00q8uBgmUa+JLvBlRjrBUqZxpJ4RxBRJIE7tjtdAD0zNL8QIXsHSHdTLZhUFssA5Qm/PORUUqWqWv9B4/koCakLYmj+PGNnE9AAljzd+gI5pv+DQXdtk1eotS9CXK2Jjao+yg7E5SLTZLW2a4htwyMx1zB6BHxLeLBhI4hJ/phs2lSKHs/B5ZnzKEOf0HR73GHV6gjQN8snxRcS6DzJ4XFRfRobtiP2B5lIMDuLKeEioXi8mKvj+2P3WATHlWY8esAeOFvmZJDjLKA7mTB4uDoj/AL/ICzMtJMWmFGISQH6dYgQhUOnORGGmlHgQ628gfzBXooLmcgTm6kRors0vLo4EySdVg8SqO9j0sRDdMUDgFyT9qm8rSGKVKwWw256FRNnjv9SdmSjtX9FiCdn3noVE2eO/1J2YPErjvQ9DGIaEkEvi8iSM8bHWehUTZ47/AFJ2YPErjvQ9DPQqJs8d/qTsw5AQbQk4kYOv+yGHDJbCTMMtC8AzjR5QPymH4DEnISRPGz5XWFE8kGo94InKnB/GGeHX4l6IC12i/J/DqN2yOogGgEQqgZEFY0jkRCZQbCeiDQMICgtxew7Qj8ZFSqoFPTB98RZXciujEA0WpvuNGNF21SeqN2fTJKdhQHrs45w94aalKPIlXDHZmzdiTwhQZIkreOPMsCZe+xXfOO2pkoVEl3O42DDrE9CK6ICKg3M+8ZJMkOQVPOR8aMpoY2aXi4VGT1oICbNLHM385SfMTcIsYlC5WchOI2rdQERGMJd4lKNGY2HvriAY9g3Kgkzh7MLCZXAWEmh75vDgwp/wHg5+0j5DB0Enmi9ljcYd0csDEOyVs8FHKmDfZOuIjaG4n5FQbeWj3K/wxm9nivxJNPf8RFRJakIV6L1utEuR3aYuTwuXuYbleRiGgNIgQQAlKIYTMwpmKv8A+d74fWLLzkq+RxJjMFmymtsr7VOM8DkAlV0ddn6yF26EdA+Px7TUndQaIJZEMzrLGkz1SLV6w8zxCs2RJk0MhsKBXyMOWpehLlbExtUfZSgm1XdVvXXn4kVwgUHW17WLRJntCFelA93WNYcJiwV8qPS8xiZlDdMQRGU/HZMg5Agkikm5RquOlyfKWw6Op8o+tHLilzyLJQfFv2aw1kWaZV/1fAGRDdnJAWezFokz2hCvSge7rDq9QRoG+WT4ouJfoOavjx88dOs8P+ImCJx5UtLoD8P/AKzf/9oADAMBAAIAAwAAABDzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz//EABQRAQAAAAAAAAAAAAAAAAAAAJD/2gAIAQMBAT8QFL//xAAUEQEAAAAAAAAAAAAAAAAAAACQ/9oACAECAQE/EBS//8QAJBABAQEBAQACAgEEAwAAAAAAAREhMQBBUWFxUBCBkJGAscH/2gAIAQEAAT8Q/wAKPbjUhrFTkMsvRNAFDKCKt1yDZo/tmUpFrKvMf9v4fC6QhEJsl1XVRCiRxcSKZigsF48i0bR3IgyqGENEBffqoT2+cNOW8IPtir4yYh3qA/CoACgwajNUL/aHz7mzaFZ+QG0+D7P6AwMAAIDFLH7fIOJJoEoEE36CfHsT/wB4bX9i+h8eK4Wxoo0FEvyepWf5wwEIBWafyTNZ7Q7ETbdArKQ0L2AwTt0maTXh8WAcKeEwpAA+NCsOAICDVOKCbPf+vvH9hX8X28QEKBlDQpDh8rfj0B/t0ORZEFq6kPLuRWcC7AExP5CZSatYgyHdESngDGP5aRC1kVdUbyugWhVxKooSqqFV8OZWZOdGoL0vd9N2JDUFfAYlEDXyoIUktCjragPkGRY8YEiSyNiYL5uU6dmwhvCRLhY/kQRYvDaQXRZ/2YqUNW0NqBQUpQVnkcLiij4mTQkqgMnnqRFe0DQK1Tuezt0ijiDtZhXLDwAWoBtomCfLRpsHCElIDltlvy9+GfNozGAggsudgK+AnFvUKO5r9iCWvgpjOMCqCWjiwPQ94ZbkIalwYiINW+kg4jUJBoEAhQgz1prHiLAHWJBv6TWEBphypK+6BdjnjRRrcdQBqyP3dgeSNk1OQwqctgn/AD/W6DLLhBhSSoabN8a6UhQl1BX4QT58g9uABFoVxAVx+vK8RS0IbFTKFj9eWf4lJLCpGAH558lEgkLHWiBhDz4TwpaK6wi21pCbTyCjlBAW65Fl1P2E1171wFC4E2dABzjN3xJRQNTvu3GpDWKnIZZeiaImuQNoTIfWvHu3GpDWKnIZZeiaAKGUEVbrkGzR/aw56QUOKCvNQ+09u8GREKjpRO+7cakNYqchll6JoAoZQRVuuQbNH9vbjUhrFTkMsvRNAYGAAEBilj9vpwNqxoOOXfhznfV42GKUSmARqSNk9EgI00pz7B+LueAo2QJOlVumYfBQlYakoIZRL+HwDS0CwkQIKgqDu+GVGgg1rFyqfic5/DtrDEuMoh0PcPCPI7MrAqIRLAArzXowAWpNGw7ETqPkxAigRu9UCe4Zk3ikAJVnBstHsHKxXT7AjgDBvgI4vxpNAPsnrmam1asjiuUBV1R8OV3yxmWhGo709d93bknxEAF+nPAApeQNY
bBuIoRZvzLVRWsEqR9HqmLU5ItzDZ20yeYJ02F0xMYRXRbsKDbr8ODbKCjix4/czq2yIbLRePo/pmzt5mQFCfAApKVGyVDtnLdeJ4ivKHKI6s/KgZELZvOQVQGsiGgCBNhVKZIyiCixBA98hDgmkxXIOEqU8yXLTWU2ik2pfUHDVGCHXhQBOyj77+8QwtFX90AJ9owcUceSMPhHYanf4c0BqoKJIgIbE8IFvhxmKjaI1Uh9BvgyRLPXVwqH1nw2VNjAxJpFY/DwY9vlASIiDA+WjDfAkjDIu0ouyaYA+giOcc1LJ0UdYkjuVNK66xRVxUfL6HKFaWyiCSvrenjuRGj8OOWPgB9YU8gA5RtRRdgDqRFe0DQK1TuezV3EXD4aBCifBfAxIcpoIcdqy4n6NHA41S3DY7gKlI8uVv1VOFxAGfgfFlM6cOGIAkThB7ZZ0CSDJkK9CCgFEBp67uoVGswvb0E9WyEKqpA9NUuj50YZJP5V1gA+gIAGL+WViUPgEYJBx9o4HGqW4bHcBUpHimM4wKoJaOLA9D3sXEbcmqTaKYdXCv8AXQ8+FZZZXxMjy1IlobEfze9/yzf/2Q=='})
data = json.dumps({'base64Image': 'iVBORw0KGgoAAAANSUhEUgAAAtoAAAG4CAYAAAB7OkD/AAAgAElEQVR4nOydd3xN9//H78iODJEliYgVQcyo3aK1iQZFSmxFtWJWqZhRMYPGKErMolYpDUJrj6L2qpqxYmYgsu7r90d+5/M9585zb+5K+v48Hs8/7vns8zn3nNf5nPfn/ZGAAgUKFChQoECBAgUKRg8SSzeAAgUKFChQoECBAoXiGIwmtJ89e4bk5GSsW7cOI0eORN++fdG9e3d069aNIAiCIAiCIKyO7t27o1evXhg2bBiWLl2KU6dO4f79+8aSx4UT2s+ePUOfPn3g5eUFT09PeHp6Qi6XQyKREARBEARBEESRQS6Xo2TJkihdujS8vLzQrVs3HDx40PxCe/Xq1ahTpw5cXV0tflIIgiAIgiAIwhSULVsWVatWxcqVK00vtFNTUxEYGAhvb2+Ld5wgCIIgCIIgzIGPjw9sbGyQlJRkGqHdq1cvuLu7W7yjBEEQBEEQBGEJKlSogI8++si4QrtHjx5wcXGxeOcIgiAIgiAIwpK4ubkhLCzMOEK7YsWKkEqlFu8UQRAEQRAEQVgDUqkUjo6OeP/+veFCWyaTWbwjBEEQBEEQBGGNuLu7Gya0q1atavHGEwRBEARBEIQ1U7t2bf2E9ujRo2FjY2PxhhMEQRAEQRCENWNjY4MRI0aIE9qXLl2Cs7OzxRtNEARBEARBEEUBFxcXHD9+XLfQLlmypMUbSxAEQRAEQRBFDa1Ce+7cubSFOkEQBEEQBEHoiY2NDYYNG6ZeaOfn58PW1tbijSQIgiAIgiCIooifnx+ePn2qKrQXLFhAQpsgCIIgCIIgDMTGxgbr1q1TFdq0AJIgCIIgCIIgCoeTk5NQaJ8/fx7+/v4WbxhBEARBEARBFGXc3d2Rmpr6P6G9adMmMhshCIIgCIIgiEJia2uLtWvX/k9of/rppyav1MHBAS4uLpDL5ahRowbq16+P0NBQlCxZEh4eHrC3t7f4iSEIgiAIgiCKNra2tihRogTkcjkqVaok0Jyenp5m0Zzt2rX7n9D29fU1WUWlSpWCs7MzVq9ejczMTBX/ggCQmpqKqVOnoly5cnBxcbH4ABEEQRAEQRBFC1dXVzg5OWHVqlVaNef+/fvh7e0NLy8vk7XFy8urQGgrFAqTbbcul8uxcOFCtR3VFBISElChQgWLDxZBEARBEARRNJDJZJg3b55emnPKlCmQSqUmaY+dnR3y8/MhefPmDdzd3U1SiaHhr7/+QrVq1Sw+aARBEARBEIR1I5PJoFAoDNKcFy9exAcffGD0NpUsWRKZmZmQpKWlGX3bdT8/P4NFNheuX79OM9sEQRAEQRCERhwdHQutOVNTUxEYGGjUdpUsWRJpaWnGF9rly5fHrl27Ct1pADh37pzRXwIIgiAIgiCIok9AQACWLl1qFM15//59eHh4GK1tJhHaUqkUkZGRRukwF4z9hkEQBEEQBEEUfRo3bmxUzdmwYUOj2WybRGg7OjoiKyvLqJ0+fvw4SpcubfHBJAiCIAiCIKyDEiVKsE1hjBWuX79utA0cTSK0a9eubdQOc8HV1dXiA0oQBEEQBEFYB6VLlzaJ5qxYsaJR2mcSoT1mzBiTdHrQoEEWH1CCIAiCIAjCOujcubNJNOfMmTON0j6jC20nJyckJyebpNOLFy+GTCaz+KASBEEQBEEQlkUqlWLNmjUm0Zy//vorSpQoUeg2Gl1olyxZEmfPnjVJp3///Xc4OjqKbsv58+cJgiAIgiCIIoRYnefo6IikpCSTaM4zZ84YTRcbXWifOHHCJJ3+7bff4ODgIKod9vb2AIDTp09j3bp1BEEQBEEQhBVz+vRpABCt9WxtbTF79myTaM7Tp09bp9C2t7fH77//bpJO//jjj6LdrXh6egIA+vbtW+g+EQRBEARBEKalT58+AAAvLy9R6aVSKeLj402iOX///XfY29sXuk8mWQzZt29fk3Q6KipKdBuqVq0KAAgPD7f4hUMQBEEQBEFoJzw8HABQtWpV0Xl69uxpEs05adIko/TJJEK7XLlyJum0jY2N6DZEREQAAEJCQix+4RAEQRAEQRDaqVy5MgCgU6dOovPY2dmZRHP6+voapU8mEdqlSpXCjRs3jNrh7du3w9PTU3QbYmJikJ2dDVtbW4tfOARBEARBEIR2bG1tkZ2djZiYGNF5SpUqhb179xpVcx4/fhx+fn5G6ZNJhLZEIkFYWJhRO/3RRx/pVX9ycjKOHTtm8YuGIAiCIAiCEMfRo0dx4MABvfLUr1/fqJqzZ8+eRuuPyYS2s7Mzbt68aZQOr1mzBm5ubqLrdnJywrt37zBt2jSLXzAEQRAEQRCEOKZOnYp3797ByclJdJ5SpUohJibGKJrzwIEDKFWqlNH6YzKhLZFI4OXlVej950+dOoUyZcroVW+PHj0AAGFhYRa/YAiCIAiCIAhx1KlTBwDQo0cPvfL5+Pjgl19+KZTmvHPnDvz9/Y3aH5MKbYlEgmrVquH8+fMGdfiXX35B5cqV9a5z3759uHLlisUvFoIgCIIgCEI/Ll++jP379+udLywsDMuXLzdIc27duhXlypUzel9MLrQlEglcXFzQsmVLZGdni+psXl4ewsLCDDJEr127NhQKBaKjoy1+oRAEQRAEQRD6MWzYMCgUCtSuXVvvvGXKlEH37t1FC+wXL15gypQpKF++vEn6YhahLZEUuF9xdnZGTEwMjh07hqysLEFH3717h+PHj6Ndu3aws7ODXC43qJ5t27bh6dOnetn2EARBEARBENaBo6Mjnj59iu3btxuUXy6XQyqVonnz5mo1Z3Z2Ns6dO4fPP/8cPj4+JvVQZzahrVyps7MzZDIZHB0dIZPJ4OzsrNeCR3W0aNECADB8+HCLXyQEQRAEQRCEYQwfPhwA0KJFi0KV4+bmJtCcEkmBkC+s5hSLRYS2KXBycsKNGzdw8eJFvTa2IQiCIAiCIKwLGxsbXLhwATdv3izSVgrFRmivWrUKOTk5qF+/vsXbQhAEQRAEQRSOevXqIScnB6tWrbJ4WwylWAjtL7/8EgAwevRoi7eFIAiCIAiCMA6jRo0CAHz55ZcWb4shFHmh3a1bN+Tn52PdunWQSqUWbw9BEARBEARhHKRSKdauXYv8/Hx069bN4u3RlyIttPv06YPc3Fzs2bPHpCtGCYIgCIIgCMtga2uLPXv2IDc3F3369LF4e/ShSAptuVyOKVOmQKFQYOvWrXBwcLB4mwiCIAiCIAjTYG9vj19++QUKhQJTpkwx2A20uSlyQjsoKAgHDx6EQqHAvHnzisyJJgiCIAiCIAxHJpNh7ty5UCgUOHjwIIKCgizeJl0UGaHt5OSE7777Dm/fvsXLly8RERFh8TYRBEEQBEEQ5iUiIgIvXrzA27dvMWHCBDg7O1u8TZqweqHt7e2N8ePH49mzZ1AoFFi5ciW8vLws3i6CIAiCIAjCMnh5eWHlypVQKBR49uwZxo8fD29vb4u3SxmrFNrly5fHoEGDsHPnTuTk
5CA/Px+bN29GzZo1Ld42giAIgiAIwjqoWbMmNm/ejPz8fOTk5GDnzp0YNGgQypcvb/G2SSQWFtrBwcGIj4/HsmXLsGHDBhw9ehTPnz9ne9FfvXoVMTExKFu2rMVPFEEQBEEQBGGdlC1bFjExMbh69SrTkc+fP8fRo0exYcMGLFu2DPHx8QgODjZruywqtFu2bAkAuHv3Ls6ePYsdO3Zg1qxZiIqKgr+/v8UHjSAIgiAIgiha+Pv7IyoqCrNmzcKOHTtw9uxZ3L17FwDQqlUrs7bFKoQ2mYQQBEEYDy8vLwQFBSEoKAhubm4Wb4+pcHFxsXgbCIIoGtSoUYOENkEUNyZOnIijR4/i6NGjiIuLM0udxflrUO3atXHmzBnExMQgNDTU4u0pDFu3bsX169dx/fp1DB061Khl//zzz+zT6XfffWfxvmqiX79+2LFjB3bs2IHIyEi98jo7O+PZs2d48eIFDh8+jHbt2lm8P0WRI0eOsHvUxx9/bPb65XI5/vjjD/ZfGD9+vEHl9O/fn5Vx7tw5ODk5WfzcEtYFCW0rGASCMDbr1q1jgufXX381eX01a9ZEWloaEhMT4ejoaPH+G5vY2FjwQ61atSzeJkM5c+YM60dMTIxRyy4qQjs+Pp61c8qUKXrlHTt2LMv77t07+Pn5Wbw/RRGFQsHOoyW2t+7ZsyerPzs7G+XKldO7DLlcjn///ZeVs3jxYoufV8L6IKFtBYNAEMbGnEI7KCgIjx8/ZvWdPn1arfiQy+UIDQ01G8Zc+X39+nXWv7/++svi41sY+ELb2GJYjNB2c3NDxYoVTYLYBeyGCu2SJUvi1atXLO/MmTONev5q1aqFyMhIo9K0aVNIJBIsXrwYW7Zs0ZtGjRqZ5Dq0pNC2t7fHnTt3WP0JCQkGldOlSxdWRm5urtV4myCsCxLaVjAIhHlo3rw5zp8/Xyg6deqEESNGICUlxSj07t3bJH01p9B2d3fHwYMHwQ8PHz5UmfUtVaoUzBlOnjxplP5xN0ku9OrVy+TXapkyZRAVFWUwrVu31lg2X2iPGzfOqO0WI7SHDh1qsjG/ceOGqHYaKrTnz5/P8j1//hzu7u5GPX/z5s0z+jnZt28fJBIJbt++bVD+7t27m+Qat6TQnjBhAqv75cuX8PT01LsMmUyGv//+m5Wzbt06s/aBKDqQ0LaCQSDMQ0REhEEPGn4YMGAAJk6cWOhyuGBsG1kOc5uO2NnZYcOGDYK+ZWRkCARfURXaM2bMYGU+fvwYdnZ2Jj+fnTp1KlTfT5w4obHss2fPsnTffPONUdttaaF94cIFUe00RGiHhoYiNzeX5Xv8+DGSk5NFM2nSJJ11kNA2PZUrV0ZWVharW99xbNKkCSQSoekJAFy8eFGvcnbu3Gm2PhOWhYS2FQwCYR6KmtBu0qQJYmNjDeLSpUusjps3bxpcTps2bUSfX5lMhgULFgj6l5OTw2bt+UL7zZs3WLFihU5WrVoleCCfPHlSVL4VK1ZoXdzk5+eHiIgIUdy/f5/Vv337dtH5NCFm4ZcphTZ/Fm7kyJFG/Y+JEdoDBgzA69ev1ZKZmcnyKxQKjen4vH//nuU5deqUqHbqK7TlcjlOnTpVqDHZunWrznrCwsI0fqW4d+8egIJZe+7Y9u3bWfmDBg1Sm4+73qpXr466deuKYu3ataxcbUJ76NChBt9b+P/rbdu2GVxO6dKlRV+fdnZ2ghdNQ0KnTp3g4ODAXLYZGtLT083y3CMsDwltKxgEwjyULl3aIGH09OlTdnPs2bMn/P39MW3aNHZs69ataNCggSgaN26M/Px8ljcqKkpje0ePHl2oG7kxwpw5c/Q+z8oLB8+dOwc7OzuB0E5JSRFVVtOmTVmevLw8o9lAfvbZZ+Y6hSrh0qVLOtvXoEED5hXDELTZDp8/f561JTo62qj/scIuhmzbti3Ln5aWJioPXzQfOnRI7zxihPa3335b6HEXI7S1wb0gHTlyhB2bOnUqK9+YZixz5sxh5WoT2vyXNkuF2rVri+7XkiVLWL43b97gzJkzjEuXLiE1NVVwTB3NmzcX3P/fvXsniL927RquXbumsxyx1ypR9CGhbQWDQFgvUqlU8JmxQYMGkEgkaN++PTu2ZcsW0eWVKVNG8JDQttCoqAptiUTCZv0vXbqEUqVKQSKRGCS0+Q/GzZs3G21crV1omxL+144vv/zSqGUXVmh/+umnLP/jx49F5Vm0aBHLs3fvXpV4uVyOEiVKCEhISGB5ZsyYoRLPzx8WFobs7GyWftasWfD19dVJdHQ0y5Ofn6/X1yF1kNBWH8QKbWWTJb65SmBgIE6dOoW0tDSd5dWpUwc5OTmsnG+//ZbFlStXDhcvXsQff/xRLL0vEYZBQtsKBoGwXnx9fQU3Z040fvDBB+zYsWPHRJfXpEkTQXk+Pj4a0/KFdnp6OubPny8avpeM27dv65X3yZMnLK+hQlsikeDrr78WeB/RV2jb2Njg2bNnAArMCPSZudIFX2i/ffsW69atMyknTpxg9ZlLaHfq1AkDBw5U4dGjR6wta9euVZtGHV5eXjrrLKzQ7tq1K8v/77//isqzfPlylkfdeoTmzZtD3+Dg4ACJpGChL999G/d1RlebqlSpgoyMDJbPUB/NfKxdaP/111963Wf4piN79uwRnS8xMVEwVmLuC5GRkcjLy2N55s2bJ4gfPnw4i0tNTUXFihXVluPq6oobN26wtJcvX2bXg1QqFSw0/u2332Bra2uW/zph3ZDQtoJBIMxP48aNRaVr0KCB4AbMHQ8MDGTH79y5I7re3r17s3yZmZmQSqUa0/KF9r179/TqX2EWQ/JtUQsjtJXRV2jzzQj27Nlj1PHnC+0HDx6Y/Hrr378/q89cQvvChQswZggLC9NZZ2GFNn+B2eXLl0Xl4QuvjRs3qsQbKrRlMhl27drFjmVmZiI4OFhne5TF2MaNG7X+z8Vi7UL7+++/16sOQxdDVqxYUTBWuoR2nz59BCJ7586dkMvlKumWLVvG0ty6dQve3t6CeLlcjp07d7I0ubm5+OCDDwRpKleujJcvX7I0iYmJRhl7omhDQtsKBoEwHwEBAexm2blzZ53po6Ki2E3zjz/+YMdtbGzY58Pc3FzRMxczZ85k5elauPVfEtrffvst5syZI4A/O7Rnzx6VeHXwP+Nqg4S2/qFGjRo66xQjtO3s7FC+fHmEhISoxA0YMIDlF7uwke/tJjExUSW+Zs2aKh4f+Atcb9++rRJvZ2cnEJsA0KNHD51tkclkAjH2119/6W1C4OXlhRYtWqhw69Ytdv1wx/j/808//VQlD+chQ1+Ki9Bu166doJ5Dhw5p3LnR1tYWhw8fZmmXLl0qiHdxccHvv//O4idMmKC2nGbNmglMS4zxNYMo2pDQtoJBIMzH4sWL2Q3w6dOnzBREE3PnzmXpFy5cKIjjHnwARM10SSQS/PbbbyzP8uXLtaYtKkK7a9eu6Nixo8502oT2P//8A2OEf/75R1Qf/wtCe8mSJUhKSlLh3bt3rC3nz59XmyYpKQn79+8XnNsqVarorJMvtDdt2oT
o6GjMmTMHmzdvxsmTJ/Ho0SO2GHj//v0q+b/++muW/+DBg6L6uXXrVpZHWRxpQtdiyGHDhgn6Pn/+fFHlTp48meVJSUnRyyMGB99OvbDB0Gu7uAhtGxsbbNu2DUCBiZ+LiwuLq1u3LtsfYfTo0ZBIJPDx8cHjx4+xbNky2Nvbq5Qnk8kwe/Zs7N69GzKZTGO9X331FYACDzHqXiiJ/xYktK1gEAjzUbJkScEuhuvXr9eaPjk5maXt06ePIG7fvn0srkOHDqLqT0lJYXm+/vprrWmLgtBu3LgxsrKyoFAoMH/+fK32qyS0C4KlF0M+fPiQtSUiIkJjOhcXF8G5rVChgiA+KioKcXFx2LBhAw4fPozbt28LZvJ0hb///lulzm+++YbF79q1S1R/+OYd8fHxovJoE9oymQybNm1i8bdv38Ynn3yCZs2aaWXo0KFMPCoUCowYMUJjWs4GXB0ktLWjr+mIo6MjZs2aBWdnZ8HxZs2aqb1utK2b4RDzBXP48OFG39CIKJqQ0LaCQSDMS+fOnQU36vbt22tMyy3GA4DQ0FBB3MKFC1mcps+IfPz9/QX1ch5MNGHtQtvGxkZFIJ88eRJlypRRm16s0K5RowZCQkJEU6tWLZaXhLZu+Ne0tmvf3d1dMLbK43ru3DkUJly5ckWlTv6MsDp7a3XwX3hnzJghKo+uGW2pVIoxY8YIbHuNGYKCgjS2jS+0ly9fbpAfbc7HMwltzWgS2upITEzE0aNHjcKPP/5o1v87YXlIaFvBIBDmh/+AevDgAVxdXVXSVKhQgaVJT09XWUDTr18/Fi/GxR9f4L97906n9wJrF9oSiQSVKlXCtWvXBA++1NRUNG3aVCWtWKGtbbZPHU5OTiwvCW3dpKWlsba0bNlSYzpPT0/BuPr6+grilU1LlMPTp09x8OBBJCYmYurUqRg0aBDatGmDqlWrCj7h8+ELPF2mVRyHDh1ieSZOnCgqj1g/2nybXWMGsUK7f//+7Lg+iyG5tCS0NaOP0Ob7ni9sMNaOtUTRgYS2FQwCYX78/f2Rnp7Obn6LFy9WSdOnTx8Wz21jzIc/kypGCPN99x4+fFhner7QfvXqFcaNGyca/gPw6tWreuV98OAByyvGRtvNzQ1JSUmCh0lubi6GDx8uSGetQjsjI0PUQsvCsHfvXlafNqEtk8lw+fJlo9GpUyeVOvg7Kap7IeIoXbq0YEw9PDwE8ZMnT8bmzZsxb948jBgxAp07d8aBAwdYekO8jixdupTlV3bBpomTJ0+yPGPGjBGVxxChvWrVKiQkJKjAXyB39uxZtWkSEhIECzANEdpXrlwBIFyUrY/Qtre3R8WKFUWxYsUKVq5Yof3HH3/odZ/hC+0NGzaIzjd79mzBdWluof3mzRtkZmbqxdu3b1l+Etr/PUhoW8EgEJaBW7ACFGwoobx5DH8bYnWiwc7OTnADDQwM1FrfzZs3WdqpU6fqbF9R2rDGxsZGsLkMF5YsWcLSiBXa7u7ucHBwEE3JkiVZXkOEtrmDLqFtzMAXatw48UP9+vU1toXvwhKAio2rOgrr3m/9+vUsv5gdGyUSoQjSte6BwxChrTyjz8H/UqXNdIVv4mKI0ObWd+zevZsd00do8ycG9AlFfcOa0qVLo1atWioMHDiQlbFu3TqV+OrVq6u9xnTd59VRuXJllp+E9n8PEtpWMAiEZZDJZILZsKtXr7KV5jKZTLBxS7169dSWwV8s2atXL411lS9fXvBw0CZwOIqS0Ob49ttv2UxVfn6+YLEd3xRB+ZO2JRdDmjtYUmjzX3YA1XUHfJQ/0YtZAFZYob17926Wf9SoUaLy8DdnGjBggKg8moS2g4MD6tSpw36LEdpdunRhaaZPn66xzsIIbVtbW+Tm5gIAVq5cydKS0NYttGNjYw0qNy0tjZVBQpsoDCS0rWAQCMtRq1Yt9gADgMmTJ0MikaBhw4bs2IsXL9RucCCRSPDdd9+xdJs2bdJYz7fffsvSpaamanUNxaFsOjJmzBjR8BeqXblyRa+8/E/chvjR7tmzJ7Kzs5nLLA4/Pz9WrvKuf5YU2i9fvkTfvn1NysqVK1l9umy069atq5ZJkyaxMt69e4fGjRtrTMvh6ekpKDs4OFhwvvz9/TW2o2rVqixdfn6+qPNaWKF97Ngxll+saOYWCQLA559/LiqPstCuX78+li5dilevXmHnzp0snb5CW9uXqsII7Zo1a7JjMTExLK2hQvv58+cqvsP58M+pWKF94MABve4zfNORdevWic4XFxcnuIZJaBPWDgltKxgEwrLMmzeP3QTfv3+PkJAQgf/stWvXasxbt25dwY1Z0wJH/gNpxYoVotpVFBZDaqJy5coqx4KCgli5165dE8TxhfaKFSuwdOlS0fDtSYvzYkj+At7Vq1cbVAZ/l0SFQqHWVzBH7dq1Wdp3796JKr+wQpuzQwaAzz77TFSe1NRUlkebu0I+fKGdlZUFfkhKSmLpxAjtbt26sTR8EaxMYYT2iBEj2DG+z3q+0ObvZKhLaP/2229az09xWgzZokULxMbGqvD06VNWxqlTp1Ti+WNJQpsoDCS0rWAQCMvi4uIi8G995MgRwW9tO0hKpVLB7M+nn36qkiYsLEzwYGjWrJmodhVloa0O/sPmwoULgjjyOqKdkiVLChYxKq8nEAt/gW9qaqrWtPyvOq9evRJVfmGF9qNHj1j+Tz75RFSejIwMlqd169Za05YtWxZjxozBv//+C02BL0LFCG3+tvHadiYtjNDm/pP5+fmCRanjxo1jaRcuXIgGDRqgQYMGbAt4Q4V29erV8dlnn+Gzzz5DQECAxnRFQWirw9vbW/AlU5/FkKNHj9b7ixZ/nEho//cgoW0Fg0BYHk02u2lpaTqFH392TN0mG/wZ1wcPHogyG5FIip/QrlevHiv3xIkTgjgS2toZNGhQofJzfP/996wcXVuc82e/nzx5Iqr8wghtmUyG7Oxslr9WrVqi8vF9XX/44Ycq8ZUrV8bEiRNx5swZgbjjh/fv32Pr1q3o0KEDbGxsWF4xQpvv5jM6OlpjOw0V2o0bN2a/Dx06JEjbtm1btf3hgqFCWyxFVWjzvxAA5N6PMC0ktK1gEAjrgP8g5AJ/4ZEmuD8RUODWjv8Q9fX1FWx5re3TsjJFUWj7+PgIhAqf9u3bs3KVX0hIaIsfk6FDh6rEu7i4wNHRUWc5O3fuZOVoM4mSSIQi7u7du6LaWRihHRAQIPjvlSpVSmceBwcHQZ6wsDCVNDNmzIC2sHnzZo11iRHa/G3jv/jiC41tNURoDxo0CH/99Rf73bdvX0FauVwucC+oHEhoqyKVSlV8/8fHx2vd14CENlEYSGhbwSAQ1kFwcLDg8zwg/hP98ePHWR6+T26+rfe7d+/g5eUluj1FUWjPmDEDly5dQrdu3VQeXH379mXlrlq1ShDHF9qxsbGYMmWKaKZPn87yFkehXa1aNZY3PT1dsNmLt7c3YmNj8erVK7UCXBn+9uu6fE7z3dYp29RrQh+hreyXm7svA8KFaNpQ9qJSpUoVlTT8rwFAwew831NJYd
lcK0adNEKyNFRUVGC69u3bqJhCagTu4yceJErdewuULb3KJNaFerVk2UlKSgoABNmzY1OK7o6GjmOAqohZ6+v1uFQoG1a9eKxrNnzx6jRUqpUqVE+/3hhx/g5+cHFxcXnfj6+qJNmzbYs2cPa2dMFkhtvI5Ce9y4caz/fv36sd+FYSUvXbqkV+A5OzuLPmMLP9uPGDHCpOtS7mLoa4w+od2/f39Jts9C29xZs2ZJaiNMta4LuYW20IF76tSpeuuaIrSFiaWSk5NRUFAgmoudO3eiQYMGojYVK1ZkEYr4UlRUhKSkJJ3PYG1mdpYoJ06cMGoBYPLkyUhMTJT0JchUhOnl8/LyLBpmk7AOJLTtYBII8xA+XAoKClhkDI5TR8c4d+6c6Oaal5eHHj16mLSvRo0aiUII8uXp06eYOHGiqK69Ce333nsPWVlZojqGkvq4uLjgp59+ErV58OCBpFi4bm5u2Ldvn8Z5Gjt2rMHU7kKE4flMLVOmTDHrGhMK7aKiIhQWFpqN8AuJuUK7W7dumDFjBuLj4/H1119j6tSpLNQdIM5wqlAoRNfwpUuXkJSUhFWrVolYs2YNMjIyWL2MjAzRPkuC0Bba7cpd3nrrLYPzaYrQnjhxIhISEjBhwgSMHDkSX3zxBb744gv8+uuvItOI6Ohovf2YIrSFL6/CkpaWphH+U4irqyuWLVum0e7AgQNaha5QbBpTCgoKkJWVhcuXL+PEiRPYt28fNmzYgF9//RXz58/HlClTMG3aNNF5GjVqlFn3BmNZvnw5Vq1ahcWLFyMhIQFTpkzBhAkTMH78eCxYsEDkE3To0CGrjo2wDCS07WASCPPYtGkTuzHNnz+f/T5z5kyNG/GdO3dMioktxMfHBytXrhT1++rVK7Rt21ZUz1yhfezYMURFRRmFMCa2UGj7+fmJMqoBwJgxY/SOpXnz5izFOl8ePHiA0NBQyefKw8ND5BwJALt37zZqFSkwMFDry43Usnz5crOzG9pb1JHiQlvo2Fq83Lp1S8PWtl+/fkaN79WrVxpJiIKCgtChQwebYyiesj6hvXv3bjx//twi1KlTx+B8miK0//jjD4PzlZmZKQrVqQ1jhXaDBg1EApXfz4ABAyRHAYmKimLme+np6SzcpjZSUlIAqF/wtm7disTEREybNg2jR49Gv3790KlTJzRv3hxvvfUWKleuDA8PD8l/zytWrGDHkJOTY3ToVnNYv3695L+7Pn36WG1chOUgoW0Hk0CYh7e3N65cuYKsrCyUL1+e/V6zZk1RyKrNmzfDz89Ptv127NgRly5dAgCN1WyOM19oy+0M2bRpU7x48QKFhYUYOnSowb6aN28uWl1JS0tDUFCQ0WNSKpVISEiASqXCnTt3TJoDHx8fjBkzBhs3bsSBAweQnJysly1btmD27NlaE7CYglBor1q1CtHR0WYjtH02V2gL46ELy6lTp9CoUSOd1+/WrVtx8eJFZGRkaOXs2bP4/fffZTuPtkAOG21LYYrQFpoEaSuXL1+WFOHDlBXthIQEAOqoGz///LNeoawLb29vJCQkGHwRqVKliuh+LicVKlRgzvMnT540K/ygsYwfP17v/AHqF9tp06aZlVyHsB9IaNvBJBDmExYWhk6dOmn83r17d9y6dQvdu3e3yH6dnJzQp08frSumHTt2xOPHj/H48WPJKcm9vb2xb98+7Nu3z+AncW3ExsayBAnawmB17NgRrVq1ktxfmzZtkJOTgylTphhl7qENPhmGra8VU7AXG+2BAwdi5MiR6N+/v+h3pVKJkJAQhISEoGbNmggKCrL6fdVeef/999nfobmJU+Rm0aJFSE1NRWpqqsZXB10EBgaiZ8+ejO7du+Pjjz9Ghw4d0KBBA8mry6YIbWdnZxw8eBDdunWz+bkzl9jYWEyaNEkUc90a1KlTB7GxsYiLi8OIESMwcuRIxogRIxATEyMyfyRef0ho28EkEJZFDiejNxkSbGq7ZqVSCVjMaQYAAAqCSURBVKVSKZtzkiX6JAipuLq6okWLFmjRogWqVKkiuZ2xyWII4k3njRbaGRkZOH78ODZs2IBp06YhOjpa1tBNBEEQBEEQxJtBpUqVEB0djWnTpmHDhg04fvw4cyp/o4R2zZo1MXv2bCxatAgrV65EcnIysrOz2We08+fPIz4+nj7fEARBEARBEDqpWrUq4uPjRcEDsrOzkZycjJUrV2LRokWYPXu2VX0BOM7GQlsXQUFBGDhwIDZu3IiCggKDsT4JgiAIgiCIN4+3334bSUlJKCoqQkFBATZu3IiBAweaFDTAEtil0BZSrlw5jB07FllZWVCpVFi6dKmsESsIgiAIgiCI1ws/Pz8sXboUKpUKWVlZGDt2rKQEVdbG7oU2j5ubG8aNG4ecnBw8fPgQkZGRNh8TQRAEQRAEYV0iIyPx4MED5OTk4JtvvoG7u7vNx6SL10Zo8wQGBmLv3r1QqVSYNWsWeV4TBEEQBEG8ATg4OGDmzJlQqVTYu3cvAgMDbT4mQ7x2Qpvj1GGNfvjhB6hUKqxduxYuLi42HxNBEARBEARhGZydnbFmzRqoVCr88MMPr81C62sptHn69euHwsJCbNmyxerB7gmCIAiCIAjL4+TkhC1btqCwsBD9+vWz+XiM4bUW2hynzjhYVFSEFStWUJpUgiAIgiCIEoRCocBvv/2GoqIii2WXtiSvvdDmOA5DhgwBAIwYMcLmYyEIgiAIgiDk4auvvgIADBkyxOZjMYUSIbQ5jsOyZcuQn5+Pxo0b23wsBEEQBEEQhHk0atQI+fn5WLZsmc3HYiolRmi7ubnh4sWLOHXqFJRKpc3HQxAEQRAEQZiGUqlEWloaLl26BDc3N5uPx1RsIrS9vLzg6uoKjuPg4uIChUIBd3d3eHl5mdVvmzZtAABffvmlzU8sQRAEQRAEYRqxsbEAgDZt2pjVj5eXFzw8PODg4CDSnNbSvFYT2k5OTqhQoQJ69uyJ48ePIz8/H8Ly8uVLHDhwAK1bt4ZCoTA5bMu6deuQmZn5Wr/9EARBEARBvKm4uroiMzMT69evN6m9o6MjSpUqhbZt2yIlJQW5ublaNee4cePg4eEBZ2dnix2LVYR2UFAQxo8fj+zsbEgt3bt3R0BAgNH7Cg0NhUqlQmxsrM0vFIIgCIIgCMI4hg8fDpVKhdDQUKPbVqpUCWFhYXj16pUkvZmXl4eIiAiULl3aIsdicaFdrVo1/Pnnn5IFtrAsWrQIDRs2NHqfO3bswLlz52x+oRAEQRAEQRDGcfbsWezcudPodrVq1UJSUpJJmvPkyZOoW7eu7MdiUaFdqVIlZGRkmHTAfElKSkKFChWM2m+vXr0AwCSRThAEQRAEQdiGsLAwAECvXr2MalelShUcPXrULM2ZmZkJPz8/WY/HYkLbx8cHe/bsMeuA+fLNN9+gbNmykvft5uaG3NxcfP/99za/YAiCIAiCIAhpTJw4Ebm5uUb52nl5eeG3336TRXNeunQJ7u7ush2PxYR27969ZTlgvjRp0sSo/e/atQspKSk2v2AIgiAIgiAIaSQnJ2P37t1
GtWnZsqWsmlNOiwiLCG1/f38cOnRI1oPetm2bUava8fHxyM/Ph5OTk80vGoIgCIIgCEI/Tk5OyM/PR3x8vOQ2vr6+2LBhg6yaMz09Hb6+vrIck0WEdoUKFWQ9YL6UKlVK8hgiIyMBACEhITa/cAiCIAiCIAj91KpVCwDQpUsXyW2cnJwsojmrVasmyzFZRGiPHz/eIgfdu3dvyWOoU6cOAKBjx442v3AIgiAIgiAI/XTs2BEAUKdOHclt+vTpYxHNGRMTI8sxyS60nZ2dsXXrVosc9MyZM6FQKCSNw9fXFwDQt29fm184BEEQBEEQhH4+/fRTAJAc+UOhUODnn3+2iObcsmWLLIlsZBfaZcqUMTu8iq6yYcMGuLi4SBqHs7MzAODo0aNYsWIFQRAEQRAEYcfw+lGq1nNxccHmzZstojkPHTokiy6WXWh7e3vj+PHjFjnorVu3wtXVVfJY0tLSCIIgCIIgiNcIqTrP1dUV27Zts4jmTE1NtU+h7e7ujr/++ssiB/3LL7/AwcHB7DESBEEQBEEQrzcKhQILFiywiObcuXOnLPG0LeIMOW3aNIsctDFeqARBEARBEETJZtCgQRbRnF9//bUs47OI0K5Ro4ZFDtrf39/mE0oQBEEQBEHYB15eXhbRnKGhobKMzyJCu2LFirh48aKsB5yZmQlPT0+bTyhBEARBEARhH1SoUEH2JIkvX740yidQHxYR2gqFAs2aNZP1oJs1a2bzySQIgiAIgiDsi8DAQFk1Z8+ePSWHkzaERYQ2x3Hw8fHBjRs3ZDngBQsWoEqVKjafSIIgCIIgCMK+8Pb2xokTJ2TRnBs3bkT16tVlHZtFhDbHcQgICEBWVpbZBy01niJBEARBEATx5lG9enWkp6ebrTkrVqwo67iY0H7+/LnsQpvjODRu3Bhnzpwx+YApnB9BEARBEARhiDp16piVy8USY/Ly8kJOTg64oqIilCpVyiI7cXBwwMSJE4062NmzZ8PR0dHmk0YQBEEQBEG8HlSvXh3z5s0zSnPOmTMHSqXSIuNxdHSESqUCBwC+vr4WO3A/Pz+UK1cOu3bt0mlO8vz5cyxfvhzu7u4oXbq0zSeLIAiCIAiCeL3w9PREUFAQJk2apFdz/vrrr/Dw8EDZsmUtNhZ/f38AUAvtjz76yOIH7+zsjLJly8LHxwf169dHeHg4goOD4ejoCA8PDzg5Odl8ggiCIAiCIIjXG2dnZ/j4+MDb2xv16tVDeHg46tevD0dHR3h6elrFBzAyMvJfof3rr7+S0CUIgiAIgiAIM3FyckJSUtK/QjszMxNlypSx+cAIgiAIgiAI4nWmUqVKOHXq1L9CG4BsmXAIgiAIgiAI4k3Fw8OD2YQzob1ixQqLeV4SBEEQBEEQREnHyckJP/30k6bQzszMlD1YN0EQBEEQBEG8KSiVShQVFWkKbQAYNmwYrWoTBEEQBEEQhJE4Ojpi9uzZonCCIqENWCY7DkEQBEEQBEGUZLy9vTXidmsI7ZSUFEoaQxAEQRAEQRAScXd3x5kzZwwLbQD48ssvyYSEIAiCIAiCIAygVCoxcuRIbZJau9AGgNDQUJsPnCAIgiAIgiDsmbp16+qS07qFNgBKYkMQBEEQBEEQOlAoFPqktH6h/fLlSzg7O0OhUNj8QAiCIAiCIAjCHlAoFKhevbpekW1QaPMlLCwMXl5eNj8ogiAIgiAIgrAlnp6e6NWrlxQJLU1oA8A777yD6tWr2/zgCIIgCIIgCMIWlClTBtHR0VLls3ShDQBbt26FUqlE+fLlbX6gBEEQBEEQBGENypUrhypVqiAzM9MY6Wyc0ObLkiVLEBISgqpVq9r8wAmCIAiCIAjCEnh5eSE0NBTLli0zRTKbJrT5snv3bnTv3h2+vr6oWLEivL294ejoaPOTQhAEQRAEQRDG4OjoCF9fX/j5+cHPzw99+vRBVlaWOVLZPKEtLNevX8fhw4exYMECDBs2DH369EH37t0JgiAIgiAIwm759NNPERcXh99++w07d+7E/fv35ZLH+H/vYxXfgO9iuQAAAABJRU5ErkJggg=='})
    r = requests.post(url, data, headers={'Content-Type': 'application/json'})  # send the JSON payload with an explicit JSON content type
print(r.text)
def test_2():
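    """Exercise the imgTemplate endpoint with a second base64-encoded sample image."""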
    # url = 'http://192.168.0.99:80/imgTemplate'
url = 'http://192.168.1.43:8888/imgTemplate'
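    # The payload embeds a PNG test image as a base64 string under the 'base64Image' key.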
data = json.dumps({'base64Image': 'iVBORw0KGgoAAAANSUhEUgAAAtoAAAG4CAYAAAB7OkD/AAAgAElEQVR4nOydd3xN9//H78iODJEliYgVQcyo3aK1iQZFSmxFtWJWqZhRMYPGKErMolYpDUJrj6L2qpqxYmYgsu7r90d+5/M9585zb+5K+v48Hs8/7vns8zn3nNf5nPfn/ZGAAgUKFChQoECBAgUKRg8SSzeAAgUKFChQoECBAoXiGIwmtJ89e4bk5GSsW7cOI0eORN++fdG9e3d069aNIAiCIAiCIKyO7t27o1evXhg2bBiWLl2KU6dO4f79+8aSx4UT2s+ePUOfPn3g5eUFT09PeHp6Qi6XQyKREARBEARBEESRQS6Xo2TJkihdujS8vLzQrVs3HDx40PxCe/Xq1ahTpw5cXV0tflIIgiAIgiAIwhSULVsWVatWxcqVK00vtFNTUxEYGAhvb2+Ld5wgCIIgCIIgzIGPjw9sbGyQlJRkGqHdq1cvuLu7W7yjBEEQBEEQBGEJKlSogI8++si4QrtHjx5wcXGxeOcIgiAIgiAIwpK4ubkhLCzMOEK7YsWKkEqlFu8UQRAEQRAEQVgDUqkUjo6OeP/+veFCWyaTWbwjBEEQBEEQBGGNuLu7Gya0q1atavHGEwRBEARBEIQ1U7t2bf2E9ujRo2FjY2PxhhMEQRAEQRCENWNjY4MRI0aIE9qXLl2Cs7OzxRtNEARBEARBEEUBFxcXHD9+XLfQLlmypMUbSxAEQRAEQRBFDa1Ce+7cubSFOkEQBEEQBEHoiY2NDYYNG6ZeaOfn58PW1tbijSQIgiAIgiCIooifnx+ePn2qKrQXLFhAQpsgCIIgCIIgDMTGxgbr1q1TFdq0AJIgCIIgCIIgCoeTk5NQaJ8/fx7+/v4WbxhBEARBEARBFGXc3d2Rmpr6P6G9adMmMhshCIIgCIIgiEJia2uLtWvX/k9of/rppyav1MHBAS4uLpDL5ahRowbq16+P0NBQlCxZEh4eHrC3t7f4iSEIgiAIgiCKNra2tihRogTkcjkqVaok0Jyenp5m0Zzt2rX7n9D29fU1WUWlSpWCs7MzVq9ejczMTBX/ggCQmpqKqVOnoly5cnBxcbH4ABEEQRAEQRBFC1dXVzg5OWHVqlVaNef+/fvh7e0NLy8vk7XFy8urQGgrFAqTbbcul8uxcOFCtR3VFBISElChQgWLDxZBEARBEARRNJDJZJg3b55emnPKlCmQSqUmaY+dnR3y8/MhefPmDdzd3U1SiaHhr7/+QrVq1Sw+aARBEARBEIR1I5PJoFAoDNKcFy9exAcffGD0NpUsWRKZmZmQpKWlGX3bdT8/P4NFNheuX79OM9sEQRAEQRCERhwdHQutOVNTUxEYGGjUdpUsWRJpaWnGF9rly5fHrl27Ct1pADh37pzRXwIIgiAIgiCIok9AQACWLl1qFM15//59eHh4GK1tJhHaUqkUkZGRRukwF4z9hkEQBEEQBEEUfRo3bmxUzdmwYUOj2WybRGg7OjoiKyvLqJ0+fvw4SpcubfHBJAiCIAiCIKyDEiVKsE1hjBWuX79utA0cTSK0a9eubdQOc8HV1dXiA0oQBEEQBEFYB6VLlzaJ5qxYsaJR2mcSoT1mzBiTdHrQoEEWH1CCIAiCIAjCOujcubNJNOfMmTON0j6jC20nJyckJyebpNOLFy+GTCaz+KASBEEQBEEQlkUqlWLNmjUm0Zy//vorSpQoUeg2Gl1olyxZEmfPnjVJp3///Xc4OjqKbsv58+cJgiAIgiCIIoRYnefo6IikpCSTaM4zZ84YTRcbXWifOHHCJJ3+7bff4ODgIKod9vb2AIDTp09j3bp1BEEQBEEQhBVz+vRpABCt9WxtbTF79myTaM7Tp09bp9C2t7fH77//bpJO//jjj6LdrXh6egIA+vbtW+g+EQRBEARBEKalT58+AAAvLy9R6aVSKeLj402iOX///XfY29sXuk8mWQzZt29fk3Q6KipKdBuqVq0KAAgPD7f4hUMQBEEQBEFoJzw8HABQtWpV0Xl69uxpEs05adIko/TJJEK7XLlyJum0jY2N6DZEREQAAEJCQix+4RAEQRAEQRDaqVy5MgCgU6dOovPY2dmZRHP6+voapU8mEdqlSpXCjRs3jNrh7du3w9PTU3QbYmJikJ2dDVtbW4tfOARBEARBEIR2bG1tkZ2djZiYGNF5SpUqhb179xpVcx4/fhx+fn5G6ZNJhLZEIkFYWJhRO/3RRx/pVX9ycjKOHTtm8YuGIAiCIAiCEMfRo0dx4MABvfLUr1/fqJqzZ8+eRuuPyYS2s7Mzbt68aZQOr1mzBm5ubqLrdnJywrt37zBt2jSLXzAEQRAEQRCEOKZOnYp3797ByclJdJ5SpUohJibGKJrzwIEDKFWqlNH6YzKhLZFI4OXlVej950+dOoUyZcroVW+PHj0AAGFhYRa/YAiCIAiCIAhx1KlTBwDQo0cPvfL5+Pjgl19+KZTmvHPnDvz9/Y3aH5MKbYlEgmrVquH8+fMGdfiXX35B5cqV9a5z3759uHLlisUvFoIgCIIgCEI/Ll++jP379+udLywsDMuXLzdIc27duhXlypUzel9MLrQlEglcXFzQsmVLZGdni+psXl4ewsLCDDJEr127NhQKBaKjoy1+oRAEQRAEQRD6MWzYMCgUCtSuXVvvvGXKlEH37t1FC+wXL15gypQpKF++vEn6YhahLZEUuF9xdnZGTEwMjh07hqysLEFH3717h+PHj6Ndu3aws7ODXC43qJ5t27bh6dOnetn2EARBEARBENaBo6Mjnj59iu3btxuUXy6XQyqVonnz5mo1Z3Z2Ns6dO4fPP/8cPj4+JvVQZzahrVyps7MzZDIZHB0dIZPJ4OzsrNeCR3W0aNECADB8+HCLXyQEQRAEQRCEYQwfPhwA0KJFi0KV4+bmJtCcEkmBkC+s5hSLRYS2KXBycsKNGzdw8eJFvTa2IQiCIAiCIKwLGxsbXLhwATdv3izSVgrFRmivWrUKOTk5qF+/vsXbQhAEQRAEQRSOevXqIScnB6tWrbJ4WwylWAjtL7/8EgAwevRoi7eFIAiCIAiCMA6jRo0CAHz55ZcWb4shFHmh3a1bN+Tn52PdunWQSqUWbw9BEARBEARhHKRSKdauXYv8/Hx069bN4u3RlyIttPv06YPc3Fzs2bPHpCtGCYIgCIIgCMtga2uLPXv2IDc3F3369LF4e/ShSAptuVyOKVOmQKFQYOvWrXBwcLB4mwiCIAiCIAjTYG9vj19++QUKhQJTpkwx2A20uSlyQjsoKAgHDx6EQqHAvHnzisyJJgiCIAiCIAxHJpNh7ty5UCgUOHjwIIKCgizeJl0UGaHt5OSE7777Dm/fvsXLly8RERFh8TYRBEEQBEEQ5iUiIgIvXrzA27dvMWHCBDg7O1u8TZqweqHt7e2N8ePH49mzZ1AoFFi5ciW8vLws3i6CIAiCIAjCMnh5eWHlypVQKBR49uwZxo8fD29vb4u3SxmrFNrly5fHoEGDsHPnTuTk
5CA/Px+bN29GzZo1Ld42giAIgiAIwjqoWbMmNm/ejPz8fOTk5GDnzp0YNGgQypcvb/G2SSQWFtrBwcGIj4/HsmXLsGHDBhw9ehTPnz9ne9FfvXoVMTExKFu2rMVPFEEQBEEQBGGdlC1bFjExMbh69SrTkc+fP8fRo0exYcMGLFu2DPHx8QgODjZruywqtFu2bAkAuHv3Ls6ePYsdO3Zg1qxZiIqKgr+/v8UHjSAIgiAIgiha+Pv7IyoqCrNmzcKOHTtw9uxZ3L17FwDQqlUrs7bFKoQ2mYQQBEEYDy8vLwQFBSEoKAhubm4Wb4+pcHFxsXgbCIIoGtSoUYOENkEUNyZOnIijR4/i6NGjiIuLM0udxflrUO3atXHmzBnExMQgNDTU4u0pDFu3bsX169dx/fp1DB061Khl//zzz+zT6XfffWfxvmqiX79+2LFjB3bs2IHIyEi98jo7O+PZs2d48eIFDh8+jHbt2lm8P0WRI0eOsHvUxx9/bPb65XI5/vjjD/ZfGD9+vEHl9O/fn5Vx7tw5ODk5WfzcEtYFCW0rGASCMDbr1q1jgufXX381eX01a9ZEWloaEhMT4ejoaPH+G5vY2FjwQ61atSzeJkM5c+YM60dMTIxRyy4qQjs+Pp61c8qUKXrlHTt2LMv77t07+Pn5Wbw/RRGFQsHOoyW2t+7ZsyerPzs7G+XKldO7DLlcjn///ZeVs3jxYoufV8L6IKFtBYNAEMbGnEI7KCgIjx8/ZvWdPn1arfiQy+UIDQ01G8Zc+X39+nXWv7/++svi41sY+ELb2GJYjNB2c3NDxYoVTYLYBeyGCu2SJUvi1atXLO/MmTONev5q1aqFyMhIo9K0aVNIJBIsXrwYW7Zs0ZtGjRqZ5Dq0pNC2t7fHnTt3WP0JCQkGldOlSxdWRm5urtV4myCsCxLaVjAIhHlo3rw5zp8/Xyg6deqEESNGICUlxSj07t3bJH01p9B2d3fHwYMHwQ8PHz5UmfUtVaoUzBlOnjxplP5xN0ku9OrVy+TXapkyZRAVFWUwrVu31lg2X2iPGzfOqO0WI7SHDh1qsjG/ceOGqHYaKrTnz5/P8j1//hzu7u5GPX/z5s0z+jnZt28fJBIJbt++bVD+7t27m+Qat6TQnjBhAqv75cuX8PT01LsMmUyGv//+m5Wzbt06s/aBKDqQ0LaCQSDMQ0REhEEPGn4YMGAAJk6cWOhyuGBsG1kOc5uO2NnZYcOGDYK+ZWRkCARfURXaM2bMYGU+fvwYdnZ2Jj+fnTp1KlTfT5w4obHss2fPsnTffPONUdttaaF94cIFUe00RGiHhoYiNzeX5Xv8+DGSk5NFM2nSJJ11kNA2PZUrV0ZWVharW99xbNKkCSQSoekJAFy8eFGvcnbu3Gm2PhOWhYS2FQwCYR6KmtBu0qQJYmNjDeLSpUusjps3bxpcTps2bUSfX5lMhgULFgj6l5OTw2bt+UL7zZs3WLFihU5WrVoleCCfPHlSVL4VK1ZoXdzk5+eHiIgIUdy/f5/Vv337dtH5NCFm4ZcphTZ/Fm7kyJFG/Y+JEdoDBgzA69ev1ZKZmcnyKxQKjen4vH//nuU5deqUqHbqK7TlcjlOnTpVqDHZunWrznrCwsI0fqW4d+8egIJZe+7Y9u3bWfmDBg1Sm4+73qpXr466deuKYu3ataxcbUJ76NChBt9b+P/rbdu2GVxO6dKlRV+fdnZ2ghdNQ0KnTp3g4ODAXLYZGtLT083y3CMsDwltKxgEwjyULl3aIGH09OlTdnPs2bMn/P39MW3aNHZs69ataNCggSgaN26M/Px8ljcqKkpje0ePHl2oG7kxwpw5c/Q+z8oLB8+dOwc7OzuB0E5JSRFVVtOmTVmevLw8o9lAfvbZZ+Y6hSrh0qVLOtvXoEED5hXDELTZDp8/f561JTo62qj/scIuhmzbti3Ln5aWJioPXzQfOnRI7zxihPa3335b6HEXI7S1wb0gHTlyhB2bOnUqK9+YZixz5sxh5WoT2vyXNkuF2rVri+7XkiVLWL43b97gzJkzjEuXLiE1NVVwTB3NmzcX3P/fvXsniL927RquXbumsxyx1ypR9CGhbQWDQFgvUqlU8JmxQYMGkEgkaN++PTu2ZcsW0eWVKVNG8JDQttCoqAptiUTCZv0vXbqEUqVKQSKRGCS0+Q/GzZs3G21crV1omxL+144vv/zSqGUXVmh/+umnLP/jx49F5Vm0aBHLs3fvXpV4uVyOEiVKCEhISGB5ZsyYoRLPzx8WFobs7GyWftasWfD19dVJdHQ0y5Ofn6/X1yF1kNBWH8QKbWWTJb65SmBgIE6dOoW0tDSd5dWpUwc5OTmsnG+//ZbFlStXDhcvXsQff/xRLL0vEYZBQtsKBoGwXnx9fQU3Z040fvDBB+zYsWPHRJfXpEkTQXk+Pj4a0/KFdnp6OubPny8avpeM27dv65X3yZMnLK+hQlsikeDrr78WeB/RV2jb2Njg2bNnAArMCPSZudIFX2i/ffsW69atMyknTpxg9ZlLaHfq1AkDBw5U4dGjR6wta9euVZtGHV5eXjrrLKzQ7tq1K8v/77//isqzfPlylkfdeoTmzZtD3+Dg4ACJpGChL999G/d1RlebqlSpgoyMDJbPUB/NfKxdaP/111963Wf4piN79uwRnS8xMVEwVmLuC5GRkcjLy2N55s2bJ4gfPnw4i0tNTUXFihXVluPq6oobN26wtJcvX2bXg1QqFSw0/u2332Bra2uW/zph3ZDQtoJBIMxP48aNRaVr0KCB4AbMHQ8MDGTH79y5I7re3r17s3yZmZmQSqUa0/KF9r179/TqX2EWQ/JtUQsjtJXRV2jzzQj27Nlj1PHnC+0HDx6Y/Hrr378/q89cQvvChQswZggLC9NZZ2GFNn+B2eXLl0Xl4QuvjRs3qsQbKrRlMhl27drFjmVmZiI4OFhne5TF2MaNG7X+z8Vi7UL7+++/16sOQxdDVqxYUTBWuoR2nz59BCJ7586dkMvlKumWLVvG0ty6dQve3t6CeLlcjp07d7I0ubm5+OCDDwRpKleujJcvX7I0iYmJRhl7omhDQtsKBoEwHwEBAexm2blzZ53po6Ki2E3zjz/+YMdtbGzY58Pc3FzRMxczZ85k5elauPVfEtrffvst5syZI4A/O7Rnzx6VeHXwP+Nqg4S2/qFGjRo66xQjtO3s7FC+fHmEhISoxA0YMIDlF7uwke/tJjExUSW+Zs2aKh4f+Atcb9++rRJvZ2cnEJsA0KNHD51tkclkAjH2119/6W1C4OXlhRYtWqhw69Ytdv1wx/j/808//VQlD+chQ1+Ki9Bu166doJ5Dhw5p3LnR1tYWhw8fZmmXLl0qiHdxccHvv//O4idMmKC2nGbNmglMS4zxNYMo2pDQtoJBIMzH4sWL2Q3w6dOnzBREE3PnzmXpFy5cKIjjHnwARM10SSQS/PbbbyzP8uXLtaYtKkK7a9eu6Nixo8502oT2P//8A2OEf/75R1Qf/wtCe8mSJUhKSlLh3bt3rC3nz59XmyYpKQn79+8XnNsqVarorJMvtDdt2oT
o6GjMmTMHmzdvxsmTJ/Ho0SO2GHj//v0q+b/++muW/+DBg6L6uXXrVpZHWRxpQtdiyGHDhgn6Pn/+fFHlTp48meVJSUnRyyMGB99OvbDB0Gu7uAhtGxsbbNu2DUCBiZ+LiwuLq1u3LtsfYfTo0ZBIJPDx8cHjx4+xbNky2Nvbq5Qnk8kwe/Zs7N69GzKZTGO9X331FYACDzHqXiiJ/xYktK1gEAjzUbJkScEuhuvXr9eaPjk5maXt06ePIG7fvn0srkOHDqLqT0lJYXm+/vprrWmLgtBu3LgxsrKyoFAoMH/+fK32qyS0C4KlF0M+fPiQtSUiIkJjOhcXF8G5rVChgiA+KioKcXFx2LBhAw4fPozbt28LZvJ0hb///lulzm+++YbF79q1S1R/+OYd8fHxovJoE9oymQybNm1i8bdv38Ynn3yCZs2aaWXo0KFMPCoUCowYMUJjWs4GXB0ktLWjr+mIo6MjZs2aBWdnZ8HxZs2aqb1utK2b4RDzBXP48OFG39CIKJqQ0LaCQSDMS+fOnQU36vbt22tMyy3GA4DQ0FBB3MKFC1mcps+IfPz9/QX1ch5MNGHtQtvGxkZFIJ88eRJlypRRm16s0K5RowZCQkJEU6tWLZaXhLZu+Ne0tmvf3d1dMLbK43ru3DkUJly5ckWlTv6MsDp7a3XwX3hnzJghKo+uGW2pVIoxY8YIbHuNGYKCgjS2jS+0ly9fbpAfbc7HMwltzWgS2upITEzE0aNHjcKPP/5o1v87YXlIaFvBIBDmh/+AevDgAVxdXVXSVKhQgaVJT09XWUDTr18/Fi/GxR9f4L97906n9wJrF9oSiQSVKlXCtWvXBA++1NRUNG3aVCWtWKGtbbZPHU5OTiwvCW3dpKWlsba0bNlSYzpPT0/BuPr6+grilU1LlMPTp09x8OBBJCYmYurUqRg0aBDatGmDqlWrCj7h8+ELPF2mVRyHDh1ieSZOnCgqj1g/2nybXWMGsUK7f//+7Lg+iyG5tCS0NaOP0Ob7ni9sMNaOtUTRgYS2FQwCYX78/f2Rnp7Obn6LFy9WSdOnTx8Wz21jzIc/kypGCPN99x4+fFhner7QfvXqFcaNGyca/gPw6tWreuV98OAByyvGRtvNzQ1JSUmCh0lubi6GDx8uSGetQjsjI0PUQsvCsHfvXlafNqEtk8lw+fJlo9GpUyeVOvg7Kap7IeIoXbq0YEw9PDwE8ZMnT8bmzZsxb948jBgxAp07d8aBAwdYekO8jixdupTlV3bBpomTJ0+yPGPGjBGVxxChvWrVKiQkJKjAXyB39uxZtWkSEhIECzANEdpXrlwBIFyUrY/Qtre3R8WKFUWxYsUKVq5Yof3HH3/odZ/hC+0NGzaIzjd79mzBdWluof3mzRtkZmbqxdu3b1l+Etr/PUhoW8EgEJaBW7ACFGwoobx5DH8bYnWiwc7OTnADDQwM1FrfzZs3WdqpU6fqbF9R2rDGxsZGsLkMF5YsWcLSiBXa7u7ucHBwEE3JkiVZXkOEtrmDLqFtzMAXatw48UP9+vU1toXvwhKAio2rOgrr3m/9+vUsv5gdGyUSoQjSte6BwxChrTyjz8H/UqXNdIVv4mKI0ObWd+zevZsd00do8ycG9AlFfcOa0qVLo1atWioMHDiQlbFu3TqV+OrVq6u9xnTd59VRuXJllp+E9n8PEtpWMAiEZZDJZILZsKtXr7KV5jKZTLBxS7169dSWwV8s2atXL411lS9fXvBw0CZwOIqS0Ob49ttv2UxVfn6+YLEd3xRB+ZO2JRdDmjtYUmjzX3YA1XUHfJQ/0YtZAFZYob17926Wf9SoUaLy8DdnGjBggKg8moS2g4MD6tSpw36LEdpdunRhaaZPn66xzsIIbVtbW+Tm5gIAVq5cydKS0NYttGNjYw0qNy0tjZVBQpsoDCS0rWAQCMtRq1Yt9gADgMmTJ0MikaBhw4bs2IsXL9RucCCRSPDdd9+xdJs2bdJYz7fffsvSpaamanUNxaFsOjJmzBjR8BeqXblyRa+8/E/chvjR7tmzJ7Kzs5nLLA4/Pz9WrvKuf5YU2i9fvkTfvn1NysqVK1l9umy069atq5ZJkyaxMt69e4fGjRtrTMvh6ekpKDs4OFhwvvz9/TW2o2rVqixdfn6+qPNaWKF97Ngxll+saOYWCQLA559/LiqPstCuX78+li5dilevXmHnzp0snb5CW9uXqsII7Zo1a7JjMTExLK2hQvv58+cqvsP58M+pWKF94MABve4zfNORdevWic4XFxcnuIZJaBPWDgltKxgEwrLMmzeP3QTfv3+PkJAQgf/stWvXasxbt25dwY1Z0wJH/gNpxYoVotpVFBZDaqJy5coqx4KCgli5165dE8TxhfaKFSuwdOlS0fDtSYvzYkj+At7Vq1cbVAZ/l0SFQqHWVzBH7dq1Wdp3796JKr+wQpuzQwaAzz77TFSe1NRUlkebu0I+fKGdlZUFfkhKSmLpxAjtbt26sTR8EaxMYYT2iBEj2DG+z3q+0ObvZKhLaP/2229az09xWgzZokULxMbGqvD06VNWxqlTp1Ti+WNJQpsoDCS0rWAQCMvi4uIi8G995MgRwW9tO0hKpVLB7M+nn36qkiYsLEzwYGjWrJmodhVloa0O/sPmwoULgjjyOqKdkiVLChYxKq8nEAt/gW9qaqrWtPyvOq9evRJVfmGF9qNHj1j+Tz75RFSejIwMlqd169Za05YtWxZjxozBv//+C02BL0LFCG3+tvHadiYtjNDm/pP5+fmCRanjxo1jaRcuXIgGDRqgQYMGbAt4Q4V29erV8dlnn+Gzzz5DQECAxnRFQWirw9vbW/AlU5/FkKNHj9b7ixZ/nEho//cgoW0Fg0BYHk02u2lpaTqFH392TN0mG/wZ1wcPHogyG5FIip/QrlevHiv3xIkTgjgS2toZNGhQofJzfP/996wcXVuc82e/nzx5Iqr8wghtmUyG7Oxslr9WrVqi8vF9XX/44Ycq8ZUrV8bEiRNx5swZgbjjh/fv32Pr1q3o0KEDbGxsWF4xQpvv5jM6OlpjOw0V2o0bN2a/Dx06JEjbtm1btf3hgqFCWyxFVWjzvxAA5N6PMC0ktK1gEAjrgP8g5AJ/4ZEmuD8RUODWjv8Q9fX1FWx5re3TsjJFUWj7+PgIhAqf9u3bs3KVX0hIaIsfk6FDh6rEu7i4wNHRUWc5O3fuZOVoM4mSSIQi7u7du6LaWRihHRAQIPjvlSpVSmceBwcHQZ6wsDCVNDNmzIC2sHnzZo11iRHa/G3jv/jiC41tNURoDxo0CH/99Rf73bdvX0FauVwucC+oHEhoqyKVSlV8/8fHx2vd14CENlEYSGhbwSAQ1kFwcLDg8zwg/hP98ePHWR6+T26+rfe7d+/g5eUluj1FUWjPmDEDly5dQrdu3VQeXH379mXlrlq1ShDHF9qxsbGYMmWKaKZPn87yFkehXa1aNZY3PT1dsNmLt7c3YmNj8erVK7UCXBn+9uu6fE7z3dYp29RrQh+hreyXm7svA8KFaNpQ9qJSpUoVlTT8rwFAwew831NJYd
37jR8/nqXp0aOHxrIMEdp8l6ApKSlqxaBUKkWDBg3QuXNnZu5x584dlWubhHYBHTp0gHJYvHgxkpKSsGrVKrXedfhCu2XLlggNDdULfp0ktP97kNC2gkEgrAf+gjMAaNKkiah83bt3Z3mysrIQFBSEwMBAgZ9tvk9pMRRFoc3fsGTQoEGCuJiYGBY3a9YsQZwlvY5Yu9BeuHAhy/vDDz8I4vheb/755x+tZklly5YVnKuPP/5Ya71RUVEs7d9//y2qrWKEtq2tLWJiYnD06FHBcb5gVTYt0oSyr++yZcuqpGnZsqWKaYgx/Wjzxyc8PFxjWYYI7UmTJrH/RlRUlOhrprCLIcBsFBcAACAASURBVPWtByg6Qptz6cpfBMufYElKSlLxGU+LIYnCQELbCgaBsA4CAwNVZrQvX74syn+wTCYTeEzYvn07tmzZwn6/ffsWpUuX1qs9fKGdlpaGuLg40fDb8s8//+iVl78gTR+hLZPJBNt7K/to5m8A9NVXXwniLCm0c3JyjLobozr4M8n6CO0SJUqwc6pQKFS8uXh6egpMk7QJPf4XhZycHJ0b0PBngo8fPy6qvbqEdp06dZg4u3XrliCOv7toQkKCqPr4s/0A1H4xcnFxUTEN0SS069evj1u3bjH45/bu3buCOA7+YsxHjx6pTXPr1i3BS/e9e/fYceXF1so22n5+fqJ3yeSwhNA+ceKEXvcZvtD+9ddfRef78ccfBWOuj9Du2LEjy8d3ublv3z48e/ZM0Be+LiGhTRQGEtpWMAiEdbBmzRqoC9o8CfDh+9NVDrGxsXq3p6htWMP3u/zy5UuV2VX+5kDKNxy+0G7UqJHAF3S7du1Y3OXLl1V8RfMXixX1DWuU4e9eyt8RkA9fMCQnJ2ssi//iJ0Y4jxw5UlS5fDQJbScnJ8yaNUvg6SEjI4PFOzs7C0RtZGSkqPoaNGggOLdOTk6i8mkS2vxtuc0V+vTpI2ibpp0h9cESQttSQazQtrOzY6Y4T548QevWrVkZ8fHxqFatmsBV5MWLF5nLRBLaRGEgoW0Fg0BYnoYNGwpmWPizum/evBF9c1W3oPKff/4RtVBNmaImtPlbGis/yG1sbJCZmcniy5UrJ4hv1KgRWrRogRYtWqgIdH9/f5ZPnacMuVzO8oq1qS8KQlsulwvc0DVv3lxtOr77SIVCgWrVqqmkcXZ2xps3b1g6MYty+aY+/E1ctKFOaLdq1Qq3b99WOQ83b95kX4s+//xzdjw/P1/0WoZWrVoJ8kmlUlH5NAntjz76CFlZWaLJyckR9EmhUOiVPysrS8UkhC+0r169qnVjGU1ws+yFFdrh4eGYNGmSxsWiRUlo813sTZw4UfBSxXkdCQ0NxfPnz9lx7rrnC+2PP/4YISEhesFfWExC+78HCW0rGATCstjb2wtMLc6ePYsqVaoIXI1t375dVFl8DyRcaNu2rUHtspTpCJ927drB19cXc+bMQXBwsNb2JiYmsjrHjRsniONvfpKRkaFxp8169eqpbKKiTWiXL19eL9tVDr7QfvToEXx9fU0K352YWKHNF59nz57VmpbvmWLp0qUq8b169RJck+rEuDIzZ85k6Tdu3CiqzZs3b2Z5FixYIFgrwIXc3FzMmjVL8PL5559/svgjR46IHkf+ZjH8GXJdiLXR1gX/ZQQoENr8LdwNgS+0CxsKK7SHDRsGoGAh96hRo1TiLWE6ooyfnx+6du2KgQMHChYK86lUqRL7YvLy5Uu4ubmpFdoSScGLa3p6Ou7du8cmWMjrCFEYSGhbwSAQloW/pa9CoUDjxo0hkUgwe/ZswQ2yTZs2WsuxtbXFnj17VG6s+/btE2XnrYylFkMqw9/98vTp0xpn9/nmH8qLSPku0A4ePKg2v4+PD16/fo0bN24IZm81Ce2ePXsiPT0deXl5aNeunV59svbFkE5OToLZbF27JA4YMIClzczMhKurqyD+xIkTLP7ixYui2rx48WKW56effhKVhy+Y1YWrV6+iXr16gjwffPCBIM3gwYNFn9fBgwezfI8fPxadzxhC28HBAQ8ePFDp4759+wp1rfCF9uPHj3H9+nW94Rb6FVZo83eeXLhwoUq8JRZDqmP+/PkACtbCKNv3y+VygVeo8ePHQyKRaBTaEokETZs2RYUKFdhvEtpEYSChbQWDQFiO5s2bIz8/n90E+X6zXVxc8PjxYxZ369YtjVtW29nZYceOHRpvrlu2bNHoX1oT1iK0O3XqJOiLus/6Pj4+LP79+/cqvrD37t3L4qdPn662Hv5iyVu3brGXE01Cm29Tn5aWhpCQENF9smahLZPJBCYYV65c0epNRCqVIigoSLCQl+/qj2/DDgDDhg0T1Wb+eCxYsEBnemXzFH7Izc3FjBkz1P5/9u/fz9JlZGSovCRog++pRKx9vkRiHKE9bdo0wTWUnJzMfhtqWy2RWJeN9rJly1iesWPHaqwHsKzQ3rp1Kytr//79grjJkycLxolbBKxNaCvDF9pjx47FwIED9WLChAksPwnt/x4ktK1gEAjL4O/vjydPnrAb4KNHj1SuRf6W1YB6Twqurq4CsQAAf/31Fw4ePCg4tn37dr02Y6lWrRrbwlfXjKYyxhTa33zzDSvr2bNnatN07dqVpTl27Jggzt3dXWCGo+5m07ZtW/bgVSgUgu23NQltJycnXLhwgcVdv35dtEizVqHt6OiIXbt2Ca6b+Ph4dO7cGYMHD8akSZOQkJCALVu24Pjx47h7967ATRkX+LPWfJeL6enpcHNzE9XmX3/9leXT9HLEhy88+eHatWsqs9jqxgEAZs+erdd5nTdvHsury7yGT2GFdvv27QU7Ug4fPhx169Zl13BGRobBJiTWJLT5nmC6d++uEh8eHs7uUfr215hCmy+E58+fz443bdpUME5du3ZlcYYKbVoMSegLCW0rGATC/Dg4OAg+pysUCrW21FKpVOBX+s2bNyhTpgyLL1++PC5duiQQC5cvX4anpydcXV1VFgsdOXKErWQ3JcYU2uvXr2dlafI+wTczUPaRzf+8n5mZqfKyUbJkSYH7O2UzBW022hUqVBC4FNyxY4eoBXHWKrRlMpnRXB3Wr19f4M5M3dhog28GIsbzjrLZiEKhwMKFCzUuBPb19RV4ecjIyND7v8Gf+T9w4IDofIUR2hEREQIPKRcuXGBfX5YsWcKOp6amirKFV8aahDb/Wqxbt65R/xPGEtp2dnaCrzn9+vVjcU2aNGHHlWe6SWgT5oKEthUMAmFepFIpNm3aJBAF2m609evXZw+F9evXMxdiEREReP36taCc8+fPw9PTk+UtXbq0wNYWAO7fvy/aO4ah8BcmanILJ5Z79+6xsuLi4tSmuXr1Kkvz6aefCs41f2Hmzz//rDIW27ZtY/F37txRmZXW5XWEvyAOgNpFW8pYq9CWSCSChZO6Qk5ODlJSUnDmzBns3LlTIFwTExNx//599js9PV1wberixo0bLO+XX36pM33ZsmXZS09qaipatmypMa2dnR0OHTok6Ms333yj1zmVSqWC/xbf7EsXhghtOzs7xMbGC
kzNnj9/LlgkXKJECcFujunp6Vp9m6vDVEK7evXqrFxN6yT4ODg4sNlghUKhcaGhofC/cmnbUVMXfDENANWrVxfEnzlzBmlpaSqbGZHQJswFCW0rGATCvHALZ7hw5MgRnYsVV6xYgejoaEgkBbbby5cvh3JITk5Wa7rg5+cnEKJAgc1qbGysRpvvwsLf+j0lJcWgxZgSidB9GgB07NhRJY2Pj4/A7IMv5vg+sAGgdevWgrx8X835+fn46KOPVMrXJbQlEqEtaU5ODurXr6+1X8pC28bGxqTwXR/qEtru7u548+YN3r59i+vXr2P//v1YuXIlpk6dikGDBqFDhw6oU6cOfH19Vby3TJ8+HVeuXEH//v2xceNGwbnXtSU6nwoVKggEpdjFpj179sTly5cFX32Ukcvl2LBhg6BtZ86c0XsNA99cCQD7f4pBX6H90UcfCV4YgYJ1AQ0bNlRJW6VKFcFXFoVCgZUrV2p0kacMX2hPnz4dDRo00BvuJYkvtPm7aL548ULnPYEvRPVdIyIG/voXfTfk4cNfq/H69WuV9QwDBgxA7969tfaPhDZhSkhoW8EgEOZjypQpgoflvXv3NG6tzIczR+jYsaNaTwOLFy/W+uDy8vLCuXPnVPLduHEDHTp0MHo/+VvCAwUmFU2bNhXl87VKlSpo2LAhJkyYgPT0dFZGVlaW2hcJvhu6GzdusOO2tra4fPkyi7t586bgIdiqVSuBH2JNgqdMmTIsjSah7ezsLPjMffv2ba322tbuR9vQ2UMnJydIpVIMGTJEUOeNGzdUXurCwsJQtmxZlePlypUTmEspFAr4+PiIboO2HSdtbW1VXP5lZGSouI5ctmwZ5s6di+joaHTu3BnNmjVDWFgYQkNDUa9ePUyaNEnFNl2X+0k+/BflyZMna0zXuHFjtb7xHzx4gBo1amjM17BhQ4HY5voZFxeHgIAArW0zlXs/mUwm8GX/ww8/wM7OTm0b/Pz8BPerX375xej3KP5ahOzsbIwdOxa1a9cWdY+qVq0a2rVrp/IyuWXLFrXXnLr6DRXa+/btw44dO/SCv1CWhPZ/DxLaVjAIhHngrz4HCmaklD8zaqJq1aoCzxlcyMzMVDtbog5nZ2fB6nh+OHz4sF4LJXXh6OiIp0+fanwAGxLU+WeWSCRYtWoVS7Nq1Sp2XNnHMN92MjQ0VCDiDx48qNG3Nn+zB22fvOvXry/YeVCbXbG1C+3C0L59e8ELTF5entqZ12PHjgEoENLp6elISUnB48ePBbazAPDnn38apV0eHh4qojUvL0+taYWm/4mmoM08qlGjRmjVqhXCwsIQEhKCJk2aCExsRowYodLOIUOGCBba8sPOnTtFbagTFham9qU8Pz8fBw8eRHR0NEJDQ1XWFJhKaEskQpt2oMC05erVq7h8+TLjxo0bgv8RAERERBj9Og0PDzdaP4GC6/jDDz8UXb+hQruwgYT2fw8S2lYwCIR5GDp0KBMg79+/R7NmzXTm8fLywpo1awQr17lw+vRpvWbRJJKCmfHJkycLPssD4l2u6cPHH3/MdogzxsNB0ywrX0wMHDgQEknBQ4wv9q5cuSIQ0l5eXswry71795hwadKkCRISEjB9+nRMmjQJc+bMEYgiZR+5ysTFxSE/Px+xsbEahbtEIhTa+fn5eP36tUl5+/Ytq8+UQrtt27YqM72adoHkL2DVFHJycjR6DNGHDz/8EHfv3hWUrVAoMGDAALXpJ02aJO7CBPDw4UOtpip8zyTqAifOPvjgA+zZs0dgO8wPjx49Qq9evfTqt5eXF3bv3q2xbnVma3yhvXbtWgwZMkRvuP+kstCuWLGiyky7rvDrr7+K3nFTX/gmPIUNEyZM0KtuEtqEuSChbQWDQJiPjh07IiMjA+3btxeV3t7eXrDrHlCwKcK4ceP0tinl07x5c7ZQTdNMsTEICgrCggULcOnSJTx//hyZmZmiyMjIwOPHj/Hnn3/i66+/1viJmW97CABVqlSBRFIwY/3s2TMABUJW3UyTjY0NZs2aJfgf8jfHUQ45OTk6/7P29vYC14CasObFkIXhww8/FAjtnTt3avTBrWxeohwePnyoc5MmsbRr107gGSIvL0/rQj9l0yd1ITs7G5s3b4a/v7/Wur/44guNZRw9epSJyBIlSqh4EAKAV69eYfLkyShRooTB/e/evTvu3LkjKPfJkycoXbq0SlpTLYbkqFGjBpKSklS2j1cODx48wOTJkzX+941F69atsXXrVty5cwevX78WfY9KS0vDv//+i02bNqFp06Z612uo0C5fvrzeazSqVq3K8pPQ/u9BQtsKBoEwL/p4XpBICsQq511k165dKF++vFHa4ebmhrFjxxZKsFsaqVSKRo0aISEhAdeuXRPMfNWpUweZmZkaPZWow97eXuWzNVAg+jp37my0dhdXoS2RFCwSVCgUOHPmjFZxWLp0aURFRWHAgAEYOnQooqOjMWrUKHz11Vdo2bKl0RfqhoeHIzc3F2lpaToXV9rZ2SEwMBDVqlVDvXr18OGHH6JFixZo06YNWrVqhXr16jHvP7po2rSp4FrKz8/HgwcPsGTJEri7uwvSVqxYkW26c//+fYwbN06vDXS0YWtri4EDB+LixYsA1C8slkhML7Q5HBwcEBAQgKCgIAFly5Y1+zPZEtBiSMJckNC2gkEgrJ/w8HCz/0mKA/Xq1dNqwqGO6tWro169emjUqBEaNWqE4OBgrTsjGkK5cuXYrm2RkZEmPw/BwcGsPmO+MGhiwIABer9QmoMuXboItrY2B1KpFA4ODnBwcICdnZ1OM4guXbogIiJC7+tWH7TtYmprawt3d3e4u7sbPJtsZ2cHBwcHk3k1Kg6UK1cOsbGxiI2NVbuHAp9OnTqx/68hXzbc3NxYfk0vWETxhYS2FQwCQRAEQRAEUfwgoW0Fg0AQBEEQBEEUP0hoW8EgEARBEARBEMUPEtpWMAgEQRAEQRBE8YOEthUMAkEQBEEQBFH8IKFtBYNAEARBEARBFD9IaFvBIBAEQRAEQRDFDxLaVjAIBEEQBEEQRPGDhLYVDAJBEARBEARR/CChbQWDQBAEQRAEQRQ/SGhbwSAQBEEQBEEQxQ8S2lYwCARBEARBEETxg4S2FQwCQRAEQRAEUfwgoW0Fg0AQBEEQBEEUP0hoW8EgEARBEARBEMUPEtpWMAgEQRAEQRBE8YOEthUMAkEQBEEQBFH8IKFtBYNAEARBEARBFD9IaFvBIBAEQRAEQRDFDxLaVjAIBEEQBEEQRPGDhLYVDAJBEARBEARR/CChbQWDQBAEQRAEQRQ/SGhbwSAQBEEQBEEQxQ8S2lYwCARBEARBEETxg4S2FQwCQRAEQRAEUfwgoW0Fg0AQBEEQBEEUP0hoW8EgEARBEARBEMUPEtpWMAgEQRAEQRBE8YOEthUMAkEQBEEQBFH8IKFtBYNAEARBEARBFD9IaFvBIBAEQRAEQRDFDxLaVjAIBEEQBEEQRPGDhLYVDAJBEARBEARR/CChbQWDYChRUVF48uQJ4uPjLd4Wa8HFxQUBAQFGwcHBweL9
IQhCgo8++sjszwltlCtXDtWqVTNbfVWrVkWzZs0QEhJisT5HRERg9OjRFj/3BFHUIKFthvo+//xznDlzRm9OnjzJyujRoweioqIglUrZsUGDBgEAVq5cafELyVr49ttvYazQpk0bi/eHIP7rzJ49GwAQFxdn8bZIJBJMmTIF2dnZePjwIfz9/c1S5+bNmwEAiYmJhS7L1dUV69evR6VKlUTnadOmDQBAoVAgPDzc4mNAEEUJEtpmqG/EiBEGCb2cnBxWRnZ2NgBAJpOxYyS0VeGE9rt373D37l2DyMvLI6FNEFZCcHAwcnNz8fbtW3h7e6tN8/PPP2PHjh16UbVqVYPa07p1a+Tn5wMAzp07B2dnZ5OfA2MK7cTERADAy5cv8cknn+jdhlevXiEoKMji14U2SpYsiR49eiAuLg6LFy/GnDlzMGzYMFSvXl1rPrlcjqZNm2LUqFGYM2cOfvjhB8TExKBly5awsbExuD2BgYFo0qQJmjRpYvXnjjA+JLTNUJ9MJoONjQ1sbGzQr18/AMCGDRvYMRsbG7i7uwMAsrKyBMe5Mkhoi4MT2r///rvBZTx+/JiENkFYADs7O4SEhKiwf/9+AEBCQoJKnI+PD9LT0/WeyPjwww8FdZcoUUI0CxYsAADcunULjRs3FpXH1tbW4PNiTKHt4eGBw4cPs8mcQYMGic736NEj3Lt3D40bN7b4taIOe3t7zJgxA2/fvtU47iVKlFCbt2fPnuzery7cuXMHH3/8sUHt+uOPP1g5x48ft/h5IswLCW0zn/A+ffoAANavXy84XqJECQAFQltdPhLa4ijuQrtChQpsZiQgIEBr2tDQUJZWXzw9PQVl2djYiM7r5+cnqi/VqlXDyJEjsXDhQsTHx2PIkCGi8yrj4+ODfv36Yc6cOVi0aBFmzpyJwYMHo2LFiqLyBwcHY9iwYViwYAEWLFiAr7/+GmXLltWrDXK5HE2aNGGzYYsXL0Z8fDzGjh2L8PBwuLi4WPz6sXaqVKmih1QuCIsWLUKVKlUQGhoq4Pr16wCAr776SiUuNDQUTk5OgrrfvXund936hFGjRqn0d9WqVUhOTtbJ06dPAQCPHj0SlX7FihVaz7ODgwN27NjB2hYXF4fmzZvj1q1bWklLS8OjR490pvv222/Nfu24u7vjxIkTrE/v37/H4cOHsXHjRuzdu5edQ01C++effwYAvH37FocOHcLmzZuRlJSEFy9esDKzs7P1FtsVKlSAQqEQXAuWtLUnzA8JbTOf8L59+wIA1q1bJzhOQts4FHehffHiRXaz3rVrl9a0ycnJBouCbt26Ccry8PAQnXfEiBFa2+Xp6Ylff/1Vbd7s7GxMnTpVcJ1rw9XVFUuXLmX/D+Xw6tUrrfldXFywfv16lQchAOTl5WH+/Pk6PxnL5XIMGTIEDx480Hpenj17ZvHrx9rhhPY///yDmJgYraxduxZAgdBWNyaccBb78mYJoc29DBg7XLlyRWd/bWxssGHDBigUCkRHR6N9+/ZGq3/27NlmvW5kMpngfrdy5UqVyQKZTIbmzZvDzs5ObRlLly7F8OHDVUyB7O3tMWfOHFb2zZs3BWuldBEbGwug4F6Uk5MDAJg1a5bF/2uE+SChbeYTzpmOrFmzRnCchLZx4IT2oUOHULFiRYNITU0FYH1COywsTPAwy83NRenSpTWmt0ah7erqisuXL7O0d+/exebNm5GcnMweQgAwd+5cnecjICBAIFTevHmDAwcOYOPGjUhOTsbLly+1Cm0HBwecPHmS5U9JScGWLVuQlJSE9+/fs+PaPtfb29tj9+7dgv7n5ubi9u3buHXrFl6/fi1on6WvIWuHE9pJSUmQSCSYNm0aEhIS1Nq1tmvXDoB6oc2Vk5KSIrpuTmg3btxYYL5XWLj/oTahvWrVKkRERGjk2LFjAICDBw9qTbd69WoA4oS2RFLwQsI9/O3t7eHr66vCkiVLkJqaiu+//x5lypRRm0YZc3+9iY6OZv+zefPmGVSGLtOec+fOsTrE2vfL5XL2Ar5r1y5WxpMnTwpl800ULUhom7iujz76CEOGDGGsW7cOAHDixAnB8eHDhwMosJnjH4+KioJEQkJbLMXZ68jixYsBgL0IAND6iTY4OBh169ZVYdu2bSx/9+7d1aZR/k/yhfb333+v9UXF3d1dY5tWrlzJypk+fTrkcjmLq1mzJp48eQKgwLtBgwYNNJbj5OSECxcusLQzZ86Eq6urII2NjQ1atGihsQz+LNWiRYsED9pKlSrhzp07LL5t27Zqy5g5cyZLc//+fXTv3h2Ojo6CNGXLlsWwYcNEi5//MmXKlEFSUhJiY2Ph6+uLrKwsZGRkqMxOSiQSfPDBB0hKSsJXX32FihUrIioqihEfHw8AuHDhguA4R4UKFVTK44R2/fr1jdonzr5cm9BWF8dHrI32mDFjAIgX2rro0aMH+9qzYMECtWlq165tUdHo7OzMzDvOnz9vsrbw7xdiF5G2bt2a5enTpw8mTJjAfpP3lv8OJLRNXNeyZcvUijixgZuRIaEtDk5ov3nzBtevXzeI3NxcANYltB0dHfHq1SsAwOrVq3H37l0AwI0bN/T6jCmRSPDjjz+y6ys0NFRUHr7QHjlypEF9CA4OZt4a9uzZozZNx44dWT3bt2/XWBb/oafLVEUdpUuXZrPWx48fV2uq0qRJE1bH4cOH1Y7Jmzdv2PUWGBiotU57e3uLX0dFiSVLlgAAnj59it9++00tYWFhkEgk+OKLL/S6r/bv31+lvv+C0HZ1dUViYiLKly+vs91t2rRhz50lS5aovc9MmjQJeXl5WLx4sd7nxc/PD6NGjdL6MiyG/v37s3Ht1KmTya7H77//ntVTp04dUXm4cXv79i1cXFwQFBTEXlx27NhhsrYS1gUJbRPXpTyjfeTIEQDA1q1baUbbBBjDRvvWrVvIzMxEy5YtLd4fjs8//5zd5MPDw9mMHQC9PQBYSmjHxcWxMpo1a6Y2jVQqZTPJ79+/V/sJOiAggIlkQ8f5m2++YW3p3LmzxnRnz54FUDBrrmzryzfl+e233yx+jRQnatWqhby8POTk5OD169cqcGKFexnmhPaxY8cwZcoUjRw/flyj0K5QoQIqVqxo9I2qgoKCEBoaqnZW3txCe/v27QCAtLQ0FfMwPm3btkVWVhYAYP78+Rpf5iMiIthYfPnll6LPiVwux7///sv+W4UR23v27AFQYANdGM8uuti7dy8A4PXr1xrtvPl4eHiw+9SGDRvY8UOHDrFnvY+Pj8naS1gPJLTNfMK5G73ymzfZaBsHbmdIdQ81c2CsWRplODvPZ8+ewdbWFrVq1WIiT9/xt5TQ5hZyPnv2TOss/MKFC1ld6s7j5MmTWXzdunUNagvnbisrK0vrTPOkSZNYXd27dxfENWzYkMXt3bvXItdbccTe3p6ZBSmfc4lEglatWgEo8MDBeQ7hhLauRWbclxBlob1+/Xps2bLF5CjvJskJ7TFjxmi18/7ll18AFHzN0paOm2jQJLRDQkIE6xoWLVqkIhoHDhyInJwcKBQKxMTE6By
vuXPnAihYyCz2pd/Z2ZntVwAAw4YNM+hakclkzLUj/z/o4eGBFi1aoGPHjqhXr16hBXiXLl3YC8WkSZNE5Rk2bBjrH9/0bMCAAez4mDFjLP5/I0wPCW0zn2yFQoHs7Gx4eHgI4nQJbW62hX+MhHaBDeG4cePMQrt27bS2xZizNHyCgoKYyUVCQgI7funSJQBAZmamRpdV6rCE0OY/WHXN/kZFRWl9AHP9vn79ukHnUyaTITMzE0DBWgltabkFdwAwefJkQVypUqXYuGRlZem10x6hmYSEBADAli1b0KBBA4HtvZeXF1JSUlREuDqh3b9/f5w5cwZffPEFO6ZJaHPXg6mD8pccS3gdcXNzQ1JSEkt76NAhyOVy+Pj4CNZvXLt2DfPnz8fSpUuRmJiIjRs3YteuXUhOTsbRo0dx5swZXL58mZmxAQUvP2JnaceNG4e0tDQcPnzYYA1Qvnx5VvecOXPg4uKCFStWMPM/Lrx+/RrTpk3TORNtb28PT09PeHt7o1KlSggPD8e6devY/3zVqlWCdSXa+PvvvwEUmD7x7cbd3NyYmdLVq1ct/n8jTA8JbTPVaWtry3x8rl27ViVel9BWx/jx4wH8t4U29xnOHEHX8OxSUQAAIABJREFUeTbWLI0y/Blcvv0o3/yhX79+osuzhNCuWbMmy69pURVH48aNWdqFCxeqtIObWeL7Cvbx8UGrVq0QHh6OOnXqaH0YBgQEsPLV/Rf5hISEsLTKLjklkv99igeAhw8f6rXTHqHKkCFDABRsDlKzZk2kp6fj9evXmDx5MgICAtgXQeV9CNQJ7e+++w6AcAZSk9Du2bMn+vbtK5qlS5cCKNiBVp98vr6+gno5of3mzRs8f/5cI5wJQlZWltZ03JoBXYshbWxsWB+4FxEvLy+DXRzm5eWxe9/BgwdFi9HC0rx5c9aGmJgY5p0lLy8Pz549Y+YvXEhKStK6WLJLly5q+7d79269RFLt2rVZ3vnz56vEb9y4kcVrW/RNFA9IaJuhPrlcjvXr1wMomH1UtxGGLqFdq1YtDB06FL169UK3bt0wZMgQ5n3iv+yTc9CgQYiLi1OBW4V+4MABtfGG0KVLF53tMcYsDR+ZTMZmjG7evCmI8/PzYw+3o0ePii6zsEL75cuXePLkCZ48eYIbN25g27ZtGDx4sNatqNu2bcvyf/PNN1rr4s9SKQvhpk2bsrjo6Gh4e3tj69atbMaJC6mpqRg5cqTaRY7169dn6eLi4rS2xc3NTfCwVY4PCAhgM6xAwZeMLVu2oHLlyhb/bxRFKlSogOvXr6NmzZrw9fXFokWLmFjiXrBOnjypstmMNqE9ceJEdkyT0NYXbs1EWlpaocqxtNcR5d0xZ86ciaSkJCxduhTff/89xo0bh6FDh6J3797o3LkzWrVqhUaNGqFGjRooV64cvL294eTkBKlUit69e7Nx6tixo1muF74w/vfff5GXl4fJkyeze6+trS06duyIhw8fsnTjx48XVR4/ZGVl4Y8//sCnn34qql3cVxkAbMEuH/6XsuXLl5vlXBGWg4S2GerjXPopFApERkaqTaNLaPOFCj/k5uaKXgH9X6FKlSpQKBTIz89X68arKNGiRQs21nzBwLFv3z52bQUHB4sqs7BCW1N49OiRxkWO/MWcQ4cO1VqXr68vS7tt2zZBXK9evVjckCFDmFDJzc1FamqqysY16oQJ3+WWLhtUOzs7lvbPP/9Um6ZMmTJsppULeXl5WLNmTZG//iyBsj1t165dBS9S6enpmD59uuC5oU1of/fdd+yYWKHdtm1bJCUlafTJXFyEtjrs7OwQEhKicj8JCAhASEiIwJSnevXqaNGiBdulduHChUZfn6KNHj16CP53X331ldp0oaGh7N7w7NkzjTPuJUqUQMWKFREcHIzatWsjIiICc+fOxfPnz1kdP/zwg9Y2OTg44OXLlwA0m4bY2NiwnSrT09O1TlIQRR8S2maor27dunj16hWGDBmiMY2NjQ0iIiIwYMAAdOjQQWWb18DAQOzZswf79u3Dvn37sHv3bixfvhyNGjWy+EVkbSxatAjA/za9KMpw2wIrFAqUK1dOJZ5vz6xrdpbDEKHt5OSE2NhYREdHY+DAgejbty9Gjx6NdevWscVIQMGndHULFPv06cPS6BI5fFGvbM89YsQIFvfvv/8iKysLw4YNYzbqDg4OiIqKEmwUw3nu4QgPD2dxYhYjcTOpx48f15hGJpNh8ODBzA84F7KzszF9+nRRXgoIIT4+PliyZAkT2YmJiVizZg37/fr1a0yaNAmurq5qhfb06dMBCN0/ihXa3KLCu3fvqo0vzkK7atWqAFR3VeW8e/D/T1y7tD3bTEm3bt3Yf+2ff/7RuqMst6AUUD/LrA0PDw8cPXpU4z2FT2RkJEu3atUqNGjQQC07duxg6Xr37m2R80eYBxLaZuywmHRt2rTReoMndPPs2TMABQvdVqxYYTDqZpDNibu7O7OZ1GQa4uzszBZyPXr0SNRmDYYIbW14eHhg165drMxTp06ppOHPRA8ePFhredpmtJU3JNL0ifqTTz5haS5evCiI4281re0zskRSMLvKBU0z2srjMX78eDajxT8nlvKEU9QICQlBQkICu66fPHmCrl27sviaNWviwIED7NwuWbJErdDmvNeIWQypDPcVcufOnWrjOaGdmZmJ0NBQnSjbZnNYUmhHRERg/fr1KusKOI9GT58+FRy3RqHNPcsB4KefftKalv+Szr+exBIYGMhM9U6fPq0xHfeVUZ9w6NAhi5w/wjyQ0DZTndznOF1wD4yHDx+KSk+2oKrwZ1gLEwrji9sYDB06lLVlwYIFGmdGDh48yNJ16NBBZ7nGFtoSScFqfc7jCgCV67Jz584sTtdiSr6NtvLDc/To0SxOl/DltldXKBQCTwjNmjVjZcTGxmotw93dnaXVZ4MJNzc3xMXFCbZyP3XqlEn9/BYHtm7dys5XWloapk+frrLjJ0eHDh1w/PhxeHl5qRXaa9asASAUVWKF9o0bNwAAEyZMUBvPN4USEzSto+GE9saNG7Uuojx9+jSAghdubek2bdoEQJzQnj9/PoAC7yL849xi5Hv37gmOW6PQDg4OZud46dKlWtPyv6r16dPHoPo4t5OaTDwDAwNV1ouICQqFgszMijEktM1UZ8WKFfX+84kJOTk5Fr+IrI0ePXro5QlA00Nt2bJlFu0Ht1mKPkF5BlgdphDaEol2n9N8TyKzZ8/WWg5/R8YpU6YI4vi7wCnHKTNv3jyWlr+yv0qVKuy4Lk8y3Gd0QOhaUSx169YVzG7rms3/r9OzZ09cvXoVe/fuxZ07d3D+/HmNJCcns3zqhDb3Aspf8CdGaJctW5aNlya/0JzQzs/Px61bt3QyduxYteVYwr0fx6lTpwAU+NLmH+cWBJ49e1Zw3BqFNt+P9v79+7WmHTVqFDs/7du3N6g+znNYbm6u2nj+PXDChAmoVauWVvj3qOnTp1v8/0eYBhLaZqrT398ff/75p064TT2ysrJEpec/bAjjwO3eqWk2yxxwf0x9Q3Z2Nr
y8vLSWbSqhzTcP4X+ul0iE5iDatlaXSCTo27cvS6u8ex3fJIS/yE0dMTExLC1/zYOjoyP7BKzrky1/S3hDxQR/9pP+r9rhNjIaOHAggAI77JSUFBXy8/Px5MkTlk+d0OYWmwUGBrJjYoT2yJEj2XhVr15d65gay0b74sWLWje64Tzb3L17V9TGOPHx8ShXrhyaN2+udnOoEiVKICcnB4Dqzqhc//fs2SM4bo1CWyL5n+11VlaWVvOs3377DUDBy5EhOzI6ODgwUa9ukaPyrrZidA1/BjwlJcVsbhEJ80JC2woGgQ/ZaFsezhVUjx49LNaGBQsWsIf9119/rXNm5KeffmLpddl7mkpoR0dHs3LV2UA+ePAAQIHNrbadIZctW8bK8ff3F8R5eHgINo/Q1h7+NvU1atQQxHGfgN++fat1oeKMGTN0ii5dlCxZkpVx48YNi17bRQVOaGvyRf/8+XOtQpvz256eni641nQJbalUiqtXr7LxUhabHNa6GJJP165dART4JFcWcJznnfz8fJXN0ziTG2Uf9mKFdp06dfDZZ59p3XGVw8/PT1Q6bfA9M2n6QtWkSRN231C3i2utWrV0ev6YNWsWq2fq1Kkq8R9//DGL18fMjL/eoE2bNoU6F4R1QkLbCgaBDwlty8LfEOWDDz6wSBvs7OyYO6nXr1/DwcFBZx7+DPjly5e1pjWV0ObvNqfO3nD58uUsXpMbQFtbW+Yf/q+//lKbhrO9fvr0qdaHNCemMzIyVMR0XFwca4vyjB6HVCrFrVu3AAC3bt3S+nKgjXLlyrG6NPWJEKKv0K5SpQqGDBnCTIQSExPZOd+8eTN7xjRs2BBDhgxBSEiI2nI7derE8nGzl9HR0SrpjCW0J06ciISEBBUvU8oYIrTHjh0LoMA7j3LczJkzAQB///234LhcLsfjx48BAH379hXEiRXanD9tTQtJOVatWgWgYD2Sur0l9OH3339n47Z48WI2s21ra4tu3brh1atXAApmmpVfuiUSCSZMmID79+9jxIgRCAoKYselUilq1KiBtWvXsvLv3bsHNzc3lTK4vTIA/RZb8j1H/fLLLyb5PxGWhYS2FQwCHxLaloX7bJqVlSVK4KqjsLM0n332Gbvx6rPrJ7flLwDUq1dPYzpDhLa7u7vWeL6bLU1u8Pgbxfzxxx9qXXENHz6cpdHk8qpfv34sjbqZJeX28HeQ5KhcuTKb4Tp37pzaWW3+A5DvIo6jcePGGD16tM6xnjt3LisnPj7eotd3UUFfoc2nbdu2UCgUUCgUbOOqBw8eoGnTplrrdHBwYIsg9+/fz2yV8/LyVEwjjCW0xWKI0OZcg27ZskUljnsJVfYT/umnn7I+ly5dWhAnVmhzX7a0tdXV1RX8YMhus3y8vLxw7do1Vl5ubi5SUlLYTplAgVmdsikax4QJEwTtef36Ne7du4eMjAzB8bt376p1QMD3EJWeng5HR0fRbXdycmIvde/fv0epUqXMck0R5oOEtonr6tmzJ+bMmSMabtV9enq6XvnmzJljsRnYooC3tzd69OiB1q1bo0GDBggJCUFgYCC8vb3h5eWFatWqYeLEicxuUdfW3JowxiwNf3ZGny29+SL1xx9/1JjOEKH9+++/46effkKjRo0EnjMCAwMxb948ZvOcnZ2tVeRv27aN1b1hwwZ4e3tDIimYeRo8eDDbVOLkyZMa7RVtbGzYS4VCocCUKVPg4uICiaRALA0aNIjtJvjq1Su2mYYy/Bn2Xbt2MTMVuVyOnj17sof01atX1Ypp7lPx06dPER8fj7Zt2yIwMBCurq7w8/NDq1atmBcIoGCbbXW+0AlVDBXazZs3Z+Jo5cqV8PX1RXJyMoAC8Tht2jSN1xVnrqVQKNCwYUNIJELToR07dqBatWqQSMwvtFeuXAlA94I/DldXV/aSMXr0aEGcv78/+2oXHh7Ojnt6ejIbY2X/9RKJeKHNzZZPmzZNY/ukUilbjJmdna3W976+uLu7Y9myZQJPP1w4cuSI1udjtWrVsGPHDpUt27nw5MkTfP/99xo94AwZMoSlXb16td5t55v+qfuCQhRtSGibuC5uVsEcoV+/fha/oKyVUqVKsYeLrpCSkqJiGywGY8zSBAQEMNH66NEjvRbHeHl5MaGalpamcVbFEKG9f/9+licrKwsPHjxg/sq58O7dO53b1Ht6ego8LeTl5eHevXuCmafbt29rFMccFSpUEGyrnJ2djQcPHggelBkZGRpNVCQSCVxcXASeXfLz83H//n2Be8jHjx9r3HGTb5OpK2RkZJD9pR5wQvvly5e4e/euCnl5eQKhLZfLMXr0aHb9Hzt2jG3TLpPJMHXqVPYF49ChQ/Dz81NbH6DqUvK7775j/0kAOH/+PA4dOsSu+f79+6N37974/PPPERkZiR49eiAqKgq9e/dG//79MXjwYERHR2PMmDEG36M5G3SgQMiNHDkSI0aMUMu0adPYzLxCoVAxkxk0aBD773FfqoKCgthC/Hfv3qFKlSoqbVAntLmJhRkzZrBzzQnoXr16ae2Tk5MTOnbsiEqVKhn12nFzc0OLFi0QGRmJDh06CBbD6sLZ2Rl169ZFREQEIiMj0a5dO1SqVMlgszGCkEhIaJu8roYNGyIyMtIs0GyZdrjZGk3h+fPnWLFihcbNJXRhjFkabttowDAzg+3bt7P8mnYvM0Rojx8/XqN/8qysLGzcuFG0T3cPDw+sXr2afT3gQk5ODlavXq3TawqHn58fNm3aJBBBQIFg3rNnj6j2lChRAkuWLFGZBcvLy8Mvv/yiVfC7uLhg4sSJOH/+vMaXuBcvXmDJkiV6PeyJ/wnfx48f4/LlyyrwhXaLFi1w7tw5ds737NnDvnDwad++PdLS0gAUfIVo3rw5JJKCjVs4EX7r1i219rf169cX+Ks3NEyePNmg8+Ho6MjcjuoT5syZo1LW3r17ARS475PL5RgyZAhzQZmTk6PRvlid0P7yyy8BFPxfLly4wNY05Ofnk19ogvh/SGhbwSAQ5sPGxgZOTk5wc3ODh4cHPD094e3trfGToL6YapbGGrCxsUHNmjXRoUMHREZGokuXLmjQoIHO1fqaKFWqFFq3bo3IyEi0bNlSpx24Jry8vNC6dWt8/vnnaNu2rUEvSm5ubmjZsiUiIyPRunVrvXdxdHV1RYMGDRAREYFu3bqhTZs2CA4O1rolNKEZsaYj/MWLb9++xZgxY7Se8ypVquD27dsAgAEDBkAiKdjq/enTp0hLS1O7UI5PuXLlMHjwYCxatAibN2/G7t27kZycjMOHD+P48eM4ffo0/v77b4G/73PnzuHMmTM4ffq02plisdjb26NPnz748ccfsXHjRo38/PPPWLBgAVq0aKFShq2tLdtYau7cuZDL5UxAv3jxQuuiTHVC297eHr/++qvgRfP9+/ca/YYTxH8REtpWMAgEQRDE//Dx8UGTJk1UTDzUsW7dOiQmJqJMmTKiyvb29saYMWMExyIiItCoUSOL99scSKVS1KpVi5lEeXh4ICEhga2X0ISrqys8PT3Vrldwc3NDQEAA/
P399VoISBD/BUhoW8EgEARBEARBEMUPEtpWMAgEQRAEQRBE8YOEthUMAkEQBEEQBFH8IKFtBYNAEARBEARBFD9IaFvBIBAEQRAEQRDFDxLaVjAIBEEQBEEQRPGDhLYVDAJBEARBEARR/CChbQWDQBAEQRAEQRQ/SGhbwSAQBEEQBEEQxQ8S2lYwCARBEARBEETxg4S2FQwCQRAEQRAEUfwgoW0Fg0AQBEEQBEEUP0hoW8EgEARBEARBEMUPEtpWMAgEQRAEQRBE8YOEthUMAkEQBEEQBFH8IKFtBYNAEARBEARBFD9IaFvBIBAEQRAEQRDFDxLaVjAIBEEQBEEQRPGDhLYVDAJBEARBEARR/CChbQWDQBAEQRAEQRQ/SGhbwSAQBEEQBPF/7J15fEzX///vJBEiEWskiD0qQYjta1d8Uok9SAhJUVVUWyKxawlKi6JEqdqlpcS+VG1FqS21lBa1i0RCiD37zOv3x/zO6Z2Ze2fuTCaTNH3fx+P5x9xz7jn33nPn3Nd9n/d5H4IoepDQLgSNQBAEQRAEQRQ9SGgXgkYgCIIgCIIgih4ktAvgpqtUKnTu3Blubm4F/gAwatWqhWbNmtmsvnr16iEwMBBNmzYt8GsnCIIgCILID0hoF8BNnzdvHgBg4cKFBf4ACIKAKVOmICsrC0+fPkXdunVtUufSpUsBAPv27Svw6ycIonDTvn17m78njFGzZk3Ur18/X8p2c3ODl5cXSpcuXeDX6ezsjGHDhsHBwaHAz4Ug/q2Q0C6Am16zZk1kZ2cjIyMDVapUkcyzZcsW7N+/3yyaNGli0fm0bNkSWVlZAICbN2+ifPny+X4P8kNoN2vWDMOGDcs3KleuXCDPC0H8l2GGiS+++KLAz0UQBERHRyMrKwuJiYmy/XdeiImJAQCMHDmyQK/TxcUFDx8+BACMHz++wO87QfxbIaGdz3UVL14cfn5+Bhw4cAAAsHLlSoO0qlWr4unTpzB302/EcuXKoUKFCoqYPXs2AODevXvo0qWLomNKlixp8X3JD6HNriG/tk6dOtn0OSUIQsBbb72FnJwcvHnzBhUrVpTMs3HjRuzYscMs6tWrZ9H5BAQEQK1WAwDOnz8PZ2dnq16vnNCeOnWq2cYXhqVGgi+++AIA8Pr163z5qLAW9vb2aNu2rSI8PT0Vlenj44OIiAgsXrwYixYtwqhRoxQdW758eV6XOffd19eXH6dSqQr8nhLWg4R2Ptfl7e1ttqBbsWIFatasCS8vLx0uXboEAPjoo48M0ry8vAyE75MnT/IsLo1ts2bNMrje5cuX48SJEyZhlpK0tDRF+ZUIcia0nz9/jj179lidhg0bFvgfliCKMo6OjvD29jbg4MGDAICYmBiDNHd3d7x48cLs/qtdu3Y6dbu4uCjm66+/BqAdAWzTpo2iY4oVK2ZwvX5+fli+fDnCw8P5PjmhvXXrVov76jp16kAQtB8tn376qWK+/PJLaDQaAMDhw4cVHxcVFWXT58bZ2VnxvZg4caLRssqWLYu4uDh+3eItOzsbc+bMgb29vezxQUFBPP/YsWMVX8PRo0f5cXZ2dgX+XySsBwntfK6LCe2///4bkyZNMsrq1asBaIW2fjl2dnZ4+fIlAKB69eqK6mZCOzc3Fzk5OVaDdUBSQvu3335T3OGZsz158sTk9TKhff78eZs+T/mNj48PRowYgXnz5mHp0qWYPXs2goOD4eLioriMkiVLIjAwEFOmTEFMTAwWLFiAqKgos92N7Ozs4O/vj2nTpmHp0qX44osv0K9fP9nRjWLFinErTevWrc2+9urVq1tkHSL+nfj4+JjdNyxduhQ+Pj5o0KCBDteuXQOgNUzopzVo0MDgmU1PT7dqn6W/RUZGGlxv7969AQCrVq3i++SEtoeHh6SBJTExEQAwZcoUyXQvLy8u8nv27Jmv18i2p0+f2vS5sZbQdnFxwcWLF3ne+/fvY8uWLTh48CCys7P5/piYGNkySGgT+pDQzue6mND++eefIQjaobgVK1agVq1aBnk7d+4MQFpoe3l5AQBSUlIU182EtrVdHrZs2QLAuNDevHkzunfvLsvevXsBAPHx8UbzLViwAED+CO358+cjNTUVW7dutenzp5SAgADEx8fLvjDS0tIwYsQIo2W4urpi7ty5/CNNajt69Chq1qxp8nyaNWuGq1evSpaRkpKCHj16GByjUqlw8+ZNns/c/9yRI0f4sRShpujDhPaNGzdMWk03bNgAQCu09cuxt7fnwlnpB1phE9rJycm4du0ap02bNgbHlylThhs+fH19TV5jrVq1EBERoZhHjx7hwoULiIqKMus4W/uXi4X2vHnzZD84vLy8jOqNZcuW6ZQjngRav359/lEDAO3bt5csg4Q2oQ8J7Xyuq0qVKtixYwemT5+OSpUqISMjAy9fvkSFChUM8vr5+WHHjh348MMPUadOHZ2JeIsXLwYAXL16VXKinpS/YUEK7dmzZxstQ6mPdnh4OID8EdqsUz106JBNnz8lhIWF6bykr1+/jl27dmHnzp24ceOGTtq4ceNky2HuRoDWz/L48eOIi4vDL7/8glevXvG0Bw8eGBUkzZs3x+vXr3n+s2fPYtOmTTrWn5ycHHTp0sXg2OjoaJ7HnAltVapU4b6wf/75Z4G3CZH/MKG9f/9+CIKAmTNnIiYmBjVq1DDI27VrVwDSQpuV8+DBA8V1M6Hdpk0bODg4WI1Dhw4BMF9oswnzDH9/f4PjO3bsCED70W2OOFPy8TFx4kQAwOPHj/HWW28pKregIsOIhbYp1xA5atasiZycHP5OkPKTDggI4PXIvbtIaBP6kNC2Yb3Lly/nHdehQ4ckadWqFQRBwJAhQ2DO9tFHHxnUR0LbOIVZaI8ZMwYajQaxsbHw8fExSO/RowcXypmZmahWrZpkOQ8fPkRSUhKGDRtmMFReunRpbN68mT9Da9eulSyjRIkSuHXrFgDgzZs36Nq1q076kCFDuCBOSkoyqMfLy4tb3e7evat4ok9UVBQ/twkTJhR4mxD5T9WqVbF//37MmjULHh4eRg0TzZs3x/79+/mclfDwcM7ChQsBAJcuXdLZz6hdu7ZBeUxot2jRwqrXxPzL8+o6IgjaD4igoCDOihUrAABXrlzR2c/Q/68KgoAWLVogLS0NsbGxKFGihOQ5h4SEQK1WIyUlBQ0aNNC550OHDpUUgl26dMGLFy/w7rvv2vy5sYbQFhsEjAmi69evA9AaFqS0CwltQh8S2jaq08/PD7m5ucjKykJqaqoBTIh069YNgvCP0P7tt9+MDp+yP6eU0K5WrRpq1Kgh25laCvMVLFeunEEaCW3rMHToUHTv3t1ontGjR5vs0FetWmX0/+Xo6Ig7d+4A0Fq8HR0dDfJ89NFHvJ7Ro0cbvZcAJN1ZTp06xdPZx6Qpzp8/D0A7x4D8s/97sGcqJSVFdoIycyf64IMPYM42dOhQg/r+DUJ71qxZZl2nVL/ZqVMnPHv2DABw6tQpg4XTQkJCkJ2djQcPHsDb25vvr1+/PlJTUwEAixcv5vtVKhXGjRuH3Nxc
ANr+VEnc7cqVKyMyMlLSUm8u1hDa586dA6CdTG9ssiMLNwn8874Wk19C28vLS3KisBS2Wg+DUAYJbRvUV7x4cfzxxx8AgH79+smez8OHD3moKCa0Fy1aZLTszz//HICh0P7hhx/yJfKGPvqiiQntefPmGZ2BzywxP//8s9F8w4YNk31h6FOUhLYSq6+npyfvmL/77juLy2EvdgDw8vIySL9w4QIA4NmzZ3BycpIso27duryMX375xSB91KhRPF38kpZDHK2HzW8g/jsww0R2djaePXtmADNMBAYGQhD+EdonT55EdHS0LKx/khLatWvXhpeXl9UNEzVq1ECDBg0krfKWCu3ff/8d8+fPl2XVqlVG+80GDRogKSkJgDZySu3ateHk5ITo6Gio1Wq8fv0aH3zwAbp3746+ffsiPDwcycnJAIDdu3fzfsDd3Z3Pt2HvK6noKvrY29vzUTKNRpNnsZ1Xoe3o6MgnOx48eNBo3pCQEF6XVHSV/BLa7P4r2V69emXz/ywhDwltG9THBN2WLVvQpk0bHUuwm5sbHjx4AAAIDQ3l+6WE9uDBg3Hx4kV88sknfJ+c0H78+LHiP2Vett69e+vU+2+KOpIfQtuaVhpTODk58fuzbt06i8uZPn06L0ff179SpUpc1Hz//fdGy2FDqhkZGQYv2/Lly/NFkZKTk41ajARB65vLtoEDB+b7vSQKD8WLF+dzC/r372+QziaNi92UmNCeO3eu0bLnz58PwFBof//994iLi8t32GqS5cuXR3BwMLeOHj58GMHBwQgODsZPP/0EQPvxzPYFBwejefPmXGibWlWYTZ431m/WqlULd+7cQXZ2Nnr27Ik6depwq7TctmbNGh1rNes70tLS0LdvX8Vt7OzsrFOX+J3OzKC3AAAgAElEQVRmCXkV2uIP+2XLlhnN26xZM553+fLlBumFQWh/+OGHBf4/Jv6BhHY+1/Xhhx8CAG7duoWGDRvixYsXePHiBWbPno3q1atzYaovYqSE9rhx4wDoumXICe3g4GBJ30Q5vvrqKwBa64I5x1WtWlWnXnY9r1+/RnJysixv3rwBoPUvNpbv+fPnAJQJ7Tlz5gAAzp07B0EQUKdOHcTHx8vCPkZevHhhNF98fDz+7//+z2T91rbSmKJ27dq8Y50xY4bF5Xz33XcAALVajVKlSumk9erVi9fx8ccfGy0nNjaW52Vxe8Xs3LmTp//vf/+TLUccqeTFixd5WhiJ+PfBLLpxcXFo2bIlXF1deZrYMCEW4VJCe+jQoYiPj8cHH3zA98kJbfHE4PzcOnToAEEQ0KpVK7OP3bBhg6TQ7ty5M+Li4nREnRKhLQja8JliP+7IyEiMHz8egwYNQo8ePTB9+nSo1WpoNBpMmzbNYIRMpVIhIiLCosVsJk2ahLS0NBw/fjzPGkAstNPS0vj74/r169i2bRs+/PBDo+FQO3XqxI//9NNPjdZVpUoVnnfz5s0G6WKhHRUVpXjS7LFjx/hxUkLb1dUVZcqUkeSdd97hHy7btm0r8P8woQsJ7Xyuq2bNmvjzzz/h6+uLihUr4uuvv+a+gMxSeObMGQMxISW02eQw8SREOaFtLt27d+fnlJdyCtJHe+7cuQC0fu2CIKBRo0Zmv8zkNiWi2dpWGlOMGTOG1yUXasoU9vb2SEhIAACcPn3aIH3ChAm8DjZML4fYCi0VfaRv3748feXKlbLltGjRgudbvXp1vt5DonAxYsQIAMCdO3fQqFEjvHjxAs+ePcP06dPh6enJ+5cNGzboHCcltKdMmQIAmDZtGt8nJ7TDwsIwZMgQxbCJ7enp6WYd5+HhAUHQClxjrh9ShIaGSgrtkSNHAgB+/PFHvk+p0GZ06dLFwK1xyJAhyMzMxJs3bxAaGgoHBweMGzdOx2Ls5eWF2bNn63wMFQRK4mgnJyfjnXfekTy+T58+PF9ERITRusqWLcvz7tmzxyBdLLQt3cyZDOnq6soNPPfv35ecO0UULCS0bVCf/sSQvn378igNgNb6O2/ePB0fPmMW7ejoaL5PqdDu2LEjDh06hG+++UZyokpRENos0sDx48chCNrFB/z9/WXZvXs3AK2ribF8/v7+ijsva1ppjOHs7Iz79+8DAC5fvmzxLHXmAw8AgwYNMkhnYSUBmFwZk43eAEBYWJhBeokSJfgkrLS0NMmJl/p1WvoBQfw7qVWrFq5evYpGjRrBw8MDS5cuRUZGBu+bAO0EPv25AsaE9meffcb3yQltcxkwYAAA7cQ5a9+DDRs2ID4+XjKknrWFdr169bBv3z7+nyxXrhyKFy/O++c7d+7Az88PgiCgYsWKSE5OhkajwYABAyAIAo4fPw5AK2KHDBlSYEuHOzo6Yvbs2YiIiMCwYcMwZMgQREZGYt26dXxUFNCOoEpNxmbtCZh2uxCLeil/blsL7U2bNgHQRkGRirVOFDwktG1Yb6VKlbBixQousletWoXVq1dzK+jLly8xY8YMlC5dWlJoT5s2DYCuD5pSof3JJ58AAB49eiSZXhSENptguXfvXkXnWpgnQ5qCuXvkxUWlVq1aXPiePXtW0m963bp1vPOXWmRJzODBg3le8XC91HkDkFzgxsHBASkpKQC0L/mCenETBYe+fz8LNce2Fy9e4PPPP9d5bxgT2lOmTOH7lArtLl26YP/+/ViwYIFkujWFNpsAz2CLVDGBK8ZaQrtq1ar47rvveNzo+/fvo1+/fvD19eWx8bdu3crvcbFixVCuXDn+XsrIyEDTpk1RqVIlbNy4kbfNyZMndcIBFgbKlCmDbdu28XO8ePGiQZ7+/fvzdLnISgyxRVvqXSMW2jt27FC8yI94YS+lQlscbceUywtRcJDQtkF9DRo0wPLly7lfcnJyMoKDg3m6r68v9u/fz/8wK1eulBTa7CUh9pVVKrS//fZbANpJN1LpYqHt5+dnEn3fbEZBCm32Zf/DDz8oahcpoV2tWjXs2rVLcoGMwoI43J6cEDBF6dKlcfnyZS4WpHyqxfcUgEk/TLFVSM6fu127djyPVDuxiW5A3vzOiX8/7u7uWLZsGRfZa9euxfr16/nvZ8+eYdq0aXB1dZUU2qxvFLsCKBXabLGWu3fvSqZbS2jb2dkhMTERp0+fhqenJwRBWmgz10Ipoc1GkjZt2sT3yQltT09PLFmyhI8SpKenY8aMGShZsiQmTpzIjT65ubm4ffs2Hj9+zPPqb3fu3EGZMmUgCNrFg5jvfFZWFj799FNFIf5shaOjI65du8bPXX8VzW7duvG0SZMmGS1L7KMdGxtrkG6rONr169fnmuLo0aMmJ5gTBQcJ7Xyua/v27fzP8/z5c8yaNctgwhkjICAAv/76K9zd3SWFNrMGhoeH831KhTazUsyZM0cynQltpZtcODkmtPfu3Su5giXjl19+AaB1ezCWz1SYKjGszCVLlihqGymhvWPHDgDA7du3LZrgk9/079+fvwz3799v0cusZMmSfOJNdna2pD8144cffuBtburjQ2zRHj58uGQelUqFu3fvAtCGoNKfm8As6BqNRjLUIFH08fb2RkxMDJ+
gmJycjJCQEJ7eqFEjHD58mD9ry5YtkxTazAVJyWRIfdjE3l27dkmmM6H96tUrNGjQwCTMN1sftrJjUlISF1f6Qvvtt99GcnIyhg4dKim0IyMjAehGHpIT2uIP2bi4OFSvXp2nMbGZlZWFpKQkXLlyBSdOnMDevXsRGxuLJUuWYNasWRg3bhwPVytelbNcuXJ8MbOEhASULl26wJ8lMZMmTeLXru8m17x5c55mKvyoOK/USre2ENpOTk64cuUKACA1NbVQvquIfyChnc91DRw4EH/99Rf27t2LO3fu4OLFi7IcPXqUHycltJnVW+wqoERou7u7cyuQ3GQQsdC+du2aSWbOnClZTkGG92MTQsaPH6+obaSEdoUKFXg5V69elYx9W1D07duXD/WePn3a6Cx6OZycnPiS0Gq1WtIvWwxzxwEMQ//pI/bRFgsjfdgzC+hGjnBycsKLFy8AACdOnCjw+03Ynq1bt/Jn4/nz5/j8889lJ9p1794dv/32G9zc3CSF9vr16w2eRaVCm4WqnDp1qmS6ePRGySYXdnDDhg0AgPnz5/N9YqFtb2/PF27at2+fpND+8ssvAei+K4y5jsycORMdO3Y02O/g4IBatWrpiDw3NzecOHECJ06c0DEQNWzYEEuWLJFsm5EjRxqNKlRQiN1D9N+XYncQU6Os4raXWgXTFkJb7DpoamEzouAhoZ3PdTEf0/fffx+AdrjzwYMHBuTm5iI1NZUfJyW02dCceJhfidBmM/kBoGXLlpJ5rO2jfe3aNaPxZJmYTU5OVhR/du3atXBzc0O3bt0krbguLi78Y0JpPFc5H+169erxCTTnzp2zSNBam+DgYL6gwu+//86HbM2hZMmS3BKo0Whkrc5ixCvRmXp5ivMaW/1RvLjNjh07+P5+/frx/XI+3kTRJiwsDH/99Rd+/vlnk4YJ8f9WSmgfOXIEANCuXTu+T4nQrl69On8O5SaXMbGlVqtx8+ZNk0yYMMGgjHLlyiE9PR0ajUZnBUax0GYuLBkZGahTp46k0N61a5eBqDM36ogc4kWxbP2etjbi96B4VJjB3klPnz41KnSXLFnCy5Gat5LfQlv8wfD1118X+H0lTENC20Z1MqE9ZswYyfTk5GSjQrtevXoAtD514j+hEqH9+++/8z/m8ePHJf/EhXUypJi2bdsCAB4/fmxgafb39zfa+UlhbDJkYGAgd9EQR3kpCEJCQrgl++zZsxb9X0qWLMmFh1qtVixkxe4gUkuri2H+3BqNxuSHAFvuODMzk+dlcbYzMjIs+pAg/v0wwwSLhiNnmFCr1UhOTubHSQltNqm2WrVqfJ8SoT127Fj+zOv78jKs4aM9depUAMCRI0d09jOhPWrUKL7IE5vzoC+0HRwc8OTJEwBAp06deBlyQrtt27YIDQ1VjHhF1/fff9+sY2vWrFngz5MYcRx/Hx8fg3TxCrkBAQGSZdjb2yMxMREA8Mcff0jmyU+hXbt2bW4EunDhAooXL17g95UwDQltG9VprtCuW7cuRo4cidatW0MQBKxcuZL/CXft2gU3NzcIgtZfbOTIkXzFMX0CAwP5cWxYXmo41FpCe8yYMVi0aBG6detmNJ8lQvvdd98FoPWL1O+IWCeZkJCgOFKFqagjUVFRmDt3boFOMgkNDeUi++TJkxbFq3V2duadeG5urkl3ETH169fnz8/69euN5r137x4Ard+9qXJHjx7Ny33vvfdQtmxZLirE0ROI/yZMaMvFok9NTTUqtP38/HifJ+4PTAltlUqFv/76iz+bcv1TXoV2qVKl+IJZ+tF3mNBm/4dt27bxa9AX2j179uR5xSEP5YQ2m4Nii23w4MEm70PlypWtIhZNfZgHBQXx8JC///67ZB4/Pz+e5+TJk5L9PovwAsiPuuWX0HZ0dOQGilevXkmGfyQKJyS0bVSnuUJbjL+/P3eLSE1NBWA8+D7D0dGRR5b47bffEBgYCI1GA41Gg6ioKJ281hLaSrFEaC9YsIBfi3i/i4sLD1MXExOjuLzCHt5vwIABXGQfPXrUIhcWFxcXPvExJyeHx79Vikql4gI6LS3NIH4xQ7zIjJz/vpiKFStyV5iffvqJ/z8A6KxUR/w3MVdo+/j4YOTIkdw1bu3atfx52rx5M3/HtGrVCiNHjtRx1RDTu3dvfhwzTEiFe8ur0G7RogWSkpLwxx9/GBgG2rdvj6dPn3JRKA7/N378eDx48ADTpk1D8eLF+YQ4QDuvgVnga9SogQcPHuDKlSs6ZX/wwQeIiYlRjPg+rly50qxjW7RoYfQerFmzBgCQmJioMynTEnbu3Ik1a9agTZs2OuEhPT098eWXX/J+1FSsaXGows2bN/NJrA4ODhg6dCgyMzN5u8hNRM8voc1Wbwa0E/5btmwpS7NmzQr8P0z8AwltG9VpqdBu164dHyr6/vvv4ebmhp9++gmA1gVg7ty5BnFnGWxJcuCfYUUWWxbQilw2s93WQvuLL74AAFy6dElRfkdHRx5nVD+knfiaGjVqBJVKpWjJW3HIQyX5lcY2tYaVJiwsjLuuHDhwwKJlyF1cXPDrr78C0EYXUeq7rg+L3w4AkydPNkhXqVR80YusrCzZ0I/67N27FwDw5s0bbmlLTk4uVGHBiILBXKEtpkuXLtygwNwqEhIS8Pbbbxuts0SJEnwS5MGDB/lKprm5uRg5cqROXmu4jpQoUcLAKtm2bVtuTLly5QofudTHwcGBi8L09HQ8evSIC8m5c+fKfhCbS375aLu6ukK8mSNIpdizZw8vKzMzEwkJCXzEgG0ZGRkIDQ01Wk7ZsmV1Pl7UajXu37/PI+AA2pjjxj4M8kNod+3alVvblWx59c0nrAsJbRvVyYT206dPcffuXQP0J0Pa2dkhIiKCf0GfPXuWWzRVKhU+/fRTLsROnTplIG7CwsL4H3Pjxo06aWPHjuXWREDboYsnyQ0dOhSDBg3CgAEDEBoaioEDByI8PByDBg3C0KFDMXz4cHz88ccYN26cyVW05BB3RnFxcYiMjJQN5D9t2jQenhAAOnTowMvx8PDg1mxmmW7SpIniDsmcTcnkQWtYaXr16sXbNi0tDR9//LHREIjDhg2TXLWRiWwA2L59u8ky3n//fcnzcXV1xcOHDwFoRceYMWP4h0S5cuV03JqUWLMZ4kk9LFbvV199ZdO+gCicWCq0O3bsiJcvXwIAVq9eDQ8PDx5lJzc3FzNnzpR1Bfv6668BaPtANplXbKzYsWMHd9Gz9sqQdnZ2mDJlCre8njt3DhUrVpTM6+7ujgMHDvDzGjZsGMqXL88jmADArVu3dPpJS8kvoa1SqXDmzBkA2o/zvFpgo6Ki+HtAf8vMzERcXJzJqEmMMmXKYNWqVdx1h205OTmIjY2Fu7u70ePzQ2gfPHhQ8trkNhLahQsS2jaqkwnthw8f4sqVKwbk5ORwod2pUyfupwdoLZpSvrmdO3dGWloaAK1LCZvA0aVLFy7U7t+/Lxmizs/PD/v27TPrK1lqs3TWs729vdmdB6C16ovLYS4oGo2Gd9YFJbStZaURv9yVblJRDdgzYM
4md07t27fniyMAWiv0/fv3uTAAtGLeHGu0OJwf20wt8078N2BC25hhQiy07e3tERUVxcXRyZMn+SiQnZ0dZsyYwd3vjh07hsqVK0vWB2hX7BWnTZkyRee/dPHiRe6OlZ6ebtIwMWLECIwePRrjxo3De++9Z3CtrVq10pmwvnnzZskRrOLFiyMiIoJb6dVqNSIjI3Xy9OjRA8nJyQC0feKyZcvyFDUpP6OOlCxZEj179pRdLMtcHBwc0LBhQ3Tv3h2hoaEICQlB69atLb7+cuXKISAgAKGhoXjnnXf+9VFXiIKDhLaN6lTqOtKjRw/esWVmZmLKlClGJ+O99dZb+PvvvwH8E3mkfPnySExMxKtXr9C8eXOj51WtWjUMGzYMixcvxubNm7F3714cOnQIx48fx2+//YazZ8/iwoULOmG1zp8/j/j4eJw9e9akH54xihUrhrCwMCxbtgybNm0yytKlS9GrVy8Df8aqVavi3r17WLFiBd+n1HXEXEy5jljLSlMYhbYgaD/OTp06ZXDMkydPMGnSJIsmjbIRAEB6aWTivwkTvnKGCbHQ9vf357GmAa1LnNSiYN26deNueCkpKTyWdFBQEBfhN2/elFxopUWLFjxqT1626dOn8zKbNm2KrVu3cmPHmzdvDFxUBEEby3rKlClISEjg5aSkpMjOZahQoYLOpMfbt29b/K4rSuH9CKKgIKFtozrd3d3Rtm1bA0sKg4k5QdBO5ImNjVW8DHj58uUNhFa3bt10wj0VZby9vQvNKmTWttIURmrXro1evXqhX79+aNmypewcAYKwFKWuI+LJi2/evMG4ceOMfhD7+Pjg9u3bAMBdpdzd3ZGSkoLnz5+bHFGpWbMmRowYgaVLl1pkmGBh5dzd3fmERwDYvXu3bH8/Y8YMni8jIwOLFi1S9M4cNWoUMjMz8ejRI1k3FFOQ0CaIvENCuxA0AkEQBPEPFStWRMuWLWWXLhcTGxuLtWvXKp6EW7FiRYwbN05nX1BQEA+lais6d+6MkydPmjSI2NnZYf369Zg+fbpJ/2B9mjZtajI6lTHc3Nxw9OhRHD16VHKUgCAI05DQLgSNQBAEQRAEQRQ9SGgXgkYgCIIgCIIgih4ktAtBIxAEQRAEQRBFDxLahaARCIIgCIIgiKIHCe1C0AgEQRAEQRBE0YOEdiFoBIIgCIIgCKLoQUK7EDQCQRAEQRAEUfQgoV0IGoEgCIIgCIIoepDQLgSNQBAEQRAEQRQ9SGgXgkYgCIIgCIIgih4ktAtBIxAEQRAEQRBFDxLahaARCIIgCIIgiKIHCe1C0AgEQRAEQRBE0YOEdiFoBIIgCIIgCKLoQUK7EDQCQRAEQRAEUfQgoV0IGoEgCIIgCIIoepDQLgSNQBAEQRAEQRQ9SGgXgkYgCIIgCIIgih4ktAtBIxAEQRAEQRBFDxLahaARCIIgCIIgiKIHCe1C0AgEQRAEQRBE0YOEdiFoBIIgCIIgCKLoQUK7EDQCQRAEQRAEUfQgoV0IGoEgCIIgCIIoepDQLgSNYIxixYrhvffeg729vcVl1KhRA4GBgWjcuLHiY1xdXdGqVas8nbuDgwOmTJmC6OhoNGnSxGT+8uXLIzo6GtHR0ahatarF9TZs2BCenp4F3nYEQRAEQfy3IaFdCBpBDnt7e5w6dQoAMHv2bIvLiYiIAADs3LlTUf4mTZogKSkJr1+/RsOGDS2ud/jw4QCA58+fo1y5cibzT5s2DQDw999/W/xhoVKpcOnSJWRnZyMqKqrA25AgCMsJCAhAYGAgHBwcZPOUK1cOgYGB6Nixo9Xrb9Sokc3eT61bt0aHDh2gUqkK/L4z2rZti1KlSunss7OzQ1hYGEqUKFHg50cQ/wZIaNugviFDhuDEiROyGBPRY8aMAQCo1Wq88847FtVvrtB2dnbG9evXAQA3b96Eq6urRQ388OFDAMC4ceNM5i9fvjzS0tIAAAMGDLD4XgcHBwMAsrKyULduXQiCgAoVKmD//v1mM3XqVJs+lwRB6JKbmwsARvugTp06AQASExOtWnepUqVw7949PHr0CF27ds33a338+DEA5Gn00pqULVsW6enpSE9P13lX7t+/HwCwcuXKAj9HUzg5OcHf3x9RUVFYuHAhli5dirlz52LcuHEICgpCxYoVjR7v7u6Otm3bStKsWTNUqlTJKufp4+ODtm3b8pHfEiVKyNZrDF9fX6P11K5dG0OHDsXcuXOxdOlSzJkzBwMHDlRkCCtdurTi8yhdurTia1epVGjYsCEmTZqE+vXrW63tS5QogcDAQEyaNAmLFi3C119/jQkTJqB169Y2/5gloW2D+j777DMY27Zv3y57rEqlwoEDBwAASUlJqFChgtn1myu0BUFA06ZNkZ2dDQBYsmSJ2XX++OOP/PrS0tKQmpoqS9WqVbF8+XKe/8mTJ0bz3759W7JOe3t7/PnnnwCAOXPm8P2enp5G77/cFhcXZ9PnkiAIXZQIbdaf37t3z6p1r127FgCQnp6uIwBCQkIQHh5uNl5eXkbrMya0Y2NjLWLMmDE65ahUKkybNg2DBg0yef2RkZEAgKtXr+oIk7CwMN5HhoWFFfgzIkWNGjWwYsUKvHz50mgfr9FocPr0adSuXVuynCFDhph8T9y8eRNjx45FsWLFLDrX4sWL87aPiYmBIAioVauWeS+r/78dPHhQso7WrVvj+PHjsse9evUKkyZNgp2dnex5sg9aJVuHDh2MXnOpUqXQp08frFy5Eg8ePODHderUySrtP378eDx79kz2/C5evJin0XpzIaFtg/qY0N6wYQO8vLw4hw8fBqAVshUqVJClSZMmSE9Px8GDB+Hj4yObT+5lZExoOzs765yTmNWrV2Pfvn3w8/OTTC9fvrxkfaNGjQKgfUGdPn3aJEOGDIFGo4FarVaU/9ChQ5L1jhw5EgBw69YtODk58f3FixfnX9riey73NX7nzh0AwNdff23T55IgCF2UCO0uXboAgOwHuCW8//77/KU8atQonbQnT54oFhzibdiwYUbrNCa0NRqNRXVu27ZNp5wJEyYA0I74tW/fXvZcHBwccO/ePQDA0KFDDdK3bNkCQGsU0XctKWhGjhyJN2/e6NyH3NxcJCYmIiEhAa9evTK4T3LCUInQZtvRo0dRsmRJs883PDwcgFb016tXD4JgudD+6KOPDMp/5513oFareZ47d+5gz5492L59OzdMsW3+/Pmy52kNod2oUSMcOXIEWVlZksdZS2jHx8cD0LqtHjx4EJs2bcLhw4d12j4tLQ3e3t42eSZJaNugPia02deqIGitr8xVQmwhyMu2Y8cOCIK2k6xRowZn5syZALRfu+L9jo6OCAoKsrg+qT9l7969+ctx4MCBEAQBb7/9Ng4fPozKlSsb5K9VqxZ/wbDymjRpgl9++QV16tRRfI9Lly6NR48eAdB9mLt27arzImAviMjISMlyOnbsCADIzs5GjRo1bPpcEgShixKh3aNHDwDA9evXrVJnx44duRDYuHGjwTDzhg0bsGPHDrMx5fpnTGgHBgZahP4E+NKlS+Pvv/8GA
KSmpqJmzZqS58IEZkJCAhwdHQ3S3dzccOTIETRt2rTAnxExX375pc47auvWrejcubOO4UUQtC4hPXr0wLp165CRkaFIaE+ePBne3t7w9vaGr68vAgMDsXTpUh3ROHfuXLPPmc3DOnr0KN9XrFgxWQOYPrt27QKgtUpL/U9CQ0MBaA1tUu319ttvIzU1FYDWRVVOF4mFdlhYmNFz0r/fjP79++u0z+3bt7koBqwntHfu3ImwsDCDUQZXV1ed0fZ9+/bZ5LkkoW2D+qSEdosWLQAAT58+haenJ169eqUDs2Ckp6cbpMmxadMmCIIALy8vKNmaNGnChXZWVhauXbumCPaBoC+0Bw4cyDud6dOn8/0xMTEAtC9CfZ+4999/H2/evEF8fDyfXDN16lQA2k6+WrVqiu7xt99+CwBYu3Yt39esWTOo1WqkpaVx0b5161YAwNixYyXLOXjwoEE5BEEUDEqEdp8+fQAAV65cyXN9bdq04Vav06dP23TCn618tOvXr4/Xr18DAC5fvgwXFxeddEdHR9y+fRuAdnQwLi7OIoy5IeQHbD4TADx79kyxX72np6ekEUgQdIX2e++9J5knJCSE53n8+LFZ5+zn58eP7devn9nXXK5cOW69//bbbyXzBAUF4d133zVaTr9+/fh5yH0siIW2pROPg4KCcODAAURERHBrsrjdrCW0jbnxODk58f9aTk6ORaMQ5kJC2wb1SQnt6dOnAwBiY2Mlj0lPTwcABAQEmF2fp6enjqsFGwJMS0vT2V+3bl0utK9evaq4/G+++cZAaLu6uvLJj/p/VAcHB+5nLv6CXLhwIeLj4zF9+nSdcH4qlQobN24EABw/ftzk+XTq1AkajQZJSUn8WbKzs8Pp06cBAJs3b+Z5t23bBgAGvouCIKB58+YAtMOMb731lk2fSYIgBKxevRo5OTkcton3BQQEYP369fw3E+MajUYnnz6mXDfatGmDFy9eAABu3LgBd3f3fLvOSpUq4e7duzqw6xDv27JlS77ULx5F3b59u47VfvTo0bDGZstJnd7e3tzI8+bNGzRv3twq5SoR2iqVigs3AAYfLsb47rvvAAAPHz60yMd73LhxvF4/Pz/Z8zNVTokSJfjzt2vXLsk81hDaUuSH0DbFzp07eZ1KjXl5gYS2DeqTEtpnztRSAc4AACAASURBVJwBAAQHB0sewyYiWhppRIwxH21rCW1BEODr6ysbUq9cuXKIjY3VmaW9e/duANJRSZydnbF+/XqTf4JKlSohKSkJGo0GgYGBfD/zsXz06JHOBFL2B5OaMc9E+MaNG236PBIEoUWp0P7++++VaD2dTcrXmBEeHo7MzEwAWisui8NfrFgxzJgxw+rvKE9PT4MPAalrFbsTWBs2CggAw4cPhyAIqFixIh+xfPz4MVq2bCnJ3bt3AQATJkyQzWNK4FWuXBmRkZHw9/fP87XExcXxa/nkk0+sdo+UCG1BEPgIQFZWltFQlGLKlCnDRxZmzpxp9rnZ2dnxen/77bc8Xyv7yPz5558l04uS0GYupBqNxqKoauZCQtsG9ekLbTc3N6jVamRkZMh+/TLXEVOzd5VgK6EtCNrZxGvXrlVEQkICACA+Pl7xMfrhixYuXAgAyMzMxJUrV3D79m0kJyfzD5U+ffro5GeW9ZSUFB2LS926dfmEkaCgIB1/Mzc3N5s+nwRBaJFzHSldujQ8PDzg4eHBo2OcOXOG7xPDfEAHDx4sWUejRo14f3vjxg2dj3vWdyYlJZllqTSXDh06cLFhK0twiRIl8Mcff2D79u18+JyNJALGwyVevXoVANCrVy+L6ra3t8etW7e42MmL2Pbw8ODPyf379xULXSUoEdru7u68frmoH1KwkYOcnByLFljr3r07P7fw8PA8Xaerqyt//61YsUIyT1ER2iqVij97ly5dyvf6BIGEtk3q0xfagwcPBiDviG9vb88fvNatW+e5flsK7QoVKiA/N7HVWhC0ky81Gg0ePXqEy5cv48iRI9xnbcOGDQbn99tvv/GyxKMFPXv2lK3TnPCG1rTSEMR/HSU+2h9//DEAyEYjYpPNjImRX3/9FadOndIZ/apSpQq38i1evDhfr3P27Nm8v7Gly4WHhwf3pe7du7dOv5efQtvZ2Zm3LZA3K/TQoUN5OZZMRjSGKaHt4OCAzZs3A9C6HLZp00ZRuSqVCteuXQNgPLyvMVg889TUVBQvXjxP1ymepBgSEiKZp6gIbRZ5B8jbmh3mQELbBvXpC202IW/ixImS+UuVKsUfhIsXLxpd7ObEiRMmrd62FNr29vZ8ZrYpfvnlFwDAvHnzFB/j7OxsUJ/Yt429LB48eIAyZcoYnN+lS5f4vRUL8bfffhsXL17UgU2KUvqStaaVhiAIZUJ70qRJAP6JuqQP+7g29lL19vY2iJSwZ88eANpJ2aVKlULJkiWNhmFVglxI1N9//533S2Kh7e3tjeXLl+cJJWFKPT09edhC1kfmp9AWBAETJ05EWloajh07licNsGzZMn7vevbsadXnTyy0p02bBj8/P/j5+aF169b44IMPcOHCBQDaEVVTEw7FiEWrJe+JOnXqcAt0Xj8uHBwcdNpcTrSLzzk5ORkpKSlITEzE5cuXsX79eoSEhFj0kZhfQtvJyQkVKlRApUqVUK9ePfTv3x979+4FoI2uMm3aNKs+K8YgoW2D+vSFdlRUFO/ApVZQqlatGszZTHUu+kLb1dUV/fr1Q8WKFa0utM3BmI+2JXh4eODRo0dQq9WyX9wsRrZGo0F6errss1e8eHHuq8jCFJrCmlYagiCUCW0W0m3dunWS6Uxo9+/fX3G9H3zwAe8n2IR0sdXZ0u3Zs2cGdfn4+OjkKV68OF/cxt/fP891vnz50ui1iieO3717l4dLzMjIkA1VyBaCOXPmjGS6rd6tgiBw8QTA6MqI1atX50JZH7nFhJTE0b58+TKaNWtm1jkzY9v169ctWqVw0aJFALSCUW6xHaXMmjWLX4uxBYiUxNG+dOmS2YEE8ktosw9w8Zabm4uNGzfaPCwlCW0b1KcvtFUqFU6cOAEA+OqrrwzyN2rUiD8Y3bt3h7+/vyRMCJqKTMKE9s2bN3WCxTdu3JhbgO/cuYNmzZopgk080Rfa4hjdSjh06BAA7SqO5h6r/9VtZ2fHQ/N9+eWXsveCRXP5+eefAQCTJk2SzMfuy8uXL80K/2MtKw1BEPJCu127dmjRogUEQTuBUq4vFQQBJ0+eBCA/8Vyfhg0b8n5C7DY2a9Yso1FNGMzfW61WG6Q9efLEoL4vv/wSGo2Gzyv58ccf8fz5c3Ts2BEVK1ZEcHCwLMyyvGDBAtk8SqzOn3zyCXJzc9GuXTu+lkBeNmtM4lfKsWPHeL3GROcPP/wge75y0a2ULliTlZWF9evXy45YiKlSpQqf/BoREWH29To7O/NVD3/66ac83buQkBBuGf/++++N5q1Tpw6io6Px0UcfYciQIXj//fcxefJk7Nq1iz+7gNYqbmppezG2FNqANirN7t27Fbv5WAMS2jaoTyrqyP/93/8B0MbJ1p9sx74c
MzIyjJbLhvqkrLdly5ZFnz59sHz5ciQmJho8bH///Tfq1KnDg9lbsomFtoODg8XlWLKxlyxj3rx5AIBTp04ZhEliFoOyZcvy41lIwIcPH0oG1z9y5AgA4LvvvrPps0kQxD8wod25c2dMnz4dp0+fRpMmTfDo0SPk5uZiwYIFXGjJjYwxo4b+xGgpKlSowCM5ANq1Bsw9519//RUAsGzZMpN5nZ2d8fjxY5w9e5aHiGOW88zMTPTo0UNRXZbEYBYErdCqVasWBEFAy5YtIQj/LNr1+PFjWWMLGxmMioqSTLdFJAcGM7AAxi3aeRXaM2bM4NfXokULBAQEYMKECTqrK167dk3SZVFMdHQ0AK3gs0T7jBgxgteXF1cZf39/Hmnn7NmzeYon7eXlhT/++IOfl1xMbynyS2iXLVsWXl5eqFu3Lpo2bYqQkBCsWLGCz+FSq9WSYX7zAxLaNqhPSmgLwj9DmvqNzf7c9+7dM1ouG75r1aqVzv4PP/xQx4WBbffv30dERITO0A6bSPLixQuTvuAMFi9bLLTt7e2xf/9+myFeOpXFEtVoNJgzZw5mzpyJNWvW4MiRI7hz5w6PC8pGClgIJhaBZPz48Tr3r2nTprw8tiQuQRC2wd7eHk2aNEFkZKTk0uMdO3bkE9DEm5zYPH78OADTQrtkyZI6k6WB/BfaY8eOBaBdQEu8YM2CBQsAaA0xbDEVKRcDOaGtxB3Bzs4ODx8+hFqt1nEZYEI7v320rYU41KOxRWrq1q2Ltm3bctq3b8+PUyK05aKOODg4IDY2lufTf8+LKVasGJKSkgAAq1atsuh6L1++DECrDyydONuxY0cuOK9evWqVyFrVqlXjZb548UJx9BdbT4asUaMGXx1VrVYb6Kf8gIS2DeqTE9qTJ08GYDiJ54svvgAgH8+Swb5G9ZfZZZ3D69evsXnzZu7qITUZcuLEiYrqEmMtH205vvvuO5w4cULxYj3ioUPxplar8eDBA0yZMgWCIODdd98FAPz1118QhH9Caj1//lxnqOvw4cMAgD179tj0uSSI/zpr1qzhkT7E26NHjxAbG4tBgwbxMHv9+/fH06dPeZ6VK1dKjk4dPXoUgHGhXbJkSe7KJjZS5KfQdnZ2RlJSEjIzM1GxYkUdoa1SqbBmzRoektDJyQk3b97EN998o7OKob7QDggIwMmTJzFy5EiT59m6dWsAWoOC2OXi3ya02XsUAKKjoxUfJ47ulRehLQjacJMZGRkAtKs9y62KGRwcnKdn6+233+bHT5061aL71alTJy6Ib9y4obO2RV7Zvn07P7+6desqOqYg4miL76MplxlrQELbBvXJCe2uXbsC+Ef4MZj/sLHZxHZ2dtzaI7buCoKAt956C3369OEvHWNRR9jy6FKh8ORwdXWFh4cHSpUqlS/36+zZswCgeBb32LFjceTIEcTExGD06NHo0qUL3nrrLTg6OurkYzG32VL1gvCPqI6Li4Mg/LOcc25urtFhSIIgrA9z2VKr1Th9+jTv46QmjQuC4QSta9euGawKyMqUE9pubm7ckp2Tk6MzNJ+fQnvOnDk6L3pjS7APHz4cAPDq1SuUK1fOoC4mtJnbSWJiouRHhxhmNT979qzO/n+b0G7VqhVvrz///FPx5EJrCm1B+MfSDEAnTKQYFmnrzJkzFl0rW2glKysLHh4eZh//v//9T8eSLbf0vKWwZwqA4tU5C0Joq1QqvljQlStX8r0+Eto2qE9OaDOLQlJSEt9XokQJHlbOWCfm5OTEH87q1asbrd+Y0GZWHHMsAXI4OzvnORTV8uXL8ejRIwDAkSNHJNMtdec4f/48AODDDz/k+xo2bMgtWJMnT0ZKSgoA6ZUjCYLIX9577z2Eh4fzoWxTUUfYSzotLY2P8OXk5GDGjBl8rgbr4+TiA7MoG7m5uRg4cCA8PDx435pfQrtmzZrIzMyEWq1Gw4YNIQjyQtvOzo7HXF64cKFkXUxoly9fnrsURkZGytZvZ2fHFwzTz8eEdmZmpqzrHhMpFy5ckExv27atzZ4ZOzs73Lhxg7eZUn91awvtmzdv8rzijyGGj48P/3AcNGiQ2dfJVhMFtBNmzT3+nXfe4ZN8r1y5And3d6u3xZo1a/g9qFOnjqJjCkJoC4KA1NRUAOZFXLMUEto2qE9OaIeHhwPQDt+wfX379gUAo6tGCoLuwjCm/KvkhLZKpeIPm6WTacSUL18ettiUupQIgtb6XrJkSdSoUYN3cvodAAsPxrbExESKGEIQhQBTQptZCJctW4ZGjRpxQarRaPj6AmyEUE7c2NnZ4a+//kLfvn0hCIJNhHbZsmWRnp6uI5jkhDZzN8jMzDRYQVDKR5v1ZykpKQbrDjDatm0LQDtyoF+mNaKOsHtpKwYNGsTrTk1NlQ3XJ8aaQtvb25u/X8SGMzFLlizh51eiRAmzr3HmzJn8XN5++22zjg0MDOSuLRcuXJC1uOeFYsWK8Y+3Z8+eKfYfLwihLdYDUgZIa0NC2wb1MaG9YcMGhIeHo0OHDujbty/u3bsHQNeVgfkTbt682WiZ1atX5w+nqdnCckK7fv36vIwaNWrk+TqLFSuGDh06SPLNN9/gf//7n2y6GDYsOXv2bMl0KWsBw9HREe3bt0d0dDSOHz+O7OxstGrVindSFy5ckDyGTVABgNGjR9v0eSQIQhpjQtvLy4uHJuvWrRsEQYCLiwtiY2N15o/s3LkTADBixAgIgoBatWphwIABOmJHHCnCFkJbEAR8++23OiJXSmirVCq+KIrU0thSQtvd3Z1bLvUnejPYIi+//vqrQRoT2o8ePZKNO82ijkREREimm4q8wahcuXKeVzVk9+mnn37i7ZaUlGRyITdrCe3KlSvz0VJAG65WP4+zszOeP38OQLtAm7nX5+joyEdb//zzT7OOFYvss2fPWqy3TLUpc4VS8uyLsURoly1bVnbZ+hYtWhidiGlnZ8fjmAN5X75eCSS0bVAfE9rffPMN/7OwLSMjgwdPZ0OYAEzGePT19QWgHe40Vb+c0J4yZQoA4Pbt2/l6/cw3eu/evbIWFjHm+GiXKFEC7du3x9SpU3Ho0CHufybucNu3b89DIUqJaHEHAWhjZ9tyCIsgCGmMCe3ly5cD0ApC/ZCe4sloLCoFi5nPRg31fZMZthLa+nNIpIQ2s2ZnZ2ejZs2asnXpj0guXboUgNaqrW+IKV68OJ9E+tFHHxmUaSsfbeZmkJiYaNL9UQmlS5fm7w5AO6qxf/9+DB48GA0bNkSlSpVQrVo1tGzZEqNHj0Z8fDzPq0Ror1+/HiNHjuRMmDABP/zwA3f1BLRzBKTmLjEfe0sXmBk4cCCv4+OPP1Z8XLt27bhL1Zs3bxAZGYlhw4YZRT90LuPWrVv44osv0KBBA+4Hr1Kp4Ovrix9//JGfX0pKiqxbStmyZREUFKTDqlWr+LGfffaZQbr+xNKOHTsiPT0dGo0G06dPN6hj7dq1uHLlCt577z2dIAf29vZo3bo1jzYGAKdPn1YcHSUvkNC2QX1i15GlS5ciISEB165dQ1xcHF9RysPDg4fNUxI
BJCAgAIB2hrOpvFJCW6VS8c5SyRK9eSEgIID79J07d86kq4s5Qpu9bNmWnp6O/fv3IyIigvtyMyH97NkznU7Q3t4eixcv5scePHiQLwSQnZ1tsWXbWlYagvivIye0vb29+SIZM2fONFoGE52sn2NRKuRGDW0ltPXRF9oODg7cFWb58uVG69IX2rVq1eL3Tj98bEhICACtkUZqQp0thLarq6tOvz127FirPC8uLi5Yv349zNnUajUWLFggWZ7SBWsA7eiAXASPixcvArB8gZlTp04B0E6GlZsYLIXYWqx0k7sX7Plk53Hv3j1upWdbcnKy0f9MixYtzD4f/Xep2A9c6hldu3YtT9doNHj69Cnu3btnYISLj483a2GdvEBC2wb1yfloM1xdXfnXdXp6uqIlTNetWwdAu+SpqbwsVqtYaLOIJ4BlLxNzadmyJV/J8vr160ZDCrF7oURoN2/eHLdv30ZMTAy6du1qMNPez8+Pr4Q5efJkvr98+fI6Q4179uyBo6MjmjZtyv3WAW3oRXPCH1nbSkMQ/2WkhLa9vT1fhObZs2dGXckE4Z+RO9b/sYVLPv30U8n8hUVojx49mr8TqlSpYrQuqTk2LEJFQkKCjvWc+az/8ssvkmXaQmirVCqcOXMGgDaChrlLmJuiTZs22LJlC3eh0d80Gg3+/vtvzJ07Fz4+PrLlGBPamZmZuHfvHjZv3ow+ffrIhvRjQQ8A7UrP5l5LkyZN+PFS7kPGsKbQXr9+PbeO62/Pnj3DkiVLTBrRrCG0Q0NDuX/1mjVrDOpo164djhw5IrmWCADcunULkZGRBiNK+QkJbRvUZ0xoe3h46AxhDR06lKc5ODhgyZIlmDp1Kj788EMMGjQIw4YN43GxAenlxu3t7eHq6gqVSgUHBwfuj7RlyxaezlZxkvLRyy+aNGmCp0+fYtu2bbIPeY0aNfhQXO/evfNUn6urK65fvw5A6x7DRHiHDh34pA1AOyQoPp9atWrhr7/+4ulPnz7Fxx9/bDA8LVWfeLOWlYYg/qtICe1Zs2bx/5iSJayZBffBgwcQBO0QOCA/qTqvQputFPjNN9+YdZxYaDs6OvI5PLNnz5Y9hr07pIQ2E3i5ubn43//+B0HQ9m3Mr535rOvDhHZaWhpCQ0MlYasNf/XVV7J5pFxdxJQsWRI9e/ZUHJ3CEhwdHdG4cWMEBQVhwIAB6N69O1q2bGmWVTivsMVs7t69a/ECM4UFZ2dnNGvWDD179kRoaCiCgoLQuHFjk+9Ga9OkSRP4+/sbvZ9lypRBmzZt0KdPH/Tv3x8BAQGoVq1agdw3Eto2qE9OaHfq1ElnEt6sWbMMjhUvB6y/3b59G+XLlzc4pmTJksjNzYVGo9H5qmMW3alTp/J9tvZF9vLy4n+OQ4cO4f79+7h8+TLOnTuH33//nU/ayM7OzlOMz2LFinHLTW5uLtq1a4cKFSpg9erV/Gs4OzsbEyZMkIy7WqpUKWzcuFHnfptarjW/rTQE8V+icuXK/L/H5nYMGjSI/39PnjypSLh4enryY3bs2MH/+3KRTJQK7aCgIHTq1AnNmzdH/fr14evrq9O3SvXnxtC3aJcqVQrjx4/n56k/WlevXj0+WhcYGChZ5tixY3VG1sLCwpCTk4OcnBxZ66M1oo4A4Ivt/Jdxc3PjVmA2R4D470FC2wb1yQnt4OBgaDQaqNVqTJw4UfLYmJgYXLt2DTdv3sTNmzdx69YtnDlzBrNnz5YU2QxxTE8AOHDgAA8X+Pnnn0Oj0WDr1q0F+vCxFSb1t6ysLIwaNSpPZdvb22PevHkAgAkTJkAQBFSqVInfl2vXrslO+hAzaNAgpKWlKY6rbQsrDUEUVWbMmIHz58/jyJEj3Ajx+PFjCILWMs3iCKekpKBq1aqKy929e7dOH7Nt2zbZvEqFNnOfkNo0Go3ZSzsbW7BGELRzV16/fo2kpCTcuXOH+6hnZGQYfRfoU716dQwbNkw2nQntly9fIjo62mJs9X4tzEyaNAkA+OqfBX0+RMFAQtsG9XXt2hXLly9H//79DdKmTp0Kf39/q9dZu3Zt+Pn5wdfXV3LCy+DBg036U+U39erVQ2hoKAYOHIjw8HCEhYWhZ8+eFq14Zaytxb/r1q2LiRMnmuWfValSJZsPjRHEfxHxqoxsi4qKgiBo+4vHjx/j5cuXiledY1SqVImv/nj27FlZn2dBUC60xS584i01NdUiQ4Epob169WqDul6/fo0hQ4ZYtQ2U+GgTyhgxYgRmzZql4xJK/PcgoV0IGoEgCILQxoDu3r07evfujT59+qBBgwY66e3bt0fr1q0tLl9JKK9SpUph/vz5mD9/vlFB7uLiAnd3d1SuXBmenp7w9PRExYoVFS8Brs/48eMRHR0te7ynpydat26Ntm3bonXr1mjSpInRRc0sxdfXFzt27MDq1asL/HkgiKIACe1C0AgEQRAEQRBE0YOEdiFoBIIgCIIgCKLoQUK7EDQCQRAEQRAEUfQgoV0IGoEgCIIgCIIoepDQLgSNQBAEQRAEQRQ9SGgXgkYgCIIgCIIgih4ktAtBIxAEQRAEQRBFDxLahaARCIIgCIIgiKIHCe1C0AgEQRAEQRBE0YOEdiFoBIIgCIIgCKLoQUK7EDQCQRAEQRAEUfQgoV0IGoEgCIIgCIIoepDQLgSNQBAEQRAEQRQ9SGgXgkYgCIIgCIIgih4ktAtBIxAEQRAEQRBFDxLahaARCIIgCIIgiKIHCe1C0AgEQRAEQRBE0YOEdiFoBIIgCIIgCKLoQUK7EDQCQRAEQRAEUfQgoV0IGoEgCIIgCIIoepDQLgSNQBAEQRAEQRQ9SGgXgkYgCIIgCIIgih4ktAtBIxAEQRAEQRBFDxLahaARiiq1a9dGsWLFFOevW7cuQkJCCvy8CYIgCIIgrAEJ7ULQCGIGDx6MV69e4dWrV7C3ty/w87EUOzs73L59G6mpqQgODjaZ/91334VarUZWVha8vb0trrdUqVLw9PS0CiVKlCjw+0gQBGFr7Ozs4ODg8K9+BxFEYYGEdiFoBDHDhg0D26zVyTVu3BjDhg3LF5o3by5ZZ2BgIAAgOzsbNWrUMHmOzs7OSEhIAAD89NNPFl/rxIkTYa0tMDCwwJ8HgiDkGTJkCFJTU/Ho0aMCP5d/Ez4+Pnjw4AFOnjwpmT58+HAAQGxsrMFxbdu2NYmDg0OBXyNBFBZIaNuw3hEjRiAiIgJ+fn6yeZQK7d69eyMiIgLvvPOOyXqnT59uNfGpv3355ZeSde7duxcAsHbtWr6vSpUqWL58uSznz5/n5X7//feSeSIjI41eKxPa6enpuHv3rkXk5uaS0CaIfwEjR44EAKjVaquV2bhxY4SHh+cL9evXl6yzYcOGmD9/vtUw9W5jL/5bt25JpssJ7R07dih6L5QvX75AngeVSoXmzZtj9OjRWLhwIWJiYjB9+nR06dLFLDdGQdB+VERERGDx4sVYtGgRRo0aBU9PT4vOq1ixYujQoQPmzp0LlUpl8bW1atUKEydOxJIlS7B48WJ89tln6NWrF5ycnMwuS8kHkz7Nmj
UrkHb9t0NC24b1Pn78GADwySefyOZRKrQPHDgAAFizZo3JesVC++bNm1Zl/PjxBvU1aNAAGo0GOTk5qF27Nt/v6+trhoSX3o4dO2b0WpnQzotV/OHDhwBIaBNEQTJkyBCMHDlSVpwKgnKh3bNnT4wcORLt2rUzWe/cuXPz3E/JbRMnTpSsMyQkxKr1mJrrklehffz4caxdu9YAZqSwtdBWqVQYNGgQbt68KXtPEhIS0K1bN5NllS1bFnFxcdBoNAZlZGdnY86cOYpGmz08PDB06FDExcXh+fPnvAw7Ozuzr699+/a4fPmy7LVNnz7drPLs7e3Nf6gA3Lhxw6btWlQgoW3DegtaaFvT6mOM2NhYAMCqVat09jOhffToUTRo0MAsOnfuDOC/KbR9fHwwYsQIzJs3D0uXLsXs2bMRHBwMFxcXs8qpWrUqRowYgYULF2LJkiWIjIy02B/ezs4OzZo1w7Rp0yy28giC9qMsIiICX3/9NZYsWYIZM2ZgwIABivoElUqF9u3bY+rUqYiJicHcuXMRHh6OUqVKmXUOFSpUwODBgzF//nwsWbIEkydPlnWJUkK9evUwbtw4NG3atMCfnX8zT548AQCMGjVKNo9SoX3s2DEAwLJly0zWy4S2RqPBgwcPrIrctYiF9owZMxAdHW02M2bM4GXICe327dtj/vz5WLduHQDg2bNnkhbxgwcPAgD++usvzJ8/H1OmTIEg/CO0BwwYIFl+eno6ANsL7ZkzZ/JrV6vVuHTpErZv3469e/fiwYMHPC03Nxc9evSQLcfFxQUXL17k+RMSErBlyxYcPHgQ2dnZfH9MTIxsGf369UN8fDzUarWkWDVXaA8fPpx/wADaj6Pt27dj69atuHjxItRqtc2E9vz58wusP/g3Q0LbhvX+F4S2j48PcnNzkZGRgerVq0MQBD6sxYT27t27zS63Ro0aAP5bQjsgIADx8fGynV5aWhpGjBhhshxHR0csXrwYOTk5BmVoNBps2LABzs7OJsspU6YM+vXrh7Vr1yI5OZmXUa9ePYuek19++UX22jZt2mT0+Pr16+u4Gunfl4EDB5o8B5VKhYkTJ3JxoL8dPHgQ7u7uJstxcnJC165d8c033+Du3bv8+F69ehXo8/Nvp6CF9osXL2x2rWKhbencHDs7O16GnNAeO3as7H/O2Hb37l0IQuEV2t9++y2ys7OxYMECgw9/Ozs7jBo1igvfxMRE2Ynuy5Yt49c8f/58HV/z+vXrIzExkae3b9/eZBlqtRrx8fE6/YI5Qrt79+78vO/du4eAgACDPB4eHkbdUeXw8vJSBLsetVqtM0JNKIeEtg3r/S8I7a1btwIAvvrqKwiCgJIlSyIpKQmbNm1CQEAAANsI7WPHjinuSPR59OgRgIIV2mFhYTovuuvXr2PXrl3YuXMnbty4oZM2btw42XLs7Oywpy43ngAAIABJREFUc+dOnvfJkyfYvn07du/ejVevXvH9Bw4ckH0BtG3bFsePH5cU6oD5Qrtz5846dT98+BC7d+/Gli1bcPr0aWRlZRkV2j4+Pnj69Ck//o8//sCPP/6Is2fP8n0ajUZWDDAWLFjA879+/Rp79uzBtm3bkJqayvf/+eefsiMHb731Fvbt2ycr1Elo5w0S2uahRGjXrl0bQUFBGDNmDAAgOTkZQUFBBjBxdezYMQQFBXGBUFiF9pQpU0yOIIkFcJcuXQzSa9asyfu4Q4cOSfpSs3cYAOzbt0+yntmzZ2PTpk0YMmQIPDw8IAgCtm/fzo9TKrTLli3L30WJiYl5Gjm0FAcHB/5x8fPPP9u8/qICCW0b1lvUhXb79u2h0WiQmprK25RZUO7du4fmzZsDAFJSUrBnzx6zOHLkCO/4jZ1DUYk6MmbMGGg0GsTGxsLHx8cgvUePHlysZmZmomrVqpLljB49ml/P1q1bdURjpUqVcOHCBZ4+fPhwk2UAwLVr13DlyhX+2xyh7evri9evXwPQDluHh4cbPOeurq5o1aqV5PH29vZ8aDc7OxuhoaE66X369OFDvM+ePUO5cuUky+nSpQs//1OnTqFixYo8rVSpUti3bx9PlxsuFZcBaJ/xM2fO8N8ktPMGCW3zUCK0GXn10S5sQlvJBEP23gcgOak+OjqapxsTRNevXwcA5OTkKNYulgjtzz//nB8jZcm29XNJ/ZnlkNC2Yb1FWWjb29vj0qVLAIAxY8bAyckJbm5u3MVg4MCBNp0M+fr1a1y7ds0imFWjIIX20KFD0b17d6N5xAJ47NixBulOTk7cOnvr1i3J4VIvLy8uTG/duiX5who2bBj27NmDUaNG8VCNc+bM4XUrFdpikfzmzRuLfJgHDBjA642OjpbMM2vWLJ5n8uTJknnYebx48YJbncSULl2a/19fv36N0qVLG+Tx9/fHkSNHMG7cODRo0ACCIOC9997jddOLKW+Q0DYPWwrtQYMGoUSJEgZkZGQAUCa0K1eujMjISPj7+9vkHjdu3Jjfn0mTJhmknzt3DgDw/Plzo20wb948Xo6SyZWCYL7QLlasGO9/Tpw4YbPnUB/2v7l//z7FVM8DJLRtWG9RFtoeHh6Ss7QB4MyZM1CpVFxo//TTT3BxcTELb29vPHv2DHv37jV6Htbw0b558yZevXqlKHRifqHEQuPp6cnv8XfffWeQHhoaytPHjBkjW47YtUTpJEBLhHZ4eLii8zHG4cOHAWit+HLWag8PD/6xdOnSJYP0pk2b8vMwNqlp/vz5PJ8Sn29BIKFtTUhom4clQvvly5eIjY01gI3M5Fd4P3t7e9y6dQuA1s3LFmK7e/fu/Pz0LfKOjo7c4HDw4EHFbRUVFaWobnOFdqdOnXh+JfNw8gOxYezTTz8tkHMoKpDQtmG9TGifP38emzZtkoR9VQPAjz/+KJuPWYrNEdo5OTn5en27du3CiRMnsG3bNmzatIl3oi1btoQg5G0ypFKcnZ3h4eFh82eKYUsrjZOTE39W1q1bZ5C+YcMGnl6lShXZcoYOHWr2i8MSoX3q1CkAWp9sR0dHs6/XxcWFvwz379+vqC6NRmPwLHz22Wf83Dt06CBbRrt27Xi+b7/9VtE5ktC2HiS0zcMSoW1qkxPaN2/exOnTpw1gE/dMCW1nZ2edSBrGjE/Wgvlo5+bmolKlSjpp3t7e/FxMPSPNmjXjeZcvX66obnOFtjiCDBsts7OzQ+PGjdGrVy/4+/sbXIO1+fbbbwEAWVlZkqN+hHJIaNuwXia0rbkpEdos9FFGRobNrpUNr4k7avaw7dq1C2XKlLF4sqJYpA0cOBCTJk2yCV27djV6zba20tSuXZs/BzNmzDBIZ76ECQkJRssRWy70QzLKYa7QrlKlCh/xWLRokUXXKxa+06ZNM5p38eLFPC/70GPs2bMHgPaFayzairOzMxcOplyWGCS0rQcT2ufOnZO0usbGxvIJsGw+gxwpKSkAzBPaz58/t9m1FpTQvnfvHmrUqGHA5MmTDfpvQfhHaMuN8Jjjoz1p0iQ8f/4cx48fz3cNUL16dX5uW7duNUgXW5BNWW+rVKnC827evFlR/
eYKbZY/OzsbDg4OCAoKwv379yHeNBoNjh49Cl9fX6vfrzJlyvC5NEqvkZCHhLYN62UrQxrjxx9/5H+ksWPHyuZjIkqJ0P7iiy8AaIcJbXGdtWrVQkZGBl6+fKljSWWTIePi4iwOMwXoCjtmqbLFtnr1aqPXbWsrjdhHWz/UlIODA3efOPr/2jvz8Bqu/4/PTW5kF4kEsUSEEJRKbPmhaKOtWqNiKZFSaitpqviiqVL1tUtrLbG0VSVqKWrfSmINYg9fS+wisZPIIvf9++M+czqTu829d+4iPud5Xs/DnXPOnJkzmXnPmc+yb5/efjw8PFg/UgWlsUJbaFsdFRXFfq9duzY6duyItm3bIigoSG8fQrOq3r17663LR1UAgOjoaNE2PmrLrVu3DI6bD/V4+/ZtSeeFhLZ88EJbziJFaM+cOROAOkKPtY5VKLQ7deqEDh06GE3Hjh1ZH5ay0d64cSMAeYS2tVAqldi3bx8Adcbg4OBgjToff/wxO3dxcXF6+/P29mZ1N2/eLGkMxgptPnRpZmYmPvnkE/bC/+zZM42/ixcvXqBZs2aynrO4uDjWv76vfoQ0SGjbwSQIkWqjHRUVhZEjR0py2Js9ezYA4P79+1Y5hq1btwKARtbIFi1aAFCnV+eF9t69e7F8+XK2UnL69GlRlrHbt2+z//Nv9EJhN3DgQEyZMkUD/ma0e/durdtNoWvXrgaP3VqrNO7u7ux8nDlzRuPmXbFiRXYd/f777wb7e/bsGTv/UvZvrNAW1q9VqxbCw8Nx+vRpFC+nT59GRESE1j7Gjx8v+ebfo0cPVrf4C09OTg4A4OjRowbHzT/wcnNzJZ0XEtry8cUXX2DkyJF64cOJqlQqvfWuXr0KQJrQ5r+G3Lt3z2rH+rpkhuSj8RSP9sNjj0J73rx57Lx8/vnnWusIFwKGDBmitz93d3dW15A9N4+xQpuPu/3gwQM8fvwYR48eRZMmTdj2qlWr4vfff2d93rx50+g07LpwcHBgixHnz583OWU88S8ktO1gEoRIFdrGsGzZMgDqWMyWHn9MTAwAdfxhJycn0TbeGWXBggVMaH/88cfgOPXKJgBMnjyZ1c/NzRXZ4vJi3JCwq127NlQqVYkOsL948WIAuk1U6tSpw64jKfbF/Kd1XQ/e4hgrtFeuXMnqt2vXjj2Qc3JykJWVJcqi9urVK60rZvwLI2DYaVO4uieMMODk5MR+37Nnj8FxJycns/Ms5QFJQtu6SLXR5k3MpDzoeNvUa9euWe04hEI7JSUFycnJRpOSksL6kCq0CwoKtEZe4n2Aigvt7du3A4DORQd7E9rx8fHsnOhzfBa+mMfGxurtU7iibcg5n8dYoc1/SQOAs2fPws3NTaOOQqFgXxgA4LPPPpPlnLVt25b1aQ3b+TcBEtpW2ufQoUOxfPlyDBs2TG89Y4R2QkICli9frnMFkIf/Yzx06JBFj7Fu3bp4+vQpALXZy0cffYSYmBh8/fXXGDx4MPr37w9AbU9sSaHNr2AYcph7Xfniiy/YNTJr1iytdRo0aMDq/PTTTwb7vHnzJgDD9tw8xgrtbdu2sfpXrlzB1atX0bZtW3aN+/n5YerUqcyOOycnh2UWLT6vAAzaJQoTSwjtuYVmMlIi0/Dx2wHozCYnhIS2dZEqtI0hKSkJgNpp3VrHYSsb7VevXiE7O1sDPkZ/caHNm+rp8lexJ6EtzKmwevVqvQK3ffv2rK620H9ChDbaxc+PLowV2vz9GAAiIyN11mvatCmr98cff8hy3ngfFl1hTQnjIaFtpX2uWbMGALBu3Tq99YwR2nfu3AEAg+Kdj2+9evVqix4jvxKkrfz5558s+sngwYMtKrR5p9NDhw4hMTHRZL799lurXpdS6NGjB7MD37ZtmyhFsBBhVAEpn8stvaItTLeelZWlMwqKMEbt9OnTRdvmzJnDtoWFhendn64VbTc3N/b7rl27DI6bVrRtw+DBg5GYmIjBgwcbrAdIE9o//vgjEhMT8c477+itt2vXLsnXh1y8LnG0eVOqGzduaF0J51+UbS20x4wZw87Fhg0bNL6uFof3HwIML0wI606ZMkXSeIwV2mfOnGH1y5Qpo7OeUqlEfn4+AODw4cNmn7egoCD2dVFbyFjCNEhoW2mfthLajo6OzHtY6k3BVHr37o2TJ09i48aNWLhwISZMmIBBgwYhMjISwcHBLORfmzZtmNAeNWoUGjVqxB40y5YtQ6NGjdCoUSPk5eXh0KFD7P+8Q4shYcevqptbzInFbQm6du3KHBwPHz6sMzU4x4kjkmgL/Vcc3kY7NTVV0liMFdo7d+5k9XUlkeE4DmXLlmXHePLkSdE23qkXAFq0aKF3f8JPwcI4tAqFgr2oHDx40OC4eWEhNQIFCW15+PPPPwGoX9D11TNGaD98+BAAMGDAAL31Ll++DECao7lcvC5Cmz83r169QmFhoQZ8saXQ5iOmAOoII4ZENseJzUF0pVbnEdpz9+nTR9KYjBXamzZtYvX1RUbiOI4lJTtz5ozZ506YOyA0NNRmc1jSIKFtpX3aSmiHhYWx/rp37271C0zI+fPnAQBVqlSRLeqINnr16oW+ffuaDB8ybNGiRTY9X0KioqJYDOnjx4/rXeXgOLGJxI4dO/TW9fT0ZHU3bNggaTzGCm3+czwAnenVec6dOwdAU9wKPeENCQnh9VU8extvfyrFBpeve+7cOUnnhYS2PNhKaLu4uLC/M2sm6bCW0HZ0dIRSqWRZEq9cuQKlUqnBkCFDAAArV66EUqlkY+JNSsqXL6+1f1ubjghtsvmxS23Lh2Z9+PChXjEs/LJmKFISj7FCW5gOvn79+jrrOTk5sRVtKT4n+nBzc2N/I3KsjhP/QkLbSvu0ldDmA9+rVCpUrlzZ6hcYj5+fH1QqFe7cuQOO+1cIrVixAmPGjMGsWbMAqKOQ8HGrCwoKcOnSJfZ/XoBJTZBiKgcOHAAAfPPNNzY7X0K6devGVouOHj0q+e+FNwe5evWq3nrC1MRC0x19GCu0hSYhhlKv8+YaBQUFot+F5iD6VsU5jsP8+fNZ3WrVqunsX5/ddenSpdmn8DVr1kg6LyS05cFWQpuPjAQAHTp0sNrxWktoHzx4EKaUixcvwsvLi/3d6BqjLYW2MBHV8uXLjT6Pc+fOZe0//PBDrXUcHR1x+/ZtANIjNHGc8UJbaHs9YcIEnfVat27N6k2dOtWs88f7UAFATEyM1eevJENC20r7tIXQdnJywvXr1wFINwmQCx8fH4SHh6Nv376YPHkyS53NCxZL2mibC38jlZp225L07NmTieyUlBSULl1aclveqQWA3sxeQ4cOZfUMJeXhMVZoC9PBG/qycuHCBQDqDJLC3/39/ZnwNRS/9uTJkwDU8a+Lh6dKSEhgY9Fnr8vfJwDDkQh4SGjLg62ENv9CWFRUBF9fX6sdr7WEdlJSEi5fvmw0O3bsQPPmzQEA6enpOsdgrNCuWLEinJ2dzT5/whXgxYsXSxKzxWnQoAG7v6SkpGidB/56A3SHCtSGsUKb4/71rXr69CkCAwM1tiuVSrYo
pFKptDqIOzs7Izg4WNI++XvmgwcPJDl+E9IhoW2lfdpCaAvfUA05TJqLv78/kpKSkJqayh5o2gpv02avQtvHx4fdbA2FkLM0n3zyiSjpjD6bbG0Io5N8+eWXOuvxkQSePHkiORarsULb19eXfZLX55RbsWJF5ozz119/aWxPS0sDoM5yquthHhwczOZw/vz5Gts/+OADNvZ58+bpHEtiYiJ7iGl70GmDhLY82EJou7i4sLBq+/fvt+rxWttGm6dVq1aoW7eu6LcePXpoFW2jRo0CoD/ShjFCmw87e/v2bY0IQ8YwcuRIdtxXrlzB559/jgEDBuglICBAa19//PEH6yspKYktUCiVSnz22WfIy8sDoDbf02WWEhwcjMjISBGHDx9m/Xbp0kW0rXiyMZ42bdqw+9iVK1dE9YKCgvD333+zPrU5Lvr4+DBzmAMHDug1o+FfogBgxowZVr323wRIaFtpn9YW2j4+Prh//z4AIDs726BDhbk4OTmxRCAqlQqXLl3CypUrMWLECLZimpOTw2yL7VVo8+N6+fKlyW/1cqzS9O7dmznt7dixQ2scVUOULVuWOcLeuHFD62p469at2c1cV6hAbRgrtDmOY1lPi4qKdDoz8g9fQJxBkmfYsGEGHwh8IoeioiINEcFx6ocmnxDixYsXWkV0jRo1mGjYtGmT5PNCQlsebCG0hw8fzuauf//+Vj1eWwjt+vXr48mTJ3j+/Dn7stOqVSu8evUKubm56Nevn6j+oUOHAAB9+/bV2adUoV26dGkIy1dffWXyuROuFkstnTp10tqXt7c3zp49y+oVFRXhxo0bIgf7Gzdu6H0xEIYVlFL0hd0VRk8B1PbjwhjbgDppjrYFkuJJkPTZevMvGEVFRahRo4ZVr/03ARLaVtonL7QfPXqE1NRUnfACAFC/Neury68QahPawlTu1go6HxMTg4iICI3Ym3wcb+FbtylCmzeFMEVolytXDr169cKHH36I8PBwhISEICAgAOXKlYOfnx/q1q2Lb7/9lp3T3377zaRzIMcqTefOnZnIfvToEYYNG2ZwhUbXTVSYTTE5ORk1a9YEx6mjb7Rr146FQrx79y58fHy09uHv76+xQsNn5OOvL+G24s6HPMHBwexl7OHDh4iKihLF0RaGh9y/f7/WjGTOzs4sa5lKpcK3337LXkI8PT2ZrT+gfTWbRxg54OLFi6KvF02aNGH7yMnJQUhIiNY+PD09Nc6L0M7zv//9r8b2UqVKWeVv8XXH2kK7fPnyePToEQDgzp07spgzGINQFC1atAgLFy40GuHfjyGhHRwczBx99+7dy4Sag4ODKLrPTz/9BKVSifDwcABAXl6ezvsEx3FsxdeQ0FYoFDhy5AgAID8/H40aNTL53MkptDmOQ5kyZbBkyRLmZMiXwsJCrFixQqcjKI+cQpvj1I7wfLQXYXnw4AHGjh2rc6U6ICAAT548AaB2/Nb1tbJChQrsWLdv327V6/5NgYS2lfbJC21LlOJCOyIigm07efKkUZ7XcvPpp58CUK8QC53SpArtM2fO4JNPPsHgwYPZqoIpArZs2bJs5dZQuXXrls44z/qQa5VGuFostYwePVprX0qlkmVzA9Ti9Pbt2yxFPaC2AQwPD9c5ns6dOxs1lkePHunsKyoqShQG7NmzZ7h16xZ7sQDUET70PcxCQ0PZAwRQX1sZGRnsIQ8Au3fvNvhFYuHChaJxZ2ZmMvEBqJ2+dGXA4zhx9k2pxZp2v68zvNA2tDDB+6AA0FsvNTWVXXfFhXbxDHvWXs3mOOumYA8JCWF+KCkpKVpN0j7//HO8evUKz549Q7169dhqtr4FCOHfg5QvcG5ubujUqROCg4Ntfr1pw8fHBx9++CF69uyJ999/3+o6RYiDgwPefvttdO3aFd27d0fTpk0lhS2sXLkyOnbsaNOxEyS0rbbPsLAwdOjQQVb4FRptK9pDhgzBs2fPULt2bZteYO+//z4KCwvx3XffiX4vLrSdnJwQGBgouhaUSqVohRBQR93QttIphWvXrul9OGVnZyMxMVGv46A+5FqlkVNocxyHUqVKYfLkySw0F19UKhW2b9+uc8WWR06hzXHqyA6pqaka7V68eIEff/wRnp6eBs9RrVq1RFkb+fL06VNMmjRJ0sqxQqFAbGwsW9UXlqNHj6JZs2Z625PQthy80LZEKS60O3TowLYdPHjQJEc6cxEK7SFDhmDw4MFGw4fkA3QL7SZNmrDr/eDBg3qdq6OiohAREcFWaAsLC9kXMY5T+zpcvXoVx44dQ0pKCovFLzUUJkG8KZDQtoNJMJV3330Xbdu21enYYcqqrCXo0qWLht1heHg4JkyYYPBFICwsDHFxcRg8eDC6d+8uSYTpQ6lUws3NDV5eXvDx8YGvry/KlStnVDQPfdjzKo2bmxveffdd9OzZE+3bt4e/v79Nx1OzZk107twZPXv2RMuWLU2yQw8ICEDHjh3Ro0cPtGjRwqRP/k5OTmjWrBl69OiBzp07S46NS1iOhg0byr4wwQtBbaYjX331FR4/fmyzubeWjTb/ArNnzx5JztVubm7MF6h4+Ljg4GCNl5js7Gydzn0E8aZCQtsOJoEgCIKwLBEREWjbtq3OfAK2fPEMCAhAdHQ0oqOjTf5ip1AoWB+6zOtKlSqFcePGGeXoXaNGDaxdu1bjJdbR0RFhYWFo3rw5WrZsicaNG1NYOILQAgltO5gEgiAIgiAIouRBQtsOJoEgCIIgCIIoeZDQtoNJIAiCIAiCIEoeJLTtYBIIgiAIgiCIkgcJbTuYBIIgCIIgCKLkQULbDiaBIAiCIAiCKHmQ0LaDSSAIgiAIgiBKHiS07WASCIIgCIIgiJIHCW07mASCIAiCIAii5EFC2w4mgSAIgiAIgih5kNC2g0kgCIIgCIIgSh4ktO1gEgiCIAiCIIiSBwltO5gEgiAIgiAIouRBQtsOJoEgCIIgCIIoeZDQtoNJIAiCIAiCIEoeJLTtYBIIgiAIgiCIkgcJbTuYBIIgCIIgCKLkQULbDiaBIAiCIAiCKHmQ0LaDSSAIgiAIgiBKHiS07WASCIIgCIIgiJIHCW07mASCIAiCIAii5EFC2w4mgSAIgiAIgih5kNC2g0kgCIIgCIIgSh4ktO1gEgiCIAiCIIiSBwltO5gEgiAIgiAIouRBQtsOJoEgCIIgCIIoeZDQtoNJIAiCIAiCIEoeJLTtYBIIgiAIgiCIkgcJbTuYBIIgCIIgCKLkQULbDiaBIAiCIAiCKHmQ0LaDSSAIgiAIgiBKHiS07WASCIIgCIIgiJIHCW07mATCPnB1dUXlypVtPg5CjIuLC9q2bYu2bduiXr16ktu1adMGbdu2xf/93/+ZPYYmTZogLCwMjo6ONj8fxfnmm2/QokULKBQKSfWVSiUCAwMRGBgIX19fm4+f0KRhw4Zo2bIlXFxcbD4WgiDMg4S2HUwCYXsiIyNx7do1pKWloVSpUjYfj73h6emJDRs2YNeuXdi1axfi4uKstu+AgADw5ddff5Xc7vnz5wCAY8eOmT2G06dPAwCuXr0KBwc
Hm88HT7du3di5OXbsGJycnAy2qVy5sknnk7Ae27dvBwDk5eUhMjLS5uN5nXF2drb5GIg3GxLadjAJhG2pVq0a8vPzmfiYPHmyLP3WqFEDcXFxknjvvfdsfh70sXr1agiLXOdICrYW2q1atWL7nzVrls3ngsfT0xO3bt1iY+vfv7+kdsYK7RYtWlj9AWEpvL29ERERgfj4eGzevBk//fQT2zZ+/HhMmjTJLN59912zx+jr64uCggImtMuWLWuRc1G6dGn07dtXEqGhoeA4DuXKlUPr1q3Noly5cmaPvVmzZujUqZPOl14HBwdERkZi27ZtOHLkiNn78/f3R4cOHWwCLfy8/pDQtoNJIGzPpEmTmPgoLCxEWFiY2X1+/PHHkFoWLFgAjuNQvnx5yW0MFWPMLPQxYsQIjb5VKhW6dOlilbmxtdDev38/AKCgoAABAQFGtS1XrhxCQkKMonr16pL6njNnDjsvmzZtkjwmqUI7KCgISUlJUKlUuHv3Lry8vKwy3+bi4eGBmjVrIiIiAoMHD8asWbPw999/i15K+JKdnc3MgXJycsz+mxs/frzZ4x8+fDjrb82aNRY7T8HBwZKPa9KkSeA4Dt27dzf7HPXo0cPssScnJwMArly5glatWmlsVygUuHjxItunufdz4ZcjaxdLvWgR1oOEth1MAmF7XF1dcfXqVXZzO378uGSbV12UBKHdrVs3FBUVsT5TUlLYv58/fy6L/bMhbCm0+/Tpw/b97Nkz7Nu3TxL8g3327NlGz9vNmzcNjqtVq1ZsXu7evQs/Pz+2zcHBAXFxcdi8ebPWFT9DQrtSpUpYuHAhW1Xli3D1V8jw4cPx8uVLq/Pf//5XYyyDBg2SfJ4LCwuRlpaGwMBAcNy/QlulUqGwsFAyr169Yn3KIbTPnj3L+ouIiLDY39XrKrSbN2/O+srPz0eVKlW01hs9ejSr99tvv5m1TxLahDmQ0LaDSSDsg86dOwMAzpw5o3WVxFiEQjs2NpY5oPHExsay7WPHjgXHiYX2/fv3sW3bNqO4efMma2+u0O7QoQPy8vJYf3/88QcUCgVWrVrFfnv8+LEsq//6sJXQDgoKwuPHj016OPICyRJC28vLCxkZGQCAoqIivP/++6Lts2bNYn2NGTNGo70uoR0YGIj58+fj5cuXovHk5uZi1qxZOj/5x8XFmXSOzC0zZszQGEuVKlX0tlm1ahUzhSjuaMgL7a1btxp1ndSrV4/1r01oe3t7Izk5WRJHjx5lfalUKqSkpEhua4iVK1eKxiUU2gsXLkSjRo1ETJ8+nW0fPHgwOE4stOPi4tCiRQtJfPnll6yduUKb/8IEAIsXL9ZZr3z58sjNzQUAvHr1CiEhISbvs0aNGhg2bJjVEH59IaH9+kNC2w4mgZCPmjVrMoc9U8jLy8O+fftMahsdHS0ai1Bo9+rVS2OsS5cuZdvr168PjhML7b/++svo41+8eDFrb47Q7tKli8hu/ciRI3BzcwPHqaOAHDhwgG178uSJybapGzZs0KaHrF6Sk5NF4/Lx8cH58+fZ9lWrVmHhwoVYuHChaMwHDhxgvwvhH+pCod2sWTM0aNBAJwcPHgSgX2grFAr8+eefrM/vv/9eo05gYCCePXsGQG3u0rBhQ9H24kI7PDwcq1at0ljBzsnJwaxZs+Dv7693Dk1d0RaWvLw8WVa0OY7Dd999h+HDh6N9+/aoW7cuunQDiG8/AAAgAElEQVTpwvbTunVrncdhKaFdrlw54y9IC5T09HTRuIRC+9tvv9UY96ZNmwCoRWqFChXAcWKh3aZNG8nnKCIigrUzR2i3b9+e9fPy5UtUrVpVb/158+ax+mvXrjV5v9bm5MmTbNwktF9/SGjbwSQQ8tGwYUOZH0/SS/GHlT6hrVAocPv2bQDA9evX2e/2ILTj4uJEn8MvXLigcbMvXbo0jh07xurk5eXh008/NXpf9ii0fX19kZqayrYV/+z83nvvsW0xMTF6j08otD09PfXW3bZtGwD9Qvv7779n/e3atUtnuEGhCcW5c+dEq7dCoS38YsGXJ0+eYOrUqShfvrzF/k5dXV1F+7TkM6Bdu3ZsP7YQ2n5+fnj+/LlBhC+2hYWFktoYw4kTJ0Tj0ie03dzc2GrwP//8w363pdB2cXHB//73P9bP9OnTDbYJCAgQXeMdOnSw2HUmJyS0SxYktO1gEgj5eF2E9ttvv822zZkzh/1uS6Ht4uKCRYsWiY4pPT0dlSpV0lrf29sbhw4dEtVPTEyEq6ur5H1+8cUXSExM1IvQVOV///ufwfo8/AptVlaWwbr/+c9/2Jg6d+4MlUoFQG12Uvx4oqKi2HgMPbjlEtoKhQITJkxgfWVkZOiNga1QKPDPP/+w+kJRIhTawnLnzh2MHj1aw+nR398fEydORLVq1WT7O32ThLYUFAqFyP9h0KBBFjsfPPqEdmRkJNs2bNgw9rsthbbw+s/OzpZ8zUyZMoW1u3HjBsqUKWPxc2suJLRLFiS07WASCPkQCu2dO3dCqVRajeKOZ/qE9tixY7U+sGwltENCQnDq1CmR+ElLSzMYisvd3Z0JRL5cvnxZ1nCFtrDRTkhIQHp6utbjF9rWN27cWG8/cgntJUuWiM7xhAkT8Nlnn+Hrr7/G5MmTsWjRIqxbtw779u3DmTNncO/ePdFXicLCQjRq1Agcpym0U1NTER0drTOMWGhoKAC1zbA2EwNTIKEtRuhsd+nSJUnx0M1Fn9Bevnw5ALUPQMWKFdnvthLaoaGhohX/fv36SW7r4eEhsnnesGGDZEd3PomZNRDeH0holyxIaNvBJBDyIRTaO3bsMKpthw4d2GfWmTNnmj0WfUKbD0/16NEj0UPV2kLb0dERI0aMwIsXL0TCZ9OmTfDw8JC0T6VSKXLAA9SibNWqVahRo4bZ59HaQjssLAwzZsxAYmKi1ljJwlX8efPm6YypXKdOHdmEtnA1z9SSlpYGpVIpEtpSwsfx90tAu024KZDQ/pfi8dA7depksXMhRJfQdnR0RHZ2NgB1lCFhG6HQzs7Oxq1btySRlZXF2hkrtF1dXUX+EsnJyUZHhGrXrh37SgX863xuiB49ehjxF2ZeGTduHNsvCe2SBQltO5gEQj7MEdpCYawrlJmp/QmFtq+vL1ttLG7/a02h3bhxY5EtMqBewZo8ebJJ2Q979OihEaWjoKAAS5cuNcsx09pC+7PPPjPyEam9dOnSRTahXb16dZFQKF6ePn2Ky5cv4+DBg9i4cSOWLl2KadOmYfTo0SxCCaCOFGFswpqYmBhWf+DAgWb/XXCc5YR248aN0aZNGxHjxo1j+/n66681tvNOvrzQzs/PR3Z2tmSE17wpQnvu3Lms/ZYtW2Q5D1LQJbRbtmzJfh8xYoSojS3C+/3yyy+sbU5ODmrWrGnS8QrjzqtUKkmr4iS0CTkgoW0Hk0DIhzlCW/gQmT17ttlj0SW0hbGZi6dXtobQrlKlClasWKEh3LKysvDRRx+ZdcwBAQHYvXu31gfJP//8gwEDBhj9925IaOv6zG6q0O7SpQvOnj2rlStXrrCxPH
r0SGe9s2fPIiIiQlZnyG+//RYTJkzAwIED0alTJ4SHhyMwMFAjTF1xunbtysYwd+5ckdAuHvJN1375UjyUoKlYSmgX9xmQUvivLrZIWPPhhx+K/g5v3ryp95oyBqFw04YuoS28Zovb5QvvkX/99Rfmzp0rib/++ou1M0ZoDxs2THR+hw4davK14eLigiNHjrC+CgsLtUaDEtK4cWPJPiHm0q5dO7ZfEtolCxLadjAJhHyYI7Sjo6NZ26lTp5o9Fl1Cmw/P9vz5cw1HO0sK7apVq2L+/PlaI02sW7dOtigTCoUCvXr1wr179zT2AwBJSUlG9Ve+fHkcPnwYhw8fFgmZ8PBwbNiwATt37tTabu/evTh8+DCWLVsm2/UlFJ2rV6+Gs7Oz3vpyRx0xlfXr1yMqKgocpw5dyJdTp04ZbLt3715WXy6HSHsW2hkZGUhISJDMihUrWF/GCO0KFSrg7t27Ro9Xapk3b57e/esS2nzirpMnT2q0saaNdvv27VFYWMjaGWNbre+c37hxg/VZVFSE4cOHy/73Zi4ktEsWJLTtYBII+TBHaA8YMIC1nTBhgtlj0Sa0XVxc2Err6tWrNdpYUmgLo1Dw5datW+jWrRs4Tp1tsG/fvujbt6/B2MlCbt68iVu3bmH79u2i3z09PREfH49Hjx6x/T148ECUxdAcdu3axfrVZ3srF8L41Hy5fv06oqOjdZraCIX2tm3bsHnzZp3wdqz6hHadOnX0xuLWRfGHtfAlaM6cOfjggw80TCo6d+6M33//ndXLzs42W+jwWEpoV61aVZTOvm7duiL/g5iYGI2U97wTqDVttJVKpegFBgD27NljVg6AXbt2iUzBTBHavCAAgPj4eI021hLa4eHhonm7cuWKbNFC6tWrhwcPHojO/dy5c3U6A9sCEtolCxLadjAJhHyYI7SFGe5Gjx5t9li0Ce0PP/yQ/abN4dKSQjs4OJglCsnJycGkSZPg7u7Otq9evZq1bdmypeR98uXIkSNat3t5eWHkyJG4fv26KFSYubRq1YrtWxjr1xKULl0aaWlpbH+XL18WPaiPHz+Od955R6Od3JkhhatxxpTi4eJMca6UK+IIx1nPGTI8PFy0H30vZGXLloWvry9Kly5t1D6USiV8fX3h6+vL7L0NMX/+fI3za+jriBSE6clNEdrx8fHst08++USjjTWEdtOmTfHkyRNW/8mTJ6hbt67O+hUqVEB4eLhR5yk0NFS0AAAAhw4dkjWEpTmQ0C5ZkNC2g0kg5MMcoS0UH0OGDDF7LNqEtoeHB65duwZA7XRVp04dURtL22hPnToViYmJqFy5ssY2YcbBJk2aSNqfUqlkbfbt22ewrq4EK6Zy+PBhtv8WLVpY5Jry9/fH8ePH2X7OnTsHV1dXtG7dWhQSUaVSYeXKlaJwaEKhvXbtWqxatUonvBmBNYS2k5MT5s2bp5ENUlt58uQJxo8fL+vcWUto//DDD6L9WOPLhyFGjBih9Tzbg9Bu2LAhuyYyMjJEL+IcZ3mh3bx5c5HILigoMLgfPvvjvn37jBIyYWFhGuZtL168QFxcnM4vVKVLl0aZMmXMxtALGQntkgUJbTuYBEI+tAntrVu3Svrsev36ddb2/PnzRn2y1ZYVUZeNtjCzYPGVWKHQzsvLw71794xC6NBlbKQPYTzsWrVqSWpTunRp1qZ4KDBTKG6zXrlyZSxfvhzLly/XmsRD+OCX8snfwcEBbdu2lSxqOnXqJHoY3717F0FBQWy7UqnEV199xcyBAHUEkGHDhkGhUMhuo80L7Rs3bmiYQBTnq6++YvvWlQClTJkyCA8PR0REhIbpyLvvvov69etb5JO6tYS2MCwcoBbaxU0QRo0ahfT0dNk4c+aMzvH06dNH5PwotEG2B6HNceLso8V9VYR/b0uWLMGYMWMkkZiYyNrpEtqRkZEsGyWgTv1uaPU7MDBQFF+7eBQnQwQFBeHSpUuia+TUqVMaLxg8wsyU5pTly5frHRcJ7ZIFCW07mARCPpo0acJuUNu2bQPHcVqd/+QuEydO1BiLvjjawtTjwkgOQqFtbjFWaAtTqku9uVepUoW1efnypVliwdnZGU+fPsX+/fvRv39/cByHt956i/X/+++/a7RRKpW4efMmAPWKsr6/6datWzNHL0Op0xs3boytW7eKzue1a9d0voAEBgaKbMb5aB7/+c9/mADT9fDmSUxMRHp6ut4vA7zQvnz5ssHzKQxNJhTa7u7uaNGiBVq0aCF6abAm1hDawuyrfPn+++/x6NEjUbSfGTNmGP23pa/k5+drHU/37t1FwnrFihXYv38/+7+9CG03NzcW1/vFixciJ2lLhvcTZnAsKirCZ599ZvB4+cQ6gNq5XPg1SSply5Zlf+uPHz9GYGCgzroktAlTIKFtB5NAyIfwQbNx40ZwHIcHDx6wRDT6KL7SJKUNzzfffKMxFn1CW+hAtXfvXva7UGhnZWUZ7Qx1+/Zt1t5Yoc23zc3Nlez0FhYWJnqAmGO+IbS5XrBgATjOsNDmOLFdqa46HMehWrVqLH75iRMntNYpVaqUhpMaoH5pM5QlU6FQIDY2FmlpaQZXr01FDqEtFKA//vijpP16eXnJIgR5rCG0+SRKxW1xAbWPQtOmTcFx6qyMhkKv8S9zAPDHH3/orfvzzz9rjKVPnz6iTJ3//PMPnJ2dRQ7Kt2/flpwARkpiGFOFNsdxGDRoENsmXNW2dBztadOmoaCgAL179zY4vyEhIaJzaiicoT4cHBwwbtw4gyvovNB++fKl5NCGPELHYhLabxYktO1gEgj5aN26NbtB/fnnn5LbOTs7iz5B3rhxw6SkLUL0CW2O+zc7pEqlQtWqVcFxtkvB7ubmxl40Lly4ILldp06dRA9RcxzmhJ+s+RVHKULb39+f2ZUWFBToXdXauHEj66958+Za6wgTnGRmZrIoLPrsq4WsWbNGcl0eqfHLeaF99+5dREZG6mXmzJnsOEwV2k5OThg2bBiys7NNTi+uDUsLbRcXFxZZQpjwZP78+ew6v3fvHvu7M8SWLVtYH5UqVTJqLLGxsSgqKmLtT506BS8vL3Cc9khAchVzhLa7uzuzlb537x6zzxcK7Xbt2sHFxQUuLi4s6c6TJ09QpkwZ9jvPRx99xNr16NFDp42yg4MDGjduLOm8Cr8KXr161WA8eTnghfaDBw+MbhsSEsLGS0L7zYKEth1MAiEfQuFnjL2eMBsaX9q3b2/WWAwJ7ZEjR7LtfMY9WwntRo0asXbr16+Hq6urpHBaQjtgQHvsXanwockKCwuZEJEitDlOLKD1RYxp27atwf6USiV27dqF+Ph4tjJdo0YNjetDzhIXFyfpHMnhDClFaDs4OKBXr14i+9WXL1+anJWvOJYW2nx2z9zcXHTr1o3tp3Xr1qIXkNOnT8PDw8Ngf6YKbQcHB5FJ1vnz50VfRoRCe/HixVi4cKFZCMWnOUKb48RRiBo2bAiO0+0MKfQ70bYqrFAooFQqoVQqMWDAANy+fVvyS442hM6VAETJXiwJCW3CFEho28EkEPLx6aefshtUQ
kKC5HbTp09n7fgQeEKTDlMwJLT/7//+j22fO3cuOM52QnvIkCGsXWJiIlJTU5GcnKzhnFgcoY0kX0yx+61UqRJbaRQ6iEoV2pGRkbh48SKio6P1RsdwcHBgYjUvLw++vr6SxicU2hcuXNAbD1sqQpFlrNAuLCw0aEbw8OFD1r9Uoe3o6IiePXtqOBEC6mgrur4CGIslhbaDgwMb/5o1a9CuXTu2n9atW0OpVOLAgQPsNykr9easaL/11lsoKCjA+fPnUaFCBdE24TVgLzbaPMKsjLy9tC6h7ejoyEzPdIX5dHR0FNnDnzx5UnI4RCFKpRJnzpxh/axbt05vfX9/f9kcekloE6ZAQtsOJoGQD+EqsbaEC9pQKpXM+efatWsi0R0REWHyWAwJbaGoXr9+vcZv1hTawtB+wkQRGzdu1CtceUEjjBZgiq2k0CZ0xIgR7HepQtvR0VFy+LnvvvuO9Tly5EhJbYRCW44Y6xzHoXbt2qxPY4W23Dbabm5uGDx4sCjFPF+uXr2KmJiY1ya8X8+ePVm/77//vobQ5jgOFStWRGZmJhYtWgQnJyeDfZojtDmOw8CBA7UmarJnoS009+ATeOkL7zdx4kS2rbig8PX1FTkLA8Dff/8NHx8fo49x6NChrI9nz55pDVXK07dvXzx69AizZ8+W5doioU2YAgltO5gEQj74mKoAMGDAAElthA/mKVOmoEaNGmx19fTp05IexNowJLTd3NzY9j179oDjbCO0+WgfgDpiQvGEEboe2P7+/uw8rV+/HmfPngWgflkxVpQJH8LBwcHsd6lCWx+DBg3CqFGj2Cf7wMBAo+3RS6rQXr9+PRISErQ6DKanpyMmJgZKpVKW4xViKaFdqlQpJoYuX74MhUKhVWhznDpiDv/vDz74AAMGDNCJcAV15MiReusas0prz0Jb2Bf/xU2f0C5fvjz7Gnjq1Cl23bzzzjtsIQNQfzGMjY01Kcto+fLlRV9qvvzyS511AwICWLhTlUoli3kJCW3CFEho28EkEPIhXHl69913DdZ3cnJCeno6AHXc1urVq4PjOKxZs4b1I3VlvDiGhLYwNN7OnTvBcbYR2l27dmVtdu3aBY5TZ7AUevRre6AJV5aGDh0qWtGKioqSPGZ/f3+2r+IxiOUQ2nwWvsLCQrz33nvguH8dUQHdTpFCSqrQ1lYOHjyIrl27mu0MrA9LCW3hF63hw4eD4zidQlvI5s2b9Z4TY4q/v7/k8dqz0O7QoQPbPmfOHHCc4YQ1fKQXQG2SM2XKFNF95NSpU3qzPBpi1apVrK+DBw8avEZjY2NZ/fv37xs1N9ogoU2YAgltO5gEQj6EKycBAQEG648dO5bVX7VqFfu9Vq1aLOZtYWGh1vTahjAktIVCdenSpeA42wht4WqyMHbtf/7zH/Z7YWGhKN43x3E4ceIEAPULSqVKlRAUFMSiK5w9e1ayUBM6VBZ/qZFDaK9bt471ERYWBo7jMHjwYPbbkiVLDPYhFNqLFy/WSO5iCrzDHmC80H78+DESEhL0IowDbkhoFxQUYM2aNWjWrJnGPh0dHSXbskvFEkK7bNmyLHHQnTt3WBSK10Voy13MFdp8JBHgX3MwQ0Lbx8cH2dnZGmMpKirC9OnTzXqZ6NKlC+svNzdXUlItBwcH0Tnevn27SSvpPCS0CVMgoW0Hk0DIQ7ly5djN6dGjRwZvqA0bNmTJbAoKClC7dm3RdqHjzv3799lqt1SEQnvs2LEYO3YsevTogc6dO2PcuHGiLI58ZklrC+3GjRuz+s+fPxfFf1YoFKIoBg8ePGCRAj744AP2O78KznGcSNx9/vnnksYs/CxfPKqFHEL7yJEjrA8+koqfnx97kXr69KnBz/0lNerI3bt3ER8fr+GkV/zvKj8/H2vWrJEthbklhLbQ0TU2Npb9LkVou7u7602ZvXPnTtZH7dq19dY15kuAvQjtBQsWYMiQIYiIiEDLli0xfvx40Uo0/3VQSgr26Oho0TiuXLliVnx9jlPfF+/fv8/6lOpbwXEcqlevLvI7GTZsmMnj4IV2QUEBtm3bZhTCr2gktN8sSGjbwSQQ8iD8VM6bYujC399flHJ9+vTpGnVcXV1x8eJFVicjI0Nv1rDiCIV237598ezZM60PxJs3bzKhZ02hrVAoRJnptEVpKVOmDMumCKhXf5VKJU6dOsV+69KlC6v/zjvvsN8fPnxoMFNbs2bNWP1jx45pbJdDaPMP6KysLNHvwsQ0n3zyid4+7E1oP3v2DCtWrNCL8MFuTsKaWrVqsfrGprjWhaVMR77//nucPXtW5FchRWgbwlxnSF0IhfakSZMwYcIEs1i6dCnrzxihPXHiRJFfhrCcOnVKaxxtXUKb48ShAW/cuGFWWEgHBwfs2LGD9ZecnGy0D0hcXBxrn5ubi5CQEJPGQpkhCVMgoW0Hk0DIgzDU3Hfffaeznp+fn2gV9cKFCzrTY9erV08kkG/duiX5uiluOiJcFePLtWvXRP1ZU2gPGDCA1X358qVO7/2mTZuisLAQ69evh5ubGyZMmMDanTt3TmMFT/hQ3LNnj96H4qhRo5hj4pAhQzS2myu0vb29Wfvk5GTRtmHDhuHEiRMYMGCAwfToJdVGW4rQbtGiBas/bdo0WY7dUkK7Zs2aGmZer4vQtrWNtvBrFF/S09NFX/IMCe1mzZrhgw8+gKenJ06fPs3q3rt3D02aNDHpmIT3m6dPn6JatWpG9+Hg4IBDhw6xflJTU01y8uWFtkqlQnZ2tsnwzqW6IKFdsiChbQeTQJiPu7s7i5wBQGd2sYCAAFy4cIHVy83NNShIO3fuLPqMmpOTw0w99FFcaEdERGDSpEmYOnUqvvnmG0RGRmpkM7OW0K5Xr57oc+oPP/ygt9+WLVvCwcEBnTp1EmW50+bJX7NmTRZ9AAAWLVqk14yndu3aWLx4sdb7gFBor1692ujzIUxQUzw1tjEP2jdZaEdFRbH65nx2F2KNFOw8JLS1U1xot2/fHosXL8aSJUswefJkdOjQQSP+tC6hXatWLaxduxYqlQpjx44Fx6nvtUJTp/z8fMTFxRllIz1kyBDR/SYmJsboc+Lo6IgyZcqgXbt27KUeMM3J3RwbbWNIS0tj4ySh/fpDQtsOJoEwn88//5zdmG7fvq3VTrJVq1bIzMxk9V69eiUye9BHr169RGIbUCdK0PfQNeQMqQ2h0E5LS9MbRkwbQlMQXUK7cuXKogfg5cuXDa7ocpz670YYLzspKUlnXWH0B0Attk1ZQQoKCmJ9nDt3zqi2SqVSZB4idPQ0ltdRaAvDVgqFNn/T5+fFUD+jR49m9YXZUlu3bm3QIVMXQkc7AFi4cKHJfRkK2/YmCe02bdqw/gytmhpyhtRGcaFdrVo1LFmyhPk7AMCMGTNY/erVq4tM9ADg0KFDzClZF35+fvjll19E7e7du4f4+HhMnjwZCQkJWLRoEVasWIF169Zh27Zt2L9/P1JTU3H+/HlkZGTg/v37Ij+Y4iU/Px/169c36vxaS2jzyX8AsCy5xOsLCW07mATCPJydnUU38+Krs6VK
lcK0adNEKyNFRUVGC69u3bqJhCagTu4yceJErdewuULb3KJNaFerVk2UlKSgoABNmzY1OK7o6GjmOAqohZ6+v1uFQoG1a9eKxrNnzx6jRUqpUqVE+/3hhx/g5+cHFxcXnfj6+qJNmzbYs2cPa2dMFkhtvI5Ce9y4caz/fv36sd+FYSUvXbqkV+A5OzuLPmMLP9uPGDHCpOtS7mLoa4w+od2/f39Jts9C29xZs2ZJaiNMta4LuYW20IF76tSpeuuaIrSFiaWSk5NRUFAgmoudO3eiQYMGojYVK1ZkEYr4UlRUhKSkJJ3PYG1mdpYoJ06cMGoBYPLkyUhMTJT0JchUhOnl8/LyLBpmk7AOJLTtYBII8xA+XAoKClhkDI5TR8c4d+6c6Oaal5eHHj16mLSvRo0aiUII8uXp06eYOHGiqK69Ce333nsPWVlZojqGkvq4uLjgp59+ErV58OCBpFi4bm5u2Ldvn8Z5Gjt2rMHU7kKE4flMLVOmTDHrGhMK7aKiIhQWFpqN8AuJuUK7W7dumDFjBuLj4/H1119j6tSpLNQdIM5wqlAoRNfwpUuXkJSUhFWrVolYs2YNMjIyWL2MjAzRPkuC0Bba7cpd3nrrLYPzaYrQnjhxIhISEjBhwgSMHDkSX3zxBb744gv8+uuvItOI6Ohovf2YIrSFL6/CkpaWphH+U4irqyuWLVum0e7AgQNaha5QbBpTCgoKkJWVhcuXL+PEiRPYt28fNmzYgF9//RXz58/HlClTMG3aNNF5GjVqlFn3BmNZvnw5Vq1ahcWLFyMhIQFTpkzBhAkTMH78eCxYsEDkE3To0CGrjo2wDCS07WASCPPYtGkTuzHNnz+f/T5z5kyNG/GdO3dMioktxMfHBytXrhT1++rVK7Rt21ZUz1yhfezYMURFRRmFMCa2UGj7+fmJMqoBwJgxY/SOpXnz5izFOl8ePHiA0NBQyefKw8ND5BwJALt37zZqFSkwMFDry43Usnz5crOzG9pb1JHiQlvo2Fq83Lp1S8PWtl+/fkaN79WrVxpJiIKCgtChQwebYyiesj6hvXv3bjx//twi1KlTx+B8miK0//jjD4PzlZmZKQrVqQ1jhXaDBg1EApXfz4ABAyRHAYmKimLme+np6SzcpjZSUlIAqF/wtm7disTEREybNg2jR49Gv3790KlTJzRv3hxvvfUWKleuDA8PD8l/zytWrGDHkJOTY3ToVnNYv3695L+7Pn36WG1chOUgoW0Hk0CYh7e3N65cuYKsrCyUL1+e/V6zZk1RyKrNmzfDz89Ptv127NgRly5dAgCN1WyOM19oy+0M2bRpU7x48QKFhYUYOnSowb6aN28uWl1JS0tDUFCQ0WNSKpVISEiASqXCnTt3TJoDHx8fjBkzBhs3bsSBAweQnJysly1btmD27NlaE7CYglBor1q1CtHR0WYjtH02V2gL46ELy6lTp9CoUSOd1+/WrVtx8eJFZGRkaOXs2bP4/fffZTuPtkAOG21LYYrQFpoEaSuXL1+WFOHDlBXthIQEAOqoGz///LNeoawLb29vJCQkGHwRqVKliuh+LicVKlRgzvMnT540K/ygsYwfP17v/AHqF9tp06aZlVyHsB9IaNvBJBDmExYWhk6dOmn83r17d9y6dQvdu3e3yH6dnJzQp08frSumHTt2xOPHj/H48WPJKcm9vb2xb98+7Nu3z+AncW3ExsayBAnawmB17NgRrVq1ktxfmzZtkJOTgylTphhl7qENPhmGra8VU7AXG+2BAwdi5MiR6N+/v+h3pVKJkJAQhISEoGbNmggKCrL6fdVeef/999nfobmJU+Rm0aJFSE1NRWpqqsZXB10EBgaiZ8+ejO7du+Pjjz9Ghw4d0KBBA8mry6YIbWdnZxw8eBDdunWz+bkzl9jYWEyaNEkUc90a1KlTB7GxsYiLi8OIESMwcuRIxogRIxATEyMyfyRef0ho28EkEJZFDiejNxkSbGq7ZqVSCVjMaQYAAAqCSURBVKVSKZtzkiX6JAipuLq6okWLFmjRogWqVKkiuZ2xyWII4k3njRbaGRkZOH78ODZs2IBp06YhOjpa1tBNBEEQBEEQxJtBpUqVEB0djWnTpmHDhg04fvw4cyp/o4R2zZo1MXv2bCxatAgrV65EcnIysrOz2We08+fPIz4+nj7fEARBEARBEDqpWrUq4uPjRcEDsrOzkZycjJUrV2LRokWYPXu2VX0BOM7GQlsXQUFBGDhwIDZu3IiCggKDsT4JgiAIgiCIN4+3334bSUlJKCoqQkFBATZu3IiBAweaFDTAEtil0BZSrlw5jB07FllZWVCpVFi6dKmsESsIgiAIgiCI1ws/Pz8sXboUKpUKWVlZGDt2rKQEVdbG7oU2j5ubG8aNG4ecnBw8fPgQkZGRNh8TQRAEQRAEYV0iIyPx4MED5OTk4JtvvoG7u7vNx6SL10Zo8wQGBmLv3r1QqVSYNWsWeV4TBEEQBEG8ATg4OGDmzJlQqVTYu3cvAgMDbT4mQ7x2Qpvj1GGNfvjhB6hUKqxduxYuLi42HxNBEARBEARhGZydnbFmzRqoVCr88MMPr81C62sptHn69euHwsJCbNmyxerB7gmCIAiCIAjL4+TkhC1btqCwsBD9+vWz+XiM4bUW2hynzjhYVFSEFStWUJpUgiAIgiCIEoRCocBvv/2GoqIii2WXtiSvvdDmOA5DhgwBAIwYMcLmYyEIgiAIgiDk4auvvgIADBkyxOZjMYUSIbQ5jsOyZcuQn5+Pxo0b23wsBEEQBEEQhHk0atQI+fn5WLZsmc3HYiolRmi7ubnh4sWLOHXqFJRKpc3HQxAEQRAEQZiGUqlEWloaLl26BDc3N5uPx1RsIrS9vLzg6uoKjuPg4uIChUIBd3d3eHl5mdVvmzZtAABffvmlzU8sQRAEQRAEYRqxsbEAgDZt2pjVj5eXFzw8PODg4CDSnNbSvFYT2k5OTqhQoQJ69uyJ48ePIz8/H8Ly8uVLHDhwAK1bt4ZCoTA5bMu6deuQmZn5Wr/9EARBEARBvKm4uroiMzMT69evN6m9o6MjSpUqhbZt2yIlJQW5ublaNee4cePg4eEBZ2dnix2LVYR2UFAQxo8fj+zsbEgt3bt3R0BAgNH7Cg0NhUqlQmxsrM0vFIIgCIIgCMI4hg8fDpVKhdDQUKPbVqpUCWFhYXj16pUkvZmXl4eIiAiULl3aIsdicaFdrVo1/Pnnn5IFtrAsWrQIDRs2NHqfO3bswLlz52x+oRAEQRAEQRDGcfbsWezcudPodrVq1UJSUpJJmvPkyZOoW7eu7MdiUaFdqVIlZGRkmHTAfElKSkKFChWM2m+vXr0AwCSRThAEQRAEQdiGsLAwAECvXr2MalelShUcPXrULM2ZmZkJPz8/WY/HYkLbx8cHe/bsMeuA+fLNN9+gbNmykvft5uaG3NxcfP/99za/YAiCIAiCIAhpTJw4Ebm5uUb52nl5eeG3336TRXNeunQJ7u7ush2PxYR27969ZTlgvjRp0sSo/e/atQspKSk2v2AIgiAIgiAIaSQnJ2P37t1
GtWnZsqWsmlNOiwiLCG1/f38cOnRI1oPetm2bUava8fHxyM/Ph5OTk80vGoIgCIIgCEI/Tk5OyM/PR3x8vOQ2vr6+2LBhg6yaMz09Hb6+vrIck0WEdoUKFWQ9YL6UKlVK8hgiIyMBACEhITa/cAiCIAiCIAj91KpVCwDQpUsXyW2cnJwsojmrVasmyzFZRGiPHz/eIgfdu3dvyWOoU6cOAKBjx442v3AIgiAIgiAI/XTs2BEAUKdOHclt+vTpYxHNGRMTI8sxyS60nZ2dsXXrVosc9MyZM6FQKCSNw9fXFwDQt29fm184BEEQBEEQhH4+/fRTAJAc+UOhUODnn3+2iObcsmWLLIlsZBfaZcqUMTu8iq6yYcMGuLi4SBqHs7MzAODo0aNYsWIFQRAEQRAEYcfw+lGq1nNxccHmzZstojkPHTokiy6WXWh7e3vj+PHjFjnorVu3wtXVVfJY0tLSCIIgCIIgiNcIqTrP1dUV27Zts4jmTE1NtU+h7e7ujr/++ssiB/3LL7/AwcHB7DESBEEQBEEQrzcKhQILFiywiObcuXOnLPG0LeIMOW3aNIsctDFeqARBEARBEETJZtCgQRbRnF9//bUs47OI0K5Ro4ZFDtrf39/mE0oQBEEQBEHYB15eXhbRnKGhobKMzyJCu2LFirh48aKsB5yZmQlPT0+bTyhBEARBEARhH1SoUEH2JIkvX740yidQHxYR2gqFAs2aNZP1oJs1a2bzySQIgiAIgiDsi8DAQFk1Z8+ePSWHkzaERYQ2x3Hw8fHBjRs3ZDngBQsWoEqVKjafSIIgCIIgCMK+8Pb2xokTJ2TRnBs3bkT16tVlHZtFhDbHcQgICEBWVpbZBy01niJBEARBEATx5lG9enWkp6ebrTkrVqwo67iY0H7+/LnsQpvjODRu3Bhnzpwx+YApnB9BEARBEARhiDp16piVy8USY/Ly8kJOTg64oqIilCpVyiI7cXBwwMSJE4062NmzZ8PR0dHmk0YQBEEQBEG8HlSvXh3z5s0zSnPOmTMHSqXSIuNxdHSESqUCBwC+vr4WO3A/Pz+UK1cOu3bt0mlO8vz5cyxfvhzu7u4oXbq0zSeLIAiCIAiCeL3w9PREUFAQJk2apFdz/vrrr/Dw8EDZsmUtNhZ/f38AUAvtjz76yOIH7+zsjLJly8LHxwf169dHeHg4goOD4ejoCA8PDzg5Odl8ggiCIAiCIIjXG2dnZ/j4+MDb2xv16tVDeHg46tevD0dHR3h6elrFBzAyMvJfof3rr7+S0CUIgiAIgiAIM3FyckJSUtK/QjszMxNlypSx+cAIgiAIgiAI4nWmUqVKOHXq1L9CG4BsmXAIgiAIgiAI4k3Fw8OD2YQzob1ixQqLeV4SBEEQBEEQREnHyckJP/30k6bQzszMlD1YN0EQBEEQBEG8KSiVShQVFWkKbQAYNmwYrWoTBEEQBEEQhJE4Ojpi9uzZonCCIqENWCY7DkEQBEEQBEGUZLy9vTXidmsI7ZSUFEoaQxAEQRAEQRAScXd3x5kzZwwLbQD48ssvyYSEIAiCIAiCIAygVCoxcuRIbZJau9AGgNDQUJsPnCAIgiAIgiDsmbp16+qS07qFNgBKYkMQBEEQBEEQOlAoFPqktH6h/fLlSzg7O0OhUNj8QAiCIAiCIAjCHlAoFKhevbpekW1QaPMlLCwMXl5eNj8ogiAIgiAIgrAlnp6e6NWrlxQJLU1oA8A777yD6tWr2/zgCIIgCIIgCMIWlClTBtHR0VLls3ShDQBbt26FUqlE+fLlbX6gBEEQBEEQBGENypUrhypVqiAzM9MY6Wyc0ObLkiVLEBISgqpVq9r8wAmCIAiCIAjCEnh5eSE0NBTLli0zRTKbJrT5snv3bnTv3h2+vr6oWLEivL294ejoaPOTQhAEQRAEQRDG4OjoCF9fX/j5+cHPzw99+vRBVlaWOVLZPKEtLNevX8fhw4exYMECDBs2DH369EH37t0JgiAIgiAIwm759NNPERcXh99++w07d+7E/fv35ZLH+H/vYxXfgO9iuQAAAABJRU5ErkJggg==',
"size": {
"width": 730,
"height": 440
},
"params": {},
"type": "2"})
r = requests.post(url, data, headers={'Content-Type': 'application/json'}) # 'Content-Type':'application/json' 'charset=utf8'
print(r.text)
def test_3(img_path, img_name, img_type='png/jpg'):
"""
	:param img_path: directory that contains the image
	:param img_name: image file name
	:param img_type: image MIME type; "image/jpeg" is sent here, png/jpg also works (only png, PNG, jpg, JPG, bmp are supported)
"""
#url = 'http://192.168.0.99:80/bolian_upload'
#url = 'http://192.168.1.43:8888/bolian_upload'
url = 'http://192.168.0.99:8880/bolian_upload'
imgName = img_path + img_name
files = {'file': (img_name, open(imgName, 'rb'), "image/jpeg")}
response = requests.post(url, files=files)
print(response.text)
if __name__ == '__main__':
#test_1()
#test_2()
test_3("/home/hw/桌面/web_ocr/", "g-2-1.png")
|
import requests
import re
from bs4 import BeautifulSoup
def get_info():
	'''Fetch the daily photo and the one-sentence quote from the ONE (wufazhuce) homepage'''
one_url = 'http://www.wufazhuce.com'
r = requests.get(one_url)
html = r.text
soap = BeautifulSoup(html, "html.parser")
	# Download the cover image
pic_url = soap.find(class_='fp-one-imagen').get('src')
pic = requests.get(pic_url)
if pic.status_code == 200:
with open('one.png', 'wb') as f:
f.write(pic.content)
	# Image footer caption
# footer = re.sub(r'\s', '', soap.find(class_='fp-one-imagen-footer').text)
# print(footer)
	# Daily one-sentence quote
one_word = re.sub(r'\s', '', soap.find(class_='fp-one-cita').text)
print(one_word)
return one_word
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Components.DQMMessageLogger_cfi import *
from DQMServices.Components.DQMProvInfo_cfi import *
from DQMServices.Components.DQMFastTimerService_cff import *
from DQMOffline.L1Trigger.L1TriggerDqmOffline_cff import *
from DQMOffline.Ecal.ecal_dqm_source_offline_HI_cff import *
from DQM.EcalPreshowerMonitorModule.es_dqm_source_offline_cff import *
from DQM.HcalTasks.OfflineSourceSequence_hi import *
from DQM.SiStripMonitorClient.SiStripSourceConfigTier0_HeavyIons_cff import *
from DQM.SiPixelCommon.SiPixelOfflineDQM_source_cff import *
from DQM.DTMonitorModule.dtDQMOfflineSources_HI_cff import *
from DQM.RPCMonitorClient.RPCTier0Source_cff import *
from DQM.CSCMonitorModule.csc_dqm_sourceclient_offline_cff import *
from DQM.BeamMonitor.AlcaBeamMonitorHeavyIons_cff import *
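# Empty placeholder sequence (schedules no DQM modules)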
DQMNone = cms.Sequence()
dqmProvInfo.runType = "hi_run"
DQMOfflineHeavyIonsDCS = cms.Sequence( dqmProvInfo )
# L1 trigger sequences
DQMOfflineHeavyIonsL1T = cms.Sequence( l1TriggerDqmOffline ) # L1 emulator is run within this sequence for real data
#DPGs
DQMOfflineHeavyIonsEcal = cms.Sequence( ecal_dqm_source_offline *
es_dqm_source_offline )
DQMOfflineHeavyIonsHcal = cms.Sequence( hcalOfflineSourceSequence )
DQMOfflineHeavyIonsTrackerStrip = cms.Sequence( SiStripDQMTier0_hi )
DQMOfflineHeavyIonsTrackerPixel = cms.Sequence( siPixelOfflineDQM_heavyions_source )
DQMOfflineHeavyIonsMuonDPG = cms.Sequence( dtSources *
rpcTier0Source *
cscSources )
DQMOfflineHeavyIonsPreDPG = cms.Sequence( DQMOfflineHeavyIonsDCS *
DQMOfflineHeavyIonsL1T *
DQMOfflineHeavyIonsEcal *
DQMOfflineHeavyIonsHcal *
DQMOfflineHeavyIonsTrackerStrip *
DQMOfflineHeavyIonsTrackerPixel *
DQMOfflineHeavyIonsMuonDPG )
DQMOfflineHeavyIonsDPG = cms.Sequence( DQMOfflineHeavyIonsPreDPG *
DQMMessageLogger )
#Modifications
from DQMOffline.Muon.muonMonitors_cff import *
diMuonHistos.etaBin = cms.int32(70) #dimuonhistograms mass, bin
diMuonHistos.etaBBin = cms.int32(70)
diMuonHistos.etaEBin = cms.int32(70)
diMuonHistos.LowMassMin = cms.double(2.0)
diMuonHistos.LowMassMax = cms.double(14.0)
diMuonHistos.HighMassMin = cms.double(55.0)
diMuonHistos.HighMassMax = cms.double(125.0)
from DQMOffline.Trigger.DQMOffline_Trigger_cff import *
triggerOfflineDQMSource.remove(jetMETHLTOfflineAnalyzer)
triggerOfflineDQMSource.remove(exoticaMonitorHLT)
triggerOfflineDQMSource.remove(susyMonitorHLT)
triggerOfflineDQMSource.remove(b2gMonitorHLT)
triggerOfflineDQMSource.remove(bphMonitorHLT)
triggerOfflineDQMSource.remove(higgsMonitorHLT)
triggerOfflineDQMSource.remove(smpMonitorHLT)
triggerOfflineDQMSource.remove(topMonitorHLT)
triggerOfflineDQMSource.remove(btagMonitorHLT)
triggerOfflineDQMSource.remove(egammaMonitorHLT)
triggerOfflineDQMSource.remove(ak4PFL1FastL2L3CorrectorChain)
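# Point the tag-and-probe analyzers at the heavy-ion primary vertex collection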
globalAnalyzerTnP.inputTags.offlinePVs = cms.InputTag("hiSelectedVertex")
trackerAnalyzerTnP.inputTags.offlinePVs = cms.InputTag("hiSelectedVertex")
tightAnalyzerTnP.inputTags.offlinePVs = cms.InputTag("hiSelectedVertex")
looseAnalyzerTnP.inputTags.offlinePVs = cms.InputTag("hiSelectedVertex")
from DQMOffline.EGamma.egammaDQMOffline_cff import *
#egammaDQMOffline.remove(electronAnalyzerSequence)
egammaDQMOffline.remove(zmumugammaAnalysis)
egammaDQMOffline.remove(zmumugammaOldAnalysis)
#egammaDQMOffline.remove(photonAnalysis)
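# Adapt photon and electron monitoring to the heavy-ion collections (temporary GED collections, HI tracks and vertices)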
photonAnalysis.phoProducer = cms.InputTag("gedPhotonsTmp")
photonAnalysis.isHeavyIon = True
photonAnalysis.barrelRecHitProducer = cms.InputTag("ecalRecHit", "EcalRecHitsEB")
photonAnalysis.endcapRecHitProducer = cms.InputTag("ecalRecHit", "EcalRecHitsEE")
dqmElectronGeneralAnalysis.ElectronCollection = cms.InputTag("gedGsfElectronsTmp")
dqmElectronGeneralAnalysis.TrackCollection = cms.InputTag("hiGeneralTracks")
dqmElectronGeneralAnalysis.VertexCollection = cms.InputTag("hiSelectedVertex")
dqmElectronAnalysisAllElectrons.ElectronCollection = cms.InputTag("gedGsfElectronsTmp")
dqmElectronAnalysisSelectionEt.ElectronCollection = cms.InputTag("gedGsfElectronsTmp")
dqmElectronAnalysisSelectionEtIso.ElectronCollection = cms.InputTag("gedGsfElectronsTmp")
dqmElectronTagProbeAnalysis.ElectronCollection = cms.InputTag("gedGsfElectronsTmp")
stdPhotonAnalysis.isHeavyIon = True
stdPhotonAnalysis.barrelRecHitProducer = cms.InputTag("ecalRecHit", "EcalRecHitsEB")
stdPhotonAnalysis.endcapRecHitProducer = cms.InputTag("ecalRecHit", "EcalRecHitsEE")
#disabled, until an appropriate configuration is set
hltTauOfflineMonitor_PFTaus.Matching.doMatching = False
from DQMOffline.Trigger.FSQHLTOfflineSource_cfi import getFSQHI
fsqHLTOfflineSource.todo = getFSQHI()
from DQMOffline.RecoB.PrimaryVertexMonitor_cff import *
pvMonitor.vertexLabel = cms.InputTag("hiSelectedVertex")
from DQM.TrackingMonitorSource.TrackingSourceConfig_Tier0_HeavyIons_cff import *
from DQMOffline.JetMET.jetMETDQMOfflineSourceHI_cff import *
from DQM.BeamMonitor.AlcaBeamMonitorHeavyIons_cff import *
from DQM.Physics.DQMPhysics_cff import *
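#POG sequences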
DQMOfflineHeavyIonsMUO = cms.Sequence(muonMonitors)
DQMOfflineHeavyIonsTracking = cms.Sequence( TrackMonDQMTier0_hi )
DQMOfflineHeavyIonsJetMET = cms.Sequence( jetMETDQMOfflineSource )
DQMOfflineHeavyIonsEGamma = cms.Sequence( egammaDQMOffline )
DQMOfflineHeavyIonsTrigger = cms.Sequence( triggerOfflineDQMSource )
DQMOfflineHeavyIonsVertex = cms.Sequence( pvMonitor )
DQMOfflineHeavyIonsBeam = cms.Sequence( alcaBeamMonitor )
DQMOfflineHeavyIonsPhysics = cms.Sequence( dqmPhysicsHI )
DQMOfflineHeavyIonsPrePOG = cms.Sequence( DQMOfflineHeavyIonsMUO *
DQMOfflineHeavyIonsTracking *
DQMOfflineHeavyIonsJetMET *
DQMOfflineHeavyIonsEGamma *
DQMOfflineHeavyIonsTrigger *
DQMOfflineHeavyIonsVertex *
DQMOfflineHeavyIonsBeam *
DQMOfflineHeavyIonsPhysics )
DQMOfflineHeavyIonsPOG = cms.Sequence( DQMOfflineHeavyIonsPrePOG *
DQMMessageLogger )
DQMOfflineHeavyIons = cms.Sequence( DQMOfflineHeavyIonsPreDPG *
DQMOfflineHeavyIonsPrePOG *
DQMMessageLogger )
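# Same as the full sequence but without the HLT-based offline trigger monitoring
# (presumably for fake-HLT workflows where no trigger DQM input is available)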
DQMOfflineHeavyIonsFakeHLT = cms.Sequence( DQMOfflineHeavyIons )
DQMOfflineHeavyIonsFakeHLT.remove( triggerOfflineDQMSource )
#this is needed to have a light sequence for T0 processing
liteDQMOfflineHeavyIons = cms.Sequence ( DQMOfflineHeavyIons )
liteDQMOfflineHeavyIons.remove( SiStripMonitorCluster )
liteDQMOfflineHeavyIons.remove( jetMETDQMOfflineSource )
PostDQMOfflineHI = cms.Sequence()
|
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed
# under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from github.release_notes.model import (
REF_TYPE_PULL_REQUEST,
REF_TYPE_COMMIT,
)
from github.release_notes.renderer import (
MarkdownRenderer,
get_or_call,
CATEGORY_ACTION_ID,
CATEGORY_NOTEWORTHY_ID,
CATEGORY_IMPROVEMENT_ID,
TARGET_GROUP_USER_ID,
TARGET_GROUP_OPERATOR_ID,
TARGET_GROUP_DEVELOPER_ID,
)
from test.github.release_notes.default_util import (
release_note_block_with_defaults,
CURRENT_REPO_NAME,
)
class RendererTest(unittest.TestCase):
def test_render_multiline_rls_note_should_have_2nd_level_bullet_points(self):
multiline_text = \
'first line with header\n'\
'second line\n'\
'third line\n'
release_note_objs = [
release_note_block_with_defaults(
text=multiline_text,
)
]
actual_md_str = MarkdownRenderer(release_note_objs=release_note_objs).render()
expected_md_str = \
'# [current-repo]\n'\
'## Improvements\n'\
'* *[USER]* first line with header (#42, @foo)\n'\
' * second line\n'\
' * third line'
self.assertEqual(expected_md_str, actual_md_str)
def test_render_from_other_github_should_auto_link(self):
release_note_objs = [
release_note_block_with_defaults(
source_repo='madeup.enterprise.github.corp/o/s',
)
]
actual_md_str = MarkdownRenderer(release_note_objs=release_note_objs).render()
expected_md_str = \
'\n'.join((
'# [s]',
'## Improvements',
'* *[USER]* default release note text '
'([o/s#42](https://madeup.enterprise.github.corp/o/s/pull/42), '
'[@foo](https://madeup.enterprise.github.corp/foo))'
))
self.assertEqual(expected_md_str, actual_md_str)
def test_render_reference_pr(self):
release_note_objs = [
release_note_block_with_defaults(
reference_type=REF_TYPE_PULL_REQUEST,
reference_id='42',
source_repo=CURRENT_REPO_NAME,
),
release_note_block_with_defaults(
reference_type=REF_TYPE_PULL_REQUEST,
reference_id='1',
text='other component, same github instance rls note',
source_repo='github.com/madeup/a-foo-bar',
)
]
actual_md_str = MarkdownRenderer(release_note_objs=release_note_objs).render()
expected_md_str = \
'# [current-repo]\n'\
'## Improvements\n'\
'* *[USER]* default release note text (#42, @foo)\n'\
'# [a-foo-bar]\n'\
'## Improvements\n'\
'* *[USER]* other component, same github instance rls note (madeup/a-foo-bar#1, @foo)'
self.assertEqual(expected_md_str, actual_md_str)
def test_render_reference_commit(self):
release_note_objs = [
release_note_block_with_defaults(
text='rls note 1',
reference_type=REF_TYPE_COMMIT,
reference_id='commit-id-1',
source_repo=CURRENT_REPO_NAME,
),
# As the source repository is on the same github instance as the current repository
# it can be auto linked by github, hence we do not need to build a link to the commit
# with the cut off commit id as link text
release_note_block_with_defaults(
text='other component rls note',
reference_type=REF_TYPE_COMMIT,
reference_id='very-long-commit-id-that-will-not-be-shortened',
user_login='bar',
source_repo='github.com/madeup/a-foo-bar',
),
            # the source repository is on a different github instance than the current repository.
            # It cannot be auto-linked by github, hence we need to build a link to the commit
            # with the cut-off commit id as link text
release_note_block_with_defaults(
text='release note from different github instance',
reference_type=REF_TYPE_COMMIT,
reference_id='very-long-commit-id-that-will-be-shortened',
user_login='bar',
source_repo='madeup.enterprise.github.corp/o/s',
)
]
actual_md_str = MarkdownRenderer(release_note_objs=release_note_objs).render()
expected_md_str = ''\
'# [current-repo]\n'\
'## Improvements\n'\
'* *[USER]* rls note 1 (commit-id-1, @foo)\n'\
'# [a-foo-bar]\n'\
'## Improvements\n'\
'* *[USER]* other component rls note ' \
'(madeup/a-foo-bar@very-long-commit-id-that-will-not-be-shortened, @bar)\n'\
'# [s]\n'\
'## Improvements\n'\
'* *[USER]* release note from different github instance ' \
'([o/s@very-long-co](https://madeup.enterprise.github.corp/o/s/commit/'\
'very-long-commit-id-that-will-be-shortened), '\
'[@bar](https://madeup.enterprise.github.corp/bar))'
self.assertEqual(expected_md_str, actual_md_str)
def test_render_user(self):
release_note_objs = [
release_note_block_with_defaults(
reference_type=None,
reference_id=None,
user_login='bar',
source_repo='github.com/madeup/a-foo-bar',
),
release_note_block_with_defaults(
reference_type=None,
reference_id=None,
user_login='foo',
source_repo=CURRENT_REPO_NAME,
)
]
actual_md_str = MarkdownRenderer(release_note_objs=release_note_objs).render()
expected_md_str = \
'# [current-repo]\n'\
'## Improvements\n'\
'* *[USER]* default release note text (@foo)\n'\
'# [a-foo-bar]\n'\
'## Improvements\n'\
'* *[USER]* default release note text (@bar)'
self.assertEqual(expected_md_str, actual_md_str)
def test_render_no_reference_no_user(self):
release_note_objs = [
release_note_block_with_defaults(
reference_type=None,
reference_id=None,
user_login=None,
source_repo='github.com/madeup/a-foo-bar',
),
release_note_block_with_defaults(
reference_type=None,
reference_id=None,
user_login=None,
source_repo=CURRENT_REPO_NAME,
)
]
actual_md_str = MarkdownRenderer(release_note_objs=release_note_objs).render()
expected_md_str = \
'# [current-repo]\n'\
'## Improvements\n'\
'* *[USER]* default release note text\n'\
'# [a-foo-bar]\n'\
'## Improvements\n'\
'* *[USER]* default release note text'
self.assertEqual(expected_md_str, actual_md_str)
def test_render_drop_duplicates(self):
release_note_objs = [
release_note_block_with_defaults(
text='duplicate',
reference_type=None,
reference_id=None,
user_login=None,
source_repo=CURRENT_REPO_NAME,
),
release_note_block_with_defaults(
text='duplicate',
reference_type=None,
reference_id=None,
user_login=None,
source_repo=CURRENT_REPO_NAME,
)
]
actual_md_str = MarkdownRenderer(release_note_objs=release_note_objs).render()
expected_md_str = \
'# [current-repo]\n'\
'## Improvements\n'\
'* *[USER]* duplicate'
self.assertEqual(expected_md_str, actual_md_str)
def test_render_no_release_notes(self):
release_note_objs = []
expected_md_str = 'no release notes available'
self.assertEqual(
expected_md_str,
MarkdownRenderer(release_note_objs=release_note_objs).render()
)
def test_render_skip_empty_lines(self):
release_note_objs = [
release_note_block_with_defaults(
text='first line1\n\n second line1', #empty line
reference_type=None,
reference_id=None,
user_login=None,
),
release_note_block_with_defaults(
text='first line2\n \nsecond line2', #empty line with space
reference_type=None,
reference_id=None,
user_login=None,
)
]
actual_md_str = MarkdownRenderer(release_note_objs=release_note_objs).render()
expected_md_str = \
'# [current-repo]\n'\
'## Improvements\n'\
'* *[USER]* first line1\n'\
' * second line1\n'\
'* *[USER]* first line2\n'\
' * second line2'
self.assertEqual(expected_md_str, actual_md_str)
def test_render_remove_bullet_points(self):
release_note_objs = [
release_note_block_with_defaults(
text='first line1\n* second line1', #contains bullet point (*)
reference_type=None,
reference_id=None,
user_login=None,
),
release_note_block_with_defaults(
text='first line2\n * second line2', # contains bullet point with extra spaces
reference_type=None,
reference_id=None,
user_login=None,
),
release_note_block_with_defaults(
text='- first line3\n - second line3', # contains bullet point (-)
reference_type=None,
reference_id=None,
user_login=None,
),
release_note_block_with_defaults(
text='first line4\n*italic*', # no bullet point, just italic
reference_type=None,
reference_id=None,
user_login=None,
)
]
actual_md_str = MarkdownRenderer(release_note_objs=release_note_objs).render()
expected_md_str = \
'# [current-repo]\n'\
'## Improvements\n'\
'* *[USER]* first line1\n'\
' * second line1\n'\
'* *[USER]* first line2\n'\
' * second line2\n'\
'* *[USER]* first line3\n'\
' * second line3\n'\
'* *[USER]* first line4\n'\
' * *italic*'
self.assertEqual(expected_md_str, actual_md_str)
def test_render_categories(self):
release_note_objs = [
release_note_block_with_defaults(
category_id=CATEGORY_IMPROVEMENT_ID,
text='improvement release note',
reference_type=None,
reference_id=None,
user_login=None,
),
release_note_block_with_defaults(
category_id=CATEGORY_NOTEWORTHY_ID,
text='noteworthy release note',
reference_type=None,
reference_id=None,
user_login=None,
),
release_note_block_with_defaults(
category_id=CATEGORY_ACTION_ID,
text='action required release note',
reference_type=None,
reference_id=None,
user_login=None,
),
]
actual_md_str = MarkdownRenderer(release_note_objs=release_note_objs).render()
expected_md_str = \
'# [current-repo]\n'\
'## Action Required\n'\
'* *[USER]* action required release note\n'\
'## Most notable changes\n'\
'* *[USER]* noteworthy release note\n'\
'## Improvements\n'\
'* *[USER]* improvement release note'
self.assertEqual(expected_md_str, actual_md_str)
def test_render_target_group(self):
release_note_objs = [
release_note_block_with_defaults(
target_group_id=TARGET_GROUP_USER_ID,
text='user release note',
reference_type=None,
reference_id=None,
user_login=None,
),
release_note_block_with_defaults(
target_group_id=TARGET_GROUP_OPERATOR_ID,
text='operator release note',
reference_type=None,
reference_id=None,
user_login=None,
),
release_note_block_with_defaults(
target_group_id=TARGET_GROUP_DEVELOPER_ID,
text='developer release note',
reference_type=None,
reference_id=None,
user_login=None,
),
]
actual_md_str = MarkdownRenderer(release_note_objs=release_note_objs).render()
expected_md_str = \
'# [current-repo]\n'\
'## Improvements\n'\
'* *[USER]* user release note\n'\
'* *[OPERATOR]* operator release note\n'\
'* *[DEVELOPER]* developer release note'
self.assertEqual(expected_md_str, actual_md_str)
def test_get_or_call(self):
def call_me():
return 'value'
self.assertEqual('value', get_or_call({'key': 'value'}, 'key'))
self.assertEqual('value', get_or_call({'key': lambda: 'value'}, 'key'))
self.assertEqual('value', get_or_call({'key': {'subkey': call_me}}, 'key.subkey'))
self.assertEqual(None, None)
|
import numpy as np
import tensorflow as tf
import SimpleITK as sitk
import os
from utils import *
"""
@author: roger
This code runs the test manually and is just for verification
"""
train_phase=tf.placeholder(tf.bool, name='phase_train')#This is for testing!
def generator(inputMR,batch_size_tf,wd):
	######## FCN generator: 32x32x32 MR input patch -> 16x16x16 output (each VALID 9x9x9 conv trims 8 voxels per axis) ########
conv1_a = conv_op_3d_bn(inputMR, name="conv1_a", kh=9, kw=9, kz=9, n_out=32, dh=1, dw=1, dz=1, wd=wd, padding='VALID',train_phase=train_phase)#30
conv2_a = conv_op_3d_bn(conv1_a, name="conv2_a", kh=3, kw=3, kz=3, n_out=32, dh=1, dw=1, dz=1, wd=wd, padding='SAME',train_phase=train_phase)
conv3_a = conv_op_3d_bn(conv2_a, name="conv3_a", kh=3, kw=3, kz=3, n_out=32, dh=1, dw=1, dz=1, wd=wd, padding='SAME',train_phase=train_phase)#28
conv4_a = conv_op_3d_bn(conv3_a, name="conv4_a", kh=3, kw=3, kz=3, n_out=32, dh=1, dw=1, dz=1, wd=wd, padding='SAME',train_phase=train_phase)#28
conv5_a = conv_op_3d_bn(conv4_a, name="conv5_a", kh=9, kw=9, kz=9, n_out=64, dh=1, dw=1, dz=1, wd=wd, padding='VALID',train_phase=train_phase)
conv6_a = conv_op_3d_bn(conv5_a, name="conv6_a", kh=3, kw=3, kz=3, n_out=64, dh=1, dw=1, dz=1, wd=wd, padding='SAME',train_phase=train_phase)#26
conv7_a = conv_op_3d_bn(conv6_a, name="conv7_a", kh=3, kw=3, kz=3, n_out=64, dh=1, dw=1, dz=1, wd=wd, padding='SAME',train_phase=train_phase)
conv8_a = conv_op_3d_bn(conv7_a, name="conv8_a", kh=3, kw=3, kz=3, n_out=32, dh=1, dw=1, dz=1, wd=wd, padding='SAME',train_phase=train_phase)
#conv7_a = conv_op_3d_bn(conv6_a, name="conv7_a", kh=3, kw=3, kz=3, n_out=1, dh=1, dw=1, dz=1, wd=wd, padding='SAME',train_phase=train_phase)#24
conv9_a = conv_op_3d_norelu(conv8_a, name="conv9_a", kh=3, kw=3, kz=3, n_out=1, dh=1, dw=1, dz=1, wd=wd, padding='SAME')#24 I modified it here,dong
#MR_16_downsampled=conv7_a#JUST FOR TEST
return conv9_a
def evaluate(sess,patch_MR):
""" patch_MR is a np array of shape [H,W,nchans]
"""
patch_MR=np.expand_dims(patch_MR,axis=0)#[1,H,W,nchans]
patch_MR=np.expand_dims(patch_MR,axis=4)#[1,H,W,nchans]
#patch_MR=patch_MR.astype(np.float32)
patch_CT_pred= sess.run(G, feed_dict={inputMR: patch_MR, train_phase: False})
patch_CT_pred=np.squeeze(patch_CT_pred)#[Z,H,W]
#imsave('mr32.png',np.squeeze(MR16_eval[0,:,:,2]))
#imsave('ctpred.png',np.squeeze(patch_CT_pred[0,:,:,0]))
#print 'mean of layer ',np.mean(MR16_eval)
#print 'min ct estimated ',np.min(patch_CT_pred)
#print 'max ct estimated ',np.max(patch_CT_pred)
#print 'mean of ctpatch estimated ',np.mean(patch_CT_pred)
return patch_CT_pred
def test_1_subject(sess,MR_image,CT_GT,MR_patch_sz,CT_patch_sz,step):
"""
receives an MR image and returns an estimated CT image of the same size
"""
matFA=MR_image
matSeg=CT_GT
dFA=MR_patch_sz
dSeg=CT_patch_sz
eps=1e-5
[row,col,leng]=matFA.shape
margin1=int((dFA[0]-dSeg[0])/2)
margin2=int((dFA[1]-dSeg[1])/2)
margin3=int((dFA[2]-dSeg[2])/2)
cubicCnt=0
marginD=[margin1,margin2,margin3]
print 'matFA shape is ',matFA.shape
matFAOut=np.zeros([row+2*marginD[0],col+2*marginD[1],leng+2*marginD[2]])
print 'matFAOut shape is ',matFAOut.shape
matFAOut[marginD[0]:row+marginD[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA
matFAOut[0:marginD[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA[0:marginD[0],:,:] #we'd better flip it along the first dimension
matFAOut[row+marginD[0]:matFAOut.shape[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA[row-marginD[0]:matFA.shape[0],:,:] #we'd better flip it along the 1st dimension
matFAOut[marginD[0]:row+marginD[0],0:marginD[1],marginD[2]:leng+marginD[2]]=matFA[:,0:marginD[1],:] #we'd better flip it along the 2nd dimension
matFAOut[marginD[0]:row+marginD[0],col+marginD[1]:matFAOut.shape[1],marginD[2]:leng+marginD[2]]=matFA[:,col-marginD[1]:matFA.shape[1],:] #we'd better to flip it along the 2nd dimension
matFAOut[marginD[0]:row+marginD[0],marginD[1]:col+marginD[1],0:marginD[2]]=matFA[:,:,0:marginD[2]] #we'd better flip it along the 3rd dimension
matFAOut[marginD[0]:row+marginD[0],marginD[1]:col+marginD[1],marginD[2]+leng:matFAOut.shape[2]]=matFA[:,:,leng-marginD[2]:matFA.shape[2]]
matOut=np.zeros((matSeg.shape[0],matSeg.shape[1],matSeg.shape[2]))
used=np.zeros((matSeg.shape[0],matSeg.shape[1],matSeg.shape[2]))+eps
#fid=open('trainxxx_list.txt','a');
print 'last i ',row-dSeg[0]
for i in range(0,row-dSeg[0]+1,step[0]):
print 'i ',i
for j in range(0,col-dSeg[1]+1,step[1]):
for k in range(0,leng-dSeg[2]+1,step[2]):
volSeg=matSeg[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]
#print 'volSeg shape is ',volSeg.shape
volFA=matFAOut[i:i+dSeg[0]+2*marginD[0],j:j+dSeg[1]+2*marginD[1],k:k+dSeg[2]+2*marginD[2]]
#print 'volFA shape is ',volFA.shape
#mynet.blobs['dataMR'].data[0,0,...]=volFA
#mynet.forward()
#temppremat = mynet.blobs['softmax'].data[0].argmax(axis=0) #Note you have add softmax layer in deploy prototxt
temppremat=evaluate(sess, volFA)
#print 'patchout shape ',temppremat.shape
#temppremat=volSeg
matOut[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]=matOut[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]+temppremat;
used[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]=used[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]+1;
matOut=matOut/used
return matOut
def load(self, checkpoint_dir):
print(" [*] Reading checkpoints...")
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
return True
else:
return False
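# Note: load() above is not called in this script; the checkpoint is restored directly with saver.restore() below.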
path_test='/home/dongnie/warehouse/prostate/ganData64to24Test'
model = "./checkpoint"
checkpoint_file = tf.train.latest_checkpoint(model)
wd=0.0005
inputMR=tf.placeholder(tf.float32, shape=[None, 32, 32, 32, 1])
batch_size_tf = tf.shape(inputMR)[0] #variable batchsize so we can test here
G=generator(inputMR,batch_size_tf,wd)
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
ckpt = tf.train.latest_checkpoint(model)
print(ckpt)
saver.restore(sess, ckpt) # restore all variables
mr_test_itk=sitk.ReadImage(os.path.join(path_test,'prostate_1to1_MRI.nii'))
ct_test_itk=sitk.ReadImage(os.path.join(path_test,'prostate_1to1_CT.nii'))
mrnp=sitk.GetArrayFromImage(mr_test_itk)
#mu=np.mean(mrnp)
#mrnp=(mrnp-mu)/(np.max(mrnp)-np.min(mrnp))
ctnp=sitk.GetArrayFromImage(ct_test_itk)
print mrnp.dtype
print ctnp.dtype
ct_estimated=test_1_subject(sess,mrnp,ctnp,[32,32,32],[16,16,16],[2,5,5])
psnrval=psnr(ct_estimated,ctnp)
print ct_estimated.dtype
print ctnp.dtype
print 'psnr= ',psnrval
volout=sitk.GetImageFromArray(ct_estimated)
	sitk.WriteImage(volout,'ct_estimated_test_script.nii.gz')
|
import cv2
import numpy as np
import joblib
file1=open('final.txt','r')
data=[];label=[]
counter=0
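# final.txt is expected to contain one "image_path;label" pair per line; each image is
# read in grayscale, resized to 64x64 and collected into parallel data/label lists.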
while(1):
line=file1.readline()
#line.strip('\n')
if len(line)!=0:
line=line.split(';')
path_str,label_str=line[0],line[1].strip('\n')
img=cv2.imread(path_str,0)
pict=cv2.resize(img,(64,64))
data.append(pict)
label.append(label_str)
counter+=1
		print('Finished line %d' % counter)
else:
break
file1.close()
# Build the list of 64x64 images
joblib.dump(data,'img_data')
joblib.dump(label,'img_label')
|
# coding: utf-8
"""
most code in this file is copied from: https://github.com/swapnil96/Convex-hull
getHull(pl) is the only API you need to call
    pl: a list of points, e.g. [c4d.Vector(), ...]
    return: vertex-index triples, e.g. ((0, 1, 2), (3, 6, 7), ...)
"""
import c4d
import math
def set_correct_normal(possible_internal_points, plane): # Make the orientation of Normal correct
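	# Flip the plane normal if it points towards any candidate interior point,
	# so that every face normal ends up facing outwards from the hull.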
for point in possible_internal_points:
dist = dotProduct(plane.normal, point - plane.pointA)
if(dist != 0):
if(dist > 10**-10):
plane.normal.x = -1 * plane.normal.x
plane.normal.y = -1 * plane.normal.y
plane.normal.z = -1 * plane.normal.z
return
def printV(vec): # Print points
print vec.x, vec.y, vec.z
def cross(pointA, pointB): # Cross product
x = (pointA.y * pointB.z) - (pointA.z * pointB.y)
y = (pointA.z * pointB.x) - (pointA.x * pointB.z)
z = (pointA.x * pointB.y) - (pointA.y * pointB.x)
return Point(x, y, z)
def dotProduct(pointA, pointB): # Dot product
return (pointA.x * pointB.x + pointA.y * pointB.y + pointA.z * pointB.z)
def checker_plane(a, b): # Check if two planes are equal or not
if ((a.pointA.x == b.pointA.x) and (a.pointA.y == b.pointA.y) and (a.pointA.z == b.pointA.z)):
if ((a.pointB.x == b.pointB.x) and (a.pointB.y == b.pointB.y) and (a.pointB.z == b.pointB.z)):
if ((a.pointC.x == b.pointC.x) and (a.pointC.y == b.pointC.y) and (a.pointC.z == b.pointC.z)):
return True
elif ((a.pointB.x == b.pointC.x) and (a.pointB.y == b.pointC.y) and (a.pointB.z == b.pointC.z)):
if ((a.pointC.x == b.pointB.x) and (a.pointC.y == b.pointB.y) and (a.pointC.z == b.pointB.z)):
return True
if ((a.pointA.x == b.pointB.x) and (a.pointA.y == b.pointB.y) and (a.pointA.z == b.pointB.z)):
if ((a.pointB.x == b.pointA.x) and (a.pointB.y == b.pointA.y) and (a.pointB.z == b.pointA.z)):
if ((a.pointC.x == b.pointC.x) and (a.pointC.y == b.pointC.y) and (a.pointC.z == b.pointC.z)):
return True
elif ((a.pointB.x == b.pointC.x) and (a.pointB.y == b.pointC.y) and (a.pointB.z == b.pointC.z)):
if ((a.pointC.x == b.pointA.x) and (a.pointC.y == b.pointA.y) and (a.pointC.z == b.pointA.z)):
return True
if ((a.pointA.x == b.pointC.x) and (a.pointA.y == b.pointC.y) and (a.pointA.z == b.pointC.z)):
if ((a.pointB.x == b.pointA.x) and (a.pointB.y == b.pointA.y) and (a.pointB.z == b.pointA.z)):
if ((a.pointC.x == b.pointB.x) and (a.pointC.y == b.pointB.y) and (a.pointC.z == b.pointB.z)):
return True
elif ((a.pointB.x == b.pointC.x) and (a.pointB.y == b.pointC.y) and (a.pointB.z == b.pointC.z)):
if ((a.pointC.x == b.pointB.x) and (a.pointC.y == b.pointB.y) and (a.pointC.z == b.pointB.z)):
return True
return False
def checker_edge(a, b): # Check if 2 edges have same 2 vertices
if ((a.pointA == b.pointA)and(a.pointB == b.pointB)) or ((a.pointB == b.pointA)and(a.pointA == b.pointB)):
return True
return False
class Edge: # Make a object of type Edge which have two points denoting the vertices of the edges
def __init__(self, pointA, pointB):
self.pointA = pointA
self.pointB = pointB
def __str__(self):
string = "Edge"
string += "\n\tA: " + str(self.pointA.x) + "," + str(self.pointA.y) + "," + str(self.pointA.z)
string += "\n\tB: " + str(self.pointB.x) + "," + str(self.pointB.y) + "," + str(self.pointB.z)
return string
def __hash__(self):
return hash((self.pointA, self.pointB))
def __eq__(self, other):
# print "comparing Edges"
return checker_edge(self, other)
class Point: # Point class denoting the points in the space
def __init__(self, x=None, y=None, z=None):
self.x = x
self.y = y
self.z = z
def __sub__(self, pointX):
return Point(self.x - pointX.x, self.y - pointX.y, self.z - pointX.z)
def __add__(self, pointX):
return Point(self.x + pointX.x, self.y + pointX.y, self.z + pointX.z)
def length(self):
return math.sqrt(self.x**2 + self.y**2 + self.z**2)
def __str__(self):
return str(self.x) + "," + str(self.y) + "," + str(self.z)
def __hash__(self):
return hash((self.x, self.y, self.z))
def __eq__(self, other):
# print "Checking equality of Point"
return (self.x == other.x) and(self.y == other.y) and(self.z == other.z)
class Plane: # Plane class having 3 points for a triangle
def __init__(self, pointA, pointB, pointC):
self.pointA = pointA
self.pointB = pointB
self.pointC = pointC
self.normal = None
self.distance = None
self.calcNorm()
self.to_do = set()
self.edge1 = Edge(pointA, pointB)
self.edge2 = Edge(pointB, pointC)
self.edge3 = Edge(pointC, pointA)
def calcNorm(self):
point1 = self.pointA - self.pointB
point2 = self.pointB - self.pointC
normVector = cross(point1, point2)
length = normVector.length()
normVector.x = normVector.x / length
normVector.y = normVector.y / length
normVector.z = normVector.z / length
self.normal = normVector
self.distance = dotProduct(self.normal, self.pointA)
def dist(self, pointX):
return (dotProduct(self.normal, pointX - self.pointA))
def get_edges(self):
return [self.edge1, self.edge2, self.edge3]
def calculate_to_do(self, temp=None):
if (temp != None):
for p in temp:
dist = self.dist(p)
if dist > 10**(-10):
self.to_do.add(p)
else:
for p in points:
dist = self.dist(p)
if dist > 10**(-10):
self.to_do.add(p)
def __eq__(self, other):
# print 'Checking Plane Equality'
return checker_plane(self, other)
def __str__(self):
string = "Plane : "
string += "\n\tX: " + str(self.pointA.x) + "," + str(self.pointA.y) + "," + str(self.pointA.z)
string += "\n\tY: " + str(self.pointB.x) + "," + str(self.pointB.y) + "," + str(self.pointB.z)
string += "\n\tZ: " + str(self.pointC.x) + "," + str(self.pointC.y) + "," + str(self.pointC.z)
string += "\n\tNormal: " + str(self.normal.x) + "," + str(self.normal.y) + "," + str(self.normal.z)
return string
def __hash__(self):
return hash((self.pointA, self.pointB, self.pointC))
def calc_horizon(visited_planes, plane, eye_point, edge_list): # Calculating the horizon for an eye to make new faces
if (plane.dist(eye_point) > 10**-10):
visited_planes.append(plane)
edges = plane.get_edges()
for edge in edges:
neighbour = adjacent_plane(plane, edge)
if (neighbour not in visited_planes):
result = calc_horizon(visited_planes, neighbour, eye_point, edge_list)
if(result == 0):
edge_list.add(edge)
return 1
else:
return 0
def adjacent_plane(main_plane, edge): # Finding adjacent planes to an edge
for plane in list_of_planes:
edges = plane.get_edges()
if (plane != main_plane) and (edge in edges):
return plane
def distLine(pointA, pointB, pointX): # Calculate the distance of a point from a line
vec1 = pointX - pointA
vec2 = pointX - pointB
vec3 = pointB - pointA
vec4 = cross(vec1, vec2)
    if vec3.length() == 0:
        return None
    else:
        # distance from pointX to the line AB = |(X-A) x (X-B)| / |B-A|
        return vec4.length() / vec3.length()
def max_dist_line_point(pointA, pointB): # Calculate the maximum distant point from a line for initial simplex
maxDist = 0
for point in points:
if (pointA != point) and (pointB != point):
dist = abs(distLine(pointA, pointB, point))
if dist > maxDist:
maxDistPoint = point
maxDist = dist
return maxDistPoint
def max_dist_plane_point(plane): # Calculate the maximum distance from the plane
maxDist = 0
for point in points:
dist = abs(plane.dist(point))
if (dist > maxDist):
maxDist = dist
maxDistPoint = point
return maxDistPoint
def find_eye_point(plane, to_do_list): # Calculate the maximum distance from the plane
maxDist = 0
for point in to_do_list:
dist = plane.dist(point)
if (dist > maxDist):
maxDist = dist
maxDistPoint = point
return maxDistPoint
def initial_dis(p, q): # Gives the Euclidean distance
return math.sqrt((p.x - q.x)**2 + (p.y - q.y)**2 + (p.z - q.z)**2)
def initial_max(now): # From the extreme points calculate the 2 most distant points
maxi = -1
found = [[], []]
for i in xrange(6):
for j in xrange(i + 1, 6):
dist = initial_dis(now[i], now[j])
            if dist > maxi:
                maxi = dist
                found = [now[i], now[j]]
return found
def initial(): # To calculate the extreme points to make the initial simplex
x_min_temp = 10**9
x_max_temp = -10**9
y_min_temp = 10**9
y_max_temp = -10**9
z_min_temp = 10**9
z_max_temp = -10**9
for i in xrange(len(points)):
if points[i].x > x_max_temp:
x_max_temp = points[i].x
x_max = points[i]
if points[i].x < x_min_temp:
x_min_temp = points[i].x
x_min = points[i]
if points[i].y > y_max_temp:
y_max_temp = points[i].y
y_max = points[i]
if points[i].y < y_min_temp:
y_min_temp = points[i].y
y_min = points[i]
if points[i].z > z_max_temp:
z_max_temp = points[i].z
z_max = points[i]
if points[i].z < z_min_temp:
z_min_temp = points[i].z
z_min = points[i]
return (x_max, x_min, y_max, y_min, z_max, z_min)
points = []
list_of_planes = []
def getHull(pl):
if len(pl) < 4:
print "less then 4 points"
return None
global points, list_of_planes
points = []
searchMap = {}
for i, v in enumerate(pl):
k = Point(v.x, v.y, v.z)
points.append(k)
searchMap[k] = i
try:
extremes = initial() # calculate the extreme points for every axis.
initial_line = initial_max(extremes) # Make the initial line by joining farthest 2 points
third_point = max_dist_line_point(initial_line[0], initial_line[1]) # Calculate the 3rd point to make a plane
first_plane = Plane(initial_line[0], initial_line[1], third_point) # Make the initial plane by joining 3rd point to the line
fourth_point = max_dist_plane_point(first_plane) # Make the fourth plane to make a tetrahedron
    except:
        print "hull error"
        return None
possible_internal_points = [initial_line[0], initial_line[1], third_point, fourth_point] # List that helps in calculating orientation of point
second_plane = Plane(initial_line[0], initial_line[1], fourth_point) # The other planes of the tetrahedron
third_plane = Plane(initial_line[0], fourth_point, third_point)
fourth_plane = Plane(initial_line[1], third_point, fourth_point)
set_correct_normal(possible_internal_points, first_plane) # Setting the orientation of normal correct
set_correct_normal(possible_internal_points, second_plane)
set_correct_normal(possible_internal_points, third_plane)
set_correct_normal(possible_internal_points, fourth_plane)
first_plane.calculate_to_do() # Calculating the to_do list which stores the point for which eye_point have to be found
second_plane.calculate_to_do()
third_plane.calculate_to_do()
fourth_plane.calculate_to_do()
list_of_planes = [] # List containing all the planes
list_of_planes.append(first_plane)
list_of_planes.append(second_plane)
list_of_planes.append(third_plane)
list_of_planes.append(fourth_plane)
any_left = True # Checking if planes with to do list is over
while any_left:
any_left = False
for working_plane in list_of_planes:
if len(working_plane.to_do) > 0:
any_left = True
eye_point = find_eye_point(working_plane, working_plane.to_do) # Calculate the eye point of the face
edge_list = set()
visited_planes = []
calc_horizon(visited_planes, working_plane, eye_point, edge_list) # Calculate the horizon
for internal_plane in visited_planes: # Remove the internal planes
list_of_planes.remove(internal_plane)
for edge in edge_list: # Make new planes
new_plane = Plane(edge.pointA, edge.pointB, eye_point)
set_correct_normal(possible_internal_points, new_plane)
temp_to_do = set()
for internal_plane in visited_planes:
temp_to_do = temp_to_do.union(internal_plane.to_do)
new_plane.calculate_to_do(temp_to_do)
list_of_planes.append(new_plane)
"""
"""
tmp = []
for f in list_of_planes:
tmp.append((searchMap[f.pointC], searchMap[f.pointB], searchMap[f.pointA]))
return tuple(tmp)
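# A minimal usage sketch (hypothetical points; meant to run inside Cinema 4D's
# Script Manager, where the c4d module is available). getHull() returns one index
# triple per triangular face of the convex hull, indexing into the input list.
def _hull_demo():
    pts = [c4d.Vector(0, 0, 0), c4d.Vector(100, 0, 0), c4d.Vector(0, 100, 0),
           c4d.Vector(0, 0, 100), c4d.Vector(25, 25, 25)]  # last point lies inside the hull
    faces = getHull(pts)
    for tri in faces:
        print tri  # e.g. (3, 2, 1) -- vertex indices into pts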
|
import tensorflow as tf
from tensorflow.python.framework import ops
_binned_select_knn = tf.load_op_library('binned_select_knn.so')
def _BinnedSelectKnn(K : int, coords, bin_idx, dim_bin_idx, bin_boundaries, n_bins, bin_width , tf_compatible=False):
'''
the op wrapper only
'''
return _binned_select_knn.BinnedSelectKnn(n_neighbours=K,
coords=coords,
bin_idx=bin_idx,
dim_bin_idx=dim_bin_idx,
bin_boundaries=bin_boundaries,
n_bins=n_bins,
bin_width=bin_width,
tf_compatible=tf_compatible
)
def BinnedSelectKnn(K : int, coords, row_splits, n_bins=None, max_bin_dims=3, tf_compatible=False, max_radius=None):
'''
max_radius is a dummy for now to make it a drop-in replacement
'''
from bin_by_coordinates_op import BinByCoordinates
from index_replacer_op import IndexReplacer
    # the following number of bins seems a reasonable estimate for good performance
    # on homogeneous point distributions, but should be subject to more tests
elems_per_rs = 1
if row_splits.shape[0] is not None:
elems_per_rs = row_splits[1]
if n_bins is None:
n_bins = tf.math.pow(tf.cast(elems_per_rs,dtype='float32')/(K/32),1/max_bin_dims)
n_bins = tf.cast(n_bins,dtype='int32')
n_bins = tf.where(n_bins<5,5,n_bins)
n_bins = tf.where(n_bins>20,20,n_bins)#just a guess
bin_coords = coords
if bin_coords.shape[-1]>max_bin_dims:
bin_coords = bin_coords[:,:max_bin_dims]
dbinning,binning, nb, bin_width, nper = BinByCoordinates(bin_coords, row_splits, n_bins=n_bins)
#if this becomes a bottleneck one could play tricks since nper and bin numbers are predefined
sorting = tf.argsort(binning)
scoords = tf.gather_nd( coords, sorting[...,tf.newaxis])
sbinning = tf.gather_nd( binning, sorting[...,tf.newaxis])
sdbinning = tf.gather_nd( dbinning, sorting[...,tf.newaxis])
#add a leading 0
bin_boundaries = tf.concat([tf.zeros([1],dtype='int32'), nper],axis=0) #row_splits[0:1]
# make it row split like
bin_boundaries = tf.cumsum(bin_boundaries)
idx,dist = _BinnedSelectKnn(K, scoords, sbinning, sdbinning, bin_boundaries=bin_boundaries,
n_bins=nb, bin_width=bin_width, tf_compatible=tf_compatible )
if row_splits.shape[0] is None:
return idx, dist
#sort back
idx = IndexReplacer(idx,sorting)
dist = tf.scatter_nd(sorting[...,tf.newaxis], dist, dist.shape)
idx = tf.scatter_nd(sorting[...,tf.newaxis], idx, idx.shape)
return idx, dist
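# A minimal usage sketch (hypothetical shapes; assumes the custom op libraries above
# have been compiled and are importable, and that eager execution is enabled). Only the
# first max_bin_dims coordinates are used for the spatial binning; the full coordinate
# tensor is what gets passed to the neighbour op.
def _binned_knn_demo():
    coords = tf.random.uniform((1000, 4), dtype='float32')   # two point clouds, 4-dim coordinates
    row_splits = tf.constant([0, 600, 1000], dtype='int32')  # ragged row splits delimiting the clouds
    idx, dist = BinnedSelectKnn(16, coords, row_splits)      # 16 neighbours per point
    return idx, dist                                         # both presumably shaped [1000, 16]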
_sknn_grad_op = tf.load_op_library('select_knn_grad.so')
@ops.RegisterGradient("BinnedSelectKnn")
def _BinnedSelectKnnGrad(op, idxgrad, dstgrad):
coords = op.inputs[0]
indices = op.outputs[0]
distances = op.outputs[1]
coord_grad = _sknn_grad_op.SelectKnnGrad(grad_distances=dstgrad, indices=indices, distances=distances, coordinates=coords)
return coord_grad,None,None,None,None,None
|
def is_leap(year):
leap = False
if year % 4 == 0:
leap=True
if year % 100 == 0 and year % 400 != 0:
leap = False
return leap
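# A few hand-checked examples of the Gregorian rule implemented above
# (divisible by 4 -> leap, except centuries, unless divisible by 400):
assert is_leap(2016) is True    # divisible by 4
assert is_leap(1900) is False   # century not divisible by 400
assert is_leap(2000) is True    # century divisible by 400
assert is_leap(2019) is False   # not divisible by 4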
if __name__ == '__main__':
year = int(input())
    print(is_leap(year))
|
from django import forms
from api.forms.abstract_form import AbstractForm
class LuxstayRoomForm(AbstractForm):
custom_session_id = forms.CharField(required=False, initial=None)
ip_address = forms.CharField(required=False, initial=None)
room_id = forms.IntegerField(required=False, initial=None)
|
# -*- coding: utf-8 -*-
"""Main module."""
from pandas import DataFrame
import pandas as pd
import sqlparse
from typing import List
Columns = List[str]
class AnalysisException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class DataFrame(DataFrame):
def parse_sql_str(self,sql:str)->Columns:
parsed_sql = sqlparse.parse(sql)
columns = []
if len(parsed_sql) > 0 :
for token in parsed_sql[0].tokens:
if token.is_keyword:
if token.normalized == 'SELECT':
pass
                if not token.is_keyword:
if token.is_group and hasattr(token,'tokens'):
for subtoken in token.tokens:
if subtoken.ttype[0] == 'Name':
columns.append(subtoken.value)
if hasattr(token,'ttype'):
if str(token.ttype)=='Token.Wildcard':
columns += list(self.columns.values)
return columns
else:
return columns
def select_str(self,expression:str) -> DataFrame:
dummy_sql = 'SELECT {0} FROM table'.format(expression)
sql_columns = self.parse_sql_str(dummy_sql)
selected_columns = []
for sql_column in sql_columns:
if sql_column in self.columns:
selected_columns.append(sql_column)
else:
                raise AnalysisException('cannot resolve {0} column'.format(sql_column))
return self[selected_columns].copy()
def select_list(self,expressions:list) -> DataFrame:
dummy_sql = 'SELECT {0} FROM table'.format(','.join(expressions))
sql_columns = self.parse_sql_str(dummy_sql)
df_columns = [column for column in sql_columns if column in self.columns]
return self[df_columns].copy()
def select(self,*expressions):
all_df = []
for expression in expressions:
if type(expression) is str:
selected_df = self.select_str(expression)
all_df.append(selected_df)
elif type(expression) is list:
selected_df = self.select_list(expression)
all_df.append(selected_df)
else:
raise NotImplementedError()
try:
union_df = pd.concat(all_df,axis=1)
except ValueError:
union_df = pd.DataFrame()
finally:
return union_df
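# A minimal usage sketch (hypothetical column names). The wildcard path is the
# easiest to trace: sqlparse reports '*' as Token.Wildcard, so every column of the
# frame is selected; selection by explicit column names depends on how sqlparse
# tokenizes the generated dummy SELECT statement.
if __name__ == "__main__":
    df = DataFrame({"a": [1, 2], "b": [3, 4]})
    print(df.select("*"))  # selects both 'a' and 'b'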
class column():
def __init__(self):
raise NotImplementedError()
class expression():
def __init__(self):
raise NotImplementedError() |
from dataclasses import dataclass
from bindings.ows.resource_type import ResourceType
__NAMESPACE__ = "http://www.w3.org/1999/xlink"
@dataclass
class Resource2(ResourceType):
class Meta:
name = "resource"
namespace = "http://www.w3.org/1999/xlink"
|
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
from .models import UserLikes
from core.serializers import PinSerializer
class UserLikesDetailSerializer(serializers.ModelSerializer):
    # TODO: switch to a lightweight pin serializer here later; there is no need to return this much information
pin = PinSerializer()
class Meta:
model = UserLikes
fields = ("pin", "id")
class UserLikeSerializer(serializers.ModelSerializer):
    user = serializers.HiddenField(default=serializers.CurrentUserDefault())  # restrict to the current user
class Meta:
model = UserLikes
validators = [
UniqueTogetherValidator(
queryset=UserLikes.objects.all(),
fields=('user', 'pin'),
message="已经点赞"
)
]
fields = ("user", "pin", "id")
|
#!/usr/bin/env python
"""
In the name of Allah, the most Gracious, the most Merciful.
▓▓▓▓▓▓▓▓▓▓
░▓ Author ▓ Abdullah <https://abdullah.today>
░▓▓▓▓▓▓▓▓▓▓
░░░░░░░░░░
░█▀█░█▀█░█░░░█░█░█▀▄░█▀█░█▀▄
░█▀▀░█░█░█░░░░█░░█▀▄░█▀█░█▀▄
░▀░░░▀▀▀░▀▀▀░░▀░░▀▀░░▀░▀░▀░▀
"""
# Install dependencies
# pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib
import imaplib
import argparse
import subprocess
import time
from apiclient import errors
from httplib2 import ServerNotFoundError
get_mail = subprocess.run(['gpg', '-dq', '/home/ak/.local/share/misc/email_address.gpg'], check=True,
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
get_passwd = subprocess.run(['gpg', '-dq', '/home/ak/.local/share/misc/mail_otp.gpg'], check=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
get_server = subprocess.run(['gpg', '-dq', '/home/ak/.local/share/misc/email_server.gpg'], check=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
mail_address, mail_passwd, mail_server = get_mail.stdout[:-1], get_passwd.stdout[:-1], get_server.stdout[:-1]
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--badge', default='\uf0e0')
parser.add_argument('-c', '--color', default='#ff69b4')
parser.add_argument('-m', '--mute', action='store_true')
args = parser.parse_args()
unread_badge = '%{F' + args.color + '}' + args.badge + ' %{F-}'
error_badge = '%{F' + args.color + '}\uf06a %{F-}'
mail_count_was = 0
def sync(mail_count):
obj = imaplib.IMAP4_SSL(mail_server, 993)
obj.login(mail_address, mail_passwd)
obj.select()
mail_count_now = len(obj.search(None, 'UnSeen')[1][0].split())
if mail_count_now > 0:
print(unread_badge + str(mail_count_now), flush=True)
else:
print(args.badge, flush=True)
if not args.mute and mail_count_was < mail_count_now and mail_count_now > 0:
subprocess.run(['/usr/bin/mbsync', '-Xa'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
subprocess.run(['/usr/bin/mpv', '--no-resume-playback', '--volume=45',
'--really-quiet', '/home/ak/.local/share/misc/phansi.aac' ])
return mail_count_now
try:
import httplib
except:
import http.client as httplib
def is_connected():
conn = httplib.HTTPConnection('mail.abdullah.today', timeout=3)
try:
conn.request('HEAD', '/')
conn.close()
return True
except:
conn.close()
return False
while True:
# time.sleep(5)
# if is_connected():
# mail_count_was = sync(mail_count_was)
# time.sleep(10)
# else:
# print('no network!')
# time.sleep(5)
try:
mail_count_was = sync(mail_count_was)
time.sleep(10)
except (errors.HttpError, ServerNotFoundError, OSError) as error:
print(error_badge + str(error), flush=True)
time.sleep(5)
|
"""
owtf.models.email_confirmation
~~~~~~~~~~~~~~~~~~~~~~
"""
from sqlalchemy import Column, Integer, Unicode, ForeignKey, DateTime
from owtf.db.model_base import Model
class EmailConfirmation(Model):
__tablename__ = "email_confirmation"
id = Column(Integer, primary_key=True, autoincrement=True)
key_value = Column(Unicode(255), nullable=True)
expiration_time = Column(DateTime)
user_id = Column(Integer, ForeignKey("users.id"))
@classmethod
def get_by_userid(cls, session, user_id):
return session.query(cls).filter_by(user_id=user_id).all()
@classmethod
def add_confirm_password(cls, session, cf):
"""Adds an user to the DB"""
new_cf = cls(
key_value=cf["key_value"],
expiration_time=cf["expiration_time"],
user_id=cf["user_id"],
)
session.add(new_cf)
session.commit()
@classmethod
def find_by_key_value(cls, session, key_value):
return session.query(cls).filter_by(key_value=key_value).first()
@classmethod
def remove_previous_all(cls, session, user_id):
email_confirmation_objects = session.query(cls).filter_by(user_id=user_id).all()
if email_confirmation_objects is not None:
for email_confirmation_obj in email_confirmation_objects:
session.delete(email_confirmation_obj)
session.commit()
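# A minimal usage sketch (hypothetical values; `session` is assumed to be an open
# SQLAlchemy session bound to the OWTF database). Shows the intended call pattern
# for issuing a fresh confirmation key for a user.
def _example_issue_confirmation(session, user_id, key_value, expiration_time):
    EmailConfirmation.remove_previous_all(session, user_id)
    EmailConfirmation.add_confirm_password(
        session,
        {"key_value": key_value, "expiration_time": expiration_time, "user_id": user_id},
    )
    return EmailConfirmation.find_by_key_value(session, key_value)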
|
from random import sample
from datetime import datetime, timedelta
from numpy import power
from numpy.random import randint, gamma
def date_to_season(date):
# Crude, but close enough
vernal_equinox = datetime(date.year, 3, 21)
summer_solstice = datetime(date.year, 6, 21)
autumnal_equinox = datetime(date.year, 9, 21)
winter_solstice = datetime(date.year, 12, 21)
if date < vernal_equinox:
season = 'winter'
elif vernal_equinox <= date < summer_solstice:
season = 'spring'
elif summer_solstice <= date < autumnal_equinox:
season = 'summer'
elif autumnal_equinox <= date < winter_solstice:
season = 'fall'
elif winter_solstice <= date:
season = 'winter'
else:
raise ValueError('ERROR - Something is wrong with date; you should never get here.')
return season
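# A few hand-checked examples of the season boundaries used above:
assert date_to_season(datetime(2021, 1, 15)) == 'winter'   # before the vernal equinox
assert date_to_season(datetime(2021, 4, 10)) == 'spring'
assert date_to_season(datetime(2021, 7, 4)) == 'summer'
assert date_to_season(datetime(2021, 10, 1)) == 'fall'
assert date_to_season(datetime(2021, 12, 25)) == 'winter'  # on/after the winter solstice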
def random_date_in_season(year, season):
vernal_equinox = datetime(year, 3, 21)
summer_solstice = datetime(year, 6, 21)
autumnal_equinox = datetime(year, 9, 21)
winter_solstice = datetime(year, 12, 21)
if season == 'spring':
rd = random_date(vernal_equinox, summer_solstice)
elif season == 'summer':
rd = random_date(summer_solstice, autumnal_equinox)
elif season == 'fall':
rd = random_date(autumnal_equinox, winter_solstice)
elif season == 'winter 1':
rd = random_date(datetime(year, 1, 1), vernal_equinox)
elif season == 'winter 2':
rd = random_date(winter_solstice, datetime(year + 1, 1, 1))
elif season == 'winter':
rd1 = random_date(datetime(year, 1, 1), vernal_equinox)
rd2 = random_date(winter_solstice, datetime(year + 1, 1, 1))
rd = sample((rd1, rd2), 1)[0]
else:
raise ValueError('ERROR - unrecognized season: ' + str(season))
return rd
def random_date(min_date, max_date):
days_between = (max_date - min_date).days
offset = randint(0, days_between)
rd = min_date + timedelta(days=offset)
return rd
def sample_gamma(mean, std, size=None):
var = power(std, 2)
k = power(mean, 2) / var
theta = var / mean
return gamma(k, theta, size)
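# Worked example of the moment matching above: for mean=10 and std=2, var=4, so the
# shape is k = 10**2 / 4 = 25 and the scale is theta = 4 / 10 = 0.4; a Gamma(25, 0.4)
# distribution indeed has mean k*theta = 10 and variance k*theta**2 = 4. A quick
# empirical check (estimates are close but not exact for a finite sample):
if __name__ == '__main__':
    draws = sample_gamma(10, 2, size=100000)
    print(draws.mean(), draws.std())  # expect roughly 10 and 2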
|
import numpy as np
import pytest
import mbuild as mb
from mbuild.formats.vasp import read_poscar, write_poscar
from mbuild.tests.base_test import BaseTest
class TestVasp(BaseTest):
"""Unit tests for Vasp POSCAR writer"""
@pytest.fixture(autouse=True)
def initdir(self, tmpdir):
tmpdir.chdir()
def test_read_write(self, gilmerite):
write_poscar(gilmerite, "test.poscar")
new_gilmerite = read_poscar("test.poscar")
assert np.allclose(gilmerite.box.lengths, new_gilmerite.box.lengths)
assert np.allclose(gilmerite.box.angles, new_gilmerite.box.angles)
assert np.allclose(gilmerite.xyz, new_gilmerite.xyz)
def test_read_write_direct(self, gilmerite):
write_poscar(gilmerite, "test.poscar", coord_style="direct")
new_gilmerite = read_poscar("test.poscar")
assert np.allclose(gilmerite.box.lengths, new_gilmerite.box.lengths)
assert np.allclose(gilmerite.box.angles, new_gilmerite.box.angles)
assert np.allclose(gilmerite.xyz, new_gilmerite.xyz)
def test_lattice_constant(self, copper_cell):
write_poscar(copper_cell, "test.poscar", lattice_constant=0.4123)
with open("test.poscar", "r") as f:
for i, line in enumerate(f):
if i == 1:
lattice_constant = np.genfromtxt(line.splitlines(True))
assert lattice_constant == 0.4123
def test_bravais(self, copper_cell):
"""Test that compound with no box has a lattice that is diagonal."""
write_poscar(copper_cell, "test.poscar")
with open("test.poscar", "r") as f:
lines = f.readlines()
bravais = np.stack(
[np.fromstring(line, sep=" ") for line in lines[2:5]]
)
# zero the diagonal
for i in range(3):
bravais[i, i] = 0
assert np.array_equal(bravais, np.zeros((3, 3)))
def test_num_elements(self, cscl_crystal):
write_poscar(cscl_crystal, "test.poscar")
with open("test.poscar", "r") as f:
for i, line in enumerate(f):
if i == 5:
elements = line.split()
assert len(elements) == 2
def test_num_atoms(self, copper_cell):
write_poscar(copper_cell, "test.poscar")
with open("test.poscar", "r") as f:
for i, line in enumerate(f):
pass
assert i + 1 == 44
@pytest.mark.parametrize("coord_type", ["direct", "cartesian"])
def test_coordinate_header(self, gilmerite, coord_type):
write_poscar(gilmerite, "test.poscar", coord_style=coord_type)
with open("test.poscar", "r") as f:
for i, line in enumerate(f):
if i == 7:
coord = line.strip()
assert coord == coord_type
def test_warning_raised(self, copper_cell):
copper_cell.box = None
with pytest.warns(UserWarning):
write_poscar(copper_cell, "test.poscar", coord_style="direct")
def test_error_raised(self, copper_cell):
with pytest.raises(ValueError):
write_poscar(copper_cell, "test.poscar", coord_style="heck")
|
import discord
from redbot.core import commands
from redbot.core.i18n import Translator, cog_i18n
import contextlib
from . import constants as sub
from .core import Core
_ = Translator("Nsfw", __file__)
@cog_i18n(_)
class Nsfw(Core):
"""
Send random NSFW images from random subreddits
If `[p]help Nsfw` or any other Nsfw commands are used in a non-nsfw channel,
you will not be able to see the list of commands for this category.
"""
@commands.command()
async def nsfwversion(self, ctx: commands.Context):
"""Get the version of the installed Nsfw cog."""
await self._version_msg(ctx, self.__version__, self.__author__)
@commands.is_owner()
@commands.group()
async def nsfwset(self, ctx: commands.Context):
"""Settings for the Nsfw cog."""
@nsfwset.command()
async def switchredditapi(self, ctx: commands.Context):
"""Toggle to use Reddit API directly with the cost of getting ratelimited fast, or use Martine API with faster results and no ratelimits problems.
Defaults to Martine API."""
val = await self.config.use_reddit_api()
await self.config.use_reddit_api.set(not val)
await ctx.send(
"Switched to Reddit API. Warning: Your bot might be ratelimited by Reddit fast."
if not val
else "Switched back to Martine API."
)
@commands.is_nsfw()
@commands.command()
@commands.cooldown(1, 3, commands.BucketType.user)
async def cleandm(self, ctx: commands.Context, number: int):
"""
        Delete a specified number of the bot's messages from your DMs.
        `<number>`: Number of messages from the bot you want
        to delete in your DMs.
"""
if ctx.guild:
return await ctx.send(_("This command works only for DM's messages !"))
async for message in ctx.channel.history(limit=number):
if message.author.id == ctx.bot.user.id:
with contextlib.suppress(discord.NotFound):
await message.delete()
await ctx.tick()
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(name="4k", aliases=["4K", "fourk"])
async def four_k(self, ctx: commands.Context):
"""Sends some 4k images from random subreddits."""
await self._send_msg(ctx, _("4k"), sub.FOUR_K)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["oface", "ofaces"])
async def ahegao(self, ctx: commands.Context):
"""Sends some ahegao images from random subreddits."""
await self._send_msg(ctx, _("ahegao"), sub.AHEGAO)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["butt", "booty"])
async def ass(self, ctx: commands.Context):
"""Sends some ass images from random subreddits."""
await self._send_msg(ctx, _("ass"), sub.ASS)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["asian"])
async def asianporn(self, ctx: commands.Context):
"""Sends some asian porn images."""
await self._send_msg(ctx, _("asian porn"), sub.ASIANPORN)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["sodomy"])
async def anal(self, ctx: commands.Context):
"""Sends some anal images/gifs from random subreddits."""
await self._send_msg(ctx, _("anal"), sub.ANAL)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command()
async def bbw(self, ctx: commands.Context):
"""Sends some bbw images."""
await self._send_msg(ctx, _("bbw"), sub.BBW)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["shibari"])
async def bdsm(self, ctx: commands.Context):
"""Sends some bdsm from random subreddits."""
await self._send_msg(ctx, _("bdsm"), sub.BDSM)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["blackdick", "bcock", "bdick", "blackcocks", "blackdicks"])
async def blackcock(self, ctx: commands.Context):
"""Sends some blackcock images from random subreddits."""
await self._send_msg(ctx, _("black cock"), sub.BLACKCOCK)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["blowjobs", "blowj", "bjob", "fellatio", "fellation"])
async def blowjob(self, ctx: commands.Context):
"""Sends some blowjob images/gifs from random subreddits."""
await self._send_msg(ctx, _("blowjob"), sub.BLOWJOB)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["boob", "boobies", "tits", "titties", "breasts", "breast"])
async def boobs(self, ctx: commands.Context):
"""Sends some boobs images from random subreddits."""
await self._send_msg(ctx, _("boobs"), sub.BOOBS)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["boless"])
async def bottomless(self, ctx: commands.Context):
"""Sends some bottomless images from random subreddits."""
await self._send_msg(ctx, _("bottomless"), sub.BOTTOMLESS)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command()
async def cosplay(self, ctx: commands.Context):
"""Sends some nsfw cosplay images from random subreddits."""
await self._send_msg(ctx, _("nsfw cosplay"), sub.COSPLAY)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["cunni", "pussyeating"])
async def cunnilingus(self, ctx: commands.Context):
"""Sends some cunnilingus images from random subreddits."""
await self._send_msg(ctx, _("cunnilingus"), sub.CUNNI)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["cum", "cums", "cumshots"])
async def cumshot(self, ctx: commands.Context):
"""Sends some cumshot images/gifs from random subreddits."""
await self._send_msg(ctx, _("cumshot"), sub.CUMSHOTS)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["deept", "deepthroating"])
async def deepthroat(self, ctx: commands.Context):
"""Sends some deepthroat images from random subreddits."""
await self._send_msg(ctx, _("deepthroat"), sub.DEEPTHROAT)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["cock"])
async def dick(self, ctx: commands.Context):
"""Sends some dicks images from random subreddits."""
await self._send_msg(ctx, _("dick"), sub.DICK)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["doublep"])
async def doublepenetration(self, ctx: commands.Context):
"""Sends some double penetration images/gifs from random subreddits."""
await self._send_msg(ctx, _("double penetration"), sub.DOUBLE_P)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command()
async def ebony(self, ctx: commands.Context):
"""Sends some ebony images."""
await self._send_msg(ctx, _("ebony"), sub.EBONY)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["facial"])
async def facials(self, ctx: commands.Context):
"""Sends some facials images from random subreddits."""
await self._send_msg(ctx, _("facials"), sub.FACIALS)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["feets", "feetish"])
async def feet(self, ctx: commands.Context):
"""Sends some feet images from random subreddits."""
await self._send_msg(ctx, _("feets"), sub.FEET)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command()
async def femdom(self, ctx: commands.Context):
"""Sends some femdom images from random subreddits."""
await self._send_msg(ctx, _("femdom"), sub.FEMDOM)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["futanari"])
async def futa(self, ctx: commands.Context):
"""Sends some futa images from random subreddits."""
await self._send_msg(ctx, _("futa"), sub.FUTA)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["gayporn"])
async def gay(self, ctx: commands.Context):
"""Sends some gay porn from random subreddits."""
await self._send_msg(ctx, _("gay porn"), sub.GAY_P)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["groups", "nudegroup", "nudegroups"])
async def group(self, ctx: commands.Context):
"""Sends some groups nudes from random subreddits."""
await self._send_msg(ctx, "groups nudes", sub.GROUPS)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command()
async def hentai(self, ctx: commands.Context):
"""Sends some hentai images/gifs from Nekobot API."""
await self._send_other_msg(
ctx,
name=_("hentai"),
arg="message",
source="Nekobot API",
url=sub.NEKOBOT_URL.format(sub.NEKOBOT_HENTAI),
)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["lesbians"])
async def lesbian(self, ctx: commands.Context):
"""Sends some lesbian gifs or images from random subreddits."""
await self._send_msg(ctx, _("lesbian"), sub.LESBIANS)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["milfs"])
async def milf(self, ctx: commands.Context):
"""Sends some milf images from random subreddits."""
await self._send_msg(ctx, _("milf"), sub.MILF)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["oralsex"])
async def oral(self, ctx: commands.Context):
"""Sends some oral gifs or images from random subreddits."""
await self._send_msg(ctx, _("oral"), sub.ORAL)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["pgif", "prongif"])
async def porngif(self, ctx: commands.Context):
"""Sends some porn gifs from Nekobot API."""
await self._send_other_msg(
ctx,
name=_("porn gif"),
arg="message",
source="Nekobot API",
url=sub.NEKOBOT_URL.format("pgif"),
)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command()
async def public(self, ctx: commands.Context):
"""Sends some public nude images from random subreddits."""
await self._send_msg(ctx, _("public nude"), sub.PUBLIC)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["vagina", "puss"])
async def pussy(self, ctx: commands.Context):
"""Sends some pussy nude images from random subreddits."""
await self._send_msg(ctx, _("pussy"), sub.PUSSY)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command()
async def realgirls(self, ctx: commands.Context):
"""Sends some real girls images from random subreddits."""
await self._send_msg(ctx, _("real nudes"), sub.REAL_GIRLS)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["redheads", "ginger", "gingers"])
async def redhead(self, ctx: commands.Context):
"""Sends some red heads images from random subreddits."""
await self._send_msg(ctx, _("red head"), sub.REDHEADS)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["r34"])
async def rule34(self, ctx: commands.Context):
"""Sends some rule34 images from random subreddits."""
await self._send_msg(ctx, _("rule34"), sub.RULE_34)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["squirts"])
async def squirt(self, ctx: commands.Context):
"""Sends some squirts images from random subreddits."""
await self._send_msg(ctx, _("squirt"), sub.SQUIRTS)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["thighs", "legs"])
async def thigh(self, ctx: commands.Context):
"""Sends some thighs images from random subreddits."""
await self._send_msg(ctx, _("thigh"), sub.THIGHS)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["groupsex"])
async def threesome(self, ctx: commands.Context):
"""Sends some threesome images."""
await self._send_msg(ctx, _("threesome"), sub.THREESOME)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["trap", "shemale", "shemales"])
async def trans(self, ctx: commands.Context):
"""Sends some trans from random subreddits."""
await self._send_msg(ctx, _("trans"), sub.TRANS)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["wild", "gwild"])
async def gonewild(self, ctx: commands.Context):
"""Sends some gonewild images from random subreddits."""
await self._send_msg(ctx, _("gonewild"), sub.WILD)
@commands.is_nsfw()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.command(aliases=["yiffs"])
async def yiff(self, ctx: commands.Context):
"""Sends some yiff images from random subreddits."""
await self._send_msg(ctx, _("yiff"), sub.YIFF)
|
# ===-----------------------------------------------------------*- Python -*-===
#
# Part of the LOMP project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# ===------------------------------------------------------------------------===
import BMutils
import sys
scriptDir = sys.path[0]
# This is sneakier: we invoke another Python script because we want to take
# measurements at different numbers of threads, but our runtime doesn't allow
# team size changes, so we have to run each test as a separate execution.
libraries = ["LLVM", "LOMP"]
# libraries = ["LLVM"]
# libraries = ["LOMP"]
schedules = ("static", "static1", "monotonic", "nonmonotonic", "guided")
runDesc = BMutils.runDescription(
"python3 " + scriptDir + "/runSchedTest.py",
{"increasing_": schedules, "square_": schedules, "random_": schedules},
{"increasing_": libraries, "square_": libraries, "random_": libraries,},
"Sched_",
)
BMutils.runBM(runDesc)
|
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()
version = '0.3.5b0'
requires = [
'eduid-am >= 0.6.3b5',
'eduid-userdb >= 0.4.0b12',
]
testing_extras = [
'nose==1.3.7',
'nosexcover==1.0.11',
'coverage==4.5.1',
'freezegun==0.3.10'
]
setup(name='eduid-dashboard-amp',
version=version,
description='eduID Dashboard Attribute Manager Plugin',
long_description=README + '\n\n' + CHANGES,
# TODO: add classifiers
classifiers=[
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
],
keywords='',
author='SUNET',
url='https://github.com/SUNET/eduid-dashboard-amp',
license='BSD',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
extras_require={
'testing': testing_extras,
},
test_suite='eduid_dashboard_amp',
entry_points="""
[eduid_am.attribute_fetcher]
eduid_dashboard = eduid_dashboard_amp:attribute_fetcher
[eduid_am.plugin_init]
eduid_dashboard = eduid_dashboard_amp:plugin_init
""",
)
|
import json
import logging
from django.http import JsonResponse
from django.contrib.contenttypes.models import ContentType
from rest_framework.response import Response
from rest_framework import (
viewsets,
status,
)
from rest_framework.generics import (
UpdateAPIView,
ListAPIView,
)
from rest_framework.permissions import (
IsAuthenticated,
)
from .serializers import (
ReviewLogListSerializer,
ReviewLogUpdateSerializer,
)
from . import serializers_singleton
from logs.models import ReviewLog, query_by_args
logger = logging.getLogger('review')
class ReviewLogViewSet(viewsets.ModelViewSet):
queryset = ReviewLog.objects.all()
serializer_class = ReviewLogListSerializer
permission_classes = [IsAuthenticated]
    # override list() to return the response format expected by jQuery DataTables
def list(self, request, **kwargs):
reviews = query_by_args(request, **request.query_params)
serializer = ReviewLogListSerializer(reviews['items'], many=True)
result = dict()
result['data'] = serializer.data
result['draw'] = reviews['draw']
result['recordsTotal'] = reviews['total']
result['recordsFiltered'] = reviews['count']
return Response(result, status=status.HTTP_200_OK, template_name=None, content_type=None)
class ReviewLogUpdateAPIView(UpdateAPIView):
queryset = ReviewLog.objects.all()
serializer_class = ReviewLogUpdateSerializer
permission_classes = [IsAuthenticated]
def get_object(self, user, object_id, content_type):
return ReviewLog.objects.filter(user=user, object_id=object_id, content_type=content_type).first()
def patch(self, request):
data = json.loads(request.data.get('data'))
object_id = data.get('object_id')
app_label = data.get('app_label')
model = data.get('model')
data['user'] = request.user.id
content_type = ContentType.objects.filter(app_label=app_label, model=model).first()
data['content_type'] = content_type.id
obj = self.get_object(request.user, object_id, content_type)
serializer = ReviewLogUpdateSerializer(obj, data=data, partial=True)
if serializer.is_valid():
serializer.save()
return JsonResponse(data=serializer.data)
else:
logger.exception(serializer.errors, extra={
'user': request.user,
'content_type': content_type,
'object_id': object_id,
})
return JsonResponse(data=serializer.errors, safe=False)
class ReviewLogSingletonListAPIView(ListAPIView):
serializer_class = serializers_singleton.ReviewLogSerializer
queryset = ReviewLog.objects.all()
permission_classes = [IsAuthenticated]
|
# ## ML-Agent Learning (SAC)
# Contains an implementation of SAC as described in https://arxiv.org/abs/1801.01290
# and implemented in https://github.com/hill-a/stable-baselines
import logging
from collections import defaultdict
from typing import Dict
import os
import numpy as np
from mlagents_envs.timers import timed
from mlagents.trainers.tf_policy import TFPolicy
from mlagents.trainers.sac.policy import SACPolicy
from mlagents.trainers.rl_trainer import RLTrainer
from mlagents.trainers.trajectory import Trajectory, SplitObservations
from mlagents.trainers.brain import BrainParameters
logger = logging.getLogger("mlagents.trainers")
BUFFER_TRUNCATE_PERCENT = 0.8
class SACTrainer(RLTrainer):
"""
The SACTrainer is an implementation of the SAC algorithm, with support
for discrete actions and recurrent networks.
"""
def __init__(
self,
brain_name: str,
reward_buff_cap: int,
trainer_parameters: dict,
training: bool,
load: bool,
seed: int,
run_id: str,
):
"""
Responsible for collecting experiences and training SAC model.
:param brain_name: The name of the brain associated with trainer config
:param reward_buff_cap: Max reward history to track in the reward buffer
:param trainer_parameters: The parameters for the trainer (dictionary).
:param training: Whether the trainer is set for training.
:param load: Whether the model should be loaded.
:param seed: The seed the model will be initialized with
        :param run_id: The identifier of the current run
"""
super().__init__(
brain_name, trainer_parameters, training, run_id, reward_buff_cap
)
self.param_keys = [
"batch_size",
"buffer_size",
"buffer_init_steps",
"hidden_units",
"learning_rate",
"init_entcoef",
"max_steps",
"normalize",
"num_update",
"num_layers",
"time_horizon",
"sequence_length",
"summary_freq",
"tau",
"use_recurrent",
"summary_path",
"memory_size",
"model_path",
"reward_signals",
"vis_encode_type",
]
self.check_param_keys()
self.load = load
self.seed = seed
self.policy: TFPolicy = None
self.step = 0
self.train_interval = (
trainer_parameters["train_interval"]
if "train_interval" in trainer_parameters
else 1
)
self.reward_signal_updates_per_train = (
trainer_parameters["reward_signals"]["reward_signal_num_update"]
if "reward_signal_num_update" in trainer_parameters["reward_signals"]
else trainer_parameters["num_update"]
)
self.checkpoint_replay_buffer = (
trainer_parameters["save_replay_buffer"]
if "save_replay_buffer" in trainer_parameters
else False
)
def save_model(self, name_behavior_id: str) -> None:
"""
Saves the model. Overrides the default save_model since we want to save
the replay buffer as well.
"""
self.policy.save_model(self.get_step)
if self.checkpoint_replay_buffer:
self.save_replay_buffer()
def save_replay_buffer(self) -> None:
"""
Save the training buffer's update buffer to a pickle file.
"""
filename = os.path.join(
self.trainer_parameters["model_path"], "last_replay_buffer.hdf5"
)
logger.info("Saving Experience Replay Buffer to {}".format(filename))
with open(filename, "wb") as file_object:
self.update_buffer.save_to_file(file_object)
def load_replay_buffer(self) -> None:
"""
Loads the last saved replay buffer from a file.
"""
filename = os.path.join(
self.trainer_parameters["model_path"], "last_replay_buffer.hdf5"
)
logger.info("Loading Experience Replay Buffer from {}".format(filename))
with open(filename, "rb+") as file_object:
self.update_buffer.load_from_file(file_object)
logger.info(
"Experience replay buffer has {} experiences.".format(
self.update_buffer.num_experiences
)
)
def process_trajectory(self, trajectory: Trajectory) -> None:
"""
Takes a trajectory and processes it, putting it into the replay buffer.
"""
last_step = trajectory.steps[-1]
agent_id = trajectory.agent_id # All the agents should have the same ID
# Add to episode_steps
self.episode_steps[agent_id] += len(trajectory.steps)
agent_buffer_trajectory = trajectory.to_agentbuffer()
# Update the normalization
if self.is_training:
self.policy.update_normalization(agent_buffer_trajectory["vector_obs"])
# Evaluate all reward functions for reporting purposes
self.collected_rewards["environment"][agent_id] += np.sum(
agent_buffer_trajectory["environment_rewards"]
)
for name, reward_signal in self.policy.reward_signals.items():
evaluate_result = reward_signal.evaluate_batch(
agent_buffer_trajectory
).scaled_reward
# Report the reward signals
self.collected_rewards[name][agent_id] += np.sum(evaluate_result)
# Get all value estimates for reporting purposes
value_estimates = self.policy.get_batched_value_estimates(
agent_buffer_trajectory
)
for name, v in value_estimates.items():
self.stats_reporter.add_stat(
self.policy.reward_signals[name].value_name, np.mean(v)
)
# Bootstrap using the last step rather than the bootstrap step if max step is reached.
# Set last element to duplicate obs and remove dones.
if last_step.max_step:
vec_vis_obs = SplitObservations.from_observations(last_step.obs)
for i, obs in enumerate(vec_vis_obs.visual_observations):
agent_buffer_trajectory["next_visual_obs%d" % i][-1] = obs
if vec_vis_obs.vector_observations.size > 1:
agent_buffer_trajectory["next_vector_in"][
-1
] = vec_vis_obs.vector_observations
agent_buffer_trajectory["done"][-1] = False
# Append to update buffer
agent_buffer_trajectory.resequence_and_append(
self.update_buffer, training_length=self.policy.sequence_length
)
if trajectory.done_reached:
self._update_end_episode_stats(
agent_id, self.get_policy(trajectory.behavior_id)
)
def is_ready_update(self) -> bool:
"""
Returns whether or not the trainer has enough elements to run update model
:return: A boolean corresponding to whether or not update_model() can be run
"""
return (
self.update_buffer.num_experiences >= self.trainer_parameters["batch_size"]
and self.step >= self.trainer_parameters["buffer_init_steps"]
)
@timed
def update_policy(self) -> None:
"""
If train_interval is met, update the SAC policy given the current reward signals.
If reward_signal_train_interval is met, update the reward signals from the buffer.
"""
if self.step % self.train_interval == 0:
self.update_sac_policy()
self.update_reward_signals()
def create_policy(self, brain_parameters: BrainParameters) -> TFPolicy:
policy = SACPolicy(
self.seed,
brain_parameters,
self.trainer_parameters,
self.is_training,
self.load,
)
for _reward_signal in policy.reward_signals.keys():
self.collected_rewards[_reward_signal] = defaultdict(lambda: 0)
# Load the replay buffer if load
if self.load and self.checkpoint_replay_buffer:
try:
self.load_replay_buffer()
except (AttributeError, FileNotFoundError):
logger.warning(
"Replay buffer was unable to load, starting from scratch."
)
logger.debug(
"Loaded update buffer with {} sequences".format(
self.update_buffer.num_experiences
)
)
return policy
def update_sac_policy(self) -> None:
"""
Uses demonstration_buffer to update the policy.
The reward signal generators are updated using different mini batches.
        To imitate http://arxiv.org/abs/1809.02925 and similar papers, where the policy is updated
        N times and then the reward signals are updated N times, set reward_signal_updates_per_train
        greater than 1; in that case the reward signals are not updated in parallel with the policy.
"""
self.cumulative_returns_since_policy_update.clear()
n_sequences = max(
int(self.trainer_parameters["batch_size"] / self.policy.sequence_length), 1
)
num_updates = self.trainer_parameters["num_update"]
batch_update_stats: Dict[str, list] = defaultdict(list)
for _ in range(num_updates):
logger.debug("Updating SAC policy at step {}".format(self.step))
buffer = self.update_buffer
if (
self.update_buffer.num_experiences
>= self.trainer_parameters["batch_size"]
):
sampled_minibatch = buffer.sample_mini_batch(
self.trainer_parameters["batch_size"],
sequence_length=self.policy.sequence_length,
)
# Get rewards for each reward
for name, signal in self.policy.reward_signals.items():
sampled_minibatch[
"{}_rewards".format(name)
] = signal.evaluate_batch(sampled_minibatch).scaled_reward
update_stats = self.policy.update(sampled_minibatch, n_sequences)
for stat_name, value in update_stats.items():
batch_update_stats[stat_name].append(value)
        # Truncate the update buffer if necessary. Truncate more than strictly needed
        # to avoid truncating a large buffer at each update.
if self.update_buffer.num_experiences > self.trainer_parameters["buffer_size"]:
self.update_buffer.truncate(
int(self.trainer_parameters["buffer_size"] * BUFFER_TRUNCATE_PERCENT)
)
for stat, stat_list in batch_update_stats.items():
self.stats_reporter.add_stat(stat, np.mean(stat_list))
bc_module = self.policy.bc_module
if bc_module:
update_stats = bc_module.update()
for stat, val in update_stats.items():
self.stats_reporter.add_stat(stat, val)
def update_reward_signals(self) -> None:
"""
Iterate through the reward signals and update them. Unlike in PPO,
do it separate from the policy so that it can be done at a different
interval.
This function should only be used to simulate
http://arxiv.org/abs/1809.02925 and similar papers, where the policy is updated
N times, then the reward signals are updated N times. Normally, the reward signal
and policy are updated in parallel.
"""
buffer = self.update_buffer
num_updates = self.reward_signal_updates_per_train
n_sequences = max(
int(self.trainer_parameters["batch_size"] / self.policy.sequence_length), 1
)
batch_update_stats: Dict[str, list] = defaultdict(list)
for _ in range(num_updates):
# Get minibatches for reward signal update if needed
reward_signal_minibatches = {}
for name, signal in self.policy.reward_signals.items():
logger.debug("Updating {} at step {}".format(name, self.step))
# Some signals don't need a minibatch to be sampled - so we don't!
if signal.update_dict:
reward_signal_minibatches[name] = buffer.sample_mini_batch(
self.trainer_parameters["batch_size"],
sequence_length=self.policy.sequence_length,
)
update_stats = self.policy.update_reward_signals(
reward_signal_minibatches, n_sequences
)
for stat_name, value in update_stats.items():
batch_update_stats[stat_name].append(value)
for stat, stat_list in batch_update_stats.items():
self.stats_reporter.add_stat(stat, np.mean(stat_list))
def add_policy(self, name_behavior_id: str, policy: TFPolicy) -> None:
"""
Adds policy to trainer.
        :param name_behavior_id: Behavior identifier that the policy belongs to.
        :param policy: The policy to associate with this trainer.
"""
if self.policy:
logger.warning(
"add_policy has been called twice. {} is not a multi-agent trainer".format(
self.__class__.__name__
)
)
self.policy = policy
def get_policy(self, name_behavior_id: str) -> TFPolicy:
"""
Gets policy from trainer associated with name_behavior_id
:param name_behavior_id: full identifier of policy
"""
return self.policy
|
rules = {
'A': {
1: 'To enter the competition you MUST create an audio track',
2: 'Every person can enter the competition with one entry only. You may submit your',
3: 'Your audio track MUST be downloadable without access control and the audio file',
4: 'The length of your submitted audio file MUST be no less than one',
5: 'Your audio track MUST be created specifically for this competition, not released',
6: 'You MUST license your audio track under a',
7: 'Your track SHOULD be a musical piece in the broadest sense',
8: 'Your track SHOULD be in the style of a well-known artist or song',
9: 'Your track MUST incorporate at least one recording of a physical sound source',
10: 'You MAY only use Open Source software',
11: 'The following exceptions to rule 10. are granted',
12: 'When submitting your entry, you MUST list all plug-ins you used for creating',
13: 'To be eligible for winning, you MUST vote on the other submissions into the',
14: 'You SHOULD make the patches you created for the ',
},
'B': {
1: 'Software regarded as Open Source MUST be released under and conform to ',
2: 'This means but is not limited to that the source code for the software MUST be',
3: 'The software SHOULD be packaged for a current version of a major Linux',
},
'C': {
1: 'The voting period starts immediately after the final submission date for ',
2: 'Voting is open to every person, contestants MUST vote to be able to win.',
3: 'Everybody has one vote, which consist of fifteen',
4: 'The vote may be changed (but not retracted completely) as often as desired during',
5: 'Voters must go to vote and provide a ',
},
'D': {
1: 'Only those contestants can win, which are not disqualified by the competition',
2: 'The contestant who gets the most points wins. In case of a tie, the entry which has',
3: 'Second and third place are awarded according to the same rules, where the loser of a',
4: 'The final decision in case of disputes lies with the competition organizer.',
}
} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
"""
Helper functions for Alexa Media Player.
For more details about this platform, please refer to the documentation at
https://community.home-assistant.io/t/echo-devices-alexa-as-media-player-testers-needed/58639
"""
import logging
from typing import Any, Callable, List, Text
from alexapy import AlexapyLoginError, hide_email
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.entity_component import EntityComponent
from . import DATA_ALEXAMEDIA
_LOGGER = logging.getLogger(__name__)
async def add_devices(
account: Text,
devices: List[EntityComponent],
add_devices_callback: Callable,
include_filter: List[Text] = None,
exclude_filter: List[Text] = None,
) -> bool:
"""Add devices using add_devices_callback."""
    include_filter = include_filter or []
    exclude_filter = exclude_filter or []
new_devices = []
for device in devices:
if (
include_filter
and device.name not in include_filter
or exclude_filter
and device.name in exclude_filter
):
_LOGGER.debug("%s: Excluding device: %s", account, device)
continue
new_devices.append(device)
devices = new_devices
if devices:
_LOGGER.debug("%s: Adding %s", account, devices)
try:
add_devices_callback(devices, False)
return True
except HomeAssistantError as exception_:
message = exception_.message # type: str
if message.startswith("Entity id already exists"):
_LOGGER.debug("%s: Device already added: %s", account, message)
else:
_LOGGER.debug(
"%s: Unable to add devices: %s : %s", account, devices, message
)
except BaseException as ex:
template = "An exception of type {0} occurred." " Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
_LOGGER.debug("%s: Unable to add devices: %s", account, message)
else:
return True
return False
def retry_async(
limit: int = 5, delay: float = 1, catch_exceptions: bool = True
) -> Callable:
"""Wrap function with retry logic.
The function will retry until true or the limit is reached. It will delay
for the period of time specified exponentialy increasing the delay.
Parameters
----------
limit : int
The max number of retries.
delay : float
The delay in seconds between retries.
catch_exceptions : bool
Whether exceptions should be caught and treated as failures or thrown.
Returns
-------
def
Wrapped function.
"""
def wrap(func) -> Callable:
import functools
import asyncio
@functools.wraps(func)
async def wrapper(*args, **kwargs) -> Any:
_LOGGER.debug(
"%s.%s: Trying with limit %s delay %s catch_exceptions %s",
func.__module__[func.__module__.find(".") + 1 :],
func.__name__,
limit,
delay,
catch_exceptions,
)
retries: int = 0
result: bool = False
next_try: int = 0
while not result and retries < limit:
if retries != 0:
next_try = delay * 2 ** retries
await asyncio.sleep(next_try)
retries += 1
try:
result = await func(*args, **kwargs)
except Exception as ex: # pylint: disable=broad-except
if not catch_exceptions:
raise
template = "An exception of type {0} occurred." " Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
_LOGGER.debug(
"%s.%s: failure caught due to exception: %s",
func.__module__[func.__module__.find(".") + 1 :],
func.__name__,
message,
)
_LOGGER.debug(
"%s.%s: Try: %s/%s after waiting %s seconds result: %s",
func.__module__[func.__module__.find(".") + 1 :],
func.__name__,
retries,
limit,
next_try,
result,
)
return result
return wrapper
return wrap
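# Illustrative usage sketch for retry_async (not part of the original module; the
# coroutine name and client object below are hypothetical). The wrapped coroutine is
# expected to return a truthy value on success; falsy results or caught exceptions
# trigger another attempt after an exponentially increasing delay.
#     @retry_async(limit=3, delay=2)
#     async def _attempt_connect(client) -> bool:
#         return await client.connect()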
def _catch_login_errors(func) -> Callable:
"""Detect AlexapyLoginError and attempt relogin."""
import functools
@functools.wraps(func)
async def wrapper(*args, **kwargs) -> Any:
try:
result = await func(*args, **kwargs)
except AlexapyLoginError as ex: # pylint: disable=broad-except
template = "An exception of type {0} occurred." " Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
_LOGGER.debug(
"%s.%s: detected bad login: %s",
func.__module__[func.__module__.find(".") + 1 :],
func.__name__,
message,
)
instance = args[0]
if hasattr(instance, "_login"):
login = instance._login
email = login.email
hass = instance.hass if instance.hass else None
if hass and (
"configurator"
not in (hass.data[DATA_ALEXAMEDIA]["accounts"][email])
or not (
hass.data[DATA_ALEXAMEDIA]["accounts"][email]["configurator"]
)
):
config_entry = hass.data[DATA_ALEXAMEDIA]["accounts"][email][
"config_entry"
]
setup_alexa = hass.data[DATA_ALEXAMEDIA]["accounts"][email][
"setup_alexa"
]
test_login_status = hass.data[DATA_ALEXAMEDIA]["accounts"][email][
"test_login_status"
]
_LOGGER.debug(
"%s: Alexa API disconnected; attempting to relogin",
hide_email(email),
)
if login.status:
await login.reset()
await login.login()
await test_login_status(hass, config_entry, login, setup_alexa)
return None
return result
return wrapper
def _existing_serials(hass, login_obj) -> List:
email: Text = login_obj.email
existing_serials = (
list(
hass.data[DATA_ALEXAMEDIA]["accounts"][email]["entities"][
"media_player"
].keys()
)
if "entities" in (hass.data[DATA_ALEXAMEDIA]["accounts"][email])
else []
)
for serial in existing_serials:
device = hass.data[DATA_ALEXAMEDIA]["accounts"][email]["devices"][
"media_player"
][serial]
if "appDeviceList" in device and device["appDeviceList"]:
apps = list(
map(
lambda x: x["serialNumber"] if "serialNumber" in x else None,
device["appDeviceList"],
)
)
# _LOGGER.debug("Combining %s with %s",
# existing_serials, apps)
existing_serials = existing_serials + apps
return existing_serials
|
class SpotNotFound(Exception):
"""
Exception raised when spot cannot be found
"""
pass
class SpotDataOverflow(Exception):
"""
Exception raised when spot data overflows
"""
pass
|
import argparse
from restli.scaffolders import ProjectScaffolder
from restli.generators import PegasusGenerator, ResourceGenerator
def create_parser():
parser = argparse.ArgumentParser(description="A command line tool for restli projects.")
    parser.add_argument('-s', '--scaffold', help="The name of your restli project")
parser.add_argument('-g', '--generate', help="The name of the pegasus / resource file")
parser.add_argument('-t', '--type', help="Type of the restli pegasus schema", default="record")
parser.add_argument('-f', '--fields', help="The fields included in your pegasus schema", default="id:long")
parser.add_argument('-d', '--doc', help="The doc for the pegasus schema")
parser.add_argument('-ns', '--namespace', help="The namespace for the pegasus / resource file")
parser.add_argument('-m', '--methods', help="The CRUD methods to implement for your resource", default="get update create delete")
return parser
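# Example invocations (hypothetical script and schema names, shown for illustration only):
#   python cli.py --scaffold my-restli-project
#   python cli.py --generate Album --type record --fields "id:long,title:string" --namespace com.example.albums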
def main():
parser = create_parser()
args = parser.parse_args()
if args.scaffold:
scaffolder = ProjectScaffolder(args)
scaffolder.scaffold()
if args.generate:
pegasus_generator = PegasusGenerator(args)
pegasus_generator.generate()
resource_generator = ResourceGenerator(args)
resource_generator.generate()
if __name__ == "__main__":
main()
|
# Source : https://leetcode.com/problems/longest-uncommon-subsequence-i/#/description
# Author : Han Zichi
# Date : 2017-04-23
class Solution(object):
def findLUSlength(self, a, b):
"""
:type a: str
:type b: str
:rtype: int
"""
if a == b:
return -1
if a.find(b) != -1:
return len(a)
if b.find(a) != -1:
return len(b)
return max(len(a), len(b)) |
"""Evaluate the accuracy of the retriever module."""
import unicodedata
import regex as re
import spacy
from typing import List
from sacremoses import MosesDetokenizer
from utils.text_process_tool import spacy_tokenize, normalize
md = MosesDetokenizer(lang='en')
def regex_match(text, pattern):
"""Test if a regex pattern is contained within a text."""
try:
pattern = re.compile(
pattern,
flags=re.IGNORECASE + re.UNICODE + re.MULTILINE,
)
except BaseException:
return False
return pattern.search(text) is not None
def has_answer(answers, retrieved_text, match='string', tokenized: bool = False):
"""Check if retrieved_text contains an answer string.
If `match` is string, token matching is done between the text and answer.
If `match` is regex, we search the whole text with the regex.
"""
if not isinstance(answers, list):
answers = [answers]
if match == 'string':
if tokenized:
text = md.detokenize(retrieved_text)
t_text = retrieved_text
else:
text = retrieved_text
t_text = spacy_tokenize(retrieved_text, uncase=True)
for single_answer in answers:
single_answer = spacy_tokenize(single_answer, uncase=True)
for i in range(0, len(t_text) - len(single_answer) + 1):
if single_answer == t_text[i: i + len(single_answer)]:
return True
for single_answer in answers: # If raw covered.
if single_answer in text:
return True
elif match == 'regex':
if tokenized:
text = md.detokenize(retrieved_text)
else:
text = retrieved_text
# Answer is a regex
single_answer = normalize(answers[0])
if regex_match(text, single_answer):
return True
return False
def utest_normal():
paragraph = "I'm name is eason"
answer = "Eason"
print(has_answer(answer, paragraph, match='string', tokenized=False))
def utest_regex():
# {"question": "How deep is Crater Lake?",
# "answer": ["1\\s?,\\s?932 feet|1,?949\\s*f(ee|oo)?t|594\\s*m|593 m|593\\.0|594\\.0552"]}
# paragraph = "When is Fashion week in NYC?"
# paragraph = "1 , 932 feet"
# paragraph = "120 km/h"
paragraph = ['3', ',', '390', 'km']
# answer = "Sept?(ember)?|Feb(ruary)?"
# answer = "1\\s?,\\s?932 feet|1,?949\\s*f(ee|oo)?t|594\\s*m|593 m|593\\.0|594\\.0552"
# answer = "120\\s*km/?h|75\\s*mph"
answer = "diameter.*(4,?21[0-9]\\s*miles|6[78][0-9][0-9]\\s*(km|kilometers))|radius.*(2,?106\\s*miles|3,?390\\s*(km|kilometers))|3390km|3390 km|3\\,389\\.5 km"
print(has_answer(answer, paragraph, match='regex', tokenized=True))
if __name__ == '__main__':
utest_regex() |
from app.common.library import cognito
from app.common.models import User
def reset_password_confirm(username, confirmation_code, new_password):
client = cognito.create_client()
# pylint:disable=unused-variable
resp, msg = cognito.idp_confirm_forgot_password(
client, username, confirmation_code, new_password
)
if msg is not None:
return {"message": msg, "error": True, "success": False, "data": None}
user = User.objects.get(username=username)
user.is_active = True
user.set_password(new_password)
user.save()
return {
"message": "success",
"error": False,
"success": True,
"data": None,
}
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore as ms
from mindspore import context, Tensor, Parameter
from mindspore.common.api import _executor
from mindspore.nn import Cell, TrainOneStepCell, Momentum
from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
class Net(Cell):
def __init__(self, strategy1=None, strategy2=None, strategy3=None):
super().__init__()
self.gatherv2 = P.GatherV2().set_strategy(strategy1)
self.gatherv2.add_prim_attr("manual_split", ((1, 0), (7, 1)))
self.mul = P.Mul().set_strategy(strategy2)
self.reshape = P.Reshape()
self.matmul = P.MatMul().set_strategy(strategy3)
self.matmul.add_prim_attr("forward_reduce_scatter", True)
self.param = Parameter(initializer("ones", (8, 64), ms.float32), name="gatherv2_param")
self.mul_weight = Parameter(initializer("ones", (2, 4, 64), ms.float32), name="mul_weight")
self.matmul_weight = Parameter(initializer("ones", (256, 16), ms.float32), name="matmul_weight")
def construct(self, x, b):
out = self.gatherv2(self.param, x, 0)
out = self.mul(out, self.mul_weight)
out = self.reshape(out, (2, 256))
out = self.matmul(out, self.matmul_weight)
return out
_x = Tensor(np.ones([2, 4]), dtype=ms.int32)
_b = Tensor(np.ones([64, 8]), dtype=ms.float32)
def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
_executor.compile(train_net, _x, _b)
context.reset_auto_parallel_context()
def test_neg_data_parallel():
context.set_context(save_graphs=True)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
compile_net(net)
|
list1 = [2, 8, "Aditya", 9, 78, 55, 3, 4, 1, "Ani"]
for x in list1:
if str(x).isnumeric() and x > 6:
print(x)
|
#!/usr/bin/env python
from os import path
from setuptools import setup, find_packages
BASE_DIR = path.abspath(path.dirname(__file__))
def read(f):
with open(path.join(BASE_DIR, f)) as fh:
return fh.read()
def get_version():
version = read('VERSION').strip()
if not version:
raise RuntimeError('Cannot find version information')
return version
install_requires = []
setup(
name='secure',
version=get_version(),
description='Provides standard interface for hashing, encrypting, decrypting and verifying user input',
long_description=read('README.md'),
author='Integralist',
url='https://github.com/integralist/Python-Encryption',
packages=find_packages(),
install_requires=install_requires,
keywords='integralist hash hashing encryption decryption library scrypt'
)
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import json
import logging
import mimetypes
import os
import re
import time
import zipfile
from collections import OrderedDict
import html5lib
from cheroot import wsgi
from django.conf import settings
from django.core.cache import cache
from django.core.handlers.wsgi import WSGIRequest
from django.http import HttpResponse
from django.http import HttpResponseNotAllowed
from django.http import HttpResponseNotFound
from django.http import HttpResponseNotModified
from django.http import HttpResponsePermanentRedirect
from django.http.response import FileResponse
from django.http.response import StreamingHttpResponse
from django.template import Context
from django.template.engine import Engine
from django.utils.cache import patch_response_headers
from django.utils.encoding import force_str
from django.utils.http import http_date
from kolibri.core.content.errors import InvalidStorageFilenameError
from kolibri.core.content.utils.paths import get_content_storage_file_path
from kolibri.core.content.utils.paths import get_hashi_base_path
from kolibri.core.content.utils.paths import get_hashi_html_filename
from kolibri.core.content.utils.paths import get_hashi_js_filename
from kolibri.core.content.utils.paths import get_hashi_path
from kolibri.core.content.utils.paths import get_zip_content_base_path
logger = logging.getLogger(__name__)
def add_security_headers(request, response):
response["Access-Control-Allow-Origin"] = "*"
response["Access-Control-Allow-Methods"] = "GET, OPTIONS"
requested_headers = request.META.get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS", "")
if requested_headers:
response["Access-Control-Allow-Headers"] = requested_headers
# restrict CSP to only allow resources to be loaded from self, to prevent info leakage
# (e.g. via passing user info out as GET parameters to an attacker's server), or inadvertent data usage
response[
"Content-Security-Policy"
] = "default-src 'self' 'unsafe-inline' 'unsafe-eval' data: blob:"
return response
def django_response_to_wsgi(response, environ, start_response):
status = "%d %s" % (response.status_code, response.reason_phrase)
response_headers = [(str(k), str(v)) for k, v in response.items()]
for c in response.cookies.values():
response_headers.append((str("Set-Cookie"), str(c.output(header=""))))
start_response(force_str(status), response_headers)
if getattr(response, "file_to_stream", None) is not None and environ.get(
"wsgi.file_wrapper"
):
response = environ["wsgi.file_wrapper"](response.file_to_stream)
return response
template_engine = Engine(
dirs=[os.path.join(os.path.dirname(__file__), "./templates/content")],
libraries={"staticfiles": "django.contrib.staticfiles.templatetags.staticfiles"},
)
h5p_template = template_engine.get_template("h5p.html")
hashi_template = template_engine.get_template("hashi.html")
allowed_methods = set(["GET", "OPTIONS"])
def _hashi_response_from_request(request):
if request.method not in allowed_methods:
return HttpResponseNotAllowed(allowed_methods)
filename = request.path_info.lstrip("/")
if filename.split(".")[-1] != "html":
return HttpResponseNotFound()
if filename != get_hashi_html_filename():
return HttpResponsePermanentRedirect(get_hashi_path())
if request.method == "OPTIONS":
return HttpResponse()
developer_mode = getattr(settings, "DEVELOPER_MODE", False)
# if client has a cached version, use that we can safely assume nothing has changed
# as we provide a unique path per compiled hashi JS file.
if request.META.get("HTTP_IF_MODIFIED_SINCE") and not developer_mode:
return HttpResponseNotModified()
CACHE_KEY = "HASHI_VIEW_RESPONSE_{}".format(get_hashi_html_filename())
cached_response = cache.get(CACHE_KEY)
if cached_response is not None and not developer_mode:
return cached_response
content = hashi_template.render(
Context(
{
"hashi_file_path": "content/{filename}".format(
filename=get_hashi_js_filename()
)
}
)
)
response = HttpResponse(content, content_type="text/html")
response["Content-Length"] = len(response.content)
response["Last-Modified"] = http_date(time.time())
patch_response_headers(response, cache_timeout=YEAR_IN_SECONDS)
cache.set(CACHE_KEY, response, YEAR_IN_SECONDS)
return response
def get_hashi_view_response(environ):
request = WSGIRequest(environ)
response = _hashi_response_from_request(request)
add_security_headers(request, response)
return response
def hashi_view(environ, start_response):
response = get_hashi_view_response(environ)
return django_response_to_wsgi(response, environ, start_response)
def load_json_from_zipfile(zf, filepath):
with zf.open(filepath, "r") as f:
return json.load(f)
def recursive_h5p_dependencies(zf, data, prefix=""):
jsfiles = OrderedDict()
cssfiles = OrderedDict()
# load the dependencies, recursively, to extract their JS and CSS paths to include
for dep in data.get("preloadedDependencies", []):
packagepath = "{machineName}-{majorVersion}.{minorVersion}/".format(**dep)
librarypath = packagepath + "library.json"
content = load_json_from_zipfile(zf, librarypath)
newjs, newcss = recursive_h5p_dependencies(zf, content, packagepath)
cssfiles.update(newcss)
jsfiles.update(newjs)
# load the JS required for the current package
for js in data.get("preloadedJs", []):
path = prefix + js["path"]
jsfiles[path] = True
# load the CSS required for the current package
for css in data.get("preloadedCss", []):
path = prefix + css["path"]
cssfiles[path] = True
return jsfiles, cssfiles
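# For reference, the h5p.json / library.json fragments walked above have roughly this shape
# (key names are taken from the reads in this function; the values are illustrative only):
#   {
#     "preloadedDependencies": [{"machineName": "H5P.Example", "majorVersion": 1, "minorVersion": 0}],
#     "preloadedJs": [{"path": "scripts/example.js"}],
#     "preloadedCss": [{"path": "styles/example.css"}]
#   }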
INITIALIZE_HASHI_FROM_IFRAME = "if (window.parent && window.parent.hashi) {try {window.parent.hashi.initializeIframe(window);} catch (e) {}}"
def parse_html(content):
try:
document = html5lib.parse(content, namespaceHTMLElements=False)
if not document:
# Could not parse
return content
# Because html5lib parses like a browser, it will
# always create head and body tags if they are missing.
head = document.find("head")
# Use the makeelement method of the head tag here to ensure that we use the same
# Element class for both. Depending on the system and python version we are on,
# we may be using the C implementation or the pure python and a mismatch will cause an error.
script_tag = head.makeelement("script", {"type": "text/javascript"})
script_tag.text = INITIALIZE_HASHI_FROM_IFRAME
head.insert(0, script_tag)
# Currently, html5lib strips the doctype, but it's important for correct rendering, so check the original
# content for the doctype and, if found, prepend it to the content serialized by html5lib
doctype = None
try:
# Now parse the content as a dom tree instead, so that we capture
# any doctype node as a dom node that we can read.
tree_builder_dom = html5lib.treebuilders.getTreeBuilder("dom")
parser_dom = html5lib.HTMLParser(
tree_builder_dom, namespaceHTMLElements=False
)
tree = parser_dom.parse(content)
# By HTML Spec if doctype is included, it must be the first thing
# in the document, so it has to be the first child node of the document
doctype_node = tree.childNodes[0]
# Check that this node is in fact a doctype node
if doctype_node.nodeType == doctype_node.DOCUMENT_TYPE_NODE:
# render to a string by calling the toxml method
# toxml uses single quotes by default, replace with ""
doctype = doctype_node.toxml().replace("'", '"')
except Exception as e:
logger.warn("Error in HTML5 parsing to determine doctype {}".format(e))
html = html5lib.serialize(
document,
quote_attr_values="always",
omit_optional_tags=False,
minimize_boolean_attributes=False,
use_trailing_solidus=True,
space_before_trailing_solidus=False,
)
if doctype:
html = doctype + html
return html
except html5lib.html5parser.ParseError:
return content
def get_h5p(zf):
file_size = 0
# Get the h5p bootloader, and then run it through our hashi templating code.
# return the H5P bootloader code
try:
h5pdata = load_json_from_zipfile(zf, "h5p.json")
contentdata = load_json_from_zipfile(zf, "content/content.json")
except KeyError:
return HttpResponseNotFound("No valid h5p file was found at this location")
jsfiles, cssfiles = recursive_h5p_dependencies(zf, h5pdata)
jsfiles = jsfiles.keys()
cssfiles = cssfiles.keys()
path_includes_version = (
"true" if "-" in [name for name in zf.namelist() if "/" in name][0] else "false"
)
main_library_data = [
lib
for lib in h5pdata["preloadedDependencies"]
if lib["machineName"] == h5pdata["mainLibrary"]
][0]
bootstrap_content = h5p_template.render(
Context(
{
"jsfiles": jsfiles,
"cssfiles": cssfiles,
"content": json.dumps(
json.dumps(contentdata, separators=(",", ":"), ensure_ascii=False)
),
"library": "{machineName} {majorVersion}.{minorVersion}".format(
**main_library_data
),
"path_includes_version": path_includes_version,
}
),
)
content = parse_html(bootstrap_content)
content_type = "text/html"
response = HttpResponse(content, content_type=content_type)
file_size = len(response.content)
if file_size:
response["Content-Length"] = file_size
return response
def get_embedded_file(zipped_path, zipped_filename, embedded_filepath):
with zipfile.ZipFile(zipped_path) as zf:
# handle H5P files
if zipped_path.endswith("h5p") and not embedded_filepath:
return get_h5p(zf)
# if no path, or a directory, is being referenced, look for an index.html file
if not embedded_filepath or embedded_filepath.endswith("/"):
embedded_filepath += "index.html"
# get the details about the embedded file, and ensure it exists
try:
info = zf.getinfo(embedded_filepath)
except KeyError:
return HttpResponseNotFound(
'"{}" does not exist inside "{}"'.format(
embedded_filepath, zipped_filename
)
)
# file size
file_size = 0
# try to guess the MIME type of the embedded file being referenced
content_type = (
mimetypes.guess_type(embedded_filepath)[0] or "application/octet-stream"
)
if zipped_filename.endswith("zip") and (
embedded_filepath.endswith("htm") or embedded_filepath.endswith("html")
):
content = zf.open(info).read()
html = parse_html(content)
response = HttpResponse(html, content_type=content_type)
file_size = len(response.content)
else:
# generate a streaming response object, pulling data from within the zip file
response = FileResponse(zf.open(info), content_type=content_type)
file_size = info.file_size
# set the content-length header to the size of the embedded file
if file_size:
response["Content-Length"] = file_size
return response
path_regex = re.compile("/(?P<zipped_filename>[^/]+)/(?P<embedded_filepath>.*)")
def get_zipped_file_path(zipped_filename):
# calculate the local file path to the zip file
zipped_path = get_content_storage_file_path(zipped_filename)
# if the zipfile does not exist on disk, return a 404
if not os.path.exists(zipped_path):
raise InvalidStorageFilenameError()
return zipped_path
YEAR_IN_SECONDS = 60 * 60 * 24 * 365
def _zip_content_from_request(request):
if request.method not in allowed_methods:
return HttpResponseNotAllowed(allowed_methods)
match = path_regex.match(request.path_info)
if match is None:
return HttpResponseNotFound("Path not found")
if request.method == "OPTIONS":
return HttpResponse()
zipped_filename, embedded_filepath = match.groups()
try:
zipped_path = get_zipped_file_path(zipped_filename)
except InvalidStorageFilenameError:
return HttpResponseNotFound(
'"%(filename)s" is not a valid zip file' % {"filename": zipped_filename}
)
# Sometimes due to URL concatenation, we get URLs with double-slashes in them, like //path/to/file.html.
# the zipped_filename and embedded_filepath are defined by the regex capturing groups in the URL defined
# in urls.py in the same folder as this file:
# r"^zipcontent/(?P<zipped_filename>[^/]+)/(?P<embedded_filepath>.*)"
# If the embedded_filepath contains a leading slash because of an input URL like:
# /zipcontent/filename.zip//file.html
# then the embedded_filepath will have a value of "/file.html"
# we detect this leading slash in embedded_filepath and remove it.
if embedded_filepath.startswith("/"):
embedded_filepath = embedded_filepath[1:]
# Any double-slashes later in the URL will be present as double-slashes, such as:
# /zipcontent/filename.zip/path//file.html
# giving an embedded_filepath value of "path//file.html"
# Normalize the path by converting double-slashes occurring later in the path to a single slash.
# This would change our example embedded_filepath to "path/file.html" which will resolve properly.
embedded_filepath = embedded_filepath.replace("//", "/")
# if client has a cached version, use that (we can safely assume nothing has changed, due to MD5)
if request.META.get("HTTP_IF_MODIFIED_SINCE"):
return HttpResponseNotModified()
CACHE_KEY = "ZIPCONTENT_VIEW_RESPONSE_{}/{}".format(
zipped_filename, embedded_filepath
)
cached_response = cache.get(CACHE_KEY)
if cached_response is not None:
return cached_response
response = get_embedded_file(zipped_path, zipped_filename, embedded_filepath)
# ensure the browser knows not to try byte-range requests, as we don't support them here
response["Accept-Ranges"] = "none"
response["Last-Modified"] = http_date(time.time())
patch_response_headers(response, cache_timeout=YEAR_IN_SECONDS)
if not isinstance(response, StreamingHttpResponse):
cache.set(CACHE_KEY, response, YEAR_IN_SECONDS)
return response
def generate_zip_content_response(environ):
request = WSGIRequest(environ)
response = _zip_content_from_request(request)
add_security_headers(request, response)
return response
def zip_content_view(environ, start_response):
"""
Handles GET requests and serves a static file from within the zip file.
"""
response = generate_zip_content_response(environ)
return django_response_to_wsgi(response, environ, start_response)
def get_application():
path_map = {
get_hashi_base_path(): hashi_view,
get_zip_content_base_path(): zip_content_view,
}
return wsgi.PathInfoDispatcher(path_map)
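# Minimal sketch of serving the dispatcher standalone with cheroot (illustrative only;
# the bind address and port are assumptions, and Kolibri wires this application up elsewhere):
#   server = wsgi.Server(("0.0.0.0", 8080), get_application())
#   server.start()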
|
"""
Contains protocol/step objects for gate operations.
.. currentmodule:: quanguru.classes.QGates
.. autosummary::
SpinRotation
xGate
.. |c| unicode:: U+2705
.. |x| unicode:: U+274C
.. |w| unicode:: U+2000
======================= ================== ============== ================ ===============
**Function Name** **Docstrings** **Examples** **Unit Tests** **Tutorials**
======================= ================== ============== ================ ===============
`SpinRotation` |w| |w| |w| |x| |w| |w| |x| |w| |w| |x| |w| |w| |x|
`xGate` |w| |w| |w| |x| |w| |w| |x| |w| |w| |x| |w| |w| |x|
======================= ================== ============== ================ ===============
"""
from .QPro import Gate
from .baseClasses import setAttr
from ..QuantumToolbox import evolution
from ..QuantumToolbox import operators #pylint: disable=relative-beyond-top-level
from ..QuantumToolbox import spinRotations #pylint: disable=relative-beyond-top-level
class SpinRotation(Gate): # pylint: disable=too-many-ancestors
label = 'SpinRotation'
#: (**class attribute**) number of instances created internally by the library
_internalInstances: int = 0
#: (**class attribute**) number of instances created explicitly by the user
_externalInstances: int = 0
#: (**class attribute**) number of total instances = _internalInstances + _externalInstances
_instances: int = 0
__slots__ = ['__angle', '__rotationAxis', 'phase', '_rotationOp']
def __init__(self, **kwargs):
super().__init__()
self.__angle = None
self.__rotationAxis = None
self._rotationOp = None
self.phase = 1
#self._createUnitary = self._rotMat
self._named__setKwargs(**kwargs) # pylint: disable=no-member
@property
def angle(self):
return self._SpinRotation__angle
@angle.setter
def angle(self, val):
setAttr(self, '_SpinRotation__angle', val)
@property
def rotationAxis(self):
return self._SpinRotation__rotationAxis # pylint: disable=no-member
@rotationAxis.setter
def rotationAxis(self, axStr):
setAttr(self, '_SpinRotation__rotationAxis', axStr)
if axStr.lower() == 'x':
self._rotationOp = operators.Jx
elif axStr.lower() == 'y':
self._rotationOp = operators.Jy
elif axStr.lower() == 'z':
self._rotationOp = operators.Jz
else:
raise ValueError('unknown axis')
def _rotMat(self, collapseOps = None, decayRates = None, openSys=False): #pylint:disable=unused-argument
if ((self._paramBoundBase__matrix is None) or (self._paramBoundBase__paramUpdated is True)): # pylint: disable=no-member
sys = list(self.subSys.values())
rotOp = self._rotationOp
flipOp = operators.compositeOp(rotOp(sys[0].dimension, isDim=True), sys[0]._dimsBefore, sys[0]._dimsAfter) # pylint: disable=no-member,line-too-long # noqa: E501
flipUn = evolution.Unitary(self.phase*self.angle*flipOp)
for i in range(len(sys)-1):
flipOpN = operators.compositeOp(rotOp(sys[i+1].dimension, isDim=True),
sys[i+1]._dimsBefore, sys[i+1]._dimsAfter)
flipUn = evolution.Unitary(self.phase*self.angle*flipOpN) @ flipUn
self._paramBoundBase__matrix = evolution._prepostSO(flipUn) if (openSys or isinstance(collapseOps, list) or self._isOpen) else flipUn # pylint: disable=assigning-non-slot,line-too-long,protected-access
self._paramBoundBase__paramUpdated = False # pylint: disable=assigning-non-slot
return self._paramBoundBase__matrix # pylint: disable=no-member
class xGate(SpinRotation): # pylint: disable=too-many-ancestors
label = 'xGate'
#: (**class attribute**) number of instances created internally by the library
_internalInstances: int = 0
#: (**class attribute**) number of instances created explicitly by the user
_externalInstances: int = 0
#: (**class attribute**) number of total instances = _internalInstances + _externalInstances
_instances: int = 0
__slots__ = []
def __init__(self, **kwargs):
super().__init__()
self.rotationAxis = 'x'
#self._createUnitary = self._gateImplements
self._named__setKwargs(**kwargs) # pylint: disable=no-member
def instantFlip(self, openSys=False):
if ((self._paramBoundBase__matrix is None) or (self._paramBoundBase__paramUpdated is True)): # pylint: disable=no-member
sys = list(self.subSys.values())
if self.rotationAxis.lower() == 'x':
rotOp = spinRotations.xRotation
elif self.rotationAxis.lower() == 'y':
rotOp = spinRotations.yRotation
elif self.rotationAxis.lower() == 'z':
rotOp = spinRotations.zRotation
flipOp = operators.compositeOp(rotOp(self.angle), sys[0]._dimsBefore, sys[0]._dimsAfter) # pylint: disable=no-member
for i in range(len(sys)-1):
flipOp = operators.compositeOp(rotOp(self.angle), sys[i+1]._dimsBefore, sys[i+1]._dimsAfter) @ flipOp
self._paramBoundBase__matrix = evolution._prepostSO(flipOp) if openSys else flipOp # pylint: disable=assigning-non-slot,protected-access
self._paramBoundBase__paramUpdated = False # pylint: disable=assigning-non-slot
return self._paramBoundBase__matrix # pylint: disable=no-member
def _gateImplements(self, collapseOps = None, decayRates = None): #pylint:disable=unused-argument
if self.implementation is None:
unitary = self._rotMat(openSys = isinstance(collapseOps, list) or self._isOpen)
elif self.implementation.lower() in ('instant', 'flip'): # pylint: disable=no-member
unitary = self.instantFlip(openSys = isinstance(collapseOps, list) or self._isOpen)
return unitary
SpinRotation._createUnitary = SpinRotation._rotMat # pylint: disable=protected-access
xGate._createUnitary = xGate._gateImplements
|
from .collection import create_collection_from_xml, add_collection_items_from_xml
from .guild import create_guild_from_xml, add_guild_members_from_xml
from .hotitems import create_hot_items_from_xml, add_hot_items_from_xml
from .plays import create_plays_from_xml, add_plays_from_xml
from .game import create_game_from_xml, add_game_comments_from_xml
__all__ = ['create_collection_from_xml', 'create_guild_from_xml', 'create_hot_items_from_xml', 'create_plays_from_xml',
           'create_game_from_xml',
           'add_collection_items_from_xml', 'add_guild_members_from_xml', 'add_hot_items_from_xml', 'add_plays_from_xml',
           'add_game_comments_from_xml'] |
import numpy as np
from GitMarco.statistics.metrics import standard_error
def test_standard_error():
data = np.random.rand(20)
error = standard_error(data)
assert isinstance(error, float), 'Error in standard error type'
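# For context: the classical standard error of the mean is std(data, ddof=1) / sqrt(len(data));
# whether GitMarco.statistics.metrics.standard_error follows exactly that definition is an
# assumption here, since the test above only checks the return type.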
|
import time
from datetime import datetime
import socket
import os
import glob
from tqdm import tqdm
import importlib
import argparse
import numpy as np
import random
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torch.autograd import Variable
from tensorboardX import SummaryWriter
from datasets import UCF101Dataset
from models import S3DG
from utils.process import load_checkpoint_model
from utils.utils import AverageMeter, accuracy
from utils.settings import from_file
# CUDA_DEVICES = [0, 1, 2, 3, 4, 5, 6, 7]
# os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1, 2, 3, 4, 5, 6, 7"
# CUDA_DEVICES = [0]
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
torch.backends.cudnn.benchmark = True
# Use GPU if available else revert to CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("\nGPU being used:", torch.cuda.is_available())
def parse_args():
parser = argparse.ArgumentParser(description='Test model on action recognition')
parser.add_argument('config', help='config file path, and its format is config_file.config')
parser.add_argument('--resume_from', type=str, help='the checkpoint file to init model')
parser.add_argument('--gpus', type=int, default=8, help='gpu number')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = from_file(args.config)
if args.gpus is not None:
cfg.gpus = args.gpus
if args.resume_from is not None:
cfg.resume_from = args.resume_from
else:
raise ValueError('Please specify the path to load checkpoint')
################ 1 DATA ###################
print('Testing model on {} dataset...'.format(cfg.data['dataset']))
    batch_size = 1 * cfg.gpus  # since a video contains 10 clips
test_dataset = UCF101Dataset(data_file=cfg.data['test_file'], img_tmpl=cfg.data['test_img_tmp'],
clip_len=cfg.data['test_clip_len'], size=cfg.data['size'], mode='test', shuffle=False)
    test_dataloader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=8)
################ 2 MODEL ##################
# init model from checkpoint
model = S3DG(num_class=cfg.model['num_class'])
load_checkpoint_model(model, checkpoint_path=cfg.resume_from)
if torch.cuda.device_count() > 1:
print('use %d gpus' % (torch.cuda.device_count()))
model = nn.DataParallel(model, device_ids=range(cfg.gpus))
else:
print('use 1 gpu')
print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))
model.to(device)
# ################### 3 CRITERION #########################
criterion = nn.CrossEntropyLoss().to(device) # standard crossentropy loss for classification
# criterion = nn.BCEWithLogitsLoss().to(device)
################## 4 BEGIN TEST #########################
avg_loss, avg_acc = test(test_dataloader, model, criterion)
def test(test_loader, model, criterion):
torch.set_grad_enabled(False)
model.eval()
total_loss = 0.0
correct = 0
total_num = 0
pbar = tqdm(test_loader)
for step, (inputs,labels) in enumerate(pbar):
inputs = inputs.cuda() # (bs, 10, C, T, H,W)
labels = labels.cuda() # (bs)
outputs = []
for clip in inputs:
clip = clip.cuda() # (10, C, T, H, W)
out = model(clip) # (10, 101)
out = torch.mean(out, dim=0) # (101,)
outputs.append(out)
outputs = torch.stack(outputs) # (bs, 101)
loss = criterion(outputs, labels)
# compute loss and acc
total_loss += loss.item()
pts = torch.argmax(outputs, dim=1)
correct += torch.sum(labels == pts).item()
total_num += inputs.size(0)
# print('correct: {}, {}, {}'.format(correct, targets, pts))
        pbar.set_description('{}/{}: correct {}/{}'.format(step, len(test_loader),
                                                           correct, total_num))
# print(str(step), len(test_loader))
# print(correct)
avg_loss = total_loss / len(test_loader)
# avg_loss = total_loss / (len(val_loader)+len(train_loader))
avg_acc = correct / len(test_loader.dataset)
# avg_acc = correct / (len(val_loader.dataset)+len(train_loader.dataset))
print('[TEST] loss: {:.3f}, acc: {:.3f}'.format(avg_loss, avg_acc))
return avg_loss, avg_acc
if __name__ == '__main__':
main()
|
from scipy.stats import bernoulli, randint
import numpy as np
class TreeCut:
def __init__(self, growth_param=5, replanting_cost=30, linear_wood_value=10,
maintenance_cost=3, max_height=40, proba_of_dying=.1,
sappling_height=1, gamma=1. / (1 + 0.05)):
self.growth_param = growth_param
self.replanting_cost = replanting_cost
self.linear_wood_value = linear_wood_value
self.maintenance_cost = maintenance_cost
self.max_height = max_height
self.proba_of_dying = proba_of_dying
self.sappling_height = sappling_height
self.gamma = gamma
self.states = range(self.max_height + 1)
self.number_of_actions = 2
self.death = 0
self.dead_index = 0
self.no_cut = 0
self.cut = 1
self.dynamics, self.reward = self.tree_MDP()
def tree_sim(self, cur_state, action):
        if cur_state == self.death:
            if action == self.cut:
next_state = self.sappling_height
reward = -self.replanting_cost
else:
next_state = self.death
reward = 0
else:
            if action == self.cut:
next_state = self.sappling_height
reward = self.linear_wood_value * cur_state - self.replanting_cost
else:
tree_is_dying = bernoulli.rvs(self.proba_of_dying)
if tree_is_dying:
next_state = self.death
reward = -self.maintenance_cost
else:
next_state = randint.rvs(cur_state, self.max_height + 1)
reward = -self.maintenance_cost
return next_state, reward
def tree_MDP(self):
dynamics = np.zeros((self.max_height + 1, self.max_height + 1, self.number_of_actions))
rewards = np.zeros((self.max_height + 1, self.number_of_actions))
dynamics[:, self.sappling_height, self.cut] = np.array([1] * (self.max_height + 1))
dynamics[self.dead_index, self.dead_index, self.no_cut] = 1
dynamics[self.max_height, self.max_height, self.no_cut] = 1 - self.proba_of_dying
dynamics[1:, self.dead_index, self.no_cut] = self.proba_of_dying
for cur_state in range(1, self.max_height):
for next_state in range(cur_state, self.max_height + 1):
dynamics[cur_state, next_state, self.no_cut] = (1 - self.proba_of_dying) * 1. / (
self.max_height - cur_state + 1)
rewards[self.dead_index, :] = [0, -self.replanting_cost]
rewards[1:, self.no_cut] = [-self.maintenance_cost for k in range(self.max_height)]
rewards[1:, self.cut] = [self.linear_wood_value * cur_state - self.replanting_cost for cur_state in
range(1, self.max_height + 1)]
return dynamics, rewards
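# Minimal usage sketch (illustrative only, not part of the original module): build the MDP
# tables once and simulate a single "no cut" transition from a tree of height 5.
if __name__ == "__main__":
    env = TreeCut()
    next_height, reward = env.tree_sim(5, env.no_cut)
    print("next height:", next_height, "reward:", reward)
    print("dynamics table shape:", env.dynamics.shape, "reward table shape:", env.reward.shape)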
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-27 19:28
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import json
import yaml
def convert_probe_source(apps, schema_editor):
ProbeSource = apps.get_model("probes", "ProbeSource")
for ps in ProbeSource.objects.all():
# fix model
ps.model = ps.model.split(".")[-1]
# fix osquery model
ps.model = ps.model.replace("OSQ", "Osq")
# fix body
probe_d = yaml.load(ps.body)
# fix inventory filters
for inventory_filter in probe_d.get("filters", {}).get("inventory", []):
mf_mbu = inventory_filter.pop("business_units", None)
if mf_mbu:
if not isinstance(mf_mbu, list):
mf_mbu = [mf_mbu]
inventory_filter["meta_business_unit_ids"] = mf_mbu
mf_tag = inventory_filter.pop("tags", None)
if mf_tag:
if not isinstance(mf_tag, list):
mf_tag = [mf_tag]
inventory_filter["tag_ids"] = mf_tag
# fix payload filters
for payload_filter in probe_d.get("filters", {}).get("payload", []):
for k, v in list(payload_filter.items()):
if not isinstance(v, list):
payload_filter[k] = [v]
# fix metadata filters
for metadata_filter in probe_d.get("filters", {}).get("metadata", []):
mf_type = metadata_filter.pop("type", None)
if mf_type:
metadata_filter["event_types"] = mf_type
mf_tags = metadata_filter.pop("tags", None)
if mf_tags:
metadata_filter["event_tags"] = mf_tags
if ps.model == "OsqueryProbe":
probe_d["queries"] = []
for q in probe_d.pop("osquery", {}).pop("schedule", []):
for k, v in list(q.items()):
if (v == "") or (v is None):
del q[k]
probe_d["queries"].append(q)
elif ps.model == "OsqueryComplianceProbe":
oc = probe_d.pop("osquery_compliance", {})
fc = oc.pop("file_checksums", [])
if fc:
probe_d["file_checksums"] = fc
pf = oc.pop("preference_files", [])
if pf:
probe_d["preference_files"] = pf
elif ps.model == "MunkiInstallProbe":
ps.model = "BaseProbe"
elif ps.model == "OsqueryDistributedQueryProbe":
probe_d["distributed_query"] = probe_d.pop("osquery_distributed_query")
elif ps.model == "OsqueryFIMProbe":
probe_d["file_paths"] = probe_d.pop("osquery_fim", [])
elif ps.model == "SantaProbe":
probe_d["rules"] = probe_d.pop("santa", [])
# serialize body as json
ps.body = json.dumps(probe_d)
ps.save()
class Migration(migrations.Migration):
dependencies = [
('probes', '0008_auto_20161111_1906'),
]
operations = [
migrations.RunPython(convert_probe_source),
migrations.CreateModel(
name='Feed',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.URLField()),
('name', models.TextField()),
('description', models.TextField(blank=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='FeedImport',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[('OK', 'OK'),
('DOWNLOAD_ERROR', 'download error'),
('FEED_ERROR', 'feed error')], max_length=32)),
('new_probes', models.PositiveIntegerField(default=0)),
('updated_probes', models.PositiveIntegerField(default=0)),
('archived_probes', models.PositiveIntegerField(default=0)),
('created_at', models.DateTimeField(auto_now_add=True)),
('feed', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='probes.Feed')),
],
),
migrations.CreateModel(
name='FeedProbe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('model', models.CharField(max_length=255)),
('key', models.CharField(max_length=255)),
('body', django.contrib.postgres.fields.jsonb.JSONField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('archived_at', models.DateTimeField(blank=True, null=True)),
('feed', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='probes.Feed')),
],
),
migrations.RemoveField(
model_name='probesource',
name='apps',
),
migrations.AlterField(
model_name='probesource',
name='body',
field=django.contrib.postgres.fields.jsonb.JSONField(editable=False),
),
migrations.AlterUniqueTogether(
name='feedprobe',
unique_together=set([('feed', 'key')]),
),
]
|
from pony.orm import *
import datetime
from model import Group, Contact
from pymysql.converters import decoders, encoders, convert_mysql_timestamp
class ORMFixture:
def __init__(self, host, database, user, password):
conv = encoders
conv.update(decoders)
        conv[datetime.datetime] = convert_mysql_timestamp
self.db.bind(
"mysql",
host=host,
database=database,
user=user,
password=password,
conv=conv,
autocommit=True
)
self.db.generate_mapping()
db = Database()
class ORMGroup(db.Entity):
_table_ = "group_list"
id = PrimaryKey(int, column="group_id")
name = Optional(str, column="group_name")
header = Optional(str, column="group_header")
footer = Optional(str, column="group_footer")
contacts = Set(
lambda: ORMFixture.ORMContact,
table="address_in_groups",
column="id",
reverse="groups",
lazy=True
)
class ORMContact(db.Entity):
_table_ = "addressbook"
id = PrimaryKey(int, column="id")
firstname = Optional(str, column="firstname")
lastname = Optional(str, column="lastname")
company = Optional(str, column="company")
home_tel = Optional(str, column="home")
mobile_tel = Optional(str, column="mobile")
work_tel = Optional(str, column="work")
sec_tel = Optional(str, column="phone2")
email = Optional(str, column="email")
email2 = Optional(str, column="email2")
email3 = Optional(str, column="email3")
note = Optional(str, column="notes")
address = Optional(str, column="address")
homepage = Optional(str, column="homepage")
deprecated = Optional(datetime.datetime, column="deprecated")
groups = Set(
lambda: ORMFixture.ORMGroup,
table="address_in_groups",
column="group_id",
reverse="contacts",
lazy=True
)
@db_session
def get_group_list(self):
return self.convert_groups_to_model(select(g for g in ORMFixture.ORMGroup))
@staticmethod
def convert_groups_to_model(groups):
def convert(group):
return Group(
id=str(group.id),
name=group.name,
footer=group.footer
)
return list(map(convert, groups))
@db_session
def get_contact_list(self):
return self.convert_contacts_to_model(
select(c for c in ORMFixture.ORMContact if c.deprecated is None)
)
@staticmethod
def convert_contacts_to_model(contacts):
def convert(contact):
return Contact(
id=str(contact.id),
firstname=contact.firstname,
lastname=contact.lastname,
company=contact.company,
home_tel=contact.home_tel,
mobile_tel=contact.mobile_tel,
work_tel=contact.work_tel,
sec_tel=contact.sec_tel,
email=contact.email,
email2=contact.email2,
email3=contact.email3,
note=contact.note,
address=contact.address,
homepage=contact.homepage
)
return list(map(convert, contacts))
@db_session
def get_contact_group(self, group):
return list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
@db_session
def get_contacts_in_group(self, group):
orm_group = self.get_contact_group(group)
return self.convert_contacts_to_model(filter(lambda x: x.deprecated is None, orm_group.contacts))
@db_session
def get_contacts_not_in_group(self, group):
orm_group = self.get_contact_group(group)
contacts = select(
c for c in ORMFixture.ORMContact
if c.deprecated is None and orm_group not in c.groups
)
return self.convert_contacts_to_model(contacts)
def do_something(self):
l = self.get_contacts_in_group(Group(id="123"))
print(l)
for item in l:
print(item)
print(len(l))
|
import uvicorn
from datetime import datetime
from typing import List, Optional
from fastapi import FastAPI
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
from pydantic import BaseModel, EmailStr
app = FastAPI()
_version_ = '1.0.0'
class baseRequest(BaseModel):
num: Optional[int] = None
ioType:str
key:str
status: bool
class baseResponse(BaseModel):
detail:str
data:str
message:str
status: int
timestamp: datetime
response = {
'root':{'data':f'Raspberry Pi Web SDK {_version_} Look at /docs','detail':'','message':'Success','status':1},
'version':{'data':f'Raspberry Pi Web SDK {_version_}','detail':'','message':'Success','status':1},
'status':{'data':'','detail':'','message':'','status':0},
}
@app.get("/",response_model=baseResponse)
async def root():
return JSONResponse(response['root'])
@app.get("/version",response_model=baseResponse)
async def version():
return JSONResponse(response['version'])
@app.get('/status/{gpio_id}',response_model=baseResponse)
async def read_item(gpio_id: int):
response_ = response['status']
response_['data']
response_['message']
response_['status']
return JSONResponse(response_)
#if __name__ == "__main__":
# uvicorn.run(app, host="0.0.0.0", port=80,debug=True) |
'''
Created on Aug 7, 2018
@author: kjnether
'''
import pytest
import FMEUtil.FMWParser
import os.path
@pytest.fixture()
def attributeRenamerFixture():
'''
returns a fmw file as a FMWParser
object that contains a attribute renamer
transformer:
fish_aquatic_invasive_spcs_sp_staging_gdb_idwprod1.fmw
fwa_waterbodies_20k_50k_staging_gdb_bcgw.fmw
'''
fileName = 'fish_aquatic_invasive_spcs_sp_staging_gdb_idwprod1.fmw'
filePath = os.path.join(os.path.dirname(__file__), '..', 'test_Data')
fileNameFullPath = os.path.join(filePath, fileName)
parsr = FMEUtil.FMWParser.FMWParser(fileNameFullPath)
yield parsr
@pytest.fixture()
def counterFixture():
'''
returns a fmw file as a FMWParser
object that contains a counter transformer:
'''
fileName = 'fn_land_use_sites_line_staging_gdb_bcgw.fmw'
filePath = os.path.join(os.path.dirname(__file__), '..', 'test_Data')
fileNameFullPath = os.path.join(filePath, fileName)
parsr = FMEUtil.FMWParser.FMWParser(fileNameFullPath)
yield parsr
@pytest.fixture()
def transParserFixture():
# returns a parser object useful for testing transfomers
# only one: fwa_waterbodies_20k_50k_staging_gdb_bcgw.fmw
fileName = 'fwa_waterbodies_20k_50k_staging_gdb_bcgw.fmw'
filePath = os.path.join(os.path.dirname(__file__), '..', 'test_Data')
fileNameFullPath = os.path.join(filePath, fileName)
parsr = FMEUtil.FMWParser.FMWParser(fileNameFullPath)
yield parsr
|
from nequip.data import AtomicDataDict
RMSE_LOSS_KEY = "rmse"
MAE_KEY = "mae"
LOSS_KEY = "noramlized_loss"
VALUE_KEY = "value"
CONTRIB = "contrib"
VALIDATION = "validation"
TRAIN = "training"
ABBREV = {
AtomicDataDict.TOTAL_ENERGY_KEY: "e",
AtomicDataDict.PER_ATOM_ENERGY_KEY: "Ei",
AtomicDataDict.FORCE_KEY: "f",
AtomicDataDict.NODE_FEATURES_KEY: "h",
LOSS_KEY: "loss",
VALIDATION: "val",
TRAIN: "train",
}
|
#from scamp import entryExit
import utilities
global itr
itr = 0
def sort_filters(a,b):
import string
list = ['-u','W-J-B','-g','W-J-V','-r','W-C-RC','-i','W-S-I+','W-C-IC','-z','W-S-Z+']
a_num = 0
b_num = 0
for i in range(len(list)):
print a, b, list[i]
if string.find(a,list[i])!= -1: a_num = i
if string.find(b,list[i])!= -1: b_num = i
print a_num, b_num
answer = a_num - b_num
print answer
return answer
def load_spectra():
import pickle
f = open('picklespectra','r')
m = pickle.Unpickler(f)
spectra = m.load()
return spectra
''' get SDSS zeropoint if exists '''
def get_sdss_zp(run,night,snpath):
import MySQLdb
db2 = MySQLdb.connect(db='calib')
c = db2.cursor()
zps={'JCAT':0}
OK = True
for filt in ['u','g','r','i','z']:
command = "SELECT SDSSZP from CALIB where SN='" + snpath + "' and FILT='" + filt + "' and NAME='reg' and RUN='" + run + "'"
print command
c.execute(command)
zp = c.fetchall()[0][0]
if str(zp) != 'None':
print zp
zps[filt] = float(zp)
else: OK = False
if OK:
return zps #['u'], zps['g'], zps['r'], zps['i'], zps['z']
else:
return None
def assign_zp(filt,pars,zps):
if filt in zps:
out = pars[zps[filt]]
else:
# raise Exception
out = 0
return out
def get_kit():
import pickle, os
f = open(os.environ['kpno'] + '/process_kpno/locuskit','r')
m = pickle.Unpickler(f)
locus = m.load()
return locus
#def get_locus():
# import pickle
# f = open('/Volumes/mosquitocoast/patrick/kpno/process_kpno/kpnolocus','r')
# m = pickle.Unpickler(f)
# locus = m.load()
# return locus
def get_locus():
import pickle
f = open('synthlocus','r')
m = pickle.Unpickler(f)
locus = m.load()
return locus
def locus():
import os, re
f = open('locus.txt','r').readlines()
id = -1
rows = {}
bands = {}
for i in range(len(f)):
l = f[i]
if l[0] != ' ':
rows[i] = l[:-1]
else:
id += 1
bands[rows[id]] = [float(x) for x in re.split('\s+',l[:-1])[1:]]
print bands.keys()
#pylab.scatter(bands['GSDSS_ZSDSS'],bands['RSDSS_ISDSS'])
#pylab.show()
return bands
#@entryExit
#def all(catalog_dir,cluster,magtype='APER1',location=None):
def all(subarudir,cluster,DETECT_FILTER,aptype,magtype,location=None):
print magtype
save_slr_flag = photocalibrate_cat_flag = '--spec mode=' + magtype.replace('1','').replace('APER','aper').replace('2','')
catalog_dir = subarudir + '/' + cluster + '/PHOTOMETRY_' + DETECT_FILTER + aptype + '/'
catalog_dir_iso = subarudir + '/' + cluster + '/PHOTOMETRY_' + DETECT_FILTER + '_iso/'
import astropy.io.fits as pyfits, os, string, random
min_err = 0.02
#catalog_dir = '/'.join(catalog.split('/')[:-1])
catalog_notcal = catalog_dir + '/' + cluster + '.stars.cat'
catalog = catalog_dir + '/' + cluster + '.stars.calibrated.cat'
command = './photocalibrate_cat.py -i %(catalog_notcal)s -c %(cluster)s -t standard -o %(catalog)s %(photocalibrate_cat_flag)s' % {'cluster':cluster, 'catalog_notcal':catalog_notcal, 'catalog':catalog, 'photocalibrate_cat_flag':photocalibrate_cat_flag}
print command
os.system(command)
offset_list = catalog_dir + '/multiSAVEZP.offsets.list'
complete_offset_list = catalog_dir + '/multiCOMPLETE.offsets.list'
slr_high = catalog_dir + '/slr.offsets.list'
from glob import glob
startingzps = {}
if glob(slr_high):
f = open(slr_high,'r').readlines()
for l in f:
res = l.split(' ')
filt = res[1]
zp = float(res[2])
startingzps[filt.replace('10_2','').replace('10_1','').replace('10_3','')] = zp
offset_list_file = open(offset_list,'w')
complete_offset_list_file = open(complete_offset_list,'w')
print catalog_dir, offset_list
#zps_dict = {'full':{'SUBARU-10_2-1-W-J-B': 0.16128103741856098, 'SUBARU-10_2-1-W-C-RC': 0.0, 'SUBARU-10_2-1-W-S-Z+': 0.011793588122789772, 'MEGAPRIME-10_2-1-u': 0.060291451932493148, 'SUBARU-10_2-1-W-C-IC': 0.0012269407091880637, 'SUBARU-10_2-1-W-J-V': 0.013435398732369786}}
''' get catalog filters '''
import do_multiple_photoz
filterlist_all = do_multiple_photoz.get_filters(catalog,'OBJECTS')
print filterlist_all
filterdict, good_star_nums = do_multiple_photoz.figure_out_slr_chip(filterlist_all,catalog,'OBJECTS',magtype=magtype)
filterlist = filterdict.values()
print filterlist
print filterdict, good_star_nums
filterlist.sort()
print filterlist
reload(do_multiple_photoz)
import pylab
print catalog
table = pyfits.open(catalog)[1].data[:]
#mags = table.field('MAG_APER1-SUBARU-10_2-1-W-S-I+')
#max_mag = sorted(mags)[-1]
if False:
print len(table)
table = table[mags < max_mag -0.3]
print len(table)
''' saturated stars in A2219 image '''
if cluster == 'A2219':
table.field('MAG_APER1-SPECIAL-0-1-I')[table.field('Ypos') < 2000] = 99
table.field('MAG_APER1-SPECIAL-0-1-I')[table.field('MAG_APER1-SPECIAL-0-1-I') < 21] = 99
print catalog, 'catalog'
alpha = [table.field('ALPHA_J2000')[0]]
delta = [table.field('DELTA_J2000')[0]]
import utilities
gallong, gallat = utilities.convert_to_galactic(alpha, delta)
ebv = utilities.getDust(alpha,delta)
extinct = {}
for filt in filterlist:
extinct[filt] = utilities.getExtinction(filt) * ebv[0]
print extinct
print ebv, 'ebv', alpha, delta, gallong, gallat
#location = os.environ['sne'] + '/photoz/' + cluster + '/SLRplots/'
if location is None:
location = os.environ['sne'] + '/photoz/' + cluster + '/SLRplots/'
print 'deleting old plots'
os.system('rm ' + location + '/*')
os.system('mkdir -p ' + location)
print 'finished deleting old plots'
import pickle
f = open('maglocus_SYNTH','r')
m = pickle.Unpickler(f)
locus_mags = m.load()
#import pickle
#f = open('maglocus_SYNTH','r')
#m = pickle.Unpickler(f)
locus_pairs = get_locus() #m.load()
if True:
''' assign locus color to each instrument band '''
instrument_to_locus = {}
for filt in filterlist:
a_short = filt.replace('+','').replace('C','')[-1]
print filt, a_short
ok = True
if string.find(filt,'WHT') != -1:
a_short = 'WHT' + a_short.upper()
elif string.find(filt,'MEGAPRIME') != -1:
a_short = 'MP' + a_short.upper() + 'SUBARU'
elif string.find(filt,'SUBARU') != -1:
if string.find(filt,"W-S-") != -1:
a_short = 'WS' + a_short.upper() + 'SUBARU'
else:
a_short = a_short.upper() + 'JOHN'
print filt, not (string.find(filt,'B') != -1 and string.find(filt,'SUBARU') != -1), string.find(filt,'-2-')==-1 , not (string.find(filt,'MEGAPRIME') != -1 and filt[-1] == 'u') , not (string.find(filt,'WHT') != -1 and filt[-1] == 'U')
#if not (string.find(filt,'-B') != -1 and string.find(filt,'SUBARU') != -1) and string.find(filt,'-2-')==-1 and not (string.find(filt,'MEGAPRIME') != -1 and filt[-1] == 'u') and not (string.find(filt,'WHT') != -1 and filt[-1] == 'U'):
if True: #string.find(filt,'-2-')==-1 and not (string.find(filt,'MEGAPRIME') != -1 and filt[-1] == 'u') and not (string.find(filt,'WHT') != -1 and filt[-1] == 'U') and string.find(filt,'W-J-B') == -1:
instrument_to_locus[filt] = a_short
print instrument_to_locus
#instrument_to_locus = {'u':'U'+DET,'g':'G'+DET,'r':'R'+DET,'i':'I'+DET,'z':'Z'+DET,'JCAT':'JTMASS'}
''' figure out the filter to hold '''
list = ['SUBARU-10_1-1-W-C-RC','SUBARU-10_2-1-W-C-RC','MEGAPRIME-0-1-r','SUBARU-10_2-1-W-S-R+','SUBARU-9-4-W-C-RC','SUBARU-10_2-1-W-S-I+',]
for filt in list:
if filt in filterlist:
hold_all = filt
break
''' THROWING OUT ALL 10_*_2 chips '''
def f(x): return x!=hold_all and not (string.find(x,'-2-') != -1 and string.find(x,'10')!=-1) and not (string.find(x,'MEGAPRIME') != -1 and x[-1] == 'u') and not (string.find(x,'WHT') != -1 and string.find(x,'U') != -1) # and string.find(x,'W-C-IC') == -1 and string.find(x,'Z') == -1 #and string.find(x,'10_3') == -1
vary_list = filter(f, filterlist)
print vary_list, filterlist
#vary_list = ['SUBARU-10_3-1-W-J-V','SUBARU-10_2-1-W-S-Z+','SUBARU-10_2-1-W-C-IC','SUBARU-10_3-1-W-C-IC','MEGAPRIME-10_2-1-i','MEGAPRIME-10_2-1-z']
print vary_list
moststarfilts, good_star_nums = do_multiple_photoz.figure_out_slr_chip(vary_list+[hold_all],catalog,'OBJECTS',magtype=magtype)
print moststarfilts
#vary_list = ['SUBARU-10_3-1-W-J-V','SUBARU-10_2-1-W-S-Z+','SUBARU-10_2-1-W-C-IC','MEGAPRIME-10_2-1-i']
print vary_list
print hold_all
#while
''' designate which filter zeropoint to be held constant when matching bands '''
combos = [{'hold':hold_all,'vary':vary_list}]
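# The 'hold' filter's zeropoint is pinned to zero; update_zps() below then records each
# 'vary' filter's fitted offset relative to that hold filter.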
zps_dict_all = {}
def update_zps(zps_dict_all,results):
if not combo['hold'] in zps_dict_all:
zps_dict_all[combo['hold']] = 0.
for key in combo['vary']:
zps_dict_all[key] = zps_dict_all[combo['hold']] + results['full'][key]
return zps_dict_all
print combos
for combo in combos:
results = fit(table, combo, instrument_to_locus, magtype, locus_mags, locus_pairs, min_err, bootstrap=False, startingzps=None, plotdir=location, pre_zps=None, gallat=gallat, extinct=extinct)
print results
zps_dict_all = update_zps(zps_dict_all,results)
''' finally fit all bands at once '''
#combo = {'hold':'JCAT','vary':['u','g','r','i','z']}
#results = fit(table, combo, instrument_to_locus, magtype, locus_mags, min_err, startingzps=zps_dict_all, bootstrap=True, plotdir=location, pre_zps=None,gallat=gallat)
#zps_dict_all = update_zps(zps_dict_all,results)
#print zps_dict_all
if False:
''' assign locus color to each instrument band '''
DET = 'SDSS'
magtype='APER1'
instrument_to_locus = {'SDSS_u':'U'+DET,'SDSS_g':'G'+DET,'SDSS_r':'R'+DET,'SDSS_i':'I'+DET,'SDSS_z':'Z'+DET,'JCAT':'JTMASS'}
''' designate which filter zeropoint to be held constant when matching bands '''
combos = [{'hold':'SDSS_z','vary':['SDSS_r','SDSS_i']},{'hold':'SDSS_r','vary':['SDSS_u','SDSS_g']}]
zps_dict_all = {}
def update_zps(zps_dict_all,results):
if not combo['hold'] in zps_dict_all:
zps_dict_all[combo['hold']] = 0.
for key in combo['vary']:
zps_dict_all[key] = zps_dict_all[combo['hold']] + results['full'][key]
return zps_dict_all
if True:
''' first fit combinations of three bands'''
for combo in combos:
results = fit(table, combo, instrument_to_locus, magtype, locus_mags, locus_pairs, min_err, bootstrap=False,plotdir=location, pre_zps=False)
print results
zps_dict_all = update_zps(zps_dict_all,results)
''' finally fit all bands at once '''
combo = {'hold':'SDSS_z','vary':['SDSS_u','SDSS_g','SDSS_r','SDSS_i']}
results = fit(table, combo, instrument_to_locus, magtype, locus_mags, locus_pairs, min_err, startingzps=zps_dict_all, bootstrap=True, plotdir=location, pre_zps=False, extinct=extinct)
zps_dict_all = update_zps(zps_dict_all,results)
print zps_dict_all
#zps_dict_all = {'SUBARU-10_2-1-W-J-B': 0.16128103741856098, 'SUBARU-10_2-1-W-C-RC': 0.0, 'SUBARU-10_2-1-W-S-Z+': 0.011793588122789772, 'MEGAPRIME-10_2-1-u': 0.060291451932493148, 'SUBARU-10_2-1-W-C-IC': 0.0012269407091880637, 'SUBARU-10_2-1-W-J-V': 0.013435398732369786}
#zps_dict_all['SUBARU-10_2-1-W-C-RC'] = -99
print zps_dict_all
#for key in zps_dict_all.keys():
print zps_dict_all.keys(),
print zps_dict_all
''' select chips w/ most stars '''
for key in moststarfilts:
print key
offset_list_file.write('DUMMY ' + moststarfilts[key] + ' ' + str(zps_dict_all[moststarfilts[key]]) + ' 0\n')
#offset_list_file.write('DUMMY ' + key + ' ' + str(-99) + ' 0\n')
offset_list_file.close()
''' record all ZPs and numbers of stars '''
for key in zps_dict_all.keys():
complete_offset_list_file.write('DUMMY ' + key + ' ' + str(zps_dict_all[key]) + ' ' + str(good_star_nums[key]) + '\n')
complete_offset_list_file.close()
if magtype == 'APER1': aptype='aper'
elif magtype == 'ISO': aptype='iso'
save_slr_flag = photocalibrate_cat_flag = '--spec mode=' + magtype
print 'running save_slr'
command = './save_slr.py -c %(cluster)s -i %(catalog)s -o %(offset_list)s %(save_slr_flag)s' % {'cluster':cluster, 'catalog':catalog, 'offset_list':offset_list, 'save_slr_flag':save_slr_flag}
print command
os.system(command)
if False:
slr_catalog_dir = subarudir + '/' + cluster + '/PHOTOMETRY_' + DETECT_FILTER + aptype + '/'
slr_catalog_dir_iso = subarudir + '/' + cluster + '/PHOTOMETRY_' + DETECT_FILTER + '_iso/'
photocalibrate_cat_flag = '--spec mode=' + magtype
all_phot_cat = slr_catalog_dir + '/' + cluster + '.unstacked.cat'
all_phot_cat_iso = slr_catalog_dir_iso + '/' + cluster + '.unstacked.cat'
slr_out = slr_catalog_dir + '/' + cluster + '.slr.cat'
slr_out_iso = slr_catalog_dir_iso + '/' + cluster + '.slr.cat'
print 'running photocalibrate_cat'
command = './photocalibrate_cat.py -i %(all_phot_cat_iso)s -c %(cluster)s -o %(slr_out_iso)s -t slr %(photocalibrate_cat_flag)s' % {'cluster':cluster, 'all_phot_cat_iso':all_phot_cat_iso, 'slr_out_iso':slr_out_iso, 'photocalibrate_cat_flag':photocalibrate_cat_flag}
os.system(command)
command = './photocalibrate_cat.py -i %(all_phot_cat)s -c %(cluster)s -o %(slr_out)s -t slr %(photocalibrate_cat_flag)s' % {'cluster':cluster, 'all_phot_cat':all_phot_cat, 'slr_out':slr_out, 'photocalibrate_cat_flag':photocalibrate_cat_flag}
os.system(command)
print 'finished'
import calc_test_save
calc_test_save.photocalibrate(cluster)
#for band in [['r','i','u','g'],['g','r','i','z'],['g','r','u','g'],['r','i','i','z'],['i','JCAT','i','z']]:
# plot(table,zps_dict_all,instrument_to_locus,magtype,locus_c, min_err,band,location)
#return results
def plot(table,zplist,instrument_to_locus,magtype,locus_c, min_err,bands,location, alt_locus_c=None):
b1,b2,b3,b4 = bands
import pylab
pylab.clf()
if alt_locus_c:
if instrument_to_locus[b1]+'_'+instrument_to_locus[b2] in alt_locus_c and instrument_to_locus[b3]+'_'+instrument_to_locus[b4] in alt_locus_c:
print [instrument_to_locus[a] for a in [b1,b2,b3,b4]]
pylab.scatter(alt_locus_c[instrument_to_locus[b1]+'_'+instrument_to_locus[b2]],alt_locus_c[instrument_to_locus[b3]+'_'+instrument_to_locus[b4]],color='green')
if instrument_to_locus[b1]+'_'+instrument_to_locus[b2] in locus_c and instrument_to_locus[b3]+'_'+instrument_to_locus[b4] in locus_c:
print [instrument_to_locus[a] for a in [b1,b2,b3,b4]]
pylab.scatter(locus_c[instrument_to_locus[b1]+'_'+instrument_to_locus[b2]],locus_c[instrument_to_locus[b3]+'_'+instrument_to_locus[b4]],color='red')
else:
print '\n\n\n********************'
print b1 +'-'+b2 + ' and ' + b3 + '-' + b4 + ' not both locus color'
print 'possible locus bands:'
print locus_c.keys()
return
x1 = table.field('MAG_' + magtype + '_reg_' + b1)
x2 = table.field('MAG_' + magtype + '_reg_' + b2)
x1_err = table.field('MAGERR_' + magtype + '_reg_' + b1)
x2_err = table.field('MAGERR_' + magtype + '_reg_' + b2)
x = x1 -zplist[b1] - (x2 - zplist[b2])
x1_err[x1_err<min_err] = min_err
x2_err[x2_err<min_err] = min_err
x_err = (x1_err**2.+x2_err**2.)**0.5
y1 = table.field('MAG_' + magtype + '_reg_' + b3)
y2 = table.field('MAG_' + magtype + '_reg_' + b4)
y1_err = table.field('MAGERR_' + magtype + '_reg_' + b3)
y2_err = table.field('MAGERR_' + magtype + '_reg_' + b4)
y1_err[y1_err<min_err] = min_err
y2_err[y2_err<min_err] = min_err
y = y1 -zplist[b3] - (y2 - zplist[b4])
y_err = (y1_err**2.+y2_err**2.)**0.5
import scipy
good = scipy.array(abs(x1)<90) * scipy.array(abs(x2)<90) * scipy.array(abs(y1)<90) * scipy.array(abs(y2)<90)
pylab.scatter(x[good],y[good])
pylab.errorbar(x[good],y[good],xerr=x_err[good],yerr=y_err[good],fmt=None)
pylab.xlabel(b1 + '-' + b2,fontsize='x-large')
pylab.ylabel(b3 + '-' + b4,fontsize='x-large')
os.system('mkdir -p ' + location)
file = location + '/SLR'+b1+b2+b3+b4 +'.png'
print file
pylab.savefig(file)
#pylab.show()
#pylab.savefig('/Users/pkelly/Dropbox/plot.pdf')
def fit(table, combo_dict, instrument_to_locus, magtype, locus_mags, locus_pairs,
min_err=0.02,
min_bands_per_star=3,
startingzps=None,
plot_iteration_increment=5,
max_err=0.1,
bootstrap=False,
bootstrap_num=0,
plotdir='.',
save_bootstrap_plots=False,
live_plot=True,
pre_zps=None,
gallat=None,
extinct=None):
import string, re, pyfits, random, scipy, pylab
from copy import copy
if live_plot:
pylab.ion()
zps ={}
for i in range(len(combo_dict['vary'])):
zps[combo_dict['vary'][i]] = i
number_locus_points = len(locus_mags)
number_all_stars = len(table.field('MAG_' + magtype + '-' + instrument_to_locus.keys()[0]))
print 'MAG_' + magtype + '-' + instrument_to_locus.keys()[0]
''' for each point in locus, make a list of the locus in each color (locus has same number of points in each color) '''
''' just a rearrangement '''
locus_list = []
for j in range(number_locus_points):
o = []
for c in instrument_to_locus.values():
o.append(locus_mags[j][c])
locus_list.append(o)
print instrument_to_locus
raw_input()
results = {}
if bootstrap:
cycles = ['full'] + ['bootstrap' + str(i) for i in range(bootstrap_num)]
else:
cycles = ['full']
for iteration in cycles:
''' make matrix with a full set of locus points for each star '''
locus_matrix = scipy.array(number_all_stars*[locus_list])
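# locus_matrix has shape (n_stars, n_locus_points, n_bands): every star gets its own
# copy of the stellar locus so the comparison below can be done by array broadcasting.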
print locus_matrix.shape
''' assemble matrices of the measured instrumental magnitudes in each band '''
print instrument_to_locus.keys()
A_band = scipy.swapaxes(scipy.swapaxes(scipy.array(number_locus_points*[[table.field('MAG_' + magtype + '-' + a)[:] for a in instrument_to_locus.keys()]]),0,2),1,2)
SeqNr = table.field('SeqNr')[:]
n = len(table.field('MAG_' + magtype + '-' + instrument_to_locus.keys()[0]))
def isitJ(name):
import string
if string.find(name,'JCAT') != -1:
return scipy.ones(n)
else:
return scipy.zeros(n)
A_err = scipy.swapaxes(scipy.swapaxes(scipy.array(number_locus_points*[[table.field('MAGERR_' + magtype + '-' + a)[:] for a in instrument_to_locus.keys()]]),0,2),1,2)
print A_err.shape
''' only use stars with errors less than max_err '''
if True:
mask = A_err > max_err
#mask[A_band_J == 1] = 0
mask[A_err > 1.5] = 1
A_band[mask] = 99
''' make matrix specifying good values '''
good = scipy.ones(A_band.shape)
#A_band[abs(A_FLAG) != 0] = 99
#A_band[abs(A_IMAFLAG) != 0] = 99
good[abs(A_band) == 99] = 0
good[abs(A_band) == 0] = 0
good = good[:,0,:]
good_bands_per_star = good.sum(axis=1) # sum all of the good bands for any given star
print good_bands_per_star , A_band.shape
''' figure out the cut-off '''
A_band = A_band[good_bands_per_star>=min_bands_per_star]
SeqNr = SeqNr[good_bands_per_star>=min_bands_per_star]
A_err = A_err[good_bands_per_star>=min_bands_per_star]
A_err[A_err<min_err] = min_err
locus_matrix = locus_matrix[good_bands_per_star>=min_bands_per_star]
''' if a bootstrap iteration, bootstrap with replacement '''
if string.find(iteration,'bootstrap') != -1:
length = len(A_band)
random_indices = []
unique_indices = {}
for e in range(length):
index = int(random.random()*length - 1)
unique_indices[index] = 'yes'
random_indices.append(index)
print random_indices, len(unique_indices.keys())
A_band = scipy.array([A_band[i] for i in random_indices])
SeqNr = scipy.array([SeqNr[i] for i in random_indices])
A_err = scipy.array([A_err[i] for i in random_indices])
locus_matrix = scipy.array([locus_matrix[i] for i in random_indices])
bands = A_band
bands_err = A_err
''' set errors on bad measurements (value=+-99) equal to 1000. and bands equal to 0 '''
bands_err[abs(A_band) == 99] = 1000.
bands[abs(A_band) == 99] = 0.
print bands.shape, locus_matrix.shape
number_good_stars = len(locus_matrix)
''' update good matrix after masking '''
good = scipy.ones(A_band.shape)
good[abs(A_band) == 99] = 0
good[abs(A_band) == 0] = 0
global itr
itr = 0
cycle = 0
keep_fitting = True
outliers = 'no outlier rejection'
while keep_fitting:
cycle += 1
print keep_fitting, outliers,
def errfunc(pars,residuals=False,savefig=None):
global itr
stat_tot = 0
zp_bands = scipy.zeros((number_good_stars,number_locus_points,len(instrument_to_locus.keys())))
for i in range(len(instrument_to_locus.keys())):
a = instrument_to_locus.keys()[i]
zp_bands[:,:,i] = assign_zp(a,pars,zps)
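# zp_bands broadcasts the trial zeropoints onto the (star, locus point, band) grid;
# only the 'vary' filters have entries in zps, so the hold filter's zeropoint stays
# fixed (it is reported as 0. in the 'ZPs' printout below).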
print zp_bands.shape, bands.shape, locus_matrix.shape, good.shape, number_good_stars, number_locus_points
num_prelim = (bands - locus_matrix + zp_bands) / bands_err**2.
num_prelim[good == 0] = 0.
num = (num_prelim.sum(axis=2))
denom_prelim = 1. / bands_err**2.
denom_prelim[good == 0] = 0.
denom = (denom_prelim.sum(axis=2))
mean = num / denom
mean_array = scipy.dstack(len(instrument_to_locus.keys())*[mean])
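# 'mean' is the inverse-variance-weighted mean of (star - locus + zp) over the good
# bands, i.e. the star's overall magnitude offset from each locus point. Subtracting
# it below means only the star's colors, not its brightness, are matched to the locus.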
ds_prelim = (bands - locus_matrix + zp_bands - mean_array)**2. #/ ds_err**2.
ds_prelim[good == 0] = 0
''' distance (in mag) of each star from each locus point, after removing the mean offset '''
ds = ds_prelim.sum(axis=2)**0.5
resid_prelim = (bands - locus_matrix + zp_bands - mean_array )**2. / bands_err**2.
plot = (bands -locus_matrix + zp_bands - mean_array )
resid_prelim[good == 0] = 0
''' calculate reduced chi squared '''
resid = resid_prelim.sum(axis=2) / good.sum(axis=2)
if False: #live_plot and iteration is 'full' and (itr % plot_iteration_increment == 0 or savefig is not None):
id = 100
for id in [10,20,30,40,100,150]:
best = 99999999999999999999999999999999999999999999999999999999
for i in range(len(resid[id])):
if resid[id][i] < best:
star = i
best = resid[id][i]
pylab.clf()
pylab.scatter(range(len(plot[id][star])),plot[id][star])
#pylab.scatter(range(len(plot[id][0])),locus_matrix[id][0])
pylab.errorbar(range(len(plot[id][star])),plot[id][star],yerr=bands_err[id][star])
pylab.ylim([-0.4,0.4])
pylab.draw()
pylab.draw()
print combo_dict['vary'], resid_prelim[id][star], (bands - locus_matrix + zp_bands - mean_array)[id][star], bands_err[id][star], good[id][star], resid[id], resid[id][star]
#print (bands - locus_matrix + zp_bands - mean_array )[10], bands_err[10]
''' these two minima are not necessarily at the same locus point '''
dist = ds.min(axis=1)
select_diff = resid.min(axis=1)
#for i in range(len(ds.min(axis=0))):
# print i
# print len(ds[0]), len(ds.min(axis=0))
# print ds[0][i]
# print ds.min(axis=1)[i]
#print 'end of locus', end_of_locus, ds.min(axis=1), ds[0]
stat_tot = select_diff.sum()
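# The total fit statistic is the sum over stars of each star's minimum chi^2
# (averaged over its good bands) with respect to any point along the locus.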
print 'ZPs', dict(zip([combo_dict['hold']]+combo_dict['vary'],([0.] + ['%.6f' % a for a in pars.tolist()])))
print len(bands), 'stars'
redchi = stat_tot / float(len(bands) - 1)
print 'chi^2', '%.5f' % stat_tot,
print 'red chi^2', '%.5f' % redchi
print 'iteration', itr
print live_plot , iteration == 'full' , (itr % plot_iteration_increment == 0 or savefig is not None)
if live_plot and iteration == 'full' and (itr % plot_iteration_increment == 0 or savefig is not None):
if savefig is not None:
print live_plot , iteration == 'full' , (itr % plot_iteration_increment == 0 or savefig is not None)
#savefig = None
plot_progress(pars,stat_tot,savefig)
itr += 1
if residuals:
end_of_locus = scipy.array([reduce(lambda x,y: x*y, [(resid.min(axis=1)[i] != resid[i][x] ) for x in range(5)]) for i in range(len(ds.min(axis=1)))])
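# end_of_locus flags stars whose best-matching locus point is *not* among the first
# five locus points; stars matching that end (the blue end, per the message printed
# below) are rejected in later cycles.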
print end_of_locus
return select_diff, dist, redchi, end_of_locus, len(bands), SeqNr
else: return stat_tot
def plot_progress(pars,stat_tot=None,savefig=None,paper_version=True):
import pylab, scipy
if paper_version:
params = {'backend' : 'ps',
'text.usetex' : True,
'ps.usedistiller' : 'xpdf',
'ps.distiller.res' : 6000}
pylab.rcParams.update(params)
fig_width = 6
fig_height = 6
fig_size = [fig_width,fig_height]
params = {'axes.labelsize' : 16,
'text.fontsize' : 16,
'legend.fontsize' : 15,
'xtick.labelsize' : 16,
'ytick.labelsize' : 16,
'figure.figsize' : fig_size}
pylab.rcParams.update(params)
zp_bands = scipy.zeros((number_good_stars,number_locus_points,len(instrument_to_locus.keys())))
for i in range(len(instrument_to_locus.keys())):
a = instrument_to_locus.keys()[i]
zp_bands[:,:,i] = assign_zp(a,pars,zps)
if pre_zps:
#pre_zp_A = scipy.swapaxes(scipy.swapaxes(scipy.array(number_locus_points*[number_good_stars*[[pre_zps[a[0][0]] for a in complist]]]),0,1),0,0)
#pre_zp_B = scipy.swapaxes(scipy.swapaxes(scipy.array(number_locus_points*[number_good_stars*[[pre_zps[a[1][0]] for a in complist]]]),0,1),0,0)
#pre_zp_bands = pre_zp_A - pre_zp_B
pre_zp_bands = scipy.swapaxes(scipy.swapaxes(scipy.array(number_locus_points*[number_good_stars*[[assign_zp(a[0],pars,pre_zps) for a in instrument_to_locus.keys()]]]),0,1),0,0)
pre_zp_bands = scipy.zeros((number_good_stars,number_locus_points,len(pre_zps)))
for i in range(len(pre_zps)):
a = pre_zps[i]
zp_bands[:,:,i] = assign_zp(a[0][0],pars,zps)-assign_zp(a[1][0],pars,zps)
oa = [combo_dict['hold']]+combo_dict['vary'] #instrument_to_locus.keys()
oa.sort(sort_filters)
print oa
if savefig is not None:
#index_list = zip([int(x) for x in 2*scipy.arange(len(complist)/2)],[int(x) for x in 2*scipy.arange(len(complist)/2)+scipy.ones(len(complist)/2)])
#if len(complist) > 2*(len(complist)/2):
# index_list.append([len(complist)-2,len(complist)-1])
#print index_list
index_list = []
print oa #instrument_to_locus.keys()
rng = range(len(oa))
if len(oa) > 3:
for a in rng: #range(len(complist)):
for b in rng: #range(len(complist)):
if a < b:
for c in rng: #range(len(complist)):
if b <= c:
for d in rng: #range(len(complist)):
if c < d:
#if c < d and c > b:
index_list.append([[oa[a],oa[b]],[oa[c],oa[d]]])
else:
index_list.append([[oa[0],oa[1]],[oa[1],oa[2]]])
print index_list #, range(len(complist)), complist
index_list += [[['SUBARU-10_3-1-W-J-V','SUBARU-10_2-1-W-S-Z+'],['SUBARU-10_2-1-W-C-IC','MEGAPRIME-10_2-1-i']]]
index_list = index_list[:]
#index_list = [[['SUBARU-10_2-1-W-C-RC''SUBARU-10_2-1-W-S-Z+'],['SUBARU-10_2-1-W-S-Z+','SUBARU-10_3-1-W-J-V']]]
else:
#index_list = [[[complist[1][0],complist[2][0]],[complist[3][0],complist[4][0]]]]
index_list = [[['MEGAPRIME-10_2-1-g','SUBARU-10_2-1-W-C-RC'],['SUBARU-10_2-1-W-C-RC','MEGAPRIME-10_2-1-z']]]
index_list = [[['SUBARU-10_2-1-W-C-RC','SUBARU-10_2-1-W-S-Z+'],['SUBARU-10_2-1-W-S-Z+','SUBARU-10_3-1-W-J-V']]]
index_list = [[['SUBARU-10_3-1-W-J-V','SUBARU-10_2-1-W-S-Z+'],['SUBARU-10_2-1-W-C-IC','MEGAPRIME-10_2-1-i']]]
print oa
index_list = [[[oa[0],oa[1]],[oa[1],oa[2]]]]
if savefig:
print index_list
print index_list
print instrument_to_locus
def ind(filt):
for j in range(len(instrument_to_locus.keys())):
if instrument_to_locus.keys()[j] == filt:
return j
for [c1_band1, c1_band2], [c2_band1,c2_band2] in index_list:
print instrument_to_locus.values()
print ind(c1_band1), ind(c1_band2)
print ind(c2_band1), ind(c2_band2)
print c2_band1, c2_band2
if ind(c1_band1) is not None and ind(c1_band2) is not None and ind(c2_band1) is not None and ind(c2_band2) is not None:
x_color = scipy.array(bands + zp_bands)[:,0,ind(c1_band1)] - scipy.array(bands + zp_bands)[:,0,ind(c1_band2)]
print ind(c2_band1), ind(c2_band2)
y_color = (bands + zp_bands)[:,0,ind(c2_band1)] - (bands + zp_bands)[:,0,ind(c2_band2)]
if pre_zps:
pre_x_color = scipy.array((bands + pre_zp_bands)[:,0,color1_index].tolist())
pre_y_color = (bands + pre_zp_bands)[:,0,color2_index]
x_err_1 = (bands_err)[:,0,ind(c1_band1)]
x_err_2 = (bands_err)[:,0,ind(c1_band2)]
y_err_1 = (bands_err)[:,0,ind(c2_band1)]
y_err_2 = (bands_err)[:,0,ind(c2_band2)]
mask = (x_err_1<100)*(x_err_2<100)*(y_err_1<100)*(y_err_2<100)
x_color = x_color[mask]
y_color = y_color[mask]
x_err = (x_err_1**2. + x_err_2**2.)**0.5
y_err = (y_err_1**2. + y_err_2**2.)**0.5
y_err = y_err[mask]
x_err = x_err[mask]
if pre_zps:
pre_x_color = pre_x_color[mask]
pre_y_color = pre_y_color[mask]
print len(x_color), len(x_color)
pylab.clf()
pylab.axes([0.18,0.1,0.77,0.85])
x_a = c1_band1 #complist[color1_index][0][0]
x_b = c1_band2 #complist[color1_index][1][0]
y_a = c2_band1 #complist[color2_index][0][0]
y_b = c2_band2 #complist[color2_index][1][0]
print extinct, x_a
x_extinct = extinct[x_a] - extinct[x_b]
y_extinct = extinct[y_a] - extinct[y_b]
def short_name(input):
for filt_name, filt_new in [['-u','$u$ (CFHT)'],['-g','$g$ (CFHT)'],['-r','$r$ (CFHT)'],['-i','$i$ (CFHT)'],['-z','$z$ (CFHT)'],['-B','$B$ (Subaru)'],['-V','$V$ (Subaru)'],['RC','$R$ (Subaru)'],['IC','$I$ (Subaru)'],['-I+','$i$ (Subaru)'],['-Z+','$z$ (Subaru)']]:
if string.find(input,filt_name) != -1:
return filt_new
return input
if paper_version:
x_color_name = short_name(x_a) + ' - ' + short_name(x_b) #+ ' (mag)'
y_color_name = short_name(y_a) + ' - ' + short_name(y_b) #+ ' (mag)'
pylab.xlabel(x_color_name)
pylab.ylabel(y_color_name)
else:
x_color_name = x_a + '-' + x_b
y_color_name = y_a + '-' + y_b
pylab.xlabel(x_color_name,fontsize='x-large')
pylab.ylabel(y_color_name,fontsize='x-large')
#print x_color_name, y_color_name, x_color, y_color, x_err, y_err
if len(x_color) and len(y_color):
pylab.errorbar(x_color,y_color,xerr=x_err,yerr=y_err,fmt=None,ecolor='gray',mc='none')
pylab.errorbar(x_color,y_color,xerr=0,yerr=0,fmt=None,marker='s',ecolor='#0066ff',mcolor='#0066ff') #,color='#3399ff',
# mfc='#3399ff', mec='#3399ff', ms=1, mew=1)
#pylab.scatter(x_color,y_color,s=0.1)
c1_locus = locus_matrix[0,:,ind(c1_band1)] - locus_matrix[0,:,ind(c1_band2)]
c2_locus = locus_matrix[0,:,ind(c2_band1)] - locus_matrix[0,:,ind(c2_band2)]
pylab.errorbar(c1_locus,c2_locus,xerr=0,yerr=0,color='red',ms=4.0,marker='.',ecolor='none')
if pre_zps:
pylab.errorbar(pre_x_color,pre_y_color,xerr=x_err,yerr=y_err,fmt=None,c='green')
pylab.scatter(pre_x_color,pre_y_color,c='green')
#print locus_matrix[0,:,color1_index][0]
pylab.arrow(c1_locus[10],c2_locus[-10],x_extinct,y_extinct,width=0.01,color='black')
if stat_tot is not None:
if not paper_version:
pylab.title('N=' + str(len(x_color)) + ' chi$^{2}$=' + ('%.1f' % stat_tot) + ' ' + iteration + ' ' + outliers + ' LAT=' + ('%.1f' % gallat))
if live_plot:
pylab.draw()
fit_band_zps = reduce(lambda x,y: x + y, [z[-2:].replace('C','').replace('-','') for z in [combo_dict['hold']] + combo_dict['vary']])
''' only save figure if savefig is not None '''
print 'saving file '
if savefig is not None:
if (string.find(iteration,'bootstrap')==-1 or save_bootstrap_plots):
file = plotdir + '/' + fit_band_zps + '_' + x_color_name + '_' + y_color_name + '_' + savefig.replace(' ','_')
file = file.replace(' ','').replace('$','')
command = 'mkdir -p ' + plotdir
print command
os.system(command)
pylab.savefig(file)
if paper_version: pylab.savefig(file.replace('.png','.pdf'))
#pylab.show()
#def median_starting_zp():
#for key in combo_dict['vary']:
# median = instrument_to_locus[key]
if outliers == 'no outlier rejection':
''' starting guess for each zeropoint: median locus color minus median instrumental color, both relative to the hold filter '''
print A_band.shape
pinit = []
for i in range(1,len(instrument_to_locus.keys())):
key = instrument_to_locus.keys()[i]
key_hold = instrument_to_locus.keys()[0]
print i, A_band.shape, len(instrument_to_locus)
diff = A_band[:,0,i] - A_band[:,0,0]
good_diff = good[:,0,i] + good[:,0,0]
diff = diff[good_diff == 2]
median_instrumental = scipy.median(diff)
locus_here = [locus_mags[x][instrument_to_locus[key]] - locus_mags[x][instrument_to_locus[key_hold]] for x in range(len(locus_mags))]
median_locus = scipy.median(locus_here)
pinit.append(median_locus - median_instrumental)
print pinit
if True:
if iteration == 'full':
#print startingzps.keys()
if startingzps is None:
pinit = scipy.zeros(len(combo_dict['vary']))
else:
pinit = []
for key in combo_dict['vary']:
try1 = key.replace('10_2','').replace('10_1','').replace('10_3','').replace('9-4','')
try2 = key.replace('10_2','').replace('10_1','').replace('10_3','').replace('9-4','') + '-1'
if startingzps.has_key(key):
val = startingzps[key]
elif startingzps.has_key(try1):
val = startingzps[try1]
elif startingzps.has_key(try2):
val = startingzps[try2]
pinit.append(val)
else:
import random
''' add random offset of 1.0 mag '''
pinit = [results['full'][key] + random.random()*1.0 for key in combo_dict['vary']]
from scipy import optimize
out = scipy.optimize.fmin(errfunc,pinit,maxiter=100000,args=(),ftol=0.00001,xtol=0.00001) #,gtol=0.001)
if iteration == 'full':
errfunc(out,savefig=iteration+'_'+outliers+'.png')
print out
import scipy
print 'starting'
residuals,dist,redchi,end_of_locus, num, SeqNr = errfunc(pars=[0.] + out,residuals=True)
#print SeqNr[:10], residuals[:10]
print dist
print 'finished'
print 'bands' , len(bands)
''' first filter on distance '''
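# dist is the residual distance (in mag, after removing the mean offset) to the nearest
# locus point; stars more than 1.5 mag away are rejected as outliers before refitting.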
bands = bands[dist < 1.5]
bands_err = bands_err[dist < 1.5]
locus_matrix = locus_matrix[dist < 1.5]
good = good[dist < 1.5]
residuals = residuals[dist < 1.5]
end_of_locus = end_of_locus[dist < 1.5]
SeqNr = SeqNr[dist < 1.5]
print end_of_locus
print bands.shape
print dist.shape, residuals.shape
if True:
''' filter on residuals '''
print bands.shape, residuals.shape
if cycle == 1: cut_num = 100
if cycle > 1: cut_num = 30
if cycle > 2: cut_num = 15
if cycle > 3: cut_num = 6
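# Progressively tighter chi^2 clipping: the residual threshold shrinks from 100 to 30,
# 15 and finally 6 as the refit cycles converge.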
#print residuals, cycle, cut_num, len(bands[residuals < cut_num]), len(bands)
bands = bands[residuals < cut_num]
bands_err = bands_err[residuals < cut_num]
locus_matrix = locus_matrix[residuals < cut_num]
good = good[residuals < cut_num]
end_of_locus = end_of_locus[residuals < cut_num]
SeqNr = SeqNr[residuals < cut_num]
if cycle > 2:
''' filter on end of locus '''
bands = bands[end_of_locus]
bands_err = bands_err[end_of_locus]
locus_matrix = locus_matrix[end_of_locus]
good = good[end_of_locus]
SeqNr = SeqNr[end_of_locus]
print number_good_stars, len(locus_matrix)
print len(filter(lambda x: x is False,end_of_locus.tolist()))
if number_good_stars > len(locus_matrix) or len(filter(lambda x: x is False,end_of_locus.tolist())) > 0 and cycle < 5:
print 'REFITTING AFTER REMOVING ' + str(number_good_stars - len(locus_matrix) ) + ' OUTLIERS AND STARS MATCHING BLUE END OF LOCUS'
number_good_stars = len(locus_matrix)
print 'bands' , len(bands)
print bands.shape, locus_matrix.shape
pinit = out #scipy.zeros(len(zps_list))
outliers = 'outliers removed'
if False:
pinit = scipy.array(out) + scipy.array([random.random()*1.0 for p in pinit])
pinit = out #scipy.zeros(len(zps_list))
from scipy import optimize
out = scipy.optimize.fmin(errfunc,pinit,args=())
residuals,dist,redchi,end_of_locus, num, SeqNr = errfunc(out,savefig=iteration+'_'+outliers+'.png',residuals=True)
print out
else:
print 'NO OUTLYING STARS OR STARS MATCHING BLUE END OF LOCUS, PROCEEDING'
keep_fitting = False
results[iteration] = dict(zip([combo_dict['hold']]+combo_dict['vary'],([0.] + out.tolist())))
mask = bands_err < 100
#ra = [table.field('ALPHA_J2000')[table.field('SeqNr') == s] for s in SeqNr]
#dec = [table.field('DELTA_J2000')[table.field('SeqNr') == s] for s in SeqNr]
#pylab.scatter(ra,dec,pcolor=residuals)
#pylab.show()
pylab.clf()
ra = [table.field('ALPHA_J2000')[table.field('SeqNr') == s][0] for s in SeqNr]
dec = [table.field('DELTA_J2000')[table.field('SeqNr') == s][0] for s in SeqNr]
print ra, dec, residuals
print len(ra), len(dec), len(residuals)
pylab.scatter(ra,dec,c=residuals)
#pylab.show()
results['redchi'] = redchi
results['num'] = num
print results
errors = {}
bootstraps = {}
import scipy
print 'BOOTSTRAPPING ERRORS:'
for key in [combo_dict['hold']] + combo_dict['vary']:
l = []
for r in results.keys():
if r != 'full' and r != 'redchi' and r != 'num':
l.append(results[r][key])
print key+':', scipy.std(l), 'mag'
errors[key] = scipy.std(l)
if bootstrap_num > 0 and len(l) > 0:
bootstraps[key] = reduce(lambda x,y: x + ',' + y, [str(z) for z in l])
else: bootstraps[key] = 'None'
results['bootstraps'] = bootstraps
results['errors'] = errors
results['bootstrapnum'] = bootstrap_num
if False:
def save_results(save_file,results,errors):
f = open(save_file,'w')
for key in results['full'].keys():
f.write(key + ' ' + str(results['full'][key]) + ' +- ' + str(errors[key]) + '\n')
f.close()
import pickle
f = open(save_file + '.pickle','w')
m = pickle.Pickler(f)
pickle.dump({'results':results,'errors':errors},m)
f.close()
if results.has_key('full') and save_results is not None: save_results(save_file,results, errors)
return results
#@entryExit
def sdss(run,night,snpath,name=None):
import pylab, pyfits, commands
input_cat = '/Volumes/mosquitocoast/patrick/kpno/' + run +'/' + night + '/' + snpath + '/stars.fits'
p = pyfits.open(input_cat)[1].data
#pylab.scatter(p.field('psfMag_g') - p.field('psfMag_r'),p.field('MAG_APER_u') - p.field('psfMag_u'))
#pylab.errorbar(x[good],y[good],xerr=x_err[good],yerr=y_err[good],fmt=None)
#pylab.show()
import transform_filts, scipy
kit = get_kit()
det = 'T2KB'
print kit.keys()
aptype = 'psfMag_' #'MAG_APERCORR-SDSS_'
aptype_err = 'psfMagErr_' #'MAGERR_APERCORR-SDSS_'
for mag in ['APERCORR','APERDUST']:
cat_aptype = 'MAG_' + mag + '-' #'psfMag_'
cat_aptype_err = 'MAGERR_' + mag + '-' #'psfMagErr_'
for filt in ['u','g','r','i','z']:
running = p.field(aptype + 'g') - p.field(aptype + 'i')
x = p.field('ra')[running==0.47440300000000235]
y = p.field('dec')[running==0.47440300000000235]
#print x,y
variation=transform_filts.apply_kit(running,kit[filt.upper() + det])
print variation
calibrated = p.field(aptype + filt) + variation
uncalibrated = p.field(cat_aptype + filt)
error = (p.field(aptype_err + filt)**2. + p.field(cat_aptype_err + filt)**2.)**0.5
mask= (error < 0.1) * (p.field('FLAGS-' + filt) == 0) * (p.field('IMAFLAGS_ISO-' + filt) == 0.)
#mask *= (error < 0.1) * (p.field('FLAGS-SDSS_' + filt) == 0) * (p.field('IMAFLAGS_ISO-SDSS_' + filt) == 0.)
mask *= (p.field('FLAGS-g') == 0) * (p.field('IMAFLAGS_ISO-g') == 0.)
mask *= (p.field('FLAGS-i') == 0) * (p.field('IMAFLAGS_ISO-i') == 0.)
#mask *= p.field('FLAGS_SDSS') == 0
print mask
running = running[mask]
calibrated = calibrated[mask]
uncalibrated = uncalibrated[mask]
error = error[mask]
#print running, p.field('psfMag_g'), p.field('psfMag_i')
#print sorted(running)
#print p.field('SDSS_NEIGHBORS'), p.field('psfMag_g')
error[error < 0.02] = 0.02
print calibrated
def compute(cal_sample, uncal_sample, error_sample):
zp = scipy.average(cal_sample - uncal_sample,weights=1./error_sample**2.)
zp = scipy.median(cal_sample - uncal_sample)
mask = abs(cal_sample- uncal_sample-zp)/error_sample < 6.
cal_sample= cal_sample[mask]
uncal_sample= uncal_sample[mask]
error_sample = error_sample[mask]
zp = scipy.average(cal_sample - uncal_sample,weights=1./error_sample**2.)
zp_med = scipy.median(cal_sample - uncal_sample)
return zp, zp_med
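# Bootstrap the calibrated-minus-uncalibrated zeropoint 100 times with replacement;
# the adopted zeropoint is the mean of the resampled values and their scatter gives the error.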
zps = []
for i in range(100):
import random
random_indices = []
unique_indices = {}
length = len(calibrated)
for e in range(length):
index = int(random.random()*length - 1)
unique_indices[index] = 'yes'
random_indices.append(index)
cal = scipy.array([calibrated[i] for i in random_indices])
uncal = scipy.array([uncalibrated[i] for i in random_indices])
err = scipy.array([error[i] for i in random_indices])
zp, zp_med = compute(cal,uncal,err)
zps.append(zp)
zp = scipy.mean(zps)
zp_err = scipy.std(zps)
pylab.clf()
pylab.title(str(zp) + ' +- ' + str(zp_err))
pylab.axhline(zp,c='red')
pylab.axhline(zp+zp_err,c='red')
pylab.axhline(zp-zp_err,c='red')
pylab.scatter(running,calibrated-uncalibrated)
pylab.errorbar(running,calibrated-uncalibrated,yerr=error,fmt=None)
tab = '/Volumes/mosquitocoast/patrick/kpno/' + run +'/' + night + '/' + snpath + '/sdss_stars' + filt + '.png'
print tab
pylab.savefig(tab)
pylab.savefig('/Users/pkelly/Dropbox/sdss' + filt + '.png')
pylab.clf()
pylab.title(str(zp) + ' +- ' + str(zp_err))
pylab.scatter(calibrated,uncalibrated-calibrated)
pylab.errorbar(calibrated,uncalibrated-calibrated,yerr=error,fmt=None)
tab = '/Volumes/mosquitocoast/patrick/kpno/' + run +'/' + night + '/' + snpath + '/bias_stars' + filt + '.png'
print tab
pylab.savefig(tab)
pylab.savefig('/Users/pkelly/Dropbox/bias_sdss' + filt + '.png')
#pylab.show()
image = '/Volumes/mosquitocoast/patrick/kpno/' + run +'/' + night + '/' + snpath + '/' + filt + '/reg.fits'
import scamp
name = 'reg'
print image, snpath, filt, name, run
reload(scamp).add_image(image,snpath,filt,name,run)
import MySQLdb
db2 = MySQLdb.connect(db='calib')
c = db2.cursor()
if mag=='APERCORR':
command = "UPDATE CALIB set SDSSZP=" + str(zp) + " WHERE SN='" + snpath + "' and FILT='" + filt + "' and NAME='" + name + "' and RUN='" + run + "'"
c.execute(command)
command = "UPDATE CALIB set SDSSZPERR=" + str(zp_err) + " WHERE SN='" + snpath + "' and FILT='" + filt + "' and NAME='" + name + "' and RUN='" + run + "'"
c.execute(command)
command = "UPDATE CALIB set SDSSNUM=" + str(len(calibrated)) + " WHERE SN='" + snpath + "' and FILT='" + filt + "' and NAME='" + name + "' and RUN='" + run + "'"
c.execute(command)
elif mag=='APERDUST':
command = "UPDATE CALIB set SDSSDUSTZP=" + str(zp) + " WHERE SN='" + snpath + "' and FILT='" + filt + "' and NAME='" + name + "' and RUN='" + run + "'"
print command
c.execute(command)
command = "UPDATE CALIB set SDSSDUSTZPERR=" + str(zp_err) + " WHERE SN='" + snpath + "' and FILT='" + filt + "' and NAME='" + name + "' and RUN='" + run + "'"
c.execute(command)
command = "UPDATE CALIB set SDSSDUSTNUM=" + str(len(calibrated)) + " WHERE SN='" + snpath + "' and FILT='" + filt + "' and NAME='" + name + "' and RUN='" + run + "'"
c.execute(command)
print filt, zp, zp_med
def plot_zp():
import MySQLdb, scipy
db2 = MySQLdb.connect(db='calib')
c = db2.cursor()
for filt in ['u','g','r','i','z']:
command = 'select JD, SLRZP, sn from calib where gallat is not null and slrzp is not null and filt="' + filt + '"' # and run="kpno_May2010"' #JD > 2455470'
#command = 'select JD, SLRZP, sn from calib where gallat is not null and slrzp is not null and filt="' + filt + '" and exptime=120'
c.execute(command)
results = c.fetchall()
print results
x = [float(a[0]) for a in results]
y = [float(a[1]) for a in results]
s = [(a[2][4:]) for a in results]
import pylab
pylab.clf()
for i in range(len(x)):
pylab.text(x[i],y[i],s[i],fontsize=8)
pylab.scatter(x,y)
pylab.title(filt)
pylab.savefig('/Users/pkelly/Dropbox/test' + filt + '.pdf')
#pylab.show()
def plot_detail(calibrate=False):
import MySQLdb, scipy
db2 = MySQLdb.connect(db='calib')
c = db2.cursor()
for filt in ['u','g','r','i','z']:
import pylab
pylab.clf()
def p(command,color):
import MySQLdb, scipy
db2 = MySQLdb.connect(db='calib')
c = db2.cursor()
c.execute(command)
results = c.fetchall()
print results
x = scipy.array([float(a[0]) for a in results])
y = scipy.array([float(a[1]) for a in results])
y_err = scipy.array([float(a[2]) for a in results])
s = [(a[3][4:]) for a in results]
for i in range(len(x)):
pylab.text(x[i]+0.01,y[i]+0.00,s[i],fontsize=8)
print x
if 1:
pylab.errorbar(x,y,y_err,fmt='ro',color=color)
pylab.scatter(x,y,c=color)
x_new = scipy.arange(1,3)
print len(x), len(y)
p = scipy.polyfit(x,y,1)
y_new = scipy.polyval(p, x_new)
pylab.plot(x_new,y_new, color='black')
A = scipy.vstack([x/y_err, scipy.ones(len(x))/y_err]).T
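# Weighted linear least squares: both the design matrix [x, 1] and the data are divided
# by y_err, so lstsq below returns the error-weighted slope m and intercept c.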
print A
from scipy import linalg
m,c = scipy.linalg.lstsq(A,y/y_err)[0]
print m,c
pylab.plot(x_new,m*x_new + c, color='blue')
print x_new, m*x_new
return m,c
run = 'kpno_Oct2010'
variable = 'airmass'
command = 'select b.' + variable + ', c.slrdustzp+b.RELZP, c.slrdustzperr, b.sn from calib as c join calib b on c.sn=b.sn and c.run=b.run and c.filt=b.filt where c.slrzp is not null and c.slrzperr is not null and c.slrnum > 10 and b.relzp is not null and c.filt="' + filt + '" and c.run="' + run + '" and c.slrzperr<8 and c.JD>2455475'
#p(command,'blue')
command = 'select b.' + variable + ', c.sdssdustzp+b.RELZP, c.sdssdustzperr, b.sn from calib as c join calib b on c.sn=b.sn and c.run=b.run and c.filt=b.filt where c.sdssdustzp is not null and c.sdsszperr is not null and c.sdssnum > 1 and b.relzp is not null and c.filt="' + filt + '" and c.run="' + run + '" and b.night=4297' # and c.JD>2455475'
print command
m_fit,c_fit = p(command,'red')
if calibrate:
#for filt in ['u','g','r','i','z']:
#command = 'select sn, airmass, sdssdustzp, run from calib where night=4297 and filt="' + filt + '" group by sn,filt'
command = 'select sn, airmass, sdssdustzp, run from calib where night=4353 and sn="sn1997ef" and filt="' + filt + '" group by sn,filt'
print command
c.execute(command)
results = c.fetchall()
print results
import string , os
for sn, airmass, sdssdustzp, run in results:
if not sdssdustzp:
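# No direct SDSS zeropoint for this field: fall back to the zeropoint predicted from
# the airmass using the linear fit (m_fit, c_fit) derived above.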
sdssphotozp = m_fit*float(airmass) + c_fit
else:
sdssphotozp = float(sdssdustzp)
print sdssphotozp, sdssdustzp, sn
command = 'sethead ' + os.environ['kpno'] + '/' + run+ '/work_night/' + sn + '/' + filt + '/reg.fits SDSSPHOTOZP=' + str(sdssphotozp)
print command
os.system(command)
command = 'sethead ' + os.environ['kpno'] + '/' + run+ '/work_night/' + sn + '/' + filt + '/reg.sdss.fits SDSSPHOTOZP=' + str(sdssphotozp)
print command
os.system(command)
command = 'update calib set sdssphotozp=' + str(sdssphotozp) + ' where sn="' + sn + '" and run="' + run + '" and filt="' + filt + '"'
c.execute(command)
import anydbm
gh = anydbm.open(sn)
gh['sdssphotozp_' + filt ] = str(sdssphotozp)
import commands
gain = commands.getoutput('gethead ' + os.environ['kpno'] + '/' + run+ '/work_night/' + sn + '/' + filt + '/reg.fits GAIN')
detector = commands.getoutput('gethead ' + os.environ['kpno'] + '/' + run+ '/work_night/' + sn + '/' + filt + '/reg.fits DETECTOR')
gh['gain_' + filt + '_' + detector ] = gain
pylab.title(filt)
pylab.savefig('/Users/pkelly/Dropbox/test' + filt + '.pdf')
if __name__ == '__main__':
import os , sys, string
subarudir = os.environ['subdir']
cluster = sys.argv[1] #'MACS1423+24'
spec = False
train_first = False
magtype = 'APER1'
AP_TYPE = ''
type = 'all'
if len(sys.argv) > 2:
for s in sys.argv:
if s == 'spec':
type = 'spec'
spec = True
if s == 'rand':
type = 'rand'
if s == 'train':
train_first = True
if s == 'ISO':
magtype = 'ISO'
if s == 'APER1':
magtype = 'APER1'
if s == 'APER':
magtype = 'APER'
if string.find(s,'detect') != -1:
import re
rs = re.split('=',s)
DETECT_FILTER=rs[1]
if string.find(s,'spectra') != -1:
import re
rs = re.split('=',s)
SPECTRA=rs[1]
if string.find(s,'aptype') != -1:
import re
rs = re.split('=',s)
AP_TYPE = '_' + rs[1]
print 'magtype', magtype
#photdir = subarudir + '/' + cluster + '/PHOTOMETRY_' + DETECT_FILTER + AP_TYPE + '/'
all(subarudir,cluster,DETECT_FILTER,AP_TYPE,magtype)
|
#!/usr/bin/python3
# pylint: disable=I0011,R0913,R0902
"""Define Handler abstact"""
class Handler(object):
"""Handler abstract"""
def __init__(self, *argv):
self.workers = []
for worker in argv:
self.workers.append(worker)
def add_worker(self, worker):
self.workers.append(worker)
def handle(self, message):
"""handle a message"""
raise NotImplementedError("handle not implemented")
|
from django.core.management import BaseCommand
from db.models import ProfileType
from db.seed import Seed
class Command(BaseCommand):
help = 'Generates random students, companies and universities'
seed = Seed()
def add_arguments(self, parser):
parser.add_argument('num_students',
type=int,
help='Indicates the number of students to be created',
default=0)
parser.add_argument('num_companies',
type=int,
help='Indicates the number of companies to be created',
default=0)
parser.add_argument('num_universities',
type=int,
help='Indicates the number of universities to be created',
default=0)
# noinspection PyUnresolvedReferences
def handle(self, *args, **options):
number_of_students = options.get('num_students')
number_of_companies = options.get('num_companies')
number_of_universities = options.get('num_universities')
self.stdout.write(f'Adding {number_of_students} random student(s)...')
for i in range(0, number_of_students):
self.stdout.write('.', ending='')
random_data = self.seed.random_student(i)
self.seed.run(random_data)
self.stdout.write('', ending='\n')
self.stdout.write(f'Adding {number_of_companies} random company(ies)...')
for i in range(0, number_of_companies):
self.stdout.write('.', ending='')
random_data = self.seed.random_company(i, ProfileType.COMPANY)
self.seed.run(random_data)
self.stdout.write('', ending='\n')
self.stdout.write(f'Adding {number_of_universities} random university(ies)...')
for i in range(0, number_of_universities):
self.stdout.write('.', ending='')
random_data = self.seed.random_company(i, ProfileType.UNIVERSITY)
self.seed.run(random_data)
self.stdout.write('', ending='\n')
self.stdout.write(self.style.SUCCESS('Adding random data completed'))
|
import os
print(os.environ["ENV001"])
print(os.environ["ENV002"])
|
# Author: Christian Brodbeck <[email protected]>
#
# License: BSD-3-Clause
import os
import os.path as op
import re
import shutil
import numpy as np
from numpy.testing import assert_allclose, assert_array_almost_equal
import pytest
import warnings
import mne
from mne.datasets import testing
from mne.io import read_info
from mne.io.kit.tests import data_dir as kit_data_dir
from mne.io.constants import FIFF
from mne.surface import dig_mri_distances
from mne.transforms import invert_transform
from mne.utils import requires_mayavi, traits_test, modified_env, get_config
from mne.channels import DigMontage
data_path = testing.data_path(download=False)
raw_path = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
kit_raw_path = op.join(kit_data_dir, 'test_bin_raw.fif')
subjects_dir = op.join(data_path, 'subjects')
fid_fname = op.join(subjects_dir, 'sample', 'bem', 'sample-fiducials.fif')
ctf_raw_path = op.join(data_path, 'CTF', 'catch-alp-good-f.ds')
nirx_15_0_raw_path = op.join(data_path, 'NIRx', 'nirscout',
'nirx_15_0_recording', 'NIRS-2019-10-27_003.hdr')
nirsport2_raw_path = op.join(data_path, 'NIRx', 'nirsport_v2', 'aurora_2021_9',
'2021-10-01_002_config.hdr')
snirf_nirsport2_raw_path = op.join(data_path, 'SNIRF', 'NIRx', 'NIRSport2',
'1.0.3', '2021-05-05_001.snirf')
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_coreg_model_decimation(subjects_dir_tmp):
"""Test CoregModel decimation of high-res to low-res head."""
from mne.gui._coreg_gui import CoregModel
# This makes the test much faster
subject_dir = op.join(subjects_dir_tmp, 'sample')
shutil.move(op.join(subject_dir, 'bem', 'outer_skin.surf'),
op.join(subject_dir, 'surf', 'lh.seghead'))
for fname in ('sample-head.fif', 'sample-head-dense.fif'):
os.remove(op.join(subject_dir, 'bem', fname))
model = CoregModel(guess_mri_subject=False)
with pytest.warns(RuntimeWarning, match='No low-resolution'):
model.mri.subjects_dir = op.dirname(subject_dir)
assert model.mri.subject == 'sample' # already set by setting subjects_dir
assert model.mri.bem_low_res.file == ''
assert len(model.mri.bem_low_res.surf.rr) == 2562
assert len(model.mri.bem_high_res.surf.rr) == 2562 # because we moved it
@requires_mayavi
@traits_test
def test_coreg_model(subjects_dir_tmp):
"""Test CoregModel."""
from mne.gui._coreg_gui import CoregModel
trans_dst = op.join(subjects_dir_tmp, 'test-trans.fif')
# make it use MNI fiducials
os.remove(op.join(subjects_dir_tmp, 'sample', 'bem',
'sample-fiducials.fif'))
model = CoregModel()
with pytest.raises(RuntimeError, match='Not enough information for savin'):
model.save_trans('blah.fif')
model.mri.subjects_dir = subjects_dir_tmp
model.mri.subject = 'sample'
assert model.mri.fid_ok # automated using MNI fiducials
model.hsp.file = raw_path
assert_allclose(model.hsp.lpa, [[-7.137e-2, 0, 5.122e-9]], 1e-4)
assert_allclose(model.hsp.rpa, [[+7.527e-2, 0, 5.588e-9]], 1e-4)
assert_allclose(model.hsp.nasion, [[+3.725e-9, 1.026e-1, 4.191e-9]], 1e-4)
assert model.has_lpa_data
assert model.has_nasion_data
assert model.has_rpa_data
assert len(model.hsp.eeg_points) > 1
assert len(model.mri.bem_low_res.surf.rr) == 2562
assert len(model.mri.bem_high_res.surf.rr) == 267122
lpa_distance = model.lpa_distance
nasion_distance = model.nasion_distance
rpa_distance = model.rpa_distance
avg_point_distance = np.mean(model.point_distance)
model.nasion_weight = 1.
model.fit_fiducials(0)
old_x = lpa_distance ** 2 + rpa_distance ** 2 + nasion_distance ** 2
new_x = (model.lpa_distance ** 2 + model.rpa_distance ** 2 +
model.nasion_distance ** 2)
assert new_x < old_x
model.fit_icp(0)
new_dist = np.mean(model.point_distance)
assert new_dist < avg_point_distance
model.save_trans(trans_dst)
trans = mne.read_trans(trans_dst)
assert_allclose(trans['trans'], model.head_mri_t)
# test restoring trans
x, y, z = 100, 200, 50
rot_x, rot_y, rot_z = np.rad2deg([1.5, 0.1, -1.2])
model.trans_x = x
model.trans_y = y
model.trans_z = z
model.rot_x = rot_x
model.rot_y = rot_y
model.rot_z = rot_z
trans = model.mri_head_t
model.reset_traits(["trans_x", "trans_y", "trans_z", "rot_x", "rot_y",
"rot_z"])
assert model.trans_x == 0
model.set_trans(trans)
assert_array_almost_equal(model.trans_x, x)
assert_array_almost_equal(model.trans_y, y)
assert_array_almost_equal(model.trans_z, z)
assert_array_almost_equal(model.rot_x, rot_x)
assert_array_almost_equal(model.rot_y, rot_y)
assert_array_almost_equal(model.rot_z, rot_z)
# info
assert isinstance(model.fid_eval_str, str)
assert isinstance(model.points_eval_str, str)
# scaling job
assert not model.can_prepare_bem_model
model.n_scale_params = 1
assert model.can_prepare_bem_model
model.prepare_bem_model = True
sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
model.get_scaling_job('sample2', False)
assert sdir == subjects_dir_tmp
assert sfrom == 'sample'
assert sto == 'sample2'
assert_allclose(scale, model.parameters[6:9])
assert skip_fiducials is False
# find BEM files
bems = set()
for fname in os.listdir(op.join(subjects_dir, 'sample', 'bem')):
match = re.match(r'sample-(.+-bem)\.fif', fname)
if match:
bems.add(match.group(1))
assert set(bemsol) == bems
model.prepare_bem_model = False
sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
model.get_scaling_job('sample2', True)
assert bemsol == []
assert (skip_fiducials)
model.load_trans(fname_trans)
model.save_trans(trans_dst)
trans = mne.read_trans(trans_dst)
assert_allclose(trans['trans'], model.head_mri_t)
assert_allclose(invert_transform(trans)['trans'][:3, 3] * 1000.,
[model.trans_x, model.trans_y, model.trans_z])
@requires_mayavi
@traits_test
def test_coreg_gui_display(subjects_dir_tmp, check_gui_ci):
"""Test CoregFrame."""
from mayavi import mlab
from tvtk.api import tvtk
home_dir = subjects_dir_tmp
# Remove the two files that will make the fiducials okay via MNI estimation
os.remove(op.join(subjects_dir_tmp, 'sample', 'bem',
'sample-fiducials.fif'))
os.remove(op.join(subjects_dir_tmp, 'sample', 'mri', 'transforms',
'talairach.xfm'))
with modified_env(_MNE_GUI_TESTING_MODE='true',
_MNE_FAKE_HOME_DIR=home_dir):
with pytest.raises(ValueError, match='not a valid subject'):
mne.gui.coregistration(
subject='Elvis', subjects_dir=subjects_dir_tmp)
# avoid modal dialog if SUBJECTS_DIR is set to a directory that
# does not contain valid subjects
ui, frame = mne.gui.coregistration(subjects_dir='')
mlab.process_ui_events()
ui.dispose()
mlab.process_ui_events()
ui, frame = mne.gui.coregistration(subjects_dir=subjects_dir_tmp,
subject='sample')
mlab.process_ui_events()
assert not frame.model.mri.fid_ok
frame.model.mri.lpa = [[-0.06, 0, 0]]
frame.model.mri.nasion = [[0, 0.05, 0]]
frame.model.mri.rpa = [[0.08, 0, 0]]
assert frame.model.mri.fid_ok
frame.data_panel.raw_src.file = raw_path
assert isinstance(frame.eeg_obj.glyph.glyph.glyph_source.glyph_source,
tvtk.SphereSource)
frame.data_panel.view_options_panel.eeg_obj.project_to_surface = True
assert isinstance(frame.eeg_obj.glyph.glyph.glyph_source.glyph_source,
tvtk.CylinderSource)
mlab.process_ui_events()
# grow hair (faster for low-res)
assert frame.data_panel.view_options_panel.head_high_res
frame.data_panel.view_options_panel.head_high_res = False
frame.model.grow_hair = 40.
# scale
frame.coreg_panel.n_scale_params = 3
frame.coreg_panel.scale_x_inc = True
assert frame.model.scale_x == 101.
frame.coreg_panel.scale_y_dec = True
assert frame.model.scale_y == 99.
# reset parameters
frame.coreg_panel.reset_params = True
assert frame.model.grow_hair == 0
assert not frame.data_panel.view_options_panel.head_high_res
# configuration persistence
assert (frame.model.prepare_bem_model)
frame.model.prepare_bem_model = False
frame.save_config(home_dir)
ui.dispose()
mlab.process_ui_events()
ui, frame = mne.gui.coregistration(subjects_dir=subjects_dir_tmp)
assert not frame.model.prepare_bem_model
assert not frame.data_panel.view_options_panel.head_high_res
ui.dispose()
mlab.process_ui_events()
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_coreg_model_with_fsaverage(tmpdir):
"""Test CoregModel with the fsaverage brain data."""
tempdir = str(tmpdir)
from mne.gui._coreg_gui import CoregModel
mne.create_default_subject(subjects_dir=tempdir,
fs_home=op.join(subjects_dir, '..'))
model = CoregModel()
model.mri.subjects_dir = tempdir
model.mri.subject = 'fsaverage'
assert model.mri.fid_ok
model.hsp.file = raw_path
lpa_distance = model.lpa_distance
nasion_distance = model.nasion_distance
rpa_distance = model.rpa_distance
avg_point_distance = np.mean(model.point_distance)
# test hsp point omission
model.nasion_weight = 1.
model.trans_y = -0.008
model.fit_fiducials(0)
model.omit_hsp_points(0.02)
assert model.hsp.n_omitted == 1
model.omit_hsp_points(np.inf)
assert model.hsp.n_omitted == 0
model.omit_hsp_points(0.02)
assert model.hsp.n_omitted == 1
model.omit_hsp_points(0.01)
assert model.hsp.n_omitted == 4
model.omit_hsp_points(0.005)
assert model.hsp.n_omitted == 40
model.omit_hsp_points(0.01)
assert model.hsp.n_omitted == 4
model.omit_hsp_points(0.02)
assert model.hsp.n_omitted == 1
# scale with 1 parameter
model.n_scale_params = 1
model.fit_fiducials(1)
old_x = lpa_distance ** 2 + rpa_distance ** 2 + nasion_distance ** 2
new_x = (model.lpa_distance ** 2 + model.rpa_distance ** 2 +
model.nasion_distance ** 2)
assert (new_x < old_x)
model.fit_icp(1)
avg_point_distance_1param = np.mean(model.point_distance)
assert (avg_point_distance_1param < avg_point_distance)
# scaling job
sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
model.get_scaling_job('scaled', False)
assert sdir == tempdir
assert sfrom == 'fsaverage'
assert sto == 'scaled'
assert_allclose(scale, model.parameters[6:9])
assert set(bemsol) == {'inner_skull-bem'}
model.prepare_bem_model = False
sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
model.get_scaling_job('scaled', False)
assert bemsol == []
# scale with 3 parameters
model.n_scale_params = 3
model.fit_icp(3)
assert (np.mean(model.point_distance) < avg_point_distance_1param)
# test switching raw disables point omission
assert model.hsp.n_omitted == 1
model.hsp.file = kit_raw_path
assert model.hsp.n_omitted == 0
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_coreg_gui_automation():
"""Test that properties get properly updated."""
from mne.gui._file_traits import DigSource
from mne.gui._fiducials_gui import MRIHeadWithFiducialsModel
from mne.gui._coreg_gui import CoregModel
subject = 'sample'
hsp = DigSource()
hsp.file = raw_path
mri = MRIHeadWithFiducialsModel(subjects_dir=subjects_dir, subject=subject)
model = CoregModel(mri=mri, hsp=hsp)
# gh-7254
assert not (model.nearest_transformed_high_res_mri_idx_hsp == 0).all()
model.fit_fiducials()
model.icp_iterations = 2
model.nasion_weight = 2.
model.fit_icp()
model.omit_hsp_points(distance=5e-3)
model.icp_iterations = 2
model.fit_icp()
errs_icp = np.median(
model._get_point_distance())
assert 2e-3 < errs_icp < 3e-3
info = mne.io.read_info(raw_path)
errs_nearest = np.median(
dig_mri_distances(info, fname_trans, subject, subjects_dir))
assert 1e-3 < errs_nearest < 2e-3
class TstVTKPicker(object):
"""Class to test cell picking."""
def __init__(self, mesh, cell_id, event_pos):
self.mesh = mesh
self.cell_id = cell_id
self.point_id = None
self.event_pos = event_pos
def GetCellId(self):
"""Return the picked cell."""
return self.cell_id
def GetDataSet(self):
"""Return the picked mesh."""
return self.mesh
def GetPickPosition(self):
"""Return the picked position."""
vtk_cell = self.mesh.GetCell(self.cell_id)
cell = [vtk_cell.GetPointId(point_id) for point_id
in range(vtk_cell.GetNumberOfPoints())]
self.point_id = cell[0]
return self.mesh.points[self.point_id]
def GetEventPosition(self):
"""Return event position."""
return self.event_pos
@pytest.mark.slowtest
@testing.requires_testing_data
@pytest.mark.parametrize(
'inst_path', (raw_path, 'gen_montage', ctf_raw_path, nirx_15_0_raw_path,
nirsport2_raw_path, snirf_nirsport2_raw_path))
def test_coreg_gui_pyvista_file_support(inst_path, tmpdir,
renderer_interactive_pyvistaqt):
"""Test reading supported files."""
from mne.gui import coregistration
tempdir = str(tmpdir)
if inst_path == 'gen_montage':
# generate a montage fig to use as inst.
tmp_info = read_info(raw_path)
eeg_chans = []
for pt in tmp_info['dig']:
if pt['kind'] == FIFF.FIFFV_POINT_EEG:
eeg_chans.append(f"EEG {pt['ident']:03d}")
dig = DigMontage(dig=tmp_info['dig'],
ch_names=eeg_chans)
inst_path = op.join(tempdir, 'tmp-dig.fif')
dig.save(inst_path)
# Suppressing warnings here is not ideal.
# However ctf_raw_path (catch-alp-good-f.ds) is poorly formed and causes
# mne.io.read_raw to issue warning.
# XXX consider replacing ctf_raw_path and removing warning ignore filter.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
coregistration(inst=inst_path, subject='sample',
subjects_dir=subjects_dir)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_coreg_gui_pyvista(tmpdir, renderer_interactive_pyvistaqt):
"""Test that using CoregistrationUI matches mne coreg."""
from mne.gui import coregistration
tempdir = str(tmpdir)
config = get_config(home_dir=os.environ.get('_MNE_FAKE_HOME_DIR'))
tmp_trans = op.join(tempdir, 'tmp-trans.fif')
coreg = coregistration(subject='sample', subjects_dir=subjects_dir,
trans=fname_trans)
coreg._reset_fiducials()
coreg.close()
coreg = coregistration(inst=raw_path, subject='sample',
subjects_dir=subjects_dir)
coreg._set_fiducials_file(fid_fname)
assert coreg._fiducials_file == fid_fname
# picking
vtk_picker = TstVTKPicker(coreg._surfaces['head'], 0, (0, 0))
coreg._on_mouse_move(vtk_picker, None)
coreg._on_button_press(vtk_picker, None)
coreg._on_pick(vtk_picker, None)
coreg._on_button_release(vtk_picker, None)
coreg._set_lock_fids(True)
assert coreg._lock_fids
coreg._on_pick(vtk_picker, None) # also pick when locked
coreg._set_lock_fids(False)
assert not coreg._lock_fids
coreg._set_lock_fids(True)
assert coreg._lock_fids
assert coreg._nasion_weight == 10.
coreg._set_point_weight(11., 'nasion')
assert coreg._nasion_weight == 11.
coreg._fit_fiducials()
coreg._fit_icp()
assert coreg._coreg._extra_points_filter is None
coreg._omit_hsp()
assert coreg._coreg._extra_points_filter is not None
coreg._reset_omit_hsp_filter()
assert coreg._coreg._extra_points_filter is None
assert coreg._grow_hair == 0
coreg._set_grow_hair(0.1)
assert coreg._grow_hair == 0.1
assert coreg._orient_glyphs == \
(config.get('MNE_COREG_ORIENT_TO_SURFACE', '') == 'true')
assert coreg._hpi_coils
assert coreg._eeg_channels
assert coreg._head_shape_points
assert coreg._scale_mode == 'None'
assert coreg._icp_fid_match == 'nearest'
assert coreg._head_resolution == \
(config.get('MNE_COREG_HEAD_HIGH_RES', 'true') == 'true')
assert not coreg._head_transparency
coreg._set_head_transparency(True)
assert coreg._head_transparency
coreg._save_trans(tmp_trans)
assert op.isfile(tmp_trans)
coreg.close()
|
import pytest
from digests.utils import get_schema_name
@pytest.mark.parametrize('content_type, schema_name', [
('application/vnd.elife.digest+json; version=1', 'digest.v1.json'),
('application/vnd.elife.digest+json; version=2', 'digest.v2.json'),
('application/vnd.elife.digest+json; version=3', 'digest.v3.json'),
('application/vnd.elife.digest+json', 'digest.v1.json'),
])
def test_can_get_schema_name(content_type: str, schema_name: str):
assert get_schema_name(content_type) == schema_name
|
import json
import os
import pytest
import yaml
from desmod.component import Component
from desmod.simulation import (
SimEnvironment,
SimStopEvent,
simulate,
simulate_factors,
simulate_many,
)
import desmod.progress
pytestmark = pytest.mark.usefixtures('cleandir')
@pytest.fixture
def cleandir(tmpdir):
origin = os.getcwd()
tmpdir.chdir()
yield None
os.chdir(origin)
@pytest.fixture
def no_progressbar(monkeypatch):
monkeypatch.setattr(desmod.progress, 'progressbar', None)
@pytest.fixture
def no_colorama(monkeypatch):
monkeypatch.setattr(desmod.progress, 'colorama', None)
@pytest.fixture
def config():
return {
'sim.config.file': 'config.yaml',
'sim.result.file': 'result.yaml',
'sim.workspace': 'workspace',
'sim.workspace.overwrite': False,
'sim.timescale': '1 us',
'sim.seed': 1234,
'sim.duration': '1 us',
'test.ensure_workspace': False,
'test.fail_pre_init': False,
'test.fail_init': False,
'test.fail_simulate': False,
'test.fail_post_simulate': False,
'test.fail_get_result': False,
'test.until_delay': None,
}
class TopTest(Component):
@classmethod
def pre_init(cls, env):
if env.config.get('test.fail_pre_init'):
raise Exception('fail_pre_init')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.env.config.get('test.fail_init'):
raise Exception('fail_init')
self.add_process(self.test_proc)
def test_proc(self):
yield self.env.timeout(0.5)
if self.env.config.get('test.fail_simulate'):
assert False, 'fail_simulate'
until_delay = self.env.config.get('test.until_delay')
if until_delay is not None:
self.env.until.schedule(until_delay)
yield self.env.timeout(0.5)
def post_sim_hook(self):
if self.env.config.get('test.fail_post_simulate'):
raise Exception('fail_post_simulate')
def get_result_hook(self, result):
if self.env.config.get('test.fail_get_result'):
raise Exception('fail_get_result')
result['time_100ps'] = self.env.time(unit='100 ps')
def test_pre_init_failure(config):
config['test.fail_pre_init'] = True
result = simulate(config, TopTest, reraise=False)
assert result['sim.exception'] == repr(Exception('fail_pre_init'))
assert result['sim.now'] == 0
assert result['sim.time'] == 0
assert result['sim.runtime'] > 0
assert result['config']['test.fail_pre_init']
assert os.path.exists(
os.path.join(config['sim.workspace'], config['sim.result.file'])
)
def test_init_failure(config):
config['test.fail_init'] = True
result = simulate(config, TopTest, reraise=False)
assert result['sim.exception'] == repr(Exception('fail_init'))
assert result['sim.now'] == 0
assert result['sim.time'] == 0
assert result['sim.runtime'] > 0
assert result['config']['test.fail_init']
for file_key in ['sim.result.file', 'sim.config.file']:
assert os.path.exists(os.path.join(config['sim.workspace'], config[file_key]))
def test_simulate_fail(config):
config['test.fail_simulate'] = True
result = simulate(config, TopTest, reraise=False)
assert result['sim.exception'].startswith('AssertionError')
assert result['sim.now'] == 0.5
assert result['sim.time'] == 0.5e-6
assert result['sim.runtime'] > 0
assert result['config']['test.fail_simulate']
for file_key in ['sim.result.file', 'sim.config.file']:
assert os.path.exists(os.path.join(config['sim.workspace'], config[file_key]))
def test_post_simulate_fail(config):
config['test.fail_post_simulate'] = True
result = simulate(config, TopTest, reraise=False)
assert result['sim.exception'] == repr(Exception('fail_post_simulate'))
assert result['sim.now'] == 1
assert result['sim.time'] == 1e-6
assert result['sim.runtime'] > 0
assert result['config']['test.fail_post_simulate']
for file_key in ['sim.result.file', 'sim.config.file']:
assert os.path.exists(os.path.join(config['sim.workspace'], config[file_key]))
def test_get_result_fail(config):
config['test.fail_get_result'] = True
result = simulate(config, TopTest, reraise=False)
assert result['sim.exception'] == repr(Exception('fail_get_result'))
assert result['sim.now'] == 1
assert result['sim.time'] == 1e-6
assert result['sim.runtime'] > 0
assert result['config']['test.fail_get_result']
for file_key in ['sim.result.file', 'sim.config.file']:
assert os.path.exists(os.path.join(config['sim.workspace'], config[file_key]))
def test_simulate_reraise(config):
config['test.fail_simulate'] = True
with pytest.raises(AssertionError):
simulate(config, TopTest, reraise=True)
def test_no_result_file(config):
config.pop('sim.result.file')
config.pop('sim.config.file')
result = simulate(config, TopTest)
assert result['sim.exception'] is None
assert not os.listdir(config['sim.workspace'])
def test_simulate_with_progress(config, capsys):
config['sim.progress.enable'] = True
config['sim.duration'] = '10 us'
simulate(config, TopTest)
_, err = capsys.readouterr()
assert err.endswith('(100%)\n')
@pytest.mark.parametrize('max_width', [0, 1])
def test_simulate_with_progress_tty(config, capsys, max_width):
config['sim.progress.enable'] = True
config['sim.progress.max_width'] = max_width
config['sim.duration'] = '10 us'
with capsys.disabled():
simulate(config, TopTest)
def test_simulate_progress_non_one_timescale(config):
config['sim.progress.enable'] = True
config['sim.timescale'] = '100 ns'
config['sim.duration'] = '10 us'
simulate(config, TopTest)
def test_simulate_factors(config):
factors = [(['sim.seed'], [[1], [2], [3]])]
results = simulate_factors(config, factors, TopTest)
assert len(results) == 3
for result in results:
assert result['sim.exception'] is None
assert os.path.exists(
os.path.join(
result['config']['meta.sim.workspace'],
result['config']['sim.result.file'],
)
)
def test_simulate_factors_only_factor(config):
FACTOR_NUM = 2
def single_factor_filter_fn(cfg):
return cfg['meta.sim.index'] == FACTOR_NUM
factors = [(['sim.seed'], [[1], [2], [3]])]
results = simulate_factors(
config, factors, TopTest, config_filter=single_factor_filter_fn
)
assert len(results) == 1
for result in results:
assert result['sim.exception'] is None
assert result['config']['meta.sim.workspace'] == os.path.join(
config['sim.workspace'], str(FACTOR_NUM)
)
assert os.path.exists(
os.path.join(
result['config']['meta.sim.workspace'],
result['config']['sim.result.file'],
)
)
def test_simulate_factors_progress(config, capfd):
config['sim.progress.enable'] = True
config['sim.duration'] = '10 us'
factors = [(['sim.seed'], [[1], [2], [3]])]
results = simulate_factors(config, factors, TopTest)
assert len(results) == 3
for result in results:
assert result['sim.exception'] is None
assert os.path.exists(
os.path.join(
result['config']['meta.sim.workspace'],
result['config']['sim.result.file'],
)
)
out, err = capfd.readouterr()
assert out == ''
assert '3 of 3 simulations' in err
def test_simulate_factors_progress_tty(config, capsys):
config['sim.progress.enable'] = True
config['sim.duration'] = '10 us'
factors = [(['sim.seed'], [[1], [2], [3]])]
with capsys.disabled():
results = simulate_factors(config, factors, TopTest)
assert len(results) == 3
for result in results:
assert result['sim.exception'] is None
assert os.path.exists(
os.path.join(
result['config']['meta.sim.workspace'],
result['config']['sim.result.file'],
)
)
def test_simulate_factors_no_overwrite(config):
config['sim.workspace.overwrite'] = False
factors = [(['sim.seed'], [[1], [2], [3]])]
results = simulate_factors(config, factors, TopTest)
assert os.path.isdir(config['sim.workspace'])
assert len(results) == 3
for result in results:
assert result['sim.exception'] is None
assert os.path.exists(
os.path.join(
result['config']['meta.sim.workspace'],
result['config']['sim.result.file'],
)
)
with open(os.path.join(config['sim.workspace'], 'cookie.txt'), 'w') as f:
f.write('hi')
factors = [(['sim.seed'], [[1], [2], [3], [4]])]
results = simulate_factors(config, factors, TopTest)
assert len(results) == 4
assert os.path.isdir(config['sim.workspace'])
for result in results:
assert result['sim.exception'] is None
assert os.path.exists(
os.path.join(
result['config']['meta.sim.workspace'],
result['config']['sim.result.file'],
)
)
with open(os.path.join(config['sim.workspace'], 'cookie.txt')) as f:
assert f.read() == 'hi'
def test_simulate_factors_overwrite(config):
config['sim.workspace.overwrite'] = True
factors = [(['sim.seed'], [[1], [2], [3]])]
results = simulate_factors(config, factors, TopTest)
assert os.path.isdir(config['sim.workspace'])
assert len(results) == 3
for result in results:
assert result['sim.exception'] is None
assert os.path.exists(
os.path.join(
result['config']['meta.sim.workspace'],
result['config']['sim.result.file'],
)
)
with open(os.path.join(config['sim.workspace'], 'cookie.txt'), 'w') as f:
f.write('hi')
factors = [(['sim.seed'], [[1], [2]])]
results = simulate_factors(config, factors, TopTest)
assert len(results) == 2
assert os.path.isdir(config['sim.workspace'])
for result in results:
assert result['sim.exception'] is None
assert os.path.exists(
os.path.join(
result['config']['meta.sim.workspace'],
result['config']['sim.result.file'],
)
)
assert not os.path.exists(os.path.join(config['sim.workspace'], 'cookie.txt'))
assert set(os.listdir(config['sim.workspace'])) == set(['0', '1'])
def test_progress_enabled(config):
config['sim.progress.enable'] = True
result = simulate(config, TopTest)
assert result['sim.exception'] is None
assert result['sim.now'] == 1
assert result['sim.time'] == 1e-6
assert result['sim.runtime'] > 0
assert os.path.exists(
os.path.join(config['sim.workspace'], config['sim.result.file'])
)
@pytest.mark.parametrize('max_width', [0, 1])
def test_many_progress_enabled(config, max_width):
config['sim.progress.enable'] = True
config['sim.progress.max_width'] = max_width
factors = [(['sim.seed'], [[1], [2], [3]])]
results = simulate_factors(config, factors, TopTest)
for result in results:
assert result['sim.exception'] is None
assert result['sim.now'] == 1
assert result['sim.time'] == 1e-6
assert result['sim.runtime'] > 0
assert os.path.exists(
os.path.join(
result['config']['meta.sim.workspace'],
result['config']['sim.result.file'],
)
)
def test_many_progress_no_pbar(config, capsys, no_progressbar):
config['sim.progress.enable'] = True
config['sim.duration'] = '10 us'
factors = [(['sim.seed'], [[1], [2], [3]])]
with capsys.disabled():
simulate_factors(config, factors, TopTest)
def test_many_progress_no_colorama(config, capsys, no_colorama):
config['sim.progress.enable'] = True
factors = [(['sim.seed'], [[1], [2], [3]])]
with capsys.disabled():
simulate_factors(config, factors, TopTest)
def test_workspace_env_init(config):
class TestEnvironment(SimEnvironment):
def __init__(self, config):
super().__init__(config)
assert os.path.split(os.getcwd())[-1] == config['sim.workspace']
workspace = config['sim.workspace']
assert not os.path.exists(workspace)
simulate(config, TopTest, TestEnvironment)
assert os.path.exists(workspace)
def test_workspace_no_overwrite(config):
workspace = config['sim.workspace']
config['sim.workspace.overwrite'] = True
config['sim.result.file'] = 'first-result.yaml'
assert not os.path.exists(workspace)
simulate(config, TopTest)
assert os.path.exists(os.path.join(workspace, 'first-result.yaml'))
config['sim.workspace.overwrite'] = False
config['sim.result.file'] = 'second-result.yaml'
simulate(config, TopTest)
assert os.path.exists(os.path.join(workspace, 'first-result.yaml'))
assert os.path.exists(os.path.join(workspace, 'second-result.yaml'))
config['sim.workspace.overwrite'] = True
config['sim.result.file'] = 'third-result.yaml'
simulate(config, TopTest)
assert not os.path.exists(os.path.join(workspace, 'first-result.yaml'))
assert not os.path.exists(os.path.join(workspace, 'second-result.yaml'))
assert os.path.exists(os.path.join(workspace, 'third-result.yaml'))
def test_workspace_is_curdir(config):
config['sim.workspace'] = '.'
config['sim.workspace.overwrite'] = True
config['sim.result.file'] = 'first-result.yaml'
simulate(config, TopTest)
assert os.path.exists('first-result.yaml')
config['sim.workspace.overwrite'] = True
config['sim.result.file'] = 'second-result.yaml'
simulate(config, TopTest)
# '.' is not supposed to be overwritten
assert os.path.exists('first-result.yaml')
assert os.path.exists('second-result.yaml')
def test_many_with_duplicate_workspace(config):
configs = [config.copy() for _ in range(2)]
configs[0]['sim.workspace'] = os.path.join('tmp', os.pardir, 'workspace')
configs[1]['sim.workspace'] = 'workspace'
with pytest.raises(ValueError):
simulate_many(configs, TopTest)
def test_many_user_jobs(config):
simulate_many([config], TopTest, jobs=1)
def test_many_invalid_jobs(config):
with pytest.raises(ValueError):
simulate_many([config], TopTest, jobs=0)
def test_sim_time(config):
config['sim.timescale'] = '10 ms'
config['sim.duration'] = '995 ms'
result = simulate(config, TopTest)
assert result['sim.time'] == 0.995
assert result['sim.now'] == 99.5
assert result['time_100ps'] == 9950000000
def test_sim_time_non_default_t(config):
config['sim.timescale'] = '1 ms'
env = SimEnvironment(config)
assert env.time(1000, 's') == 1
assert env.time(1, 'ms') == 1
assert env.time(t=500) == 0.5
@pytest.mark.parametrize('progress_enable', [True, False])
def test_sim_until(config, progress_enable):
class TestEnvironment(SimEnvironment):
def __init__(self, config):
super().__init__(config)
self.until = SimStopEvent(self)
config['sim.progress.enable'] = progress_enable
config['test.until_delay'] = 0
result = simulate(config, TopTest, TestEnvironment)
assert result['sim.now'] == 0.50
config['test.until_delay'] = 0.25
result = simulate(config, TopTest, TestEnvironment)
assert result['sim.now'] == 0.75
def test_sim_json_result(config):
config['sim.result.file'] = 'result.json'
result = simulate(config, TopTest)
workspace = config['sim.workspace']
with open(os.path.join(workspace, config['sim.result.file'])) as f:
assert json.load(f) == result
@pytest.mark.parametrize(
'ext, parser',
[
('yaml', yaml.safe_load),
('yml', yaml.safe_load),
('json', json.load),
('py', lambda f: eval(f.read())),
],
)
def test_sim_result_format(config, ext, parser):
config['sim.result.file'] = 'result.' + ext
config['sim.config.file'] = 'config.' + ext
result = simulate(config, TopTest)
workspace = config['sim.workspace']
with open(os.path.join(workspace, config['sim.result.file'])) as f:
assert parser(f) == result
with open(os.path.join(workspace, config['sim.config.file'])) as f:
assert parser(f) == config
def test_sim_invalid_result_format(config):
config['sim.result.file'] = 'result.bogus'
with pytest.raises(ValueError):
simulate(config, TopTest)
result = simulate(config, TopTest, reraise=False)
assert result['sim.exception'] is not None
|
A, B = map(int, input().split())
print(A / B)
|
Map = ["a", "b", "c", "d", 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
def result(Str):
    """Shift every lowercase letter two places forward, wrapping 'y'/'z' back to 'a'/'b'."""
    newstr = ""
    for i in Str:
        if i in Map:
            if Map.index(i) + 2 > 25:
                # wrap around the end of the alphabet
                newstr += Map[abs(len(Map) - (Map.index(i) + 2))]
                continue
            else:
                newstr += Map[Map.index(i) + 2]
                continue
        # non-letters (spaces, punctuation) are copied unchanged
        newstr += i
    return newstr
if __name__ == '__main__':
Str = "g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj."
print(result(Str))
intab="abcdefghijklmnopqrstuvwxyz"
outtab="cdefghijklmnopqrstuvwxyzab"
trantab = Str.maketrans(intab, outtab)
print(Str.translate(trantab))
Str="map"
print(Str.translate(trantab)) |
import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class SetParentBoneNode(Node, ArmLogicTreeNode):
'''Set parent bone node'''
bl_idname = 'LNSetParentBoneNode'
bl_label = 'Set Parent Bone'
bl_icon = 'NONE'
def init(self, context):
self.inputs.new('ArmNodeSocketAction', 'In')
self.inputs.new('ArmNodeSocketObject', 'Object')
self.inputs.new('ArmNodeSocketObject', 'Parent')
self.inputs[-1].default_value = 'Parent'
self.inputs.new('NodeSocketString', 'Bone')
self.inputs[-1].default_value = 'Bone'
self.outputs.new('ArmNodeSocketAction', 'Out')
add_node(SetParentBoneNode, category='Action')
|
import os
def clearConsole():
    # 'cls' clears the console on Windows; 'clear' works on POSIX shells
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
if __name__ == "__main__":
clearConsole() |
""" This module defines the training configuration """
from typing import NamedTuple, Union
class TrainConfig(NamedTuple):
""" TrainConfig is a configuration for training with ray """
experiment_name: str
num_workers: int
resume: Union[bool, str] = "prompt"
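# Illustrative instantiation (values are placeholders, not project defaults):
# config = TrainConfig(experiment_name="baseline", num_workers=4, resume=False)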
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from open3d.ml.torch.layers import SparseConv, SparseConvTranspose
class SparseConvFunc(torch.autograd.Function):
@staticmethod
def symbolic(g, cls, feat, in_pos, out_pos, voxel_size):
kernel = cls.state_dict()["kernel"]
offset = cls.state_dict()["offset"]
kernel = g.op("Constant", value_t=kernel)
offset = g.op("Constant", value_t=offset)
return g.op("org.open3d::SparseConv", feat, in_pos, out_pos, kernel, offset)
@staticmethod
def forward(self, cls, feat, in_pos, out_pos, voxel_size):
return cls.origin_forward(feat, in_pos, out_pos, voxel_size)
class SparseConvONNX(SparseConv):
"""
    This is a support class that helps export a network with SparseConv layers in ONNX format.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.origin_forward = super().forward
def forward(self, feat, in_pos, out_pos, voxel_size):
return SparseConvFunc.apply(self, feat, in_pos, out_pos, voxel_size)
class SparseConvTransposeFunc(torch.autograd.Function):
@staticmethod
def symbolic(g, cls, feat, in_pos, out_pos, voxel_size):
kernel = cls.state_dict()["kernel"]
offset = cls.state_dict()["offset"]
kernel = g.op("Constant", value_t=kernel)
offset = g.op("Constant", value_t=offset)
return g.op("org.open3d::SparseConvTranspose", feat, in_pos, out_pos, kernel, offset)
@staticmethod
def forward(self, cls, feat, in_pos, out_pos, voxel_size):
return cls.origin_forward(feat, in_pos, out_pos, voxel_size)
class SparseConvTransposeONNX(SparseConvTranspose):
"""
    This is a support class that helps export a network with SparseConvTranspose layers in ONNX format.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.origin_forward = super().forward
def forward(self, feat, in_pos, out_pos, voxel_size):
return SparseConvTransposeFunc.apply(self, feat, in_pos, out_pos, voxel_size)
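# Hedged export sketch (an assumption, not part of the original module): a model
# built with SparseConvONNX / SparseConvTransposeONNX layers could be exported by
# calling torch.onnx.export(model, (feat, in_pos, out_pos, voxel_size), "model.onnx"),
# leaving the custom org.open3d ops to be resolved by the consuming runtime.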
|
# ===========================================================================
# configuration.py --------------------------------------------------------
# ===========================================================================
# import ------------------------------------------------------------------
# ---------------------------------------------------------------------------
import expmgmt.config.settings
from configparser import ConfigParser, ExtendedInterpolation
import logging
import os
import sys
# class -------------------------------------------------------------------
# ---------------------------------------------------------------------------
class Configuration(ConfigParser):
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def __init__(self):
        # initialization
ConfigParser.__init__(self, interpolation=ExtendedInterpolation())
self.logger = logging.getLogger("config")
# get folder where the configuration files are stored and the path of the main configuration file
self.dir_location = expmgmt.config.settings.get_config_folder()
self.file_location = expmgmt.config.settings.get_config_file()
# get folder where the scripts are stored
self.scripts_location = expmgmt.config.settings.get_scripts_folder()
# get folder where the experiments are stored
self.experiments_location = expmgmt.config.settings.get_projects_folder()
self.initialize()
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def handle_includes(self):
"""Include all defined configuration files in section 'include'.
The specified file in field 'additional-configuration-file' in the following example will be read if it exists.
::
[include]
additional-configuration-file = A:\\.config\\additional-configuration-file.ini
"""
        # if section 'include' is not defined, return
if not "include" in self.keys():
return
# read additional configuration files if exists
for name in self["include"]:
self.logger.debug("Including {0}".format(name)) # @log
fullpath = os.path.expanduser(self.get("include", name))
if os.path.exists(fullpath):
self.read(fullpath)
else:
self.logger.warning(
"{0} not included because it does not exist".format(
fullpath
)
) # @log
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def initialize(self):
# create directory structure --------------------------------------
# create configuration folder, e.g. /home/user/.config
if not os.path.exists(self.dir_location):
self.logger.warning(
'Creating configuration folder in %s' % self.dir_location
) # @log
os.makedirs(self.dir_location)
# create scripts and experiments folder, e.g. /home/user/.config/scripts and /home/user/.config/experiments
if not os.path.exists(self.scripts_location):
os.makedirs(self.scripts_location)
if not os.path.exists(self.experiments_location):
os.makedirs(self.experiments_location)
# create configuration file ---------------------------------------
# execute additional script files ---------------------------------
if os.path.exists(self.file_location):
# read configurations file if it exists
self.logger.debug(
'Reading configuration from {0}'.format(self.file_location)
) # @log
self.read(self.file_location)
self.handle_includes()
else:
# create configuration file, e.g. /home/user/.config/config.ini with default settings
default_info = expmgmt.config.settings.get_settings_default()
for section in default_info:
self[section] = {}
for field in default_info[section]:
self[section][field] = default_info[section][field]
with open(self.file_location, "w") as configfile:
self.write(configfile)
# execute additional script files ---------------------------------
configpy = expmgmt.config.settings.get_configpy_file()
if os.path.exists(configpy):
self.logger.debug('Executing {0}'.format(configpy)) # @log
with open(configpy) as fd:
exec(fd.read())
|
from django.db import models
from accounts.models import User
# Create your models here.
class Question(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='questions', null=True, blank=True)
question = models.CharField(max_length=1000, null=True, blank=True)
answer = models.CharField(max_length=1000, null=True, blank=True)
class Meta:
verbose_name = 'question'
verbose_name_plural = 'questions'
def __str__(self):
return self.question
class Comment(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='comments', null=True, blank=True)
question = models.ForeignKey(Question, on_delete=models.CASCADE, null=True, blank=True)
comment = models.CharField(max_length=1000, null=True, blank=True)
class Meta:
verbose_name = 'comment'
verbose_name_plural = 'comments'
def __str__(self):
return self.comment + '\t' + self.question.question
|
#!/usr/bin/env python3
# coding: utf-8
from enum import Enum, auto
from sys import version_info
from typing import Any, Dict, List
if version_info.minor < 8:
from typing_extensions import TypedDict
else:
from typing import TypedDict # type: ignore
class DefaultWorkflowEngineParameter(TypedDict):
"""
A message that allows one to describe default parameters for a workflow
engine.
name:
The name of the parameter
type:
Describes the type of the parameter, e.g. float.
default_value:
The stringified version of the default parameter. e.g. "2.45".
"""
name: str
type: str
default_value: str
class Log(TypedDict):
"""
Log and other info
name:
The task or workflow name
cmd:
The command line that was executed
start_time:
When the command started executing, in ISO 8601 format
"%Y-%m-%dT%H:%M:%SZ"
end_time:
When the command stopped executing (completed, failed, or cancelled),
in ISO 8601 format "%Y-%m-%dT%H:%M:%SZ"
stdout:
A URL to retrieve standard output logs of the workflow run or task.
This URL may change between status requests, or may not be available
until the task or workflow has finished execution. Should be available
using the same credentials used to access the WES endpoint.
stderr:
A URL to retrieve standard error logs of the workflow run or task.
This URL may change between status requests, or may not be available
until the task or workflow has finished execution. Should be available
using the same credentials used to access the WES endpoint.
exit_code:
Exit code of the program
"""
name: str
cmd: List[str]
start_time: str
end_time: str
stdout: str
stderr: str
exit_code: int
class State(Enum):
"""
UNKNOWN:
The state of the task is unknown. This provides a safe default for
messages where this field is missing, for example, so that a missing
field does not accidentally imply that the state is QUEUED.
QUEUED:
The task is queued.
INITIALIZING:
The task has been assigned to a worker and is currently preparing to
run. For example, the worker may be turning on, downloading input
files, etc.
RUNNING:
The task is running. Input files are downloaded and the first Executor
has been started.
PAUSED:
The task is paused. An implementation may have the ability to pause a
task, but this is not required.
COMPLETE:
The task has completed running. Executors have exited without error
and output files have been successfully uploaded.
EXECUTOR_ERROR:
The task encountered an error in one of the Executor processes.
Generally, this means that an Executor exited with a non-zero exit
code.
SYSTEM_ERROR:
The task was stopped due to a system error, but not from an Executor,
        for example an upload failed due to network issues, the worker ran
out of disk space, etc.
CANCELED:
The task was canceled by the user.
CANCELING:
The task was canceled by the user, and is in the process of stopping.
"""
UNKNOWN = auto()
QUEUED = auto()
INITIALIZING = auto()
RUNNING = auto()
PAUSED = auto()
COMPLETE = auto()
EXECUTOR_ERROR = auto()
SYSTEM_ERROR = auto()
CANCELED = auto()
CANCELING = auto()
class WorkflowTypeVersion(TypedDict):
"""
Available workflow types supported by a given instance of the service.
workflow_type_version:
an array of one or more acceptable types for the `workflow_type`
"""
workflow_type_version: List[str]
class ServiceInfo(TypedDict):
"""
A message containing useful information about the running service,
including supported versions and default settings.
workflow_type_versions:
A map with keys as the workflow format type name (currently only CWL
and WDL are used although a service may support others) and value is a
workflow_type_version object which simply contains an array of one or
more version strings
supported_wes_versions:
The version(s) of the WES schema supported by this service
supported_filesystem_protocols:
The filesystem protocols supported by this service, currently these may
include common protocols using the terms 'http', 'https', 'sftp', 's3',
'gs', 'file', or 'synapse', but others are possible and the terms
beyond these core protocols are currently not fixed. This section
reports those protocols (either common or not) supported by this WES
service.
workflow_engine_versions:
The engine(s) used by this WES service, key is engine name
(e.g. Cromwell) and value is version
default_workflow_engine_parameters:
Each workflow engine can present additional parameters that can be
sent to the workflow engine. This message will list the default values,
and their types for each workflow engine.
system_state_counts:
The system statistics, key is the statistic, value is the count of
runs in that state. See the State enum for the possible keys.
auth_instructions_url:
A web page URL with human-readable instructions on how to get an
authorization token for use with a specific WES endpoint.
contact_info_url:
An email address URL (mailto:) or web page URL with contact
information for the operator of a specific WES endpoint. Users of the
endpoint should use this to report problems or security
vulnerabilities.
tags:
A key-value map of arbitrary, extended metadata outside the scope of
the above but useful to report back
"""
workflow_type_versions: Dict[str, WorkflowTypeVersion]
supported_wes_versions: List[str]
supported_filesystem_protocols: List[str]
workflow_engine_versions: Dict[str, str]
default_workflow_engine_parameters: List[DefaultWorkflowEngineParameter]
system_state_counts: Dict[State, int]
auth_instructions_url: str
contact_info_url: str
tags: Dict[str, str]
class RunStatus(TypedDict):
"""
Small description of a workflow run, returned by server during listing
"""
run_id: str
state: State
class RunListResponse(TypedDict):
"""
The service will return a RunListResponse when receiving a successful
RunListRequest.
runs:
A list of workflow runs that the service has executed or is executing.
The list is filtered to only include runs that the caller has
permission to see.
next_page_token:
A token which may be supplied as `page_token` in workflow run list
request to get the next page of results. An empty string indicates
there are no more items to return.
"""
runs: List[RunStatus]
next_page_token: str
class RunRequest(TypedDict):
"""
To execute a workflow, send a run request including all the details needed
to begin downloading and executing a given workflow.
workflow_params:
The workflow run parameterizations (JSON encoded), including input and
output file locations
workflow_type:
The workflow descriptor type, must be "CWL" or "WDL" currently (or
another alternative supported by this WES instance)
workflow_type_version:
The workflow descriptor type version, must be one supported by this
WES instance
tags:
A key-value map of arbitrary metadata outside the scope of
`workflow_params` but useful to track with this run request
workflow_engine_parameters:
Additional parameters can be sent to the workflow engine using this
field. Default values for these parameters can be obtained using the
ServiceInfo endpoint.
workflow_url:
The workflow CWL or WDL document. When `workflow_attachments` is used
to attach files, the `workflow_url` may be a relative path to one of
the attachments.
workflow_attachment:
The workflow_attachment array may be used to upload files that are
required to execute the workflow, including the primary workflow,
tools imported by the workflow, other files referenced by the workflow,
or files which are part of the input. The implementation should stage
these files to a temporary directory and execute the workflow from
there. These parts must have a Content-Disposition header with a
"filename" provided for each part. Filenames may include
subdirectories, but must not include references to parent directories
with '..' -- implementations should guard against maliciously
constructed filenames.
"""
workflow_params: str
workflow_type: str
workflow_type_version: str
tags: str
workflow_engine_parameters: str
workflow_url: str
class RunLog(TypedDict):
"""
run_id:
workflow run ID
request:
The original request message used to initiate this execution.
state:
The state of the run e.g. RUNNING (see State)
run_log:
The logs, and other key info like timing and exit code, for the overall
run of this workflow.
task_logs:
The logs, and other key info like timing and exit code, for each step in
the workflow run.
outputs:
The outputs from the workflow run.
"""
run_id: str
request: RunRequest
state: State
run_log: Log
task_logs: List[Log]
outputs: Dict[Any, Any]
class RunId(TypedDict):
"""
workflow run ID
"""
run_id: str
class ErrorResponse(TypedDict):
"""
An object that can optionally include information about the error.
msg:
A detailed error message.
status_code:
The integer representing the HTTP status code (e.g. 200, 404).
"""
msg: str
status_code: int
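# Illustrative construction of one of the dictionaries above (values are placeholders):
# error: ErrorResponse = {"msg": "run not found", "status_code": 404}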
|
import matplotlib.pyplot as plt
from gem.utils import graph_util, plot_util
from gem.evaluation import visualize_embedding as viz
from gem.evaluation import evaluate_graph_reconstruction as gr
from time import time
from gem.embedding.gf import GraphFactorization
from gem.embedding.hope import HOPE
from gem.embedding.lap import LaplacianEigenmaps
from gem.embedding.lle import LocallyLinearEmbedding
from gem.embedding.node2vec import node2vec
from gem.embedding.sdne import SDNE
import networkx as nx
# File that contains the edges. Format: source target
# Optionally, you can add weights as third column: source target weight
edge_f = 'karate.edgelist'
# Specify whether the edges are directed
isDirected = True
# Load graph
G = graph_util.loadGraphFromEdgeListTxt(edge_f, directed=isDirected)
G = G.to_directed()
nx.write_gpickle(G, 'graph.gpickle') |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from Crypto.Cipher import CAST
from . import StandardBlockCipherUnit
class cast(StandardBlockCipherUnit, cipher=CAST):
"""
CAST encryption and decryption.
"""
pass
|
import numpy as np
import pyccl as ccl
def test_szcl():
COSMO = ccl.Cosmology(
Omega_b=0.05,
Omega_c=0.25,
h=0.67,
n_s=0.9645,
A_s=2.02E-9,
Neff=3.046)
bm = np.loadtxt("benchmarks/data/sz_cl_P13_szpowerspectrum.txt",
unpack=True)
l_bm = bm[0]
cl_bm = bm[1]
cl_bm *= (2*np.pi) / (1E12*l_bm*(l_bm+1))
mass_def = ccl.halos.MassDef(500, 'critical')
hmf = ccl.halos.MassFuncTinker08(COSMO, mass_def=mass_def)
hbf = ccl.halos.HaloBiasTinker10(COSMO, mass_def=mass_def)
hmc = ccl.halos.HMCalculator(COSMO, hmf, hbf, mass_def)
prf = ccl.halos.HaloProfilePressureGNFW(mass_bias=1./1.41)
pk = ccl.halos.halomod_Pk2D(COSMO, hmc, prf, get_2h=False)
tr = ccl.tSZTracer(COSMO, z_max=4.)
cl = ccl.angular_cl(COSMO, tr, tr, l_bm, p_of_k_a=pk)
assert np.all(np.fabs(cl/cl_bm-1) < 2E-2)
|
from .web_arima import The_Arima_Model as arima1
from .web_arima import Arima2 as arima2
from .web_monteCarlo import The_Monte_Carlo as monteCarlo
from .web_prophet import Web_prophet_kyle as prophet
from .web_regression import Regression_Model as regression
from .web_sarima import The_SARIMA_Model as sarima
from .web_stocker_helper import Stocker
from .web_stocker import web_stocker_run
from .web_univariate_timeSeries_rnn import The_Univariate_TS_Reg as univariate_1
from .web_univariate_rnn import The_Univariate_TS_Reg as univariate_2
from .web_mc import MC_Forecast
|
import torch
import torch.nn.functional as F
from torch.nn import Linear, ModuleDict
from torch_geometric.nn import GCNConv, global_mean_pool
from src.data.make_dataset import get_dataloader, get_mutag_data
class GCN(torch.nn.Module):
def __init__(
self,
input_num_features: int,
num_classes: int,
hidden_channels: dict = {"conv1": 64, "conv2": 64, "conv3": 64},
seed: int = 12345,
p: float = 0.5,
):
super(GCN, self).__init__()
torch.manual_seed(seed)
self.p = p
self.conv_layers = ModuleDict()
current_dim = input_num_features
for conv_name, hchannel in hidden_channels.items():
self.conv_layers[conv_name] = GCNConv(current_dim, hchannel)
current_dim = hchannel
self.linear = Linear(current_dim, num_classes)
def forward(self, x, edge_index, batch):
# 1. Obtain node embeddings
for layer in self.conv_layers.values():
x = F.relu(layer(x, edge_index))
# 2. Readout layer
x = global_mean_pool(x, batch) # [batch_size, hidden_channels]
# 3. Apply a final classifier
x = F.dropout(x, p=self.p, training=self.training)
x = self.linear(x)
return x
if __name__ == "__main__":
dataset = get_mutag_data(train=True, cleaned=False)
trainloader = get_dataloader(dataset)
data = next(iter(trainloader))
model = GCN(dataset.num_node_features, dataset.num_classes)
model.train()
out = model(data.x, data.edge_index, data.batch)
|
import importlib
import json
import re
from app.shared.handler import lambda_handler
@lambda_handler
async def handler(event=None, context=None, **kwargs):
unknown_api_response = 404, json.dumps({'message': 'Invalid API'})
path = event.get('path')
try:
_, api, function_name = path.split('/')
except Exception:
return unknown_api_response
if api != re.sub(r'[^a-z0-9]', '', api) or function_name != re.sub(r'[^a-z0-9]', '', function_name):
return unknown_api_response
if not api or not function_name:
return unknown_api_response
method_module = None
try:
method_module = importlib.import_module(f'app.endpoints.{api}.{function_name}')
except ModuleNotFoundError:
return unknown_api_response
func = getattr(method_module, 'handler', None)
return await func(event, context, coro=True)
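# Routing example (illustrative names only): a request with path '/digests/get'
# would import app.endpoints.digests.get and await its 'handler' coroutine; any
# path that fails the lowercase alphanumeric check above gets the 404 response.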
|
# obtain a basic description of all the species seen in the past 30 years that the Solow p-value designated as extinct
# particularly interested in species designated extinct that are known (from Chong et al. (2009)) to be common
import csv
import sys
sys.path.insert(0,'../..') # allows us to import undetected extinctions package
from undetected_extinctions.helpers.name_cleaning import clean_species_name
# where the databases are located
# ---
fname_spps = '../../results/infer_detected_extinctions/seen_recently_inferred_extinct.csv'
fname_chong = '../../data/chong_2009_checklist/Chong_native_spp.csv'
fname_spp2fam = '../../data/cleaned_plants_database/species2family.csv'
fname_db2chong = '../../data/chong_2009_checklist/database2chong.csv'
# read in the intermediate data
# ---
# create dictionary to map from database name to name in Chong
csv_f = csv.reader(open(fname_db2chong))
header = next(csv_f)
db2chong = { row[0]: row[1] for row in csv_f }
# create dictionary to map species name to family
csv_f = csv.reader(open(fname_spp2fam))
header = next(csv_f)
spp2fam = { row[0]: row[1] for row in csv_f }
# read in species designated extinct their Solow p-value
csv_f = csv.reader(open(fname_spps))
header = next(csv_f)
spp_extincts = { row[0]: { 'frst': row[1], 'last': row[2], 'pvalue': float(row[5]) } for row in csv_f }
# read in chong
csv_f = csv.reader(open(fname_chong))
header = next(csv_f)
spp_chong = { clean_species_name(row[1]): { 'life_form': row[4].strip(), 'status': row[5].strip(), 'cultivated': row[6].strip() } for row in csv_f }
# make rows for a latex table
# ---
table = list() # family, species, first, last, p-value, lifeform, status, cultivated
for spp_name, D in spp_extincts.items():
fam_name = spp2fam[spp_name]
    table_row = [ fam_name, '\\emph{' + spp_name.capitalize() + '}', D['frst'], D['last'], '{:0.4f}'.format(D['pvalue']) ]
    # Chong et al. (2009) lists binomials, so look up the two-word name first and
    # fall back to the database-to-Chong name mapping otherwise
    spp_name2 = ' '.join(spp_name.split()[0:2])
    if spp_name2 in spp_chong:
        C = spp_chong[spp_name2]
    else:
        C = spp_chong[db2chong[spp_name]]
    table_row += [ C['life_form'], C['status'], C['cultivated'] ]
table.append( table_row )
# write table
# ---
f = open('../../results/infer_detected_extinctions/describe_seen_recently_inferred_extinct.tex', 'w')
table.sort(key = lambda r: r[4] ) # rows are written in order of increasing p-value
for row in table:
f.write( '\t & '.join(row) )
f.write( '\\\\ \n' )
f.close()
|
import numpy as np
from rospy import loginfo, get_param
from human import Human
kitchen_feature = get_param('kitchen_feature')
human_feature = get_param('human_feature')
class Kitchen:
"""
This class represents a kitchen and saves Human objects.
"""
def __init__(self, kitchen_id, data):
"""
This constructor creates a kitchen object, which saves the kitchen id
and the data used for this kitchen.
:param kitchen_id: name of the kitchen
:type kitchen_id: string
:param data: data for this kitchen
:type data: DataFrame
"""
self.kitchen_id = kitchen_id
self.raw_data = data
self.humans = []
self.fit_data(kitchen_feature, human_feature)
def get_all_objects(self, table_name, context):
"""Returns all objects saved in this kitchen given the table
and context for every human.
:param table_name: name of the table
:type table_name: string
:param context: name of context
:type context: string
:returns: all objects for every human given the parameters
:rtype: list[list[VRItems]]
"""
return [human.get_all_objects(table_name, context)
for human in self.humans]
def fit_data(self, kitchen_feature, human_feature):
"""Initializes the human attribute using data saved in self.raw_data
and the given features.
:param kitchen_feature: name of the kitchen feature
:type kitchen_feature: string
:param human_feature: name of the human feature
:type human_feature: string"""
vr_data = self.raw_data
for kitchen_name in np.unique(vr_data[kitchen_feature]):
data_by_kitchen = vr_data.loc[vr_data[kitchen_feature] == str(kitchen_name)]
for human_name in np.unique(data_by_kitchen[human_feature]):
new_human = Human(str(human_name), data_by_kitchen)
if not (new_human in self.humans):
self.humans.append(new_human)
def get_object_location(self, object_id):
"""Gets storage location of the given object for the whole kitchen.
:param object_id: id of the object
:type object_id: string
:rtype: string"""
locations = []
for human in self.humans:
locations.append(human.get_object_storage(str(object_id)))
locations.sort(key=lambda t: t[1])
return locations[0][0]
def get_object_destination(self, object_id, context, human_name, table_id):
"""Gets the destination location of given object specific to the context, human_name
and table_id."""
for human in self.humans:
if human.name == human_name:
return human.get_object(table_id, context, object_id).object_destination[0][0]
def get_storage_costmap(self, context_name, object_id):
"""This function returns the storage costmap for the given object_id and context context_name.
:rtype: GetCostmapResponse"""
if self.humans:
human = self.humans[0]
costmap = human.get_storage_costmap(context_name, object_id)
loginfo("(GetCostmap) Returning storage costmaps")
return costmap
def get_destination_costmap(self, table_id, context_name, human_name, object_id,
x_object_positions, y_object_positions, placed_object_types):
"""This function returns the destination costmap for the given parameters.
:rtype: GetCostmapResponse"""
for human in self.humans:
if human.name == human_name:
costmap = human.get_destination_costmap(table_id, context_name, object_id,
x_object_positions, y_object_positions, placed_object_types)
loginfo("(GetCostmap) Returning destination costmaps")
return costmap |
import pandas as pd
import multiprocessing as mp
from api import Bittrex
import sqlite3 as sl
from time import time
import sys
BITTREX_MARKETS_FILE = './metadata/bittrex_markets.csv'
DB_PATH = './cryptodata.db'
bittrex = Bittrex()
def post_to_db(values,time):
conn = sl.connect(DB_PATH)
cursor = conn.cursor()
    for value in values:
        # parameterized query avoids quoting problems and SQL injection
        cursor.execute(
            'INSERT INTO prices (MARKET, PRICE, TIMESTAMP) VALUES (?, ?, ?)',
            (value[0], value[1], time),
        )
conn.commit()
conn.close()
def get_markets():
market_metadata = pd.read_csv(BITTREX_MARKETS_FILE)
markets = list(market_metadata['MarketName'])
return markets
def sample_wrapper(time):
markets = get_markets()
pool = mp.Pool(processes = 8)
result = pool.map(sample, markets)
post_to_db(result,time)
def sample(market_name):
px = bittrex.get_current_fair_px({'market' : market_name})
return (market_name, px)
def main(args):
time = ' '.join(args[1:3])
time = time[:-2] + '00'
# fix time added to db; use exchange latency
sample_wrapper(time)
if __name__ == '__main__':
main(sys.argv)
|
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------
# Copyright (C) 2014 Dimosthenis Pediaditakis.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#---------------------------------------------------------------------
"""
Selena logging module
=====================
This is the main logging module of Selena, which is imported from other
places to generate logging information of various levels.
"""
import logging
from logging import Logger
from Singleton import Singleton
DEFAULT_LOG_LEVEL = logging.INFO
DEFAULT_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
LOG_LEVELS = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL
}
class SelenaLogger(Logger, object):
"""
This is the main Selena logger Class
"""
__metaclass__ = Singleton
def __init__(self):
"""Selena logger constructor
Creates the main instance of the Selena logging module.
"""
Logger.__init__(self, "selena")
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(DEFAULT_LOG_LEVEL)
# create formatter
formatter = logging.Formatter(DEFAULT_LOG_FORMAT)
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
self.addHandler(ch)
# set the logger level
self.setLevel(DEFAULT_LOG_LEVEL)
def setAllLevels(self, pLevel):
"""
Sets the logging level for the logger itself and for all its handlers.
:param pLevel: The request logging level
"""
# Set the level for all handlers
for handler in self.handlers:
handler.setLevel(pLevel)
# Set the level for the logger
self.setLevel(pLevel)
slog = SelenaLogger()
S_DEBUG, S_INFO, S_WARN, S_ERROR, S_CRIT = [slog.debug, slog.info, slog.warning, slog.error, slog.critical]
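# Typical usage elsewhere in Selena (illustrative; the import path depends on
# where this module lives in the package):
#   S_INFO("starting experiment")
#   slog.setAllLevels(LOG_LEVELS['DEBUG'])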
|
import itertools
from math import log2
from typing import List, Sequence, TypeVar
import numpy as np
from scipy import stats
from sklearn.metrics import dcg_score, ndcg_score
from sklearn.metrics.pairwise import cosine_similarity
X = TypeVar("X")
def reciprocal_rank(relevant_item: X, recommendation: Sequence[X]) -> float:
"""
Calculate the reciprocal rank (RR) of an item in a ranked list of items.
Args:
relevant_item: a target item in the predicted list of items.
recommendation: An N x 1 sequence of predicted items.
Returns:
RR (float): The reciprocal rank of the item.
"""
for i, item in enumerate(recommendation):
if item == relevant_item:
return 1.0 / (i + 1.0)
raise ValueError("relevant item did not appear in recommendation")
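# Example with illustrative values: the relevant item 3 sits at position 2 of the
# recommendation, so reciprocal_rank(3, [5, 3, 1]) == 0.5.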
def mean_reciprocal_rank(relevant_items: List, recommendation: List):
"""
Calculate the mean reciprocal rank (MRR) of items in a ranked list.
Args:
relevant_items (array-like): An N x 1 array of relevant items.
        recommendation (array-like): An N x 1 array of ordered items.
    Returns:
        MRR (float): The mean reciprocal rank of the relevant items in the recommendation.
"""
reciprocal_ranks = []
for item in relevant_items:
rr = reciprocal_rank(item, recommendation)
reciprocal_ranks.append(rr)
return np.mean(reciprocal_ranks)
def rank(relevant_item: X, recommendation: Sequence[X]) -> float:
"""
Calculate the rank of an item in a ranked list of items.
Args:
relevant_item: a target item in the predicted list of items.
recommendation: An N x 1 sequence of predicted items.
Returns:
        The rank of the item (1-based position in the recommendation).
"""
for i, item in enumerate(recommendation):
if item == relevant_item:
return i + 1.0
raise ValueError("relevant item did not appear in recommendation")
def mean_rank(relevant_items: Sequence[X], recommendation: Sequence[X]) -> float:
"""
Calculate the arithmetic mean rank (MR) of items in a ranked list.
Args:
relevant_items: An N x 1 sequence of relevant items.
        recommendation: An N x 1 sequence of ordered items.
    Returns:
        The mean rank of the relevant items in the recommendation.
"""
return np.mean([rank(item, recommendation) for item in relevant_items])
def gmean_rank(relevant_items: Sequence[X], recommendation: Sequence[X]) -> float:
"""
Calculate the geometric mean rank (GMR) of items in a ranked list.
Args:
relevant_items: An N x 1 sequence of relevant items.
        recommendation: An N x 1 sequence of ordered items.
    Returns:
        The geometric mean rank of the relevant items in the recommendation.
"""
return stats.gmean([rank(item, recommendation) for item in relevant_items])
def average_precision_at_k(relevant_items: np.array, recommendation: np.array, k=10):
"""
Calculate the average precision at k (AP@K) of items in a ranked list.
Args:
relevant_items (array-like): An N x 1 array of relevant items.
        recommendation (array-like): An N x 1 array of ordered items.
k (int): the number of items considered in the predicted list.
Returns:
AP@K (float): The average precision @ k of a predicted list.
`Original <https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/average_precision.py>`_
"""
if len(recommendation) > k:
recommendation = recommendation[:k]
score = 0.0
hits = 0.0
for i, item in enumerate(recommendation):
if item in relevant_items and item not in recommendation[:i]:
hits += 1.0
score += hits / (i + 1.0)
return score / min(len(relevant_items), k)
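# Worked example with illustrative values: relevant_items = [1, 2],
# recommendation = [3, 2, 1]. The hit at rank 2 contributes 1/2 and the hit at
# rank 3 contributes 2/3, so AP@10 = (0.5 + 0.6667) / min(2, 10) ≈ 0.5833.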
def mean_average_precision_at_k(relevant_items: List[list], recommendations: List[list], k: int = 10):
"""
Calculate the mean average precision at k (MAP@K) across predicted lists.
Each prediction should be paired with a list of relevant items. First predicted list is
evaluated against the first list of relevant items, and so on.
Example usage:
.. code-block:: python
import numpy as np
from rexmex.metrics.predicted import mean_average_precision_at_k
mean_average_precision_at_k(
relevant_items=np.array(
[
[1,2],
[2,3]
]
),
predicted=np.array([
[3,2,1],
[2,1,3]
])
)
>>> 0.708333...
Args:
relevant_items (array-like): An M x N array of relevant items.
recommendations (array-like): An M x N array of recommendation lists.
k (int): the number of items considered in the predicted list.
Returns:
MAP@K (float): The mean average precision @ k across recommendations.
"""
aps = []
for items, recommendation in zip(relevant_items, recommendations):
ap = average_precision_at_k(items, recommendation, k)
aps.append(ap)
return np.mean(aps)
def average_recall_at_k(relevant_items: List, recommendation: List, k: int = 10):
"""
Calculate the average recall at k (AR@K) of items in a ranked list.
Args:
relevant_items (array-like): An N x 1 array of relevant items.
recommendation (array-like): An N x 1 array of items.
k (int): the number of items considered in the predicted list.
Returns:
        AR@K (float): The average recall @ k of a predicted list.
"""
if len(recommendation) > k:
recommendation = recommendation[:k]
num_hits = 0.0
score = 0.0
for i, item in enumerate(recommendation):
if item in relevant_items and item not in recommendation[:i]:
num_hits += 1.0
score += num_hits / (i + 1.0)
return score / len(relevant_items)
def mean_average_recall_at_k(relevant_items: List[list], recommendations: List[list], k: int = 10):
"""
Calculate the mean average recall at k (MAR@K) for a list of recommendations.
Each recommendation should be paired with a list of relevant items. First recommendation list is
evaluated against the first list of relevant items, and so on.
Args:
relevant_items (array-like): An M x R list where M is the number of recommendation lists,
and R is the number of relevant items.
recommendations (array-like): An M x N list where M is the number of recommendation lists and
N is the number of recommended items.
k (int): the number of items considered in the recommendation.
Returns:
MAR@K (float): The mean average recall @ k across the recommendations.
"""
ars = []
for items, recommendation in zip(relevant_items, recommendations):
ar = average_recall_at_k(items, recommendation, k)
ars.append(ar)
return np.mean(ars)
def hits_at_k(relevant_items: np.array, recommendation: np.array, k=10):
"""
Calculate the number of hits of relevant items in a ranked list HITS@K.
Args:
        relevant_items (array-like): A 1 x N array of relevant items.
        recommendation (array-like): A 1 x N array of recommended items.
        k (int): the number of items considered in the recommendation.
    Returns:
        HITS@K (float): The share of the first k recommended items that are relevant.
"""
if len(recommendation) > k:
recommendation = recommendation[:k]
hits = 0.0
for i, item in enumerate(recommendation):
if item in relevant_items and item not in recommendation[:i]:
hits += 1.0
return hits / len(recommendation)
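# Example with illustrative values: hits_at_k([1, 2], [3, 2, 1]) finds 2 of the
# 3 recommended items to be relevant, returning 2/3.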
def spearmans_rho(relevant_items: np.array, recommendation: np.array):
"""
Calculate the Spearman's rank correlation coefficient (Spearman's rho) between two lists.
Args:
        relevant_items (array-like): A 1 x N array of items.
        recommendation (array-like): A 1 x N array of items.
    Returns:
        (float): Spearman's rho.
        p-value (float): two-sided p-value for the null hypothesis that the two rankings are uncorrelated.
"""
return stats.spearmanr(relevant_items, recommendation)
def kendall_tau(relevant_items: np.array, recommendation: np.array):
"""
Calculate the Kendall's tau, measuring the correspondance between two lists.
Args:
        relevant_items (array-like): A 1 x N array of items.
        recommendation (array-like): A 1 x N array of items.
    Returns:
        Kendall tau (float): The tau statistic.
        p-value (float): two-sided p-value for the null hypothesis that there is no association between the two rankings.
"""
return stats.kendalltau(relevant_items, recommendation)
def intra_list_similarity(recommendations: List[list], items_feature_matrix: np.array):
"""
Calculate the intra list similarity of recommended items. The items
are represented by feature vectors, which compared with cosine similarity.
The predicted consists of item indices, which are used to fetch the item
features.
    Args:
        recommendations (List[list]): An M x N array of recommendation lists, where M is the number
            of lists and N the number of recommended items
        items_feature_matrix (matrix-like): An N x D matrix, where N is the number of items and D the
            number of features representing one item
    Returns:
        (float): Average intra-list similarity across the recommendation lists
`Original <https://github.com/statisticianinstilettos/recmetrics/blob/master/recmetrics/metrics.py#L232>`_
"""
intra_list_similarities = []
for predicted in recommendations:
predicted_features = items_feature_matrix[predicted]
similarity = cosine_similarity(predicted_features)
upper_right = np.triu_indices(similarity.shape[0], k=1)
avg_similarity = np.mean(similarity[upper_right])
intra_list_similarities.append(avg_similarity)
return np.mean(intra_list_similarities)
def personalization(recommendations: List[list]):
"""
Calculates personalization, a measure of similarity between recommendations.
    A high value indicates that the recommendations are dissimilar, or "personalized".
Args:
        recommendations (List[list]): An M x N array of recommendation lists, where M is the number
            of lists and N the number of recommended items
Returns:
(float): personalization
`Original <https://github.com/statisticianinstilettos/recmetrics/blob/master/recmetrics/metrics.py#L160>`_
"""
n_predictions = len(recommendations)
# map each ranked item to index
item2ix = {}
counter = 0
for prediction in recommendations:
for item in prediction:
if item not in item2ix:
item2ix[item] = counter
counter += 1
n_items = len(item2ix.keys())
# create matrix of predicted x items
items_matrix = np.zeros((n_predictions, n_items))
for i, prediction in enumerate(recommendations):
for item in prediction:
item_ix = item2ix[item]
items_matrix[i][item_ix] = 1
similarity = cosine_similarity(X=items_matrix)
dim = similarity.shape[0]
personalization = (similarity.sum() - dim) / (dim * (dim - 1))
return 1 - personalization
def novelty(recommendations: List[list], item_popularities: dict, num_users: int, k: int = 10):
"""
    Calculates the capacity of the recommender system to generate novel
and unexpected results.
Args:
        recommendations (List[list]): An M x N array of recommendation lists, where M is the number
            of lists and N the number of recommended items
item_popularities (dict): A dict mapping each item in the recommendations to a popularity value.
Popular items have higher values.
num_users (int): The number of users
k (int): The number of items considered in each recommendation.
Returns:
(float): novelty
Metric Definition:
Zhou, T., Kuscsik, Z., Liu, J. G., Medo, M., Wakeling, J. R., & Zhang, Y. C. (2010).
Solving the apparent diversity-accuracy dilemma of recommender systems.
Proceedings of the National Academy of Sciences, 107(10), 4511-4515.
`Original <https://github.com/statisticianinstilettos/recmetrics/blob/master/recmetrics/metrics.py#L14>`_
"""
epsilon = 1e-10
all_self_information = []
    for recommendation in recommendations:
        self_information_sum = 0.0
        for i in range(k):
            item = recommendation[i]
item_pop = item_popularities[item]
self_information_sum += -log2((item_pop + epsilon) / num_users)
avg_self_information = self_information_sum / k
all_self_information.append(avg_self_information)
return np.mean(all_self_information)
def normalized_distance_based_performance_measure(relevant_items: List, recommendation: List):
"""
Calculates the Normalized Distance-based Performance Measure (NPDM) between two
    ordered lists. Two matching orderings return 0.0 while two unmatched orderings return 1.0.
Args:
relevant_items (List): List of items
recommendation (List): The predicted list of items
Returns:
NDPM (float): Normalized Distance-based Performance Measure
Metric Definition:
Yao, Y. Y. "Measuring retrieval effectiveness based on user preference of documents."
Journal of the American Society for Information science 46.2 (1995): 133-145.
Definition from:
Shani, Guy, and Asela Gunawardana. "Evaluating recommendation systems."
Recommender systems handbook. Springer, Boston, MA, 2011. 257-297
"""
assert set(relevant_items) == set(recommendation)
item_relevant_items_rank = {item: i + 1 for i, item in enumerate(dict.fromkeys(relevant_items))}
item_predicted_rank = {item: i + 1 for i, item in enumerate(dict.fromkeys(recommendation))}
items = set(relevant_items)
item_combinations = itertools.combinations(items, 2)
C_minus = 0
C_plus = 0
C_u = 0
for item1, item2 in item_combinations:
item1_relevant_items_rank = item_relevant_items_rank[item1]
item2_relevant_items_rank = item_relevant_items_rank[item2]
item1_pred_rank = item_predicted_rank[item1]
item2_pred_rank = item_predicted_rank[item2]
C = np.sign(item1_pred_rank - item2_pred_rank) * np.sign(item1_relevant_items_rank - item2_relevant_items_rank)
C_u += C ** 2
if C < 0:
C_minus += 1
else:
C_plus += 1
C_u0 = C_u - (C_plus + C_minus)
NDPM = (C_minus + 0.5 * C_u0) / C_u
return NDPM
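# Hedged illustration of the NDPM metric above: both arguments must contain the
# same items (enforced by the assert). Matching orders give 0.0, a fully
# reversed order gives 1.0.
#   normalized_distance_based_performance_measure(["a", "b"], ["a", "b"])  # -> 0.0
#   normalized_distance_based_performance_measure(["a", "b"], ["b", "a"])  # -> 1.0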
def discounted_cumulative_gain(y_true: np.array, y_score: np.array):
"""
Computes the Discounted Cumulative Gain (DCG), a sum of the true scores ordered
by the predicted scores, and then penalized by a logarithmic discount based on ordering.
Args:
y_true (array-like): An N x M array of ground truth values, where M > 1 for multilabel classification problems.
        y_score (array-like): An N x M array of predicted values, where M > 1 for multilabel classification problems.
Returns:
DCG (float): Discounted Cumulative Gain
"""
return dcg_score(y_true, y_score)
def normalized_discounted_cumulative_gain(y_true: np.array, y_score: np.array):
"""
Computes the Normalized Discounted Cumulative Gain (NDCG), a sum of the true scores ordered
by the predicted scores, and then penalized by a logarithmic discount based on ordering.
The score is normalized between [0.0, 1.0]
Args:
y_true (array-like): An N x M array of ground truth values, where M > 1 for multilabel classification problems.
        y_score (array-like): An N x M array of predicted values, where M > 1 for multilabel classification problems.
Returns:
NDCG (float) : Normalized Discounted Cumulative Gain
"""
return ndcg_score(y_true, y_score)
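# Hedged illustration for the two wrappers above, which defer to scikit-learn's
# dcg_score / ndcg_score and therefore expect 2-D arrays (one row per sample);
# the numbers are invented.
#   y_true = np.asarray([[10, 0, 0, 1, 5]])
#   y_score = np.asarray([[0.1, 0.2, 0.3, 4.0, 70.0]])
#   discounted_cumulative_gain(y_true, y_score)             # raw DCG
#   normalized_discounted_cumulative_gain(y_true, y_score)  # scaled to [0.0, 1.0]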
|
"""Test suite for AGS result type."""
import json
from uuid import uuid4
from app.analysis_results.analysis_result_models import AnalysisResultMeta, AnalysisResultWrapper
from app.display_modules.ags.tests.factory import AGSFactory
from tests.base import BaseTestCase
class TestAGSModule(BaseTestCase):
"""Tests for the AGS module."""
def test_get_ags(self):
"""Ensure getting a single AGS result works correctly."""
average_genome_size = AGSFactory()
wrapper = AnalysisResultWrapper(data=average_genome_size, status='S').save()
analysis_result = AnalysisResultMeta(average_genome_size=wrapper).save()
with self.client:
response = self.client.get(
f'/api/v1/analysis_results/{analysis_result.id}/average_genome_size',
content_type='application/json',
)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode())
self.assertIn('success', data['status'])
self.assertEqual(data['data']['status'], 'S')
self.assertIn('data', data['data'])
ags_result = data['data']['data']
self.assertIn('categories', ags_result)
self.assertIn('distributions', ags_result)
self.assertTrue(len(ags_result['distributions']) > 0)
def test_get_pending_average_genome_size(self): # pylint: disable=invalid-name
"""Ensure getting a pending AGS behaves correctly."""
average_genome_size = AGSFactory()
wrapper = AnalysisResultWrapper(data=average_genome_size).save()
analysis_result = AnalysisResultMeta(average_genome_size=wrapper).save()
with self.client:
response = self.client.get(
f'/api/v1/analysis_results/{analysis_result.uuid}/average_genome_size',
content_type='application/json',
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 200)
self.assertIn('success', data['status'])
self.assertEqual(data['data']['status'], 'P')
    def test_get_malformed_id_average_genome_size(self): # pylint: disable=invalid-name
        """Ensure getting a malformed ID for an AGS behaves correctly."""
with self.client:
response = self.client.get(
                '/api/v1/analysis_results/foobarblah/average_genome_size',
content_type='application/json',
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 400)
self.assertIn('Invalid UUID provided.', data['message'])
self.assertIn('error', data['status'])
def test_get_missing_average_genome_size(self): # pylint: disable=invalid-name
"""Ensure getting a missing AGS behaves correctly."""
random_uuid = uuid4()
with self.client:
response = self.client.get(
f'/api/v1/analysis_results/{random_uuid}/average_genome_size',
content_type='application/json',
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 404)
self.assertIn('Analysis Result does not exist.', data['message'])
self.assertIn('error', data['status'])
|
from distutils.core import setup
setup(
name = 'pymsteams',
packages = ['pymsteams'],
version = '0.1.3',
description = 'Format messages and post to Microsoft Teams.',
author = 'Ryan Veach',
author_email = '[email protected]',
url = 'https://github.com/rveachkc/pymsteams',
download_url = 'https://github.com/rveachkc/pymsteams/archive/0.1.3.tar.gz',
keywords = ['Microsoft', 'Teams'], # arbitrary keywords
classifiers = [],
install_requires=['requests'],
)
|
from translationstring import TranslationStringFactory
from pyramid.config import Configurator
from c2cwsgiutils.health_check import HealthCheck
from pyramid.events import BeforeRender, NewRequest
import c2cgeoform
from pkg_resources import resource_filename
from c2c.template.config import config as configuration
from c2cgeoportal_admin.subscribers import add_renderer_globals, add_localizer
search_paths = (
(resource_filename(__name__, 'templates/widgets'),)
+ c2cgeoform.default_search_paths
)
c2cgeoform.default_search_paths = search_paths
_ = TranslationStringFactory('c2cgeoportal_admin')
def main(_, **settings):
"""
This function returns a Pyramid WSGI application.
"""
configuration.init(settings.get('app.cfg'))
settings.update(configuration.get_config())
config = Configurator(settings=settings)
config.include('c2cwsgiutils.pyramid.includeme')
config.include('c2cgeoportal_admin')
from c2cgeoportal_commons.testing import (
generate_mappers,
get_engine,
get_session_factory,
get_tm_session,
)
# Initialize the dev dbsession
settings = config.get_settings()
settings['tm.manager_hook'] = 'pyramid_tm.explicit_manager'
session_factory = get_session_factory(get_engine(settings))
config.registry['dbsession_factory'] = session_factory
# Make request.dbsession available for use in Pyramid
config.add_request_method(
# request.tm is the transaction manager used by pyramid_tm
lambda request: get_tm_session(session_factory, request.tm),
'dbsession',
reify=True
)
config.add_subscriber(add_renderer_globals, BeforeRender)
config.add_subscriber(add_localizer, NewRequest)
generate_mappers()
health_check = HealthCheck(config)
health_check.add_url_check('http://{}/'.format(settings['healthcheck_host']))
return config.make_wsgi_app()
class PermissionSetter:
def __init__(self, config):
self.default_permission_to_revert = None
self.config = config
def __enter__(self):
        self.config.commit() # avoid ConfigurationConflictError
if self.config.introspector.get_category('default permission'):
self.default_permission_to_revert = \
self.config.introspector.get_category('default permission')[0]['introspectable']['value']
self.config.set_default_permission('admin')
def __exit__(self, _type, value, traceback):
        self.config.commit() # avoid ConfigurationConflictError
self.config.set_default_permission(self.default_permission_to_revert)
def includeme(config: Configurator):
config.include('pyramid_jinja2')
config.include('c2cgeoform')
config.include('c2cgeoportal_commons')
config.include('c2cgeoportal_admin.routes')
# Use pyramid_tm to hook the transaction lifecycle to the request
config.include('pyramid_tm')
config.add_translation_dirs('c2cgeoportal_admin:locale')
with PermissionSetter(config):
config.scan()
|
from algernon.memory import Memory
import pytest
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense
from keras.optimizers import sgd
class MockModel:
def __init__(self, output_dims, input_dims):
self.w = np.random.random(size=(output_dims, input_dims))
def predict(self, X):
return np.dot(X, self.w.T)
def predict_proba(self, X):
return self.predict(X)
class TestMemory:
TEST_OBSERVATION_SHAPE = (4, 2)
TEST_ACTION_DIMS = 3
TEST_GAMMA = 0.3
TEST_MAX_MEMORY = 1000
def test_init_values(self):
m = Memory(TestMemory.TEST_OBSERVATION_SHAPE,
TestMemory.TEST_ACTION_DIMS,
TestMemory.TEST_GAMMA,
TestMemory.TEST_MAX_MEMORY)
assert m.observation_shape == TestMemory.TEST_OBSERVATION_SHAPE
assert m.action_dims == TestMemory.TEST_ACTION_DIMS
assert len(m.memories) == 0
assert m.max_memory == TestMemory.TEST_MAX_MEMORY
def test_observation_dims(self):
ret = Memory.get_observation_dims(TestMemory.TEST_OBSERVATION_SHAPE)
# 4 * 2
assert ret == 8
def test_append(self):
m = Memory(TestMemory.TEST_OBSERVATION_SHAPE,
TestMemory.TEST_ACTION_DIMS,
TestMemory.TEST_GAMMA,
TestMemory.TEST_MAX_MEMORY)
s = np.random.random(size=TestMemory.TEST_OBSERVATION_SHAPE)
s_prime = np.random.random(size=TestMemory.TEST_OBSERVATION_SHAPE)
m.append(s, 1, 0.2, s_prime, False)
assert len(m.memories) == 1
def test_get_batch(self):
m = Memory(TestMemory.TEST_OBSERVATION_SHAPE,
TestMemory.TEST_ACTION_DIMS,
TestMemory.TEST_GAMMA,
TestMemory.TEST_MAX_MEMORY)
for _ in range(10):
s = np.random.random(size=TestMemory.TEST_OBSERVATION_SHAPE).flatten()
a = np.random.randint(TestMemory.TEST_ACTION_DIMS)
r = np.random.random(size=1)[0]
s_prime = np.random.random(size=TestMemory.TEST_OBSERVATION_SHAPE).flatten()
m.append(s, a, r, s_prime, False)
assert len(m.memories) == 10
X, y = m.get_batch(MockModel(3, 8), 3)
assert X.shape[0] == y.shape[0] == 3
assert X.shape[1] == 8
assert y.shape[1] == 3
|
# Copyright (c) 2018, Stellapps Technologies Private Ltd.
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import has_common
def execute(filters=None):
columns, data = get_columns(), get_data(filters)
    return columns, data
def get_columns():
columns = [
_("Date") + ":Data:90",
_("Sales Invoice") + ":Link/Sales Invoice:150",
_("Item Code") + ":Data:150",
_("Quantity") + ":Float:150",
_("Rate") + ":Currency:150",
_("Total") + ":Currency:100",
_("Remarks") + ":Data:200"
]
return columns
def get_data(filters):
data = frappe.db.sql("""select
DATE_FORMAT(si.posting_date, "%d-%m-%y"),
si.name,
si_item.item_name,
si_item.qty,
si_item.rate,
si_item.qty*si_item.rate
from
`tabSales Invoice` si,
`tabSales Invoice Item` si_item
where
si.local_sale = 1
and si.name = si_item.parent
and si.customer_or_farmer = "Farmer"
and si_item.item_code not in ('COW Milk','BUFFALO Milk')
and si.docstatus = 1 and si.company = '{0}'
{1} order by si.posting_date """.format(filters.get('vlcc'),get_conditions(filters)),as_list=1,debug=0)
if data:
g_total = 0
for row in data:
g_total += row[5]
data.append(["","","Grand Total","","",g_total,""])
return data
def get_conditions(filters):
conditions = " and 1=1"
if filters.get('farmer') and filters.get('start_date') and filters.get('end_date'):
conditions += " and si.posting_date between '{0}' and '{1}' and si.farmer = '{2}'".format(filters.get('start_date'),filters.get('end_date'),filters.get('farmer'))
elif filters.get('start_date') and filters.get('end_date'):
conditions += " and si.posting_date between '{0}' and '{1}' ".format(filters.get('start_date'),filters.get('end_date'))
return conditions
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cint, flt
from erpnext.stock.utils import update_included_uom_in_report
from erpnext.stock.doctype.serial_no.serial_no import get_serial_nos
from datetime import datetime
from datetime import date
def execute(filters=None):
include_uom = filters.get("include_uom")
columns = get_columns()
items = get_items(filters)
sl_entries = get_stock_ledger_entries(filters, items)
item_details = get_item_details(items, sl_entries, include_uom)
opening_row = get_opening_balance(filters, columns)
precision = cint(frappe.db.get_single_value("System Settings", "float_precision"))
data = []
conversion_factors = []
check_data = []
issue_list = []
receive_list = []
if opening_row:
data.append(opening_row)
actual_qty = stock_value = 0
#print "sl_entries",sl_entries
available_serial_nos = {}
for sle in sl_entries:
item_detail = item_details[sle.item_code]
row_data = []
if sle.get("actual_qty") > 0:
receive_list.append(sle)
else:
issue_list.append(sle)
check_data.append([sle.date, sle.item_code, item_detail.item_name, item_detail.item_group,
item_detail.brand, item_detail.description, sle.warehouse,
item_detail.stock_uom, sle.actual_qty, sle.qty_after_transaction,
(sle.incoming_rate if sle.actual_qty > 0 else 0.0),
sle.valuation_rate, sle.stock_value, sle.voucher_type, sle.voucher_no,
sle.batch_no, sle.serial_no, sle.project, sle.company])
if include_uom:
conversion_factors.append(item_detail.conversion_factor)
receive_date_wise_dic = {}
for receive_list_date_data in receive_list:
formatted_date = frappe.utils.formatdate(receive_list_date_data.get("date"), "yyyy-mm-dd")
if receive_date_wise_dic.get(formatted_date):
receive_date_wise_dic[formatted_date]["actual_qty"] += receive_list_date_data.get("actual_qty")
else:
receive_date_wise_dic[formatted_date] = receive_list_date_data
# print "receive_date_wise_dic",receive_date_wise_dic
sent_date_wise_dic = {}
for issue_list_date_data in issue_list:
formatted_date = frappe.utils.formatdate(issue_list_date_data.get("date"), "yyyy-mm-dd")
if sent_date_wise_dic.get(formatted_date):
sent_date_wise_dic[formatted_date]["actual_qty"] += issue_list_date_data.get("actual_qty")
else:
sent_date_wise_dic[formatted_date] = issue_list_date_data
item_age_calculated_rows = get_item_age_calculated_rows(receive_date_wise_dic, sent_date_wise_dic)
for date, item_age_calculated_row_list in sorted(item_age_calculated_rows.items()):
for item_age_calculated_row_dic in item_age_calculated_row_list:
row_dic = {
"date": date,
"item_code": item_age_calculated_row_dic.get("item_code"),
"karigar_wh": item_age_calculated_row_dic.get("warehouse"),
"recd_qty": item_age_calculated_row_dic.get("in"),
"sent_qty": item_age_calculated_row_dic.get("out"),
"average_ageing": item_age_calculated_row_dic.get("age")
}
data.append(row_dic)
return columns, data
def update_available_serial_nos(available_serial_nos, sle):
serial_nos = get_serial_nos(sle.serial_no)
key = (sle.item_code, sle.warehouse)
if key not in available_serial_nos:
available_serial_nos.setdefault(key, [])
existing_serial_no = available_serial_nos[key]
for sn in serial_nos:
if sle.actual_qty > 0:
if sn in existing_serial_no:
existing_serial_no.remove(sn)
else:
existing_serial_no.append(sn)
else:
if sn in existing_serial_no:
existing_serial_no.remove(sn)
else:
existing_serial_no.append(sn)
sle.balance_serial_no = '\n'.join(existing_serial_no)
def get_columns():
    # 6 report columns
    columns = []
    for col in range(6):
columns.append("")
columns[0] = {
"label": ("Date"),
"fieldname": "date",
"width": 100
}
columns[1] = {
"label": ("Item"),
"fieldname": "item_code",
"width": 100,
"fieldtype": "Link",
"options": "Item"
}
columns[2] = {
"label": ("Karigar Wh"),
"fieldname": "karigar_wh",
"width": 100,
"fieldtype": "Link",
"options": "Warehouse"
}
columns[3] = {
"label": ("In"),
"fieldname": "recd_qty",
"width": 100
}
columns[4] = {
"label": ("Out"),
"fieldname": "sent_qty",
"width": 100
}
columns[5] = {
"label": ("Average Ageing"),
"fieldname": "average_ageing",
"width": 100
}
return columns
def get_stock_ledger_entries(filters, items):
item_conditions_sql = ''
if items:
item_conditions_sql = 'and sle.item_code in ({})'\
.format(', '.join([frappe.db.escape(i) for i in items]))
return frappe.db.sql("""select concat_ws(" ", posting_date, posting_time) as date,
item_code, warehouse, actual_qty, qty_after_transaction, incoming_rate, valuation_rate,
stock_value, voucher_type, voucher_no, batch_no, serial_no, company, project, stock_value_difference
from `tabStock Ledger Entry` sle
where company = %(company)s and
posting_date between %(from_date)s and %(to_date)s
{sle_conditions}
{item_conditions_sql}
order by posting_date asc, posting_time asc, creation asc"""\
.format(
sle_conditions=get_sle_conditions(filters),
item_conditions_sql = item_conditions_sql
), filters, as_dict=1)
def get_items(filters):
conditions = []
if filters.get("item_code"):
conditions.append("item.name=%(item_code)s")
else:
if filters.get("brand"):
conditions.append("item.brand=%(brand)s")
if filters.get("item_group"):
conditions.append(get_item_group_condition(filters.get("item_group")))
items = []
if conditions:
items = frappe.db.sql_list("""select name from `tabItem` item where {}"""
.format(" and ".join(conditions)), filters)
return items
def get_item_details(items, sl_entries, include_uom):
item_details = {}
if not items:
items = list(set([d.item_code for d in sl_entries]))
if not items:
return item_details
cf_field = cf_join = ""
if include_uom:
cf_field = ", ucd.conversion_factor"
cf_join = "left join `tabUOM Conversion Detail` ucd on ucd.parent=item.name and ucd.uom=%s" \
% frappe.db.escape(include_uom)
res = frappe.db.sql("""
select
item.name, item.item_name, item.description, item.item_group, item.brand, item.stock_uom {cf_field}
from
`tabItem` item
{cf_join}
where
item.name in ({item_codes})
""".format(cf_field=cf_field, cf_join=cf_join, item_codes=','.join(['%s'] *len(items))), items, as_dict=1)
for item in res:
item_details.setdefault(item.name, item)
return item_details
def get_sle_conditions(filters):
conditions = []
if filters.get("warehouse"):
warehouse_condition = get_warehouse_condition(filters.get("warehouse"))
if warehouse_condition:
conditions.append(warehouse_condition)
if filters.get("voucher_no"):
conditions.append("voucher_no=%(voucher_no)s")
if filters.get("batch_no"):
conditions.append("batch_no=%(batch_no)s")
if filters.get("project"):
conditions.append("project=%(project)s")
return "and {}".format(" and ".join(conditions)) if conditions else ""
def get_opening_balance(filters, columns):
if not (filters.item_code and filters.warehouse and filters.from_date):
return
from erpnext.stock.stock_ledger import get_previous_sle
last_entry = get_previous_sle({
"item_code": filters.item_code,
"warehouse_condition": get_warehouse_condition(filters.warehouse),
"posting_date": filters.from_date,
"posting_time": "00:00:00"
})
row = {}
row["item_code"] = _("'Opening'")
for dummy, v in ((9, 'qty_after_transaction'), (11, 'valuation_rate'), (12, 'stock_value')):
row[v] = last_entry.get(v, 0)
return row
def get_warehouse_condition(warehouse):
warehouse_details = frappe.db.get_value("Warehouse", warehouse, ["lft", "rgt"], as_dict=1)
if warehouse_details:
return " exists (select name from `tabWarehouse` wh \
where wh.lft >= %s and wh.rgt <= %s and warehouse = wh.name)"%(warehouse_details.lft,
warehouse_details.rgt)
return ''
def get_item_group_condition(item_group):
item_group_details = frappe.db.get_value("Item Group", item_group, ["lft", "rgt"], as_dict=1)
if item_group_details:
return "item.item_group in (select ig.name from `tabItem Group` ig \
where ig.lft >= %s and ig.rgt <= %s and item.item_group = ig.name)"%(item_group_details.lft,
item_group_details.rgt)
return ''
def get_item_age_calculated_rows(receive_date_wise_dic,sent_date_wise_dic):
initial_receive_item_age_rows = {}
# calculate initial age and bal qty here
for receive_date, receive_date_data in sorted(receive_date_wise_dic.items()):
        # age (in days) relative to today, set for every receive-date row
today_date = frappe.utils.nowdate()
receive_date_data["age"] = get_age_in_days(today_date, receive_date)
receive_date_data["bal_qty_temp"] = receive_date_data.get("actual_qty")
initial_receive_item_age_rows.update({receive_date: receive_date_data})
# print ("initial_receive_item_age_rows first",initial_receive_item_age_rows)
report_json_data = {}
sent_date_age = 2
today = 1
updated_initial_receive_item_age_rows = {} # received date updated balance qty
for sent_date, sent_date_data in sorted(sent_date_wise_dic.items()):
qty_needed_to_sent = abs(sent_date_data.get("actual_qty"))
qty_assigned_from_qty_to_be_sent = 0
qty_left_from_qty_to_be_sent = qty_needed_to_sent
updated_initial_receive_item_age_rows_temp_rec_loop = initial_receive_item_age_rows
for receive_date, initial_receive_item_age_row in sorted(initial_receive_item_age_rows.items()):
bal_qty_in_rec_date_data = updated_initial_receive_item_age_rows_temp_rec_loop[receive_date]["bal_qty_temp"]
if bal_qty_in_rec_date_data > 0: # checking stock against received date
if bal_qty_in_rec_date_data > qty_left_from_qty_to_be_sent:
sent_row_data = {}
sent_row_data["warehouse"] = initial_receive_item_age_row["warehouse"]
sent_row_data["item_code"] = initial_receive_item_age_row["item_code"]
sent_row_data["actual_qty"] = initial_receive_item_age_row["actual_qty"]
sent_age_cal = initial_receive_item_age_row["age"] - sent_date_age
sent_row_data["age"] = get_age_in_days(sent_date, receive_date)
sent_row_data["in"] = qty_left_from_qty_to_be_sent
sent_row_data["out"] = qty_left_from_qty_to_be_sent
sent_row_data["trans_type"] = "sent"
updated_initial_receive_item_age_rows_temp_rec_loop[receive_date]["bal_qty_temp"] = bal_qty_in_rec_date_data - qty_left_from_qty_to_be_sent
qty_left_from_qty_to_be_sent = qty_left_from_qty_to_be_sent - sent_row_data["out"]
qty_assigned_from_qty_to_be_sent = qty_assigned_from_qty_to_be_sent + sent_row_data["out"]
# sent row data update
if report_json_data.get(receive_date):
report_json_data[receive_date].append(sent_row_data)
else:
report_json_data[receive_date] = [sent_row_data]
break
elif bal_qty_in_rec_date_data == qty_left_from_qty_to_be_sent:
sent_row_data = {}
sent_row_data["warehouse"] = initial_receive_item_age_row["warehouse"]
sent_row_data["item_code"] = initial_receive_item_age_row["item_code"]
sent_row_data["actual_qty"] = initial_receive_item_age_row["actual_qty"]
sent_row_data["age"] = get_age_in_days(sent_date, receive_date)
sent_row_data["in"] = qty_left_from_qty_to_be_sent
sent_row_data["out"] = qty_left_from_qty_to_be_sent
sent_row_data["trans_type"] = "sent"
updated_initial_receive_item_age_rows_temp_rec_loop[receive_date]["bal_qty_temp"] = bal_qty_in_rec_date_data - qty_left_from_qty_to_be_sent
# sent row data update
if report_json_data.get(receive_date):
report_json_data[receive_date].append(sent_row_data)
else:
report_json_data[receive_date] = [sent_row_data]
qty_left_from_qty_to_be_sent = qty_left_from_qty_to_be_sent - sent_row_data["out"]
qty_assigned_from_qty_to_be_sent = qty_assigned_from_qty_to_be_sent + sent_row_data["out"]
break
else:
qty_can_be_sent_from_receive = bal_qty_in_rec_date_data
sent_row_data = {}
sent_row_data["warehouse"] = initial_receive_item_age_row["warehouse"]
sent_row_data["item_code"] = initial_receive_item_age_row["item_code"]
sent_row_data["actual_qty"] = initial_receive_item_age_row["actual_qty"]
sent_row_data["age"] = get_age_in_days(sent_date, receive_date)
sent_row_data["in"] = qty_can_be_sent_from_receive
sent_row_data["out"] = qty_can_be_sent_from_receive
sent_row_data["trans_type"] = "sent"
updated_initial_receive_item_age_rows_temp_rec_loop[receive_date]["bal_qty_temp"] = bal_qty_in_rec_date_data - qty_can_be_sent_from_receive
qty_left_from_qty_to_be_sent = qty_left_from_qty_to_be_sent - sent_row_data["out"]
qty_assigned_from_qty_to_be_sent = qty_assigned_from_qty_to_be_sent + sent_row_data["out"]
# sent row data update
if report_json_data.get(receive_date):
report_json_data[receive_date].append(sent_row_data)
else:
report_json_data[receive_date] = [sent_row_data]
if qty_left_from_qty_to_be_sent > 0:
continue
else:
break
# updation for receive loop calculation
initial_receive_item_age_rows = updated_initial_receive_item_age_rows_temp_rec_loop # each recive for loop will have updated receive balance qty
# updation for total received date calculatiom
updated_initial_receive_item_age_rows = updated_initial_receive_item_age_rows_temp_rec_loop
for receive_date, initial_receive_item_age_row in sorted(updated_initial_receive_item_age_rows.items()):
if initial_receive_item_age_row.get("bal_qty_temp") > 0:
receive_row_data = {}
receive_row_data["warehouse"] = initial_receive_item_age_row["warehouse"]
receive_row_data["item_code"] = initial_receive_item_age_row["item_code"]
receive_row_data["actual_qty"] = initial_receive_item_age_row["actual_qty"]
receive_row_data["age"] = initial_receive_item_age_row["age"]
receive_row_data["in"] = initial_receive_item_age_row["bal_qty_temp"]
receive_row_data["trans_type"] = "receive"
# receive row data update
# report_json_data[receive_date] = [receive_row_data]
if report_json_data.get(receive_date):
report_json_data[receive_date].append(receive_row_data)
else:
report_json_data[receive_date] = [receive_row_data]
return report_json_data
def get_age_in_days(from_date , to_date):
from_date = datetime.strptime(from_date, '%Y-%m-%d')
to_date = datetime.strptime(to_date, '%Y-%m-%d')
age = from_date - to_date
return age.days
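# Hedged worked example for get_age_in_days: both arguments are 'YYYY-MM-DD'
# strings and the result is the signed day difference from_date - to_date.
#   get_age_in_days("2018-01-10", "2018-01-01")  # -> 9
#   get_age_in_days("2018-01-01", "2018-01-10")  # -> -9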
@frappe.whitelist()
def calculateAge(posting_date):
posting_date = "1996-02-29"
today = date.today()
dtt = datetime.strptime(posting_date, '%Y-%m-%d')
dt = dtt.date()
age = today - dt
return age.days |
#######################################################################
# Copyright (C) 2008-2020 by Carnegie Mellon University.
#
# @OPENSOURCE_LICENSE_START@
# See license information in ../../../LICENSE.txt
# @OPENSOURCE_LICENSE_END@
#
#######################################################################
#######################################################################
# $SiLK: fglob.py ef14e54179be 2020-04-14 21:57:45Z mthomas $
#######################################################################
from subprocess import Popen, PIPE
from datetime import date, datetime, time
import errno
import sys
# Python 3.0 doesn't have basestring
if sys.hexversion >= 0x03000000:
basestring = str
__all__ = ['FGlob']
rwfglob_executable = "rwfglob"
class FGlob(object):
def __init__(self,
classname=None,
type=None,
sensors=None,
start_date=None,
end_date=None,
data_rootdir=None,
site_config_file=None):
global rwfglob_executable
if not (classname or type or sensors or start_date or end_date):
raise ValueError("Must contain a specification")
if end_date and not start_date:
raise ValueError("An end_date requires a start_date")
if isinstance(sensors, list):
sensors = ",".join(map(str, sensors))
elif isinstance(sensors, basestring):
sensors = str(sensors)
if isinstance(type, list):
type = ",".join(type)
elif isinstance(type, basestring):
type = str(type)
if isinstance(start_date, datetime):
start_date = start_date.strftime("%Y/%m/%d:%H")
elif isinstance(start_date, date):
start_date = start_date.strftime("%Y/%m/%d")
elif isinstance(start_date, time):
start_date = datetime.combine(date.today(), start_date)
start_date = start_date.strftime("%Y/%m/%d:%H")
if isinstance(end_date, datetime):
end_date = end_date.strftime("%Y/%m/%d:%H")
elif isinstance(end_date, date):
end_date = end_date.strftime("%Y/%m/%d")
elif isinstance(end_date, time):
end_date = datetime.combine(date.today(), end_date)
end_date = end_date.strftime("%Y/%m/%d:%H")
self.args = [rwfglob_executable, "--no-summary"]
for val, arg in [(classname, "class"),
(type, "type"),
(sensors, "sensors"),
(start_date, "start-date"),
(end_date, "end-date"),
(data_rootdir, "data-rootdir"),
(site_config_file, "site-config-file")]:
if val:
if not isinstance(val, str):
raise ValueError("Specifications must be strings")
self.args.append("--%s" % arg)
self.args.append(val)
def __iter__(self):
try:
rwfglob = Popen(self.args, bufsize=1, stdout=PIPE, close_fds=True,
universal_newlines=True)
except OSError:
# Handled using sys.exc_info() in order to compile on 2.4
# and 3.0
e = sys.exc_info()[1]
if e.errno != errno.ENOENT:
raise
raise RuntimeError("Cannot find the %s program in PATH" %
self.args[0])
for line in rwfglob.stdout:
if rwfglob.returncode not in [None, 0]:
raise RuntimeError("rwfglob failed")
yield line.rstrip('\n\r')
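# Hedged usage sketch for FGlob: iterating the object runs rwfglob and yields one
# repository file name per line. The class/type/date values below are invented
# and depend on the local SiLK site configuration.
#   for filename in FGlob(classname="all", type="in", start_date="2020/01/01"):
#       print(filename)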
|
from pesummary.gw.fetch import fetch_open_strain
import os
# Gravitational-wave strain data is typically either 32s or 4096s in duration
# with a sampling rate of either 4kHz or 16kHz. We can download either by
# specifying the event name together with the duration and sampling rate via
# the `duration` and `sampling_rate` kwargs respectively.
path = fetch_open_strain(
"GW190412", IFO="L1", duration=32, sampling_rate=4096, read_file=False
)
print(path)
# If we wish to read the file, we need to specify the channel name
data = fetch_open_strain(
"GW190412", IFO="L1", duration=32, sampling_rate=4096, read_file=True,
channel="L1:GWOSC-4KHZ_R1_STRAIN"
)
print(data)
|
# Spectral_Analysis_Amp_and_Phase.py
import os
import numpy as np
import pandas as pd
import scipy.linalg as la
import matplotlib.pyplot as plt
# Import time from the data or define it
t = np.arange(0.015, 0.021, 10**-7)
dt = 10**-7
# Define trainsize and number of modes
trainsize = 20000 # Number of snapshots used as training data.
num_modes = 44 # Number of POD modes.
reg = 0 # Just an input in case we regularize DMDc.
# Locate the full data of snapshots FOM and ROMs (INPUT)
Folder_name_data = 'C:\\Users\\Admin\\Desktop\\combustion\\'
file_name_FOM = 'traces_gems_60k_final.npy'
file_name_ROM_DMDc = 'traces_rom_DMDc_rsvd.npy'
file_name_ROM_cubic_r25 = 'traces_rom_cubic_tripple_reg_r25.npy'
file_name_ROM_cubic_r44 = 'traces_rom_cubic_r44.npy'
file_name_ROM_Quad_r44 = 'traces_rom_60k_100_30000.npy'
# Define output file location and file names to identify phase and amplitudes (OUTPUT)
folder_name = "C:\\Users\\Admin\\Desktop\\combustion\\spectral\\Final_plots\\"
Amp_name = folder_name + "\\" + "Amp" # Amplitude plots
Phase_name = folder_name + "\\" + "Phase" # Phase plots
# Load the data
FOM_ = np.load(Folder_name_data + file_name_FOM)
ROM_DMDc = np.load(Folder_name_data + file_name_ROM_DMDc)
ROM_cubic_r25 = np.load(Folder_name_data + file_name_ROM_cubic_r25)
ROM_cubic_r44 = np.load(Folder_name_data + file_name_ROM_cubic_r44)
ROM_Quad_r44 = np.load(Folder_name_data + file_name_ROM_Quad_r44)
# Plotting adjustments
End_plot_at = 60000 # 59990 # 40000
freq_limit_to_plot = 15000
# =============================================================================
def lineplots_timeseries(FOM_,
ROM_Quad_r44, ROM_cubic_r25, ROM_cubic_r44, ROM_DMDc,
datanumber, unit, savefile):
"""Plots for comparision of data in time. Check the saved data in
folder_name.
Parameters
----------
FOM_
Full order model data input
ROM_Quad_r44
Q-OPINF at r = 44
ROM_cubic_r25
C-OPINF at r = 25
ROM_cubic_r44
C-OPINF at r = 44
ROM_DMDc
DMDc results
datanumber
Defines the state parameter
* -12 = Pressure
* -8 = Vx
* -4 = Vy
* 0 = Temperature
* 8 = [CH4]
* 12 = [O2]
* 16 = [H2O]
* 20 = [CO2]
unit
Unit for each variable (Pa, Kelvin...)
savefile
Suffix to save the file name
"""
print("Time series plots")
plt.xlim([0.015, 0.021]) # set axis limits
plt.plot(t[0:End_plot_at],
pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at],
label='FOM', linestyle='solid', c='k')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_Quad_r44).loc[T_st + datanumber][0:End_plot_at],
label='Q-OPINF', linestyle='dashed', c='#ff7f0e')
# plt.plot(t[0:End_plot_at],
# pd.DataFrame(ROM_cubic_r25).loc[T_st + datanumber][0:End_plot_at],
# label='C-OPINF_r25', linestyle='dashed')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_cubic_r44).loc[T_st + datanumber][0:End_plot_at],
label='C-OPINF', linestyle='dashed', c='b')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_DMDc).loc[T_st + datanumber][0:End_plot_at],
label='DMDc', linestyle='dashdot', c='r')
plt.xlabel('time')
plt.ylabel(unit)
plt.axvline(x=t[0] + trainsize*dt, color='black')
plt.legend()
fname = f"{T_st}_ts_{trainsize}_r_{num_modes}_reg_{reg}{savefile}.pdf"
plt.savefig(os.path.join(folder_name, fname),
bbox_inches="tight", dpi=200)
plt.show()
def L2errorplots(FOM_, ROM_Quad_r44, ROM_cubic_r25, ROM_cubic_r44, ROM_DMDc,
datanumber, unit):
"""Plot L2 norm error comparision between all the ROMs.
Parameters
----------
FOM_
Full order model data input
ROM_Quad_r44
Q-OPINF at r = 44
ROM_cubic_r25
C-OPINF at r = 25
ROM_cubic_r44
C-OPINF at r = 44
ROM_DMDc
DMDc results
datanumber
Defines the state parameter
* -12 = Pressure
* -8 = Vx
* -4 = Vy
* 0 = Temperature
* 8 = [CH4]
* 12 = [O2]
* 16 = [H2O]
* 20 = [CO2]
unit
Unit for each variable (Pa, Kelvin...)
"""
print("L2 norm error plot")
e_ROM_Quad_r44 = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_Quad_r44).loc[T_st + datanumber][0:End_plot_at]))/la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at])
e_ROM_cubic_r25 = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_cubic_r25).loc[T_st + datanumber][0:End_plot_at]))/la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at])
e_ROM_cubic_r44 = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_cubic_r44).loc[T_st + datanumber][0:End_plot_at]))/la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at])
e_ROM_DMDc = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_DMDc).loc[T_st + datanumber][0:End_plot_at]))/la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at])
plt.plot(t[0:End_plot_at],
pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at],
label='FOM', linestyle='solid')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_Quad_r44).loc[T_st + datanumber][0:End_plot_at],
label='Q-OPINF', linestyle='dashed')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_cubic_r25).loc[T_st + datanumber][0:End_plot_at],
label='C-OPINF_r25', linestyle='dashed')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_cubic_r44).loc[T_st + datanumber][0:End_plot_at],
label='C-OPINF_r44', linestyle='dashed')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_DMDc).loc[T_st + datanumber][0:End_plot_at],
label='DMDc', linestyle='dashdot')
x_axis = ['ROM_Quad_r44', 'ROM_cubic_r25', 'ROM_cubic_r44', 'ROM_DMDc']
y_axis = [e_ROM_Quad_r44, e_ROM_cubic_r25, e_ROM_cubic_r44, e_ROM_DMDc]
plt.scatter(x_axis,y_axis, s=100)
plt.xlabel('time')
plt.ylabel(unit)
plt.title("L2 norm Error Plot")
plt.legend()
fnm = f"Error_plot_{T_st}_ts_{trainsize}_r_{num_modes}_reg_{reg}{unit}.pdf"
plt.savefig(os.path.join(folder_name, fnm), bbox_inches="tight", dpi=200)
plt.show()
def get_freq_and_amplitude(T_ROM):
"""
Parameters
----------
T_ROM = any input signal
Returns
-------
frequency and amplitude transformation of the signal
"""
fft1 = np.fft.fft(T_ROM)
fftfreq1 = np.fft.fftfreq(len(T_ROM), d=dt)
amplitude_DMD = abs(fft1)
return fftfreq1, amplitude_DMD, fft1
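# Hedged sanity check for get_freq_and_amplitude, using a synthetic 500 Hz sine
# sampled at this script's dt = 1e-7 s (20000 samples gives a 500 Hz bin width,
# so the amplitude spectrum peaks exactly at +/-500 Hz).
#   sig = np.sin(2 * np.pi * 500 * t[:20000])
#   freqs, amps, spectrum = get_freq_and_amplitude(sig)
#   peak = abs(freqs[np.argmax(amps)])  # ~500.0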
def amplitude_plots(fftfreq,
fftfreq_Quad_r44, fftfreq_cubic_r25,
fftfreq_cubic_r44, fftfreq_DMDc,
amplitude,
amplitude_Quad_r44, amplitude_cubic_r25,
amplitude_cubic_r44, amplitude_DMDc,
unit, savefile,
title_test_or_train="Training results plotted in the frequency domain",
save_id="_ts_"):
"""Amplitude plot comparision and save files in the Amp name folder
Eg. for the test data filename will be : Amp_test_12_ts_20000_r_44_reg_0CO2
For the training data filename will be : Amp12_ts_20000_r_44_reg_0CO2
Parameters
----------
fftfreq
frequency content of the FOM
fftfreq_Quad_r44
frequency content of the Q-OPINF at r = 44
fftfreq_cubic_r25
frequency content of the C-OPINF at r = 25
fftfreq_cubic_r44
frequency content of the C-OPINF at r = 44
fftfreq_DMDc
frequency content of the DMDc at r = 44
amplitude
Amplitude content of the FOM
amplitude_Quad_r44
Amplitude content of the Q-OPINF at r = 44
amplitude_cubic_r25
Amplitude content of the C-OPINF at r = 25
amplitude_cubic_r44
Amplitude content of the C-OPINF at r = 44
amplitude_DMDc
Amplitude content of the DMDc at r = 44
unit
unit for each variable (Pa, Kelvin...)
savefile
Filename to be saved
title_test_or_train
"Training results plotted in the frequency domain"
save_id
'_ts_' for traindata, '_test_' for testing data
"""
st = 1
end = 60
plt.xlim([0,freq_limit_to_plot])
plt.scatter(fftfreq[st:end], amplitude[st:end],
s=50, label='FOM', marker='o', alpha=0.5, c='k')
plt.scatter(fftfreq_Quad_r44[st:end], amplitude_Quad_r44[st:end],
s=50, label='Q-OPINF', marker='s', alpha=0.5, c='#ff7f0e')
# plt.scatter(fftfreq_cubic_r25[st:end], amplitude_cubic_r25[st:end],
# s=50, label='C-OPINF_r25', marker='p', alpha=0.5)
plt.scatter(fftfreq_cubic_r44[st:end], amplitude_cubic_r44[st:end],
s=50, label='C-OPINF', marker='*', alpha=0.5, c='b')
plt.scatter(fftfreq_DMDc[st:end], amplitude_DMDc[st:end],
s=50, label='DMDc', marker='+', alpha=0.5, c='r')
plt.plot(fftfreq[st:end], amplitude[st:end],
linestyle='solid', c='k')
plt.plot(fftfreq_Quad_r44[st:end], amplitude_Quad_r44[st:end],
linestyle='dashed', c='#ff7f0e')
# plt.plot(fftfreq_cubic_r25[st:end], amplitude_cubic_r25[st:end],
# linestyle='dashed')
plt.plot(fftfreq_cubic_r44[st:end], amplitude_cubic_r44[st:end],
linestyle='dashed', c='b')
plt.plot(fftfreq_DMDc[st:end], amplitude_DMDc[st:end],
linestyle='dashdot', c='r')
plt.xlabel('freq')
plt.ylabel('Amplitude')
plt.legend()
# plt.title(title_test_or_train)
if save_id == "_ts_":
fname = f"{Amp_name}{T_st}{save_id}{trainsize}_r_{num_modes}"
fname += f"_reg_{reg}{savefile}.pdf"
elif save_id == "_test_":
fname = f"{Amp_name}{save_id}{T_st}_ts_{trainsize}_r_{num_modes}"
fname += f"_reg_{reg}{savefile}.pdf"
else:
raise ValueError(f"invalid save_id '{save_id}'")
plt.savefig(fname, bbox_inches="tight", dpi=200)
plt.show()
def get_min(X):
"""
Parameters
----------
X
Phase angle array
Returns
-------
min(X, 360-X)
"""
b = abs(X)
a = abs(360-b)
return np.minimum(a,b)
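# Hedged worked example for get_min: it wraps an angle difference onto [0, 180]
# degrees, so a raw difference of 350 degrees is reported as 10 degrees.
#   get_min(np.array([350.0, -350.0, 90.0]))  # -> array([10., 10., 90.])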
def phase_plots(fftfreq,
fftfreq_Quad_r44, fftfreq_cubic_r25,
fftfreq_cubic_r44, fftfreq_DMDc,
Phase_FOM,
Phase_Quad_r44, Phase_cubic_r25,
Phase_cubic_r44, Phase_DMDc,
unit, savefile,
title_test_or_train="Training results plotted in the frequency domain",
save_id="_ts_"):
"""Phase plot comparision and save files in the Amp name folder.
For the test data filename will be : Phase_test_12_ts_20000_r_44_reg_0CO2
For the training data filename will be : Phase12_ts_20000_r_44_reg_0CO2
Parameters
----------
fftfreq
frequency content of the FOM
fftfreq_Quad_r44
frequency content of the Q-OPINF at r = 44
fftfreq_cubic_r25
frequency content of the C-OPINF at r = 25
fftfreq_cubic_r44
frequency content of the C-OPINF at r = 44
fftfreq_DMDc
frequency content of the DMDc at r = 44
Phase_FOM
Phase content of the FOM
Phase_Quad_r44
Phase content of the Q-OPINF at r = 44
Phase_cubic_r25
Phase content of the C-OPINF at r = 25
Phase_cubic_r44
Phase content of the C-OPINF at r = 44
Phase_DMDc
Phase content of the DMDc at r = 44
unit
unit for each variable (Pa, Kelvin...)
savefile
Filename to be saved
title_test_or_train
"Training results plotted in the frequency domain"
save_id
'_ts_' for traindata, '_test_' for testing data
"""
st = 1
end = 60
plt.xlim([0, freq_limit_to_plot])
# plt.scatter(fftfreq[st:end], Phase_FOM[st:end],
# s=50, label='FOM', marker='o', alpha=0.5, c='k')
plt.scatter(fftfreq_Quad_r44[st:end],
get_min(Phase_FOM[st:end] - Phase_Quad_r44[st:end]),
s=50, label='Q-OPINF', marker='s', alpha=0.5, c='#ff7f0e')
# plt.scatter(fftfreq_cubic_r25[st:end], amplitude_cubic_r25[st:end],
# s=50, label='C-OPINF_r25', marker='p', alpha=0.5)
plt.scatter(fftfreq_cubic_r44[st:end],
get_min(Phase_FOM[st:end] - Phase_cubic_r44[st:end]),
s=50, label='C-OPINF', marker='*', alpha=0.5, c='b')
plt.scatter(fftfreq_DMDc[st:end],
get_min(Phase_FOM[st:end] - Phase_DMDc[st:end]),
s=50, label='DMDc', marker='+', alpha=0.5, c='r')
# plt.plot(fftfreq[st:end],Phase_FOM[st:end], linestyle='solid', c='k')
plt.plot(fftfreq_Quad_r44[st:end],
get_min(Phase_FOM[st:end] - Phase_Quad_r44[st:end]),
linestyle='dashed', c='#ff7f0e')
# plt.plot(fftfreq_cubic_r25[st:end], amplitude_cubic_r25[st:end],
# linestyle='dashed')
plt.plot(fftfreq_cubic_r44[st:end],
get_min(Phase_FOM[st:end] - Phase_cubic_r44[st:end]),
linestyle='dashed', c='b')
plt.plot(fftfreq_DMDc[st:end],
get_min(Phase_FOM[st:end] - Phase_DMDc[st:end]),
linestyle='dashdot', c='r')
plt.xlabel('freq')
plt.ylabel('Phase angle difference FOM-ROM (degree)')
plt.legend()
# plt.title(title_test_or_train)
if save_id == "_ts_":
fname = f"{Phase_name}{T_st}{save_id}{trainsize}_r_{num_modes}"
fname += f"_reg_{reg}{savefile}.pdf"
if save_id == "_test_":
fname = f"{Phase_name}{save_id}{T_st}_ts_{trainsize}_r_{num_modes}"
fname += f"_reg_{reg}{savefile}.pdf"
else:
raise ValueError(f"invalid save_id '{save_id}'")
plt.savefig(fname, bbox_inches="tight", dpi=200)
plt.show()
def fftoutput_train(T_st, t, trainsize, num_modes, reg,
unit='Temperature in Kelvin', datanumber=0,
savefile='filename'):
"""Amplitude and phase plots for training dataset.
Parameters
----------
T_st
monitor location code
* 12: Monitor location 1
* 13: Monitor location 2
* 14: Monitor location 3
* 15: Monitor location 4
t
as defined in input
trainsize
as defined in input
num_modes
as defined in input
reg
as defined in input
unit
unit for each variable (Pa, Kelvin...)
datanumber
defines the state parameter
* -12: Pressure
* -8: Vx
* -4: Vy
* 0: Temperature
* 8: [CH4]
* 12: [O2]
* 16: [H2O]
* 20: [CO2]
savefile
Suffix to save the file name
"""
# fmax = 1/dt
ROM_S = trainsize # 20000
FOM_S = trainsize # 20000
T = pd.DataFrame(FOM_).loc[13][0:FOM_S]
# T_ROM = pd.DataFrame(ROM_DMDc).loc[13][0:ROM_S]
# df = 1/dt/trainsize
# fdomain = np.arange(0,fmax,df)
T = pd.DataFrame(FOM_).loc[T_st + datanumber][0:FOM_S]
T_ROM_Quad_r44 = pd.DataFrame(ROM_Quad_r44).loc[T_st + datanumber][0:ROM_S]
T_ROM_DMDc = pd.DataFrame(ROM_DMDc).loc[T_st + datanumber][0:ROM_S]
T_ROM_cubic_r25 = pd.DataFrame(ROM_cubic_r25).loc[T_st + datanumber][0:ROM_S]
T_ROM_cubic_r44 = pd.DataFrame(ROM_cubic_r44).loc[T_st + datanumber][0:ROM_S]
lineplots_timeseries(FOM_,
ROM_Quad_r44, ROM_cubic_r25, ROM_cubic_r44, ROM_DMDc,
datanumber,unit,savefile)
# L2errorplots(FOM_, ROM_Quad_r44, ROM_cubic_r25, ROM_cubic_r44, ROM_DMDc,
# datanumber, unit)
# fftfreq1, amplitude_DMD, fft1 = get_freq_and_amplitude(T_ROM_DMD)
fftfreq_DMDc, amplitude_DMDc, fft_DMDc = get_freq_and_amplitude(T_ROM_DMDc)
fftfreq_Quad_r44, amplitude_Quad_r44, fft_Quad_r44 = get_freq_and_amplitude(T_ROM_Quad_r44)
fftfreq_cubic_r25, amplitude_cubic_r25, fft_cubic_r25 = get_freq_and_amplitude(T_ROM_cubic_r25)
fftfreq_cubic_r44, amplitude_cubic_r44, fft_cubic_r44 = get_freq_and_amplitude(T_ROM_cubic_r44)
fftfreq, amplitude, fft = get_freq_and_amplitude(T)
amplitude_plots(fftfreq,
fftfreq_Quad_r44, fftfreq_cubic_r25,
fftfreq_cubic_r44, fftfreq_DMDc,
amplitude,
amplitude_Quad_r44, amplitude_cubic_r25,
amplitude_cubic_r44, amplitude_DMDc,
unit, savefile,
title_test_or_train="Training results plotted in the frequency domain",
save_id="_ts_")
Phase_FOM = np.angle(fft, deg=True)
Phase_Quad_r44 = np.angle(fft_Quad_r44, deg=True)
Phase_cubic_r25 = np.angle(fft_cubic_r25, deg=True)
Phase_cubic_r44 = np.angle(fft_cubic_r44, deg=True)
Phase_DMDc = np.angle(fft_DMDc, deg=True)
phase_plots(fftfreq,
fftfreq_Quad_r44, fftfreq_cubic_r25,
fftfreq_cubic_r44, fftfreq_DMDc,
Phase_FOM,
Phase_Quad_r44, Phase_cubic_r25,
Phase_cubic_r44, Phase_DMDc,
unit, savefile,
title_test_or_train="Training results plotted in the frequency domain",
save_id="_ts_")
def fftoutput_test(T_st, t, trainsize, num_modes, reg,
unit='Temperature in Kelvin',
datanumber=0, savefile='filename'):
"""
T_st = monitor location code
code number for each location:
12 - Monitor location 1
13 - Monitor location 2
14 - Monitor location 3
15 - Monitor location 4
t = as defined in input
trainsize = as defined in input
num_modes = as defined in input
reg = as defined in input
unit = unit for each variable (Pa, Kelvin...)
datanumber = to define the state parameter
-12 = Pressure
-8 = Vx
-4 = Vy
0 = Temperature
8 = [CH4]
12 = [O2]
16 = [H2O]
20 = [CO2]
savefile = Suffix to save the file name
Returns
-------
The calculation of amplitude and phase plots for testing dataset
"""
# fmax = 1/dt
# ROM_S = len(t[0:End_plot_at]) - trainsize
FOM_S = len(t[0:End_plot_at]) - trainsize
T = pd.DataFrame(FOM_).loc[13][FOM_S::]
# T_ROM = pd.DataFrame(ROM_DMDc).loc[13][ROM_S::]
# df = 1/dt/(len(t[0:End_plot_at]) - trainsize)
# fdomain = np.arange(0,fmax,df)
T = pd.DataFrame(FOM_).loc[T_st + datanumber][trainsize:len(t[0:End_plot_at])]
# T_ROM_DMD = pd.DataFrame(ROM_DMDc).loc[T_st + datanumber][trainsize:len(t[0:End_plot_at])]
T_ROM_DMDc = pd.DataFrame(ROM_DMDc).loc[T_st + datanumber][trainsize:len(t[0:End_plot_at])]
    T_ROM_Quad_r44 = pd.DataFrame(ROM_Quad_r44).loc[T_st + datanumber][trainsize:len(t[0:End_plot_at])]
T_ROM_cubic_r25 = pd.DataFrame(ROM_cubic_r25).loc[T_st + datanumber][trainsize:len(t[0:End_plot_at])]
T_ROM_cubic_r44 = pd.DataFrame(ROM_cubic_r44).loc[T_st + datanumber][trainsize:len(t[0:End_plot_at])]
fftfreq_DMDc, amplitude_DMDc, fft_DMDc = get_freq_and_amplitude(T_ROM_DMDc)
fftfreq_Quad_r44, amplitude_Quad_r44, fft_Quad_r44 = get_freq_and_amplitude(T_ROM_Quad_r44)
fftfreq_cubic_r25, amplitude_cubic_r25, fft_cubic_r25 = get_freq_and_amplitude(T_ROM_cubic_r25)
fftfreq_cubic_r44, amplitude_cubic_r44, fft_cubic_r44 = get_freq_and_amplitude(T_ROM_cubic_r44)
fftfreq, amplitude, fft = get_freq_and_amplitude(T)
amplitude_plots(fftfreq,
fftfreq_Quad_r44, fftfreq_cubic_r25,
fftfreq_cubic_r44, fftfreq_DMDc,
amplitude,
amplitude_Quad_r44, amplitude_cubic_r25,
amplitude_cubic_r44, amplitude_DMDc,
unit, savefile,
title_test_or_train="Testing results plotted in the frequency domain",
save_id="_test_")
# Phase
Phase_FOM = np.angle(fft, deg=True)
Phase_Quad_r44 = np.angle(fft_Quad_r44, deg=True)
Phase_cubic_r25 = np.angle(fft_cubic_r25, deg=True)
Phase_cubic_r44 = np.angle(fft_cubic_r44, deg=True)
Phase_DMDc = np.angle(fft_DMDc, deg=True)
phase_plots(fftfreq,
fftfreq_Quad_r44, fftfreq_cubic_r25,
fftfreq_cubic_r44, fftfreq_DMDc,
Phase_FOM,
Phase_Quad_r44, Phase_cubic_r25,
Phase_cubic_r44, Phase_DMDc,
unit, savefile,
title_test_or_train="Testing results plotted in the frequency domain",
save_id="_test_")
for T_st in np.arange(12,16,1):
"""
T_st = monitor location code
code number for each location:
12 - Monitor location 1
13 - Monitor location 2
14 - Monitor location 3
15 - Monitor location 4
"""
fftoutput_train(T_st, t, trainsize, num_modes, reg, unit='Temperature in Kelvin', datanumber=0, savefile='Temperature')
fftoutput_train(T_st, t, trainsize, num_modes, reg, unit='Pressure in Pa', datanumber=-12, savefile='Pressure')
fftoutput_train(T_st, t, trainsize, num_modes, reg, unit='CH4 in kmolm$^-3$', datanumber=8, savefile='CH4')
# fftoutput_train(T_st, t, trainsize, num_modes, reg, unit='O2 in kmolm$^-3$', datanumber=12)
# fftoutput_train(T_st, t, trainsize, num_modes, reg, unit='H2O in kmolm$^-3$', datanumber=16)
fftoutput_train(T_st, t, trainsize, num_modes, reg, unit='CO2 in kmolm$^-3$', datanumber=20, savefile='CO2')
# fftoutput_train(T_st, t, trainsize, num_modes, reg, unit='vx in ms-1', datanumber=-12+4)
# fftoutput_train(T_st, t, trainsize, num_modes, reg, unit='vy in ms-1', datanumber=-12+8)
fftoutput_test(T_st, t, trainsize, num_modes, reg, unit='Temperature in Kelvin', datanumber=0, savefile='Temperature')
fftoutput_test(T_st, t, trainsize, num_modes, reg, unit='Pressure in Pa', datanumber=-12, savefile='Pressure')
fftoutput_test(T_st, t, trainsize, num_modes, reg, unit='CH4 in kmolm$^-3$', datanumber=8, savefile='CH4')
# fftoutput_test(T_st, t, trainsize, num_modes, reg, unit='O2 in kmolm$^-3$', datanumber=12)
# fftoutput_test(T_st, t, trainsize, num_modes, reg, unit='H2O in kmolm$^-3$', datanumber=16)
fftoutput_test(T_st, t, trainsize, num_modes, reg, unit='CO2 in kmolm$^-3$', datanumber=20, savefile='CO2')
# fftoutput_test(T_st, t, trainsize, num_modes, reg, unit='vx in ms-1', datanumber=-12+4)
# fftoutput_test(T_st, t, trainsize, num_modes, reg, unit='vy in ms-1', datanumber=-12+8)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Poll',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
('stub', models.CharField(max_length=32)),
('bot_name', models.CharField(max_length=32)),
('frequency', models.IntegerField(default=24)),
('submission_removal', models.IntegerField(default=168)),
('winning_text', models.CharField(max_length=255, null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Submission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
('url', models.URLField()),
('submitted', models.DateTimeField(auto_now_add=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Vote',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('day', models.DateField(auto_now_add=True)),
('submission', models.ForeignKey(related_name='submission_votes', to='poll.Submission')),
],
options={
},
bases=(models.Model,),
),
]
|
#!/usr/bin/env python
# -*- encoding: utf-8
'''
_____.___._______________ __.____ __________ _________ ___ ___ _____ .___
\__ | |\_ _____/ |/ _| | \ \ \_ ___ \ / | \ / _ \ | |
/ | | | __)_| < | | / | \ / \ \// ~ \/ /_\ \| |
\____ | | \ | \| | / | \ \ \___\ Y / | \ |
/ ______|/_______ /____|__ \______/\____|__ / \______ /\___|_ /\____|__ /___|
\/ \/ \/ \/ \/ \/ \/
@author: Yekun Chai
@license: CYK
@email: [email protected]
@file: ut1.py
@time: @Time : 4/15/21 4:10 PM
@desc:
'''
def single_ut1():
    ground_truth = [('PER', 'John Jones'), ('PER', 'Peter Peters'), ('LOC', 'York')]
prediction = [('PER', 'John Jones and Peter Peters came to York')]
text = 'John Jones and Peter Peters came to York'
    one_result = muc.evaluate_one(prediction, ground_truth, text)
pprint.pprint(one_result)
def single_ut2():
"""
test single
"""
grount_truth = [("PER", "John Jones")]
prediction = [("PER", "John"), ("PER", "Jones")]
text = 'John Jones and Peter Peters came to York'
    one_result = muc.evaluate_one(prediction, ground_truth, text)
pprint.pprint(one_result)
def single_ut3():
"""
test single
"""
grount_truth = [("PER", "John"), ("PER", "Jones")]
prediction = [("PER", "John Jones")]
text = 'John Jones and Peter Peters came to York'
    one_result = muc.evaluate_one(prediction, ground_truth, text)
pprint.pprint(one_result)
def all_ut1():
"""
test all
"""
    ground_truths = [
[('PER', 'John Jones'), ('PER', 'Peter Peters'), ('LOC', 'York')],
[('PER', 'John Jones'), ('PER', 'Peter Peters'), ('LOC', 'York')],
[('PER', 'John Jones'), ('PER', 'Peter Peters'), ('LOC', 'York')]
]
predictions = [
[('PER', 'John Jones and Peter Peters came to York')],
[('LOC', 'John Jones'), ('PER', 'Peters'), ('LOC', 'York')],
[('PER', 'John Jones'), ('PER', 'Peter Peters'), ('LOC', 'York')]
]
texts = [
'John Jones and Peter Peters came to York',
'John Jones and Peter Peters came to York',
'John Jones and Peter Peters came to York'
]
    muc.evaluate_all(predictions, ground_truths * 1, texts, verbose=True)
if __name__ == '__main__':
import eval4ner.muc as muc
import pprint
single_ut2()
# single_ut3()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pysam
import singlecellmultiomics.molecule
import singlecellmultiomics.fragment
import gzip
import collections
import matplotlib.pyplot as plt
import pandas as pd
import argparse
import pysamiterators
import sys
import os
import uuid
import singlecellmultiomics.bamProcessing.bamFunctions as bf
import singlecellmultiomics.features
import colorama
import numpy as np
class Fraction:
def __init__(self):
self.values = [0, 0]
def __setitem__(self, key, value):
self.values[key] = value
def __getitem__(self, key):
return self.values[key]
def __float__(self):
if sum(self.values) == 0:
return np.nan
return self.values[1] / (self.values[1] + self.values[0])
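# Hedged illustration of the Fraction helper above: the two slots accumulate
# counts (here assumed to be unmethylated at index 0 and methylated at index 1)
# and float() returns the index-1 fraction, or NaN when nothing was counted.
#   f = Fraction()
#   f[1] += 3
#   f[0] += 1
#   float(f)  # -> 0.75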
if __name__ == '__main__':
argparser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Add methylation information to BAM file')
argparser.add_argument('alignmentfile', type=str)
argparser.add_argument(
'-o',
type=str,
help="output BAM path",
required=True)
argparser.add_argument(
'-bed',
type=str,
help="output bed file (base-level methylation status)",
default=None,
required=False)
# these options need to go <- VB
argparser.add_argument(
'-table',
type=str,
help="output table alias",
default=None,
required=False)
argparser.add_argument('-bin_size', type=int, default=250_000)
argparser.add_argument('-sliding_increment', type=int, default=50_000)
#
argparser.add_argument(
'-ref',
type=str,
required=False,
help='path to reference fasta file ')
argparser.add_argument(
'-min_mq',
default=20,
type=int,
help='Min mapping qual')
argparser.add_argument('-uhd', default=1, type=int,
help='Umi hamming distance')
argparser.add_argument(
'-mem',
default=40,
type=int,
help='Memory used per job')
argparser.add_argument(
'-time',
default=52,
type=int,
help='Time requested per job')
argparser.add_argument('-head', type=int)
argparser.add_argument('-contig', type=str, help='contig to run on')
argparser.add_argument('-method', type=str, help='nla or chic')
argparser.add_argument(
'-samples',
type=str,
help='Samples to select, separate with comma. For example CellA,CellC,CellZ',
default=None)
argparser.add_argument(
'-context',
type=str,
help='Context to select, separate with comma. For example Z,H,X',
default=None)
argparser.add_argument('--stranded', action='store_true')
argparser.add_argument(
'--cluster',
action='store_true',
help='split by chromosomes and submit the job on cluster')
argparser.add_argument(
'--no_sort_index',
action='store_true',
help='do not sort and index the output bam')
# Transcriptome splitting mode
tr = argparser.add_argument_group('transcriptome specific settings')
tr.add_argument(
'--transcriptome',
action='store_true',
help='Label transcripts, requires exons and introns')
tr.add_argument('-exons', type=str, help='Exon GTF file')
tr.add_argument(
'-introns',
type=str,
help='Intron GTF file, use exonGTF_to_intronGTF.py to create this file')
tr.add_argument(
'-recovery_umi_pool_radius',
type=int,
help='BP radius. When assigning transcripts without NLA site found, use this radius for molecule pooling',
default=4)
args = argparser.parse_args()
samples = None if args.samples is None else set(args.samples.split(','))
contexts = None if args.context is None else set(args.context.split(','))
# set(
# [x.upper() for x in args.context.split(',')] +
# [x.lower() for x in args.context.split(',')])
alignments = pysam.AlignmentFile(args.alignmentfile)
# Auto detect reference:
if args.ref is None:
args.ref = bf.get_reference_from_pysam_alignmentFile(alignments)
if args.ref is None:
raise ValueError("Supply reference, -ref")
if args.transcriptome:
print(
colorama.Style.BRIGHT +
'Running in transcriptome recovery mode' +
colorama.Style.RESET_ALL)
if args.exons is None or args.introns is None:
raise ValueError("Please supply both intron and exon GTF files")
if args.cluster:
if args.contig is None:
# Create jobs for all chromosomes:
temp_prefix = os.path.abspath(
os.path.dirname(args.o)) + '/' + str(uuid.uuid4())
hold_merge = []
for chrom in alignments.references:
if chrom.startswith('KN') or chrom.startswith('KZ') or chrom.startswith(
'chrUn') or chrom.endswith('_random') or 'ERCC' in chrom:
continue
temp_bam_path = f'{temp_prefix}_{chrom}.bam'
temp_bed_path = f'{temp_prefix}_{chrom}.bed'
arguments = " ".join([x for x in sys.argv if not x == args.o and x != '-o']) + \
f" -contig {chrom} -o {temp_bam_path} -bed {temp_bed_path}"
job = f'TAPS_{str(uuid.uuid4())}'
os.system(
f'submission.py --silent' +
f' -y --py36 -time {args.time} -t 1 -m {args.mem} -N {job} " {arguments};"')
hold_merge.append(job)
hold = ','.join(hold_merge)
os.system(
f'submission.py --silent' +
f' -y --py36 -time {args.time} -t 1 -m 10 -N {job} -hold {hold} " samtools merge {args.o} {temp_prefix}*.bam; samtools index {args.o}; rm {temp_prefix}*.ba*; cat {temp_prefix}*.bed > {args.bed}; rm {temp_prefix}*.bed"')
exit()
reference = pysamiterators.iterators.CachedFasta(pysam.FastaFile(args.ref))
taps = singlecellmultiomics.molecule.TAPS(reference=reference)
temp_out = f'{args.o}.temp.out.bam'
# Obtain contig sizes:
ref_lengths = {r: alignments.get_reference_length(
r) for r in alignments.references}
# Methylation dictionary: site->cell->value
binned_data = collections.defaultdict(
lambda: collections.defaultdict(Fraction))
cell_count = collections.Counter()
# Define molecule class arguments
molecule_class_args = {
'reference': reference,
'taps': taps,
'min_max_mapping_quality': args.min_mq
}
fragment_class_args = {'umi_hamming_distance': args.uhd}
# transcriptome mode specific arguments: ####
if args.transcriptome:
transcriptome_features = singlecellmultiomics.features.FeatureContainer()
print("Loading exons", end='\r')
transcriptome_features.loadGTF(
args.exons,
select_feature_type=['exon'],
identifierFields=(
'exon_id',
'gene_id'),
store_all=True,
contig=args.contig,
head=None)
print("Loading introns", end='\r')
transcriptome_features.loadGTF(
args.introns,
select_feature_type=['intron'],
identifierFields=['transcript_id'],
store_all=True,
contig=args.contig,
head=None)
print("All features loaded")
rejected_reads = [] # Store all rejected, potential transcript reads
# Add more molecule class arguments
molecule_class_args.update({
'features': transcriptome_features
})
# Method specific arguments
if args.method == 'nla':
molecule_class_args.update({'site_has_to_be_mapped': True})
elif args.method == 'chic':
fragment_class_args.update({'invert_strand': True})
if args.transcriptome:
if args.method == 'nla':
molecule_class = singlecellmultiomics.molecule.AnnotatedTAPSNlaIIIMolecule
fragment_class = singlecellmultiomics.fragment.NlaIIIFragment
elif args.method == 'chic':
molecule_class = singlecellmultiomics.molecule.AnnotatedTAPSCHICMolecule
fragment_class = singlecellmultiomics.fragment.CHICFragment
else:
raise ValueError("Supply 'nla' or 'chic' for -method")
else:
if args.method == 'nla':
molecule_class = singlecellmultiomics.molecule.TAPSNlaIIIMolecule
fragment_class = singlecellmultiomics.fragment.NlaIIIFragment
elif args.method == 'chic':
molecule_class = singlecellmultiomics.molecule.TAPSCHICMolecule
fragment_class = singlecellmultiomics.fragment.CHICFragment
else:
raise ValueError("Supply 'nla' or 'chic' for -method")
###############################################
statistics = collections.defaultdict(collections.Counter)
mcs = collections.Counter() # methylation calls seen
print(
colorama.Style.BRIGHT +
"Running TAPS tagging" +
colorama.Style.RESET_ALL)
if args.bed is not None:
bed = open(args.bed, "w")
with pysam.AlignmentFile(temp_out, "wb", header=alignments.header) as output:
for i, molecule in enumerate(
singlecellmultiomics.molecule.MoleculeIterator(
alignments=alignments,
molecule_class=molecule_class,
fragment_class=fragment_class,
fragment_class_args=fragment_class_args,
yield_invalid=True,
molecule_class_args=molecule_class_args,
contig=args.contig
)):
if args.head is not None and i >= args.head:
print(
colorama.Style.BRIGHT +
colorama.Fore.RED +
f"Head was supplied, stopped at {i} molecules" +
colorama.Style.RESET_ALL)
break
statistics['Input']['molecules'] += 1
statistics['Input']['fragments'] += len(molecule)
# Set (chromosome) unique identifier
molecule.set_meta('mi', f'NLA_{i}')
if args.transcriptome:
molecule.set_intron_exon_features()
if samples is not None and molecule.sample not in samples:
molecule.set_rejection_reason('sample_not_selected')
if output is not None:
molecule.write_pysam(output)
continue
if args.transcriptome:
if not molecule.is_valid():
if molecule.is_multimapped() or molecule.get_max_mapping_qual() < args.min_mq:
molecule.set_meta('RF', 'rejected_molecule_mq')
molecule.write_tags()
molecule.write_pysam(output)
statistics['Filtering']['low mapping quality'] += 1
statistics['Filtering']['rejected'] += 1
continue
rejected_reads.append(molecule[0].reads)
continue
statistics['Filtering'][f'valid {args.method} molecule'] += 1
if len(molecule.junctions):
molecule.set_meta('RF', 'transcript_junction')
molecule.set_meta('dt', 'RNA')
statistics['Data type detection']['RNA because junction found'] += 1
else:
if len(molecule.genes) == 0:
molecule.set_meta('dt', 'DNA')
statistics['Data type detection']['DNA not mapping to gene'] += 1
else:
# Check if NLA III sites are skipped...
if args.method == 'nla':
skipped = molecule.get_undigested_site_count()
if skipped == 0:
molecule.set_meta('dt', 'DNA')
else:
molecule.set_meta('dt', 'RNA or DNA')
else:
molecule.set_meta('dt', 'RNA or DNA')
else:
if not molecule.is_valid():
statistics['Filtering'][f'not valid {args.method}'] += 1
molecule.set_meta('RF', 'rejected_molecule')
molecule.write_tags()
molecule.write_pysam(output)
continue
statistics['Filtering'][f'valid {args.method} molecule'] += 1
molecule.set_meta('RF', 'accepted_molecule')
got_context_hit = False
methylated_hits = 0
unmethylated_hits = 0
readString = []
genomeString = []
for (chromosome, location), call in molecule.methylation_call_dict.items():
if call['context'] == '.': # Only use calls concerning C's
continue
got_context_hit += 1
mcs[call['context']] += 1
if call['context'].isupper():
methylated_hits += 1
readString.append(call['consensus'])
genomeString.append(call['reference_base'])
else:
unmethylated_hits += 1
# NEED TO REMOVE THIS CODE ENTIRELY!! <- VB
if args.table is not None:
for binIdx in singlecellmultiomics.utils.coordinate_to_bins(
location, args.bin_size, args.sliding_increment):
bin_start, bin_end = binIdx
if bin_start < 0 or bin_end > ref_lengths[molecule.chromosome]:
continue
if args.stranded:
binned_data[(chromosome, molecule.get_strand_repr(
), binIdx)][molecule.get_sample()][call['context'].isupper()] += 1
cell_count[molecule.get_sample()] += 1
else:
binned_data[(chromosome, binIdx)][molecule.get_sample(
)][call['context'].isupper()] += 1
cell_count[molecule.get_sample()] += 1
###
if args.bed is not None:
# Skip non-selected contexts when writing the bed entry
if contexts is not None and call['context'] not in contexts:
continue
else:
# name = cell barcode + context
name = ":".join(
[molecule.sample.split("_")[-1], call['context']])
bed.write(
f'{chromosome}\t{location}\t{location+1}\t{name}\t1\t{molecule.get_strand_repr()}\n')
refbase = '' if not genomeString else max(
set(genomeString), key=genomeString.count)
readbase = '' if not readString else max(
set(readString), key=readString.count)
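# Derive bisulfite-style conversion tags from the dominant read/reference base and the molecule
# strand (assumption: XR/XG follow the Bismark convention of read and genome conversion state).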
readConversionString = None
genomeConversionString = None
# OT
if readbase == 'T' and refbase == 'C' and molecule.get_strand() == 1: # '+'
readConversionString = 'CT'
genomeConversionString = 'CT'
# OB
elif readbase == 'A' and refbase == 'G' and molecule.get_strand() == 0: # '-'
readConversionString = 'CT'
genomeConversionString = 'GA'
# CTOT
elif readbase == 'A' and refbase == 'C' and molecule.get_strand() == 1: # '+'
readConversionString = 'GA'
genomeConversionString = 'CT'
# CTOB
elif readbase == 'A' and refbase == 'G' and molecule.get_strand() == 0: # '-'
readConversionString = 'GA'
genomeConversionString = 'GA'
if readConversionString is not None:
molecule.set_meta('XR', readConversionString)
if genomeConversionString is not None:
molecule.set_meta('XG', genomeConversionString)
molecule.set_meta('ME', methylated_hits)
molecule.set_meta('um', unmethylated_hits)
statistics['Methylation']['methylated Cs'] += methylated_hits
statistics['Methylation']['unmethylated Cs'] += unmethylated_hits
molecule.write_tags()
molecule.write_pysam(output)
# close bed
if args.bed is not None:
bed.close()
if args.transcriptome:
print(
colorama.Style.BRIGHT +
f"Running transcriptome recovery on {len(rejected_reads)} reads")
for i, molecule in enumerate(
singlecellmultiomics.molecule.MoleculeIterator(
# plug in the possible_transcripts as read source
alignments=rejected_reads,
# Drop the TAPS and NLAIII checks
molecule_class=singlecellmultiomics.molecule.FeatureAnnotatedMolecule,
# Plain fragment, no NLAIII
fragment_class=singlecellmultiomics.fragment.Fragment,
fragment_class_args={
'umi_hamming_distance': args.uhd,
# this is the amount of bases R1 can shift to be assigned to the same molecule
'assignment_radius': args.recovery_umi_pool_radius
},
yield_invalid=True,
molecule_class_args={
'features': transcriptome_features,
'reference': reference,
'min_max_mapping_quality': 20
}
)):
if not molecule.is_valid():
statistics['Filtering']['rejected at transcriptome recovery step'] += 1
statistics['Filtering']['rejected'] += 1
molecule.set_meta('RF', 'rejected_recovery_invalid')
molecule.write_tags()
molecule.write_pysam(output)
continue
molecule.set_meta('mi', f'TRAN_{i}')
# Add gene annotations:
molecule.annotate(0)
molecule.set_intron_exon_features()
if len(molecule.genes) == 0:
molecule.set_meta('RF', 'rejected_recovery_no_gene')
statistics['Filtering']['rejected_recovery_no_gene'] += 1
molecule.write_tags()
molecule.write_pysam(output)
continue
if len(molecule.junctions):
molecule.set_meta('RF', 'recovered_transcript_junction')
statistics['Filtering']['recovered_transcript_junction'] += 1
statistics['Data type detection'][
f'RNA because junction found and no {args.method} site mapped'] += 1
else:
molecule.set_meta('RF', 'recovered_transcript_gene')
statistics['Data type detection'][
f'RNA because gene found and no {args.method} site mapped'] += 1
statistics['Filtering']['recovered_transcript_gene'] += 1
molecule.set_meta('dt', 'RNA')
statistics['Data type detection']['RNA'] += 1
molecule.write_tags()
molecule.write_pysam(output)
# Show statistics:
print(
'\n' +
colorama.Style.BRIGHT +
'Statistics' +
colorama.Style.RESET_ALL)
for statistic_class in [
'Input',
'Filtering',
'Data type detection',
'Methylation']:
print(f'{colorama.Style.BRIGHT} {statistic_class} {colorama.Style.RESET_ALL}')
for statistic, value in statistics[statistic_class].most_common():
print(f' {statistic}\t{value}')
print(f'{colorama.Style.BRIGHT} Methylation calls {colorama.Style.RESET_ALL}')
for call, description in zip('zZxXhH',
['unmethylated C in CpG context (CG)',
'methylated C in CpG context (CG)',
'unmethylated C in CHG context ( C[ACT]G )',
'methylated C in CHG context ( C[ACT]G )',
'unmethylated C in CHH context ( C[ACT][ACT] )',
'methylated C in CHH context ( C[ACT][ACT] )'
]):
if call.isupper():
print(
f' {colorama.Style.BRIGHT}{call}{colorama.Style.RESET_ALL}\t{mcs[call]}',
end='\t')
else:
print(f' {call}\t{mcs[call]}', end='\t')
print(f'{colorama.Style.DIM}{description}{colorama.Style.RESET_ALL}')
print('\n')
if args.table is not None:
print(
colorama.Style.BRIGHT +
'Writing raw unmethylated counts' +
colorama.Style.RESET_ALL)
# Write raw counts:
df = pd.DataFrame(
{loc: {sample: binned_data[loc][sample][0] for sample in binned_data[loc]} for loc in binned_data})
df.to_pickle(f'{args.table}_unmethylated_{args.contig}.pickle.gz')
df.to_csv(f'{args.table}_unmethylated_{args.contig}.csv')
del df
print(
colorama.Style.BRIGHT +
'Writing raw methylated counts' +
colorama.Style.RESET_ALL)
df = pd.DataFrame(
{loc: {sample: binned_data[loc][sample][1] for sample in binned_data[loc]} for loc in binned_data})
df.to_pickle(f'{args.table}_methylated_{args.contig}.pickle.gz')
df.to_csv(f'{args.table}_methylated_{args.contig}.csv')
del df
print(
colorama.Style.BRIGHT +
'Writing ratio tables' +
colorama.Style.RESET_ALL)
# cast all fractions to float
for loc in binned_data:
for sample in binned_data[loc]:
binned_data[loc][sample] = float(binned_data[loc][sample])
df = pd.DataFrame(binned_data)
del binned_data
if args.contig:
df.to_pickle(f'{args.table}_ratio_{args.contig}.pickle.gz')
df.to_csv(f'{args.table}_ratio_{args.contig}.csv')
else:
df.to_pickle(f'{args.table}_ratio.pickle.gz')
df.to_csv(f'{args.table}_ratio.csv')
print(
colorama.Style.BRIGHT +
'Sorting and indexing final file' +
colorama.Style.RESET_ALL)
# Sort and index
# Perform a reheading, sort and index
if not args.no_sort_index:
cmd = f"""samtools sort {temp_out} > {args.o}; samtools index {args.o};
rm {temp_out};
"""
else:
cmd = f"mv {temp_out} {args.o}"
os.system(cmd)
print("All done.")
|
#!/usr/bin/env python3
"""Main game logic for Pybrix
2018.08.23 -- GVG"""
import pygame
import sys
import os
import tetromino as tet
import display
from random import randint
from board import Board
from settings import GRID_SIZE, COLORS
def init():
global f
pygame.init()
pygame.font.init()
f = open("testing.out", "w")
f.write("")
def main():
init()
global myfont
global screen
global state
global current_state
global clock
global done
global score
global upcoming_tets
global fall_speed
myfont = pygame.font.SysFont('Comic Sans MS', 30)
screen = pygame.display.set_mode((600, 1000))
clock = pygame.time.Clock()
done = False
state = enum('MENU','INIT','NEWPIECE','MOVEDOWN','MOTION','CLEARROWS')
current_state = state.MENU
score = 0
upcoming_tets = []
fall_speed = 1000
while not done:
screen.fill((100, 100, 100))
state_execute(current_state)
pygame.display.flip()
#screen.fill((0, 0, 0))
def state_menu(): # Should display main menu and check for input on menu options
global current_state
global done
textsurface = myfont.render('Menu', False, (0, 0, 0),(0,0,255))
screen.blit(textsurface,(0,0))
# render_menu()
#
for event in pygame.event.get():
if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_q):
done = True
elif event.type == pygame.KEYDOWN and event.key == pygame.K_1:
current_state+=1
return;
def state_init(): # Should start game: display empty board, reset score, show first pieces
global current_state
global score
global level
global b
global upcoming_tets
upcoming_tets = []
textsurface = myfont.render('Init', False, (100, 100, 100),(0,0,255))
screen.blit(textsurface,(0,0))
# board = blank_board()
# render_board(board,0) # (no pieces yet)
# score = 0
# push_upcoming_piece() x3
b = Board(screen)
score = 0
level = 1
#for i in range(b.shape[0]):
# for j in range(b.shape[1]):
# b.grid[i, j] = sample([0,1,2,3,4,5,6],1)[0]
b.draw()
display.draw_score(screen, score)
push_upcoming_tet(upcoming_tets, b, screen)
push_upcoming_tet(upcoming_tets, b, screen)
push_upcoming_tet(upcoming_tets, b, screen)
current_state+=1
return;
def push_upcoming_tet(upcoming_tets, board, screen):
r = randint(0,6)
upcoming_tets.append(tet.Tetromino(r, board, screen))
return r;
def state_newpiece(): # Should place random new piece above top row of board, and cycle coming pieces
global current_state
global active_tet
textsurface = myfont.render('New Piece', False, (0, 0, 0),(0,0,255))
screen.blit(textsurface,(0,0))
active_tet = pop_upcoming_tet(upcoming_tets)
push_upcoming_tet(upcoming_tets, b, screen)
b.draw()
active_tet.draw()
current_state+=1
return;
def pop_upcoming_tet(upcoming_tets):
f = open("testing.out","a")
s = upcoming_tets.pop(0)
f.write("New tetromino, type " + str(s.shape) + "\n")
return s;
def state_movedown(): # Should move active piece down one row
global current_state
textsurface = myfont.render('Move Down', False, (0, 0, 0),(0,0,255))
screen.blit(textsurface,(0,0))
if active_tet.drop():
b.draw()
active_tet.draw()
current_state+=1
else:
if active_tet.check_lose():
#render_loss_message()
current_state = state.MENU
else:
current_state = state.CLEARROWS
active_tet.add_to_board()
b.draw()
return;
def state_motion(): # Should respond to user instructions: translate, rotate, drop pieces
global current_state
global score
global fall_speed
global done
textsurface = myfont.render('Motion', False, (0, 0, 0),(0,0,255))
screen.blit(textsurface,(0,0))
current_state = state.MOVEDOWN
clk = pygame.time.get_ticks()
a = 0
dropped = 0
while pygame.time.get_ticks() - clk < fall_speed and not dropped:
for event in pygame.event.get():
if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_q):
done = True
#elif event.type == pygame.KEYDOWN and event.key == pygame.K_LEFT:
# active_tet.translate(0)
#elif event.type == pygame.KEYDOWN and event.key == pygame.K_RIGHT:
# active_tet.translate(1)
elif event.type == pygame.KEYDOWN and event.key == pygame.K_z:
active_tet.rotate(1)
elif event.type == pygame.KEYDOWN and (event.key == pygame.K_x or event.key == pygame.K_UP):
active_tet.rotate(0)
#elif event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN:
# active_tet.drop()
elif event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
t = active_tet.droppp()
score+=t
dropped = 1
keys_pressed = pygame.key.get_pressed()
if keys_pressed[pygame.K_LEFT]:
f = open("testing.out","a")
f.write("move left\n")
if a:
active_tet.translate(0)
pygame.time.delay(200)
a = 0
else:
a = 1
if keys_pressed[pygame.K_RIGHT]:
if a:
active_tet.translate(1)
pygame.time.delay(100)
a = 0
else:
a = 1
if keys_pressed[pygame.K_DOWN]:
if a:
active_tet.drop()
pygame.time.delay(100)
a = 0
else:
a = 1
if keys_pressed[pygame.K_r]:
if a:
current_state = state.INIT
pygame.time.delay(100)
a = 0
else:
a = 1
b.draw()
active_tet.draw()
pygame.display.flip()
keys_pressed = pygame.key.get_pressed()
display.draw_score(screen, score)
level = int(score/1000)+1
display.draw_level(screen, level)
fall_speed = 200 + 800/level
return;
def state_clearrows(): # Clear filled rows and drop bulk accordingly
rowscores = [0, 40, 100, 300, 1200]
global current_state
global score
textsurface = myfont.render('Clear Rows', False, (0, 0, 0),(0,0,255))
screen.blit(textsurface,(0,0))
numrows = b.clear_rows()
current_state = state.NEWPIECE
score += rowscores[numrows]*(level+1)
return;
def enum(*args):
enums = dict(zip(args, range(len(args))))
return type('Enum', (), enums)
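# Example (hypothetical names): enum('A', 'B', 'C') returns a class with A == 0, B == 1, C == 2,
# so state = enum('MENU', 'INIT', ...) maps each state name to a consecutive integer, which is
# why `current_state += 1` advances through the states in declaration order.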
def state_execute(argument):
switcher = {
state.MENU: state_menu,
state.INIT: state_init,
state.NEWPIECE: state_newpiece,
state.MOVEDOWN: state_movedown,
state.MOTION: state_motion,
state.CLEARROWS: state_clearrows,
}
func = switcher.get(argument, lambda: "nothing")
return func()
main()
|
#!/usr/bin/env python
"""
This script will query the AWS API using boto3 and provide a list (table) of all regions and your current opt-in status
Example Usage:
./list-regions.py
"""
from __future__ import print_function
import boto3
import requests
import sys
from botocore.exceptions import ClientError
from prettytable import PrettyTable
unknown_string = 'unknown'
country_mapping = {
'af-south-1': 'Africa (Cape Town)',
'ap-east-1': 'Asia Pacific (Hong Kong)',
'ap-south-1': 'Asia Pacific (Mumbai)',
'ap-northeast-2': 'Asia Pacific (Seoul)',
'ap-southeast-1': 'Asia Pacific (Singapore)',
'ap-southeast-2': 'Asia Pacific (Sydney)',
'ap-northeast-1': 'Asia Pacific (Tokyo)',
'ca-central-1': 'Canada (Central)',
'eu-central-1': 'Europe (Frankfurt)',
'eu-west-1': 'Europe (Ireland)',
'eu-west-2': 'Europe (London)',
'eu-west-3': 'Europe (Paris)',
'eu-north-1': 'Europe (Stockholm)',
'eu-south-1': 'Europe (Milan)',
'me-south-1': 'Middle East (Bahrain)',
'sa-east-1': 'South America (Sao Paulo)',
'us-east-2': 'US East (Ohio)',
'us-east-1': 'US East (N. Virginia)',
'us-west-1': 'US West (N. California)',
'us-west-2': 'US West (Oregon)',
}
def main(cmdline=None):
"""
The main function. It accepts the command line arguments (currently unused), queries the EC2 API for regions and displays the results.
"""
client = boto3.client('ec2')
results = query_api(client)
display_results(results)
def query_api(client):
"""
Query the API
"""
results = []
try:
response = client.describe_regions(AllRegions=True)
except ClientError as e:
print("Error: " + str(e))
else:
if 'Regions' in response:
for region in response['Regions']:
azs = []
my_region_name = region['RegionName']
status = region['OptInStatus'].replace('-', ' ')
results.append({
'RegionName': my_region_name,
'Location': country_mapping[my_region_name] if my_region_name in country_mapping else unknown_string,
'STATUS': status,
})
return results
def display_results(results):
"""
Display the results
"""
table = PrettyTable()
table.field_names = [
'Region Name',
'Location',
'Status',
]
for parts in results:
table.add_row([
parts['RegionName'],
parts['Location'],
parts['STATUS'],
])
table.sortby = 'Region Name'
print(table)
if __name__ == "__main__":
# This runs when the application is run from the command line. It grabs sys.argv[1:], which is
# everything after the program name, and passes it to main. The return value from main is then
# used as the argument to sys.exit, which you can test for in the shell. Program exit codes are
# usually 0 for OK and non-zero for something going wrong.
sys.exit(main(sys.argv[1:]))
|
import yfinance as yf
import logging
import pandas as pd
import zipfile
import urllib.request
logging.basicConfig(filename='output.log', filemode='a',
format='%(asctime)s - %(levelname)-4s [%(filename)s:%(lineno)d] %(message)s', level=logging.INFO)
class MktDataReader:
def __init__(self, start_date, end_date, data_source = 'yahoo', tickers = []) -> None:
self.df_stocks = pd.DataFrame() # pd.DataFrame that will contain price data for the stocks to analyze
self.df_stocks_bkfilled = None
self.mcaps = None # market caps
self.ff_factors = None # fama-french factors
self.data_source = data_source
self.tickers = tickers
self.start_date = start_date
self.end_date = end_date
# call the fetch data method according to the chosen data source
self.fetch_data()
# call the fetch method for the fama french factors df
self.fetch_fama_french()
def fetch_fama_french(self, period = "daily"):
if period == "daily":
ff_url = "https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/F-F_Research_Data_5_Factors_2x3_daily_CSV.zip"
elif period == "monthly":
ff_url = "https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/F-F_Research_Data_5_Factors_2x3_CSV.zip"
# Download the file and save it as fama_french.zip
urllib.request.urlretrieve(ff_url,'fama_french.zip')
zip_file = zipfile.ZipFile('fama_french.zip', 'r')
# Next we extract the file data
zip_file.extractall()
# Make sure you close the file after extraction
zip_file.close()
# Now open the CSV file
ff_factors = pd.read_csv('F-F_Research_Data_5_Factors_2x3_daily.CSV', skiprows = 3, index_col = 0)
# Format the date index
ff_factors.index = pd.to_datetime(ff_factors.index, format= '%Y%m%d')
# set the Fama-French factors on the MktDataReader instance
self.ff_factors = pd.DataFrame(ff_factors)
def fetch_data(self):
if self.data_source == "yahoo":
logging.info(f"Fetching data from yfinance")
tmp_df = pd.DataFrame(yf.download(self.tickers, start = self.start_date, end = self.end_date))
fetched_data = tmp_df['Adj Close'] # take the adjusted closing price
self.df_stocks = fetched_data
def impute_missing_data(self):
assert not self.df_stocks.empty, "You haven't fetched the data yet"
logging.info("Imputing missing values using backfill")
# TODO: add different fill methods
tmp = self.df_stocks.copy()
self.df_stocks_bkfilled = tmp.fillna(method='bfill') # backfill with the next available price
def fetch_market_caps(self):
assert len(self.tickers) != 0, "You need to provide a list of tickers before fetching the mcaps"
mcaps = {}
for t in self.tickers:
stock = yf.Ticker(t)
mcaps[t] = stock.info["marketCap"]
self.mcaps = mcaps
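# Minimal usage sketch (illustrative tickers and dates; assumes network access to Yahoo Finance
# and the Ken French data library):
# reader = MktDataReader(start_date='2020-01-01', end_date='2021-01-01',
#                        data_source='yahoo', tickers=['AAPL', 'MSFT'])
# reader.impute_missing_data()
# reader.fetch_market_caps()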
|
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import textwrap
from sqlalchemy import type_coerce
from sqlalchemy.event import listens_for
from sqlalchemy.sql.schema import CheckConstraint
from sqlalchemy.sql.sqltypes import SchemaType, SmallInteger
from sqlalchemy.sql.type_api import TypeDecorator
class _EnumIntWrapper(int):
"""Int subclass that keeps the repr of the enum's member."""
def __init__(self, enum_member):
self.enum_member = enum_member
def __repr__(self):
return repr(self.enum_member)
class PyIntEnum(TypeDecorator, SchemaType):
"""Custom type which handles values from a PEP-435 Enum.
In addition to the Python-side validation this also creates a
CHECK constraint to ensure only valid enum members are stored.
By default all enum members are allowed, but `exclude_values`
can be used to exclude some.
:param enum: the Enum represented by this type's values
:param exclude_values: a set of Enum values which are not allowed
:raise ValueError: when using/loading a value not in the Enum.
"""
impl = SmallInteger
cache_ok = True
def __init__(self, enum=None, exclude_values=None):
self.enum = enum
self.exclude_values = frozenset(exclude_values or ())
TypeDecorator.__init__(self)
SchemaType.__init__(self)
def process_bind_param(self, value, dialect):
if value is None:
return None
if not isinstance(value, self.enum):
# Convert plain (int) value to enum member
value = self.enum(value)
return _EnumIntWrapper(value)
def process_result_value(self, value, dialect):
if value is None:
return None
# Note: This raises a ValueError if `value` is not in the Enum.
return self.enum(value)
def coerce_set_value(self, value):
if value is None:
return None
return self.enum(value)
def alembic_render_type(self, autogen_context, toplevel_code):
name = f'_{self.enum.__name__}'
members = '\n'.join(f' {x.name} = {x.value!r}' for x in self.enum)
enum_tpl = textwrap.dedent('''
class {name}(int, Enum):
{members}
''')
toplevel_code.add(enum_tpl.format(name=name, members=members))
autogen_context.imports.add('from enum import Enum')
autogen_context.imports.add('from indico.core.db.sqlalchemy import PyIntEnum')
if self.exclude_values:
return '{}({}, exclude_values={{{}}})'.format(type(self).__name__, name, ', '.join(
f'{name}.{x.name}' for x in sorted(self.exclude_values)
))
else:
return f'{type(self).__name__}({name})'
def marshmallow_get_field_kwargs(self):
return {'enum': self.enum}
@listens_for(PyIntEnum, 'before_parent_attach')
def _type_before_parent_attach(type_, col):
@listens_for(col, 'after_parent_attach')
def _col_after_parent_attach(col, table):
int_col = type_coerce(col, SmallInteger)
e = CheckConstraint(int_col.in_(x.value for x in type_.enum if x not in type_.exclude_values),
f'valid_enum_{col.name}')
e.info['alembic_dont_render'] = True
assert e.table is table
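# Usage sketch (hypothetical enum and model; assumes a standard SQLAlchemy declarative setup):
# from enum import Enum
# class _Color(int, Enum):
#     red = 1
#     blue = 2
# class Widget(Base):
#     color = Column(PyIntEnum(_Color, exclude_values={_Color.blue}))
# Values are stored as small integers (e.g. 1 for _Color.red) and converted back to enum members
# on load; the attached CHECK constraint rejects excluded or unknown integers at the database level.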
|
def insertion_sort(items):
for i in range(1, len(items)):
key = items[i]
last = i - 1
while last >= 0 and key < items[last]:
items[last + 1] = items[last]
last = last - 1
items[last + 1] = key
items = [12, 45, 90, 3, 10]
insertion_sort(items)
print(items) |
import os
import shutil
import subprocess
from setuptools import setup, Extension, Command
from setuptools.command.sdist import sdist as sdist
from setuptools.command.build_ext import build_ext as build_ext
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
BUILD_DIR = os.path.join(BASE_DIR, 'vendor', 'build')
VENDOR_DIR = os.path.join(BASE_DIR, 'vendor', 'base64')
INSTALL_DIR = os.path.join(BASE_DIR, 'vendor', 'install')
LIBRARY_DIR = os.path.join(INSTALL_DIR, 'static')
INCLUDE_DIR = os.path.join(INSTALL_DIR, 'include')
CMAKE_OPTIONS = [
'-DCMAKE_BUILD_TYPE=Release',
'-DB64_STREAM_BUILD_TESTS=OFF',
'-DB64_STREAM_BUILD_EXE=OFF',
'-DCMAKE_POSITION_INDEPENDENT_CODE=ON',
]
class b64_stream_build_ext(build_ext):
user_options = build_ext.user_options + [
('cython-force', None, 'run cythonize() force'),
]
boolean_options = build_ext.boolean_options + ['cython-force']
def initialize_options(self):
super().initialize_options()
self.cython_force = False
self._cmake_options = []
def finalize_options(self):
need_cythonize = self.cython_force
cfiles = {}
for extension in self.distribution.ext_modules:
for i, sfile in enumerate(extension.sources):
if sfile.endswith('.pyx'):
prefix, ext = os.path.splitext(sfile)
cfile = prefix + '.c'
if os.path.exists(cfile) and not self.cython_force:
extension.sources[i] = cfile
else:
if os.path.exists(cfile):
cfiles[cfile] = os.path.getmtime(cfile)
else:
cfiles[cfile] = 0
need_cythonize = True
if need_cythonize:
try:
import Cython
from distutils.version import LooseVersion
except ImportError:
raise RuntimeError(
'please install Cython to compile uvloop from source')
if LooseVersion(Cython.__version__) < LooseVersion('0.28'):
raise RuntimeError(
'uvloop requires Cython version 0.28 or greater')
from Cython.Build import cythonize
self.distribution.ext_modules[:] = cythonize(self.distribution.ext_modules)
super().finalize_options()
def _build_lib(self):
if os.path.exists(INSTALL_DIR):
shutil.rmtree(INSTALL_DIR)
if os.path.exists(BUILD_DIR):
shutil.rmtree(BUILD_DIR)
os.mkdir(BUILD_DIR)
cmake_options = ['-DCMAKE_INSTALL_PREFIX={}'.format(INSTALL_DIR)]
subprocess.run(['cmake'] + cmake_options + CMAKE_OPTIONS + [VENDOR_DIR], cwd=BUILD_DIR, check=True)
subprocess.run(['cmake', '--build', '.', '--target', 'install'], cwd=BUILD_DIR, check=True)
def build_extensions(self):
self._build_lib()
super().build_extensions()
class b64_stream_sdist(sdist):
pass
extensions = [
Extension(
'b64_stream._b64_stream',
['b64_stream/_b64_stream.pyx'],
libraries=['b64_stream'],
library_dirs=[LIBRARY_DIR],
include_dirs=[INCLUDE_DIR],
language='c',
),
]
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as fd:
readme = fd.read()
setup(
name='b64-stream',
version='1.0.0',
description='Base64 stream encode/decode library',
long_description=readme,
long_description_content_type='text/markdown',
url='https://github.com/ikrivosheev/py-base64',
license='Apache 2',
author='Ivan Krivosheev',
author_email='[email protected]',
packages=['b64_stream'],
python_requires=">=3.5",
include_package_data=True,
ext_modules=extensions,
install_requires=[],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
cmdclass={
'sdist': b64_stream_sdist,
'build_ext': b64_stream_build_ext,
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
]
)
|
import numpy as np
import pytest
from pandas import DataFrame, MultiIndex, Panel, Series
from pandas.util import testing as tm
@pytest.mark.filterwarnings('ignore:\\nPanel:FutureWarning')
class TestMultiIndexPanel(object):
def test_iloc_getitem_panel_multiindex(self):
# GH 7199
# Panel with multi-index
multi_index = MultiIndex.from_tuples([('ONE', 'one'),
('TWO', 'two'),
('THREE', 'three')],
names=['UPPER', 'lower'])
simple_index = [x[0] for x in multi_index]
wd1 = Panel(items=['First', 'Second'],
major_axis=['a', 'b', 'c', 'd'],
minor_axis=multi_index)
wd2 = Panel(items=['First', 'Second'],
major_axis=['a', 'b', 'c', 'd'],
minor_axis=simple_index)
expected1 = wd1['First'].iloc[[True, True, True, False], [0, 2]]
result1 = wd1.iloc[0, [True, True, True, False], [0, 2]] # WRONG
tm.assert_frame_equal(result1, expected1)
expected2 = wd2['First'].iloc[[True, True, True, False], [0, 2]]
result2 = wd2.iloc[0, [True, True, True, False], [0, 2]]
tm.assert_frame_equal(result2, expected2)
expected1 = DataFrame(index=['a'], columns=multi_index,
dtype='float64')
result1 = wd1.iloc[0, [0], [0, 1, 2]]
tm.assert_frame_equal(result1, expected1)
expected2 = DataFrame(index=['a'], columns=simple_index,
dtype='float64')
result2 = wd2.iloc[0, [0], [0, 1, 2]]
tm.assert_frame_equal(result2, expected2)
# GH 7516
mi = MultiIndex.from_tuples([(0, 'x'), (1, 'y'), (2, 'z')])
p = Panel(np.arange(3 * 3 * 3, dtype='int64').reshape(3, 3, 3),
items=['a', 'b', 'c'], major_axis=mi,
minor_axis=['u', 'v', 'w'])
result = p.iloc[:, 1, 0]
expected = Series([3, 12, 21], index=['a', 'b', 'c'], name='u')
tm.assert_series_equal(result, expected)
result = p.loc[:, (1, 'y'), 'u']
tm.assert_series_equal(result, expected)
|
"""Test
>>> find_longest_substring_with_at_most_k_distinct_chars("abcba", 2)
'bcb'
"""
from collections import namedtuple
def find_longest_substring_with_at_most_k_distinct_chars(s: str, k: int) -> str:
class Cstats:
def __init__(self):
self.number = 0
self.positions = list()
# collecting
charmap = dict()
for i,c in enumerate(s):
if c not in charmap:
charmap[c] = Cstats()
charmap[c].number += 1
charmap[c].positions.append(i)
# get valid candidates
Substring = namedtuple('Substring', ['start','end'])
candidates = [Substring(0, len(s)-1)] # default return if nothing is found
for stats in charmap.values():
if stats.number >= k:
for start,end in zip(stats.positions[:-(k-1)], stats.positions[k-1:]):
candidates.append(Substring(start, end))
# second filtering
candidates = sorted(candidates, key=lambda x: x.end - x.start)
for smaller in candidates:
for bigger in reversed(candidates):
if smaller.start > bigger.start and smaller.end < bigger.end:
candidates.remove(bigger)
# get the longest
longest = candidates[-1]
return s[longest.start : longest.end+1]
if __name__=="__main__":
import doctest
doctest.testmod() |
class Pessoa:
olhos = 2
def __init__(self, *filhos, nome=None, idade=None):
self.idade = idade
self.nome = nome
self.filhos = list(filhos)
def cumprimentar(self):
return f'Olá {id(self)}'
@staticmethod
def metodo_estatico():
return 42
@classmethod
def nomes_e_atributos_da_classe(cls):
return f'Classe: {cls} - olhos {cls.olhos}'
class Homem(Pessoa):
pass
if __name__ == '__main__':
lucas = Pessoa(nome='Lucas')
anna = Pessoa(nome='Anna')
pedro = Pessoa(nome='Pedro')
felipe = Homem(lucas, anna, pedro, nome='Felipe')
print(Pessoa.cumprimentar(felipe))
print(id(felipe))
print(felipe.cumprimentar())
print(felipe.nome)
print(felipe.idade)
for filho in felipe.filhos:
print(filho.nome)
felipe.sobrenome = 'Souza'
print(felipe.sobrenome)
print(felipe.__dict__)
print(lucas.__dict__)
del felipe.sobrenome
print(felipe.__dict__)
print(Pessoa.olhos)
print(felipe.olhos)
felipe.olhos = 1
print(felipe.__dict__)
print(anna.olhos)
print(id(Pessoa.olhos), id(pedro.olhos), id(felipe.olhos))
print(Pessoa.metodo_estatico(), felipe.metodo_estatico())
print(Pessoa.nomes_e_atributos_da_classe(), felipe.nomes_e_atributos_da_classe())
pessoa = Pessoa()
print(isinstance(pessoa, Pessoa))
print(isinstance(pessoa, Homem))
print(isinstance(felipe, Pessoa))
print(isinstance(felipe, Homem))
|
#! /usr/bin/python
import os
import sys
import requests
import json
from collections import deque
DATA_DIR = '/Users/danielgoldin/data/meerkat'
profiles_to_parse = deque()
profiles_done = set()
def put(url, data):
r = requests.put(url, data=json.dumps(data), headers={'Content-Type': 'application/json'}, verify=False)
if r.status_code == 200:
return json.loads(r.content)
else:
return {}
def get(url):
r = requests.get(url, verify=False)
if r.status_code == 200:
return json.loads(r.content)
else:
return {}
def get_profile(user_id):
return get('https://resources.meerkatapp.co/users/{0}/profile?v=2'.format(user_id))
def get_complete_info(user_id):
print 'Getting data for', user_id
profile_path = os.path.join(DATA_DIR, user_id + '_profile')
if not os.path.exists(profile_path):
p = get_profile(user_id)
with open(profile_path, 'w') as f:
f.write(json.dumps(p, indent=2))
else:
with open(profile_path, 'r') as f:
p = json.loads(f.read())
if not os.path.exists(profile_path.replace('_profile', '_followers')):
followers = get(p['followupActions']['followers'])
with open(profile_path.replace('_profile', '_followers'), 'w') as f:
f.write(json.dumps(followers, indent=2))
else:
with open(profile_path.replace('_profile', '_followers'), 'r') as f:
followers = json.loads(f.read())
if not os.path.exists(profile_path.replace('_profile', '_following')):
following = get(p['followupActions']['following'])
with open(profile_path.replace('_profile', '_following'), 'w') as f:
f.write(json.dumps(following, indent=2))
else:
with open(profile_path.replace('_profile', '_following'), 'r') as f:
following = json.loads(f.read())
profiles_done.add(user_id)
new_users = []
for f in followers['result']:
if f['id'] not in profiles_done:
new_users.append(f['id'])
for f in following['result']:
if f['id'] not in profiles_done:
new_users.append(f['id'])
return new_users
def search(username):
return put('https://social.meerkatapp.co/users/search?v=2', {'username' : username})
if __name__ == '__main__':
if len(sys.argv) == 1:
print 'Pass in a username to start crawling'
exit()
username = sys.argv[1]
print 'Searching for', username
u = search(username)
if u:
user_id = u['result'][0] # Just pick the first one
print 'Found user_id', user_id
else:
print 'Could not find user_id for', username
exit()
profiles_to_parse.append(user_id)
while len(profiles_to_parse):
user_id = profiles_to_parse.popleft()
try:
new_user_ids = get_complete_info(user_id)
print 'Found {0} new user ids'.format(len(new_user_ids))
profiles_to_parse.extend(new_user_ids)
except Exception as e:
print 'Failed to get data for user_id {0}: {1}'.format(user_id, e)
# Starting with 54f39f741f0000ea01e76d14 |