from PIL import Image
import numpy as np
import _pickle as pickle
import os
import glob
# only using the library for confusion_matrix
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from knn import NearestNeighbour
# Loads a single binary batch file of CIFAR-10
def load_CIFAR_batch(file):
""" load single batch of cifar"""
with open(file, 'rb') as f:
datadict = pickle.load(f, encoding='latin1')
X = datadict['data']
Y = datadict['labels']
X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype("float")
Y = np.array(Y)
return X, Y
# Loads training and test batch files
def load_CIFAR10(ROOT):
""" load all of cifar """
xs = []
ys = []
for i in range(1,6):
f = os.path.join(ROOT, 'data_batch_%d' % (i,))
X, Y = load_CIFAR_batch(f)
xs.append(X)
ys.append(Y)
Xtr = np.concatenate(xs)
Ytr = np.concatenate(ys)
del X, Y
"""
Xtr, Ytr = training data
Xte, Yte = testing data
"""
Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))
return Xtr, Ytr, Xte, Yte
# Converts a single image into an array with the same layout as the training data
# Note: depending on how evaluation images are supplied, this may need to be edited to your specifications
def load_CIFAR10_image(image):
X = np.array(image)
X = X.reshape(1, 32, 32, 3).astype("float")  # PIL images convert to (H, W, C), so only a batch axis is needed
return X
# Loads image and formats it
def load_CIFAR10_images(TROOT,ROOT):
xs = []
ys = []
for i in range(1,6):
f = os.path.join(TROOT, 'data_batch_%d' % (i,))
X, Y = load_CIFAR_batch(f)
xs.append(X)
ys.append(Y)
Xtr = np.concatenate(xs)
Ytr = np.concatenate(ys)
del X, Y
"""
Xtr, Ytr = training data
Xte = testing data
"""
xs = []
for filename in glob.glob(ROOT + '/*.png'):
image = Image.open(filename)
X = load_CIFAR10_image(image)
xs.append(X)
Xte = np.concatenate(xs)
return Xtr, Ytr, Xte
# prints out confusion matrix
def plot_confusion_matrix(cm, title, i, cmap=plt.cm.Blues):
labels = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(labels))
plt.xticks(tick_marks, labels, rotation=45)
plt.yticks(tick_marks, labels)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig('CIFAR_10_confusion_matrix_'+ i + '.png')
# prints our results into a nice confusion matrix
def results(Y_pred, Yte):
cm = confusion_matrix(Yte, Y_pred)
title = "10NN Confusion Matrix"
i= "normal"
print(cm)
plt.figure()
plot_confusion_matrix(cm,title, i)
# title = "10NN Normalised Confusion Matrix"
# i = "normalised"
# cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# plt.figure()
# plot_confusion_matrix(cm_norm, title, i)
# The main knn function that uses the NearestNeighbour class in knn.py
def run_knn():
new = input('Testing with new undefined images? (y or n): ')
if (new == 'y'):
Xtr, Ytr, Xte = load_CIFAR10_images('cifar-10-batches-py','INFO3406_assignment1_query')
else:
Xtr, Ytr, Xte, Yte = load_CIFAR10('cifar-10-batches-py')
# flattens out all images to be one dimensional
Xtr_rows = Xtr.reshape(Xtr.shape[0], 32 * 32 * 3) # Xtr_rows become 50000x 3072
Xte_rows = Xte.reshape(Xte.shape[0], 32 * 32 * 3) # Xte_rows become 10000 x 3072
validation_accuracies = []
for k in [10]:
nn = NearestNeighbour() # create a Nearest Neighbor classifier class
nn.train(Xtr_rows, Ytr) # train the classifier on the training images and labels
Yte_predict = nn.predict(Xte_rows, k) # predict labels on the test images
# and now print the classification accuracy, which is the fraction
# of test examples that are correctly predicted (i.e. label matches)
if (new != 'y'):
# Compute the accuracy between the actuals and the predictions
acc = np.mean(Yte_predict == Yte)
print('K-NN %d' % (k))
print('accuracy: %f' % (acc))
# For graphing
# results(Yte_predict,Yte)
# Print predictions to csv
np.savetxt("cifar10_predictions.csv", Yte_predict, delimiter=",")
# predictions have been written to cifar10_predictions.csv; nothing further to return
return
if __name__ == '__main__':
run_knn()
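# --- Hedged sketch (not part of the original repo): knn.py is not shown above, so the class
# --- below is an assumed minimal L1-distance k-NN implementation matching the
# --- train(Xtr_rows, Ytr) / predict(Xte_rows, k) calls made in run_knn().
class NearestNeighbourSketch:
    def train(self, X, y):
        # k-NN has no training step beyond memorising the data
        self.Xtr = X
        self.ytr = y
    def predict(self, X, k=1):
        y_pred = np.zeros(X.shape[0], dtype=self.ytr.dtype)
        for i in range(X.shape[0]):
            # L1 (Manhattan) distance from the i-th test row to every training row
            distances = np.sum(np.abs(self.Xtr - X[i, :]), axis=1)
            nearest = np.argsort(distances)[:k]
            # majority vote among the k nearest training labels
            y_pred[i] = np.argmax(np.bincount(self.ytr[nearest].astype(int)))
        return y_pred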
|
#!/usr/bin/env python
import json
from pathlib import Path
def minmax_coordinates(path):
minx = miny = 0x800000
maxx = maxy = -0x800000
for fn in path.glob('*.json'):
with open(fn) as fp:
obj = json.load(fp)
hole = obj.get('hole', [])
figure = obj.get('figure', {}).get('vertices', [])
for [x, y] in (hole + figure):
minx, maxx = min(minx, x), max(maxx, x)
miny, maxy = min(miny, y), max(maxy, y)
print(f'coordinate range: x={minx}:{maxx}, y={miny}:{maxy}')
def max_vertices(path):
maxhole = maxfig = -0x800000
for fn in path.glob('*.json'):
with open(fn) as fp:
obj = json.load(fp)
hole = obj.get('hole', [])
maxhole = max(maxhole, len(hole))
figure = obj.get('figure', {}).get('vertices', [])
maxfig = max(maxfig, len(figure))
print('max vertices count')
print(' hole', maxhole)
print(' fig', maxfig)
def main():
path = Path(__file__).with_name('spec') / 'problems'
minmax_coordinates(path)
max_vertices(path)
if __name__ == '__main__':
main()
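# For reference, a hedged sketch of the problem-file layout these helpers assume, inferred
# from the .get() calls above (values are illustrative; any other keys are simply ignored):
#
# {
#     "hole": [[45, 80], [35, 95], [5, 95]],
#     "figure": {"vertices": [[20, 30], [20, 40], [30, 95]]}
# }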
|
""" Testing the radius cutoff utility
"""
from particula.util.radius_cutoff import cut_rad
def test_cuts():
""" testing cuts:
* test if starting radius is lower than mode
* test if ending radius is higher than mode
* test if lower radius is smaller than end radius
* test if lower radius is smaller when cutoff is smaller
* test if ending radius is larger when cutoff is larger
* test if ending radius is larger when gsigma is higher
"""
assert cut_rad(cutoff=.9999, gsigma=1.25, mode=1e-7)[0] <= 1e-7
assert cut_rad(cutoff=.9999, gsigma=1.25, mode=1e-7)[1] >= 1e-7
assert (
cut_rad(cutoff=.9999, gsigma=1.25, mode=1e-7)[0]
<=
cut_rad(cutoff=.9999, gsigma=1.25, mode=1e-7)[1]
)
assert (
cut_rad(cutoff=.9999, gsigma=1.25, mode=1e-7)[0]
<=
cut_rad(cutoff=.9990, gsigma=1.25, mode=1e-7)[0]
)
assert (
cut_rad(cutoff=.9999, gsigma=1.25, mode=1e-7)[1]
>=
cut_rad(cutoff=.9990, gsigma=1.25, mode=1e-7)[1]
)
assert (
cut_rad(cutoff=.9999, gsigma=1.25, mode=1e-7)[1]
<=
cut_rad(cutoff=.9999, gsigma=1.35, mode=1e-7)[1]
)
def test_multi_cuts():
""" test case for different modes
"""
assert (
cut_rad(cutoff=.9999, gsigma=1.25, mode=[1e-7, 1e-8])[0]
<=
cut_rad(cutoff=.9999, gsigma=1.25, mode=[1e-7, 1e-8])[1]
)
assert (
cut_rad(cutoff=.9999, gsigma=1.25, mode=[1e-7, 1e-8])[0]
<=
cut_rad(cutoff=.9990, gsigma=1.25, mode=[1e-7, 1e-8])[0]
)
assert (
cut_rad(cutoff=.9999, gsigma=1.25, mode=[1e-7, 1e-8])[1]
>=
cut_rad(cutoff=.9990, gsigma=1.25, mode=[1e-7, 1e-8])[1]
)
assert (
cut_rad(cutoff=.9999, gsigma=1.25, mode=[1e-7, 1e-8])[1]
<=
cut_rad(cutoff=.9999, gsigma=1.25, mode=[1e-7, 1e-8])[1]
)
assert (
cut_rad(cutoff=.9999, gsigma=1.25, mode=[1e-7, 1e-8])[1]
==
cut_rad(cutoff=.9999, gsigma=1.25, mode=[1e-7])[1]
)
assert (
cut_rad(cutoff=.9999, gsigma=1.25, mode=[1e-7, 1e-8])[1]
>=
cut_rad(cutoff=.9999, gsigma=1.25, mode=[1e-8])[1]
)
assert (
cut_rad(cutoff=.9999, gsigma=1.25, mode=[1e-7, 1e-8])[0]
==
cut_rad(cutoff=.9999, gsigma=1.25, mode=[1e-8])[0]
)
assert (
cut_rad(cutoff=.9999, gsigma=1.25, mode=[1e-7, 1e-8])[0]
<=
cut_rad(cutoff=.9999, gsigma=1.25, mode=[1e-7])[0]
)
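# A minimal usage sketch, assuming (as the [0]/[1] indexing above implies) that cut_rad
# returns a (start, end) radius pair bracketing the requested cutoff fraction:
if __name__ == "__main__":
    bounds = cut_rad(cutoff=.9999, gsigma=1.25, mode=1e-7)
    print(f"radius interval: [{bounds[0]:.3e}, {bounds[1]:.3e}] m")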
|
import sys
import numpy as np
from daetools.pyDAE import *
from time import localtime, strftime
from pyUnits import m, kg, s, K, Pa, mol, J, W, kJ, hour, l, day
class model2(daeModel):
def __init__(self, Name, Parent=None, Description=''):
daeModel.__init__(self, Name, Parent, Description)
self.muMax = daeParameter(
'muMax', day**(-1), self, 'Maximum specific growth rate')
self.ks = daeParameter('ks', mol * m**(-3), self,
'Liquid phase mass transfer coefficient')
self.ki = daeParameter('ki', mol * 1000, self, 'Inhibition constant')
self.k = daeParameter('k', mol * 1000, self, 'Inhibition constant')
self.Yi = daeParameter('Yi', mol * kg**(-1), self, 'Yield coefficient')
self.kla = daeParameter('kla', s**(-1), self,
'Mass transfer coefficient')
self.h = daeParameter('h', Pa * (m**3) * s**(-1),
self, 'Henrys constant')
self.sgin = daeParameter(
'sgin', Pa, self, 'Initial CO2 partial pressure')
self.i0 = daeParameter('i0', day**(-1), self, 'Initial irradiance')
self.a = daeParameter('a', (m**3) * kg**(-1), self, 'Coefficient')
self.x = daeVariable('x', molar_concentration_t,
self, 'Biomass concentration')
self.sl = daeVariable('sl', molar_concentration_t,
self, 'Liquid CO2 concentration')
self.sg = daeVariable('sg', molar_concentration_t,
self, 'CO2 partial pressure')
self.mu = daeVariable('mu', molar_concentration_t,
self, 'Specific growth rate')
self.i = daeVariable('i', molar_concentration_t, self, 'Irradiance')
self.t = daeVariable('t', time_1, self, 'Time')
def DeclareEquations(self):
muMax = self.muMax()
ks = self.ks()
ki = self.ki()
k = self.k()
Yi = self.Yi()
kla = self.kla()
h = self.h()
sgin = self.sgin()
i0 = self.i0()
a = self.a()
x = self.x()
sl = self.sl()
sg = self.sg()
mu = self.mu()
i = self.i()
t = self.t()
dx_dt = dt(self.x())
dsl_dt = dt(self.sl())
dsg_dt = dt(self.sg())
eq = self.CreateEquation('mu', '')
eq.Residual = mu - muMax * sl / \
((sl + ks + (sl**2) / ki) * i / (i + k))
eq = self.CreateEquation('i', '')
eq.Residual = i - i0 / (a * x * (1 - np.exp(-a * x)))
eq = self.CreateEquation('x', '')
eq.Residual = dx_dt - mu * x
eq = self.CreateEquation('sl', '')
eq.Residual = dsl_dt - kla * ((sg / h) - sl) - (Yi * dx_dt)
eq = self.CreateEquation('sg', '')
eq.Residual = dsg_dt - sgin - kla * ((sg / h) - sl)
class simModel(daeSimulation):
def __init__(self):
daeSimulation.__init__(self)
self.m = model2('model2')
self.m.Description = __doc__
def SetUpParametersAndDomains(self):
self.m.muMax.SetValue(0.5 * 1 / day)
self.m.ks.SetValue(1 * mol * m**(-3))
self.m.ki.SetValue(3 * mol * 1000)
self.m.k.SetValue(14 * mol*1000)
self.m.Yi.SetValue(0.5 * mol / kg)
self.m.kla.SetValue(0.00095 * 1 / s)
self.m.h.SetValue(0.00316 * Pa * m**(3) * s**(-1))
self.m.sgin.SetValue(0.06 * Pa)
self.m.i0.SetValue(75 * 1 / day)
self.m.a.SetValue(0.014 * m**3 / kg)
def SetUpVariables(self):
self.m.time_f.AssignValue(1.0 / 16.0)
self.m.x.SetInitialCondition(0.03 * kg / l)
self.m.sl.SetInitialCondition(0 * Pa * 1000)
self.m.sg.SetInitialCondition(17 * Pa * 1000)
def run(**kwargs):
simulation = simModel()
return daeActivity.simulate(simulation, reportingInterval=600,
timeHorizon=3*60*60, **kwargs)
if __name__ == "__main__":
guiRun = False if (
len(sys.argv) > 1 and sys.argv[1] == 'console') else True
run(guiRun=guiRun)
|
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
import os
os.environ["KMP_AFFINITY"]="disabled"
import csv
import math
import tensorflow as tf
import FK2018
#import objgraph
#from pympler.tracker import SummaryTracker
import mrcnn.model as modellib
import pandas as pd
from mrcnn import utils
import numpy as np
import argparse
import matplotlib.pyplot as plt
#import seaborn as sns
from keras import backend as K
conf = K.tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
K.set_session(K.tf.Session(config=conf))
print("imported keras")
import logging
from multiprocessing import Pool
global ts_dataset
global ae_dive1_dataset
global ae_dive2_dataset
global ae_dive3_dataset
def get_dataset_filepath(experiment):
filepath = "/home/jenny/Documents/FK2018/tunasand/05_dive/"
filepath = "/scratch/jw22g14/FK2018/tunasand/20180805_215810_ts_un6k/2021_dataset/"
filepath = "/scratch/jw22g14/FK2018/March_2021/TS1/"
if experiment["colour_correction_type"] == "debayered":
filepath += "debayered/"
elif experiment["colour_correction_type"] in ["grey", "greyworld"]:
filepath += "greyworld/"
elif experiment["colour_correction_type"] in ["alt", "altitude_corrected"]:
filepath += "altitude_corrected/"
else:
print(experiment["colour_correction_type"])
exit()
if experiment["distortion_correction"]:
filepath += "distortion_corrected/"
else:
filepath += "no_distortion/"
#rescaling_dict={"res_nn":"rescaled_nn", "drop_res":"dropped_resolution", "rescaled":"rescaled", "not_rescaled":"not_rescaled", "drop_res_up":"dropped_resolution_scaledup", "drop_res_up_nn": "dropped_resolution_scaledup_nn", "drop_res_nn": "dropped_resolution_nn"}
#filepath += rescaling_dict[experiment['rescaled']] + "/"
# filepath += experiment['rescaled']
rescaling_dict={"rescaled":"dropped_resolution", "not_rescaled":"not_rescaled", "upscaled":"rescaled"}
filepath += rescaling_dict[experiment['rescaled']]
return filepath
def get_ae2000_dataset_filepaths(experiment):
filepath = "/home/jenny/Documents/FK2018/ae/"
base_filepath = "/scratch/jw22g14/FK2018/March_2021/"
filepath=""
if experiment["colour_correction_type"] == "debayered":
filepath += "debayered/"
elif experiment["colour_correction_type"] in ["grey", "greyworld"]:
filepath += "greyworld/"
elif experiment["colour_correction_type"] in ["alt", "altitude_corrected"]:
filepath += "altitude/"
else:
print(experiment["colour_correction_type"])
exit()
if experiment["distortion_correction"]:
filepath += "distortion_corrected/"
else:
filepath += "no_distortion/"
filepath_list = []
#TODO match
rescaling_type = experiment['rescaled']
for area in ["AE1"]:
filepath_list.append(f"{base_filepath}{area}/{filepath}{rescaling_type}")
return filepath_list
def load_images(image_ids, dataset, config):
images=[]
for image_id in image_ids:
# Load image
image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
dataset, config, image_id, use_mini_mask=False
)
if image is None:
print("Image not loaded - " + image_id)
exit()
images.append({'image':image, 'image_meta':image_meta, 'gt_class_id':gt_class_id, 'gt_bbox':gt_bbox, 'gt_mask':gt_mask})
return images
def compute_both_batch_aps(image_ids, dataset, model, config, iterations=500):
AP_list = []
classless_AP_list = []
precision_list = []
classless_precision_list = []
recall_list = []
classless_recall_list = []
predicted_class_list = []
gt_class_list = []
predicted_size_list = []
gt_size_list = []
overlaps_list = []
classless_overlaps_list = []
class_APs={}
class_precision={}
class_recalls={}
class_overlaps={}
total_predicted_pixels = 0
total_groundtruth_pixels = 0
total_overlapping_pixels = 0
for iteration in range(iterations):
images=load_images(image_ids, dataset, config)
logging.debug("running detection for iteration "+str(iteration))
for n, image in enumerate(images):
if len(image['gt_class_id']) > 0:
logging.debug("getting stats for image " +str(image['image_meta'][0]))
# Compute AP
results = model.detect([image['image']], verbose=0)
r = results[0]
logging.debug(f"With {len(r['rois'])} regions of interest")
r["blank_class_ids"]=[]
image["gt_blank_class_ids"]=[]
for id_num in range(len(r["class_ids"])):
r["blank_class_ids"].append(0)
for id_num in range(len(image["gt_class_id"])):
image["gt_blank_class_ids"].append(0)
masks_shape = r["masks"].shape
print(image['image_meta'][0])
print(masks_shape)
print(image['gt_mask'].shape)
AP, precisions, recalls, overlaps = utils.compute_ap(
image['gt_bbox'],
image['gt_class_id'],
image['gt_mask'],
r['rois'],
r["class_ids"],
r["scores"],
np.reshape(r["masks"], [masks_shape[0], masks_shape[1], 1, masks_shape[2]]),
iou_threshold=0.5,
)
AP_list.append(AP)
precision_list.append(precisions)
recall_list.append(recalls)
classless_AP, classless_precisions, classless_recalls, classless_overlaps = utils.compute_ap(
image['gt_bbox'],
np.array(image['gt_blank_class_ids']),
image['gt_mask'],
r['rois'],
np.array(r["blank_class_ids"]),
r["scores"],
np.reshape(r["masks"], [masks_shape[0], masks_shape[1], 1, masks_shape[2]]),
iou_threshold=0.5,
)
classless_AP_list.append(classless_AP)
classless_precision_list.append(classless_precisions)
classless_recall_list.append(classless_recalls)
predicted_pixels = np.any(r["masks"], axis=2)
groundtruth_pixels = np.any(image['gt_mask'], axis=2)
overlap_pixels = np.logical_and(predicted_pixels, groundtruth_pixels)
total_predicted_pixels += np.sum(predicted_pixels)
total_groundtruth_pixels += np.sum(groundtruth_pixels)
total_overlapping_pixels += np.sum(overlap_pixels)
for class_id in set(image['gt_class_id']):
class_indices = np.where(r['class_ids']==class_id)
class_r = {}
class_r['rois']=r['rois'][class_indices]
class_r['class_ids']=r['class_ids'][class_indices]
class_r['scores']=r['scores'][class_indices]
class_r['masks']=r['masks'][:,:,class_indices]
class_gt_indices = np.where(image['gt_class_id']==class_id)
class_gt_bbox = image['gt_bbox'][class_gt_indices]
class_gt_class_id = image['gt_class_id'][class_gt_indices]
class_gt_mask = image['gt_mask'][:,:,class_gt_indices]
if len(class_gt_class_id) > 0:
AP, precisions, recalls, overlaps =\
utils.compute_ap(class_gt_bbox, class_gt_class_id, class_gt_mask,
class_r['rois'], class_r['class_ids'], class_r['scores'], class_r['masks'])
if not str(class_id) in class_APs.keys():
class_APs[str(class_id)] = []
class_precision[str(class_id)] = []
class_recalls[str(class_id)] = []
class_overlaps[str(class_id)] = []
class_APs[str(class_id)].append(AP)
class_precision[str(class_id)].append(precisions)
class_recalls[str(class_id)].append(recalls)
class_overlaps[str(class_id)].append(overlaps)
logging.debug(f"found {len(overlaps_list)} overlap values")
logging.debug(f"finished all {iterations} iterations for these images")
return AP_list, classless_AP_list, precision_list, classless_precision_list, recall_list, classless_recall_list, predicted_class_list, gt_class_list, predicted_size_list, gt_size_list, overlaps_list, classless_overlaps_list, total_predicted_pixels, total_groundtruth_pixels, total_overlapping_pixels, class_APs, class_precision, class_recalls, class_overlaps
def load_dataset(filepath):
dataset = FK2018.FKDataset()
dataset.load_fk(filepath)
dataset.prepare()
return dataset
def get_stats(weights_filepath, dataset, iterations=5):
config = FK2018.FKConfig()
print("got config")
class InferenceConfig(config.__class__):
# Run detection on one image at a time
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
# Device to load the neural network on.
# Useful if you're training a model on the same
# machine, in which case use CPU and leave the
# GPU for training.
# DEVICE = "/gpu:0" # /cpu:0 or /gpu:0
# Inspect the model in training or inference modes
# values: 'inference' or 'training'
# TODO: code for 'training' test mode not ready yet
TEST_MODE = "inference"
# Must call before using the dataset
dataset.prepare()
print(dataset)
logging.debug("Images: {}\nClasses: {}".format(len(dataset.image_ids), dataset.class_names))
# with tf.device(DEVICE):
tf_config=tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
with tf.Session(config=tf_config).as_default():
model = modellib.MaskRCNN(mode="inference", model_dir="./log", config=config)
# Load weights
logging.debug("Loading weights "+ str(weights_filepath))
model.load_weights(weights_filepath, by_name=True)
image_ids = dataset.image_ids
AP_list, classless_AP_list, precision_list, classless_precision_list, recall_list, classless_recall_list, predicted_class_list, gt_class_list, predicted_size_list, gt_size_list, overlaps_list, classless_overlaps_list, total_predicted_pixels, total_groundtruth_pixels, total_overlapping_pixels, class_APs, class_precision, class_recalls, class_overlaps = compute_both_batch_aps(image_ids, dataset, model, config, iterations=iterations)
return AP_list, classless_AP_list, precision_list, classless_precision_list, recall_list, classless_recall_list, predicted_class_list, gt_class_list, predicted_size_list, gt_size_list, overlaps_list, classless_overlaps_list, total_predicted_pixels, total_groundtruth_pixels, total_overlapping_pixels, class_APs, class_precision, class_recalls, class_overlaps
def plot_class_boxplots():
experiments = get_experiments()
print(experiments.columns)
import seaborn as sns  # imported locally because the module-level seaborn import is commented out above
sns.boxplot(
data=experiments,
y=experiments.columns[16],
x="colour_correction_type",
hue="separate_channel_ops",
)
# experiments.boxplot(column=experiments.columns[16], by=["colour_correction_type", "distortion_correction"], rot=90)
plt.show()
def directory_to_experiment_info(directory):
colour_correction_type = directory.split("/")[-1].split("-")[0]
if directory.split("/")[-1].split("-")[1] == "distortion_corrected":
distortion_correction = True
else:
distortion_correction = False
rescaled = directory.split("/")[-1].split("-")[2]
number = int(directory.split("/")[-1].split("-")[-1])
experiment = {
"colour_correction_type": colour_correction_type,
"distortion_correction": distortion_correction,
"rescaled": rescaled,
"number": number,
}
experiment["elastic_transformations"] = number % 8 > 3
experiment["separate_channel_operations"] = number % 2 > 0
experiment["flip_rotate"] = number % 4 > 1
print("got experiment info from directory")
print(experiment)
return experiment
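# A hedged illustration of the directory naming this parser assumes, i.e.
# "<colour_correction_type>-<distortion flag>-<rescaled>-<number>" (the path below is made up):
#
#   directory_to_experiment_info("logs/greyworld-distortion_corrected-rescaled-13")
#   # -> {'colour_correction_type': 'greyworld', 'distortion_correction': True,
#   #     'rescaled': 'rescaled', 'number': 13, 'elastic_transformations': True,
#   #     'separate_channel_operations': True, 'flip_rotate': False}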
def add_single_experiment(directory, df_filepath, datasets):
csv_filenames = [
f for f in os.listdir(directory) if f[0:5] == "resul" and f[-4:] == ".csv"
]
print(list(os.walk(directory)))
weights_folder = [f for f in os.walk(directory)][0][1][0]
for filename in csv_filenames:
experiment = directory_to_experiment_info(directory)
with open(directory + "/" + filename, "r") as csvfile:
logging.debug("./" + directory + "/" + filename)
plots = csv.reader(csvfile, delimiter=",")
headers = next(plots, None)
for header in headers:
experiment[header] = []
for row in plots:
for i, header in enumerate(headers):
experiment[header].append(float(row[i]))
experiment["minimum_val_loss"] = min(experiment["val_loss"])
experiment["minimum_loss"] = min(experiment["loss"])
number = int(directory.split("-")[-1].split(".")[0])
experiment["number"] = number
experiment["repeat"] = math.floor(number / 8)
experiment["elastic_transformations"] = number % 8 > 3
experiment["separate_channel_operations"] = number % 2 > 0
experiment["flip_rotate"] = number % 4 > 1
print(experiment.keys())
weights_file = ( directory + "/" + weights_folder + "/" + "mask_rcnn_fk2018_best.h5" )
if (not experiment_in_dataframe(df_filepath, experiment)) and os.path.isfile(weights_file):
config = FK2018.FKConfig()
print("got config")
class InferenceConfig(config.__class__):
# Run detection on one image at a time
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
# Device to load the neural network on.
# Useful if you're training a model on the same
# machine, in which case use CPU and leave the
# GPU for training.
# DEVICE = "/gpu:0" # /cpu:0 or /gpu:0
# Inspect the model in training or inference modes
# values: 'inference' or 'training'
# TODO: code for 'training' test mode not ready yet
TEST_MODE = "inference"
# Must call before using the dataset
# with tf.device(DEVICE):
tf_config=tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
with tf.Session(config=tf_config).as_default():
model = modellib.MaskRCNN(mode="inference", model_dir="./log", config=config)
# Load weights
logging.debug("Loading weights "+ str(weights_file))
model.load_weights(weights_file, by_name=True)
image_ids = datasets[0].image_ids
#tracker = SummaryTracker()
logging.debug("getting stats")
logging.debug("tunasand stats")
experiment["AP_list"], \
experiment["classless_AP_list"], \
experiment["precision_list"], \
experiment["classless_precision_list"], \
experiment["recall_list"], \
experiment["classless_recall_list"], \
experiment["predicted_class_list"], \
experiment["gt_class_list"], \
experiment["predicted_size_list"], \
experiment["gt_size_list"], \
experiment["overlaps"], \
experiment["classless_overlaps_list"], \
experiment["total_predicted_pixels"], \
experiment["total_groundtruth_pixels"], \
experiment["total_overlapping_pixels"], \
experiment["class_APs"], \
experiment["class_precision"], \
experiment["class_recalls"], \
experiment["class_overlaps"] = compute_both_batch_aps(image_ids, datasets[0], model, config, iterations=20) #get_stats(weights_file, datasets[0])
experiment["AP_list_short"],\
experiment["classless_AP_list_short"],\
experiment["precision_list_short"],\
experiment["classless_precision_list_short"], \
experiment["recall_list_short"], \
experiment["classless_recall_list_short"], \
experiment["predicted_class_list_short"], \
experiment["gt_class_list_short"], \
experiment["predicted_size_list_short"], \
experiment["gt_size_list_short"], \
experiment["overlaps_short"], \
experiment["classless_overlaps_list_short"], \
experiment["total_predicted_pixels_short"], \
experiment["total_groundtruth_pixels_short"], \
experiment["total_overlapping_pixels_short"], \
experiment["class_APs_short"], \
experiment["class_precision_short"], \
experiment["class_recalls_short"], \
experiment["class_overlaps_short"] = compute_both_batch_aps(image_ids, datasets[0], model, config, iterations=5) #get_stats(weights_file, datasets[0])
#objgraph.show_most_common_types()
#roots = objgraph.get_leaking_objects()
#print(len(roots))
#tracker.print_diff()
for i, dataset in enumerate(["AE_area1"]):
image_ids = datasets[i+1].image_ids
logging.debug(f"aestats, {dataset}")
experiment[f"AP_list_{dataset}"], \
experiment[f"classless_AP_list_{dataset}"], \
experiment[f"precision_list_{dataset}"], \
experiment[f"classless_precision_list_{dataset}"], \
experiment[f"recall_list_{dataset}"], \
experiment[f"classless_recall_list_{dataset}"], \
experiment[f"predicted_class_list_{dataset}"], \
experiment[f"gt_class_list_{dataset}"], \
experiment[f"predicted_size_list_{dataset}"], \
experiment[f"gt_size_list_{dataset}"], \
experiment[f"overlaps_{dataset}"], \
experiment[f"classless_overlaps_list_{dataset}"], \
experiment[f"total_predicted_pixels_{dataset}"], \
experiment[f"total_groundtruth_pixels_{dataset}"], \
experiment[f"total_overlapping_pixels_{dataset}"], \
experiment[f"class_APs_{dataset}"], \
experiment[f"class_precision_{dataset}"], \
experiment[f"class_recalls_{dataset}"], \
experiment[f"class_overlaps_{dataset}"] = compute_both_batch_aps(image_ids, datasets[i+1], model, config, iterations=200) #get_stats(weights_file, datasets[i+1])
#objgraph.show_growth()
#roots = objgraph.get_leaking_objects()
#print(len(roots))
#tracker.print_diff()
update_dataframe(df_filepath, experiment)
else:
print("already in dataframe, skipping "+filename)
def create_dataframe(number=None, outfile="", folder="logs"):
print("LOADED IMPORTS - NOW RUNNING CODE")
df_filepath = f"./experiments_dataframe_{outfile}.csv"
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s',filename='log'+str(number)+'.log',level=logging.DEBUG)
experiments = []
files = [x for x in os.walk(f"{folder}/")][0][1]
directories = [f"{folder}/" + f for f in files]
print(directories)
for directory in directories:
experiment=directory_to_experiment_info(directory)
dataset_filepath = get_dataset_filepath(experiment)
ae_dataset_filepaths = get_ae2000_dataset_filepaths(experiment)
datasets=[dataset_filepath]#load_dataset(ae_dataset_filepath_dive1), load_dataset(ae_dataset_filepath_dive2), load_dataset(ae_dataset_filepath_dive3)]
datasets.extend(ae_dataset_filepaths)
datasets = [load_dataset(d) for d in datasets]
add_single_experiment(directory, df_filepath, datasets)
return
def max_overlaps(experiments):
for experiment in experiments:
new_overlaps=[]
print(experiment['overlaps'])
def experiment_in_dataframe(df_filepath, experiment):
if not os.path.exists(df_filepath):
return False
df = pd.read_csv(df_filepath)
pre_existing_experiment = df.loc[
(df["colour_correction_type"] == experiment["colour_correction_type"])
& (df["distortion_correction"] == experiment["distortion_correction"])
& (df["rescaled"] == experiment["rescaled"])
& (df["number"] == experiment["number"])
]
print(experiment["number"])
print(pre_existing_experiment[['colour_correction_type', 'distortion_correction', 'rescaled', 'number']])
return not pre_existing_experiment.empty
def update_dataframe(df_filepath, experiment):
if not os.path.exists(df_filepath):
df = pd.DataFrame(columns=experiment.keys())
else:
df = pd.read_csv(df_filepath, index_col=0)
df = df.append(experiment, ignore_index=True)
logging.debug("saving experiment to "+df_filepath)
df.to_csv(df_filepath)
def get_experiments():
df = pd.read_csv("./experiments_dataframe.csv")
return df
def array_num_to_dataset(number):
num_to_dataset_dict = {0:['histogram_normalised','no_distortion_correction','dropped_resolution'],
1:['histogram_normalised','no_distortion_correction','dropped_resolution_scaledup'],
2:['histogram_normalised','distortion_correction','dropped_resolution'],
3:['histogram_normalised','distortion_correction','dropped_resolution_scaledup'],
4:['greyworld_corrected','no_distortion_correction','dropped_resolution'],
5:['greyworld_corrected','no_distortion_correction','dropped_resolution_scaledup'],
6:['greyworld_corrected','distortion_correction','dropped_resolution'],
7:['greyworld_corrected','distortion_correction','dropped_resolution_scaledup'],
8:['attenuation_correction','no_distortion_correction','dropped_resolution'],
9:['attenuation_correction','no_distortion_correction','dropped_resolution_scaledup'],
10:['attenuation_correction','distortion_correction','dropped_resolution'],
11:['attenuation_correction','distortion_correction','dropped_resolution_scaledup'],
12:['histogram_normalised','no_distortion_correction','not_rescaled'],
13:['histogram_normalised','no_distortion_correction','rescaled'],
14:['histogram_normalised','distortion_correction','not_rescaled'],
15:['histogram_normalised','distortion_correction','rescaled'],
16:['greyworld_corrected','no_distortion_correction','not_rescaled'],
17:['greyworld_corrected','no_distortion_correction','rescaled'],
18:['greyworld_corrected','distortion_correction','not_rescaled'],
19:['greyworld_corrected','distortion_correction','rescaled'],
20:['attenuation_correction','no_distortion_correction','not_rescaled'],
21:['attenuation_correction','no_distortion_correction','rescaled'],
22:['attenuation_correction','distortion_correction','not_rescaled'],
23:['attenuation_correction','distortion_correction','rescaled']
}
return num_to_dataset_dict[number]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Pass job array id')
parser.add_argument('array_id', type=int, help='job array id')
parser.add_argument('outfile', type=str, help='output filename')
parser.add_argument('logs_folder', type=str, default='logs')
args = parser.parse_args()
number = args.array_id
outfile = args.outfile
logs_folder = args.logs_folder
create_dataframe(number=number, outfile=outfile, folder=logs_folder)
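# Example invocation, matching the positional arguments defined above (the script name is illustrative):
#   python evaluate_experiments.py 3 march2021 logs
# which walks every experiment directory under ./logs/ and appends the results to
# ./experiments_dataframe_march2021.csv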
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class InvoiceContactInfo(object):
def __init__(self):
self._contact_addr = None
self._contact_mail = None
self._contact_mobile = None
self._contact_name = None
@property
def contact_addr(self):
return self._contact_addr
@contact_addr.setter
def contact_addr(self, value):
self._contact_addr = value
@property
def contact_mail(self):
return self._contact_mail
@contact_mail.setter
def contact_mail(self, value):
self._contact_mail = value
@property
def contact_mobile(self):
return self._contact_mobile
@contact_mobile.setter
def contact_mobile(self, value):
self._contact_mobile = value
@property
def contact_name(self):
return self._contact_name
@contact_name.setter
def contact_name(self, value):
self._contact_name = value
def to_alipay_dict(self):
params = dict()
if self.contact_addr:
if hasattr(self.contact_addr, 'to_alipay_dict'):
params['contact_addr'] = self.contact_addr.to_alipay_dict()
else:
params['contact_addr'] = self.contact_addr
if self.contact_mail:
if hasattr(self.contact_mail, 'to_alipay_dict'):
params['contact_mail'] = self.contact_mail.to_alipay_dict()
else:
params['contact_mail'] = self.contact_mail
if self.contact_mobile:
if hasattr(self.contact_mobile, 'to_alipay_dict'):
params['contact_mobile'] = self.contact_mobile.to_alipay_dict()
else:
params['contact_mobile'] = self.contact_mobile
if self.contact_name:
if hasattr(self.contact_name, 'to_alipay_dict'):
params['contact_name'] = self.contact_name.to_alipay_dict()
else:
params['contact_name'] = self.contact_name
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = InvoiceContactInfo()
if 'contact_addr' in d:
o.contact_addr = d['contact_addr']
if 'contact_mail' in d:
o.contact_mail = d['contact_mail']
if 'contact_mobile' in d:
o.contact_mobile = d['contact_mobile']
if 'contact_name' in d:
o.contact_name = d['contact_name']
return o
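# A hedged round-trip example using only the methods defined above (field values are made up):
if __name__ == '__main__':
    info = InvoiceContactInfo.from_alipay_dict({
        'contact_name': 'Alice',
        'contact_mobile': '13800000000',
    })
    print(json.dumps(info.to_alipay_dict(), ensure_ascii=False))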
|
cont = 0
total = 0
soma = 0
num = int(input('Enter a number [999 to stop]: '))
while num != 999:
soma += num
total += 1
num = int(input('Enter a number [999 to stop]: '))
print('You entered {} numbers and their sum was {}'.format(total, soma))
|
# Copyright 2017-present Adtran, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from voltha.adapters.adtran_olt.codec.ietf_interfaces import (
IetfInterfacesConfig, IetfInterfacesState
)
from mock import MagicMock
from pytest_twisted import inlineCallbacks
from xmltodict import parse
def test_create_config():
IetfInterfacesConfig(None)
@inlineCallbacks
def test_get_config():
session = MagicMock()
ifc = IetfInterfacesConfig(session)
session.get.return_value = 'test value'
cfg = yield ifc.get_config()
assert 'test value' == cfg
assert ('running',) == session.get.call_args[0]
xml = parse(session.get.call_args[1]['filter'])
contents = {
'filter': {
'@xmlns': 'urn:ietf:params:xml:ns:netconf:base:1.0',
'interfaces': {
'@xmlns': 'urn:ietf:params:xml:ns:yang:ietf-interfaces',
'interface': None
}
}
}
assert contents == xml
def test_create_state():
IetfInterfacesState(None)
@inlineCallbacks
def test_get_state():
session = MagicMock()
ifc = IetfInterfacesState(session)
session.get.return_value = 'test value'
state = yield ifc.get_state()
assert 'test value' == state
xml = parse(session.get.call_args[0][0])
contents = {
'filter': {
'@xmlns': 'urn:ietf:params:xml:ns:netconf:base:1.0',
'interfaces-state': {
'@xmlns': 'urn:ietf:params:xml:ns:yang:ietf-interfaces',
'interface': {
'name': None,
'type': None,
'admin-status': None,
'oper-status': None,
'last-change': None,
'phys-address': None,
'speed': None
}
}
}
}
assert contents == xml
|
import sys
sys.path.append("../pyscatwave/")
import numpy as np
import scipy as sp
import scipy.io
import scipy.optimize
import torch
from torch.autograd import Variable
from full_embedding import FullEmbedding
from solver_hack_full import SolverHack, MSELoss, offset_greed_search_psnr
from check_conv_criterion import CheckConvCriterion, SmallEnoughException
from utils import cuda_available
import make_figs as signal
from loss import PSNR
import matplotlib.pyplot as plt
from time import time
from tqdm import tqdm, trange
from itertools import product
from termcolor import colored
do_cuda = True  # if True, will check whether CUDA is available and use the GPU accordingly
print()
cuda = cuda_available()
if do_cuda or not cuda:
print("CUDA available: {}\n".format(cuda))
else:
print("CUDA denied\n".format(cuda))
cuda = False
#----- Analysis parameters -----
T = 2**14
J = (9)
Q = 10
# Select which scattering orders and phase harmonic coefficients are used
scatt_orders = (1, 2)
phe_coeffs = ('harmonic', 'mixed')
# Parameters for phase harmonic coefficients
deltaj = 3
deltak = (0,)
num_k_modulus = 0
delta_cooc = 2
wavelet_type = 'morlet'
high_freq = 0.425 # Center frequency of mother wavelet
max_chunk = 2 # None # 50 # None # Process coefficients in chunks of size max_chunk if too much memory is used
alpha = 0.5 # weight of scattering in loss, phase harmonic has weight 1 - alpha
maxiter = 1000
tol = 1e-12
#----- Create data -----
n_dirac = 10 # number of discontinuities
x0 = signal.staircase(T, n_dirac) # Created as torch tensor
# Convert to numpy and create random intialization
x0 = x0.cpu().numpy()[0, 0]
#----- Setup Analysis -----
# Pack parameters for phase harmonics and scattering
phe_params = {
'delta_j': deltaj, 'delta_k': deltak,
'wav_type':wavelet_type, 'high_freq':high_freq,
'delta_cooc': delta_cooc, 'max_chunk': max_chunk
}
scatt_params = dict()
# Create full embedding that combines scattering and phase harmonics
phi = FullEmbedding(
T, J, Q, phe_params=phe_params, scatt_params=scatt_params,
scatt_orders=scatt_orders, phe_coeffs=phe_coeffs)
num_coeff, nscat = phi.shape(), phi.count_scatt_coeffs()
nharm = num_coeff - nscat
# Create loss object
loss_fn = MSELoss(phi, alpha=alpha, use_cuda=cuda)
# Create solver object to interface embeddings with scipy's optimize
solver_fn = SolverHack(phi, x0, loss_fn, cuda=cuda)
if cuda:
phi=phi.cuda()
loss_fn = loss_fn.cuda()
solver_fn = solver_fn.cuda()
# Create object that checks and prints convergence information
check_conv_criterion = CheckConvCriterion(solver_fn, 1e-24)
# Decide how to compute the gradient
jac = True # True: provided by solver_fn, False: computed numerically by minimize
func = solver_fn.joint if jac else solver_fn.function
#----- Optimization -----
# Initial point
xini = np.random.randn(*x0[None, None].shape)
tic = time()
try:
options = {'maxiter': maxiter, 'maxfun': maxiter}
res = sp.optimize.minimize(
solver_fn.joint, xini, method='L-BFGS-B', jac=jac,
callback=check_conv_criterion, tol=tol,
options=options)
x, niter, loss, msg = res['x'], res['nit'], res['fun'], res['message']
except SmallEnoughException:
print('Finished through SmallEnoughException')
toc = time()
# Recover final loss
final_loss, final_grad = solver_fn.joint(x)
final_gloss = np.linalg.norm(final_grad, ord=float('inf'))
if not isinstance(msg, str):
msg = msg.decode("ASCII")
print(colored('Optimization Exit Message : ' + msg, 'blue'))
print(colored("found parameters in {}s, {} iterations -- {}it/s".format(
round(toc - tic, 4), niter, round(niter / (toc - tic), 2)), 'blue'))
print(colored(" relative error {:.3E}".format(final_loss), 'blue'))
print(colored(" relative gradient error {:.3E}".format(final_gloss), 'blue'))
x0_norm_msg = " x0 norm S{:.2E} H{:.2E}".format(
float(solver_fn.loss_scat0.data.cpu().numpy()),
float(solver_fn.loss_scat0.data.cpu().numpy())
)
print(colored(x0_norm_msg, 'blue'))
# Recover log of loss throughout optimization
logs_loss = check_conv_criterion.logs_loss
logs_grad = check_conv_criterion.logs_grad
logs_scat = check_conv_criterion.logs_scat
logs_harm = check_conv_criterion.logs_harm
# Recenter data and compute PSNR:
offset = offset_greed_search_psnr(x, x0)
x = np.roll(x, offset)
#----- Plot results -----
plt.figure()
plt.subplot(211)
plt.plot(x0)
plt.subplot(212)
plt.plot(x)
plt.show()
|
# importing packages
from pytube import YouTube
from multiprocessing.pool import Pool
def download(value):
for count in range(0, 3):
try:
initial_list = YouTube(value)
audio = initial_list.streams.filter(only_audio=True, file_extension='webm').first()
audio.download("songs")
return True
except Exception:
pass
return False
def main():
with open("song_list.txt", "r") as f:
songs = f.read()
pool = Pool()
song_list = songs.strip().split("\n")
song_list = list(set(song_list))
results = pool.map(download, song_list)
for link, success in zip(song_list, results):
print(f"Success - {success} ||| link - {link}")
if __name__ == "__main__":
main() |
'''
Function:
Fibonacci sequence
Author:
Charles
'''
class Solution:
memory = {}
def fib(self, n: int) -> int:
if n <= 1: return n
if n not in self.memory:
self.memory[n] = (self.fib(n-1) + self.fib(n-2)) % 1000000007
return self.memory[n] |
from flask import render_template_string
import flask_featureflags
from flask_caching import Cache
from dmutils.flask_init import pluralize, init_manager
from dmutils.forms import FakeCsrf
from .helpers import BaseApplicationTest
import pytest
@pytest.mark.parametrize("count,singular,plural,output", [
(0, "person", "people", "people"),
(1, "person", "people", "person"),
(2, "person", "people", "people"),
])
def test_pluralize(count, singular, plural, output):
assert pluralize(count, singular, plural) == output
class TestDevCacheInit(BaseApplicationTest):
def setup(self):
self.cache = Cache()
self.config.DM_CACHE_TYPE = 'dev'
super(TestDevCacheInit, self).setup()
def test_config(self):
assert self.cache.config['CACHE_TYPE'] == 'simple'
class TestProdCacheInit(BaseApplicationTest):
def setup(self):
self.cache = Cache()
self.config.DM_CACHE_TYPE = 'prod'
super(TestProdCacheInit, self).setup()
def test_config(self):
assert self.cache.config['CACHE_TYPE'] == 'filesystem'
class TestInitManager(BaseApplicationTest):
def test_init_manager(self):
init_manager(self.flask, 5000, [])
class TestFeatureFlags(BaseApplicationTest):
def setup(self):
self.config.FEATURE_FLAGS = {
'YES': True,
'NO': False,
}
super(TestFeatureFlags, self).setup()
def test_flags(self):
with self.flask.app_context():
assert flask_featureflags.is_active('YES')
assert not flask_featureflags.is_active('NO')
class TestCsrf(BaseApplicationTest):
def setup(self):
super(TestCsrf, self).setup()
@self.flask.route('/thing', methods=['POST'])
def post_endpoint():
return 'done'
def test_csrf_okay(self):
res = self.app.post(
'/thing',
data={'csrf_token': FakeCsrf.valid_token},
)
assert res.status_code == 200
def test_csrf_missing(self):
res = self.app.post('/thing')
assert res.status_code == 400
def test_csrf_wrong(self):
res = self.app.post(
'/thing',
data={'csrf_token': 'nope'},
)
assert res.status_code == 400
def test_new_style_csrf(self):
with self.app.session_transaction() as sess:
sess['csrf_token'] = 'abc123'
sess['_csrf_token'] = 'def456'
for t in ['abc123', 'def456']:
res = self.app.post(
'/thing',
headers={
'X-CSRFToken': t
}
)
assert res.status_code == 200
res = self.app.post(
'/thing',
data={'csrf_token': t}
)
assert res.status_code == 200
res = self.app.post(
'/thing',
data={'_csrf_token': t}
)
assert res.status_code == 200
BAD = 'bad'
res = self.app.post(
'/thing',
headers={
'X-CSRFToken': BAD
}
)
assert res.status_code == 400
res = self.app.post(
'/thing',
data={'csrf_token': BAD}
)
assert res.status_code == 400
res = self.app.post(
'/thing',
data={'_csrf_token': BAD}
)
assert res.status_code == 400
class TestTemplateFilters(BaseApplicationTest):
# formats themselves are tested in test_formats
def test_timeformat(self):
with self.flask.app_context():
template = '{{ "2000-01-01T00:00:00.000000Z"|timeformat }}'
result = render_template_string(template)
assert result.strip()
def test_shortdateformat(self):
with self.flask.app_context():
template = '{{ "2000-01-01T00:00:00.000000Z"|shortdateformat }}'
result = render_template_string(template)
assert result.strip()
def test_dateformat(self):
with self.flask.app_context():
template = '{{ "2000-01-01T00:00:00.000000Z"|dateformat }}'
result = render_template_string(template)
assert result.strip()
def test_datetimeformat(self):
with self.flask.app_context():
template = '{{ "2000-01-01T00:00:00.000000Z"|datetimeformat }}'
result = render_template_string(template)
assert result.strip()
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
class Preprocessor:
thresh_sobelx_max = {'gray': 100, 'sat': 100}
thresh_sobelx_min = {'gray': 50 , 'sat': 20 }
def crop(self, image):
#Clip the ROI
xLength = image.shape[1]
yLength = image.shape[0]
resultImg = np.copy(image)
shiftUp = 100/720 * yLength  # 75/540 * yLength; #75
shiftSideUp = 550/1280 * xLength  # 400/960 * xLength; #400
BoundaryUp = image.shape[0]/2 + shiftUp
BoundaryDown = yLength
BoundaryUpLX = shiftSideUp
BoundaryUpRX = xLength - shiftSideUp
LeftUp = [BoundaryUpLX, BoundaryUp]
DownIdent = 0
LeftDown = [DownIdent, BoundaryDown]
RightUp = [BoundaryUpRX, BoundaryUp]
RightDown = [xLength - DownIdent, BoundaryDown]
BoundaryL = np.polyfit([LeftDown[0], LeftUp[0]], [LeftDown[1], LeftUp[1]], 1)
BoundaryR = np.polyfit([RightDown[0], RightUp[0]], [RightDown[1], RightUp[1]], 1)
XX, YY = np.meshgrid(np.arange(0, image.shape[1]), np.arange(0, image.shape[0]))
GoodIndecesR = (YY >= (XX * BoundaryR[0] + BoundaryR[1])) & (YY > RightUp[1])
GoodIndecesL = (YY >= (XX * BoundaryL[0] + BoundaryL[1])) & (YY > LeftUp[1])
GoodIndeces = GoodIndecesL & GoodIndecesR
badIndeces = ~GoodIndeces
resultImg[badIndeces] = 0
#in the future, those control points should be dynamically refined
#controlPts = np.float32([LeftDown, LeftUp, RightUp, RightDown])
return resultImg
def extractChannel(self, img, mode):
if(mode == 'gray'):
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
return gray
else:
if(mode == 'sat'):
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
sat = hls[:,:,2]
return sat
def extractEdges(self, img, mode):
if(mode == 'all'):
gray = self.extractChannel(img, 'gray')
grayBinary = self.applySobel(gray, self.thresh_sobelx_min['gray'], self.thresh_sobelx_max['gray'])
sat = self.extractChannel(img, 'sat')
satBinary = self.applySobel(sat, self.thresh_sobelx_min['sat'], self.thresh_sobelx_max['sat'])
result = grayBinary | satBinary
return result
else:
channel = self.extractChannel(img, mode)
channelBinary = self.applySobel(channel, self.thresh_sobelx_min[mode], self.thresh_sobelx_max[mode])
return channelBinary
def applySobel(self, img, thresh_min, thresh_max):
sobel_kernel = 5
# Take the gradient in x
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
# Scale the absolute gradient to 8-bit (0 - 255) and convert to type = np.uint8
scaled_sobel = np.uint8(255 * np.absolute(sobelx) / np.max(np.absolute(sobelx)))
# Create a binary mask where mag thresholds are met
binary_output = np.zeros_like(scaled_sobel)
binary_output[(scaled_sobel > thresh_min) & (scaled_sobel <= thresh_max)] = 255
return binary_output
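# A hedged usage sketch (the image path is illustrative; assumes an RGB road image on disk):
if __name__ == '__main__':
    pre = Preprocessor()
    img = mpimg.imread('test_images/road.jpg')  # matplotlib reads RGB, matching the RGB2* conversions above
    edges = pre.extractEdges(pre.crop(img), 'all')
    plt.imshow(edges, cmap='gray')
    plt.show()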
|
import contextlib
import inspect
import io
from enum import Enum
from functools import lru_cache
from importlib.metadata import entry_points
from typing import Type, TextIO, Optional, TypeVar, List, get_args, get_origin
from uuid import UUID
from pydantic import BaseModel
_EDB_TYPES = {
"string": "str",
"boolean": "bool",
"integer": "int64",
"array": "array",
}
class DatabaseModel(BaseModel):
id: UUID = None
__declarations__ = []
__edb_module__ = None
@classmethod
@lru_cache()
def edb_schema(cls, current_module="default", self_only=True):
schema = cls.schema()
def _get_type(attr):
if "type" in attr:
return _EDB_TYPES[attr["type"]]
else:
rv = attr["$ref"].split("/")[-1]
mod_name = schema["definitions"][rv].get(
"module", current_module
)
if mod_name != current_module:
rv = f"{mod_name}::{rv}"
return rv
def _is_link(attr):
if "type" in attr:
return False
else:
type_ = attr["$ref"].split("/")[-1]
return schema["definitions"][type_]["type"] == "object"
title = schema["title"]
module = cls.__edb_module__ or "default"
if module != current_module:
title = f"{module}::{title}"
inherited_props = set()
parents = []
for p in cls.__mro__[1:]:
if not DatabaseModel.issubclass(p):
continue
p_schema = p.edb_schema(current_module)
inherited_props.update(p_schema["properties"])
parents.append(p_schema["title"])
required = set(schema.get("required", []))
props = {}
for name, attr in schema["properties"].items():
edb_prop = {}
if name == "id":
continue
if self_only and name in inherited_props:
continue
if name in required:
edb_prop["required"] = True
if _is_link(attr):
edb_prop["declaration"] = "link"
else:
edb_prop["declaration"] = "property"
edb_prop["type"] = _get_type(attr)
if "items" in attr:
edb_prop["items"] = {"type": _get_type(attr["items"])}
if "constraint" in attr:
edb_prop["constraint"] = attr["constraint"]
props[name] = edb_prop
definitions = {}
for definition in schema.get("definitions", {}).values():
if "enum" in definition:
definitions[definition["title"]] = {
"enum": definition["enum"],
"type": _EDB_TYPES[definition["type"]],
}
return {
"title": title,
"parents": parents,
"properties": props,
"declarations": cls.__declarations__,
"definitions": definitions,
}
@classmethod
def issubclass(cls, v):
return cls in getattr(v, "__mro__", [v])[1:]
@classmethod
def from_obj(cls, obj):
values = {}
for key in dir(obj):
value = getattr(obj, key)
key_type = cls.__annotations__.get(key)
if hasattr(key_type, "from_obj"):
value = key_type.from_obj(value)
elif get_origin(key_type) is list:
sub_type = get_args(key_type)[0]
if issubclass(type(sub_type), type) and issubclass(
sub_type, Enum
):
value = [sub_type(str(val)) for val in value]
else:
value = [val for val in value]
elif issubclass(type(key_type), type) and issubclass(
key_type, Enum
):
value = key_type(str(value))
values[key] = value
return cls.construct(**values)
@classmethod
def select(cls, *expressions, current_module="default", filters=None):
buf = io.StringIO()
schema = cls.edb_schema(current_module)
buf.write(f"SELECT {schema['title']}")
if expressions:
with _curley_braces(buf) as inf:
for exp in expressions:
print(f"{exp},", file=inf)
elif filters:
buf.write(" ")
if filters:
buf.write("FILTER ")
buf.write(filters)
return buf.getvalue()
def _compile_values(
self, schema, buf, extra_values, include, exclude=None
):
d = self.dict(
include=include or set(schema["properties"]),
exclude=exclude,
exclude_unset=True,
)
if d or extra_values:
with _curley_braces(buf) as inf:
for name, value in d.items():
if name in extra_values:
continue
attr = schema["properties"][name]
type_ = attr["type"]
if type_ == "array" and "items" in attr:
type_ = f"<array<{attr['items']['type']}>><array<str>>"
else:
type_ = f"<{type_}>"
print(f"{name} := {type_}${name},", file=inf)
for name, value in extra_values.items():
print(f"{name} := ({value}),", file=inf)
return True
else:
return False
def insert(
self,
current_module="default",
include=None,
conflict_on=None,
conflict_else=None,
**extra_values,
):
buf = io.StringIO()
schema = self.edb_schema(current_module, self_only=False)
buf.write(f"INSERT {schema['title']}")
if (
not self._compile_values(schema, buf, extra_values, include)
and conflict_on
):
buf.write(" ")
if conflict_on:
buf.write("UNLESS CONFLICT ")
if conflict_on:
buf.write("ON ")
buf.write(conflict_on)
if conflict_else:
buf.write(f" ELSE ({conflict_else.strip()})")
return buf.getvalue()
def update(
self,
current_module="default",
include=None,
exclude=None,
filters=None,
**extra_values,
):
buf = io.StringIO()
schema = self.edb_schema(current_module, self_only=False)
buf.write(f"UPDATE {schema['title']}")
if filters:
buf.write(f" FILTER {filters}")
buf.write(" SET")
self._compile_values(schema, buf, extra_values, include, exclude)
return buf.getvalue()
class Config:
@staticmethod
def schema_extra(schema, model):
schema["module"] = model.__edb_module__
class Declaration:
def __init__(self):
frame = inspect.currentframe()
while frame.f_locals.get("self", None) is self:
frame = frame.f_back
frame.f_locals.setdefault("__declarations__", []).append(self)
def compile(self, buf: TextIO):
pass
class Constraint(Declaration):
def __init__(self, name: str, on: Optional[str] = None):
super().__init__()
self.name = name
self.on = on
def compile(self, buf: TextIO):
buf.write(f"constraint {self.name}")
if self.on:
buf.write(f" on ({self.on})")
print(";", file=buf)
class ExclusiveConstraint(Constraint):
def __init__(self, *properties):
props_str = ", ".join((f".{name}" for name in properties))
if len(properties) > 1:
props_str = f"({props_str})"
super().__init__("exclusive", props_str)
class ComputableProperty(Declaration):
def __init__(self, name: str, expression: str, required=False):
super().__init__()
self.name = name
self.expression = expression
self.required = required
def compile(self, buf: TextIO):
if self.required:
buf.write("required ")
print(f"property {self.name} := ({self.expression});", file=buf)
class ExtendedComputableProperty(Declaration):
def __init__(
self, name: str, expression: str, required=False, exclusive=False
):
super().__init__()
self.name = name
self.expression = expression
self.required = required
self.exclusive = exclusive
def compile(self, buf: TextIO):
if self.required:
buf.write("required ")
with _curley_braces(
buf, f"property {self.name}", semicolon=True
) as inf:
print(f"USING ({self.expression});", file=inf)
if self.exclusive:
print("constraint exclusive;", file=inf)
def with_block(module=None, **expressions):
f = io.StringIO()
f.write("WITH ")
if module:
f.write(f"MODULE {module}")
if expressions:
f.write(", ")
for i, (name, exp) in enumerate(expressions.items()):
f.write(f"{name} := ({exp})")
if i < len(expressions) - 1:
f.write(", ")
f.write("\n")
return f.getvalue()
ActualType = TypeVar("ActualType")
def prop(type_: Type[ActualType], **kwargs) -> Type[ActualType]:
class _Type(type_):
@classmethod
def __modify_schema__(cls, field_schema):
field_schema.update(kwargs)
return _Type
@lru_cache()
def get_models():
from .idp.base import get_idps
py_mods = []
for ep in entry_points()["authub.modules"]:
py_mod = ep.load()
py_mods.append((ep.name, py_mod))
for idp in get_idps().values():
py_mods.append((idp.name, idp.module))
py_mod_names = set()
for name, py_mod in py_mods:
py_mod_names.add(py_mod.__name__)
models = {}
for name, py_mod in py_mods:
for k in dir(py_mod):
if k.startswith("_"):
continue
v = getattr(py_mod, k)
if not DatabaseModel.issubclass(v):
continue
if (
v.__module__ in py_mod_names
and v.__module__ != py_mod.__name__
):
continue
models[v] = None
if "__edb_module__" not in v.__dict__:
v.__edb_module__ = name
return list(models)
class IndentIO(io.TextIOBase):
def __init__(self, wrapped_io):
self._io = wrapped_io
self._indent = True
def write(self, text: str) -> int:
rv = 0
if self._indent:
rv += self._io.write(" ")
self._indent = False
if text.endswith("\n"):
text = text[:-1]
self._indent = True
rv += self._io.write(text.replace("\n", "\n "))
if self._indent:
rv += self._io.write("\n")
return rv
@contextlib.contextmanager
def _curley_braces(f: TextIO, text: str = "", semicolon=False) -> TextIO:
print(text + " {", file=f)
yield IndentIO(f)
if semicolon:
print("};", file=f)
else:
print("}", file=f)
def _compile_definitions(f: TextIO, models: List[Type[DatabaseModel]]):
definitions = {}
for v in models:
schema = v.edb_schema(v.__edb_module__)
for name, definition in schema["definitions"].items():
definitions[name] = definition
for name, definition in definitions.items():
choices = ", ".join((str(val) for val in definition["enum"]))
print(f"scalar type {name} extending enum<{choices}>;", file=f)
if definitions:
print(file=f)
def _compile_schema(f: TextIO, v: Type[DatabaseModel]):
schema = v.edb_schema(v.__edb_module__)
extending = ""
if schema["parents"]:
extending = " extending " + ", ".join(schema["parents"])
with _curley_braces(f, f"type {schema['title']}{extending}") as tf:
for name, attr in schema["properties"].items():
if attr.get("required"):
tf.write("required ")
tf.write(f"{attr['declaration']} {name} -> {attr['type']}")
if "items" in attr:
tf.write(f"<{attr['items']['type']}>")
if "constraint" in attr:
with _curley_braces(tf, semicolon=True) as af:
af.write("constraint ")
af.write(attr["constraint"])
print(";", file=af)
else:
print(";", file=tf)
for dec in schema["declarations"]:
dec.compile(tf)
def compile_schema(schema_dir):
"""Update database schema SDL."""
models_by_module_name = {}
for model in get_models():
models_by_module_name.setdefault(model.__edb_module__, []).append(
model
)
for module_name, models in models_by_module_name.items():
buf = io.StringIO()
with _curley_braces(
buf, f"module {module_name}", semicolon=True
) as mf:
_compile_definitions(mf, models)
for i, v in enumerate(models):
_compile_schema(mf, v)
if i < len(models) - 1:
print(file=buf)
with (schema_dir / f"{module_name}.esdl").open("w") as f:
f.write(buf.getvalue())
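# A hedged usage sketch (the User model below is illustrative and not part of this module):
if __name__ == "__main__":
    class User(DatabaseModel):
        __edb_module__ = "default"
        name: str
        email: str

    # Generates EdgeQL text only; nothing is executed against a database here.
    print(User.select("name", filters=".email = <str>$email"))
    print(User(name="Ada", email="ada@example.com").insert())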
|
__author__ = 'super3'
|
#!/usr/bin/env python
import unittest
from tempfile import mktemp, mkstemp
import os
from anuga.utilities.data_audit import *
class Test_data_audit(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_license_file_is_not_valid1(self):
"""Basic test using an invalid XML file. This one
should fail on bad CRC checksum
"""
# Generate invalid checksum example
tmp_name = mktemp(suffix='.asc')
fid = open(tmp_name, 'w')
string = 'Example data file with textual content. AAAABBBBCCCC1234'
fid.write(string)
fid.close()
# Create associated license file
basename, ext = os.path.splitext(tmp_name)
license_filename = basename + '.lic'
licfid = open(license_filename, 'w')
xml_string = """<?xml version="1.0" encoding="iso-8859-1"?>
<ga_license_file>
<metadata>
<author>Ole Nielsen</author>
</metadata>
<datafile>
<filename>%s</filename>
<checksum>-111111</checksum>
<publishable>Yes</publishable>
<accountable>Jane Sexton</accountable>
<source>Unknown</source>
<IP_owner>Geoscience Australia</IP_owner>
<IP_info>This is a polygon comprising easting and northing locations
tracing parts of the coastline at Dampier WA as well as a rectangular area inland.
This is used to specifically set the onshore initial condition in a tsunami scenario
and here, it is used with a unit test in test_polygon.py.
The coastline was derived from Maritime Boundaries which is a public dataset. However,
rumour has it that some of it was digitised from a Landgate supplied image.
The origin and license issues are still undecided</IP_info>
</datafile>
</ga_license_file>
""" % tmp_name
licfid.write(xml_string)
licfid.close()
#licfid = open(license_filename)
# print licfid.read()
# licfid.close()
try:
license_file_is_valid(license_filename, tmp_name)
except CRCMismatch:
pass
else:
msg = 'Should have raised bad CRC exception'
raise Exception(msg)
# Clean up
os.remove(license_filename)
try:
os.remove(tmp_name)
        except OSError:
            # FIXME(DSG): Windows seems to have a problem deleting this file.
            # This is a workaround; it doesn't fix the root problem,
            # but it does delete the file.
fid = open(tmp_name, 'a')
string = 'Example data file'
fid.write(string)
fid.close()
os.remove(tmp_name)
def test_license_file_is_not_valid2(self):
"""Basic test using an invalid XML file. This one
should fail on Not Publishable
"""
# Generate invalid checksum example
tmp_name = mktemp(suffix='.asc')
fid = open(tmp_name, 'w')
string = 'Example data file with textual content. AAAABBBBCCCC1234'
fid.write(string)
fid.close()
# Create associated license file
basename, ext = os.path.splitext(tmp_name)
license_filename = basename + '.lic'
licfid = open(license_filename, 'w')
xml_string = """<?xml version="1.0" encoding="iso-8859-1"?>
<ga_license_file>
<metadata>
<author>Ole Nielsen</author>
</metadata>
<datafile>
<filename>%s</filename>
<checksum>2810517858</checksum>
<publishable>no</publishable>
<accountable>Jane Sexton</accountable>
<source>Unknown</source>
<IP_owner>Geoscience Australia</IP_owner>
<IP_info>This is a polygon comprising easting and northing locations</IP_info>
</datafile>
</ga_license_file>
""" % tmp_name
licfid.write(xml_string)
licfid.close()
licfid = open(license_filename)
# print licfid.read()
try:
license_file_is_valid(licfid, tmp_name)
except NotPublishable:
pass
else:
msg = 'Should have raised NotPublishable exception'
raise Exception(msg)
# Clean up
licfid.close()
os.remove(license_filename)
fid.close()
try:
os.remove(tmp_name)
        except OSError:
            # FIXME(DSG): Windows seems to have a problem deleting this file.
            # This is a workaround; it doesn't fix the root problem,
            # but it does delete the file.
fid = open(tmp_name, 'a')
string = 'Example data file'
fid.write(string)
fid.close()
os.remove(tmp_name)
def test_license_file_is_not_valid3(self):
"""Basic test using an invalid XML file. This one
should fail on Filename Mismatch
"""
tmp_fd, tmp_name = mkstemp(suffix='.asc', dir='.')
fid = os.fdopen(tmp_fd, 'w')
string = 'Example data file with textual content. AAAABBBBCCCC1234'
fid.write(string)
fid.close()
# Create associated license file
basename, ext = os.path.splitext(tmp_name)
license_filename = basename + '.lic'
licfid = open(license_filename, 'w')
xml_string = """<?xml version="1.0" encoding="iso-8859-1"?>
<ga_license_file>
<metadata>
<author>Ole Nielsen</author>
</metadata>
<datafile>
<filename>%s</filename>
<checksum>2810517858</checksum>
<publishable>Yes</publishable>
<accountable>Jane Sexton</accountable>
<source>Unknown</source>
<IP_owner>Geoscience Australia</IP_owner>
<IP_info>This is a polygon comprising easting and northing locations</IP_info>
</datafile>
</ga_license_file>
""" % (basename + '.no_exist')
licfid.write(xml_string)
licfid.close()
licfid = open(license_filename)
# print licfid.read()
try:
license_file_is_valid(licfid, basename + '.no_exist')
except FilenameMismatch:
pass
else:
msg = 'Should have raised FilenameMismatch exception'
raise Exception(msg)
# Clean up
licfid.close()
fid.close()
os.remove(license_filename)
os.remove(tmp_name)
def test_license_file_is_valid(self):
"""Basic test using an valid XML file
"""
# Generate valid example
tmp_name = mktemp(suffix='.asc')
fid = open(tmp_name, 'w')
string = 'Example data file with textual content. AAAABBBBCCCC1234'
fid.write(string)
fid.close()
# Strip leading dir (./)
#data_filename = os.path.split(tmp_name)[1]
# print 'Name', data_filename
# Create associated license file
basename, ext = os.path.splitext(tmp_name)
license_filename = basename + '.lic'
licfid = open(license_filename, 'w')
xml_string = """<?xml version="1.0" encoding="iso-8859-1"?>
<ga_license_file>
<metadata>
<author>Ole Nielsen</author>
</metadata>
<datafile>
<filename>%s</filename>
<checksum>%s</checksum>
<publishable>Yes</publishable>
<accountable>Jane Sexton</accountable>
<source>Unknown</source>
<IP_owner>Geoscience Australia</IP_owner>
<IP_info>This is a test</IP_info>
</datafile>
</ga_license_file>
""" % (tmp_name, '2810517858')
licfid.write(xml_string)
licfid.close()
license_file_is_valid(license_filename, tmp_name)
# Clean up
os.remove(license_filename)
os.remove(tmp_name)
def test_valid_license_file_with_multiple_files(self):
"""Test of XML file with more than one datafile element.
"""
# Generate example files
tmp_name = mktemp(suffix='.asc')
fid = open(tmp_name, 'w')
string = 'Example data file with textual content. AAAABBBBCCCC1234'
fid.write(string)
fid.close()
# Derive filenames
basename, ext = os.path.splitext(tmp_name)
data_filename1 = basename + '.asc'
data_filename2 = basename + '.prj'
license_filename = basename + '.lic'
# print data_filename1, data_filename2, license_filename
# Write data to second data file
fid = open(data_filename2, 'w')
string = 'Another example data file with text in it'
fid.write(string)
fid.close()
# Create license file
licfid = open(license_filename, 'w')
xml_string = """<?xml version="1.0" encoding="iso-8859-1"?>
<ga_license_file>
<metadata>
<author>Ole Nielsen</author>
</metadata>
<datafile>
<filename>%s</filename>
<checksum>%s</checksum>
<publishable>Yes</publishable>
<accountable>Jane Sexton</accountable>
<source>Generated on the fly</source>
<IP_owner>Geoscience Australia</IP_owner>
<IP_info>This is a test</IP_info>
</datafile>
<datafile>
<filename>%s</filename>
<checksum>%s</checksum>
<publishable>Yes</publishable>
<accountable>Ole Nielsen</accountable>
<source>Generated on the fly</source>
<IP_owner>Geoscience Australia</IP_owner>
<IP_info>This is another test</IP_info>
</datafile>
</ga_license_file>
""" % (data_filename1, '2810517858', data_filename2, '2972536556')
licfid.write(xml_string)
licfid.close()
licfid = open(license_filename)
license_file_is_valid(licfid, data_filename1)
license_file_is_valid(licfid, data_filename2)
licfid.close()
# Clean up
os.remove(license_filename)
os.remove(data_filename1)
os.remove(data_filename2)
################################################################################
if __name__ == "__main__":
suite = unittest.makeSuite(Test_data_audit, 'test')
runner = unittest.TextTestRunner()
runner.run(suite)
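# Hedged sketch (illustrative, not part of the original test suite): each data
# file is paired with a .lic XML file whose <checksum> element must match the
# data file. Assuming that checksum is a CRC-32 of the raw file contents (which
# the CRCMismatch exception above suggests), a matching license file could be
# written roughly as follows; the author/owner strings are placeholders.
def _example_write_license(data_filename):  # illustrative helper, never called
    import zlib
    with open(data_filename, 'rb') as fid:
        crc = zlib.crc32(fid.read()) & 0xffffffff
    basename, _ = os.path.splitext(data_filename)
    with open(basename + '.lic', 'w') as licfid:
        licfid.write("""<?xml version="1.0" encoding="iso-8859-1"?>
<ga_license_file>
  <metadata>
    <author>Example Author</author>
  </metadata>
  <datafile>
    <filename>%s</filename>
    <checksum>%s</checksum>
    <publishable>Yes</publishable>
    <accountable>Example Person</accountable>
    <source>Generated on the fly</source>
    <IP_owner>Example Owner</IP_owner>
    <IP_info>Illustrative example only</IP_info>
  </datafile>
</ga_license_file>
""" % (data_filename, crc))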
|
from __future__ import division
from gurobipy import *
from miscs import *
import numpy as np
# The following tests are prepared for double checking.
def CTbaseline(k, rest):
tmpSum = 0.0
for i in rest:
        tmpSum += utiliAddE(i)
tmpSum = utiliAddE(k)+tmpSum
#print tmpSum
if tmpSum <= np.log(2):
return True
else:
return False
def k2uFirstCarryinhypo(k, rest):
#Lemma 9a
tmpSum = 1.0
for i in rest:
tmpSum*=(utili(i)+1)
tmpSum = (utiliAddE(k)+2)*tmpSum
#print tmpSum
if tmpSum <= 3:
return True
else:
return False
def k2uFirstCarryinUbound(k, rest):
#Lemma 9b
tmpSum = 0.0
for i in rest:
tmpSum += utili(i)
#print tmpSum
#print np.log(3/(utiliAddE(k)+2))
#print ""
if tmpSum <= np.log(3/(utiliAddE(k)+2)):
return True
else:
return False
def k2uSecondBlockinghypo(k, rest):
#Lemma 10a
tmpSum = 0.0
tmpSumP = 1.0
for i in rest:
tmpSum += min(i['shared-R'], i['exclusive-R'])
tmpSumP *= utili(i)+1
res = ((k['shared-R']+tmpSum+k['exclusive-R'])/k['period']+1)
if res * tmpSumP <= 2:
return True
else:
return False
def k2uSecondBlockingUbound(k, rest):
#Lemma 10b
tmpSum = 0.0
tmpSumP = 0.0
for i in rest:
tmpSum += min(i['shared-R'], i['exclusive-R'])
tmpSumP += utili(i)
#print (k['shared-R']+k['exclusive-R']+tmpSum)/k['period']+tmpSumP
#print (len(rest)+1)
#print (2**(1/(len(rest)+1))-1)
if ((k['shared-R']+k['exclusive-R']+tmpSum)/k['period'])+tmpSumP<=(len(rest)+1)*(2**(1/(len(rest)+1))-1):
return True
else:
return False
def k2qJitterBound(k, rest):
#Lemma 11
tmpSum = 0.0
tmpSumP = 0.0
for i in rest:
tmpSum += vfunc(i)
tmpSumP += utili(i)
#if (k['shared-R']+k['exclusive-R']+tmpSum)/1-tmpSumP <= k['period']:
if utiliAddE(k)+tmpSum/k['period']+tmpSumP<=1 and tmpSumP <=1:
return True
else:
return False
def inflation(k, rest, alltasks):
#Lemma 12
tmpSum = 0.0
for i in rest:
tmpSum += utili(i)
if utili(k)+tmpSum <= np.log(3/(2+max(utiliAddE(j) for j in alltasks))):
return True
else:
return False
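# Hedged usage sketch (illustrative, not part of the original module): judging
# from the keys accessed above, a task is a dict with 'shared-R', 'exclusive-R'
# and 'period' entries, and utili()/utiliAddE() from miscs presumably derive a
# task's utilization (without/with the extra resource term) from such a dict.
# A check of task k against its higher-priority tasks `rest` might then read:
def _example_schedulability_check():  # illustrative helper, never called
    k = {'shared-R': 1.0, 'exclusive-R': 0.5, 'period': 10.0}
    rest = [
        {'shared-R': 0.5, 'exclusive-R': 0.2, 'period': 20.0},
        {'shared-R': 0.3, 'exclusive-R': 0.1, 'period': 40.0},
    ]
    # k passes if either the hyperbolic bound (Lemma 10a) or the utilization
    # bound (Lemma 10b) holds.
    return k2uSecondBlockinghypo(k, rest) or k2uSecondBlockingUbound(k, rest)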
|
#! /usr/bin/env python
from __future__ import print_function
from __future__ import division
from manual_preds import manual_train_and_predict
from info_reduc import random_error
from auto_preds import auto_train_and_predict
from sklearn.preprocessing import scale
import pickle
import math
import numpy as np
import pandas as pd
import glob
import os
class LearnSet(object):
"""
A set of parameters (i.e., features and labels) for a machine learning
algorithm, each in the format of a pandas dataframe
"""
def __init__(self, nuc_concs, burnup):#reactor, enrichment, burnup):
self.nuc_concs = nuc_concs
#self.reactor = reactor
#self.enrichment = enrichment
self.burnup = burnup
###################################################
# TODO: Leaving the following global for now; fix!#
###################################################
# Info for labeling the simulation values in the training set
pwrburn = (600, 1550, 2500, 3450, 4400, 5350, 6300, 7250, 8200, 9150, 10100,
11050, 12000, 12950, 13900, 14850, 15800, 16750, 17700
)
bwrburn = (600, 1290, 1980, 2670, 3360, 4050, 4740, 5430, 6120, 6810, 7500,
8190, 8880, 9570, 10260, 10950, 11640, 12330
)
phwrburn = (600, 1290, 1980, 2670, 3360, 4050, 4740, 5430, 6120, 6810, 7500,
8190, 8880, 9570, 10260, 10950, 11640, 12330
)
o_rxtrs = ('ce14x14', 'ce16x16', 'w14x14', 'w15x15', 'w17x17', 's14x14',
'vver440', 'vver440_3.82', 'vver440_4.25', 'vver440_4.38',
'vver1000', 'ge7x7-0', 'ge8x8-1', 'ge9x9-2', 'ge10x10-8',
'abb8x8-1', 'atrium9x9-9', 'svea64-1', 'svea100-0', 'candu28',
'candu37'
)
enrich = (2.8, 2.8, 2.8, 2.8, 2.8, 2.8, 3.6, 3.82, 4.25, 4.38, 2.8, 2.9,
2.9, 2.9, 2.9, 2.9, 2.9, 2.9, 2.9, 0.711, 0.711
)
train_label = {'ReactorType': ['pwr']*11 + ['bwr']*8 + ['phwr']*2,
'OrigenReactor': o_rxtrs,
'Enrichment': enrich,
'Burnup': [pwrburn]*11 + [bwrburn]*8 + [phwrburn]*2,
'CoolingInts': [(0.000694, 7, 30, 365.25)]*21
}
# Info for labeling the simulated/expected values in the testing set
t_burns = ((1400, 5000, 11000), (5000, 6120), (1700, 8700, 17000),
(8700, 9150), (8700, 9150), (2000, 7200, 10800),
(7200, 8800), (7200, 8800)
)
cool1 = (0.000694, 7, 30, 365.25) #1 min, 1 week, 1 month, 1 year in days
cool2 = (0.002082, 9, 730.5) #3 min, 9 days, 2 years in days
cool3 = (7, 9) #7 and 9 days
t_o_rxtrs = ('candu28_0', 'candu28_1', 'ce16x16_2', 'ce16x16_3', 'ce16x16_4',
'ge7x7-0_5','ge7x7-0_6', 'ge7x7-0_7'
)
t_enrich = (0.711, 0.711, 2.8, 2.8, 3.1, 2.9, 2.9, 3.2)
test_label = {'ReactorType': ['phwr']*2 + ['pwr']*3 + ['bwr']*3,
'OrigenReactor': t_o_rxtrs,
'Enrichment': t_enrich,
'Burnup': t_burns,
'CoolingInts': [cool1, cool2, cool1, cool2, cool3, cool1, cool2, cool3]
}
def format_df(filename):
"""
This takes a csv file and reads the data in as a dataframe.
Parameters
----------
filename : str of simulation output in a csv file
Returns
-------
data : pandas dataframe containing csv entries
"""
data = pd.read_csv(filename).T
data.columns = data.iloc[0]
data.drop(data.index[0], inplace=True)
return data
def get_labels(filename, rxtrs):
"""
This takes a filename and a dict with all simulation parameters, and
searches for the entries relevant to the given simulation (file).
Parameters
----------
filename : str of simulation output in a csv file
rxtrs : dict of a data set detailing simulation parameters in ORIGEN
Returns
-------
rxtr_info : dict of all the labels for a given simulation data set
"""
tail, _ = os.path.splitext(os.path.basename(filename))
i = rxtrs['OrigenReactor'].index(tail)
rxtr_info = {'ReactorType': rxtrs['ReactorType'][i],
'Enrichment': rxtrs['Enrichment'][i],
'Burnup': rxtrs['Burnup'][i],
'CoolingInts': rxtrs['CoolingInts'][i]
}
return rxtr_info
def label_data(label, data):
"""
    Takes the labels for a simulation and a dataframe of the simulation results;
adds these labels as additional columns to the dataframe.
Parameters
----------
label : dict representing the labels for a simulation
data : dataframe of simulation results
Returns
-------
data : dataframe of simulation results + label entries in columns
"""
col = len(data.columns)
data.insert(loc = col, column = 'ReactorType', value = label['ReactorType'])
data.insert(loc = col+1, column = 'Enrichment', value = label['Enrichment'])
burnup = burnup_label(label['Burnup'], label['CoolingInts'])
data.insert(loc = col+2, column = 'Burnup', value = burnup)
return data
def burnup_label(burn_steps, cooling_ints):
"""
Takes the burnup steps and cooling intervals for each case within the
simulation and creates a list of the burnup of the irradiated and cooled/
decayed fuels; returns a list to be added as the burnup label to the main
dataframe.
Parameters
----------
burn_steps : list of the steps of burnup from the simulation parameters
cooling_ints : list of the cooling intervals from the simulation parameters
Returns
-------
burnup_list : list of burnups to be applied as a label for a given simulation
"""
num_cases = len(burn_steps)
steps_per_case = len(cooling_ints) + 2
burnup_list = [0, ]
for case in range(0, num_cases):
for step in range(0, steps_per_case):
if (case == 0 and step == 0):
continue
elif (case > 0 and step == 0):
burn_step = burn_steps[case-1]
burnup_list.append(burn_step)
else:
burn_step = burn_steps[case]
burnup_list.append(burn_step)
return burnup_list
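# Hedged worked example (illustrative, not part of the original module): the
# very first entry is a fresh-fuel 0, each later case is preceded by one entry
# at the previous case's burnup (its begin-of-irradiation step), and every case
# then repeats its own burnup len(cooling_ints) + 1 times.
def _example_burnup_label():  # illustrative helper, never called
    assert burnup_label((1400, 5000), (7, 30)) == \
        [0, 1400, 1400, 1400, 1400, 5000, 5000, 5000]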
def dataframeXY(all_files, rxtr_label):
""""
Takes the glob of files in a directory as well as the dict of labels and
produces a dataframe that has both the data features (X) and labeled data (Y).
Parameters
----------
all_files : list of str holding all simulation file names in a directory
rxtr_label : dict holding all parameters for all simulations in a directory
Returns
-------
dfXY : dataframe that has all features and labels for all simulations in a
directory
"""
all_data = []
for f in all_files:
data = format_df(f)
labels = get_labels(f, rxtr_label)
labeled = label_data(labels, data)
all_data.append(labeled)
dfXY = pd.concat(all_data)
##FILTERING STUFFS##
# Delete sim columns
# Need better way to know when the nuclide columns start (6 for now)
# Prob will just search for column idx that starts with str(1)?
cols = len(dfXY.columns)
dfXY = dfXY.iloc[:, 6:cols]
# Filter out 0 burnups so MAPE can be calc'd
dfXY = dfXY.loc[dfXY.Burnup > 0, :]
return dfXY
def top_nucs(dfXY, top_n):
"""
loops through the rows of a dataframe and keeps the top_n nuclides
(by concentration) from each row
Parameters
----------
dfXY : dataframe of nuclide concentrations + labels
top_n : number of nuclides to sort and filter by
Returns
-------
    nuc_set : set of the top_n nucs determined across all rows
"""
x = len(dfXY.columns)-3
dfX = dfXY.iloc[:, 0:x]
# Get a set of top n nucs from each row (instance)
nuc_set = set()
for case, conc in dfX.iterrows():
top_n_series = conc.sort_values(ascending=False)[:top_n]
nuc_list = list(top_n_series.index.values)
nuc_set.update(nuc_list)
return nuc_set
def filter_nucs(df, nuc_set, top_n):
"""
    for each instance (row), keep only the top_n values and replace the rest with 0
Parameters
----------
df : dataframe of nuclide concentrations
nuc_set : set of top_n nuclides
top_n : number of nuclides to sort and filter by
Returns
-------
top_n_df : dataframe that has values only for the top_n nuclides of the set
nuc_set in each row
"""
# To filter further, have to reconstruct the df into a new one
# Found success appending each row to a new df as a series
top_n_df = pd.DataFrame(columns=tuple(nuc_set))
for case, conc in df.iterrows():
top_n_series = conc.sort_values(ascending=False)[:top_n]
nucs = top_n_series.index.values
# some top values in test set aren't in nuc set, so need to delete those
del_list = list(set(nucs) - nuc_set)
top_n_series.drop(del_list, inplace=True)
filtered_row = conc.filter(items=top_n_series.index.values)
top_n_df = top_n_df.append(filtered_row)
    # replace NaNs with 0 because scikit-learn does not accept NaN values
top_n_df.fillna(value=0, inplace=True)
return top_n_df
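# Hedged usage sketch (illustrative, not part of the original module): top_nucs
# is meant to be run on the labeled training frame to fix the nuclide
# vocabulary, and filter_nucs is then applied to the unlabeled feature frames
# of both sets so that training and test share the same columns.
def _example_nuc_filtering(trainXY, testX, top_n=200):  # never called
    nuc_set = top_nucs(trainXY, top_n)          # vocabulary from training set
    trainX = trainXY.iloc[:, 0:len(trainXY.columns) - 3]
    trainX = filter_nucs(trainX, nuc_set, top_n)
    testX = filter_nucs(testX, nuc_set, top_n)  # same vocabulary for the test set
    return trainX, testX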
def splitXY(dfXY):
"""
Takes a dataframe with all X (features) and Y (labels) information and
produces four different dataframes: nuclide concentrations only (with
input-related columns deleted) + 1 dataframe for each label column.
Parameters
----------
    dfXY : dataframe with nuclide concentrations and 3 labels: reactor type,
enrichment, and burnup
Returns
-------
dfX : dataframe with only nuclide concentrations for each instance
r_dfY : dataframe with reactor type for each instance
e_dfY : dataframe with fuel enrichment for each instance
b_dfY : dataframe with fuel burnup for each instance
"""
x = len(dfXY.columns)-3
dfX = dfXY.iloc[:, 0:x]
r_dfY = dfXY.iloc[:, x]
e_dfY = dfXY.iloc[:, x+1]
b_dfY = dfXY.iloc[:, x+2]
return dfX, r_dfY, e_dfY, b_dfY
def main():
"""
Takes all origen files and compiles them into the appropriate dataframes for
training and testing sets. Then splits those dataframes into the appropriate
X and Ys for prediction of reactor type, fuel enrichment, and burnup.
The training set is varied by number of features included in trainX to
create a learning curve.
"""
pkl_train = 'trainXY_2nov.pkl'
pkl_test = 'testXY_2nov.pkl'
print("scrips\n")
#print("Did you check your training and testing data paths?\n")
# Training Datasets
#trainpath = "../origen/origen-data/training/9may2017/csv/"
#trainpath = "../origen-data/training/2nov2017/csv/"
#trainpath = "../origen/origen-data/training/2nov2017/csv/"
#train_files = glob.glob(os.path.join(trainpath, "*.csv"))
#trainXY = dataframeXY(train_files, train_label)
#trainXY.reset_index(inplace = True)
#pickle.dump(trainXY, open(pkl_train, 'wb'))
# Get set of top 200 nucs from training set
# The filter_nuc func repeats stuff from top_nucs but it is needed because
# the nuc_set needs to be determined from the training set for the test set
# and the training set is filtered within each loop
trainXY = pd.read_pickle(pkl_train)
top_n = 200
nuc_set = top_nucs(trainXY, top_n)
trainX, trainYr, trainYe, trainYb = splitXY(trainXY)
trainX = filter_nucs(trainX, nuc_set, top_n)
######################################################
#trainX = scale(trainX) WILL DO SCALING AFTER DATA MANIP
######################################################
train_set = LearnSet(nuc_concs = trainX, burnup = trainYb)
# Testing Dataset (for now)
#testpath = "../origen/origen-data/testing/10may2017_2/csv/"
#testpath = "../origen-data/testing/2nov2017/csv/"
#testpath = "../origen/origen-data/testing/2nov2017/csv/"
#test_files = glob.glob(os.path.join(testpath, "*.csv"))
#testXY = dataframeXY(test_files, test_label)
#testXY.reset_index(inplace = True)
#pickle.dump(testXY, open(pkl_test, 'wb'))
testXY = pd.read_pickle(pkl_test)
testX, testYr, testYe, testYb = splitXY(testXY)
testX = filter_nucs(testX, nuc_set, top_n)
######################################################
#testX = scale(testX) WILL DO SCALING AFTER DATA MANIP
######################################################
test_set = LearnSet(nuc_concs = testX, burnup = testYb)
#random_error(train_set, test_set)
#auto_train_and_predict(train_set)
manual_train_and_predict(train_set, test_set)
print("All csv files are saved in this directory!\n")
return
if __name__ == "__main__":
main()
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Optional, Sequence, Type, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from monai.networks.blocks.fcn import FCN
from monai.networks.layers.factories import Act, Conv, Norm, Pool
__all__ = ["AHnet", "Ahnet", "ahnet", "AHNet"]
class Bottleneck3x3x1(nn.Module):
expansion = 4
def __init__(
self,
spatial_dims: int,
inplanes: int,
planes: int,
stride: Union[Sequence[int], int] = 1,
downsample: Optional[nn.Sequential] = None,
) -> None:
super().__init__()
conv_type = Conv[Conv.CONV, spatial_dims]
norm_type: Type[Union[nn.BatchNorm2d, nn.BatchNorm3d]] = Norm[Norm.BATCH, spatial_dims]
pool_type: Type[Union[nn.MaxPool2d, nn.MaxPool3d]] = Pool[Pool.MAX, spatial_dims]
relu_type: Type[nn.ReLU] = Act[Act.RELU]
self.conv1 = conv_type(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = norm_type(planes)
self.conv2 = conv_type(
planes,
planes,
kernel_size=(3, 3, 1)[-spatial_dims:],
stride=stride,
padding=(1, 1, 0)[-spatial_dims:],
bias=False,
)
self.bn2 = norm_type(planes)
self.conv3 = conv_type(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = norm_type(planes * 4)
self.relu = relu_type(inplace=True)
self.downsample = downsample
self.stride = stride
self.pool = pool_type(kernel_size=(1, 1, 2)[-spatial_dims:], stride=(1, 1, 2)[-spatial_dims:])
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
if out.size() != residual.size():
out = self.pool(out)
out += residual
out = self.relu(out)
return out
class Projection(nn.Sequential):
def __init__(self, spatial_dims: int, num_input_features: int, num_output_features: int):
super().__init__()
conv_type = Conv[Conv.CONV, spatial_dims]
norm_type: Type[Union[nn.BatchNorm2d, nn.BatchNorm3d]] = Norm[Norm.BATCH, spatial_dims]
relu_type: Type[nn.ReLU] = Act[Act.RELU]
self.add_module("norm", norm_type(num_input_features))
self.add_module("relu", relu_type(inplace=True))
self.add_module("conv", conv_type(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))
class DenseBlock(nn.Sequential):
def __init__(
self,
spatial_dims: int,
num_layers: int,
num_input_features: int,
bn_size: int,
growth_rate: int,
dropout_prob: float,
):
super().__init__()
for i in range(num_layers):
layer = Pseudo3DLayer(
spatial_dims, num_input_features + i * growth_rate, growth_rate, bn_size, dropout_prob
)
self.add_module("denselayer%d" % (i + 1), layer)
class UpTransition(nn.Sequential):
def __init__(
self, spatial_dims: int, num_input_features: int, num_output_features: int, upsample_mode: str = "transpose"
):
super().__init__()
conv_type = Conv[Conv.CONV, spatial_dims]
norm_type: Type[Union[nn.BatchNorm2d, nn.BatchNorm3d]] = Norm[Norm.BATCH, spatial_dims]
relu_type: Type[nn.ReLU] = Act[Act.RELU]
self.add_module("norm", norm_type(num_input_features))
self.add_module("relu", relu_type(inplace=True))
self.add_module("conv", conv_type(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))
if upsample_mode == "transpose":
conv_trans_type = Conv[Conv.CONVTRANS, spatial_dims]
self.add_module(
"up", conv_trans_type(num_output_features, num_output_features, kernel_size=2, stride=2, bias=False)
)
else:
align_corners: Optional[bool] = None
if upsample_mode in ["trilinear", "bilinear"]:
align_corners = True
self.add_module("up", nn.Upsample(scale_factor=2, mode=upsample_mode, align_corners=align_corners))
class Final(nn.Sequential):
def __init__(
self, spatial_dims: int, num_input_features: int, num_output_features: int, upsample_mode: str = "transpose"
):
super().__init__()
conv_type = Conv[Conv.CONV, spatial_dims]
norm_type: Type[Union[nn.BatchNorm2d, nn.BatchNorm3d]] = Norm[Norm.BATCH, spatial_dims]
relu_type: Type[nn.ReLU] = Act[Act.RELU]
self.add_module("norm", norm_type(num_input_features))
self.add_module("relu", relu_type(inplace=True))
self.add_module(
"conv",
conv_type(
num_input_features,
num_output_features,
kernel_size=(3, 3, 1)[-spatial_dims:],
stride=1,
padding=(1, 1, 0)[-spatial_dims:],
bias=False,
),
)
if upsample_mode == "transpose":
conv_trans_type = Conv[Conv.CONVTRANS, spatial_dims]
self.add_module(
"up", conv_trans_type(num_output_features, num_output_features, kernel_size=2, stride=2, bias=False)
)
else:
align_corners: Optional[bool] = None
if upsample_mode in ["trilinear", "bilinear"]:
align_corners = True
self.add_module("up", nn.Upsample(scale_factor=2, mode=upsample_mode, align_corners=align_corners))
class Pseudo3DLayer(nn.Module):
def __init__(self, spatial_dims: int, num_input_features: int, growth_rate: int, bn_size: int, dropout_prob: float):
super().__init__()
# 1x1x1
conv_type = Conv[Conv.CONV, spatial_dims]
norm_type: Type[Union[nn.BatchNorm2d, nn.BatchNorm3d]] = Norm[Norm.BATCH, spatial_dims]
relu_type: Type[nn.ReLU] = Act[Act.RELU]
self.bn1 = norm_type(num_input_features)
self.relu1 = relu_type(inplace=True)
self.conv1 = conv_type(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)
# 3x3x1
self.bn2 = norm_type(bn_size * growth_rate)
self.relu2 = relu_type(inplace=True)
self.conv2 = conv_type(
bn_size * growth_rate,
growth_rate,
kernel_size=(3, 3, 1)[-spatial_dims:],
stride=1,
padding=(1, 1, 0)[-spatial_dims:],
bias=False,
)
# 1x1x3
self.bn3 = norm_type(growth_rate)
self.relu3 = relu_type(inplace=True)
self.conv3 = conv_type(
growth_rate,
growth_rate,
kernel_size=(1, 1, 3)[-spatial_dims:],
stride=1,
padding=(0, 0, 1)[-spatial_dims:],
bias=False,
)
# 1x1x1
self.bn4 = norm_type(growth_rate)
self.relu4 = relu_type(inplace=True)
self.conv4 = conv_type(growth_rate, growth_rate, kernel_size=1, stride=1, bias=False)
self.dropout_prob = dropout_prob
def forward(self, x):
inx = x
x = self.bn1(x)
x = self.relu1(x)
x = self.conv1(x)
x = self.bn2(x)
x = self.relu2(x)
x3x3x1 = self.conv2(x)
x = self.bn3(x3x3x1)
x = self.relu3(x)
x1x1x3 = self.conv3(x)
x = x3x3x1 + x1x1x3
x = self.bn4(x)
x = self.relu4(x)
new_features = self.conv4(x)
self.dropout_prob = 0.0 # Dropout will make trouble!
# since we use the train mode for inference
if self.dropout_prob > 0.0:
new_features = F.dropout(new_features, p=self.dropout_prob, training=self.training)
return torch.cat([inx, new_features], 1)
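# Hedged shape sketch (illustrative, not part of the original file): a
# Pseudo3DLayer factorises a 3D convolution into an in-plane 3x3x1 conv plus a
# through-plane 1x1x3 conv and, DenseNet-style, concatenates growth_rate new
# channels onto its input while preserving the spatial size.
def _example_pseudo3d_shapes():  # illustrative helper, never called
    layer = Pseudo3DLayer(
        spatial_dims=3, num_input_features=64, growth_rate=20, bn_size=4, dropout_prob=0.0
    )
    y = layer(torch.randn(1, 64, 32, 32, 16))
    return y.shape  # expected: (1, 64 + 20, 32, 32, 16)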
class PSP(nn.Module):
def __init__(self, spatial_dims: int, psp_block_num: int, in_ch: int, upsample_mode: str = "transpose"):
super().__init__()
self.up_modules = nn.ModuleList()
conv_type = Conv[Conv.CONV, spatial_dims]
pool_type: Type[Union[nn.MaxPool2d, nn.MaxPool3d]] = Pool[Pool.MAX, spatial_dims]
self.pool_modules = nn.ModuleList()
self.project_modules = nn.ModuleList()
for i in range(psp_block_num):
size = (2 ** (i + 3), 2 ** (i + 3), 1)[-spatial_dims:]
self.pool_modules.append(pool_type(kernel_size=size, stride=size))
self.project_modules.append(
conv_type(
in_ch,
1,
kernel_size=(1, 1, 1)[-spatial_dims:],
stride=1,
padding=(1, 1, 0)[-spatial_dims:],
)
)
self.spatial_dims = spatial_dims
self.psp_block_num = psp_block_num
self.upsample_mode = upsample_mode
if self.upsample_mode == "transpose":
conv_trans_type = Conv[Conv.CONVTRANS, spatial_dims]
for i in range(psp_block_num):
size = (2 ** (i + 3), 2 ** (i + 3), 1)[-spatial_dims:]
pad_size = (2 ** (i + 3), 2 ** (i + 3), 0)[-spatial_dims:]
self.up_modules.append(
conv_trans_type(
1,
1,
kernel_size=size,
stride=size,
padding=pad_size,
)
)
def forward(self, x):
outputs = []
if self.upsample_mode == "transpose":
for (project_module, pool_module, up_module) in zip(
self.project_modules, self.pool_modules, self.up_modules
):
output = up_module(project_module(pool_module(x)))
outputs.append(output)
else:
for (project_module, pool_module) in zip(self.project_modules, self.pool_modules):
interpolate_size = x.shape[2:]
align_corners: Optional[bool] = None
if self.upsample_mode in ["trilinear", "bilinear"]:
align_corners = True
output = F.interpolate(
project_module(pool_module(x)),
size=interpolate_size,
mode=self.upsample_mode,
align_corners=align_corners,
)
outputs.append(output)
x = torch.cat(outputs, dim=1)
return x
class AHNet(nn.Module):
"""
AHNet based on `Anisotropic Hybrid Network <https://arxiv.org/pdf/1711.08580.pdf>`_.
Adapted from `lsqshr's official code <https://github.com/lsqshr/AH-Net/blob/master/net3d.py>`_.
    Unlike the original network, which supports only 3D inputs, this implementation also supports 2D inputs.
According to the `tests for deconvolutions <https://github.com/Project-MONAI/MONAI/issues/1023>`_, using
``"transpose"`` rather than linear interpolations is faster. Therefore, this implementation sets ``"transpose"``
as the default upsampling method.
To meet the requirements of the structure, the input size for each spatial dimension
(except the last one) should be: divisible by 2 ** (psp_block_num + 3) and no less than 32 in ``transpose`` mode,
and should be divisible by 32 and no less than 2 ** (psp_block_num + 3) in other upsample modes.
In addition, the input size for the last spatial dimension should be divisible by 32, and at least one spatial size
should be no less than 64.
Args:
layers: number of residual blocks for 4 layers of the network (layer1...layer4). Defaults to ``(3, 4, 6, 3)``.
spatial_dims: spatial dimension of the input data. Defaults to 3.
in_channels: number of input channels for the network. Default to 1.
out_channels: number of output channels for the network. Defaults to 1.
psp_block_num: the number of pyramid volumetric pooling modules used at the end of the network before the final
output layer for extracting multiscale features. The number should be an integer that belongs to [0,4]. Defaults
to 4.
upsample_mode: [``"transpose"``, ``"bilinear"``, ``"trilinear"``, ``nearest``]
The mode of upsampling manipulations.
Using the last two modes cannot guarantee the model's reproducibility. Defaults to ``transpose``.
- ``"transpose"``, uses transposed convolution layers.
- ``"bilinear"``, uses bilinear interpolate.
- ``"trilinear"``, uses trilinear interpolate.
- ``"nearest"``, uses nearest interpolate.
pretrained: whether to load pretrained weights from ResNet50 to initialize convolution layers, default to False.
progress: If True, displays a progress bar of the download of pretrained weights to stderr.
"""
def __init__(
self,
layers: tuple = (3, 4, 6, 3),
spatial_dims: int = 3,
in_channels: int = 1,
out_channels: int = 1,
psp_block_num: int = 4,
upsample_mode: str = "transpose",
pretrained: bool = False,
progress: bool = True,
):
self.inplanes = 64
super().__init__()
conv_type = Conv[Conv.CONV, spatial_dims]
conv_trans_type = Conv[Conv.CONVTRANS, spatial_dims]
norm_type = Norm[Norm.BATCH, spatial_dims]
pool_type: Type[Union[nn.MaxPool2d, nn.MaxPool3d]] = Pool[Pool.MAX, spatial_dims]
relu_type: Type[nn.ReLU] = Act[Act.RELU]
conv2d_type: Type[nn.Conv2d] = Conv[Conv.CONV, 2]
norm2d_type: Type[nn.BatchNorm2d] = Norm[Norm.BATCH, 2]
self.conv2d_type = conv2d_type
self.norm2d_type = norm2d_type
self.conv_type = conv_type
self.norm_type = norm_type
self.relu_type = relu_type
self.pool_type = pool_type
self.spatial_dims = spatial_dims
self.psp_block_num = psp_block_num
self.psp = None
if spatial_dims not in [2, 3]:
raise AssertionError("spatial_dims can only be 2 or 3.")
if psp_block_num not in [0, 1, 2, 3, 4]:
raise AssertionError("psp_block_num should be an integer that belongs to [0, 4].")
self.conv1 = conv_type(
in_channels,
64,
kernel_size=(7, 7, 3)[-spatial_dims:],
stride=(2, 2, 1)[-spatial_dims:],
padding=(3, 3, 1)[-spatial_dims:],
bias=False,
)
self.pool1 = pool_type(kernel_size=(1, 1, 2)[-spatial_dims:], stride=(1, 1, 2)[-spatial_dims:])
self.bn0 = norm_type(64)
self.relu = relu_type(inplace=True)
if upsample_mode in ["transpose", "nearest"]:
# To maintain the determinism, the value of kernel_size and stride should be the same.
# (you can check this link for reference: https://github.com/Project-MONAI/MONAI/pull/815 )
self.maxpool = pool_type(kernel_size=(2, 2, 2)[-spatial_dims:], stride=2)
else:
self.maxpool = pool_type(kernel_size=(3, 3, 3)[-spatial_dims:], stride=2, padding=1)
self.layer1 = self._make_layer(Bottleneck3x3x1, 64, layers[0], stride=1)
self.layer2 = self._make_layer(Bottleneck3x3x1, 128, layers[1], stride=2)
self.layer3 = self._make_layer(Bottleneck3x3x1, 256, layers[2], stride=2)
self.layer4 = self._make_layer(Bottleneck3x3x1, 512, layers[3], stride=2)
# Make the 3D dense decoder layers
densegrowth = 20
densebn = 4
ndenselayer = 3
num_init_features = 64
noutres1 = 256
noutres2 = 512
noutres3 = 1024
noutres4 = 2048
self.up0 = UpTransition(spatial_dims, noutres4, noutres3, upsample_mode)
self.dense0 = DenseBlock(spatial_dims, ndenselayer, noutres3, densebn, densegrowth, 0.0)
noutdense = noutres3 + ndenselayer * densegrowth
self.up1 = UpTransition(spatial_dims, noutdense, noutres2, upsample_mode)
self.dense1 = DenseBlock(spatial_dims, ndenselayer, noutres2, densebn, densegrowth, 0.0)
noutdense1 = noutres2 + ndenselayer * densegrowth
self.up2 = UpTransition(spatial_dims, noutdense1, noutres1, upsample_mode)
self.dense2 = DenseBlock(spatial_dims, ndenselayer, noutres1, densebn, densegrowth, 0.0)
noutdense2 = noutres1 + ndenselayer * densegrowth
self.trans1 = Projection(spatial_dims, noutdense2, num_init_features)
self.dense3 = DenseBlock(spatial_dims, ndenselayer, num_init_features, densebn, densegrowth, 0.0)
noutdense3 = num_init_features + densegrowth * ndenselayer
self.up3 = UpTransition(spatial_dims, noutdense3, num_init_features, upsample_mode)
self.dense4 = DenseBlock(spatial_dims, ndenselayer, num_init_features, densebn, densegrowth, 0.0)
noutdense4 = num_init_features + densegrowth * ndenselayer
self.psp = PSP(spatial_dims, psp_block_num, noutdense4, upsample_mode)
self.final = Final(spatial_dims, psp_block_num + noutdense4, out_channels, upsample_mode)
# Initialise parameters
for m in self.modules():
if isinstance(m, (conv_type, conv_trans_type)):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, norm_type):
m.weight.data.fill_(1)
m.bias.data.zero_()
if pretrained:
net2d = FCN(pretrained=True, progress=progress)
self.copy_from(net2d)
def _make_layer(
self,
block: Type[Bottleneck3x3x1],
planes: int,
blocks: int,
stride: int = 1,
) -> nn.Sequential:
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
self.conv_type(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=(stride, stride, 1)[: self.spatial_dims],
bias=False,
),
self.pool_type(
kernel_size=(1, 1, stride)[: self.spatial_dims], stride=(1, 1, stride)[: self.spatial_dims]
),
self.norm_type(planes * block.expansion),
)
layers = []
layers.append(
block(self.spatial_dims, self.inplanes, planes, (stride, stride, 1)[: self.spatial_dims], downsample)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.spatial_dims, self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.pool1(x)
x = self.bn0(x)
x = self.relu(x)
conv_x = x
x = self.maxpool(x)
pool_x = x
fm1 = self.layer1(x)
fm2 = self.layer2(fm1)
fm3 = self.layer3(fm2)
fm4 = self.layer4(fm3)
sum0 = self.up0(fm4) + fm3
d0 = self.dense0(sum0)
sum1 = self.up1(d0) + fm2
d1 = self.dense1(sum1)
sum2 = self.up2(d1) + fm1
d2 = self.dense2(sum2)
sum3 = self.trans1(d2) + pool_x
d3 = self.dense3(sum3)
sum4 = self.up3(d3) + conv_x
d4 = self.dense4(sum4)
if self.psp_block_num > 0:
psp = self.psp(d4)
x = torch.cat((psp, d4), dim=1)
else:
x = d4
return self.final(x)
def copy_from(self, net):
        # This method only supports the 3D AHNet; the input channel count should be 1.
p2d, p3d = next(net.conv1.parameters()), next(self.conv1.parameters())
# From 64x3x7x7 -> 64x3x7x7x1 -> 64x1x7x7x3
weights = p2d.data.unsqueeze(dim=4).permute(0, 4, 2, 3, 1).clone()
p3d.data = weights.repeat([1, p3d.shape[1], 1, 1, 1])
# Copy the initial module BN0
copy_bn_param(net.bn0, self.bn0)
# Copy layer1 to layer4
for i in range(1, 5):
layer_num = "layer" + str(i)
layer_2d = []
layer_3d = []
for m1 in vars(net)["_modules"][layer_num].modules():
if isinstance(m1, (self.norm2d_type, self.conv2d_type)):
layer_2d.append(m1)
for m2 in vars(self)["_modules"][layer_num].modules():
if isinstance(m2, (self.norm_type, self.conv_type)):
layer_3d.append(m2)
for m1, m2 in zip(layer_2d, layer_3d):
if isinstance(m1, self.conv2d_type):
copy_conv_param(m1, m2)
if isinstance(m1, self.norm2d_type):
copy_bn_param(m1, m2)
def copy_conv_param(module2d, module3d):
for p2d, p3d in zip(module2d.parameters(), module3d.parameters()):
p3d.data[:] = p2d.data.unsqueeze(dim=4).clone()[:]
def copy_bn_param(module2d, module3d):
for p2d, p3d in zip(module2d.parameters(), module3d.parameters()):
p3d.data[:] = p2d.data[:] # Two parameter gamma and beta
AHnet = Ahnet = ahnet = AHNet
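# Hedged usage sketch (illustrative, not part of the original file): with the
# default psp_block_num=4 and upsample_mode="transpose", the first two spatial
# sizes must be divisible by 2 ** (4 + 3) = 128 and the last one by 32, so a
# minimal valid 3D input is for example (batch, channel, 128, 128, 32).
def _example_ahnet_forward():  # illustrative helper, never called
    net = AHNet(spatial_dims=3, in_channels=1, out_channels=2)
    x = torch.randn(1, 1, 128, 128, 32)
    y = net(x)  # expected to preserve the spatial size: (1, 2, 128, 128, 32)
    return y.shape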
|
from .http import EasydbClient
from .domain import SpaceDoesNotExistException, BucketDoesNotExistException, ElementDoesNotExistException, \
    TransactionDoesNotExistException, MultipleElementFields, ElementField, Element, FilterQuery, \
    PaginatedElements, TransactionOperation, OperationResult, UnknownOperationException, \
    BucketAlreadyExistsException
|
# terrascript/data/stackpath.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:27:49 UTC)
#
# For imports without namespace, e.g.
#
# >>> import terrascript.data.stackpath
#
# instead of
#
# >>> import terrascript.data.stackpath.stackpath
#
# This is only available for 'official' and 'partner' providers.
from terrascript.data.stackpath.stackpath import *
|
from ._GetBool import *
from ._GetMotorsHeadingOffset import *
from ._GetPOI import *
from ._GetPTZ import *
from ._InsertTask import *
from ._QueryAlarms import *
from ._ResetFromSubState import *
from ._SetBuzzer import *
from ._SetByte import *
from ._SetElevator import *
from ._SetEncoderTurns import *
from ._SetLaserMode import *
from ._SetMotorMode import *
from ._SetMotorPID import *
from ._SetMotorStatus import *
from ._SetNamedDigitalOutput import *
from ._SetString import *
from ._SetTransform import *
from ._ack_alarm import *
from ._axis_record import *
from ._enable_disable import *
from ._get_alarms import *
from ._get_digital_input import *
from ._get_modbus_register import *
from ._get_mode import *
from ._home import *
from ._set_CartesianEuler_pose import *
from ._set_analog_output import *
from ._set_digital_output import *
from ._set_float_value import *
from ._set_height import *
from ._set_modbus_register import *
from ._set_mode import *
from ._set_named_digital_output import *
from ._set_odometry import *
from ._set_ptz import *
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Disentangled Sequential Variational Autoencoder.
This file provides a partial reimplementation of the Disentangled
Sequential Autoencoder paper [(Li and Mandt, 2018)][1]. More
specifically, this script implements the disentangled sequential
variational autoencoder model defined in section 2 of the paper, and
trains and performs the qualitative analysis on the "sprites" dataset
according to section 4.1. This script does not perform the quantitative
analysis of section 4.1, or replicate the findings on the other two
datasets found in sections 4.2 or 4.3.
The disentangled sequential variational autoencoder posits a generative
model in which a static, time-invariant latent variable `f` is sampled
from a prior `p(f)`, a dynamic, time-variant latent variable `z_t` at
timestep `t` is sampled from a conditional distribution
`p(z_t | z_{<t})`, and an observation `x_t` is generated by a
probabilistic decoder `p(x_t | z_t, f)`. The full generative model is
defined as
```none
p(x_{1:T}, z_{1:T}, f) = p(f) prod_{t=1}^T p(z_t | z_{<t}) p(x_t | z_t, f).
```
We then posit an approximate posterior over the latent variables in the
form of a probabilistic encoder `q(z_{1:T}, f | x_{1:T})`. Paired with
the probabilistic decoder, we can form a sequential variational
autoencoder model. Variational inference can be used to fit the model by
decomposing the log marginal distribution `log p(x_{1:T})` into the
evidence lower bound (ELBO) and the KL divergence between the true and
approximate posteriors over the latent variables
```none
log p(x) = -KL[q(z_{1:T},f|x_{1:T}) || p(x_{1:T},z_{1:T},f)]
+ KL[q(z_{1:T},f|x_{1:T}) || p(z_{1:T},f|x_{1:T})]
= ELBO + KL[q(z_{1:T},f|x_{1:T}) || p(z_{1:T},f|x_{1:T})]
>= ELBO # Jensen's inequality for KL divergence.
>= int int q(z_{1:T},f|x_{1:T}) [
log p(x_{1:T},z_{1:T},f) - log q(z_{1:T},f|x_{1:T}) ] dz_{1:T} df.
```
We then maximize the ELBO with respect to the model's parameters.
The approximate posterior `q(z_{1:T}, f | x_{1:T})` can be formulated in
two ways. The first formulation is a distribution that factorizes
across timesteps,
```none
q(z_{1:T}, f | x_{1:T}) = q(f | x_{1:T}) prod_{t=1}^T q(z_t | x_t),
```
where `q(f | x_{1:T})` is a multivariate Gaussian parameterized by a
bidirectional LSTM-based model, and `q(z_t | x_t)` is a multivariate
Gaussian parameterized by a convolutional model. This is known as the
"factorized" `q` distribution.
The second formulation is a distribution
```none
q(z_{1:T}, f | x_{1:T}) = q(f | x_{1:T}) q(z_{1:T} | f, x_{1:T}),
```
where `q(z_{1:T} | f, x_{1:T})` is a multivariate Gaussian parameterized
by a model consisting of a bidirectional LSTM followed by a basic RNN,
and `q(f | x_{1:T})` is the same as previously described. This is known
as the "full" `q` distribution.
When this script is executed, it will produce model checkpoints for the
five most recent log steps, and log all intermediate results to
TensorBoard logs in the `logdir` directory. If `model_dir` points to an
existing directory of checkpoints, then the most recent one will be
restored. The intermediate results include the ELBO and image summaries
for reconstruction and generation corresponding to the qualitative
analysis in the paper. Optional debug logging also produces summaries
for gradient norms, log probabilities, and histograms for all parameter
tensors. With the current defaults, this script runs in 5 hours, 55
minutes on a single V100 GPU, and produces a model with an ELBO of
-9.044e+4.
#### References
[1]: Yingzhen Li and Stephan Mandt. Disentangled Sequential Autoencoder.
In _International Conference on Machine Learning_, 2018.
https://arxiv.org/abs/1803.02991
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import functools
from absl import app
from absl import flags
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.examples import sprites_dataset
from tensorflow.contrib import checkpoint as contrib_checkpoint
tfd = tfp.distributions
flags.DEFINE_integer(
"batch_size",
default=32,
help="Batch size during training.")
flags.DEFINE_float(
"clip_norm",
default=1e10,
help="Threshold for global norm gradient clipping.")
flags.DEFINE_boolean(
"enable_debug_logging",
default=None,
help="Whether or not to include extra TensorBoard logging for debugging.")
flags.DEFINE_boolean(
"fake_data",
default=None,
help="Whether or not to train with synthetic data.")
flags.DEFINE_integer(
"hidden_size",
default=512,
help="Dimensionality of the model intermediates.")
flags.DEFINE_enum(
"latent_posterior",
default="factorized",
enum_values=["factorized", "full"],
help="The formulation for the latent posterior `q`.")
flags.DEFINE_integer(
"latent_size_dynamic",
default=32,
help="Dimensionality of each dynamic, time-variant latent variable `z_t`.")
flags.DEFINE_integer(
"latent_size_static",
default=256,
help="Dimensionality of the static, time-invariant latent variable `f`.")
flags.DEFINE_float(
"learning_rate",
default=0.0001,
help="Learning rate during training.")
flags.DEFINE_string(
"logdir", # `log_dir` is already defined by absl
default="/tmp/disentangled_vae/logs/{timestamp}",
help="Directory in which to write TensorBoard logs.")
flags.DEFINE_integer(
"log_steps",
default=100,
help="Frequency, in steps, of TensorBoard logging.")
flags.DEFINE_integer(
"max_steps",
default=10000,
help="Number of steps over which to train.")
flags.DEFINE_string(
"model_dir",
default="/tmp/disentangled_vae/models/{timestamp}",
help="Directory in which to save model checkpoints.")
flags.DEFINE_integer(
"num_reconstruction_samples",
default=1,
help="Number of samples to use during reconstruction evaluation.")
flags.DEFINE_integer(
"num_samples",
default=4,
help="Number of samples to use during training.")
flags.DEFINE_integer(
"seed",
default=42,
help="Random seed.")
FLAGS = flags.FLAGS
class LearnableMultivariateNormalDiag(tf.keras.Model):
"""Learnable multivariate diagonal normal distribution.
The model is a multivariate normal distribution with learnable
`mean` and `stddev` parameters.
"""
def __init__(self, dimensions):
"""Constructs a learnable multivariate diagonal normal model.
Args:
dimensions: An integer corresponding to the dimensionality of the
distribution.
"""
super(LearnableMultivariateNormalDiag, self).__init__()
with tf.compat.v1.name_scope(self._name):
self.dimensions = dimensions
self._mean = tf.compat.v2.Variable(
tf.random.normal([dimensions], stddev=0.1), name="mean")
# Initialize the std dev such that it will be close to 1 after a softplus
# function.
self._untransformed_stddev = tf.compat.v2.Variable(
tf.random.normal([dimensions], mean=0.55, stddev=0.1),
name="untransformed_stddev")
def __call__(self, *args, **kwargs):
# Allow this Model to be called without inputs.
dummy = tf.zeros(self.dimensions)
return super(LearnableMultivariateNormalDiag, self).__call__(
dummy, *args, **kwargs)
def call(self, inputs):
"""Runs the model to generate multivariate normal distribution.
Args:
inputs: Unused.
Returns:
A MultivariateNormalDiag distribution with event shape
[dimensions], batch shape [], and sample shape [sample_shape,
dimensions].
"""
del inputs # unused
with tf.compat.v1.name_scope(self._name):
return tfd.MultivariateNormalDiag(self.loc, self.scale_diag)
@property
def loc(self):
"""The mean of the normal distribution."""
return self._mean
@property
def scale_diag(self):
"""The diagonal standard deviation of the normal distribution."""
return tf.nn.softplus(self._untransformed_stddev) + 1e-5 # keep > 0
class LearnableMultivariateNormalDiagCell(tf.keras.Model):
"""Multivariate diagonal normal distribution RNN cell.
The model is an LSTM-based recurrent function that computes the
parameters for a multivariate normal distribution at each timestep
`t`.
"""
def __init__(self, dimensions, hidden_size):
"""Constructs a learnable multivariate diagonal normal cell.
Args:
dimensions: An integer corresponding to the dimensionality of the
distribution.
hidden_size: Dimensionality of the LSTM function parameters.
"""
super(LearnableMultivariateNormalDiagCell, self).__init__()
self.dimensions = dimensions
self.hidden_size = hidden_size
self.lstm_cell = tf.keras.layers.LSTMCell(hidden_size)
self.output_layer = tf.keras.layers.Dense(2*dimensions)
def zero_state(self, sample_batch_shape=()):
"""Returns an initial state for the LSTM cell.
Args:
sample_batch_shape: A 0D or 1D tensor of the combined sample and
batch shape.
Returns:
A tuple of the initial previous output at timestep 0 of shape
[sample_batch_shape, dimensions], and the cell state.
"""
h0 = tf.zeros([1, self.hidden_size])
c0 = tf.zeros([1, self.hidden_size])
combined_shape = tf.concat((tf.convert_to_tensor(
value=sample_batch_shape, dtype=tf.int32), [self.dimensions]),
axis=-1)
previous_output = tf.zeros(combined_shape)
return previous_output, (h0, c0)
def call(self, inputs, state):
"""Runs the model to generate a distribution for a single timestep.
This generates a batched MultivariateNormalDiag distribution using
the output of the recurrent model at the current timestep to
parameterize the distribution.
Args:
inputs: The sampled value of `z` at the previous timestep, i.e.,
`z_{t-1}`, of shape [..., dimensions].
`z_0` should be set to the empty matrix.
state: A tuple containing the (hidden, cell) state.
Returns:
A tuple of a MultivariateNormalDiag distribution, and the state of
the recurrent function at the end of the current timestep. The
distribution will have event shape [dimensions], batch shape
[...], and sample shape [sample_shape, ..., dimensions].
"""
# In order to allow the user to pass in a single example without a batch
# dimension, we always expand the input to at least two dimensions, then
# fix the output shape to remove the batch dimension if necessary.
original_shape = inputs.shape
if len(original_shape) < 2:
inputs = tf.reshape(inputs, [1, -1])
out, state = self.lstm_cell(inputs, state)
out = self.output_layer(out)
correct_shape = tf.concat((original_shape[:-1], tf.shape(input=out)[-1:]),
0)
out = tf.reshape(out, correct_shape)
loc = out[..., :self.dimensions]
scale_diag = tf.nn.softplus(out[..., self.dimensions:]) + 1e-5 # keep > 0
return tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag), state
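# Hedged usage sketch (illustrative, not part of the original script): the cell
# above parameterizes the autoregressive prior p(z_t | z_{<t}); unrolling it
# for T steps by feeding each sample back in as the next input looks roughly
# like this (assuming eager execution).
def _example_dynamic_prior_rollout(timesteps=5, latent_size=32, hidden_size=512):
    cell = LearnableMultivariateNormalDiagCell(latent_size, hidden_size)
    sample, state = cell.zero_state([1])  # batch of one empty "z_0"
    samples = []
    for _ in range(timesteps):
        dist, state = cell(sample, state)
        sample = dist.sample()            # z_t ~ p(z_t | z_{<t})
        samples.append(sample)
    return tf.stack(samples, axis=-2)     # shape [1, timesteps, latent_size]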
class Decoder(tf.keras.Model):
"""Probabilistic decoder for `p(x_t | z_t, f)`.
The decoder generates a sequence of image frames `x_{1:T}` from
dynamic and static latent variables `z_{1:T}` and `f`, respectively,
for timesteps `1:T`.
"""
def __init__(self, hidden_size, channels=3):
"""Constructs a probabilistic decoder.
For each timestep, this model takes as input a concatenation of the
dynamic and static latent variables `z_t` and `f`, respectively,
outputs an intermediate representation via an affine function (i.e.,
a one hidden layer MLP), then transforms this with four transpose
convolution layers and up-sampling to the spatial shape of `x_t`.
Together with the priors, this allows us to specify a generative
model
```none
p(x_{1:T}, z_{1:T}, f) = p(f) prod_{t=1}^T p(z_t | z_{<t}) p(x_t | z_t, f).
```
Args:
hidden_size: Dimensionality of the intermediate representations.
channels: The depth of the output tensor.
"""
super(Decoder, self).__init__()
self.hidden_size = hidden_size
activation = tf.nn.leaky_relu
self.dense = tf.keras.layers.Dense(hidden_size, activation=activation)
# Spatial sizes: (1,1) -> (8,8) -> (16,16) -> (32,32) -> (64,64).
conv_transpose = functools.partial(
tf.keras.layers.Conv2DTranspose, padding="SAME", activation=activation)
self.conv_transpose1 = conv_transpose(256, 8, 1, padding="VALID")
self.conv_transpose2 = conv_transpose(256, 3, 2)
self.conv_transpose3 = conv_transpose(256, 3, 2)
self.conv_transpose4 = conv_transpose(channels, 3, 2, activation=None)
def call(self, inputs):
"""Runs the model to generate a distribution p(x_t | z_t, f).
Args:
inputs: A tuple of (z_{1:T}, f), where `z_{1:T}` is a tensor of
shape [..., batch_size, timesteps, latent_size_dynamic], and `f`
is of shape [..., batch_size, latent_size_static].
Returns:
A batched Independent distribution wrapping a set of Normal
distributions over the pixels of x_t, where the Independent
distribution has event shape [height, width, channels], batch
shape [batch_size, timesteps], and sample shape [sample_shape,
batch_size, timesteps, height, width, channels].
"""
# We explicitly broadcast f to the same shape as z other than the final
# dimension, because `tf.concat` can't automatically do this.
dynamic, static = inputs
timesteps = tf.shape(input=dynamic)[-2]
static = static[..., tf.newaxis, :] + tf.zeros([timesteps, 1])
latents = tf.concat([dynamic, static], axis=-1) # (sample, N, T, latents)
out = self.dense(latents)
out = tf.reshape(out, (-1, 1, 1, self.hidden_size))
out = self.conv_transpose1(out)
out = self.conv_transpose2(out)
out = self.conv_transpose3(out)
out = self.conv_transpose4(out) # (sample*N*T, h, w, c)
expanded_shape = tf.concat(
(tf.shape(input=latents)[:-1], tf.shape(input=out)[1:]), axis=0)
out = tf.reshape(out, expanded_shape) # (sample, N, T, h, w, c)
return tfd.Independent(
distribution=tfd.Normal(loc=out, scale=1.),
reinterpreted_batch_ndims=3, # wrap (h, w, c)
name="decoded_image")
class Compressor(tf.keras.Model):
"""Feature extractor.
This convolutional model aims to extract features corresponding to a
sequence of image frames for use in downstream probabilistic encoders.
The architecture is symmetric to that of the convolutional decoder.
"""
def __init__(self, hidden_size):
"""Constructs a convolutional compressor.
This model takes as input `x_{1:T}` and outputs an intermediate
representation for use in downstream probabilistic encoders.
Args:
hidden_size: Dimensionality of the intermediate representations.
"""
super(Compressor, self).__init__()
self.hidden_size = hidden_size
# Spatial sizes: (64,64) -> (32,32) -> (16,16) -> (8,8) -> (1,1).
conv = functools.partial(
tf.keras.layers.Conv2D, padding="SAME", activation=tf.nn.leaky_relu)
self.conv1 = conv(256, 3, 2)
self.conv2 = conv(256, 3, 2)
self.conv3 = conv(256, 3, 2)
self.conv4 = conv(hidden_size, 8, padding="VALID")
def call(self, inputs):
"""Runs the model to generate an intermediate representation of x_t.
Args:
inputs: A batch of image sequences `x_{1:T}` of shape
`[sample_shape, batch_size, timesteps, height, width,
channels]`.
Returns:
A batch of intermediate representations of shape [sample_shape,
batch_size, timesteps, hidden_size].
"""
image_shape = tf.shape(input=inputs)[-3:]
collapsed_shape = tf.concat(([-1], image_shape), axis=0)
out = tf.reshape(inputs, collapsed_shape) # (sample*batch*T, h, w, c)
out = self.conv1(out)
out = self.conv2(out)
out = self.conv3(out)
out = self.conv4(out)
expanded_shape = tf.concat((tf.shape(input=inputs)[:-3], [-1]), axis=0)
return tf.reshape(out, expanded_shape) # (sample, batch, T, hidden)
class EncoderStatic(tf.keras.Model):
"""Probabilistic encoder for the time-invariant latent variable `f`.
The conditional distribution `q(f | x_{1:T})` is a multivariate
normal distribution on `R^{latent_size}` at each timestep `t`,
conditioned on intermediate representations of `x_{1:T}` from the
convolutional encoder. The parameters are computed by passing the
inputs through a bidirectional LSTM function, then passing the final
output to an affine function to yield normal parameters for
`q(f | x_{1:T})`.
Together with the EncoderDynamicFactorized class, we can formulate the
factorized approximate latent posterior `q` inference ("encoder")
model as
```none
q(z_{1:T}, f | x_{1:T}) = q(f | x_{1:T}) prod_{t=1}^T q(z_t | x_t).
```
Together with the EncoderDynamicFull class, we can formulate the full
approximate latent posterior `q` inference ("encoder") model as
```none
q(z_{1:T}, f | x_{1:T}) = q(f | x_{1:T}) q(z_{1:T} | f, x_{1:T}).
```
"""
def __init__(self, latent_size, hidden_size):
"""Constructs an encoder for `f`.
Args:
latent_size: An integer corresponding to the dimensionality of the
distribution.
hidden_size: Dimensionality of the LSTM, RNN, and affine function
parameters.
"""
super(EncoderStatic, self).__init__()
self.latent_size = latent_size
self.hidden_size = hidden_size
self.bilstm = tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(hidden_size),
merge_mode="sum")
self.output_layer = tf.keras.layers.Dense(2*latent_size)
def call(self, inputs):
"""Runs the model to generate a distribution `q(f | x_{1:T})`.
This generates a list of batched MultivariateNormalDiag
distributions using the output of the recurrent model at each
timestep to parameterize each distribution.
Args:
inputs: A batch of intermediate representations of image frames
across all timesteps, of shape [..., batch_size, timesteps,
hidden_size].
Returns:
A batched MultivariateNormalDiag distribution with event shape
[latent_size], batch shape [..., batch_size], and sample shape
[sample_shape, ..., batch_size, latent_size].
"""
# TODO(dusenberrymw): Remove these reshaping commands after b/113126249 is
# fixed.
collapsed_shape = tf.concat(([-1], tf.shape(input=inputs)[-2:]), axis=0)
out = tf.reshape(inputs, collapsed_shape) # (sample*batch_size, T, hidden)
out = self.bilstm(out) # (sample*batch_size, hidden)
expanded_shape = tf.concat((tf.shape(input=inputs)[:-2], [-1]), axis=0)
out = tf.reshape(out, expanded_shape) # (sample, batch_size, hidden)
out = self.output_layer(out) # (sample, batch_size, 2*latent_size)
loc = out[..., :self.latent_size]
scale_diag = tf.nn.softplus(out[..., self.latent_size:]) + 1e-5 # keep > 0
return tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag)
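# --- Illustrative sketch (not part of the original example) -----------------
# `EncoderStatic` above and the two dynamic encoders below all share the same
# output head: a dense layer emits `2 * latent_size` values, the first half is
# taken as the mean and the second half goes through a softplus (plus a small
# epsilon so the scale stays strictly positive) to form the diagonal scale of
# a `MultivariateNormalDiag`. The helper below shows that pattern in
# isolation; it assumes the `tf` and `tfd` modules imported at the top of this
# file and is only meant as a reading aid.
def _loc_scale_head_sketch(raw_params, latent_size):
  """Splits `[..., 2 * latent_size]` activations into a MVNDiag distribution."""
  loc = raw_params[..., :latent_size]
  scale_diag = tf.nn.softplus(raw_params[..., latent_size:]) + 1e-5
  return tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag)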
class EncoderDynamicFactorized(tf.keras.Model):
"""Probabilistic encoder for the time-variant latent variable `z_t`.
The conditional distribution `q(z_t | x_t)` is a multivariate normal
distribution on `R^{latent_size}` at each timestep `t`, conditioned on
an intermediate representation of `x_t` from the convolutional
encoder. The parameters are computed by a one-hidden layer neural
net.
In this formulation, we posit that the dynamic latent variable `z_t`
is independent of static latent variable `f`.
Together with the EncoderStatic class, we can formulate the factorized
approximate latent posterior `q` inference ("encoder") model as
```none
q(z_{1:T}, f | x_{1:T}) = q(f | x_{1:T}) prod_{t=1}^T q(z_t | x_t).
```
"""
def __init__(self, latent_size, hidden_size):
"""Constructs a "factorized" encoder for `z_t`.
Args:
latent_size: An integer corresponding to the
dimensionality of the distribution.
hidden_size: Dimensionality of the affine function parameters.
"""
super(EncoderDynamicFactorized, self).__init__()
self.latent_size = latent_size
self.hidden_size = hidden_size
self.dense = tf.keras.layers.Dense(hidden_size, activation=tf.nn.leaky_relu)
self.output_layer = tf.keras.layers.Dense(2*latent_size)
def call(self, inputs):
"""Runs the model to generate a distribution `q(z_{1:T} | x_{1:T})`.
Args:
inputs: A batch of intermediate representations of image frames
across all timesteps, of shape [..., batch_size, timesteps,
hidden_size].
Returns:
A batch of MultivariateNormalDiag distributions with event shape
[latent_size], batch shape [..., batch_size, timesteps], and
sample shape [sample_shape, ..., batch_size, timesteps,
latent_size].
"""
out = self.dense(inputs) # (..., batch, time, hidden)
out = self.output_layer(out) # (..., batch, time, 2*latent)
loc = out[..., :self.latent_size]
scale_diag = tf.nn.softplus(out[..., self.latent_size:]) + 1e-5 # keep > 0
return tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag)
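# --- Illustrative sketch (not part of the original example) -----------------
# The factorized posterior returned above has batch shape [..., batch,
# timesteps] and event shape [latent_size], so `log_prob` of a
# `[..., batch, timesteps, latent_size]` sample yields one value per timestep;
# summing over the last axis gives `sum_t log q(z_t | x_t)`, which is how the
# training loop in `main` treats the dynamic terms. The shapes in this helper
# are assumptions for illustration only.
def _per_timestep_log_prob_sketch(dist, dynamic_sample):
  """Sums per-timestep log-probs of a [..., batch, T, latent] sample."""
  per_timestep = dist.log_prob(dynamic_sample)  # [..., batch, timesteps]
  return tf.reduce_sum(input_tensor=per_timestep, axis=-1)  # sum over time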
class EncoderDynamicFull(tf.keras.Model):
"""Probabilistic encoder for the time-variant latent variable `z_t`.
The conditional distribution `q(z_{1:T} | x_{1:T}, f)` is a
multivariate normal distribution on `R^{latent_size}` at each timestep
`t`, conditioned on both an intermediate representation of the inputs
`x_t` from the convolutional encoder, and on a sample of the static
latent variable `f` at each timestep. The parameters are computed by
passing the inputs through a bidirectional LSTM function, then passing
these intermediates through an RNN function and an affine function to
yield normal parameters for `q(z_t | x_{1:T}, f)`.
In this formulation, we posit that `z_t` is conditionally dependent on
`f`.
Together with the EncoderStatic class, we can formulate the full
  approximate latent posterior `q` inference ("encoder") model as
```none
q(z_{1:T}, f | x_{1:T}) = q(f | x_{1:T}) q(z_{1:T} | f, x_{1:T}).
```
"""
def __init__(self, latent_size, hidden_size):
"""Constructs a "full" encoder for `z_t`.
Args:
latent_size: An integer corresponding to the
dimensionality of the distribution.
hidden_size: Dimensionality of the LSTM, RNN, and affine function
parameters.
"""
super(EncoderDynamicFull, self).__init__()
self.latent_size = latent_size
self.hidden_size = hidden_size
self.bilstm = tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(hidden_size, return_sequences=True),
merge_mode="sum")
self.rnn = tf.keras.layers.SimpleRNN(hidden_size, return_sequences=True)
self.output_layer = tf.keras.layers.Dense(2*latent_size)
def call(self, inputs):
"""Runs the model to generate a distribution `q(z_{1:T} | x_{1:T}, f)`.
    This generates a batched MultivariateNormalDiag distribution with a
    timesteps batch dimension, using the output of the recurrent model at
    each timestep to parameterize the distribution at that timestep.
Args:
inputs: A tuple of a batch of intermediate representations of
image frames across all timesteps of shape [..., batch_size,
timesteps, dimensions], and a sample of the static latent
variable `f` of shape [..., batch_size, latent_size].
Returns:
A batch of MultivariateNormalDiag distributions with event shape
[latent_size], batch shape [broadcasted_shape, batch_size,
timesteps], and sample shape [sample_shape, broadcasted_shape,
batch_size, timesteps, latent_size], where `broadcasted_shape` is
the broadcasted sampled shape between the inputs and static
sample.
"""
# We explicitly broadcast `x` and `f` to the same shape other than the final
# dimension, because `tf.concat` can't automatically do this. This will
# entail adding a `timesteps` dimension to `f` to give the shape `(...,
# batch, timesteps, latent)`, and then broadcasting the sample shapes of
# both tensors to the same shape.
features, static_sample = inputs
length = tf.shape(input=features)[-2]
static_sample = static_sample[..., tf.newaxis, :] + tf.zeros([length, 1])
sample_shape_static = tf.shape(input=static_sample)[:-3]
sample_shape_inputs = tf.shape(input=features)[:-3]
broadcast_shape_inputs = tf.concat((sample_shape_static, [1, 1, 1]), 0)
broadcast_shape_static = tf.concat((sample_shape_inputs, [1, 1, 1]), 0)
features = features + tf.zeros(broadcast_shape_inputs)
static_sample = static_sample + tf.zeros(broadcast_shape_static)
# `combined` will have shape (..., batch, T, hidden+latent).
combined = tf.concat((features, static_sample), axis=-1)
# TODO(dusenberrymw): Remove these reshaping commands after b/113126249 is
# fixed.
collapsed_shape = tf.concat(([-1], tf.shape(input=combined)[-2:]), axis=0)
out = tf.reshape(combined, collapsed_shape)
out = self.bilstm(out) # (sample*batch, T, hidden_size)
out = self.rnn(out) # (sample*batch, T, hidden_size)
expanded_shape = tf.concat(
(tf.shape(input=combined)[:-2], tf.shape(input=out)[1:]), axis=0)
out = tf.reshape(out, expanded_shape) # (sample, batch, T, hidden_size)
out = self.output_layer(out) # (sample, batch, T, 2*latent_size)
loc = out[..., :self.latent_size]
scale_diag = tf.nn.softplus(out[..., self.latent_size:]) + 1e-5 # keep > 0
return tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag)
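# --- Illustrative sketch (not part of the original example) -----------------
# The broadcasting step in `EncoderDynamicFull.call` can be hard to read:
# `tf.concat` does not broadcast, so the static sample `f` first gets a
# timesteps axis and is then tiled along it by adding zeros. A minimal
# standalone version of that one step (shapes are assumptions for
# illustration):
def _tile_static_over_time_sketch(static_sample, length):
  """Expands `[..., batch, latent]` to `[..., batch, length, latent]`."""
  expanded = static_sample[..., tf.newaxis, :]  # add a timesteps axis
  return expanded + tf.zeros([length, 1])  # broadcast along the new axis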
class DisentangledSequentialVAE(tf.keras.Model):
"""Disentangled Sequential Variational Autoencoder.
The disentangled sequential variational autoencoder posits a generative
model in which a static, time-invariant latent variable `f` is sampled
from a prior `p(f)`, a dynamic, time-variant latent variable `z_t` at
timestep `t` is sampled from a conditional distribution
`p(z_t | z_{<t})`, and an observation `x_t` is generated by a
probabilistic decoder `p(x_t | z_t, f)`. The full generative model is
defined as
```none
p(x_{1:T}, z_{1:T}, f) = p(f) prod_{t=1}^T p(z_t | z_{<t}) p(x_t | z_t, f).
```
We then posit an approximate posterior over the latent variables in the
form of a probabilistic encoder `q(z_{1:T}, f | x_{1:T})`. Paired with
the probabilistic decoder, we can form a sequential variational
autoencoder model. Variational inference can be used to fit the model by
decomposing the log marginal distribution `log p(x_{1:T})` into the
evidence lower bound (ELBO) and the KL divergence between the true and
approximate posteriors over the latent variables
```none
log p(x) = -KL[q(z_{1:T},f|x_{1:T}) || p(x_{1:T},z_{1:T},f)]
+ KL[q(z_{1:T},f|x_{1:T}) || p(z_{1:T},f|x_{1:T})]
= ELBO + KL[q(z_{1:T},f|x_{1:T}) || p(z_{1:T},f|x_{1:T})]
         >= ELBO  # since the KL divergence term is non-negative,
  where
    ELBO = int int q(z_{1:T},f|x_{1:T}) [
             log p(x_{1:T},z_{1:T},f) - log q(z_{1:T},f|x_{1:T}) ] dz_{1:T} df.
```
We then maximize the ELBO with respect to the model's parameters.
The approximate posterior `q(z_{1:T}, f | x_{1:T})` can be formulated in
two ways. The first formulation is a distribution that factorizes
across timesteps,
```none
q(z_{1:T}, f | x_{1:T}) = q(f | x_{1:T}) prod_{t=1}^T q(z_t | x_t),
```
where `q(f | x_{1:T})` is a multivariate Gaussian parameterized by a
bidirectional LSTM-based model, and `q(z_t | x_t)` is a multivariate
Gaussian parameterized by a convolutional model. This is known as the
"factorized" `q` distribution.
The second formulation is a distribution
```none
q(z_{1:T}, f | x_{1:T}) = q(f | x_{1:T}) q(z_{1:T} | f, x_{1:T}),
```
where `q(z_{1:T} | f, x_{1:T})` is a multivariate Gaussian parameterized
by a model consisting of a bidirectional LSTM followed by a basic RNN,
and `q(f | x_{1:T})` is the same as previously described. This is known
as the "full" `q` distribution.
"""
def __init__(self, latent_size_static, latent_size_dynamic,
hidden_size, channels, latent_posterior):
"""Constructs a Disentangled Sequential Variational Autoencoder.
Args:
latent_size_static: Integer dimensionality of the static,
time-invariant latent variable `f`.
latent_size_dynamic: Integer dimensionality of each dynamic,
time-variant latent variable `z_t`.
hidden_size: Integer dimensionality of the model intermediates.
channels: Integer depth of the output of the decoder.
latent_posterior: Either "factorized" or "full" to indicate the
formulation for the latent posterior `q`.
"""
super(DisentangledSequentialVAE, self).__init__()
self.latent_size_static = latent_size_static
self.latent_size_dynamic = latent_size_dynamic
self.hidden_size = hidden_size
self.channels = channels
self.latent_posterior = latent_posterior
self.static_prior = LearnableMultivariateNormalDiag(latent_size_static)
self.dynamic_prior = LearnableMultivariateNormalDiagCell(
latent_size_dynamic, hidden_size)
self.decoder = Decoder(hidden_size, channels)
self.compressor = Compressor(hidden_size)
self.static_encoder = EncoderStatic(latent_size_static, hidden_size)
if latent_posterior == "factorized":
self.dynamic_encoder = EncoderDynamicFactorized(
latent_size_dynamic, hidden_size)
else:
self.dynamic_encoder = EncoderDynamicFull(
latent_size_dynamic, hidden_size)
def generate(self, batch_size, length, samples=1, fix_static=False,
fix_dynamic=False):
"""Generate new sequences.
Args:
batch_size: Number of sequences to generate.
length: Number of timesteps to generate for each sequence.
samples: Number of samples to draw from the latent distributions.
fix_static: Boolean for whether or not to share the same random
sample of the static latent variable `f` from its prior across
all examples.
fix_dynamic: Boolean for whether or not to share the same random
sample of the dynamic latent variable `z_{1:T}` from its prior
across all examples.
Returns:
A batched Independent distribution wrapping a set of Normal
distributions over the pixels of the generated sequences, where
the Independent distribution has event shape [height, width,
channels], batch shape [samples, batch_size, timesteps], and
sample shape [sample_shape, samples, batch_size, timesteps,
height, width, channels].
"""
static_sample, _ = self.sample_static_prior(samples, batch_size, fix_static)
dynamic_sample, _ = self.sample_dynamic_prior(samples, batch_size, length,
fix_dynamic)
likelihood = self.decoder((dynamic_sample, static_sample))
return likelihood
def reconstruct(self, inputs, samples=1, sample_static=False,
sample_dynamic=False, swap_static=False, swap_dynamic=False,
fix_static=False, fix_dynamic=False):
"""Reconstruct the given input sequences.
Args:
inputs: A batch of image sequences `x_{1:T}` of shape
`[batch_size, timesteps, height, width, channels]`.
samples: Number of samples to draw from the latent distributions.
sample_static: Boolean for whether or not to randomly sample the
static latent variable `f` from its prior distribution.
sample_dynamic: Boolean for whether or not to randomly sample the
dynamic latent variable `z_{1:T}` from its prior distribution.
swap_static: Boolean for whether or not to swap the encodings for
the static latent variable `f` between the examples.
swap_dynamic: Boolean for whether or not to swap the encodings for
the dynamic latent variable `z_{1:T}` between the examples.
fix_static: Boolean for whether or not to share the same random
sample of the static latent variable `f` from its prior across
all examples.
fix_dynamic: Boolean for whether or not to share the same random
sample of the dynamic latent variable `z_{1:T}` from its prior
across all examples.
Returns:
A batched Independent distribution wrapping a set of Normal
distributions over the pixels of the reconstruction of the input,
where the Independent distribution has event shape [height, width,
channels], batch shape [samples, batch_size, timesteps], and
sample shape [sample_shape, samples, batch_size, timesteps,
height, width, channels].
"""
batch_size = tf.shape(input=inputs)[-5]
length = len(tf.unstack(inputs, axis=-4)) # hack for graph mode
features = self.compressor(inputs) # (..., batch, timesteps, hidden)
if sample_static:
static_sample, _ = self.sample_static_prior(
samples, batch_size, fix_static)
else:
static_sample, _ = self.sample_static_posterior(features, samples)
if swap_static:
static_sample = tf.reverse(static_sample, axis=[1])
if sample_dynamic:
dynamic_sample, _ = self.sample_dynamic_prior(
samples, batch_size, length, fix_dynamic)
else:
dynamic_sample, _ = self.sample_dynamic_posterior(
features, samples, static_sample)
if swap_dynamic:
dynamic_sample = tf.reverse(dynamic_sample, axis=[1])
likelihood = self.decoder((dynamic_sample, static_sample))
return likelihood
def sample_static_prior(self, samples, batch_size, fixed=False):
"""Sample the static latent prior.
Args:
samples: Number of samples to draw from the latent distribution.
batch_size: Number of sequences to sample.
fixed: Boolean for whether or not to share the same random
sample across all sequences.
Returns:
A tuple of a sample tensor of shape [samples, batch_size,
latent_size], and a MultivariateNormalDiag distribution from which
the tensor was sampled, with event shape [latent_size], and batch
shape [].
"""
dist = self.static_prior()
if fixed: # in either case, shape is (samples, batch, latent)
sample = dist.sample((samples, 1)) + tf.zeros([batch_size, 1])
else:
sample = dist.sample((samples, batch_size))
return sample, dist
def sample_static_posterior(self, inputs, samples):
"""Sample the static latent posterior.
Args:
inputs: A batch of intermediate representations of image frames
across all timesteps, of shape [..., batch_size, timesteps,
hidden_size].
samples: Number of samples to draw from the latent distribution.
Returns:
A tuple of a sample tensor of shape [samples, batch_size,
latent_size], and a MultivariateNormalDiag distribution from which
the tensor was sampled, with event shape [latent_size], and batch
shape [..., batch_size].
"""
dist = self.static_encoder(inputs)
sample = dist.sample(samples)
return sample, dist
def sample_dynamic_prior(self, samples, batch_size, length, fixed=False):
"""Sample the dynamic latent prior.
Args:
samples: Number of samples to draw from the latent distribution.
batch_size: Number of sequences to sample.
length: Number of timesteps to sample for each sequence.
fixed: Boolean for whether or not to share the same random
sample across all sequences.
Returns:
      A tuple of a sample tensor of shape [samples, batch_size, length,
latent_size], and a MultivariateNormalDiag distribution from which
the tensor was sampled, with event shape [latent_size], and batch
shape [samples, 1, length] if fixed or [samples, batch_size,
length] otherwise.
"""
if fixed:
sample_batch_size = 1
else:
sample_batch_size = batch_size
sample, state = self.dynamic_prior.zero_state([samples, sample_batch_size])
locs = []
scale_diags = []
sample_list = []
for _ in range(length):
dist, state = self.dynamic_prior(sample, state)
sample = dist.sample()
locs.append(dist.parameters["loc"])
scale_diags.append(dist.parameters["scale_diag"])
sample_list.append(sample)
sample = tf.stack(sample_list, axis=2)
loc = tf.stack(locs, axis=2)
scale_diag = tf.stack(scale_diags, axis=2)
if fixed: # tile along the batch axis
sample = sample + tf.zeros([batch_size, 1, 1])
return sample, tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag)
def sample_dynamic_posterior(self, inputs, samples, static_sample=None):
"""Sample the static latent posterior.
Args:
inputs: A batch of intermediate representations of image frames
across all timesteps, of shape [..., batch_size, timesteps,
hidden_size].
samples: Number of samples to draw from the latent distribution.
static_sample: A tensor sample of the static latent variable `f`
of shape [..., batch_size, latent_size]. Only used
for the full dynamic posterior formulation.
Returns:
      A tuple of a sample tensor of shape [samples, batch_size, length,
latent_size], and a MultivariateNormalDiag distribution from which
the tensor was sampled, with event shape [latent_size], and batch
shape [broadcasted_shape, batch_size, length], where
`broadcasted_shape` is the broadcasted sampled shape between the
inputs and static sample.
Raises:
ValueError: If the "full" latent posterior formulation is being
used, yet a static latent sample was not provided.
"""
if self.latent_posterior == "factorized":
dist = self.dynamic_encoder(inputs)
samples = dist.sample(samples) # (s, N, T, lat)
else: # full
if static_sample is None:
raise ValueError(
"The full dynamic posterior requires a static latent sample")
dist = self.dynamic_encoder((inputs, static_sample))
samples = dist.sample() # (samples, N, latent)
return samples, dist
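# --- Illustrative usage sketch (not part of the original example) -----------
# A minimal way to exercise the model's public API. The sizes below are
# placeholders rather than the flag values used in `main()`; the compressor
# and decoder assume 64x64 frames, as noted in `Compressor`.
def _model_usage_sketch():
  model = DisentangledSequentialVAE(
      latent_size_static=16, latent_size_dynamic=16,
      hidden_size=32, channels=3, latent_posterior="full")
  fake_batch = tf.zeros([4, 8, 64, 64, 3])  # (batch, timesteps, h, w, c)
  generated = model.generate(batch_size=4, length=8)  # sample from the priors
  swapped = model.reconstruct(fake_batch, swap_static=True)  # swap content
  return generated.mean(), swapped.mean()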
def image_summary(seqs, name, num=None):
"""Visualizes sequences as TensorBoard summaries.
Args:
seqs: A tensor of shape [n, t, h, w, c].
name: String name of this summary.
num: Integer for the number of examples to visualize. Defaults to
all examples.
"""
seqs = tf.clip_by_value(seqs, 0., 1.)
seqs = tf.unstack(seqs[:num])
joined_seqs = [tf.concat(tf.unstack(seq), 1) for seq in seqs]
joined_seqs = tf.expand_dims(tf.concat(joined_seqs, 0), 0)
tf.compat.v2.summary.image(
name,
joined_seqs,
max_outputs=1,
step=tf.compat.v1.train.get_or_create_global_step())
def visualize_reconstruction(inputs, reconstruct, num=3, name="reconstruction"):
"""Visualizes the reconstruction of inputs in TensorBoard.
Args:
inputs: A tensor of the original inputs, of shape [batch, timesteps,
h, w, c].
reconstruct: A tensor of a reconstruction of inputs, of shape
[batch, timesteps, h, w, c].
num: Integer for the number of examples to visualize.
name: String name of this summary.
"""
reconstruct = tf.clip_by_value(reconstruct, 0., 1.)
inputs_and_reconstruct = tf.concat((inputs[:num], reconstruct[:num]), axis=0)
image_summary(inputs_and_reconstruct, name)
def visualize_qualitative_analysis(inputs, model, samples=1, batch_size=3,
length=8):
"""Visualizes a qualitative analysis of a given model.
Args:
inputs: A tensor of the original inputs, of shape [batch, timesteps,
h, w, c].
model: A DisentangledSequentialVAE model.
samples: Number of samples to draw from the latent distributions.
batch_size: Number of sequences to generate.
length: Number of timesteps to generate for each sequence.
"""
average = lambda dist: tf.reduce_mean(
input_tensor=dist.mean(), axis=0) # avg over samples
with tf.compat.v1.name_scope("val_reconstruction"):
reconstruct = functools.partial(model.reconstruct, inputs=inputs,
samples=samples)
visualize_reconstruction(inputs, average(reconstruct()))
visualize_reconstruction(inputs, average(reconstruct(sample_static=True)),
name="static_prior")
visualize_reconstruction(inputs, average(reconstruct(sample_dynamic=True)),
name="dynamic_prior")
visualize_reconstruction(inputs, average(reconstruct(swap_static=True)),
name="swap_static")
visualize_reconstruction(inputs, average(reconstruct(swap_dynamic=True)),
name="swap_dynamic")
with tf.compat.v1.name_scope("generation"):
generate = functools.partial(model.generate, batch_size=batch_size,
length=length, samples=samples)
image_summary(average(generate(fix_static=True)), "fix_static")
image_summary(average(generate(fix_dynamic=True)), "fix_dynamic")
def summarize_dist_params(dist, name, name_scope="dist_params"):
"""Summarize the parameters of a distribution.
Args:
dist: A Distribution object with mean and standard deviation
parameters.
name: The name of the distribution.
name_scope: The name scope of this summary.
"""
with tf.compat.v1.name_scope(name_scope):
tf.compat.v2.summary.histogram(
name="{}/{}".format(name, "mean"),
data=dist.mean(),
step=tf.compat.v1.train.get_or_create_global_step())
tf.compat.v2.summary.histogram(
name="{}/{}".format(name, "stddev"),
data=dist.stddev(),
step=tf.compat.v1.train.get_or_create_global_step())
def summarize_mean_in_nats_and_bits(inputs, units, name,
nats_name_scope="nats",
bits_name_scope="bits_per_dim"):
"""Summarize the mean of a tensor in nats and bits per unit.
Args:
inputs: A tensor of values measured in nats.
units: The units of the tensor with which to compute the mean bits
per unit.
name: The name of the tensor.
nats_name_scope: The name scope of the nats summary.
bits_name_scope: The name scope of the bits summary.
"""
mean = tf.reduce_mean(input_tensor=inputs)
with tf.compat.v1.name_scope(nats_name_scope):
tf.compat.v2.summary.scalar(
name,
mean,
step=tf.compat.v1.train.get_or_create_global_step())
with tf.compat.v1.name_scope(bits_name_scope):
tf.compat.v2.summary.scalar(
name,
mean / units / tf.math.log(2.),
step=tf.compat.v1.train.get_or_create_global_step())
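# --- Illustrative sketch (not part of the original example) -----------------
# The bits-per-dim summary above is just `mean_nats / units / ln(2)`. For
# example, a mean log-likelihood of 69.3 nats spread over 100 dimensions is
# roughly 69.3 / 100 / 0.6931 ~= 1.0 bit per dimension. A plain-Python check
# (the numbers are made up for illustration):
def _nats_to_bits_per_dim_sketch(mean_nats=69.3, units=100):
  import math
  return mean_nats / units / math.log(2.)  # ~1.0 for the defaults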
def main(argv):
del argv # unused
tf.compat.v1.enable_eager_execution()
tf.compat.v1.set_random_seed(FLAGS.seed)
timestamp = datetime.strftime(datetime.today(), "%y%m%d_%H%M%S")
FLAGS.logdir = FLAGS.logdir.format(timestamp=timestamp)
FLAGS.model_dir = FLAGS.model_dir.format(timestamp=timestamp)
if not tf.io.gfile.exists(FLAGS.model_dir):
tf.io.gfile.makedirs(FLAGS.model_dir)
sprites_data = sprites_dataset.SpritesDataset(fake_data=FLAGS.fake_data)
model = DisentangledSequentialVAE(
latent_size_static=FLAGS.latent_size_static,
latent_size_dynamic=FLAGS.latent_size_dynamic,
hidden_size=FLAGS.hidden_size, channels=sprites_data.channels,
latent_posterior=FLAGS.latent_posterior)
global_step = tf.compat.v1.train.get_or_create_global_step()
optimizer = tf.compat.v1.train.AdamOptimizer(
tf.compat.v1.train.cosine_decay(FLAGS.learning_rate, global_step,
FLAGS.max_steps))
checkpoint = tf.train.Checkpoint(model=model, global_step=global_step,
optimizer=optimizer)
checkpoint_manager = contrib_checkpoint.CheckpointManager(
checkpoint, directory=FLAGS.model_dir, max_to_keep=5)
checkpoint.restore(checkpoint_manager.latest_checkpoint)
writer = tf.compat.v2.summary.create_file_writer(FLAGS.logdir)
writer.set_as_default()
dataset = sprites_data.train.map(lambda *x: x[0]).shuffle(1000).repeat()
dataset = dataset.batch(FLAGS.batch_size).take(FLAGS.max_steps)
if FLAGS.enable_debug_logging:
for inputs in dataset.prefetch(buffer_size=None):
with tf.compat.v2.summary.record_if(
lambda: tf.math.equal(0, global_step % FLAGS.log_steps)):
tf.compat.v2.summary.histogram(
"image",
data=inputs,
step=tf.compat.v1.train.get_or_create_global_step())
with tf.GradientTape() as tape:
features = model.compressor(inputs) # (batch, timesteps, hidden)
static_sample, static_posterior = model.sample_static_posterior(
features, FLAGS.num_samples) # (samples, batch, latent)
dynamic_sample, dynamic_posterior = model.sample_dynamic_posterior(
features, FLAGS.num_samples, static_sample) # (sampl, N, T, latent)
likelihood = model.decoder((dynamic_sample, static_sample))
reconstruction = tf.reduce_mean( # integrate samples
input_tensor=likelihood.mean()[:FLAGS.num_reconstruction_samples],
axis=0)
visualize_reconstruction(inputs, reconstruction,
name="train_reconstruction")
static_prior = model.static_prior()
_, dynamic_prior = model.sample_dynamic_prior(
FLAGS.num_samples, FLAGS.batch_size, sprites_data.length)
if FLAGS.enable_debug_logging:
summarize_dist_params(static_prior, "static_prior")
summarize_dist_params(static_posterior, "static_posterior")
summarize_dist_params(dynamic_prior, "dynamic_prior")
summarize_dist_params(dynamic_posterior, "dynamic_posterior")
summarize_dist_params(likelihood, "likelihood")
static_prior_log_prob = static_prior.log_prob(static_sample)
static_posterior_log_prob = static_posterior.log_prob(static_sample)
dynamic_prior_log_prob = tf.reduce_sum(
input_tensor=dynamic_prior.log_prob(dynamic_sample),
axis=-1) # sum time
dynamic_posterior_log_prob = tf.reduce_sum(
input_tensor=dynamic_posterior.log_prob(dynamic_sample),
axis=-1) # sum time
likelihood_log_prob = tf.reduce_sum(
input_tensor=likelihood.log_prob(inputs), axis=-1) # sum time
if FLAGS.enable_debug_logging:
with tf.compat.v1.name_scope("log_probs"):
summarize_mean_in_nats_and_bits(
static_prior_log_prob, FLAGS.latent_size_static, "static_prior")
summarize_mean_in_nats_and_bits(
static_posterior_log_prob, FLAGS.latent_size_static,
"static_posterior")
summarize_mean_in_nats_and_bits(
dynamic_prior_log_prob, FLAGS.latent_size_dynamic *
sprites_data.length, "dynamic_prior")
summarize_mean_in_nats_and_bits(
dynamic_posterior_log_prob, FLAGS.latent_size_dynamic *
sprites_data.length, "dynamic_posterior")
summarize_mean_in_nats_and_bits(
likelihood_log_prob, sprites_data.frame_size ** 2 *
sprites_data.channels * sprites_data.length, "likelihood")
elbo = tf.reduce_mean(input_tensor=static_prior_log_prob -
static_posterior_log_prob +
dynamic_prior_log_prob -
dynamic_posterior_log_prob + likelihood_log_prob)
loss = -elbo
tf.compat.v2.summary.scalar(
"elbo",
elbo,
step=tf.compat.v1.train.get_or_create_global_step())
grads = tape.gradient(loss, model.variables)
grads, global_norm = tf.clip_by_global_norm(grads, FLAGS.clip_norm)
grads_and_vars = list(zip(grads, model.variables)) # allow reuse in py3
if FLAGS.enable_debug_logging:
with tf.compat.v1.name_scope("grads"):
tf.compat.v2.summary.scalar(
"global_norm_grads",
global_norm,
step=tf.compat.v1.train.get_or_create_global_step())
tf.compat.v2.summary.scalar(
"global_norm_grads_clipped",
tf.linalg.global_norm(grads),
step=tf.compat.v1.train.get_or_create_global_step())
for grad, var in grads_and_vars:
with tf.compat.v1.name_scope("grads"):
tf.compat.v2.summary.histogram(
"{}/grad".format(var.name),
data=grad,
step=tf.compat.v1.train.get_or_create_global_step())
with tf.compat.v1.name_scope("vars"):
tf.compat.v2.summary.histogram(
var.name,
data=var,
step=tf.compat.v1.train.get_or_create_global_step())
optimizer.apply_gradients(grads_and_vars, global_step)
is_log_step = global_step.numpy() % FLAGS.log_steps == 0
is_final_step = global_step.numpy() == FLAGS.max_steps
if is_log_step or is_final_step:
checkpoint_manager.save()
print("ELBO ({}/{}): {}".format(global_step.numpy(), FLAGS.max_steps,
elbo.numpy()))
with tf.compat.v2.summary.record_if(True):
val_data = sprites_data.test.take(20)
inputs = next(iter(val_data.shuffle(20).batch(3)))[0]
visualize_qualitative_analysis(inputs, model,
FLAGS.num_reconstruction_samples)
writer.flush()
if __name__ == "__main__":
app.run(main)
|
#!/usr/bin/env python
import matplotlib
matplotlib.use("Agg") # NOQA
import matplotlib.pyplot as plt
import numpy as np
import os
import re
import sys
# OpenCV import for python3.5
sys.path.remove('/opt/ros/{}/lib/python2.7/dist-packages'.format(os.getenv('ROS_DISTRO'))) # NOQA
import cv2 # NOQA
sys.path.append('/opt/ros/{}/lib/python2.7/dist-packages'.format(os.getenv('ROS_DISTRO'))) # NOQA
from chainercv.visualizations import vis_semantic_segmentation
from cv_bridge import CvBridge
from edgetpu.basic.basic_engine import BasicEngine
import rospkg
import rospy
from jsk_topic_tools import ConnectionBasedTransport
from sensor_msgs.msg import Image
class EdgeTPUSemanticSegmenter(ConnectionBasedTransport):
def __init__(self):
super(EdgeTPUSemanticSegmenter, self).__init__()
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('coral_usb')
self.bridge = CvBridge()
self.classifier_name = rospy.get_param(
'~classifier_name', rospy.get_name())
model_file = os.path.join(
pkg_path,
'./models/deeplabv3_mnv2_pascal_quant_edgetpu.tflite')
model_file = rospy.get_param('~model_file', model_file)
label_file = rospy.get_param('~label_file', None)
self.engine = BasicEngine(model_file)
self.input_shape = self.engine.get_input_tensor_shape()[1:3]
if label_file is None:
self.label_names = [
'background',
'aeroplane',
'bicycle',
'bird',
'boat',
'bottle',
'bus',
'car',
'cat',
'chair',
'cow',
'diningtable',
'dog',
'horse',
'motorbike',
'person',
'pottedplant',
'sheep',
'sofa',
'train',
'tvmonitor'
]
self.label_ids = list(range(len(self.label_names)))
else:
self.label_ids, self.label_names = self._load_labels(label_file)
self.pub_label = self.advertise(
'~output/label', Image, queue_size=1)
self.pub_image = self.advertise(
'~output/image', Image, queue_size=1)
def subscribe(self):
self.sub_image = rospy.Subscriber(
'~input', Image, self.image_cb, queue_size=1, buff_size=2**26)
def unsubscribe(self):
self.sub_image.unregister()
@property
def visualize(self):
return self.pub_image.get_num_connections() > 0
def config_callback(self, config, level):
self.score_thresh = config.score_thresh
self.top_k = config.top_k
return config
def _load_labels(self, path):
p = re.compile(r'\s*(\d+)(.+)')
with open(path, 'r', encoding='utf-8') as f:
lines = (p.match(line).groups() for line in f.readlines())
labels = {int(num): text.strip() for num, text in lines}
return list(labels.keys()), list(labels.values())
def image_cb(self, msg):
img = self.bridge.imgmsg_to_cv2(msg, desired_encoding='rgb8')
H, W = img.shape[:2]
input_H, input_W = self.input_shape
input_tensor = cv2.resize(img, (input_W, input_H))
input_tensor = input_tensor.flatten()
_, label = self.engine.run_inference(input_tensor)
label = label.reshape(self.input_shape)
label = cv2.resize(
label, (W, H), interpolation=cv2.INTER_NEAREST)
label = label.astype(np.int32)
label_msg = self.bridge.cv2_to_imgmsg(label, '32SC1')
label_msg.header = msg.header
self.pub_label.publish(label_msg)
if self.visualize:
fig = plt.figure(
tight_layout={'pad': 0})
ax = plt.subplot(1, 1, 1)
ax.axis('off')
ax, legend_handles = vis_semantic_segmentation(
img.transpose((2, 0, 1)), label,
label_names=self.label_names, alpha=0.7,
all_label_names_in_legend=True, ax=ax)
ax.legend(
handles=legend_handles, bbox_to_anchor=(1, 1), loc=2)
fig.canvas.draw()
w, h = fig.canvas.get_width_height()
vis_img = np.fromstring(
fig.canvas.tostring_rgb(), dtype=np.uint8)
vis_img.shape = (h, w, 3)
fig.clf()
plt.close()
vis_msg = self.bridge.cv2_to_imgmsg(vis_img, 'rgb8')
# BUG: https://answers.ros.org/question/316362/sensor_msgsimage-generates-float-instead-of-int-with-python3/ # NOQA
vis_msg.step = int(vis_msg.step)
vis_msg.header = msg.header
self.pub_image.publish(vis_msg)
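# --- Illustrative sketch (not part of the original node) --------------------
# `_load_labels` expects one "<id> <name>" pair per line, as in typical
# Edge TPU label files, e.g.:
#
#   0  background
#   1  aeroplane
#   2  bicycle
#
# The regex keeps the integer id and strips whitespace around the name. The
# standalone helper below mirrors that parsing; the sample lines above are an
# assumption about the file format, not taken from a real label file.
def _parse_label_lines_sketch(lines):
    import re
    p = re.compile(r'\s*(\d+)(.+)')
    pairs = (p.match(line).groups() for line in lines if line.strip())
    labels = {int(num): text.strip() for num, text in pairs}
    return list(labels.keys()), list(labels.values())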
if __name__ == '__main__':
rospy.init_node('edgetpu_semantic_segmenter')
segmenter = EdgeTPUSemanticSegmenter()
rospy.spin()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from random import randint
import time
class Claptrap(object):
meta = {
'name': "Claptrap",
'description': "Just few funny Clap Trap's quotes",
'author': "F. Kolacek <[email protected]>",
'version': "1.0",
'triggers': {
'^!clap': "haveSomeFun"
},
'usage': [
"!clap - Shows random Clap Trap's quote"
]
}
def __init__(self, bot):
self._last = time.time()
# Quotes from: http://borderlands.wikia.com/wiki/Claptrap/Quotes
self._quotes = [
[
"Jack: Claptrap -- start bootup sequence.",
"Claptrap: Directive one: Protect humanity! Directive two: Obey Jack at all costs. Directive three: Dance!",
"Jack: No no no no! Cancel directive three!",
"Claptrap: Commencing directive three! Uhntssuhntssuhntss--",
"Jack: Ugh, friggin' hate that guy.",
"Claptrap (commenting): Ahh -- one of my very first startup sequences! The memories..."
],
[
"Jack: Ah, man, I am so late!",
"Jack: NO! Son of a... HEY! You! Yeah yeah, Claptrap unit!",
"Claptrap: Who -- uh, me sir?",
"Jack: Oh, no, I'm sorry the OTHER Hyperion piece of metal crap that can open doors for me. I'm sorry.",
"Claptrap: I can do more than open doors sir! We CL4P-TP units can be programmed to do anything from open doors to ninja-sassinate highly important Janitory officals!",
"Claptrap: I once started a revolution myself. There were lots of guns and a lot of dying. You'd think I would have gotten some better benefits out of the whole thing but no, demoted back to door-opening servitude!",
"Jack: Yeahyeahyeahyeah, got it, just shut up and open the door. I'm late for the quarterly meeting."
],
[
"Claptrap: Booting sequence complete. Hello! I am your new steward bot. Designation: CL4P-TP, Hyperion Robot, Class C. Please adjust factory settings to meet your needs before deployment.",
"Jack: Finally! Can you hear me? What do you remember?",
"Claptrap: Yes. Remember what? Are... are you my father?",
"Jack: Ah, no... uh, you --",
"Claptrap: -- Are you god? Am I dead?",
"Jack: Nonono, you're not dead, you're --",
"Claptrap: I'M DEAD I'M DEAD OHMYGOD I'M DEAD!",
"Jack: You. Are. Not. Dead! Your new designation is FR4G-TP. Fragtrap. You are a merciless killing machine. Got it?",
"Claptrap: O-KAY! Thanks for giving me a second chance, God. I really appreciate it.",
"Jack: What? No, nooo, you are so STUPID! Whatever. You're welcome."
],
[ "Recompiling my combat code!" ],
[ "This time it'll be awesome, I promise!" ],
[ "Healsies!" ],
[ "Crap, no more shots left!" ],
[ "Watch as we observe the rare and beautiful Clappy Bird!" ],
[ "Yeehaw!" ],
[ "Badass!" ],
[ "RUN FOR YOUR LIIIIIVES!!!" ],
[ "That guy looks an awful lot like a Badass!" ],
[ "Hehehehe, mwaa ha ha ha, MWAA HA HA HA!" ],
[ "I am a tornado of death and bullets!" ],
[ "Ha ha ha! Fall before your robot overlord!" ],
[ "Is it dead? Can, can I open my eyes now?" ],
[ "I didn't panic! Nope, not me!" ],
[ "Ha ha ha! Suck it!" ],
[ "Bad guy go boom!" ],
[ "Take a chill pill!" ],
[ "Freezy peezy!" ],
[ "I can't feel my fingers! Gah! I don't have any fingers!" ],
[ "Ow hohoho, that hurts! Yipes!" ],
[ "If only my chassis... weren't made of recycled human body parts! Wahahaha!" ],
[ "Disgusting. I love it!" ],
[ "Ooooh! Terrabits!" ],
[ "I'm pulling tricks outta my hat!" ],
[ "Push this button, flip this dongle, voila! Help me!" ],
[ "I have an IDEA!" ],
[ "I AM ON FIRE!!! OH GOD, PUT ME OUT!!!" ],
[ "Roses are red and/Violets are blue/Wait... how many syllables was that?" ],
[ "Burn them, my mini-phoenix!" ],
[ "Tell me I'm the prettiest!" ],
[ "I am rubber, and you are so dead!" ],
[ "Trouncy, flouncy... founcy... those aren't words." ],
[ "Gotta blow up a bad guy, GOTTA BLOW UP A BAD GUY!" ],
[ "You can call me Gundalf!" ],
[ "Avada kedavra!" ],
[ "Kill, reload! Kill, reload! KILL! RELOAD!" ],
[ "Boogie time!" ],
[ "Everybody, dance time! Da-da-da-dun-daaa-da-da-da-dun-daaa!" ],
[ "I brought you a present: EXPLOSIONS!" ],
[
"Summoned bot: \"Knock Knock.\"",
"Claptrap: \"Who's there?\"",
"Summoned bot: \"Wub.\"",
"Claptrap: \"Wub who?\"",
"Summoned bot: \"Wubwubwubwubwub.\"",
"Claptrap: \"... You're dead to me.\""
],
[ "Wubwubwub. Dubstep dubstep. Wubwubwubwub DROP! Dubstep" ],
[ "I'll die the way I lived: annoying!" ],
[ "This could've gone better!" ],
[ "What's that smell? Oh wait, it's just you!" ],
[ "Yo momma's so dumb, she couldn't think of a good ending for this 'yo momma' joke!" ],
[ "Oh yeah? Well, uh... yeah." ],
[ "I'm too pretty to die!" ],
[ "No, nononono NO!" ],
[ "I will prove to you my robotic superiority!" ],
[ "I am so impressed with myself!" ],
[ "Argh arghargh death gurgle gurglegurgle urgh... death" ],
[ "Don't bother with plastic surgery - there's NO fixing that!" ],
[ "Uh... wasn't me!" ],
[ "I am right behind you, Vault Hunting friend!" ],
[ "So, uh... what OS does your drone use?" ],
[ "I can do that to! ... Sorta... Except not!" ],
[ "Bringing down the law, painfully!" ],
[ "I did a challenge? I did a challenge!" ],
[ "Everything's upside down!" ],
[ "I'm Trap, Claptrap. Double oh... Trap." ],
[ "Get ready for some Fragtrap face time!" ],
[ "Coffee? Black... like my soul." ],
[ "Crazy young whippersnappers..." ],
[ "Guess who?" ],
[ "Burn, baby, burn!" ],
[ "Remember, use caution near an open flame" ],
[ "Zippity doodah!" ],
[ "Hyperiooooon Punch!" ],
[ "High five!" ],
[ "Can I shoot something now? Or climb some stairs? SOMETHING exciting?" ],
[ " like these, I really start to question the meaning of my existence. Then I get distra-hey! What's this? This looks cool!" ],
[ "It would really stink if I couldn't control what I was thinking. Like, who wants to know that I'm thinking about cheese and lint, right?" ],
[ "How does math work? Does this skin make me look fat? If a giraffe and a car had a baby, would it be called a caraffe? Life's big questions, man." ],
[ "Does this mean I can start dancing? Pleeeeeeaaaaase?" ],
[ "It's really quiet... and lonely... (hums briefly) Also this 'stopped moving' thing makes me uncomfortable. It gives me time to stop and think... literally. I'VE STOPPED, AND I'M THINKING! IT HURTS ME!" ],
[ "Oh. My. God. What if I'm like... a fish? And, if I'm not moving... I stop breathing? AND THEN I'LL DIE! HELP ME! HELP MEEEEE HEE HEE HEEE! HHHHHHHELP!" ],
[ "So, this one time, I went to a party, and there was a beautiful subatomic particle accelerator there. Our circuits locked across the room and... I don't remember what happened next. I mean, I can't. We coulda gotten married and had gadgets together, but now, I'll never know." ],
[ "Ahem, ahem. What's going on? Did I break something?" ]
]
def haveSomeFun(self, bot, message):
if message.isSystem():
return
cmd = message.cmd()
if cmd and cmd == "!clap":
if time.time() - self._last < 2:
return
quoteId = randint(0, len(self._quotes) - 1)
quotes = self._quotes[quoteId]
for quote in quotes:
bot.addReply(message.channel, quote)
self._last = time.time()
|
#!/bin/python
import fileinput
def bits_to_int(bits):
return int("".join(str(bit) for bit in bits), 2)
counts = None
total = 0
for line in fileinput.input():
total += 1
bits = [int(bit) for bit in line.strip()]
if counts is None:
counts = [0] * len(bits)
else:
assert len(bits) == len(counts)
for i in range(len(counts)):
counts[i] += bits[i]
cutoff = total / 2
assert not any(count == cutoff for count in counts)
gamma = [int(count > cutoff) for count in counts]
epsilon = [int(not g) for g in gamma]
print(bits_to_int(gamma) * bits_to_int(epsilon))
|
$ git clone git@github.com:<github_username>/bpython.git
|
import io
import os
import csv
import json
from pdfminer.converter import TextConverter
from pdfminer.pdfinterp import PDFPageInterpreter
from pdfminer.pdfinterp import PDFResourceManager
from pdfminer.pdfpage import PDFPage
PDF_NAME_LIST = []
PDF_LIST = []
DATA_DICT = {}
PATH = "your/path/here/"
OUTPUT = open("PDF_Dict.txt", "w")
# Gather all PDFs into a list
for file in os.listdir(PATH):
PDF_LIST.append(file)
# Gather all PDFs without '.pdf' - probably a waste of space but helpful for now to keep text clean
for file in PDF_LIST:
file = file[:-4]
PDF_NAME_LIST.append(file)
def extract_text_from_pdf(pdf_path):
resource_manager = PDFResourceManager()
fake_file_handle = io.StringIO()
converter = TextConverter(resource_manager, fake_file_handle)
page_interpreter = PDFPageInterpreter(resource_manager, converter)
#Utilizing PDF miner module to read each page of the current PDF
with open(pdf_path, 'rb') as fh:
for page in PDFPage.get_pages(fh,
caching=True,
check_extractable=True):
page_interpreter.process_page(page)
text = fake_file_handle.getvalue()
# close open handles
converter.close()
fake_file_handle.close()
if text:
return text
total = len(PDF_LIST)
if __name__ == '__main__':
for i in range(len(PDF_LIST)):
        DATA_DICT[PDF_NAME_LIST[i]] = extract_text_from_pdf(PATH + PDF_LIST[i])
#Progress output in terminal
print(i+1, " | ", total, " | ", PDF_NAME_LIST[i])
OUTPUT.write(json.dumps(DATA_DICT))
print("Completed")
|
# team: Carlos Eduardo Rodrigues de Aguiar
import matplotlib.pyplot as plt
games = ['God of War','GTA San Andreas','Sonic Unleashed','Super Mario World','Clash Royale','Among Us','League of Legends','Watch Dogs','Dark Souls','Shadow of Colossus']
nota = [9.3,9.2,8.9,9.6,8.0,7.6,7.9,7.8,8.9,9.3]
desenvolvedor = ['Santa Monica','Rockstar Games','Sega','Nintendo','Supercell','InnerSloth','Riot Games','Ubisoft','FromSoftware','SCE Japan Studio']
descricao = ['''A história centra-se em torno de seu personagem, Kratos, um guerreiro espartano enganado para matar sua esposa e filha por seu antigo mestre, o deus da guerra Ares.
Kratos mata Ares a mando da deusa Atena e toma seu lugar como o novo deus da guerra,
mas ainda é assombrado por pesadelos de seu passado.''', '''Ambientado no fim de 1992, a trama de San Andreas gira em torno de um membro de gangue, Carl "CJ" Johnson,
que retorna para seu lar em Los Santos, depois de uma longa temporada em Liberty City (a versão de Nova York do jogo),
após descobrir a morte de sua mãe.''', '''. O jogo traz personagens e elementos novos ao universo Sonic, como um dos antagonistas, Dark Gaia, o novo parceiro de Sonic, Chip,
e a oportunidade de controlar Sonic the Werehog, forma que o protagonista assume ao cair da noite.''', '''Em Super Mario World, Mario, Luigi e princesa Peach foram passar férias na ilha dos dinossauros, mas o terrível Bowser está causando confusões por lá, raptando ovos de dinossauros.
Mario e Luigi percorrem sete mundos até chegar no grande castelo de Bowser para resgatarem a princesa.''','''Em Clash Royale, o jogador precisa montar decks com poucas cartas e desafiar outros jogadores em partidas online. No começo, enfrentamos um tutorial, mas em seguida, o jogador estará por sua própria conta e risco em uma arena online.
As cartas são poucas, mas a sequência se repete durante os combates''','''Among Us é um jogo disponível para Android que tem conquistado jogadores por sua premissa simples: temos vários players em uma nave espacial, sendo que um deles é um impostor. O objetivo da tripulação é descobrir quem é esse impostor, para expulsá-lo de lá.
Fácil de entender, mas nem tão fácil de executar.''','''League of Legends é um jogo de estratégia em que duas equipes de cinco poderosos Campeões se enfrentam para destruir a base uma da outra.
Escolha entre mais de 140 Campeões para realizar jogadas épicas, assegurar abates e destruir torres conforme você luta até a vitória.''','''A história segue um homem chamado Aiden Pearce (voz de Noam Jenkins), um hacker "grey hat" altamente qualificado, descrito como uma pessoa que usa bem os "punhos e a inteligência."
Devido a uma "tragédia familiar violenta", Aiden procura fazer a sua própria justiça para com os culpados manipulando o ctOS.''','''Dark Souls se passa primariamente no reino fictício de Lordran,
onde os jogadores assumem o papel de um personagem denominado "Chosen Undead" que,
segundo lendas, seria responsável pela quebra de uma maldição que torna incapazes de morrer aqueles que são afligidos por uma misteriosa marca negra.''','''O enredo do jogo se concentra em um jovem chamado Wander, que deve viajar por uma terra proibida com o objetivo de derrotar dezesseis criaturas,
conhecidas simplesmente como "Colossi", para restaurar a vida de uma garota chamada Mono.''']
##########################FUNCTIONS#############################
def adcionar_game():
print('▀▄▀▄▀▄' * 10)
novo_game = input('Digite o nome do novo game: ')
novo_nota = float(input('Digite a nota deste novo game: '))
novo_desenvolvedor = input('Digite o nome do desenvolvedor do game: ')
novo_descricao = input('Digite a descrição do game: ')
games.append(novo_game)
nota.append(novo_nota)
desenvolvedor.append(novo_desenvolvedor)
descricao.append(novo_descricao)
print('Atualizando...')
print('Agora nós temos {} games cadastrados'.format(len(games)))
print(games)
print('')
print('▀▄▀▄▀▄' * 10)
def detalhes():
print('▀▄▀▄▀▄' * 10)
select = input('Digite o nome do game: ')
if select in games:
indice = games.index(select)
print('Game: {}'.format(select))
print('Nota do Game: {}'.format(nota[indice]))
print('O desenvolvero(a) é {}'.format(desenvolvedor[indice]))
print('Descrição: {}'.format(descricao[indice]))
else:
print('Esse game não está no catalogo. Nome invalido!')
print('▀▄▀▄▀▄' * 10)
def estatisticas():
print('▀▄▀▄▀▄' * 10)
media = sum(nota)/len(nota)
print('A maior nota dada entre os games foi {0}'.format(max(nota)))
print('')
print('A menor nota dada entre os games foi {0}'.format(min(nota)))
print('')
print('A media das notas entre os games foi {:.2f}'.format(media))
print('▀▄▀▄▀▄' * 10)
def grafico():
with plt.style.context('ggplot'):
plt.title('Notas dos games')
plt.xlabel('Games')
plt.ylabel('Notas')
        plt.bar(games, nota)  # bar chart
plt.show()
########################################################
print('''================================
= MEUS GAMES =
========= FAVORITOS =========
================================
''')
print('''Já estão cadastrados {} games que mais gostei:'''.format(len(games)))
print('')
print(games)
print('''
Já estão cadastrados {} notas dos games :'''.format(len(nota)))
print('')
print(nota)
print('''
Já estão cadastrados {} desenvolvedores dos games :'''.format(len(desenvolvedor)))
print('')
print(desenvolvedor)
print('''
Já estão cadastrados {} descrições dos games:'''.format(len(descricao)))
print('')
print(descricao)
print('''▀▄▀▄▀▄''' * 10)
input('''
Pressione ENTER para continuar...
''')
opcao = ''
while opcao != 5:
opcao = int(input('''Escolha uma das opções abaixo:
[1] Adicionar um game ao catálogo.
[2] Ver as estatisticas.
[3] Ver os detalhes de um game.
[4] Mostra graficos comparativos.
[5] Sair do programa.
Qual opção você vai escolher:
'''))
if opcao == 1:
adcionar_game()
elif opcao == 2:
estatisticas()
elif opcao == 3:
detalhes()
elif opcao == 4:
grafico()
print('Obrigado por usar meu programa. ( ͡▀̿ ̿ ͜ʖ ͡▀̿ ̿ )') |
# coding=utf-8
# @Time : 2021/2/2 15:53
# @Auto : zzf-jeff
import numpy as np
import cv2
import os
import random
from tqdm import tqdm
train_txt_path = './train_val_list.txt'
num_img = 10000  # how many images to sample when computing the statistics
img_h, img_w = 640, 640
imgs = np.zeros([img_w, img_h, 3, 1])
means, stdevs = [], []
with open(train_txt_path, 'r') as f:
lines = f.readlines()
    random.shuffle(lines)  # shuffle so images are picked at random
for i in tqdm(range(num_img)):
img_path = lines[i].split('\t')[0]
img = cv2.imread(img_path)
img = cv2.resize(img, (img_h, img_w))
img = img[:, :, :, np.newaxis]
imgs = np.concatenate((imgs, img), axis=3)
imgs = imgs.astype(np.float32) / 255.
for i in tqdm(range(3)):
    pixels = imgs[:, :, i, :].ravel()  # flatten channel i into a 1-D array
means.append(np.mean(pixels))
stdevs.append(np.std(pixels))
# cv2 reads images as BGR; PIL/skimage already return RGB and would not need this swap
means.reverse() # BGR --> RGB
stdevs.reverse()
print("normMean = {}".format(means))
print("normStd = {}".format(stdevs))
print('transforms.Normalize(normMean = {}, normStd = {})'.format(means, stdevs)) |
import boto3
exceptions = boto3.client('macie').exceptions
AccessDeniedException = exceptions.AccessDeniedException
InternalException = exceptions.InternalException
InvalidInputException = exceptions.InvalidInputException
LimitExceededException = exceptions.LimitExceededException
|
import os.path
from functools import reduce
import operator
import data, cabfile, folder, header
class InvalidCabinet(Exception):
pass
class InvalidFileAllocation(InvalidCabinet):
pass
class EmptyFileName(InvalidCabinet):
pass
class Cabinet:
def __init__(self, buffer, verify_integrity=True):
self.buffer = buffer
self.header = header.create(buffer)
self.folders = list(folder.create_folders(self.header, buffer))
self.files = list(cabfile.create_files(self.header, buffer))
self.datas = { folder: list(data.create_datas(self.header, folder, buffer)) for folder in self.folders }
self.resolve()
if verify_integrity:
self.verify()
def resolve(self):
self.folder_and_files = [( folder, list(find_files_in_folder(self.files, folder)) ) for folder in self.folders]
self.folder_and_datas = list(( (f, files, self.datas[f]) for (f, files) in self.folder_and_files))
def verify(self):
self.verify_all_files_allocated_to_folder()
self.verify_filenames()
def verify_all_files_allocated_to_folder(self):
files_in_folders = reduce(operator.concat, [folder_and_files_list[1] for folder_and_files_list in self.folder_and_files], [])
if len(self.files) != len(files_in_folders):
files_without_folder = set(self.files).symmetric_difference(set(files_in_folders))
raise InvalidFileAllocation(f"Files not allocated to a folder: {files_without_folder}")
def verify_filenames(self):
if any(not file.name for file in self.files):
raise EmptyFileName("File with empty name in cabinet")
def find_files_in_folder(files, folder):
for fi in files:
if fi.folder_index == folder.index:
yield fi
def open_cab(path):
cab = read_cab(path)
if not cab.header.has_next_cabinet and not cab.header.has_previous_cabinet:
return cab
cab = read_chained_cabs(cab, os.path.dirname(path))
return cab
def read_chained_cabs(root_cab, current_dir):
set_id = root_cab.header.set_id
cabinet_map = {
root_cab.header.sequence: root_cab
}
current_cab = root_cab
while current_cab.header.has_previous_cabinet:
current_cab = read_cab(os.path.join(current_dir, current_cab.header.previous_cabinet))
if current_cab.header.set_id != set_id:
pass # TODO: log
else:
cabinet_map[current_cab.header.sequence] = current_cab
current_cab = root_cab
while current_cab.header.has_next_cabinet:
current_cab = read_cab(os.path.join(current_dir, current_cab.header.next_cabinet))
if current_cab.header.set_id != set_id:
pass # TODO: log
else:
cabinet_map[current_cab.header.sequence] = current_cab
max_cabinet_index = max(cabinet_map.keys())
cabinets = [None] * (max_cabinet_index + 1)
for index, value in cabinet_map.items():
cabinets[index] = value
return merge_cabs(cabinets)
def merge_cabs(cabs):
folders = []
files = []
datas = []
for cab in cabs:
if any(f.is_continued_from_previous for f in cab.files):
print('Merge with previous', cab.header)
if any(f.is_continued_to_next for f in cab.files):
print('Continue to next', cab.header)
if any(f.is_continued_from_previous_and_to_next for f in cab.files):
print('Prev and next', cab.header)
folders.extend(cab.folders)
files.extend(cab.files)
datas.extend(cab.datas)
return folders, files, datas
def read_cab(path):
with open(path, 'rb') as f:
buffer = f.read()
return Cabinet(buffer)
|
import logging
from time import sleep
from fluent import handler
# Example from https://github.com/fluent/fluent-logger-python
custom_format = {
'host': '%(hostname)s',
'where': '%(module)s.%(funcName)s',
'type': '%(levelname)s',
'stack_trace': '%(exc_text)s'
}
logging.basicConfig(level=logging.INFO)
#l = logging.getLogger('fluent.test')
log = logging.getLogger(__name__)
#h = handler.FluentHandler('fluentd.test', host='localhost', port=24224, buffer_overflow_handler=overflow_handler)
h = handler.FluentHandler('fluentd.test', host='fluentd', port=24224)
formatter = handler.FluentRecordFormatter(custom_format)
h.setFormatter(formatter)
log.addHandler(h)
def test_function():
log.info({
'from': 'userA',
'to': 'userB'
})
log.info('{"from": "userC", "to": "userD"}')
log.info("This log entry will be logged with the additional key: 'message'.")
while True:
test_function()
sleep(10)
|
"""Module to test API class."""
import pytest
from mailerlite import MailerLiteApi
from mailerlite.constants import API_KEY_TEST
@pytest.fixture
def header():
headers = {'content-type': "application/json",
"X-MailerLite-ApiDocs": "true",
'x-mailerlite-apikey': API_KEY_TEST
}
return headers
def test_wrong_headers():
headers_2 = {'content-type': "application/json",
"X-MailerLite-ApiDocs": "true",
'x-mailerlite-apikey': 'FAKE_KEY'
}
with pytest.raises(ValueError):
api = MailerLiteApi(headers_2)
with pytest.raises(OSError):
api = MailerLiteApi('FAKE_KEY')
api.subscribers.all()
def test_api(header):
api = MailerLiteApi(API_KEY_TEST)
assert api.headers == header
batch_requests = {"requests": [{"method": "GET",
"path": "/api/v2/groups"
},
{"method": "GET",
"path": "/api/v2/fields"
}
]
}
res = api.batch(batch_requests)
assert len(res) == 2
|
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 28 14:25:41 2014
@author: victor
@todo Add a generic roulette-wheel selection routine here, since several algorithms will need it
"""
import copy
from random import randrange
#import mysql.connector |
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
urlpatterns = patterns("main.views", url(r"ajax/get_signature$", "jsapi_signature"), url(r"ajax/log$", "log"),)
|
"""Training script, this is converted from a ipython notebook
"""
import os
import csv
import sys
import numpy as np
import mxnet as mx
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# In[2]:
def get_lenet():
""" A lenet style net, takes difference of each frame as input.
"""
source = mx.sym.Variable("data")
source = (source - 128) * (1.0/128)
frames = mx.sym.SliceChannel(source, num_outputs=30)
diffs = [frames[i+1] - frames[i] for i in range(29)]
source = mx.sym.Concat(*diffs)
net = mx.sym.Convolution(source, kernel=(5, 5), num_filter=40)
net = mx.sym.BatchNorm(net, fix_gamma=True)
net = mx.sym.Activation(net, act_type="relu")
net = mx.sym.Pooling(net, pool_type="max", kernel=(2,2), stride=(2,2))
net = mx.sym.Convolution(net, kernel=(3, 3), num_filter=40)
net = mx.sym.BatchNorm(net, fix_gamma=True)
net = mx.sym.Activation(net, act_type="relu")
net = mx.sym.Pooling(net, pool_type="max", kernel=(2,2), stride=(2,2))
# first fullc
flatten = mx.symbol.Flatten(net)
flatten = mx.symbol.Dropout(flatten)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=600)
    # Name the final layer as softmax so it automatically matches the naming of the data iterator
# Otherwise we can also change the provide_data in the data iter
return mx.symbol.LogisticRegressionOutput(data=fc1, name='softmax')
def CRPS(label, pred):
""" Custom evaluation metric on CRPS.
"""
for i in range(pred.shape[0]):
for j in range(pred.shape[1] - 1):
if pred[i, j] > pred[i, j + 1]:
pred[i, j + 1] = pred[i, j]
return np.sum(np.square(label - pred)) / label.size
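# --- Illustrative sketch (not part of the original script) ------------------
# CRPS compares a predicted CDF against the 0/1 step-function target built
# below: a perfect step at the true volume scores 0, and probability mass
# placed away from the step adds squared error. A tiny made-up example:
def _crps_example():
    label = (np.arange(600) > 300).astype(np.float64).reshape(1, -1)
    exact = label.copy()              # perfect step at 300 -> CRPS 0.0
    flat = np.full((1, 600), 0.5)     # uninformative CDF   -> CRPS 0.25
    return CRPS(label, exact), CRPS(label, flat)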
# In[3]:
def encode_label(label_data):
"""Run encoding to encode the label into the CDF target.
"""
systole = label_data[:, 1]
diastole = label_data[:, 2]
systole_encode = np.array([
(x < np.arange(600)) for x in systole
], dtype=np.uint8)
diastole_encode = np.array([
(x < np.arange(600)) for x in diastole
], dtype=np.uint8)
return systole_encode, diastole_encode
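# --- Illustrative sketch (not part of the original script) ------------------
# For a label row [case_id, systole, diastole], encode_label turns each volume
# v into a 600-long 0/1 CDF target with target[j] = 1 exactly when j > v. The
# values below are made up for illustration:
def _encode_label_example():
    toy = np.array([[1., 80.5, 150.2]])
    systole_encode, diastole_encode = encode_label(toy)
    assert systole_encode[0, 80] == 0 and systole_encode[0, 81] == 1
    assert diastole_encode[0, 150] == 0 and diastole_encode[0, 151] == 1
    return systole_encode, diastole_encode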
def encode_csv(label_csv, systole_csv, diastole_csv):
systole_encode, diastole_encode = encode_label(np.loadtxt(label_csv, delimiter=","))
np.savetxt(systole_csv, systole_encode, delimiter=",", fmt="%g")
np.savetxt(diastole_csv, diastole_encode, delimiter=",", fmt="%g")
# Write encoded label into the target csv
# We use CSV so that not all of the data needs to sit in memory
# You can also use an in-memory numpy array if your machine has enough RAM
encode_csv("./train-label.csv", "./train-systole.csv", "./train-diastole.csv")
# # Training the systole net
# In[4]:
network = get_lenet()
batch_size = 32
devs = [mx.gpu(0)]
data_train = mx.io.CSVIter(data_csv="./train-64x64-data.csv", data_shape=(30, 64, 64),
label_csv="./train-systole.csv", label_shape=(600,),
batch_size=batch_size)
data_validate = mx.io.CSVIter(data_csv="./validate-64x64-data.csv", data_shape=(30, 64, 64),
batch_size=1)
systole_model = mx.model.FeedForward(ctx=devs,
symbol = network,
num_epoch = 65,
learning_rate = 0.001,
wd = 0.00001,
momentum = 0.9)
systole_model.fit(X=data_train, eval_metric = mx.metric.np(CRPS))
# # Predict systole
# In[5]:
systole_prob = systole_model.predict(data_validate)
# # Training the diastole net
# In[6]:
network = get_lenet()
batch_size = 32
devs = [mx.gpu(0)]
data_train = mx.io.CSVIter(data_csv="./train-64x64-data.csv", data_shape=(30, 64, 64),
label_csv="./train-diastole.csv", label_shape=(600,),
batch_size=batch_size)
diastole_model = mx.model.FeedForward(ctx=devs,
symbol = network,
num_epoch = 65,
learning_rate = 0.001,
wd = 0.00001,
momentum = 0.9)
diastole_model.fit(X=data_train, eval_metric = mx.metric.np(CRPS))
# # Predict diastole
# In[7]:
diastole_prob = diastole_model.predict(data_validate)
# # Generate Submission
# In[8]:
def accumulate_result(validate_lst, prob):
sum_result = {}
cnt_result = {}
size = prob.shape[0]
fi = csv.reader(open(validate_lst))
for i in range(size):
line = fi.__next__() # Python2: line = fi.next()
idx = int(line[0])
if idx not in cnt_result:
cnt_result[idx] = 0.
sum_result[idx] = np.zeros((1, prob.shape[1]))
cnt_result[idx] += 1
sum_result[idx] += prob[i, :]
for i in cnt_result.keys():
sum_result[i][:] /= cnt_result[i]
return sum_result
# In[9]:
systole_result = accumulate_result("./validate-label.csv", systole_prob)
diastole_result = accumulate_result("./validate-label.csv", diastole_prob)
# In[10]:
# we have 2 people missing due to frame selection, so use udibr's hist result instead
def doHist(data):
h = np.zeros(600)
for j in np.ceil(data).astype(int):
h[j:] += 1
h /= len(data)
return h
train_csv = np.genfromtxt("./train-label.csv", delimiter=',')
hSystole = doHist(train_csv[:, 1])
hDiastole = doHist(train_csv[:, 2])
# In[11]:
def submission_helper(pred):
p = np.zeros(600)
pred.resize(p.shape)
p[0] = pred[0]
for j in range(1, 600):
a = p[j - 1]
b = pred[j]
if b < a:
p[j] = a
else:
p[j] = b
return p
# In[12]:
fi = csv.reader(open("data/sample_submission_validate.csv"))
f = open("submission.csv", "w")
fo = csv.writer(f, lineterminator='\n')
fo.writerow(fi.__next__()) # Python2: fo.writerow(fi.next())
for line in fi:
idx = line[0]
key, target = idx.split('_')
key = int(key)
out = [idx]
if key in systole_result:
if target == 'Diastole':
out.extend(list(submission_helper(diastole_result[key])))
else:
out.extend(list(submission_helper(systole_result[key])))
else:
print("Miss: %s" % idx)
if target == 'Diastole':
out.extend(hDiastole)
else:
out.extend(hSystole)
fo.writerow(out)
f.close()
|
from django.apps import AppConfig
from django.db.models import signals
class PlaybookJobsConfig(AppConfig):
name = 'waldur_ansible.playbook_jobs'
verbose_name = 'Waldur Ansible Playbooks'
def ready(self):
from . import handlers
Playbook = self.get_model('Playbook')
signals.pre_delete.connect(
handlers.delete_playbook_workspace,
sender=Playbook,
dispatch_uid='waldur_ansible.handlers.delete_playbook_workspace',
)
signals.pre_save.connect(
handlers.resize_playbook_image,
sender=Playbook,
dispatch_uid='waldur_ansible.handlers.resize_playbook_image',
)
|
# Generated by Django 3.1.2 on 2020-11-21 00:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cart', '0007_auto_20201102_1902'),
]
operations = [
migrations.RemoveField(
model_name='search',
name='default_facets',
),
]
|
__all__ = [
'configmapping',
'connectionchecks',
'docgen',
'readconfig'
]
|
"""This module defines a base class for CRDS command line scripts.
MAYBE integrate rc, environment, and command line parameters.
"""
# -----------------------------------------------------------------------------
import sys
import os
import argparse
import pdb
import cProfile, pstats
import re
from collections import Counter, defaultdict
# -----------------------------------------------------------------------------
from argparse import RawTextHelpFormatter
# -----------------------------------------------------------------------------
from . import log, heavy_client, constants
from . import config, utils, exceptions, rmap
from crds.client import api
# from crds import data_file : deferred, see below
# =============================================================================
def _show_version():
"""Dump CRDS version information and exit python."""
print(heavy_client.version_info())
sys.exit(-1)
# =============================================================================
# command line parameter type coercion / verification functions
def dataset(filename):
"""Ensure `filename` names a dataset."""
from crds import data_file
if data_file.is_dataset(filename):
return filename
else:
raise ValueError("Parameter", repr(filename),
"does not appear to be a dataset filename.")
def reference_file(filename):
"""Ensure `filename` is a reference file."""
assert config.is_reference(filename), \
"A reference file is required but got bad type: '%s'" % filename
return filename
def mapping(filename):
"""Ensure `filename` is a CRDS mapping file."""
assert config.is_mapping(filename), "A .rmap, .imap, or .pmap file is required but got: '%s'" % filename
return filename
def mapping_spec(spec):
"""Ensure `spec` is a CRDS mapping specification, a filename or a date based spec."""
assert config.is_mapping_spec(spec), "A .rmap, .imap, or .pmap file or date base specification is required but got: '%s'" % spec
return spec
def context_spec(spec):
"""Ensure filename is a .pmap or abstract .pmap like "jwst-edit" or date based context spec."""
assert config.is_context_spec(spec), \
"Parameter should be a .pmap or abstract context specifier, not: " + repr(spec)
return spec
def reference_mapping(filename):
"""Ensure `filename` is a .rmap file."""
assert filename.endswith(".rmap"), "A .rmap file is required but got: '%s'" % filename
return filename
#def mapping(filename):
# """Ensure that `filename` is any known CRDS mapping."""
# if api.is_known_mapping(filename):
# return filename
# else:
# raise ValueError("Parameter", repr(filename),
# "is not a known CRDS mapping.")
def observatory(obs):
"""Verify that `obs` is the name of an observatory and return it."""
obs = obs.lower()
assert obs in constants.ALL_OBSERVATORIES, \
"Unknown observatory " + repr(obs)
return obs
def nrange(string):
"""Verify a context range expression MIN:MAX and return (MIN, MAX)."""
assert re.match(r"\d+:\d+", string), \
"Invalid context range specification " + repr(string)
rmin, rmax = [int(x) for x in string.split(":")]
assert 0 <= rmin <= rmax, "Invalid range values"
return rmin, rmax
def process_key(string):
"""Check the format of a remote process identification key."""
assert config.PROCESS_KEY_RE.match(string), "Invalid format for process key: " + repr(string)
return string
def user_name(string):
"""Check the format of a server user name string."""
assert config.USER_NAME_RE.match(string), "Invalid user name " + repr(string)
return string
# =============================================================================
# =============================================================================
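# A minimal, illustrative subclass sketch (the names below are hypothetical and
# not part of this module): subclasses normally override add_args() to declare
# their parameters and main() to do the work, then are run by calling the
# instance, which returns the exit status.
#
#     class ListFilesScript(Script):
#         """Print the files named on the command line."""
#         def add_args(self):
#             self.add_argument("files", nargs="*")
#         def main(self):
#             for path in self.files:
#                 print(path)
#             return log.errors()
#
#     if __name__ == "__main__":
#         sys.exit(ListFilesScript()())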
class Script:
"""Base class for CRDS command line scripts with standard properties.
`args` is either a string of command line parameters or a parameter list of command line words. If
defaulted to None then `args` is treated as sys.argv (default argparse handling). Note that `args`
    should include the program name as args[0]. Explicitly specifying `args` is used to
instantiate a script in code for testing, etc.
"""
    description = epilog = usage = None
formatter_class = RawTextHelpFormatter
def __init__(self, argv=None, parser_pars=None, reset_log=True, print_status=False):
self.stats = utils.TimingStats()
self._already_reported_stats = False
if isinstance(argv, str):
argv = argv.split()
elif argv is None:
argv = sys.argv
self._argv = argv
if parser_pars is None:
parser_pars = {}
for key in ["description", "epilog", "usage", "formatter_class"]:
self._add_key(key, parser_pars)
self.parser = argparse.ArgumentParser(prog=argv[0], **parser_pars)
self.add_args()
self.add_standard_args()
self.args = self.parser.parse_args(argv[1:])
if self.args.readonly_cache:
config.set_cache_readonly(True)
log.set_verbose(log.get_verbose() or self.args.verbosity or self.args.verbose)
log.set_log_time(config.get_log_time() or self.args.log_time)
output_cmd = log.info if self.args.dump_cmdline else log.verbose
output_cmd("Command:", [os.path.basename(argv[0])] + argv[1:], verbosity=30)
self.print_status = print_status
self.reset_log = reset_log
if self.reset_log:
            log.reset() # reset the infos, warnings, and errors counters as if a new command line run.
self._exit_status = None
self.show_context_resolution = True
def main(self):
"""Write a main method to perform the actions of the script using self.args."""
raise NotImplementedError("Script subclasses have to define main().")
def _main(self):
"""_main() completes any complex generic setup, like determining contexts, and then
calls self.main() which does the real work of the script. _main() defines the full
call tree of code which is run inside the profiler or debugger.
"""
self.contexts = self.determine_contexts()
self._exit_status = self.main()
self.report_stats() # here if not called already
if self.print_status:
log.standard_status()
return self._exit_status
# @data_file.hijack_warnings
def __call__(self):
"""Run the script's _main() according to command line parameters."""
try:
if self.args.debug_traps:
log.set_exception_trap(False)
if self.args.version:
_show_version()
elif self.args.profile:
self._profile()
elif self.args.pdb:
pdb.runctx("self._main()", globals(), locals())
else:
self._main()
except KeyboardInterrupt as exc:
if self.args.pdb:
raise
else:
raise KeyboardInterrupt("Interrupted... quitting.") from exc
return self._exit_status
@property
def locator(self):
"""Return the module for observatory specific file locations and plugins functions."""
return utils.get_locator_module(self.observatory)
@property
def obs_pkg(self):
"""Return the package __init__ for observatory specific constants."""
return utils.get_observatory_package(self.observatory)
def determine_contexts(self):
"""Return the list of contexts used by this invocation of the script. Empty for Script."""
return []
def add_args(self):
"""Add script-specific argparse add_argument calls here on self.parser"""
# raise NotImplementedError("Script subclasses have to define add_args().")
@property
def readonly_cache(self):
"""Return True of the cache is readonly."""
return config.get_cache_readonly()
@property
@utils.cached
def observatory(self):
"""Return either the command-line override observatory, or the one determined
by the client/server exchange.
"""
if self.args.jwst:
return self.set_server("jwst")
if self.args.hst:
return self.set_server("hst")
obs = os.environ.get("CRDS_OBSERVATORY", None)
if obs:
return self.set_server(obs.lower())
url = os.environ.get("CRDS_SERVER_URL", None)
if url is not None:
for obs in constants.ALL_OBSERVATORIES:
if obs in url.lower():
return self.set_server(obs)
files = []
if hasattr(self, "contexts"):
files += self.contexts
if hasattr(self.args, "files"):
files += self.args.files if self.args.files else []
for file_ in files:
if file_.startswith("hst"):
return self.set_server("hst")
if file_.startswith("jwst"):
return self.set_server("jwst")
for file_ in files:
with log.verbose_on_exception("Failed file_to_observatory for", repr(file_)):
return self.set_server(utils.file_to_observatory(file_))
return api.get_default_observatory()
def set_server(self, observatory):
"""Based on `observatory`, set the CRDS server to an appropriate default, particularly
for the case where CRDS_SERVER_URL is not set.
"""
url = config.get_server_url(observatory)
if url is not None:
api.set_crds_server(url)
return observatory
def _add_key(self, key, parser_pars):
"""Add any defined class attribute for `key` to dict `parser_pars`."""
inlined = getattr(self, key, parser_pars)
if inlined is not None:
parser_pars[key] = inlined
return parser_pars
def add_argument(self, *args, **keys):
"""Add a parser argument."""
self.parser.add_argument(*args, **keys)
def get_exclusive_arg_group(self, *args, **keys):
"""Return a mutually exlusive argument group."""
return self.parser.add_mutually_exclusive_group(*args, **keys)
def add_standard_args(self):
"""Add standard CRDS command line parameters."""
self.add_argument("-v", "--verbose",
help="Set log verbosity to True, nominal debug level.", action="store_true")
self.add_argument("--verbosity",
help="Set log verbosity to a specific level: 0..100.", type=int, default=0)
self.add_argument("--dump-cmdline", action="store_true",
help="Dump the command line parameters used to start the script to the log.")
self.add_argument("-R", "--readonly-cache", action="store_true",
help="Don't modify the CRDS cache. Not compatible with options which implicitly modify the cache.")
self.add_argument('-I', '--ignore-cache', action='store_true', dest="ignore_cache",
help="Download required files even if they're already in the cache.")
self.add_argument("-V", "--version",
help="Print the software version and exit.", action="store_true")
self.add_argument("-J", "--jwst", dest="jwst", action="store_true",
help="Force observatory to JWST for determining header conventions.""")
self.add_argument("-H", "--hst", dest="hst", action="store_true",
help="Force observatory to HST for determining header conventions.""")
self.add_argument("--stats", action="store_true",
help="Track and print timing statistics.")
self.add_argument("--profile",
help="Output profile stats to the specified file.", type=str, default="")
self.add_argument("--log-time", action="store_true",
help="Add date/time to log messages.")
self.add_argument("--pdb",
help="Run under pdb.", action="store_true")
self.add_argument("--debug-traps",
help="Bypass exception error message traps and re-raise exception.", action="store_true")
def print_help(self):
"""Print out command line help."""
self.parser.print_help()
def require_server_connection(self):
"""Check a *required* server connection and ERROR/exit if offline."""
try:
if not self.server_info.connected:
raise RuntimeError("Required server connection unavailable.")
except Exception as exc:
self.fatal_error("Failed connecting to CRDS server at CRDS_SERVER_URL =",
repr(api.get_crds_server()), "::", str(exc))
@property
@utils.cached
def server_info(self): # see also crds.sync server_info which does not update.
"""Return the server_info dict from the CRDS server *or* cache config for non-networked use where possible."""
info = heavy_client.get_config_info(self.observatory)
heavy_client.update_config_info(self.observatory)
return info
@property
@utils.cached
def bad_files(self):
"""Return the current list of ALL known bad mappings and references, not context-specific."""
return self.server_info.bad_files_set
@property
def default_context(self):
"""Return the default operational .pmap defined by the CRDS server or cache."""
return self.server_info["operational_context"]
def get_words(self, word_list):
"""Process a file list, expanding @-files into corresponding lists of
files. Return a flat, depth-first, file list.
"""
words = []
for word in word_list:
if word.startswith("@"):
words.extend(self._load_word_list(word[1:]))
else:
words.append(word)
return words # [word.lower() for word in words]
def _load_word_list(self, at_file):
"""Recursively load an @-file, returning a list of words.
Any stripped line beginning with # is a comment line to be ignored.
Any word beginning with @ is a file to load recursively.
        Each line is split into words using whitespace.
"""
words = []
with open(at_file) as atf:
for line in atf.readlines():
word = line.strip()
if word.startswith("#"):
continue
if word.startswith("@"):
more = self._load_word_list(word[1:])
else:
more = word.split()
words.extend(more)
return self.get_words(words) # another pass to fix paths
def get_files(self, file_list):
"""Expand a list of files by treating any filename beginning with an
@-sign as a file containing one word per line.
"""
return self.get_words(file_list)
@property
def files(self):
"""Handle @-files and add cache_paths to command line file parameters.
Nominally self.files are assumed to be references or mappings. Override locate_file()
to handle other files.
"""
if not hasattr(self.args, "files"):
raise NotImplementedError("Class must implement list of `self.args.files` raw file paths.")
files1 = self.get_files(self.args.files)
files2 = []
for file in files1:
files2.extend(expand_all_instruments(self.observatory, file))
return [self.locate_file(fname) for fname in files2]
#
# NOTES:
# crds:// will always mean "inside the cache"
# ./ will always mean "current directory"
# pathless files are more ambiguous, historically in CRDS they mean "in the cache"
    # but in most/all OSes pathless means "current directory" so conceivably could change
#
def locate_file(self, filename):
"""Locate file defines how members of the self.args.files list are located.
The default behavior is to locate CRDS cached files, either references or mappings.
This is inappropriate for datasets so in some cases locate_file needs to be overridden.
        Symbolic context names, e.g. hst-operational, are resolved to literal contexts, e.g. hst_0320.pmap
"""
filename = config.pop_crds_uri(filename) # nominally crds://
filename = self.resolve_context(filename) if config.is_date_based_mapping_spec(filename) else filename
return config.locate_file(filename, observatory=self.observatory)
def locate_file_outside_cache(self, filename):
"""This is essentially normal filename syntax, except crds:// is interpreted to mean
locate filename inside the CRDS cache. symbolic context names are also resolved to
literal context filenames.
"""
filename2 = config.pop_crds_uri(filename) # nominally crds://
filename2 = self.resolve_context(filename2) # e.g. hst-operational --> hst_0320.pmap
if filename != filename2: # Had crds:// or was date based
return config.locate_file(filename2, self.observatory)
else:
if not os.path.dirname(filename2):
return "./" + filename2
else:
return filename2
# return os.path.abspath(filename2)
def _profile(self):
"""Run _main() under the Python profiler."""
if self.args.profile == "console":
self._console_profile(self._main)
else:
cProfile.runctx("self._main()", locals(), locals(), self.args.profile)
def _console_profile(self, function):
"""Run `function` under the profiler and print results to console."""
prof = cProfile.Profile()
prof.enable()
function()
prof.disable()
prof_stats = pstats.Stats(prof).sort_stats("cumulative")
prof_stats.print_stats(100)
def report_stats(self):
"""Print out collected statistics."""
if self.args.stats and not self._already_reported_stats:
self.stats.report()
self._already_reported_stats = True
def increment_stat(self, name, amount=1):
"""Add `amount` to the statistics counter for `name`."""
self.stats.increment(name, amount)
def get_stat(self, name):
"""Return statistic `name`."""
return self.stats.get_stat(name)
def run(self, *args, **keys):
"""script.run() is the same thing as script() but more explicit."""
return self.__call__(*args, **keys)
def resolve_context(self, context):
"""Resolve context spec `context` into a .pmap, .imap, or .rmap filename, interpreting
date based specifications against the CRDS server operational context history.
"""
if isinstance(context, str) and context.lower() == "none":
return None
if not config.is_date_based_mapping_spec(context):
return context
final_context = heavy_client.get_context_name(self.observatory, context)
if self.show_context_resolution:
log.info("Symbolic context", repr(context), "resolves to", repr(final_context))
return final_context
def get_conjugates(self, file_list):
"""Given a list of references, return any GEIS data files associated with them."""
from crds import data_file
return [ data_file.get_conjugate(ref) for ref in file_list if data_file.get_conjugate(ref) is not None]
def get_file_properties(self, filename):
"""Return (instrument, filekind) corresponding to `file`, and '' for none."""
return utils.get_file_properties(self.observatory, filename)
def categorize_files(self, filepaths):
"""Organize files in list `filepaths` into a dictionary of lists as follows:
{ (instrument, filekind) : filepath, ... }
"""
categorized = defaultdict(list)
for path in filepaths:
instrument, filekind = self.get_file_properties(path)
categorized[(instrument, filekind)].append(path)
return dict(categorized)
def fatal_error(self, *args, **keys):
"""Issue an error message and terminate the program."""
log.fatal_error(*args, **keys)
def dump_files(self, context=None, files=None, ignore_cache=None):
"""Download mapping or reference `files1` with respect to `context`, tracking stats."""
if context is None:
context = self.default_context
if ignore_cache is None:
ignore_cache = self.args.ignore_cache
_localpaths, downloads, nbytes = api.dump_files(
context, files, ignore_cache=ignore_cache, raise_exceptions=self.args.pdb)
self.increment_stat("total-files", downloads)
self.increment_stat("total-bytes", nbytes)
def dump_mappings(self, mappings, ignore_cache=None):
"""Download all `mappings` and their dependencies if not already cached.."""
if ignore_cache is None:
ignore_cache = self.args.ignore_cache
if not self.server_info.connected:
log.verbose("Not connected to server. Skipping dump_mappings", mappings, verbosity=55)
return
for mapping in mappings:
_localpaths, downloads, nbytes = api.dump_mappings3(
mapping, ignore_cache=ignore_cache, raise_exceptions=self.args.pdb)
self.increment_stat("total-files", downloads)
self.increment_stat("total-bytes", nbytes)
def sync_files(self, files, context=None, ignore_cache=None):
"""Like dump_files(), but dumps recursive closure of any mappings rather than just the listed mapping."""
mappings = [ os.path.basename(filename)
for filename in files if config.is_mapping(filename) ]
references = [os.path.basename(filename)
for filename in files if not config.is_mapping(filename) ]
if mappings:
self.dump_mappings(mappings, ignore_cache)
if references:
self.dump_files(context, references, ignore_cache)
def are_all_references(self, files):
"""Return True IFF every file in files is a reference."""
for filename in files:
if not config.is_reference(filename):
return False
else:
return True
def are_all_mappings(self, files):
"""Return True IFF every file in files is a mapping."""
for filename in files:
if not config.is_mapping(filename):
return False
else:
return True
# =============================================================================
class UniqueErrorsMixin:
"""This mixin supports tracking certain errors messages."""
def __init__(self, *args, **keys):
self.ue_mixin = self.get_empty_mixin()
# Exception trap context manager for use in "with" blocks
# trapping exceptions.
self.error_on_exception = log.exception_trap_logger(
self.log_and_track_error) # can be overridden
def get_empty_mixin(self):
"""Return a bundle of freshly initialized counters and tracking information. The
bundle is used to isolate mixin parameters from subclass parameters to prevent
accidental overrides.
"""
class Struct:
pass
mixin = Struct()
mixin.messages = {}
mixin.count = Counter()
mixin.tracked_errors = 0
mixin.unique_data_names = set()
mixin.all_data_names = set()
mixin.data_names_by_key = defaultdict(list)
mixin.announce_suppressed = Counter()
return mixin
def clear_error_counts(self):
"""Clear the error tracking status by re-initializing/zeroing mixin data structures."""
self.ue_mixin = self.get_empty_mixin()
def add_args(self):
"""Add command line parameters to Script arg parser."""
self.add_argument("--dump-unique-errors", action="store_true",
help="Record and dump the first instance of each kind of error.")
self.add_argument("--unique-errors-file",
help="Write out data names (ids or filenames) for first instance of unique errors to specified file.")
self.add_argument("--all-errors-file",
help="Write out all err'ing data names (ids or filenames) to specified file.")
self.add_argument("--unique-threshold", type=int, default=1,
help="Only print unique error classes with this many or more instances.")
self.add_argument("--max-errors-per-class", type=int, default=500, metavar="N",
help="Only print the first N detailed errors of any particular class.")
self.add_argument("--unique-delimiter", type=str, default=None,
help="Use the given delimiter (e.g. semicolon) in tracked error messages to make them amenable to spreadsheets.")
def log_and_track_error(self, data, instrument, filekind, *params, **keys):
"""Issue an error message and record the first instance of each unique kind of error, where "unique"
is defined as (instrument, filekind, msg_text) and omits data id.
"""
# Always count messages
self.ue_mixin.tracked_errors += 1
msg = self.format_prefix(data, instrument, filekind, *params, **keys)
key = log.format(instrument, filekind.upper(), *params, **keys)
if key not in self.ue_mixin.messages:
self.ue_mixin.messages[key] = msg
self.ue_mixin.unique_data_names.add(data)
self.ue_mixin.count[key] += 1
self.ue_mixin.all_data_names.add(data)
self.ue_mixin.data_names_by_key[key].append(data)
        # Past a certain max, suppress the error log messages.
if self.ue_mixin.count[key] < self.args.max_errors_per_class:
log.error(msg)
else:
log.increment_errors()
# Before suppressing, announce the suppression
if not self.ue_mixin.announce_suppressed[key]:
self.ue_mixin.announce_suppressed[key] += 1 # flag
log.info("Max error count %d exceeded for:" % self.args.max_errors_per_class,
key.strip(), "suppressing remaining error messages.")
return None # for log.exception_trap_logger --> don't reraise
def format_prefix(self, data, instrument, filekind, *params, **keys):
"""Create a standard (instrument,filekind,data) prefix for log messages."""
delim = self.args.unique_delimiter # for spreadsheets
data, instrument, filekind = str(data), str(instrument), str(filekind) # squash 2.7 unicode
if delim:
return log.format(delim, instrument.upper(), delim, filekind.upper(), delim, data, delim,
*params, end="", **keys)
else:
return log.format("instrument="+repr(instrument.upper()), "type="+repr(filekind.upper()), "data="+repr(data), ":: ",
*params, end="", **keys)
def dump_unique_errors(self):
"""Print out the first instance of errors recorded by log_and_track_error(). Write out error list files."""
if self.args.dump_unique_errors:
if self.args.unique_threshold > 1:
log.info("Limiting error class reporting to cases with at least",
self.args.unique_threshold, "instances.")
log.info("="*20, "unique error classes", "="*20)
classes = len(self.ue_mixin.messages)
for key in sorted(self.ue_mixin.messages):
if self.ue_mixin.count[key] >= self.args.unique_threshold:
log.info("%06d" % self.ue_mixin.count[key], "errors like::", self.ue_mixin.messages[key])
else:
self.drop_error_class(key)
classes -= 1
log.info("All unique error types:", classes)
log.info("Untracked errors:", log.errors() - self.ue_mixin.tracked_errors)
log.info("="*20, "="*len("unique error classes"), "="*20)
if self.args.all_errors_file:
self.dump_error_data(self.args.all_errors_file, self.ue_mixin.all_data_names)
if self.args.unique_errors_file:
self.dump_error_data(self.args.unique_errors_file, self.ue_mixin.unique_data_names)
def drop_error_class(self, key):
"""Remove the errors classified by `key` from the error classes and counts."""
for data in self.ue_mixin.data_names_by_key[key]:
self.ue_mixin.all_data_names = self.ue_mixin.all_data_names - set([data])
self.ue_mixin.unique_data_names = self.ue_mixin.unique_data_names - set([data])
self.ue_mixin.count[key] -= 1
def dump_error_data(self, filename, error_list):
"Write out list of err'ing filenames or dataset ids to `filename`."""
with open(filename, "w+") as err_file:
err_file.write("\n".join(sorted(error_list))+"\n")
# =============================================================================
class ContextsScript(Script):
"""Baseclass for a script proving support for command line specified contexts."""
def __init__(self, *args, **keys):
super(ContextsScript, self).__init__(*args, **keys)
self.contexts = []
def add_args(self):
group = self.get_exclusive_arg_group(required=False)
group.add_argument('--contexts', metavar='CONTEXT', type=mapping_spec, nargs='*',
help="Specify a list of CRDS mappings to operate on: .pmap, .imap, or .rmap or date-based specification")
group.add_argument("--range", metavar="MIN:MAX", type=nrange, dest="range", default=None,
help='Operate for pipeline context ids (.pmaps) between <MIN> and <MAX>.')
group.add_argument('--all', action='store_true',
help='Operate with respect to all known CRDS contexts.')
group.add_argument('--last-n-contexts', metavar="N", type=int, default=None,
help='Operate with respect to the last N contexts.')
group.add_argument("--up-to-context", metavar='CONTEXT', type=mapping_spec, nargs=1, default=None,
help='Operate on all contexts up to and including the specified context.')
group.add_argument("--after-context", metavar='CONTEXT', type=mapping_spec, nargs=1, default=None,
help='Operate on all contexts after and including the specified context.')
def determine_contexts(self):
"""Support explicit specification of contexts, context id range, or all."""
log.verbose("Determining contexts.", verbosity=55)
if self.args.contexts:
# permit instrument and reference mappings, not just pipelines:
_contexts2 = []
for ctx in self.args.contexts:
_contexts2.extend(expand_all_instruments(self.observatory, ctx))
contexts = []
for ctx in _contexts2:
resolved = self.resolve_context(ctx)
if resolved != 'N/A':
contexts.append(resolved)
elif self.args.all:
contexts = self._list_mappings("*.pmap")
elif self.args.last_n_contexts:
contexts = self._list_mappings("*.pmap")[-self.args.last_n_contexts:]
elif self.args.range:
rmin, rmax = self.args.range
contexts = []
all_contexts = self._list_mappings("*.pmap")
for context in all_contexts:
match = re.match(r"\w+_(\d+).pmap", context)
if match:
serial = int(match.group(1))
if rmin <= serial <= rmax:
contexts.append(context)
elif self.args.up_to_context:
pmaps = self._list_mappings("*.pmap")
with log.augment_exception("Invalid --up-to-context", repr(self.args.up_to_context[0]), exc_class=exceptions.CrdsError):
up_to_context = self.resolve_context(self.args.up_to_context[0])
up_to_ix = pmaps.index(up_to_context)+1
contexts = pmaps[:up_to_ix]
elif self.args.after_context:
pmaps = self._list_mappings("*.pmap")
with log.augment_exception("Invalid --after-context", repr(self.args.after_context[0]), exc_class=exceptions.CrdsError):
after_context = self.resolve_context(self.args.after_context[0])
after_ix = pmaps.index(after_context)
contexts = pmaps[after_ix:]
elif config.get_crds_env_context():
contexts = [self.resolve_context(config.get_crds_env_context())]
else:
contexts = [self.resolve_context(self.observatory + "-operational")]
log.verbose("Determined contexts: ", contexts, verbosity=55)
return sorted(contexts)
def _list_mappings(self, glob_pattern="*.pmap"):
"""Return a list of all the .pmap's on the CRDS Server."""
self.require_server_connection()
return heavy_client.list_mappings(self.observatory, glob_pattern)
def dump_files(self, context, files=None, ignore_cache=None):
"""Download mapping or reference `files1` with respect to `context`, tracking stats."""
if ignore_cache is None:
ignore_cache = self.args.ignore_cache
_localpaths, downloads, nbytes = api.dump_files(
context, files, ignore_cache=ignore_cache, raise_exceptions=self.args.pdb)
self.increment_stat("total-files", downloads)
self.increment_stat("total-bytes", nbytes)
def get_context_mappings(self):
"""Return the set of mappings which are pointed to by the mappings
in `self.contexts`.
"""
files = set()
useable_contexts = []
if not self.contexts:
return []
log.verbose("Getting all mappings for specified contexts.", verbosity=55)
if self.args.all:
files = self._list_mappings("*.*map")
pmaps = self._list_mappings("*.pmap")
useable_contexts = []
if pmaps and files:
with log.warn_on_exception("Failed dumping mappings for", repr(self.contexts)):
self.dump_files(pmaps[-1], files)
for context in self.contexts:
with log.warn_on_exception("Failed loading context", repr(context)):
pmap = rmap.get_cached_mapping(context)
useable_contexts.append(context)
else:
for context in self.contexts:
with log.warn_on_exception("Failed listing mappings for", repr(context)):
try:
pmap = rmap.get_cached_mapping(context)
files |= set(pmap.mapping_names())
except Exception:
files |= set(api.get_mapping_names(context))
useable_contexts.append(context)
useable_contexts = sorted(useable_contexts)
if useable_contexts and files:
with log.warn_on_exception("Failed dumping mappings for", repr(self.contexts)):
self.dump_files(useable_contexts[-1], files)
self.contexts = useable_contexts # XXXX reset self.contexts
files = sorted(files)
log.verbose("Got mappings from specified (usable) contexts: ", files, verbosity=55)
return files
def get_context_references(self):
"""Return the set of references which are pointed to by the references
in `contexts`.
"""
files = set()
for context in self.contexts:
try:
pmap = rmap.get_cached_mapping(context)
files |= set(pmap.reference_names())
log.verbose("Determined references from cached mapping", repr(context))
except Exception: # only ask the server if loading context fails
files |= set(api.get_reference_names(context))
return sorted(files)
def expand_all_instruments(observatory, context):
"""Expand symbolic context specifiers for rmaps with "all" for instrument
into the list of rmaps for every instrument in the related context (e.g. edit or operational).
e.g. jwst-all-photom-operational --> [jwst-miri-photom-operational, jwst-nircam-photom-operational, ...]
Expansion of "all" is determined by instruments in e.g. jwst-operational
"""
pattern = observatory + r"\-all\-([^\-]+)\-(.+)"
mtch = re.match(pattern, context)
if mtch:
root_context = observatory + "-" + mtch.group(2)
pmap = heavy_client.get_symbolic_mapping(root_context)
all_imaps = [ "-".join([observatory, instrument, mtch.group(1), mtch.group(2)])
for instrument in pmap.selections.keys() if instrument != "system"]
else:
all_imaps = [context]
return all_imaps
|
import numpy as np
import dnnlib
import dnnlib.tflib as tflib
import PIL.Image
from tqdm import tqdm
import matplotlib.pyplot as plt
import pretrained_networks
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# Get tf noise variables, for the stochastic variation
def generate_zs_from_seeds(seeds):
zs = []
for seed_idx, seed in enumerate(seeds):
rnd = np.random.RandomState(seed)
z = rnd.randn(1, *Gs.input_shape[1:]) # [minibatch, component]
zs.append(z)
return zs
# Truncation psi value needed for the truncation trick
def generate_images(zs, truncation_psi):
Gs_kwargs = dnnlib.EasyDict()
Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
Gs_kwargs.randomize_noise = False
if not isinstance(truncation_psi, list):
truncation_psi = [truncation_psi] * len(zs)
imgs = []
for z_idx, z in tqdm(enumerate(zs)):
Gs_kwargs.truncation_psi = truncation_psi[z_idx]
noise_rnd = np.random.RandomState(1) # fix noise
tflib.set_vars({var: noise_rnd.randn(*var.shape.as_list()) for var in noise_vars}) # [height, width]
images = Gs.run(z, None, **Gs_kwargs) # [minibatch, height, width, channel]
imgs.append(PIL.Image.fromarray(images[0], 'RGB'))
# Return array of PIL.Image
return imgs
def generate_images_from_seeds(seeds, truncation_psi):
return generate_images(generate_zs_from_seeds(seeds), truncation_psi)
from math import ceil
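# Paste the given PIL images into a rows x cols RGBA canvas, scaling each tile
# down by `scale` before placement.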
def createImageGrid(images, scale=0.25, rows=1):
w,h = images[0].size
w = int(w*scale)
h = int(h*scale)
height = rows*h
cols = ceil(len(images) / rows)
width = cols*w
canvas = PIL.Image.new('RGBA', (width,height), 'white')
for i,img in enumerate(images):
img = img.resize((w,h), PIL.Image.ANTIALIAS)
canvas.paste(img, (w*(i % cols), h*(i // cols)))
return canvas
# Make sure you use tensorflow version 1
print('Tensorflow version: {}'.format(tf.__version__))
network_pkl = '../pkl/network-snapshot-001544.pkl'
# It returns 3 networks, we will be mainly using Gs
# _G = Instantaneous snapshot of the generator. Mainly useful for resuming a previous training run.
# _D = Instantaneous snapshot of the discriminator. Mainly useful for resuming a previous training run.
# Gs = Long-term average of the generator. Yields higher-quality results than the instantaneous snapshot.
_G, _D, Gs = pretrained_networks.load_networks(network_pkl)
noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]
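# Linear interpolation between consecutive latent vectors: for every adjacent
# pair in `zs` this emits `steps` intermediate latents (the right endpoint is
# contributed by the next pair).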
def interpolate(zs, steps):
out = []
for i in range(len(zs)-1):
for index in range(steps):
fraction = index/float(steps)
out.append(zs[i+1]*fraction + zs[i]*(1-fraction))
return out
import scipy
import moviepy.editor
grid_size = [3,3]
duration_sec = 10
smoothing_sec = 1.0
image_zoom = 1
fps = 30
random_seed = np.random.randint(0, 999)
num_frames = int(np.rint(duration_sec * fps))
random_state = np.random.RandomState(random_seed)
# Generate latent vectors
shape = [num_frames, np.prod(grid_size)] + Gs.input_shape[1:] # [frame, image, channel, component]
all_latents = random_state.randn(*shape).astype(np.float32)
all_latents = scipy.ndimage.gaussian_filter(all_latents, [smoothing_sec * fps] + [0] * len(Gs.input_shape), mode='wrap')
all_latents /= np.sqrt(np.mean(np.square(all_latents)))
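# The Gaussian filter above smooths the latent trajectory along the frame axis
# (sigma of smoothing_sec * fps frames) so the video morphs continuously, and the
# division renormalizes the latents to roughly unit RMS.
# create_image_grid() tiles a batch of HWC images into a single grid array.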
def create_image_grid(images, grid_size=None):
assert images.ndim == 3 or images.ndim == 4
num, img_h, img_w, channels = images.shape
if grid_size is not None:
grid_w, grid_h = tuple(grid_size)
else:
grid_w = max(int(np.ceil(np.sqrt(num))), 1)
grid_h = max((num - 1) // grid_w + 1, 1)
grid = np.zeros([grid_h * img_h, grid_w * img_w, channels], dtype=images.dtype)
for idx in range(num):
x = (idx % grid_w) * img_w
y = (idx // grid_w) * img_h
grid[y : y + img_h, x : x + img_w] = images[idx]
return grid
# Frame generation func for moviepy.
def make_frame(t):
frame_idx = int(np.clip(np.round(t * fps), 0, num_frames - 1))
latents = all_latents[frame_idx]
fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
images = Gs.run(latents, None, truncation_psi=0.7,
randomize_noise=False, output_transform=fmt,
minibatch_size=16)
grid = create_image_grid(images, grid_size)
if image_zoom > 1:
grid = scipy.ndimage.zoom(grid, [image_zoom, image_zoom, 1], order=0)
if grid.shape[2] == 1:
grid = grid.repeat(3, 2) # grayscale => RGB
return grid
def main():
# generate 9 random seeds
seeds = np.random.randint(10000000, size=9)
print(seeds)
zs = generate_zs_from_seeds(seeds)
imgs = generate_images(zs, 0.5)
    createImageGrid(imgs, rows=3)
video_clip = moviepy.editor.VideoClip(make_frame, duration=duration_sec)
    # Uncomment this instead to pin the codec and bitrate of the generated .mp4
    # video_clip.write_videofile('random_grid_%s.mp4' % random_seed, fps=fps, codec='libx264', bitrate='2M')
video_clip.write_videofile('random_grid_%s.mp4' % random_seed, fps=fps)
if __name__ == '__main__':
main() |
#!/usr/bin/python
import sys
import os
import six
try:
import configparser
except ImportError:
from six.moves import configparser
ConfigParser=configparser
fusePath = 'fuse'
class runScript:
def __init__(self):
self.runSimulationScript = "cd build\n"
self.Name = "unset"
def AddName(self,Name):
self.Name = Name
def AddScript(self,Name,BuildScriptName,RunScript):
self.runSimulationScript += BuildScriptName + '\n'
self.runSimulationScript += RunScript + '\n'
self.runSimulationScript += 'echo "<'+Name + '>" >> ' + self.Name + '.txt \n'
self.runSimulationScript += 'cat dummy_diff.txt >> ' + self.Name + '.txt \n'
self.runSimulationScript += 'echo "</'+Name + '>" >> ' + self.Name + '.txt \n'
def write2File(self,FileName):
self.runSimulationScript = 'echo "start ' + self.Name +'"\nrm -f build/'+ self.Name +".txt\n" + self.runSimulationScript
self.runSimulationScript += '\necho "====Finished Running====="\n'
self.runSimulationScript += "\ncat " + self.Name +".txt\n"
self.runSimulationScript += "\n cd - \n"
with open(FileName,"w") as f :
f.write(self.runSimulationScript)
gRunScript = runScript()
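# For each [Simulation*] section of the config this writes four artifacts: a fuse
# build script that compiles the .prj project into a standalone ISim executable,
# a run script that executes it with a tclbatch command file and diffs the
# produced output against the reference data, plus the tclbatch and .prj files
# themselves. Both scripts are registered with the global runScript collector.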
def HandleSimulation(config,section,path):
Name = config.get(section,"Name")
Name = Name.replace('"', '')
ExecutableName = path + "/" + Name + "_beh.exe"
TopLevelModule = config.get(section,"TopLevelModule")
tclbatchName = path + "/" +Name+"_beh.cmd"
ProjectName = path + "/" + Name+ "_beh.prj"
makeScript = fusePath + ' -intstyle ise -incremental -lib secureip -o ' + ExecutableName +" -prj " +ProjectName + " " + TopLevelModule
makeScript_name = path+"/sim_"+Name+"_build.sh"
with open(makeScript_name,"w") as f :
f.write(makeScript)
RunScript = ExecutableName + " -intstyle ise -tclbatch " +tclbatchName + " -wdb " + path + "/" + Name + "_beh.wdb"
ReferenceInputDataFile= config.get(section,'ReferenceInputDataFile')
inFile = config.get(section,'InputDataFile')
OutputDataFile= config.get(section,'OutputDataFile')
ReferenceDataFile= config.get(section,'ReferenceOutputDataFile')
runScript_name = path+"/sim_"+Name+"_run.sh"
gRunScript.AddScript(Name, makeScript_name, runScript_name)
with open(runScript_name,"w") as f :
if OutputDataFile and ReferenceDataFile:
f.write('rm -f ' + OutputDataFile +'\n')
f.write('rm -f dummy_diff.txt \n')
if inFile and ReferenceInputDataFile:
f.write('\ncp ' + ReferenceInputDataFile +' ' + inFile +'\n')
f.write(RunScript+'\n')
if inFile and ReferenceInputDataFile:
            f.write('\nrm -f '+ inFile +'\n')
if OutputDataFile and ReferenceDataFile:
f.write('\necho "<======diff========>"\ndiff ' +OutputDataFile + ' ' +ReferenceDataFile +'\necho "<=======end diff=====>"\n')
f.write('\ndiff ' +OutputDataFile + ' ' +ReferenceDataFile +'> dummy_diff.txt\n')
onerror=config.get(section,'Onerror')
Runtime =config.get(section,'Runtime')
tclbatchScript = "onerror "+onerror +"\nwave add /\nrun "+Runtime + ";\nquit -f;"
with open(tclbatchName,"w") as f :
f.write(tclbatchScript)
with open(ProjectName,"w") as f :
for op in config.options(section):
opValue = config.get(section,op)
            if opValue is None:
f.write('vhdl work "' + op+ '"\n')
def handleImplement(config,section,path):
'xst -intstyle ise -filter "/home/ise/xilinx_share2/GitHub/AxiStream/build/iseconfig/filter.filter" -ifn "/home/ise/xilinx_share2/GitHub/AxiStream/build/tb_streamTest.xst" -ofn "/home/ise/xilinx_share2/GitHub/AxiStream/build/tb_streamTest.syr"'
pass
def main(args = None):
if args == None:
args = sys.argv[1:]
if len(args) < 1:
sys.exit()
FileName = args[0]
Path = os.path.abspath(args[1])
print(FileName)
gRunScript.AddName("run")
config = ConfigParser.RawConfigParser(allow_no_value=True)
config.optionxform=str
config.read(FileName)
sections = config.sections()
for s in sections:
if "Simulation" in s:
print(s)
HandleSimulation(config,s,Path)
elif "Implement" in s:
pass
gRunScript.write2File("run.sh")
if (__name__ == "__main__"):
main() |
import unittest
from zoolandia import *
class TestHabitat(unittest.TestCase):
def test_name_empty_string_default(self):
habitat = Habitat()
self.assertEqual(habitat.name,'')
def test_members_empty_set_default(self):
habitat = Habitat()
self.assertIsInstance(habitat.members, set)
def test_add_member(self):
aquarium = Aquarium('freshwater')
bob = Betta('orange', 'Bob')
james = Betta('orange', 'James')
aquarium.add_member(bob)
self.assertIn(bob, aquarium.members)
aquarium.add_member(james)
self.assertIn(bob, aquarium.members)
self.assertIn(james, aquarium.members)
def test_remove_members(self):
aquarium = Aquarium('freshwater')
james = Betta('orange', 'James')
aquarium.add_member(james)
aquarium.remove_member(james)
self.assertNotIn(james, aquarium.members)
if __name__ == '__main__':
unittest.main()
|
# Generated by Django 2.0.5 on 2019-05-24 14:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('answer_text', models.CharField(help_text='Answer Text', max_length=3000)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Category Name', max_length=100)),
],
),
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('campus', models.CharField(help_text='Campus', max_length=20)),
('semester', models.CharField(help_text='Semester', max_length=20)),
('title', models.CharField(help_text='Title', max_length=20)),
('credit', models.CharField(help_text='Credit', max_length=20)),
('department', models.CharField(help_text='Department', max_length=20)),
('instructor', models.CharField(help_text='Instructor', max_length=20)),
('times', models.CharField(help_text='Times', max_length=20)),
                ('room', models.CharField(help_text='Room', max_length=20)),
('additional_info', models.CharField(help_text='Additional Info', max_length=20)),
('misc_links', models.CharField(help_text='Misc. Links', max_length=20)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', models.CharField(help_text='Question', max_length=140)),
('details', models.CharField(help_text='Question Details', max_length=3000)),
('pub_date', models.DateTimeField(verbose_name='date published')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='answer',
name='question',
field=models.ManyToManyField(to='newapp.Question'),
),
migrations.AddField(
model_name='answer',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
import re
from neuralparticles.tools.plot_helpers import write_dict_csv
from neuralparticles.tools.param_helpers import getParam, checkUnusedParams
log_path = getParam("log", "")
csv_path = getParam("csv", "")
checkUnusedParams()
if csv_path == "":
csv_path = log_path[:-4] + ".csv"
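# p_loss_l matches progress-bar lines of the form "  128/1000 [=>..] - ETA ..."
# (as written by Keras-style training logs); p_loss then extracts every
# "name: value" metric pair from the tail of such a line, starting at "loss:".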
p_loss_l = re.compile(r"[ ]*(\d*)/\d* \[.*\]\s-\sETA")
p_loss = re.compile(r"(\w*): ([0-9.e-]*)")
history = {}
with open(log_path, 'r') as file:
for line in file:
if p_loss_l.match(line):
for l in p_loss.findall(line[line.find("loss:"):]):
if not l[0] in history:
history[l[0]] = []
history[l[0]].append(float(l[1]))
write_dict_csv(csv_path, history) |
from functools import partial
from .encentry import EncEntry
def EncEntryTemplate(**kwargs):
return partial(EncEntry, **kwargs)
|
#!/usr/bin/env python3
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.address import *
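# Deploys the Greeter contract below and checks that gettransactionreceipt
# exposes the expected logs bloom filter for transactions that emit events.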
class QtumTransactionReceiptBloomFilterTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-logevents', '-txindex']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].generate(COINBASE_MATURITY+50)
"""
pragma solidity >= 0.6.12;
contract Greeter {
event ListIndexedGreeting(address indexed greeter, string message);
event ListUnindexedGreeting(address greeter, string message);
event ListAccountValue(address indexed account, uint256 value);
function greet_indexed(string memory message) public {
emit ListIndexedGreeting(msg.sender, message);
}
function greet_unindexed(string memory message) public {
emit ListUnindexedGreeting(msg.sender, message);
}
function get_account_val(uint256 value) public {
emit ListAccountValue(msg.sender, value);
}
}
"""
"""
Function signatures:
{
"9f06a908": "get_account_val(uint256)",
"378b8e93": "greet_indexed(string)",
"3126af1c": "greet_unindexed(string)"
}
"""
contract_bytecode = "608060405234801561001057600080fd5b50600436106100415760003560e01c80633126af1c14610046578063378b8e93146101015780639f06a908146101bc575b600080fd5b6100ff6004803603602081101561005c57600080fd5b810190808035906020019064010000000081111561007957600080fd5b82018360208201111561008b57600080fd5b803590602001918460018302840111640100000000831117156100ad57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808301925050505050505091929192905050506101ea565b005b6101ba6004803603602081101561011757600080fd5b810190808035906020019064010000000081111561013457600080fd5b82018360208201111561014657600080fd5b8035906020019184600183028401116401000000008311171561016857600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808301925050505050505091929192905050506102a7565b005b6101e8600480360360208110156101d257600080fd5b810190808035906020019092919050505061035d565b005b7fd745aa0aba5ff43ae2ed3da2c13d614246c4d0b902544218da2b3d53e7c614e53382604051808373ffffffffffffffffffffffffffffffffffffffff16815260200180602001828103825283818151815260200191508051906020019080838360005b8381101561026957808201518184015260208101905061024e565b50505050905090810190601f1680156102965780820380516001836020036101000a031916815260200191505b50935050505060405180910390a150565b3373ffffffffffffffffffffffffffffffffffffffff167f6666872cc79417abb8dbbf3dfe870a075ffe6abca793b19ee44b06203e9f4ae8826040518080602001828103825283818151815260200191508051906020019080838360005b83811015610320578082015181840152602081019050610305565b50505050905090810190601f16801561034d5780820380516001836020036101000a031916815260200191505b509250505060405180910390a250565b3373ffffffffffffffffffffffffffffffffffffffff167f321017b4ec93a5b534a9169bb1dafc27e1a018764fee8cba7bed1f9d320730f3826040518082815260200191505060405180910390a25056fea264697066735822122080714bfe79581739318e0133416e6eaac435e621dbf5acab191c29ab5390080164736f6c63430007040033"
contract_address = self.nodes[0].createcontract(contract_bytecode)[
'address']
self.nodes[0].generate(1)
ret = self.nodes[0].sendtocontract(
contract_address, "9f06a9080000000000000000000000000000000000000000000000000000000000000018")
self.nodes[0].generate(1)
assert('txid' in ret)
assert('sender' in ret)
assert('hash160' in ret)
receipt = self.nodes[0].gettransactionreceipt(ret['txid'])
assert('bloom' in receipt)
assert(receipt['bloom'] == "00200000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000004000000000040000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000080000000000000000000000000000800000000000000000000000000000000000000000000000000")
ret2 = self.nodes[0].sendtocontract(
contract_address,
"378b8e930000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d746869732069732061206c6f6700000000000000000000000000000000000000"
)
assert('txid' in ret2)
assert('sender' in ret2)
assert('hash160' in ret2)
        # mine the transaction so a receipt is available
        self.nodes[0].generate(1)
        receipt2 = self.nodes[0].gettransactionreceipt(ret2['txid'])
assert('bloom' in receipt2)
assert(receipt2['bloom'] == "00000000000000000000000000000080000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000200000000000000000000000000000000000000000000000000100000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000200000000000000000000000000000000000000000000000000000000000")
if __name__ == '__main__':
QtumTransactionReceiptBloomFilterTest().main()
|
import itertools
import os
import re
import requests
from datetime import datetime, timedelta
from bson import ObjectId
from lxml import etree
from constants.spider import FILE_SUFFIX_LANG_MAPPING, LangType, SUFFIX_IGNORE, SpiderType, QueryType, ExtractType
from constants.task import TaskStatus
from db.manager import db_manager
def get_lang_by_stats(stats: dict) -> LangType:
"""
Get programming language provided suffix stats
:param stats: stats is generated by utils.file.get_file_suffix_stats
:return:
"""
try:
data = stats.items()
data = sorted(data, key=lambda item: item[1])
data = list(filter(lambda item: item[0] not in SUFFIX_IGNORE, data))
top_suffix = data[-1][0]
if FILE_SUFFIX_LANG_MAPPING.get(top_suffix) is not None:
return FILE_SUFFIX_LANG_MAPPING.get(top_suffix)
return LangType.OTHER
except IndexError as e:
pass
def get_spider_type(path: str) -> SpiderType:
"""
Get spider type
:param path: spider directory path
"""
for file_name in os.listdir(path):
if file_name == 'scrapy.cfg':
return SpiderType.SCRAPY
def get_spider_col_fields(col_name: str, task_id: str = None, limit: int = 100) -> list:
"""
Get spider collection fields
:param col_name: collection name
:param task_id: task_id
:param limit: limit
"""
filter_ = {}
if task_id is not None:
filter_['task_id'] = task_id
items = db_manager.list(col_name, filter_, limit=limit, sort_key='_id')
fields = set()
for item in items:
for k in item.keys():
fields.add(k)
return list(fields)
def get_last_n_run_errors_count(spider_id: ObjectId, n: int) -> int:
tasks = db_manager.list(col_name='tasks',
cond={'spider_id': spider_id},
sort_key='create_ts',
limit=n)
count = 0
for task in tasks:
if task['status'] == TaskStatus.FAILURE:
count += 1
return count
def get_last_n_day_tasks_count(spider_id: ObjectId, n: int) -> int:
return db_manager.count(col_name='tasks',
cond={
'spider_id': spider_id,
'create_ts': {
'$gte': (datetime.now() - timedelta(n))
}
})
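# Extract one row per matched list item: the item selector (CSS or XPath) picks
# the item nodes, then each configured field query pulls either the node text or
# a named attribute from that node.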
def get_list_page_data(spider, sel):
data = []
if spider['item_selector_type'] == QueryType.XPATH:
items = sel.xpath(spider['item_selector'])
else:
items = sel.cssselect(spider['item_selector'])
for item in items:
row = {}
for f in spider['fields']:
if f['type'] == QueryType.CSS:
# css selector
res = item.cssselect(f['query'])
else:
# xpath
res = item.xpath(f['query'])
if len(res) > 0:
if f['extract_type'] == ExtractType.TEXT:
row[f['name']] = res[0].text
else:
row[f['name']] = res[0].get(f['attribute'])
data.append(row)
return data
def get_detail_page_data(url, spider, idx, data):
r = requests.get(url)
sel = etree.HTML(r.content)
row = {}
for f in spider['detail_fields']:
if f['type'] == QueryType.CSS:
# css selector
res = sel.cssselect(f['query'])
else:
# xpath
res = sel.xpath(f['query'])
if len(res) > 0:
if f['extract_type'] == ExtractType.TEXT:
row[f['name']] = res[0].text
else:
row[f['name']] = res[0].get(f['attribute'])
# assign values
for k, v in row.items():
data[idx][k] = v
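# generate_urls() expands placeholder patterns in a base URL and yields every
# combination: "{min,max}" becomes an inclusive integer range and "[a,b,c]" a
# literal list of strings. Illustrative example (hypothetical URL):
#     generate_urls("http://example.com/page/{1,3}")
#     -> http://example.com/page/1, .../page/2, .../page/3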
def generate_urls(base_url: str):
url = base_url
# number range list
list_arr = []
for i, res in enumerate(re.findall(r'{(\d+),(\d+)}', base_url)):
try:
_min = int(res[0])
_max = int(res[1])
except ValueError as err:
raise ValueError(f'{base_url} is not a valid URL pattern')
# list
_list = range(_min, _max + 1)
# key
_key = f'n{i}'
# append list and key
list_arr.append((_list, _key))
# replace url placeholder with key
url = url.replace('{' + res[0] + ',' + res[1] + '}', '{' + _key + '}', 1)
# string list
for i, res in enumerate(re.findall(r'\[([\w\-,]+)\]', base_url)):
# list
_list = res.split(',')
# key
_key = f's{i}'
# append list and key
list_arr.append((_list, _key))
# replace url placeholder with key
url = url.replace('[' + ','.join(_list) + ']', '{' + _key + '}', 1)
# combine together
_list_arr = []
for res in itertools.product(*map(lambda x: x[0], list_arr)):
_url = url
for _arr, _rep in zip(list_arr, res):
_list, _key = _arr
_url = _url.replace('{' + _key + '}', str(_rep), 1)
yield _url
|
"""
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cloudroast.meniscus.fixtures import TenantFixture
class TestTenant(TenantFixture):
def test_create_tenant(self):
tenant_id, resp = self.tenant_behaviors.create_tenant()
self.assertEqual(resp.status_code, 201,
'Wrong status code. The tenant was probably not '
'created')
def test_get_tenant(self):
tenant_id, resp = self.tenant_behaviors.create_tenant()
resp = self.tenant_client.get_tenant(tenant_id)
self.assertEqual(resp.status_code, 200)
self.assertIsNotNone(resp.entity)
self.assertEqual(resp.entity[0].tenant_id, tenant_id)
|
# from spaceone.tester.scenario import Scenario
import random
from spaceone.core.utils import random_string
from spaceone.tester.scenario.runner.runner import ServiceRunner, print_json
__all__ = ['RoleRunner']
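# Creates the roles listed in a scenario for the given domain (or, in update
# mode, fetches and updates existing ones) and records a role name -> role_id
# mapping for use by later runners.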
class RoleRunner(ServiceRunner):
def __init__(self, clients, update_mode=False):
self.set_clients(clients)
self.update_mode = update_mode
self.role_name2id = {}
def create_or_update_role(self, scenario_roles, domain):
if domain is None:
            raise Exception(f'Cannot create roles. (domain={domain})')
for scenario_role in scenario_roles:
if isinstance(scenario_role, dict):
role_data = _prepare_role_data(scenario_role)
role = None
if self.update_mode:
role = self._get_role(role_data, domain.domain_id)
if role:
role = self._update_role(role_data, domain.domain_id)
if role is None:
role_id = self._create_role(role_data, domain.domain_id)
self.role_name2id[role_data['name']] = role_id
return self.role_name2id
def _get_role(self, role_data, domain_id):
if 'role_id' not in role_data:
return None
role = None
print("########### GET Role ###############")
try:
role = self.identity.Role.get(
{'role_id': role_data['role_id'], 'domain_id': domain_id},
metadata=self.get_meta()
)
        except Exception:
print("########### NOT FOUND - role ###############")
if role:
print("########### Role FOUND ###############")
return role
return None
def _create_role(self, role_data, domain_id):
print("########### Create Role ###############")
role_data.update({'domain_id': domain_id})
#print(f"meta: {self.get_meta()}")
print(f"role_data: {role_data}")
role = self.identity.Role.create(
role_data,
metadata=self.get_meta()
)
self.append_terminator(self.identity.Role.delete,
{'domain_id': domain_id,
'role_id': role.role_id})
print_json(role)
return role.role_id
def _update_role(self, role_data, domain_id):
role = None
try:
# update_role
role_data.update({'domain_id': domain_id})
role = self.identity.Role.update(
role_data,
metadata=self.get_meta()
)
print("########### Update Role - update-mode ###############")
print_json(role)
except Exception as e:
            print(f'Cannot update role. (role={role})')
return role
def _prepare_role_data(scenario_role):
default_role = {}
# Overwrite param, if needed
default_role.update(scenario_role)
return default_role
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import os
import docutils.core
import testtools
from testtools import matchers
PROPOSED_CHANGE_SECTION = 'Proposed Change'
IMPLEMENTATION_SECTION = 'Implementation'
TITLES = {
'Problem Description': [],
'Proposed Change': [
'Alternatives',
'Security Impact',
'Notifications Impact',
'Other End User Impact',
'Performance Impact',
'Other Deployer Impact',
'Developer Impact'
],
'Implementation': [
'Assignee(s)',
'Work Items'
],
'Dependencies': [],
'Documentation Impact': [],
'References': []
}
class TestTitles(testtools.TestCase):
def _get_title(self, section_tree):
section = {
'subtitles': [],
}
for node in section_tree:
if node.tagname == 'title':
section['name'] = node.rawsource
elif node.tagname == 'section':
# Note subsection subtitles are thrown away
subsection = self._get_title(node)
section['subtitles'].append(subsection['name'])
return section
def _get_titles(self, spec):
titles = {}
for node in spec:
if node.tagname == 'section':
section = self._get_title(node)
titles[section['name']] = section['subtitles']
return titles
def _check_titles(self, titles):
for section in TITLES:
self.assertIn(section, titles)
for subsection in TITLES[PROPOSED_CHANGE_SECTION]:
self.assertIn(subsection, titles[PROPOSED_CHANGE_SECTION])
for subsection in TITLES[IMPLEMENTATION_SECTION]:
self.assertIn(subsection, titles[IMPLEMENTATION_SECTION])
def test_template(self):
files = ['specs/template.rst'] + glob.glob('specs/*/*')
for filename in files:
if not os.path.exists(os.path.dirname(filename)):
self.assertThat(filename, matchers.EndsWith('.rst'),
'spec\'s file must use the "rst" extension.')
with open(filename) as f:
data = f.read()
spec = docutils.core.publish_doctree(data)
titles = self._get_titles(spec)
self._check_titles(titles)
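# Sketch (illustration only, not used by the tests): the RST section skeleton
# that _check_titles() expects, mirroring the TITLES mapping above. Sections
# use "=" underlines and subsections use "-" underlines.
SAMPLE_SPEC_SKELETON = """
Problem Description
=====================

Proposed Change
=================

Alternatives
--------------

Security Impact
-----------------

Notifications Impact
----------------------

Other End User Impact
-----------------------

Performance Impact
--------------------

Other Deployer Impact
-----------------------

Developer Impact
------------------

Implementation
================

Assignee(s)
-------------

Work Items
------------

Dependencies
==============

Documentation Impact
======================

References
============
"""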
|
"""
--- Aircraft Design ---
sizing plot for a civil jet (FAR 25)
*FAR = Federal Aviation Regulation
Ref. Kenichi Rinoie, "Aircraft Design Method
- conceptual design from a single-engine propeller aircraft to SST -"
"""
import math
import time
import matplotlib.pyplot as plt
class LapseRate:
def __init__(self):
# Lapse rate
self.lp = 0.0
# Coefficient
self.k = [0.0, 0.0, 0.0, 0.0]
# Bypass ratio
self.bpr = 12.0
# Maximum time steps (for mach axis)
self.max_t = 5
# Density ratio of air
self.sigma = 1.0
# Draw the graph
self.x = []
self.y = []
def calc_lp(self):
"""
Calculate the Lapse rate
"""
# Set the coefficients
# 1. bpr <= 1.0
def set_coeffs_1(mach):
if mach <= 0.4:
self.k[0] = 1.0
self.k[1] = 0.0
self.k[2] = -0.2
self.k[3] = 0.07
elif mach >= 0.4 and mach <= 0.9:
self.k[0] = -0.856
self.k[1] = 0.062
self.k[2] = 0.16
self.k[3] = -0.23
elif mach >= 0.9 and mach <= 2.2:
self.k[0] = 1.0
self.k[1] = -0.145
self.k[2] = -0.5
self.k[3] = -0.05
else:
print("Error: Mach number is out of range")
exit()
# 2. bpr >= 3.0 and bpr <= 6.0
def set_coeffs_2(mach):
if mach <= 0.4:
self.k[0] = 1.0
self.k[1] = 0.0
self.k[2] = -0.6
self.k[3] = -0.04
elif mach >= 0.4 and mach <= 0.9:
self.k[0] = 0.88
self.k[1] = -0.016
self.k[2] = -0.3
self.k[3] = 0.0
# 3. bpr >= 8.0
def set_coeffs_3(mach):
if mach <= 0.4:
self.k[0] = 1.0
self.k[1] = 0.0
self.k[2] = -0.595
self.k[3] = -0.03
elif mach >= 0.4 and mach <= 0.9:
self.k[0] = -0.89
self.k[1] = -0.014
self.k[2] = -0.3
self.k[3] = 0.005
s = 0.7
for i in range(self.max_t):
if i >= 1:
mach = 0.1 * i
else:
mach = 0.0
if mach > 0.9:
mach -= 0.9
if self.bpr <= 1.0:
set_coeffs_1(mach)
s = 0.8
elif self.bpr >= 3.0 and self.bpr <= 6.0:
set_coeffs_2(mach)
elif self.bpr >= 8:
set_coeffs_3(mach)
else:
print("Error: Bypass ratio is out of range")
exit()
self.lp = (self.k[0] + self.k[1] * self.bpr + (self.k[2] +
self.k[3] * self.bpr) * mach) * (self.sigma ** s)
# print(mach)
self.x.append(mach)
self.y.append(self.lp)
# Output the graph
def output(self):
# set labels
plt.xlabel("Mach number")
plt.ylabel("Lapse rate")
plt.title("Performance curve of Engine", c="darkred", size="large", style="italic")
# plot 1. take-off
plt.plot(self.x, self.y, label="Sea level")
plt.legend(loc="upper left", frameon=True)
plt.show()
if __name__ == "__main__":
lp = LapseRate()
lp.calc_lp()
lp.output()
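# Worked check (a sketch, not part of the sizing script): with the default
# bypass ratio bpr = 12 (>= 8, so set_coeffs_3 applies), Mach 0.4 and sea level
# (sigma = 1.0, s = 0.7) the coefficients are k = [1.0, 0.0, -0.595, -0.03], so
#   lp = (k0 + k1*bpr + (k2 + k3*bpr)*M) * sigma**s
#      = (1.0 + 0.0*12 + (-0.595 - 0.03*12) * 0.4) * 1.0
#      = 1.0 - 0.955 * 0.4
#      = 0.618
# which is the last point plotted by calc_lp()/output() above.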
|
"""
Component to offer a way to select a date and / or a time.
For more details about this component, please refer to the documentation
at https://home-assistant.io/components/input_datetime/
"""
import logging
import datetime
import voluptuous as vol
from homeassistant.const import ATTR_ENTITY_ID, CONF_ICON, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'input_datetime'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
CONF_HAS_DATE = 'has_date'
CONF_HAS_TIME = 'has_time'
CONF_INITIAL = 'initial'
ATTR_DATE = 'date'
ATTR_TIME = 'time'
SERVICE_SET_DATETIME = 'set_datetime'
SERVICE_SET_DATETIME_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_DATE): cv.date,
vol.Optional(ATTR_TIME): cv.time,
})
def has_date_or_time(conf):
"""Check at least date or time is true."""
if conf[CONF_HAS_DATE] or conf[CONF_HAS_TIME]:
return conf
raise vol.Invalid('Entity needs at least a date or a time')
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
cv.slug: vol.All({
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_HAS_DATE, default=False): cv.boolean,
vol.Optional(CONF_HAS_TIME, default=False): cv.boolean,
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(CONF_INITIAL): cv.string,
}, has_date_or_time)})
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
"""Set up an input datetime."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entities = []
for object_id, cfg in config[DOMAIN].items():
name = cfg.get(CONF_NAME)
has_time = cfg.get(CONF_HAS_TIME)
has_date = cfg.get(CONF_HAS_DATE)
icon = cfg.get(CONF_ICON)
initial = cfg.get(CONF_INITIAL)
entities.append(InputDatetime(object_id, name,
has_date, has_time, icon, initial))
if not entities:
return False
async def async_set_datetime_service(entity, call):
"""Handle a call to the input datetime 'set datetime' service."""
time = call.data.get(ATTR_TIME)
date = call.data.get(ATTR_DATE)
if (entity.has_date and not date) or (entity.has_time and not time):
_LOGGER.error("Invalid service data for %s "
"input_datetime.set_datetime: %s",
entity.entity_id, str(call.data))
return
entity.async_set_datetime(date, time)
component.async_register_entity_service(
SERVICE_SET_DATETIME, SERVICE_SET_DATETIME_SCHEMA,
async_set_datetime_service
)
await component.async_add_entities(entities)
return True
class InputDatetime(RestoreEntity):
"""Representation of a datetime input."""
def __init__(self, object_id, name, has_date, has_time, icon, initial):
"""Initialize a select input."""
self.entity_id = ENTITY_ID_FORMAT.format(object_id)
self._name = name
self.has_date = has_date
self.has_time = has_time
self._icon = icon
self._initial = initial
self._current_datetime = None
async def async_added_to_hass(self):
"""Run when entity about to be added."""
await super().async_added_to_hass()
restore_val = None
# Priority 1: Initial State
if self._initial is not None:
restore_val = self._initial
# Priority 2: Old state
if restore_val is None:
old_state = await self.async_get_last_state()
if old_state is not None:
restore_val = old_state.state
if restore_val is not None:
if not self.has_date:
self._current_datetime = dt_util.parse_time(restore_val)
elif not self.has_time:
self._current_datetime = dt_util.parse_date(restore_val)
else:
self._current_datetime = dt_util.parse_datetime(restore_val)
@property
def should_poll(self):
"""If entity should be polled."""
return False
@property
def name(self):
"""Return the name of the select input."""
return self._name
@property
def icon(self):
"""Return the icon to be used for this entity."""
return self._icon
@property
def state(self):
"""Return the state of the component."""
return self._current_datetime
@property
def state_attributes(self):
"""Return the state attributes."""
attrs = {
'has_date': self.has_date,
'has_time': self.has_time,
}
if self._current_datetime is None:
return attrs
if self.has_date and self._current_datetime is not None:
attrs['year'] = self._current_datetime.year
attrs['month'] = self._current_datetime.month
attrs['day'] = self._current_datetime.day
if self.has_time and self._current_datetime is not None:
attrs['hour'] = self._current_datetime.hour
attrs['minute'] = self._current_datetime.minute
attrs['second'] = self._current_datetime.second
if not self.has_date:
attrs['timestamp'] = self._current_datetime.hour * 3600 + \
self._current_datetime.minute * 60 + \
self._current_datetime.second
elif not self.has_time:
extended = datetime.datetime.combine(self._current_datetime,
datetime.time(0, 0))
attrs['timestamp'] = extended.timestamp()
else:
attrs['timestamp'] = self._current_datetime.timestamp()
return attrs
def async_set_datetime(self, date_val, time_val):
"""Set a new date / time."""
if self.has_date and self.has_time and date_val and time_val:
self._current_datetime = datetime.datetime.combine(date_val,
time_val)
elif self.has_date and not self.has_time and date_val:
self._current_datetime = date_val
        elif self.has_time and not self.has_date and time_val:
self._current_datetime = time_val
self.async_schedule_update_ha_state()
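# Usage sketch (illustration only; the slug, name and initial value are made-up
# examples): a configuration dict of this shape is what CONFIG_SCHEMA above
# accepts, creating one entity that stores both a date and a time.
_EXAMPLE_CONFIG = {
    DOMAIN: {
        'next_service': {
            CONF_NAME: 'Next service date',
            CONF_HAS_DATE: True,
            CONF_HAS_TIME: True,
            CONF_INITIAL: '2030-01-01 09:00:00',
        }
    }
}
# CONFIG_SCHEMA(_EXAMPLE_CONFIG) returns the validated config; a config with
# neither has_date nor has_time would raise vol.Invalid via has_date_or_time().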
|
# Rest framework imports
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
# Serializers
from users_manage_api.serializers import UserLoginSerializer, UserProfileModelSerializer, CreateUserSerializer
# Internal apps imports
from users_manage_api.models import UserProfile
from students_site_api import models as student_models
class CreateUser(APIView):
queryset = UserProfile.objects.all()
#Retrieve all the users
serializer_class = CreateUserSerializer
#Default serializer for the view
    def post(self, request):
        serializer = CreateUserSerializer(data=request.data)
        if not serializer.is_valid():
            return Response("Invalid input", status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        # Create a progress record for the student
        student_progress = student_models.StudentProgress.objects.create(
            name=serializer.data['email']
        )
        student_progress.save()
        data = {
            'user': serializer.data,
        }
        return Response(data, status=status.HTTP_201_CREATED)
class UserAPIView(APIView):
#Only active users, inspect in model for more information about the fields
queryset = UserProfile.objects.filter(is_active=True, is_student=True)
    # Why is this the proper reference?
serializer_class = UserProfileModelSerializer
def get(self, request):
#Only shows active users and students
user = UserProfile.objects.filter(is_active=True, is_student=True)
serializer = UserProfileModelSerializer(user, many=True)
return Response(serializer.data)
def post(self, request):
"""User sign in."""
serializer = UserLoginSerializer(data=request.data)
if serializer.is_valid():
            # Returns an object instance based on the validated data and assigns it to user and token
user, token = serializer.save()
else:
return Response("Invalid input")
data = {
'user': UserProfileModelSerializer(user).data,
'access_token': token,
'api_message': 'login completed successfully'
}
return Response(data, status=status.HTTP_201_CREATED)
|
import argparse
# We can change the default message by `usage`
parser = argparse.ArgumentParser(prog='myprogram',
usage='%(prog)s [options]')
parser.add_argument('--foo',
nargs='?',
help='foo help')
parser.add_argument('bar',
nargs='+',
help='bar help')
# args = parser.parse_args()
parser.print_help()
|
# -*- coding: utf-8 -*-
"""
lantz.simulators.fungen
~~~~~~~~~~~~~~~~~~~~~~~
A simulated function generator.
See specification in the Lantz documentation.
:copyright: 2015 by The Lantz Authors
:license: BSD, see LICENSE for more details.
"""
import time
import logging
import math
from . import SIMULATORS
from .instrument import SimError, InstrumentHandler, main_tcp, main_serial, main_generic
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s',
datefmt='%Y-%d-%m %H:%M:%S')
class SimFunctionGenerator(InstrumentHandler):
def __init__(self):
super().__init__()
self._amp = 0.0
self.fre = 1000.0
self.off = 0.0
self._wvf = 0
self.out = 0
self.dou = {ch:0 for ch in range(1, 9)}
self.din = {ch:0 for ch in range(1, 9)}
self.din_key_convert = int
self.dou_key_convert = int
self.start_time = time.time() #This is for the 'experiment' example
@property
def idn(self):
return 'FunctionGenerator Serial #12345'
@property
def wvf(self):
return self._wvf
@wvf.setter
def wvf(self, value):
if value < 0 or value > 3:
raise SimError
self._wvf = value
@property
def amp(self):
return self._amp
@amp.setter
def amp(self, value):
if value > 10 or value < 0:
raise SimError
self._amp = value
def cal(self):
logging.info('Calibrating ...')
time.sleep(.1)
def tes(self, level, repetitions):
level = int(level)
repetitions = int(repetitions)
for rep in range(repetitions):
logging.info('Testing level %s. (%s/%s)', level, rep + 1, repetitions)
def generator_output(self):
"""This function generates the output, used in the 'experiment' example
"""
if self.out == 1:
dt = time.time() - self.start_time
value = self._amp * math.sin(2 * math.pi * self.fre * dt) + self.off
else:
value = 0
return value
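def sample_output_sketch():
    """Usage sketch (not part of the original simulator): drive the generator
    directly and read one sample of its sine output. Assumes, as the class's
    own __init__ suggests, that InstrumentHandler takes no constructor arguments."""
    gen = SimFunctionGenerator()
    gen.amp = 2.0   # accepted by the amp setter (0 <= value <= 10)
    gen.out = 1     # enable the output stage so generator_output() is non-zero
    return gen.generator_output()   # amp * sin(2*pi*fre*dt) + off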
def main(args=None):
return main_generic(args, SimFunctionGenerator)
SIMULATORS['fungen'] = main
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.enums.types import call_tracking_display_location as gage_call_tracking_display_location
from google.ads.googleads.v8.enums.types import call_type
from google.ads.googleads.v8.enums.types import google_voice_call_status
__protobuf__ = proto.module(
package='google.ads.googleads.v8.resources',
marshal='google.ads.googleads.v8',
manifest={
'CallView',
},
)
class CallView(proto.Message):
r"""A call view that includes data for call tracking of call-only
ads or call extensions.
Attributes:
resource_name (str):
Output only. The resource name of the call view. Call view
resource names have the form:
``customers/{customer_id}/callViews/{call_detail_id}``
caller_country_code (str):
Output only. Country code of the caller.
caller_area_code (str):
Output only. Area code of the caller. Null if
the call duration is shorter than 15 seconds.
call_duration_seconds (int):
Output only. The advertiser-provided call
duration in seconds.
start_call_date_time (str):
Output only. The advertiser-provided call
start date time.
end_call_date_time (str):
Output only. The advertiser-provided call end
date time.
call_tracking_display_location (google.ads.googleads.v8.enums.types.CallTrackingDisplayLocationEnum.CallTrackingDisplayLocation):
Output only. The call tracking display
location.
type_ (google.ads.googleads.v8.enums.types.CallTypeEnum.CallType):
Output only. The type of the call.
call_status (google.ads.googleads.v8.enums.types.GoogleVoiceCallStatusEnum.GoogleVoiceCallStatus):
Output only. The status of the call.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
caller_country_code = proto.Field(
proto.STRING,
number=2,
)
caller_area_code = proto.Field(
proto.STRING,
number=3,
)
call_duration_seconds = proto.Field(
proto.INT64,
number=4,
)
start_call_date_time = proto.Field(
proto.STRING,
number=5,
)
end_call_date_time = proto.Field(
proto.STRING,
number=6,
)
call_tracking_display_location = proto.Field(
proto.ENUM,
number=7,
enum=gage_call_tracking_display_location.CallTrackingDisplayLocationEnum.CallTrackingDisplayLocation,
)
type_ = proto.Field(
proto.ENUM,
number=8,
enum=call_type.CallTypeEnum.CallType,
)
call_status = proto.Field(
proto.ENUM,
number=9,
enum=google_voice_call_status.GoogleVoiceCallStatusEnum.GoogleVoiceCallStatus,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
import boto3
import pytest
from botocore.exceptions import ClientError
from moto import mock_s3
from moto import mock_dynamodb2
from handler import call
BUCKET = "some-bucket"
KEY = "incoming/transaction-0001.txt"
BODY = "Hello World!"
TXNS_TABLE = "my-transactions-table"
## Test Setup Functions
from contextlib import contextmanager
@contextmanager
def do_test_setup():
with mock_s3():
with mock_dynamodb2():
set_up_s3()
set_up_dynamodb()
yield
def set_up_s3():
conn = boto3.resource('s3', region_name='us-east-1')
conn.create_bucket(Bucket=BUCKET)
boto3.client('s3', region_name='us-east-1').put_object(Bucket=BUCKET, Key=KEY, Body=BODY)
def set_up_dynamodb():
client = boto3.client('dynamodb', region_name='us-east-1')
client.create_table(
AttributeDefinitions=[
{
'AttributeName': 'transaction_id',
'AttributeType': 'N'
},
],
KeySchema=[
{
'AttributeName': 'transaction_id',
'KeyType': 'HASH'
}
],
TableName=TXNS_TABLE,
ProvisionedThroughput={
'ReadCapacityUnits': 1,
'WriteCapacityUnits': 1
}
)
## Tests
def test_handler_moves_incoming_object_to_processed():
with do_test_setup():
# Run call with an event describing the file:
call(s3_object_created_event(BUCKET, KEY), None)
conn = boto3.resource('s3', region_name='us-east-1')
assert_object_doesnt_exist(conn, BUCKET, KEY)
# Check that it exists in `processed/`
obj = conn.Object(BUCKET, "processed/transaction-0001.txt").get()
assert obj['Body'].read() == b'Hello World!'
def test_handler_adds_record_in_dynamo_db_about_object():
with do_test_setup():
call(s3_object_created_event(BUCKET, KEY), None)
table = boto3.resource('dynamodb', region_name='us-east-1').Table(TXNS_TABLE)
item = table.get_item(Key={'transaction_id': '0001'})['Item']
assert item['body'] == 'Hello World!'
## Helpers
def assert_object_doesnt_exist(conn, bucket_name, key):
with pytest.raises(ClientError) as e_info:
conn.Object(bucket_name, key).get()
assert e_info.response['Error']['Code'] == 'NoSuchKey'
def s3_object_created_event(bucket_name, key):
return {
"Records": [
{
"eventVersion": "2.0",
"eventTime": "1970-01-01T00:00:00.000Z",
"requestParameters": {
"sourceIPAddress": "127.0.0.1"
},
"s3": {
"configurationId": "testConfigRule",
"object": {
"eTag": "0123456789abcdef0123456789abcdef",
"sequencer": "0A1B2C3D4E5F678901",
"key": key,
"size": 1024
},
"bucket": {
"arn": "bucketarn",
"name": bucket_name,
"ownerIdentity": {
"principalId": "EXAMPLE"
}
},
"s3SchemaVersion": "1.0"
},
"responseElements": {
"x-amz-id-2": "EXAMPLE123/5678abcdefghijklambdaisawesome/mnopqrstuvwxyzABCDEFGH",
"x-amz-request-id": "EXAMPLE123456789"
},
"awsRegion": "us-east-1",
"eventName": "ObjectCreated:Put",
"userIdentity": {
"principalId": "EXAMPLE"
},
"eventSource": "aws:s3"
}
]
}
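# Sketch of how the fixture above can be reused in further tests (illustration
# only): everything inside do_test_setup() sees the mocked S3 bucket seeded
# with BODY at KEY and the empty DynamoDB transactions table.
def test_setup_seeds_the_incoming_object():
    with do_test_setup():
        obj = boto3.resource('s3', region_name='us-east-1').Object(BUCKET, KEY).get()
        assert obj['Body'].read() == BODY.encode()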
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'Web programming exercise'
__author__ = 'Jacklee'
"""
使用flask框架
处理3个URL
1. GET / 首页 返回HOME
2. GET /signin 登录页,显示登录表单
3. POST /signin 处理登录表单,显示登录结果
对于不同的路由flask使用装饰器进行关联
案例中使用jinja2模板进行页面的渲染
需要安装jinja2
pip3 install jinja2
"""
from flask import Flask
from flask import request
from flask import render_template
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def home():
return render_template('home.html')
@app.route('/signin', methods=['GET'])
def signin_form():
return render_template('form.html')
@app.route('/signin', methods=['POST'])
def signin():
    # Read the submitted form fields from the request object
username = request.form['username']
if username == 'admin' and request.form['password'] == 'password':
return render_template('signin-ok.html', username=username)
return render_template('form.html', message='Bad username or password.', username=username)
if __name__ == '__main__':
app.run()
|
from poynt import API
class Store():
@classmethod
def get_store(cls, business_id, store_id):
"""
Gets a store by ID.
Arguments:
business_id (str): the business ID
store_id (str): the store ID
"""
api = API.shared_instance()
return api.request(
url='/businesses/%s/stores/%s' % (business_id, store_id),
method='GET'
)
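# Usage sketch (illustration only; the IDs are placeholders):
#
#   result = Store.get_store('business-id', 'store-id')
#
# get_store() simply forwards to API.shared_instance().request(), so the return
# value is whatever that call yields for this GET request.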
|
from ethereum.utils import sha3 as keccak256, decode_hex
from asynceth.test.utils import words
async def test_f(jsonrpc, abiv2_contract):
method_id = keccak256("f((uint256,uint256[],(uint256,uint256)[]),(uint256,uint256),uint256)")[:4].hex()
data = words('80', '8', '9', 'a', '1', '60', 'c0', '2', '2', '3', '2', '4', '5', '6', '7').hex()
data = "0x{method_id}{data}".format(method_id=method_id, data=data)
await jsonrpc.eth_call(to_address=abiv2_contract.address,
data=data)
function_input = ((1, (2, 3), ((4, 5), (6, 7))), (8, 9), 10)
await abiv2_contract.f(*function_input)
async def test_g(jsonrpc, abiv2_contract):
rval = await abiv2_contract.g()
assert rval == [(1, (2, 3), ((4, 5), (6, 7))), (8, 9), 10]
async def test_array_output(jsonrpc, abiv2_contract):
method_id = keccak256("testArrayOutput()")[:4].hex()
rval = await jsonrpc.eth_call(to_address=abiv2_contract.address,
data="0x" + method_id)
assert decode_hex(rval) == words('20', '2', '1', '2')
assert await abiv2_contract.testArrayOutput() == (1, 2)
async def test_multidimensional_array_output(jsonrpc, abiv2_contract):
method_id = keccak256("testMultidimensionalArrayOutput()")[:4].hex()
rval = await jsonrpc.eth_call(to_address=abiv2_contract.address,
data="0x" + method_id)
assert decode_hex(rval) == words('20', '2', '40', 'a0', '2', '1', '2', '2', '3', '4')
assert await abiv2_contract.testMultidimensionalArrayOutput() == ((1, 2), (3, 4))
async def test_struct_with_multidimensional_array_output(jsonrpc, abiv2_contract):
method_id = keccak256("testStructWithMultidimensionalArrayOutput()")[:4].hex()
rval = await jsonrpc.eth_call(to_address=abiv2_contract.address,
data="0x" + method_id)
assert decode_hex(rval) == words('20', '20', '2', '40', 'a0', '2', '1', '2', '2', '3', '4')
rval = await abiv2_contract.testStructWithMultidimensionalArrayOutput()
assert rval == (((1, 2), (3, 4)),)
async def test_struct_with_multidimensional_array_input(jsonrpc, abiv2_contract):
method_id = keccak256("testStructWithMultidimensionalArrayInput((uint256[][]))")[:4].hex()
data = words('20', '20', '2', '40', 'a0', '2', '1', '2', '2', '3', '4').hex()
rval = await jsonrpc.eth_call(to_address=abiv2_contract.address,
data="0x" + method_id + data)
assert int(rval, 16) == 10
assert await abiv2_contract.testStructWithMultidimensionalArrayInput((((1, 2), (3, 4)),)) == 10
async def test_struct_array_input(jsonrpc, abiv2_contract):
method_id = keccak256("testStructArrayInput((uint256,uint256)[])")[:4].hex()
struct_array = [(1, 2), (3, 4)]
data = "0x{method_id}{data_offset:064x}{num_elements:064x}{elements}".format(
method_id=method_id, data_offset=32, num_elements=len(struct_array),
elements="".join(["{0:064x}{1:064x}".format(*struct) for struct in struct_array]))
rval = await jsonrpc.eth_call(to_address=abiv2_contract.address,
data=data)
assert int(rval[2:], 16) == 10
assert await abiv2_contract.testStructArrayInput(struct_array) == 10
async def test_struct_multidimensional_array_output(jsonrpc, abiv2_contract):
method_id = keccak256("testStructMultidimensionalArrayOutput()")[:4].hex()
rval = await jsonrpc.eth_call(to_address=abiv2_contract.address,
data="0x" + method_id)
assert decode_hex(rval) == words('20', '40', 'e0', '2', '1', '2', '3', '4', '2', '5', '6', '7', '8')
rval = await abiv2_contract.testStructMultidimensionalArrayOutput()
assert rval == (((1, 2), (3, 4)), ((5, 6), (7, 8)))
async def test_multidimensional_array_input(jsonrpc, abiv2_contract):
method_id = keccak256("testMultidimensionalArrayInput(uint256[][])")[:4].hex()
data = "0x" + method_id + words('20', '2', '40', 'a0', '2', '1', '2', '2', '3', '4').hex()
rval = await jsonrpc.eth_call(to_address=abiv2_contract.address,
data=data)
assert int(rval, 16) == 10
assert await abiv2_contract.testMultidimensionalArrayInput(((1, 2), (3, 4))) == 10
|
"""The Rituals Perfume Genie integration."""
import asyncio
import logging
from aiohttp.client_exceptions import ClientConnectorError
from pyrituals import Account
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import ACCOUNT_HASH, DOMAIN
_LOGGER = logging.getLogger(__name__)
EMPTY_CREDENTIALS = ""
PLATFORMS = ["switch"]
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Rituals Perfume Genie component."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Rituals Perfume Genie from a config entry."""
session = async_get_clientsession(hass)
account = Account(EMPTY_CREDENTIALS, EMPTY_CREDENTIALS, session)
account.data = {ACCOUNT_HASH: entry.data.get(ACCOUNT_HASH)}
try:
await account.get_devices()
except ClientConnectorError as ex:
raise ConfigEntryNotReady from ex
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = account
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
|
import re
print("Example 1 : ")
pattern = 'd'
sequence = 'abcdef'
x = re.match(pattern,sequence)
y = re.search(pattern,sequence)
print(y.group())
try:
print(x.group())
except:
print("Using the match function will now return any value. "+
"This is because it seaches from first, and if it is false prints false")
print("Example 2 : ")
pattern = 'a'
pattern1 = 'd'
sequence = 'abcdef'
x = re.match(pattern,sequence)
y = re.search(pattern1,sequence)
if x:
print("Match Found")
else:
print("Match not found")
if y:
print("Match Found")
else:
print("Match not found")
input("Press any key to exit ") |
"""test max methods"""
__revision__ = None
class Aaaa(object):
"""yo"""
def __init__(self):
pass
def meth1(self):
"""hehehe"""
raise NotImplementedError
def meth2(self):
"""hehehe"""
return 'Yo', self
class Bbbb(Aaaa):
"""yeah"""
def meth1(self):
"""hehehe bis"""
return "yeah", self
|
print((True or False) == True)
print((True or True) == True)
print((False or False) == False)
print((True and False) == False)
print((True and True) == True)
print((False and False) == False)
print((not True) == False)
print((not False) == True)
print((not True or False) == ((not True) or False))
print((not False or False) == ((not False) or False))
print((not True and True) == ((not True) and True))
print((not False and True) == ((not False) and True))
print((not True and not False or False) == (((not True) and (not False)) or False))
|
from fastapi import APIRouter
from app.core.config import settings
from .endpoints import login, users, wishlist
api_router = APIRouter(prefix=settings.api_v1_str)
api_router.include_router(login.router)
api_router.include_router(users.router)
api_router.include_router(wishlist.admin_router)
api_router.include_router(wishlist.router)
|
import time
from concurrent import futures
import grpc
from eu.softfire.tub.core import CoreManagers
from eu.softfire.tub.core.CoreManagers import list_resources
from eu.softfire.tub.entities.entities import ManagerEndpoint, ResourceMetadata
from eu.softfire.tub.entities.repositories import save, find, delete, find_by_element_value
from eu.softfire.tub.messaging.grpc import messages_pb2
from eu.softfire.tub.messaging.grpc import messages_pb2_grpc
from eu.softfire.tub.utils.utils import get_logger, get_config, get_mapping_managers
logger = get_logger('eu.softfire.tub.messaging')
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
def receive_forever():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=int(get_config('system', 'server_threads', 5))))
messages_pb2_grpc.add_RegistrationServiceServicer_to_server(RegistrationAgent(), server)
binding = '[::]:%s' % get_config('messaging', 'bind_port', 50051)
logger.info("Binding rpc registration server to: %s" % binding)
server.add_insecure_port(binding)
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
logger.debug("Stopping server")
server.stop(True)
def unregister_endpoint(manager_endpoint_name: str) -> bool:
deleted = False
for manager_endpoint in find(ManagerEndpoint):
if manager_endpoint.name == manager_endpoint_name:
for resource_type in get_mapping_managers().get(manager_endpoint.name):
for rm in [rm for rm in find(ResourceMetadata) if rm.node_type.lower() == resource_type.lower()]:
delete(rm)
delete(manager_endpoint)
deleted = True
return deleted
class RegistrationAgent(messages_pb2_grpc.RegistrationServiceServicer):
def update_status(self, request, context):
# logger.debug("Received request: %s" % request)
username = request.username
manager_name = request.manager_name
resources = request.resources
if username and manager_name and resources and len(resources):
CoreManagers.update_experiment(username, manager_name, resources)
response_message = messages_pb2.ResponseMessage()
response_message.result = 0
return response_message
def unregister(self, request, context):
logger.info("unregistering %s" % request.name)
deleted = unregister_endpoint(request.name)
if deleted:
return messages_pb2.ResponseMessage(result=0)
else:
return messages_pb2.ResponseMessage(result=1, error_message="manager endpoint not found")
def register(self, request, context):
logger.info("registering %s" % request.name)
old_managers = find_by_element_value(ManagerEndpoint, ManagerEndpoint.name, request.name)
for old_man in old_managers:
delete(old_man)
logger.debug("Removed old manager endpoint: %s:%s" % (old_man.name, old_man.endpoint))
manager_endpoint = ManagerEndpoint()
manager_endpoint.name = request.name
manager_endpoint.endpoint = request.endpoint
save(manager_endpoint, ManagerEndpoint)
list_resources()
response_message = messages_pb2.ResponseMessage()
response_message.result = 0
return response_message
    def __init__(self):
        self._stop = False
    def stop(self):
        self._stop = True
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, unicode_literals, absolute_import
import sys,os
import numpy as np
import pandas as pd
# get access to math tools from RAVEN
try:
from utils import mathUtils
except ImportError:
new = os.path.realpath(os.path.join(os.path.realpath(__file__),'..','..','..','..','framework'))
sys.path.append(new)
from utils import mathUtils
whoAmI = False # enable to show test dir and out files
debug = False # enable to increase printing
class UnorderedCSVDiffer:
"""
Used for comparing two CSV files without regard for column, row orders
"""
def __init__(self, test_dir, out_files,relative_error=1e-10,absolute_check=False,zeroThreshold=None):
"""
Create an UnorderedCSVDiffer class
Note naming conventions are out of our control due to MOOSE test harness standards.
@ In, test_dir, the directory where the test takes place
@ In, out_files, the files to be compared. They will be in test_dir + out_files
@ In, relative_error, float, optional, relative error
      @ In, absolute_check, bool, optional, if True then compare absolute values instead of using relative error
@ Out, None.
"""
self.__out_files = out_files
self.__message = ""
self.__same = True
self.__test_dir = test_dir
self.__check_absolute_values = absolute_check
self.__rel_err = relative_error
self.__zero_threshold = float(zeroThreshold) if zeroThreshold is not None else 0.0
if debug or whoAmI:
print('test dir :',self.__test_dir)
print('out files:',self.__out_files)
if debug:
print('err :',self.__rel_err)
print('abs check:',self.__check_absolute_values)
print('zero thr :',self.__zero_threshold)
def finalizeMessage(self,same,msg,filename):
"""
Compiles useful messages to print, prepending with file paths.
@ In, same, bool, True if files are the same
@ In, msg, list(str), messages that explain differences
@ In, filename, str, test filename/path
@ Out, None
"""
if not same:
self.__same = False
self.__message += '\nDIFF in {}: \n {}'.format(filename,'\n '.join(msg))
def findRow(self,row,csv):
"""
Searches for "row" in "csv"
@ In, row, pd.Series, row of data
@ In, csv, pd.Dataframe, dataframe to look in
@ Out, match, pd.Dataframe or list, matching row of data (or empty list if none found)
"""
if debug:
print('')
print('Looking for:\n',row)
print('Looking in:\n',csv)
match = csv.copy()
# TODO can I do this as a single search, using binomial on floats +- rel_err?
for idx, val in row.iteritems():
if debug:
print(' checking index',idx,'value',val)
# Due to relative matches in floats, we may not be sorted with respect to this index.
## In an ideal world with perfect matches, we would be. Unfortunately, we have to sort again.
match = match.sort_values(idx)
# check type consistency
## get a sample from the matching CSV column
### TODO could check indices ONCE and re-use instead of checking each time
matchVal = match[idx].values.item(0) if match[idx].values.shape[0] != 0 else None
## find out if match[idx] and/or "val" are numbers
matchIsNumber = mathUtils.isAFloatOrInt(matchVal)
valIsNumber = mathUtils.isAFloatOrInt(val)
## if one is a number and the other is not, consider it a non-match.
if matchIsNumber != valIsNumber:
if debug:
print(' Not same type (number)! lfor: "{}" lin: "{}"'.format(valIsNumber,matchIsNumber))
return []
# find index of lowest and highest possible matches
## if values are floats, then matches could be as low as val(1-rel_err) and as high as val(1+rel_err)
if matchIsNumber:
# adjust for negative values
sign = np.sign(val)
lowest = np.searchsorted(match[idx].values,val*(1.0-sign*self.__rel_err))
highest = np.searchsorted(match[idx].values,val*(1.0+sign*self.__rel_err),side='right')-1
## if not floats, then check exact matches
else:
lowest = np.searchsorted(match[idx].values,val)
highest = np.searchsorted(match[idx].values,val,side='right')-1
if debug:
print(' low/hi match index:',lowest,highest)
## if lowest is past end of array, no match found
if lowest == len(match[idx]):
if debug:
print(' Match is past end of sort list!')
return []
## if entry at lowest index doesn't match entry, then it's not to be found
if not self.matches(match[idx].values[lowest],val,matchIsNumber,self.__rel_err):
if debug:
print(' Match is not equal to insert point!')
return []
## otherwise, we have some range of matches
match = match[slice(lowest,highest+1)]
if debug:
print(' After searching for {}={}, remaining matches:\n'.format(idx,val),match)
return match
def matches(self,a,b,isNumber,tol):
"""
Determines if two objects match within tolerance.
@ In, a, object, first object ("measured")
@ In, b, object, second object ("actual")
@ In, isNumber, bool, if True then treat as float with tolerance (else check equivalence)
@ In, tol, float, tolerance at which to hold match (if float)
@ Out, matches, bool, True if matching
"""
if not isNumber:
return a == b
if self.__check_absolute_values:
return abs(a-b) < tol
# otherwise, relative error
scale = abs(b) if b != 0 else 1.0
return abs(a-b) < scale*tol
def diff(self):
"""
Run the comparison.
@ In, None
@ Out, same, bool, if True then files are the same
@ Out, messages, str, messages to print on fail
"""
# read in files
for outFile in self.__out_files:
# local "same" and message list
same = True
msg = []
# load test file
testFilename = os.path.join(self.__test_dir,outFile)
try:
testCSV = pd.read_csv(testFilename,sep=',')
# if file is empty, we can check that's consistent, too
except pd.errors.EmptyDataError:
testCSV = None
# if file doesn't exist, that's another problem
except IOError:
msg.append('Test file does not exist!')
same = False
# load gold file
goldFilename = os.path.join(self.__test_dir, 'gold', outFile)
try:
goldCSV = pd.read_csv(goldFilename,sep=',')
# if file is empty, we can check that's consistent, too
except pd.errors.EmptyDataError:
goldCSV = None
# if file doesn't exist, that's another problem
except IOError:
msg.append('Gold file does not exist!')
same = False
# if either file did not exist, clean up and go to next outfile
if not same:
self.finalizeMessage(same,msg,testFilename)
continue
# at this point, we've loaded both files (even if they're empty), so compare them.
## first, cover the case when both files are empty.
if testCSV is None or goldCSV is None:
if not (testCSV is None and goldCSV is None):
same = False
if testCSV is None:
msg.append('Test file is empty, but Gold is not!')
else:
msg.append('Gold file is empty, but Test is not!')
# either way, move on to the next file, as no more comparison is needed
self.finalizeMessage(same,msg,testFilename)
continue
## at this point, both files have data loaded
## check columns using symmetric difference
diffColumns = set(goldCSV.columns)^set(testCSV.columns)
if len(diffColumns) > 0:
same = False
msg.append('Columns are not the same! Different: {}'.format(', '.join(diffColumns)))
self.finalizeMessage(same,msg,testFilename)
continue
## check index length
if len(goldCSV.index) != len(testCSV.index):
same = False
        msg.append('Different number of entries in Gold ({}) versus Test ({})!'.format(len(goldCSV.index),len(testCSV.index)))
self.finalizeMessage(same,msg,testFilename)
continue
## at this point both CSVs have the same shape, with the same header contents.
## align columns
testCSV = testCSV[goldCSV.columns.tolist()]
## set marginal values to zero, fix infinites
testCSV = self.prepDataframe(testCSV,self.__zero_threshold)
goldCSV = self.prepDataframe(goldCSV,self.__zero_threshold)
## check for matching rows
for idx in goldCSV.index:
find = goldCSV.iloc[idx].rename(None)
match = self.findRow(find,testCSV)
if len(match) == 0:
same = False
msg.append('Could not find match for row "{}" in Gold:\n{}'.format(idx+1,find)) #+1 because of header row
msg.append('The Test output csv is:')
msg.append(str(testCSV))
# stop looking once a mismatch is found
break
self.finalizeMessage(same,msg,testFilename)
return self.__same, self.__message
def prepDataframe(self,csv,tol):
"""
Does several prep actions:
- For any columns that contain numbers, drop near-zero numbers to zero
- replace infs and nans with symbolic values
@ In, csv, pd.DataFrame, contents to reduce
      @ In, tol, float, tolerance for values considered sufficiently near zero
@ Out, csv, converted dataframe
"""
# use absolute or relative?
key = {'atol':tol} if self.__check_absolute_values else {'rtol':tol}
# take care of infinites
    csv = csv.replace(np.inf, -sys.maxsize)
    csv = csv.replace(np.nan, sys.maxsize)
for col in csv.columns:
example = csv[col].values.item(0) if csv[col].values.shape[0] != 0 else None
# skip columns that aren't numbers TODO might skip float columns with "None" early on
if not mathUtils.isAFloatOrInt(example):
continue
# flatten near-zeros
csv[col].values[np.isclose(csv[col].values,0,**key)] = 0
# TODO would like to sort here, but due to relative errors it doesn't do enough good. Instead, sort in findRow.
return csv
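# Usage sketch (illustration only; the directory and file names are made up):
# compare <test_dir>/out.csv against <test_dir>/gold/out.csv, ignoring row and
# column order and allowing a relative error of 1e-8 on numeric cells.
if __name__ == '__main__':
    differ = UnorderedCSVDiffer('tests/my_case', ['out.csv'], relative_error=1e-8)
    same, message = differ.diff()
    if not same:
        print(message)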
|
import requests
response = requests.get('https://api.github.com')
# Check the status code
if response.status_code == 200:
print("Success")
elif response.status_code == 404:
print('Not Found')
# More general check: the Response object is truthy for status codes from 200 up to (but not including) 400
if response:
print('Success')
else:
    print('An error has occurred')
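# A related pattern (sketch, not part of the original example): instead of
# checking status codes by hand, raise_for_status() raises requests.HTTPError
# for any 4xx/5xx response.
try:
    response.raise_for_status()
    print('Success')
except requests.exceptions.HTTPError as err:
    print('An error has occurred:', err)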
|
"""Run a simple web server which responds to requests to monitor a directory
tree and identify files over a certain size threshold. The directory is
scanned to pick up the current situation and then monitored for new / removed
files. By default, the page auto-refreshes every 60 seconds and shows the
top 50 files ordered by size.
"""
from __future__ import with_statement
import os, sys
import cgi
import datetime
import operator
import socket
import threading
import time
import traceback
import Queue
import urllib
import urlparse
from wsgiref.simple_server import make_server
from wsgiref.util import shift_path_info
import win32timezone
import error_handler
from winsys import core, fs, misc
print("Logging to %s" % core.log_filepath)
def deltastamp(delta):
def pluralise(base, n):
if n > 1:
return "%d %ss" % (n, base)
else:
return "%d %s" % (n, base)
if delta > datetime.timedelta(0):
output_format = "%s ago"
else:
output_format = "in %s"
days = delta.days
if days != 0:
wks, days = divmod(days, 7)
if wks > 0:
if wks < 9:
output = pluralise("wk", wks)
else:
                output = pluralise("mth", int(round(1.0 * wks / 4.125)))
else:
output = pluralise("day", days)
else:
mins, secs = divmod(delta.seconds, 60)
hrs, mins = divmod(mins, 60)
if hrs > 0:
output = pluralise("hr", hrs)
elif mins > 0:
output = pluralise("min", mins)
else:
output = pluralise("sec", secs)
return output_format % output
class x_stop_exception(Exception):
pass
def get_files(path, size_threshold_mb, results, stop_event):
"""Intended to run inside a thread: scan the contents of
a tree recursively, pushing every file which is at least
as big as the size threshold onto a results queue. Stop
if the stop_event is set.
"""
size_threshold = size_threshold_mb * 1024 * 1024
root = fs.dir(path)
top_level_folders = sorted(root.dirs(), key=operator.attrgetter("written_at"), reverse=True)
try:
for tlf in top_level_folders:
for f in tlf.flat(ignore_access_errors=True):
if stop_event.isSet():
print("stop event set")
raise x_stop_exception
try:
if f.size > size_threshold:
results.put(f)
except fs.exc.x_winsys:
continue
except x_stop_exception:
return
def watch_files(path, size_threshold_mb, results, stop_event):
"""Intended to run inside a thread: monitor a directory tree
for file changes. Convert the changed files to fs.File objects
and push then onto a results queue. Stop if the stop_event is set.
"""
size_threshold = size_threshold_mb * 1024 * 1024
BUFFER_SIZE = 8192
MAX_BUFFER_SIZE = 1024 * 1024
#
# The double loop is because the watcher process
# can fall over with an internal which is (I think)
# related to a small buffer size. If that happens,
# restart the process with a bigger buffer up to a
# maximum size.
#
buffer_size = BUFFER_SIZE
while True:
watcher = fs.watch(path, True, buffer_size=buffer_size)
while True:
if stop_event.isSet(): break
try:
action, old_file, new_file = watcher.next()
core.warn("Monitored: %s - %s => %s" % (action, old_file, new_file))
if old_file is not None:
if (not old_file) or (old_file and old_file.size > size_threshold):
results.put(old_file)
if new_file is not None and new_file != old_file:
if new_file and new_file.size > size_threshold:
results.put(new_file)
except fs.exc.x_winsys:
pass
except RuntimeError:
try:
watcher.stop()
except:
pass
buffer_size = min(2 * buffer_size, MAX_BUFFER_SIZE)
print("Tripped up on a RuntimeError. Trying with buffer of", buffer_size)
class Path(object):
"""Keep track of the files and changes under a particular
path tree. No attempt is made to optimise the cases where
one tree is contained within another.
When the Path is started, it kicks of two threads: one to
do a complete scan; the other to monitor changes. Both
write back to the same results queue which is the basis
for the set of files which will be sorted and presented
on the webpage.
For manageability, the files are pulled off the queue a
chunk at a time (by default 1000).
"""
def __init__(self, path, size_threshold_mb, n_files_at_a_time):
self._path = path
self._size_threshold_mb = size_threshold_mb
self._n_files_at_a_time = n_files_at_a_time
self._changes = Queue.Queue()
self._stop_event = threading.Event()
self._files = set()
self.file_getter = threading.Thread(
target=get_files,
args=(path, size_threshold_mb, self._changes, self._stop_event)
)
self.file_getter.setDaemon(1)
self.file_getter.start()
self.file_watcher = threading.Thread(
target=watch_files,
args=(path, size_threshold_mb, self._changes, self._stop_event)
)
self.file_watcher.setDaemon(1)
self.file_watcher.start()
def __str__(self):
return "<Path: %s (%d files above %d Mb)>" % (self._path, len(self._files), self._size_threshold_mb)
__repr__ = __str__
def updated(self):
"""Pull at most _n_files_at_a_time files from the queue. If the
file exists, add it to the set (which will, of course, ignore
duplicates). If it doesn't exist, remove it from the set, ignoring
the case where it isn't there to start with.
"""
for i in range(self._n_files_at_a_time):
try:
f = self._changes.get_nowait()
if f:
self._files.add(f)
else:
self._files.discard(f)
except Queue.Empty:
break
return self._files
def finish(self):
self._stop_event.set()
def status(self):
status = []
if self.file_getter.isAlive():
status.append("Scanning")
if self.file_watcher.isAlive():
status.append("Monitoring")
return " & ".join(status)
class App(object):
"""The controlling WSGI app. On each request, it looks up the
path handler which corresponds to the path form variable. It then
pulls any new entries and displays them according to the user's
parameters.
"""
PATH = ""
N_FILES_AT_A_TIME = 1000
SIZE_THRESHOLD_MB = 100
TOP_N_FILES = 50
REFRESH_SECS = 60
HIGHLIGHT_DAYS = 0
HIGHLIGHT_HRS = 12
HIGHLIGHT_MINS = 0
def __init__(self):
self._paths_lock = threading.Lock()
self.paths = {}
self._paths_accessed = {}
def doc(self, files, status, form):
path = form.get("path", self.PATH)
top_n_files = int(form.get("top_n_files", self.TOP_N_FILES) or 0)
size_threshold_mb = int(form.get("size_threshold_mb", self.SIZE_THRESHOLD_MB) or 0)
refresh_secs = int(form.get("refresh_secs", self.REFRESH_SECS) or 0)
highlight_days = int(form.get("highlight_days", self.HIGHLIGHT_DAYS) or 0)
highlight_hrs = int(form.get("highlight_hrs", self.HIGHLIGHT_HRS) or 0)
highlight_mins = int(form.get("highlight_mins", self.HIGHLIGHT_MINS) or 0)
highlight_delta = datetime.timedelta(days=highlight_days, hours=highlight_hrs, minutes=highlight_mins)
highlight_deltastamp = deltastamp(highlight_delta)
if files:
title = cgi.escape("Top %d files on %s over %dMb - %s" % (min(len(files), self.TOP_N_FILES), path, size_threshold_mb, status))
else:
title = cgi.escape("Top files on %s over %dMb - %s" % (path, size_threshold_mb, status))
doc = []
doc.append("<html><head><title>%s</title>" % title)
doc.append("""<style>
body {font-family : calibri, verdana, sans-serif;}
h1 {font-size : 120%;}
form#params {font-size : 120%;}
form#params input {font-family : calibri, verdana, sans-serif;}
form#params span.label {font-weight : bold;}
p.updated {margin-bottom : 1em; font-style : italic;}
table {width : 100%;}
thead tr {background-color : black; color : white; font-weight : bold;}
table tr.odd {background-color : #ddd;}
table tr.highlight td {background-color : #ffff80;}
table td {padding-right : 0.5em;}
table td.filename {width : 72%;}
</style>""")
doc.append("""<style media="print">
form#params {display : none;}
</style>""")
doc.append("</head><body>")
doc.append("""<form id="params" action="/" method="GET">
<span class="label">Scan</span> <input type="text" name="path" value="%(path)s" size="20" maxlength="20" />
<span class="label">for files over</span> <input type="text" name="size_threshold_mb" value="%(size_threshold_mb)s" size="5" maxlength="5" />Mb
<span class="label">showing the top</span> <input type="text" name="top_n_files" value="%(top_n_files)s" size="3" maxlength="3" /> files
<span class="label">refreshing every</span> <input type="text" name="refresh_secs" value="%(refresh_secs)s" size="3" maxlength="3" /> secs
<span class="label">highlighting the last </span> <input type="text" name="highlight_days" value="%(highlight_days)s" size="3" maxlength="3" /> days
</span> <input type="text" name="highlight_hrs" value="%(highlight_hrs)s" size="3" maxlength="3" /> hrs
</span> <input type="text" name="highlight_mins" value="%(highlight_mins)s" size="3" maxlength="3" /> mins
<input type="submit" value="Refresh" />
</form><hr>""" % locals())
now = win32timezone.utcnow()
if path:
doc.append("<h1>%s</h1>" % title)
latest_filename = "\\".join(files[-1].parts[1:]) if files else "(no file yet)"
doc.append(u'<p class="updated">Last updated %s</p>' % time.asctime())
doc.append(u'<table><thead><tr><td class="filename">Filename</td><td class="size">Size (Mb)</td><td class="updated">Updated</td></tr></thead>')
for i, f in enumerate(files[:top_n_files]):
try:
doc.append(
u'<tr class="%s %s"><td class="filename">%s</td><td class="size">%5.2f</td><td class="updated">%s</td>' % (
"odd" if i % 2 else "even",
"highlight" if ((now - max(f.written_at, f.created_at)) <= highlight_delta) else "",
f.relative_to(path).lstrip(fs.seps),
f.size / 1024.0 / 1024.0,
max(f.written_at, f.created_at)
)
)
except fs.exc.x_winsys:
pass
doc.append("</table>")
doc.append("</body></html>")
return doc
def handler(self, form):
path = form.get("path", self.PATH)
size_threshold_mb = int(form.get("size_threshold_mb", self.SIZE_THRESHOLD_MB) or 0)
refresh_secs = int(form.get("refresh_secs", self.REFRESH_SECS) or 0)
status = "Waiting"
if path and fs.Dir(path):
#
# Ignore any non-existent paths, including garbage.
# Create a new path handler if needed, or pull back
# and existing one, and return the latest list.
#
with self._paths_lock:
if path not in self.paths:
self.paths[path] = Path(path, size_threshold_mb, self.N_FILES_AT_A_TIME)
path_handler = self.paths[path]
if path_handler._size_threshold_mb != size_threshold_mb:
path_handler.finish()
path_handler = self.paths[path] = Path(path, size_threshold_mb, self.N_FILES_AT_A_TIME)
self._paths_accessed[path] = win32timezone.utcnow()
files = sorted(path_handler.updated(), key=operator.attrgetter("size"), reverse=True)
status = path_handler.status()
#
# If any path hasn't been queried for at least
# three minutes, close the thread down and delete
# its entry. If it is queried again, it will just
# be restarted as new.
#
            for path, last_accessed in list(self._paths_accessed.items()):
if (win32timezone.utcnow() - last_accessed).seconds > 180:
path_handler = self.paths.get(path)
if path_handler:
path_handler.finish()
del self.paths[path]
del self._paths_accessed[path]
else:
files = []
return self.doc(files, status, form)
def __call__(self, environ, start_response):
"""Only attempt to handle the root URI. If a refresh interval
is requested (the default) then send a header which forces
the refresh.
"""
path = shift_path_info(environ).rstrip("/")
if path == "":
            form = dict((k, v[0]) for (k, v) in cgi.parse_qs(environ['QUERY_STRING']).iteritems() if v)
if form.get("path"):
form['path'] = form['path'].rstrip("\\") + "\\"
refresh_secs = int(form.get("refresh_secs", self.REFRESH_SECS) or 0)
headers = []
headers.append(("Content-Type", "text/html; charset=utf-8"))
if refresh_secs:
headers.append(("Refresh", "%s" % refresh_secs))
start_response("200 OK", headers)
return (d.encode("utf8") + "\n" for d in self.handler(form))
else:
start_response("404 Not Found", [("Content-Type", "text/plain")])
return []
def finish(self):
for path_handler in self.paths.itervalues():
path_handler.finish()
if __name__ == '__main__':
misc.set_console_title("Monitor Directory")
PORT = 8000
HOSTNAME = socket.getfqdn()
threading.Timer(
2.0,
lambda: os.startfile("http://%s:%s" % (HOSTNAME, PORT))
).start()
app = App()
try:
make_server('', PORT, app).serve_forever()
except KeyboardInterrupt:
print("Shutting down gracefully...")
finally:
app.finish()
|
from graphene.relay.node import Node
from graphene_django.types import DjangoObjectType
from cookbook.graphql.core.connection import CountableDjangoObjectType
from cookbook.ingredients.models import Category, Ingredient
from cookbook.ingredients.filtersets import IngredientFilterSet
# Graphene will automatically map the Category model's fields onto the CategoryNode.
# This is configured in the CategoryNode's Meta class (as you can see below)
class CategoryNode(CountableDjangoObjectType):
class Meta:
model = Category
interfaces = (Node,)
# filter_fields = ["name", "ingredients"]
# Allow for some more advanced filtering here
filter_fields = {
"name": ["exact", "icontains", "istartswith"],
"ingredients": ["exact"],
}
# class IngredientNode(CountableDjangoObjectType):
class IngredientNode(DjangoObjectType):
class Meta:
model = Ingredient
# Allow for some more advanced filtering here
interfaces = (Node,)
# filter_fields = {
# "name": ["exact", "icontains", "istartswith"],
# "notes": ["exact", "icontains"],
# "category": ["exact"],
# "category__name": ["exact"],
# }
# demo via a filterset_class
# filterset_class = IngredientFilterSet
|
from django.urls import path
from the_mechanic_backend.v0.service import views
urlpatterns = [
path('', views.ServiceList.as_view(), name='create-service'),
#
# path('<int:service_id>/', views.ServiceDetailsView.as_view(), name='update-service'),
path('general/', views.GeneralServiceView.as_view(), name='general-service'),
path('service_type/', views.ServiceTypeView.as_view(), name='sub-service'),
path('service_type/<int:service_type_id>/sub_service', views.SubServiceView.as_view(), name='sub-service'),
]
|
from django.conf.urls.defaults import url, patterns, include
registry_urlpatterns = patterns(
'varify.samples.views',
url(r'^$', 'registry', name='global-registry'),
url(r'^projects/(?P<pk>\d+)/$', 'project_registry',
name='project-registry'),
url(r'^batches/(?P<pk>\d+)/$', 'batch_registry', name='batch-registry'),
url(r'^samples/(?P<pk>\d+)/$', 'sample_registry', name='sample-registry'),
)
urlpatterns = patterns(
'varify.samples.views',
url(r'^registry/', include(registry_urlpatterns)),
url(r'^cohorts/$', 'cohort_form', name='cohorts'),
url(r'^cohorts/(?P<pk>\d+)/$', 'cohort_form', name='cohorts'),
url(r'^cohorts/(?P<pk>\d+)/delete/$', 'cohort_delete',
name='cohort-delete'),
)
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Zcash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_true, start_nodes
class WalletImportExportTest (BitcoinTestFramework):
def setup_network(self, split=False):
num_nodes = 3
extra_args = [([
"-exportdir={}/export{}".format(self.options.tmpdir, i),
] + (["-walletrequirebackup"] if i == 0 else [])) for i in range(num_nodes)]
self.nodes = start_nodes(num_nodes, self.options.tmpdir, extra_args)
def run_test(self):
sapling_address2 = self.nodes[2].z_getnewaddress('sapling')
privkey2 = self.nodes[2].z_exportkey(sapling_address2)
self.nodes[0].z_importkey(privkey2)
# test walletconfirmbackup
try:
self.nodes[0].getnewaddress()
except JSONRPCException as e:
errorString = e.error['message']
assert_equal("Error: Please acknowledge that you have backed up" in errorString, True)
try:
self.nodes[0].z_getnewaddress('sapling')
except JSONRPCException as e:
errorString = e.error['message']
assert_equal("Error: Please acknowledge that you have backed up" in errorString, True)
dump_path0 = self.nodes[0].z_exportwallet('walletdumpmnem')
(mnemonic, _, _, _) = parse_wallet_file(dump_path0)
self.nodes[0].walletconfirmbackup(mnemonic)
# Now that we've confirmed backup, we can generate addresses
sprout_address0 = self.nodes[0].z_getnewaddress('sprout')
sapling_address0 = self.nodes[0].z_getnewaddress('sapling')
# node 0 should have the keys
dump_path0 = self.nodes[0].z_exportwallet('walletdump')
(_, t_keys0, sprout_keys0, sapling_keys0) = parse_wallet_file(dump_path0)
sapling_line_lengths = [len(sapling_key0.split(' #')[0].split()) for sapling_key0 in sapling_keys0.splitlines()]
assert_equal(2, len(sapling_line_lengths), "Should have 2 sapling keys")
assert_true(2 in sapling_line_lengths, "Should have a key with 2 parameters")
assert_true(4 in sapling_line_lengths, "Should have a key with 4 parameters")
assert_true(sprout_address0 in sprout_keys0)
assert_true(sapling_address0 in sapling_keys0)
assert_true(sapling_address2 in sapling_keys0)
# node 1 should not have the keys
dump_path1 = self.nodes[1].z_exportwallet('walletdumpbefore')
(_, t_keys1, sprout_keys1, sapling_keys1) = parse_wallet_file(dump_path1)
assert_true(sprout_address0 not in sprout_keys1)
assert_true(sapling_address0 not in sapling_keys1)
# import wallet to node 1
self.nodes[1].z_importwallet(dump_path0)
# node 1 should now have the keys
dump_path1 = self.nodes[1].z_exportwallet('walletdumpafter')
(_, t_keys1, sprout_keys1, sapling_keys1) = parse_wallet_file(dump_path1)
assert_true(sprout_address0 in sprout_keys1)
assert_true(sapling_address0 in sapling_keys1)
assert_true(sapling_address2 in sapling_keys1)
# make sure we have preserved the metadata
for sapling_key0 in sapling_keys0.splitlines():
assert_true(sapling_key0 in sapling_keys1)
# Helper functions
def parse_wallet_file(dump_path):
file_lines = open(dump_path, "r", encoding="utf8").readlines()
    # We expect information about the HDSeed and fingerprint in the header
assert_true("recovery_phrase" in file_lines[5], "Expected emergency recovery phrase")
assert_true("language" in file_lines[6], "Expected mnemonic seed language")
assert_true("fingerprint" in file_lines[7], "Expected mnemonic seed fingerprint")
mnemonic = file_lines[5].split("=")[1].replace("\"", "").strip()
(t_keys, i) = parse_wallet_file_lines(file_lines, 0)
(sprout_keys, i) = parse_wallet_file_lines(file_lines, i)
(sapling_keys, i) = parse_wallet_file_lines(file_lines, i)
return (mnemonic, t_keys, sprout_keys, sapling_keys)
def parse_wallet_file_lines(file_lines, i):
keys = []
# skip blank lines and comments
while i < len(file_lines) and (file_lines[i] == '\n' or file_lines[i].startswith("#")):
i += 1
# add keys until we hit another blank line or comment
while i < len(file_lines) and not (file_lines[i] == '\n' or file_lines[i].startswith("#")):
keys.append(file_lines[i])
i += 1
return ("".join(keys), i)
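# Illustrative shape of the dump being parsed (inferred from the asserts above, not a spec
# of the real export format): a comment header whose lines 6-8 carry recovery_phrase,
# language and fingerprint, followed by three comment/blank-delimited sections holding the
# transparent, sprout and sapling keys, in that order.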
if __name__ == '__main__':
WalletImportExportTest().main()
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
"""
Config class
Contains path information for the face detection cascade and the Bowie image.
"""
CASCADE_PATH = os.environ.get('CASCADE_PATH') or os.path.join(basedir,'data','cascade.xml')
BOWIE_PATH = os.environ.get('BOWIE_PATH') or os.path.join(basedir,'data','bowie.png') |
import unittest
from handlers.declension_handler import DeclensionHandler
from casing_manager import get_words_casing, apply_words_cases, apply_cases
class DeclensionHandlerTest(unittest.TestCase):
def test_inflected_text(self):
text_handler = DeclensionHandler()
source_text = "Иванов Иван Иванович"
case = "gent"
result = text_handler.get_inflected_text(source_text, case)
expected_result = "Иванова Ивана Ивановича"
self.assertEqual(expected_result, result)
def test_complex_name(self):
text_handler = DeclensionHandler()
source_text = "Южно-Уральский торгово-промышленный"
case = "datv"
number = "plur"
result = text_handler.get_inflected_text(source_text, case=case, number=number)
expected_result = "Южно-Уральским торгово-промышленным"
self.assertEqual(expected_result, result)
def test_inflected_person_name_by_fullname(self):
text_handler = DeclensionHandler()
source_text = "Иванов Иван Иванович"
case = "datv"
result = text_handler.get_inflected_person_name(case, fullname=source_text)
expected_result = "Иванову Ивану Ивановичу"
self.assertEqual(expected_result, result)
def test_male_surname(self):
text_handler = DeclensionHandler()
source_text = "Хероведов Андрей Михайлович"
case = "datv"
result = text_handler.get_inflected_person_name(case, fullname=source_text)
expected_result = "Хероведову Андрею Михайловичу"
self.assertEqual(expected_result, result)
def test_female_case(self):
text_handler = DeclensionHandler()
fullname = "Шишь Алёна Алексеевна"
case = "datv"
result = text_handler.get_inflected_person_name(case, fullname=fullname)
expected_result = "Шишь Алёне Алексеевне"
self.assertEqual(expected_result, result)
fullname = "Сидорова Ольга Ларисовна"
result = text_handler.get_inflected_person_name(case, fullname=fullname)
expected_result = "Сидоровой Ольге Ларисовне"
self.assertEqual(expected_result, result)
def test_inflected_person_separated_name_parts(self):
text_handler = DeclensionHandler()
name = "Алёна"
surname = "Охременко"
case = "datv"
result = text_handler.get_inflected_person_name(case, name=name, surname=surname)
expected_result = "Охременко Алёне"
self.assertEqual(expected_result, result)
def test_empty_name(self):
text_handler = DeclensionHandler()
case = "datv"
result = text_handler.get_inflected_person_name(case)
expected_result = ""
self.assertEqual(expected_result, result)
class CaseTests(unittest.TestCase):
def test_get_words_casing(self):
text = "HelLo WoRlD"
result = get_words_casing(text.split())
expected_result = [[False, True, True, False, True],
[False, True, False, True, False]]
self.assertEqual(expected_result, result)
def test_apply_words_cases(self):
text = "HelLo WoRlD"
new_text = "hello, world!"
words_cases = get_words_casing(text.split())
expected_result = ["HelLo,", "WoRlD!"]
result = apply_words_cases(new_text.split(), words_cases)
self.assertEqual(expected_result, result)
def test_apply_cases(self):
source_text = "ООО Пельмень Иван"
target_text = "ооо пельменю ивану"
expected_result = "ООО Пельменю Ивану"
result = apply_cases(source_text, target_text)
self.assertEqual(expected_result, result)
|
for c in range(2, 51,2):
print(c, end=' ')
print('these are the even numbers') |
#!/usr/bin/env python
__author__ = 'MBlaauw'
from pylearn2.datasets import DenseDesignMatrix
from pylearn2.utils import serial
from theano import tensor as T
from theano import function
import pickle
import numpy as np
import csv
def process(mdl, ds, batch_size=100):
# This batch size must be evenly divisible into number of total samples!
mdl.set_batch_size(batch_size)
X = mdl.get_input_space().make_batch_theano()
Y = mdl.fprop(X)
y = T.argmax(Y, axis=1)
f = function([X], y)
yhat = []
for i in xrange(ds.X.shape[0] / batch_size):
x_arg = ds.X[i * batch_size:(i + 1) * batch_size, :]
yhat.append(f(x_arg.astype(X.dtype)))
return np.array(yhat)
tst = pickle.load(open('saved_tst.pkl', 'rb'))
ds = DenseDesignMatrix(X=tst)
mdl = serial.load('saved_clf.pkl')
fname = 'results.csv'
test_size = ds.X.shape[0]
sets = 1
res = np.zeros((sets, test_size), dtype='float32')
for n, i in enumerate([test_size * x for x in range(sets)]):
yhat = process(mdl, ds)
res[n, :] = yhat.ravel()
converted_results = [['id', 'label']] + [[n + 1, int(x)]
for n, x in enumerate(res.ravel())]
with open(fname, 'w') as f:
csv_f = csv.writer(f, delimiter=',', quoting=csv.QUOTE_NONE)
csv_f.writerows(converted_results) |
# -*- coding: utf8 -*-
"""
.. module:: burpui.api.client
:platform: Unix
:synopsis: Burp-UI client api module.
.. moduleauthor:: Ziirish <[email protected]>
"""
import os
import re
from . import api, cache_key, force_refresh
from ..engines.server import BUIServer # noqa
from .custom import fields, Resource
from ..decorators import browser_cache
from ..ext.cache import cache
from ..exceptions import BUIserverException
from flask_restx.marshalling import marshal
from flask_restx import inputs
from flask import current_app, request
from flask_login import current_user
bui = current_app # type: BUIServer
ns = api.namespace("client", "Client methods")
node_fields = ns.model(
"ClientTree",
{
"date": fields.DateTime(
required=True,
dt_format="iso8601",
description="Human representation of the backup date",
),
"gid": fields.Integer(required=True, description="gid owner of the node"),
"inodes": fields.Integer(required=True, description="Inodes of the node"),
"mode": fields.String(
required=True, description='Human readable mode. Example: "drwxr-xr-x"'
),
"name": fields.String(required=True, description="Node name"),
"title": fields.SafeString(
required=True, description="Node name (alias)", attribute="name"
),
"fullname": fields.String(required=True, description="Full name of the Node"),
"key": fields.String(
required=True,
description="Full name of the Node (alias)",
attribute="fullname",
),
"parent": fields.String(required=True, description="Parent node name"),
"size": fields.String(
required=True, description='Human readable size. Example: "12.0KiB"'
),
"type": fields.String(required=True, description='Node type. Example: "d"'),
"uid": fields.Integer(required=True, description="uid owner of the node"),
"selected": fields.Boolean(
required=False, description="Is path selected", default=False
),
"lazy": fields.Boolean(
required=False,
description="Do the children have been loaded during this"
+ " request or not",
default=True,
),
"folder": fields.Boolean(required=True, description="Is it a folder"),
"expanded": fields.Boolean(
required=False, description="Should we expand the node", default=False
),
# Cannot use nested on own
"children": fields.Raw(required=False, description="List of children"),
},
)
@ns.route(
"/browse/<name>/<int:backup>",
"/<server>/browse/<name>/<int:backup>",
endpoint="client_tree",
)
@ns.doc(
params={
"server": "Which server to collect data from when in" + " multi-agent mode",
"name": "Client name",
"backup": "Backup number",
},
)
class ClientTree(Resource):
"""The :class:`burpui.api.client.ClientTree` resource allows you to
retrieve a list of files in a given backup.
This resource is part of the :mod:`burpui.api.client` module.
An optional ``GET`` parameter called ``serverName`` is supported when
running in multi-agent mode.
A mandatory ``GET`` parameter called ``root`` is used to know what path we
are working on.
"""
parser = ns.parser()
parser.add_argument(
"serverName", help="Which server to collect data from when in multi-agent mode"
)
parser.add_argument(
"root",
help="Root path to expand. You may specify several of them",
action="append",
)
parser.add_argument(
"recursive",
type=inputs.boolean,
help="Returns the whole tree instead of just the sub-tree",
nullable=True,
required=False,
default=False,
)
parser.add_argument(
"selected",
type=inputs.boolean,
help="Make the returned path selected at load time. Only works"
+ " if 'recursive' is True",
nullable=True,
required=False,
default=False,
)
parser.add_argument(
"init",
type=inputs.boolean,
help="First call to load the root of the tree",
nullable=True,
required=False,
default=False,
)
@cache.cached(timeout=3600, key_prefix=cache_key, unless=force_refresh)
@ns.marshal_list_with(node_fields, code=200, description="Success")
@ns.expect(parser)
@ns.doc(
responses={
"403": "Insufficient permissions",
"500": "Internal failure",
},
)
@browser_cache(3600)
def get(self, server=None, name=None, backup=None):
"""Returns a list of 'nodes' under a given path
**GET** method provided by the webservice.
The *JSON* returned is:
::
[
{
"date": "2015-05-21 14:54:49",
"gid": "0",
"inodes": "173",
"selected": false,
"expanded": false,
"children": [],
"mode": "drwxr-xr-x",
"name": "/",
"key": "/",
"title": "/",
"fullname": "/",
"parent": "",
"size": "12.0KiB",
"type": "d",
"uid": "0"
}
]
The output is filtered by the :mod:`burpui.misc.acl` module so that you
only see stats about the clients you are authorized to.
:param server: Which server to collect data from when in multi-agent
mode
:type server: str
:param name: The client we are working on
:type name: str
:param backup: The backup we are working on
:type backup: int
:returns: The *JSON* described above.
"""
args = self.parser.parse_args()
server = server or args["serverName"]
json = []
if not name or not backup:
return json
root_list = sorted(args["root"]) if args["root"] else []
root_loaded = False
paths_loaded = []
to_select_list = []
if (
not current_user.is_anonymous
and not current_user.acl.is_admin()
and not current_user.acl.is_client_allowed(name, server)
):
self.abort(403, "Sorry, you are not allowed to view this client")
from_cookie = None
if args["init"] and not root_list:
from_cookie = request.cookies.get("fancytree-1-expanded", "")
if from_cookie:
args["recursive"] = True
_root = bui.client.get_tree(name, backup, agent=server)
root_list = [x["name"] for x in _root]
for path in from_cookie.split("~"):
if not path.endswith("/"):
path += "/"
if path not in root_list:
root_list.append(path)
root_list = sorted(root_list)
try:
root_list_clean = []
for root in root_list:
if args["recursive"]:
path = ""
# fetch the root first if not already loaded
if not root_loaded:
part = bui.client.get_tree(name, backup, level=0, agent=server)
root_loaded = True
else:
part = []
root = root.rstrip("/")
to_select = root.rsplit("/", 1)
if not to_select[0]:
to_select[0] = "/"
if len(to_select) == 1:
# special case we want to select '/'
to_select = ("", "/")
if not root:
root = "/"
to_select_list.append(to_select)
root_list_clean.append(root)
paths = root.split("/")
for level, sub in enumerate(paths, start=1):
path = os.path.join(path, sub)
if not path:
path = "/"
if path in paths_loaded:
continue
temp = bui.client.get_tree(
name, backup, path, level, agent=server
)
paths_loaded.append(path)
part += temp
else:
part = bui.client.get_tree(name, backup, root, agent=server)
json += part
if args["selected"]:
for entry in json:
for parent, fold in to_select_list:
if entry["parent"] == parent and entry["name"] == fold:
entry["selected"] = True
break
if entry["parent"] in root_list_clean:
entry["selected"] = True
if not root_list:
json = bui.client.get_tree(name, backup, agent=server)
if args["selected"]:
for entry in json:
if not entry["parent"]:
entry["selected"] = True
if args["recursive"]:
tree = {}
rjson = []
roots = []
for entry in json:
# /!\ after marshalling, 'fullname' will be 'key'
tree[entry["fullname"]] = marshal(entry, node_fields)
for key, entry in tree.items():
parent = entry["parent"]
if not entry["children"]:
entry["children"] = None
if parent:
node = tree[parent]
if not node["children"]:
node["children"] = []
node["children"].append(entry)
if node["folder"]:
node["lazy"] = False
node["expanded"] = True
else:
roots.append(entry["key"])
for fullname in roots:
rjson.append(tree[fullname])
json = rjson
else:
for entry in json:
entry["children"] = None
if not entry["folder"]:
entry["lazy"] = False
except BUIserverException as e:
self.abort(500, str(e))
return json
@ns.route(
"/browseall/<name>/<int:backup>",
"/<server>/browseall/<name>/<int:backup>",
endpoint="client_tree_all",
)
@ns.doc(
params={
"server": "Which server to collect data from when in" + " multi-agent mode",
"name": "Client name",
"backup": "Backup number",
},
)
class ClientTreeAll(Resource):
"""The :class:`burpui.api.client.ClientTreeAll` resource allows you to
retrieve a list of all the files in a given backup.
This resource is part of the :mod:`burpui.api.client` module.
An optional ``GET`` parameter called ``serverName`` is supported when
running in multi-agent mode.
"""
parser = ns.parser()
parser.add_argument(
"serverName", help="Which server to collect data from when in multi-agent mode"
)
@cache.cached(timeout=3600, key_prefix=cache_key, unless=force_refresh)
@ns.marshal_list_with(node_fields, code=200, description="Success")
@ns.expect(parser)
@ns.doc(
responses={
"403": "Insufficient permissions",
"405": "Method not allowed",
"500": "Internal failure",
},
)
@browser_cache(3600)
def get(self, server=None, name=None, backup=None):
"""Returns a list of all 'nodes' of a given backup
**GET** method provided by the webservice.
The *JSON* returned is:
::
[
{
"date": "2015-05-21 14:54:49",
"gid": "0",
"inodes": "173",
"selected": false,
"expanded": false,
"children": [],
"mode": "drwxr-xr-x",
"name": "/",
"key": "/",
"title": "/",
"fullname": "/",
"parent": "",
"size": "12.0KiB",
"type": "d",
"uid": "0"
}
]
The output is filtered by the :mod:`burpui.misc.acl` module so that you
only see stats about the clients you are authorized to.
:param server: Which server to collect data from when in multi-agent
mode
:type server: str
:param name: The client we are working on
:type name: str
:param backup: The backup we are working on
:type backup: int
:returns: The *JSON* described above.
"""
args = self.parser.parse_args()
server = server or args["serverName"]
if not bui.client.get_attr("batch_list_supported", False, server):
self.abort(405, "Sorry, the requested backend does not support this method")
if (
not current_user.is_anonymous
and not current_user.acl.is_admin()
and not current_user.acl.is_client_allowed(name, server)
):
self.abort(403, "Sorry, you are not allowed to view this client")
try:
json = self._get_tree_all(name, backup, server)
except BUIserverException as e:
self.abort(500, str(e))
return json
@staticmethod
def _get_tree_all(name, backup, server):
json = bui.client.get_tree(name, backup, "*", agent=server)
tree = {}
rjson = []
roots = []
def __expand_json(js):
res = {}
for entry in js:
# /!\ after marshalling, 'fullname' will be 'key'
res[entry["fullname"]] = marshal(entry, node_fields)
return res
tree = __expand_json(json)
# TODO: we can probably improve this at some point
redo = True
while redo:
redo = False
            for key, entry in list(tree.items()):  # iterate over a copy; tree may be updated below
parent = entry["parent"]
if not entry["children"]:
entry["children"] = None
if parent:
if parent not in tree:
parent2 = parent
last = False
while parent not in tree and not last:
if not parent2:
last = True
json = bui.client.get_tree(
name, backup, parent2, agent=server
)
if parent2 == "/":
parent2 = ""
else:
parent2 = os.path.dirname(parent2)
tree2 = __expand_json(json)
tree.update(tree2)
roots = []
redo = True
break
node = tree[parent]
if not node["children"]:
node["children"] = []
elif entry in node["children"]:
continue
node["children"].append(entry)
if node["folder"]:
node["lazy"] = False
node["expanded"] = False
else:
roots.append(entry["key"])
for fullname in roots:
rjson.append(tree[fullname])
return rjson
@ns.route(
"/report/<name>",
"/<server>/report/<name>",
"/report/<name>/<int:backup>",
"/<server>/report/<name>/<int:backup>",
endpoint="client_report",
)
@ns.doc(
params={
"server": "Which server to collect data from when in multi-agent" + " mode",
"name": "Client name",
"backup": "Backup number",
},
)
class ClientReport(Resource):
"""The :class:`burpui.api.client.ClientStats` resource allows you to
retrieve a report on a given backup for a given client.
This resource is part of the :mod:`burpui.api.client` module.
An optional ``GET`` parameter called ``serverName`` is supported when
running in multi-agent mode.
"""
parser = ns.parser()
parser.add_argument(
"serverName", help="Which server to collect data from when in multi-agent mode"
)
report_tpl_fields = ns.model(
"ClientReportTpl",
{
"changed": fields.Integer(
required=True, description="Number of changed files", default=0
),
"deleted": fields.Integer(
required=True, description="Number of deleted files", default=0
),
"new": fields.Integer(
required=True, description="Number of new files", default=0
),
"scanned": fields.Integer(
required=True, description="Number of scanned files", default=0
),
"total": fields.Integer(
required=True, description="Total number of files", default=0
),
"unchanged": fields.Integer(
required=True, description="Number of scanned files", default=0
),
},
)
report_fields = ns.model(
"ClientReport",
{
"dir": fields.Nested(report_tpl_fields, required=True),
"duration": fields.Integer(
required=True, description="Backup duration in seconds"
),
"efs": fields.Nested(report_tpl_fields, required=True),
"encrypted": fields.Boolean(
required=True, description="Is the backup encrypted"
),
"end": fields.DateTime(
dt_format="iso8601",
required=True,
description="Timestamp of the end date of the backup",
),
"files": fields.Nested(report_tpl_fields, required=True),
"files_enc": fields.Nested(report_tpl_fields, required=True),
"hardlink": fields.Nested(report_tpl_fields, required=True),
"meta": fields.Nested(report_tpl_fields, required=True),
"meta_enc": fields.Nested(report_tpl_fields, required=True),
"number": fields.Integer(required=True, description="Backup number"),
"received": fields.Integer(required=True, description="Bytes received"),
"softlink": fields.Nested(report_tpl_fields, required=True),
"special": fields.Nested(report_tpl_fields, required=True),
"start": fields.DateTime(
dt_format="iso8601",
required=True,
description="Timestamp of the beginning of the backup",
),
"totsize": fields.Integer(
required=True, description="Total size of the backup"
),
"vssfooter": fields.Nested(report_tpl_fields, required=True),
"vssfooter_enc": fields.Nested(report_tpl_fields, required=True),
"vssheader": fields.Nested(report_tpl_fields, required=True),
"vssheader_enc": fields.Nested(report_tpl_fields, required=True),
"windows": fields.Boolean(
required=True, description="Is the client a windows system"
),
},
)
@cache.cached(timeout=1800, key_prefix=cache_key, unless=force_refresh)
@ns.marshal_with(report_fields, code=200, description="Success")
@ns.expect(parser)
@ns.doc(
responses={
"403": "Insufficient permissions",
"500": "Internal failure",
},
)
@browser_cache(1800)
def get(self, server=None, name=None, backup=None):
"""Returns a global report of a given backup/client
**GET** method provided by the webservice.
The *JSON* returned is:
::
{
"dir": {
"changed": 0,
"deleted": 0,
"new": 394,
"scanned": 394,
"total": 394,
"unchanged": 0
},
"duration": 5,
"efs": {
"changed": 0,
"deleted": 0,
"new": 0,
"scanned": 0,
"total": 0,
"unchanged": 0
},
"encrypted": true,
"end": 1422189124,
"files": {
"changed": 0,
"deleted": 0,
"new": 0,
"scanned": 0,
"total": 0,
"unchanged": 0
},
"files_enc": {
"changed": 0,
"deleted": 0,
"new": 1421,
"scanned": 1421,
"total": 1421,
"unchanged": 0
},
"hardlink": {
"changed": 0,
"deleted": 0,
"new": 0,
"scanned": 0,
"total": 0,
"unchanged": 0
},
"meta": {
"changed": 0,
"deleted": 0,
"new": 0,
"scanned": 0,
"total": 0,
"unchanged": 0
},
"meta_enc": {
"changed": 0,
"deleted": 0,
"new": 0,
"scanned": 0,
"total": 0,
"unchanged": 0
},
"number": 1,
"received": 1679304,
"softlink": {
"changed": 0,
"deleted": 0,
"new": 1302,
"scanned": 1302,
"total": 1302,
"unchanged": 0
},
"special": {
"changed": 0,
"deleted": 0,
"new": 0,
"scanned": 0,
"total": 0,
"unchanged": 0
},
"start": 1422189119,
"total": {
"changed": 0,
"deleted": 0,
"new": 3117,
"scanned": 3117,
"total": 3117,
"unchanged": 0
},
"totsize": 5345361,
"vssfooter": {
"changed": 0,
"deleted": 0,
"new": 0,
"scanned": 0,
"total": 0,
"unchanged": 0
},
"vssfooter_enc": {
"changed": 0,
"deleted": 0,
"new": 0,
"scanned": 0,
"total": 0,
"unchanged": 0
},
"vssheader": {
"changed": 0,
"deleted": 0,
"new": 0,
"scanned": 0,
"total": 0,
"unchanged": 0
},
"vssheader_enc": {
"changed": 0,
"deleted": 0,
"new": 0,
"scanned": 0,
"total": 0,
"unchanged": 0
},
"windows": "false"
}
The output is filtered by the :mod:`burpui.misc.acl` module so that you
only see stats about the clients you are authorized to.
:param server: Which server to collect data from when in multi-agent
mode
:type server: str
:param name: The client we are working on
:type name: str
:param backup: The backup we are working on
:type backup: int
:returns: The *JSON* described above.
"""
server = server or self.parser.parse_args()["serverName"]
json = []
if not name:
err = [[1, "No client defined"]]
self.abort(400, err)
if (
not current_user.is_anonymous
and not current_user.acl.is_admin()
and not current_user.acl.is_client_allowed(name, server)
):
self.abort(403, "You don't have rights to view this client report")
if backup:
try:
json = bui.client.get_backup_logs(backup, name, agent=server)
except BUIserverException as exp:
self.abort(500, str(exp))
else:
try:
json = bui.client.get_backup_logs(-1, name, agent=server)
except BUIserverException as exp:
self.abort(500, str(exp))
return json
@api.disabled_on_demo()
@ns.marshal_with(report_fields, code=202, description="Success")
@ns.expect(parser)
@ns.doc(
responses={
"400": "Missing arguments",
"403": "Insufficient permissions",
"500": "Internal failure",
},
)
def delete(self, name, backup, server=None):
"""Deletes a given backup from the server
**DELETE** method provided by the webservice.
The access is filtered by the :mod:`burpui.misc.acl` module so that you
can only delete backups you have access to.
:param server: Which server to collect data from when in multi-agent
mode
:type server: str
:param name: The client we are working on
:type name: str
:param backup: The backup we are working on
:type backup: int
"""
server = server or self.parser.parse_args()["serverName"]
if not name:
err = [[1, "No client defined"]]
self.abort(400, err)
if (
not current_user.is_anonymous
and not current_user.acl.is_admin()
and (
not current_user.acl.is_moderator()
or current_user.acl.is_moderator()
and not current_user.acl.is_client_rw(name, server)
)
):
self.abort(403, "You don't have rights on this client")
msg = bui.client.delete_backup(name, backup, server)
if msg:
self.abort(500, msg)
bui.audit.logger.info(
f"requested the deletion of backup {backup} for {name}", server=server
)
        return "", 202
@ns.route("/stats/<name>", "/<server>/stats/<name>", endpoint="client_stats")
@ns.doc(
params={
"server": "Which server to collect data from when in multi-agent" + " mode",
"name": "Client name",
},
)
class ClientStats(Resource):
"""The :class:`burpui.api.client.ClientReport` resource allows you to
retrieve a list of backups for a given client.
This resource is part of the :mod:`burpui.api.client` module.
An optional ``GET`` parameter called ``serverName`` is supported when
running in multi-agent mode.
"""
parser = ns.parser()
parser.add_argument(
"serverName", help="Which server to collect data from when in multi-agent mode"
)
client_fields = ns.model(
"ClientStats",
{
"number": fields.Integer(required=True, description="Backup number"),
"received": fields.Integer(required=True, description="Bytes received"),
"size": fields.Integer(required=True, description="Total size"),
"encrypted": fields.Boolean(
required=True, description="Is the backup encrypted"
),
"deletable": fields.Boolean(
required=True, description="Is the backup deletable"
),
"date": fields.DateTime(
required=True,
dt_format="iso8601",
description="Human representation of the backup date",
),
},
)
@cache.cached(timeout=1800, key_prefix=cache_key, unless=force_refresh)
@ns.marshal_list_with(client_fields, code=200, description="Success")
@ns.expect(parser)
@ns.doc(
responses={
"403": "Insufficient permissions",
"500": "Internal failure",
},
)
@browser_cache(1800)
def get(self, server=None, name=None):
"""Returns a list of backups for a given client
**GET** method provided by the webservice.
The *JSON* returned is:
::
[
{
"date": "2015-01-25 13:32:00",
"deletable": true,
"encrypted": true,
"received": 123,
"size": 1234,
"number": 1
},
]
The output is filtered by the :mod:`burpui.misc.acl` module so that you
only see stats about the clients you are authorized to.
:param server: Which server to collect data from when in multi-agent
mode
:type server: str
:param name: The client we are working on
:type name: str
:returns: The *JSON* described above.
"""
server = server or self.parser.parse_args()["serverName"]
try:
if (
not current_user.is_anonymous
and not current_user.acl.is_admin()
and not current_user.acl.is_client_allowed(name, server)
):
self.abort(403, "Sorry, you cannot access this client")
json = bui.client.get_client(name, agent=server)
except BUIserverException as exp:
self.abort(500, str(exp))
return json
@ns.route("/labels/<name>", "/<server>/labels/<name>", endpoint="client_labels")
@ns.doc(
params={
"server": "Which server to collect data from when in multi-agent" + " mode",
"name": "Client name",
},
)
class ClientLabels(Resource):
"""The :class:`burpui.api.client.ClientLabels` resource allows you to
retrieve the labels of a given client.
This resource is part of the :mod:`burpui.api.client` module.
An optional ``GET`` parameter called ``serverName`` is supported when
running in multi-agent mode.
"""
parser = ns.parser()
parser.add_argument(
"serverName", help="Which server to collect data from when in multi-agent mode"
)
parser.add_argument("clientName", help="Client name")
labels_fields = ns.model(
"ClientLabels",
{
"labels": fields.List(fields.String, description="List of labels"),
},
)
@cache.cached(timeout=1800, key_prefix=cache_key, unless=force_refresh)
@ns.marshal_list_with(labels_fields, code=200, description="Success")
@ns.expect(parser)
@ns.doc(
responses={
"403": "Insufficient permissions",
"500": "Internal failure",
},
)
@browser_cache(1800)
def get(self, server=None, name=None):
"""Returns the labels of a given client
**GET** method provided by the webservice.
The *JSON* returned is:
::
{
"labels": [
"label1",
"label2"
]
}
The output is filtered by the :mod:`burpui.misc.acl` module so that you
only see stats about the clients you are authorized to.
:param server: Which server to collect data from when in multi-agent
mode
:type server: str
:param name: The client we are working on
:type name: str
:returns: The *JSON* described above.
"""
try:
if (
not current_user.is_anonymous
and not current_user.acl.is_admin()
and not current_user.acl.is_client_allowed(name, server)
):
self.abort(403, "Sorry, you cannot access this client")
labels = self._get_labels(name, server)
except BUIserverException as exp:
self.abort(500, str(exp))
return {"labels": labels}
@staticmethod
def _get_labels(client, server=None):
key = "labels-{}-{}".format(client, server)
ret = cache.cache.get(key)
if ret is not None:
return ret
labels = bui.client.get_client_labels(client, agent=server)
ret = []
ignore = re.compile("|".join(bui.ignore_labels)) if bui.ignore_labels else None
reformat = (
[(re.compile(regex), replace) for regex, replace in bui.format_labels]
if bui.format_labels
else []
)
for label in labels:
if bui.ignore_labels and ignore.search(label):
continue
tmp_label = label
for regex, replace in reformat:
tmp_label = regex.sub(replace, tmp_label)
ret.append(tmp_label)
cache.cache.set(key, ret, 1800)
return ret
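    # Illustrative example (configuration values are hypothetical, not from the source):
    # with bui.ignore_labels = ["^internal-"] and bui.format_labels = [("^env:", "")],
    # a client reporting ["internal-x", "env:prod", "team:blue"] is returned as
    # ["prod", "team:blue"]: matching labels are dropped, the rest rewritten via re.sub.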
@ns.route(
"/running",
"/running/<name>",
"/<server>/running",
"/<server>/running/<name>",
endpoint="client_running_status",
)
@ns.doc(
params={
"server": "Which server to collect data from when in multi-agent" + " mode",
"name": "Client name",
},
)
class ClientRunningStatus(Resource):
"""The :class:`burpui.api.client.ClientRunningStatus` resource allows you to
retrieve the running status of a given client.
This resource is part of the :mod:`burpui.api.client` module.
An optional ``GET`` parameter called ``serverName`` is supported when
running in multi-agent mode.
"""
parser = ns.parser()
parser.add_argument(
"serverName", help="Which server to collect data from when in multi-agent mode"
)
parser.add_argument("clientName", help="Client name")
running_fields = ns.model(
"ClientRunningStatus",
{
"state": fields.LocalizedString(required=True, description="Running state"),
"percent": fields.Integer(
required=False, description="Backup progress in percent", default=-1
),
"phase": fields.String(
required=False, description="Backup phase", default=None
),
"last": fields.DateTime(
required=False, dt_format="iso8601", description="Date of last backup"
),
},
)
@ns.marshal_list_with(running_fields, code=200, description="Success")
@ns.expect(parser)
@ns.doc(
responses={
"403": "Insufficient permissions",
"500": "Internal failure",
},
)
def get(self, server=None, name=None):
"""Returns the running status of a given client
**GET** method provided by the webservice.
The *JSON* returned is:
::
{
"state": "running",
"percent": 42,
"phase": "2",
"last": "now"
}
The output is filtered by the :mod:`burpui.misc.acl` module so that you
only see stats about the clients you are authorized to.
:param server: Which server to collect data from when in multi-agent
mode
:type server: str
:param name: The client we are working on
:type name: str
:returns: The *JSON* described above.
"""
args = self.parser.parse_args()
server = server or args["serverName"]
name = name or args["clientName"]
try:
if (
not current_user.is_anonymous
and not current_user.acl.is_admin()
and not current_user.acl.is_client_allowed(name, server)
):
self.abort(403, "Sorry, you cannot access this client")
json = bui.client.get_client_status(name, agent=server)
except BUIserverException as exp:
self.abort(500, str(exp))
return json
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from include import *
from timecat import locate_next_line
dataset_index = 0
def judge(f, st, ed):
global dataset_index
dataset_index += 1
print("\ndataset[{}]".format(dataset_index))
locate_next_line(f, st, ed)
print(f.readline())
if __name__ == "__main__":
testlog = "test.log"
with open(testlog, "r") as f:
        #1 print the 1st line
f.seek(0)
st = f.tell()
f.seek(0, os.SEEK_END)
ed = f.tell()
judge(f, st, ed)
        #2 print the 2nd line
f.seek(0)
f.read(5)
st = f.tell()
f.seek(0, os.SEEK_END)
ed = f.tell()
judge(f, st, ed)
        #3 print the last 3 characters of the 1st line
f.seek(0)
f.read(5)
st = f.tell()
f.readline()
f.seek(-3, os.SEEK_CUR)
ed = f.tell()
judge(f, st, ed)
        #4 print the last 5 characters of the last line, up to EOF
f.seek(-5, os.SEEK_END)
st = f.tell()
ed = f.tell()
judge(f, st, ed)
|
#coding: utf-8
from caty.core.typeinterface import dereference
try:
from sqlalchemy import *
from sqlalchemy.orm import *
from interfaces.sqlalchemy.SQLAlchemy import SQLAlchemyBase
except:
import traceback
traceback.print_exc()
print '[Warning] sqlalchemy is not installed or sqlalchemy IDL is not compiled'
else:
class SQLAlchemyWrapper(SQLAlchemyBase):
config = None
@classmethod
def initialize(cls, app_instance, setting):
if not setting:
config = {}
else:
config = setting.get('config', {})
cls.config = config
cfg = {'encoding': 'utf-8'}
url = 'sqlite:///:memory:'
for k, v in config.items():
if k == 'url':
url = v
else:
cfg[k] = v
cls.engine = create_engine(url, **cfg)
@classmethod
def instance(cls, app, system_param):
return SQLAlchemyWrapper(system_param)
@classmethod
def finalize(cls, app):
pass
def create(self, mode, user_param=None):
obj = Facility.create(self, mode)
return obj
def clone(self):
return self
def __init__(self, *ignore):
conn = self.engine.connect()
self.conn = conn
SessionClass = sessionmaker(bind=conn, autoflush=True)
SessionClass.configure(bind=conn)
self.session = SessionClass()
def commit(self):
try:
self.session.commit()
finally:
self.conn.close()
def cancel(self):
try:
self.session.rollback()
finally:
self.conn.close()
def _generate_py_class(self, object_type, name):
buff = []
_ = buff.append
_(u'from sqlalchemy.ext.declarative import declarative_base')
_(u'from sqlalchemy import *')
_(u'from sqlalchemy.orm import *')
_(u'from string import Template')
_(u'from caty.util.collection import conditional_dict')
_(u'Base = declarative_base()')
_(u'class %s(Base):' % name)
_(u' __tablename__ = "%s"' % name)
        # The initial value of an optional property is None, which would cause a type error.
        # Record which properties are nullable in the DB and turn them into undefined as needed when converting to JSON.
_(u' __nullable__ = set()')
for k, v in object_type.items():
if v.optional:
_(u' __nullable__.add("%s")' % k)
for k, v in object_type.items():
nullable = 'False'
primary_key = 'False'
if v.optional:
v = dereference(v, reduce_option=True)
nullable = 'True'
if 'primary-key' in v.annotations:
primary_key = 'True'
if v.type == 'string':
t = 'String'
elif v.type == 'integer':
t = 'Integer'
elif v.type == 'number':
t = 'Numeric'
else:
throw_caty_exception(u'NotImplemented', v.type)
_(" %s = Column('%s', %s, primary_key=%s, nullable=%s)" % (k, k, t, primary_key, nullable))
init = []
__ = init.append
__(u' def __init__(self')
__(u',')
for k, v in object_type.items():
if not v.optional:
__(u'%s' % k)
__(u',')
for k, v in object_type.items():
if v.optional:
v = dereference(v, reduce_option=True)
if 'default' not in v.annotations:
__(u'%s=None' % (k))
else:
__(u'%s=%s' % (k, v.annotations['default'].value))
__(u',')
init.pop(-1)
__('):')
_(u''.join(init))
for k, v in object_type.items():
_(' self.%s = %s' % (k, k))
_(u'')
rep = []
__ = rep.append
_(u' def __repr__(self):')
num = 0
for k, v in object_type.items():
__(u"'{%d}'" % num)
__(u', ')
num += 1
rep.pop(-1)
fmt = ''.join(rep)
rep[:] = []
__(u' return Template("%s<%s>")' % (name, fmt))
__(u'.substitute((')
for k, v in object_type.items():
__("self.%s" % k)
__(', ')
rep.pop(-1)
__(u'))')
_(u''.join(rep))
_(u'')
_(u' def to_json(self):')
_(u' return conditional_dict(lambda k, v: not(v is None and k in self.__nullable__), {')
for k, v in object_type.items():
_(' "%s": self.%s,' % (k, k))
_(u' })')
return u'\n'.join(buff)
def _create_table(self, cls):
cls.metadata.create_all(self.engine)
|
#!/usr/bin/python3
import sys
import battlelib
dirin = '/home/vova/stud_tanks/'
dirout = '/home/vova/tanks-results/res/'
prefix = 'https://senya.github.io/tanks-results/res/'
prefix = 'res/'
players = ['krohalev', 'patritskya', 'kozlova', 'venskaya', 'scherbakov', 'mishina', 'lomonosov', 'abdrakhimov']
#players = ['bot1', 'bot2', 'krohalev']
tab = [[0] * len(players) for i in players]
battles = []
index = open('/home/vova/tanks-results/index.html', 'w')
def battle(p1, p2, k):
print(p1, ' ', p2)
p1_p, p2_p, json = battlelib.battle(dirin + p1 + '/bot.py', dirin + p2 + '/bot.py', 5000)
f = p1 + '_' + p2 + '-' + str(k) + '.json'
ht = prefix + f
f = dirout + f
with open(f, 'w') as ff:
print(json, file=ff)
battles.append((p1, p1_p, p2, p2_p, ht))
return (p1_p, p2_p)
for i, p in enumerate(players):
for j in range(i):
p2 = players[j]
p1_points = 0
p2_points = 0
for k in range(5):
p1_p, p2_p = battle(p, p2, k)
p1_points += p1_p
p2_points += p2_p
for k in range(5):
p2_p, p1_p = battle(p2, p, k)
p1_points += p1_p
p2_points += p2_p
tab[i][j] = p1_points
tab[j][i] = p2_points
index.write('<table>')
index.write('<tr><td></td>')
for p in players:
index.write('<td>' + p + '</td>')
index.write('<td>Total</td>')
sor = []
for i, p in enumerate(players):
index.write('<tr><td>' + p + '</td>')
total = 0
for j in range(len(players)):
if i == j:
index.write('<td></td>')
continue
res = tab[i][j]
total += res
index.write('<td>' + str(res) + '</td>')
index.write('<td>' + str(total) + '</td></tr>')
sor.append((p, total))
index.write('</table>')
sor.sort(key=lambda pl:-pl[1])
index.write('<table>')
for pl in sor:
index.write('<tr><td>' + pl[0] + '</td><td>' + str(pl[1]) + '</td></tr>')
index.write('</table>')
index.write('<table>')
for b in battles:
    index.write('<tr>' + ''.join(['<td>' + str(t) + '</td>' for t in b[:-1]]) + '<td><a href="' + b[4] + '">json</a></td></tr>')
index.write('</table>')
index.close()
|
#!/usr/bin/env python3
from led_system import LEDSystem
system = LEDSystem()
system.createConfig(249)
|
import os
import threading
import PIL.Image as IMG
import numpy as np
def send_to_back(func, kwargs={}):
t = threading.Thread(target=func, kwargs=kwargs)
t.start()
def save_as_img(tensor, to_dir='tensor_img'):
def f(tsr=tensor, dir=to_dir):
t = tsr.clone().detach().cpu().numpy() * 255
t[t < 0] = 0
t[t > 255] = 255
os.makedirs(dir, exist_ok=True)
try:
for i, b in enumerate(t):
for j, c in enumerate(b):
IMG.fromarray(np.array(c.squeeze(), dtype=np.uint8)).save(
dir + os.sep + 't_' + str(i) + '_' + str(j) + '.png')
except Exception as e:
print(str(e))
send_to_back(f)
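# Usage sketch (the tensor below is illustrative; any float tensor shaped
# [batch, channels, H, W] works, values are scaled by 255 and clipped to [0, 255]):
#
#   import torch
#   save_as_img(torch.rand(2, 3, 64, 64), to_dir='debug_imgs')
#
# Each channel of each batch item is written as t_<batch>_<channel>.png, and the work runs
# on a background thread so the caller is not blocked.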
|
import os
import themata
project = 'themata'
copyright = '2020, Adewale Azeez, Creative Commons Zero v1.0 Universal License'
author = 'Adewale Azeez'
html_theme_path = [themata.get_html_theme_path()]
html_theme = 'sugar'
html_favicon = 'images/themata.png'
master_doc = 'index'
exclude_patterns = [
'hackish/*',
'milkish/*',
'fandango/*',
'clear/*',
'fluid/*',
'garri/*',
'water/*',
'sugar/*'
]
html_theme_options = {
'navbar_links': [
("Download", "https://pypi.org/project/themata/"),
("Github", "https://github.com/Thecarisma/themata"),
("Follow me on twitter", "https://twitter.com/iamthecarisma")
],
'navbar_sec_links': [
("hackish", "hackish/index"),
("milkish", "milkish/index"),
("fandango", "fandango/index"),
("clear", "clear/index"),
("fluid", "fluid/index"),
("garri", "garri/index"),
("water", "water/index"),
("sugar", "sugar/index")
],
'metadata': {
"enable": True,
"url": "https://thecarisma.github.io/themata",
"type": "website",
"title": "Set of Highly customizable sphinx themes.",
"description": "Themata package contains different sphinx theme that can be easily customized to look like a complete website or just a documentation webpage.",
"image": "https://raw.githubusercontent.com/Thecarisma/themata/main/docs/images/themata.small.png",
"keywords": "python, sphinx, thecarisma, themata, documentation, markdown, rst, themes",
"author": "Adewale Azeez"
},
'twitter_metadata': {
"enable": True,
"card": "summary",
"site": "@iamthecarisma",
"creator": "@iamthecarisma",
"title": "Set of Highly customizable sphinx themes.",
"description": "Themata package contains different sphinx theme that can be easily customized to look like a complete website or just a documentation webpage.",
"image": "https://raw.githubusercontent.com/Thecarisma/themata/main/docs/images/themata.small.png",
}
} |
import itertools
import random
import os
from math import floor, ceil
from functools import partial
import cv2
import json
import numpy as np
from PIL import Image
import torch.utils.data
import torchvision.transforms.functional as xF
from torchvision.transforms import ColorJitter
from skin_lesion.default_paths import DEFAULT_IGNORE_FILES
def cvt2sv(image):
return cv2.cvtColor(image, code=cv2.COLOR_BGR2HSV)[:, :, 1:]
def cvt2ab(image):
return cv2.cvtColor(image, code=cv2.COLOR_BGR2Lab)[:, :, 1:]
color_str2cvtr = {
("bgr", "rgb"): partial(cv2.cvtColor, code=cv2.COLOR_BGR2RGB),
("bgr", "hsv"): partial(cv2.cvtColor, code=cv2.COLOR_BGR2HSV),
("bgr", "sv"): cvt2sv,
("bgr", "a*b*"): cvt2ab
}
def resize(img, rows, columns):
# cv2 is (width, height)
return cv2.resize(img, (columns, rows))
class SegDataset(torch.utils.data.Dataset):
IN_CROPPED_IMG = "cropped_img"
IN_ORIG_IMG = "orig_img"
GT_CROPPED_IMG = "truth_cropped_img"
GT_ORIG_IMG = "truth_orig_img"
IMG_KEYS = [IN_CROPPED_IMG, IN_ORIG_IMG, GT_CROPPED_IMG, GT_ORIG_IMG]
INPUT_FNAME = "input_fname"
TRUTH_FNAME = "truth_fname"
BBOX = "bbox"
UFM_BBOX = "ufm_bbox"
RATIOS = "ratios"
@staticmethod
def torch_xform(sample):
for k in [SegDataset.IN_CROPPED_IMG, SegDataset.IN_ORIG_IMG]:
try:
sample[k] = xF.to_tensor(sample[k])
except TypeError:
pass
for k in [SegDataset.GT_CROPPED_IMG, SegDataset.GT_ORIG_IMG]:
try:
sample[k] = (torch.from_numpy(sample[k]) > 0).to(torch.uint8)
except TypeError:
pass
return sample
def __init__(self,
input_fldr,
truth_fldr=None,
img_size=(512, 512),
colorspace=("RGB", "SV", "a*b*"),
jitter=(0.02, 0.02, 0.02, 0.02),
ignore_files=DEFAULT_IGNORE_FILES,
bbox_file=None,
xform=None,
random_lr=False,
random_ud=False,
random_45=True,
random_90=True,
noised_bbox=True):
"""Segmentation dataset.
Assumes that the truth and input folders have same
order sorted.
Parameters
----------
img_size : tuple
Resize to (rows, columns)
truth_fldr : str, None
Use None for no truth (testing mode)
"""
self.has_truth = bool(truth_fldr) # None, False => False, str => True
self.color_cvtr = []
for cspace in colorspace:
self.color_cvtr.append(color_str2cvtr[("bgr", cspace.lower())])
self.input_fldr = input_fldr
self.input_files = sorted(
(f for f in os.listdir(self.input_fldr) if f not in ignore_files))
self.input_files_folded = []
for file in self.input_files:
# it matters that input_files is sorted here!
if "_90." not in file and "_180." not in file and "_270." not in file:
self.input_files_folded.append([file])
cur_base = file
else:
assert cur_base.split(".")[0] in file
self.input_files_folded[-1].append(file)
self.img_size = img_size
if self.has_truth:
self.truth_fldr = truth_fldr
self.truth_files_folded = []
for files in self.input_files_folded:
self.truth_files_folded.append(
[os.path.splitext(f)[0] + "_segmentation.png"
for f in files]
)
self.truth_files = itertools.chain.from_iterable(self.truth_files_folded)
self.has_bboxes = False
if bbox_file is not None:
with open(bbox_file, "r") as f:
bbox_data = json.load(f)
self.has_bboxes = True
self.bboxes = {}
for res in bbox_data:
zero_based_id = res["image_id"] - 1
isic_id = zero_based_id // 4
rotation = 90 * (zero_based_id % 4)
if isic_id not in self.bboxes:
self.bboxes[isic_id] = {rotation: []}
                elif rotation not in self.bboxes[isic_id]:
self.bboxes[isic_id][rotation] = []
self.bboxes[isic_id][rotation].append(res)
else:
self.has_bboxes = False
self.bboxes = None
self.xform = xform
self.train()
self.random_lr = random_lr
self.random_ud = random_ud
self.random_45 = random_45
self.random_90 = random_90
self.noised_bbox = noised_bbox
if bbox_file is None and self.noised_bbox:
print("[Warning] No bounding boxes but noised_bbox requires them.")
self.jitter = ColorJitter(*jitter)
def train(self):
self._train = True
def eval(self):
self._train = False
@staticmethod
def rotate_image(image, angle):
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
return result
@staticmethod
def _maybe_rotate(sample, keys):
# images have been rotated by 90s and flipped, so randomly rotating by 45 or not suffices
if random.choice([True, False]):
for k in keys:
try:
sample[k] = SegDataset.rotate_image(sample[k], 45)
except KeyError:
pass
return sample
@staticmethod
def _maybe_flip(sample, keys, axis=0):
if random.choice([True, False]):
for k in keys:
try:
sample[k] = cv2.flip(sample[k], axis)
except KeyError:
pass
return sample
def _jitter(self, input_img):
return np.array(self.jitter(Image.fromarray(input_img)))
def maybe_jitter(self, input_img):
if self._train and self.jitter:
return self._jitter(input_img)
else:
return input_img
def augment(self, sample):
if not self.random_lr and not self.random_ud:
return sample
if self.random_lr:
sample = self._maybe_flip(sample, self.IMG_KEYS, axis=1)
if self.random_ud:
sample = self._maybe_flip(sample, self.IMG_KEYS, axis=0)
if self.random_45:
sample = self._maybe_rotate(sample, self.IMG_KEYS)
return sample
def maybe_augment(self, sample):
if self._train:
return self.augment(sample)
else:
return sample
def retrieve_bbox(self, input_fname):
fname = os.path.splitext(input_fname[5:])[0]
try:
id_, rot_angle = fname.split("_")
except ValueError:
id_, rot_angle = int(fname), 0
else:
id_, rot_angle = int(id_), int(rot_angle)
bboxes = self.bboxes[id_][rot_angle]
best = max(bboxes, key=lambda x: x['score'])
bottom, left, width, height = best['bbox']
bottom, left = floor(bottom), floor(left)
width, height = ceil(width), ceil(height)
c0, cf = bottom, bottom + width
r0, rf = left, left + height
return {'r0': r0, 'rf': rf, 'c0': c0, 'cf': cf}
@staticmethod
def add_percent(bbox, img_size, percent=(0.1, 0.1)):
r, c = img_size
r_add = r * percent[0]
c_add = c * percent[1]
bbox['c0'] = max(0, int(bbox['c0'] - c_add))
bbox['cf'] = min(c, int(bbox['cf'] + c_add))
bbox['r0'] = max(0, int(bbox['r0'] - r_add))
bbox['rf'] = min(r, int(bbox['rf'] + r_add))
def calc_ufm_bbox(self, full_bbox, ratios):
return {k: int(v * ratios[k[0]]) for k, v in full_bbox.items()}
def crop(self, input_img, best_bbox):
assert input_img.shape[0] >= best_bbox['rf'] > best_bbox['r0'] >= 0, (
input_img.shape, best_bbox
)
assert input_img.shape[1] >= best_bbox['cf'] > best_bbox['c0'] >= 0, (
input_img.shape, best_bbox
)
input_img = input_img[best_bbox['r0']:best_bbox['rf'],
best_bbox['c0']:best_bbox['cf']]
return input_img
@staticmethod
def manual_rot_bbox(bbox, cols, rows, rot_choice):
if rot_choice == 0:
return
else:
r0, rf, c0, cf = bbox['r0'], bbox['rf'], bbox['c0'], bbox['cf']
if rot_choice == 1:
r0n, rfn, c0n, cfn = cols - cf, cols - c0, r0, rf
elif rot_choice == 2:
r0n, rfn, c0n, cfn = rows - rf, rows - r0, cols - cf, cols - c0
elif rot_choice == 3:
r0n, rfn, c0n, cfn = c0, cf, rows - rf, rows - r0
bbox['r0'], bbox['rf'], bbox['c0'], bbox['cf'] = r0n, rfn, c0n, cfn
def __len__(self):
return len(self.input_files_folded)
def get_mole(self, idx):
input_files = self.input_files_folded[idx]
assert len(input_files) == 1 or len(input_files) == 4
manual_rot = False
if self._train and self.random_90:
rot_choice = random.randint(0, 3)
if len(input_files) != 4:
manual_rot = True
load_choice = 0
else:
load_choice = rot_choice
else:
load_choice = 0
input_fname = input_files[load_choice]
input_path = os.path.join(self.input_fldr, input_fname)
# cv2 is BGR and we'll probably want HSV, La*b* or at least RGB
raw_img = cv2.imread(input_path)
if raw_img is None:
raise FileNotFoundError(input_path)
rows_0, cols_0 = raw_img.shape[:2]
if manual_rot and rot_choice != 0:
raw_img = np.rot90(raw_img, k=rot_choice)
raw_img = self.maybe_jitter(raw_img)
ratios = {'r': self.img_size[0] / raw_img.shape[0],
'c': self.img_size[1] / raw_img.shape[1]}
orig_imgs = []
for cvtr in self.color_cvtr:
orig_imgs.append(cvtr(raw_img))
orig_img = np.concatenate(orig_imgs, axis=-1)
if self.has_bboxes:
best_bbox = self.retrieve_bbox(input_fname)
if manual_rot:
self.manual_rot_bbox(best_bbox, cols_0, rows_0, rot_choice)
if self._train and self.noised_bbox:
grace = (0.1 * random.uniform(0, 2), 0.1 * random.uniform(0, 2))
else:
grace = (0.1, 0.1)
self.add_percent(best_bbox, orig_img.shape[:2], percent=grace)
ufm_bbox = self.calc_ufm_bbox(best_bbox, ratios)
cropped_img = self.crop(orig_img, best_bbox)
else:
# None or False would make more sense, but this 'hacks' the default batching fn
# and bool({}) is False.
best_bbox = {}
cropped_img = {}
ufm_bbox = {}
        if not isinstance(cropped_img, dict):  # {} is the "no bbox" placeholder; "is not {}" was always True
ufm_cropped_img = resize(cropped_img, *self.img_size)
else:
ufm_cropped_img = {}
ufm_orig_img = resize(orig_img, *self.img_size)
sample = {self.IN_CROPPED_IMG: ufm_cropped_img,
self.IN_ORIG_IMG: ufm_orig_img,
self.BBOX: best_bbox,
self.UFM_BBOX: ufm_bbox,
self.RATIOS: ratios,
self.INPUT_FNAME: input_fname}
if self.has_truth:
truth_fname = self.truth_files_folded[idx][load_choice]
truth_path = os.path.join(self.truth_fldr, truth_fname)
orig_gt = cv2.imread(truth_path, 0)
if manual_rot and rot_choice != 0:
orig_gt = np.rot90(orig_gt, k=rot_choice)
if orig_gt is None:
raise FileNotFoundError(truth_path)
if self.has_bboxes:
cropped_gt = self.crop(orig_gt, best_bbox)
else:
cropped_gt = {}
            if not isinstance(cropped_gt, dict):  # same placeholder check as above
ufm_cropped_gt = resize(cropped_gt, *self.img_size)
else:
ufm_cropped_gt = {}
ufm_orig_gt = resize(orig_gt, *self.img_size)
            if not isinstance(ufm_cropped_gt, dict):
                _, ufm_cropped_gt = cv2.threshold(ufm_cropped_gt, 256 // 2, 255, cv2.THRESH_BINARY)
_, ufm_orig_gt = cv2.threshold(ufm_orig_gt, 256 // 2, 255, cv2.THRESH_BINARY)
sample.update({self.GT_CROPPED_IMG: ufm_cropped_gt,
self.GT_ORIG_IMG: ufm_orig_gt,
self.TRUTH_FNAME: truth_fname})
return sample
def __getitem__(self, idx):
sample = self.get_mole(idx)
sample = self.maybe_augment(sample)
if self.xform:
sample = self.xform(sample)
return sample
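# Usage sketch (folder paths and the bbox file are illustrative, not from the original code):
#
#   ds = SegDataset("data/inputs", truth_fldr="data/truth",
#                   bbox_file="data/bboxes.json",
#                   xform=SegDataset.torch_xform)
#   ds.eval()                              # disable random flips/rotations/jitter
#   sample = ds[0]
#   img = sample[SegDataset.IN_ORIG_IMG]   # channels-first float tensor after torch_xform
#   gt = sample[SegDataset.GT_ORIG_IMG]    # uint8 mask tensor (only present with truth_fldr)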
|
#addfunctions.py
def add(num1,num2):
result=num1+num2
return result
def addall(*nums):
ttl=0
for num in nums:
ttl=ttl+num
return ttl
|
import pygame
from pygame.locals import *
import player
import boss_toriel
import boss_mario
import boss_captain
boss_cons = boss_captain.CaptainViridian
class Game:
def __init__(self):
pygame.mixer.pre_init(48000, -16, 2, 1024)
pygame.init()
self.display = pygame.display.set_mode((400,700))
self.hud = pygame.surface.Surface((400,100))
self.play_area = pygame.surface.Surface((400,600))
self.screen_rect = pygame.rect.Rect(0,0,400,600)
self.t = pygame.time.get_ticks()
self.restart_flag = False
self.player = player.Player()
self.player.set_center(200,400)
self.boss = boss_cons(self)
def restart(self):
self.restart_flag = True
def run(self):
clock = pygame.time.Clock()
while True:
self.inputs()
self.update()
self.draw()
clock.tick(60)
if self.restart_flag:
self.player = player.Player()
self.player.set_center(200,400)
self.boss.stop_music()
self.boss = boss_cons(self)
self.restart_flag = False
def inputs(self):
for e in pygame.event.get():
if e.type == QUIT:
quit()
def draw(self):
self.play_area.fill((0,0,0))
self.boss.draw(self.play_area)
self.player.draw(self.play_area)
self.display.blit(self.play_area,(0,100))
self.hud.fill((125,125,125))
self.boss.draw_hud(self.hud)
self.player.draw_hud(self.hud)
self.display.blit(self.hud,(0,0))
pygame.display.update()
def update(self):
dt = pygame.time.get_ticks() - self.t
dt /= 1000
self.t = pygame.time.get_ticks()
self.player.update(dt,self)
self.boss.update(dt,self)
def next_boss(self):
print("YOU ARE WINNER")
quit()
|
import h5py
import sys
# argv[1] : H5 file path
# argv[2] : path to actions list
with h5py.File(sys.argv[1], 'r') as fin, open(sys.argv[2], 'r') as fin2:
act_names = fin2.read().splitlines()
    act = act_names[fin['stream0/logits'][()].argmax()]  # [()] replaces the .value accessor removed in h5py 3.x
print('Detected action: {}'.format(act))
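# Example invocation (file names are illustrative):
#   python detect_action.py output.h5 action_names.txt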
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Bzip2(Package):
"""bzip2 is a freely available, patent free high-quality data
compressor. It typically compresses files to within 10% to 15%
of the best available techniques (the PPM family of statistical
compressors), whilst being around twice as fast at compression
and six times faster at decompression."""
homepage = "https://sourceware.org/bzip2/"
url = "https://sourceware.org/pub/bzip2/bzip2-1.0.8.tar.gz"
version('1.0.8', sha256='ab5a03176ee106d3f0fa90e381da478ddae405918153cca248e682cd0c4a2269')
version('1.0.7', sha256='e768a87c5b1a79511499beb41500bcc4caf203726fff46a6f5f9ad27fe08ab2b')
version('1.0.6', sha256='a2848f34fcd5d6cf47def00461fcb528a0484d8edef8208d6d2e2909dc61d9cd')
variant('shared', default=True, description='Enables the build of shared libraries.')
depends_on('diffutils', type='build')
# override default implementation
@property
def libs(self):
shared = '+shared' in self.spec
return find_libraries(
'libbz2', root=self.prefix, shared=shared, recursive=True
)
def patch(self):
# bzip2 comes with two separate Makefiles for static and dynamic builds
# Tell both to use Spack's compiler wrapper instead of GCC
filter_file(r'^CC=gcc', 'CC={0}'.format(spack_cc), 'Makefile')
filter_file(
r'^CC=gcc', 'CC={0}'.format(spack_cc), 'Makefile-libbz2_so'
)
# The Makefiles use GCC flags that are incompatible with PGI
if self.compiler.name == 'pgi':
filter_file('-Wall -Winline', '-Minform=inform', 'Makefile')
filter_file('-Wall -Winline', '-Minform=inform', 'Makefile-libbz2_so') # noqa
# Patch the link line to use RPATHs on macOS
if 'darwin' in self.spec.architecture:
v = self.spec.version
v1, v2, v3 = (v.up_to(i) for i in (1, 2, 3))
kwargs = {'ignore_absent': False, 'backup': False, 'string': True}
mf = FileFilter('Makefile-libbz2_so')
mf.filter('$(CC) -shared -Wl,-soname -Wl,libbz2.so.{0} -o libbz2.so.{1} $(OBJS)' # noqa
.format(v2, v3),
'$(CC) -dynamiclib -Wl,-install_name -Wl,@rpath/libbz2.{0}.dylib -current_version {1} -compatibility_version {2} -o libbz2.{3}.dylib $(OBJS)' # noqa
.format(v1, v2, v3, v3),
**kwargs)
mf.filter(
'$(CC) $(CFLAGS) -o bzip2-shared bzip2.c libbz2.so.{0}'.format(v3), # noqa
'$(CC) $(CFLAGS) -o bzip2-shared bzip2.c libbz2.{0}.dylib'
.format(v3), **kwargs)
mf.filter(
'rm -f libbz2.so.{0}'.format(v2),
'rm -f libbz2.{0}.dylib'.format(v2), **kwargs)
mf.filter(
'ln -s libbz2.so.{0} libbz2.so.{1}'.format(v3, v2),
'ln -s libbz2.{0}.dylib libbz2.{1}.dylib'.format(v3, v2),
**kwargs)
def install(self, spec, prefix):
# Build the dynamic library first
if '+shared' in spec:
make('-f', 'Makefile-libbz2_so')
# Build the static library and everything else
make()
make('install', 'PREFIX={0}'.format(prefix))
if '+shared' in spec:
install('bzip2-shared', join_path(prefix.bin, 'bzip2'))
v1, v2, v3 = (self.spec.version.up_to(i) for i in (1, 2, 3))
if 'darwin' in self.spec.architecture:
lib = 'libbz2.dylib'
lib1, lib2, lib3 = ('libbz2.{0}.dylib'.format(v)
for v in (v1, v2, v3))
else:
lib = 'libbz2.so'
lib1, lib2, lib3 = ('libbz2.so.{0}'.format(v)
for v in (v1, v2, v3))
install(lib3, join_path(prefix.lib, lib3))
with working_dir(prefix.lib):
for l in (lib, lib1, lib2):
symlink(lib3, l)
with working_dir(prefix.bin):
force_remove('bunzip2', 'bzcat')
symlink('bzip2', 'bunzip2')
symlink('bzip2', 'bzcat')
|
import cv2
import os
import sys
import imutils
imgpath = sys.argv[3]#Source folder
namp = sys.argv[4] #Folder to write files
def movdet(imx, imy, ind):
    imc = cv2.imread(imx, 0)  # untouched grayscale copy, used for cropping without boxes
    im0 = cv2.imread(imx, 0)  # current frame in grayscale
    im1 = cv2.imread(imy, 0)  # next frame in grayscale
    # blur both frames so the difference image is not dominated by pixel noise
    im0 = cv2.GaussianBlur(im0, (21, 21), 0)
    im1 = cv2.GaussianBlur(im1, (21, 21), 0)
    imD = cv2.absdiff(im0, im1)
    thr = cv2.threshold(imD, 25, 255, cv2.THRESH_BINARY)[1]
    thr = cv2.dilate(thr, None, iterations=2)
    # imutils.grab_contours handles the differing return values of cv2.findContours
    # across OpenCV versions
    cnts = imutils.grab_contours(cv2.findContours(thr.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))
    idx = 0
    for c in cnts:
        if cv2.contourArea(c) < 200:  # minimum area; could be exposed as an argument later
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(im0, (x, y), (x + w, y + h), (255, 255, 0), 1)
        if w > 19 and h > 20:  # keep only boxes of roughly 20x20 pixels or larger
            crop = imc[y:y + h, x:x + w]
            idx += 1
            cv2.imwrite(os.path.join('crop', 'it' + str(ind) + '_' + str(idx) + '.jpg'), crop)
def main():
iters = 0
lim0 = int(sys.argv[1])
lim1 = int(sys.argv[2])
for i in range(lim0,lim1):
ima = imgpath + namp + str(i) + ".jpg"
imb = imgpath + namp + str(i+1) + ".jpg"
movdet(ima,imb,iters)
iters+=1
print ("done with " + str(iters) + " images")
main()
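# Hedged usage sketch (the script name and folder layout are assumptions, and the
# "crop" output directory must already exist, since cv2.imwrite will not create it):
#   python movdet.py 0 99 ./frames/ frame
# compares frameN.jpg with frameN+1.jpg for N = 0..98 and writes the motion crops
# into ./crop/.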
|
# import the pymysql package
import pymysql
import threading
def demo(conn):
    try:
        cur = conn.cursor()  # get a cursor
        cur.execute('select id from 10 ORDER BY RAND() LIMIT 1000')
        data = cur.fetchall()
        print(len(data))
        # cur.close()   # close the cursor
        # conn.close()  # release the database connection
    except Exception:
        print("query failed")
def get_connect():
    """
    Build a database connection (host/user/password/db below are anonymized placeholders).
    Returns: a pymysql connection object; demo() creates its own cursor from it.
    """
    conn = pymysql.connect(host='10', user='10', passwd='10.com', db='10', port=10, charset='utf8mb4')
    if not conn:
        raise NameError("failed to connect to the database")
    return conn
def loop():
    threads = []
    threads_num = 5  # number of threads
    for _ in range(threads_num):
        conn = get_connect()  # one connection per thread; a pymysql connection should not be shared across threads
        # args must be a tuple: (conn,) -- a bare (conn) is just conn itself
        t = threading.Thread(target=demo, args=(conn,))
        threads.append(t)
    for t in threads:
        t.start()
    for t in threads:
        t.join()
if __name__ == '__main__':
loop()
|
# A palindrome is a word that reads the same in both directions:
# e.g. Anna, Drehherd, Kukuk
#
# Write a function that takes a word and returns whether or not that word is a palindrome.
#wort = input("Enter a word")
def palindrom(wort):
    wort = wort.lower()
    if wort == wort[::-1]:
        return("Your word is a palindrome")
    else:
        return("Your word is not a palindrome")
print(palindrom(input("Enter a word: "))) |
import urlparse
import urllib3
import requests
import time
import heapq
from bs4 import BeautifulSoup
import robotparser
import os
CRLF = '\r\n'
FIELD_END = '#'
ENTRY_END = '$'
# It would be more natural to use a Max-Heap in this program.
# Since I already had my old code for a Min-Heap, I employed that by negating the values stored in it.
# min-heap
class min_heap():
# min-heap : Void -> min-heap
# Returns: An empty min-heap object.
def __init__(self):
self.heap = []
# insert : PosInt -> Void
# Effect: Adds the given value into the min-heap.
def insert(self, value):
heapq.heappush(self.heap, value)
# pop : Void -> PosInt
    # Returns: The root of the min-heap, i.e. the minimum value.
def pop(self):
if self.heap == []:
return None
return heapq.heappop(self.heap)
# view_top : Void -> PosInt
# Returns: The root of the min-heap without removing it from the min-heap.
def view_top(self):
if self.heap == []:
return None
return self.heap[0]
# heapify : Void -> Void
# Effect : Readjusts the heap by applying the min-heap property
def heapify(self):
heapq.heapify(self.heap)
# size : Void -> Integer
# Returns : The number of elements in the heap.
def size(self):
return len(self.heap)
class queue_element():
def __init__(self, url, inlinks):
self.url = url
self.inlinks = -inlinks
self.timestamp = time.time()
def increase_inlinks(self, delta):
self.inlinks -= delta
def __cmp__(self, other):
if self.inlinks == other.inlinks:
if self.timestamp < other.timestamp:
return -1
return 1
return self.inlinks - other.inlinks
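# A minimal, hedged sketch (not part of the original crawler) of how the negation
# trick above turns the min-heap into a max-heap over in-link counts: queue_element
# stores -inlinks, so the page with MORE in-links compares as "smaller" and is
# popped first. Relies on __cmp__, i.e. Python 2, like the rest of this file.
def _heap_negation_demo():
    h = min_heap()
    popular = queue_element('http://popular.example', 5)
    obscure = queue_element('http://obscure.example', 1)
    h.insert(obscure)
    h.insert(popular)
    return h.pop() is popular  # True: the most-linked page comes out first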
def text_out_links(html, base):
    soup = BeautifulSoup(html, 'html.parser')
if soup.title != None:
text = soup.title.text
else:
text = ''
text += ''.join(map(lambda x: x.text.strip(), soup.find_all('p')))
return (text, map(lambda x: canonicalize(x['href'], base), filter(lambda x: x.has_attr('href') and x.text != '', soup.find_all('a'))))
def canonicalize(url, base):
if url.endswith('/'):
url = url[:-1]
parsed = urlparse.urlparse(url)
if (parsed[1] == '' and parsed[2] != '' and not (parsed[2].startswith('/') or parsed[2].startswith('.'))) or (parsed[0] == '' and parsed[1] != ''):
for i in range(len(url)):
if url[i].isalnum():
break
url = 'http://' + url[i:]
parsed = urlparse.urlparse(url)
if ':' in parsed.netloc:
if (parsed.scheme == 'http' and parsed.netloc.split(':')[1] == '80') or (parsed.scheme == 'https' and parsed.netloc.split(':')[1] == '443'):
parsed = (parsed.scheme, parsed.netloc.lower().split(':')[0], parsed.path, parsed.params, parsed.query, '')
else:
parsed = (parsed.scheme, parsed.netloc.lower(), parsed.path, parsed.params, parsed.query, '')
#print(parsed)
return urlparse.urljoin(base, urlparse.urlunparse(parsed))
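# fetch: retries once on a connection error (the `again` flag) and only returns page
# content for English (or language-unspecified) text/html responses.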
def fetch(session, url, again=True):
try:
session.head(url)
r = session.get(url, headers={'accept':'text/html'})
#print(r.status_code)
if r.status_code == 200:
html = r.text
ct = r.headers.get('content-type')
content_type = None
charset = 'utf-8'
#charset = 'iso-8859-1'
if ct != None:
ct_header = ct.split(';')
content_type = ct_header[0]
#if len(ct_header) > 1 and ct_header[1].strip().startswith('charset='):
# charset = ct_header[1].split('=')[1]
            soup = BeautifulSoup(html, 'html.parser')
if (soup.html.get('lang') == None or soup.html['lang'] == 'en') and content_type == 'text/html':
return (html, charset, True)
except requests.exceptions.ConnectionError as e:
print(e)
if again:
return fetch(session, url, False)
    except Exception as e:
        #print(e)
        print(url)
return ('', None, False)
def store(results_filename, charset, url, text, html, outlinks):
fp = open(results_filename, 'w+')
fp.write(url + '\n' + FIELD_END + '\n')
fp.write(text.encode(charset, 'ignore') + '\n' + FIELD_END + '\n')
fp.write(html.strip().encode(charset, 'ignore') + '\n' + FIELD_END + '\n')
fp.write(','.join(outlinks).encode(charset, 'ignore'))
fp.close()
def polite(robotcheckers, url):
host = urlparse.urlparse(url).netloc
try:
rc = robotcheckers[host]
except KeyError:
rc = robotparser.RobotFileParser()
rc.set_url('http://' + host + '/robots.txt')
rc.read()
robotcheckers[host] = rc
return rc.can_fetch('*', url)
def hours_minutes(seconds):
return str(seconds/3600) + ' hours, ' + str((seconds%3600)/60) + ' mins elapsed.'
# best-first policy priorities:
# 1. seed URLs
# 2. higher number of in-links
# 3. longest time spent in the queues
def crawl(CRAWL_LIMIT, results_filepath, seeds):
heap = min_heap()
visited = set()
queue = {}
robotcheckers = {}
crawled = 1
start_time = time.time()
time_taken = start_time
string = ''
session = requests.Session()
#print('hi')
for seed in seeds:
if not polite(robotcheckers, seed):
#print('impolite' + seed)
continue
html, charset, ok = fetch(session, seed)
if not ok:
#print('not okay')
continue
text, outlinks = text_out_links(html, seed)
for link in outlinks:
if link not in visited:
try:
queue[link].increase_inlinks(1)
except KeyError:
new_element = queue_element(link, 1)
queue[link] = new_element
heap.insert(new_element)
        store(os.path.join(results_filepath, str(crawled) + '.txt'), charset, seed, text, html, outlinks)
visited.add(seed)
crawled += 1
    while crawled < CRAWL_LIMIT:
        next_element = heap.pop()
        if next_element is None:
            break  # the frontier emptied out before reaching the crawl limit
        next_link = next_element.url
        queue.pop(next_link)
if not polite(robotcheckers, next_link):
continue
html, charset, ok = fetch(session, next_link)
if not ok:
continue
text, outlinks = text_out_links(html, next_link)
for link in outlinks:
if link not in visited:
try:
queue[link].increase_inlinks(1)
except KeyError:
new_element = queue_element(link, 1)
queue[link] = new_element
heap.insert(new_element)
store(os.path.join(results_filepath,str(crawled)+'.txt'), charset, next_link, text, html, outlinks)
visited.add(next_link)
crawled += 1
heap.heapify()
if crawled % 100 == 0:
print('Last batch took ' + str(int(time.time() - time_taken)) + ' seconds')
print(str(crawled) + ' pages crawled.')
#print('Frontier Size: ' + str(len(queue)))
print('Heap Size: ' + str(heap.size()))
time_taken = time.time()
print(hours_minutes(int(time_taken - start_time)) + '\n')
# print(visited)
print('Crawling complete')
print(len(visited))
#print(fetched)
def main():
urllib3.disable_warnings()
# replace the example websites given below with your seed URLs
seeds = ('www.abc.com', 'www.pqr.com', 'www.xyz.com')
    # expanduser so the literal '~' is resolved before result files are written here
    results_filename = os.path.expanduser('~/Documents/IR/code/hw3/data/fresh_crawl')
crawl(21000, results_filename, seeds)
#print(pages)
def tests():
b1 = 'http://www.google.com'
b2 = 'http://www.google.com/a/b.txt'
u1 = '../SomeFile.txt'
u2 = 'http://www.Example.com/SomeFile.txt'
u3 = 'www.example.com/SomeFile.txt'
u4 = '//www.example.com/SomeFile.txt'
u5 = '#skip'
u6 = 'http://www.google.com/'
r1 = 'http://www.google.com/SomeFile.txt'
r2 = 'http://www.example.com/SomeFile.txt'
cases = [
(canonicalize(u1, b2), r1, '1'),
(canonicalize(u2, b1), r2, '2'),
(canonicalize(u3, b1), r2, '3'),
(canonicalize(u4, b1), r2, '4'),
(canonicalize(u5, b1), b1, '5'),
(canonicalize(u6, b1), b1, '6'), ]
def check(tup_3):
return 'Expected: ' + tup_3[1] + '\nGot: ' + tup_3[0] + '\n' + tup_3[2]
print('\n'.join(map(check, filter(lambda x: x[0] != x[1], cases))))
if __name__ == '__main__':
main()
# tests()
|
import json
import re
with open('warnings.txt', 'r') as f:
w = json.loads(f.read())
for elem in w['missing-variable-declarations']:
        print(elem[0] + ':' + elem[1] + ' ' + elem[3])
|
from typing import Optional
from pydantic import BaseModel
class JobCat(BaseModel):
job_cat: str
class JobDesc(BaseModel):
job_desc: str
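# Hedged usage sketch (field values are made up): these models validate request
# payloads, e.g. JobCat(job_cat="engineering") or
# JobDesc.parse_obj({"job_desc": "Build and maintain data pipelines"}).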
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from src.RegularShape import RegularShape
class SymmetricShape(RegularShape):
# def _count_points(self, num):
# line = get_line(self.points[0], self.points[1])
@staticmethod
def name():
return 'Symmetric Shape'
|
import discord
from cogs.utils.settings import Settings
from cogs.utils.botdata import BotData
from cogs.utils.helpers import *
from cogs.utils.helpformatter import MangoHelpFormatter
import cogs.utils.loggingdb as loggingdb
import traceback
import asyncio
import string
from discord.ext import commands
import logging
logging.basicConfig(level=logging.INFO)
botdata = BotData()
settings = Settings()
loggingdb_session = loggingdb.create_session(settings.resource("loggingdb.db"))
# This has to be done after loading settings
from cogs.utils.clip import *
from cogs.utils.httpgetter import HttpGetter
httpgetter = HttpGetter()
description = """The juiciest unsigned 8 bit integer you is eva gonna see.
For more information about me, try `{cmdpfx}info`"""
permissions = 314432
bot = commands.Bot(command_prefix='!', formatter=MangoHelpFormatter(), description=description)
bot.remove_command("help")
thinker = Thinker(bot)
invite_link = f"https://discordapp.com/oauth2/authorize?permissions={permissions}&scope=bot&client_id=213476188037971968"
deprecated_commands = {
"ttschannel": "config ttschannel",
"unttschannel": "config ttschannel none",
"opendotasql": "https://www.opendota.com/explorer"
}
@bot.event
async def on_ready():
print('Logged in as:\n{0} (ID: {0.id})'.format(bot.user))
print('Connecting to voice channels if specified in botdata.json ...')
await bot.change_presence(game=discord.Game(name="DOTA 3 [?help]", url="http://github.com/mdiller/MangoByte"))
cog = bot.get_cog("Audio")
for guildinfo in botdata.guildinfo_list():
if guildinfo.voicechannel is not None:
try:
print(f"connecting voice to: {guildinfo.voicechannel}")
await cog.connect_voice(guildinfo.voicechannel)
except UserError as e:
if e.message == "channel not found":
guildinfo.voicechannel = None
else:
raise
except asyncio.TimeoutError:
guildinfo.voicechannel = None
new_nick = bot.user.name + " v" + get_version()
for guild in bot.guilds:
if guild.me.guild_permissions.change_nickname:
if guild.me.nick is None or (guild.me.nick.startswith(bot.user.name) and guild.me.nick != new_nick):
await guild.me.edit(nick=new_nick)
async def get_cmd_signature(ctx):
bot.formatter.context = ctx
bot.formatter.command = ctx.command
return bot.formatter.get_command_signature()
# Whether or not we report invalid commands
async def invalid_command_reporting(ctx):
if ctx.message.guild is None:
return True
else:
return botdata.guildinfo(ctx.message.guild.id).invalidcommands
@bot.event
async def on_command_error(ctx, error):
if ctx.message in thinker.messages:
await thinker.stop_thinking(ctx.message)
try:
if isinstance(error, commands.CommandNotFound):
cmd = ctx.message.content[1:].split(" ")[0]
if cmd in deprecated_commands:
await ctx.send(f"You shouldn't use `?{cmd}` anymore. It's *deprecated*. Try `?{deprecated_commands[cmd]}` instead.")
return
elif cmd == "" or cmd.startswith("?") or cmd.startswith("!"):
return # These were probably not meant to be commands
if cmd.lower() in bot.commands:
new_message = ctx.message
new_message.content = "?" + cmd.lower() + ctx.message.content[len(cmd) + 1:]
await bot.process_commands(new_message)
elif await invalid_command_reporting(ctx):
await ctx.send(f"🤔 Ya I dunno what a '{cmd}' is, but it ain't a command. Try `?help` fer a list of things that ARE commands.")
elif isinstance(error, commands.CheckFailure):
print("(suppressed)")
return # The user does not have permissions
elif isinstance(error, commands.MissingRequiredArgument):
await ctx.send(embed=await bot.formatter.format_as_embed(ctx, ctx.command))
elif isinstance(error, commands.BadArgument):
signature = await get_cmd_signature(ctx)
await ctx.send((
"Thats the wrong type of argument for that command.\n\n"
f"Ya gotta do it like this:\n`{signature}`\n\n"
f"Try `?help {ctx.command}` for a more detailed description of the command"))
elif isinstance(error, commands.CommandInvokeError) and isinstance(error.original, discord.errors.Forbidden):
await print_missing_perms(ctx, error)
elif isinstance(error, commands.CommandInvokeError) and isinstance(error.original, discord.errors.HTTPException):
await ctx.send("Looks like there was a problem with discord just then. Try again in a bit.")
elif isinstance(error, commands.CommandInvokeError) and isinstance(error.original, UserError):
await ctx.send(error.original.message)
else:
await ctx.send("Uh-oh, sumthin dun gone wrong 😱")
trace_string = report_error(ctx.message, error, skip_lines=4)
if settings.debug:
await ctx.send(f"```{trace_string}```")
except discord.errors.Forbidden:
await ctx.author.send("Looks like I don't have permission to talk in that channel, sorry")
error_file = "errors.json"
async def print_missing_perms(ctx, error):
	if not (ctx.guild):
		await ctx.send("Uh-oh, sumthin dun gone wrong 😱")
		trace_string = report_error(ctx.message, error, skip_lines=0)
		return
my_perms = ctx.channel.permissions_for(ctx.guild.me)
perms_strings = read_json(settings.resource("json/permissions.json"))
perms = []
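	# walk all 32 permission bits: any bit set in the requested `permissions` mask
	# but missing from my_perms is reported by the human-readable name looked up
	# in json/permissions.json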
for i in range(0, 32):
if ((permissions >> i) & 1) and not my_perms._bit(i):
words = perms_strings["0x{:08x}".format(1 << i)].split("_")
for i in range(0, len(words)):
words[i] = f"**{words[i][0] + words[i][1:].lower()}**"
perms.append(" ".join(words))
if perms:
await ctx.send("Looks like I'm missin' these permissions 😢:\n" + "\n".join(perms))
else:
await ctx.send(f"Looks like I'm missing permissions 😢. Have an admin giff me back my permissions, or re-invite me to the server using this invite link: {invite_link}")
def report_error(message, error, skip_lines=2):
if os.path.isfile(error_file):
error_list = read_json(error_file)
else:
error_list = []
try:
raise error.original
except:
trace = traceback.format_exc().replace("\"", "'").split("\n")
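		# drop the first `skip_lines` frames of the re-raised traceback so the stored
		# trace starts at the code that actually failed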
if skip_lines > 0 and len(trace) >= (2 + skip_lines):
del trace[1:(skip_lines + 1)]
trace = [x for x in trace if x] # removes empty lines
error_list.append({
"author": message.author.id,
"message_id": message.id,
"message": message.clean_content,
"message_full": message.content,
"command_error": type(error).__name__,
"error": str(error),
"traceback": trace
})
if settings.error_logging:
write_json(error_file, error_list)
trace_string = "\n".join(trace)
print(f"\nError on: {message.clean_content}\n{trace_string}\n")
return trace_string
if __name__ == '__main__':
bot.load_extension("cogs.general")
bot.load_extension("cogs.audio")
bot.load_extension("cogs.dotabase")
bot.load_extension("cogs.dotastats")
bot.load_extension("cogs.pokemon")
bot.load_extension("cogs.admin")
bot.run(settings.token)
|
import hashlib
import numpy as np
from django.conf import settings
from dicom_to_cnn.tools.pre_processing import series
from dicom_to_cnn.model.reader.Nifti import Nifti
from dicom_to_cnn.model.post_processing.mip.MIP_Generator import MIP_Generator
class DicomToCnn:
def to_nifti(self,folder_path: str):
"""[Get DICOM seerie path and transform to nifti]
Args:
folder_path (str): [DICOM series folder path]
"""
data_path = settings.STORAGE_DIR
path = folder_path
nifti=series.get_series_object(path)
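        # the series modality (CT vs PT) is detected below by slicing the object's repr,
        # e.g. "<dicom_to_cnn.model.reader.SeriesCT.SeriesCT object at ...>" -> chars [1:44];
        # fragile, but it is how this code branches between the two export paths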
nifti_str=str(nifti)
nifti_str=nifti_str[1:44]
if nifti_str=='dicom_to_cnn.model.reader.SeriesCT.SeriesCT':
nifti.get_instances_ordered()
nifti.get_numpy_array()
image_md5 = hashlib.md5(str(nifti).encode())
image_id = image_md5.hexdigest()
img=nifti.export_nifti(data_path+'/image/image_'+image_id+'.nii')
if nifti_str=='dicom_to_cnn.model.reader.SeriesPT.SeriesPT':
nifti.get_instances_ordered()
nifti.get_numpy_array()
nifti.set_ExportType('suv')
image_md5 = hashlib.md5(str(nifti).encode())
image_id = image_md5.hexdigest()
img=nifti.export_nifti(data_path+'/image/image_'+image_id+'.nii')
def generate_mip(self,idImage:str):
data_path = settings.STORAGE_DIR
directory=settings.STORAGE_DIR+'/image'
path_ct =data_path+'/image/image_'+idImage+'.nii'
objet = Nifti(path_ct)
resampled = objet.resample(shape=(256, 256, 1024))
        resampled[np.where(resampled < 500)] = 0  # threshold at 500 HU (Hounsfield units)
        normalize = resampled / np.max(resampled)
mip_generator = MIP_Generator(normalize)
mip_generator.project(angle=0)
mip_generator.save_as_png('image_2D_'+idImage, directory, vmin=0, vmax=1) |