code
stringlengths
501
5.19M
package
stringlengths
2
81
path
stringlengths
9
304
filename
stringlengths
4
145
import os import sys import numpy as np import pandas as pd import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt from torch.utils.data import Dataset from sklearn.model_selection import train_test_split import torch TEST_SET_DIR = '/scratch/sr365/ML_MM_Benchmark/testsets' def get_data_into_loaders(data_x, data_y, batch_size, DataSetClass, rand_seed=0, test_ratio=0.3): """ Helper function that takes structured data_x and data_y into dataloaders :param data_x: the structured x data :param data_y: the structured y data :param rand_seed: the random seed :param test_ratio: The testing ratio :return: train_loader, test_loader: The pytorch data loader file """ if test_ratio == 1: # Test case print('This is testing mode!!! Test ratio = 1') test_data = DataSetClass(data_x, data_y) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size) return None, test_loader x_train, x_test, y_train, y_test = train_test_split(data_x, data_y, test_size=test_ratio, random_state=rand_seed) print('total number of training sample is {}, the dimension of the feature is {}'.format(len(x_train), len(x_train[0]))) print('total number of test sample is {}'.format(len(y_test))) # Construct the dataset using a outside class train_data = DataSetClass(x_train, y_train) test_data = DataSetClass(x_test, y_test) # Construct train_loader and test_loader train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size) return train_loader, test_loader def normalize_np(x, x_max_list=None, x_min_list=None): """ Normalize the x into [-1, 1] range in each dimension [:, i] :param x: np array to be normalized :return: normalized np array """ if x_max_list is not None: if x_min_list is None or len(x[0]) != len(x_max_list) or len(x_max_list) != len(x_min_list): print("In normalize_np, your dimension does not match with provided x_max, try again") quit() new_x_max_list = [] new_x_min_list = [] for i in range(len(x[0])): if x_max_list is None: x_max = np.max(x[:, i]) x_min = np.min(x[:, i]) else: x_max = x_max_list[i] x_min = x_min_list[i] x_range = (x_max - x_min ) /2. x_avg = (x_max + x_min) / 2. x[:, i] = (x[:, i] - x_avg) / x_range print("In normalize_np, row ", str(i), " your max is:", np.max(x[:, i])) print("In normalize_np, row ", str(i), " your min is:", np.min(x[:, i])) if x_max_list is None: assert np.max(x[:, i]) - 1 < 0.0001, 'your normalization is wrong' assert np.min(x[:, i]) + 1 < 0.0001, 'your normalization is wrong' new_x_max_list.append(x_max) new_x_min_list.append(x_min) return x, np.array(new_x_max_list), np.array(new_x_min_list) def read_data_color_filter(flags, eval_data_all=False): """ Data reader function for the gaussian mixture data set :param flags: Input flags :return: train_loader and test_loader in pytorch data set format (normalized) """ # Read the data data_dir = os.path.join(flags.data_dir, 'Color') data_x = pd.read_csv(os.path.join(data_dir, 'data_x.csv'), header=None).astype('float32').values data_y = pd.read_csv(os.path.join(data_dir, 'data_y.csv'), header=None).astype('float32').values # This is to for the last test dataset if eval_data_all: data_dir = os.path.join(TEST_SET_DIR, 'Color') data_x = pd.read_csv(os.path.join(data_dir, 'test_x.csv'), header=None).astype('float32').values data_y = pd.read_csv(os.path.join(data_dir, 'test_y.csv'), header=None).astype('float32').values return get_data_into_loaders(data_x, data_y, flags.batch_size, SimulatedDataSet_regress, rand_seed=flags.rand_seed,test_ratio=1) print("shape of data_x", np.shape(data_x)) print("shape of data_y", np.shape(data_y)) return get_data_into_loaders(data_x, data_y, flags.batch_size, SimulatedDataSet_regress, rand_seed=flags.rand_seed, test_ratio=flags.test_ratio) def read_data_Yang(flags, eval_data_all=False): """ Data reader function for the gaussian mixture data set :param flags: Input flags :return: train_loader and test_loader in pytorch data set format (normalized) """ # Read the data data_dir = os.path.join(flags.data_dir, 'Yang') data_x = pd.read_csv(os.path.join(data_dir, 'data_x.csv'), header=None).astype('float32').values data_y = pd.read_csv(os.path.join(data_dir, 'data_y.csv'), header=None).astype('float32').values # Normalize the dataset data_x, x_max, x_min = normalize_np(data_x) # This is to for the last test dataset if eval_data_all: data_dir = os.path.join(TEST_SET_DIR, 'Yang') data_x = pd.read_csv(os.path.join(data_dir, 'test_x.csv'), header=None).astype('float32').values data_y = pd.read_csv(os.path.join(data_dir, 'test_y.csv'), header=None).astype('float32').values data_x, _, _, = normalize_np(data_x, x_max, x_min) print('This is Yang dataset with data_x shape of', np.shape(data_x)) return get_data_into_loaders(data_x, data_y, flags.batch_size, SimulatedDataSet_regress, rand_seed=flags.rand_seed, test_ratio=1) print("shape of data_x", np.shape(data_x)) print("shape of data_y", np.shape(data_y)) return get_data_into_loaders(data_x, data_y, flags.batch_size, SimulatedDataSet_regress, rand_seed=flags.rand_seed, test_ratio=flags.test_ratio) def read_data_peurifoy(flags, eval_data_all=False): """ Data reader function for the gaussian mixture data set :param flags: Input flags :return: train_loader and test_loader in pytorch data set format (normalized) """ # Read the data data_dir = os.path.join(flags.data_dir, 'Peurifoy') data_x = pd.read_csv(os.path.join(data_dir, 'data_x.csv'), header=None).astype('float32').values data_y = pd.read_csv(os.path.join(data_dir, 'data_y.csv'), header=None).astype('float32').values # This is to for the last test dataset if eval_data_all: data_dir = os.path.join(TEST_SET_DIR, 'Peurifoy') data_x = pd.read_csv(os.path.join(data_dir, 'test_x.csv'), header=None).astype('float32').values data_y = pd.read_csv(os.path.join(data_dir, 'test_y.csv'), header=None).astype('float32').values data_x = (data_x - 50) / 20. print('This is Perifoy dataset with data_x shape of', np.shape(data_x)) return get_data_into_loaders(data_x, data_y, flags.batch_size, SimulatedDataSet_regress, rand_seed=flags.rand_seed,test_ratio=1) # The geometric boundary of peurifoy dataset is [30, 70], normalizing manually data_x = (data_x - 50) / 20. print("shape of data_x", np.shape(data_x)) print("shape of data_y", np.shape(data_y)) return get_data_into_loaders(data_x, data_y, flags.batch_size, SimulatedDataSet_regress, rand_seed=flags.rand_seed, test_ratio=flags.test_ratio) def read_data(flags, eval_data_all=False): """ The data reader allocator function The input is categorized into couple of different possibilities 0. meta_material 1. gaussian_mixture 2. sine_wave 3. naval_propulsion 4. robotic_arm 5. ballistics :param flags: The input flag of the input data set :param eval_data_all: The switch to turn on if you want to put all data in evaluation data :return: """ print("In read_data, flags.data_set =", flags.data_set) if 'Yang' in flags.data_set or 'ADM' in flags.data_set: train_loader, test_loader = read_data_Yang(flags,eval_data_all=eval_data_all) elif 'Peurifoy' in flags.data_set : train_loader, test_loader = read_data_peurifoy(flags,eval_data_all=eval_data_all) elif 'olor' in flags.data_set: train_loader, test_loader =read_data_color_filter(flags,eval_data_all=eval_data_all) else: sys.exit("Your flags.data_set entry is not correct, check again!") return train_loader, test_loader class MetaMaterialDataSet(Dataset): """ The Meta Material Dataset Class """ def __init__(self, ftr, lbl, bool_train): """ Instantiate the Dataset Object :param ftr: the features which is always the Geometry !! :param lbl: the labels, which is always the Spectra !! :param bool_train: """ self.ftr = ftr self.lbl = lbl self.bool_train = bool_train self.len = len(ftr) def __len__(self): return self.len def __getitem__(self, ind): return self.ftr[ind, :], self.lbl[ind, :] class SimulatedDataSet_class_1d_to_1d(Dataset): """ The simulated Dataset Class for classification purposes""" def __init__(self, x, y): self.x = x self.y = y self.len = len(x) def __len__(self): return self.len def __getitem__(self, ind): return self.x[ind], self.y[ind] class SimulatedDataSet_class(Dataset): """ The simulated Dataset Class for classification purposes""" def __init__(self, x, y): self.x = x self.y = y self.len = len(x) def __len__(self): return self.len def __getitem__(self, ind): return self.x[ind, :], self.y[ind] class SimulatedDataSet_regress(Dataset): """ The simulated Dataset Class for regression purposes""" def __init__(self, x, y): self.x = x self.y = y self.len = len(x) def __len__(self): return self.len def __getitem__(self, ind): return self.x[ind, :], self.y[ind, :]
AEML
/models/Transformer/utils/data_reader.py
data_reader.py
import numpy as np from sklearn.metrics import confusion_matrix import seaborn as sns import matplotlib.pyplot as plt import os import pandas as pd def compare_truth_pred(pred_file, truth_file, cut_off_outlier_thres=None, quiet_mode=False): """ Read truth and pred from csv files, compute their mean-absolute-error and the mean-squared-error :param pred_file: full path to pred file :param truth_file: full path to truth file :return: mae and mse """ if isinstance(pred_file, str): # If input is a file name (original set up) pred = pd.read_csv(pred_file, header=None, sep=' ').values print(np.shape(pred)) if np.shape(pred)[1] == 1: pred = pd.read_csv(pred_file, header=None, sep=',').values truth = pd.read_csv(truth_file, header=None, sep=' ').values print(np.shape(truth)) if np.shape(truth)[1] == 1: truth = pd.read_csv(truth_file, header=None, sep=',').values elif isinstance(pred_file, np.ndarray): pred = pred_file truth = truth_file else: print('In the compare_truth_pred function, your input pred and truth is neither a file nor a numpy array') if not quiet_mode: print("in compare truth pred function in eval_help package, your shape of pred file is", np.shape(pred)) # Getting the mean absolute error and the mean squared error mae = np.mean(np.abs(pred-truth), axis=1) mse = np.mean(np.square(pred-truth), axis=1) # When necessary you can choose to cut off the outliers here if cut_off_outlier_thres is not None: mse = mse[mse < cut_off_outlier_thres] mae = mae[mae < cut_off_outlier_thres] return mae, mse def plotMSELossDistrib(pred_file, truth_file, save_dir='data/'): """ Function to plot the MSE distribution histogram :param: pred_file: The Y prediction file :param: truth_file: The Y truth file :param: flags: The flags of the model/evaluation :param: save_dir: The directory to save the plot """ mae, mse = compare_truth_pred(pred_file, truth_file) plt.figure(figsize=(12, 6)) plt.hist(mse, bins=100) plt.xlabel('Mean Squared Error') plt.ylabel('cnt') plt.suptitle('(Avg MSE={:.4e}, 25%={:.3e}, 75%={:.3e})'.format(np.mean(mse), np.percentile(mse, 25), np.percentile(mse, 75))) if isinstance(pred_file, str): eval_model_str = pred_file.split('Ypred')[-1].split('.')[0] else: eval_model_str = 'MSE_unknon_name' plt.savefig(os.path.join(save_dir, '{}.png'.format(eval_model_str))) print('(Avg MSE={:.4e})'.format(np.mean(mse))) return np.mean(mse)
AEML
/models/Transformer/utils/evaluation_helper.py
evaluation_helper.py
import numpy as np import torch from utils import plotsAnalysis import os from utils.helper_functions import load_flags def auto_swipe(mother_dir=None): """ This function swipes the parameter space of a folder and extract the varying hyper-parameters and make 2d heatmap w.r.t. all combinations of them """ if mother_dir is None: #mother_dir = '/scratch/sr365/ML_MM_Benchmark/Yang_temp/models/sweep8' #mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/Yang_new_sweep/' mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/new_norm_color/' #mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/Color_new_sweep/' #mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/encoder_pos_analysis/Color' #mother_dir = '/scratch/sr365/ML_MM_Benchmark/Color_temp/models' #mother_dir = '/scratch/sr365/ML_MM_Benchmark/Color_temp/prev_sweep/test_size' #mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/sweep_encode_lr' flags_list = [] # First step, get the list of object flags for folder in os.listdir(mother_dir): # Get the current sub_folder cur_folder = os.path.join(mother_dir, folder) if not os.path.isdir(cur_folder) or not os.path.isfile(os.path.join(cur_folder, 'flags.obj')): print('Either this is not a folder or there is no flags object under this folder for ', cur_folder) continue # Read the pickle object cur_flags = load_flags(cur_folder) flags_list.append(cur_flags) # From the list of flags, get the things that are different except for loss terms att_list = [a for a in dir(cur_flags) if not a.startswith('_') and not 'loss' in a and not 'trainable_param' in a and not 'model_name' in a and not 'dir' in a] print('In total {} attributes, they are {}'.format(len(att_list), att_list)) # Create a dictionary that have keys as attributes and unique values as that attDict = {key: [] for key in att_list} # Loop over all the flags and get the unique values inside for flags in flags_list: for keys in attDict.keys(): try: att = getattr(flags,keys) except: print('There is not attribute {} in flags, continue'.format(keys)) continue # Skip if this is already inside the list if att in attDict[keys]: continue attDict[keys].append(att) # Get the atts in the dictionary that has more than 1 att inside varying_att_list = [] for keys in attDict.keys(): if len(attDict[keys]) > 1: # For linear layers, apply special handlings if 'linear' not in keys: varying_att_list.append(keys) continue length_list = [] num_node_in_layer_list = [] # Loop over the lists of linear for linear_list in attDict[keys]: assert type(linear_list) == list, 'Your linear layer is not list, check again' length_list.append(len(linear_list)) # Record the length instead if 'head_linear' in keys: if len(linear_list) > 2: num_node_in_layer_list.append(linear_list[-2]) # Record the -2 of the list, which denotes the number of nodes elif 'tail_linear' in keys: if len(linear_list) > 1: num_node_in_layer_list.append(linear_list[-2]) # Record the -2 of the list, which denotes the number of nodes # Add these two attributes to the if len(np.unique(length_list)) > 1: varying_att_list.append(keys) if len(np.unique(num_node_in_layer_list)) > 1: varying_att_list.append('linear_unit') print('varying attributes are', varying_att_list) # Showing how they are changing for keys in varying_att_list: if keys == 'linear_unit': continue print('att is {}, they have values of {}'.format(keys, attDict[keys])) if len(varying_att_list) == 1: # There is only 1 attribute that is changing att = varying_att_list[0] key_a = att key_b = 'lr' for heatmap_value in ['best_validation_loss', 'best_training_loss','trainable_param']: #try: print('doing heatmap {}'.format(heatmap_value)) plotsAnalysis.HeatMapBVL(key_a, key_b, key_a + '_' + key_b + '_HeatMap',save_name=mother_dir + '_' + key_a + '_' + key_b + '_' + heatmap_value + '_heatmap.png', HeatMap_dir=mother_dir,feature_1_name=key_a,feature_2_name=key_b, heat_value_name=heatmap_value) #except Exception as e: # print('the plotswipe does not work in {} and {} cross for {}'.format(key_a, key_b, heatmap_value)) # print('error message: {}'.format(e)) # Start calling the plotsAnalysis function for all the pairs for a, key_a in enumerate(varying_att_list): for b, key_b in enumerate(varying_att_list): # Skip the same attribute if a <= b: continue # Call the plotsAnalysis function #for heatmap_value in ['best_validation_loss']: for heatmap_value in ['best_validation_loss', 'best_training_loss','trainable_param']: print('doing heatmap {}'.format(heatmap_value)) try: plotsAnalysis.HeatMapBVL(key_a, key_b, key_a + '_' + key_b + '_HeatMap',save_name=mother_dir + '_' + key_a + '_' + key_b + '_' + heatmap_value + '_heatmap.png', HeatMap_dir=mother_dir,feature_1_name=key_a,feature_2_name=key_b, heat_value_name=heatmap_value) except: print('the plotswipe does not work in {} and {} cross for {}'.format(key_a, key_b, heatmap_value)) if __name__ == '__main__': #pathnamelist = ['/scratch/sr365/ML_MM_Benchmark/Yang_temp/models/sweep4', # '/scratch/sr365/ML_MM_Benchmark/Transformer/models/sweep4']#, #'/scratch/sr365/ML_MM_Benchmark/Color_temp/models/sweep2'] #'/scratch/sr365/ML_MM_Benchmark/Yang_temp/models/lr_sweep'] #big_mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/encoder' #big_mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/sequence_len' #big_mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/MLP_complexity/' #for dirs in os.listdir(big_mother_dir): # mother_dir = os.path.join(big_mother_dir, dirs) # if os.path.isdir(mother_dir): # auto_swipe(mother_dir) auto_swipe()
AEML
/models/Transformer/utils/plot_swipe.py
plot_swipe.py
import glob import os import pandas as pd import numpy as np import sys sys.path.insert(-1, '/scratch/yd105/ML_MM_Benchmark') # Torch # Own import flag_reader from utils import data_reader from class_wrapper import Network from model_maker import Forward from utils.helper_functions import put_param_into_folder, write_flags_and_BVE def training_from_flag(flags): """ Training interface. 1. Read data 2. initialize network 3. train network 4. record flags :param flag: The training flags read from command line or parameter.py :return: None """ if flags.use_cpu_only: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # Get the data train_loader, test_loader = data_reader.read_data(flags) # Reset the boundary is normalized if flags.normalize_input: flags.geoboundary_norm = [-1, 1, -1, 1] print("Boundary is set at:", flags.geoboundary) print("Making network now") # Make Network ntwk = Network(Forward, flags, train_loader, test_loader) total_param = sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad) print("Total learning parameter is: %d"%total_param) # Training process print("Start training now...") ntwk.train() # Do the house keeping, write the parameters and put into folder, also use pickle to save the flags obejct write_flags_and_BVE(flags, ntwk.best_validation_loss, ntwk.ckpt_dir) # put_param_into_folder(ntwk.ckpt_dir) def importData(flags): # pull data into python, should be either for training set or eval set directory = os.path.join(flags.data_dir, 'Yang', 'dataIn') x_range = flags.x_range y_range = flags.y_range train_data_files = [] for file in os.listdir(os.path.join(directory)): if file.endswith('.csv'): train_data_files.append(file) print(train_data_files) # get data ftr = [] lbl = [] for file_name in train_data_files: # import full arrays print(x_range) ftr_array = pd.read_csv(os.path.join(directory, file_name), delimiter=',', header=None, usecols=x_range) lbl_array = pd.read_csv(os.path.join(directory, file_name), delimiter=',', header=None, usecols=y_range) # append each data point to ftr and lbl for params, curve in zip(ftr_array.values, lbl_array.values): ftr.append(params) lbl.append(curve) ftr = np.array(ftr, dtype='float32') lbl = np.array(lbl, dtype='float32') for i in range(len(ftr[0, :])): print('For feature {}, the max is {} and min is {}'.format(i, np.max(ftr[:, i]), np.min(ftr[:, i]))) print(ftr.shape, lbl.shape) np.savetxt('data_x.csv', ftr, delimiter=',') np.savetxt('data_y.csv', lbl, delimiter=',') return ftr, lbl def data_check(): xd = pd.read_csv('data_x.csv',delimiter=',', header=None) yd = pd.read_csv('data_y.csv',delimiter=',', header=None) x = xd.to_numpy() y = yd.to_numpy() print(x.shape, y.shape, x.dtype, y.dtype) return if __name__ == '__main__': # Read the parameters to be set flags = flag_reader.read_flag() # Call the train from flag function training_from_flag(flags)
AEML
/models/MLP/train.py
train.py
import argparse import pickle import os # Libs # Own module from parameters import * # Torch def read_flag(): """ This function is to write the read the flags from a parameter file and put them in formats :return: flags: a struct where all the input params are stored """ parser = argparse.ArgumentParser() parser.add_argument('--data-set', default=DATA_SET, type=str, help='which data set you are chosing') # Model Architectural Params parser.add_argument('--skip-connection', type=bool, default=SKIP_CONNECTION, help='The boolean flag indicates whether use skip connections') parser.add_argument('--use-conv', type=bool, default=USE_CONV, help='The boolean flag that indicate whether we use upconv layer if not using lorentz') parser.add_argument('--linear', type=list, default=LINEAR, help='The fc layers units') parser.add_argument('--conv-out-channel', type=list, default=CONV_OUT_CHANNEL, help='The output channel of your 1d conv') parser.add_argument('--conv-kernel-size', type=list, default=CONV_KERNEL_SIZE, help='The kernel size of your 1d conv') parser.add_argument('--conv-stride', type=list, default=CONV_STRIDE, help='The strides of your 1d conv') # Optimization params parser.add_argument('--optim', default=OPTIM, type=str, help='the type of optimizer that you want to use') parser.add_argument('--reg-scale', type=float, default=REG_SCALE, help='#scale for regularization of dense layers') parser.add_argument('--x-range', type=list, default=X_RANGE, help='columns of input parameters') parser.add_argument('--y-range', type=list, default=Y_RANGE, help='columns of output parameters') parser.add_argument('--batch-size', default=BATCH_SIZE, type=int, help='batch size (100)') parser.add_argument('--eval-step', default=EVAL_STEP, type=int, help='# steps between evaluations') parser.add_argument('--train-step', default=TRAIN_STEP, type=int, help='# steps to train on the dataSet') parser.add_argument('--lr', default=LEARN_RATE, type=float, help='learning rate') # parser.add_argument('--decay-step', default=DECAY_STEP, type=int, # help='decay learning rate at this number of steps') parser.add_argument('--lr-decay-rate', default=LR_DECAY_RATE, type=float, help='decay learn rate by multiplying this factor') parser.add_argument('--stop_threshold', default=STOP_THRESHOLD, type=float, help='The threshold below which training should stop') parser.add_argument('--dropout', default=DROPOUT, type=float, help='dropout rate') parser.add_argument('--skip_head', default=SKIP_HEAD, type=int, help='skip head') parser.add_argument('--skip_tail', default=SKIP_TAIL, type=list, help='skip tail') # Data specific Params parser.add_argument('--geoboundary', default=GEOBOUNDARY, type=tuple, help='the boundary of the geometric data') parser.add_argument('--data-dir', default=DATA_DIR, type=str, help='data directory') parser.add_argument('--normalize-input', default=NORMALIZE_INPUT, type=bool, help='whether we should normalize the input or not') parser.add_argument('--test-ratio', default=TEST_RATIO, type=float, help='the ratio of test case') parser.add_argument('--rand-seed', default=RAND_SEED, type=float, help='Random seed for train/val split') # Running specific parser.add_argument('--eval-model', default=EVAL_MODEL, type=str, help='the folder name of the model that you want to evaluate') parser.add_argument('--use-cpu-only', type=bool, default=USE_CPU_ONLY, help='The boolean flag that indicate use CPU only') parser.add_argument('--num-plot-compare', type=int, default=NUM_COM_PLOT_TENSORBOARD, help='#Plots to store in tensorboard during training for spectra compare') parser.add_argument('--model-name', default=MODEL_NAME, type=str, help='name of the model') flags = parser.parse_args() # This is for command line version of the code # flags = parser.parse_args(args = [])#This is for jupyter notebook version of the code # flagsVar = vars(flags) return flags def save_flags(flags, save_file="flags.obj"): """ This function serialize the flag object and save it for further retrieval during inference time :param flags: The flags object to save :param save_file: The place to save the file :return: None """ with open(save_file,'wb') as f: # Open the file pickle.dump(flags, f) # Use Pickle to serialize the object def load_flags(save_dir, save_file="flags.obj"): """ This function inflate the pickled object to flags object for reuse, typically during evaluation (after training) :param save_dir: The place where the obj is located :param save_file: The file name of the file, usually flags.obj :return: flags """ with open(os.path.join(save_dir, save_file), 'rb') as f: # Open the file flags = pickle.load(f) # Use pickle to inflate the obj back to RAM return flags def write_flags_and_BVE(flags, best_validation_loss): """ The function that is usually executed at the end of the training where the flags and the best validation loss are recorded They are put in the folder that called this function and save as "parameters.txt" This parameter.txt is also attached to the generated email :param flags: The flags struct containing all the parameters :param best_validation_loss: The best_validation_loss recorded in a training :return: None """ #To avoid terrible looking shape of y_range yrange = flags.y_range # yrange_str = str(yrange[0]) + ' to ' + str(yrange[-1]) yrange_str = [yrange[0], yrange[-1]] flags_dict = vars(flags) flags_dict_copy = flags_dict.copy() # in order to not corrupt the original data strucutre flags_dict_copy['y_range'] = yrange_str # Change the y range to be acceptable long string flags_dict_copy['best_validation_loss'] = best_validation_loss #Append the bvl # Convert the dictionary into pandas data frame which is easier to handle with and write read print(flags_dict_copy) with open('parameters.txt','w') as f: print(flags_dict_copy, file = f ) # Pickle the obj save_flags(flags)
AEML
/models/MLP/flag_reader.py
flag_reader.py
import os import sys sys.path.insert(0, '/scratch/yd105/ML_MM_Benchmark') #Torch import numpy as np # Own import flag_reader from class_wrapper import Network from model_maker import Forward from utils import data_reader from utils.evaluation_helper import plotMSELossDistrib # Libs def evaluate_from_model(model_dir): """ Evaluating interface. 1. Retreive the flags 2. get data 3. initialize network 4. eval :param model_dir: The folder to retrieve the model :return: None """ # Retrieve the flag object if (model_dir.startswith("models")): model_dir = model_dir[7:] print("after removing prefix models/, now model_dir is:", model_dir) print("Retrieving flag object for parameters") print(model_dir) flags = flag_reader.load_flags(os.path.join("models", model_dir)) flags.eval_model = model_dir # Reset the eval mode #flags.x_range = [i for i in range(0, 14)] #flags.y_range = [i for i in range(14, 2014)] flags.skip_connection = True flags.test_ratio = 0 # Get the data train_loader, test_loader = data_reader.read_data(flags) print("Making network now") # Make Network ntwk = Network(Forward, flags, train_loader, test_loader, inference_mode=True, saved_model=flags.eval_model) # Evaluation process print("Start eval now:") pred_file, truth_file = ntwk.evaluate() # Plot the MSE distribution plotMSELossDistrib(pred_file, truth_file, flags) print("Evaluation finished") def evaluate_all(models_dir="models"): """ This function evaluate all the models in the models/. directory :return: None """ for file in os.listdir(models_dir): if os.path.isfile(os.path.join(models_dir, file, 'flags.obj')): evaluate_from_model(os.path.join(models_dir, file)) return None if __name__ == '__main__': # Read the flag, however only the flags.eval_model is used and others are not used useless_flags = flag_reader.read_flag() print(useless_flags.eval_model) # Call the evaluate function from model evaluate_from_model(useless_flags.eval_model)
AEML
/models/MLP/evaluate.py
evaluate.py
import os import time # Torch import torch from torch import nn from torch.utils.tensorboard import SummaryWriter # from torchsummary import summary from torch.optim import lr_scheduler # Libs import numpy as np from math import inf import pandas as pd # Own module from AEML.data.loader import get_data_into_loaders_only_x, get_test_data_into_loaders from AEML.models.MLP.model_maker import Forward from AEML.models.MLP.utils.evaluation_helper import plotMSELossDistrib from AEML.models.MLP.utils.time_recorder import time_keeper class Network(object): def __init__(self, dim_g, dim_s, linear=[500, 500, 500, 500, 500, 500], skip_connection=False, skip_head=0, dropout=0, model_name=None, ckpt_dir=os.path.join(os.path.abspath(''), 'models','MLP'), inference_mode=False, saved_model=None): linear[0] = dim_g linear[-1] = dim_s if inference_mode: # If inference mode, use saved model self.ckpt_dir = os.path.join(ckpt_dir, saved_model) self.saved_model = saved_model print("This is inference mode, the ckpt is", self.ckpt_dir) else: # training mode, create a new ckpt folder if model_name is None: self.ckpt_dir = os.path.join(ckpt_dir, time.strftime('%Y%m%d_%H%M%S', time.localtime())) else: self.ckpt_dir = os.path.join(ckpt_dir, model_name) self.log = SummaryWriter(self.ckpt_dir) # Create a summary writer for keeping the summary to the tensor board self.best_validation_loss = float('inf') # Set the BVL to large number self.best_training_loss = float('inf') # Set the BTL to large number # marking the flag object with these information class FlagsObject(object): pass flags = FlagsObject() field_list = ['dim_g', 'dim_s', 'linear', 'skip_connection', 'skip_head', 'dropout'] for field in field_list: setattr(flags, field, eval(field)) # flags.dim_g, flags.dim_s, flags.feature_channel_num, flags.nhead_encoder, flags.dim_fc_encoder, # flags.num_encoder_layer, flags.head_linear, flags.tail_linear, flags.sequence_length, # flags.model_name = dim_g, dim_s, feature_channel_num, nhead_encoder, dim_fc_encoder,num_encoder_layer, # head_linear, tail_linear, sequence_length, model_name self.flags = flags self.model = self.create_model() def create_model(self): """ Function to create the network module :return: the created nn module """ model = Forward(self.flags) print(model) return model def make_loss(self, logit=None, labels=None, G=None): """ Create a tensor that represents the loss. This is consistant both at training time \ and inference time for Backward model :param logit: The output of the network, the predicted geometry :param labels: The ground truth labels, the Truth geometry :param boundary: The boolean flag indicating adding boundary loss or not :param z_log_var: The log variance for VAE kl_loss :param z_mean: The z mean vector for VAE kl_loss :return: the total loss """ MSE_loss = nn.functional.mse_loss(logit, labels, reduction='mean') # The MSE Loss BDY_loss = 0 if G is not None: # This is using the boundary loss X_range, X_lower_bound, X_upper_bound = self.get_boundary_lower_bound_uper_bound() X_mean = (X_lower_bound + X_upper_bound) / 2 # Get the mean relu = torch.nn.ReLU() BDY_loss_all = 1 * relu(torch.abs(G - self.build_tensor(X_mean)) - 0.5 * self.build_tensor(X_range)) self.MSE_loss = MSE_loss self.Boundary_loss = BDY_loss return torch.add(MSE_loss, BDY_loss) def make_optimizer(self, optim, lr, reg_scale): """ Make the corresponding optimizer from the Only below optimizers are allowed. Welcome to add more :return: """ if optim == 'Adam': op = torch.optim.Adam(self.model.parameters(), lr=lr, weight_decay=reg_scale) elif optim == 'RMSprop': op = torch.optim.RMSprop(self.model.parameters(), lr=lr, weight_decay=reg_scale) elif optim == 'SGD': op = torch.optim.SGD(self.model.parameters(), lr=lr, weight_decay=reg_scale) else: raise Exception("Your Optimizer is neither Adam, RMSprop or SGD, please change in param or contact Ben") return op def make_lr_scheduler(self, optm, lr_scheduler_name, lr_decay_rate, warm_restart_T_0=50): """ Make the learning rate scheduler as instructed. More modes can be added to this, current supported ones: 1. ReduceLROnPlateau (decrease lr when validation error stops improving :return: """ if lr_scheduler_name == 'warm_restart': return lr_scheduler.CosineAnnealingWarmRestarts(optm, warm_restart_T_0, T_mult=1, eta_min=0, last_epoch=-1, verbose=False) elif lr_scheduler_name == 'reduce_plateau': return lr_scheduler.ReduceLROnPlateau(optimizer=optm, mode='min', factor=lr_decay_rate, patience=10, verbose=True, threshold=1e-4) def save(self): """ Saving the model to the current check point folder with name best_model_forward.pt :return: None """ # torch.save(self.model.state_dict, os.path.join(self.ckpt_dir, 'best_model_state_dict.pt')) torch.save(self.model, os.path.join(self.ckpt_dir, 'best_model_forward.pt')) def load_model(self, pre_trained_model=None, model_directory=None): """ Loading the model from the check point folder with name best_model_forward.pt :return: """ if pre_trained_model is None: # Loading the trained model if model_directory is None: model_directory = self.ckpt_dir # self.model.load_state_dict(torch.load(os.path.join(self.ckpt_dir, 'best_model_state_dict.pt'))) self.model = torch.load(os.path.join(model_directory, 'best_model_forward.pt')) print("You have successfully loaded the model from ", model_directory) else: # Loading the pretrained model from the internet print("You have successfully loaded the pretrained model for ", pre_trained_model) return 0 def train_(self, train_loader, test_loader, epochs=500, optm='Adam', weight_decay=1e-4, lr=1e-4, lr_scheduler_name=None, lr_decay_rate=0.2, eval_step=10, stop_threshold=1e-7): """ The major training function. This would start the training using information given in the flags :return: None """ print("Starting training now") cuda = True if torch.cuda.is_available() else False if cuda: self.model.cuda() # Construct optimizer after the model moved to GPU self.optm = self.make_optimizer(optm, lr, weight_decay) if lr_scheduler_name is not None: self.lr_scheduler = self.make_lr_scheduler(self.optm, lr_scheduler_name, lr_decay_rate) for epoch in range(epochs): # Set to Training Mode train_loss = 0 self.model.train() for j, (geometry, spectra) in enumerate(train_loader): if cuda: geometry = geometry.cuda() # Put data onto GPU spectra = spectra.cuda() # Put data onto GPU self.optm.zero_grad() # Zero the gradient first S_pred = self.model(geometry) loss = self.make_loss(logit=S_pred, labels=spectra) loss.backward() # Calculate the backward gradients self.optm.step() # Move one step the optimizer train_loss += loss.cpu().data.numpy() # Aggregate the loss del S_pred, loss # Calculate the avg loss of training train_avg_loss = train_loss / (j + 1) # Recording the best training loss if train_avg_loss < self.best_training_loss: self.best_training_loss = train_avg_loss if epoch % eval_step == 0: # For eval steps, do the evaluations and tensor board # Record the training loss to the tensorboard self.log.add_scalar('Loss/total_train', train_avg_loss, epoch) # Set to Evaluation Mode self.model.eval() print("Doing Evaluation on the model now") test_loss = 0 for j, (geometry, spectra) in enumerate(test_loader): # Loop through the eval set if cuda: geometry = geometry.cuda() spectra = spectra.cuda() S_pred = self.model(geometry) loss = self.make_loss(logit=S_pred, labels=spectra) test_loss += loss.cpu().data.numpy() # Aggregate the loss del loss, S_pred # Record the testing loss to the tensorboard test_avg_loss = test_loss/ (j+1) self.log.add_scalar('Loss/total_test', test_avg_loss, epoch) print("This is Epoch %d, training loss %.5f, validation loss %.5f" \ % (epoch, train_avg_loss, test_avg_loss )) # Model improving, save the model down if test_avg_loss < self.best_validation_loss: self.best_validation_loss = test_avg_loss print("Saving the model down...") self.save() if self.best_validation_loss < stop_threshold: print("Training finished EARLIER at epoch %d, reaching loss of %.5f" %\ (epoch, self.best_validation_loss)) break # Learning rate decay upon plateau if lr_scheduler_name is not None: self.lr_scheduler.step(train_avg_loss) def __call__(self, test_X, batch_size=512): """ This is to call this model to do testing, :param: test_X: The testing X to be input to the model """ # put model to GPU if cuda available cuda = True if torch.cuda.is_available() else False if cuda: self.model.cuda() # Converting the numpy into cuda #if isinstance(test_X, np.ndarray): # print('your input is an numpy array, converting to an tensor for you') # test_X = torch.tensor(test_X).cuda() # Make the model to eval mode self.model.eval() # Preparing for eval Ypred = None test_loader = get_data_into_loaders_only_x(test_X) # Partitioning the model output into small batches to avoid RAM overflow for j, geometry in enumerate(test_loader): # Loop through the eval set if cuda: geometry = geometry.cuda() # output the Ypred Ypred_batch = self.model(geometry).cpu().data.numpy() if Ypred is None: Ypred = Ypred_batch else: Ypred = np.concatenate([Ypred, Ypred_batch], axis=0) print('Inference finished, result in ypred shape', np.shape(Ypred)) return Ypred def evaluate(self, test_x, test_y, save_output=False, save_dir='data/', prefix=''): # Make sure there is a place for the evaluation if not os.path.isdir(save_dir): os.makedirs(save_dir) # Put things on conda cuda = True if torch.cuda.is_available() else False if cuda: self.model.cuda() # Set to evaluation mode for batch_norm layers self.model.eval() saved_model_str = prefix if save_output: # Get the file names Ypred_file = os.path.join(save_dir, 'test_Ypred_{}.csv'.format(saved_model_str)) Xtruth_file = os.path.join(save_dir, 'test_Xtruth_{}.csv'.format(saved_model_str)) Ytruth_file = os.path.join(save_dir, 'test_Ytruth_{}.csv'.format(saved_model_str)) tk = time_keeper(os.path.join(save_dir, 'evaluation_time.txt')) test_loader = get_test_data_into_loaders(test_x, test_y) # Open those files to append with open(Xtruth_file, 'a') as fxt,open(Ytruth_file, 'a') as fyt,\ open(Ypred_file, 'a') as fyp: for j, (geometry, spectra) in enumerate(test_loader): if cuda: geometry = geometry.cuda() spectra = spectra.cuda() Ypred = self.model(geometry).cpu().data.numpy() np.savetxt(fxt, geometry.cpu().data.numpy()) np.savetxt(fyt, spectra.cpu().data.numpy()) np.savetxt(fyp, Ypred) tk.record(1) # Record the total time of the eval period MSE = plotMSELossDistrib(Ypred_file, Ytruth_file) else: test_loader = get_test_data_into_loaders(test_x, test_y) truth = np.array([]) pred = np.array([]) for j, (geometry, spectra) in enumerate(test_loader): if cuda: geometry = geometry.cuda() spectra = spectra.cuda() Ypred = self.model(geometry).cpu().data.numpy() truth = np.append(truth, spectra.cpu().data.numpy()) pred = np.append(pred, Ypred) MSE = np.mean(np.square(truth-pred), axis=0) print(MSE) return MSE
AEML
/models/MLP/class_wrapper.py
class_wrapper.py
# Built in import math # Libs import numpy as np # Pytorch module import torch.nn as nn import torch.nn.functional as F import torch from torch import pow, add, mul, div, sqrt class Forward(nn.Module): def __init__(self, flags): super(Forward, self).__init__() self.skip_connection = flags.skip_connection if flags.dropout > 0: self.dp = True self.dropout = nn.Dropout(p=flags.dropout) else: self.dp = False self.skip_head = flags.skip_head """ General layer definitions: """ # Linear Layer and Batch_norm Layer definitions here self.linears = nn.ModuleList([]) self.bn_linears = nn.ModuleList([]) #self.dropout = nn.ModuleList([]) #Dropout layer was tested for fixing overfitting problem for ind, fc_num in enumerate(flags.linear[0:-1]): # Excluding the last one as we need intervals self.linears.append(nn.Linear(fc_num, flags.linear[ind + 1])) self.bn_linears.append(nn.BatchNorm1d(flags.linear[ind + 1])) #self.dropout.append(nn.Dropout(p=0.05)) def forward(self, G): """ The forward function which defines how the network is connected :param G: The input geometry (Since this is a forward network) :return: S: Spectrum outputs """ out = G # initialize the out # For the linear part for ind, (fc, bn) in enumerate(zip(self.linears, self.bn_linears)): #print(out.size() if self.skip_connection: if ind < len(self.linears) - 1: if ind == self.skip_head: out = F.relu(bn(fc(out))) if self.dp: out = self.dropout(out) identity = out elif ind > self.skip_head and (ind - self.skip_head)%2 == 0: out = F.relu(bn(fc(out))) # ReLU + BN + Linear if self.dp: out = self.dropout(out) out += identity identity = out else: out = F.relu(bn(fc(out))) if self.dp: out = self.dropout(out) else: out = (fc(out)) else: if ind < len(self.linears) - 1: out = F.relu(bn(fc(out))) else: out = fc(out) return out
AEML
/models/MLP/model_maker.py
model_maker.py
import os import shutil from copy import deepcopy import sys import pickle import numpy as np # from ensemble_mm.predict_ensemble import ensemble_predict_master # 1 def get_Xpred(path, name=None): """ Get certain predicion or truth numpy array from path, with name of model specified. If there is no name specified, return the first found such array :param path: str, the path for which to search :param name: str, the name of the model to find :return: np array """ out_file = None if name is not None: name = name.replace('/','_') for filename in os.listdir(path): if ("Xpred" in filename): if name is None: out_file = filename print("Xpred File found", filename) break else: if (name in filename): out_file = filename break assert out_file is not None, "Your Xpred model did not found" + name return np.loadtxt(os.path.join(path,out_file)) # 2 def get_Ypred(path, name=None): """ Get certain predicion or truth numpy array from path, with name of model specified. If there is no name specified, return the first found such array :param path: str, the path for which to search :param name: str, the name of the model to find :return: np array """ out_file = None name = name.replace('/','_') for filename in os.listdir(path): if ("Ypred" in filename): if name is None: out_file = filename print("Ypred File found", filename) break else: if (name in filename): out_file = filename break assert out_file is not None, "Your Xpred model did not found" + name return np.loadtxt(os.path.join(path,out_file)) # 3 def get_Xtruth(path, name=None): """ Get certain predicion or truth numpy array from path, with name of model specified. If there is no name specified, return the first found such array :param path: str, the path for which to search :param name: str, the name of the model to find :return: np array """ out_file = None if name is not None: name = name.replace('/','_') for filename in os.listdir(path): if ("Xtruth" in filename): if name is None: out_file = filename print("Xtruth File found", filename) break else: if (name in filename): out_file = filename break assert out_file is not None, "Your Xpred model did not found" + name return np.loadtxt(os.path.join(path,out_file)) # 4 def get_Ytruth(path, name=None): """ Get certain predicion or truth numpy array from path, with name of model specified. If there is no name specified, return the first found such array :param path: str, the path for which to search :param name: str, the name of the model to find :return: np array """ out_file = None name = name.replace('/','_') for filename in os.listdir(path): if ("Ytruth" in filename): if name is None: out_file = filename print("Ytruth File found", filename) break else: if (name in filename): out_file = filename break assert out_file is not None, "Your Xpred model did not found" + name return np.loadtxt(os.path.join(path,out_file)) # 5 def put_param_into_folder(ckpt_dir): """ Put the parameter.txt into the folder and the flags.obj as well :return: None """ """ Old version of finding the latest changing file, deprecated # list_of_files = glob.glob('models/*') # Use glob to list the dirs in models/ # latest_file = max(list_of_files, key=os.path.getctime) # Find the latest file (just trained) # print("The parameter.txt is put into folder " + latest_file) # Print to confirm the filename """ # Move the parameters.txt destination = os.path.join(ckpt_dir, "parameters.txt"); shutil.move("parameters.txt", destination) # Move the flags.obj destination = os.path.join(ckpt_dir, "flags.obj"); shutil.move("flags.obj", destination) # 6 def save_flags(flags, save_dir, save_file="flags.obj"): """ This function serialize the flag object and save it for further retrieval during inference time :param flags: The flags object to save :param save_file: The place to save the file :return: None """ with open(os.path.join(save_dir, save_file),'wb') as f: # Open the file pickle.dump(flags, f) # Use Pickle to serialize the object # 7 def load_flags(save_dir, save_file="flags.obj"): """ This function inflate the pickled object to flags object for reuse, typically during evaluation (after training) :param save_dir: The place where the obj is located :param save_file: The file name of the file, usually flags.obj :return: flags """ with open(os.path.join(save_dir, save_file), 'rb') as f: # Open the file flags = pickle.load(f) # Use pickle to inflate the obj back to RAM return flags # 8 def write_flags_and_BVE(flags, ntwk, forward_best_loss=None): """ The function that is usually executed at the end of the training where the flags and the best validation loss are recorded They are put in the folder that called this function and save as "parameters.txt" This parameter.txt is also attached to the generated email :param flags: The flags struct containing all the parameters :param ntwk: The network object that contains the bvl, save_dir :return: None Deprecated parameters: :param best_validation_loss: The best_validation_loss recorded in a training :param forard_best_loss: The forward best loss only applicable for Tandem model """ flags.best_validation_loss = ntwk.best_validation_loss try: flags.best_training_loss = ntwk.best_training_loss except: print("There is no training loss, this is an older version of the coder") if forward_best_loss is not None: flags.best_forward_validation_loss = forward_best_loss copy_flags = deepcopy(flags) flags_dict = vars(copy_flags) # Convert the dictionary into pandas data frame which is easier to handle with and write read with open(os.path.join(ntwk.ckpt_dir, 'parameters.txt'), 'w') as f: print(flags_dict, file=f) # Pickle the obj save_flags(flags, save_dir=ntwk.ckpt_dir) # 16 def normalize_eval(x, x_max, x_min): """ Normalize the x into [-1, 1] range in each dimension [:, i] :param x: np array to be normalized :return: normalized np array """ for i in range(len(x[0])): x_range = (x_max - x_min ) /2. x_avg = (x_max + x_min) / 2. x[:, i] = (x[:, i] - x_avg) / x_range return x # 17 def unnormalize_eval(x, x_max, x_min): """ UnNormalize the x into [-1, 1] range in each dimension [:, i] :param x: np array to be normalized :return: normalized np array """ for i in range(len(x[0])): x_range = (x_max - x_min ) /2. x_avg = (x_max + x_min) / 2. x[:, i] = x[:, i] * x_range + x_avg return x
AEML
/models/MLP/utils/helper_functions.py
helper_functions.py
import os import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import pandas as pd import seaborn as sns; sns.set() from utils import helper_functions from utils.evaluation_helper import compare_truth_pred from sklearn.neighbors import NearestNeighbors from pandas.plotting import table from scipy.spatial import distance_matrix from scipy.sparse import csr_matrix from scipy.sparse.csgraph import minimum_spanning_tree from matplotlib.lines import Line2D def InferenceAccuracyExamplePlot(model_name, save_name, title, sample_num=10, fig_size=(15,5), random_seed=1, target_region=[0,300 ]): """ The function to plot the Inference accuracy and compare with FFDS algorithm. It takes the Ypred and Ytruth file as input and plot the first <sample_num> of spectras. It also takes a random of 10 points to at as the target points. :param model_name: The model name as the postfix for the Ytruth file :param save_name: The saving name of the figure :param title: The saving title of the figure :param sample_num: The number of sample to plot for comparison :param fig_size: The size of the figure :param random_seed: The random seed value :param target_region: The region that the targets get :return: """ # Get the prediction and truth file first Ytruth_file = os.path.join('data','test_Ytruth_{}.csv'.format(model_name)) Ypred_file = os.path.join('data','test_Ypred_{}.csv'.format(model_name)) Ytruth = pd.read_csv(Ytruth_file, header=None, delimiter=' ').values Ypred = pd.read_csv(Ypred_file, header=None, delimiter=' ').values # Draw uniform random distribution for the reference points np.random.seed(random_seed) # To make sure each time we have same target points targets = target_region[0] + (target_region[1] - target_region[0]) * np.random.uniform(low=0, high=1, size=10) # Cap the random numbers within 0-299 targets = targets.astype("int") # Make the frequency into real frequency in THz fre_low = 0.86 fre_high = 1.5 frequency = fre_low + (fre_high - fre_low)/len(Ytruth[0, :]) * np.arange(300) for i in range(sample_num): # Start the plotting f = plt.figure(figsize=fig_size) plt.title(title) plt.scatter(frequency[targets], Ytruth[i,targets], label='S*') plt.plot(frequency, Ytruth[i,:], label='FFDS') plt.plot(frequency, Ypred[i,:], label='Candidate') plt.legend() plt.ylim([0,1]) plt.xlim([fre_low, fre_high]) plt.grid() plt.xlabel("Frequency (THz)") plt.ylabel("Transmittance") plt.savefig(os.path.join('data',save_name + str(i) + '.png')) def RetrieveFeaturePredictionNMse(model_name): """ Retrieve the Feature and Prediciton values and place in a np array :param model_name: the name of the model return Xtruth, Xpred, Ytruth, Ypred """ # Retrieve the prediction and truth and prediction first feature_file = os.path.join('data', 'test_Xtruth_{}.csv'.format(model_name)) pred_file = os.path.join('data', 'test_Ypred_{}.csv'.format(model_name)) truth_file = os.path.join('data', 'test_Ytruth_{}.csv'.format(model_name)) feat_file = os.path.join('data', 'test_Xpred_{}.csv'.format(model_name)) # Getting the files from file name Xtruth = pd.read_csv(feature_file,header=None, delimiter=' ') Xpred = pd.read_csv(feat_file,header=None, delimiter=' ') Ytruth = pd.read_csv(truth_file,header=None, delimiter=' ') Ypred = pd.read_csv(pred_file,header=None, delimiter=' ') #retrieve mse, mae Ymae, Ymse = compare_truth_pred(pred_file, truth_file) #get the maes of y print(Xtruth.shape) return Xtruth.values, Xpred.values, Ytruth.values, Ypred.values, Ymae, Ymse def ImportColorBarLib(): """ Import some libraries that used in a colorbar plot """ import matplotlib.colors as colors import matplotlib.cm as cmx from matplotlib.collections import LineCollection from matplotlib.colors import ListedColormap, BoundaryNorm import matplotlib as mpl print("import sucessful") return mpl def UniqueMarkers(): import itertools markers = itertools.cycle(( 'x','1','+', '.', '*','D','v','h')) return markers def SpectrumComparisonNGeometryComparison(rownum, colnum, Figsize, model_name, boundary = [-1,1,-1,1]): """ Read the Prediction files and plot the spectra comparison plots :param SubplotArray: 2x2 array indicating the arrangement of the subplots :param Figsize: the size of the figure :param Figname: the name of the figures to save :param model_name: model name (typically a list of numebr containing date and time) """ mpl = ImportColorBarLib() #import lib Xtruth, Xpred, Ytruth, Ypred, Ymae, Ymse = RetrieveFeaturePredictionNMse(model_name) #retrieve features print("Ymse shape:",Ymse.shape) print("Xpred shape:", Xpred.shape) print("Xtrth shape:", Xtruth.shape) #Plotting the spectrum comaprison f = plt.figure(figsize=Figsize) fignum = rownum * colnum for i in range(fignum): ax = plt.subplot(rownum, colnum, i+1) plt.ylabel('Transmission rate') plt.xlabel('frequency') plt.plot(Ytruth[i], label = 'Truth',linestyle = '--') plt.plot(Ypred[i], label = 'Prediction',linestyle = '-') plt.legend() plt.ylim([0,1]) f.savefig('Spectrum Comparison_{}'.format(model_name)) """ Plotting the geometry comparsion, there are fignum points in each plot each representing a data point with a unique marker 8 dimension therefore 4 plots, 2x2 arrangement """ #for j in range(fignum): pointnum = fignum #change #fig to #points in comparison f = plt.figure(figsize = Figsize) ax0 = plt.gca() for i in range(4): truthmarkers = UniqueMarkers() #Get some unique markers predmarkers = UniqueMarkers() #Get some unique markers ax = plt.subplot(2, 2, i+1) #plt.xlim([29,56]) #setting the heights limit, abandoned because sometime can't see prediciton #plt.ylim([41,53]) #setting the radius limits for j in range(pointnum): #Since the colored scatter only takes 2+ arguments, plot 2 same points to circumvent this problem predArr = [[Xpred[j, i], Xpred[j, i]] ,[Xpred[j, i + 4], Xpred[j, i + 4]]] predC = [Ymse[j], Ymse[j]] truthplot = plt.scatter(Xtruth[j,i],Xtruth[j,i+4],label = 'Xtruth{}'.format(j), marker = next(truthmarkers),c = 'm',s = 40) predplot = plt.scatter(predArr[0],predArr[1],label = 'Xpred{}'.format(j), c =predC ,cmap = 'jet',marker = next(predmarkers), s = 60) plt.xlabel('h{}'.format(i)) plt.ylabel('r{}'.format(i)) rect = mpl.patches.Rectangle((boundary[0],boundary[2]),boundary[1] - boundary[0], boundary[3] - boundary[2], linewidth=1,edgecolor='r', facecolor='none',linestyle = '--',label = 'data region') ax.add_patch(rect) plt.autoscale() plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), mode="expand",ncol = 6, prop={'size': 5})#, bbox_to_anchor=(1,0.5)) cb_ax = f.add_axes([0.93, 0.1, 0.02, 0.8]) cbar = f.colorbar(predplot, cax=cb_ax) #f.colorbar(predplot) f.savefig('Geometry Comparison_{}'.format(model_name)) class HMpoint(object): """ This is a HeatMap point class where each object is a point in the heat map properties: 1. BV_loss: best_validation_loss of this run 2. feature_1: feature_1 value 3. feature_2: feature_2 value, none is there is no feature 2 """ def __init__(self, bv_loss, f1, f2 = None, f1_name = 'f1', f2_name = 'f2'): self.bv_loss = bv_loss self.feature_1 = f1 self.feature_2 = f2 self.f1_name = f1_name self.f2_name = f2_name #print(type(f1)) def to_dict(self): return { self.f1_name: self.feature_1, self.f2_name: self.feature_2, self.bv_loss: self.bv_loss } def HeatMapBVL(plot_x_name, plot_y_name, title, save_name='HeatMap.png', HeatMap_dir = 'HeatMap', feature_1_name=None, feature_2_name=None, heat_value_name = 'best_validation_loss'): """ Plotting a HeatMap of the Best Validation Loss for a batch of hyperswiping thing First, copy those models to a folder called "HeatMap" Algorithm: Loop through the directory using os.look and find the parameters.txt files that stores the :param HeatMap_dir: The directory where the checkpoint folders containing the parameters.txt files are located :param feature_1_name: The name of the first feature that you would like to plot on the feature map :param feature_2_name: If you only want to draw the heatmap using 1 single dimension, just leave it as None """ one_dimension_flag = False #indication flag of whether it is a 1d or 2d plot to plot #Check the data integrity if (feature_1_name == None): print("Please specify the feature that you want to plot the heatmap"); return if (feature_2_name == None): one_dimension_flag = True print("You are plotting feature map with only one feature, plotting loss curve instead") #Get all the parameters.txt running related data and make HMpoint objects HMpoint_list = [] df_list = [] #make a list of data frame for further use print("going through folder: ", HeatMap_dir) for subdir, dirs, files in os.walk(HeatMap_dir): for file_name in files: #print("passing file-name:", file_name) if (file_name == 'parameters.txt'): file_path = os.path.join(subdir, file_name) #Get the file relative path from # df = pd.read_csv(file_path, index_col=0) flag = helper_functions.load_flags(subdir) flag_dict = vars(flag) df = pd.DataFrame() for k in flag_dict: df[k] = pd.Series(str(flag_dict[k]), index=[0]) #print(df) if (one_dimension_flag): df_list.append(df[[heat_value_name, feature_1_name]]) HMpoint_list.append(HMpoint(float(df[heat_value_name][0]), eval(str(df[feature_1_name][0])), f1_name = feature_1_name)) else: if feature_2_name == 'linear_unit' or feature_1_name =='linear_unit': # If comparing different linear units print('You are plotting versus linear unit') # linear_unit has always need to be at the feature_2 and either from linear or linear_f, # If you want to test for linear_b for Tandem, make sure you modify manually here try: df['linear_unit'] = eval(df['linear'][0])[1] except: try: df['linear_unit'] = eval(df['head_linear'][0])[1] except: df['linear_unit'] = eval(df['tail_linear'][0])[1] #df['best_validation_loss'] = get_bvl(file_path) if feature_2_name == 'kernel_second': # If comparing different kernel convs #print(df['conv_kernel_size']) #print(type(df['conv_kernel_size'])) df['kernel_second'] = eval(df['conv_kernel_size'][0])[1] df['kernel_first'] = eval(df['conv_kernel_size'][0])[0] df_list.append(df[[heat_value_name, feature_1_name, feature_2_name]]) HMpoint_list.append(HMpoint(float(df[heat_value_name][0]),eval(str(df[feature_1_name][0])), eval(str(df[feature_2_name][0])), feature_1_name, feature_2_name)) #print("df_list =", df_list) if len(df_list) == 0: print("Your df list is empty, which means you probably mis-spelled the folder name or your folder does not have any parameters.txt?") #Concatenate all the dfs into a single aggregate one for 2 dimensional usee df_aggregate = pd.concat(df_list, ignore_index = True, sort = False) df_aggregate = df_aggregate.astype({heat_value_name: 'float'}) print("before transformation:", df_aggregate) [h, w] = df_aggregate.shape #print('df_aggregate has shape {}, {}'.format(h, w)) # making the 2d ones with list to be the lenghth (num_layers) for i in range(h): for j in range(w): #print('debugging for nan: ', df_aggregate.iloc[i,j]) if isinstance(df_aggregate.iloc[i,j], str) and 'nan' not in df_aggregate.iloc[i,j]: if isinstance(eval(df_aggregate.iloc[i,j]), list): df_aggregate.iloc[i,j] = len(eval(df_aggregate.iloc[i,j])) # If the grid is random (making too sparse a signal), aggregate them # The signature of a random grid is the unique value of rows for feature is very large if len(np.unique(df_aggregate.values[:, -1])) > 0.95 * h and False: # If the number of unique features is more than 80%, this is random print('This is probably a randomly selected trail? check if not!!!') df_aggregate = df_aggregate.astype('float') num_bins = 5 # Put all random values into 5 bins num_items = int(np.floor(h/num_bins)) # each bins have num_item numbers inside, last one being more feature_1_value_list = df_aggregate.values[:, -1] # Get the values feature_2_value_list = df_aggregate.values[:, -2] feature_1_order = np.argsort(feature_1_value_list) # Get the order feature_2_order = np.argsort(feature_2_value_list) for i in range(num_bins): if i != num_bins - 1: df_aggregate.iloc[feature_1_order[i*num_items: (i+1)*num_items], -1] = df_aggregate.iloc[feature_1_order[i*num_items], -1] df_aggregate.iloc[feature_2_order[i*num_items: (i+1)*num_items], -2] = df_aggregate.iloc[feature_2_order[i*num_items], -2] else: df_aggregate.iloc[feature_1_order[i*num_items: ], -1] = df_aggregate.iloc[feature_1_order[i*num_items], -1] df_aggregate.iloc[feature_2_order[i*num_items: ], -2] = df_aggregate.iloc[feature_2_order[i*num_items], -2] #print('type of last number of df_aggregate is', type(df_aggregate.iloc[-1, -1])) ######################################################################################################## #df_aggregate.iloc[:, df.columns != heat_value_name] = df_aggregate.iloc[:, df.columns != heat_value_name].round(decimals=3) ######################################################################################################## #print("after transoformation:",df_aggregate) #Change the feature if it is a tuple, change to length of it for cnt, point in enumerate(HMpoint_list): #print("For point {} , it has {} loss, {} for feature 1 and {} for feature 2".format(cnt, # point.bv_loss, point.feature_1, point.feature_2)) assert(isinstance(point.bv_loss, float)) #make sure this is a floating number if (isinstance(point.feature_1, tuple)): point.feature_1 = len(point.feature_1) if (isinstance(point.feature_2, tuple)): point.feature_2 = len(point.feature_2) f = plt.figure() #After we get the full list of HMpoint object, we can start drawing if (feature_2_name == None): print("plotting 1 dimension HeatMap (which is actually a line)") HMpoint_list_sorted = sorted(HMpoint_list, key = lambda x: x.feature_1) #Get the 2 lists of plot bv_loss_list = [] feature_1_list = [] for point in HMpoint_list_sorted: bv_loss_list.append(point.bv_loss) feature_1_list.append(point.feature_1) print("bv_loss_list:", bv_loss_list) print("feature_1_list:",feature_1_list) #start plotting plt.plot(feature_1_list, bv_loss_list,'o-') else: #Or this is a 2 dimension HeatMap print("plotting 2 dimension HeatMap") #point_df = pd.DataFrame.from_records([point.to_dict() for point in HMpoint_list]) df_aggregate = df_aggregate.round(decimals=9) df_aggregate = df_aggregate.reset_index() df_aggregate.sort_values(feature_1_name, axis=0, inplace=True) df_aggregate.sort_values(feature_2_name, axis=0, inplace=True) df_aggregate.sort_values(heat_value_name, axis=0, inplace=True) print("before dropping", df_aggregate) df_aggregate = df_aggregate.drop_duplicates(subset=[feature_1_name, feature_2_name], keep='first') print("after dropping", df_aggregate) point_df_pivot = df_aggregate.reset_index().pivot(index=feature_1_name, columns=feature_2_name, values=heat_value_name).astype(float) #point_df_pivot = point_df_pivot.rename({'5': '05'}, axis=1) # Trying to sort the indexs point_df_pivot.sort_index(axis=0, inplace=True, key=lambda x: x.astype(float)) point_df_pivot.sort_index(axis=1, inplace=True, key=lambda x: x.astype(float)) #point_df_pivot = point_df_pivot.reindex(sorted(point_df_pivot.columns), axis=1) print("pivot=") csvname = HeatMap_dir + 'pivoted.csv' point_df_pivot.to_csv(csvname, float_format="%.3g") print(point_df_pivot) sns.heatmap(point_df_pivot, cmap = "YlGnBu") plt.xlabel(plot_y_name) # Note that the pivot gives reversing labels plt.ylabel(plot_x_name) # Note that the pivot gives reversing labels plt.title(title) plt.savefig(save_name) point_df_pivot.to_csv(save_name.replace('.png','.csv')) def PlotPossibleGeoSpace(figname, Xpred_dir, compare_original = False,calculate_diversity = None): """ Function to plot the possible geometry space for a model evaluation result. It reads from Xpred_dir folder and finds the Xpred result insdie and plot that result :params figname: The name of the figure to save :params Xpred_dir: The directory to look for Xpred file which is the source of plotting :output A plot containing 4 subplots showing the 8 geomoetry dimensions """ Xpred = helper_functions.get_Xpred(Xpred_dir) Xtruth = helper_functions.get_Xtruth(Xpred_dir) f = plt.figure() ax0 = plt.gca() print(np.shape(Xpred)) if (calculate_diversity == 'MST'): diversity_Xpred, diversity_Xtruth = calculate_MST(Xpred, Xtruth) elif (calculate_diversity == 'AREA'): diversity_Xpred, diversity_Xtruth = calculate_AREA(Xpred, Xtruth) for i in range(4): ax = plt.subplot(2, 2, i+1) ax.scatter(Xpred[:,i], Xpred[:,i + 4],s = 3,label = "Xpred") if (compare_original): ax.scatter(Xtruth[:,i], Xtruth[:,i+4],s = 3, label = "Xtruth") plt.xlabel('h{}'.format(i)) plt.ylabel('r{}'.format(i)) plt.xlim(-1,1) plt.ylim(-1,1) plt.legend() if (calculate_diversity != None): plt.text(-4, 3.5,'Div_Xpred = {}, Div_Xtruth = {}, under criteria {}'.format(diversity_Xpred, diversity_Xtruth, calculate_diversity), zorder = 1) plt.suptitle(figname) f.savefig(figname+'.png') def PlotPairwiseGeometry(figname, Xpred_dir): """ Function to plot the pair-wise scattering plot of the geometery file to show the correlation between the geometry that the network learns """ Xpredfile = helper_functions.get_Xpred(Xpred_dir) Xpred = pd.read_csv(Xpredfile, header=None, delimiter=' ') f=plt.figure() axes = pd.plotting.scatter_matrix(Xpred, alpha = 0.2) #plt.tight_layout() plt.title("Pair-wise scattering of Geometery predictions") plt.savefig(figname) def calculate_AREA(Xpred, Xtruth): """ Function to calculate the area for both Xpred and Xtruth under using the segmentation of 0.01 """ area_list = np.zeros([2,4]) X_list = [Xpred, Xtruth] binwidth = 0.05 for cnt, X in enumerate(X_list): for i in range(4): hist, xedges, yedges = np.histogram2d(X[:,i],X[:,i+4], bins = np.arange(-1,1+binwidth,binwidth)) area_list[cnt, i] = np.mean(hist > 0) X_histgt0 = np.mean(area_list, axis = 1) assert len(X_histgt0) == 2 return X_histgt0[0], X_histgt0[1] def calculate_MST(Xpred, Xtruth): """ Function to calculate the MST for both Xpred and Xtruth under using the segmentation of 0.01 """ MST_list = np.zeros([2,4]) X_list = [Xpred, Xtruth] for cnt, X in enumerate(X_list): for i in range(4): points = X[:,i:i+5:4] distance_matrix_points = distance_matrix(points,points, p = 2) csr_mat = csr_matrix(distance_matrix_points) Tree = minimum_spanning_tree(csr_mat) MST_list[cnt,i] = np.sum(Tree.toarray().astype(float)) X_MST = np.mean(MST_list, axis = 1) return X_MST[0], X_MST[1] def get_bvl(file_path): """ This is a helper function for 0119 usage where the bvl is not recorded in the pickled object but in .txt file and needs this funciton to retrieve it """ df = pd.read_csv(file_path, delimiter=',') bvl = 0 for col in df: if 'best_validation_loss' in col: print(col) strlist = col.split(':') #print("in get_bvl, str is: ", strlist[1]) if strlist[1].endswith(']') or strlist[1].endswith('}') : strlist[1] = strlist[1][:-1] bvl = eval(strlist[1]) print("bvl = ", bvl) if bvl == 0: print("Error! We did not found a bvl in .txt.file") else: return float(bvl) def get_xpred_ytruth_xtruth_from_folder(data_dir): """ This function get the list of Xpred and single Ytruth file in the folder from multi_eval and output the list of Xpred, single Ytruth and Single Xtruth numpy array for further operation Since this is not operating on NA series, there is no order in the Xpred files ########################################################### #NOTE: THIS FUNCTION SHOULD NOT OPERATE ON NA BASED METHOD# ########################################################### :param data_dir: The directory to get the files :output Xpred_list: The list of Xpred files, each element is a numpy array with same shape of Xtruth :output Ytruth: The Ytruth numpy array :output Xtruth: The Xtruth numpy array """ # Reading Truth files Yt = pd.read_csv(os.path.join(data_dir, 'Ytruth.csv'), header=None, delimiter=' ').values Xt = pd.read_csv(os.path.join(data_dir, 'Xtruth.csv'), header=None, delimiter=' ').values # Reading the list of prediction files Xpred_list = [] for files in os.listdir(data_dir): if 'Xpred' in files: Xp = pd.read_csv(os.path.join(data_dir, files), header=None, delimiter=' ').values Xpred_list.append(Xp) return Xpred_list, Xt, Yt def reshape_xpred_list_to_mat(Xpred_list): """ This function reshapes the Xpred list (typically from "get_xpred_ytruth_xtruth_from_folder") which has the shape: #initialization (2048, as a list) x #data_point (1000) x #xdim into a matrix form for easier formatting for the backpropagation in NA modes :param Xpred_list: A list of #init, each element has shape of (#data_point 1000 x #xdim) :output X_init_mat: A matrix of shape (2048, 1000, dxim) """ # Get length of list (2048) list_length = len(Xpred_list) # Get shape of Xpred files xshape = np.shape(Xpred_list[0]) # Init the big matrix X_init_mat = np.zeros([list_length, xshape[0], xshape[1]]) # Fill in the matrix for ind, xpred in enumerate(Xpred_list): X_init_mat[ind,:,:] = np.copy(xpred) return X_init_mat def get_mse_mat_from_folder(data_dir): """ The function to get the mse matrix from the giant folder that contains all the multi_eval files. Due to the data structure difference of NA storing, we would handle NA in different way than other algorithms :param data_dir: The directory where the data is in """ Yt = pd.read_csv(os.path.join(data_dir, 'Ytruth.csv'), header=None, delimiter=' ').values print("shape of ytruth is", np.shape(Yt)) # Get all the Ypred into list Ypred_list = [] #################################################################### # Special handling for NA as it output file structure is different # #################################################################### if 'NA' in data_dir or 'on' in data_dir or 'GA' in data_dir: l, w = np.shape(Yt) print("shape of Yt", l,' ', w) num_trails = 200 #num_trails = 2048 Ypred_mat = np.zeros([l, num_trails, w]) check_full = np.zeros(l) # Safety check for completeness for files in os.listdir(data_dir): if '_Ypred_' in files: Yp = pd.read_csv(os.path.join(data_dir, files), header=None, delimiter=' ').values if len(np.shape(Yp)) == 1: # For ballistic data set where it is a coloumn only Yp = np.reshape(Yp, [-1, 1]) print("shape of Ypred file is", np.shape(Yp)) # Truncating to the top num_trails inferences if len(Yp) != num_trails: Yp = Yp[:num_trails,:] number_str = files.split('inference')[-1][:-4] print(number_str) number = int(files.split('inference')[-1][:-4]) Ypred_mat[number, :, :] = Yp check_full[number] = 1 assert np.sum(check_full) == l, 'Your list is not complete, {} Ypred files out of {} are present'.format(np.sum(check_full), l) # Finished fullfilling the Ypred mat, now fill in the Ypred list as before for i in range(num_trails): Ypred_list.append(Ypred_mat[:, i, :]) else: for files in os.listdir(data_dir): if 'Ypred' in files: #print(files) Yp = pd.read_csv(os.path.join(data_dir, files), header=None, delimiter=' ').values if len(np.shape(Yp)) == 1: # For ballistic data set where it is a coloumn only Yp = np.reshape(Yp, [-1, 1]) #print("shape of Ypred file is", np.shape(Yp)) Ypred_list.append(Yp) # Calculate the large MSE matrix mse_mat = np.zeros([len(Ypred_list), len(Yt)]) print("shape of mse_mat is", np.shape(mse_mat)) for ind, yp in enumerate(Ypred_list): if np.shape(yp) != np.shape(Yt): print("Your Ypred file shape does not match your ytruth, however, we are trying to reshape your ypred file into the Ytruth file shape") print("shape of the Yp is", np.shape(yp)) print("shape of the Yt is", np.shape(Yt)) yp = np.reshape(yp, np.shape(Yt)) if ind == 1: print(np.shape(yp)) # For special case yp = -999, it is out of numerical simulator print("shape of np :", np.shape(yp)) print("shape of Yt :", np.shape(Yt)) if np.shape(yp)[1] == 1: # If this is ballistics (actually sinewave also is 1d) print("this is ballistics dataset, checking the -999 situation now") valid_index = yp[:, 0] != -999 invalid_index = yp[:, 0] == -999 #valid_index = np.arange(len(yp[:, 0]))[valid_index] #invalid_index = np.arange(len(yp[:, 0]))[not valid_index] print("shape of valid flag :", np.shape(valid_index)) mse = np.mean(np.square(yp - Yt), axis=1) mse_mat[ind, valid_index] = mse[valid_index] mse_mat[ind, invalid_index] = -999 #valid_num = np.sum(valid_index) #yp = yp[valid_index, :] #Yt_valid = Yt[valid_index, :] #print("shape of np after valid :", np.shape(yp)) #print("shape of Yt after valid :", np.shape(Yt_valid)) #mse = np.mean(np.square(yp - Yt_valid), axis=1) #if valid_num == len(valid_index): # mse_mat[ind, :] = mse #else: # mse_mat[ind, :valid_num] = mse # mse_mat[ind, valid_num:] = np.mean(mse) else: mse = np.mean(np.square(yp - Yt), axis=1) mse_mat[ind, :] = mse print("shape of the yp is", np.shape(yp)) print("shape of mse is", np.shape(mse)) # Extra step to work with -999 case for ballistics if np.shape(yp)[1] == 1: print("Processing the ballistics data due to -999") for i in range(len(Yt)): # Get that list mse_list = mse_mat[:, i] # change ones with -999 to mean of others valid_list = mse_list != -999 invalid_list = mse_list == -999 mse_mat[invalid_list, i] = np.mean(mse_list[valid_list]) return mse_mat, Ypred_list def MeanAvgnMinMSEvsTry(data_dir): """ Plot the mean average Mean and Min Squared error over Tries :param data_dir: The directory where the data is in :param title: The title for the plot :return: """ # Read Ytruth file if not os.path.isdir(data_dir): print("Your data_dir is not a folder in MeanAvgnMinMSEvsTry function") print("Your data_dir is:", data_dir) return # Get the MSE matrix from the giant folder with multi_eval mse_mat, Ypred_list = get_mse_mat_from_folder(data_dir) # Shuffle array and average results shuffle_number = 0 if shuffle_number > 0: # Calculate the min and avg from mat mse_min_list = np.zeros([len(Ypred_list), shuffle_number]) mse_avg_list = np.zeros([len(Ypred_list), shuffle_number]) for shuf in range(shuffle_number): rng = np.random.default_rng() rng.shuffle(mse_mat) for i in range(len(Ypred_list)): mse_avg_list[i, shuf] = np.mean(mse_mat[:i+1, :]) mse_min_list[i, shuf] = np.mean(np.min(mse_mat[:i+1, :], axis=0)) # Average the shuffled result mse_avg_list = np.mean(mse_avg_list, axis=1) mse_min_list = np.mean(mse_min_list, axis=1) else: # Currently the results are not shuffled as the statistics are enough # Calculate the min and avg from mat mse_min_list = np.zeros([len(Ypred_list),]) mse_avg_list = np.zeros([len(Ypred_list),]) mse_std_list = np.zeros([len(Ypred_list),]) mse_quan2575_list = np.zeros([2, len(Ypred_list)]) if 'NA' in data_dir: cut_front = 0 else: cut_front = 0 for i in range(len(Ypred_list)-cut_front): mse_avg_list[i] = np.mean(mse_mat[cut_front:i+1+cut_front, :]) mse_min_list[i] = np.mean(np.min(mse_mat[cut_front:i+1+cut_front, :], axis=0)) mse_std_list[i] = np.std(np.min(mse_mat[cut_front:i+1+cut_front, :], axis=0)) mse_quan2575_list[0, i] = np.percentile(np.min(mse_mat[cut_front:i+1+cut_front, :], axis=0), 25) mse_quan2575_list[1, i] = np.percentile(np.min(mse_mat[cut_front:i+1+cut_front, :], axis=0), 75) # Save the list down for further analysis np.savetxt(os.path.join(data_dir, 'mse_mat.csv'), mse_mat, delimiter=' ') np.savetxt(os.path.join(data_dir, 'mse_avg_list.txt'), mse_avg_list, delimiter=' ') np.savetxt(os.path.join(data_dir, 'mse_min_list.txt'), mse_min_list, delimiter=' ') np.savetxt(os.path.join(data_dir, 'mse_std_list.txt'), mse_std_list, delimiter=' ') np.savetxt(os.path.join(data_dir, 'mse_quan2575_list.txt'), mse_quan2575_list, delimiter=' ') # Plotting f = plt.figure() x_axis = np.arange(len(Ypred_list)) plt.plot(x_axis, mse_avg_list, label='avg') plt.plot(x_axis, mse_min_list, label='min') plt.legend() plt.xlabel('inference number') plt.ylabel('mse error') plt.savefig(os.path.join(data_dir,'mse_plot vs time')) return None def MeanAvgnMinMSEvsTry_all(data_dir): # Depth=2 now based on current directory structure """ Do the recursive call for all sub_dir under this directory :param data_dir: The mother directory that calls :return: """ for dirs in os.listdir(data_dir): print("entering :", dirs) print("this is a folder?:", os.path.isdir(os.path.join(data_dir, dirs))) print("this is a file?:", os.path.isfile(os.path.join(data_dir, dirs))) #if this is not a folder if not os.path.isdir(os.path.join(data_dir, dirs)): print("This is not a folder", dirs) continue for subdirs in os.listdir(os.path.join(data_dir, dirs)): if os.path.isfile(os.path.join(data_dir, dirs, subdirs, 'mse_min_list.txt')): # if this has been done continue; print("enters folder", subdirs) MeanAvgnMinMSEvsTry(os.path.join(data_dir, dirs, subdirs)) return None def DrawBoxPlots_multi_eval(data_dir, data_name, save_name='Box_plot'): """ The function to draw the statitstics of the data using a Box plot :param data_dir: The mother directory to call :param data_name: The data set name """ # Predefine name of mse_mat mse_mat_name = 'mse_mat.csv' #Loop through directories mse_mat_dict = {} for dirs in os.listdir(data_dir): print(dirs) if not os.path.isdir(os.path.join(data_dir, dirs)):# or 'NA' in dirs: print("skipping due to it is not a directory") continue; for subdirs in os.listdir((os.path.join(data_dir, dirs))): if subdirs == data_name: # Read the lists mse_mat = pd.read_csv(os.path.join(data_dir, dirs, subdirs, mse_mat_name), header=None, delimiter=' ').values # Put them into dictionary mse_mat_dict[dirs] = mse_mat # Get the box plot data box_plot_data = [] for key in sorted(mse_mat_dict.keys()): data = mse_mat_dict[key][0, :] # data = np.mean(mse_mat_dict[key], axis=1) box_plot_data.append(data) print('{} avg error is : {}'.format(key, np.mean(data))) # Start plotting f = plt.figure() plt.boxplot(box_plot_data, patch_artist=True, labels=sorted(mse_mat_dict.keys())) plt.ylabel('mse') ax = plt.gca() ax.set_yscale('log') plt.savefig(os.path.join(data_dir, data_name + save_name + '.png')) return None def DrawAggregateMeanAvgnMSEPlot(data_dir, data_name, save_name='aggregate_plot', gif_flag=False, plot_points=200,resolution=None, dash_group='nobody', dash_label='', solid_label='',worse_model_mode=False): # Depth=2 now based on current directory structure """ The function to draw the aggregate plot for Mean Average and Min MSEs :param data_dir: The mother directory to call :param data_name: The data set name :param git_flag: Plot are to be make a gif :param plot_points: Number of points to be plot :param resolution: The resolution of points :return: """ # Predefined name of the avg lists min_list_name = 'mse_min_list.txt' avg_list_name = 'mse_avg_list.txt' std_list_name = 'mse_std_list.txt' quan2575_list_name = 'mse_quan2575_list.txt' # Loop through the directories avg_dict, min_dict, std_dict, quan2575_dict = {}, {}, {}, {} for dirs in os.listdir(data_dir): # Dont include NA for now and check if it is a directory print("entering :", dirs) print("this is a folder?:", os.path.isdir(os.path.join(data_dir, dirs))) print("this is a file?:", os.path.isfile(os.path.join(data_dir, dirs))) if not os.path.isdir(os.path.join(data_dir, dirs)):# or dirs == 'NA':# or 'boundary' in dirs:: print("skipping due to it is not a directory") continue; for subdirs in os.listdir((os.path.join(data_dir, dirs))): if subdirs == data_name: # Read the lists mse_avg_list = pd.read_csv(os.path.join(data_dir, dirs, subdirs, avg_list_name), header=None, delimiter=' ').values mse_min_list = pd.read_csv(os.path.join(data_dir, dirs, subdirs, min_list_name), header=None, delimiter=' ').values mse_std_list = pd.read_csv(os.path.join(data_dir, dirs, subdirs, std_list_name), header=None, delimiter=' ').values mse_quan2575_list = pd.read_csv(os.path.join(data_dir, dirs, subdirs, quan2575_list_name), header=None, delimiter=' ').values print("The quan2575 error range shape is ", np.shape(mse_quan2575_list)) print("dirs =", dirs) print("shape of mse_min_list is:", np.shape(mse_min_list)) # Put them into dictionary avg_dict[dirs] = mse_avg_list min_dict[dirs] = mse_min_list std_dict[dirs] = mse_std_list quan2575_dict[dirs] = mse_quan2575_list #print("printing the min_dict", min_dict) def plotDict(dict, name, data_name=None, logy=False, time_in_s_table=None, avg_dict=None, plot_points=50, resolution=None, err_dict=None, color_assign=False, dash_group='nobody', dash_label='', solid_label='', plot_xlabel=False, worse_model_mode=False): """ :param name: the name to save the plot :param dict: the dictionary to plot :param logy: use log y scale :param time_in_s_table: a dictionary of dictionary which stores the averaged evaluation time in seconds to convert the graph :param plot_points: Number of points to be plot :param resolution: The resolution of points :param err_dict: The error bar dictionary which takes the error bar input :param avg_dict: The average dict for plotting the starting point :param dash_group: The group of plots to use dash line :param dash_label: The legend to write for dash line :param solid_label: The legend to write for solid line :param plot_xlabel: The True or False flag for plotting the x axis label or not :param worse_model_mode: The True or False flag for plotting worse model mode (1X, 10X, 50X, 100X worse model) """ import matplotlib.colors as mcolors if worse_model_mode: color_dict = {"(1X": "limegreen", "(10X": "blueviolet", "(50X":"cornflowerblue", "(100X": "darkorange"} else: # # manual color setting # color_dict = {"VAE": "blueviolet","cINN":"crimson", "INN":"cornflowerblue", "Random": "limegreen", # "MDN": "darkorange", "NA_init_lr_0.1_decay_0.5_batch_2048":"limegreen"} # Automatic color setting color_dict = {} if len(dict.keys()) < 10: color_pool = mcolors.TABLEAU_COLORS.keys() else: color_pool = mcolors.CSS4_COLORS.keys() for ind, key in enumerate(dict.keys()): color_dict[key] = list(color_pool)[ind] f = plt.figure(figsize=[6,3]) ax = plt.gca() ax.spines['bottom'].set_color('black') ax.spines['top'].set_color('black') ax.spines['left'].set_color('black') ax.spines['right'].set_color('black') text_pos = 0.01 # List for legend legend_list = [] print("All the keys=", dict.keys()) print("All color keys=", color_dict.keys()) for key in sorted(dict.keys()): ###################################################### # This is for 02.02 getting the T=1, 50, 1000 result # ###################################################### #text = key.replace('_',' ')+"\n" + ': t1={:.2e},t50={:.2e},t1000={:.2e}'.format(dict[key][0][0], dict[key][49][0], dict[key][999][0]) #print("printing message on the plot now") #plt.text(1, text_pos, text, wrap=True) #text_pos /= 5 # Linestyle if dash_group is not None and dash_group in key: linestyle = 'dashed' else: linestyle = 'solid' x_axis = np.arange(len(dict[key])).astype('float') x_axis += 1 if time_in_s_table is not None: x_axis *= time_in_s_table[data_name][key] #print("printing", name) print('key = ', key) #print(dict[key]) if err_dict is None: if color_assign: line_axis, = plt.plot(x_axis[:plot_points:resolution], dict[key][:plot_points:resolution],c=color_dict[key.split('_')[0]],label=key, linestyle=linestyle) else: line_axis, = plt.plot(x_axis[:plot_points:resolution], dict[key][:plot_points:resolution],label=key, linestyle=linestyle) else: # This is the case where we plot the continuous error curve if resolution is None: # For mm_bench, this is not needed #label = key.split('_')[0] label = key if linestyle == 'dashed': label = None #color_key = key.split('_')[0].split(')')[0] # '_' is for separating BP_ox_FF_ox and ')' is for worse model color_key = key #print("color key = ", color_key) line_axis, = plt.plot(x_axis[:plot_points], dict[key][:plot_points], color=color_dict[color_key], linestyle=linestyle, label=label) lower = - err_dict[key][0, :plot_points] + np.ravel(dict[key][:plot_points]) higher = err_dict[key][1, :plot_points] + np.ravel(dict[key][:plot_points]) plt.fill_between(x_axis[:plot_points], lower, higher, color=color_dict[color_key], alpha=0.2) else: if color_assign: line_axis = plt.errorbar(x_axis[:plot_points:resolution], dict[key][:plot_points:resolution],c=color_dict[key.split('_')[0]], yerr=err_dict[key][:, :plot_points:resolution], label=key.replace('_',' '), capsize=5, linestyle=linestyle)#, errorevery=resolution)#, else: line_axis = plt.errorbar(x_axis[:plot_points:resolution], dict[key][:plot_points:resolution], yerr=err_dict[key][:, :plot_points:resolution], label=key.replace('_',' '), capsize=5, linestyle=linestyle)#, errorevery=resolution)#, #dash_capstyle='round')#, uplims=True, lolims=True) legend_list.append(line_axis) if logy: ax = plt.gca() ax.set_yscale('log') print(legend_list) legend_list.append(Line2D([0], [0], color='k', linestyle='dashed', lw=1, label=dash_label)) legend_list.append(Line2D([0], [0], color='k', linestyle='solid', lw=1, label=solid_label)) #ax.legend(handles=legend_list, loc=1, ncol=2, prop={'size':8}) if time_in_s_table is not None and plot_xlabel: plt.xlabel('inference time (s)') elif plot_xlabel: plt.xlabel('# of inference made (T)') #plt.ylabel('MSE') plt.xlim([1, plot_points]) if 'ball' in data_name: data_name = 'D1: ' + data_name elif 'sine' in data_name: data_name = 'D2: ' + data_name elif 'robo' in data_name: data_name = 'D3: ' + data_name elif 'meta' in data_name: data_name = 'D4: ' + data_name else: # This is not a standard dataset plt.legend(prop={'size': 2}) plt.savefig(os.path.join(data_dir, data_name + save_name + name), transparent=True, dpi=300) plt.close('all') return plt.grid(True, axis='both',which='major',color='b',alpha=0.2) #plt.title(data_name.replace('_',' '), fontsize=20) ax = plt.gca() data_index = int(data_name.split(':')[0].split('D')[-1]) if data_index % 2 == 0: # If this is a even number ax.yaxis.tick_right() else: ax.yaxis.tick_left() if data_index < 3: ax.xaxis.tick_top() plt.xticks([1, 10, 20, 30, 40, 50]) plt.savefig(os.path.join(data_dir, data_name + save_name + name), transparent=True, dpi=300) plt.close('all') ax = plotDict(min_dict,'_minlog_quan2575.png', plot_points=plot_points, logy=True, avg_dict=avg_dict, err_dict=quan2575_dict, data_name=data_name, dash_group=dash_group, dash_label=dash_label, solid_label=solid_label, resolution=resolution, worse_model_mode=worse_model_mode) #plotDict(min_dict,'_min_quan2575.png', plot_points, resolution, logy=False, avg_dict=avg_dict, err_dict=quan2575_dict) #plotDict(min_dict,'_minlog_std.png', plot_points, resolution, logy=True, avg_dict=avg_dict, err_dict=std_dict) #plotDict(min_dict,'_min_std.png', plot_points, resolution, logy=False, avg_dict=avg_dict, err_dict=std_dict) # if plot gifs if not gif_flag: return else: for i in range(2,20,1): plotDict(min_dict, str(i), logy=True, plot_points=i) for i in range(20,1000,20): plotDict(min_dict, str(i), logy=True, plot_points=i) return ax def DrawEvaluationTime(data_dir, data_name, save_name='evaluation_time', logy=False, limit=1000): """ This function is to plot the evaluation time behavior of different algorithms on different data sets :param data_dir: The mother directory where all the results are put :param data_name: The specific dataset to analysis :param save_name: The saving name of the plotted figure :param logy: take logrithmic at axis y :param limit: the limit of x max :return: """ eval_time_dict = {} for dirs in os.listdir(data_dir): print("entering :", dirs) print("this is a folder?:", os.path.isdir(os.path.join(data_dir, dirs))) print("this is a file?:", os.path.isfile(os.path.join(data_dir, dirs))) if not os.path.isdir(os.path.join(data_dir, dirs)): print("skipping due to it is not a directory") continue; for subdirs in os.listdir((os.path.join(data_dir, dirs))): if subdirs == data_name: # Read the lists eval_time = pd.read_csv(os.path.join(data_dir, dirs, subdirs, 'evaluation_time.txt'), header=None, delimiter=',').values[:, 1] # Put them into dictionary eval_time_dict[dirs] = eval_time # Plotting f = plt.figure() for key in sorted(eval_time_dict.keys()): average_time = eval_time_dict[key][-1] / len(eval_time_dict[key]) plt.plot(np.arange(len(eval_time_dict[key])), eval_time_dict[key], label=key + 'average_time={0:.2f}s'.format(average_time)) plt.legend() plt.xlabel('#inference trails') plt.ylabel('inference time taken (s)') plt.title(data_name + 'evaluation_time') plt.xlim([0, limit]) if logy: ax = plt.gca() ax.set_yscale('log') plt.savefig(os.path.join(data_dir, data_name + save_name + 'logy.png')) else: plt.savefig(os.path.join(data_dir, data_name + save_name + '.png')) if __name__ == '__main__': # NIPS version #MeanAvgnMinMSEvsTry_all('/work/sr365/NA_compare/') #datasets = ['ballistics'] #datasets = ['meta_material', 'robotic_arm','sine_wave','ballistics'] #lr_list = ['lr1','lr0.5','lr0.05'] #for lr in lr_list: # MeanAvgnMinMSEvsTry_all('/work/sr365/NA_compare/'+lr) # for dataset in datasets: # DrawAggregateMeanAvgnMSEPlot('/work/sr365/NA_compare/'+lr, dataset) # #DrawAggregateMeanAvgnMSEPlot('/work/sr365/NA_compare/', 'ballistics') # NIPS version work_dir = '/home/sr365/mm_bench_multi_eval' #lr_list = [10, 1, 0.1, 0.01, 0.001] MeanAvgnMinMSEvsTry_all(work_dir) #datasets = ['Yang_sim','Peurifoy'] #datasets = ['Yang_sim'] #for lr in lr_list: for dataset in datasets: #DrawAggregateMeanAvgnMSEPlot(work_dir, dataset, resolution=5) DrawAggregateMeanAvgnMSEPlot(work_dir, dataset) """ # NIPS version on Groot #work_dir = '/data/users/ben/robotic_stuck/retrain5/' work_dir = '/data/users/ben/multi_eval/' MeanAvgnMinMSEvsTry_all(work_dir) datasets = ['ballistics','robotic_arm'] ##datasets = ['meta_material', 'robotic_arm','sine_wave','ballistics'] for dataset in datasets: DrawAggregateMeanAvgnMSEPlot(work_dir, dataset) """ # NIPS version for INN robo #MeanAvgnMinMSEvsTry_all('/work/sr365/multi_eval/special/') #datasets = ['robotic_arm'] #datasets = ['meta_material', 'robotic_arm','sine_wave','ballistics'] #for dataset in datasets: # DrawAggregateMeanAvgnMSEPlot('/work/sr365/multi_eval/special', dataset) #MeanAvgnMinMSEvsTry_all('/home/sr365/ICML_exp_cINN_ball/') #DrawAggregateMeanAvgnMSEPlot('/home/sr365/ICML_exp_cINN_ball', dataset) """ # Modulized version (ICML) #data_dir = '/data/users/ben/' # I am groot! data_dir = '/home/sr365/' # quad #data_dir = '/work/sr365/' algo_list = ['cINN','INN','VAE','MDN','Random'] #algo_list = '' for algo in algo_list: MeanAvgnMinMSEvsTry_all(data_dir + 'ICML_exp/' + algo + '/') #datasets = ['meta_material'] #datasets = ['robotic_arm','sine_wave','ballistics'] datasets = ['robotic_arm','sine_wave','ballistics','meta_material'] for dataset in datasets: DrawAggregateMeanAvgnMSEPlot(data_dir+ 'ICML_exp/'+algo+'/', dataset) """ # Modulized version plots (ICML_0120) #data_dir = '/data/users/ben/' ##data_dir = '/work/sr365/' ##algo_list = ['cINN','INN','VAE','MDN','Random'] #algo_list = ['Ball','rob','sine','MM'] #for algo in algo_list: # #MeanAvgnMinMSEvsTry_all(data_dir + 'ICML_exp_0120/top_ones_' + algo + '/') # #datasets = ['robotic_arm','ballistics'] # datasets = ['robotic_arm','sine_wave','ballistics','meta_material'] # for dataset in datasets: # DrawAggregateMeanAvgnMSEPlot(data_dir+ 'ICML_exp_0120/top_ones_'+algo+'/', dataset) ## Draw the top ones #datasets = ['robotic_arm','sine_wave','ballistics','meta_material'] #draw_dir = '/data/users/ben/best_plot/' #for dataset in datasets: # DrawAggregateMeanAvgnMSEPlot(draw_dir + dataset , dataset)
AEML
/models/MLP/utils/plotsAnalysis.py
plotsAnalysis.py
import os import sys import numpy as np import pandas as pd import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt from torch.utils.data import Dataset from sklearn.model_selection import train_test_split import torch TEST_SET_DIR = '/scratch/sr365/ML_MM_Benchmark/testsets' def get_data_into_loaders(data_x, data_y, batch_size, DataSetClass, rand_seed=0, test_ratio=0.3): """ Helper function that takes structured data_x and data_y into dataloaders :param data_x: the structured x data :param data_y: the structured y data :param rand_seed: the random seed :param test_ratio: The testing ratio :return: train_loader, test_loader: The pytorch data loader file """ if test_ratio == 1: # Test case print('This is testing mode!!! Test ratio = 1') test_data = DataSetClass(data_x, data_y) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size) return None, test_loader x_train, x_test, y_train, y_test = train_test_split(data_x, data_y, test_size=test_ratio, random_state=rand_seed) print('total number of training sample is {}, the dimension of the feature is {}'.format(len(x_train), len(x_train[0]))) print('total number of test sample is {}'.format(len(y_test))) # Construct the dataset using a outside class train_data = DataSetClass(x_train, y_train) test_data = DataSetClass(x_test, y_test) # Construct train_loader and test_loader train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size) return train_loader, test_loader def normalize_np(x, x_max_list=None, x_min_list=None): """ Normalize the x into [-1, 1] range in each dimension [:, i] :param x: np array to be normalized :return: normalized np array """ if x_max_list is not None: if x_min_list is None or len(x[0]) != len(x_max_list) or len(x_max_list) != len(x_min_list): print("In normalize_np, your dimension does not match with provided x_max, try again") quit() new_x_max_list = [] new_x_min_list = [] for i in range(len(x[0])): if x_max_list is None: x_max = np.max(x[:, i]) x_min = np.min(x[:, i]) else: x_max = x_max_list[i] x_min = x_min_list[i] x_range = (x_max - x_min ) /2. x_avg = (x_max + x_min) / 2. x[:, i] = (x[:, i] - x_avg) / x_range print("In normalize_np, row ", str(i), " your max is:", np.max(x[:, i])) print("In normalize_np, row ", str(i), " your min is:", np.min(x[:, i])) if x_max_list is None: assert np.max(x[:, i]) - 1 < 0.0001, 'your normalization is wrong' assert np.min(x[:, i]) + 1 < 0.0001, 'your normalization is wrong' new_x_max_list.append(x_max) new_x_min_list.append(x_min) return x, np.array(new_x_max_list), np.array(new_x_min_list) def read_data_color_filter(flags, eval_data_all=False): """ Data reader function for the gaussian mixture data set :param flags: Input flags :return: train_loader and test_loader in pytorch data set format (normalized) """ # Read the data data_dir = os.path.join(flags.data_dir, 'Color') data_x = pd.read_csv(os.path.join(data_dir, 'data_x.csv'), header=None).astype('float32').values data_y = pd.read_csv(os.path.join(data_dir, 'data_y.csv'), header=None).astype('float32').values # This is to for the last test dataset if eval_data_all: data_dir = os.path.join(TEST_SET_DIR, 'Color') data_x = pd.read_csv(os.path.join(data_dir, 'test_x.csv'), header=None).astype('float32').values data_y = pd.read_csv(os.path.join(data_dir, 'test_y.csv'), header=None).astype('float32').values return get_data_into_loaders(data_x, data_y, flags.batch_size, SimulatedDataSet_regress, rand_seed=flags.rand_seed,test_ratio=1) print("shape of data_x", np.shape(data_x)) print("shape of data_y", np.shape(data_y)) return get_data_into_loaders(data_x, data_y, flags.batch_size, SimulatedDataSet_regress, rand_seed=flags.rand_seed, test_ratio=flags.test_ratio) def read_data_Yang(flags, eval_data_all=False): """ Data reader function for the gaussian mixture data set :param flags: Input flags :return: train_loader and test_loader in pytorch data set format (normalized) """ # Read the data data_dir = os.path.join(flags.data_dir, 'Yang') data_x = pd.read_csv(os.path.join(data_dir, 'data_x.csv'), header=None).astype('float32').values data_y = pd.read_csv(os.path.join(data_dir, 'data_y.csv'), header=None).astype('float32').values # Normalize the dataset data_x, x_max, x_min = normalize_np(data_x) # This is to for the last test dataset if eval_data_all: data_dir = os.path.join(TEST_SET_DIR, 'Yang') data_x = pd.read_csv(os.path.join(data_dir, 'test_x.csv'), header=None).astype('float32').values data_y = pd.read_csv(os.path.join(data_dir, 'test_y.csv'), header=None).astype('float32').values data_x, _, _, = normalize_np(data_x, x_max, x_min) print('This is Yang dataset with data_x shape of', np.shape(data_x)) return get_data_into_loaders(data_x, data_y, flags.batch_size, SimulatedDataSet_regress, rand_seed=flags.rand_seed, test_ratio=1) print("shape of data_x", np.shape(data_x)) print("shape of data_y", np.shape(data_y)) return get_data_into_loaders(data_x, data_y, flags.batch_size, SimulatedDataSet_regress, rand_seed=flags.rand_seed, test_ratio=flags.test_ratio) def read_data_peurifoy(flags, eval_data_all=False): """ Data reader function for the gaussian mixture data set :param flags: Input flags :return: train_loader and test_loader in pytorch data set format (normalized) """ # Read the data data_dir = os.path.join(flags.data_dir, 'Peurifoy') data_x = pd.read_csv(os.path.join(data_dir, 'data_x.csv'), header=None).astype('float32').values data_y = pd.read_csv(os.path.join(data_dir, 'data_y.csv'), header=None).astype('float32').values # This is to for the last test dataset if eval_data_all: data_dir = os.path.join(TEST_SET_DIR, 'Peurifoy') data_x = pd.read_csv(os.path.join(data_dir, 'test_x.csv'), header=None).astype('float32').values data_y = pd.read_csv(os.path.join(data_dir, 'test_y.csv'), header=None).astype('float32').values data_x = (data_x - 50) / 20. print('This is Perifoy dataset with data_x shape of', np.shape(data_x)) return get_data_into_loaders(data_x, data_y, flags.batch_size, SimulatedDataSet_regress, rand_seed=flags.rand_seed,test_ratio=1) # The geometric boundary of peurifoy dataset is [30, 70], normalizing manually data_x = (data_x - 50) / 20. print("shape of data_x", np.shape(data_x)) print("shape of data_y", np.shape(data_y)) return get_data_into_loaders(data_x, data_y, flags.batch_size, SimulatedDataSet_regress, rand_seed=flags.rand_seed, test_ratio=flags.test_ratio) def read_data(flags, eval_data_all=False): """ The data reader allocator function The input is categorized into couple of different possibilities 0. meta_material 1. gaussian_mixture 2. sine_wave 3. naval_propulsion 4. robotic_arm 5. ballistics :param flags: The input flag of the input data set :param eval_data_all: The switch to turn on if you want to put all data in evaluation data :return: """ print("In read_data, flags.data_set =", flags.data_set) if 'Yang' in flags.data_set or 'ADM' in flags.data_set: train_loader, test_loader = read_data_Yang(flags,eval_data_all=eval_data_all) elif 'Peurifoy' in flags.data_set : train_loader, test_loader = read_data_peurifoy(flags,eval_data_all=eval_data_all) elif 'olor' in flags.data_set: train_loader, test_loader =read_data_color_filter(flags,eval_data_all=eval_data_all) else: sys.exit("Your flags.data_set entry is not correct, check again!") return train_loader, test_loader class MetaMaterialDataSet(Dataset): """ The Meta Material Dataset Class """ def __init__(self, ftr, lbl, bool_train): """ Instantiate the Dataset Object :param ftr: the features which is always the Geometry !! :param lbl: the labels, which is always the Spectra !! :param bool_train: """ self.ftr = ftr self.lbl = lbl self.bool_train = bool_train self.len = len(ftr) def __len__(self): return self.len def __getitem__(self, ind): return self.ftr[ind, :], self.lbl[ind, :] class SimulatedDataSet_class_1d_to_1d(Dataset): """ The simulated Dataset Class for classification purposes""" def __init__(self, x, y): self.x = x self.y = y self.len = len(x) def __len__(self): return self.len def __getitem__(self, ind): return self.x[ind], self.y[ind] class SimulatedDataSet_class(Dataset): """ The simulated Dataset Class for classification purposes""" def __init__(self, x, y): self.x = x self.y = y self.len = len(x) def __len__(self): return self.len def __getitem__(self, ind): return self.x[ind, :], self.y[ind] class SimulatedDataSet_regress(Dataset): """ The simulated Dataset Class for regression purposes""" def __init__(self, x, y): self.x = x self.y = y self.len = len(x) def __len__(self): return self.len def __getitem__(self, ind): return self.x[ind, :], self.y[ind, :]
AEML
/models/MLP/utils/data_reader.py
data_reader.py
import numpy as np from sklearn.metrics import confusion_matrix import seaborn as sns import matplotlib.pyplot as plt import os import pandas as pd def compare_truth_pred(pred_file, truth_file, cut_off_outlier_thres=None, quiet_mode=False): """ Read truth and pred from csv files, compute their mean-absolute-error and the mean-squared-error :param pred_file: full path to pred file :param truth_file: full path to truth file :return: mae and mse """ if isinstance(pred_file, str): # If input is a file name (original set up) pred = pd.read_csv(pred_file, header=None, sep=' ').values print(np.shape(pred)) if np.shape(pred)[1] == 1: pred = pd.read_csv(pred_file, header=None, sep=',').values truth = pd.read_csv(truth_file, header=None, sep=' ').values print(np.shape(truth)) if np.shape(truth)[1] == 1: truth = pd.read_csv(truth_file, header=None, sep=',').values elif isinstance(pred_file, np.ndarray): pred = pred_file truth = truth_file else: print('In the compare_truth_pred function, your input pred and truth is neither a file nor a numpy array') if not quiet_mode: print("in compare truth pred function in eval_help package, your shape of pred file is", np.shape(pred)) # Getting the mean absolute error and the mean squared error mae = np.mean(np.abs(pred-truth), axis=1) mse = np.mean(np.square(pred-truth), axis=1) # When necessary you can choose to cut off the outliers here if cut_off_outlier_thres is not None: mse = mse[mse < cut_off_outlier_thres] mae = mae[mae < cut_off_outlier_thres] return mae, mse def plotMSELossDistrib(pred_file, truth_file, save_dir='data/'): """ Function to plot the MSE distribution histogram :param: pred_file: The Y prediction file :param: truth_file: The Y truth file :param: flags: The flags of the model/evaluation :param: save_dir: The directory to save the plot """ mae, mse = compare_truth_pred(pred_file, truth_file) plt.figure(figsize=(12, 6)) plt.hist(mse, bins=100) plt.xlabel('Mean Squared Error') plt.ylabel('cnt') plt.suptitle('(Avg MSE={:.4e}, 25%={:.3e}, 75%={:.3e})'.format(np.mean(mse), np.percentile(mse, 25), np.percentile(mse, 75))) if isinstance(pred_file, str): eval_model_str = pred_file.split('Ypred')[-1].split('.')[0] else: eval_model_str = 'MSE_unknon_name' plt.savefig(os.path.join(save_dir, '{}.png'.format(eval_model_str))) print('(Avg MSE={:.4e})'.format(np.mean(mse))) return np.mean(mse)
AEML
/models/MLP/utils/evaluation_helper.py
evaluation_helper.py
import numpy as np import torch from utils import plotsAnalysis import os from utils.helper_functions import load_flags def auto_swipe(mother_dir=None): """ This function swipes the parameter space of a folder and extract the varying hyper-parameters and make 2d heatmap w.r.t. all combinations of them """ if mother_dir is None: #mother_dir = '/scratch/sr365/ML_MM_Benchmark/Yang_temp/models/sweep8' #mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/Yang_new_sweep/' mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/new_norm_color/' #mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/Color_new_sweep/' #mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/encoder_pos_analysis/Color' #mother_dir = '/scratch/sr365/ML_MM_Benchmark/Color_temp/models' #mother_dir = '/scratch/sr365/ML_MM_Benchmark/Color_temp/prev_sweep/test_size' #mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/sweep_encode_lr' flags_list = [] # First step, get the list of object flags for folder in os.listdir(mother_dir): # Get the current sub_folder cur_folder = os.path.join(mother_dir, folder) if not os.path.isdir(cur_folder) or not os.path.isfile(os.path.join(cur_folder, 'flags.obj')): print('Either this is not a folder or there is no flags object under this folder for ', cur_folder) continue # Read the pickle object cur_flags = load_flags(cur_folder) flags_list.append(cur_flags) # From the list of flags, get the things that are different except for loss terms att_list = [a for a in dir(cur_flags) if not a.startswith('_') and not 'loss' in a and not 'trainable_param' in a and not 'model_name' in a and not 'dir' in a] print('In total {} attributes, they are {}'.format(len(att_list), att_list)) # Create a dictionary that have keys as attributes and unique values as that attDict = {key: [] for key in att_list} # Loop over all the flags and get the unique values inside for flags in flags_list: for keys in attDict.keys(): try: att = getattr(flags,keys) except: print('There is not attribute {} in flags, continue'.format(keys)) continue # Skip if this is already inside the list if att in attDict[keys]: continue attDict[keys].append(att) # Get the atts in the dictionary that has more than 1 att inside varying_att_list = [] for keys in attDict.keys(): if len(attDict[keys]) > 1: # For linear layers, apply special handlings if 'linear' not in keys: varying_att_list.append(keys) continue length_list = [] num_node_in_layer_list = [] # Loop over the lists of linear for linear_list in attDict[keys]: assert type(linear_list) == list, 'Your linear layer is not list, check again' length_list.append(len(linear_list)) # Record the length instead if 'head_linear' in keys: if len(linear_list) > 2: num_node_in_layer_list.append(linear_list[-2]) # Record the -2 of the list, which denotes the number of nodes elif 'tail_linear' in keys: if len(linear_list) > 1: num_node_in_layer_list.append(linear_list[-2]) # Record the -2 of the list, which denotes the number of nodes # Add these two attributes to the if len(np.unique(length_list)) > 1: varying_att_list.append(keys) if len(np.unique(num_node_in_layer_list)) > 1: varying_att_list.append('linear_unit') print('varying attributes are', varying_att_list) # Showing how they are changing for keys in varying_att_list: if keys == 'linear_unit': continue print('att is {}, they have values of {}'.format(keys, attDict[keys])) if len(varying_att_list) == 1: # There is only 1 attribute that is changing att = varying_att_list[0] key_a = att key_b = 'lr' for heatmap_value in ['best_validation_loss', 'best_training_loss','trainable_param']: #try: print('doing heatmap {}'.format(heatmap_value)) plotsAnalysis.HeatMapBVL(key_a, key_b, key_a + '_' + key_b + '_HeatMap',save_name=mother_dir + '_' + key_a + '_' + key_b + '_' + heatmap_value + '_heatmap.png', HeatMap_dir=mother_dir,feature_1_name=key_a,feature_2_name=key_b, heat_value_name=heatmap_value) #except Exception as e: # print('the plotswipe does not work in {} and {} cross for {}'.format(key_a, key_b, heatmap_value)) # print('error message: {}'.format(e)) # Start calling the plotsAnalysis function for all the pairs for a, key_a in enumerate(varying_att_list): for b, key_b in enumerate(varying_att_list): # Skip the same attribute if a <= b: continue # Call the plotsAnalysis function #for heatmap_value in ['best_validation_loss']: for heatmap_value in ['best_validation_loss', 'best_training_loss','trainable_param']: print('doing heatmap {}'.format(heatmap_value)) try: plotsAnalysis.HeatMapBVL(key_a, key_b, key_a + '_' + key_b + '_HeatMap',save_name=mother_dir + '_' + key_a + '_' + key_b + '_' + heatmap_value + '_heatmap.png', HeatMap_dir=mother_dir,feature_1_name=key_a,feature_2_name=key_b, heat_value_name=heatmap_value) except: print('the plotswipe does not work in {} and {} cross for {}'.format(key_a, key_b, heatmap_value)) if __name__ == '__main__': #pathnamelist = ['/scratch/sr365/ML_MM_Benchmark/Yang_temp/models/sweep4', # '/scratch/sr365/ML_MM_Benchmark/Transformer/models/sweep4']#, #'/scratch/sr365/ML_MM_Benchmark/Color_temp/models/sweep2'] #'/scratch/sr365/ML_MM_Benchmark/Yang_temp/models/lr_sweep'] #big_mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/encoder' #big_mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/sequence_len' #big_mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/MLP_complexity/' #for dirs in os.listdir(big_mother_dir): # mother_dir = os.path.join(big_mother_dir, dirs) # if os.path.isdir(mother_dir): # auto_swipe(mother_dir) auto_swipe()
AEML
/models/MLP/utils/plot_swipe.py
plot_swipe.py
import pandas as pd import numpy as np from tqdm import tqdm import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from einops.layers.torch import Rearrange from . import helper import os import math class FeedForward(nn.Module): ''' FC -> GELU + DO -> FC ''' def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim) ) def forward(self, x): return self.net(x) class MLayer(nn.Module): def __init__(self, dim, num_patch, token_dim, channel_dim, dropout = 0.): super().__init__() self.token_mix = nn.Sequential( nn.LayerNorm(dim), Rearrange('b n d -> b d n'), FeedForward(num_patch, token_dim, dropout), Rearrange('b d n -> b n d') ) self.channel_mix = nn.Sequential( nn.LayerNorm(dim), FeedForward(dim, channel_dim, dropout), ) def forward(self, x): #skip connection x = x + self.token_mix(x) x = x + self.channel_mix(x) return x class MMixer(nn.Module): def __init__(self,patch_size,embed_dim, n_block, token_dim, channel_dim,input_dim, output_dim,dropout=0.,expand=False,expand_dim=128): super().__init__() if expand: self.expand_linear = nn.Linear(input_dim,expand_dim) self.expand = True else: self.expand = False self.linear_embedding = nn.Linear(patch_size,embed_dim) if expand: assert expand_dim%patch_size == 0, "Impossible to patchify the input with this patch_size" self.num_patch = expand_dim//patch_size else: assert input_dim%patch_size == 0, "Impossible to patchify the input with this patch_size" self.num_patch = input_dim//patch_size self.patch_size = patch_size self.mixer_layers = nn.ModuleList([]) for _ in range(n_block): self.mixer_layers.append(MLayer(embed_dim, self.num_patch, token_dim, channel_dim,dropout=dropout)) self.layer_norm = nn.LayerNorm(embed_dim) self.mlp = nn.Sequential( nn.Linear(embed_dim, output_dim) ) def patchify(self,X,patch_size=1): ''' suppose X is of shape (n,d): n is size of batch, d is size of feature ''' n,d = X.shape assert d%patch_size == 0, "Impossible to patchify the input with this patch_size" num_patch = d//patch_size stack=[] for i in range(num_patch): stack.append(X[:,i*patch_size:(i+1)*patch_size]) return torch.stack(stack,dim=1) #the returned X is of size (n,num_patch,patch_size) def forward(self,x): #expand the input dimension if necessary if self.expand: x = self.expand_linear(x) #slice input into patches x = self.patchify(x,self.patch_size) #linear embedding from patch_size to dim x = self.linear_embedding(x) for mixer in self.mixer_layers: x = mixer(x) x = self.layer_norm(x) x = x.mean(dim=1) #global average pooling prediction = self.mlp(x) return prediction class Monster(nn.Module): ''' OOP for the model that combines and Mixer and MLP layers ''' def __init__(self,input_dim,output_dim,mlp_dim,patch_size,mixer_layer_num,mlp1_layer_num=3,dropout=0.): super().__init__() sequence=[nn.Linear(input_dim,mlp_dim),nn.ReLU(),nn.Dropout(dropout)] for _ in range(mlp1_layer_num): sequence.append(nn.Linear(mlp_dim,mlp_dim)) sequence.append(nn.ReLU()) sequence.append(nn.Dropout(dropout)) self.MLP1 = nn.Sequential( *sequence ) #the mixer takes output from first MLP self.mixer = MMixer(patch_size=patch_size,embed_dim=128,n_block=mixer_layer_num, token_dim=128, channel_dim=256, input_dim=mlp_dim,output_dim=output_dim,expand=False,dropout=dropout) def forward(self,x): x = self.MLP1(x) prediction = self.mixer(x) return prediction #=========== Define Model unique to FB search ============ class MonsterFB(nn.Module): ''' OOP for the model that combines and Mixer and MLP layers ''' def __init__(self,dim_g,dim_s,mlp_dim,patch_size,mixer_layer_num, \ embed_dim=128, token_dim=128, channel_dim=256, \ mlp_layer_num_front=3,mlp_layer_num_back=3,dropout=0., \ device=None, stop_threshold=1e-7, \ log_mode=False, ckpt_dir= os.path.join(os.path.abspath(''), 'models','Mixer') \ ): super().__init__() input_dim = dim_g output_dim = dim_s # GPU device if not device: self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") else: self.device = device self.stop_threshold = stop_threshold self.ckpt_dir = ckpt_dir self.log = SummaryWriter(self.ckpt_dir) self.log_mode = log_mode # MLP layers in front sequence1=[nn.Linear(input_dim,mlp_dim),nn.ReLU(),nn.Dropout(dropout)] for _ in range(mlp_layer_num_front-1): sequence1.append(nn.Linear(mlp_dim,mlp_dim)) sequence1.append(nn.ReLU()) sequence1.append(nn.Dropout(dropout)) # MLP layers at back if mlp_layer_num_back == 1: sequence2=[nn.Linear(mlp_dim,output_dim)] else: sequence2=[nn.Linear(mlp_dim,mlp_dim),nn.ReLU(),nn.Dropout(dropout)] for _ in range(mlp_layer_num_back-1): if _ == mlp_layer_num_back -2: sequence2.append(nn.Linear(mlp_dim,output_dim)) else: sequence2.append(nn.Linear(mlp_dim,mlp_dim)) sequence2.append(nn.ReLU()) sequence2.append(nn.Dropout(dropout)) self.MLP1 = nn.Sequential( *sequence1 ) self.MLP2 = nn.Sequential( *sequence2 ) #the MLP-MIXER self.mixer = MMixer(patch_size=patch_size,embed_dim=embed_dim,n_block=mixer_layer_num, token_dim=token_dim, channel_dim=channel_dim, input_dim=mlp_dim,output_dim=mlp_dim,expand=False) def forward(self,x): x = self.MLP1(x) x = self.mixer(x) prediction = self.MLP2(x) return prediction def evaluate(self, test_x, test_y, save_dir='data/', prefix=''): # Make sure there is a place for the evaluation if not os.path.isdir(save_dir): os.makedirs(save_dir) saved_model_str = prefix # Get the file names Ypred_file = os.path.join(save_dir, 'test_Ypred_{}.csv'.format(saved_model_str)) Xtruth_file = os.path.join(save_dir, 'test_Xtruth_{}.csv'.format(saved_model_str)) Ytruth_file = os.path.join(save_dir, 'test_Ytruth_{}.csv'.format(saved_model_str)) test_loader = DataLoader(helper.MyDataset(test_x,test_y)) if criterion is None: criterion = nn.MSELoss() mse_error = helper.eval_loader(self,test_loader,self.device,criterion) # Write to files if log_mode = True if self.log_mode: with open(Xtruth_file, 'a') as fxt,open(Ytruth_file, 'a') as fyt,\ open(Ypred_file, 'a') as fyp: for j, (geometry, spectra) in enumerate(test_loader): geometry = geometry.to(self.device) spectra = spectra.to(self.device) Ypred = self.forward(geometry).cpu().data.numpy() np.savetxt(fxt, geometry.cpu().data.numpy()) np.savetxt(fyt, spectra.cpu().data.numpy()) np.savetxt(fyp, Ypred) return mse_error def load_model(self, pre_trained_model=None, model_directory=None): """ Loading the model from the check point folder with name best_model_forward.pt :return: """ if pre_trained_model is None: # Loading the trained model if model_directory is None: model_directory = self.ckpt_dir # self.model.load_state_dict(torch.load(os.path.join(self.ckpt_dir, 'best_model_state_dict.pt'))) self.model = torch.load(os.path.join(model_directory, 'best_model_forward.pt')) print("You have successfully loaded the model from ", model_directory) else: # Loading the pretrained model from the internet print("You have successfully loaded the pretrained model for ", pre_trained_model) def train_(self,trainloader,testloader, \ batch_size=128,criterion=None,epochs=300, eval_step=10, \ optm='Adam', lr=1e-4, weight_decay=5e-4, lr_scheduler_name=None, lr_decay_rate=0.3): ''' Parameters: (1) trainloader: data loader of training data (2) testloader: data loader of test/val data ''' # Construct optimizer after the model moved to GPU optimizer = self.make_optimizer(optim=optm, lr=lr, reg_scale=weight_decay) scheduler = self.make_lr_scheduler(optimizer, lr_scheduler_name, lr_decay_rate) if not criterion: criterion = nn.MSELoss() minvalloss = math.inf self.to(self.device) for epoch in tqdm(range(epochs)): self.train() for i,data in enumerate(trainloader): x, y = data optimizer.zero_grad() predict = self.forward(x.to(self.device)) loss = criterion(predict,y.to(self.device)) loss.backward() optimizer.step() if epoch % eval_step == 0: # For eval steps, do the evaluations and tensor board trainloss = helper.eval_loader(self,trainloader,self.device,criterion) valloss = helper.eval_loader(self,testloader,self.device,criterion) self.log.add_scalar('Loss/total_train', trainloss, epoch) self.log.add_scalar('Loss/total_test', valloss, epoch) if valloss < minvalloss: minvalloss = valloss self.minvalloss = minvalloss if self.log_mode: self.save() print("Saving the model down...") if minvalloss < self.stop_threshold: print("Training finished EARLIER at epoch %d, reaching loss of %.5f" %\ (epoch, self.minvalloss)) break print("This is Epoch %d, training loss %.5f, validation loss %.5f" \ % (epoch, trainloss, valloss)) if scheduler: scheduler.step() def save(self): """ Saving the model to the current check point folder with name best_model_forward.pt :return: None """ # torch.save(self.model.state_dict, os.path.join(self.ckpt_dir, 'best_model_state_dict.pt')) torch.save(self, os.path.join(self.ckpt_dir, 'best_model_forward.pt')) def make_lr_scheduler(self, optm, lr_scheduler_name, lr_decay_rate, warm_restart_T_0=50): """ Make the learning rate scheduler as instructed. More modes can be added to this, current supported ones: 1. ReduceLROnPlateau (decrease lr when validation error stops improving :return: """ if lr_scheduler_name == 'warm_restart': return lr_scheduler.CosineAnnealingWarmRestarts(optm, warm_restart_T_0, T_mult=1, eta_min=0, last_epoch=-1, verbose=False) elif lr_scheduler_name == 'reduce_plateau': return lr_scheduler_name.ReduceLROnPlateau(optimizer=optm, mode='min', factor=lr_decay_rate, patience=10, verbose=True, threshold=1e-4) else: return None def make_optimizer(self, optim, lr, reg_scale): """ Make the corresponding optimizer from the Only below optimizers are allowed. Welcome to add more :return: """ if optim == 'Adam': op = torch.optim.Adam(self.parameters(), lr=lr, weight_decay=reg_scale) elif optim == 'RMSprop': op = torch.optim.RMSprop(self.parameters(), lr=lr, weight_decay=reg_scale) elif optim == 'SGD': op = torch.optim.SGD(self.parameters(), lr=lr, weight_decay=reg_scale) else: raise Exception("Your Optimizer is neither Adam, RMSprop or SGD, please change in param or contact Ben") return op
AEML
/models/MIXER/MLP_MIXER.py
MLP_MIXER.py
import numpy as np import torch class MyDataset(torch.utils.data.Dataset): def __init__(self, X, Y): 'Initialization' self.X = X self.Y = Y def __len__(self): 'Denotes the total number of samples' return len(self.X) def __getitem__(self, index): 'Generates one sample of data' X = self.X[index] Y = self.Y[index] return X, Y """# Helper Functions""" def train_test_split(X,Y,seed=42): np.random.seed(seed) indices = np.random.permutation(range(len(X))) train_indices = indices[:len(X)//10*7] val_indices = indices[len(X)//10*7:len(X)//10*8] test_indices = indices[len(X)//10*8:] X=torch.tensor(X).float() Y=torch.tensor(Y).float() return X[train_indices],Y[train_indices],X[val_indices],Y[val_indices],X[test_indices],Y[test_indices] def train_val_split(X,Y,seed=42): np.random.seed(seed) indices = np.random.permutation(range(len(X))) train_indices = indices[:len(X)//10*7] val_indices = indices[len(X)//10*7:] X=torch.tensor(X).float() Y=torch.tensor(Y).float() return X[train_indices],Y[train_indices],X[val_indices],Y[val_indices] def eval_loader(model,loader,device,criterion): model.eval() losses=[] for data in loader: x, y = data predict = model(x.to(device)) loss = criterion(predict,y.to(device)).item() losses.append(loss) return np.mean(losses) def normalize_np(x, x_max_list=None, x_min_list=None): """ Normalize the x into [-1, 1] range in each dimension [:, i] :param x: np array to be normalized :return: normalized np array """ if x_max_list is not None: if x_min_list is None or len(x[0]) != len(x_max_list) or len(x_max_list) != len(x_min_list): print("In normalize_np, your dimension does not match with provided x_max, try again") quit() new_x_max_list = [] new_x_min_list = [] for i in range(len(x[0])): if x_max_list is None: x_max = np.max(x[:, i]) x_min = np.min(x[:, i]) else: x_max = x_max_list[i] x_min = x_min_list[i] x_range = (x_max - x_min ) /2. x_avg = (x_max + x_min) / 2. x[:, i] = (x[:, i] - x_avg) / x_range print("In normalize_np, row ", str(i), " your max is:", np.max(x[:, i])) print("In normalize_np, row ", str(i), " your min is:", np.min(x[:, i])) if x_max_list is None: assert np.max(x[:, i]) - 1 < 0.0001, 'your normalization is wrong' assert np.min(x[:, i]) + 1 < 0.0001, 'your normalization is wrong' new_x_max_list.append(x_max) new_x_min_list.append(x_min) return x, np.array(new_x_max_list), np.array(new_x_min_list) def plotMSELossDistrib(pred_file, truth_file, save_dir='data/'): """ Function to plot the MSE distribution histogram :param: pred_file: The Y prediction file :param: truth_file: The Y truth file :param: flags: The flags of the model/evaluation :param: save_dir: The directory to save the plot """ mae, mse = compare_truth_pred(pred_file, truth_file) plt.figure(figsize=(12, 6)) plt.hist(mse, bins=100) plt.xlabel('Mean Squared Error') plt.ylabel('cnt') plt.suptitle('(Avg MSE={:.4e}, 25%={:.3e}, 75%={:.3e})'.format(np.mean(mse), np.percentile(mse, 25), np.percentile(mse, 75))) if isinstance(pred_file, str): eval_model_str = pred_file.split('Ypred')[-1].split('.')[0] else: eval_model_str = 'MSE_unknon_name' plt.savefig(os.path.join(save_dir, '{}.png'.format(eval_model_str))) print('(Avg MSE={:.4e})'.format(np.mean(mse))) return np.mean(mse)
AEML
/models/MIXER/helper.py
helper.py
try: from Crypto.Cipher import AES except ImportError: print("Pycryptodome Is Not Found On This Computer\n" "Please Install using pip [ pip install pycryptodome ]") sys.exit(1) import os, sys from hashlib import sha256 class AES_Encryption: """ The Advanced Encryption Standard (AES) is a symmetric block cipher chosen by the U.S. government to protect classified information. AES is implemented in software and hardware throughout the world to encrypt sensitive data. It is essential for government computer security, cybersecurity and electronic data protection. Please Refer To The https://github.com/pmk456/AES-Encryptor README.md For Perfectly Using This Package """ def __init__(self, key, iv="THIS IS IV 45600", mode=AES.MODE_CBC): """ Constructor For This Class :param key: Key Must be string which will used to encrypt the strings or files :param iv: initializing vector which is used to randomize the encrypted data, This Must Be 16 Bytes Long, default=THIS IS IV 45600 :param mode: mode for encrytping data, default=MODE_CBC """ if len(iv) < 16 or len(iv) > 16: print("Incorrect IV Length (It Must Be 16 Bytes Long)") sys.exit(1) if not isinstance(key, str): print("Key Must Be String") sys.exit(1) if not isinstance(iv, str): print("IV Must Be String") sys.exit(1) self.key = sha256(key.encode()).digest() self.IV = iv.encode() self.mode = mode def pad(self, data): """ This Function Is Created For Padding Messages into multiple of 16 :param data: Data which is not a multiple of 16 :return: returns encoded string and make it multiple of 16 """ while len(data) % 16 != 0: data = data + ' ' return data.encode() def encrypt(self, message): """ Used To Encrypt Strings :param message: String Which Want To Be Encrypted :return: Encrypted Data Of The String Which Will Be In Bytes """ if not isinstance(message, str): return "Encrypt Function Only Accepts Strings" try: cipher = AES.new(key=self.key, mode=self.mode, iv=self.IV) encrypted_msg = cipher.encrypt(self.pad(message)) except Exception: return "Failed To Encrypt String" else: return encrypted_msg def decrypt(self, data): """ Used To Decrypt Data Given :param data: data which is encrypted with the same given key :return: Plain string """ if not isinstance(data, bytes): return "Decrypt Function Only Accepts Bytes" try: cipher = AES.new(key=self.key, mode=self.mode, iv=self.IV) decrypted_data = cipher.decrypt(data) except Exception: return "Failed To Decrypt String Please Check The Key And IV\n" \ "Please Re-Verify The Given Data, Data May Be Changed\n" \ "Data Bytes Must Be Multiple Of 16" else: return decrypted_data.decode().rstrip() def file_encrypt(self, path): """ Used To Encrypt The File :param path: Path Of The File Note: If You are using windows please put [ \\ ] :return: Encrypted File In the same given path with the same name but with extension .enc """ if not os.path.exists(path): print("Path not exists") if sys.platform == 'win32': print(r"Note: If You are using windows please put[ \\ ]\n" r"Example: C:\\Windows\\System32\\File.txt") sys.exit(1) try: cipher = AES.new(key=self.key, mode=self.mode, iv=self.IV) with open(path) as file: data = self.pad(file.read()) encrypted_data = cipher.encrypt(data) new = path + '.enc' with open(new, 'wb') as file: file.write(encrypted_data) except Exception: return '''Something Went Wrong During Encryption Of The File''' else: return '''File Successfully Encrypted With Given Key''' def file_decrypt(self, path): """ Used To Decrypt The File :param path: Path Of The File Note: If You are using windows please put [ \\ ] Example: C:\\Windows\\System32\\File.txt :return: Decrypted File With Removed .enc extension In the same given path """ if not isinstance(path, str): print("Path Must Be String") if not os.path.exists(path): print("Path not exists") if sys.platform == 'win32': print(r"Note: If You are using windows please put[ \\ ]\n" r"Example: C:\\Windows\\System32\\File.txt") sys.exit(1) try: cipher = AES.new(key=self.key, mode=self.mode, iv=self.IV) with open(path, 'rb') as file: data = file.read() decrypted_data = cipher.decrypt(data) new = path.replace('.enc', '') with open(new, 'wb') as file: file.write(decrypted_data) except Exception: return '''Something Went Wrong During Decryption Of The File, Please Cross Check Key And IV''' else: return '''File Successfully Decrypted With Given Key'''
AES-Encryptor
/AES_Encryptor-2.0-py3-none-any.whl/Encryptor/__init__.py
__init__.py
from AES_Python.encrypt import encrypt from AES_Python.decrypt import decrypt import AES_Python def main(): print("-"*66) print(r""" ______ _____ _____ _ _ /\ | ____|/ ____| | __ \ | | | | / \ | |__ | (___ ______| |__) | _| |_| |__ ___ _ __ / /\ \ | __| \___ \______| ___/ | | | __| '_ \ / _ \| '_ \ / ____ \| |____ ____) | | | | |_| | |_| | | | (_) | | | | /_/ \_\______|_____/ |_| \__, |\__|_| |_|\___/|_| |_| __/ | |___/ """) print("-"*66) print(f"Version: {AES_Python.__version__} {AES_Python.__copyright__}") print("-"*66) print("""This is a simple AES (Advanced Encryption Standard) implementation in Python-3. It is a pure Python implementation of AES that is designed to be used as a educational tool only. It is not intended to be used in any other use case than educational and no security is guaranteed for data encrypted or decrypted using this tool.""") print("-"*66) run() def run(): action = input("Do you want to encrypt, decrypt or quit? (e/d/q): ") if action == "e": running_mode = input("Please select cipher running mode (ECB/CBC/CFB/OFB/CTR/GCM): ") if running_mode == "ECB": key = input("Please enter your key: ") file_path = input("Please enter path to file: ") confirmation = input("Are you sure you want to encrypt this file? (y/n): ") if confirmation == "y": encrypt(key, file_path, running_mode) print("Encryption complete!") elif confirmation == "n": print("Encryption aborted!") exit() else: print("Invalid input!") exit() elif running_mode in ["CBC", "CFB", "OFB", "CTR", "GCM"]: key = input("Please enter your key: ") iv = input("Please enter your iv: ") file_path = input("Please enter path to file: ") confirmation = input("Are you sure you want to encrypt this file? (y/n): ") if confirmation == "y": decrypt(key, file_path, running_mode, iv) print("Encryption complete!") elif confirmation == "n": print("Encryption aborted!") exit() else: print("Invalid input!") exit() else: print("Invalid cipher running mode") run() elif action == "d": pass elif action == "q": print("Exiting...") exit() else: print("Invalid action (to cancel enter 'q')") run() if __name__ == "__main__": main()
AES-Python
/AES_Python-1.0.0-py3-none-any.whl/AES_Python/__main__.py
__main__.py
from os.path import getsize from os import remove # --------------- # Fixed variables # --------------- # Sbox & inverse Sbox subBytesTable = ( 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 ) invSubBytesTable = ( 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d ) # Round constant round_constant = ( 0x00000000, 0x01000000, 0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000, 0x40000000, 0x80000000, 0x1B000000, 0x36000000, 0x6C000000, 0xD8000000, 0xAB000000, 0x4D000000, ) # --------------- # Main action functions # --------------- # Xtime def xtime(a): return (((a << 1) ^ 0x1B) & 0xFF) if (a & 0x80) else (a << 1) # Converts a list to a matrix of 4x4 def list_to_matrix(data): return [list(data[i:i+4]) for i in range(0, len(data), 4)] # Converts a matrix of 4x4 to a list def matrix_to_list(matrix): return sum(matrix, []) # Add round key function def add_round_key(data, round_key): key = list_to_matrix(round_key) for i in range(4): for j in range(4): data[i][j] ^= key[i][j] return data # XOR function def xor(data1, data2): data1, data2 = list_to_matrix(data1), list_to_matrix(data2) for i in range(4): for j in range(4): data1[i][j] ^= data2[i][j] return matrix_to_list(data1) # Performs the byte substitution layer def sub_bytes(data, bytesTable): for r in range(4): for c in range(4): data[r][c] = bytesTable[data[r][c]] return data # Shift rows function def shift_rows(data): data[0][1], data[1][1], data[2][1], data[3][1] = data[1][1], data[2][1], data[3][1], data[0][1] data[0][2], data[1][2], data[2][2], data[3][2] = data[2][2], data[3][2], data[0][2], data[1][2] data[0][3], data[1][3], data[2][3], data[3][3] = data[3][3], data[0][3], data[1][3], data[2][3] return data # Inverse shift rows function def inv_shift_rows(data): data[0][1], data[1][1], data[2][1], data[3][1] = data[3][1], data[0][1], data[1][1], data[2][1] data[0][2], data[1][2], data[2][2], data[3][2] = data[2][2], data[3][2], data[0][2], data[1][2] data[0][3], data[1][3], data[2][3], data[3][3] = data[1][3], data[2][3], data[3][3], data[0][3] return data # Performs the mix columns layer def mix_columns(data): def mix_single_column(data): # see Sec 4.1.2 in The Design of Rijndael t = data[0] ^ data[1] ^ data[2] ^ data[3] u = data[0] data[0] ^= t ^ xtime(data[0] ^ data[1]) data[1] ^= t ^ xtime(data[1] ^ data[2]) data[2] ^= t ^ xtime(data[2] ^ data[3]) data[3] ^= t ^ xtime(data[3] ^ u) def mix(data): for i in range(4): mix_single_column(data[i]) return data data = mix(data) return data # Preforms the inverse mix columns layer def inv_mix_columns(data): # see Sec 4.1.3 in The Design of Rijndael for i in range(4): u = xtime(xtime(data[i][0] ^ data[i][2])) v = xtime(xtime(data[i][1] ^ data[i][3])) data[i][0] ^= u data[i][1] ^= v data[i][2] ^= u data[i][3] ^= v mix_columns(data) return data # Adds a padding to ensure a bloke size of 16 bytes def add_padding(data): length = 16 - len(data) for i in range(length): data.append(0) return data, length # Removes the padding def remove_padding(data, identifier): if identifier[-1] == 0: return data elif identifier[-1] > 0 and identifier[-1] < 16: return data[:-identifier[-1]] else: raise ValueError('Invalid padding') # Performs the encryption rounds def encryption_rounds(data, key): # generates round keys round_keys, nr = keyExpansion(key) # Creates a 4x4 matrix from the 16-byte array data = list_to_matrix(data) # Inizial add round key data = add_round_key(data, round_keys[0]) # Rounds 1 to 9 or 1 to 11 or 1 to 13 for i in range(1, (nr - 1)): data = sub_bytes(data, subBytesTable) data = shift_rows(data) data = mix_columns(data) data = add_round_key(data, round_keys[i]) # Final round data = sub_bytes(data, subBytesTable) data = shift_rows(data) data = add_round_key(data, round_keys[nr - 1]) return matrix_to_list(data) # Performs the decryption rounds def decryption_rounds(data, key): # generates round keys round_keys, nr = keyExpansion(key) # Creates a 4x4 matrix from the 16-byte array data = list_to_matrix(data) # Inizial add round key data = add_round_key(data, round_keys[-1]) # Rounds 1 to 9 or 1 to 11 or 1 to 13 for i in range(1, (nr - 1)): data = inv_shift_rows(data) data = sub_bytes(data, invSubBytesTable) data = add_round_key(data, round_keys[-(i+1)]) data = inv_mix_columns(data) # Final round data = inv_shift_rows(data) data = sub_bytes(data, invSubBytesTable) data = add_round_key(data, round_keys[0]) return matrix_to_list(data) # --------------- # Key expantion setup # --------------- # Key expansion function (returns a list of round keys) def keyExpansion(key): # Format key correctly for the key expansion key = [key[i:i+2] for i in range(0, len(key), 2)] # Key expansion setup if len(key) == 16: words = key_schedule(key, 4, 11) nr = 11 if len(key) == 24: words = key_schedule(key, 6, 13) nr = 13 if len(key) == 32: words = key_schedule(key, 8, 15) nr = 15 round_keys = [None for i in range(nr)] tmp = [None for i in range(4)] for i in range(nr * 4): for index, t in enumerate(words[i]): tmp[index] = int(t, 16) # type: ignore words[i] = tuple(tmp) for i in range(nr): round_keys[i] = (words[i * 4] + words[i * 4 + 1] + words[i * 4 + 2] + words[i * 4 + 3]) return round_keys, nr # Key schedule (nk = number of colums, nr = number of rounds) def key_schedule(key, nk, nr): # Create list and populates first nk words with key words = [(key[4*i], key[4*i+1], key[4*i+2], key[4*i+3]) for i in range(nk)] # fill out the rest based on previews words, rotword, subword and rcon values limit = False for i in range(nk, (nr * nk)): # get required previous keywords temp, word = words[i-1], words[i-nk] # if multiple of nk use rot, sub, rcon etc if i % nk == 0: x = SubWord(RotWord(temp)) rcon = round_constant[int(i/nk)] temp = hexor(x, hex(rcon)[2:]) limit = False elif i % 4 == 0: limit = True if i % 4 == 0 and limit and nk >= 8: temp = SubWord(temp) # xor the two hex values xord = hexor(''.join(word), ''.join(temp)) words.append((xord[:2], xord[2:4], xord[4:6], xord[6:8])) return words # takes two hex values and calculates hex1 xor hex2 def hexor(hex1, hex2): # convert to binary bin1 = hex2binary(hex1) bin2 = hex2binary(hex2) # calculate xord = int(bin1, 2) ^ int(bin2, 2) # cut prefix hexed = hex(xord)[2:] # leading 0s get cut above, if not length 8 add a leading 0 if len(hexed) != 8: hexed = '0' + hexed return hexed # takes a hex value and returns binary def hex2binary(hex): return bin(int(str(hex), 16)) # takes from 1 to the end, adds on from the start to 1 def RotWord(word): return word[1:] + word[:1] # selects correct value from sbox based on the current word def SubWord(word): sWord = [] # loop throug the current word for i in range(4): # check first char, if its a letter(a-f) get corresponding decimal # otherwise just take the value and add 1 if word[i][0].isdigit() is False: row = ord(word[i][0]) - 86 else: row = int(word[i][0])+1 # repeat above for the seoncd char if word[i][1].isdigit() is False: col = ord(word[i][1]) - 86 else: col = int(word[i][1])+1 # get the index base on row and col (16x16 grid) sBoxIndex = (row*16) - (17-col) # get the value from sbox without prefix piece = hex(subBytesTable[sBoxIndex])[2:] # check length to ensure leading 0s are not forgotton if len(piece) != 2: piece = '0' + piece sWord.append(piece) # return string return ''.join(sWord) # --------------- # Running modes setup # --------------- # ECB encryption function def ecb_enc(key, file_path): file_size = getsize(file_path) with open(f"{file_path}.enc", 'wb') as output, open(file_path, 'rb') as data: for i in range(int(file_size/16)): raw = [i for i in data.read(16)] result = bytes(encryption_rounds(raw, key)) output.write(result) if file_size % 16 != 0: raw = [i for i in data.read()] raw, length = add_padding(raw) result = bytes(encryption_rounds(raw, key)) identifier = bytes(encryption_rounds([0 for i in range(15)] + [length], key)) output.write(result + identifier) else: identifier = bytes(encryption_rounds([0 for i in range(16)], key)) output.write(identifier) remove(file_path) # ECB decryption function def ecb_dec(key, file_path): file_size = getsize(file_path) file_name = file_path[:-4] with open(f"{file_name}", 'wb') as output, open(file_path, 'rb') as data: for i in range(int(file_size/16) - 2): raw = [i for i in data.read(16)] result = bytes(decryption_rounds(raw, key)) output.write(result) data_pice = [i for i in data.read(16)] identifier = [i for i in data.read()] result = decryption_rounds(data_pice, key) identifier = decryption_rounds(identifier, key) result = bytes(remove_padding(result, identifier)) output.write(result) remove(file_path) # CBC encryption function def cbc_enc(key, file_path, iv): file_size = getsize(file_path) vector = [int(iv[i:i+2], 16) for i in range(0, len(iv), 2)] with open(f"{file_path}.enc", 'wb') as output, open(file_path, 'rb') as data: for i in range(int(file_size/16)): raw = [i for i in data.read(16)] raw = xor(raw, vector) vector = encryption_rounds(raw, key) output.write(bytes(vector)) if file_size % 16 != 0: raw = [i for i in data.read()] raw, length = add_padding(raw) raw = xor(raw, vector) vector = encryption_rounds(raw, key) identifier = xor(([0 for i in range(15)] + [length]), vector) identifier = encryption_rounds(identifier, key) output.write(bytes(vector + identifier)) else: identifier = xor([0 for i in range(16)], vector) identifier = bytes(encryption_rounds(identifier, key)) output.write(identifier) remove(file_path) # CBC decryption function def cbc_dec(key, file_path, iv): iv = [int(iv[i:i+2], 16) for i in range(0, len(iv), 2)] file_size = getsize(file_path) file_name = file_path[:-4] with open(f"{file_name}", 'wb') as output, open(file_path, 'rb') as data: if int(file_size/16) - 3 >= 0: vector = [i for i in data.read(16)] raw = decryption_rounds(vector, key) result = xor(raw, iv) output.write(bytes(result)) for i in range(int(file_size/16) - 3): raw = [i for i in data.read(16)] result = decryption_rounds(raw, key) result = xor(result, vector) vector = raw output.write(bytes(result)) else: vector = iv data_pice = [i for i in data.read(16)] vector_1, identifier = data_pice, [i for i in data.read()] result = decryption_rounds(data_pice, key) identifier = decryption_rounds(identifier, key) identifier = xor(identifier, vector_1) data_pice = xor(result, vector) result = bytes(remove_padding(data_pice, identifier)) output.write(result) remove(file_path)
AES-Python
/AES_Python-1.0.0-py3-none-any.whl/AES_Python/AES.py
AES.py
from base64 import b64decode from base64 import b64encode from Crypto import Random from Crypto.Cipher import AES from Crypto.Hash import SHA256 from AESEncryptor_heureka_code.Exceptions import WrongPassword, TextIsNotEncrypted, TextIsEncrypted __author__ = "heureka-code" __date__ = "11.03.2021" __maintainer__ = "heureka-code" __status__ = "Prototype" __doc__ = """ class AESTextEncryptor: // __init__ // Nimmt die Argumente: // * passwort, ein String, der das Passwort für die Verschlüsselung ist // * signaturtext, ein String, an dem das Programm einen verschlüsselten Text erkennt // * chunks, bezeichnet die Blockgröße def __init__(self, passwort: str, signaturtext: str = "Gewidmet Lou", chunks: int = 32*1024) // encrypt // Nimmt das Argument: // * text, der String, der verschlüsselt werden soll. // Liefert einen String im base64 Format zurück def encrypt(self, text: str) -> str // decrypt // Nimmt das Argument: // * text, ein String im base64 Format, der entschlüsselt werden soll. // Liefert einen String mit dem entschlüsselten Text zurück def decrypt(self, text: str) -> str // Gibt den Ausgabestring zurück def __str__(self) -> str // Gibt den Representationsstring zurück def __repr__(self) -> str // Gibt zurück, ob das Passwort mit dem des Arguments übereinstimmt def __eq__(self, other) -> bool // Gibt zurück, ob das Passwort dem Argument ungleich ist def __ne__(self, other) -> bool // Gibt die Länge des Passworts zurück def __len__(self) -> int // Und Property Methoden // Und private Methoden """ class AESTextEncryptor: def __init__(self, passwort: str, signaturtext: str = None, chunks: int = 32 * 1024): if signaturtext not in [None, ""]: self.__signaturtext = signaturtext else: self.__signaturtext = "Gewidmet Lou" self.__chunks = chunks self.__passwort = passwort pass def encrypt(self, text: str) -> str: self.__text_bereits_verschluesselt_pruefung(text) out_bytes = bytes() size = str(len(text)).zfill(16) IV = Random.new().read(16) encrytor = AES.new(self.__get_key_from_password(), AES.MODE_CFB, IV) out_bytes += bytes(self.__signaturtext, encoding="utf8") out_bytes += SHA256.new(self.__get_key_from_password()).digest() out_bytes += size.encode("utf-8") out_bytes += IV index = 0 while True: chunk = bytes(text[index: index+self.__chunks], encoding="utf8") if len(chunk) == 0: break if len(chunk) % 16 != 0: chunk += b" " * (16 - (len(chunk) % 16)) out_bytes += encrytor.encrypt(chunk) index += self.__chunks return str(b64encode(out_bytes), encoding="utf8") def decrypt(self, text: str) -> str: text = b64decode(text) self.__text_kann_mit_passwort_entschlusselt_werden(text) self.__text_nicht_verschluesselt_pruefung(text) text = text[len(self.__signaturtext)+32:] out_bytes = bytes() size = int(text[:16]) text = text[16:] IV = text[:16] text = text[16:] decryptor = AES.new(self.__get_key_from_password(), AES.MODE_CFB, IV) index = 0 while True: chunk = text[index: index + self.__chunks] if len(chunk) == 0: break out_bytes += decryptor.decrypt(chunk) index += self.__chunks return str(out_bytes[:size], encoding="utf8") def __str__(self) -> str: """Gibt den Ausgabestring des Objekts zurück""" return f"<AESTextEncryptor passwort={self.__passwort} signatur={self.__signaturtext} chunks={self.__chunks}>" def __repr__(self) -> str: """Gibt den Representationsstring zurück""" return f"AESTextEncryptor({self.__passwort}, {self.__signaturtext}, {self.__chunks})" def __eq__(self, other) -> bool: """Gibt zurück, ob das Passwort mit dem des Arguments übereinstimmt""" if type(other) == str: return self.__passwort == other elif type(other) == type(self): return self.__passwort == other.__passwort pass def __ne__(self, other) -> bool: """Gibt zurück, ob das Passwort von dem des Arguments verschieden ist""" return self.__eq__(other) is False def __len__(self) -> int: """Gibt die Länge des Passworts zurück""" return len(self.__passwort) def __get_key_from_password(self) -> bytes: return SHA256.new(bytes(self.__passwort, encoding="utf8")).digest() def __is_encrypted(self, text): return bytes(self.__signaturtext, encoding="utf8") == text[:len(self.__signaturtext)] def __get_text_key(self, text): return text[len(self.__signaturtext):len(self.__signaturtext)+32] def __text_bereits_verschluesselt_pruefung(self, text): if self.__is_encrypted(text): raise TextIsEncrypted pass def __text_nicht_verschluesselt_pruefung(self, text): if self.__is_encrypted(text): pass else: raise TextIsNotEncrypted pass def __text_kann_mit_passwort_entschlusselt_werden(self, text): if self.__get_text_key(text) != SHA256.new(self.__get_key_from_password()).digest(): raise WrongPassword pass @property def signaturtext(self) -> str: return self.__signaturtext @property def passwort(self) -> str: return self.__passwort @property def chunks(self) -> int: return self.__chunks pass
AESEncryptor-heureka-code
/AESEncryptor_heureka_code-1.3.0-py3-none-any.whl/AESEncryptor_heureka_code/AESTextEncryptor.py
AESTextEncryptor.py
from os.path import getsize, isfile from os import remove from base64 import b64decode, b64encode from Crypto import Random from Crypto.Cipher import AES from Crypto.Hash import SHA256 from AESEncryptor_heureka_code.Exceptions import WrongPassword, FileIsEncrypted, FileIsNotEncrypted __author__ = "heureka-code" __date__ = "11.03.2021" __maintainer__ = "heureka-code" __status__ = "Prototype" __doc__ = """ class AESFileEncryptor: // __init__ // Nimmt die Argumente: // * passwort: str, das Passwort mit dem Datein verschlüsselt werden sollen. // * signaturtext: str, ist der Text, an dem das Tool eine verschlüsselte Datei erkennt. // * chunks: int, bezeichnet die Blockgröße def __init__(self, passwort: str, signaturtext: str = "Gewidmet Lou", chunks: int = 32*1024) // encrypt // Nimmt die Argumente // * file: str, die Datei, die verschlüsselt werden soll. // * delete_orig_after: bool, gibt an, ob die Datei, die verschlüsselt wurde, gelöscht werden soll. // Und verschlüsselt eine Datei mit den im Konstruktor festgelegten Parametern def encrypt(self, file: str, delete_orig_after: bool = False) // decrypt // Nimmt die Argumente // * file: str, die Datei, die entschlüsselt werden soll. // * delete_orig_after: bool, gibt an, ob die Datei, die entschlüsselt wurde, gelöscht werden soll. // Und entschlüsselt eine Datei mit den im Konstruktor festgelegten Parametern def decrypt(self, file: str, delete_orig_after: bool = False) // Gibt den Ausgabestring zurück def __str__(self) -> str // Gibt den Representationsstring aus def __repr__(self) -> str // Gibt die Gleichheit des Arguments mit dem Passwort zurück def __eq__(self, other) -> bool // Gibt die Ungleichheit des Arguments mit dem Passwort zurück def __ne__(self, other) -> bool // Gibt die Länge des Passworts zurück def __len__(self) -> int // Und Property Methoden // Sonst nur private Methoden """ class AESFileEncryptor: def __init__(self, passwort: str, signaturtext: str = None, chunks: int = 32 * 1024): """Initialisiert das Objekt""" if signaturtext not in [None, ""]: self.__signaturtext = signaturtext else: self.__signaturtext = "Gewidmet Lou" self.__chunks: int = chunks self.__passwort: str = passwort pass def encrypt(self, file: str, delete_orig_after: bool = False): """Verschlüsselt eine Datei""" self.__encrypt(file, file + ".enc", delete_orig_after) pass def decrypt(self, file: str, delete_orig_after: bool = False): """Entschlüsselt eine Datei""" self.__decrypt(file, delete_orig_after) pass def __str__(self): """Gibt den Ausgabestring des Objekts zurück""" return f"<AESFileEncryptor passwort={self.__passwort} " \ f"signatur={self.__signaturtext}, chunks={self.__chunks}>" def __repr__(self): """Gibt den Representationsstring zurück""" return f"AESFileEncrytor(\"{self.__passwort}\", \"{self.__signaturtext}\", \"{self.__chunks}\")" def __eq__(self, other): """Gibt zurück, ob das Passwort mit dem des Arguments übereinstimmt""" if type(other) == str: return self.__passwort == other elif type(other) == type(self): return self.__passwort == other.__passwort pass def __ne__(self, other): """Gibt zurück, ob das Passwort von dem des Arguments verschieden ist""" return self.__eq__(other) is False def __len__(self): """Gibt die Länge des Passworts zurück""" return len(self.__passwort) def __get_key_from_password(self) -> bytes: return SHA256.new(bytes(self.__passwort, encoding="utf8")).digest() def __is_encrypted(self, file): with open(file, "rb") as f_input: return self.__signaturtext == str(f_input.read(len(self.__signaturtext)), encoding="utf8") pass def __get_file_key(self, file): with open(file, "rb") as f_in: signatur = f_in.read(len(self.__signaturtext)) key = f_in.read(32) return key def __datei_existiert_pruefung(self, file): if isfile(file): pass else: raise FileNotFoundError pass def __datei_bereits_verschluesselt_pruefung(self, file): if self.__is_encrypted(file): raise FileIsEncrypted pass def __datei_nicht_verschluesselt_pruefung(self, file): if self.__is_encrypted(file): pass else: raise FileIsNotEncrypted pass def __datei_kann_mit_passwort_entschlusselt_werden(self, file): if self.__get_file_key(file) != SHA256.new(self.__get_key_from_password()).digest(): raise WrongPassword pass def __encrypt(self, file: str, out_file_name, delete_orig_after=False): self.__datei_existiert_pruefung(file) self.__datei_bereits_verschluesselt_pruefung(file) filesize = str(getsize(file)).zfill(16) IV = Random.new().read(16) encrytor = AES.new(self.__get_key_from_password(), AES.MODE_CFB, IV) with open(file, "rb") as f_input: with open(out_file_name, "wb") as f_output: f_output.write(bytes(self.__signaturtext, encoding="utf8")) f_output.write(SHA256.new(self.__get_key_from_password()).digest()) f_output.write(filesize.encode("utf-8")) f_output.write(IV) while True: chunk = f_input.read(self.__chunks) if len(chunk) == 0: break if len(chunk) % 16 != 0: chunk += b" " * (16 - (len(chunk) % 16)) f_output.write(encrytor.encrypt(chunk)) pass pass if delete_orig_after: remove(file) pass def __decrypt(self, file: str, delete_orig_after=False): self.__datei_nicht_verschluesselt_pruefung(file) self.__datei_kann_mit_passwort_entschlusselt_werden(file) out_file_name = file.rstrip(".enc") with open(file, "rb") as f_input: self.__datei_nicht_verschluesselt_pruefung(file) header_signatur = f_input.read(len(self.__signaturtext)) key_hash = f_input.read(32) filesize = int(f_input.read(16)) IV = f_input.read(16) decryptor = AES.new(self.__get_key_from_password(), AES.MODE_CFB, IV) with open(out_file_name, "wb") as f_output: while True: chunk = f_input.read(self.__chunks) if len(chunk) == 0: break f_output.write(decryptor.decrypt(chunk)) f_output.truncate(filesize) pass pass if delete_orig_after: remove(file) pass @property def signaturtext(self) -> str: return self.__signaturtext @property def passwort(self) -> str: return self.__passwort @property def chunks(self) -> int: return self.__chunks pass
AESEncryptor-heureka-code
/AESEncryptor_heureka_code-1.3.0-py3-none-any.whl/AESEncryptor_heureka_code/AESFileEncryptor.py
AESFileEncryptor.py
# AESRLib - The blend of AES-**R** # ##### v1.1 release ##### PyPi package for variable key based triple layer capsule encryption with power of AES, base64 and user-defined **randomizer (R)** functions. **Index** - [AESRLib - The blend of AES-**R**](https://pypi.org/project/AESRLib/) - [1.0 Changelog](#10-changelog) - [2.0 Usage](#20-usage) - [2.1 Installation](#21-installation) - [2.1.1 Pre-requisities](#211-pre-requisities) - [2.2 Using it](#22-using-it) - [2.2.1 With IDE/IDLE ](#221-with-ideidle) - [2.2.2 With CMD/PS/Terminal ](#222-with-cmdpsterminal) - [3.0 Future Proposals](#30-future-proposals) - [4.0 LICENSE](#40-license) ## **1.0 Changelog** ## > What's new in **v1.1** (v1.1) than **v1.0** (v1.0) * Fixes (minor): Converted the ValueError for wrong password alert into a log message. ## **2.0 Usage** ## To install and use it follow below steps as per convenience. ### 2.1 **Installation** ### * To download, simply use pip to download it from pypi in a python3 supported environment. If you don't have python in your system, download it from python official site (https://www.python.org/downloads) ``` pip install AESRLib ``` * To contribute, fork the git repo and proceed (https://github.com/me-yutakun/AESRLib) #### 2.1.1 Pre-requisities #### python>=3.10, pycryptodome ### 2.2 **Using it** ### #### 2.2.1 With IDE/IDLE #### First, import the module using: ``` from AESRLib import AESRandomizer as alib ``` And you are ready to go! The interactive function to trigger AESR for ready-to-use purpose is: ``` alib.initializer(filename) ``` filename - Its the input filename given with extension for processing like 'test.txt' Providing the abstract function for instant use makes the user's task too easy and readily accessible Ensure that the script using AESR is run in the **same root folder** as the file. #### 2.2.2 With CMD/PS/Terminal #### A ready to use way for running it without any hassle, just follow the steps (after installation of AESRLib as referred above): 1. Download/Copy the main.py file in a folder locally (main.py file is available in https://github.com/me-yutakun/AESRLib) 2. Go to that folder and open cmd/powershell/terminal 3. Place the file you want to encrypt/decrypt in same folder 4. Type ``` python main.py``` in cmd/powershell/terminal then press enter **Recommended**: If screen is exiting too fast after showing error or you are not able to see the result properly use this way to run it. ## **3.0 Future Proposals** ## 1. Includes enhancement for randomization function 2. Bug fixes ## **4.0 LICENSE** ## MIT License Copyright (c) 2018 ###### **Updated**: 22.5.22 8.30 PM (IST)
AESRLib
/AESRLib-1.1.tar.gz/AESRLib-1.1/README.md
README.md
import pyAesCrypt, sys, os, six import pyautogui as py if six.PY2: import Tkinter as tk from Tkinter import * else: import tkinter as tk from tkinter import * #window setup def create(): window = Tk() window.title("File Encrypt") encryptButton = Button(window,width=7, text="Encrypt" , bg='black', fg='green', command= encrypt) encryptButton.grid(column= 0, row= 0) decryptButton = Button(window,width=7,bg='black' , text="Decrypt", fg='green', command= decrypt) decryptButton.grid(column= 1, row= 0) window.mainloop() def getFilePath(): path = py.prompt(text='Enter path to your file', title='File Path' , default='/base/filepath.(extension)') return path def encrypt(): # encryption/decryption buffer size - 64K bufferSize = 64 * 1024 match=False filePathEncrypt = getFilePath() while match is False: password = py.password(text='Enter password:', title='Password', default='enter password', mask='*') passwordConfirm = py.password(text='Confirm password:', title='Password', default='confirm password', mask='*') if password == passwordConfirm: match =True else: py.prompt(text='Passwords did not match, please click ok and try enter password again.', title='Password') # Encrypt pyAesCrypt.encryptFile(filePathEncrypt , filePathEncrypt+".aes" , password, bufferSize) os.system('rm ' + filePathEncrypt) def decrypt(): filePathDecrypt = getFilePath() # encryption/decryption buffer size - 64K bufferSize = 64 * 1024 password = py.password(text='Enter password:', title='Password', default='enter password', mask='*') # get length of file Path pathLen = len(filePathDecrypt) # get range for string array endRange = pathLen - 3 #get file path without aes extension fileWithout, empty = os.path.splitext(filePathDecrypt)[0:endRange] # decrypt pyAesCrypt.decryptFile(filePathDecrypt, fileWithout , password,bufferSize) os.system('rm ' + filePathDecrypt) if __name__ == '__main__': create()
AESencrypt
/AESencrypt-0.0.5-py3-none-any.whl/AESencrypter/__init__.py
__init__.py
import datetime as dt from math import ceil import numpy as np import pandas as pd from scipy.signal import find_peaks, lfilter, butter, medfilt from scipy.stats import entropy from actigraph import raw_to_counts # TODO?: convert Datetime->Float or vice versa to speed up computation ############################## Helper functions: ############################### def median_filter(data, kernel_size=3): df_copy = data.copy() axes = [ax for ax in ['x','y','z','rx','ry','rz'] if ax in data.columns] for axis in axes: df_copy[axis] = medfilt(data[axis], kernel_size=kernel_size) return df_copy def filt(accel, cutoff, btype, order=3): accel_copy = accel.copy() sr = get_sr(accel) nyq = 0.5 * sr normal_cutoff = cutoff / nyq b, a = butter(order, normal_cutoff, btype=btype, fs=sr) axes = [ax for ax in ['x','y','z','rx','ry','rz'] if ax in accel.columns] for axis in axes: accel_copy[axis] = lfilter(b, a, accel[axis]) return accel_copy def hpf(accel, cutoff, order=3): return filt(accel, cutoff, btype='highpass', order=order) def lpf(accel, cutoff, order=3): return filt(accel, cutoff, btype='lowpass', order=order) def bpf(accel, low=0.2, high=20, order=3): cutoff = np.array([low,high]) return filt(accel, cutoff, btype='bandpass', order=order) def get_starts_ends(data, epochs, window_size): # could use window+overlap instead of passing epochs. if type(data.index) == pd.DatetimeIndex: epoch_starts = epochs.index.to_pydatetime() epoch_ends = epoch_starts + dt.timedelta(seconds=window_size) elif type(data.index) == pd.Float64Index: epoch_starts = epochs.index.values epoch_ends = epoch_starts + window_size return epoch_starts, epoch_ends def get_peaks_from_fft(yf, xf): """yf is the fft amplitude. xf are the corresponding frequencies. returns (frequencies, heights)""" if len(yf) == 0: return [], [] # Find peaks: prom = np.percentile(yf, 95) peaks, peak_props = find_peaks(yf, prominence=prom, height=(None,None)) # Arrange by prominence: order = np.flip(np.argsort(peak_props['prominences'])) freqs = xf[peaks[order]] heights = peak_props['prominences'][order] return freqs, heights def get_sr(df): """Returns the sample rate in Hz for the given data. May not work if SR isn't constant.""" if len(df) < 2: raise ValueError("dataset must have >= 2 samples to find SR.") if type(df.index) == pd.DatetimeIndex: return 1/(df.index[1]-df.index[0]).total_seconds() # TODO: make a median method for this, rather than first 2 elements? elif type(df.index) == pd.Float64Index: #return 1/(df.index[1]-df.index[0]) return 1.0/np.median(df.index.values[1:] - df.index.values[:-1]) def get_freq_stats(frame, **kwargs): """Find the most dominant frequencies (Hz) in VM acceleration between some start and stop times. Peaks outside [fmin,fmax] are ignored. The list is sorted by prominence. Peak prominence must be >95th percentile of signal amplitudes to make the list. Returns a dict containing freqs, peak heights, total power, power spectral entropy, f_625, and p_625. If no peaks are found, peaks and heights are empty. Keyword arguments: frame -- a (slice of a) DataFrame sr -- sample rate (Hz) fmin -- minumum frequency to include (Hz) fmax -- maximum frequency to include (Hz) """ # TODO: pad data around frame to allow better FFT? if len(frame) < 2: return { 'top_freqs': [], 'top_freq_powers': [], 'total_power': None, 'entropy': None, 'f_625': None, 'p_625': None, } sr = kwargs.get('sr') if not sr: sr = get_sr(frame) fmin = kwargs.get('fmin', 0.2) # TODO: 0.1 default? fmax = kwargs.get('fmax', 5) if (fmin > 0.6) or (fmax < 2.5): raise ValueError("[fmin, fmax] must include the range [0.6, 2.5] (Hz)") accel = frame.vm # Compute FFT: yf = np.abs(np.fft.rfft(accel)) # TODO?: might phase data be useful? xf = np.fft.rfftfreq( len(accel), 1.0/sr ) freq_bin_width = xf[1] # minus xf[0], which is 0 # TODO?: 'friendly' window sizes for FFT performance # Crop to different frequency ranges: i625 = (xf > 0.6) & (xf < 2.5) yf625 = yf[i625] xf625 = xf[i625] i = (xf > fmin) & (xf < fmax) yf = yf[i] xf = xf[i] # TODO: adjust everything below to handle empty xf/yf # Find peaks: freqs, heights = get_peaks_from_fft(yf, xf) # Other stats: ent = entropy(yf**2) total_power = np.sum((yf/sr)**2) * freq_bin_width # total power is only within this restricted frequency range (0.2-5Hz; # reference uses 0.3-15Hz). units are ~ W/kg not W. to get total power # over all freqs, we don't need FFT, could just do np.sum(accel**2) / sr. try: peak_625_loc = np.argmax(yf625) except ValueError: peak_625_loc = None return { 'top_freqs': freqs, 'top_freq_powers': (heights/sr)**2 * freq_bin_width, 'total_power': total_power, 'entropy': ent, # TODO: use get_peaks_from_fft() for f_625+p_625 if there is a minimum # power requirement for them to be defined. 'f_625': xf625[peak_625_loc] if peak_625_loc is not None else None, 'p_625': (yf625[peak_625_loc]/sr)**2 * freq_bin_width if peak_625_loc is not None else None, } ######################### Feature-computing functions: ######################### def vm_accel(**kwargs): """kwargs: - data (the dataframe) - epochs - window_size - overlap """ data = kwargs.get('data') if ('x' not in data.columns) and ('y' not in data.columns) and ('z' not in data.columns): return # can't compute anything epochs = kwargs.get('epochs') window_size = kwargs.get('window_size') overlap = kwargs.get('overlap') if overlap == 0: if type(data.index) == pd.DatetimeIndex: groups = data.groupby( (data.index - data.index[0]).total_seconds() // window_size ) elif type(data.index) == pd.Float64Index: groups = data.groupby( (data.index - data.index[0]) // window_size ) means = groups.mean().vm.values stds = groups.std().vm.values if len(means) == len(epochs): epochs['vm_mean'] = means epochs['vm_std'] = stds elif len(means) == len(epochs)+1: # off-by-one errors can happen if e.g. first sample is at time 0s # and last is at time 1200s. that would result in e.g. 41 groups # to fill 40 windows. we'll just delete the last group. # TODO: delete first OR last group, whichever had less samples # TODO?: fix that when creating epochs instead epochs['vm_mean'] = means[:len(epochs)] epochs['vm_std'] = stds[:len(epochs)] else: raise ValueError("Can't store %d results in %d windows." % (len(means), len(epochs))) else: # can't use groupby() etc. with overlapping windows. see # https://github.com/pandas-dev/pandas/issues/15354. vm_mean = np.full(len(epochs), np.nan, dtype=np.float64) vm_std = np.full(len(epochs), np.nan, dtype=np.float64) epoch_starts, epoch_ends = get_starts_ends(data, epochs, window_size) for i, (epoch_start, epoch_end) in enumerate(zip(epoch_starts, epoch_ends)): accel = data.loc[epoch_start:epoch_end] vm_mean[i] = np.mean(accel.vm) vm_std[i] = np.std(accel.vm) epochs['vm_mean'] = vm_mean epochs['vm_std'] = vm_std def timestamp_features(**kwargs): data = kwargs.get('data') if type(data.index) != pd.DatetimeIndex: return # TODO: compute hour of day, and day of week. and maybe month or week of # year to capture season. be sure to exclude (at least some of) these by # default in main code. raise NotImplementedError() def freq_stats(**kwargs): """Gets dominant frequencies, power, entropy, etc.""" data = kwargs.get('data') if ('x' not in data.columns) and ('y' not in data.columns) and ('z' not in data.columns): return # can't compute anything epochs = kwargs.get('epochs') window_size = kwargs.get('window_size') epoch_starts, epoch_ends = get_starts_ends(data, epochs, window_size) f1 = np.full(len(epochs), np.nan, dtype=np.float64) p1 = np.full(len(epochs), np.nan, dtype=np.float64) f2 = np.full(len(epochs), np.nan, dtype=np.float64) p2 = np.full(len(epochs), np.nan, dtype=np.float64) f625 = np.full(len(epochs), np.nan, dtype=np.float64) p625 = np.full(len(epochs), np.nan, dtype=np.float64) total_power = np.full(len(epochs), np.nan, dtype=np.float64) ps_ent = np.full(len(epochs), np.nan, dtype=np.float64) for i, (epoch_start, epoch_end) in enumerate(zip(epoch_starts, epoch_ends)): frame = data[epoch_start:epoch_end] results = get_freq_stats(frame, **kwargs) if len(results['top_freqs']) >= 1: f1[i] = results['top_freqs'][0] p1[i] = results['top_freq_powers'][0] if len(results['top_freqs']) >= 2: f2[i] = results['top_freqs'][1] p2[i] = results['top_freq_powers'][1] f625[i] = results['f_625'] p625[i] = results['p_625'] total_power[i] = results['total_power'] ps_ent[i] = results['entropy'] # Note: reference uses 0.3-15Hz total power. We use 0.2-5Hz. f1_prev = np.concatenate(([np.nan],f1[:-1])) # TODO: standardize these (and all) output names: epochs['f1_Hz'] = f1 epochs['f1_power'] = p1 epochs['f1_change'] = f1 / f1_prev # could do this in db instead, but meh. epochs['f2_Hz'] = f2 epochs['f2_power'] = p2 epochs['f625_Hz'] = f625 epochs['f625_power'] = p625 epochs['total_power'] = total_power epochs['p1_fraction'] = epochs['f1_power'] / epochs['total_power'] epochs['ps_entropy'] = ps_ent def corr_coeffs(**kwargs): data = kwargs.get('data') epochs = kwargs.get('epochs') window_size = kwargs.get('window_size') outputs = {} pairings = [['x','y'], ['x','z'], ['y','z'], ['rx','ry'], ['rx','rz'], ['ry','rz']] valid_pairs = [p for p in pairings if (p[0] in data.columns and p[1] in data.columns)] for pair in valid_pairs: pair_str = 'corr_' + pair[0]+pair[1] outputs[pair_str] = np.full(len(epochs), np.nan, dtype=np.float64) if len(outputs) == 0: return # can't compute anything epoch_starts, epoch_ends = get_starts_ends(data, epochs, window_size) valid_axes = [ax for ax in ['x','y','z','rx','ry','rz'] if ax in data.columns] for i, (epoch_start, epoch_end) in enumerate(zip(epoch_starts, epoch_ends)): accel = data.loc[epoch_start:epoch_end] corrs = np.corrcoef(accel[valid_axes].T) for pair in valid_pairs: pair_str = 'corr_' + pair[0]+pair[1] if pair[0] in accel.columns and pair[1] in accel.columns: outputs[pair_str][i] = corrs[valid_axes.index(pair[0])][valid_axes.index(pair[1])] for col in outputs: epochs[col] = outputs[col] def acti_counts(**kwargs): """Compute Actigraph-like "counts". See https://www.ncbi.nlm.nih.gov/pubmed/28604558. See also: https://actigraph.desk.com/customer/en/portal/articles/2515835-what-is-the-difference-among-the-energy-expenditure-algorithms- https://actigraph.desk.com/customer/en/portal/articles/2515804-what-is-the-difference-among-the-met-algorithms- Maybe this if you have HR data: https://actigraph.desk.com/customer/en/portal/articles/2515579-what-is-hree-in-actilife- """ data = kwargs.get('data') if ('x' not in data.columns) and ('y' not in data.columns) and ('z' not in data.columns): return # can't compute anything epochs = kwargs.get('epochs') overlap = kwargs.get('overlap') window_size = kwargs.get('window_size') sr = kwargs.get('sr') if not sr: sr = get_sr(data) axis_counts = {} for axis in ['x','y','z']: if axis in data.columns: axis_counts[axis+'c'] = raw_to_counts(data[axis], sr) results = None for counts in axis_counts: if results is None: results = axis_counts[counts]**2 else: results += axis_counts[counts]**2 vmc = np.sqrt(results) # axis_counts and vmc contain the number of counts for each second of data. if type(data.index) == pd.DatetimeIndex: index = pd.date_range( freq = '1S', start = data.index[0] + dt.timedelta(seconds = 0.5), periods = len(vmc), name = 'time' ) elif type(data.index) == pd.Float64Index: index = pd.Float64Index( data = np.arange(data.index[0]+0.5, data.index[-1]+0.5, 1), name = 'time' ) if len(vmc) != len(index): rm_from_vmc = len(vmc) - len(index) if abs(rm_from_vmc) > 30: # off by more than 30 seconds raise ValueError("Something is wrong. Number of samples really doesn't match SR+timespan of recording.") print("Truncating %d second(s) of counts to (hopefully) match up with windows." % rm_from_vmc) if rm_from_vmc > 0: vmc = vmc[:len(index)] elif rm_from_vmc < 0: vmc = np.pad(vmc,(0,-rm_from_vmc),mode='constant',constant_values=np.nan) counts = pd.Series( data = vmc, index = index, dtype = np.float64, name = 'counts_per_sec' ) # note: with 0.5s offset, counts happen *around* reported time, not just # before or after it if overlap == 0: counts = counts.to_frame() counts['elapsed_sec'] = counts.index - data.index[0] if type(data.index) == pd.DatetimeIndex: counts['elapsed_sec'] = counts['elapsed_sec'].total_seconds() counts_groups = counts.groupby(counts.elapsed_sec // window_size).counts_per_sec epochs['cpm_mean'] = counts_groups.sum().values / (window_size/60.0) else: cpm_mean = np.full(len(epochs), np.nan, dtype=np.float64) epoch_starts, epoch_ends = get_starts_ends(data, epochs, window_size) for i, (epoch_start, epoch_end) in enumerate(zip(epoch_starts, epoch_ends)): accel = data.loc[epoch_start:epoch_end] cpm_mean[i] = np.sum(counts[epoch_start:epoch_end]) / (window_size/60.0) epochs['cpm_mean'] = cpm_mean ################# Map functions to the features they compute: ################## function_feature_map = { vm_accel: ['vm_mean', 'vm_std'], freq_stats: ['f1_Hz', 'f1_power', 'f1_change', 'f2_Hz', 'f2_power', 'f625_Hz', 'f625_power', 'total_power', 'p1_fraction', 'ps_entropy'], corr_coeffs: ['corr_xy', 'corr_xz', 'corr_yz', 'corr_rxry', 'corr_rxrz', 'corr_ryrz'], acti_counts: ['cpm_mean'], timestamp_features: [], # TODO } ################################################################################
AFE
/AFE-2020.3.29-py3-none-any.whl/afe/features.py
features.py
import datetime as dt from math import ceil from warnings import warn import pandas as pd import numpy as np from .features import function_feature_map, get_sr, lpf, median_filter #################################### Class: #################################### class AFE: """Accelerometer Feature Extractor. Extracts features useful for machine learning from raw accelerometer/gyroscope data.""" def __init__(self, data, sr=None, interpolate=True, warn_interpolating=True, smooth=True): """data is a pandas DataFrame containing a time-based index (either timestamp, or seconds elapsed). Columns must be labeled x, y, z (for accelerometer) and rx, ry, rz (for gyroscope). Units must be g's. Any columns may be omitted, e.g. a 2-axis accelerometer may include only x and y. You shouldn't normally need to specify sample rate (sr). If interpolate is set and the sample rate seems inconsistent within the data, we'll interpolate the data to a fixed sample rate (matching the intended original SR as best we can). This class will store a copy of the original DataFrame. """ if type(data) != pd.DataFrame: raise TypeError("data must be a Pandas DataFrame.") data = data.copy() # never mess with the original check_timeindex(data) check_axes(data) data.sort_index(inplace=True) # this really shouldn't be necessary... if not sr: sr = get_sr(data) new_df = interpolate_data(data, sr=sr, warn_interpolating=warn_interpolating) if new_df is not None: data = new_df # TODO?: unit conversion self.data = data self.sr = sr self.data_is_smoothed = False if smooth: self.smooth_data() def smooth_data(self): data = median_filter(self.data) data = lpf(data, cutoff=0.5*self.sr, order=3) self.data = data self.data_is_smoothed = True def compute_vm_accel(self, overwrite=False): if 'vm' not in self.data.columns or overwrite: vm = np.zeros(len(self.data)) for axis in ['x','y','z']: if axis in self.data.columns: vm = vm + self.data[axis].values**2 self.data['vm'] = np.sqrt(vm.astype(np.float64)) def get_features(self, window_size=30, overlap=0, include_timestamp_features=False, include_features=[], exclude_features=[]): """Get a DataFrame of features. Data will be segmented into window_size-second windows with overlap-second overlaps. Set window_size and overlap to None to extract 1 set of features for the entire dataset. If include_timestamp_features is True, and the dataset has a timestamp index, features such as day_of_week will be included in the returned DataFrame. This may be useful to make predictions that have seasonal, circadian, or other time-dependence. include_features and exclude_features are whitelist/blacklist (list of strings) of features to be computed. If a feature extraction function provides some features that are not blacklisted, but some that are, it will be run anyway, and the blacklisted features will be dropped after computing them. """ # TODO: implement some kind of save/don't-recompute option that # preserves already-computed features internally and can recall them # without recomputing. if overlap >= window_size: raise ValueError("overlap must be less than window size.") # TODO: support negative overlap to allow gaps (if it doesn't work already) if include_features: functions_to_run = [fn for fn in function_feature_map if not set(function_feature_map[fn]).isdisjoint(set(include_features))] else: functions_to_run = list(function_feature_map.keys()) if exclude_features: functions_to_run = [fn for fn in functions_to_run if not set(function_feature_map[fn]).issubset(set(exclude_features))] start_times = get_epoch_start_times(self.data, window_size, overlap) epochs = pd.DataFrame(data=None, index=start_times, columns=None, dtype=None) # could store one of these instead of passing window and overlap around separately from epochs: # epochs.window_size = window_size # epochs.window_overlap = overlap # TODO?: remove gravity (or split from 'body' accel)? e.g. "low pass # Butterworth filter with a corner frequency of 0.3 Hz". some other # papers suggest 0.25-0.5 Hz cutoff. self.compute_vm_accel() for function in functions_to_run: function( data = self.data, epochs = epochs, # function will modify this in place. at least that's the plan right now. window_size = window_size, overlap = overlap, include_timestamp_features = include_timestamp_features, sr = self.sr, ) epochs = epochs[[col for col in epochs.columns if col not in exclude_features]] return epochs ############################## Helper functions: ############################### def check_timeindex(df): """Make sure the DataFrame's index is either float or timestamp. """ if type(df.index) == pd.DatetimeIndex or type(df.index) == pd.Float64Index: return raise TypeError("data index must be float or timestamp.") def check_axes(df): needed = ['x','y','z','rx','ry','rz'] if set(df.columns).isdisjoint(needed): raise ValueError("data must contain some columns from [x,y,z,rx,ry,rz].") def get_epoch_start_times(data, window_size, overlap): """Get the start times for all epochs in this recording (as a pandas Index).""" period = window_size - overlap if type(data.index) == pd.DatetimeIndex: start_times = pd.date_range( start = data.index[0], end = data.index[-1], freq = '%dS'%period, name = 'epoch_start' ) elif type(data.index) == pd.Float64Index: timespan = data.index[-1] - data.index[0] windows = ceil(timespan / window_size) start_times = [data.index[0] + i*period for i in range(windows)] start_times = pd.Float64Index(start_times, name = 'epoch_start') return start_times def interpolate_data(df, **kwargs): """Returns a new DataFrame with interpolated copies of the original data. Returns None if interpolation wasn't needed.""" sr = kwargs.get('sr') warn_interpolating = kwargs.get('warn_interpolating') timespan = df.index[-1] - df.index[0] if type(df.index) == pd.DatetimeIndex: timespan = timespan.total_seconds() apparent_timespan = len(df)/sr if ceil(timespan) == ceil(apparent_timespan): return None # seems good enough as-is # based on number of rows in df, we have apparent_timespan seconds of data. # but based on first and last timestamp, we have a different amount of data. # so we'll interpolate df to fix the inconsistent SR. if warn_interpolating: warn("Sample rate doesn't seem consistent. Will interpolate data.") step = 1/sr if type(df.index) == pd.DatetimeIndex: step = dt.timedelta(seconds=step) new_t = np.arange(df.index.values[0], df.index.values[-1], step) keep_cols = ['x','y','z','rx','ry','rz'] new_df = pd.DataFrame( index = new_t, columns = keep_cols, ) for axis in keep_cols: if axis in df.columns: col_interped = np.interp(new_t, df.index.values, df[axis].values) new_df[axis] = col_interped else: new_df.drop(columns=axis, inplace=True) return new_df ################################################################################
AFE
/AFE-2020.3.29-py3-none-any.whl/afe/__init__.py
__init__.py
from typing import Callable, Tuple import numpy as np from tqdm import tqdm from scipy import sparse from FEM.Solvers.Lineal import LinealSparse from .Elements.E2D import Quadrilateral from .Core import Core, Geometry, logging class Elasticity(Core): """Creates a 3D Elasticity problem Args: geometry (Geometry): Input 3D geometry E (Tuple[float, list]): Young Moduli v (Tuple[float, list]): Poisson coeficient rho (Tuple[float, list]): Density. fx (Callable, optional): Force in x direction. Defaults to lambdax:0. fy (Callable, optional): Force in y direction. Defaults to lambdax:0. fz (Callable, optional): Force in z direction. Defaults to lambdax:0. """ def __init__(self, geometry: Geometry, E: Tuple[float, list], v: Tuple[float, list], rho: Tuple[float, list], fx: Callable = lambda x: 0, fy: Callable = lambda x: 0, fz: Callable = lambda x: 0, **kargs) -> None: """Creates a 3D Elasticity problem Args: geometry (Geometry): Input 3D geometry E (Tuple[float, list]): Young Moduli v (Tuple[float, list]): Poisson coeficient rho (Tuple[float, list]): Density. fx (Callable, optional): Force in x direction. Defaults to lambdax:0. fy (Callable, optional): Force in y direction. Defaults to lambdax:0. fz (Callable, optional): Force in z direction. Defaults to lambdax:0. """ if isinstance(E, float) or isinstance(E, int): E = [E]*len(geometry.elements) if isinstance(v, float) or isinstance(v, int): v = [v]*len(geometry.elements) if isinstance(rho, float) or isinstance(rho, int): rho = [rho]*len(geometry.elements) self.E = E self.v = v self.C = [] for E, v in zip(self.E, self.v): c = E/((1.0+v)*(1.0-2.0*v))*np.array([ [1.0-v, v, v, 0.0, 0.0, 0.0], [v, 1.0-v, v, 0.0, 0.0, 0.0], [v, v, 1.0-v, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, (1.0-2.0*v)/2.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, (1.0-2.0*v)/2.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, (1.0-2.0*v)/2.0]]) self.C.append(c) self.rho = rho self.fx = fx self.fy = fy self.fz = fz if not geometry.nvn == 3: print( 'Border conditions lost, please usea a geometry with 3 variables per node (nvn=3)\nRegenerating Geoemtry...') geometry.nvn = 3 geometry.cbe = [] geometry.cbn = [] geometry.initialize() Core.__init__(self, geometry, sparse=True, **kargs) self.K = sparse.lil_matrix((self.ngdl, self.ngdl)) self.M = sparse.lil_matrix((self.ngdl, self.ngdl)) self.name = 'Isotropic Elasticity sparse' self.properties['E'] = self.E self.properties['v'] = self.v self.properties['fx'] = None self.properties['fy'] = None self.properties['fz'] = None self.properties['rho'] = self.rho def elementMatrices(self) -> None: """Calculate the element matrices usign Reddy's (2005) finite element model """ for ee, e in enumerate(tqdm(self.elements, unit='Element')): m = len(e.gdl.T) _x, _p = e._x, e._p # jac, dpz = e.jacs, e.dpz detjac = e.detjac # _j = np.linalg.inv(jac) dpx = e.dpx C = self.C[ee] o = [0.0]*m Ke = np.zeros([3*m, 3*m]) Me = np.zeros([3*m, 3*m]) Fe = np.zeros([3*m, 1]) for k in range(len(e.Z)): # Iterate over gauss points on domain B = np.array([ [*dpx[k, 0, :], *o, *o], [*o, *dpx[k, 1, :], *o], [*o, *o, *dpx[k, 2, :]], [*dpx[k, 2, :], *o, *dpx[k, 0, :]], [*o, *dpx[k, 2, :], *dpx[k, 1, :]], [*dpx[k, 1, :], *dpx[k, 0, :], *o]]) P = np.array([ [*_p[k], *o, *o], [*o, *_p[k], *o], [*o, *o, *_p[k]]]) Ke += (B.T@C@B)*detjac[k]*e.W[k] Me += self.rho[ee]*(P.T@P)*detjac[k]*e.W[k] _fx = self.fx(_x[k]) _fy = self.fy(_x[k]) _fz = self.fz(_x[k]) F = np.array([[_fx], [_fy], [_fz]]) Fe += (P.T @ F)*detjac[k]*e.W[k] self.F[np.ix_(e.gdlm)] += Fe self.K[np.ix_(e.gdlm, e.gdlm)] += Ke self.M[np.ix_(e.gdlm, e.gdlm)] += Me def ensembling(self) -> None: """Creation of the system sparse matrix. Force vector is ensembled in integration method """ logging.info('Ensembling equation system...') self.M = self.M.tocsr() logging.info('Done!') def postProcess(self, **kargs) -> None: """Calculate stress and strain for each element gauss point and vertice. The results are stored in each element sigmas and epsilons properties. """ for i, e in enumerate(self.elements): _, _, du = e.giveSolution(True) exx = du[:, 0, 0] eyy = du[:, 1, 1] ezz = du[:, 2, 2] exy = du[:, 0, 1]+du[:, 1, 0] eyz = du[:, 1, 2]+du[:, 2, 1] exz = du[:, 0, 2]+du[:, 2, 0] epsilons = np.array([exx, eyy, ezz, exz, eyz, exy]) C = self.C[i] e.sigmas = (C @ epsilons).T e.epsilons = epsilons.T # TODO Region 2D is a more robust class for this job def profile(self, region: list[float], n: float = 10) -> None: """Creates a profile in a given region coordinates Args: region (list[float]): List of region coordinates (square region 2D) n (float, optional): Number of points. Defaults to 10. Returns: np.ndarray: Coordinates, displacements and second variable solution """ coords = np.array(region) gdl = np.array([[-1]*len(coords)]) e = Quadrilateral(coords, gdl, n, fast=True) _x, _ = e.T(e.domain.T) valuesU = [] valuesDU = [] for x in tqdm(_x, unit='point'): for e in self.elements: if e.isInside([x])[0]: np.array([[1.3, 2.5, 3.5], [1.5, 2.6, 8.5]]) z = e.inverseMapping(x.reshape([3, 1])) _, u, du = e.giveSolutionPoint(z, True) valuesU += [u] valuesDU += [du] return _x, np.array(valuesU), np.array(valuesDU) class NonLocalElasticity(Elasticity): """Creates a 3D Elasticity problem Args: geometry (Geometry): Input 3D geometry E (Tuple[float, list]): Young Moduli v (Tuple[float, list]): Poisson coeficient rho (Tuple[float, list]): Density. l (float): Internal lenght z1 (float): Z1 factor Lr (float): Influence distance af (Callable): Atenuation function fx (Callable, optional): Force in x direction. Defaults to lambdax:0. fy (Callable, optional): Force in y direction. Defaults to lambdax:0. fz (Callable, optional): Force in z direction. Defaults to lambdax:0. """ def __init__(self, geometry: Geometry, E: Tuple[float, list], v: Tuple[float, list], rho: Tuple[float, list], l: float, z1: float, Lr: float, af: Callable, fx: Callable = lambda x: 0, fy: Callable = lambda x: 0, fz: Callable = lambda x: 0, **kargs) -> None: """Creates a 3D Elasticity problem Args: geometry (Geometry): Input 3D geometry E (Tuple[float, list]): Young Moduli v (Tuple[float, list]): Poisson coeficient rho (Tuple[float, list]): Density. l (float): Internal lenght z1 (float): Z1 factor Lr (float): Influence distance af (Callable): Atenuation function fx (Callable, optional): Force in x direction. Defaults to lambdax:0. fy (Callable, optional): Force in y direction. Defaults to lambdax:0. fz (Callable, optional): Force in z direction. Defaults to lambdax:0. """ Elasticity.__init__(self, geometry, E, v, rho, fx, fy, fz, **kargs) self.l = l self.z1 = z1 self.z2 = 1.0-z1 self.af = af self.Lr = Lr self.properties['l'] = self.l self.properties['z1'] = self.z1 self.properties['z2'] = self.z2 self.properties['Lr'] = self.Lr self.properties['af'] = None nonlocals = self.geometry.detectNonLocal(Lr) for e, dno in zip(self.elements, nonlocals): e.enl = dno self.name = 'Non Local Elasticity sparse-lil' self.KL = sparse.lil_matrix((self.ngdl, self.ngdl)) self.KNL = sparse.lil_matrix((self.ngdl, self.ngdl)) self.M = sparse.lil_matrix((self.ngdl, self.ngdl)) def elementMatrices(self) -> None: """Calculate the element matrices usign Reddy's (2005) finite element model """ for ee, e in enumerate(tqdm(self.elements, unit='Element')): m = len(e.gdl.T) # Gauss points in global coordinates and Shape functions evaluated in gauss points _x, _p = e._x, e._p # Jacobian evaluated in gauss points and shape functions derivatives in natural coordinates # jac, dpz = e.J(e.Z.T) detjac = e.detjac # _j = np.linalg.inv(jac) # Jacobian inverse dpx = e.dpx # Shape function derivatives in global coordinates C = self.C[ee] o = [0.0]*m Ke = np.zeros([3*m, 3*m]) Me = np.zeros([3*m, 3*m]) Fe = np.zeros([3*m, 1]) for k in range(len(e.Z)): # Iterate over gauss points on domain B = np.array([ [*dpx[k, 0, :], *o, *o], [*o, *dpx[k, 1, :], *o], [*o, *o, *dpx[k, 2, :]], [*dpx[k, 2, :], *o, *dpx[k, 0, :]], [*o, *dpx[k, 2, :], *dpx[k, 1, :]], [*dpx[k, 1, :], *dpx[k, 0, :], *o]]) P = np.array([ [*_p[k], *o, *o], [*o, *_p[k], *o], [*o, *o, *_p[k]]]) Ke += (B.T@C@B)*detjac[k]*e.W[k] Me += self.rho[ee]*(P.T@P)*detjac[k]*e.W[k] _fx = self.fx(_x[k]) _fy = self.fy(_x[k]) _fz = self.fz(_x[k]) F = np.array([[_fx], [_fy], [_fz]]) Fe += (P.T @ F)*detjac[k]*e.W[k] self.F[np.ix_(e.gdlm)] += Fe self.KL[np.ix_(e.gdlm, e.gdlm)] += Ke self.M[np.ix_(e.gdlm, e.gdlm)] += Me # print('Ensembling equation system...') # for e in tqdm(self.elements, unit='Element'): # self.K[np.ix_(e.gdlm, e.gdlm)] += e.Ke*self.z1 # for i, eee in enumerate(e.enl): # enl = self.elements[eee] # self.K[np.ix_(e.gdlm, enl.gdlm)] += e.knls[i]*self.z2 # self.F[np.ix_(e.gdlm)] += e.Fe # self.Q[np.ix_(e.gdlm)] += e.Qe # print('Done!') e.knls = [] for inl in tqdm(e.enl, unit=' Nolocal'): enl = self.elements[inl] mnl = len(enl.gdl.T) Knl = np.zeros([3*m, 3*mnl]) _xnl = enl._x detjacnl = enl.detjac dpxnl = enl.dpx for k in range(len(e.Z)): B = np.array([ [*dpx[k, 0, :], *o, *o], [*o, *dpx[k, 1, :], *o], [*o, *o, *dpx[k, 2, :]], [*dpx[k, 2, :], *o, *dpx[k, 0, :]], [*o, *dpx[k, 2, :], *dpx[k, 1, :]], [*dpx[k, 1, :], *dpx[k, 0, :], *o]]) for knl in range(len(enl.Z)): ro = np.linalg.norm(_x[k]-_xnl[knl])/self.l azn = self.af(ro) Bnl = np.array([ [*dpxnl[knl, 0, :], *o, *o], [*o, *dpxnl[knl, 1, :], *o], [*o, *o, *dpxnl[knl, 2, :]], [*dpxnl[knl, 2, :], *o, *dpxnl[knl, 0, :]], [*o, *dpxnl[knl, 2, :], *dpxnl[knl, 1, :]], [*dpxnl[knl, 1, :], *dpxnl[knl, 0, :], *o]]) Knl += azn*(Bnl.T@C@B)*detjac[k] * \ e.W[k]*detjacnl[knl]*enl.W[knl] self.KNL[np.ix_(e.gdlm, enl.gdlm)] += Knl.T def ensembling(self) -> None: """Creation of the system sparse matrix. Force vector is ensembled in integration method """ logging.info('Ensembling equation system...') self.K = self.KL*self.z1 + self.KNL*self.z2 self.M = self.M.tocsr() logging.info('Done!') class NonLocalElasticityFromTensor(NonLocalElasticity): def __init__(self, geometry: Geometry, C: np.ndarray, rho: Tuple[float, list], l: float, z1: float, Lr: float, af: Callable, fx: Callable = lambda x: 0, fy: Callable = lambda x: 0, fz: Callable = lambda x: 0, **kargs) -> None: NonLocalElasticity.__init__( self, geometry, 0.0, 0.0, rho, l, z1, Lr, af, fx, fy, fz, **kargs) self.properties['C'] = C.tolist() self.C = [C]*len(geometry.elements) class ElasticityFromTensor(Elasticity): def __init__(self, geometry: Geometry, C: np.ndarray, rho: Tuple[float, list], fx: Callable = lambda x: 0, fy: Callable = lambda x: 0, fz: Callable = lambda x: 0, **kargs) -> None: Elasticity.__init__(self, geometry, 0.0, 0.0, rho, fx, fy, fz, **kargs) self.properties['C'] = C.tolist() self.C = [C]*len(geometry.elements) class NonLocalElasticityLegacy(Elasticity): """Creates a 3D Elasticity problem Args: geometry (Geometry): Input 3D geometry E (Tuple[float, list]): Young Moduli v (Tuple[float, list]): Poisson coeficient rho (Tuple[float, list]): Density. l (float): Internal lenght z1 (float): Z1 factor Lr (float): Influence distance af (Callable): Atenuation function fx (Callable, optional): Force in x direction. Defaults to lambdax:0. fy (Callable, optional): Force in y direction. Defaults to lambdax:0. fz (Callable, optional): Force in z direction. Defaults to lambdax:0. """ def __init__(self, geometry: Geometry, E: Tuple[float, list], v: Tuple[float, list], rho: Tuple[float, list], l: float, z1: float, Lr: float, af: Callable, fx: Callable = lambda x: 0, fy: Callable = lambda x: 0, fz: Callable = lambda x: 0, **kargs) -> None: """Creates a 3D Elasticity problem Args: geometry (Geometry): Input 3D geometry E (Tuple[float, list]): Young Moduli v (Tuple[float, list]): Poisson coeficient rho (Tuple[float, list]): Density. l (float): Internal lenght z1 (float): Z1 factor Lr (float): Influence distance af (Callable): Atenuation function fx (Callable, optional): Force in x direction. Defaults to lambdax:0. fy (Callable, optional): Force in y direction. Defaults to lambdax:0. fz (Callable, optional): Force in z direction. Defaults to lambdax:0. """ Elasticity.__init__(self, geometry, E, v, rho, fx, fy, fz, **kargs) self.l = l self.z1 = z1 self.z2 = 1.0-z1 self.af = af self.Lr = Lr nonlocals = self.geometry.detectNonLocal(Lr) for e, dno in zip(self.elements, nonlocals): e.enl = dno self.name = 'Non Local Elasticity sparse' self.zs = [] def elementMatrices(self) -> None: """Calculate the element matrices usign Reddy's (2005) finite element model """ for ee, e in enumerate(tqdm(self.elements, unit='Element')): m = len(e.gdl.T) # Gauss points in global coordinates and Shape functions evaluated in gauss points _x, _p = e.T(e.Z.T) # Jacobian evaluated in gauss points and shape functions derivatives in natural coordinates jac, dpz = e.J(e.Z.T) detjac = np.linalg.det(jac) _j = np.linalg.inv(jac) # Jacobian inverse dpx = _j @ dpz # Shape function derivatives in global coordinates C = self.C[ee] o = [0.0]*m Ke = np.zeros([3*m, 3*m]) Me = np.zeros([3*m, 3*m]) Fe = np.zeros([3*m, 1]) for k in range(len(e.Z)): # Iterate over gauss points on domain B = np.array([ [*dpx[k, 0, :], *o, *o], [*o, *dpx[k, 1, :], *o], [*o, *o, *dpx[k, 2, :]], [*dpx[k, 2, :], *o, *dpx[k, 0, :]], [*o, *dpx[k, 2, :], *dpx[k, 1, :]], [*dpx[k, 1, :], *dpx[k, 0, :], *o]]) P = np.array([ [*_p[k], *o, *o], [*o, *_p[k], *o], [*o, *o, *_p[k]]]) Ke += (B.T@C@B)*detjac[k]*e.W[k] Me += self.rho[ee]*(P.T@P)*detjac[k]*e.W[k] _fx = self.fx(_x[k]) _fy = self.fy(_x[k]) _fz = self.fz(_x[k]) F = np.array([[_fx], [_fy], [_fz]]) Fe += (P.T @ F)*detjac[k]*e.W[k] self.F[np.ix_(e.gdlm)] += Fe for gdl in e.gdlm: self.I += [gdl]*(3*m) self.J += e.gdlm self.Im += [gdl]*(3*m) self.Jm += e.gdlm Ke_flat = (Ke).flatten().tolist() self.zs += [1]*len(Ke_flat) self.V += Ke_flat self.Vm += Me.flatten().tolist() e.knls = [] for inl in tqdm(e.enl, unit=' Nolocal'): enl = self.elements[inl] mnl = len(enl.gdl.T) Knl = np.zeros([3*m, 3*mnl]) _xnl, _ = enl.T(enl.Z.T) jacnl, dpznl = enl.J(enl.Z.T) detjacnl = np.linalg.det(jacnl) _jnl = np.linalg.inv(jacnl) dpxnl = _jnl @ dpznl for k in range(len(e.Z)): B = np.array([ [*dpx[k, 0, :], *o, *o], [*o, *dpx[k, 1, :], *o], [*o, *o, *dpx[k, 2, :]], [*dpx[k, 2, :], *o, *dpx[k, 0, :]], [*o, *dpx[k, 2, :], *dpx[k, 1, :]], [*dpx[k, 1, :], *dpx[k, 0, :], *o]]) for knl in range(len(enl.Z)): ro = np.linalg.norm(_x[k]-_xnl[knl])/self.l azn = self.af(ro) Bnl = np.array([ [*dpxnl[knl, 0, :], *o, *o], [*o, *dpxnl[knl, 1, :], *o], [*o, *o, *dpxnl[knl, 2, :]], [*dpxnl[knl, 2, :], *o, *dpxnl[knl, 0, :]], [*o, *dpxnl[knl, 2, :], *dpxnl[knl, 1, :]], [*dpxnl[knl, 1, :], *dpxnl[knl, 0, :], *o]]) Knl += azn*(Bnl.T@C@B)*detjac[k] * \ e.W[k]*detjacnl[knl]*enl.W[knl] for gdl in e.gdlm: self.I += [gdl]*(3*m) self.J += enl.gdlm Knl_flat = (Knl).flatten().tolist() self.V += (Knl).flatten().tolist() self.zs += [0]*len(Knl_flat) self.V_0 = np.array(self.V) self.zs = np.array(self.zs) def ensembling(self) -> None: """Creation of the system sparse matrix. Force vector is ensembled in integration method """ logging.info('Ensembling equation system...') self.V = np.zeros(self.V_0.shape) self.V[self.zs == 1] = self.V_0[self.zs == 1]*self.z1 self.V[self.zs == 0] = self.V_0[self.zs == 0]*self.z2 self.K = sparse.coo_matrix( (self.V, (self.I, self.J)), shape=(self.ngdl, self.ngdl)).tolil() self.M = sparse.coo_matrix( (self.Vm, (self.Im, self.Jm)), shape=(self.ngdl, self.ngdl)).tocsr() logging.info('Done!')
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Elasticity3D.py
Elasticity3D.py
from typing import Union from tqdm import tqdm import numpy as np from .Geometry import Geometry from .Solvers import Lineal, NonLinealSolver, LinealSparse import logging from .FEMLogger import FEMLogger from functools import partialmethod import json class Core(): """Create the Finite Element problem. Args: geometry (Geometry): Input geometry. The geometry must contain the elements, and the border conditions. You can create the geometry of the problem using the Geometry class. solver (Union[Lineal, NonLinealSolver], optional): Finite Element solver. If not provided, Lineal solver is used. sparse (bool, optional): To use sparse matrix formulation. Defaults to False verbose (bool, optional): To print console messages and progress bars. Defaults to False. """ def __init__(self, geometry: Geometry, solver: Union[Lineal, NonLinealSolver] = None, sparse: bool = False, verbose: bool = False, name='') -> None: """Create the Finite Element problem. Args: geometry (Geometry): Input geometry. The geometry must contain the elements, and the border conditions. You can create the geometry of the problem using the Geometry class. solver (Union[Lineal, NonLinealSolver], optional): Finite Element solver. If not provided, Lineal solver is used. sparse (bool, optional): To use sparse matrix formulation. Defaults to False verbose (bool, optional): To print console messages and progress bars. Defaults to False. name (str, optional): To print custom name on logging file. Defaults to ''. """ self.logger = FEMLogger(name) if verbose: self.logger.setup_logging(console_log_level="info") else: self.logger.setup_logging() self.geometry = geometry self.ngdl = self.geometry.ngdl if not sparse: self.K = np.zeros([self.ngdl, self.ngdl]) self.F = np.zeros([self.ngdl, 1]) self.Q = np.zeros([self.ngdl, 1]) self.U = np.zeros([self.ngdl, 1]) self.S = np.zeros([self.ngdl, 1]) self.cbe = self.geometry.cbe self.cbn = self.geometry.cbn self.elements = self.geometry.elements self.verbose = verbose tqdm.__init__ = partialmethod(tqdm.__init__, disable=not verbose) self.name = 'Generic FEM ' if not solver: self.solver = Lineal(self) if sparse: self.solver = LinealSparse(self) else: self.solver = solver(self) if self.solver.type == 'non-lineal-newton': self.T = np.zeros([self.ngdl, self.ngdl]) elif self.solver.type == "Base": logging.error("Base solver should not be used.") raise Exception("Base solver should not be used.") self.properties = {'verbose': self.verbose, 'name': name, 'problem': self.__class__.__name__} def description(self): """Generates the problem description for loggin porpuses """ return f'FEM problem using the {self.name} formulation.,DOF: {self.ngdl},Elements: {len(self.elements)},Solver: {self.solver.type},EBC: {len(self.cbe)},NBC: {len(self.cbn)}' def ensembling(self) -> None: """Ensembling of equation system. This method use the element gdl and the element matrices. The element matrices degrees of fredom must match the dimension of the element gdl. For m>1 variables per node, the gdl will be flattened. This ensure that the element matrices will always be a 2-D Numpy Array. """ logging.info('Ensembling equation system...') for e in self.elements: self.K[np.ix_(e.gdlm, e.gdlm)] += e.Ke self.F[np.ix_(e.gdlm)] += e.Fe self.Q[np.ix_(e.gdlm)] += e.Qe if 'newton' in self.solver.type: try: self.T[np.ix_(e.gdlm, e.gdlm)] += e.Te except Exception as e: logging.error( "Impossible to access tangent matrix. Check tangent matrix creation in integration class.") raise e logging.info('Done!') def restartMatrix(self) -> None: """Sets all model matrices and vectors to 0 state """ self.K[:, :] = 0.0 self.F[:, :] = 0.0 self.Q[:, :] = 0.0 self.S[:, :] = 0.0 if 'newton' in self.solver.type: try: self.T[:, :] = 0.0 except Exception as e: logging.error("Impossible to clear tangent matrix.") raise e def borderConditions(self) -> None: """Assign border conditions to the system. The border conditios are assigned in this order: 1. Natural border conditions 2. Essential border conditions This ensures that in a node with 2 border conditions the essential border conditions will be applied. """ logging.info('Border conditions...') for i in tqdm(self.cbn, unit=' Natural'): self.Q[int(i[0])] = i[1] if self.cbe: border_conditions = np.zeros([self.ngdl, 1]) cb = np.array(self.cbe) ncb = len(cb) border_conditions[np.ix_(cb[:, 0].astype(int)) ] = cb[:, 1].reshape([ncb, 1]) # FIXME esto puede resultar en pasar la matriz dwe lil a densa!! self.S = self.S - ([email protected]).T for i in tqdm(self.cbe, unit=' Essential'): self.K[int(i[0]), :] = 0 self.K[:, int(i[0])] = 0 self.K[int(i[0]), int(i[0])] = 1 if 'newton' in self.solver.type: try: self.T[int(i[0]), :] = 0 self.T[:, int(i[0])] = 0 self.T[int(i[0]), int(i[0])] = 1 except Exception as e: logging.error("Impossible to access tangent matrix.") raise e self.S = self.S + self.F + self.Q for i in self.cbe: self.S[int(i[0])] = i[1] logging.info('Done!') def condensedSystem(self) -> None: """Assign border conditions to the system and modifies the matrices to condensed mode The border conditios are assigned in this order: 1. Natural border conditions 2. Essential border conditions This ensures that in a node with 2 border conditions the essential border conditions will be applied. """ # FIXME tiene que funcionar para todos los casos # self.borderConditions() logging.info('Border conditions...') for i in tqdm(self.cbn, unit=' Natural'): self.Q[int(i[0])] = i[1] if self.cbe: border_conditions = np.zeros([self.ngdl, 1]) cb = np.array(self.cbe) ncb = len(cb) border_conditions[np.ix_(cb[:, 0].astype(int)) ] = cb[:, 1].reshape([ncb, 1]) # FIXME esto puede resultar en pasar la matriz dwe lil a densa!! self.S = self.S - ([email protected]).T for i in tqdm(self.cbe, unit=' Essential'): self.K[int(i[0]), :] = 0 self.K[:, int(i[0])] = 0 self.K[int(i[0]), int(i[0])] = 1 if self.calculateMass: self.M[int(i[0]), :] = 0 self.M[:, int(i[0])] = 0 self.M[int(i[0]), int(i[0])] = 1 if 'newton' in self.solver.type: try: self.T[int(i[0]), :] = 0 self.T[:, int(i[0])] = 0 self.T[int(i[0]), int(i[0])] = 1 except Exception as e: logging.error("Impossible to access tangent matrix.") raise e self.S = self.S + self.F + self.Q for i in self.cbe: self.S[int(i[0])] = i[1] logging.info('Done!') def solveES(self, **kargs) -> None: """Solve the finite element problem """ self.solver.run(**kargs) def solve(self, plot: bool = True, **kargs) -> None: """A series of Finite Element steps Args: plot (bool, optional): To post process the solution. Defaults to True. **kargs: Solver specific parameters. """ desc = self.description().split(',') for d in desc: logging.debug(d) self.solveES(**kargs) if plot: logging.info('Post processing solution...') self.postProcess(**kargs) logging.info('Done!') duration = self.logger.end_timer().total_seconds() self.properties['duration'] = duration logging.info("End!") def solveFromFile(self, file: str, plot: bool = True, **kargs) -> None: """Load a solution file and show the post process for a given geometry Args: file (str): Path to the previously generated solution file. plot (bool, optional): To post process the solution. Defaults to True. """ logging.info('Loading File...') self.U = np.loadtxt(file) for e in self.elements: e.setUe(self.U) logging.info('Done!') if plot: logging.info('Post processing solution...') self.postProcess(**kargs) logging.info('Done!') def solveFromArray(self, solution: np.ndarray, plot: bool = True, **kargs) -> None: """Load a solution array to the problem. Args: solution (np.ndarray): Solution vertical array with shape (self.ngdl,1) plot (bool, optional): To post process the solution. Defaults to True. """ logging.info('Casting solution') self.U = solution for e in self.elements: e.setUe(self.U) logging.info('Done!') if plot: logging.info('Post processing solution...') self.postProcess(**kargs) logging.info('Done!') def profile(self) -> None: """Create a profile for a 3D or 2D problem. """ pass def elementMatrices(self) -> None: """Calculate the element matrices """ pass def postProcess(self) -> None: """Post process the solution """ pass def exportJSON(self, filename: str = None): self.properties['description'] = self.description() if not filename: filename = __name__ y = self.geometry.exportJSON() pjson = json.loads(y) pjson["solutions"] = [] # Avoid Numpy array conversion for info, sol in zip(self.solver.solutions_info, np.array(self.solver.solutions).tolist()): pjson["solutions"].append({'info': info, "U": sol}) pjson['properties'] = self.properties y = json.dumps(pjson) with open(filename, "w") as f: f.write(y)
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Core.py
Core.py
from .Core import Core, Geometry from tqdm import tqdm import numpy as np import matplotlib.pyplot as plt from typing import Callable class EDO1D(Core): """Create a 1D 1 variable per node Finite Element problem The differential equation is: .. math:: a(x)\\frac{d^2u}{dx^2}+c(x)u=f(x) Args: geometry (Geometry): 1D Geometry of the problem. Use the Mesh.Lineal class a (function): Function a, if a is constant you can use a = lambda x: [value] c (function): Function c, if c is constant you can use c = lambda x: [value] f (function): Function f, if f is constant you can use f = lambda x: [value] """ def __init__(self, geometry: Geometry, a: Callable, c: Callable, f: Callable) -> None: """Create a 1D 1 variable per node Finite Element problem The differential equation is: .. math:: a(x)\\frac{d^2u}{dx^2}+c(x)u=f(x) Args: geometry (Geometry): 1D Geometry of the problem. Use the Mesh.Lineal class a (function): Function a, if a is constant you can use a = lambda x: [value] c (function): Function c, if c is constant you can use c = lambda x: [value] f (function): Function f, if f is constant you can use f = lambda x: [value] """ self.a = a self.c = c self.f = f Core.__init__(self, geometry) self.name = 'Generic 1D second order diferential equation' self.properties['a'] = None self.properties['b'] = None self.properties['c'] = None self.properties['WARNING'] = "It's not possible lo save callables" def elementMatrices(self) -> None: """Calculate the element matrices usign Reddy's (2005) finite element model. Element matrices and forces are calculated with Gauss-Legendre quadrature. Point number depends of element discretization. """ for e in tqdm(self.elements, unit='Element'): # Gauss points in global coordinates and Shape functions evaluated in gauss points _x, _p = e.T(e.Z.T) # Jacobian evaluated in gauss points and shape functions derivatives in natural coordinates jac, dpz = e.J(e.Z.T) detjac = np.linalg.det(jac) _j = np.linalg.inv(jac) # Jacobian inverse dpx = _j @ dpz # Shape function derivatives in global coordinates for i in range(e.n): # self part must be vectorized for j in range(e.n): for k in range(len(e.Z)): # Iterate over gauss points on domain e.Ke[i, j] += (self.a(_x[k])*dpx[k][0][i]*dpx[k][0] [j] + self.c(_x[k])*_p[k][i]*_p[k][j])*detjac[k]*e.W[k] for k in range(len(e.Z)): # Iterate over gauss points on domain e.Fe[i][0] += self.f(_x[k])*_p[k][i]*detjac[k]*e.W[k] # e.Fe[:,0] = 2*self.G*self._phi*detjac@_p # e.Ke = (np.transpose(dpx,axes=[0,2,1]) @ dpx).T @ detjac def postProcess(self) -> None: """Generate graph of solution and solution derivative """ X = [] U1 = [] U2 = [] fig = plt.figure() ax1 = fig.add_subplot(1, 2, 1) ax2 = fig.add_subplot(1, 2, 2) for e in tqdm(self.elements, unit='Element'): _x, _u, du = e.giveSolution(True) X += _x.T[0].tolist() U1 += _u[0].tolist() U2 += (du[:, 0, 0]).tolist() ax1.plot(X, U1) ax2.plot(X, np.array(U2)) ax1.grid() ax2.grid() ax1.set_title(r'$U(x)$') ax2.set_title(r'$\frac{dU}{dx}$')
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/EDO1D.py
EDO1D.py
from .Core import Core, Geometry from tqdm import tqdm import numpy as np import matplotlib.pyplot as plt class Poisson2D(Core): """Create a Poisson2D finite element problem The differential equation is: .. math:: -\\nabla^2\\Psi=\\theta Args: geometry (Geometry): 2D 1 variable per node geometry phi (float): Value """ def __init__(self, geometry: Geometry, phi: float) -> None: """Create a Poisson2D finite element problem The differential equation is: .. math:: -\\nabla^2\\Psi=\\theta Args: geometry (Geometry): 2D 1 variable per node geometry phi (float): Value """ self._phi = phi Core.__init__(self, geometry) self.name = '2D Poisson equation' self.properties['_phi'] = self._phi def elementMatrices(self) -> None: """Calculate the element matrices usign Reddy's (2005) finite element model """ ee = -1 for e in tqdm(self.elements, unit='Element'): ee += 1 # Gauss points in global coordinates and Shape functions evaluated in gauss points _x, _p = e.T(e.Z.T) # Jacobian evaluated in gauss points and shape functions derivatives in natural coordinates jac, dpz = e.J(e.Z.T) detjac = np.linalg.det(jac)*e.W _j = np.linalg.inv(jac) # Jacobian inverse dpx = _j @ dpz # Shape function derivatives in global coordinates # for k in range(len(_x)): #Iterate over gauss points on domain # for i in range(e.n): #self part must be vectorized # for j in range(e.n): # e.Ke[i,j] += (dpx[k][0][i]*dpx[k][0][j] + dpx[k][1][i]*dpx[k][1][j])*detjac[k]*e.W[k] # e.Fe[i][0] += 2*self.G*self._phi*_p[k][i]*detjac[k]*e.W[k] e.Fe[:, 0] = self._phi*detjac@_p e.Ke = (np.transpose(dpx, axes=[0, 2, 1]) @ dpx).T @ detjac def postProcess(self, levels=30, derivatives=True) -> None: """Create graphs for solution and derivatives. """ X = [] Y = [] U1 = [] U2 = [] U3 = [] U4 = [] if derivatives: fig = plt.figure() ax1 = fig.add_subplot(2, 2, 1) ax2 = fig.add_subplot(2, 2, 2) ax3 = fig.add_subplot(2, 2, 3) ax4 = fig.add_subplot(2, 2, 4) for e in tqdm(self.elements, unit='Element'): _x, _u, du = e.giveSolution(True) X += _x.T[0].tolist() Y += _x.T[1].tolist() U2 += (-du[:, 0, 0]).tolist() U3 += du[:, 0, 1].tolist() U1 += _u[0].tolist() U4 += np.sqrt(du[:, 0, 0]**2 + du[:, 0, 1]**2).tolist() surf = ax1.tricontourf(X, Y, U1, cmap='rainbow', levels=levels) fig.colorbar(surf, ax=ax1) ax1.set_title(r'$U$') surf = ax2.tricontourf(X, Y, U2, cmap='rainbow', levels=levels) ax2.set_title(r'$\frac{\partial U}{\partial x}$') fig.colorbar(surf, ax=ax2) surf = ax3.tricontourf(X, Y, U3, cmap='rainbow', levels=levels) ax3.set_title(r'$\frac{\partial U}{\partial y}$') fig.colorbar(surf, ax=ax3) surf = ax4.tricontourf(X, Y, U4, cmap='rainbow', levels=levels) ax4.set_title( r'$\sqrt{\left(\frac{\partial U}{\partial x}\right)^2+\left(\frac{\partial U}{\partial y}\right)^2}$') fig.colorbar(surf, ax=ax4) mask = self.geometry.mask if self.geometry.holes: for hole in self.geometry.holes: Xs = np.array(hole['vertices'])[:, 0] Ys = np.array(hole['vertices'])[:, 1] ax2.fill(Xs, Ys, color='white', zorder=30) ax3.fill(Xs, Ys, color='white', zorder=30) ax4.fill(Xs, Ys, color='white', zorder=30) if mask: mask = np.array(mask) cornersnt = np.array(mask[::-1]) xmin = np.min(cornersnt[:, 0]) xmax = np.max(cornersnt[:, 0]) ymin = np.min(cornersnt[:, 1]) ymax = np.max(cornersnt[:, 1]) Xs = [xmin, xmax, xmax, xmin]+cornersnt[:, 0].tolist() Ys = [ymin, ymin, ymax, ymax]+cornersnt[:, 1].tolist() ax1.fill(Xs, Ys, color='white', zorder=30) ax2.fill(Xs, Ys, color='white', zorder=30) ax3.fill(Xs, Ys, color='white', zorder=30) ax4.fill(Xs, Ys, color='white', zorder=30) ax4.set_aspect('equal') ax1.set_aspect('equal') ax2.set_aspect('equal') ax3.set_aspect('equal') else: fig = plt.figure() ax1 = fig.add_subplot(1, 1, 1) for e in tqdm(self.elements, unit='Element'): _x, _u, du = e.giveSolution(True) X += _x.T[0].tolist() Y += _x.T[1].tolist() U1 += _u[0].tolist() surf = ax1.tricontourf(X, Y, U1, cmap='rainbow', levels=levels) fig.colorbar(surf, ax=ax1) mask = self.geometry.mask if self.geometry.holes: for hole in self.geometry.holes: Xs = np.array(hole['vertices'])[:, 0] Ys = np.array(hole['vertices'])[:, 1] ax1.fill(Xs, Ys, color='white', zorder=30) if not mask == None: mask = np.array(mask) cornersnt = np.array(mask[::-1]) xmin = np.min(cornersnt[:, 0]) xmax = np.max(cornersnt[:, 0]) ymin = np.min(cornersnt[:, 1]) ymax = np.max(cornersnt[:, 1]) Xs = [xmin, xmax, xmax, xmin]+cornersnt[:, 0].tolist() Ys = [ymin, ymin, ymax, ymax]+cornersnt[:, 1].tolist() ax1.fill(Xs, Ys, color='white', zorder=30)
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Poisson2D.py
Poisson2D.py
from .Core import Core, tqdm, np, Geometry import matplotlib.pyplot as plt class Heat1D(Core): """Creates a 1D Stady state heat problem. Convective border conditions can be applied The differential equation is: .. math:: -\\frac{d}{dx}\\left(Ak\\frac{dT}{dx}\\right)+\\beta P(T-T_{\\infty})=0 Args: geometry (Geometry): Input 1D Geometry. 1 variable per node A (float or list): Section area. If float, all elements will have the same area. If list, the i-element will have the i-area P (float or list): Section perimeter. If float, all elements will have the same perimeter. If list, the i-element will have the i-perimeter k (float or list): Conductivity. If float, all elements will have the same conductivity. If list, the i-element will have the i-conductivity beta (float or list): Transfer coeficient. If float, all elements will have the same transfer coeficient. If list, the i-element will have the i-transfer coeficient Ta (float): Ambient temperature (also called T∞) q (float or list, optional): Internal heat generation rate. If float, all elements will have the same internal heat generation rate coeficient. If list, the i-element will have the i-internal heat generation rate. Defaults to 0.0. """ def __init__(self, geometry: Geometry, A: float, P: float, ku: float, beta: float, Ta: float, q: float = 0.0) -> None: """Creates a 1D Stady state heat problem. Convective border conditions can be applied The differential equation is: .. math:: -\\frac{d}{dx}\\left(Ak\\frac{dT}{dx}\\right)+\\beta P(T-T_{\\infty}) Args: geometry (Geometry): Input 1D Geometry. 1 variable per node A (float or list): Section area. If float, all elements will have the same area. If list, the i-element will have the i-area P (float or list): Section perimeter. If float, all elements will have the same perimeter. If list, the i-element will have the i-perimeter ku (float or list): Conductivity. If float, all elements will have the same conductivity. If list, the i-element will have the i-conductivity beta (float or list): Transfer coeficient. If float, all elements will have the same transfer coeficient. If list, the i-element will have the i-transfer coeficient Ta (float): Ambient temperature q (float or list, optional): Internal heat generation rate. If float, all elements will have the same internal heat generation rate coeficient. If list, the i-element will have the i-internal heat generation rate. Defaults to 0.0. """ if isinstance(A, float) or isinstance(A, int): A = [A]*len(geometry.elements) if isinstance(P, float) or isinstance(P, int): P = [P]*len(geometry.elements) if isinstance(ku, float) or isinstance(ku, int): ku = [ku]*len(geometry.elements) if isinstance(beta, float) or isinstance(beta, int): beta = [beta]*len(geometry.elements) if isinstance(q, float) or isinstance(q, int): q = [q]*len(geometry.elements) self.A = A self.P = P self.ku = ku self.beta = beta self.Ta = Ta self.q = q Core.__init__(self, geometry) self.name = '1D Heat transfer' self.properties['A'] = self.A self.properties['P'] = self.P self.properties['ku'] = self.ku self.properties['beta'] = self.beta self.properties['Ta'] = self.Ta self.properties['q'] = self.q def elementMatrices(self) -> None: """Calculate the element matrices using Gauss Legendre quadrature. """ for ee, e in enumerate(tqdm(self.elements, unit='Element')): m = len(e.gdl.T) K = np.zeros([m, m]) F = np.zeros([m, 1]) _x, _p = e.T(e.Z.T) jac, dpz = e.J(e.Z.T) detjac = np.linalg.det(jac) _j = np.linalg.inv(jac) dpx = _j @ dpz for i in range(m): for j in range(m): for k in range(len(e.Z)): K[i, j] += (self.ku[ee]*self.A[ee]*dpx[k][0][i]*dpx[k][0] [j]+self.beta[ee]*self.P[ee]*_p[k][i]*_p[k][j])*detjac[k]*e.W[k] for k in range(len(e.Z)): F[i][0] += (_p[k][i]*(self.A[ee]*self.q[ee] + self.P[ee]*self.beta[ee]*self.Ta))*detjac[k]*e.W[k] e.Fe += F e.Ke += K def defineConvectiveBoderConditions(self, node: int, value: float = 0) -> None: """Add a convective border condition. The value is: :math:`kA\\frac{dT}{dx}+\\beta A(T-T_{\infty})=value` Args: node (int): Node where the above border condition is applied value (float, optional): Defined below. Defaults to 0. """ near = np.infty for i, e in enumerate(self.elements): act = min(abs(self.geometry.gdls[node][0] - e._coords[0]), abs(self.geometry.gdls[node][0] - e._coords[0])) if act < near: near = act k = i if act == 0: break self.cbn += [[node, value+self.Ta*self.beta[k]*self.A[k]]] self.K[node, node] += self.beta[k]*self.A[k] def postProcess(self) -> None: """Generate graph of solution and solution derivative """ X = [] U1 = [] fig = plt.figure() ax1 = fig.add_subplot(1, 1, 1) for e in tqdm(self.elements, unit='Element'): _x, _u = e.giveSolution(False) X += _x.T[0].tolist() U1 += _u[0].tolist() ax1.plot(X, U1) ax1.grid() ax1.set_title(r'$T(x)$') plt.show()
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Heat1D.py
Heat1D.py
from typing import Callable, Tuple import matplotlib.pyplot as plt import numpy as np from matplotlib import gridspec from tqdm import tqdm from scipy import sparse from .Solvers import LinealSparse from .Core import Core, Geometry, logging class PlaneStressOrthotropic(Core): """Creates a plane stress problem with orthotropic formulation Args: geometry (Geometry): Input geometry E1 (Tuple[float, list]): Young moduli in direction 1 (x) E2 (Tuple[float, list]): Young moduli in direction 2 (y) G12 (Tuple[float, list]): Shear moduli v12 (Tuple[float, list]): Poisson moduli t (Tuple[float, list]): Thickness rho (Tuple[float, list], optional): Density. If not given, mass matrix will not be calculated. Defaults to None. fx (Callable, optional): Force in x direction. Defaults to lambdax:0. fy (Callable, optional): Force in y direction. Defaults to lambdax:0. """ def __init__(self, geometry: Geometry, E1: Tuple[float, list], E2: Tuple[float, list], G12: Tuple[float, list], v12: Tuple[float, list], t: Tuple[float, list], rho: Tuple[float, list] = None, fx: Callable = lambda x: 0, fy: Callable = lambda x: 0, **kargs) -> None: """Creates a plane stress problem with orthotropic formulation Args: geometry (Geometry): Input geometry E1 (Tuple[float, list]): Young moduli in direction 1 (x) E2 (Tuple[float, list]): Young moduli in direction 2 (y) G12 (Tuple[float, list]): Shear moduli v12 (Tuple[float, list]): Poisson moduli t (Tuple[float, list]): Thickness rho (Tuple[float, list], optional): Density. If not given, mass matrix will not be calculated. Defaults to None. fx (Callable, optional): Force in x direction. Defaults to lambdax:0. fy (Callable, optional): Force in y direction. Defaults to lambdax:0. """ if isinstance(t, float) or isinstance(t, int): t = [t]*len(geometry.elements) if isinstance(E1, float) or isinstance(E1, int): E1 = [E1]*len(geometry.elements) if isinstance(E2, float) or isinstance(E2, int): E2 = [E2]*len(geometry.elements) if isinstance(G12, float) or isinstance(G12, int): G12 = [G12]*len(geometry.elements) if isinstance(v12, float) or isinstance(v12, int): v12 = [v12]*len(geometry.elements) self.calculateMass = False self.rho = None if rho: if isinstance(rho, int) or isinstance(rho, float): self.rho = [rho]*len(geometry.elements) self.calculateMass = True self.t = t self.E1 = E1 self.E2 = E2 self.G12 = G12 self.v12 = v12 self.v21 = [] self.C11 = [] self.C22 = [] self.C12 = [] self.C66 = [] self.fx = fx self.fy = fy for i in range(len(self.E1)): v21 = self.v12[i]*self.E2[i]/self.E1[i] self.v21.append(v21) C11 = self.E1[i] / (1 - self.v12[i]*self.v21[i]) C22 = self.E2[i] / (1 - self.v12[i]*self.v21[i]) C12 = self.v21[i] * C11 C66 = G12[i] self.C11.append(C11) self.C22.append(C22) self.C12.append(C12) self.C66.append(C66) if not geometry.nvn == 2: logging.warning( 'Border conditions lost, please usea a geometry with 2 variables per node (nvn=2)\nRegenerating Geoemtry...') geometry.nvn = 2 geometry.cbe = [] geometry.cbn = [] geometry.initialize() Core.__init__(self, geometry, **kargs) self.name = 'Plane Stress Orthotropic' self.properties['E1'] = self.E1 self.properties['E2'] = self.E2 self.properties['G12'] = self.G12 self.properties['v12'] = self.v12 self.properties['fx'] = None self.properties['fy'] = None self.properties['t'] = self.t self.properties['rho'] = self.rho self.properties['calculateMass'] = self.calculateMass def elementMatrices(self) -> None: """Calculate the element matrices usign Reddy's (2005) finite element model """ for ee, e in enumerate(tqdm(self.elements, unit='Element')): m = len(e.gdl.T) Fux = np.zeros([m, 1]) Fvx = np.zeros([m, 1]) # Gauss points in global coordinates and Shape functions evaluated in gauss points _x, _p = e.T(e.Z.T) # Jacobian evaluated in gauss points and shape functions derivatives in natural coordinates jac, dpz = e.J(e.Z.T) detjac = np.linalg.det(jac) _j = np.linalg.inv(jac) # Jacobian inverse dpx = _j @ dpz # Shape function derivatives in global coordinates c11 = self.C11[ee] c12 = self.C12[ee] c22 = self.C22[ee] c66 = self.C66[ee] C = np.array([ [c11, c12, 0.0], [c12, c22, 0.0], [0.0, 0.0, c66]]) Fe = np.zeros([2*m, 1]) Ke = np.zeros([2*m, 2*m]) if self.calculateMass: Me = np.zeros([2*m, 2*m]) o = [0.0]*m for k in range(len(e.Z)): # Iterate over gauss points on domain B = np.array([ [*dpx[k, 0, :], *o], [*o, *dpx[k, 1, :]], [*dpx[k, 1, :], *dpx[k, 0, :]]]) P = np.array([ [*_p[k], *o], [*o, *_p[k]]]) Ke += self.t[ee]*(B.T@C@B)*detjac[k]*e.W[k] if self.calculateMass: Me += self.rho[ee]*self.t[ee]*(P.T@P)*detjac[k]*e.W[k] Fe += self.t[ee]*([email protected]([[self.fx(_x[k])], [self.fy(_x[k])]]))*detjac[k]*e.W[k] if e.intBorders: # Cambiar esto a la notación matricial for j in range(len(e.borders)): border = e.borders[j] if (len(border.properties['load_x']) + len(border.properties['load_y'])): _x, _p = e.T(e.Tj[j](border.Z.T)) _s = border.TS(border.Z.T) detjac = border.coords[-1, 0]*0.5 for i in range(m): for fx in border.properties['load_x']: for k in range(len(border.Z)): Fux[i, 0] += fx(_s[k, 0])*_p[k, i] * \ detjac*border.W[k] for fy in border.properties['load_y']: for k in range(len(border.Z)): Fvx[i, 0] += fy(_s[k, 0])*_p[k, i] * \ detjac*border.W[k] subm = np.linspace(0, 2*m-1, 2*m).reshape([2, m]).astype(int) e.Fe = Fe e.Ke = Ke if self.calculateMass: e.Me = Me e.Fe[np.ix_(subm[0])] += Fux e.Fe[np.ix_(subm[1])] += Fvx def postProcess(self, mult: float = 1000, gs=None, levels=1000, **kargs) -> None: """Generate the stress surfaces and displacement fields for the geometry Args: mult (int, optional): Factor for displacements. Defaults to 1000. gs (list, optional): List of 4 gridSpec matplotlib objects. Defaults to None. """ X = [] Y = [] U1 = [] U2 = [] U3 = [] fig = plt.figure() if not gs: gss = gridspec.GridSpec(3, 3) gs = [gss[0, 0], gss[0, 1], gss[0, 2], gss[1:, :]] ax1 = fig.add_subplot(gs[0]) ax2 = fig.add_subplot(gs[1]) ax3 = fig.add_subplot(gs[2]) ax5 = fig.add_subplot(gs[3]) ee = -1 for e in tqdm(self.elements, unit='Element'): ee += 1 _x, _u, du = e.giveSolution(True) X += _x.T[0].tolist() Y += _x.T[1].tolist() U1 += (self.C11[ee]*du[:, 0, 0]+self.C12[ee]*du[:, 1, 1]).tolist() U2 += (self.C12[ee]*du[:, 0, 0]+self.C22[ee]*du[:, 1, 1]).tolist() U3 += (self.C66[ee]*(du[:, 0, 1]+du[:, 1, 0])).tolist() coordsNuevas = e._coordsg + e._Ueg * mult ax5.plot(*e._coordsg.T, '--', color='gray', alpha=0.7) ax5.plot(*coordsNuevas.T, '-', color='black') ax5.legend(['Original Shape', 'Deformed Shape (x'+format(mult)+')']) ax5.set_aspect('equal') ax1.set_aspect('equal') ax3.set_aspect('equal') ax2.set_aspect('equal') cmap = 'rainbow' surf = ax1.tricontourf(X, Y, U1, cmap=cmap, levels=levels, **kargs) plt.colorbar(surf, ax=ax1) ax1.set_title(r'$\sigma_{xx}$') surf = ax2.tricontourf(X, Y, U2, cmap=cmap, levels=levels, **kargs) plt.colorbar(surf, ax=ax2) ax2.set_title(r'$\sigma_{yy}$') surf = ax3.tricontourf(X, Y, U3, cmap=cmap, levels=levels, **kargs) plt.colorbar(surf, ax=ax3) ax3.set_title(r'$\sigma_{xy}$') mask = self.geometry.mask if self.geometry.holes: for hole in self.geometry.holes: Xs = np.array(hole['vertices'])[:, 0] Ys = np.array(hole['vertices'])[:, 1] ax1.fill(Xs, Ys, color='white', zorder=30) ax2.fill(Xs, Ys, color='white', zorder=30) ax3.fill(Xs, Ys, color='white', zorder=30) if mask: mask = np.array(mask) cornersnt = np.array(mask[::-1]) xmin = np.min(cornersnt[:, 0]) xmax = np.max(cornersnt[:, 0]) ymin = np.min(cornersnt[:, 1]) ymax = np.max(cornersnt[:, 1]) Xs = [xmin, xmax, xmax, xmin]+cornersnt[:, 0].tolist() Ys = [ymin, ymin, ymax, ymax]+cornersnt[:, 1].tolist() ax1.fill(Xs, Ys, color='white', zorder=30) ax2.fill(Xs, Ys, color='white', zorder=30) ax3.fill(Xs, Ys, color='white', zorder=30) def giveStressPoint(self, X: np.ndarray) -> Tuple[tuple, None]: """Calculates the stress in a given set of points. Args: X (np.ndarray): Points to calculate the Stress. 2D Matrix. with 2 rows. First row is an array of 1 column with X coordinate. Second row is an array of 1 column with Y coordinate Returns: tuple or None: Tuple of stress (:math:`\sigma_x,\sigma_y,\sigma_{xy}`) if X,Y exists in domain. """ for ee, e in enumerate(self.elements): if e.isInside(X.T[0]): z = e.inverseMapping(np.array([X.T[0]]).T) _, _, du = e.giveSolutionPoint(z, True) sx = (self.C11[ee]*du[:, 0, 0] + self.C12[ee]*du[:, 1, 1]).tolist() sy = (self.C12[ee]*du[:, 0, 0] + self.C22[ee]*du[:, 1, 1]).tolist() sxy = (self.C66[ee]*(du[:, 0, 1]+du[:, 1, 0])).tolist() return sx, sy, sxy def profile(self, p0: list, p1: list, n: float = 100) -> None: """ Generate a profile between selected points Args: p0 (list): start point of the profile [x0,y0] p1 (list): end point of the profile [xf,yf] n (int, optional): NUmber of samples for graph. Defaults to 100. """ _x = np.linspace(p0[0], p1[0], n) _y = np.linspace(p0[1], p1[1], n) X = np.array([_x, _y]) U = [] U1 = [] U2 = [] U3 = [] _X = [] def dist(X): return np.sqrt((p0[0]-X[0])**2+(p0[1]-X[1])**2) for i in range(n): for ee, e in enumerate(self.elements): if e.isInside(X.T[i]): z = e.inverseMapping(np.array([X.T[i]]).T) _, u, du = e.giveSolutionPoint(z, True) U += [u.tolist()] U1 += (self.C11[ee]*du[:, 0, 0] + self.C12[ee]*du[:, 1, 1]).tolist() U2 += (self.C12[ee]*du[:, 0, 0] + self.C22[ee]*du[:, 1, 1]).tolist() U3 += (self.C66[ee]*(du[:, 0, 1]+du[:, 1, 0])).tolist() _X.append(dist(X.T[i])) break fig = plt.figure() ax = fig.add_subplot(1, 3, 1) ax.plot(_X, U1, color='black') ax.grid() ax.set_xlabel('d') ax.set_ylabel(r'$\sigma_{xx}$') ax = fig.add_subplot(1, 3, 2) ax.plot(_X, U2, color='black') ax.grid() ax.set_xlabel('d') ax.set_ylabel(r'$\sigma_{yy}$') ax = fig.add_subplot(1, 3, 3) ax.plot(_X, U3, color='black') ax.grid() ax.set_xlabel('d') ax.set_ylabel(r'$\sigma_{xy}$') return _X, U class PlaneStressOrthotropicSparse(PlaneStressOrthotropic): """Creates a plane stress problem with orthotropic formulation and sparce matrices Args: geometry (Geometry): Input geometry E1 (Tuple[float, list]): Young moduli in direction 1 (x) E2 (Tuple[float, list]): Young moduli in direction 2 (y) G12 (Tuple[float, list]): Shear moduli v12 (Tuple[float, list]): Poisson moduli t (Tuple[float, list]): Thickness rho (Tuple[float, list], optional): Density. If not given, mass matrix will not be calculated. Defaults to None. fx (Callable, optional): Force in x direction. Defaults to lambdax:0. fy (Callable, optional): Force in y direction. Defaults to lambdax:0. """ def __init__(self, geometry: Geometry, E1: Tuple[float, list], E2: Tuple[float, list], G12: Tuple[float, list], v12: Tuple[float, list], t: Tuple[float, list], rho: Tuple[float, list] = None, fx: Callable = lambda x: 0, fy: Callable = lambda x: 0, **kargs) -> None: """Creates a plane stress problem with orthotropic formulation and sparce matrices Args: geometry (Geometry): Input geometry E1 (Tuple[float, list]): Young moduli in direction 1 (x) E2 (Tuple[float, list]): Young moduli in direction 2 (y) G12 (Tuple[float, list]): Shear moduli v12 (Tuple[float, list]): Poisson moduli t (Tuple[float, list]): Thickness rho (Tuple[float, list], optional): Density. If not given, mass matrix will not be calculated. Defaults to None. fx (Callable, optional): Force in x direction. Defaults to lambdax:0. fy (Callable, optional): Force in y direction. Defaults to lambdax:0. """ if not geometry.nvn == 2: logging.warning( 'Border conditions lost, please usea a geometry with 2 variables per node (nvn=2)\nRegenerating Geoemtry...') geometry.nvn = 2 geometry.cbe = [] geometry.cbn = [] geometry.initialize() if not geometry.fast: logging.warning("Use fast elements") geometry.fast = True geometry.initialize() PlaneStressOrthotropic.__init__( self, geometry, E1, E2, G12, v12, t, rho, fx, fy, sparse=True, **kargs) self.K = sparse.lil_matrix((self.ngdl, self.ngdl)) if self.calculateMass: self.M = sparse.lil_matrix((self.ngdl, self.ngdl)) self.name = 'Plane Stress Orthotropic sparse' def elementMatrices(self) -> None: """Calculate the element matrices usign Reddy's (2005) finite element model """ for ee, e in enumerate(tqdm(self.elements, unit='Element')): m = len(e.gdl.T) Fux = np.zeros([m, 1]) Fvx = np.zeros([m, 1]) # Gauss points in global coordinates and Shape functions evaluated in gauss points _x, _p = e.T(e.Z.T) # Jacobian evaluated in gauss points and shape functions derivatives in natural coordinates jac, dpz = e.J(e.Z.T) detjac = np.linalg.det(jac) _j = np.linalg.inv(jac) # Jacobian inverse dpx = _j @ dpz # Shape function derivatives in global coordinates c11 = self.C11[ee] c12 = self.C12[ee] c22 = self.C22[ee] c66 = self.C66[ee] C = np.array([ [c11, c12, 0.0], [c12, c22, 0.0], [0.0, 0.0, c66]]) Fe = np.zeros([2*m, 1]) Ke = np.zeros([2*m, 2*m]) if self.calculateMass: Me = np.zeros([2*m, 2*m]) o = [0.0]*m for k in range(len(e.Z)): # Iterate over gauss points on domain B = np.array([ [*dpx[k, 0, :], *o], [*o, *dpx[k, 1, :]], [*dpx[k, 1, :], *dpx[k, 0, :]]]) P = np.array([ [*_p[k], *o], [*o, *_p[k]]]) Ke += self.t[ee]*(B.T@C@B)*detjac[k]*e.W[k] if self.calculateMass: Me += self.rho[ee]*self.t[ee]*(P.T@P)*detjac[k]*e.W[k] Fe += self.t[ee]*([email protected]([[self.fx(_x[k])], [self.fy(_x[k])]]))*detjac[k]*e.W[k] if e.intBorders: # Cambiar esto a la notación matricial for j in range(len(e.borders)): border = e.borders[j] if (len(border.properties['load_x']) + len(border.properties['load_y'])): _x, _p = e.T(e.Tj[j](border.Z.T)) _s = border.TS(border.Z.T) detjac = border.coords[-1, 0]*0.5 for i in range(m): for fx in border.properties['load_x']: for k in range(len(border.Z)): Fux[i, 0] += fx(_s[k, 0])*_p[k, i] * \ detjac*border.W[k] for fy in border.properties['load_y']: for k in range(len(border.Z)): Fvx[i, 0] += fy(_s[k, 0])*_p[k, i] * \ detjac*border.W[k] subm = np.linspace(0, 2*m-1, 2*m).reshape([2, m]).astype(int) Fe[np.ix_(subm[0])] += Fux Fe[np.ix_(subm[1])] += Fvx self.F[np.ix_(e.gdlm)] += Fe self.K[np.ix_(e.gdlm, e.gdlm)] += Ke if self.calculateMass: self.M[np.ix_(e.gdlm, e.gdlm)] += Me def ensembling(self) -> None: """Creation of the system sparse matrix. Force vector is ensembled in integration method""" logging.info('Ensembling equation system...') if self.calculateMass: self.M = self.M.tocsr() logging.info('Done!') class PlaneStress(PlaneStressOrthotropic): """Create a Plain Stress problem Args: geometry (Geometry): 2D 2 variables per node geometry E (int or float or list): Young Moduli. If number, all element will have the same young moduli. If list, each position will be the element young moduli, so len(E) == len(self.elements) v (int or float or list): Poisson ratio. If number, all element will have the same Poisson ratio. If list, each position will be the element Poisson ratio, so len(v) == len(self.elements) t (int or float or list): Element thickness. If number, all element will have the same thickness. If list, each position will be the element thickness, so len(t) == len(self.elements) rho (int or float or list, optional): Density. If not given, mass matrix will not be calculated. Defaults to None. fx (function, optional): Function fx, if fx is constant you can use fx = lambda x: [value]. Defaults to lambda x:0. fy (function, optional): Function fy, if fy is constant you can use fy = lambda x: [value]. Defaults to lambda x:0. """ def __init__(self, geometry: Geometry, E: Tuple[float, list], v: Tuple[float, list], t: Tuple[float, list], rho: Tuple[float, list] = None, fx: Callable = lambda x: 0, fy: Callable = lambda x: 0, **kargs) -> None: """Create a Plain Stress problem Args: geometry (Geometry): 2D 2 variables per node geometry E (int or float or list): Young Moduli. If number, all element will have the same young moduli. If list, each position will be the element young moduli, so len(E) == len(self.elements) v (int or float or list): Poisson ratio. If number, all element will have the same Poisson ratio. If list, each position will be the element Poisson ratio, so len(v) == len(self.elements) t (int or float or list): Element thickness. If number, all element will have the same thickness. If list, each position will be the element thickness, so len(t) == len(self.elements) rho (int or float or list, optional): Density. If not given, mass matrix will not be calculated. Defaults to None. fx (function, optional): Function fx, if fx is constant you can use fx = lambda x: [value]. Defaults to lambda x:0. fy (function, optional): Function fy, if fy is constant you can use fy = lambda x: [value]. Defaults to lambda x:0. """ G = E/2.0/(1.0+v) PlaneStressOrthotropic.__init__( self, geometry, E, E, G, v, t, rho, fx, fy, **kargs) self.name = 'Plane Stress Isotropic' class PlaneStressSparse(PlaneStressOrthotropicSparse): """Create a Plain Stress problem using sparse matrices Args: geometry (Geometry): 2D 2 variables per node geometry E (int or float or list): Young Moduli. If number, all element will have the same young moduli. If list, each position will be the element young moduli, so len(E) == len(self.elements) v (int or float or list): Poisson ratio. If number, all element will have the same Poisson ratio. If list, each position will be the element Poisson ratio, so len(v) == len(self.elements) t (int or float or list): Element thickness. If number, all element will have the same thickness. If list, each position will be the element thickness, so len(t) == len(self.elements) rho (int or float or list, optional): Density. If not given, mass matrix will not be calculated. Defaults to None. fx (function, optional): Function fx, if fx is constant you can use fx = lambda x: [value]. Defaults to lambda x:0. fy (function, optional): Function fy, if fy is constant you can use fy = lambda x: [value]. Defaults to lambda x:0. """ def __init__(self, geometry: Geometry, E: Tuple[float, list], v: Tuple[float, list], t: Tuple[float, list], rho: Tuple[float, list] = None, fx: Callable = lambda x: 0, fy: Callable = lambda x: 0, **kargs) -> None: """Create a Plain Stress problem using sparse matrices Args: geometry (Geometry): 2D 2 variables per node geometry E (int or float or list): Young Moduli. If number, all element will have the same young moduli. If list, each position will be the element young moduli, so len(E) == len(self.elements) v (int or float or list): Poisson ratio. If number, all element will have the same Poisson ratio. If list, each position will be the element Poisson ratio, so len(v) == len(self.elements) t (int or float or list): Element thickness. If number, all element will have the same thickness. If list, each position will be the element thickness, so len(t) == len(self.elements) rho (int or float or list, optional): Density. If not given, mass matrix will not be calculated. Defaults to None. fx (function, optional): Function fx, if fx is constant you can use fx = lambda x: [value]. Defaults to lambda x:0. fy (function, optional): Function fy, if fy is constant you can use fy = lambda x: [value]. Defaults to lambda x:0. """ G = np.array(E)/2.0/(1.0+np.array(v)) PlaneStressOrthotropicSparse.__init__( self, geometry, E, E, G.tolist(), v, t, rho, fx, fy, **kargs) self.name = 'Plane Stress Isotropic sparse' class PlaneStrain(PlaneStress): """Create a Plain Strain problem Args: geometry (Geometry): 2D 2 variables per node geometry E (int or float or list): Young Moduli. If number, all element will have the same young moduli. If list, each position will be the element young moduli, so len(E) == len(self.elements) v (int or float or list): Poisson ratio. If number, all element will have the same Poisson ratio. If list, each position will be the element Poisson ratio, so len(v) == len(self.elements) rho (int or float or list, optional): Density. If not given, mass matrix will not be calculated. Defaults to None. fx (function, optional): Function fx, if fx is constant you can use fx = lambda x: [value]. Defaults to lambda x:0. fy (function, optional): Function fy, if fy is constant you can use fy = lambda x: [value]. Defaults to lambda x:0. """ def __init__(self, geometry: Geometry, E: Tuple[float, list], v: Tuple[float, list], rho: Tuple[float, list] = None, fx: Callable = lambda x: 0, fy: Callable = lambda x: 0, **kargs) -> None: """Create a Plain Strain problem Args: geometry (Geometry): 2D 2 variables per node geometry E (int or float or list): Young Moduli. If number, all element will have the same young moduli. If list, each position will be the element young moduli, so len(E) == len(self.elements) v (int or float or list): Poisson ratio. If number, all element will have the same Poisson ratio. If list, each position will be the element Poisson ratio, so len(v) == len(self.elements) rho (int or float or list, optional): Density. If not given, mass matrix will not be calculated. Defaults to None. fx (function, optional): Function fx, if fx is constant you can use fx = lambda x: [value]. Defaults to lambda x:0. fy (function, optional): Function fy, if fy is constant you can use fy = lambda x: [value]. Defaults to lambda x:0. """ PlaneStress.__init__( self, geometry, E, v, 1, rho, fx, fy, **kargs) self.C11 = [] self.C22 = [] self.C12 = [] self.C66 = [] for i in range(len(self.geometry.elements)): C11 = self.E1[i]*(1-self.v12[i])/(1+self.v12[i])/(1-2*self.v12[i]) C12 = self.E1[i]*(self.v12[i])/(1+self.v12[i])/(1-2*self.v12[i]) C66 = self.E1[i] / 2 / (1 + self.v12[i]) self.C11.append(C11) self.C22.append(C11) self.C12.append(C12) self.C66.append(C66) self.name = 'Plane Strain Isotropic' class PlaneStrainSparse(PlaneStressSparse): """Create a Plain Strain problem using sparse matrix Args: geometry (Geometry): 2D 2 variables per node geometry with fast elements E (int or float or list): Young Moduli. If number, all element will have the same young moduli. If list, each position will be the element young moduli, so len(E) == len(self.elements) v (int or float or list): Poisson ratio. If number, all element will have the same Poisson ratio. If list, each position will be the element Poisson ratio, so len(v) == len(self.elements) rho (int or float or list, optional): Density. If not given, mass matrix will not be calculated. Defaults to None. fx (function, optional): Function fx, if fx is constant you can use fx = lambda x: [value]. Defaults to lambda x:0. fy (function, optional): Function fy, if fy is constant you can use fy = lambda x: [value]. Defaults to lambda x:0. """ def __init__(self, geometry: Geometry, E: Tuple[float, list], v: Tuple[float, list], rho: Tuple[float, list] = None, fx: Callable = lambda x: 0, fy: Callable = lambda x: 0, **kargs) -> None: """Create a Plain Strain problem using sparse matrix Args: geometry (Geometry): 2D 2 variables per node geometry with fast elements E (int or float or list): Young Moduli. If number, all element will have the same young moduli. If list, each position will be the element young moduli, so len(E) == len(self.elements) v (int or float or list): Poisson ratio. If number, all element will have the same Poisson ratio. If list, each position will be the element Poisson ratio, so len(v) == len(self.elements) rho (int or float or list, optional): Density. If not given, mass matrix will not be calculated. Defaults to None. fx (function, optional): Function fx, if fx is constant you can use fx = lambda x: [value]. Defaults to lambda x:0. fy (function, optional): Function fy, if fy is constant you can use fy = lambda x: [value]. Defaults to lambda x:0. """ PlaneStressSparse.__init__( self, geometry, E, v, 1, rho, fx, fy, **kargs) self.C11 = [] self.C22 = [] self.C12 = [] self.C66 = [] for i in range(len(self.geometry.elements)): C11 = self.E1[i]*(1-self.v12[i])/(1+self.v12[i])/(1-2*self.v12[i]) C12 = self.E1[i]*(self.v12[i])/(1+self.v12[i])/(1-2*self.v12[i]) C66 = self.E1[i] / 2 / (1 + self.v12[i]) self.C11.append(C11) self.C22.append(C11) self.C12.append(C12) self.C66.append(C66) self.name = 'Plane Strain Isotropic sparse' class PlaneStressNonLocalSparse(PlaneStressSparse): """Create a Plain Stress nonlocal problem using sparse matrices and the Pisano 2006 formulation. Args: geometry (Geometry): 2D 2 variables per node geometry E (int or float or list): Young Moduli. If number, all element will have the same young moduli. If list, each position will be the element young moduli, so len(E) == len(self.elements) v (int or float or list): Poisson ratio. If number, all element will have the same Poisson ratio. If list, each position will be the element Poisson ratio, so len(v) == len(self.elements) t (int or float or list): Element thickness. If number, all element will have the same thickness. If list, each position will be the element thickness, so len(t) == len(self.elements) l (float): Internal lenght z1 (float): z1 factor Lr (float): Influence distance Lr af (Callable): Atenuation function rho (int or float or list, optional): Density. If not given, mass matrix will not be calculated. Defaults to None. fx (function, optional): Function fx, if fx is constant you can use fx = lambda x: [value]. Defaults to lambda x:0. fy (function, optional): Function fy, if fy is constant you can use fy = lambda x: [value]. Defaults to lambda x:0. """ def __init__(self, geometry: Geometry, E: Tuple[float, list], v: Tuple[float, list], t: Tuple[float, list], l: float, z1: float, Lr: float, af: Callable, rho: Tuple[float, list] = None, fx: Callable = lambda x: 0, fy: Callable = lambda x: 0, notCalculateNonLocal=True, **kargs) -> None: """Create a Plain Stress nonlocal problem using sparse matrices and the Pisano 2006 formulation. Args: geometry (Geometry): 2D 2 variables per node geometry E (int or float or list): Young Moduli. If number, all element will have the same young moduli. If list, each position will be the element young moduli, so len(E) == len(self.elements) v (int or float or list): Poisson ratio. If number, all element will have the same Poisson ratio. If list, each position will be the element Poisson ratio, so len(v) == len(self.elements) t (int or float or list): Element thickness. If number, all element will have the same thickness. If list, each position will be the element thickness, so len(t) == len(self.elements) l (float): Internal lenght z1 (float): z1 factor Lr (float): Influence distance Lr af (Callable): Atenuation function rho (int or float or list, optional): Density. If not given, mass matrix will not be calculated. Defaults to None. fx (function, optional): Function fx, if fx is constant you can use fx = lambda x: [value]. Defaults to lambda x:0. fy (function, optional): Function fy, if fy is constant you can use fy = lambda x: [value]. Defaults to lambda x:0. """ self.l = l self.Lr = Lr self.af = af self.z1 = z1 self.z2 = 1.0-self.z1 PlaneStressSparse.__init__( self, geometry, E, v, t, rho, fx, fy, **kargs) self.properties['l'] = self.l self.properties['Lr'] = self.Lr self.properties['af'] = None self.properties['z1'] = self.z1 self.properties['z2'] = self.z2 if notCalculateNonLocal: nonlocals = self.geometry.detectNonLocal(Lr) for e, dno in zip(self.elements, nonlocals): e.enl = dno self.name = 'Plane Stress Isotropic non local sparse' self.KL = sparse.lil_matrix((self.ngdl, self.ngdl)) self.KNL = sparse.lil_matrix((self.ngdl, self.ngdl)) if self.calculateMass: self.M = sparse.lil_matrix((self.ngdl, self.ngdl)) def elementMatrices(self) -> None: """Calculate the elements matrices """ for ee in tqdm(range(len(self.elements)), unit='Local'): self.elementMatrix(ee) def elementMatrix(self, ee: 'Element') -> None: """Calculates a single element local and nonlocal matrices Args: ee (Element): Element to be calculated """ e = self.elements[ee] m = len(e.gdl.T) Fux = np.zeros([m, 1]) Fvx = np.zeros([m, 1]) _x, _p = e._x, e._p detjac = e.detjac dpx = e.dpx c11 = self.C11[ee] c12 = self.C12[ee] c22 = self.C22[ee] c66 = self.C66[ee] C = np.array([ [c11, c12, 0.0], [c12, c22, 0.0], [0.0, 0.0, c66]]) Fe = np.zeros([2*m, 1]) Ke = np.zeros([2*m, 2*m]) if self.calculateMass: Me = np.zeros([2*m, 2*m]) o = [0.0]*m for k in range(len(e.Z)): # Iterate over gauss points on domain B = np.array([ [*dpx[k, 0, :], *o], [*o, *dpx[k, 1, :]], [*dpx[k, 1, :], *dpx[k, 0, :]]]) P = np.array([ [*_p[k], *o], [*o, *_p[k]]]) Ke += self.t[ee]*(B.T@C@B)*detjac[k]*e.W[k] if self.calculateMass: Me += self.rho[ee]*self.t[ee]*(P.T@P)*detjac[k]*e.W[k] Fe += self.t[ee]*([email protected]([[self.fx(_x[k])], [self.fy(_x[k])]]))*detjac[k]*e.W[k] if e.intBorders: # Cambiar esto a la notación matricial for j in range(len(e.borders)): border = e.borders[j] if (len(border.properties['load_x']) + len(border.properties['load_y'])): _x, _p = e.T(e.Tj[j](border.Z.T)) _s = border.TS(border.Z.T) detjac = border.coords[-1, 0]*0.5 for i in range(m): for fx in border.properties['load_x']: for k in range(len(border.Z)): Fux[i, 0] += fx(_s[k, 0])*_p[k, i] * \ detjac*border.W[k] for fy in border.properties['load_y']: for k in range(len(border.Z)): Fvx[i, 0] += fy(_s[k, 0])*_p[k, i] * \ detjac*border.W[k] subm = np.linspace(0, 2*m-1, 2*m).reshape([2, m]).astype(int) Fe[np.ix_(subm[0])] += Fux Fe[np.ix_(subm[1])] += Fvx self.F[np.ix_(e.gdlm)] += Fe self.KL[np.ix_(e.gdlm, e.gdlm)] += Ke if self.calculateMass: self.M[np.ix_(e.gdlm, e.gdlm)] += Me # e.knls = [] for inl in tqdm(e.enl, unit=' Nolocal'): enl = self.elements[inl] mnl = len(enl.gdl.T) onl = [0.0]*mnl Knl = np.zeros([2*m, 2*mnl]) _xnl = enl._x detjacnl = enl.detjac dpxnl = enl.dpx for k in range(len(e.Z)): B = np.array([ [*dpx[k, 0, :], *o], [*o, *dpx[k, 1, :]], [*dpx[k, 1, :], *dpx[k, 0, :]]]) for knl in range(len(enl.Z)): ro = np.linalg.norm(_x[k]-_xnl[knl])/self.l azn = self.af(ro) Bnl = np.array([ [*dpxnl[knl, 0, :], *onl], [*onl, *dpxnl[knl, 1, :]], [*dpxnl[knl, 1, :], *dpxnl[knl, 0, :]]]) Knl += self.t[ee]*self.t[inl]*azn*(Bnl.T@C@B)*detjac[k] * \ e.W[k]*detjacnl[knl]*enl.W[knl] # e.knls.append(Knl) self.KNL[np.ix_(e.gdlm, enl.gdlm)] += Knl.T def profile(self, p0: list, p1: list, n: float = 100, plot=True) -> None: """Generate a profile between selected points Args: p0 (list): start point of the profile [x0,y0] p1 (list): end point of the profile [xf,yf] n (int, optional): NUmber of samples for graph. Defaults to 100. """ _x = np.linspace(p0[0], p1[0], n) _y = np.linspace(p0[1], p1[1], n) X = np.array([_x, _y]) U = [] U1 = [] U2 = [] U3 = [] _X = [] def dist(X): return np.sqrt((p0[0]-X[0])**2+(p0[1]-X[1])**2) for i in range(n): for _, e in enumerate(self.elements): if e.isInside(X.T[i]): z = e.inverseMapping(np.array([X.T[i]]).T) _, u, du = e.giveSolutionPoint(z, True) U += [u.tolist()] U1 += (du[:, 0, 0]).tolist() U2 += (du[:, 1, 1]).tolist() U3 += (1/2*(du[:, 0, 1]+du[:, 1, 0])).tolist() _X.append(dist(X.T[i])) break if plot: fig = plt.figure() ax = fig.add_subplot(1, 3, 1) ax.plot(_X, U1, color='black') ax.grid() ax.set_xlabel('d') ax.set_ylabel(r'$\varepsilon_{xx}$') ax = fig.add_subplot(1, 3, 2) ax.plot(_X, U2, color='black') ax.grid() ax.set_xlabel('d') ax.set_ylabel(r'$\varepsilon_{yy}$') ax = fig.add_subplot(1, 3, 3) ax.plot(_X, U3, color='black') ax.grid() ax.set_xlabel('d') ax.set_ylabel(r'$\varepsilon_{xy}$') return _X, U1, U2, U3, U def ensembling(self) -> None: """Creation of the system sparse matrix. Force vector is ensembled in integration method """ logging.info('Ensembling equation system...') self.K = self.KL*self.z1 + self.KNL*self.z2 if self.calculateMass: self.M = self.M.tocsr() logging.info('Done!') def postProcess(self, mult: float = 1000, gs=None, levels=1000, **kargs) -> None: """Generate the stress surfaces and displacement fields for the geometry Args: mult (int, optional): Factor for displacements. Defaults to 1000. gs (list, optional): List of 4 gridSpec matplotlib objects. Defaults to None. """ X = [] Y = [] U1 = [] U2 = [] U3 = [] fig = plt.figure() if not gs: gss = gridspec.GridSpec(3, 3) gs = [gss[0, 0], gss[0, 1], gss[0, 2], gss[1:, :]] ax1 = fig.add_subplot(gs[0], projection='3d') ax2 = fig.add_subplot(gs[1], projection='3d') ax3 = fig.add_subplot(gs[2]) ax5 = fig.add_subplot(gs[3]) ee = -1 for e in tqdm(self.elements, unit='Element'): ee += 1 _x, _u, du = e.giveSolution(True) X += _x.T[0].tolist() Y += _x.T[1].tolist() U1 += (du[:, 0, 0]).tolist() U2 += (du[:, 1, 1]).tolist() U3 += ((du[:, 0, 1]+du[:, 1, 0])).tolist() coordsNuevas = e._coordsg + e._Ueg * mult ax5.plot(*e._coordsg.T, '--', color='gray', alpha=0.7) ax5.plot(*coordsNuevas.T, '-', color='black') ax5.legend(['Original Shape', 'Deformed Shape (x'+format(mult)+')']) ax5.set_aspect('equal') ax3.set_aspect('equal') cmap = 'rainbow' surf = ax1.plot_trisurf(X, Y, U1, cmap=cmap, **kargs) plt.colorbar(surf, ax=ax1) ax1.set_title(r'$\varepsilon_{xx}$') surf = ax2.plot_trisurf(X, Y, U2, cmap=cmap, **kargs) plt.colorbar(surf, ax=ax2) ax2.set_title(r'$\varepsilon_{yy}$') surf = ax3.tricontourf(X, Y, U3, cmap=cmap, levels=levels, **kargs) plt.colorbar(surf, ax=ax3) ax3.set_title(r'$\varepsilon_{xy}$') class PlaneStressNonLocalSparseNonHomogeneous(PlaneStressSparse): def __init__(self, geometry: Geometry, E: Tuple[float, list], v: Tuple[float, list], t: Tuple[float, list], l: float, alpha: float, Lr: float, af: Callable, rho: Tuple[float, list] = None, fx: Callable = lambda x: 0, fy: Callable = lambda x: 0, **kargs) -> None: """Creates a plane stress non local non homogeneous finite element problem. This class implements the model proposed by Pisano et al (2009). It is not possible to use different tickness Args: geometry (Geometry): Input 2D Geometry E (Tuple[float, list]): List of Young moduli for each element v (Tuple[float, list]): List of Poisson coefficient for each element t (Tuple[float, list]): Thickness l (float): Internal lenght alpha (float): Non local weight factor Lr (float): Influence distance (6l) af (Callable): Atenuation function rho (Tuple[float, list], optional): Density. Defaults to None. fx (_type_, optional): Force in X direction. Defaults to lambdax:0. fy (_type_, optional): Force in Y direction. Defaults to lambdax:0. """ self.l = l self.Lr = Lr self.af = af self.alpha = alpha PlaneStressSparse.__init__( self, geometry, E, v, t, rho, fx, fy, **kargs) self.properties['l'] = self.l self.properties['Lr'] = self.Lr self.properties['af'] = None self.properties['alpha'] = self.alpha nonlocals = self.geometry.detectNonLocal(Lr) for e, dno in zip(self.elements, nonlocals): e.enl = dno self.name = 'Plane Stress Isotropic non local sparse' self.KL = sparse.lil_matrix((self.ngdl, self.ngdl)) self.KNL = sparse.lil_matrix((self.ngdl, self.ngdl)) if self.calculateMass: self.M = sparse.lil_matrix((self.ngdl, self.ngdl)) def elementMatrices(self) -> None: """Calculate the elements matrices """ logging.info('Calculating gamma functions for all elements') for i, e in enumerate(tqdm(self.elements, unit='Local')): m = len(e.gdl.T) c11 = self.C11[i] c12 = self.C12[i] c22 = self.C22[i] c66 = self.C66[i] C = np.array([ [c11, c12, 0.0], [c12, c22, 0.0], [0.0, 0.0, c66]]) o = [0.0]*m dpx = e.dpx e.gammas = [] knonlocn = 0.0 k = -1 for _xloc, _wloc, _detjacloc in zip(e._x, e.W, e.detjac): # TODO Hay que hacer que este ciclo recorra los dpx tambien porque son los # mismos que los puntos de Gauss k += 1 # El B debería calcularse una sola vez, se vuelve a calcular abajo B = np.array([ [*dpx[k, 0, :], *o], [*o, *dpx[k, 1, :]], [*dpx[k, 1, :], *dpx[k, 0, :]]]) gamma = 0.0 for inl in e.enl: enl = self.elements[inl] for _xnloc, _wnloc, _detjacnloc in zip(enl._x, enl.W, enl.detjac): # TODO Esta integral solo debe hacerse si el elemento esta en la skin region ro = np.linalg.norm(_xloc-_xnloc)/self.l gamma += self.properties['t'][i] * \ self.af(ro) * _wnloc*_detjacnloc e.gammas.append(gamma) knonlocn += self.properties['t'][i]*B.T@((gamma**2) * C)@B # TODO El elemento no debería guardar esta matríz, debería ensamblarse directamente e.knonlocn = knonlocn self.KNL[np.ix_(e.gdlm, e.gdlm)] += knonlocn e.gammas = np.array(e.gammas) # Esto si es estrictamente necesario for e in tqdm(range(len(self.elements)), unit='Local'): self.elementMatrix(e) def elementMatrix(self, ee: 'Element') -> None: """Calculates a single element local and nonlocal matrices Args: ee (int): Element to be calculated """ e = self.elements[ee] m = len(e.gdl.T) Fux = np.zeros([m, 1]) Fvx = np.zeros([m, 1]) _x, _p = e._x, e._p detjac = e.detjac dpx = e.dpx c11 = self.C11[ee] c12 = self.C12[ee] c22 = self.C22[ee] c66 = self.C66[ee] C = np.array([ [c11, c12, 0.0], [c12, c22, 0.0], [0.0, 0.0, c66]]) Fe = np.zeros([2*m, 1]) Ke = np.zeros([2*m, 2*m]) if self.calculateMass: Me = np.zeros([2*m, 2*m]) o = [0.0]*m for k in range(len(e.Z)): # Iterate over gauss points on domain B = np.array([ [*dpx[k, 0, :], *o], [*o, *dpx[k, 1, :]], [*dpx[k, 1, :], *dpx[k, 0, :]]]) P = np.array([ [*_p[k], *o], [*o, *_p[k]]]) Ke += self.t[ee]*(B.T@C@B)*detjac[k]*e.W[k] if self.calculateMass: Me += self.rho[ee]*self.t[ee]*(P.T@P)*detjac[k]*e.W[k] Fe += self.t[ee]*([email protected]([[self.fx(_x[k])], [self.fy(_x[k])]]))*detjac[k]*e.W[k] if e.intBorders: # Cambiar esto a la notación matricial for j in range(len(e.borders)): border = e.borders[j] if (len(border.properties['load_x']) + len(border.properties['load_y'])): _x, _p = e.T(e.Tj[j](border.Z.T)) _s = border.TS(border.Z.T) detjac = border.coords[-1, 0]*0.5 for i in range(m): for fx in border.properties['load_x']: for k in range(len(border.Z)): Fux[i, 0] += fx(_s[k, 0])*_p[k, i] * \ detjac*border.W[k] for fy in border.properties['load_y']: for k in range(len(border.Z)): Fvx[i, 0] += fy(_s[k, 0])*_p[k, i] * \ detjac*border.W[k] subm = np.linspace(0, 2*m-1, 2*m).reshape([2, m]).astype(int) Fe[np.ix_(subm[0])] += Fux Fe[np.ix_(subm[1])] += Fvx self.F[np.ix_(e.gdlm)] += Fe self.KL[np.ix_(e.gdlm, e.gdlm)] += Ke if self.calculateMass: self.M[np.ix_(e.gdlm, e.gdlm)] += Me # e.knls = [] for inl in tqdm(e.enl, unit=' Nolocal'): c11nl = self.C11[inl] c12nl = self.C12[inl] c22nl = self.C22[inl] c66nl = self.C66[inl] Cnl = np.array([ [c11nl, c12nl, 0.0], [c12nl, c22nl, 0.0], [0.0, 0.0, c66nl]]) enl = self.elements[inl] mnl = len(enl.gdl.T) onl = [0.0]*mnl Knl = np.zeros([2*m, 2*mnl]) _xnl = enl._x detjacnl = enl.detjac dpxnl = enl.dpx for k in range(len(e.Z)): B = np.array([ [*dpx[k, 0, :], *o], [*o, *dpx[k, 1, :]], [*dpx[k, 1, :], *dpx[k, 0, :]]]) for knl in range(len(enl.Z)): ro = np.linalg.norm(_x[k]-_xnl[knl])/self.l azn = self.af(ro) Bnl = np.array([ [*dpxnl[knl, 0, :], *onl], [*onl, *dpxnl[knl, 1, :]], [*dpxnl[knl, 1, :], *dpxnl[knl, 0, :]]]) q = 0.0 for inl2 in e.enl: enl2 = self.elements[inl2] c11nl2 = self.C11[inl2] c12nl2 = self.C12[inl2] c22nl2 = self.C22[inl2] c66nl2 = self.C66[inl2] Cnl2 = np.array([ [c11nl2, c12nl2, 0.0], [c12nl2, c22nl2, 0.0], [0.0, 0.0, c66nl2]]) for kk in range(len(enl2.Z)): rho1 = np.linalg.norm(_x[k]-enl2._x[kk])/self.l rho2 = np.linalg.norm(_xnl[knl]-enl2._x[kk])/self.l az1 = self.af(rho1) az2 = self.af(rho2) q += az1*az2*Cnl2*self.t[inl2] * \ enl2.detjac[kk]*enl2.W[kk] J = (e.gammas[k]*C+enl.gammas[knl]*Cnl)*azn - q Knl += self.t[ee]*self.t[inl]*(Bnl.T@J@B)*detjac[k] * \ e.W[k]*detjacnl[knl]*enl.W[knl] # e.knls.append(Knl) # ESTA DEBE SER TRANSPUESTA!!!!!!!!!!!!!!! self.KNL[np.ix_(e.gdlm, enl.gdlm)] -= Knl.T def profile(self, p0: list, p1: list, n: float = 100) -> None: """Generate a profile between selected points Args: p0 (list): start point of the profile [x0,y0] p1 (list): end point of the profile [xf,yf] n (int, optional): NUmber of samples for graph. Defaults to 100. """ _x = np.linspace(p0[0], p1[0], n) _y = np.linspace(p0[1], p1[1], n) X = np.array([_x, _y]) U = [] U1 = [] U2 = [] U3 = [] _X = [] def dist(X): return np.sqrt((p0[0]-X[0])**2+(p0[1]-X[1])**2) for i in range(n): for _, e in enumerate(self.elements): if e.isInside(X.T[i]): z = e.inverseMapping(np.array([X.T[i]]).T) _, u, du = e.giveSolutionPoint(z, True) U += [u.tolist()] U1 += (du[:, 0, 0]).tolist() U2 += (du[:, 1, 1]).tolist() U3 += (1/2*(du[:, 0, 1]+du[:, 1, 0])).tolist() _X.append(dist(X.T[i])) break fig = plt.figure() ax = fig.add_subplot(1, 3, 1) ax.plot(_X, U1, color='black') ax.grid() ax.set_xlabel('d') ax.set_ylabel(r'$\varepsilon_{xx}$') ax = fig.add_subplot(1, 3, 2) ax.plot(_X, U2, color='black') ax.grid() ax.set_xlabel('d') ax.set_ylabel(r'$\varepsilon_{yy}$') ax = fig.add_subplot(1, 3, 3) ax.plot(_X, U3, color='black') ax.grid() ax.set_xlabel('d') ax.set_ylabel(r'$\varepsilon_{xy}$') return _X, U1, U2, U3, U def ensembling(self) -> None: """Creation of the system sparse matrix. Force vector is ensembled in integration method """ logging.info('Ensembling equation system...') self.K = self.KL + self.KNL*self.alpha if self.calculateMass: self.M = self.M.tocsr() logging.info('Done!') def postProcess(self, mult: float = 1000, gs=None, levels=1000, **kargs) -> None: """Generate the stress surfaces and displacement fields for the geometry Args: mult (int, optional): Factor for displacements. Defaults to 1000. gs (list, optional): List of 4 gridSpec matplotlib objects. Defaults to None. """ X = [] Y = [] U1 = [] U2 = [] U3 = [] fig = plt.figure() if not gs: gss = gridspec.GridSpec(3, 3) gs = [gss[0, 0], gss[0, 1], gss[0, 2], gss[1:, :]] ax1 = fig.add_subplot(gs[0], projection='3d') ax2 = fig.add_subplot(gs[1], projection='3d') ax3 = fig.add_subplot(gs[2]) ax5 = fig.add_subplot(gs[3]) ee = -1 for e in tqdm(self.elements, unit='Element'): ee += 1 _x, _u, du = e.giveSolution(True) X += _x.T[0].tolist() Y += _x.T[1].tolist() U1 += (du[:, 0, 0]).tolist() U2 += (du[:, 1, 1]).tolist() U3 += (1/2*(du[:, 0, 1]+du[:, 1, 0])).tolist() coordsNuevas = e._coordsg + e._Ueg * mult ax5.plot(*e._coordsg.T, '--', color='gray', alpha=0.7) ax5.plot(*coordsNuevas.T, '-', color='black') ax5.legend(['Original Shape', 'Deformed Shape (x'+format(mult)+')']) ax5.set_aspect('equal') # ax1.set_aspect('equal') ax3.set_aspect('equal') # ax2.set_aspect('equal') cmap = 'rainbow' surf = ax1.plot_trisurf(X, Y, U1, cmap=cmap, **kargs) plt.colorbar(surf, ax=ax1) ax1.set_title(r'$\varepsilon_{xx}$') surf = ax2.plot_trisurf(X, Y, U2, cmap=cmap, **kargs) plt.colorbar(surf, ax=ax2) ax2.set_title(r'$\varepsilon_{yy}$') surf = ax3.tricontourf(X, Y, U3, cmap=cmap, levels=levels, **kargs) plt.colorbar(surf, ax=ax3) ax3.set_title(r'$\varepsilon_{xy}$') mask = self.geometry.mask if self.geometry.holes: for hole in self.geometry.holes: Xs = np.array(hole['vertices'])[:, 0] Ys = np.array(hole['vertices'])[:, 1] ax1.fill(Xs, Ys, color='white', zorder=30) ax2.fill(Xs, Ys, color='white', zorder=30) ax3.fill(Xs, Ys, color='white', zorder=30) if mask: mask = np.array(mask) cornersnt = np.array(mask[::-1]) xmin = np.min(cornersnt[:, 0]) xmax = np.max(cornersnt[:, 0]) ymin = np.min(cornersnt[:, 1]) ymax = np.max(cornersnt[:, 1]) Xs = [xmin, xmax, xmax, xmin]+cornersnt[:, 0].tolist() Ys = [ymin, ymin, ymax, ymax]+cornersnt[:, 1].tolist() ax1.fill(Xs, Ys, color='white', zorder=30) ax2.fill(Xs, Ys, color='white', zorder=30) ax3.fill(Xs, Ys, color='white', zorder=30)
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Elasticity2D.py
Elasticity2D.py
from re import M from .Core import Core, Geometry from .Solvers import NoLineal from tqdm import tqdm import numpy as np import matplotlib.pyplot as plt from typing import Callable class NonLinealSimpleEquation(Core): """Creates a nonlineal 1D equation with the form: .. math:: -\\frac{d}{dx}\\left(a(x)u\\frac{du}{dx}\\right)=f(x) Args: geometry (Geometry): Input lineal geometry a (Callable): Function a f (Callable): Function f """ def __init__(self, geometry: Geometry, a: Callable, f: Callable, **kargs) -> None: """Creates a nonlineal 1D equation with the form: .. math:: -\\frac{d}{dx}\\left(a(x)u\\frac{du}{dx}\\right)=f(x) Args: geometry (Geometry): Input lineal geometry a (Callable): Function a f (Callable): Function f """ self.a = a self.f = f Core.__init__(self, geometry, solver=NoLineal.Newton, **kargs) self.name = '1D non lineal sample equation' self.properties['a'] = None self.properties['f'] = None def elementMatrices(self) -> None: """Calculate the element matrices usign Reddy's non lineal finite element model. Element matrices and forces are calculated with Gauss-Legendre quadrature. Point number depends of element discretization. """ for e in tqdm(self.elements, unit='Element'): e.Te = np.zeros(e.Ke.shape) e.Fe = np.zeros(e.Fe.shape) e.Ke = np.zeros(e.Ke.shape) # Gauss points in global coordinates and Shape functions evaluated in gauss points _x, _p = e.T(e.Z.T) # Jacobian evaluated in gauss points and shape functions derivatives in natural coordinates jac, dpz = e.J(e.Z.T) detjac = np.linalg.det(jac) _j = np.linalg.inv(jac) # Jacobian inverse dpx = _j @ dpz # Shape function derivatives in global coordinates for i in range(e.n): # self part must be vectorized for j in range(e.n): for k in range(len(e.Z)): # Iterate over gauss points on domain e.Ke[i, j] += \ (self.a(_x[k])*e.Ue[0]@_p[k]*dpx[k][0] [i]*dpx[k][0][j])*detjac[k]*e.W[k] e.Te[i, j] += \ (_p[k][j]*dpx[k][0] [i]*e.Ue[0]@dpx[k][0])*detjac[k]*e.W[k] for k in range(len(e.Z)): # Iterate over gauss points on domain e.Fe[i][0] += self.f(_x[k])*_p[k][i]*detjac[k]*e.W[k] e.Te += e.Ke # e.Fe[:,0] = 2*self.G*self._phi*detjac@_p # e.Ke = (np.transpose(dpx,axes=[0,2,1]) @ dpx).T @ detjac def postProcess(self) -> None: """Generate graph of solution and solution derivative """ X = [] U1 = [] U2 = [] fig = plt.figure() ax1 = fig.add_subplot(1, 2, 1) ax2 = fig.add_subplot(1, 2, 2) for e in tqdm(self.elements, unit='Element'): _x, _u, du = e.giveSolution(True) X += _x.T[0].tolist() U1 += _u[0].tolist() U2 += (du[:, 0, 0]).tolist() ax1.plot(X, U1) ax2.plot(X, np.array(U2)) ax1.grid() ax2.grid() ax1.set_title(r'$U(x)$') ax2.set_title(r'$\frac{dU}{dx}$')
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/NonLinealExample.py
NonLinealExample.py
from .Solvers import NoLineal from .Elements.E1D.EulerBernoulliElement import EulerBernoulliElement from .Core import Core, Geometry from tqdm import tqdm import numpy as np import matplotlib.pyplot as plt import logging class EulerBernoulliBeam(Core): """Creates a Euler Bernoulli beam problem Args: geometry (Geometry): 1D 2 variables per node problem geometry. Geometry must have Euler Bernoulli elements. EI (float): Young's moduli multiplied by second moment of area (inertia). cf (float, optional): Soil coeficient. Defaults to 0. f (float or int or function, optional): Force function applied to the beam. Defaults to 0.0 """ def __init__(self, geometry: Geometry, EI: float, cf: float = 0.0, f: float = 0.0) -> None: """Creates a Euler Bernoulli beam problem Args: geometry (Geometry): 1D 2 variables per node problem geometry. Geometry must have Lineal elements. EI (float): Young's moduli multiplied by second moment of area (inertia). cf (float, optional): Soil coeficient. Defaults to 0. f (float or int or function, optional): Force function applied to the beam. Defaults to 0.0 """ self.a = EI self.f = f self.cf = cf if isinstance(EI, float) or isinstance(EI, int): self.a = lambda x: EI if isinstance(f, float) or isinstance(f, int): self.f = lambda x: f if isinstance(cf, float) or isinstance(cf, int): self.cf = lambda x: cf if geometry.nvn == 1: logging.warning( 'Border conditions lost, please usea a geometry with 2 variables per node (nvn=2)') Core.__init__(self, geometry) for i in range(len(self.elements)): self.elements[i] = EulerBernoulliElement( self.elements[i].coords, self.elements[i].gdl) self.name = 'Euler Bernoulli' self.properties['EI'] = EI self.properties['f'] = f self.properties['cf'] = cf def elementMatrices(self) -> None: """Calculate the element matrices usign Guass Legendre quadrature. """ for e in tqdm(self.elements, unit='Element'): _x, _ = e.T(e.Z.T) _h = e.hermit(e.Z.T) jac, _ = e.J(e.Z.T) detjac = np.linalg.det(jac) # _j = np.linalg.inv(jac) # dpx = _j @ dpz _dh = e.dhermit(e.Z.T) for i in range(e.n): for j in range(e.n): for k in range(len(e.Z)): # + self.c(_x[k])*_p[k][i]*_p[k][j] e.Ke[i, j] += (self.a(_x[k])*_dh[1][i][k] * _dh[1][j][k]+self.cf(_x[k, 0])*_h[k][i]*_h[k][j])*detjac[k]*e.W[k] for k in range(len(e.Z)): e.Fe[i][0] += self.f(_x[k])*_h[k][i]*detjac[k]*e.W[k] def postProcess(self, plot=True) -> None: """Post process the solution. Shows graphs of displacement, rotation, shear and moment. """ X = [] U1 = [] U2 = [] U3 = [] U4 = [] for e in self.elements: _x, _u, du = e.giveSolution(True) X += _x.T[0].tolist() U1 += _u.tolist() U2 += (du[:, 0]).tolist() U3 += (du[:, 1]*self.a(_x.T[0])).tolist() U4 += (du[:, 2]*self.a(_x.T[0])).tolist() if plot: fig = plt.figure() ax1 = fig.add_subplot(2, 2, 1) ax2 = fig.add_subplot(2, 2, 2) ax3 = fig.add_subplot(2, 2, 3) ax4 = fig.add_subplot(2, 2, 4) ax1.plot(X, U1) ax1.grid() ax2.plot(X, U2) ax2.grid() ax3.plot(X, U3) ax3.grid() ax4.plot(X, U4) ax4.grid() ax1.set_title(r'$U(x)$') ax2.set_title(r'$\frac{dU}{dx}$') ax3.set_title(r'$\frac{d^2U}{dx^2}$') ax4.set_title(r'$\frac{d^3U}{dx^3}$') return X, U1, U2, U3, U4 class EulerBernoulliBeamNonLineal(Core): """Creates a Euler Bernoulli beam problem Args: geometry (Geometry): 1D 2 variables per node problem geometry. Geometry must have Euler Bernoulli elements. EI (float): Young's moduli multiplied by second moment of area (inertia). EA (float): Young's moduli multiplied by area. cf (float, optional): Soil coeficient. Defaults to 0. fx (float or int or function, optional): Force function applied in the x direction to the beam. Defaults to 0.0 fy (float or int or function, optional): Force function applied in the y direction to the beam. Defaults to 0.0 """ def __init__(self, geometry: Geometry, EI: float, EA: float, fx: float = 0.0, fy: float = 0.0) -> None: """Creates a Euler Bernoulli beam problem Args: geometry (Geometry): 1D 2 variables per node problem geometry. Geometry must have Lineal elements. EI (float): Young's moduli multiplied by second moment of area (inertia). EA (float): Young's moduli multiplied by area. cf (float, optional): Soil coeficient. Defaults to 0. fx (float or int or function, optional): Force function applied in the x direction to the beam. Defaults to 0.0 fy (float or int or function, optional): Force function applied in the y direction to the beam. Defaults to 0.0 """ self.Axx = EI self.Dxx = EA self.fx0 = fx self.fy0 = fy if isinstance(EA, float) or isinstance(EA, int): self.Axx = lambda x: EA if isinstance(EI, float) or isinstance(EI, int): self.Dxx = lambda x: EI if isinstance(fx, float) or isinstance(fx, int): self.fx0 = lambda x: fx if isinstance(fy, float) or isinstance(fy, int): self.fy0 = lambda x: fy if geometry.nvn == 1: logging.warning( 'Border conditions lost, please usea a geometry with 2 variables per node (nvn=2)') Core.__init__(self, geometry, solver=NoLineal.LoadControl) self.properties['EI'] = EI self.properties['EA'] = EA self.properties['fx'] = fx self.properties['fy'] = fy for i in range(len(self.elements)): self.elements[i] = EulerBernoulliElement( self.elements[i].coords, self.elements[i].gdl, nvn=3) self.name = 'Euler Bernoulli non linear' def elementMatrices(self) -> None: """Calculate the element matrices usign Guass Legendre quadrature. """ for e in tqdm(self.elements, unit='Element'): k11 = np.zeros([2, 2]) k12 = np.zeros([2, 4]) k22 = np.zeros([4, 4]) f1 = np.zeros([2, 1]) f2 = np.zeros([4, 1]) # Integración completa _x, _p = e.T(e.Z.T) _h = e.hermit(e.Z.T) jac, dpz = e.J(e.Z.T) detjac = np.linalg.det(jac) _j = np.linalg.inv(jac) dpx = _j @ dpz _dh = e.dhermit(e.Z.T) for i in range(4): for j in range(4): for k in range(len(e.Z)): k22[i, j] += (self.Dxx(_x[k])*_dh[1][i][k] * _dh[1][j][k])*detjac[k]*e.W[k] if i < 2 and j < 2: k11[i, j] += (self.Axx(_x[k])*dpx[k][0][i] * dpx[k][0][j])*detjac[k]*e.W[k] for k in range(len(e.Z)): if i < 2: f1[i][0] += self.fx(_x[k])*_p[k][i]*detjac[k]*e.W[k] f2[i][0] += self.fy(_x[k])*_h[k][i]*detjac[k]*e.W[k] # Integración reducida _x, _p = e.T(e.Zr.T) _h = e.hermit(e.Zr.T) jac, dpz = e.J(e.Zr.T) detjac = np.linalg.det(jac) _j = np.linalg.inv(jac) dpx = _j @ dpz _dh = e.dhermit(e.Zr.T) for i in range(4): for j in range(4): for k in range(len(e.Zr)): ue = e.Ue.flatten()[[1, 2, 4, 5]] dw = ue @ _dh[0, :, k].T # + self.c(_x[k])*_p[k][i]*_p[k][j] if i < 2: k12[i, j] += 1.0/2.0*(self.Axx(_x[k])*dw*dpx[k][0][i] * _dh[0][j][k])*detjac[k]*e.Wr[k] k22[i, j] += 1.0/2.0*(self.Axx(_x[k])*dw**2*_dh[0][i][k] * _dh[0][j][k])*detjac[k]*e.Wr[k] e.Ke[np.ix_([0, 3], [0, 3])] = k11 e.Ke[np.ix_([1, 2, 4, 5], [1, 2, 4, 5])] = k22 e.Ke[np.ix_([0, 3], [1, 2, 4, 5])] = k12 e.Ke[np.ix_([1, 2, 4, 5], [0, 3])] = 2*k12.T e.Fe[[0, 3]] = f1 e.Fe[[1, 2, 4, 5]] = f2 def postProcess(self, plot=True) -> None: """Post process the solution. Shows graphs of displacement, rotation, shear and moment. """ X = [] U1 = [] U2 = [] U3 = [] U4 = [] for e in self.elements: original = e.Ue.copy() ueflex = e.Ue.flatten()[[1, 2, 4, 5]] # ueax = e.Ue.flatten()[[0, 3]] e.Ue = ueflex _x, _u, du = e.giveSolution(True) X += _x.T[0].tolist() U1 += _u.tolist() U2 += (du[:, 0]).tolist() U3 += (du[:, 1]).tolist() U4 += (du[:, 2]).tolist() e.Ue = original if plot: fig = plt.figure() ax1 = fig.add_subplot(2, 2, 1) ax2 = fig.add_subplot(2, 2, 2) ax3 = fig.add_subplot(2, 2, 3) ax4 = fig.add_subplot(2, 2, 4) ax1.plot(X, U1) ax1.grid() ax2.plot(X, U2) ax2.grid() ax3.plot(X, U3) ax3.grid() ax4.plot(X, U4) ax4.grid() ax1.set_title(r'$U(x)$') ax2.set_title(r'$\frac{dU}{dx}$') ax3.set_title(r'$\frac{d^2U}{dx^2}$') ax4.set_title(r'$\frac{d^3U}{dx^3}$') return X, U1, U2, U3, U4
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/EulerBernoulliBeam.py
EulerBernoulliBeam.py
# ------------------------------------------------------------------------------- # - # Python dual-logging setup (console and log file), - # supporting different log levels and colorized output - # - # Created by Fonic <https://github.com/fonic> - # Date: 04/05/20 - # - # Based on: - # https://stackoverflow.com/a/13733863/1976617 - # https://uran198.github.io/en/python/2016/07/12/colorful-python-logging.html - # https://en.wikipedia.org/wiki/ANSI_escape_code#Colors - # - # ------------------------------------------------------------------------------- # Imports import os import sys import logging from datetime import datetime class TimeFilter(logging.Filter): def filter(self, record): try: last = self.last except AttributeError: last = record.relativeCreated delta = datetime.fromtimestamp( record.relativeCreated/1000.0) - datetime.fromtimestamp(last/1000.0) record.relative = '{0:.4f}'.format( delta.seconds + delta.microseconds/1000000.0) self.last = record.relativeCreated return True class LogFormatter(logging.Formatter): """Creates a Logging Formatter Args: color (color): Color """ COLOR_CODES = { logging.CRITICAL: "\033[1;35m", # bright/bold magenta logging.ERROR: "\033[1;31m", # bright/bold red logging.WARNING: "\033[1;33m", # bright/bold yellow logging.INFO: "\033[0;37m", # white / light gray logging.DEBUG: "\033[1;30m" # bright/bold black / dark gray } RESET_CODE = "\033[0m" def __init__(self, color, *args, **kwargs): """Creates a Logging Formatter Args: color (color): Color """ super(LogFormatter, self).__init__(*args, **kwargs) self.color = color def format(self, record, *args, **kwargs): """Formats arecord Args: record (record): Record to be formatted """ if (self.color == True and record.levelno in self.COLOR_CODES): record.color_on = self.COLOR_CODES[record.levelno] record.color_off = self.RESET_CODE else: record.color_on = "" record.color_off = "" return super(LogFormatter, self).format(record, *args, **kwargs) class FEMLogger(): """Creation of a Logger for FEM purposes. Based on Python Logger by Fonic <https://github.com/fonic> """ def __init__(self, ad: str = '') -> None: """Creates a FEMLogger object Args: ad (str, optional): Aditional name. Defaults to ''. """ self.additional_name = ad def setup_logging(self, console_log_output="stdout", console_log_level="warning", console_log_color=True, logfile_file=None, logfile_log_level="debug", logfile_log_color=False, log_line_template="%(color_on)s[%(levelname)-8s] %(message)s%(color_off)s"): """Set the logger Args: console_log_output (str, optional): . Defaults to "stdout". console_log_level (str, optional): . Defaults to "warning". console_log_color (bool, optional): . Defaults to True. logfile_file (optional): . Defaults to None. logfile_log_level (str, optional): . Defaults to "debug". logfile_log_color (bool, optional): . Defaults to False. log_line_template (str, optional): . Defaults to "%(color_on)s[%(levelname)-8s] %(message)s%(color_off)s". """ script_name = os.path.splitext(os.path.basename(sys.argv[0]))[0] additional_name = '' if self.additional_name: additional_name = '_' + self.additional_name if not logfile_file: logfile_file = script_name + additional_name + ".log" # Create logger # For simplicity, we use the root logger, i.e. call 'logging.getLogger()' # without name argument. This way we can simply use module methods for # for logging throughout the script. An alternative would be exporting # the logger, i.e. 'global logger; logger = logging.getLogger("<name>")' logger = logging.getLogger() logging.getLogger('matplotlib').setLevel(logging.WARNING) logging.getLogger('PIL').setLevel(logging.WARNING) # Set global log level to 'debug' (required for handler levels to work) logger.setLevel(logging.DEBUG) # Create console handler console_log_output = console_log_output.lower() if (console_log_output == "stdout"): console_log_output = sys.stdout elif (console_log_output == "stderr"): console_log_output = sys.stderr else: print("Failed to set console output: invalid output: '%s'" % console_log_output) return False console_handler = logging.StreamHandler(console_log_output) # Set console log level try: # only accepts uppercase level names console_handler.setLevel(console_log_level.upper()) except Exception as e: print("Failed to set console log level: invalid level: '%s'" % console_log_level, e) return False # Create and set formatter, add console handler to logger console_formatter = LogFormatter( fmt=log_line_template, color=console_log_color) console_handler.setFormatter(console_formatter) logger.addHandler(console_handler) # Create log file handler try: logfile_handler = logging.FileHandler(logfile_file) except Exception as exception: print("Failed to set up log file: %s" % str(exception)) return False # Set log file log level try: # only accepts uppercase level names logfile_handler.setLevel(logfile_log_level.upper()) except: print("Failed to set log file log level: invalid level: '%s'" % logfile_log_level) return False # Create and set formatter, add log file handler to logger logfile_formatter = LogFormatter( fmt='[%(asctime)s] (Delta duration: %(relative)ss) ' + log_line_template, color=logfile_log_color) logfile_handler.setFormatter(logfile_formatter) logger.addHandler(logfile_handler) [hndl.addFilter(TimeFilter()) for hndl in logger.handlers] self.start_time = datetime.now() logging.debug( f'============================{script_name}============================') logging.debug( f'Session started @ {self.start_time.strftime("%d/%m/%Y - %H:%M:%S")}') # Success return True def end_timer(self): """Ends the sesion time """ self.end_time = datetime.now() logging.debug( f'Session ended @ {self.end_time.strftime("%d/%m/%Y - %H:%M:%S")}') logging.debug( f'Duration: {(self.end_time-self.start_time)}') return self.end_time-self.start_time
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/FEMLogger.py
FEMLogger.py
from .Core import Core, tqdm, np, Geometry import matplotlib.pyplot as plt import matplotlib from typing import Callable, Tuple class Heat2D(Core): """Creates a Heat2D problem with convective borders The differential equation is: .. math:: -\\frac{\\partial}{\\partial x}\\left(k_x\\frac{\\partial T}{\\partial x}\\right) - \\frac{\\partial}{\\partial y}\\left(k_y\\frac{\\partial T}{\\partial y}\\right)=f(x,y) With convective border conditions: .. math:: k_x\\frac{\\partial T}{\\partial x}n_x+k_y\\frac{\\partial T}{\\partial y}n_y+\\beta (T-T_\\infty)=\\hat{q_n} Args: geometry (Geometry): Input 1 variable per node geometry kx (Tuple[float, list]): Heat transfer coeficient in x direction. If number, all element will have the same coefficient. If list, each position will be the element coefficient, so len(kx) == len(self.elements) ky (Tuple[float, list]): Heat transfer coeficient in y direction. If number, all element will have the same coefficient. If list, each position will be the element coefficient, so len(kx) == len(self.elements) f (Callable, optional): Internal heat generation function. Defaults to None. """ def __init__(self, geometry: Geometry, kx: Tuple[float, list], ky: Tuple[float, list], f: Callable = None, **kargs) -> None: """Creates a Heat2D problem with convective borders The differential equation is: .. math:: -\\frac{\\partial}{\\partial x}\\left(k_x\\frac{\\partial T}{\\partial x}\\right) - \\frac{\\partial}{\\partial y}\\left(k_y\\frac{\\partial T}{\\partial y}\\right)=f(x,y) With convective border conditions: .. math:: k_x\\frac{\\partial T}{\\partial x}n_x+k_y\\frac{\\partial T}{\\partial y}n_y+\\beta (T-T_\\infty)=\\hat{q_n} Args: geometry (Geometry): Input 1 variable per node geometry kx (Tuple[float, list]): Heat transfer coeficient in x direction. If number, all element will have the same coefficient. If list, each position will be the element coefficient, so len(kx) == len(self.elements) ky (Tuple[float, list]): Heat transfer coeficient in y direction. If number, all element will have the same coefficient. If list, each position will be the element coefficient, so len(kx) == len(self.elements) f (Callable, optional): Internal heat generation function. Defaults to None. """ if isinstance(kx, float) or isinstance(kx, int): kx = [kx]*len(geometry.elements) if isinstance(ky, float) or isinstance(ky, int): ky = [ky]*len(geometry.elements) self.kx = kx self.ky = ky self.f = f self.geometry = geometry Core.__init__(self, geometry, **kargs) self.name = '2D Heat transfer' self.properties['kx'] = self.kx self.properties['ky'] = self.ky self.properties['f'] = None def elementMatrices(self) -> None: """Calculate the element matrices using Gauss Legendre quadrature. """ for ee, e in enumerate(tqdm(self.elements, unit='Element')): m = len(e.gdl.T) K = np.zeros([m, m]) H = np.zeros([m, m]) F = np.zeros([m, 1]) P = np.zeros([m, 1]) _x, _p = e.T(e.Z.T) jac, dpz = e.J(e.Z.T) detjac = np.linalg.det(jac) _j = np.linalg.inv(jac) dpx = _j @ dpz for i in range(m): for j in range(m): for k in range(len(e.Z)): K[i, j] += (self.kx[ee]*dpx[k, 0, i]*dpx[k, 0, j] + self.ky[ee]*dpx[k, 1, i]*dpx[k, 1, j])*detjac[k]*e.W[k] if self.f: for k in range(len(e.Z)): F[i][0] += _p[k, i] * self.f(_x[k])*detjac[k]*e.W[k] if e.intBorders: for j in range(len(e.borders)): border = e.borders[j] if len(border.properties['load_x']): _x, _p = e.T(e.Tj[j](border.Z.T)) _s = border.TS(border.Z.T) detjac = border.coords[-1, 0]*0.5 for i in range(m): for fx in border.properties['load_x']: for k in range(len(border.Z)): P[i, 0] += border.properties['Ta']*fx(_s[k, 0])*_p[k, i] * \ detjac*border.W[k] for j in range(m): for k in range(len(border.Z)): H[i, j] += fx(_s[k, 0])*_p[k, i] * _p[k, j] * \ detjac*border.W[k] e.Fe += F+P e.Ke += K+H def defineConvectiveBoderConditions(self, region: int, beta: float = 0, Ta: float = 0) -> None: """Define convective borders Args: region (int): region in wich load will be applied beta (float, optional): Convective coeficient :math:`\\beta` . Defaults to 0. Ta (float, optional): Ambient temperature in convective border. Defaults to 0. """ self.geometry.loadOnRegion( region, fx=lambda s: beta, add={'Ta': Ta}) def postProcess(self, levels=1000) -> None: """Generate the temperature surface for the geometry """ X = [] Y = [] U = [] DUX = [] DUY = [] fig = plt.figure() ax = fig.add_subplot(1, 2, 1) ee = -1 for e in tqdm(self.elements, unit='Element'): ee += 1 _x, _u, _du = e.giveSolution(True) X += _x.T[0].tolist() Y += _x.T[1].tolist() U += _u[0].tolist() DUX += (self.kx[ee]*_du[:, 0, 0]).tolist() DUY += (self.ky[ee]*_du[:, 0, 1]).tolist() surf = ax.tricontourf(X, Y, U, cmap='rainbow', levels=levels) CS = ax.tricontour(X, Y, U, colors='k', levels=12, alpha=0.6) ax.clabel(CS, CS.levels, inline=True, fmt=lambda x: format(x, '.3f'), colors='k', use_clabeltext=True, fontsize=7) plt.colorbar(surf, ax=ax) ax.set_title(r'$T$') ax.set_aspect('equal') ax2 = fig.add_subplot(1, 2, 2) # ax2.tricontourf(X, Y, U, cmap='rainbow', levels=levels) M = np.hypot(DUX, DUY) surf = ax2.quiver(X, Y, DUX, DUY, M, units='x', cmap='rainbow') plt.colorbar(surf, ax=ax2) ax2.set_title( r'$\{k_x\frac{\partial T}{\partial x},k_y\frac{\partial T}{\partial y}\}$') mask = self.geometry.mask if self.geometry.holes: for hole in self.geometry.holes: Xs = np.array(hole['vertices'])[:, 0] Ys = np.array(hole['vertices'])[:, 1] ax.fill(Xs, Ys, color='white', zorder=30) ax2.fill(Xs, Ys, color='white', zorder=30) if mask: mask = np.array(mask) cornersnt = np.array(mask[::-1]) xmin = np.min(cornersnt[:, 0]) xmax = np.max(cornersnt[:, 0]) ymin = np.min(cornersnt[:, 1]) ymax = np.max(cornersnt[:, 1]) Xs = [xmin, xmax, xmax, xmin]+cornersnt[:, 0].tolist() Ys = [ymin, ymin, ymax, ymax]+cornersnt[:, 1].tolist() ax.fill(Xs, Ys, color='white', zorder=30) ax2.fill(Xs, Ys, color='white', zorder=30) ax2.set_aspect('equal')
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Heat2D.py
Heat2D.py
from .Core import Core, Geometry from tqdm import tqdm import numpy as np import matplotlib.pyplot as plt class Torsion2D(Core): """Create a torsional finite element problem The differential equation is: .. math:: -\\frac{-1}{G}\\nabla^2\\Psi=2\\theta Where: .. math:: \\sigma_{xz}\\frac{\\partial\\Psi}{\\partial y} \\\\ \\sigma_{yz}=-\\frac{\\partial\\Psi}{\\partial x} With :math:`\\Psi=0` on the boundary. Args: geometry (Geometry): 2D 1 variable per node geometry G (float): Shear moduli of elements phi (float): Rotation angle in radians """ def __init__(self, geometry: Geometry, G: float, phi: float, **kargs) -> None: """Create a torsional finite element problem Args: geometry (Geometry): 2D 1 variable per node geometry G (float): Shear moduli of elements phi (float): Rotation angle in radians """ if isinstance(G, float) or isinstance(G, int): G = [G]*len(geometry.elements) self.G = G self._phi = phi geometry.cbeAllRegions(0) Core.__init__(self, geometry, **kargs) self.name = '2D Torsion' self.properties['_phi'] = self._phi self.properties['G'] = self.G def elementMatrices(self) -> None: """Calculate the element matrices usign Reddy's (2005) finite element model """ ee = -1 for e in tqdm(self.elements, unit='Element'): ee += 1 # Gauss points in global coordinates and Shape functions evaluated in gauss points _x, _p = e.T(e.Z.T) # Jacobian evaluated in gauss points and shape functions derivatives in natural coordinates jac, dpz = e.J(e.Z.T) detjac = np.linalg.det(jac)*e.W _j = np.linalg.inv(jac) # Jacobian inverse dpx = _j @ dpz # Shape function derivatives in global coordinates # for k in range(len(_x)): #Iterate over gauss points on domain # for i in range(e.n): #self part must be vectorized # for j in range(e.n): # e.Ke[i,j] += (dpx[k][0][i]*dpx[k][0][j] + dpx[k][1][i]*dpx[k][1][j])*detjac[k]*e.W[k] # e.Fe[i][0] += 2*self.G*self._phi*_p[k][i]*detjac[k]*e.W[k] e.Fe[:, 0] = self._phi*detjac@_p e.Ke = (np.transpose(dpx, axes=[0, 2, 1]) @ dpx).T @ detjac/2/self.G[ee] def postProcess(self, levels=30, derivatives=True) -> None: """Create graphs for stress function and derivatives. """ X = [] Y = [] U1 = [] U2 = [] U3 = [] U4 = [] if derivatives: fig = plt.figure() ax1 = fig.add_subplot(2, 2, 1) ax2 = fig.add_subplot(2, 2, 2) ax3 = fig.add_subplot(2, 2, 3) ax4 = fig.add_subplot(2, 2, 4) for e in tqdm(self.elements, unit='Element'): _x, _u, du = e.giveSolution(True) X += _x.T[0].tolist() Y += _x.T[1].tolist() U2 += (-du[:, 0, 0]).tolist() U3 += du[:, 0, 1].tolist() U1 += _u[0].tolist() U4 += np.sqrt(du[:, 0, 0]**2 + du[:, 0, 1]**2).tolist() surf = ax1.tricontourf(X, Y, U1, cmap='rainbow', levels=levels) fig.colorbar(surf, ax=ax1) ax1.set_title(r'$\Psi$') surf = ax2.tricontourf(X, Y, U2, cmap='rainbow', levels=levels) ax2.set_title(r'$\sigma_{yz}$') fig.colorbar(surf, ax=ax2) surf = ax3.tricontourf(X, Y, U3, cmap='rainbow', levels=levels) ax3.set_title(r'$\sigma_{xz}$') fig.colorbar(surf, ax=ax3) surf = ax4.tricontourf(X, Y, U4, cmap='rainbow', levels=levels) ax4.set_title(r'$\sqrt{\sigma_{xz}^2+\sigma_{yz}^2}$') fig.colorbar(surf, ax=ax4) mask = self.geometry.mask if self.geometry.holes: for hole in self.geometry.holes: Xs = np.array(hole['vertices'])[:, 0] Ys = np.array(hole['vertices'])[:, 1] ax2.fill(Xs, Ys, color='white', zorder=30) ax3.fill(Xs, Ys, color='white', zorder=30) ax4.fill(Xs, Ys, color='white', zorder=30) if mask: mask = np.array(mask) cornersnt = np.array(mask[::-1]) xmin = np.min(cornersnt[:, 0]) xmax = np.max(cornersnt[:, 0]) ymin = np.min(cornersnt[:, 1]) ymax = np.max(cornersnt[:, 1]) Xs = [xmin, xmax, xmax, xmin]+cornersnt[:, 0].tolist() Ys = [ymin, ymin, ymax, ymax]+cornersnt[:, 1].tolist() ax1.fill(Xs, Ys, color='white', zorder=30) ax2.fill(Xs, Ys, color='white', zorder=30) ax3.fill(Xs, Ys, color='white', zorder=30) ax4.fill(Xs, Ys, color='white', zorder=30) ax4.set_aspect('equal') ax1.set_aspect('equal') ax2.set_aspect('equal') ax3.set_aspect('equal') else: fig = plt.figure() ax1 = fig.add_subplot(1, 1, 1) for e in tqdm(self.elements, unit='Element'): _x, _u, du = e.giveSolution(True) X += _x.T[0].tolist() Y += _x.T[1].tolist() U1 += _u[0].tolist() surf = ax1.tricontourf(X, Y, U1, cmap='rainbow', levels=levels) fig.colorbar(surf, ax=ax1) mask = self.geometry.mask if self.geometry.holes: for hole in self.geometry.holes: Xs = np.array(hole['vertices'])[:, 0] Ys = np.array(hole['vertices'])[:, 1] ax1.fill(Xs, Ys, color='white', zorder=30) if mask: mask = np.array(mask) cornersnt = np.array(mask[::-1]) xmin = np.min(cornersnt[:, 0]) xmax = np.max(cornersnt[:, 0]) ymin = np.min(cornersnt[:, 1]) ymax = np.max(cornersnt[:, 1]) Xs = [xmin, xmax, xmax, xmin]+cornersnt[:, 0].tolist() Ys = [ymin, ymin, ymax, ymax]+cornersnt[:, 1].tolist() ax1.fill(Xs, Ys, color='white', zorder=30)
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Torsion2D.py
Torsion2D.py
import numpy as np import copy import logging from tqdm import tqdm from .Solver import Solver class NonLinealSolver(Solver): """General class for non lineal solvers Args: tol (float): Tolerance for the maximum absolute value for the delta vector n (int): Maximum number of iterations per step """ def __init__(self, FEMObject: "Core", tol: float, n: int) -> None: """General class for non lineal solvers Args: FEMObject (Core): FEM Object tol (float): Tolerance for the maximum absolute value for the delta vector n (int): Maximum number of iterations per step """ Solver.__init__(self, FEMObject) self.maxiter = n self.tol = tol self.type = 'non-lineal' def run(self, **kargs) -> None: """Solves the equation system using newtons method """ self.solve(**kargs) class Newton(NonLinealSolver): """Creates a Newton Raphson iterative solver Args: FEMObject (Core): Finite Element Model. The model have to calculate tangent matrix T in the self.elementMatrices() method. """ def __init__(self, FEMObject: 'Core', tol: float = 10**(-10), n: int = 50) -> None: """Creates a Newton Raphson iterative solver Args: FEMObject (Core): Finite Element Model. The model have to calculate tangent matrix T in the self.elementMatrices() method. tol (float, optional): Tolerance for the maximum absolute value for the delta vector. Defaults to 10**(-10). n (int, optional): Maximum number of iterations per step. Defaults to 50. """ NonLinealSolver.__init__(self, FEMObject, tol, n) self.type = 'non-lineal-newton' def solve(self, path: str = '', **kargs) -> None: """Solves the equation system using newtons method """ logging.info('Starting newton iterations.') logging.info(f'tol: {self.tol}, maxiter: {self.maxiter}') self.system.U = np.zeros(self.system.U.shape)+1.0 # self.setSolution(0) for i in self.system.cbe: self.system.U[int(i[0])] = i[1] for e in self.system.elements: e.restartMatrix() e.setUe(self.system.U) warn = 'Max number of iterations. Not convergence achived!' for i in tqdm(range(self.maxiter), unit="Newton iteration", disable=False): logging.debug( f'----------------- Newton iteration {i} -------------------') self.system.restartMatrix() logging.debug('Matrix at 0') self.system.elementMatrices() logging.debug('Calculating element matrix') self.system.ensembling() logging.debug('Matrices enssembling') self.system.borderConditions() logging.debug('Border conditions') R = [email protected] - self.system.S logging.debug('Residual') try: du = -np.linalg.solve(self.system.T, R) except Exception as e: logging.error(e) raise e logging.debug('delta u') self.system.U += du for e in self.system.elements: e.restartMatrix() e.setUe(self.system.U) logging.debug('Updated elements') err = np.max(np.abs(du)) logging.info( f'----------------- Iteration error {err} -------------------') if err < self.tol: warn = 'No warnings' break self.solutions = [self.system.U] self.solutions_info = [ {'solver-type': self.type, 'last-it-error': err, 'n-it': i, 'warnings': warn}] logging.info('Done!') class DirectIteration(NonLinealSolver): """docstring for DirectIteration """ def __init__(self, FEMObject: 'Core', tol: float = 10**(-10), n: int = 50) -> None: """Creates a Direct Iteration iterative solver Args: FEMObject (Core): Finite Element Model. The model have to calculate tangent matrix T in the self.elementMatrices() method. tol (float, optional): Tolerance for the maximum absolute value for the delta vector. Defaults to 10**(-10). n (int, optional): Maximum number of iterations per step. Defaults to 50. """ NonLinealSolver.__init__(self, FEMObject, tol, n) self.type = 'non-lineal-direct' def solve(self, path: str = '', guess=None, _guess=False, **kargs) -> None: """Solves the equation system using newtons method """ logging.info('Starting iterations.') logging.info(f'tol: {self.tol}, maxiter: {self.maxiter}') if _guess: self.system.U = guess else: self.system.U = np.zeros(self.system.U.shape) for i in self.system.cbe: self.system.U[int(i[0])] = i[1] for e in self.system.elements: e.restartMatrix() e.setUe(self.system.U) warn = 'Max number of iterations. Not convergence achived!' for i in tqdm(range(self.maxiter), unit="Iteration", disable=False): logging.debug( f'----------------- Iteration {i+1} -------------------') self.system.restartMatrix() logging.debug('Matrix at 0') self.system.elementMatrices() logging.debug('Calculating element matrix') self.system.ensembling() logging.debug('Matrices enssembling') self.system.borderConditions() logging.debug('Border conditions') uim11 = self.system.U.copy() try: self.system.U = np.linalg.solve(self.system.K, self.system.S) except Exception as e: logging.error(e) raise e logging.debug('Equation system solved') R = (self.system.U - uim11) logging.debug('Residual') for e in self.system.elements: e.restartMatrix() e.setUe(self.system.U) logging.debug('Updated elements') err = np.max(np.abs(R)) logging.info( f'----------------- Iteration error {err} -------------------') if err < self.tol: warn = 'No warnings' break self.solutions_info = [ {'solver-type': self.type, 'last-it-error': err, 'n-it': i, 'warnings': warn}] self.solutions = [self.system.U] logging.info('Done!') class LoadControl(DirectIteration): """General class for non lineal solvers Args: tol (float): Tolerance for the maximum absolute value for the delta vector n (int): Maximum number of iterations per step """ def __init__(self, FEMObject, tol: float = 10**(-10), n: int = 500, nls=10) -> None: """General class for non lineal solvers Args: tol (float): Tolerance for the maximum absolute value for the delta vector n (int): Maximum number of iterations per step nls (int): Number of load steps """ DirectIteration.__init__(self, FEMObject, tol=tol, n=n) self.nls = nls self.type += '-load-control' def run(self, **kargs) -> None: """Solves the equation system using newtons method """ guess = None solutioms = [] solutioms_info = [] for i in tqdm(range(self.nls), unit="Load Step", disable=False): logging.info(f'================LOAD STEP {i+1}===================') # FIXME WTF IS THIS. Esto solamente funciona para la clase de EB no lineal self.system.fx = lambda x: self.system.fx0(x)/self.nls*(i+1) self.system.fy = lambda x: self.system.fy0(x)/self.nls*(i+1) guess = self.system.U self.solve(guess=guess, _guess=(i >= 1), **kargs) solutioms.append(self.system.U) solutioms_info.append(copy.deepcopy(self.solutions_info[-1])) self.solutions_info = solutioms_info self.solutions = solutioms self.setSolution()
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Solvers/NoLineal.py
NoLineal.py
from scipy.linalg import eigh from scipy.sparse.linalg import eigsh import numpy as np import logging from scipy.sparse.linalg import spsolve from .Solver import Solver class Lineal(Solver): """Lineal Finite Element Solver. """ def __init__(self, FEMObject: 'Core'): """Lineal Finite Element Solver Args: FEMObject (Core): Finite Element Problem """ Solver.__init__(self, FEMObject) self.type = 'lineal' self.solutions = [] def run(self, path: str = '', **kargs): """Solves the equation system using numpy's solve function Args: path (str, optional): Path where the solution is stored. Defaults to ''. """ # TODO This should delete all matrices becaus the add feature logging.info('Creating element matrices...') self.system.elementMatrices() logging.info('Done!') self.system.ensembling() self.system.borderConditions() logging.info('Solving equation system...') self.solutions = [np.linalg.solve(self.system.K, self.system.S)] self.solutions_info = [{'solver-type': self.type}] self.setSolution() if not path == '': np.savetxt(path, self.system.U, delimiter=',') for e in self.system.elements: e.setUe(self.system.U) logging.info('Done!') class LinealSparse(Lineal): """Lineal Finite Element Solver using sparse matrix """ def __init__(self, FEMObject: 'Core'): """Lineal Finite Element Solver Args: FEMObject (Core): Finite Element Problem """ Lineal.__init__(self, FEMObject) self.type = 'lineal-sparse' def run(self, path: str = '', **kargs): """Solves the equation system using scipy's spsolve function Args: path (str, optional): Path where the solution is stored. Defaults to ''. """ logging.info('Creating element matrices...') self.system.elementMatrices() logging.info('Done!') self.system.ensembling() self.system.borderConditions() logging.info('Converting to csr format') self.system.K = self.system.K.tocsr() logging.info('Solving...') self.solutions = [spsolve(self.system.K, self.system.S)] self.solutions_info = [{'solver-type': self.type}] self.setSolution() if path: np.savetxt(path, self.system.U, delimiter=',') for e in self.system.elements: e.setUe(self.system.U) logging.info('Solved!') class LinealEigen(Lineal): """Eigen value solver Args: FEMObject (Core): FEM problem """ def __init__(self, FEMObject: 'Core'): """Eigen value solver Args: FEMObject (Core): FEM problem """ Lineal.__init__(self, FEMObject) self.type = 'lineal-sparse-eigen' def run(self, path: str = '', k=20, **kargs): """Solves the smallest k eigenvalues using scipy's eigen value solver Args: path (str, optional): Path where the solution is stored. Defaults to ''. k (int, optional): Number of eigenvalues to calculate. Defaults to 20. """ logging.info('Creating element matrices...') self.system.elementMatrices() logging.info('Done!') self.system.ensembling() self.system.condensedSystem() logging.info('Converting to csr format') K = self.system.K.tocsr() logging.info('Solving...') # eigv, eigvec = largest_eigsh( # self.system.K, k, self.system.M, which='SM') # N = self.system.K.shape[0] # eigv, eigvec = eigh( # self.system.K.todense(), self.system.M.todense(), eigvals=(N-k, N-1)) eigv, eigvec = eigsh( K, k, self.system.M, which='SM') idx = eigv.argsort() eigv = eigv[idx] eigvec = eigvec[:, idx] self.system.eigv = eigv self.system.eigvec = eigvec if path: np.savetxt(path.replace('.', '_eigv.'), self.system.eigv, delimiter=',', fmt='%s') np.savetxt(path.replace('.', '_eigvec.'), self.system.eigvec, delimiter=',', fmt='%s') eeevalues = [] for eigenvalue in eigvec.T: eeevalues.append(eigenvalue) self.solutions = np.array(eeevalues) self.solutions_info = [ {'solver-type': self.type, 'eigv': ei} for ei in self.system.eigv] self.setSolution(0) logging.info('Solved!')
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Solvers/Lineal.py
Lineal.py
import numpy as np import matplotlib.pyplot as plt import random import math from typing import Tuple def enmalladoEsferaFernando(L: float, n: float) -> Tuple[np.ndarray, list]: """Crea el enmallado de una esfera de diámetro L con n numero de elementos por lado. Para crear el enmallado se deforma un cubo a la forma de una esfera. Autor: Fernando Ramirez Rodriguez Traducido de Matlab. Args: L (float): Diametro de la esfera n (float): Número de elementos por lado. El numero deelemtnos final es n^3 Returns: Tuple[np.ndarray,list]: Matriz de coordenadas y conectividad """ # TODO Necesita revisión sidel = L nsdel = n delta = sidel/nsdel re = sidel/2 deltar = 2*re/nsdel nd = 0 coor = np.zeros([(nsdel+1)**3, 3]) for i in range(0, nsdel+1): z = (i)*delta for j in range(0, nsdel+1): y = (j)*delta for k in range(0, nsdel+1): x = (k)*delta coor[nd, 0] = x-sidel/2 coor[nd, 1] = y-sidel/2 coor[nd, 2] = z-sidel/2 nd = nd+1 ntnd = nd tol = 1e-6 for i in range(0, int(nsdel/2)): crl = sidel/2-(i)*delta rec = re-(i)*deltar for nd in range(0, ntnd): if abs(abs(coor[nd, 0]) - crl) < tol or abs(abs(coor[nd, 1]) - crl) < tol or abs(abs(coor[nd, 2]) - crl) < tol: d = np.sqrt((coor[nd, 0]) ** 2+(coor[nd, 1]) ** 2+(coor[nd, 2]) ** 2) dd = rec-d xu = coor[nd, 0]/d yu = coor[nd, 1]/d zu = coor[nd, 2]/d coor[nd, 0] = coor[nd, 0]+xu*dd coor[nd, 1] = coor[nd, 1]+yu*dd coor[nd, 2] = coor[nd, 2]+zu*dd for nd in range(0, ntnd): coor[nd, 1-1] = coor[nd, 1-1]+sidel/2 coor[nd, 2-1] = coor[nd, 2-1]+sidel/2 coor[nd, 3-1] = coor[nd, 3-1]+sidel/2 # return coor con = [] el = 0 for i in range(nsdel): for j in range(nsdel): for k in range(nsdel): ni = (i)*(nsdel+1)*(nsdel+1)+(j)*(nsdel+1)+k con.append([0]*8) con[el][1-1] = ni con[el][2-1] = ni+1 con[el][3-1] = con[el][2-1]+nsdel+1 con[el][4-1] = con[el][1-1]+nsdel+1 con[el][5-1] = con[el][1-1]+(nsdel+1)*(nsdel+1) con[el][6-1] = con[el][5-1]+1 con[el][7-1] = con[el][6-1]+nsdel+1 con[el][8-1] = con[el][5-1]+nsdel+1 el = el + 1 return coor, con def enmalladoFernando(lx: float, ly: float, nex: int, ney: int) -> np.ndarray: """Crea un enmallado 2D de un rectangulo Args: lx (floar): Base del rectángulo ly (float): Altura del rectámgulo nex (int): Numero de elementos en el eje x ney (int): Numero de elementos en el eje y Returns: np.ndarray: coordinates matrix (np.ndarray) and element dictionary (list) """ lx = float(lx) ly = float(ly) nex = int(nex) ney = int(ney) hx = lx/nex hy = ly/ney nnd = (ney+1)*(2*nex+1)+(ney)*(nex+1) x = np.zeros([nnd]) y = np.zeros([nnd]) nel = nex*ney elm = np.zeros([nel, 8]) # Coordinate Generation print('Generando Coordenadas') nd = -1 for i in range(1, ney+1): cy = (i-1)*hy for j in range(1, 2*nex+2): nd = nd+1 y[nd] = cy x[nd] = (j-1)*hx/2 cy = (i-1)*hy+hy/2 for j in range(1, nex+2): nd = nd+1 y[nd] = cy x[nd] = (j-1)*hx cy = ly for j in range(1, 2*nex+2): nd = nd+1 y[nd] = cy x[nd] = (j-1)*hx/2 # Element Node Connectivty print('Generando Elementos') ne = -1 for i in range(0, ney): ne = ne+1 elm[ne, 0] = (i)*(3*nex+2)+1 elm[ne, 1] = elm[ne, 0]+2 elm[ne, 3] = elm[ne, 0]+3*nex+2 elm[ne, 2] = elm[ne, 3]+2 elm[ne, 4] = elm[ne, 0]+1 elm[ne, 7] = elm[ne, 0]+2*nex+1 elm[ne, 6] = elm[ne, 3]+1 elm[ne, 5] = elm[ne, 7]+1 for j in range(1, nex): ne = ne+1 elm[ne, 0] = elm[ne-1, 1] elm[ne, 1] = elm[ne, 0]+2 elm[ne, 3] = elm[ne-1, 2] elm[ne, 2] = elm[ne, 3]+2 elm[ne, 4] = elm[ne, 0]+1 elm[ne, 7] = elm[ne-1, 5] elm[ne, 6] = elm[ne, 3]+1 elm[ne, 5] = elm[ne, 7]+1 # print('Guardando Archivo') # f = open(filename, 'w') # f.write(format(nnd)+'\t'+format(nel)+'\t0\t0\t0\t2'+'\n') # for i in range(nnd): # f.write(format(x[i])+'\t'+format(y[i])+'\n') # for i in range(nel): # f.write('C2V'+'\n') # for i in range(nel): # def fun(x): return str(int(x)-1) # f.write('\t'.join(map(fun, [elm[i, 0], elm[i, 1], elm[i, 2], # elm[i, 3], elm[i, 4], elm[i, 5], elm[i, 6], elm[i, 7]]))+'\n') # f.close() coords = np.array([x, y]).T dicc = (elm-1).astype(int).tolist() return coords, dicc def generatePolygon(ctrX: float = 10, ctrY: float = 10, aveRadius: float = 5, irregularity: float = 0.5, spikeyness: float = 0.5, numVerts: float = 6) -> list: """Generate a random polygon. Args: ctrX (float, optional): X centroid. Defaults to 10. ctrY (float, optional): Y centroid. Defaults to 10. aveRadius (float, optional): Average radious. Defaults to 5. irregularity (float, optional): Irregularity. Defaults to 0.5. spikeyness (float, optional): Spikeyness. Defaults to 0.5. numVerts (float, optional): Number of vertices. Defaults to 6. Returns: list: Poligon coordinates matrix. """ irregularity = clip(irregularity, 0, 1) * 2*math.pi / numVerts spikeyness = clip(spikeyness, 0, 1) * aveRadius # generate n angle steps angleSteps = [] lower = (2*math.pi / numVerts) - irregularity upper = (2*math.pi / numVerts) + irregularity suma = 0 for i in range(numVerts): tmp = random.uniform(lower, upper) angleSteps.append(tmp) suma = suma + tmp # normalize the steps so that point 0 and point n+1 are the same k = suma / (2*math.pi) for i in range(numVerts): angleSteps[i] = angleSteps[i] / k # now generate the points points = [] angle = random.uniform(0, 2*math.pi) for i in range(numVerts): r_i = clip(random.gauss(aveRadius, spikeyness), 0, 2*aveRadius) x = ctrX + r_i*math.cos(angle) y = ctrY + r_i*math.sin(angle) points.append((int(x), int(y))) angle = angle + angleSteps[i] return points def clip(x: float, mi: float, ma: float) -> float: """Clip 1D Args: x (float): Point x mi (float): min ma (float): max Returns: float: idk """ if (mi > ma): return x elif (x < mi): return mi elif (x > ma): return ma else: return x def dist(a: list, b: list) -> float: """Calculate the distancie between 2 points Args: a (list): point a b (list): point b Returns: float: Distance between a and b """ return np.linalg.norm(np.array(a)-np.array(b)) def isBetween(a: list, b: list, c: list, tol: float = 1*10**-5) -> bool: """Test if a point is between a line in a given tolerance. Works in 2D and 3D. Args: a (list): Start point of line b (list): End point of line c (list): Point to be tested between line tol (float): Tolerance. Defaults to 1*10**-5 Returns: bool: True if point is in line """ a = a.flatten() b = b.flatten() c = c.flatten() d1 = dist(a, c) d2 = dist(b, c) d3 = dist(a, b) d = d1+d2-d3 if abs(d) < tol: return True return False def roundCorner(P1: list, P2: list, P: list, r: float) -> tuple: """Calculates the origin, start angle and sweep angle of a given corner with a given radius Source: https://stackoverflow.com/questions/24771828/algorithm-for-creating-rounded-corners-in-a-polygon Args: P1 (list): First point P2 (list): Second Point P (list): Center point r (float): Radius of corner Returns: tuple: Circle center coordinates, start angle, sweep angle """ def GetProportionPoint(point, segment, length, dx, dy): factor = segment / length return [point[0] - dx * factor, point[1] - dy * factor] dx1 = P[0]-P1[0] dy1 = P[1]-P1[1] dx2 = P[0]-P2[0] dy2 = P[1]-P2[1] angle = (np.arctan2(dy1, dx1)-np.arctan2(dy2, dx2))/2 tan = np.abs(np.tan(angle)) segment = r/tan len1 = np.sqrt(dx1**2+dy1**2) len2 = np.sqrt(dx2**2+dy2**2) length = np.min([len1, len2]) if segment > length: print('The fillet radius is big') p1Cross = GetProportionPoint(P, segment, len1, dx1, dy1) p2Cross = GetProportionPoint(P, segment, len2, dx2, dy2) dx = P[0]*2-p1Cross[0]-p2Cross[0] dy = P[1]*2-p1Cross[1]-p2Cross[1] L = (dx**2+dy**2)**0.5 d = (segment**2+r**2)**0.5 circlePoint = GetProportionPoint(P, d, L, dx, dy) sa = np.arctan2(p1Cross[1]-circlePoint[1], p1Cross[0]-circlePoint[0]) ea = np.arctan2(p2Cross[1]-circlePoint[1], p2Cross[0]-circlePoint[0]) s = ea-sa # if s < 0: # sa = ea # s = -s if s > np.pi: s = np.pi-s return circlePoint, sa, s def giveCoordsCircle(O: list, r: float, sa: float = 0, a: float = np.pi*2, n: int = 10, isFillet: bool = False) -> Tuple[list, list]: """Calculates the coordinates of a circle Args: O (list): Center coordinates of circle r (float): Circle radius sa (float): Start angle. Defaults to 0 a (float): End angle. Defaults to :math:`2\\pi` n (int, optional): Number of coords to calculate. Defaults to 10. isFillet (bool, optional): If the circle will be used as fillet. Defaults to False. Returns: list and list: Circle coordinates and regions """ coords = [] regions = [] h = a/n if isFillet: for i in range(n+1): regions += [[i, i+1]] theta = sa+h*i x = r*np.cos(theta) y = r*np.sin(theta) coords += [[O[0]+x, O[1]+y]] theta = a x = r*np.cos(theta) y = r*np.sin(theta) coords += [[O[0]+x, O[1]+y]] else: for i in range(n): if i < n-1: regions += [[i, i+1]] else: regions += [[i, 0]] theta = sa+h*i x = r*np.cos(theta) y = r*np.sin(theta) coords += [[O[0]+x, O[1]+y]] return coords, regions def angleBetweenAngles(start: float, end: float, mid: float) -> bool: """Evaluates if a angle is between 2 angles Args: start (float): Start angle end (float): End angle mid (float): Angle to be evaluated Returns: bool: Tru if mid is between start-end """ end = end - start + 2*np.pi if (end - start) < 0.0 else end - start mid = mid - start + 2*np.pi if (mid - start) < 0.0 else mid - start return (mid < end) def testNeighborg(e1, e2): # Este es el número de vertices mínimos para que un elemento sea vecino de otro MIN_VERTICES = 3 en_comun = 0 for c in e2.coords: test = any(np.equal(e1.coords, c).all(1)) if test: en_comun += 1 if en_comun >= MIN_VERTICES: return True return False def plot_list_elements(l, c="k", acum=False): if not acum: fig = plt.figure() ax = fig.add_subplot(projection="3d") else: ax = plt.gca() for e in l: ax.plot(*e._xcenter, "o", c=c)
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Utils/polygonal.py
polygonal.py
import logging import numpy as np from typing import Callable # TODO make list of avaliable Element atributes. class Element(): """Generates a generic element. Args: coords (np.ndarray): Vertical coordinates matrix _coords (np.ndarray): Vertical coordinates matrix for graphical interfaces gdl (np.ndarray): Degree of freedom matrix. Each row is a variable. border (bool): True if the element is part of the border domain of another element. """ def __init__(self, coords: np.ndarray, _coords: np.ndarray, gdl: np.ndarray, border: bool = False, fast: bool = False) -> None: """Generates a generic element. Args: coords (np.ndarray): Vertical coordinates matrix _coords (np.ndarray): Vertical coordinates matrix for graphical interfaces gdl (np.ndarray): Degree of freedom matrix. Each row is a variable. border (bool): True if the element is part of the border domain of another element. fast (bool): If True, the element does not record Ke, Me, Fe, Qe. Defaults to False. """ self.coords = coords self._coords = _coords self.border = border self.gdl = gdl self.fast = fast self.gdlm = [] for i in range(len(self.gdl)): for j in range(len(self.gdl[i])): self.gdlm.append(self.gdl[i, j]) self.n = int(len(self.gdl)*len(self.gdl[0])) # TODO this was only intended for 2D plane stress/strain elements self.properties = {'load_x': [], 'load_y': []} self.intBorders = False self._x, self._p = self.T(self.Z.T) self.jacs, self.dpz = self.J(self.Z.T) self._xcenter = self.T(self.center.T)[0].flatten() if not self.border: if not self.fast: self.Ke = np.zeros([self.n, self.n]) self.Fe = np.zeros([self.n, 1]) self.Qe = np.zeros([self.n, 1]) # Specific transformations self.detjac = np.linalg.det(self.jacs) _j = np.linalg.inv(self.jacs) self.dpx = _j @ self.dpz self.Ue = np.zeros(self.gdl.shape) def restartMatrix(self) -> None: """Sets all element matrices and vectors to 0 state """ if not self.border: self.Ke[:, :] = 0.0 self.Fe[:, :] = 0.0 self.Ue[:, :] = 0.0 self.Qe[:, :] = 0.0 def T(self, z: np.ndarray) -> np.ndarray: """Give the global coordinates of given natural coordiantes over element Args: z (np.ndarray): Natural coordinates matrix. Each row is a dimension, each column is a point. Returns: np.ndarray: Global coordinates matrix and shape functions """ p = self.psis(z) return [email protected], p def TS(self, z): """Returns the transformation of a given set of points in the element. This method is used for border elements Args: z (np.ndarray): Natural coordinates matrix. Each row is a dimension, each column is a point. Returns: np.ndarray: Global coordinates matrix """ return self.s0+self.dir*self.T(z)[0] def inverseMapping(self, x0: np.ndarray, n: int = 100) -> np.ndarray: """Give the natural coordinates of given global coordinates over elements using Newton's method Args: x0(np.ndarray): Global coordinates matrix n(int, optional): Máximun number of iterations. Defaults to 100. Returns: np.ndarray: Natural coordinates matrix """ tol = 1*10**(-6) zi = np.zeros(x0.shape)+0.25 for _ in range(n): xi = x0 - self.T(zi)[0].T _J = np.linalg.inv(self.J(zi)[0]) xi = xi.T xi = xi.reshape(list(xi.shape)+[1]) dz = _J@xi zi += dz[:, :, 0].T if np.max(np.abs(dz)) < tol: break return zi def J(self, z: np.ndarray) -> np.ndarray: """Calculate the jacobian matrix over a set of natural coordinates Args: z(np.ndarray): Natural coordinates matrix. Each row is a dimension, each column is a point. Returns: np.ndarray: Jacobian's matrices and shape function derivatives """ dpsis = self.dpsis(z).T return dpsis @ self.coords, dpsis def giveSolution(self, SVSolution: bool = False, domain: str = 'domain') -> np.ndarray: """Calculate the interpolated solution over element domain Args: SVSolution(bool, optional): To calculate second variable solutions. Defaults to False. domain(str, optional): Where to give the solution ['domain' or 'gauss-points']. Defaults to 'domain'. Returns: np.ndarray: Arrays of coordinates, solutions and second variables solutions. """ _z = self.domain if domain == 'gauss-points': _z = self.Z _x, _p = self.T(_z.T) if SVSolution: j, dpz = self.J(_z.T) dpx = np.linalg.inv(j) @ dpz # print((self.Ue @ np.transpose(dpx,axes=[0,2,1])).shape) return _x, self.Ue@_p.T, self.Ue @ np.transpose(dpx, axes=[0, 2, 1]) return _x, self.Ue@_p.T def giveSolutionPoint(self, Z: np.ndarray, SVSolution: bool = False) -> np.ndarray: """Calculate the interpolated solution over given set of points Args: Z(np.ndarray): Natural coordintas to extract the solution SVSolution(bool, optional): To calculate second variable solution. Defaults to False. Returns: np.ndarray: Arrays of coordinates, solutions and second variables solutions. """ _x, _p = self.T(Z) if SVSolution: j, dpz = self.J(Z) dpx = np.linalg.inv(j) @ dpz return _x, self.Ue@_p.T, self.Ue @ np.transpose(dpx, axes=[0, 2, 1]) return _x, self.Ue@_p.T def setUe(self, U: np.ndarray) -> None: """Assing element local solution Args: U(np.ndarray): Global solution """ for i in range(len(self.gdl)): self.Ue[i] = U[np.ix_(self.gdl[i])].flatten() n = len(self._coords) m = len(self.gdl) self._Ueg = self.Ue[np.ix_(np.linspace( 0, m-1, m).astype(int), np.linspace(0, n-1, n).astype(int))] self._Ueg = np.array(self._Ueg.T.tolist()+[self._Ueg.T[0].tolist()]) def integrate(self, f: Callable) -> float: """Calculate the integral of f function over element domain Args: f(function): Function to be integrated Returns: float: Integral value """ integral = 0 for w, z in zip(self.W, self.Z): integral += f(z)*w return integral
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Elements/Element.py
Element.py
from ..E2D.LTriangular import LTriangular from ..E2D.QTriangular import QTriangular from .Element3D import Element3D, np from .TetrahedralScheme import TetrahedralScheme class Tetrahedral(Element3D, TetrahedralScheme): """Creates a 3D tetrahedral element Args: coords (np.ndarray): Node coordinates matrix gdl (np.ndarray): Degrees of freedom matrix n (int, optional): Number of gauss points used for integration. Defaults to 3. """ def __init__(self, coords: np.ndarray, gdl: np.ndarray, n: int = 3, **kargs) -> None: """Creates a 3D tetrahedral element Args: coords (np.ndarray): Node coordinates matrix gdl (np.ndarray): Degrees of freedom matrix n (int, optional): Number of gauss points used for integration. Defaults to 3. """ coords = np.array(coords) self.faces = [ [0, 1, 3], [1, 2, 3], [0, 3, 2], [0, 2, 1]] self.face_element = LTriangular TetrahedralScheme.__init__(self, n, **kargs) Element3D.__init__(self, coords, coords, gdl, **kargs) def psis(self, _z: np.ndarray) -> np.ndarray: """Calculates the shape functions of a given natural coordinates Args: z (np.ndarray): Natural coordinates matrix Returns: np.ndarray: Shape function evaluated in Z points """ x = _z[0] y = _z[1] z = _z[2] L1 = 1-x-y-z L2 = x L3 = y L4 = z return np.array( [L1, L2, L3, L4]).T def dpsis(self, _z: np.ndarray) -> np.ndarray: """Calculates the shape functions derivatives of a given natural coordinates Args: z (np.ndarray): Natural coordinates matrix Returns: np.ndarray: Shape function derivatives evaluated in Z points """ x = _z[0] kernell = x-x return np.array([ [-1.0+kernell, -1.0+kernell, -1.0+kernell], [1.0+kernell, 0.0+kernell, 0.0+kernell], [0.0+kernell, 1.0+kernell, 0.0+kernell], [0.0+kernell, 0.0+kernell, 1.0+kernell]]) class TetrahedralO2(Element3D, TetrahedralScheme): """Creates a 3D second order tetrahedral element Args: coords (np.ndarray): Node coordinates matrix gdl (np.ndarray): Degrees of freedom matrix n (int, optional): Number of gauss points used for integration. Defaults to 3. """ def __init__(self, coords: np.ndarray, gdl: np.ndarray, n: int = 3, **kargs) -> None: """Creates a 3D second order tetrahedral element Args: coords (np.ndarray): Node coordinates matrix gdl (np.ndarray): Degrees of freedom matrix n (int, optional): Number of gauss points used for integration. Defaults to 3. """ coords = np.array(coords) self.faces = [ [0, 1, 3, 4, 8, 7], [1, 2, 3, 5, 9, 8], [0, 3, 2, 7, 9, 6], [0, 2, 1, 6, 5, 4]] self.face_element = QTriangular TetrahedralScheme.__init__(self, n, **kargs) Element3D.__init__(self, coords, coords, gdl, **kargs) def psis(self, _z: np.ndarray) -> np.ndarray: """Calculates the shape functions of a given natural coordinates Args: z (np.ndarray): Natural coordinates matrix Returns: np.ndarray: Shape function evaluated in Z points """ x = _z[0] y = _z[1] z = _z[2] L1 = 1-x-y-z L2 = x L3 = y L4 = z return np.array([ L1*(2*L1-1), L2*(2*L2-1), L3*(2*L3-1), L4*(2*L4-1), 4*L1*L2, 4*L2*L3, 4*L3*L1, 4*L1*L4, 4*L2*L4, 4*L3*L4]).T def dpsis(self, _z: np.ndarray) -> np.ndarray: """Calculates the shape functions derivatives of a given natural coordinates Args: z (np.ndarray): Natural coordinates matrix Returns: np.ndarray: Shape function derivatives evaluated in Z points """ x = _z[0] y = _z[1] z = _z[2] return np.array([ [4*x + 4*y + 4*z - 3, 4*x + 4*y + 4*z - 3, 4*x + 4*y + 4*z - 3], [4*x - 1, 0, 0], [0, 4*y - 1, 0], [0, 0, 4*z - 1], [-8*x - 4*y - 4*z + 4, -4*x, -4*x], [4*y, 4*x, 0], [-4*y, -4*x - 8*y - 4*z + 4, -4*y], [-4*z, -4*z, -4*x - 4*y - 8*z + 4], [4*z, 0, 4*x], [0, 4*z, 4*y]])
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Elements/E3D/Tetrahedral.py
Tetrahedral.py
from ..E2D.Quadrilateral import Quadrilateral from ..E2D.Serendipity import Serendipity from .Element3D import Element3D, np from .BrickScheme import BrickScheme class Brick(Element3D, BrickScheme): """Creates a 3D brick element Args: coords (np.ndarray): Node coordinates matrix gdl (np.ndarray): Degrees of freedom matrix n (int, optional): Number of gauss points used for integration. Defaults to 3. """ def __init__(self, coords: np.ndarray, gdl: np.ndarray, n: int = 3, **kargs) -> None: """Creates a 3D brick element Args: coords (np.ndarray): Node coordinates matrix gdl (np.ndarray): Degrees of freedom matrix n (int, optional): Number of gauss points used for integration. Defaults to 3. """ coords = np.array(coords) self.faces = [ [0, 1, 5, 4], [1, 2, 6, 5], [4, 5, 6, 7], [3, 2, 1, 0], [2, 3, 7, 6], [4, 7, 3, 0]] self.face_element = Quadrilateral BrickScheme.__init__(self, n, **kargs) Element3D.__init__(self, coords, coords, gdl, **kargs) def psis(self, _z: np.ndarray) -> np.ndarray: """Calculates the shape functions of a given natural coordinates Args: z (np.ndarray): Natural coordinates matrix Returns: np.ndarray: Shape function evaluated in Z points """ z = _z[0] n = _z[1] g = _z[2] return 1/8*np.array( [(1-z)*(1-n)*(1-g), (1+z)*(1-n)*(1-g), (1+z)*(1+n)*(1-g), (1-z)*(1+n)*(1-g), (1-z)*(1-n)*(1+g), (1+z)*(1-n)*(1+g), (1+z)*(1+n)*(1+g), (1-z)*(1+n)*(1+g)]).T def dpsis(self, _z: np.ndarray) -> np.ndarray: """Calculates the shape functions derivatives of a given natural coordinates Args: z (np.ndarray): Natural coordinates matrix Returns: np.ndarray: Shape function derivatives evaluated in Z points """ x = _z[0] y = _z[1] z = _z[2] return 1/8*np.array( [[(y-1.0)*(1.0-z), (x-1)*(1-z), -(1-x)*(1-y)], [(1-y)*(1-z), (-1-x)*(1-z), -(1+x)*(1-y)], [(1+y)*(1-z), (1+x)*(1-z), -(1+x)*(1+y)], [(-1.0-y)*(1-z), (1-x)*(1-z), -(1-x)*(1+y)], [(1-y)*(-1-z), -(1-x)*(1+z), (1-x)*(1-y)], [(1-y)*(1+z), -(1+x)*(1+z), (1+x)*(1-y)], [(1+y)*(1+z), (1+x)*(1+z), (1+x)*(1+y)], [-(1+y)*(1+z), (1-x)*(1+z), (1-x)*(1+y)]]) class BrickO2(Element3D, BrickScheme): """Creates a 3D second order brick element. Args: coords (np.ndarray): Node coordinates matrix gdl (np.ndarray): Degrees of freedom matrix n (int, optional): Number of gauss points used for integration. Defaults to 3. """ def __init__(self, coords: np.ndarray, gdl: np.ndarray, n: int = 3, **kargs) -> None: """Creates a 3D second order brick element. Args: coords (np.ndarray): Node coordinates matrix gdl (np.ndarray): Degrees of freedom matrix n (int, optional): Number of gauss points used for integration. Defaults to 3. """ coords = np.array(coords) self.faces = [ [0, 1, 5, 4, 8, 13, 16, 12], [1, 2, 6, 5, 9, 14, 17, 13], [4, 5, 6, 7, 16, 17, 18, 19], [3, 2, 1, 0, 10, 9, 8, 11], [2, 3, 7, 6, 10, 15, 18, 14], [4, 7, 3, 0, 19, 15, 11, 12]] self.face_element = Serendipity BrickScheme.__init__(self, n, **kargs) Element3D.__init__(self, coords, coords, gdl, **kargs) def psis(self, _z: np.ndarray) -> np.ndarray: """Calculates the shape functions of a given natural coordinates Args: z (np.ndarray): Natural coordinates matrix Returns: np.ndarray: Shape function evaluated in Z points """ x = _z[0] y = _z[1] z = _z[2] return 1/8*np.array([ (1-x)*(1-y)*(1-z)*(-x-y-z-2), (1+x)*(1-y)*(1-z)*(x-y-z-2), (1+x)*(1+y)*(1-z)*(x+y-z-2), (1-x)*(1+y)*(1-z)*(-x+y-z-2), (1-x)*(1-y)*(1+z)*(-x-y+z-2), (1+x)*(1-y)*(1+z)*(x-y+z-2), (1+x)*(1+y)*(1+z)*(x+y+z-2), (1-x)*(1+y)*(1+z)*(-x+y+z-2), 2*(1-x**2)*(1-y)*(1-z), 2*(1+x)*(1-y**2)*(1-z), 2*(1-x**2)*(1+y)*(1-z), 2*(1-x)*(1-y**2)*(1-z), 2*(1-x)*(1-y)*(1-z**2), 2*(1+x)*(1-y)*(1-z**2), 2*(1+x)*(1+y)*(1-z**2), 2*(1-x)*(1+y)*(1-z**2), 2*(1-x**2)*(1-y)*(1+z), 2*(1+x)*(1-y**2)*(1+z), 2*(1-x**2)*(1+y)*(1+z), 2*(1-x)*(1-y**2)*(1+z) ]).T def dpsis(self, _z: np.ndarray) -> np.ndarray: """Calculates the shape functions derivatives of a given natural coordinates Args: z (np.ndarray): Natural coordinates matrix Returns: np.ndarray: Shape function derivatives evaluated in Z points """ x = _z[0] y = _z[1] z = _z[2] # I JUST WANT THIS TO WORK PROPERLY return 1/8*np.array([[-(1 - x)*(1 - y)*(1 - z) + (1 - z)*(y - 1)*(-x - y - z - 2), -(1 - x)*(1 - y)*(1 - z) + (1 - z)*(x - 1)*(-x - y - z - 2), -(1 - x)*(1 - y)*(1 - z) - (1 - x)*(1 - y)*(-x - y - z - 2)], [(1 - y)*(1 - z)*(x + 1) + (1 - y)*(1 - z)*(x - y - z - 2), -(1 - y)*(1 - z)*(x + 1) + ( 1 - z)*(-x - 1)*(x - y - z - 2), -(1 - y)*(1 - z)*(x + 1) - (1 - y)*(x + 1)*(x - y - z - 2)], [(1 - z)*(x + 1)*(y + 1) + (1 - z)*(y + 1)*(x + y - z - 2), (1 - z)*(x + 1)*(y + 1) + ( 1 - z)*(x + 1)*(x + y - z - 2), -(1 - z)*(x + 1)*(y + 1) - (x + 1)*(y + 1)*(x + y - z - 2)], [-(1 - x)*(1 - z)*(y + 1) + (1 - z)*(-y - 1)*(-x + y - z - 2), (1 - x)*(1 - z)*(y + 1) + (1 - x) * (1 - z)*(-x + y - z - 2), -(1 - x)*(1 - z)*(y + 1) - (1 - x)*(y + 1)*(-x + y - z - 2)], [-(1 - x)*(1 - y)*(z + 1) + (1 - y)*(-z - 1)*(-x - y + z - 2), -(1 - x)*(1 - y)*(z + 1) - (1 - x)*(z + 1)*(-x - y + z - 2), (1 - x)*(1 - y)*(z + 1) + (1 - x)*(1 - y)*(-x - y + z - 2)], [(1 - y)*(x + 1)*(z + 1) + (1 - y)*(z + 1)*(x - y + z - 2), -(1 - y)*(x + 1)*(z + 1) - (x + 1)*(z + 1)*(x - y + z - 2), (1 - y)*(x + 1)*(z + 1) + (1 - y)*(x + 1)*(x - y + z - 2)], [(x + 1)*(y + 1)*(z + 1) + (y + 1)*(z + 1)*(x + y + z - 2), (x + 1)*(y + 1)*(z + 1) + (x + 1)*(z + 1)*(x + y + z - 2), (x + 1)*(y + 1)*(z + 1) + (x + 1)*(y + 1)*(x + y + z - 2)], [-(1 - x)*(y + 1)*(z + 1) - (y + 1)*(z + 1)*(-x + y + z - 2), (1 - x)*(y + 1)*(z + 1) + (1 - x) * (z + 1)*(-x + y + z - 2), (1 - x)*(y + 1)*(z + 1) + (1 - x)*(y + 1)*(-x + y + z - 2)], [-4*x*(1 - y)*(1 - z), (2 - 2*x**2) * (z - 1), (2 - 2*x**2)*(y - 1)], [2*(1 - y**2)*(1 - z), -2*y*(1 - z) * (2*x + 2), (2*x + 2)*(y**2 - 1)], [-4*x*(1 - z)*(y + 1), (1 - z) * (2 - 2*x**2), (2 - 2*x**2)*(-y - 1)], [-2*(1 - y**2)*(1 - z), -2*y*(1 - z) * (2 - 2*x), (2 - 2*x)*(y**2 - 1)], [-2*(1 - y)*(1 - z**2), (2 - 2*x) * (z**2 - 1), -2*z*(1 - y)*(2 - 2*x)], [2*(1 - y)*(1 - z**2), (2*x + 2) * (z**2 - 1), -2*z*(1 - y)*(2*x + 2)], [2*(1 - z**2)*(y + 1), (1 - z**2) * (2*x + 2), -2*z*(2*x + 2)*(y + 1)], [-2*(1 - z**2)*(y + 1), (1 - z**2) * (2 - 2*x), -2*z*(2 - 2*x)*(y + 1)], [-4*x*(1 - y)*(z + 1), (2 - 2*x**2) * (-z - 1), (1 - y)*(2 - 2*x**2)], [2*(1 - y**2)*(z + 1), -2*y*(2*x + 2) * (z + 1), (1 - y**2)*(2*x + 2)], [-4*x*(y + 1)*(z + 1), (2 - 2*x**2) * (z + 1), (2 - 2*x**2)*(y + 1)], [-2*(1 - y**2)*(z + 1), -2*y*(2 - 2*x)*(z + 1), (1 - y**2)*(2 - 2*x)]])
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Elements/E3D/Brick.py
Brick.py
from .Element2D import Element2D, np from .TriangularScheme import TriangularScheme from ..E1D.QuadraticElement import QuadraticElement class QTriangular(Element2D, TriangularScheme): """Creates a lagrangian element of order 2 Args: coords (np.ndarray): Element coordinates matrix gdl (np.ndarray): Element gdl matrix n (int, optional): Number of Gauss Points. Defaults to 2. """ def __init__(self, coords: np.ndarray, gdl: np.ndarray, n: int = 3, **kargs) -> None: """Creates a lagrangian element of order 2 Args: coords (np.ndarray): Element coordinates matrix gdl (np.ndarray): Element gdl matrix n (int, optional): Number of Gauss Points. Defaults to 3. """ coords = np.array(coords) delta = coords[0]-coords[1] he1 = np.linalg.norm(delta) e1 = QuadraticElement(np.array([[0], [he1*0.5], [he1]]), np.array([[-1, -1, -1]]), border=True) delta = coords[2]-coords[1] he2 = np.linalg.norm(delta) e2 = QuadraticElement(np.array([[0], [he2*0.5], [he2]]), np.array([[-1, -1, -1]]), border=True) delta = coords[0]-coords[2] he3 = np.linalg.norm(coords[0]-coords[2]) e3 = QuadraticElement(np.array([[0], [he3*0.5], [he3]]), np.array([[-1, -1, -1]]), border=True) self.borders = [e1, e2, e3] _coords = np.array([coords[i] for i in range(3)]) TriangularScheme.__init__(self, n, **kargs) Element2D.__init__(self, coords, _coords, gdl, **kargs) def psis(self, z: np.ndarray) -> np.ndarray: """Calculates the shape functions of a given natural coordinates Args: z (np.ndarray): Natural coordinates matrix Returns: np.ndarray: Shape function evaluated in Z points """ return np.array([ 2.0*(z[0]+z[1]-1.0)*(z[0]+z[1]-0.5), 2.0*z[0]*(z[0]-0.5), 2.0*z[1]*(z[1]-0.5), -4.0*(z[0]+z[1]-1.0)*(z[0]), 4.0*z[0]*z[1], -4.0*z[1]*(z[0]+z[1]-1.0)]).T def dpsis(self, z: np.ndarray) -> np.ndarray: """Calculates the shape functions derivatives of a given natural coordinates Args: z (np.ndarray): Natural coordinates matrix Returns: np.ndarray: Shape function derivatives evaluated in Z points """ return np.array([ [4.0*z[0]+4.0*z[1]-3.0, 4.0*z[1]+4.0*z[0]-3.0], [4.0*z[0]-1.0, 0*z[0]], [0*z[0], 4.0*z[1]-1.0], [-8.0*z[0]-4.0*(z[1]-1.0), -4.0*z[0]], [4.0*z[1], 4.0*z[0]], [-4.0*z[1], -8.0*z[1]-4.0*z[0]+4.0] ])
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Elements/E2D/QTriangular.py
QTriangular.py
from .Element2D import Element2D, np from ..E1D.LinealElement import LinealElement from .TriangularScheme import TriangularScheme class LTriangular(Element2D, TriangularScheme): """Creates a lagrangian triangular element of order 1 Args: coords (np.ndarray): Element coordinates matrix gdl (np.ndarray): Element gdl matrix n (int, optional): Number of Gauss Points. Defaults to 2. """ def __init__(self, coords: np.ndarray, gdl: np.ndarray, n: int = 2, **kargs) -> None: """Creates a lagrangian triangular element of order 1 Args: coords (np.ndarray): Element coordinates matrix gdl (np.ndarray): Element gdl matrix n (int, optional): Number of Gauss Points. Defaults to 2. """ coords = np.array(coords) he1 = np.linalg.norm(coords[1]-coords[0]) e1 = LinealElement(np.array([[0], [he1]]), np.array([[-1, -1]]), border=True) he2 = np.linalg.norm(coords[2]-coords[1]) e2 = LinealElement(np.array([[0], [he2]]), np.array([[-1, -1]]), border=True) he3 = np.linalg.norm(coords[0]-coords[2]) e3 = LinealElement(np.array([[0], [he3]]), np.array([[-1, -1]]), border=True) self.borders = [e1, e2, e3] TriangularScheme.__init__(self, n, **kargs) Element2D.__init__(self, coords, coords, gdl, **kargs) def psis(self, z: np.ndarray) -> np.ndarray: """Calculates the shape functions of a given natural coordinates Args: z (np.ndarray): Natural coordinates matrix Returns: np.ndarray: Shape function evaluated in Z points """ return np.array([ 1.0-z[0]-z[1], z[0], z[1]]).T def dpsis(self, z: np.ndarray) -> np.ndarray: """Calculates the shape functions derivatives of a given natural coordinates Args: z (np.ndarray): Natural coordinates matrix Returns: np.ndarray: Shape function derivatives evaluated in Z points """ kernell = (z[0]-z[0]) return np.array([ [-1.0*(1+kernell), -1.0*(1+kernell)], [1.0*(1+kernell), 0.0*(1+kernell)], [0.0*(1+kernell), 1.0*(1+kernell)] ])
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Elements/E2D/LTriangular.py
LTriangular.py
from .Element2D import Element2D, np from .RectangularScheme import RectangularScheme from ..E1D.LinealElement import LinealElement class Quadrilateral(Element2D, RectangularScheme): """Creates a lagrangian rectangular element of order 1 Args: coords (np.ndarray): Element coordinates matrix gdl (np.ndarray): Element gdl matrix n (int, optional): Number of Gauss Points. Defaults to 2. """ def __init__(self, coords: np.ndarray, gdl: np.ndarray, n: int = 2, **kargs) -> None: """Creates a lagrangian rectangular element of order 1 Args: coords (np.ndarray): Element coordinates matrix gdl (np.ndarray): Element gdl matrix n (int, optional): Number of Gauss Points. Defaults to 2. """ coords = np.array(coords) he1 = np.linalg.norm(coords[1]-coords[0]) e1 = LinealElement(np.array([[0], [he1]]), np.array([[-1, -1]]), border=True) he2 = np.linalg.norm(coords[2]-coords[1]) e2 = LinealElement(np.array([[0], [he2]]), np.array([[-1, -1]]), border=True) he3 = np.linalg.norm(coords[3]-coords[2]) e3 = LinealElement(np.array([[0], [he3]]), np.array([[-1, -1]]), border=True) he4 = np.linalg.norm(coords[0]-coords[3]) e4 = LinealElement(np.array([[0], [he4]]), np.array([[-1, -1]]), border=True) self.borders = [e1, e2, e3, e4] RectangularScheme.__init__(self, n, **kargs) Element2D.__init__(self, coords, coords, gdl, **kargs) def psis(self, z: np.ndarray) -> np.ndarray: """Calculates the shape functions of a given natural coordinates Args: z (np.ndarray): Natural coordinates matrix Returns: np.ndarray: Shape function evaluated in Z points """ return np.array( [0.25*(1.0-z[0])*(1.0-z[1]), 0.25*(1.0+z[0])*(1.0-z[1]), 0.25*(1.0+z[0])*(1.0+z[1]), 0.25*(1.0-z[0])*(1.0+z[1])]).T def dpsis(self, z: np.ndarray) -> np.ndarray: """Calculates the shape functions derivatives of a given natural coordinates Args: z (np.ndarray): Natural coordinates matrix Returns: np.ndarray: Shape function derivatives evaluated in Z points """ return np.array( [[0.25*(z[1]-1.0), 0.25*(z[0]-1.0)], [-0.25*(z[1]-1.0), -0.25*(z[0]+1.0)], [0.25*(z[1]+1.0), 0.25*(1.0+z[0])], [-0.25*(1.0+z[1]), 0.25*(1.0-z[0])]])
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Elements/E2D/Quadrilateral.py
Quadrilateral.py
from .Element2D import Element2D, np from .RectangularScheme import RectangularScheme from ..E1D.QuadraticElement import QuadraticElement class Serendipity(Element2D, RectangularScheme): """Creates a Serendipity element Args: coords (np.ndarray): Coordinate matrix of element gdl (np.ndarray): Coordinate matrix of element GDL's n (int, optional): Number of gauss points. Defaults to 3. """ def __init__(self, coords: np.ndarray, gdl: np.ndarray, n: int = 3, **kargs) -> None: """Creates a Serendipity element Args: coords (np.ndarray): Coordinate matrix of element gdl (np.ndarray): Coordinate matrix of element GDL's n (int, optional): Number of gauss points. Defaults to 3. """ _coords = np.array([coords[i] for i in range(4)]) coords = np.array(coords) he1 = np.linalg.norm(coords[1]-coords[0]) e1 = QuadraticElement(np.array([[0], [he1*0.5], [he1]]), np.array([[-1, -1, -1]]), border=True) he2 = np.linalg.norm(coords[2]-coords[1]) e2 = QuadraticElement(np.array([[0], [he2*0.5], [he2]]), np.array([[-1, -1, -1]]), border=True) he3 = np.linalg.norm(coords[3]-coords[2]) e3 = QuadraticElement(np.array([[0], [he3*0.5], [he3]]), np.array([[-1, -1, -1]]), border=True) he4 = np.linalg.norm(coords[0]-coords[3]) e4 = QuadraticElement(np.array([[0], [he4*0.5], [he4]]), np.array([[-1, -1, -1]]), border=True) self.borders = [e1, e2, e3, e4] RectangularScheme.__init__(self, n, **kargs) Element2D.__init__(self, coords, _coords, gdl, **kargs) def psis(self, z: np.ndarray) -> np.ndarray: """Calculates the shape functions of a given natural coordinates Args: z (np.ndarray): Natural coordinates matrix Returns: np.ndarray: Shape function evaluated in Z points """ return np.array([ 0.25*(1.0-z[0])*(1.0-z[1])*(-1.0-z[0]-z[1]), 0.25*(1.0+z[0])*(1.0-z[1])*(-1.0+z[0]-z[1]), 0.25*(1.0+z[0])*(1.0+z[1])*(-1.0+z[0]+z[1]), 0.25*(1.0-z[0])*(1.0+z[1])*(-1.0-z[0]+z[1]), 0.5*(1.0-z[0]**2.0)*(1.0-z[1]), 0.5*(1.0+z[0])*(1.0-z[1]**2.0), 0.5*(1.0-z[0]**2.0)*(1.0+z[1]), 0.5*(1.0-z[0])*(1.0-z[1]**2.0) ]).T def dpsis(self, z: np.ndarray) -> np.ndarray: """Calculates the shape functions derivatives of a given natural coordinates Args: z (np.ndarray): Natural coordinates matrix Returns: np.ndarray: Shape function derivatives evaluated in Z points """ return np.array( [[-0.25*(z[1]-1.0)*(2.0*z[0]+z[1]), -0.25*(z[0]-1.0)*(2.0*z[1]+z[0])], [-0.25*(z[1]-1.0)*(2.0*z[0]-z[1]), 0.25*(z[0]+1.0)*(2.0*z[1]-z[0])], [0.25*(z[1]+1.0)*(2.0*z[0]+z[1]), 0.25*(z[0]+1.0)*(2.0*z[1]+z[0])], [0.25*(z[1]+1.0)*(2.0*z[0]-z[1]), - 0.25*(z[0]-1.0)*(2.0*z[1]-z[0])], [(z[1]-1.0)*z[0], 0.5*(z[0]**2.0-1.0)], [-0.5*(z[1]**2.0-1.0), -z[1]*(z[0]+1.0)], [-(z[1]+1.0)*z[0], -0.5*(z[0]**2.0-1.0)], [0.5*(z[1]**2.0-1.0), z[1]*(z[0]-1.0)]])
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Elements/E2D/Serendipity.py
Serendipity.py
from ..Element import Element import numpy as np import matplotlib.pyplot as plt import matplotlib.path as mpltPath class Element2D(Element): """Create a 2D element Args: coords (np.ndarray): Element coordinate matrix _coords (np.ndarray): Element coordinate matrix for graphical interface purposes gdl (np.ndarray): Degree of freedom matrix """ def __init__(self, coords: np.ndarray, _coords: np.ndarray, gdl: np.ndarray, **kargs) -> None: """Create a 2D element Args: coords (np.ndarray): Element coordinate matrix _coords (np.ndarray): Element coordinate matrix for graphical interface purposes gdl (np.ndarray): Degree of freedom matrix """ Element.__init__(self, coords, _coords, gdl, **kargs) self._coordsg = np.array( self._coords.tolist()+[self._coords[0].tolist()]) for i, e in enumerate(self.borders): delta = self._coordsg[i+1]-self._coordsg[i] delta[0] *= -1 delta = delta[::-1] delta = delta/np.linalg.norm(delta) e.nx = delta[0] e.ny = delta[1] def draw(self) -> None: """Create a graph of element""" _z = self.domain _x, _p = self.T(_z.T) fig = plt.figure() ax = fig.add_subplot(projection='3d') l = [] l.append('Element') l.append('Nodes') for i in range(self.n): surf = ax.plot_trisurf(*_x.T, _p[:, i], alpha=0.3) surf._facecolors2d = surf._facecolor3d surf._edgecolors2d = surf._edgecolor3d l.append(r'$\psi_{'+format(i)+r'}$') __coords = np.array(self._coords.tolist()+[self._coords[0].tolist()]).T ax.plot(*__coords, [0]*len(__coords.T), '-', color='black') ax.plot(*self.coords.T, [0]*len(self.coords), 'o', color='blue') ax.legend(l) def jacobianGraph(self) -> None: """Create the determinant jacobian graph """ _z = self.domain _x, _p = self.T(_z.T) _j = self.J(_z.T)[0] __j = np.linalg.det(_j) fig = plt.figure() ax = fig.add_subplot(projection='3d') l = [] surf = ax.plot_trisurf(*_x.T, __j, cmap='magma') surf._facecolors2d = surf._facecolor3d surf._edgecolors2d = surf._edgecolor3d l.append('Element') l.append('Nodes') l.append(r'$|J|$') fig.colorbar(surf) __coords = np.array(self._coords.tolist()+[self._coords[0].tolist()]).T ax.plot(*__coords, [0]*len(__coords.T), '-', color='black') ax.plot(*self.coords.T, [0]*len(self.coords), 'o', color='blue') ax.legend(l) def isInside(self, x: np.ndarray) -> np.ndarray: """Test if a given points is inside element domain Args: x (np.ndarray): Point to be tested Returns: np.ndarray: Bolean array of test result """ path = mpltPath.Path(self._coords[:, :2]) inside2 = path.contains_points([x]) return inside2[0]
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Elements/E2D/Element2D.py
Element2D.py
from .LinealElement import LinealElement import numpy as np class EulerBernoulliElement(LinealElement): """Creates a 1D beam element """ def __init__(self, coords, gdl, n=2, nvn=2) -> None: """Creates a 1D beam element Args: coords (list): Beam coordiantes gdl (list): Degrees of freedom n (int, optional): Number of gauss points. Defaults to 2. nvn (int, optional): Number of vairables per node. Defaults to 2. """ gdlcopy = gdl.copy() gdl[0, 1] = gdlcopy[1, 0] gdl[1, 0] = gdlcopy[0, 1] if nvn > 2: gdl = gdlcopy.T.reshape([3, 2]) LinealElement.__init__(self, coords, gdl, n=n) self.he = np.linalg.norm(self.coords[-1]-self.coords[0]) self.Zr, self.Wr = np.polynomial.legendre.leggauss(n-1) self.n = 2*nvn def hermit(self, z: np.ndarray) -> np.ndarray: """ Calculates the shape functions of the lineal element of a given natural coordinates Args: z (np.ndarray): Natural coordinates matrix Returns: np.ndarray: Shape function evaluated in Z points """ he = self.he x = (he)/2*z+(he)/2 return np.array([1-3*(x/he)**2+2*(x/he)**3, -x*(1-x/he)**2, 3*(x/he)**2-2*(x/he)**3, -x*((x/he)**2-x/he)]).T def dhermit(self, z: np.ndarray) -> np.ndarray: """Calculates the shape functions derivatives of the lineal element of a given natural coordinates Args: z (np.ndarray): Natural coordinates matrix Returns: np.ndarray: Shape function derivatives evaluated in Z points """ h = self.he x = (h)/2*z+(h)/2 return np.array([ [-6/h*x/h*(1-x/h), -(1+3*(x/h)**2-4*x/h), 6/h*x/h*(1-x/h), -x/h*(3*x/h-2)], [-6/h**2*(1-2*x/h), -2/h*(3*x/h-2), 6/h ** 2*(1-2*x/h), -2/h*(3*x/h-1)], [12/h**3+(x-x), -6/h**2+(x-x), -12/h**3+(x-x), -6/h**2+(x-x)]]) def giveSolution(self, SVSolution: bool = False, domain: str = 'domain') -> np.ndarray: """Calculate the interpolated solution over element domain Args: SVSolution(bool, optional): To calculate second variable solutions. Defaults to False. Returns: np.ndarray: Arrays of coordinates, solutions and second variables solutions. """ # TODO hacer una comprobación de frontera para evitar errores _z = self.domain if domain == 'gauss-points': _z = self.Z _x, _ = self.T(_z.T) _h = self.hermit(_z.T) if SVSolution: _dh = self.dhermit(_z.T) return _x, self.Ue.flatten()@_h.T, self.Ue.flatten() @ _dh.T return _x, self.Ue.flatten()@_h.T def giveSolutionPoint(self, Z: np.ndarray, SVSolution: bool = False) -> np.ndarray: """Calculate the interpolated solution over given set of points Args: Z(np.ndarray): Natural coordintas to extract the solution SVSolution(bool, optional): To calculate second variable solution. Defaults to False. Returns: np.ndarray: Arrays of coordinates, solutions and second variables solutions. """ # TODO hacer una comprobación de frontera para evitar errores _x, _p = self.T(Z) if SVSolution: j, dpz = self.J(Z) # TODO Revisar con Reddy dpx = np.linalg.inv(j) @ dpz # TODO REVISAR VS return _x, self.Ue@_p.T, self.Ue @ np.transpose(dpx, axes=[0, 2, 1]) return _x, self.Ue@_p.T
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Elements/E1D/EulerBernoulliElement.py
EulerBernoulliElement.py
import numpy as np from ..Elements import Quadrilateral, Serendipity, QTriangular, LTriangular from ..Utils import isBetween class Region(): """Creates a general region Args: coords (np.ndarray): Coordinates matrix of therefion. Each row is a coordinate, each column is a dimension. """ def __init__(self, coords: np.ndarray, desc: str = '') -> None: """Creates a general region Args: coords (np.ndarray): Coordinates matrix of therefion. Each row is a coordinate, each column is a dimension. desc (str, optional): Region description. """ if not isinstance(coords, np.ndarray): coords = np.array(coords) self.coords = coords self.nodes = [] self.description = desc def setNodesOfRegion(self, geometry: 'Geometry', tol: float = 10**(-5)) -> None: """Calculates the nodes of the geometry which are inside the region. Args: geometry (Geometry): Input geometry tol (float, optional): Near tolerance. Defaults to 10**(-5). """ self.nodes = [] for i, p in enumerate(geometry.gdls): if self.isBetween(p, tol): self.nodes.append(i) self.nodes = np.array(self.nodes) class Region1D(Region): """Creates a line region (1D element) Args: coords (np.ndarray): Coordinate matrix. Must be of two rows and 2 or 3 columns. 2 columns for 2D region, 3 columns for 3D region. """ def __init__(self, coords: np.ndarray, **kargs) -> None: """Creates a line region (1D element) Args: coords (np.ndarray): Coordinate matrix. Must be of two rows and 2 or 3 columns. 2 columns for 2D region, 3 columns for 3D region. """ Region.__init__(self, coords, **kargs) def isBetween(self, p: np.ndarray, tol: float = 1*10**(-5)) -> bool: """Check if a given point is inside the region. Args: p (np.ndarray): Point to be tested tol (float, optional): Tolerance for check. Defaults to 1*10**(-5). Returns: bool: True if the point is inside the region """ return isBetween(self.coords[0], self.coords[1], p, tol) class Region2D(Region): """Creates a square region (2D element) Args: coords (np.ndarray): Coordinate matrix. Must be of four rows and 3 columns. """ def __init__(self, coords: np.ndarray, **kargs) -> None: """Creates a 2D region using a 2D Element. The number of coordinates definesthe type of element. Args: coords (np.ndarray): Coordinate matrix. Must be of four rows and 3 columns. """ ndim = len(coords[0]) lc = len(coords) if ndim == 2: coordsn = np.zeros([lc, 3]) coordsn[:, :-1] = coords coords = coordsn if lc == 3: ELE = LTriangular elif lc == 4: ELE = Quadrilateral elif lc == 6: ELE = QTriangular elif lc == 8: ELE = Serendipity self.e = ELE(coords, np.array([[-1]*lc]), n=1, fast=True, border=True) self.center, _ = self.e.T(self.e.center.T) _j, _ = self.e.J(self.e.center.T) self.n = np.cross(_j[:, 0].T, _j[:, 1].T, axis=0) self.nnorm = np.linalg.norm(self.n) # Ax + By + Cz + D = 0 self.D = -np.dot(self.n.flatten(), self.center.flatten()) Region.__init__(self, coords, **kargs) def pointToPlaneDistance(self, p: np.ndarray) -> float: """Calculates the distance from a given point to the region. Args: p (np.ndarray): Point to be tested Returns: float: Distance between the plane and the point """ num = abs(np.dot(self.n.T.flatten(), p.flatten())+self.D) return num/self.nnorm def isBetween(self, p: np.ndarray, tol: float = 1*10**(-5)) -> bool: """Check if a given point is inside the region. Args: p (np.ndarray): Point to be tested tol (float, optional): Tolerance for check. Defaults to 1*10**(-5). Returns: bool: True if the point is inside the region """ ndim = len(p) if ndim == 2: return self.e.isInside(p) d = self.pointToPlaneDistance(p) return d <= tol
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Geometry/Region.py
Region.py
import time import triangle as tr import copy import numpy as np import json from ..Utils import isBetween, roundCorner, giveCoordsCircle, angleBetweenAngles, testNeighborg import matplotlib.pyplot as plt from ..Elements.E1D.LinealElement import LinealElement from ..Elements.E1D.CubicElement import CubicElement from ..Elements.E1D.QuadraticElement import QuadraticElement from ..Elements.E2D.Serendipity import Serendipity from ..Elements.E2D.Quadrilateral import Quadrilateral from ..Elements.E2D.QTriangular import QTriangular from ..Elements.E2D.LTriangular import LTriangular from ..Elements.E3D.Brick import Brick, BrickO2 from ..Elements.E3D.Tetrahedral import Tetrahedral, TetrahedralO2 from .Region import Region, Region1D, Region2D from typing import Callable from tqdm import tqdm from scipy.spatial import KDTree types = {'T1V': LTriangular, 'T2V': QTriangular, 'C1V': Quadrilateral, 'C2V': Serendipity, "L1V": LinealElement, "L2V": QuadraticElement, "L3V": CubicElement, "B1V": Brick, "B2V": BrickO2, "TE1V": Tetrahedral, "TE2V": TetrahedralO2} class Geometry: """Define a general geometry structure Args: dictionary (list): Matrix with element definitions. Each row is an element. The gdl are defined in columns gdls (list): List of domain coordinates types (list): Types of each element nvn (int, optional): Nunmber of variables per node. Defaults to 1. regions (list, optional): List of domain regions. Defaults to []. fast (bool): If True, the created elements will have have the fast propertie (see Element class docs) """ def __init__(self, dictionary: list, gdls: list, types: list, nvn: int = 1, regions: list[Region] = None, fast=False) -> None: """Define geometry structure Args: dictionary (list): Matrix with element definitions. Each row is an element. The gdl are defined in columns gdls (list): List of domain coordinates types (list): Types of each element nvn (int, optional): Nunmber of variables per node. Defaults to 1. regions (list, optional): List of domain regions. Defaults to None. fast (bool): If True, the created elements will have have the fast propertie (see Element class docs) """ self.mask = None self.holes = [] self.fillets = [] self.nvn = nvn self.dictionary = dictionary self.elements = [] self.gdls = np.array(gdls) self.types = types self.regions = regions or [] self.cbe = [] self.cbn = [] self.centroids = [] self.fast = fast self.additionalProperties = {} self.initialize() self.min_search_radius = -1 self.calculateCentroids() centroides = np.array(self.centroids)[:, 0, :] self.KDTree = KDTree(centroides) self.boundingBoxMin = np.min(centroides, axis=0) self.boundingBoxMax = np.max(centroides, axis=0) def calculateRegions(self) -> None: """Calculates the nodes of the geometry regions """ for region in tqdm(self.regions, unit="Region"): region.setNodesOfRegion(self) def maskFromRegions(self) -> None: """Create the display mask from geometry regions """ pass # TODO this have to be moved to the Geometry2D class # self.mask = [] # for s in self.regions: # self.mask += np.array(self.gdls)[np.ix_(s)].tolist() def initialize(self) -> None: """Calculates the total number of GDL's and generates the elements structure """ self.ngdl = int(len(self.gdls)*self.nvn) self.generateElements() self.calculateRegions() def detectNonLocal(self, lr: float) -> list: """Detect adjacent elements between a distance Lr. Uses KDTrees Args: lr (float): Distance to detect adjacent elements Returns: list: Non local element dictionary """ print('Detecting non local elements') diccionariosnl = [] for e in tqdm(self.elements, unit='Elements'): linea = self.KDTree.query_ball_point(e._xcenter, lr) diccionariosnl.append(linea) return diccionariosnl def detectNonLocalLegacy(self, lr: float) -> list: """Detect adjacent elements between a distance Lr. Uses iterative approach Args: lr (float): Distance to detect adjacent elements Returns: list: Non local element dictionary """ print('Detecting non local elements') diccionariosnl = [] centroids = np.array(self.centroids) for i in tqdm(range(len(self.dictionary)), unit='Elements'): ci = centroids[i] linea = [] linea.append(i) for j in range(len(self.dictionary)): if not j == i: cnl = centroids[j] d = np.linalg.norm(cnl-ci) if d <= lr: linea.append(j) diccionariosnl.append(linea) return diccionariosnl def generateElements(self) -> None: """Generate elements structure """ print('Generating element structure') self.elements = [0.0]*len(self.dictionary) for i, d in enumerate(tqdm(self.dictionary, unit='Element')): coords = self.gdls[np.ix_(d)] gdl = np.zeros([self.nvn, len(d)]) for j in range(self.nvn): gdl[j, :] = (np.array(d)*self.nvn+j) gdl = gdl.astype(int) self.elements[i] = types[self.types[i]]( coords, gdl, fast=self.fast) self.elements[i].index = i print('Done!') def show(self) -> None: """Creates a geometry graph""" pass def calculateCentroids(self) -> None: """Calculate elements centroids """ for e in self.elements: dist = e.coords-e._xcenter min_search_radius = max(np.sum(dist**2, axis=1)**0.5) self.min_search_radius = max( min_search_radius, self.min_search_radius) x, _ = e.T(e.center.T) self.centroids.append(x.tolist()) def setCbe(self, cbe: list) -> None: """This method have to be used to assign essential boundary conditions. Thes method prevents to assign duplicated border conditions Args: cbe (list): Border conditions to be applied """ res = [] for i in cbe: if i not in res: res.append(i) self.cbe = res def giveNodesOfRegion(self, region: int) -> np.ndarray: """Give nodes over a region Args: region (int): region number. Start with 0 Returns: np.ndarray: List of nodes in the specified region """ return self.regions[region].nodes def giveElementsOfRegion(self, region: int) -> list: """Give elements over a region Args: region (int): region number. Start with 0 Returns: list: List of elements in the specified region """ a = [] nodes = self.giveNodesOfRegion(region) for e in self.elements: if np.sum(np.isin(e.gdl[0], nodes*self.nvn)) > 0: a.append(e) return a def cbFromRegion(self, region: int, value: float, nv: int = 1) -> list: """Generate a list of border conditions from specified border. Args: region (int): region number value (float): Value of the bc nv (int, optional): Variable number, starts with 1. Defaults to 1. Returns: list: List of border conditions that can be concatenated or assigned to the geometry """ cb = [] nodes = self.giveNodesOfRegion(region) cbe = np.zeros([len(nodes), 2]) cbe[:, 0] = nodes*self.nvn+(nv-1) cbe[:, 1] = value cb += cbe.tolist() return cb def cbeAllRegions(self, value: float) -> None: """Set all regions border conditions to the specified value to all the variables. Args: value (float): Value of the border condition """ self.cbe = [] for s in range(len(self.regions)): for i in range(self.nvn): self.cbe += self.cbFromRegion(s, value, (i+1)) def exportJSON(self, filename: str = None) -> str: """Export geometry definition as JSON file or JSON string Args: filename (str, optional): If given, a JSON file is created. Defaults to None. Returns: str: JSON string """ x = { "nodes": self.gdls.tolist(), "dictionary": self.dictionary, "types": self.types, "regions": self.giveRegions(), "ebc": self.cbe, "nbc": self.cbn, "nvn": self.nvn, "ngdl": self.ngdl, "holes": self.holes, "fillets": self.fillets, **self.additionalProperties } y = json.dumps(x) if filename: with open(filename, "w") as f: f.write(y) return y def giveRegions(self) -> list: """Returns a list of regions coordinates matrix Returns: list: List of regions coordinates matrix """ coords = [] for reg in self.regions: coords += [reg.coords.tolist()] return coords def addRegions(self, regions: list[Region]) -> None: """Adds regions to an already created geometry Args: regions (list[Region]): Regions to be created """ for r in tqdm(regions, unit='Regions'): r.setNodesOfRegion(self) self.regions += regions # self.calculateRegions() @classmethod def importJSON(self, filename: str, **kargs) -> 'Geometry': """Import geometry definition from JSON file Args: filename (str): Path to the JSON file Returns: Geometry: Geometry generated using the JSON file """ with open(filename) as f: parsed = json.loads(f.read()) dcc = parsed['dictionary'] nodes = parsed['nodes'] types = parsed['types'] nvn = parsed['nvn'] regions = [] regions_parsed = parsed['regions'] for coords in regions_parsed: if len(coords) == 2: regions.append(Region1D(coords)) elif len(coords) == 4: regions.append(Region2D(coords)) o = self(dcc, nodes, types, nvn, regions, **kargs) o.cbe = parsed['ebc'] o.cbn = parsed['nbc'] o.holes = parsed['holes'] o.fillets = parsed['fillets'] return o class Geometry1D(Geometry): """Define an 1D geometry structure Args: dictionary (list): Matrix with element definitions. Each row is an element. The gdl are defined in columns gdls (list): List of domain coordinates types (list): Types of each element nvn (int, optional): Nunmber of variables per node. Defaults to 1. fast (bool, optional): If True, the created elements will have have the fast propertie (see Element class docs) """ def __init__(self, dictionary: list, gdls: list, types: list, nvn: int = 1, fast=False) -> None: """Define an 1D geometry structure Args: dictionary (list): Matrix with element definitions. Each row is an element. The gdl are defined in columns gdls (list): List of domain coordinates types (list): Types of each element nvn (int, optional): Nunmber of variables per node. Defaults to 1. fast (bool, optional): If True, the created elements will have have the fast propertie (see Element class docs) """ Geometry.__init__(self, dictionary, gdls, types, nvn, [], fast) def generateElements(self) -> None: """Generate elements structure """ for i, d in enumerate(self.dictionary): coords = np.array(self.gdls)[np.ix_(d)] gdl = np.zeros([self.nvn, len(d)]) for i in range(self.nvn): gdl[i, :] = (np.array(d)*self.nvn+i) gdl = gdl.astype(int) if self.types[i] == 'L1V': element = LinealElement(coords, gdl) elif self.types[i] == 'L2V': element = QuadraticElement(coords, gdl) self.elements.append(element) def show(self) -> None: """Create a geometry graph """ pass class Geometry2D(Geometry): """Creates a 2D geometry Args: dictionary (list): Matrix with element definitions. Each row is an element. The gdl are defined in columns gdls (list): List of domain coordinates types (list): Types of each element nvn (int, optional): Nunmber of variables per node. Defaults to 1. regions (list[Region], optional): List of regions to apply in the geometry. Defaults to None. fast (bool, optional): If True, the created elements will have have the fast propertie (see Element class docs) """ def __init__(self, dictionary: list, gdls: list, types: list, nvn: int = 1, regions: list[Region] = None, fast=False) -> None: """Creates a 2D geometry Args: dictionary (list): Matrix with element definitions. Each row is an element. The gdl are defined in columns gdls (list): List of domain coordinates types (list): Types of each element nvn (int, optional): Nunmber of variables per node. Defaults to 1. regions (list[Region], optional): List of regions to apply in the geometry. Defaults to None. fast (bool, optional): If True, the created elements will have have the fast propertie (see Element class docs) """ Geometry.__init__(self, dictionary, gdls, types, nvn, regions, fast) def generateRegionFromCoords(self, p0: list, p1: list) -> None: """Generates a geometry Region1D by specified coordinates Args: p0 (list): region start point p1 (list): region end point """ masCercano1 = None d1 = np.Inf masCercano2 = None d2 = np.Inf for i, gdl in enumerate(self.gdls): r1 = np.sqrt((p0[0]-gdl[0])**2+(p0[1]-gdl[1])**2) r2 = np.sqrt((p1[0]-gdl[0])**2+(p1[1]-gdl[1])**2) if r1 < d1: d1 = r1 masCercano1 = i if r2 < d2: d2 = r2 masCercano2 = i coords = np.array([self.gdls[masCercano1], self.gdls[masCercano2]]) self.regions.append(Region1D(coords)) self.regions[-1].setNodesOfRegion(self) def generateBCFromCoords(self, x: float, y: float, value: float = 0, nv: int = 1) -> list: """Generates border conditions by coordinates. The border condition is applied to the nearest node Args: x (float): X coordinate of point y (float): Y coordinate of point value (float, optional): Value of the border condition. Defaults to 0. nv (int, optional): Variable number. The first variable is 1. Defaults to 1. Returns: list: Matrix of border coordinates that can be concatenated """ masCercano1 = None d1 = np.Inf for i, gdl in enumerate(self.gdls): r1 = np.sqrt((x-gdl[0])**2+(y-gdl[1])**2) if r1 < d1: d1 = r1 masCercano1 = i return [[masCercano1*self.nvn+(nv-1), value]] def loadOnRegionVF(self, region: int, f: Callable = None, add=None) -> None: """Assign a load over a geometry region. The start point of region is the 0 point of load The end point of region is the end point of load Load must be defined as a function (normal or lambda) Args: region (int): region in wich load will be applied f (Callable, optional): Load Function. Defaults to None. """ c0, cf = self.regions[region].coords dy = cf[1]-c0[1] dx = cf[0]-c0[0] theta = np.arctan2(dy, dx) def fx(s): return f(c0[0]+s*np.cos(theta))[0] def fy(s): return f(c0[1]+s*np.sin(theta))[1] self.loadOnRegion(region=region, fx=fx, fy=fy, add=add) def loadOnRegion(self, region: int, fx: Callable = None, fy: Callable = None, add=None) -> None: """Assign a load over a geometry region. The start point of region is the 0 point of load The end point of region is the end point of load Load must be defined as a function (normal or lambda) Args: region (int): region in wich load will be applied fx (Callable, optional): Load Function x component. Defaults to None. fy (Callable, optional): Load Function y component. Defaults to None. """ a = self.giveElementsOfRegion(region) coordenadas = self.regions[region].coords vect_seg = coordenadas[1]-coordenadas[0] for e in a: e.intBorders = True for i in range(-1, len(e.borders)-1): pertenece1 = isBetween( coordenadas[0], coordenadas[1], e._coords[i]) pertenece2 = isBetween( coordenadas[0], coordenadas[1], e._coords[i+1]) if pertenece1 and pertenece2: vect_lad = e._coords[i+1]-e._coords[i] sign = np.sign(vect_seg@vect_lad) e.borders[i].dir = sign e.borders[i].s0 = np.linalg.norm( e._coords[i]-coordenadas[0]) if fx: e.borders[i].properties['load_x'].append(fx) if fy: e.borders[i].properties['load_y'].append(fy) if add: e.borders[i].properties.update(add) else: e.borders[i].dir = 0.0 def loadOnHole(self, hole: int, sa: float = 0, ea: float = 2*np.pi, fx: Callable = None, fy: Callable = None) -> None: """Assign loads over a hole. Args: hole (int): Hole index in wich load will be applied sa (float, optional): Start face angle. Defaults to 0. ea (float, optional): Finish face angle. Defaults to :math:`2\\pi`. fx (Callable, optional): Load Function x component. Defaults to None. fy (Callable, optional): Load Function y component. Defaults to None. """ holee = self.holes[hole] regions_apply = [] for i, region in enumerate(holee['regions']): seg_coords = self.gdls[region] centradas = seg_coords[1]-seg_coords[0] angle = np.arctan2(centradas[1], centradas[0]) angle += np.pi/2 if angle < 0: angle += 2*np.pi if angleBetweenAngles(sa, ea, angle): regions_apply.append(region) for region in regions_apply: for i, seg in enumerate(self.regions): if (seg.coords == self.gdls[np.ix_(region)]).all(): self.loadOnRegion(i, fx, fy) break def cbOnHole(self, hole: int, value: float, nv: int = 1, sa: float = 0, ea: float = 2*np.pi) -> list: """Generate a list of border conditions from specified hole. Args: hole (int): Hole index in wich load will be applied value (float): Value of the bc nv (int, optional): Variable number, starts with 1. Defaults to 1. sa (float, optional): Start face angle. Defaults to 0. ea (float, optional): Finish face angle. Defaults to :math:`2\\pi`. Returns: list: List of border conditions that can be concatenated or assigned to the geometry """ holee = self.holes[hole] regions_apply = [] bc = [] for i, region in enumerate(holee['regions']): seg_coords = self.gdls[region] centradas = seg_coords[1]-seg_coords[0] angle = np.arctan2(centradas[1], centradas[0]) angle += np.pi/2 if angle < 0: angle += 2*np.pi if angleBetweenAngles(sa, ea, angle): regions_apply.append(region) for region in regions_apply: for i, seg in enumerate(self.regions): if (seg.coords == self.gdls[np.ix_(region)]).all(): bc += self.cbFromRegion(i, value, nv) break return bc def show(self, texto: int = 10, bolita: int = 0, draw_segs: bool = True, draw_labels: bool = False, draw_bc: bool = False, label_bc: bool = False) -> None: """Create a geometry graph Args: texto (int, optional): Text size. Defaults to 10. bolita (int, optional): Node size. Defaults to 0. draw_segs (bool, optional): To draw or not draw the regions. Defaults to True. draw_labels (bool, optional): To draw or not draw element labels. Defaults to False. draw_bc (bool, optional): To draw border conditions. Defaults to False. label_bc (bool, optional): To draw labels on border conditions. Defaults to False. """ fig = plt.figure() ax = fig.add_subplot() ax.axes.set_aspect('equal') legend_items = [] for i, e in enumerate(self.elements): coords = e._coords coords = np.array(coords.tolist() + [coords[0].tolist()]) X = coords[:, 0] Y = coords[:, 1] ax.plot(X, Y, '-', color='black', alpha=1-0.6*draw_bc, zorder=-10) cx = self.centroids[i][0][0] cy = self.centroids[i][0][1] if draw_labels: ax.plot(cx, cy, 'o', markersize=texto + bolita, color='yellow') ax.annotate(format(i), [ cx, cy], size=texto, textcoords="offset points", xytext=(-0, -2.5), ha='center') if draw_segs: segs = self.regions for i, seg in enumerate(segs): ec = None if isinstance(seg, Region1D): ec = 'b' x, y = seg.coords.T[:2] segment_ = ax.fill( x, y, linewidth=3, zorder=0, edgecolor=ec, alpha=0.4-0.2*draw_bc, label=seg.description or format(i) # hatch="/" )[0] legend_items += [segment_] cx = np.average(x) cy = np.average(y) ax.plot(cx, cy, 'o', markersize=texto + bolita, color='pink', alpha=1-0.6*draw_bc) ax.annotate(format(i), [ cx, cy], alpha=1-0.6*draw_bc, size=texto, textcoords="offset points", xytext=(-0, -2.5), ha='center') for i, e in enumerate(self.elements): if e.intBorders and draw_bc: for i in range(-1, len(e.borders)-1): border = e.borders[i] if (len(border.properties['load_x']) + len(border.properties['load_y'])): coords_border_0 = e._coords[i] coords_border_1 = e._coords[i+1] ax.plot([coords_border_0[0], coords_border_1[0]], [coords_border_0[1], coords_border_1[1]], color='yellow', linewidth=5, zorder=50, ) cx = (coords_border_0 + coords_border_1)/2 ax.annotate(format(len(border.properties['load_x']) + len(border.properties['load_y'])), cx, size=texto, textcoords="offset points", xytext=(-0, -2.5), ha='center', zorder=55) ax.set_xlabel('x') ax.set_ylabel('y') ax.set_title('Domain') ax.legend(handles=legend_items) gdls = np.array(self.gdls) labels = np.linspace(0, gdls.shape[0] - 1, gdls.shape[0]).astype(int) if draw_labels: ax.plot(gdls[:, 0], gdls[:, 1], 'o', markersize=texto+bolita, color='gray') if draw_labels: for p, l in zip(gdls, labels): ax.annotate(l, p, size=texto, textcoords="offset points", xytext=(-0, -2.5), ha='center') maxx = np.max(gdls[:, 0]) maxy = np.max(gdls[:, 1]) minx = np.min(gdls[:, 0]) miny = np.min(gdls[:, 1]) coordmax = min(maxx-minx, maxy-miny) tFlecha = coordmax/80 if draw_bc: for i, cb in enumerate(self.cbe): coords_cb = gdls[int(cb[0]//self.nvn)] if cb[0] % self.nvn == 0: color = 'red' ax.annotate(f"{i}: {cb[1]}"*label_bc, xy=coords_cb, xytext=( coords_cb[0]-tFlecha, coords_cb[1]), horizontalalignment='center', verticalalignment='center', arrowprops=dict(arrowstyle="->", facecolor=color)) elif cb[0] % self.nvn == 1: color = 'blue' ax.annotate(f"{i}: {cb[1]}"*label_bc, xy=coords_cb, xytext=( coords_cb[0], coords_cb[1]+tFlecha), horizontalalignment='center', verticalalignment='center', arrowprops=dict(arrowstyle="->", facecolor=color)) elif cb[0] % self.nvn == 2: color = 'yellow' ax.annotate(f"{i}: {cb[1]}"*label_bc, xy=coords_cb, xytext=( coords_cb[0]-tFlecha, coords_cb[1]-tFlecha), horizontalalignment='center', verticalalignment='center', arrowprops=dict(arrowstyle="->", facecolor=color)) else: color = 'black' ax.annotate(f"{i}: {cb[1]}"*label_bc, xy=coords_cb, xytext=( coords_cb[0]-tFlecha, coords_cb[1]), horizontalalignment='center', verticalalignment='center', arrowprops=dict(arrowstyle="->", facecolor=color)) for i, cb in enumerate(self.cbn): coords_cb = gdls[int(cb[0]//self.nvn)] if cb[0] % self.nvn == 0: color = 'red' ax.annotate(f"NBC {i}: {cb[1]}"*label_bc, xy=coords_cb, xytext=( coords_cb[0]-tFlecha, coords_cb[1]), horizontalalignment='center', verticalalignment='center', arrowprops=dict(arrowstyle="->", facecolor=color)) elif cb[0] % self.nvn == 1: color = 'blue' ax.annotate(f"NBC {i}: {cb[1]}"*label_bc, xy=coords_cb, xytext=( coords_cb[0], coords_cb[1]+tFlecha), horizontalalignment='center', verticalalignment='center', arrowprops=dict(arrowstyle="->", facecolor=color)) elif cb[0] % self.nvn == 2: color = 'yellow' ax.annotate(f"NBC {i}: {cb[1]}"*label_bc, xy=coords_cb, xytext=( coords_cb[0]-tFlecha, coords_cb[1]-tFlecha), horizontalalignment='center', verticalalignment='center', arrowprops=dict(arrowstyle="->", facecolor=color)) else: color = 'black' ax.annotate(f"NBC {i}: {cb[1]}"*label_bc, xy=coords_cb, xytext=( coords_cb[0]-tFlecha, coords_cb[1]), horizontalalignment='center', verticalalignment='center', arrowprops=dict(arrowstyle="->", facecolor=color)) figManager = plt.get_current_fig_manager() figManager.full_screen_toggle() class Geometry3D(Geometry): """Creates a 3D geometry Args: dictionary (list): Matrix with element definitions. Each row is an element. The gdl are defined in columns gdls (list): List of domain coordinates types (list): Types of each element nvn (int, optional): Nunmber of variables per node. Defaults to 1. regions (list[Region], optional): List of regions to apply in the geometry. Defaults to None. fast (bool, optional): If True, the created elements will have have the fast propertie (see Element class docs) """ def __init__(self, dictionary: list, gdls: list, types: list, nvn: int = 1, regions: list[Region] = None, fast=False): """Creates a 2D geometry Args: dictionary (list): Matrix with element definitions. Each row is an element. The gdl are defined in columns gdls (list): List of domain coordinates types (list): Types of each element nvn (int, optional): Nunmber of variables per node. Defaults to 1. regions (list[Region], optional): List of regions to apply in the geometry. Defaults to None. fast (bool, optional): If True, the created elements will have have the fast propertie (see Element class docs) """ Geometry.__init__(self, dictionary, gdls, types, nvn, regions, fast) def show(self) -> None: """Creates a geometry graph """ def isBorder(self, e): neighbors = 0 potential = self.KDTree.query_ball_point( e._xcenter, self.min_search_radius*2) nb = [] for ie2 in potential: e2 = self.elements[ie2] if not e.index == e2.index: if testNeighborg(e, e2): neighbors += 1 nb.append(e2) if neighbors == len(e.faces): break if neighbors < len(e.faces): return True, nb return False, [] def _detectBorderElementsRecursive(self, e): """Return the indices of the elements list which are border The mehtods finds border elemets recursively so the first border element must be provided Args: e (Element): A border element Returns: List: Listo of indices """ r = [] if self.visited[e.index]: return r self.visited[e.index] = True isBorder, neighbors = self.isBorder(e) if not isBorder: return r r.append(e.index) self.pb.update(1) for bn in neighbors: r += self._detectBorderElementsRecursive(bn) return r def _detectBorderElementsIterative(self, e, plot=False): with plt.ion(): i = 0 le = [e.index] vecinos = [] self.visited[e.index] = True isBorder, neighbors = self.isBorder(e) vecinos.append(neighbors) centroides = [] for v in neighbors: centroides.append(v._xcenter.flatten()) if plot: fig = plt.figure() ax = fig.add_subplot(projection="3d") ax.axes.set_xlim3d( left=self.boundingBoxMin[0], right=self.boundingBoxMax[0]) ax.axes.set_ylim3d( bottom=self.boundingBoxMin[1], top=self.boundingBoxMax[1]) ax.axes.set_zlim3d( bottom=self.boundingBoxMin[2], top=self.boundingBoxMax[2]) encontrados = ax.plot(*np.array(centroides).T, "o", c="r") while i < len(le): e = self.elements[le[i]] neighbors = vecinos[i] for nb in neighbors: if not self.visited[nb.index]: self.visited[nb.index] = True ib, nbn = self.isBorder(nb) if ib: le.append(nb.index) vecinos.append(nbn) centroides.append(nb._xcenter.flatten()) if plot: cn = np.array(centroides).T encontrados[0].set_xdata(cn[0]) encontrados[0].set_ydata(cn[1]) encontrados[0].set_3d_properties(cn[2]) fig.canvas.draw() fig.canvas.flush_events() i += 1 self.pb.update(1) return le, vecinos def detectBorderElements(self, plot=False): self.visited = [False]*len(self.elements) print("Detecting border elements...") self.pb = tqdm(unit=" Border elements found") e = self.elements[self.KDTree.query(self.boundingBoxMin)[1]] res, vecinos = self._detectBorderElementsIterative(e, plot) self.visited = [False]*len(self.elements) del self.pb self.additionalProperties = { **self.additionalProperties, "border_elements": res} return res def detectBorderElementsLegacy(self): print("Detecting border elements...") border_elements = [] for e in tqdm(self.elements, unit=" Element"): if self.isBorder(e)[0]: border_elements.append(e.index) self.additionalProperties = { **self.additionalProperties, "border_elements": border_elements} return border_elements class Lineal(Geometry1D): """Generate a evenly spaced elements domain Args: lenght (float): Domain lenght n (int): Number of elements o (int): Element order, can be 1 or 2 nvn (int, optional): Number of variables per node. Defaults to 1. """ def __init__(self, lenght: float, n: int, o: int, nvn: int = 1) -> None: """Generate a evenly spaced elements domain Args: lenght (float): Domain lenght n (int): Number of elements o (int): Element order, can be 1 or 2 nvn (int, optional): Number of variables per node. Defaults to 1. """ self.lenght = lenght dictionary = [] gdls = [] he = self.lenght / (n) for i in range(0, n): xa = i * he if o == 1: gdls += [xa] dictionary += [[i, i+1]] elif o == 2: gdls += [xa, xa+he/2] dictionary += [[i*o, i*o+1, i*o+2]] else: gdls += [xa, xa+he/3, xa+2*he/3] dictionary += [[i*o, i*o+1, i*o+2, i*o+3]] gdls += [self.lenght] if o == 1: tipo = 'L1V' elif o == 2: tipo = 'L2V' else: tipo = 'L3V' types = [tipo]*len(dictionary) gdls = np.array(gdls).reshape([len(gdls), 1]) Geometry1D.__init__(self, dictionary, gdls, types, nvn=nvn) class Delaunay(Geometry2D): """Generate Delaunay triangulation using Triangle Args: vertices (list): matrix containing the domain vertices coordinates params (str): Triangulation parameters, use the aux function _strdelaunay nvn (int, optional): Number of variables per node. Defaults to 1. holes_dict (list, optional): A list of holes dicts. Defaults to None. fillets (list, optional): A list of fillets. Defaults to None. fast (bool, optional): If True, the created elements will have have the fast propertie (see Element class docs) """ def __init__(self, vertices: list, params: str, nvn: int = 1, holes_dict=None, fillets=None, fast=False) -> None: """Generate Delaunay triangulation Args: vertices (list): matrix containing the domain vertices coordinates params (str): Triangulation parameters, use the aux function _strdelaunay nvn (int, optional): Number of variables per node. Defaults to 1. holes_dict (list, optional): A list of holes dicts. Defaults to None. fillets (list, optional): A list of fillets. Defaults to None. fast (bool, optional): If True, the created elements will have have the fast propertie (see Element class docs) """ mask = copy.deepcopy(vertices) # try: # mask = mask.tolist() # except: # pass seg = [] for i in range(len(vertices)-1): seg.append([i, i+1]) seg.append([i+1, 0]) hh = [] mascarita = copy.deepcopy(seg) if fillets: for fillet in fillets: S1 = seg[fillet['start_region']] S2 = seg[fillet['end_region']] for i, maskarita in enumerate(mascarita): if maskarita == S1: indice_importante = i mizq = mascarita[:indice_importante] mder = mascarita[indice_importante:] P1 = vertices[S1[0]] P2 = vertices[S2[1]] P = vertices[S1[1]] r = fillet['r'] n = fillet['n'] if not S1[1] == S2[0]: raise Exception('The fillet regions are not valid') O, sa, a = roundCorner(P1, P2, P, r) f_vertices, f_regions = giveCoordsCircle(O, r, sa, a, n, True) vertices[S1[1]] = np.array(f_vertices[0]).tolist() sp = (np.array(f_regions)+len(vertices)-2)[1:].tolist() seg += [[S1[1], sp[1][0]]]+sp[1:] mder = mder[1:] spp = copy.deepcopy(sp) ss1 = copy.deepcopy(S1) if mder: mder[0][0] = spp[-1][-1] mascarita = mizq+[[mizq[-1][-1], ss1[1]], [ss1[1], spp[1][0]]]+spp[1:]+mder vertices += np.array(f_vertices)[1: -1].tolist() seg[fillet['end_region']][0] = len(vertices)-1 # vertices += [O] original = dict(vertices=np.array(vertices), segments=np.array(seg)) self.original = original if holes_dict: for hole in holes_dict: hh += [hole['center']] seg += (np.array(hole['regions'])+len(vertices)).tolist() hole['regions'] = ( np.array(hole['regions'])+len(vertices)).tolist() vertices += np.array(hole['vertices']).tolist() original = dict(vertices=np.array(vertices), segments=np.array(seg), holes=hh) triangular = tr.triangulate(original, params) self.triangulation = triangular dictionary = triangular['triangles'].tolist() if 'o2' in params: tipos = ['T2V']*len(dictionary) else: tipos = ['T1V']*len(dictionary) gdls = triangular['vertices'] if tipos[0] == 'T2V': for dicc in dictionary: a1 = dicc[5] a2 = dicc[3] a3 = dicc[4] dicc[3] = a1 dicc[4] = a2 dicc[5] = a3 regions_f = [] for s in seg: region = Region1D(gdls[np.ix_(s)]) regions_f.append(region) Geometry2D.__init__(self, dictionary, gdls, tipos, nvn=nvn, regions=regions_f, fast=fast) mask = [] for region in mascarita: mask += [gdls[region[0]]] self.mask = mask self.holes = holes_dict self.fillets = fillets @ staticmethod def _strdelaunay(constrained: bool = True, delaunay: bool = True, a: float = None, q: float = None, o: int = 1) -> str: """Create a string for the delaunay triangulation constructor Args: constrained (bool, optional): Makes the triangulation constrained. Defaults to True. delaunay (bool, optional): Makes all triangles delaunay. Defaults to True. a (float, optional): Maximum area of triange. Defaults to None. q (float, optional): Minimum triangle angle <35. Defaults to None. o (int, optional): Order of element if 2, quadratic elements are generated. Defaults to 1. Returns: str: A string containing the input parameters for the Delaunay1V constructor """ p = '' if o == 2: o = '-o2' else: o = '' if constrained: p = 'p' if a == None: a = '' else: a = 'a'+format(a) D = '' if delaunay: D = 'D' if q == None: q = '' else: if isinstance(1, int): if q > 35: raise "No se puede crear una triangulacion con angulos menores a 35 grados" q = 'q'+format(q) return p+a+D+q+'i'+o def extrude(self, h: float = 1.0, m: int = 5, **kargs) -> Geometry3D: nodes = self.gdls n = len(nodes) triangles = self.dictionary piramides = [] dz = h/(m-1) dddnodes = np.zeros([m*n, 3]) for i in range(m): dddnodes[n*(i):n*(i+1), :2] = nodes dddnodes[n*(i):n*(i+1), -1] = i*dz for i in range(m-1): for t in triangles: t = np.array(t) nodossup = n*(i) + t nodosinf = n*(i+1) + t p = nodossup.tolist()+nodosinf.tolist() piramides += [[p[2], p[5], p[0], p[1]]] piramides += [[p[0], p[5], p[3], p[4]]] piramides += [[p[0], p[5], p[4], p[1]]] # TODO Extrude regions return Geometry3D(piramides, dddnodes, ['TE1V']*len(piramides), 3, **kargs)
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Geometry/Geometry.py
Geometry.py
import numpy as np import matplotlib.pyplot as plt class Quadrant3D(): def __init__(self, p: tuple, dim: tuple) -> None: x, y, z = p w, h, d = dim self.x, self.y, self.z = x, y, z self.w, self.h, self.d = w, h, d self.coords = np.array([ [x-w, y-h, z-d], [x+w, y-h, z-d], [x+w, y+h, z-d], [x-w, y+h, z-d], [x-w, y-h, z+d], [x+w, y-h, z+d], [x+w, y+h, z+d], [x-w, y+h, z+d]]) self.maximos_self = np.max(self.coords, axis=0) self.minimos_self = np.min(self.coords, axis=0) self._xcenter = np.array([x, y, z]) def contains(self, e) -> bool: x = e._xcenter superior = (self.maximos_self-x) >= 0 inferior = (x-self.minimos_self) >= 0 return superior.all() and inferior.all() def boxes_disjoint(self, e): maxx1, maxy1, maxz1 = self.maximos_self minx1, miny1, minz1 = self.minimos_self maxx2, maxy2, maxz2 = e.maximos_self minx2, miny2, minz2 = e.minimos_self return (maxx2 <= minx1 or maxx1 <= minx2 or maxy2 <= miny1 or maxy1 <= miny2 or maxz2 <= minz1 or maxz1 <= minz2) def intesects_quadrant(self, e) -> bool: return not self.boxes_disjoint(e) def subdivide(self) -> list: divs = [] nw = self.w/2 nh = self.h/2 nd = self.d/2 x, y, z = self.x, self.y, self.z divs.append(Quadrant3D((x+nw, y+nh, z+nd), (nw, nh, nd))) divs.append(Quadrant3D((x+nw, y+nh, z-nd), (nw, nh, nd))) divs.append(Quadrant3D((x+nw, y-nh, z+nd), (nw, nh, nd))) divs.append(Quadrant3D((x+nw, y-nh, z-nd), (nw, nh, nd))) divs.append(Quadrant3D((x-nw, y+nh, z+nd), (nw, nh, nd))) divs.append(Quadrant3D((x-nw, y+nh, z-nd), (nw, nh, nd))) divs.append(Quadrant3D((x-nw, y-nh, z+nd), (nw, nh, nd))) divs.append(Quadrant3D((x-nw, y-nh, z-nd), (nw, nh, nd))) return divs def draw(self, ax): ax.plot([self.x, self.x], [self.y, self.y], [self.z-self.d, self.z+self.d], c="k", alpha=0.4) ax.plot([self.x-self.w, self.x+self.w], [self.y, self.y], [self.z, self.z], c="k", alpha=0.4) ax.plot([self.x, self.x], [self.y-self.h, self.y+self.h], [self.z, self.z], c="k", alpha=0.4) def draw_(self, ax): ax.plot(*self.coords.T, c="red") class Quadrant3DSpherical(Quadrant3D): def __init__(self, p: tuple, r: tuple): dim = [r, r, r] self.r = r Quadrant3D.__init__(self, p, dim) def contains(self, e) -> bool: return (sum((self._xcenter-e._xcenter)**2) <= self.r**2) class Geometree(): min_search_size = -1 def __init__(self, boundary, n: int = 1, depth: int = 1) -> None: self.boundary = boundary self.points = [] self.n = n self.divided = False self.children = [] self.depth = depth def draw_points(self, ax): if not self.divided: for e in self.points: plt.plot(*e._xcenter, "o", c="black", alpha=0.5) for c in self.children: c.draw_points(ax) def draw(self, ax): if self.divided: self.boundary.draw(ax) for c in self.children: c.draw(ax) def contains(self, p: tuple) -> bool: return self.boundary.contains(p) def subdivide(self) -> None: self.divided = True self.children = [] divs = self.boundary.subdivide() for d in divs: self.children.append(Geometree(d, self.n, self.depth+1)) def add_point(self, p: tuple) -> bool: dist = p.coords-p._xcenter min_search_size = max(np.sum(dist**2, axis=1)**0.5) self.min_search_size = max(min_search_size, self.min_search_size) if not self.contains(p): return False if len(self.points) < self.n and not self.divided: self.points.append(p) return True if not self.divided: self.subdivide() for p2 in self.points[::-1]: for sq in self.children: if sq.add_point(p2): self.points.pop() break for sq in self.children: if sq.add_point(p): return True raise Exception("This should never happen") def query_range(self, quadrant, plot=False, ax=None) -> bool: result = [] if not self.boundary.intesects_quadrant(quadrant): return result for p in self.points: if plot: ax.plot(*p._xcenter, "o", c="green", alpha=1) if quadrant.contains(p): result.append(p) if not self.divided: return result for sq in self.children: result += sq.query_range(quadrant, plot=plot, ax=ax) return result def query_range_point_radius(self, p, r=None, plot=False, ax=None): if r == None: r = 2*self.min_search_size q = Quadrant3DSpherical(p, r) selected = self.query_range(q, plot, ax) return selected def graph_query_range(self, p, r): fig = plt.figure() ax = fig.add_subplot(projection="3d") self.draw_points(ax) result = self.query_range_point_radius(p, r, True, ax) for p in result: ax.plot(*p._xcenter, "o", c="yellow", alpha=1) plt.show() def query_first_point_set(self): if self.divided: for ch in self.children: if ch.children or ch.points: return ch.query_first_point_set() else: return self.points raise Exception("This should not happen")
AFEM
/AFEM-1.0.34-py3-none-any.whl/FEM/Geometry/Geometree.py
Geometree.py
# AFKer ### This is a small, simple python program for going "afk" in video games. Features include: - customizable time to wait between movement in game - ability to check for respawn button - ability to make profiles for different games that are saved in the 'profiles.csv' file ### Setup / dependencies - You need to manually download python from https://www.python.org/ - Then install with pip: ``` $ pip install AFKer ``` - You are now all set up. To use the program, navigate to your default pip install location which can be found with: ``` $ pip show AFKer ``` And then type: ``` $ cd <insert path from above here>/AFK $ python afk.py ``` - To quit at any time press ctrl + c.
AFKer
/AFKer-0.1.1.tar.gz/AFKer-0.1.1/README.md
README.md
import time import keyboard import random from sys import exit from signal import signal, SIGINT import csv import pyautogui numIntervals = 0 intervalLength = 0 def yn_checker(char): char = char.lower() if char == 'y' or char == 'n': return True return False # handle ctrl-C def handler(signal_recieved, frame): if (intervalLength == 0): print("Goodbye!") exit() print("You were AFK for about", numIntervals, "intervals of", intervalLength, "seconds!") exit() signal(SIGINT, handler) def move(intervals): intervals = int(intervals) global numIntervals time.sleep(intervals) keyboard.press('w') time.sleep(0.2) keyboard.release('w') keyboard.press('s') time.sleep(0.2) keyboard.release('s') numIntervals += 1 def respawnGame(x, y): pyautogui.moveTo(x, y) pyautogui.click() def afk(interval=None, x=None, y=None, profile=None): global intervalLength intervalLength = interval # called w/out with profile if profile is None: if x is None or y is None: while True: move(interval) else: while True: move(interval) if numIntervals % interval == 0: respawnGame(x,y) # called with profile else: csvfile = open('profile.csv', 'r', newline='') data = csv.reader(csvfile) for row in data: if row[0] == profile: interval = int(row[1]) intervalLength = interval x = int(row[2]) y = int(row[3]) break if x == 0 and y == 0: while True: move(interval) else: while True: move(interval) if numIntervals % interval == 0: respawnGame(x,y) if __name__ == "__main__": # set up a new profile newProf = input("Would you like to create a new game profile?(y/n): ") while yn_checker(newProf) == False: newProf = input("Would you like to create a new game profile?(y/n): ") if (newProf.lower() == 'y'): profName = input("Name your profile: ") intervals = input("How long would you like to wait between movments in seconds?: ") while intervals.isnumeric() == False: intervals = input("Please input an integer: ") respawn = input("Would you like to check for a respawn button?(y/n): ") while yn_checker(respawn) == False: respawn = input("Would you like to check for a respawn button?(y/n): ") y = 0 x = 0 if (respawn.lower() == 'y'): print("If (0,0) is the top left corner of your screen, about where do you think the button is?") x = input("x coord: ") while x.isnumeric() == False: x = input("Please input an integer: ") y = input("y coord: ") while y.isnumeric() == False: y = input("Please input an integer: ") x = int(x) y = int(y) pyautogui.moveTo(x,y,duration=1) good = input("Is this correct?(y/n): ") while yn_checker(good) == False: good = input("Is this correct?(y/n): ") while (good.lower() != 'y'): x = input("x coord: ") while x.isnumeric() == False: x = input("Please input an integer: ") y = input("y coord: ") while y.isnumeric() == False: y = input("Please input an integer: ") x = int(x) y = int(y) pyautogui.moveTo(x,y,duration=1) good = input("Is this correct?(y/n): ") while yn_checker(good) == False: good = input("Is this correct?(y/n): ") data = {} data = [profName,intervals,x,y] csvfile = open('profile.csv', 'a', newline='') writer = csv.writer(csvfile) writer.writerow(data) print("When you're back, press ctrl+C to quit.") intervals = int(intervals) afk(interval=intervals, x=x, y=y) else: loadProf = input("Would you like to load a profile?(y/n): ") while yn_checker(loadProf) == False: loadProf = input("Would you like to load a profile?(y/n): ") if loadProf.lower() == 'y': profName = input("What is the name of your profile? (case sensitive): ") print("When you're back, press ctrl+C to quit.") afk(profile=profName) intervals = input("How long would you like the intervals between movments to be in seconds?: ") while intervals.isnumeric() == False: intervals = input("Please enter an integer: ") respawn = input("Would you like to check for respawn?(y/n): ") while yn_checker(respawn) == False: respawn = input("Would you like to check for respawn?(y/n): ") ycoord = 0 xcoord = 0 if (respawn.lower() == 'y'): print("If (0,0) is the top left corner of your screen, about where do you think the button is?") xcoord = input("x coord: ") while xcoord.isnumeric() == False: xcoord = input("Please input an integer: ") ycoord = input("y coord: ") while ycoord.isnumeric() == False: ycoord = input("Please input an integer: ") xcoord = int(xcoord) ycoord = int(ycoord) pyautogui.moveTo(xcoord,ycoord,duration=1) good = input("Is this correct?(y/n): ") while yn_checker(good) == False: good = input("Is this correct?(y/n): ") while (good.lower() != 'y'): xcoord = input("x coord: ") while xcoord.isnumeric() == False: xcoord = input("Please input an integer: ") ycoord = input("y coord: ") while ycoord.isnumeric() == False: ycoord = input("Please input an integer: ") xcoord = int(xcoord) ycoord = int(ycoord) pyautogui.moveTo(xcoord,ycoord,duration=1) good = input("Is this correct?(y/n): ") while yn_checker(good) == False: good = input("Is this correct?(y/n): ") print("When you're back, press ctrl+C to quit.") afk(interval=intervals, x=xcoord, y=ycoord) print("When you're back, press ctrl+C to quit.") afk(interval=intervals)
AFKer
/AFKer-0.1.1.tar.gz/AFKer-0.1.1/AFK/afk.py
afk.py
AFQ-browser is a software library for visualization of results from automated fiber quantification of human brain tractography. The software takes as input the results of analysis from the ``AFQ`` or ``pyAFQ`` software and produces a browser-based visualization of the data. Command-line tools allow users to create these visualizations from their data and upload them to share with others as a website. See [https://yeatmanlab.github.io/AFQ-Browser/](https://yeatmanlab.github.io/AFQ-Browser/) for documentation See: [https://yeatmanlab.github.io/AFQBrowser-demo/](https://yeatmanlab.github.io/AFQBrowser-demo/) for a working example [![DOI](https://zenodo.org/badge/61830890.svg)](https://zenodo.org/badge/latestdoi/61830890)
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/README.md
README.md
# Contributing to AFQ-Browser ### How do I contribute to AFQ-Browser development? Thank you for considering contributing to AFQ-Browser! Improving documentation, triaging bugs, creating examples, and writing tutorials are all examples of helpful contributions that you can make If you want to report a bug or another issue with the software, please do so by adding an issue [here](https://github.com/yeatmanlab/AFQ-Browser/issues/new). Please be sure to give us detailed information: what did you do when you ran into an issue? What operating system are you using? What browser? Tell us about the data that you are visualizing. If you are interested in creating new features, please create an issue for discussion first. Once we've hashed it out together, go ahead and create a Pull Request (see [this post for instructions](https://egghead.io/series/how-to-contribute-to-an-open-source-project-on-github). >For something that is bigger than a one or two line fix: >1. Create your own fork of the code 2. Do the changes in your fork 3. If you like the change and think the project could use it: * Please be sure that your Python code is formatted according to [PEP8](https://www.python.org/dev/peps/pep-0008/) * Make sure to document functions you create. We follow the [Numpy documentation standards](https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt) for Python function doc-strings * Write tests: we use [pytest](https://docs.pytest.org/en/latest/) to test Python code. [Travis](travis-ci.org) will automatically test your code when you submit a PR. If you have any questions about any of this, feel free to get in touch through our mailing list: [email protected]
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/CONTRIBUTING.md
CONTRIBUTING.md
.. _plots: plots.js -------- .. js:autofunction:: afqb.plots.buildFromNodes .. js:autofunction:: afqb.plots.buildTractCheckboxes .. js:autofunction:: afqb.plots.line .. js:autofunction:: afqb.plots.area .. js:autofunction:: afqb.plots.buildPlotGui .. js:autofunction:: afqb.plots.ready .. js:autofunction:: afqb.plots.changePlots .. js:autofunction:: afqb.plots.draw .. js:autofunction:: afqb.plots.zoomAxis .. js:autofunction:: afqb.plots.newBrush .. js:autofunction:: afqb.plots.updateBrush .. js:autofunction:: afqb.plots.showHideTractDetails .. js:autofunction:: afqb.plots.initCheckboxes
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/doc/plots.rst
plots.rst
.. _installation_guide: Installing ``AFQ-Browser`` ========================== Installing the release version ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The released version of the software is the one that is officially supported, and if you are getting started with ``AFQ-Browser``, this is probably where you should get started AFQ-browser depends on numpy_, pandas_ and scipy_. The `publish` mechanisms also requires PyGithub_ and GitPython_. These dependencies should all be installed automatically when the software is installed. To install it, in a shell or command line, issue the following:: pip install AFQ-Browser One easy way to install these, is by installing the Anaconda_ Python distribution, Installing the development version ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The development version is probably less stable, but might include new features and fixes. There are two ways to install this version. The first uses ``pip``:: pip install git+https://github.com/yeatmanlab/AFQ-Browser.git The other requires that you clone the source code to your machine:: git clone https://github.com/yeatmanlab/AFQ-Browser.git Then, change your working directory into the top-level directory of this repo and issue:: python setup.py install .. _numpy: http://numpy.org .. _scipy: http://scipy.org .. _pandas: http://pandas.pydata.org/ .. _GitPython: http://gitpython.readthedocs.io/ .. _PyGithub: http://pygithub.github.io/PyGithub/v1/index.html .. _Anaconda: https://www.continuum.io/downloads
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/doc/installation_guide.rst
installation_guide.rst
.. _long_term_preservation: Long-term data preservation =========================== GitHub is great, but long-term preservation of data stored on GitHub is not guaranteed. To promote long-term preservation of data from instances of ``AFQ-Browser``, and eventual integration across data-sets we record data published through ``AFQ-Browser`` (see :ref:`usage_guide`) in `AFQ Vault <http://afqvault.org>`_, a centralized data-base of ``AFQ-Browser`` instances. In addition, we recommend that users of the software create a digital object identifier for their instance of ``AFQ-Browser``. One great way to do that uses Zenodo (see below). Using Zenodo for long-term data preservation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ `Zenodo <http://zenodo.org/>`_ is as service developed by `CERN <https://home.cern/>`_ to provide a platform for open and sustainable science (see `About Zenodo <http://about.zenodo.org/>`_ for more details). To create a digital object identifier (DOI) for your GitHub-published instance of ``AFQ-Browser`` `join Zenodo <https://zenodo.org/signup/>`_ with your GitHub account. After you do that, you can deposit the GitHub repository that holds your instance of ``AFQ-Browser`` by following these steps: #. Flip the switch for your repo on `Zenodo's GitHub settings page <https://zenodo.org/account/settings/github/>`_ #. Create a release in the GitHub repo containing your instance of ``AFQ-Browser``. See `this page <https://help.github.com/articles/creating-releases/>`_ for instructions.
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/doc/long_term_preservation.rst
long_term_preservation.rst
.. _home: .. figure:: _static/BDE_Banner_revised20160211-01.jpg :align: center :figclass: align-center :target: http://brainandeducation.com AFQ-Browser: ============ visualizing and sharing Automated Fiber Quantification results ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This software generates a browser-based visualization of data processed with the `Automated Fiber Quantification (AFQ) <https://github.com/yeatmanlab/AFQ>`_ software. For `example <https://yeatmanlab.github.io/AFQBrowser-demo/>`_. Tractography based on diffusion weighted MRI (dMRI) is used to find the major white matter fascicles (tracts) in the living human brain. The health of these tracts is an important factor underlying many cognitive and neurological disorders. `AFQ` is a sofware package focused on automated delineation of the major fiber tracts in individual human brains, and quantification of the tissue properties within the tracts (`Yeatman et al. 2012 <http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0049790>`_). This software package allows researchers to interactively query the data processed with `AFQ` (or other similar software) to explore patterns in the data. AFQ-Browser paper ~~~~~~~~~~~~~~~~~ If you are interested in technical details and motivation behind this project, please read our `paper <https://www.nature.com/articles/s41467-018-03297-7>`_. A slide presentation about the project is available `here <https://arokem.github.io/2019-OHBM-AFQ-Browser/>`_. Acknowledgements ~~~~~~~~~~~~~~~~~~~ this work was supported by a grant from the `Gordon & Betty Moore Foundation <https://www.moore.org/>`_, and from the `Alfred P. Sloan Foundation <http://www.sloan.org/>`_ to the `University of Washington eScience Institute <http://escience.washington.edu/>`_. Documentation ~~~~~~~~~~~~~~~~~ .. toctree:: :maxdepth: 2 installation_guide usage_guide getting_help dataformat long_term_preservation binder_integration api .. _AFQ: http://github.com/yeatmanlab/AFQ
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/doc/index.rst
index.rst
.. _dataformat: The AFQ-Browser data format ============================ `AFQ-Browser` supports input from two major tractometry software pipelines: AFQ_ and Tracula_. However, data from other tractometry pipelines can also be used in `AFQ-Browser`, if the data is converted to the following data format specification. Internally, `AFQ-Browser` represents its data in three different files: 1. A json file (``streamlines.json``) that contains a description of tract trajectories in the 3D space of the anatomy, and populates the "Anatomy" panel. This file has the following structure:: {"tract_name1":{"coreFiber":[[x1, y1, z1], x2, y2, z2], ... ], "1":[[x1, y1, z1], [x2, y2, z2], ...], "2":[[x1, y1, z1], x1, y1, z1], ... ]}, "tract_name2":{"coreFiber":[[x1, y1, z1],...]...}, ... } Where ``tract_name1`` and ``tract_name2`` can be replaced by keys that are the names of the tracts that you wish to represent. Within each tract, ``coreFiber`` is a required key, and subsequent numerical keys are not required. When tract representation ("core fiber"/"streamlines") is selected in the GUI, either the core fibers for each tract are displayed, or all the numerically designated streamlines for that tract. Coordinates are kept in MNI space aligned to AC-PC. 2. A csv file (``nodes.csv``) that contains information about the tract profiles and populates the "Bundle details" panel. This table should have columns (and headers) named ``subjectID``, ``tractID``, and ``nodeID``. The ``subjectID`` identifies a unique subject in your dataset, and it can take any string value you want (e.g., ``patient1``), as long as it is consistent with the information in the ``subjects.csv`` file (see below). The ``tractID`` is the same key used in the ``streamlines.json`` file to identify the tracts (e.g., ``tract_name1``, ``tract_name2``, etc.). The ``nodeID`` runs from 0 to the n-1, where n is the number of nodes in the tract profile for that tract. Other columns in this table will hold the numerical values of statistics in this subject/tract/node/ combination. The headers for these columns can be named any string value that you would like (e.g., "FA", "MD", "my_statistic", etc.). 3. A csv file (``subjects.csv``) that contains information about the subjects and populates the "Subject metadata" panel. This file is required to have a ``subjectID`` column that matches the subject identifiers used in ``nodes.csv`` (see above). It does not require any other columns, but can include any number of columns that describe the subjects in the study, holding numerical or string values. To use data generated from another source, add these data files to the ``client/data`` folder in a copy of the `site` folder from the `AFQ-Browser` repo_. .. _AFQ: https://github.com/yeatmanlab/afq .. _Tracula: https://surfer.nmr.mgh.harvard.edu/fswiki/Tracula .. _repo: https://github.com/yeatmanlab/AFQ-Browser
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/doc/dataformat.rst
dataformat.rst
.. _anatomy: anatomy.js ---------- .. js:autofunction:: afqb.three.initAndAnimate .. js:autofunction:: afqb.three.buildthreeGui .. js:autofunction:: afqb.three.init .. js:autofunction:: afqb.three.onWindowResize .. js:autofunction:: afqb.three.animate .. js:autofunction:: afqb.three.brushOn3D .. js:autofunction:: afqb.three.lightUpdate .. js:autofunction:: afqb.three.makeInvisible .. js:autofunction:: afqb.three.makeVisible .. js:autofunction:: afqb.three.highlightBundle .. js:autofunction:: afqb.three.mouseoutBundle .. js:autofunction:: afqb.three.mouseoverBundle
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/doc/anatomy.rst
anatomy.rst
.. _binder_integration: Integration of ``AFQ-Browser`` and ``Binder`` ============================================= To facilitate further computations on data-sets published using ``AFQ-Browser``, we integrate data-sets that are published on GitHub (see :ref:`usage_guide`) with the `Binder <https://mybinder.org/>`_ service. Binder makes the contents of a GitHub repository available through a `Jupyter <jupyter.org>`_ computational notebook interface. This means that visitors to a published ``AFQ-Browser`` instance can start computing on the data immediately without having to download the data, or install any software. .. note:: For further information about Binder, please read about recent developments in this project in `this blog post <https://elifesciences.org/labs/8653a61d/introducing-binder-2-0-share-your-interactive-research-environment>`_. Software available on Binder ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Binder environment automatically provisioned for ``AFQ-Browser`` instances has `scikit-learn <http://scikit-learn.org/>`_, `pandas <https://pandas.pydata.org>`_ and `seaborn <https://seaborn.pydata.org/>`_ installed into it. To add more software dependencies, you will need to edit the ``requirements.txt`` file in the GitHub (for example, see `this file <https://github.com/yeatmanlab/Sarica_2017/blob/gh-pages/requirements.txt>`_), `before` launching Binder for the first time from your instance.
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/doc/binder_integration.rst
binder_integration.rst
from docutils import nodes, utils from docutils.parsers.rst.roles import set_classes def make_link_node(rawtext, app, type, slug, options): """Create a link to a github resource. :param rawtext: Text being replaced with link node. :param app: Sphinx application context :param type: Link type (issues, changeset, etc.) :param slug: ID of the thing to link to :param options: Options dictionary passed to role func. """ try: base = app.config.github_project_url if not base: raise AttributeError if not base.endswith('/'): base += '/' except AttributeError as err: raise ValueError('github_project_url configuration value is not set (%s)' % str(err)) ref = base + type + '/' + slug + '/' set_classes(options) prefix = "#" if type == 'pull': prefix = "PR " + prefix node = nodes.reference(rawtext, prefix + utils.unescape(slug), refuri=ref, **options) return node def ghissue_role(name, rawtext, text, lineno, inliner, options={}, content=[]): """Link to a GitHub issue. Returns 2 part tuple containing list of nodes to insert into the document and a list of system messages. Both are allowed to be empty. :param name: The role name used in the document. :param rawtext: The entire markup snippet, with role. :param text: The text marked with the role. :param lineno: The line number where rawtext appears in the input. :param inliner: The inliner instance that called us. :param options: Directive options for customization. :param content: The directive content for customization. """ try: issue_num = int(text) if issue_num <= 0: raise ValueError except ValueError: msg = inliner.reporter.error( 'GitHub issue number must be a number greater than or equal to 1; ' '"%s" is invalid.' % text, line=lineno) prb = inliner.problematic(rawtext, rawtext, msg) return [prb], [msg] app = inliner.document.settings.env.app #app.info('issue %r' % text) if 'pull' in name.lower(): category = 'pull' elif 'issue' in name.lower(): category = 'issues' else: msg = inliner.reporter.error( 'GitHub roles include "ghpull" and "ghissue", ' '"%s" is invalid.' % name, line=lineno) prb = inliner.problematic(rawtext, rawtext, msg) return [prb], [msg] node = make_link_node(rawtext, app, category, str(issue_num), options) return [node], [] def ghuser_role(name, rawtext, text, lineno, inliner, options={}, content=[]): """Link to a GitHub user. Returns 2 part tuple containing list of nodes to insert into the document and a list of system messages. Both are allowed to be empty. :param name: The role name used in the document. :param rawtext: The entire markup snippet, with role. :param text: The text marked with the role. :param lineno: The line number where rawtext appears in the input. :param inliner: The inliner instance that called us. :param options: Directive options for customization. :param content: The directive content for customization. """ app = inliner.document.settings.env.app #app.info('user link %r' % text) ref = 'https://www.github.com/' + text node = nodes.reference(rawtext, text, refuri=ref, **options) return [node], [] def ghcommit_role(name, rawtext, text, lineno, inliner, options={}, content=[]): """Link to a GitHub commit. Returns 2 part tuple containing list of nodes to insert into the document and a list of system messages. Both are allowed to be empty. :param name: The role name used in the document. :param rawtext: The entire markup snippet, with role. :param text: The text marked with the role. :param lineno: The line number where rawtext appears in the input. :param inliner: The inliner instance that called us. :param options: Directive options for customization. :param content: The directive content for customization. """ app = inliner.document.settings.env.app #app.info('user link %r' % text) try: base = app.config.github_project_url if not base: raise AttributeError if not base.endswith('/'): base += '/' except AttributeError as err: raise ValueError('github_project_url configuration value is not set (%s)' % str(err)) ref = base + text node = nodes.reference(rawtext, text[:6], refuri=ref, **options) return [node], [] def setup(app): """Install the plugin. :param app: Sphinx application context. """ app.info('Initializing GitHub plugin') app.add_role('ghissue', ghissue_role) app.add_role('ghpull', ghissue_role) app.add_role('ghuser', ghuser_role) app.add_role('ghcommit', ghcommit_role) app.add_config_value('github_project_url', None, 'env') return
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/doc/sphinxext/github.py
github.py
from __future__ import division, absolute_import, print_function import sys import re import pydoc import sphinx import inspect import collections if sphinx.__version__ < '1.0.1': raise RuntimeError("Sphinx 1.0.1 or newer is required") from docscrape_sphinx import get_doc_object, SphinxDocString from sphinx.util.compat import Directive if sys.version_info[0] >= 3: sixu = lambda s: s else: sixu = lambda s: unicode(s, 'unicode_escape') def mangle_docstrings(app, what, name, obj, options, lines, reference_offset=[0]): cfg = {'use_plots': app.config.numpydoc_use_plots, 'show_class_members': app.config.numpydoc_show_class_members, 'show_inherited_class_members': app.config.numpydoc_show_inherited_class_members, 'class_members_toctree': app.config.numpydoc_class_members_toctree} u_NL = sixu('\n') if what == 'module': # Strip top title pattern = '^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*' title_re = re.compile(sixu(pattern), re.I | re.S) lines[:] = title_re.sub(sixu(''), u_NL.join(lines)).split(u_NL) else: doc = get_doc_object(obj, what, u_NL.join(lines), config=cfg) if sys.version_info[0] >= 3: doc = str(doc) else: doc = unicode(doc) lines[:] = doc.split(u_NL) if (app.config.numpydoc_edit_link and hasattr(obj, '__name__') and obj.__name__): if hasattr(obj, '__module__'): v = dict(full_name=sixu("%s.%s") % (obj.__module__, obj.__name__)) else: v = dict(full_name=obj.__name__) lines += [sixu(''), sixu('.. htmlonly::'), sixu('')] lines += [sixu(' %s') % x for x in (app.config.numpydoc_edit_link % v).split("\n")] # replace reference numbers so that there are no duplicates references = [] for line in lines: line = line.strip() m = re.match(sixu('^.. \\[([a-z0-9_.-])\\]'), line, re.I) if m: references.append(m.group(1)) # start renaming from the longest string, to avoid overwriting parts references.sort(key=lambda x: -len(x)) if references: for i, line in enumerate(lines): for r in references: if re.match(sixu('^\\d+$'), r): new_r = sixu("R%d") % (reference_offset[0] + int(r)) else: new_r = sixu("%s%d") % (r, reference_offset[0]) lines[i] = lines[i].replace(sixu('[%s]_') % r, sixu('[%s]_') % new_r) lines[i] = lines[i].replace(sixu('.. [%s]') % r, sixu('.. [%s]') % new_r) reference_offset[0] += len(references) def mangle_signature(app, what, name, obj, options, sig, retann): # Do not try to inspect classes that don't define `__init__` if (inspect.isclass(obj) and (not hasattr(obj, '__init__') or 'initializes x; see ' in pydoc.getdoc(obj.__init__))): return '', '' if not (isinstance(obj, collections.Callable) or hasattr(obj, '__argspec_is_invalid_')): return if not hasattr(obj, '__doc__'): return doc = SphinxDocString(pydoc.getdoc(obj)) if doc['Signature']: sig = re.sub(sixu("^[^(]*"), sixu(""), doc['Signature']) return sig, sixu('') def setup(app, get_doc_object_=get_doc_object): if not hasattr(app, 'add_config_value'): return # probably called by nose, better bail out global get_doc_object get_doc_object = get_doc_object_ app.connect('autodoc-process-docstring', mangle_docstrings) app.connect('autodoc-process-signature', mangle_signature) app.add_config_value('numpydoc_edit_link', None, False) app.add_config_value('numpydoc_use_plots', None, False) app.add_config_value('numpydoc_show_class_members', True, True) app.add_config_value('numpydoc_show_inherited_class_members', True, True) app.add_config_value('numpydoc_class_members_toctree', True, True) # Extra mangling domains app.add_domain(NumpyPythonDomain) app.add_domain(NumpyCDomain) # ------------------------------------------------------------------------------ # Docstring-mangling domains # ------------------------------------------------------------------------------ from docutils.statemachine import ViewList from sphinx.domains.c import CDomain from sphinx.domains.python import PythonDomain class ManglingDomainBase(object): directive_mangling_map = {} def __init__(self, *a, **kw): super(ManglingDomainBase, self).__init__(*a, **kw) self.wrap_mangling_directives() def wrap_mangling_directives(self): for name, objtype in list(self.directive_mangling_map.items()): self.directives[name] = wrap_mangling_directive( self.directives[name], objtype) class NumpyPythonDomain(ManglingDomainBase, PythonDomain): name = 'np' directive_mangling_map = { 'function': 'function', 'class': 'class', 'exception': 'class', 'method': 'function', 'classmethod': 'function', 'staticmethod': 'function', 'attribute': 'attribute', } indices = [] class NumpyCDomain(ManglingDomainBase, CDomain): name = 'np-c' directive_mangling_map = { 'function': 'function', 'member': 'attribute', 'macro': 'function', 'type': 'class', 'var': 'object', } def wrap_mangling_directive(base_directive, objtype): class directive(base_directive): def run(self): env = self.state.document.settings.env name = None if self.arguments: m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0]) name = m.group(2).strip() if not name: name = self.arguments[0] lines = list(self.content) mangle_docstrings(env.app, objtype, name, None, None, lines) self.content = ViewList(lines, self.content.parent) return base_directive.run(self) return directive
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/doc/sphinxext/numpydoc.py
numpydoc.py
import re def dollars_to_math(source): r""" Replace dollar signs with backticks. More precisely, do a regular expression search. Replace a plain dollar sign ($) by a backtick (`). Replace an escaped dollar sign (\$) by a dollar sign ($). Don't change a dollar sign preceded or followed by a backtick (`$ or $`), because of strings like "``$HOME``". Don't make any changes on lines starting with spaces, because those are indented and hence part of a block of code or examples. This also doesn't replaces dollar signs enclosed in curly braces, to avoid nested math environments, such as :: $f(n) = 0 \text{ if $n$ is prime}$ Thus the above line would get changed to `f(n) = 0 \text{ if $n$ is prime}` """ s = "\n".join(source) if s.find("$") == -1: return # This searches for "$blah$" inside a pair of curly braces -- # don't change these, since they're probably coming from a nested # math environment. So for each match, we replace it with a temporary # string, and later on we substitute the original back. global _data _data = {} def repl(matchobj): global _data s = matchobj.group(0) t = "___XXX_REPL_%d___" % len(_data) _data[t] = s return t s = re.sub(r"({[^{}$]*\$[^{}$]*\$[^{}]*})", repl, s) # matches $...$ dollars = re.compile(r"(?<!\$)(?<!\\)\$([^\$]+?)\$") # regular expression for \$ slashdollar = re.compile(r"\\\$") s = dollars.sub(r":math:`\1`", s) s = slashdollar.sub(r"$", s) # change the original {...} things in: for r in _data: s = s.replace(r, _data[r]) # now save results in "source" source[:] = [s] def process_dollars(app, docname, source): dollars_to_math(source) def mathdollar_docstrings(app, what, name, obj, options, lines): dollars_to_math(lines) def setup(app): app.connect("source-read", process_dollars) app.connect('autodoc-process-docstring', mathdollar_docstrings)
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/doc/sphinxext/math_dollar.py
math_dollar.py
import re, inspect, textwrap, pydoc import sphinx from docscrape import NumpyDocString, FunctionDoc, ClassDoc class SphinxDocString(NumpyDocString): def __init__(self, docstring, config={}): self.use_plots = config.get('use_plots', False) NumpyDocString.__init__(self, docstring, config=config) # string conversion routines def _str_header(self, name, symbol='`'): return ['.. rubric:: ' + name, ''] def _str_field_list(self, name): return [':' + name + ':'] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' '*indent + line] return out def _str_signature(self): return [''] if self['Signature']: return ['``%s``' % self['Signature']] + [''] else: return [''] def _str_summary(self): return self['Summary'] + [''] def _str_extended_summary(self): return self['Extended Summary'] + [''] def _str_param_list(self, name): out = [] if self[name]: out += self._str_field_list(name) out += [''] for param,param_type,desc in self[name]: out += self._str_indent(['**%s** : %s' % (param.strip(), param_type)]) out += [''] out += self._str_indent(desc,8) out += [''] return out @property def _obj(self): if hasattr(self, '_cls'): return self._cls elif hasattr(self, '_f'): return self._f return None def _str_member_list(self, name): """ Generate a member listing, autosummary:: table where possible, and a table where not. """ out = [] if self[name]: out += ['.. rubric:: %s' % name, ''] prefix = getattr(self, '_name', '') if prefix: prefix = '~%s.' % prefix autosum = [] others = [] for param, param_type, desc in self[name]: param = param.strip() if not self._obj or hasattr(self._obj, param): autosum += [" %s%s" % (prefix, param)] else: others.append((param, param_type, desc)) if autosum: out += ['.. autosummary::', ' :toctree:', ''] out += autosum if others: maxlen_0 = max([len(x[0]) for x in others]) maxlen_1 = max([len(x[1]) for x in others]) hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10 fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) n_indent = maxlen_0 + maxlen_1 + 4 out += [hdr] for param, param_type, desc in others: out += [fmt % (param.strip(), param_type)] out += self._str_indent(desc, n_indent) out += [hdr] out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += [''] content = textwrap.dedent("\n".join(self[name])).split("\n") out += content out += [''] return out def _str_see_also(self, func_role): out = [] if self['See Also']: see_also = super(SphinxDocString, self)._str_see_also(func_role) out = ['.. seealso::', ''] out += self._str_indent(see_also[2:]) return out def _str_warnings(self): out = [] if self['Warnings']: out = ['.. warning::', ''] out += self._str_indent(self['Warnings']) return out def _str_index(self): idx = self['index'] out = [] if len(idx) == 0: return out out += ['.. index:: %s' % idx.get('default','')] for section, references in idx.iteritems(): if section == 'default': continue elif section == 'refguide': out += [' single: %s' % (', '.join(references))] else: out += [' %s: %s' % (section, ','.join(references))] return out def _str_references(self): out = [] if self['References']: out += self._str_header('References') if isinstance(self['References'], str): self['References'] = [self['References']] out.extend(self['References']) out += [''] # Latex collects all references to a separate bibliography, # so we need to insert links to it if sphinx.__version__ >= "0.6": out += ['.. only:: latex',''] else: out += ['.. latexonly::',''] items = [] for line in self['References']: m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) if m: items.append(m.group(1)) out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] return out def _str_examples(self): examples_str = "\n".join(self['Examples']) if (self.use_plots and 'import matplotlib' in examples_str and 'plot::' not in examples_str): out = [] out += self._str_header('Examples') out += ['.. plot::', ''] out += self._str_indent(self['Examples']) out += [''] return out else: return self._str_section('Examples') def __str__(self, indent=0, func_role="obj"): out = [] out += self._str_signature() out += self._str_index() + [''] out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters', 'Returns', 'Other Parameters', 'Raises', 'Warns'): out += self._str_param_list(param_list) out += self._str_warnings() out += self._str_see_also(func_role) out += self._str_section('Notes') out += self._str_references() out += self._str_examples() for param_list in ('Attributes', 'Methods'): out += self._str_member_list(param_list) out = self._str_indent(out,indent) return '\n'.join(out) class SphinxFunctionDoc(SphinxDocString, FunctionDoc): def __init__(self, obj, doc=None, config={}): self.use_plots = config.get('use_plots', False) FunctionDoc.__init__(self, obj, doc=doc, config=config) class SphinxClassDoc(SphinxDocString, ClassDoc): def __init__(self, obj, doc=None, func_doc=None, config={}): self.use_plots = config.get('use_plots', False) ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) class SphinxObjDoc(SphinxDocString): def __init__(self, obj, doc=None, config={}): self._f = obj SphinxDocString.__init__(self, doc, config=config) def get_doc_object(obj, what=None, doc=None, config={}): if what is None: if inspect.isclass(obj): what = 'class' elif inspect.ismodule(obj): what = 'module' elif callable(obj): what = 'function' else: what = 'object' if what == 'class': return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, config=config) elif what in ('function', 'method'): return SphinxFunctionDoc(obj, doc=doc, config=config) else: if doc is None: doc = pydoc.getdoc(obj) return SphinxObjDoc(obj, doc, config=config)
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/doc/sphinxext/docscrape_sphinx.py
docscrape_sphinx.py
from __future__ import division, absolute_import, print_function import inspect import textwrap import re import pydoc from warnings import warn import collections import sys class Reader(object): """A line-based string reader. """ def __init__(self, data): """ Parameters ---------- data : str String with lines separated by '\n'. """ if isinstance(data, list): self._str = data else: self._str = data.split('\n') # store string as list of lines self.reset() def __getitem__(self, n): return self._str[n] def reset(self): self._l = 0 # current line nr def read(self): if not self.eof(): out = self[self._l] self._l += 1 return out else: return '' def seek_next_non_empty_line(self): for l in self[self._l:]: if l.strip(): break else: self._l += 1 def eof(self): return self._l >= len(self._str) def read_to_condition(self, condition_func): start = self._l for line in self[start:]: if condition_func(line): return self[start:self._l] self._l += 1 if self.eof(): return self[start:self._l+1] return [] def read_to_next_empty_line(self): self.seek_next_non_empty_line() def is_empty(line): return not line.strip() return self.read_to_condition(is_empty) def read_to_next_unindented_line(self): def is_unindented(line): return (line.strip() and (len(line.lstrip()) == len(line))) return self.read_to_condition(is_unindented) def peek(self, n=0): if self._l + n < len(self._str): return self[self._l + n] else: return '' def is_empty(self): return not ''.join(self._str).strip() class NumpyDocString(collections.Mapping): def __init__(self, docstring, config={}): docstring = textwrap.dedent(docstring).split('\n') self._doc = Reader(docstring) self._parsed_data = { 'Signature': '', 'Summary': [''], 'Extended Summary': [], 'Parameters': [], 'Returns': [], 'Yields': [], 'Raises': [], 'Warns': [], 'Other Parameters': [], 'Attributes': [], 'Methods': [], 'See Also': [], 'Notes': [], 'Warnings': [], 'References': '', 'Examples': '', 'index': {} } self._parse() def __getitem__(self, key): return self._parsed_data[key] def __setitem__(self, key, val): if key not in self._parsed_data: warn("Unknown section %s" % key) else: self._parsed_data[key] = val def __iter__(self): return iter(self._parsed_data) def __len__(self): return len(self._parsed_data) def _is_at_section(self): self._doc.seek_next_non_empty_line() if self._doc.eof(): return False l1 = self._doc.peek().strip() # e.g. Parameters if l1.startswith('.. index::'): return True l2 = self._doc.peek(1).strip() # ---------- or ========== return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) def _strip(self, doc): i = 0 j = 0 for i, line in enumerate(doc): if line.strip(): break for j, line in enumerate(doc[::-1]): if line.strip(): break return doc[i:len(doc)-j] def _read_to_next_section(self): section = self._doc.read_to_next_empty_line() while not self._is_at_section() and not self._doc.eof(): if not self._doc.peek(-1).strip(): # previous line was empty section += [''] section += self._doc.read_to_next_empty_line() return section def _read_sections(self): while not self._doc.eof(): data = self._read_to_next_section() name = data[0].strip() if name.startswith('..'): # index section yield name, data[1:] elif len(data) < 2: yield StopIteration else: yield name, self._strip(data[2:]) def _parse_param_list(self, content): r = Reader(content) params = [] while not r.eof(): header = r.read().strip() if ' : ' in header: arg_name, arg_type = header.split(' : ')[:2] else: arg_name, arg_type = header, '' desc = r.read_to_next_unindented_line() desc = dedent_lines(desc) params.append((arg_name, arg_type, desc)) return params _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|" r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X) def _parse_see_also(self, content): """ func_name : Descriptive text continued text another_func_name : Descriptive text func_name1, func_name2, :meth:`func_name`, func_name3 """ items = [] def parse_item_name(text): """Match ':role:`name`' or 'name'""" m = self._name_rgx.match(text) if m: g = m.groups() if g[1] is None: return g[3], None else: return g[2], g[1] raise ValueError("%s is not a item name" % text) def push_item(name, rest): if not name: return name, role = parse_item_name(name) items.append((name, list(rest), role)) del rest[:] current_func = None rest = [] for line in content: if not line.strip(): continue m = self._name_rgx.match(line) if m and line[m.end():].strip().startswith(':'): push_item(current_func, rest) current_func, line = line[:m.end()], line[m.end():] rest = [line.split(':', 1)[1].strip()] if not rest[0]: rest = [] elif not line.startswith(' '): push_item(current_func, rest) current_func = None if ',' in line: for func in line.split(','): if func.strip(): push_item(func, []) elif line.strip(): current_func = line elif current_func is not None: rest.append(line.strip()) push_item(current_func, rest) return items def _parse_index(self, section, content): """ .. index: default :refguide: something, else, and more """ def strip_each_in(lst): return [s.strip() for s in lst] out = {} section = section.split('::') if len(section) > 1: out['default'] = strip_each_in(section[1].split(','))[0] for line in content: line = line.split(':') if len(line) > 2: out[line[1]] = strip_each_in(line[2].split(',')) return out def _parse_summary(self): """Grab signature (if given) and summary""" if self._is_at_section(): return # If several signatures present, take the last one while True: summary = self._doc.read_to_next_empty_line() summary_str = " ".join([s.strip() for s in summary]).strip() if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): self['Signature'] = summary_str if not self._is_at_section(): continue break if summary is not None: self['Summary'] = summary if not self._is_at_section(): self['Extended Summary'] = self._read_to_next_section() def _parse(self): self._doc.reset() self._parse_summary() sections = list(self._read_sections()) section_names = set([section for section, content in sections]) has_returns = 'Returns' in section_names has_yields = 'Yields' in section_names # We could do more tests, but we are not. Arbitrarily. if has_returns and has_yields: msg = 'Docstring contains both a Returns and Yields section.' raise ValueError(msg) for (section, content) in sections: if not section.startswith('..'): section = (s.capitalize() for s in section.split(' ')) section = ' '.join(section) if section in ('Parameters', 'Returns', 'Yields', 'Raises', 'Warns', 'Other Parameters', 'Attributes', 'Methods'): self[section] = self._parse_param_list(content) elif section.startswith('.. index::'): self['index'] = self._parse_index(section, content) elif section == 'See Also': self['See Also'] = self._parse_see_also(content) else: self[section] = content # string conversion routines def _str_header(self, name, symbol='-'): return [name, len(name)*symbol] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' '*indent + line] return out def _str_signature(self): if self['Signature']: return [self['Signature'].replace('*', '\*')] + [''] else: return [''] def _str_summary(self): if self['Summary']: return self['Summary'] + [''] else: return [] def _str_extended_summary(self): if self['Extended Summary']: return self['Extended Summary'] + [''] else: return [] def _str_param_list(self, name): out = [] if self[name]: out += self._str_header(name) for param, param_type, desc in self[name]: if param_type: out += ['%s : %s' % (param, param_type)] else: out += [param] out += self._str_indent(desc) out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += self[name] out += [''] return out def _str_see_also(self, func_role): if not self['See Also']: return [] out = [] out += self._str_header("See Also") last_had_desc = True for func, desc, role in self['See Also']: if role: link = ':%s:`%s`' % (role, func) elif func_role: link = ':%s:`%s`' % (func_role, func) else: link = "`%s`_" % func if desc or last_had_desc: out += [''] out += [link] else: out[-1] += ", %s" % link if desc: out += self._str_indent([' '.join(desc)]) last_had_desc = True else: last_had_desc = False out += [''] return out def _str_index(self): idx = self['index'] out = [] out += ['.. index:: %s' % idx.get('default', '')] for section, references in idx.items(): if section == 'default': continue out += [' :%s: %s' % (section, ', '.join(references))] return out def __str__(self, func_role=''): out = [] out += self._str_signature() out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters', 'Returns', 'Yields', 'Other Parameters', 'Raises', 'Warns'): out += self._str_param_list(param_list) out += self._str_section('Warnings') out += self._str_see_also(func_role) for s in ('Notes', 'References', 'Examples'): out += self._str_section(s) for param_list in ('Attributes', 'Methods'): out += self._str_param_list(param_list) out += self._str_index() return '\n'.join(out) def indent(str, indent=4): indent_str = ' '*indent if str is None: return indent_str lines = str.split('\n') return '\n'.join(indent_str + l for l in lines) def dedent_lines(lines): """Deindent a list of lines maximally""" return textwrap.dedent("\n".join(lines)).split("\n") def header(text, style='-'): return text + '\n' + style*len(text) + '\n' class FunctionDoc(NumpyDocString): def __init__(self, func, role='func', doc=None, config={}): self._f = func self._role = role # e.g. "func" or "meth" if doc is None: if func is None: raise ValueError("No function or docstring given") doc = inspect.getdoc(func) or '' NumpyDocString.__init__(self, doc) if not self['Signature'] and func is not None: func, func_name = self.get_func() try: # try to read signature if sys.version_info[0] >= 3: argspec = inspect.getfullargspec(func) else: argspec = inspect.getargspec(func) argspec = inspect.formatargspec(*argspec) argspec = argspec.replace('*', '\*') signature = '%s%s' % (func_name, argspec) except TypeError as e: signature = '%s()' % func_name self['Signature'] = signature def get_func(self): func_name = getattr(self._f, '__name__', self.__class__.__name__) if inspect.isclass(self._f): func = getattr(self._f, '__call__', self._f.__init__) else: func = self._f return func, func_name def __str__(self): out = '' func, func_name = self.get_func() signature = self['Signature'].replace('*', '\*') roles = {'func': 'function', 'meth': 'method'} if self._role: if self._role not in roles: print("Warning: invalid role %s" % self._role) out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''), func_name) out += super(FunctionDoc, self).__str__(func_role=self._role) return out class ClassDoc(NumpyDocString): extra_public_methods = ['__call__'] def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, config={}): if not inspect.isclass(cls) and cls is not None: raise ValueError("Expected a class or None, but got %r" % cls) self._cls = cls self.show_inherited_members = config.get( 'show_inherited_class_members', True) if modulename and not modulename.endswith('.'): modulename += '.' self._mod = modulename if doc is None: if cls is None: raise ValueError("No class or documentation string given") doc = pydoc.getdoc(cls) NumpyDocString.__init__(self, doc) if config.get('show_class_members', True): def splitlines_x(s): if not s: return [] else: return s.splitlines() for field, items in [('Methods', self.methods), ('Attributes', self.properties)]: if not self[field]: doc_list = [] for name in sorted(items): try: doc_item = pydoc.getdoc(getattr(self._cls, name)) doc_list.append((name, '', splitlines_x(doc_item))) except AttributeError: pass # method doesn't exist self[field] = doc_list @property def methods(self): if self._cls is None: return [] return [name for name, func in inspect.getmembers(self._cls) if ((not name.startswith('_') or name in self.extra_public_methods) and isinstance(func, collections.Callable) and self._is_show_member(name))] @property def properties(self): if self._cls is None: return [] return [name for name, func in inspect.getmembers(self._cls) if (not name.startswith('_') and (func is None or isinstance(func, property) or inspect.isgetsetdescriptor(func)) and self._is_show_member(name))] def _is_show_member(self, name): if self.show_inherited_members: return True # show all class members if name not in self._cls.__dict__: return False # class member is inherited, we do not show it return True
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/doc/sphinxext/docscrape.py
docscrape.py
import os import os.path as op import errno import warnings from glob import glob import json import shutil from collections import OrderedDict import scipy.io as sio import pandas as pd import numpy as np import afqbrowser as afqb from http.server import SimpleHTTPRequestHandler import socketserver MNI_AFF = np.array([[1., 0., 0., -98.], [0., 1., 0., -134.], [0., 0., 1., -72.], [0., 0., 0., 1.]]) def _extract_params(afq): """ Helper function to extract a params dict from the AFQ mat file """ afq_params = afq['params'] params_dict = {k: afq_params.item()[k].tolist() for k in afq_params.item().dtype.names} params_dict['track'] = {k: params_dict['track'][k].tolist() for k in params_dict['track'].dtype.names} for k in params_dict['track'].keys(): if hasattr(params_dict['track'][k], 'tolist'): params_dict['track'][k] = params_dict['track'][k].tolist() for k in params_dict.keys(): if hasattr(params_dict[k], 'tolist'): params_dict[k] = params_dict[k].tolist() # Some newer version of AFQ have scan params: if 'scanparams' in afq.dtype.names: scan_params = afq['scanparams'].item() scan_dict = {k: scan_params[k].tolist() for k in scan_params.dtype.names} for k in scan_dict.keys(): if hasattr(scan_dict[k], 'tolist'): scan_dict[k] = scan_dict[k].tolist() scan_dict = {k: scan_params[k].tolist() for k in scan_params.dtype.names} for k in scan_dict.keys(): if hasattr(scan_dict[k], 'tolist'): scan_dict[k] = scan_dict[k].tolist() # Older versions of AFQ don't have the scan params: else: scan_dict = {} params = {'analysis_params': params_dict, 'scan_params': scan_dict} return params def _create_metadata(subject_ids, meta_fname): """Helper function to create a minimal metadata file.""" meta_df = pd.DataFrame({"subjectID": subject_ids}, index=range(len(subject_ids))) meta_df.to_csv(meta_fname) def _copy_nodes_table(nodes_table_fname, out_path=None, metadata=None, streamlines=None): """ Replace default `nodes.csv` with user provided file. Parameters ---------- nodes_table_fname : str Full path to user-supplied AFQ-Browser nodes CSV file out_path : str, optional Full path to directory where the nodes table will be saved. If not provided will assume current working directory. metadata : str, optional Full path to a file with user-supplied metadata. If not provided a metadata file will be generated using subjectIDs in the nodes table. streamlines : str, optional Full path to a file with user-supplied streamlines. If not provided the default streamline file consisting of twenty tracts, each with a core fiber and a set of sub-sampled streamlines, each with 100 nodes from an examplar subject will be utilized as a representative anatomy. Returns ------- tuple: paths to the files that get generated: nodes_fname, meta_fname, streamlines_fname """ if not op.exists(nodes_table_fname): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), nodes_table_fname) if out_path is None: out_path = '.' nodes_fname = op.join(out_path, 'nodes.csv') shutil.copy(nodes_table_fname, nodes_fname) meta_fname = op.join(out_path, 'subjects.csv') if metadata is None: nodes_df = pd.read_csv(nodes_fname) _create_metadata(nodes_df.subjectID.unique(), meta_fname) else: shutil.copy(metadata, meta_fname) streamlines_fname = op.join(out_path, 'streamlines.json') if streamlines is None: warnings.warn('Using default anatomy') else: shutil.copy(streamlines, streamlines_fname) return nodes_fname, meta_fname, streamlines_fname def _validate(nodes_fname, meta_fname, streamlines_fname): """ Run checks to ensure requirements and warns of inconsistencies. Parameters ---------- nodes_fname : str Full path to nodes table CSV file meta_fname : str Full path to metadata CSV file streamlines_fname : str Full path to streamlines JSON file Returns ------- list : validation_errors """ nodes_df = pd.read_csv(nodes_fname) required_columns = ['subjectID', 'tractID', 'nodeID'] validation_errors = [] for column in required_columns: if column not in nodes_df.columns: if column.lower() not in [c.lower() for c in nodes_df.columns]: validation_errors.append(ValueError( f'Nodes table columns are case sensitive: {column}')) else: validation_errors.append(ValueError( f'Nodes table missing required column: {column}')) meta_df = pd.read_csv(meta_fname) # check subjcts consistent if 'subjectID' not in meta_df.columns: validation_errors.append(ValueError( 'Metadata file missing required column: subjectID')) if set(nodes_df.subjectID.unique()) != set(meta_df.subjectID): diff = set(nodes_df.subjectID.unique()) ^ set(meta_df.subjectID) warnings.warn('Metadata and Nodes table subjects are inconsistent: ' f'{diff}\n Some subjects may not appear.') with open(streamlines_fname) as fp: streamlines = json.load(fp) # check tracts consistent if set(nodes_df.tractID.unique()) != set(streamlines.keys()): diff = set(nodes_df.tractID.unique()) ^ set(streamlines.keys()) warnings.warn('Streamlines and Nodes table tracts are inconsistent: ' f'{diff}\n Some bundles may not appear.') # check nodes consistent for tractID in streamlines.keys(): if 'coreFiber' not in streamlines[tractID].keys(): validation_errors.append(ValueError( f'Streamlines {tractID} missing required key: coreFiber')) tract = nodes_df.loc[nodes_df.tractID == tractID] tract_num_nodes = len(tract.nodeID.unique()) for streamlineID in streamlines[tractID].keys(): streamline_num_nodes = len(streamlines[tractID][streamlineID]) if tract_num_nodes != streamline_num_nodes: validation_errors.append(ValueError( f'Streamlines {tractID} {streamlineID} and Nodes tables' 'nodes inconsistent length')) return validation_errors def tracula2nodes(stats_dir, out_path=None, metadata=None, params=None): """ Create a nodes table from a TRACULA `stats` directory. Read data processed with TRACULA [1]_ and generate an AFQ-browser compliant nodes table. Parameters ---------- stats_dir : str Full path to a directory containing stats results from TRACULA processing. out_path : str, optional Full path to directory where the nodes table will be saved. metadata : str, optional Full path to a file with user-supplied metadata. This has to be a csv file with column headers in the first row, including a column named "subjectID". For an example, see the 'data/subjects.csv' that comes with the software. params : str, optional Full path to a params file that describes the analysis and the scan params. This is a json file that has keys "analysis_params" and "scan_params" that can be filled with a dict with parameters pertaining to the analysis and to the scanned data. The fields used by AFQ are described in: `https://github.com/jyeatman/AFQ/blob/master/functions/AFQ_Create.m` Returns ------- tuple: paths to the files that get generated: nodes_fname, meta_fname, streamlines_fname, params_fname Notes ----- .. [1] Automated probabilistic reconstruction of white-matter pathways in health and disease using an atlas of the underlying anatomy. Yendiki A, Panneck P, Srinivasan P, Stevens A, Zollei L, Augustinack J, Wang R, Salat D, Ehrlich S, Behrens T, Jbabdi S, Gollub R and Fischl B (2011). Front. Neuroinform. 5:23. doi: 10.3389/fninf.2011.00023 """ txt_files = glob(op.join(stats_dir, '*.txt')) tracks = [] metrics = [] for txt_file in txt_files: tt = '.'.join(op.split(txt_file)[-1].split('.')[:2]) if not (tt.startswith('rh') or tt.startswith('lh')): tt = tt.split('.')[0] tracks.append(tt) metrics.append((op.splitext(op.split(txt_file)[-1])[0]).split('.')[-1]) tracks = list(set(tracks)) metrics = [metric for metric in list(set(metrics)) if metric not in ['mean', 'inputs']] streamlines = OrderedDict() dfs = [] for tt in tracks: coords_file = tt + '.avg33_mni_bbr.coords.mean.txt' cc = np.loadtxt(op.join(stats_dir, coords_file)) # We apply the MNI affine, to get back to AC/PC space in mm: coords = np.dot(cc, MNI_AFF[:-1, :-1].T) + MNI_AFF[:-1, -1][None, :] streamlines[tt] = {'coreFiber': coords.tolist()} first_metric = True for m in metrics: fname = op.join(stats_dir, tt + '.avg33_mni_bbr.' + m + '.txt') df_metric = pd.read_csv(fname, delimiter=' ') df_metric = df_metric.drop( filter(lambda x: x.startswith('Unnamed'), df_metric.columns), axis=1) n_nodes, n_subjects = df_metric.shape re_data = df_metric.values.T.reshape(n_nodes * n_subjects) if first_metric: re_nodes = np.tile(np.arange(n_nodes), n_subjects) re_subs = np.concatenate( [[s for i in range(n_nodes)] for s in df_metric.columns]) re_track = np.repeat(tt, n_subjects * n_nodes) re_df = pd.DataFrame({'subjectID': re_subs, 'tractID': re_track, 'nodeID': re_nodes, m: re_data}) first_metric = False else: re_df[m] = re_data dfs.append(re_df) nodes_df = pd.concat(dfs) if out_path is None: out_path = '.' nodes_fname = op.join(out_path, 'nodes.csv') nodes_df.to_csv(nodes_fname, index=False) meta_fname = op.join(out_path, 'subjects.csv') if metadata is None: _create_metadata(df_metric.columns, meta_fname) else: shutil.copy(metadata, meta_fname) streamlines_fname = op.join(out_path, 'streamlines.json') with open(streamlines_fname, 'w') as f: f.write(json.dumps(streamlines)) params_fname = op.join(out_path, 'params.json') if params is None: with open(params_fname, 'w') as f: f.write(json.dumps({"analysis_params": {}, "scan_params": {}})) else: shutil.copy(params, params_fname) return nodes_fname, meta_fname, streamlines_fname, params_fname def _create_subject_ids(n_subjects): if n_subjects > 1000: subject_ids = ['subject_%05d' % i for i in range(n_subjects)] elif n_subjects > 100: subject_ids = ['subject_%04d' % i for i in range(n_subjects)] elif n_subjects > 10: subject_ids = ['subject_%03d' % i for i in range(n_subjects)] else: subject_ids = ['subject_%02d' % i for i in range(n_subjects)] return subject_ids def afq_mat2tables(mat_file_name, subject_ids=None, stats=None, out_path=None, metadata=None): """ Create nodes table, subjects table and params dict from AFQ `.mat` file. Parameters ---------- mat_file_name : str Full path to an AFQ-processed mat-file subject_ids : list, optional Identifiers for the subjects. Default: ['subject_001', 'subject_002,' ...] stats : list, optional List of keys for statistics to pull from the AFQ data. Default: pull out all of the statistics that are in the mat file. out_path : str, optional Full path for the CSV/JSON files to be saved as output. Default: pwd. metadata : str, optional Full path to a file with user-supplied metadata. This has to be a csv file with column headers in the first row, including a column named "subjectID". For an example, see the 'data/subjects.csv' that comes with the software. Defaults to use the metadata stored in the afq mat file. If no metadata provided and there is no meadata in the afq mat file, create a minimal metadata table. Returns ------- tuple: paths to the files that get generated: nodes_fname, meta_fname, streamlines_fname, params_fname """ afq = sio.loadmat(mat_file_name, squeeze_me=True)['afq'] vals = afq['vals'].item() tract_ids = afq['fgnames'].item() n_tracts = len(tract_ids) if stats is None: stats = list(vals.dtype.fields.keys()) columns = ['subjectID', 'tractID', 'nodeID'] columns = columns + stats df = pd.DataFrame(columns=columns) shape = vals[stats[0]].item()[0].shape if len(shape) > 1: n_subjects, nodes_per_tract = shape else: n_subjects = 1 nodes_per_tract = shape[0] # Check if subject ids is defined in the afq structure if subject_ids is None: if 'sub_ids' in afq.dtype.names and len(afq['sub_ids'].item()): subject_ids = [str(x) for x in afq['sub_ids'].item()] else: subject_ids = _create_subject_ids(n_subjects) subject_ids = np.array(subject_ids) # Loop over subjects for subject in range(len(subject_ids)): sid = subject_ids[subject] # If the subject ID could be interperted as a number: if isinstance(sid, int) or sid.isdigit(): # Prepend an "s" so that sorting works on the IDs in the browser: sid = "s" + sid # Loop over tracts for tract in range(n_tracts): # Making a subject and tract specific dataframe subj_df = pd.DataFrame( columns=['subjectID', 'tractID', 'nodeID'], data=np.array([[sid] * nodes_per_tract, [tract_ids[tract]] * nodes_per_tract, np.arange(nodes_per_tract)]).T) # We're looping over the desired stats (eg fa, md) and adding them # to the subjects dataframe for stat in stats: if n_subjects == 1: scalar = vals[stat].item()[tract] else: scalar = vals[stat].item()[tract][subject, :] subj_df[stat] = scalar # The subject's dataframe for this tract is now appended to the # whole dataframe here: df = df.append(subj_df) # Set output path from the input kwarg: if out_path is None: out_path = '.' nodes_fname = op.join(out_path, 'nodes.csv') # Write to file df.to_csv(nodes_fname, index=False) # Next, the metadata: meta_fname = op.join(out_path, 'subjects.csv') if metadata is None: if 'metadata' in afq.dtype.names: try: # Create metadata from the AFQ struct: metadata = afq['metadata'].item() meta_df1 = pd.DataFrame({"subjectID": subject_ids}, index=range(len(subject_ids))) # Metadata has mixed types, and we want to preserve that # going into the DataFrame. Hence, we go through a dict: metadata_for_df = {k: v for k, v in zip(metadata.dtype.names, metadata.item())} meta_df2 = pd.DataFrame(metadata_for_df) meta_df = pd.concat([meta_df1, meta_df2], axis=1) meta_df.to_csv(meta_fname) except ValueError: # If we're here, that's because the metadata in the AFQ mat # Doesn't have the right shape or has some other # wonky behavior: _create_metadata(subject_ids, meta_fname) else: # If we're here, that's because there probably is no metadata # In the AFQ mat file: _create_metadata(subject_ids, meta_fname) else: shutil.copy(metadata, meta_fname) # using default streamline file streamlines_fname = op.join(out_path, 'streamlines.json') params_fname = op.join(out_path, 'params.json') params = _extract_params(afq) json.dump(params, open(params_fname, 'w')) return nodes_fname, meta_fname, streamlines_fname, params_fname def copy_and_overwrite(from_path, to_path): """Helper function: copies and overwrites.""" if op.exists(to_path): shutil.rmtree(to_path) shutil.copytree(from_path, to_path) def update_settings_json(settings_path, title=None, subtitle=None, link=None, sublink=None): """Update settings.json with user supplied values settings_path : str Path to the settings.json file to be updated title : str, optional. Custom page title. Default: None. subtitle : str, optional. Custom page subtitle. Default: None. link : str, optional. Custom href for page title. Default: None. sublink : str, optional. Custom href for page subtitle. Default: None. """ # Load the settings.json with open(settings_path) as fp: settings = json.load(fp) # Populate defaults from settings.json if they exist, # otherwise set to empty defaults = {} if settings.get('global') and settings.get('global').get('html'): html_settings = settings.get('global').get('html') else: html_settings = {} defaults['title'] = ('Page title', html_settings.get('title')) defaults['subtitle'] = ('Page subtitle', html_settings.get('subtitle')) defaults['link'] = ('Title hyperlink (including http(s)://)', html_settings.get('link')) defaults['sublink'] = ('Subtitle hyperlink (including http(s)://)', html_settings.get('sublink')) # python 2 compatible user input try: prompt = raw_input except NameError: prompt = input # Later, we'll iterate over key_list to get user input. But we don't # want to ask for user input if it's supplied as an argument to this # function, so if args are provided, use them as defaults, otherwise # append to key_list key_list = [] if title is not None: html_settings['title'] = title else: key_list.append('title') if subtitle is not None: html_settings['subtitle'] = subtitle else: key_list.append('subtitle') if link is not None: html_settings['link'] = link else: key_list.append('link') if sublink is not None: html_settings['sublink'] = sublink else: key_list.append('sublink') # Prompt for input for key in key_list: prompt_text, value = defaults[key] text = '{p:s} [{d!s}]: '.format(p=prompt_text, d=value) new_val = prompt(text) if not new_val: new_val = value if new_val is not None: html_settings[key] = new_val # Update the settings.json dict html_settings = {'global': {'html': html_settings}} settings.update(html_settings) # Write to file with open(settings_path, 'w') as fp: json.dump(settings, fp) def assemble(source, target=None, metadata=None, streamlines=None, title=None, subtitle=None, link=None, sublink=None): """ Spin up an instance of the AFQ-Browser with data provided as a mat file. Parameters ---------- source : str Path to a AFQ-Browser nodes csv-file, mat-file containing the AFQ data structure, or to a TRACULA stats folder. target : str, optional. Path to a file-system location to create this instance of the browser in. Default: pwd. metadata : str, optional. Path to an input csv metadata file. This file requires a "subjectID" column to work. If a file is provided, it will overwrite metadata provided through other. Default: read metadata from AFQ struct, or generate a metadata table with just a "subjectID" column (e.g., for TRACULA). streamlines : str, optional. Path to a user-supplied streamline JSON file. The file contains a description of tract trajectories in the 3D space of the anatomy. Within each tract, "coreFiber" is a required key, and subsequent numerical keys are not required. title : str, optional. Custom page title. Default: None. subtitle : str, optional. Custom page subtitle. Default: None. link : str, optional. Custom href for page title. Default: None. sublink : str, optional. Custom href for page subtitle. Default: None. """ if target is None: target = '.' site_dir = op.join(target, 'AFQ-browser') # This is where the template is stored: data_path = op.join(afqb.__path__[0], 'site') copy_and_overwrite(data_path, site_dir) out_path = op.join(site_dir, 'client', 'data') settings_path = op.join(site_dir, 'client', 'settings.json') update_settings_json(settings_path, title, subtitle, link, sublink) if op.isdir(source): # Assume we got a TRACULA stats path: nodes_fname, meta_fname, streamlines_fname, params_fname =\ tracula2nodes(source, out_path=out_path, metadata=metadata) else: ext = os.path.splitext(source)[-1].lower() if ext == '.mat': # We have an AFQ-generated mat-file on our hands: nodes_fname, meta_fname, streamlines_fname, params_fname =\ afq_mat2tables(source, out_path=out_path, metadata=metadata) elif ext == '.csv': # We have an nodes.csv file nodes_fname, meta_fname, streamlines_fname =\ _copy_nodes_table(source, out_path=out_path, metadata=metadata, streamlines=streamlines) else: raise ValueError( 'Unknown source argument must be on of: ' 'TRACULA directory, AFQ mat file, or nodes csv file') validation_errors = _validate(nodes_fname, meta_fname, streamlines_fname) if validation_errors: raise ValueError(validation_errors) def run(target=None, port=8080): """ Run a webserver for AFQ-browser. Parameters ---------- target : str Full path to the root folder where AFQ-browser files are stored. port : int Which port to run the server on. """ if target is None: target = '.' site_dir = op.join(target, 'AFQ-browser', 'client') os.chdir(site_dir) Handler = SimpleHTTPRequestHandler success = False while not success: try: httpd = socketserver.TCPServer(("", port), Handler) success = True except OSError: port = port + 1 print("Serving AFQ-browser on port", port) try: httpd.serve_forever() except KeyboardInterrupt: pass httpd.server_close()
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/afqbrowser/browser.py
browser.py
import os import os.path as op import getpass import tempfile import pandas as pd import github as gh import git def upload(target, repo_name, uname=None, upass=None, token=None, org=None, to_vault=True): """ Upload an assembled AFQ-Browser site to a github pages website. Parameters ---------- target : str Local path to the file-system location where the AFQ-Browser files are (need to run `assemble` before running this function) repo_name : str The website will be at https://<username>.github.io/<repo_name> uname : str, optional GitHub user-name upass : str, optional GitHub password org : str, optional When provided, this means that the website will be at: https://<org>.github.io/<repo_name>. Defaults to use the user-name. to_vault : bool, optional Whether to deposit the data to afqvault. Default: True """ # Get all the files that will be committed/pushed file_list = [] client_folder = op.join(target, 'client') for path, dirs, files in os.walk(client_folder): for f in files: file_list.append(os.path.abspath(op.join(path, f))) # Get credentials from the user if uname is None: uname = getpass.getpass("GitHub user-name? ") if not any([upass, token]): upass = getpass.getpass("GitHub password (leave blank if using 2FA " "and personal access token)? ") if not upass: token = getpass.getpass("GitHub personal access token? ") print('If prompted again for username and password, use your ' 'access token as the password.') login_uname = uname if token is None else token # Create the remote repo on GitHub (use PyGithub) g = gh.Github(login_uname, upass) u = g.get_user() if org is not None: gh_org = g.get_organization(org) remote = gh_org.create_repo(repo_name) else: remote = u.create_repo(repo_name) # Create the local repo using GitPython: r = git.Repo.init(client_folder) # Add all of the files to the repo's gh-pages branch r.index.add(file_list) r.index.commit("Commit everything") # Add a .nojekyll file f = open(op.join(client_folder, '.nojekyll'), 'w') f.close() r.index.add([os.path.abspath(f.name)]) r.index.commit("Add nojekyll file") # Push to GitHub branch = r.create_head("gh-pages") branch.checkout() o = r.create_remote("origin", remote.clone_url) assert o.exists() o.push("gh-pages") # Strangely, that last slash is crucial so that this works as a link: if org is not None: site_name = "https://" + org + ".github.io/" + repo_name + "/" else: site_name = "https://" + uname + ".github.io/" + repo_name + "/" if to_vault: # Next, we deposit to afqvault afqvault_repo = g.get_repo('afqvault/afqvault') # If you already have a fork, the following gives you the fork. # Otherwise, it creates the fork: my_fork = u.create_fork(afqvault_repo) # Create a local copy of your fork: tdir = tempfile.mkdtemp() av_repo = git.Repo.init(op.join(tdir, 'afqvault')) origin = av_repo.create_remote('origin', my_fork.clone_url) origin.fetch() av_repo.create_head('master', origin.refs.master) av_repo.heads.master.set_tracking_branch(origin.refs.master) av_repo.heads.master.checkout() origin.pull() # We create a new branch every time we do this, so that we can PR # More than one time branch_name = uname + "/" + repo_name + r.commit().hexsha branch = av_repo.create_head(branch_name) branch.checkout() # Edit the manifest file with your information: manifest_fname = op.join(tdir, 'afqvault', 'manifest.csv') manifest = pd.read_csv(manifest_fname, index_col=0) shape = manifest.shape manifest = manifest.append(pd.DataFrame(data=dict( username=[uname if org is None else org], repository_name=[repo_name]))) # Deduplicate -- if this site was already uploaded, we're done! manifest = manifest.drop_duplicates() manifest.to_csv(manifest_fname) # Otherwise, we need to make a PR against afqvault if manifest.shape != shape: # Commit this change: av_repo.index.add([os.path.abspath(manifest_fname)]) av_repo.index.commit("Adds %s" % site_name) # Push it to that branch on your fork origin.push(branch_name) # Then, we create the PR against the central repo: afqvault_repo.create_pull("Adds %s" % site_name, "Auto-created by afqbrowser-publish", "master", "%s:%s" % (uname, branch_name)) return site_name
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/afqbrowser/gh_pages.py
gh_pages.py
# Welcome to the AFQ-Browser exploratory notebook ### What is this thing? This is a [Jupyter](http://jupyter.org/) notebook. You can add code cells to this notebook, and execute Python code that you write in these cells, by pressing the shift and enter keys on your keyboard. For example: ``` a = 1 print(a) ``` To embed figures in the notebook, please use the following Jupyter command at the top of your notebook: ``` %matplotlib inline ``` ### Reading AFQ-browser data To read data in from the AFQ-browser, you can use the [Pandas](pandas.pydata.org) library. ``` import pandas as pd subjects = pd.read_csv('./data/subjects.csv') subjects.head() nodes = pd.read_csv('./data/nodes.csv') nodes.head() ``` For an example analysis, see our [example analysis](example-analysis.ipynb) notebook ``` ```
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/afqbrowser/site/client/index.ipynb
index.ipynb
# Example analysis This notebook provides a generic example for some analysis that you might want to conduct with the data provided through this AFQ-Browser instance. Note that this is just an example, and not may be a good approach to the data in this particular instance of the AFQ-Browser, and this data-set. Ultimately, the limits of the analysis you could do are the limits of your imagination. ``` %matplotlib inline import pandas as pd subjects = pd.read_csv('./data/subjects.csv') nodes = pd.read_csv('./data/nodes.csv') ``` ### Merging nodes and subjects The data from nodes (referring to diffusion statistics along the length of the tracts) can be merged together with the data about subjects into one table: ``` merged = pd.merge(nodes, subjects, on="subjectID") merged.head() ``` ### Visualizing the data You can use [Matplotlib](http://matplotlib.org/) and [Seaborn](https://seaborn.pydata.org/) to visualize the data: ``` import matplotlib.pyplot as plt import seaborn as sns ``` We focus on the calculated diffusion statistics that are included in the `nodes` table: ``` stats = nodes.columns.drop(["subjectID", "tractID", "nodeID"]) ``` And specifically on the very first one ``` print(stats[0]) stat = merged[["nodeID", "subjectID", "tractID", stats[0]]] ``` Select a single tract: ``` tract_stat = stat[stat["tractID"] == stat["tractID"].values[0]] tract_stat.head() tract_p = tract_stat.pivot(index='nodeID', columns='subjectID', values=stats[0]) import numpy as np sns.tsplot(tract_p.values.T, err_style="unit_traces", estimator=np.nanmean) ``` ### Analyzing data As an example of one approach to AFQ data, we include here an example of how you might use [Scikit Learn's implementation of the K-means algorithm](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html) to cluster the subjects in these data into two clusters, based on this statistic/tract combination. ``` from sklearn.cluster import KMeans from sklearn.preprocessing import Imputer from sklearn.pipeline import Pipeline ``` We create a pipeline that imputes nan values (that sometimes occur in tract profiles), and clusters the results into two clusters: ``` estimator = Pipeline([("impute", Imputer()), ("cluster", KMeans(n_clusters=2))]) ``` We compute the clusters and transform the data into cluster distance space ``` clusters = estimator.fit(tract_p.values.T).steps[1][1] labels = clusters.labels_ x, y = estimator.fit_transform(tract_p.values.T).T ``` We plot the results in the latent cluster space ``` plt.scatter(x, y, c=labels.astype(np.float)) ```
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/afqbrowser/site/client/example-analysis.ipynb
example-analysis.ipynb
// =========== three js part /** * Combine the init and animate function calls for use in d3.queue() * * @param error - error passed through from previous function in d3.queue() */ afqb.three.initAndAnimate = function (error) { "use strict"; if (error) { throw error; } afqb.three.init(afqb.plots.initCheckboxes); afqb.three.animate(); }; /** * Build the dat.gui controls for the anatomy panel * * @param streamlinesExist - boolean to indicate existence of anatomy streamlines. * if streamlinesExist is true, then the fiberRepesentation controller will allow * both 'core fiber' and 'all fibers' options. Otherwise, only 'core fiber' will * be allowed. */ afqb.three.buildthreeGui = function (streamlinesExist) { var ThreeGuiConfigObj = function () { this.lhOpacity = parseFloat(afqb.three.settings.lHOpacity); this.rhOpacity = parseFloat(afqb.three.settings.rHOpacity); this.fiberOpacity = parseFloat(afqb.three.settings.fiberOpacity); this.highlight = afqb.three.settings.mouseoverHighlight; this.fiberRepresentation = streamlinesExist ? afqb.three.settings.fiberRepresentation : 'core fiber'; }; afqb.three.gui = new dat.GUI({ autoplace: false, width: 350, scrollable: false }); afqb.global.controls.threeControlBox = new ThreeGuiConfigObj(); var lhOpacityController = afqb.three.gui .add(afqb.global.controls.threeControlBox, 'lhOpacity') .min(0).max(1).step(0.01).name('Left Hemi Opacity'); lhOpacityController.onChange(function (value) { afqb.three.lh.traverse(function (child) { if (child instanceof THREE.Mesh) { child.material.opacity = value; } }); }); lhOpacityController.onFinishChange(function (value) { // Update the query string afqb.global.updateQueryString( {three: {lHOpacity: value.toString()}} ); }); var rhOpacityController = afqb.three.gui .add(afqb.global.controls.threeControlBox, 'rhOpacity') .min(0).max(1).step(0.01).name('Right Hemi Opacity'); rhOpacityController.onChange(function (value) { afqb.three.rh.traverse(function (child) { if (child instanceof THREE.Mesh) { child.material.opacity = value; } }); }); rhOpacityController.onFinishChange(function (value) { // Update the query string afqb.global.updateQueryString( {three: {rHOpacity: value.toString()}} ); }); var fiberOpacityController = afqb.three.gui .add(afqb.global.controls.threeControlBox, 'fiberOpacity') .min(0.0).max(1.0).step(0.01).name('Fiber Opacity'); fiberOpacityController.onChange(function (value) { afqb.three.greyGroup.traverse(function (child) { if (child instanceof THREE.LineSegments) { child.material.opacity = value; if (value === 0) { child.material.depthWrite = false; } else { child.material.depthWrite = true; } } }); afqb.three.greyCoreGroup.traverse(function (child) { if (child instanceof THREE.Mesh) { child.material.opacity = value; if (value === 0) { child.material.depthWrite = false; } else { child.material.depthWrite = true; } } }); }); fiberOpacityController.onFinishChange(function (value) { // Update the query string afqb.global.updateQueryString( {three: {fiberOpacity: value.toString()}} ); }); // Add highlight controller var mouseoverHighlightController = afqb.three.gui .add(afqb.global.controls.threeControlBox, 'highlight') .name('Mouseover Highlight'); mouseoverHighlightController.onFinishChange(function (value) { // Update the query string afqb.global.updateQueryString( {three: {mouseoverHighlight: value.toString()}} ); }); // Add fiber representation controller var fiberRepController; if (streamlinesExist) { // Allow both options: 'core fiber' and 'all fibers' fiberRepController = afqb.three.gui .add(afqb.global.controls.threeControlBox, 'fiberRepresentation', ['all fibers', 'core fiber']) .name('Fiber Representation'); } else { // Restrict to only 'core fiber' fiberRepController = afqb.three.gui .add(afqb.global.controls.threeControlBox, 'fiberRepresentation', ['core fiber']) .name('Fiber Representation'); } fiberRepController.onFinishChange(function (value) { // Toggle visibility of either core fibers or streamlines if (value === "all fibers") { afqb.three.colorGroup.traverse(afqb.three.makeVisible); afqb.three.greyGroup.traverse(afqb.three.makeVisible); afqb.three.convexGroup.traverse(afqb.three.makeVisible); afqb.three.colorCoreGroup.traverse(afqb.three.makeInvisible); afqb.three.greyCoreGroup.traverse(afqb.three.makeInvisible); afqb.three.colorGroup.traverse(function (child) { if (child instanceof THREE.LineSegments) { afqb.three.mouseoutBundle(child); } }); } else { afqb.three.colorGroup.traverse(afqb.three.makeInvisible); afqb.three.greyGroup.traverse(afqb.three.makeInvisible); afqb.three.convexGroup.traverse(afqb.three.makeInvisible); afqb.three.colorCoreGroup.traverse(afqb.three.makeVisible); afqb.three.greyCoreGroup.traverse(afqb.three.makeVisible); afqb.three.colorCoreGroup.traverse(function (child) { if (child instanceof THREE.Mesh) { afqb.three.mouseoutBundle(child); } }); } // Update the query string afqb.global.updateQueryString( {three: {fiberRepresentation: afqb.global.formatKeyName(value)}} ); }); var guiContainer = document.getElementById('three-gui-container'); guiContainer.appendChild(afqb.three.gui.domElement); afqb.three.gui.close(); }; /** * Initialize the three.js scene for subject's anatomy * * The scene consists of six object groups: * * - afqb.three.brain: the brain surface, loaded from freesurf.OBJ * - afqb.three.colorGroup: fiber bundle streamlines that display when * selected, one object for each bundle * - afqb.three.greyGroup: grey fiber bundles that are always displayed * underneath the color ones, all rendered together as one buffer geometry * - afqb.three.colorCoreGroup: core fibers that display when selected, one * object per bundle * - afqb.three.greyCoreGroup: grey core fibers that are always displayed * underneath the color bundles * - afqb.three.convexGroup: invisible convex hulls, one for each streamline * colorGroup. These are never displayed but hold all of the dom events for * the colorGroup objects. * * @param streamlinesCallback - function to be called after the streamlines * have been loaded from streamlines.json */ afqb.three.init = function (streamlinesCallback) { "use strict"; // contain all bundles in these Group objects afqb.three.colorGroup = new THREE.Group(); afqb.three.greyGroup = new THREE.Group(); afqb.three.colorCoreGroup = new THREE.Group(); afqb.three.greyCoreGroup = new THREE.Group(); afqb.three.convexGroup = new THREE.Group(); // We put the renderer inside a div with id #threejsbrain afqb.three.container = document.getElementById("threejsbrain"); // Get width and height var width = afqb.three.container.clientWidth; var height = afqb.three.container.clientHeight; // Show three.js stats if desired afqb.three.stats = {}; if (afqb.three.settings.showStats) { afqb.three.stats = new Stats(); afqb.three.container.appendChild(afqb.three.stats.dom); } // Set the camera position afqb.three.camera = new THREE.PerspectiveCamera(45, width / height, 1, 2000); afqb.three.camera.position.copy(new THREE.Vector3( afqb.three.settings.cameraPosition.x, afqb.three.settings.cameraPosition.y, afqb.three.settings.cameraPosition.z )); afqb.three.camera.up.set(0, 0, 1); // init scene afqb.three.scene = new THREE.Scene(); // Use ambient light var ambient = new THREE.AmbientLight(0x111111); afqb.three.scene.add(ambient); // And a directional light always pointing from the camera afqb.three.directionalLight = new THREE.DirectionalLight(0xffeedd, 1); afqb.three.directionalLight.position.set( afqb.three.camera.position.x, afqb.three.camera.position.y, afqb.three.camera.position.z ); afqb.three.scene.add(afqb.three.directionalLight); // renderer afqb.three.renderer = new THREE.WebGLRenderer({ alpha: true }); afqb.three.renderer.setSize(width, height); afqb.three.container.appendChild(afqb.three.renderer.domElement); // Add a mouseout listener for the entire 3D view, that will unhighlight // any bundles that didn't have time to register their own mouseout event // before the mouse escaped the container. afqb.three.renderer.domElement.addEventListener("mouseout", function () { var groups = [ afqb.three.colorGroup, afqb.three.colorCoreGroup, afqb.three.greyGroup, afqb.three.greyCoreGroup ]; groups.forEach(function (group) { group.traverse(function (child) { if (child instanceof THREE.LineSegments || child instanceof THREE.Mesh) { afqb.three.mouseoutBundle(child); } }); }); }); // model // load brain surface using OBJLoader var loader = new THREE.OBJLoader(); loader.load('data/freesurf.OBJ', function (object) { afqb.three.brain = object; afqb.three.rh = object.getObjectByName('rh.pial.asc'); afqb.three.lh = object.getObjectByName('lh.pial.asc'); object.traverse(function (child) { if (child instanceof THREE.Mesh) { child.material.depthWrite = true; child.material.transparent = true; child.rotation.x = Math.PI / 2; // Scale set by trial and error child.scale.set(1.75, 1.75, 1.75); // Set render order so that it doesn't occlude the // fiber bundles underneath when opacity is decreased child.renderOrder = 3; child.traverse(function (object) { object.renderOrder = 3; }); } }); // Separate the hemispheres a little bit so that depthWrite works as expected. afqb.three.lh.translateX(-0.05); afqb.three.rh.translateX(0.05); afqb.three.lh.material.opacity = afqb.three.settings.lHOpacity; afqb.three.rh.material.opacity = afqb.three.settings.rHOpacity; afqb.three.lh.material.color.setHex(0xe8e3d3); afqb.three.rh.material.color.setHex(0xe8e3d3); afqb.three.scene.add(object); }); // init dom events for later var domEvents = new THREEx.DomEvents(afqb.three.camera, afqb.three.renderer.domElement); // load fiber bundle using jQuery $.getJSON("data/streamlines.json", function (json) { var names = afqb.plots.tracts.map(function(name) { return afqb.global.formatKeyName(name); }); // init streamlinesExist to false and set to true later only if we find them var streamlinesExist = false; // Iterate through the bundles Object.keys(json).forEach(function (bundleKey) { var oneBundle = json[bundleKey]; var keyName = afqb.global.formatKeyName(bundleKey); var index = names.indexOf(keyName); // Retrieve the core fiber and then delete it from the bundle object var coreFiber = oneBundle['coreFiber']; delete oneBundle['coreFiber']; var corePath = coreFiber.map(function (element) { return new THREE.Vector3(element[0], element[1], element[2]); }); var coreCurve = new THREE.CatmullRomCurve3(corePath); var coreGeometry = new THREE.TubeBufferGeometry(coreCurve, 100, 2.8, 8, false); var greyCoreMaterial = new THREE.MeshBasicMaterial({ opacity: afqb.three.settings.fiberOpacity, transparent: true, depthWrite: true }); greyCoreMaterial.color.setHex(0x444444); var highlightCoreMaterial = new THREE.MeshBasicMaterial({ opacity: 1.0, // afqb.three.settings.highlightOpacity, transparent: false // true }); highlightCoreMaterial.color.setHex( afqb.global.highlightColors[index] ); var greyMesh = new THREE.Mesh(coreGeometry, greyCoreMaterial); greyMesh.scale.set(0.05,0.05,0.05); greyMesh.position.set(0, 0.8, -0.5); // Record some useful info for later greyMesh.name = keyName; greyMesh.defaultMaterial = greyCoreMaterial; greyMesh.highlightMaterial = highlightCoreMaterial; afqb.three.greyCoreGroup.add(greyMesh); var colorCoreMaterial = new THREE.MeshBasicMaterial({ opacity: afqb.three.settings.colorOpacity, transparent: true, depthWrite: true }); colorCoreMaterial.color.setHex(afqb.global.colors[index]); var tubeSegments = 100; var radiusSegments = 8; coreGeometry = new THREE.TubeBufferGeometry(coreCurve, tubeSegments, 3, radiusSegments, false); var colorMesh = new THREE.Mesh( coreGeometry, colorCoreMaterial ); // Set scale to match the brain surface, // (determined by trial and error) colorMesh.scale.set(0.05,0.05,0.05); colorMesh.position.set(0, 0.8, -0.5); // Record some useful info for later colorMesh.name = keyName; colorMesh.geometryLength = 2 * tubeSegments * radiusSegments * 3; colorMesh.defaultMaterial = colorCoreMaterial; colorMesh.highlightMaterial = highlightCoreMaterial; afqb.three.colorCoreGroup.add(colorMesh); // Now that we've dealt with the core fibers, see if there's anything // left over. If so, these are the streamlines. streamlinesExist = !$.isEmptyObject(oneBundle) || streamlinesExist; if (streamlinesExist) { // fiberKeys correspond to individual fibers in each fiber bundle // They may not be consecutive keys depending on the // downsampling of the input data, hence the need for `nFibers` // and `iFiber` // First loop simply counts the number of fibers in this bundle // and asserts that each individual fiber has been resampled to // the same size. var nFibers = 0; var firstKey = Object.keys(oneBundle)[0]; var referenceLength = oneBundle[firstKey].length; Object.keys(oneBundle).forEach(function (fiberKey) { ++nFibers; var oneFiber = oneBundle[fiberKey]; if (oneFiber.length !== referenceLength) { var errMessage = ('Streamlines have unexpected length. ' + 'faPlotLength = ' + referenceLength + ', ' + 'but oneFiber.length = ' + oneFiber.length); if (typeof Error !== 'undefined') { throw new Error(errMessage); } throw errMessage; } }); // Positions will hold x,y,z vertices for each fiber var positions = new Float32Array( nFibers * (referenceLength - 1) * 3 * 2 ); // Outer loop is along the length of each fiber. // Inner loop cycles through each fiber group. // This is counter-intuitive but we want spatial locality to // be preserved in index locality. This will make brushing // much easier in the end. var points = []; Object.keys(oneBundle).forEach(function (fiberKey, iFiber) { var oneFiber = oneBundle[fiberKey]; for (var i = 0; i < referenceLength - 1; i++) { // Vertices must be added in pairs. Later a // line segment will be drawn in between each pair. // This requires some repeat values to have a // continuous line but will allow us to avoid // having the beginning and end of the fiber // connect. var offset = i * nFibers * 6 + iFiber * 6; positions.set(oneFiber[i].concat(oneFiber[i + 1]), offset); points.push(new THREE.Vector3( oneFiber[i][0], oneFiber[i][1], oneFiber[i][2] )); } }); // Create a buffered geometry and line segments from these // positions. Buffered Geometry is slightly more performant // and necessary to interact with d3 brushing later on. var bundleGeometry = new THREE.BufferGeometry(); bundleGeometry.addAttribute( 'position', new THREE.BufferAttribute(positions, 3) ); var greyLineMaterial = new THREE.LineBasicMaterial({ opacity: afqb.three.settings.fiberOpacity, linewidth: afqb.three.settings.fiberLineWidth, transparent: true, depthWrite: true }); greyLineMaterial.color.setHex(0x444444); var highlightLineMaterial = new THREE.LineBasicMaterial({ opacity: afqb.three.settings.highlightOpacity, linewidth: afqb.three.settings.highlightLineWidth, transparent: true }); highlightLineMaterial.color.setHex(afqb.global.highlightColors[index]); var greyLine = new THREE.LineSegments(bundleGeometry, greyLineMaterial); greyLine.scale.set(0.05, 0.05, 0.05); greyLine.position.set(0, 0.8, -0.5); // Record some useful info for later greyLine.name = keyName; greyLine.nFibers = nFibers; greyLine.defaultMaterial = greyLineMaterial; greyLine.highlightMaterial = highlightLineMaterial; afqb.three.greyGroup.add(greyLine); var colorLineMaterial = new THREE.LineBasicMaterial({ opacity: afqb.three.settings.colorOpacity, linewidth: afqb.three.settings.colorLineWidth, transparent: true, depthWrite: true }); colorLineMaterial.color.setHex(afqb.global.colors[index]); var colorLine = new THREE.LineSegments( bundleGeometry, colorLineMaterial ); // Set scale to match the brain surface, // (determined by trial and error) colorLine.scale.set(0.05, 0.05, 0.05); colorLine.position.set(0, 0.8, -0.5); // Record some useful info for later colorLine.name = keyName; colorLine.nFibers = nFibers; colorLine.defaultMaterial = colorLineMaterial; colorLine.highlightMaterial = highlightLineMaterial; afqb.three.colorGroup.add(colorLine); var convexGeometry = new THREE.ConvexGeometry(points); var convexMaterial = new THREE.MeshBasicMaterial({ opacity: 0, transparent: true, depthWrite: false }); convexMaterial.color.setHex(afqb.global.colors[index]); var convexMesh = new THREE.Mesh(convexGeometry, convexMaterial); convexMesh.scale.set(0.05, 0.05, 0.05); convexMesh.position.set(0, 0.8, -0.5); convexMesh.name = keyName; afqb.three.convexGroup.add(convexMesh); } }); // Now that we know if there are streamlines, build the control panel afqb.three.buildthreeGui(streamlinesExist); // And add event listeners for mouseover, etc. // First add event listeners to all these groups var groups = [ afqb.three.greyCoreGroup, afqb.three.colorCoreGroup, afqb.three.convexGroup ]; groups.forEach(function (group) { group.traverse(function (child) { if (child instanceof THREE.LineSegments || child instanceof THREE.Mesh) { domEvents.addEventListener(child, 'mousemove', function() { afqb.global.mouse.mouseMove = true; }); domEvents.addEventListener(child, 'mousedown', function() { afqb.global.mouse.mouseMove = false; }); domEvents.addEventListener(child, 'mouseover', function() { if(!afqb.global.mouse.isDown) { afqb.three.mouseoverBundle(child); return afqb.three.renderer.render(afqb.three.scene, afqb.three.camera); } }); domEvents.addEventListener(child, 'mouseout', function() { afqb.three.mouseoutBundle(child); return afqb.three.renderer.render(afqb.three.scene, afqb.three.camera); }); domEvents.addEventListener(child, 'mouseup', function() { if(!afqb.global.mouse.mouseMove) { var myBundle = d3.selectAll("input.tracts").filter(function (d) { return afqb.global.formatKeyName(d) === child.name; })[0][0]; myBundle.checked = !myBundle.checked; afqb.plots.settings.checkboxes[myBundle.name] = myBundle.checked; afqb.plots.showHideTractDetails(myBundle.checked, myBundle.name); afqb.three.highlightBundle(myBundle.checked, myBundle.name); // Update the query string var checkboxes = {}; checkboxes[myBundle.name] = myBundle.checked; afqb.global.updateQueryString( {plots: {checkboxes: checkboxes}} ); return afqb.three.renderer.render(afqb.three.scene, afqb.three.camera); } else { afqb.global.mouse.mouseMove = false; } }); } }); }); // Fix the render orders for each group // render orders should satisfy // surface > grey stuff > color stuff afqb.three.greyCoreGroup.renderOrder = 2; afqb.three.greyCoreGroup.traverse(function (object) { object.renderOrder = 2; }); afqb.three.greyGroup.renderOrder = 2; afqb.three.greyGroup.traverse(function (object) { object.renderOrder = 2; }); afqb.three.colorCoreGroup.renderOrder = 1; afqb.three.colorCoreGroup.traverse(function (object) { object.renderOrder = 1; }); afqb.three.colorGroup.renderOrder = 1; afqb.three.colorGroup.traverse(function (object) { object.renderOrder = 1; }); // Set the initial visibility based on the dat.gui control if (afqb.global.controls.threeControlBox.fiberRepresentation === "all fibers") { afqb.three.colorGroup.traverse(afqb.three.makeVisible); afqb.three.greyGroup.traverse(afqb.three.makeVisible); afqb.three.convexGroup.traverse(afqb.three.makeVisible); afqb.three.colorCoreGroup.traverse(afqb.three.makeInvisible); afqb.three.greyCoreGroup.traverse(afqb.three.makeInvisible); } else { afqb.three.colorGroup.traverse(afqb.three.makeInvisible); afqb.three.greyGroup.traverse(afqb.three.makeInvisible); afqb.three.convexGroup.traverse(afqb.three.makeInvisible); afqb.three.colorCoreGroup.traverse(afqb.three.makeVisible); afqb.three.greyCoreGroup.traverse(afqb.three.makeVisible); } // Finally add fiber bundle group to the afqb.three.scene. afqb.three.scene.add(afqb.three.colorGroup); afqb.three.scene.add(afqb.three.greyGroup); afqb.three.scene.add(afqb.three.colorCoreGroup); afqb.three.scene.add(afqb.three.greyCoreGroup); afqb.three.scene.add(afqb.three.convexGroup); // Use the callback function if (streamlinesCallback) { streamlinesCallback(null); } // Restore brushing if refreshing from a querystring afqb.three.brushOn3D(); }); // Add a window resize event listener window.addEventListener('resize', afqb.three.onWindowResize, false); // Add orbit controls (disabling keys and updating lighting when the camera moves) afqb.three.orbitControls = new THREE.OrbitControls(afqb.three.camera, afqb.three.renderer.domElement); afqb.three.orbitControls.addEventListener('change', afqb.three.lightUpdate); afqb.three.orbitControls.enableKeys = false; // We want to update the camera position in the querystring but only when // the user is done moving the camera, so attach this to the 'click' event // listener afqb.three.renderer.domElement.addEventListener('click', function() { // Update the query string var cameraPosition = afqb.three.camera.position.clone(); afqb.global.updateQueryString( {three: {cameraPosition: cameraPosition}} ); }, false); }; /** * Resize the three.js window on full window resize. */ afqb.three.onWindowResize = function () { "use strict"; var width = afqb.three.container.clientWidth; var height = afqb.three.container.clientHeight; afqb.three.camera.aspect = width / height; afqb.three.camera.updateProjectionMatrix(); afqb.three.renderer.setSize(width, height); }; /** * Define how the scene should update after init. * * Pretty standard here: request animation frame, render, update camera, * update stats if desired. If brushing, use afqb.three.brushOn3D to * change the drawRange of color objects. */ afqb.three.animate = function () { "use strict"; requestAnimationFrame(afqb.three.animate); afqb.three.renderer.render(afqb.three.scene, afqb.three.camera); afqb.three.orbitControls.update(); if (afqb.three.settings.showStats) { afqb.three.stats.update(); } if (afqb.global.mouse.brushing) { afqb.three.brushOn3D(); } }; /** * Update the drawRange of each fiber bundle based on the d3 brushes in the * 2D plots panel. */ afqb.three.brushOn3D = function () { afqb.three.colorGroup.children.forEach(function (element) { // Get the extent of the brushes for this bundle var lo = Math.floor(afqb.plots.settings.brushes[element.name].brushExtent[0]); var hi = Math.ceil(afqb.plots.settings.brushes[element.name].brushExtent[1]) - 1; // loIdx is the low index and count is the number of indices // This is a little sloppy and sometimes the count will be too high // but the visual offset should be minimal. // TODO: Positions come in pairs, with all vertices except the first // and last being repeated. Take this into account to make loIdx and // count correct (not just good enough). var loIdx = lo * element.nFibers * 2; var count = (hi - lo) * element.nFibers * 2; // Set the drawing range based on the brush extent. if (afqb.global.controls.plotsControlBox.brushTract) { element.geometry.setDrawRange(loIdx, count); } else { element.geometry.setDrawRange(0, Infinity); } }); afqb.three.colorCoreGroup.children.forEach(function (element) { // Get the extent of the brushes for this bundle var lo = Math.floor(afqb.plots.settings.brushes[element.name].brushExtent[0]); var hi = Math.ceil(afqb.plots.settings.brushes[element.name].brushExtent[1]); // loIdx is the low index and count is the number of indices var totalLength = element.geometryLength; // loIdx should be the nearest multiple of 3 var loIdx = Math.floor(lo * totalLength / 100.0 / 3) * 3; var count = parseInt((hi - lo) * totalLength / 100.0); // Set the drawing range based on the brush extent. if (afqb.global.controls.plotsControlBox.brushTract) { element.geometry.setDrawRange(loIdx, count); } else { element.geometry.setDrawRange(0, Infinity); } }); }; /** * Update the directional light to always point from camera */ afqb.three.lightUpdate = function () { "use strict"; afqb.three.directionalLight.position.copy(afqb.three.camera.position); }; /** * Visibility toggle function to show/hide core fibers vs streamlines * * @param {object} object - the object to show */ afqb.three.makeVisible = function (object) { object.visible = true; }; /** * Visibility toggle function to show/hide core fibers vs streamlines * * @param {object} object - the object to hide */ afqb.three.makeInvisible = function (object) { object.visible = false; }; /** * Highlight specified bundle based on left panel checkboxes * If state is checked, show the color group and hide the grey group * * @param {string} state - checkbox state, checked or unchecked * @param {string} name - formatted bundle name */ afqb.three.highlightBundle = function (state, name) { "use strict"; var groups = [afqb.three.colorGroup, afqb.three.colorCoreGroup]; groups.forEach(function (group) { // Get the bundle corresponding to this name var bundle = group.children.filter(function (element) { return element.name === name; })[0]; // Toggle visibility if (bundle !== undefined) { if (state) { bundle.traverse(afqb.three.makeVisible) } else { bundle.traverse(afqb.three.makeInvisible) } } }); var groups = [afqb.three.greyGroup, afqb.three.greyCoreGroup]; groups.forEach(function (group) { // Get the bundle corresponding to this name var bundle = group.children.filter(function (element) { return element.name === name; })[0]; // Toggle visibility if (bundle !== undefined) { if (state) { bundle.traverse(afqb.three.makeInvisible) } else { bundle.traverse(afqb.three.makeVisible) } } }); // Render again return afqb.three.renderer.render(afqb.three.scene, afqb.three.camera); }; /** * Restore bundle to original state (checked or unchecked) after mouseout * * @param {string} child - the child object (i.e. bundle) in some group */ afqb.three.mouseoutBundle = function (child) { // Specify streamline groups or core fiber groups depending on the // dat.gui controls if (afqb.global.controls.threeControlBox.fiberRepresentation === 'all fibers') { var groups = [afqb.three.colorGroup, afqb.three.greyGroup]; } else { var groups = [afqb.three.colorCoreGroup, afqb.three.greyCoreGroup]; } groups.forEach(function (group) { // Get the bundle corresponding to this child.name var bundle = group.children.filter(function (element) { return element.name === child.name; })[0]; // Restore the material back to the default if (bundle !== undefined) { bundle.material = bundle.defaultMaterial; } }); // Get the checkbox list bundle info from the child name var myBundle = d3.selectAll("input.tracts").filter(function (d) { return afqb.global.formatKeyName(d) === child.name; })[0][0]; // Restore highlighted state based on the checkbox. // Rendering is taken care of inside this function. afqb.three.highlightBundle(myBundle.checked, myBundle.name); }; /** * Highlight specified bundle based on mouseover * * @param {string} child - the child object (i.e. bundle) in some group */ afqb.three.mouseoverBundle = function (child) { "use strict"; if (afqb.global.controls.threeControlBox.highlight) { // Specify streamline groups or core fiber groups depending on the // dat.gui controls if (afqb.global.controls.threeControlBox.fiberRepresentation === 'all fibers') { var groups = [afqb.three.colorGroup, afqb.three.greyGroup]; } else { var groups = [afqb.three.colorCoreGroup, afqb.three.greyCoreGroup]; } groups.forEach(function (group) { // Get the bundle corresponding to this child.name var bundle = group.children.filter(function (element) { return element.name === child.name; })[0]; // Change the material to the highlight material if (bundle !== undefined) { bundle.material = bundle.highlightMaterial; } }); // Render scene return afqb.three.renderer.render(afqb.three.scene, afqb.three.camera); } };
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/afqbrowser/site/client/js/anatomy.js
anatomy.js
//tractlist js afqb.plots.yzooms = {}; afqb.plots.zoomable = true; afqb.plots.m = {top: 20, right: 10, bottom: 10, left: 25}; afqb.plots.w = 400 - afqb.plots.m.left - afqb.plots.m.right; afqb.plots.h = 350 - afqb.plots.m.top - afqb.plots.m.bottom; afqb.plots.axisOffset = {bottom: 40}; // init variable to hold data later afqb.plots.tractData = d3.map(); afqb.plots.tractMean = d3.nest(); afqb.global.mouse.brushing = false; afqb.plots.lastPlotKey = null; // transition variable for consistency afqb.plots.t = d3.transition().duration(750); /** * Container function which calls other plots functions * once nodes.csv data has been read. * * @param error - Passed to prevent execution in case error occurs * in preceding functions. * @param useless - Obligatory callback argument that we don't use in the * function. * @param {data} data - JavaScript array created by d3.csv(data/nodes.csv). * */ afqb.plots.buildFromNodes = function (error, useless, data) { "use strict"; afqb.plots.buildTractCheckboxes(error, data); afqb.three.initAndAnimate(error); afqb.plots.buildPlotGui(error, data); afqb.plots.ready(error, data); afqb.table.restoreRowSelection(); afqb.plots.updateBrush(); afqb.plots.restoreBrush(); //afqb.plots.draw(); }; afqb.plots.brushes = []; /** * Builds tract selection list in "Bundles" section. Reads * unique tract IDs from nodes.csv and creates selectable * text for each tract. * * @param error - Passed to prevent execution in case error occurs * in preceding functions. * @param {data} data - JavaScript array created by d3.csv(data/nodes.csv). */ afqb.plots.buildTractCheckboxes = function (error, data) { "use strict"; if (error) { throw error; } // Read only the tractID field from nodes.csv afqb.plots.tracts = data.map(function (a) { return a.tractID; }); // Get only the unique entries from the tract list afqb.plots.tracts = [...new Set(afqb.plots.tracts)]; // Populate afqb.plots.settings.brushes using tract names if (!afqb.plots.settings.hasOwnProperty("brushes")) { afqb.plots.settings["brushes"] = {}; } afqb.plots.tracts.forEach(function (element) { var tractName = afqb.global.formatKeyName(element); if (!afqb.plots.settings.brushes.hasOwnProperty(tractName)) { afqb.plots.settings.brushes[tractName] = { brushOn: false, brushExtent: [0, 100] }; } }); //insert tractname checkboxes in the tractlist panel var svg = d3.select('#tractlist').selectAll(".input") .data(afqb.plots.tracts).enter().append('div'); svg.append('input') .attr("type", "checkbox") .attr("class", "tracts") .attr("id", function (d) { return "input-" + afqb.global.formatKeyName(d); }) .attr("name", function (d) { return afqb.global.formatKeyName(d); }); // add label to the checkboxes svg.append('label') .text(function (d) { return d; }) .attr("for", function (d) { return "input-" + afqb.global.formatKeyName(d); }) .attr("id", function (d) { return "label-" + afqb.global.formatKeyName(d); }); //add event handler to the checkbox d3.selectAll(".tracts") .on("change", function () { var state = this.checked; var name = this.name; //call tractdetails handler afqb.plots.settings.checkboxes[name] = state; afqb.plots.showHideTractDetails(state, name); afqb.three.highlightBundle(state, name); // Update the query string var checkboxes = {}; checkboxes[name] = state; afqb.global.updateQueryString( {plots: {checkboxes: checkboxes}} ); }); Object.keys(afqb.plots.settings.checkboxes).forEach(function (key) { var checked = afqb.plots.settings.checkboxes[key]; document.getElementById('input-' + key).checked = checked; }); // all select/un-select all checkbox d3.selectAll("#selectAllTracts") .on("change", function () { var state = this.checked; if (state) { d3.selectAll("input.tracts").each(function () { this.checked = true; afqb.plots.settings.checkboxes[this.name] = this.checked; afqb.plots.showHideTractDetails(this.checked, this.name); afqb.three.highlightBundle(this.checked, this.name); // Update the query string var checkboxes = {}; checkboxes[this.name] = this.checked; afqb.global.updateQueryString( {plots: {checkboxes: checkboxes}} ); }); } else { d3.selectAll("input.tracts").each(function () { this.checked = false; afqb.plots.settings.checkboxes[this.name] = this.checked; afqb.plots.showHideTractDetails(this.checked, this.name); afqb.three.highlightBundle(this.checked, this.name); // Update the query string var checkboxes = {}; checkboxes[this.name] = this.checked; afqb.global.updateQueryString( {plots: {checkboxes: checkboxes}} ); }); } }); var checked = true; d3.selectAll('input.tracts').each(function () { checked = checked && this.checked; }); document.getElementById('selectAllTracts').checked = checked; }; // initialize yScale and yAxis afqb.plots.yScale = d3.scale.linear() .range([afqb.plots.h - afqb.plots.axisOffset.bottom, 0]); afqb.plots.yAxis = d3.svg.axis() .scale(afqb.plots.yScale) .orient("left") .tickSize(0 - afqb.plots.w - 5) .ticks(5); afqb.plots.xAxisScale = d3.scale.linear() .range([afqb.plots.m.left + 30, afqb.plots.w + afqb.plots.m.left + 20]) .domain([0, 100]); /** * Creates line object with appropriate domain and range for each tract. * Called for draw and transformation operations involving subject lines * or mean lines in the 2D plots. * * @param {data} d - Subject or mean data for 2D plot of selected metric * @param {string} id - formatted tract name */ afqb.plots.line = function (d, id){ var line = d3.svg.line() .interpolate("basis") .x(function (d) { if (d.nodeID) { return afqb.plots.xScale[id](+d.nodeID); } else { return afqb.plots.xScale[id](+d.key); } }) .y(function (d) { if (d[afqb.global.controls.plotsControlBox.plotKey]) { return afqb.plots.yScale(+d[afqb.global.controls.plotsControlBox.plotKey]); } else { return afqb.plots.yScale(+d.values.mean); } }) .defined(function (d) { if (d[afqb.global.controls.plotsControlBox.plotKey]) { return !isNaN(d[afqb.global.controls.plotsControlBox.plotKey]); } else { return !isNaN(d.values.mean); } }); return line(d) }; /** * Creates area object with appropriate domain and range for each tract. * Called for draw and transformation operations involving standard deviation * and standard error in 2D plots. * * @param {data} d - Mean data for 2D plot of selected metric * @param {string} id - formatted tract name */ afqb.plots.area = function (d, id) { var area = d3.svg.area() .x(function(d) { return afqb.plots.xScale[id](+d.key) }) .y0(function (d) { if (afqb.global.controls.plotsControlBox.errorType === 'stderr') { return afqb.plots.yScale(+d.values.mean - +d.values.stderr); } else { return afqb.plots.yScale(+d.values.mean - +d.values.std); } }) .y1(function (d) { if (afqb.global.controls.plotsControlBox.errorType === 'stderr') { return afqb.plots.yScale(+d.values.mean + +d.values.stderr); } else { return afqb.plots.yScale(+d.values.mean + +d.values.std); } }); return area(d) }; /** * Builds control panel GUI for metric plots. Allows user * to select which metric to plot, error type (std err or * std dev) for shaded area, subject line opacity, and * whether or not brushing is allowed. * * @param error - Passed to prevent execution in case error occurs * in preceding functions. * @param {object} data - JavaScript object created by d3.csv(data/nodes.csv). */ afqb.plots.buildPlotGui = function (error, data) { "use strict"; if (error) { throw error; } var nonMetricCols = ['subjectID', 'tractID', 'nodeID']; var nodeKeys = Object.keys(data[0]).filter(function (element) { return !nonMetricCols.includes(element); }); var plotKey = nodeKeys.includes(afqb.plots.settings.plotKey) ? afqb.plots.settings.plotKey : nodeKeys[0]; var plotsGuiConfigObj = function () { this.brushTract = afqb.plots.settings.brushTract; this.plotKey = plotKey; this.lineOpacity = parseFloat(afqb.plots.settings.lineOpacity); this.errorType = afqb.plots.settings.errorType; }; afqb.plots.gui = new dat.GUI({ autoplace: false, width: 250, scrollable: false }); var plotsGuiContainer = document.getElementById('plots-gui-container'); plotsGuiContainer.appendChild(afqb.plots.gui.domElement); afqb.global.controls.plotsControlBox = new plotsGuiConfigObj(); // Add key controller afqb.plots.gui .add(afqb.global.controls.plotsControlBox, 'plotKey', nodeKeys) .name('Metric') .onChange(function (value) { d3.csv("data/nodes.csv", function(data, error) { afqb.plots.changePlots(data, error); // update y label d3.selectAll(".y.label").remove(); d3.select("#tractdetails").selectAll("svg").append("text") .attr("text-anchor", "middle") .attr("transform", "translate(" + (afqb.plots.m.left / 2 + 5) + "," + ((afqb.plots.h + afqb.plots.m.top) / 2) + ")rotate(-90)") .attr("class", "y label") .style("stroke", "#888888;") .text(function (d, i) { return value; }); afqb.plots.zoomAxis(); }); }) .onFinishChange(function(value) { // Update the query string afqb.global.updateQueryString( {plots: {plotKey: value}} ); }); // Add error controller afqb.plots.gui .add(afqb.global.controls.plotsControlBox, 'errorType', ['stderr', 'std']) .name('Error Type') .onChange(function () { d3.csv("data/nodes.csv", afqb.plots.changePlots); }) .onFinishChange(function (value) { // Update the query string afqb.global.updateQueryString( {plots: {errorType: value}} ); }); // Add plot opacity controller afqb.plots.gui .add(afqb.global.controls.plotsControlBox, 'lineOpacity') .min(0).max(1) .name('Line Opacity') .onChange(function (value) { d3.select("#tractdetails") .selectAll("svg").selectAll(".tracts") .filter(function () { return (afqb.table.settings.selectedRows[this.id] !== true); }) .filter(function () { return (this.id.indexOf("mean") === -1); }) .select(".line") .style("opacity", value); }) .onFinishChange(function (value) { // Update the query string afqb.global.updateQueryString( {plots: {lineOpacity: value}} ); }); // Add brush controller afqb.plots.gui .add(afqb.global.controls.plotsControlBox, 'brushTract') .name('Brushable Tracts') .onChange(function () { afqb.plots.updateBrush(); afqb.three.brushOn3D(); }) .onFinishChange(function (value) { // Update the query string afqb.global.updateQueryString( {plots: {brushTract: value}} ); }); afqb.plots.gui.close(); }; /** * Generates initial 2D plots. Turns nodes.csv into nested * json object. This object is used to determine x/y * range, plot subject lines, and calculate mean and * error for the default metric. * * @param error - Passed to prevent execution in case error occurs * in preceding functions. * @param {data} data - JavaScript array created by d3.csv(data/nodes.csv). */ afqb.plots.ready = function (error, data) { "use strict"; if (error) { throw error; } var plotKey = afqb.global.controls.plotsControlBox.plotKey; afqb.plots.lastPlotKey = plotKey; data.forEach(function (d) { if (typeof d.subjectID === 'number') { d.subjectID = "s" + afqb.global.formatKeyName(d.subjectID.toString()); } else { d.subjectID = afqb.global.formatKeyName(d.subjectID); } }); data = data.filter(function (d) { return Boolean(d[plotKey]); }); afqb.plots.tractData = d3.nest() .key(function (d) { return d.tractID; }) .key(function (d) { return d.subjectID; }) .entries(data); // compute mean and error afqb.plots.tractMean = d3.nest() .key(function (d) { return d.tractID; }) .key(function (d) { return d.nodeID; }) .rollup(function (v) { return { mean: d3.mean(v, function (d) { return +d[plotKey];}), stderr: (d3.deviation(v, function (d) { return +d[plotKey]; }) || 0.0)/Math.sqrt(v.length), std: (d3.deviation(v, function (d) { return +d[plotKey]; }) || 0.0) }; }) .entries(data); // initialize xScale dict afqb.plots.xScale = {}; // set x and y domains for the tract plots afqb.plots.tractData.forEach(function (d,i) { /*var len = 1; d.values.forEach(function (d){ if (d.values.length > len) { len = d.values.length; } });*/ var len = afqb.plots.tractMean[i].values.length; var id = afqb.global.formatKeyName(afqb.plots.tracts[i]); // Subject to ordering errors since we call afqb.plots.xScale[id] = d3.scale.linear() .range([afqb.plots.m.left + 30, afqb.plots.w + afqb.plots.m.left + 20]) .domain([0, len]); }); afqb.plots.yScale.domain(d3.extent(data, function (d) { return +d[plotKey]; })); afqb.plots.yAxis.scale(afqb.plots.yScale); afqb.plots.yzooms[plotKey] = d3.behavior.zoom() .y(afqb.plots.yScale) .on("zoom", afqb.plots.zoomable ? afqb.plots.zoomAxis : null) .on("zoomend",afqb.plots.zoomable ? afqb.plots.draw : null); // If we've already stored this type of plot's zoom settings, recover them if (afqb.plots.settings.zoom[plotKey] && afqb.plots.settings.zoom[plotKey].hasOwnProperty("scale") && afqb.plots.settings.zoom[plotKey].hasOwnProperty("translate")) { afqb.plots.yzooms[plotKey].scale( parseFloat(afqb.plots.settings.zoom[plotKey].scale) || 1); afqb.plots.yzooms[plotKey].translate( afqb.plots.settings.zoom[plotKey].translate.map(parseFloat) || [0, 0]); } else { // We need to store this for later use afqb.plots.settings.zoom[plotKey] = {}; afqb.plots.settings.zoom[plotKey].scale = afqb.plots.yzooms[plotKey].scale(); afqb.plots.settings.zoom[plotKey].translate = afqb.plots.yzooms[plotKey].translate(); } //initialize panels for each tract - and attach tract data with them var trPanels = d3.select("#tractdetails").selectAll("svg").data(afqb.plots.tractData); trPanels.enter().append("svg") .attr("id", function (d,i) { return "tract-" + afqb.global.formatKeyName(afqb.plots.tracts[i]); }) .attr("name", function (d,i) { return afqb.global.formatKeyName(afqb.plots.tracts[i]); }) .attr("width", afqb.plots.w + afqb.plots.m.left + afqb.plots.m.right + 40) .attr("height", afqb.plots.h + afqb.plots.m.top + afqb.plots.m.bottom + afqb.plots.axisOffset.bottom) .attr("display", "none") .append("g") .attr("transform", "translate(" + afqb.plots.m.left + "," + afqb.plots.m.top + ")") //y-axis .append("g") .attr("class", "y axis") .attr("transform", "translate(" + afqb.plots.m.left + ",0)") .call(afqb.plots.yAxis); // y axis label trPanels.append("text") .attr("text-anchor", "middle") .attr("transform", "translate("+ (afqb.plots.m.left/2+5) +","+ ((afqb.plots.h+afqb.plots.m.top)/2)+")rotate(-90)") .attr("class", "y label") .style("stroke", "#888888;") .text(function (d,i) { return afqb.global.controls.plotsControlBox.plotKey}); trPanels.append("svg:rect") .attr("class", "zoom y box") .attr("width", afqb.plots.m.left+20) .attr("height", afqb.plots.h - afqb.plots.m.top - afqb.plots.m.bottom) .style("visibility", "hidden") .attr("pointer-events", "all") .style("cursor", "row-resize") .call(afqb.plots.yzooms[plotKey]); //x-axis trPanels.select("g").each(function (d) { var g = d3.select(this); var id = afqb.global.formatKeyName(d.key); var xAxis = d3.svg.axis() .scale(afqb.plots.xAxisScale) //afqb.plots.xScale[id]) .orient("bottom") .tickPadding(8) .ticks(5); g.append("g") .attr("class", "x axis") .attr("transform", "translate(-20," + (afqb.plots.h - afqb.plots.axisOffset.bottom) + ")") .call(xAxis); }); trPanels.append("rect") .attr("class", "plot") .attr("width", afqb.plots.w + afqb.plots.m.left + afqb.plots.m.right + 30) .attr("height", afqb.plots.h + afqb.plots.m.top + afqb.plots.m.bottom + 15) .attr("x", 0) .attr("y", 0) .style("stroke", function (d,i) { return afqb.global.d3colors[i]; }) .style("fill", "none") .style("stroke-width", 2); trPanels.append("text") .attr("class", "plot_text") .attr("text-anchor", "middle") .attr("transform", "translate("+ (afqb.plots.w-afqb.plots.m.left) +","+ ((afqb.plots.h+afqb.plots.m.bottom+20))+")") .style("text-anchor", "end") .style("stroke", "#888888;") .text("% Distance Along Fiber Bundle"); // add tract name to top corner trPanels.append("text") .attr("class", "plot_text") .attr("text-anchor", "end") .attr("transform", "translate("+ (afqb.plots.w + afqb.plots.m.right + 30) +","+(afqb.plots.m.top)+")") .style("stroke", function(d,i){return afqb.global.d3colors[i];} ) .style("fill", function(d,i){return afqb.global.d3colors[i];} ) .text(function(d,i) { return afqb.plots.tracts[i]; }); trPanels.append("text") .attr("id", function (d,i) { return "brush-ext-" + afqb.global.formatKeyName(afqb.plots.tracts[i]); }) .attr("class", "brushExt") .attr("text-anchor", "end") .attr("transform", "translate("+ (afqb.plots.w + afqb.plots.m.right + 30) +","+(afqb.plots.m.top+15)+")") .style("stroke", function(d,i){return afqb.global.d3colors[i];} ) .style("fill", function(d,i){return afqb.global.d3colors[i];} ); // append g elements to each tract for error, subject lines, and mean lines trPanels.append("g").attr("id", "error-area"); trPanels.append("g").attr("id", "subject-lines"); trPanels.append("g").attr("id", "mean-lines"); // associate tractsline with each subject trPanels.each(function (data) { var id = afqb.global.formatKeyName(data.key); var tractLines = d3.select(this).select("#subject-lines").selectAll(".tracts").data(data.values); tractLines.enter().append("g") .attr("class", "tracts") .attr("id", function (d) { return d.values[0].subjectID; }) .on("mouseover", mouseover) .on("mousemove", mouseover) .on("mouseout", mouseout) .on("click", onclick); tractLines.append("path") .attr("class", "line") .attr("d", function (d) {return afqb.plots.line(d.values, id);}) .style("opacity", afqb.global.controls.plotsControlBox.lineOpacity) .style("stroke-width", "1px"); }); // Select existing g element for error area d3.select("#tractdetails").selectAll("svg").select("#error-area") .datum(afqb.plots.tractMean) .attr("class", "tracts means") //.attr("id", "mean0") // Append error shading .append("path") .attr("class", "area") .attr("d", function(d,i) { var id = afqb.global.formatKeyName(d[i].key); return afqb.plots.area(d[i].values, id); }) .style("opacity", 0.4); // Select existing g element for mean lines d3.select("#tractdetails").selectAll("svg").select("#mean-lines") .datum(afqb.plots.tractMean) .attr("class", "tracts means") //.attr("id", "mean0") // append mean lines .append("path") .attr("class", "line") .attr("d", function(d,i) { var id = afqb.global.formatKeyName(d[i].key); return afqb.plots.line(d[i].values, id); }) .style("opacity", 0.99) .style("stroke-width", "3px"); // Define the div for the tooltip var tt = d3.select("#tractdetails").append("div") .attr("class", "tooltip") .style("opacity", 0); function mouseover(d) { if (!afqb.global.mouse.brushing) { if ($("path",this).css("stroke-width") === "1px") { // uses the stroke-width of the line clicked on to // determine whether to turn the line on or off d3.selectAll('#' + this.id) .selectAll('path') .style("opacity", 1) .style("stroke-width", "1.1px"); } // save self as the 'this' for the mouseover function's context var self = this; // only show tooltip if mousing over selected lines if ($("path",this).css("stroke-width") === "2.1px") { // calculate the x,y coordinates close to the mouse var key = d3.select(self.parentNode).data()[0].key; var fkey = afqb.global.formatKeyName(key); var x0 = afqb.plots.xScale[fkey].invert(d3.mouse(this)[0]); var nodeIndex = Math.ceil(x0); var y0 = d.values[nodeIndex][afqb.global.controls.plotsControlBox.plotKey] // get the correct tract name for this plot var plotIdx = 0; afqb.plots.tractMean.forEach(function(val, idx){ if (val.key === key){ plotIdx = idx } }); // store the sort key, used for coloring if the table is sorted var sortKey = afqb.table.settings.sort.key; // initialize the variable for the z score var z0 = {}; // HACK: the structure of afqb.plots.tractMean varies depending on whether or not the table is sorted. // the check in the if statement checks whether or not we need to calculate z-scores against multiple groups if (Array.isArray(afqb.plots.tractMean[0].values[0].values)) { // for each group in afqb.plots.tractMean[plotIdx].values, calculate and score the y val afqb.plots.tractMean[plotIdx].values.forEach(function(val, idx, arr){ z0[idx] = (y0 - val.values[nodeIndex].values.mean) / val.values[nodeIndex].values.std z0[idx] = d3.format(",.2f")(z0[idx]) }) } else { // the table has NOT been sorted, calculate only 1 z-score var val = afqb.plots.tractMean[plotIdx].values // console.log("val[nodeIndex]", val[nodeIndex], val) z0[0] = (y0 - val[nodeIndex].values.mean) / val[nodeIndex].values.std z0[0] = d3.format(",.2f")(z0[0]) } // if the table hasn't been sorted, then afqb.table.groupScale and afqb.table.ramp are null. // define as functions that return default values if (afqb.table.groupScale === null){ afqb.table.groupScale = function(){return 0} } if (afqb.table.ramp == null){ afqb.table.ramp = function(){return "black"} } // get the subject's metadata from the table // used later to color the subject_id heading var tableVal = {}; afqb.table.subData.forEach(function(val){ if (val.subjectID === self.id){ tableVal = val } }); // select the tooltip, make it visible, format the HTML, and set the position d3.select("#tractdetails").select(".tooltip") .style("opacity", 1) .html(function(){ // get the label color from the ramp and groupScale functions var labelColor = afqb.table.ramp(afqb.table.groupScale(tableVal[sortKey])); // Add the title text below: var h = '<div class="titleTip"><b style="color:COLOR">'.replace("COLOR",labelColor)+self.id+ "</b>" + "<br>Node: " + nodeIndex + '<br><hr></div>'; // for each key in the z-score dict, format HTML var Nzkeys = Object.keys(z0).length for (key in z0){ // if a color is needed, format the heading: if (sortKey){ // TODO: this if for getting quantiles of sort key afqb.table.groupScale.quantiles() // JK: above line does'nt work anymore?? console.log("afqb.table.groupScale", afqb.table.groupScale); var quantiles = [] try { var Q = afqb.table.groupScale.quantiles(); //[0.5] //BRK this is just for tests. afqb.table.groupScale.quantiles() doesn't work?? for (var i=0; i<Q.length; i += 1){ quantiles.push(d3.format(",.2f")(Q[i])); } } catch (e) { console.log("no quantiles"); } finally { } var sortHeading = sortKey if (key == 0) { if (quantiles[key]){ sortHeading += ' < ' + quantiles[key]; } } else if (key != Nzkeys - 1) { sortHeading = quantiles[key - 1] + " < " + sortHeading + " < " + quantiles[key]; } else { if (quantiles[key - 1]){ sortHeading = quantiles[key - 1] + " < " + sortHeading; } } h += '<span style="color:COLOR">SORT</span><br>'.replace("SORT", sortHeading).replace("COLOR", afqb.table.ramp(key)) } // finally, add the z-score value h += '<div class="zTip"><span style="font-size:1.2em;margin-top=3px;">z = VAL</span><br></div>'.replace("VAL", z0[key]); } return h }) .style("left", (d3.event.pageX) + "px") .style("top", (d3.event.pageY - 28) + "px"); } // end if statement for tooltip if (afqb.global.mouse.isDown) { if ($("path",this).css("stroke-width") === "2.1px") { afqb.table.settings.selectedRows[this.id] = false; //uses the opacity of the row for selection and deselection d3.selectAll('#' + this.id) .selectAll('path') .style("opacity", afqb.global.controls.plotsControlBox.lineOpacity) .style("stroke-width", "1px"); d3.selectAll('#' + this.id) .selectAll('g') .style("opacity", 0.3); } else { afqb.table.settings.selectedRows[this.id] = true; d3.selectAll('#' + this.id) .selectAll('path') .style("opacity", 1) .style("stroke-width", "2.1px"); d3.selectAll('#' + this.id) .selectAll('g') .style("opacity", 1); } // Update the query string var selectedRows = {}; selectedRows[this.id] = afqb.table.settings.selectedRows[this.id]; afqb.global.updateQueryString( {table: {selectedRows: selectedRows}} ); } } } function onclick() { if (!afqb.global.mouse.brushing) { if ($("path",this).css("stroke-width") === "2.1px") { // uses the stroke-width of the line clicked on // to determine whether to turn the line on or off afqb.table.settings.selectedRows[this.id] = false; d3.selectAll('#' + this.id) .selectAll('path') .style("stroke-width", "1.1px"); d3.selectAll('#' + this.id) .selectAll('g') .style("opacity", 0.3); } else if ($("path",this).css("stroke-width") === "1.1px") { afqb.table.settings.selectedRows[this.id] = true; d3.selectAll('#' + this.id) .selectAll('path') .style("opacity", 1) .style("stroke-width", "2.1px"); d3.selectAll('#' + this.id) .selectAll('g') .style("opacity", 1); } else if ($("path",this).css("opacity") === afqb.global.controls.plotsControlBox.lineOpacity) { afqb.table.settings.selectedRows[this.id] = true; d3.selectAll('#' + this.id) .selectAll('path') .style("opacity", 1) .style("stroke-width", "2.1px"); d3.selectAll('#' + this.id) .selectAll('g') .style("opacity", 1); } // Update the query string var selectedRows = {}; selectedRows[this.id] = afqb.table.settings.selectedRows[this.id]; afqb.global.updateQueryString( {table: {selectedRows: selectedRows}} ); } } function mouseout() { if (!afqb.global.mouse.brushing) { if ($("path",this).css("stroke-width") === "1.1px") { // uses the stroke-width of the line clicked on to // determine whether to turn the line on or off d3.selectAll('#' + this.id) .selectAll('path') .style("opacity", afqb.global.controls.plotsControlBox.lineOpacity) .style("stroke-width", "1px"); } // remove the tooltip d3.select("#tractdetails").select(".tooltip") .style("opacity", 0); } } d3.select("#tractdetails").selectAll("svg").each(function (d) { afqb.plots.newBrush(afqb.global.formatKeyName(d.key)); }); }; /** * Updates plots on sort or metric change. If data is * sorted, means are calculated for each group of subjects * defined by the sort. If metric changes, axes and means * are updated accordingly. Calls afqb.plots.draw() and * afqb.plots.zoomAxis(). * * @param error - Passed to prevent execution in case error occurs * in preceding functions. * @param {data} data - JavaScript array created by d3.csv(data/nodes.csv). * */ afqb.plots.changePlots = function (error, data) { "use strict"; if (error) { throw error; } var plotKey = afqb.global.controls.plotsControlBox.plotKey; afqb.plots.lastPlotKey = plotKey; data.forEach(function (d) { if (typeof d.subjectID === 'number'){ d.subjectID = "s" + afqb.global.formatKeyName(d.subjectID.toString()); } else { d.subjectID = afqb.global.formatKeyName(d.subjectID); } }); data = data.filter(function (d) { return Boolean(d[plotKey]); }); afqb.plots.tractData = d3.nest() .key(function (d) { return d.tractID; }) .key(function (d) { return d.subjectID; }) .entries(data); if (afqb.table.splitGroups) { afqb.plots.tractMean = d3.nest() .key(function (d) { return d.tractID; }) .key(function (d) { return afqb.table.subGroups[d.subjectID]; }) .key(function (d) { return d.nodeID; }) .rollup(function (v) { return{ mean: d3.mean(v, function (d) { return +d[plotKey];}), stderr: (d3.deviation(v, function (d,i) { return +d[plotKey]; }) || 0.0)/Math.sqrt(v.length), std: (d3.deviation(v, function (d) { return +d[plotKey]; }) || 0.0) }; }) .entries(data); for (let iTract = 0; iTract < afqb.plots.tractMean.length; iTract++) { let index = afqb.plots.tractMean[iTract].values .findIndex(item => item.key === "null"); if (index !== -1) { afqb.plots.tractMean[iTract].values.splice(index, 1); } } } else { afqb.plots.tractMean = d3.nest() .key(function (d) { return d.tractID; }) .key(function (d) { return d.nodeID; }) .rollup(function (v) { return{ mean: d3.mean(v, function (d) { return +d[plotKey];}), stderr: (d3.deviation(v, function (d) { return +d[plotKey]; }) || 0.0)/Math.sqrt(v.length), std: (d3.deviation(v, function (d) { return +d[plotKey]; }) || 0.0) }; }) .entries(data); for (let iTract = 0; iTract < afqb.plots.tractMean.length; iTract++) { let index = afqb.plots.tractMean[iTract].values .findIndex(item => item.key === "null"); if (index !== -1) { afqb.plots.tractMean[iTract].values.splice(index, 1); } } } // update axes based on selected data afqb.plots.yScale.domain(d3.extent(data, function (d) { return +d[plotKey]; })); afqb.plots.yAxis.scale(afqb.plots.yScale); // Select the section we want to apply our changes to var svg = d3.select("#tractdetails").selectAll("svg") .data(afqb.plots.tractData).transition(); // update y zoom for new axis afqb.plots.yzooms[plotKey] = d3.behavior.zoom() .y(afqb.plots.yScale) .on("zoom", afqb.plots.zoomable ? afqb.plots.zoomAxis : null) .on("zoomend", afqb.plots.zoomable ? afqb.plots.draw : null); // If we've already stored this type of plot's zoom settings, recover them if (afqb.plots.settings.zoom[plotKey]) { afqb.plots.yzooms[plotKey].scale( parseFloat(afqb.plots.settings.zoom[plotKey].scale) || 1); afqb.plots.yzooms[plotKey].translate( afqb.plots.settings.zoom[plotKey].translate.map(parseFloat) || [0, 0]); } else { // We need to store this for later use afqb.plots.settings.zoom[plotKey] = {}; afqb.plots.settings.zoom[plotKey].scale = afqb.plots.yzooms[plotKey].scale(); afqb.plots.settings.zoom[plotKey].translate = afqb.plots.yzooms[plotKey].translate(); } d3.select("#tractdetails").selectAll("svg") .selectAll(".zoom.y.box").call(afqb.plots.yzooms[plotKey]);//.remove(); afqb.plots.draw(); afqb.plots.zoomAxis(); }; /** * Redraws subject and mean lines after new metric or * group selections. Calls afqb.plots.zoomAxis(). * */ afqb.plots.draw = function() { "use strict"; var plotKey = afqb.global.controls.plotsControlBox.plotKey; // Update the zoom settings to reflect the latest zoom parameters afqb.plots.settings.zoom[plotKey].scale = afqb.plots.yzooms[plotKey].scale(); afqb.plots.settings.zoom[plotKey].translate = afqb.plots.yzooms[plotKey].translate(); // Update the query string var zoom = {}; zoom[plotKey] = afqb.plots.settings.zoom[plotKey]; afqb.global.updateQueryString( {plots: {zoom: zoom}} ); // JOIN new data with old elements. var trLines = d3.select("#tractdetails").selectAll("svg").select("#subject-lines") .data(afqb.plots.tractData).selectAll(".tracts") .data(function (d) { return d.values; }).transition(); //.select("#path").attr("d", function (d) { return d.values; }); trLines.select("path") .duration(0) .attr("d", function (d) { var id = afqb.global.formatKeyName(d.values[0].tractID); return afqb.plots.line(d.values, id); }); // Remove old meanlines d3.select("#tractdetails").selectAll("svg").select("#error-area").selectAll(".area").remove(); d3.select("#tractdetails").selectAll("svg").select("#mean-lines").selectAll(".line").remove(); if (afqb.table.splitGroups) { var meanLines = d3.select("#tractdetails").selectAll("svg") .selectAll(".means") .data(function (d) { return afqb.plots.tractMean.filter(function(element) { return element.key === d.key; })[0].values; }); // Join new afqb.plots.tractMean data with old meanLines elements d3.select("#tractdetails").selectAll("svg").select("#error-area").selectAll("path") .data(function (d) { return afqb.plots.tractMean.filter(function(element) { return element.key === d.key; })[0].values; }) .enter() //.attr("id", function(d) { //return "mean" + d.key;}) // Append error area .append("path") .attr("class", "area") .attr("d", function(d) { var id = afqb.global.formatKeyName(this.parentNode.parentNode.id).replace('tract-', ''); return afqb.plots.area(d.values, id); }) .style("opacity", 0.25); d3.select("#tractdetails").selectAll("svg").select("#mean-lines").selectAll("path") .data(function (d) { return afqb.plots.tractMean.filter(function(element) { return element.key === d.key; })[0].values; }) .enter() //.attr("id", function(d) { //return "mean" + d.key;}) // Append mean lines .append("path") .attr("class", "line") .attr("d", function(d) { var id = afqb.global.formatKeyName(this.parentNode.parentNode.id).replace('tract-', ''); return afqb.plots.line(d.values, id); }) .style("opacity", 0.99) .style("stroke-width", "3px"); // set mean colors afqb.table.subData.forEach(afqb.global.idColor); // color lines d3.select("#tractdetails").selectAll("svg").select("#error-area").selectAll(".area") .style("fill", function (d, i) { return afqb.table.ramp(i); }); d3.select("#tractdetails").selectAll("svg").select("#mean-lines").selectAll(".line") .style("stroke", function (d, i) { return afqb.table.ramp(i); }); } else { // Gray meanLines for unsorted 'Plot Type' change // Select existing g element for error area d3.select("#tractdetails").selectAll("svg").select("#error-area") .datum(afqb.plots.tractMean) .attr("class", "tracts means") //.attr("id", "mean0") // Append error shading .append("path") .attr("class", "area") .attr("d", function(d,i) { var id = afqb.global.formatKeyName(d[i].key); return afqb.plots.area(d[i].values, id); }) .style("opacity", 0.4); // Select existing g element for mean lines d3.select("#tractdetails").selectAll("svg").select("#mean-lines") .datum(afqb.plots.tractMean) .attr("class", "tracts means") //.attr("id", "mean0") // append mean lines .append("path") .attr("class", "line") .attr("d", function(d,i) { var id = afqb.global.formatKeyName(d[i].key); return afqb.plots.line(d[i].values, id); }) .style("opacity", 0.99) .style("stroke-width", "3px"); } afqb.plots.zoomAxis(); }; /** * Updates y axis zoom on sort or metric change. * */ afqb.plots.zoomAxis = function () { "use strict"; d3.selectAll('.y.axis').call(afqb.plots.yAxis); }; /** * Initializes brush elements for 2D plots. Brush used to * highlight a portion of tract in the "Anatomy" panel. * * @param {string} name - formatted tract name. */ afqb.plots.newBrush = function (name) { "use strict"; var brush = d3.svg.brush() .x(afqb.plots.xAxisScale) .on("brush", brushed) .on("brushstart", brushStart) .on("brushend", brushEnd); function brushed() { var targetName = this.parentElement.getAttribute("name"); var targetBrush = afqb.plots.brushes.filter(function (b) { return b.name === targetName; })[0].brush; afqb.plots.settings.brushes[targetName].brushOn = !targetBrush.empty(); if (targetBrush.empty()) { afqb.plots.settings.brushes[targetName].brushExtent = [0, 100]; d3.select("#brush-ext-" + targetName).text(""); } else { afqb.plots.settings.brushes[targetName].brushExtent = targetBrush.extent(); var formatter = d3.format(".0f"); var ext = targetBrush.extent(); d3.select("#brush-ext-" + targetName).text("(" + formatter(ext[0]) + ", " + formatter(ext[1]) + ")"); } } function brushStart() { afqb.global.mouse.brushing = true; } function brushEnd() { afqb.global.mouse.brushing = false; afqb.three.brushOn3D(); // Update the query string var targetName = this.parentElement.getAttribute("name"); var brushes = {}; brushes[targetName] = afqb.plots.settings.brushes[targetName]; afqb.global.updateQueryString( {plots: {brushes: brushes}} ); } afqb.plots.brushes.push({name: name, brush: brush}); }; /** * Updates brush elements for 2D plots. Brush used to * highlight a portion of tract in the "Anatomy" panel. * */ afqb.plots.updateBrush = function () { "use strict"; if (afqb.global.controls.plotsControlBox.brushTract) { var callBrush = function () { var targetName = this.parentElement.getAttribute("name"); var targetBrush = afqb.plots.brushes.filter(function (b) { return b.name === targetName; })[0].brush; d3.select(this).call(targetBrush); }; var brushg = d3.select("#tractdetails").selectAll("svg") .append("g") .attr("class", "brush") .each(callBrush); brushg.selectAll("rect") .attr("y", afqb.plots.m.top) .attr("height", afqb.plots.h - afqb.plots.axisOffset.bottom); } else { d3.selectAll(".brush").data([]).exit().remove(); // Object.keys(afqb.plots.settings.brushes).forEach(function (bundle) { // afqb.plots.settings.brushes[bundle].brushExtent = [0, 100]; // }); } }; /** * Controls whether or not a plot is displayed for a given * tract, and changes color of the label in the "Bundles" * panel. * * @param {boolean} state - true if the tract is selected, * false if it is hidden. * @param {string} name - formatted tract name */ afqb.plots.showHideTractDetails = function (state, name) { "use strict"; if (state === true){ d3.select("#tract-" + name).style("display", "inline"); var names = afqb.plots.tracts.map(function(name) { return afqb.global.formatKeyName(name); }); var index = names.indexOf(name); var color = afqb.global.d3colors[parseInt(index)]; d3.select("#label-" + name).style("color", color); } else { d3.select("#tract-" + name).style("display", "none"); d3.select("#label-" + name).style("color", "#111111"); } }; /** * Initialize the selectable tract list. * * @param error - Passed to prevent execution in case error occurs * in preceding functions. */ afqb.plots.initCheckboxes = function (error) { "use strict"; if (error) { throw error; } d3.selectAll("input.tracts").each(function() { var name = d3.select(this).attr("name"); var state = afqb.plots.settings.checkboxes[name]; afqb.plots.showHideTractDetails(state, name); afqb.three.highlightBundle(state, name); }); $('body').addClass('loaded'); }; afqb.global.queues.nodeQ = d3_queue.queue(); afqb.global.queues.nodeQ.defer(afqb.global.initSettings); afqb.global.queues.nodeQ.defer(d3.csv, "data/nodes.csv"); afqb.global.queues.nodeQ.await(afqb.plots.buildFromNodes);
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/afqbrowser/site/client/js/plots.js
plots.js
afqb.global.formatKeyName = function(bundle) { "use strict"; // Standardize bundle names by making them lower case and // replacing all dots and spaces with dashes return bundle.toLowerCase().replace(/\s+/g, "-").replace(/\./g, "-"); }; /** * Updates the QuerySting object for proper reload * * @param {object} queryObj - object to stringify and merge with existing query string */ afqb.global.updateQueryString = function(queryObj) { "use strict"; // Pull down the current query string var urlSettings = Qs.parse(location.search.slice(1)); // Extend the existing query string obj with the input obj var updatedSettings = $.extend(true, {}, urlSettings, queryObj); // Convert back to a query string var settingsStr = "?" + Qs.stringify(updatedSettings, {encode: false}); // Push back up to the URL window.history.pushState({search: settingsStr}, '', settingsStr); }; /** * Initialize settings from querystring. * * AFQ-Browser settings are stored in four places, corresponding to the * different visualization panels (naming is self-explanatory): * * - afqb.three.settings * - afqb.plots.settings * - afqb.table.settings * - afqb.global.settings * * In the query string, all settings are lumped together. So we must parse * the query string and separate settings into their different groups. * * @param callback - function to call after the settings have been loaded */ afqb.global.initSettings = function (callback) { "use strict"; if (afqb.global.settings.loaded) { // Don't load settings again if called twice on accident if (callback) { callback(null); } } else { // Load default settings from settings.json d3.json("settings.json", function(settings) { "use strict"; // Update with values from query string var qsSettings = Qs.parse(location.search.slice(1)); var updatedSettings = $.extend(true, {}, settings, qsSettings); afqb.three.settings = $.extend(true, {}, afqb.three.settings, updatedSettings.three); afqb.plots.settings = $.extend(true, {}, afqb.plots.settings, updatedSettings.plots); afqb.table.settings = $.extend(true, {}, afqb.table.settings, updatedSettings.table); afqb.global.settings = $.extend(true, {}, afqb.global.settings, updatedSettings.global); // Restore spaces and capitalized words in splitMethod afqb.table.settings.splitMethod = afqb.table.settings.splitMethod .split("-").map(function (word) { return word.charAt(0).toUpperCase() + word.slice(1); }).join(" "); afqb.table.settings.sort.key = afqb.table.settings.sort.key === '' ? null : afqb.table.settings.sort.key; afqb.table.settings.prevSort.key = afqb.table.settings.prevSort.key === '' ? null : afqb.table.settings.prevSort.key; afqb.table.settings.restoring = true; // Parse all the checkbox strings as booleans Object.keys(afqb.plots.settings.checkboxes).forEach(function (bundle) { afqb.plots.settings.checkboxes[bundle] = ( afqb.plots.settings.checkboxes[bundle].toLowerCase() === 'true' ); }); // Parse the brushTract checkbox as boolean if (typeof afqb.plots.settings.brushTract !== 'boolean') { afqb.plots.settings.brushTract = (afqb.plots.settings.brushTract.toLowerCase() === 'true'); } // Parse the brushes if (afqb.plots.settings.hasOwnProperty("brushes")) { Object.keys(afqb.plots.settings.brushes).forEach(function (bundle) { if (afqb.plots.settings.brushes[bundle].hasOwnProperty("brushOn") && typeof afqb.plots.settings.brushes[bundle].brushOn !== "boolean") { afqb.plots.settings.brushes[bundle].brushOn = ( afqb.plots.settings.brushes[bundle].brushOn.toLowerCase() === "true" ); } if (afqb.plots.settings.brushes[bundle].hasOwnProperty("brushExtent")) { afqb.plots.settings.brushes[bundle].brushExtent = afqb.plots.settings.brushes[bundle].brushExtent.map(parseFloat); } }); } // Parse the zoom params as floats if (afqb.plots.settings.hasOwnProperty("zoom")) { Object.keys(afqb.plots.settings.zoom).forEach(function (key) { if (afqb.plots.settings.zoom[key].hasOwnProperty("scale")) { afqb.plots.settings.zoom[key].scale = parseFloat(afqb.plots.settings.zoom[key].scale); } if (afqb.plots.settings.zoom[key].hasOwnProperty("translate")) { afqb.plots.settings.zoom[key].translate = afqb.plots .settings.zoom[key].translate.map(parseFloat); } }); } // Parse lineOpacity as float afqb.plots.settings.lineOpacity = parseFloat(afqb.plots.settings.lineOpacity); // Parse table sorting counts as ints afqb.table.settings.sort.count = parseInt(afqb.table.settings.sort.count); afqb.table.settings.prevSort.count = parseInt(afqb.table.settings.prevSort.count); if (afqb.table.settings.selectedRows) { Object.keys(afqb.table.settings.selectedRows).forEach(function (subject) { if (typeof afqb.table.settings.selectedRows[subject] !== "boolean") { afqb.table.settings.selectedRows[subject] = ( afqb.table.settings.selectedRows[subject].toLowerCase() === "true" ); } }); } // Parse three.js opacities as floats afqb.three.settings.rHOpacity = parseFloat(afqb.three.settings.rHOpacity); afqb.three.settings.lHOpacity = parseFloat(afqb.three.settings.lHOpacity); afqb.three.settings.fiberOpacity = parseFloat(afqb.three.settings.fiberOpacity); // Parse mouseoverHighlight as boolean if (afqb.three.settings.hasOwnProperty("mouseoverHighlight")) { if (typeof afqb.three.settings.mouseoverHighlight !== 'boolean') { afqb.three.settings.mouseoverHighlight = ( afqb.three.settings.mouseoverHighlight.toLowerCase() === 'true' ); } } // Parse camera position as floats if (afqb.three.settings.hasOwnProperty("cameraPosition")) { Object.keys(afqb.three.settings.cameraPosition).forEach(function (coord) { afqb.three.settings.cameraPosition[coord] = parseFloat( afqb.three.settings.cameraPosition[coord] ) }); } // Parse fiber representation afqb.three.settings.fiberRepresentation = afqb.three.settings.fiberRepresentation .split("-").join(" "); afqb.global.settings.loaded = true; if (callback) { callback(null); } }); } }; /** * Restore brush settings on reload. * * Brush settings are stored in afqb.plots.settings.brushes. Iterate through * that and restore the brush extents. */ afqb.plots.restoreBrush = function () { "use strict"; Object.keys(afqb.plots.settings.brushes).forEach(function (tract) { if (afqb.plots.settings.brushes[tract].brushOn) { var targetBrush = afqb.plots.brushes.filter(function (b) { return b.name === tract; })[0].brush; d3.selectAll("#tract-" + tract) .selectAll(".brush") .call(targetBrush.extent( afqb.plots.settings.brushes[tract].brushExtent )); var formatter = d3.format(".0f"); var ext = targetBrush.extent(); d3.select("#brush-ext-" + tract).text("(" + formatter(ext[0]) + ", " + formatter(ext[1]) + ")"); } }); }; /** * Restore selected rows and subject lines on reload. * * This function iterates over afqb.table.settings.selectedRows and * changes the opacity of the associated table rows and plot lines. */ afqb.table.restoreRowSelection = function () { "use strict"; Object.keys(afqb.table.settings.selectedRows).forEach(function (rowID) { if (afqb.table.settings.selectedRows[rowID]) { d3.selectAll('#' + rowID) .selectAll('g') .style("opacity", 1); d3.selectAll('#' + rowID) .selectAll('path') .style("opacity", 1) .style("stroke-width", "2.1px"); } }); }; /** * Set the binder URL for the launch binder button * * If on localhost, disable the binder button. Otherwise, assume we're on * github pages and inspect the current URL to get the github user and repo. * Use that info to structure the binder URL. Then set the launch-binder * buttons href to the new binder URL */ var setupBinderURL = function () { // Disable the button if on localhost if (window.location.hostname == "localhost") { $("#launch-binder").addClass("disabled") } // Parse the URL, getting user and repo name var uri = new URI(location.href); var user = uri.hostname().split('.')[0]; var repo = uri.directory(); // Construct binder URL and set the button's href var binderUrl = 'https://mybinder.org/v2/gh/' + user + repo + '/gh-pages?filepath=index.ipynb'; $("#launch-binder").attr("href", binderUrl); return false; } setupBinderURL();
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/afqbrowser/site/client/js/save-settings.js
save-settings.js
// ========== Adding Table code ============ afqb.table.fieldHeight = 30; afqb.table.rowPadding = 1; afqb.table.fieldWidth = 140; afqb.table.format = d3.time.format("%m/%d/%Y"); //var dateFn = function(date) { return format.parse(d.created_at) }; afqb.table.subData = []; afqb.table.subGroups = {}; afqb.table.splitGroups = false; afqb.table.ramp = null; /** * Initialize Table from subject metadata in subjects.csv. Subject rows * maintain their order from subjects.csv. * * @param error - Passed to prevent execution in case error occurs * in preceding functions. * @param useless - Obligatory callback argument that we don't use in the * function. * @param {object} data - JavaScript array created by d3.csv(data/subjects.csv). */ afqb.table.buildTable = function (error, useless, data) { "use strict"; data.forEach(function (d) { delete d[""]; Object.keys(d).forEach(function (key) { d[key] = +d[key] || d[key]; if (d[key] === "0") { d[key] = +d[key]; } }) }); data.forEach(function (d) { if (typeof d.subjectID === 'number') { d.subjectID = "s" + afqb.global.formatKeyName(d.subjectID.toString()); } else { d.subjectID = afqb.global.formatKeyName(d.subjectID); } afqb.table.subData.push(d); }); afqb.table.subFormats = {} Object.keys(afqb.table.subData[0]).forEach(function (key) { var column = afqb.table.subData.map( function (row) { return row[key]; }); column = column.filter(function (element) { return element !== undefined && element !== null; }); function isBinary (e) { return e === 1 || e === 0; } function isNum (e) { return !isNaN(+e); } function isInt(e) { return Number.isInteger(+e); } function identity (arg) { return arg; } if (column.every(isBinary)) { afqb.table.subFormats[key] = d3.format("0b"); } else if (column.every(isNum)) { if (column.every(isInt)) { afqb.table.subFormats[key] = d3.format("d"); } else { afqb.table.subFormats[key] = function (n) { return parseFloat(d3.format(".4f")(n)); } } } else { afqb.table.subFormats[key] = identity; } }); afqb.table.ramp = null; var headerSvg = d3.select("#header-div").append("svg") .attr("width", d3.keys(afqb.table.subData[0]).length * afqb.table.fieldWidth) .attr("height", afqb.table.fieldHeight + afqb.table.rowPadding); afqb.table.headerGrp = headerSvg.append("g").attr("class", "headerGrp") // .attr("flex", "0 0 auto"); .attr("height", afqb.table.fieldHeight + afqb.table.rowPadding); var rowsSvg = d3.select("#rows-div").append("svg") .attr("width", d3.keys(afqb.table.subData[0]).length * afqb.table.fieldWidth) // .attr("height", "100%") // .attr("overflow-y", "auto") // .attr("display", "flex") // .attr("flex-direction", "column") .attr("height", afqb.table.subData.length * (afqb.table.fieldHeight + afqb.table.rowPadding)); afqb.table.rowsGrp = rowsSvg.append("g").attr("class", "rowsGrp") // .attr("flex", "1 1 auto") // .attr("overflow-y", "auto") .attr("height", afqb.table.subData.length * (afqb.table.fieldHeight + afqb.table.rowPadding)); var tableElement = document.getElementById("table"); var headerDiv = document.getElementById("header-div"); tableElement.addEventListener("scroll", function() { headerDiv.style.position = "relative"; headerDiv.style.top = this.scrollTop + "px"; }, false); var TableGuiConfigObj = function () { this.groupCount = parseInt(afqb.table.settings.sort.count); this.splitMethod = afqb.table.settings.splitMethod; }; afqb.table.gui = new dat.GUI({ autoplace: false, width: 350, scrollable: false }); var tableGuiContainer = document.getElementById('table-gui-container'); tableGuiContainer.appendChild(afqb.table.gui.domElement); afqb.global.controls.tableControlBox = new TableGuiConfigObj(); // Add split method controller afqb.table.gui .add(afqb.global.controls.tableControlBox, 'splitMethod', ['Equal Size', 'Equal Interval']) .name('Grouping Method') .onFinishChange(function (value) { afqb.table.settings.splitMethod = value; // Update the query string afqb.global.updateQueryString( {table: {splitMethod: afqb.global.formatKeyName(value)}} ); afqb.table.refreshTable(); }); // Add group count controller var groupCountController = afqb.table.gui .add(afqb.global.controls.tableControlBox, 'groupCount') .min(1).step(1) .name('Number of Groups'); groupCountController.onFinishChange(function (value) { afqb.table.settings.prevSort.count = afqb.table.settings.sort.count; afqb.table.settings.sort.count = value; afqb.table.refreshTable(); }); afqb.table.gui.close(); afqb.table.refreshTable(); afqb.table.restoreRowSelection(); }; /** * Refresh the Table after sort operations. Subject rows are rearranged in * ascending or descending order and colored by group. Number of groups is * determined by the user specified value in the Table gui (default = 2). * Selection is retained on refresh. * */ afqb.table.refreshTable = function () { "use strict"; // create the table header // We want subjectId to be the first column, so sort the keys using a sort function that puts // subjectId before all other values, settings all other values to be equal // Use d3.entries followed by sort followed by a map that gets the keys // because d3.[keys, values, entries] all have an undefined order. We use d3.entries below to sort // the values so we use the same method here for the keys to ensure that the header row has the same // order as the body rows. var firstCol = "subjectID"; var sortedKeys = d3.entries(afqb.table.subData[0]) .sort(function (x,y) { return x.key === firstCol ? -1 : y.key === firstCol ? 1 : 0; }) .map(function (entry) { return entry.key; }); var header = afqb.table.headerGrp.selectAll("g") .data(sortedKeys) .enter().append("g") .attr("class", "t_header") .attr("transform", function (d, i) { return "translate(" + i * afqb.table.fieldWidth + ",0)"; }) .on("mouseover", function () { d3.select(this).style("cursor", "n-resize"); }) // this is where the magic happens...(d) is the column being sorted .on("click", function (d) { afqb.table.settings.prevSort.key = afqb.table.settings.sort.key; afqb.table.settings.sort.key = d; afqb.table.settings.prevSort.count = afqb.table.settings.sort.count; afqb.table.refreshTable(); }); header.append("rect") .attr("width", afqb.table.fieldWidth - 1) .attr("height", afqb.table.fieldHeight); header.append("text") .attr("x", afqb.table.fieldWidth / 2) .attr("y", afqb.table.fieldHeight / 2) .attr("dy", ".35em") .text(String); // fill the table // select rows var rows = afqb.table.rowsGrp.selectAll("g.row").data(afqb.table.subData, function (d) { return d.subjectID; }); // create rows rows.enter().append("svg:g") .attr("class", "row") .attr("id", function (d) { return d.subjectID; }) .attr("transform", function (d, i) { return "translate(0," + i * (afqb.table.fieldHeight + afqb.table.rowPadding) + ")"; }) //.on('click', afqb.table.rowSelect ) .on('mouseover', afqb.table.tableMouseDown) .on('mousedown', afqb.table.rowSelect); // select cells var cells = rows.selectAll("g.cell") .data(function (d) { return d3.entries(d).filter(function (entry) { return entry.key !== "group"; }).sort(function (x,y) { return x.key === firstCol ? -1 : y.key === firstCol ? 1 : 0; }).map(function (entry) { return afqb.table.subFormats[entry.key](entry.value); }); }); // create cells var cellsEnter = cells.enter().append("svg:g") .attr("class", "cell") .style("opacity", 0.3) .attr("transform", function (d, i) { return "translate(" + i * afqb.table.fieldWidth + ",0)"; }); cellsEnter.append("rect") .attr("width", afqb.table.fieldWidth - 1) .attr("height", afqb.table.fieldHeight); cellsEnter.append("text") .attr("x", afqb.table.fieldWidth / 2) .attr("y", afqb.table.fieldHeight / 2) .attr("dy", ".35em") .text(String); var sortOn = afqb.table.settings.sort.key; // Update if not in initialisation if (sortOn !== null) { // If sort.key and sort.count are the same, just update the row order var sameKey = (sortOn === afqb.table.settings.prevSort.key); var sameCount = (afqb.table.settings.sort.count === afqb.table.settings.prevSort.count); if (sameKey && sameCount && !afqb.table.settings.restoring) { if (afqb.table.settings.sort.order === "ascending") { rows.sort(function (a, b) { return afqb.table.descendingWithNull(a[sortOn], b[sortOn]); }); afqb.table.settings.prevSort.order = "ascending"; afqb.table.settings.sort.order = "descending"; } else { rows.sort(function (a, b) { return afqb.table.ascendingWithNull(a[sortOn], b[sortOn]); }); afqb.table.settings.prevSort.order = "descending"; afqb.table.settings.sort.order = "ascending"; } // Update row positions rows//.transition() //.duration(500) .attr("transform", function (d, i) { return "translate(0," + i * (afqb.table.fieldHeight + 1) + ")"; }); } if (!sameKey && !afqb.table.settings.restoring) { // Only resort the data if the sort key is different rows.sort(function (a, b) { return afqb.table.ascendingWithNull(a[sortOn], b[sortOn]); }); afqb.table.subData.sort(function (a, b) { return afqb.table.ascendingWithNull(a[sortOn], b[sortOn]); }); afqb.table.settings.sort.order = "ascending"; // Update row positions rows//.transition() //.duration(500) .attr("transform", function (d, i) { return "translate(0," + i * (afqb.table.fieldHeight + 1) + ")"; }); } if (!sameKey || !sameCount || afqb.table.settings.restoring) { console.assert(afqb.table.settings.splitMethod === "Equal Size" || afqb.table.settings.splitMethod === "Equal Interval", "Split method must be 'Equal Size' or 'Equal Interval'"); // Get unique, non-null values from the column `sortOn` var uniqueNotNull = function (value, index, self) { return (self.indexOf(value) === index) && (value !== null); }; var uniques = afqb.table.subData .map(function (element) { return element[sortOn]; }) .filter(uniqueNotNull); // usrGroups is the user requested number of groups // numGroups may be smaller if there are not enough unique values var usrGroups = afqb.table.settings.sort.count; var numGroups = Math.min(usrGroups, uniques.length); // var groupScale; // Create groupScale to map between the unique // values and the discrete group indices. // TODO: Use the datatype json instead of // just testing the first element of uniques if (typeof uniques[0] === 'number') { if (afqb.table.settings.splitMethod === "Equal Size" || numGroups === 1) { // Split into groups of equal size afqb.table.groupScale = d3.scale.quantile() .range(d3.range(numGroups)); afqb.table.groupScale.domain(uniques); } else { // Split into groups of equal interval afqb.table.groupScale = d3.scale.quantize() .range(d3.range(numGroups)); afqb.table.groupScale.domain([d3.min(uniques), d3.max(uniques)]); } } else { var rangeOrdinal = new Array(uniques.length); for (let i = 0; i < numGroups; i++) { rangeOrdinal.fill(i, i * uniques.length / numGroups, (i + 1) * uniques.length / numGroups); } afqb.table.groupScale = d3.scale.ordinal() .range(rangeOrdinal); afqb.table.groupScale.domain(uniques); } // Assign group index to each element of afqb.table.subData afqb.table.subData.forEach(function(element) { if (element[sortOn] === null) { element.group = null; afqb.table.subGroups[element.subjectID] = null; } else { element.group = afqb.table.groupScale(element[sortOn]); afqb.table.subGroups[element.subjectID] = afqb.table.groupScale(element[sortOn]); } }); // Prepare to split on group index afqb.table.splitGroups = d3.nest() .key(function (d) { return d.group; }) .entries(afqb.table.subData); // Create color ramp for subject groups afqb.table.ramp = d3.scale.linear() .domain([0, numGroups-1]).range(["red", "blue"]); afqb.global.idColor = function (element) { d3.selectAll('#' + element.subjectID) .selectAll('.line') .style("stroke", element.group === null ? "black" : afqb.table.ramp(element.group)); d3.selectAll('#' + element.subjectID) .selectAll('.cell').select('text') .style("fill", element.group === null ? "black" : afqb.table.ramp(element.group)); }; afqb.table.subData.forEach(afqb.global.idColor); // color lines d3.csv("data/nodes.csv", afqb.plots.changePlots); if (afqb.table.settings.restoring) { if (afqb.table.settings.sort.order === "ascending") { rows.sort(function (a, b) { return afqb.table.ascendingWithNull(a[sortOn], b[sortOn]); }); } else { rows.sort(function (a, b) { return afqb.table.descendingWithNull(a[sortOn], b[sortOn]); }); } // Update row positions rows//.transition() //.duration(500) .attr("transform", function (d, i) { return "translate(0," + i * (afqb.table.fieldHeight + 1) + ")"; }); } afqb.table.settings.restoring = false; } } // Update the query string var table = { prevSort: afqb.table.settings.prevSort, sort: afqb.table.settings.sort }; afqb.global.updateQueryString( {table: table} ); }; /** * Sort rows in ascending order. Elements a and b * are sorted with d3.ascending, and their associated * rows are similarly ordered. * * @param {element} a - value in sorting column for the first * object * @param {element} b - value in sorting column for the second * object */ afqb.table.ascendingWithNull = function (a, b) { "use strict"; // d3.ascending ignores null and undefined values // Return the same as d3.ascending but keep all null and // undefined values at the bottom of the list return b === null ? -1 : a === null ? 1 : d3.ascending(a, b); }; /** * Sort rows in descending order. Elements a and b * are sorted with d3.descending, and their associated * rows are similarly ordered. * * @param {element} a - value in sorting column for the first * object * @param {element} b - value in sorting column for the second * object */ afqb.table.descendingWithNull = function (a, b) { "use strict"; // d3.descending ignores null and undefined values // Return the same as d3.descending but keep all null and // undefined values at the bottom of the list return b === null ? -1 : a === null ? 1 : d3.descending(a, b); }; // onclick function to toggle on and off rows /** * Select subject by row. Change opacity of row * and corresponding subject lines in 2D plots. * */ afqb.table.rowSelect = function () { "use strict"; if($('g',this).css("opacity") == 0.3) { afqb.table.settings.selectedRows[this.id] = true; //uses the opacity of the row for selection and deselection d3.selectAll('#' + this.id) .selectAll('g') .style("opacity", 1); d3.selectAll('#' + this.id) .selectAll('path') .style("opacity", 1) .style("stroke-width", "2.1px"); } else { afqb.table.settings.selectedRows[this.id] = false; d3.selectAll('#' + this.id) .selectAll('g') .style("opacity", 0.3); d3.selectAll('#' + this.id) .selectAll('path') .style("opacity", afqb.global.controls.plotsControlBox.lineOpacity) .style("stroke-width", "1.1px"); } // Update the query string var selectedRows = {}; selectedRows[this.id] = afqb.table.settings.selectedRows[this.id]; afqb.global.updateQueryString( {table: {selectedRows: selectedRows}} ); }; afqb.global.mouse.isDown = false; // Tracks status of mouse button $(document).mousedown(function() { "use strict"; // When mouse goes down, set isDown to true afqb.global.mouse.isDown = true; }) .mouseup(function() { "use strict"; // When mouse goes up, set isDown to false afqb.global.mouse.isDown = false; }); /** * Define subject selection and deselection by * drag. * */ afqb.table.tableMouseDown = function () { "use strict"; if(afqb.global.mouse.isDown) { if($('g',this).css("opacity") == 0.3) { afqb.table.settings.selectedRows[this.id] = true; //uses the opacity of the row for selection and deselection d3.selectAll('#' + this.id) .selectAll('g') .style("opacity", 1); d3.selectAll('#' + this.id) .selectAll('path') .style("opacity", 1) .style("stroke-width", "2.1px"); } else { afqb.table.settings.selectedRows[this.id] = false; d3.selectAll('#' + this.id) .selectAll('g') .style("opacity", 0.3); d3.selectAll('#' + this.id) .selectAll('path') .style("opacity", afqb.global.controls.plotsControlBox.lineOpacity) .style("stroke-width", "1.1px"); } // Update the query string var selectedRows = {}; selectedRows[this.id] = afqb.table.settings.selectedRows[this.id]; afqb.global.updateQueryString( {table: {selectedRows: selectedRows}} ); } }; afqb.global.queues.subjectQ = d3_queue.queue(); afqb.global.queues.subjectQ.defer(afqb.global.initSettings); afqb.global.queues.subjectQ.defer(d3.csv, "data/subjects.csv"); afqb.global.queues.subjectQ.await(afqb.table.buildTable);
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/afqbrowser/site/client/js/table.js
table.js
THREE.OrbitControls = function ( object, domElement ) { this.object = object; this.domElement = ( domElement !== undefined ) ? domElement : document; // Set to false to disable this control this.enabled = true; // "target" sets the location of focus, where the object orbits around this.target = new THREE.Vector3(); // How far you can dolly in and out ( PerspectiveCamera only ) this.minDistance = 0; this.maxDistance = Infinity; // How far you can zoom in and out ( OrthographicCamera only ) this.minZoom = 0; this.maxZoom = Infinity; // How far you can orbit vertically, upper and lower limits. // Range is 0 to Math.PI radians. this.minPolarAngle = 0; // radians this.maxPolarAngle = Math.PI; // radians // How far you can orbit horizontally, upper and lower limits. // If set, must be a sub-interval of the interval [ - Math.PI, Math.PI ]. this.minAzimuthAngle = - Infinity; // radians this.maxAzimuthAngle = Infinity; // radians // Set to true to enable damping (inertia) // If damping is enabled, you must call controls.update() in your animation loop this.enableDamping = false; this.dampingFactor = 0.25; // This option actually enables dollying in and out; left as "zoom" for backwards compatibility. // Set to false to disable zooming this.enableZoom = true; this.zoomSpeed = 1.0; // Set to false to disable rotating this.enableRotate = true; this.rotateSpeed = 1.0; // Set to false to disable panning this.enablePan = true; this.keyPanSpeed = 7.0; // pixels moved per arrow key push // Set to true to automatically rotate around the target // If auto-rotate is enabled, you must call controls.update() in your animation loop this.autoRotate = false; this.autoRotateSpeed = 2.0; // 30 seconds per round when fps is 60 // Set to false to disable use of the keys this.enableKeys = true; // The four arrow keys this.keys = { LEFT: 37, UP: 38, RIGHT: 39, BOTTOM: 40 }; // Mouse buttons this.mouseButtons = { ORBIT: THREE.MOUSE.LEFT, ZOOM: THREE.MOUSE.MIDDLE, PAN: THREE.MOUSE.RIGHT }; // for reset this.target0 = this.target.clone(); this.position0 = this.object.position.clone(); this.zoom0 = this.object.zoom; // // public methods // this.getPolarAngle = function () { return spherical.phi; }; this.getAzimuthalAngle = function () { return spherical.theta; }; this.reset = function () { scope.target.copy( scope.target0 ); scope.object.position.copy( scope.position0 ); scope.object.zoom = scope.zoom0; scope.object.updateProjectionMatrix(); scope.dispatchEvent( changeEvent ); scope.update(); state = STATE.NONE; }; // this method is exposed, but perhaps it would be better if we can make it private... this.update = function() { var offset = new THREE.Vector3(); // so camera.up is the orbit axis var quat = new THREE.Quaternion().setFromUnitVectors( object.up, new THREE.Vector3( 0, 1, 0 ) ); var quatInverse = quat.clone().inverse(); var lastPosition = new THREE.Vector3(); var lastQuaternion = new THREE.Quaternion(); return function () { var position = scope.object.position; offset.copy( position ).sub( scope.target ); // rotate offset to "y-axis-is-up" space offset.applyQuaternion( quat ); // angle from z-axis around y-axis spherical.setFromVector3( offset ); if ( scope.autoRotate && state === STATE.NONE ) { rotateLeft( getAutoRotationAngle() ); } spherical.theta += sphericalDelta.theta; spherical.phi += sphericalDelta.phi; // restrict theta to be between desired limits spherical.theta = Math.max( scope.minAzimuthAngle, Math.min( scope.maxAzimuthAngle, spherical.theta ) ); // restrict phi to be between desired limits spherical.phi = Math.max( scope.minPolarAngle, Math.min( scope.maxPolarAngle, spherical.phi ) ); spherical.makeSafe(); spherical.radius *= scale; // restrict radius to be between desired limits spherical.radius = Math.max( scope.minDistance, Math.min( scope.maxDistance, spherical.radius ) ); // move target to panned location scope.target.add( panOffset ); offset.setFromSpherical( spherical ); // rotate offset back to "camera-up-vector-is-up" space offset.applyQuaternion( quatInverse ); position.copy( scope.target ).add( offset ); scope.object.lookAt( scope.target ); if ( scope.enableDamping === true ) { sphericalDelta.theta *= ( 1 - scope.dampingFactor ); sphericalDelta.phi *= ( 1 - scope.dampingFactor ); } else { sphericalDelta.set( 0, 0, 0 ); } scale = 1; panOffset.set( 0, 0, 0 ); // update condition is: // min(camera displacement, camera rotation in radians)^2 > EPS // using small-angle approximation cos(x/2) = 1 - x^2 / 8 if ( zoomChanged || lastPosition.distanceToSquared( scope.object.position ) > EPS || 8 * ( 1 - lastQuaternion.dot( scope.object.quaternion ) ) > EPS ) { scope.dispatchEvent( changeEvent ); lastPosition.copy( scope.object.position ); lastQuaternion.copy( scope.object.quaternion ); zoomChanged = false; return true; } return false; }; }(); this.dispose = function() { scope.domElement.removeEventListener( 'contextmenu', onContextMenu, false ); scope.domElement.removeEventListener( 'mousedown', onMouseDown, false ); scope.domElement.removeEventListener( 'mousewheel', onMouseWheel, false ); scope.domElement.removeEventListener( 'MozMousePixelScroll', onMouseWheel, false ); // firefox scope.domElement.removeEventListener( 'touchstart', onTouchStart, false ); scope.domElement.removeEventListener( 'touchend', onTouchEnd, false ); scope.domElement.removeEventListener( 'touchmove', onTouchMove, false ); document.removeEventListener( 'mousemove', onMouseMove, false ); document.removeEventListener( 'mouseup', onMouseUp, false ); document.removeEventListener( 'mouseout', onMouseUp, false ); window.removeEventListener( 'keydown', onKeyDown, false ); //scope.dispatchEvent( { type: 'dispose' } ); // should this be added here? }; // // internals // var scope = this; var changeEvent = { type: 'change' }; var startEvent = { type: 'start' }; var endEvent = { type: 'end' }; var STATE = { NONE : - 1, ROTATE : 0, DOLLY : 1, PAN : 2, TOUCH_ROTATE : 3, TOUCH_DOLLY : 4, TOUCH_PAN : 5 }; var state = STATE.NONE; var EPS = 0.000001; // current position in spherical coordinates var spherical = new THREE.Spherical(); var sphericalDelta = new THREE.Spherical(); var scale = 1; var panOffset = new THREE.Vector3(); var zoomChanged = false; var rotateStart = new THREE.Vector2(); var rotateEnd = new THREE.Vector2(); var rotateDelta = new THREE.Vector2(); var panStart = new THREE.Vector2(); var panEnd = new THREE.Vector2(); var panDelta = new THREE.Vector2(); var dollyStart = new THREE.Vector2(); var dollyEnd = new THREE.Vector2(); var dollyDelta = new THREE.Vector2(); function getAutoRotationAngle() { return 2 * Math.PI / 60 / 60 * scope.autoRotateSpeed; } function getZoomScale() { return Math.pow( 0.95, scope.zoomSpeed ); } function rotateLeft( angle ) { sphericalDelta.theta -= angle; } function rotateUp( angle ) { sphericalDelta.phi -= angle; } var panLeft = function() { var v = new THREE.Vector3(); return function panLeft( distance, objectMatrix ) { v.setFromMatrixColumn( objectMatrix, 0 ); // get X column of objectMatrix v.multiplyScalar( - distance ); panOffset.add( v ); }; }(); var panUp = function() { var v = new THREE.Vector3(); return function panUp( distance, objectMatrix ) { v.setFromMatrixColumn( objectMatrix, 1 ); // get Y column of objectMatrix v.multiplyScalar( distance ); panOffset.add( v ); }; }(); // deltaX and deltaY are in pixels; right and down are positive var pan = function() { var offset = new THREE.Vector3(); return function( deltaX, deltaY ) { var element = scope.domElement === document ? scope.domElement.body : scope.domElement; if ( scope.object instanceof THREE.PerspectiveCamera ) { // perspective var position = scope.object.position; offset.copy( position ).sub( scope.target ); var targetDistance = offset.length(); // half of the fov is center to top of screen targetDistance *= Math.tan( ( scope.object.fov / 2 ) * Math.PI / 180.0 ); // we actually don't use screenWidth, since perspective camera is fixed to screen height panLeft( 2 * deltaX * targetDistance / element.clientHeight, scope.object.matrix ); panUp( 2 * deltaY * targetDistance / element.clientHeight, scope.object.matrix ); } else if ( scope.object instanceof THREE.OrthographicCamera ) { // orthographic panLeft( deltaX * ( scope.object.right - scope.object.left ) / scope.object.zoom / element.clientWidth, scope.object.matrix ); panUp( deltaY * ( scope.object.top - scope.object.bottom ) / scope.object.zoom / element.clientHeight, scope.object.matrix ); } else { // camera neither orthographic nor perspective console.warn( 'WARNING: OrbitControls.js encountered an unknown camera type - pan disabled.' ); scope.enablePan = false; } }; }(); function dollyIn( dollyScale ) { if ( scope.object instanceof THREE.PerspectiveCamera ) { scale /= dollyScale; } else if ( scope.object instanceof THREE.OrthographicCamera ) { scope.object.zoom = Math.max( scope.minZoom, Math.min( scope.maxZoom, scope.object.zoom * dollyScale ) ); scope.object.updateProjectionMatrix(); zoomChanged = true; } else { console.warn( 'WARNING: OrbitControls.js encountered an unknown camera type - dolly/zoom disabled.' ); scope.enableZoom = false; } } function dollyOut( dollyScale ) { if ( scope.object instanceof THREE.PerspectiveCamera ) { scale *= dollyScale; } else if ( scope.object instanceof THREE.OrthographicCamera ) { scope.object.zoom = Math.max( scope.minZoom, Math.min( scope.maxZoom, scope.object.zoom / dollyScale ) ); scope.object.updateProjectionMatrix(); zoomChanged = true; } else { console.warn( 'WARNING: OrbitControls.js encountered an unknown camera type - dolly/zoom disabled.' ); scope.enableZoom = false; } } // // event callbacks - update the object state // function handleMouseDownRotate( event ) { //console.log( 'handleMouseDownRotate' ); rotateStart.set( event.clientX, event.clientY ); } function handleMouseDownDolly( event ) { //console.log( 'handleMouseDownDolly' ); dollyStart.set( event.clientX, event.clientY ); } function handleMouseDownPan( event ) { //console.log( 'handleMouseDownPan' ); panStart.set( event.clientX, event.clientY ); } function handleMouseMoveRotate( event ) { //console.log( 'handleMouseMoveRotate' ); rotateEnd.set( event.clientX, event.clientY ); rotateDelta.subVectors( rotateEnd, rotateStart ); var element = scope.domElement === document ? scope.domElement.body : scope.domElement; // rotating across whole screen goes 360 degrees around rotateLeft( 2 * Math.PI * rotateDelta.x / element.clientWidth * scope.rotateSpeed ); // rotating up and down along whole screen attempts to go 360, but limited to 180 rotateUp( 2 * Math.PI * rotateDelta.y / element.clientHeight * scope.rotateSpeed ); rotateStart.copy( rotateEnd ); scope.update(); } function handleMouseMoveDolly( event ) { //console.log( 'handleMouseMoveDolly' ); dollyEnd.set( event.clientX, event.clientY ); dollyDelta.subVectors( dollyEnd, dollyStart ); if ( dollyDelta.y > 0 ) { dollyIn( getZoomScale() ); } else if ( dollyDelta.y < 0 ) { dollyOut( getZoomScale() ); } dollyStart.copy( dollyEnd ); scope.update(); } function handleMouseMovePan( event ) { //console.log( 'handleMouseMovePan' ); panEnd.set( event.clientX, event.clientY ); panDelta.subVectors( panEnd, panStart ); pan( panDelta.x, panDelta.y ); panStart.copy( panEnd ); scope.update(); } function handleMouseUp( event ) { //console.log( 'handleMouseUp' ); } function handleMouseWheel( event ) { //console.log( 'handleMouseWheel' ); var delta = 0; if ( event.wheelDelta !== undefined ) { // WebKit / Opera / Explorer 9 delta = event.wheelDelta; } else if ( event.detail !== undefined ) { // Firefox delta = - event.detail; } if ( delta > 0 ) { dollyOut( getZoomScale() ); } else if ( delta < 0 ) { dollyIn( getZoomScale() ); } scope.update(); } function handleKeyDown( event ) { //console.log( 'handleKeyDown' ); switch ( event.keyCode ) { case scope.keys.UP: pan( 0, scope.keyPanSpeed ); scope.update(); break; case scope.keys.BOTTOM: pan( 0, - scope.keyPanSpeed ); scope.update(); break; case scope.keys.LEFT: pan( scope.keyPanSpeed, 0 ); scope.update(); break; case scope.keys.RIGHT: pan( - scope.keyPanSpeed, 0 ); scope.update(); break; } } function handleTouchStartRotate( event ) { //console.log( 'handleTouchStartRotate' ); rotateStart.set( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY ); } function handleTouchStartDolly( event ) { //console.log( 'handleTouchStartDolly' ); var dx = event.touches[ 0 ].pageX - event.touches[ 1 ].pageX; var dy = event.touches[ 0 ].pageY - event.touches[ 1 ].pageY; var distance = Math.sqrt( dx * dx + dy * dy ); dollyStart.set( 0, distance ); } function handleTouchStartPan( event ) { //console.log( 'handleTouchStartPan' ); panStart.set( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY ); } function handleTouchMoveRotate( event ) { //console.log( 'handleTouchMoveRotate' ); rotateEnd.set( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY ); rotateDelta.subVectors( rotateEnd, rotateStart ); var element = scope.domElement === document ? scope.domElement.body : scope.domElement; // rotating across whole screen goes 360 degrees around rotateLeft( 2 * Math.PI * rotateDelta.x / element.clientWidth * scope.rotateSpeed ); // rotating up and down along whole screen attempts to go 360, but limited to 180 rotateUp( 2 * Math.PI * rotateDelta.y / element.clientHeight * scope.rotateSpeed ); rotateStart.copy( rotateEnd ); scope.update(); } function handleTouchMoveDolly( event ) { //console.log( 'handleTouchMoveDolly' ); var dx = event.touches[ 0 ].pageX - event.touches[ 1 ].pageX; var dy = event.touches[ 0 ].pageY - event.touches[ 1 ].pageY; var distance = Math.sqrt( dx * dx + dy * dy ); dollyEnd.set( 0, distance ); dollyDelta.subVectors( dollyEnd, dollyStart ); if ( dollyDelta.y > 0 ) { dollyOut( getZoomScale() ); } else if ( dollyDelta.y < 0 ) { dollyIn( getZoomScale() ); } dollyStart.copy( dollyEnd ); scope.update(); } function handleTouchMovePan( event ) { //console.log( 'handleTouchMovePan' ); panEnd.set( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY ); panDelta.subVectors( panEnd, panStart ); pan( panDelta.x, panDelta.y ); panStart.copy( panEnd ); scope.update(); } function handleTouchEnd( event ) { //console.log( 'handleTouchEnd' ); } // // event handlers - FSM: listen for events and reset state // function onMouseDown( event ) { if ( scope.enabled === false ) return; event.preventDefault(); if ( event.button === scope.mouseButtons.ORBIT ) { if ( scope.enableRotate === false ) return; handleMouseDownRotate( event ); state = STATE.ROTATE; } else if ( event.button === scope.mouseButtons.ZOOM ) { if ( scope.enableZoom === false ) return; handleMouseDownDolly( event ); state = STATE.DOLLY; } else if ( event.button === scope.mouseButtons.PAN ) { if ( scope.enablePan === false ) return; handleMouseDownPan( event ); state = STATE.PAN; } if ( state !== STATE.NONE ) { document.addEventListener( 'mousemove', onMouseMove, false ); document.addEventListener( 'mouseup', onMouseUp, false ); document.addEventListener( 'mouseout', onMouseUp, false ); scope.dispatchEvent( startEvent ); } } function onMouseMove( event ) { if ( scope.enabled === false ) return; event.preventDefault(); if ( state === STATE.ROTATE ) { if ( scope.enableRotate === false ) return; handleMouseMoveRotate( event ); } else if ( state === STATE.DOLLY ) { if ( scope.enableZoom === false ) return; handleMouseMoveDolly( event ); } else if ( state === STATE.PAN ) { if ( scope.enablePan === false ) return; handleMouseMovePan( event ); } } function onMouseUp( event ) { if ( scope.enabled === false ) return; handleMouseUp( event ); document.removeEventListener( 'mousemove', onMouseMove, false ); document.removeEventListener( 'mouseup', onMouseUp, false ); document.removeEventListener( 'mouseout', onMouseUp, false ); scope.dispatchEvent( endEvent ); state = STATE.NONE; } function onMouseWheel( event ) { if ( scope.enabled === false || scope.enableZoom === false || ( state !== STATE.NONE && state !== STATE.ROTATE ) ) return; event.preventDefault(); event.stopPropagation(); handleMouseWheel( event ); scope.dispatchEvent( startEvent ); // not sure why these are here... scope.dispatchEvent( endEvent ); } function onKeyDown( event ) { if ( scope.enabled === false || scope.enableKeys === false || scope.enablePan === false ) return; handleKeyDown( event ); } function onTouchStart( event ) { if ( scope.enabled === false ) return; switch ( event.touches.length ) { case 1: // one-fingered touch: rotate if ( scope.enableRotate === false ) return; handleTouchStartRotate( event ); state = STATE.TOUCH_ROTATE; break; case 2: // two-fingered touch: dolly if ( scope.enableZoom === false ) return; handleTouchStartDolly( event ); state = STATE.TOUCH_DOLLY; break; case 3: // three-fingered touch: pan if ( scope.enablePan === false ) return; handleTouchStartPan( event ); state = STATE.TOUCH_PAN; break; default: state = STATE.NONE; } if ( state !== STATE.NONE ) { scope.dispatchEvent( startEvent ); } } function onTouchMove( event ) { if ( scope.enabled === false ) return; event.preventDefault(); event.stopPropagation(); switch ( event.touches.length ) { case 1: // one-fingered touch: rotate if ( scope.enableRotate === false ) return; if ( state !== STATE.TOUCH_ROTATE ) return; // is this needed?... handleTouchMoveRotate( event ); break; case 2: // two-fingered touch: dolly if ( scope.enableZoom === false ) return; if ( state !== STATE.TOUCH_DOLLY ) return; // is this needed?... handleTouchMoveDolly( event ); break; case 3: // three-fingered touch: pan if ( scope.enablePan === false ) return; if ( state !== STATE.TOUCH_PAN ) return; // is this needed?... handleTouchMovePan( event ); break; default: state = STATE.NONE; } } function onTouchEnd( event ) { if ( scope.enabled === false ) return; handleTouchEnd( event ); scope.dispatchEvent( endEvent ); state = STATE.NONE; } function onContextMenu( event ) { event.preventDefault(); } // scope.domElement.addEventListener( 'contextmenu', onContextMenu, false ); scope.domElement.addEventListener( 'mousedown', onMouseDown, false ); scope.domElement.addEventListener( 'mousewheel', onMouseWheel, false ); scope.domElement.addEventListener( 'MozMousePixelScroll', onMouseWheel, false ); // firefox scope.domElement.addEventListener( 'touchstart', onTouchStart, false ); scope.domElement.addEventListener( 'touchend', onTouchEnd, false ); scope.domElement.addEventListener( 'touchmove', onTouchMove, false ); window.addEventListener( 'keydown', onKeyDown, false ); // force an update at start this.update(); }; THREE.OrbitControls.prototype = Object.create( THREE.EventDispatcher.prototype ); THREE.OrbitControls.prototype.constructor = THREE.OrbitControls; Object.defineProperties( THREE.OrbitControls.prototype, { center: { get: function () { console.warn( 'THREE.OrbitControls: .center has been renamed to .target' ); return this.target; } }, // backward compatibility noZoom: { get: function () { console.warn( 'THREE.OrbitControls: .noZoom has been deprecated. Use .enableZoom instead.' ); return ! this.enableZoom; }, set: function ( value ) { console.warn( 'THREE.OrbitControls: .noZoom has been deprecated. Use .enableZoom instead.' ); this.enableZoom = ! value; } }, noRotate: { get: function () { console.warn( 'THREE.OrbitControls: .noRotate has been deprecated. Use .enableRotate instead.' ); return ! this.enableRotate; }, set: function ( value ) { console.warn( 'THREE.OrbitControls: .noRotate has been deprecated. Use .enableRotate instead.' ); this.enableRotate = ! value; } }, noPan: { get: function () { console.warn( 'THREE.OrbitControls: .noPan has been deprecated. Use .enablePan instead.' ); return ! this.enablePan; }, set: function ( value ) { console.warn( 'THREE.OrbitControls: .noPan has been deprecated. Use .enablePan instead.' ); this.enablePan = ! value; } }, noKeys: { get: function () { console.warn( 'THREE.OrbitControls: .noKeys has been deprecated. Use .enableKeys instead.' ); return ! this.enableKeys; }, set: function ( value ) { console.warn( 'THREE.OrbitControls: .noKeys has been deprecated. Use .enableKeys instead.' ); this.enableKeys = ! value; } }, staticMoving : { get: function () { console.warn( 'THREE.OrbitControls: .staticMoving has been deprecated. Use .enableDamping instead.' ); return ! this.enableDamping; }, set: function ( value ) { console.warn( 'THREE.OrbitControls: .staticMoving has been deprecated. Use .enableDamping instead.' ); this.enableDamping = ! value; } }, dynamicDampingFactor : { get: function () { console.warn( 'THREE.OrbitControls: .dynamicDampingFactor has been renamed. Use .dampingFactor instead.' ); return this.dampingFactor; }, set: function ( value ) { console.warn( 'THREE.OrbitControls: .dynamicDampingFactor has been renamed. Use .dampingFactor instead.' ); this.dampingFactor = value; } } } );
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/afqbrowser/site/client/js/third-party/OrbitControls.js
OrbitControls.js
// /** @namespace */ var THREEx = THREEx || {}; // # Constructor THREEx.DomEvents = function(camera, domElement) { this._camera = camera || null; this._domElement= domElement || document; this._raycaster = new THREE.Raycaster(); this._selected = null; this._boundObjs = {}; // Bind dom event for mouse and touch var _this = this; this._$onClick = function(){ _this._onClick.apply(_this, arguments); }; this._$onDblClick = function(){ _this._onDblClick.apply(_this, arguments); }; this._$onMouseMove = function(){ _this._onMouseMove.apply(_this, arguments); }; this._$onMouseDown = function(){ _this._onMouseDown.apply(_this, arguments); }; this._$onMouseUp = function(){ _this._onMouseUp.apply(_this, arguments); }; this._$onTouchMove = function(){ _this._onTouchMove.apply(_this, arguments); }; this._$onTouchStart = function(){ _this._onTouchStart.apply(_this, arguments); }; this._$onTouchEnd = function(){ _this._onTouchEnd.apply(_this, arguments); }; this._$onContextmenu = function(){ _this._onContextmenu.apply(_this, arguments); }; this._domElement.addEventListener( 'click' , this._$onClick , false ); this._domElement.addEventListener( 'dblclick' , this._$onDblClick , false ); this._domElement.addEventListener( 'mousemove' , this._$onMouseMove , false ); this._domElement.addEventListener( 'mousedown' , this._$onMouseDown , false ); this._domElement.addEventListener( 'mouseup' , this._$onMouseUp , false ); this._domElement.addEventListener( 'touchmove' , this._$onTouchMove , false ); this._domElement.addEventListener( 'touchstart' , this._$onTouchStart , false ); this._domElement.addEventListener( 'touchend' , this._$onTouchEnd , false ); this._domElement.addEventListener( 'contextmenu', this._$onContextmenu , false ); } // # Destructor THREEx.DomEvents.prototype.destroy = function() { // unBind dom event for mouse and touch this._domElement.removeEventListener( 'click' , this._$onClick , false ); this._domElement.removeEventListener( 'dblclick' , this._$onDblClick , false ); this._domElement.removeEventListener( 'mousemove' , this._$onMouseMove , false ); this._domElement.removeEventListener( 'mousedown' , this._$onMouseDown , false ); this._domElement.removeEventListener( 'mouseup' , this._$onMouseUp , false ); this._domElement.removeEventListener( 'touchmove' , this._$onTouchMove , false ); this._domElement.removeEventListener( 'touchstart' , this._$onTouchStart , false ); this._domElement.removeEventListener( 'touchend' , this._$onTouchEnd , false ); this._domElement.removeEventListener( 'contextmenu' , this._$onContextmenu , false ); } THREEx.DomEvents.eventNames = [ "click", "dblclick", "mouseover", "mouseout", "mousemove", "mousedown", "mouseup", "contextmenu", "touchstart", "touchend" ]; THREEx.DomEvents.prototype._getRelativeMouseXY = function(domEvent){ var element = domEvent.target || domEvent.srcElement; if (element.nodeType === 3) { element = element.parentNode; // Safari fix -- see http://www.quirksmode.org/js/events_properties.html } //get the real position of an element relative to the page starting point (0, 0) //credits go to brainjam on answering http://stackoverflow.com/questions/5755312/getting-mouse-position-relative-to-content-area-of-an-element var elPosition = { x : 0 , y : 0}; var tmpElement = element; //store padding var style = getComputedStyle(tmpElement, null); elPosition.y += parseInt(style.getPropertyValue("padding-top"), 10); elPosition.x += parseInt(style.getPropertyValue("padding-left"), 10); //add positions do { elPosition.x += tmpElement.offsetLeft; elPosition.y += tmpElement.offsetTop; style = getComputedStyle(tmpElement, null); elPosition.x += parseInt(style.getPropertyValue("border-left-width"), 10); elPosition.y += parseInt(style.getPropertyValue("border-top-width"), 10); } while(tmpElement = tmpElement.offsetParent); var elDimension = { width : (element === window) ? window.innerWidth : element.offsetWidth, height : (element === window) ? window.innerHeight : element.offsetHeight }; return { x : +((domEvent.pageX - elPosition.x) / elDimension.width ) * 2 - 1, y : -((domEvent.pageY - elPosition.y) / elDimension.height) * 2 + 1 }; }; /********************************************************************************/ /* domevent context */ /********************************************************************************/ // handle domevent context in object3d instance THREEx.DomEvents.prototype._objectCtxInit = function(object3d){ object3d._3xDomEvent = {}; } THREEx.DomEvents.prototype._objectCtxDeinit = function(object3d){ delete object3d._3xDomEvent; } THREEx.DomEvents.prototype._objectCtxIsInit = function(object3d){ return object3d._3xDomEvent ? true : false; } THREEx.DomEvents.prototype._objectCtxGet = function(object3d){ return object3d._3xDomEvent; } /********************************************************************************/ /* */ /********************************************************************************/ /** * Getter/Setter for camera */ THREEx.DomEvents.prototype.camera = function(value) { if( value ) this._camera = value; return this._camera; } THREEx.DomEvents.prototype.bind = function(object3d, eventName, callback, useCapture) { console.assert( THREEx.DomEvents.eventNames.indexOf(eventName) !== -1, "not available events:"+eventName ); if( !this._objectCtxIsInit(object3d) ) this._objectCtxInit(object3d); var objectCtx = this._objectCtxGet(object3d); if( !objectCtx[eventName+'Handlers'] ) objectCtx[eventName+'Handlers'] = []; objectCtx[eventName+'Handlers'].push({ callback : callback, useCapture : useCapture }); // add this object in this._boundObjs if( this._boundObjs[eventName] === undefined ){ this._boundObjs[eventName] = []; } this._boundObjs[eventName].push(object3d); } THREEx.DomEvents.prototype.addEventListener = THREEx.DomEvents.prototype.bind THREEx.DomEvents.prototype.unbind = function(object3d, eventName, callback, useCapture) { console.assert( THREEx.DomEvents.eventNames.indexOf(eventName) !== -1, "not available events:"+eventName ); if( !this._objectCtxIsInit(object3d) ) this._objectCtxInit(object3d); var objectCtx = this._objectCtxGet(object3d); if( !objectCtx[eventName+'Handlers'] ) objectCtx[eventName+'Handlers'] = []; var handlers = objectCtx[eventName+'Handlers']; for(var i = 0; i < handlers.length; i++){ var handler = handlers[i]; if( callback != handler.callback ) continue; if( useCapture != handler.useCapture ) continue; handlers.splice(i, 1) break; } // from this object from this._boundObjs var index = this._boundObjs[eventName].indexOf(object3d); console.assert( index !== -1 ); this._boundObjs[eventName].splice(index, 1); } THREEx.DomEvents.prototype.removeEventListener = THREEx.DomEvents.prototype.unbind THREEx.DomEvents.prototype._bound = function(eventName, object3d) { var objectCtx = this._objectCtxGet(object3d); if( !objectCtx ) return false; return objectCtx[eventName+'Handlers'] ? true : false; } /********************************************************************************/ /* onMove */ /********************************************************************************/ // # handle mousemove kind of events THREEx.DomEvents.prototype._onMove = function(eventName, mouseX, mouseY, origDomEvent) { //console.log('eventName', eventName, 'boundObjs', this._boundObjs[eventName]) // get objects bound to this event var boundObjs = this._boundObjs[eventName]; if( boundObjs === undefined || boundObjs.length === 0 ) return; // compute the intersection var vector = new THREE.Vector2(); // update the picking ray with the camera and mouse position vector.set( mouseX, mouseY ); this._raycaster.setFromCamera( vector, this._camera ); var intersects = this._raycaster.intersectObjects( boundObjs ); var oldSelected = this._selected; if( intersects.length > 0 ){ var notifyOver, notifyOut, notifyMove; var intersect = intersects[ 0 ]; var newSelected = intersect.object; this._selected = newSelected; // if newSelected bound mousemove, notify it notifyMove = this._bound('mousemove', newSelected); if( oldSelected != newSelected ){ // if newSelected bound mouseenter, notify it notifyOver = this._bound('mouseover', newSelected); // if there is a oldSelect and oldSelected bound mouseleave, notify it notifyOut = oldSelected && this._bound('mouseout', oldSelected); } }else{ // if there is a oldSelect and oldSelected bound mouseleave, notify it notifyOut = oldSelected && this._bound('mouseout', oldSelected); this._selected = null; } // notify mouseMove - done at the end with a copy of the list to allow callback to remove handlers notifyMove && this._notify('mousemove', newSelected, origDomEvent, intersect); // notify mouseEnter - done at the end with a copy of the list to allow callback to remove handlers notifyOver && this._notify('mouseover', newSelected, origDomEvent, intersect); // notify mouseLeave - done at the end with a copy of the list to allow callback to remove handlers notifyOut && this._notify('mouseout' , oldSelected, origDomEvent, intersect); } /********************************************************************************/ /* onEvent */ /********************************************************************************/ // # handle click kind of events THREEx.DomEvents.prototype._onEvent = function(eventName, mouseX, mouseY, origDomEvent) { //console.log('eventName', eventName, 'boundObjs', this._boundObjs[eventName]) // get objects bound to this event var boundObjs = this._boundObjs[eventName]; if( boundObjs === undefined || boundObjs.length === 0 ) return; // compute the intersection var vector = new THREE.Vector2(); // update the picking ray with the camera and mouse position vector.set( mouseX, mouseY ); this._raycaster.setFromCamera( vector, this._camera ); var intersects = this._raycaster.intersectObjects( boundObjs, true); // if there are no intersections, return now if( intersects.length === 0 ) return; // init some variables var intersect = intersects[0]; var object3d = intersect.object; var objectCtx = this._objectCtxGet(object3d); var objectParent = object3d.parent; while ( typeof(objectCtx) == 'undefined' && objectParent ) { objectCtx = this._objectCtxGet(objectParent); objectParent = objectParent.parent; } if( !objectCtx ) return; // notify handlers this._notify(eventName, object3d, origDomEvent, intersect); } THREEx.DomEvents.prototype._notify = function(eventName, object3d, origDomEvent, intersect) { var objectCtx = this._objectCtxGet(object3d); var handlers = objectCtx ? objectCtx[eventName+'Handlers'] : null; // parameter check console.assert(arguments.length === 4) // do bubbling if( !objectCtx || !handlers || handlers.length === 0 ){ object3d.parent && this._notify(eventName, object3d.parent, origDomEvent, intersect); return; } // notify all handlers var handlers = objectCtx[eventName+'Handlers']; for(var i = 0; i < handlers.length; i++){ var handler = handlers[i]; var toPropagate = true; handler.callback({ type : eventName, target : object3d, origDomEvent : origDomEvent, intersect : intersect, stopPropagation : function(){ toPropagate = false; } }); if( !toPropagate ) continue; // do bubbling if( handler.useCapture === false ){ object3d.parent && this._notify(eventName, object3d.parent, origDomEvent, intersect); } } } /********************************************************************************/ /* handle mouse events */ /********************************************************************************/ // # handle mouse events THREEx.DomEvents.prototype._onMouseDown = function(event){ return this._onMouseEvent('mousedown', event); } THREEx.DomEvents.prototype._onMouseUp = function(event){ return this._onMouseEvent('mouseup' , event); } THREEx.DomEvents.prototype._onMouseEvent = function(eventName, domEvent) { var mouseCoords = this._getRelativeMouseXY(domEvent); this._onEvent(eventName, mouseCoords.x, mouseCoords.y, domEvent); } THREEx.DomEvents.prototype._onMouseMove = function(domEvent) { var mouseCoords = this._getRelativeMouseXY(domEvent); this._onMove('mousemove', mouseCoords.x, mouseCoords.y, domEvent); this._onMove('mouseover', mouseCoords.x, mouseCoords.y, domEvent); this._onMove('mouseout' , mouseCoords.x, mouseCoords.y, domEvent); } THREEx.DomEvents.prototype._onClick = function(event) { // TODO handle touch ? this._onMouseEvent('click' , event); } THREEx.DomEvents.prototype._onDblClick = function(event) { // TODO handle touch ? this._onMouseEvent('dblclick' , event); } THREEx.DomEvents.prototype._onContextmenu = function(event) { //TODO don't have a clue about how this should work with touch.. this._onMouseEvent('contextmenu' , event); } /********************************************************************************/ /* handle touch events */ /********************************************************************************/ // # handle touch events THREEx.DomEvents.prototype._onTouchStart = function(event){ return this._onTouchEvent('touchstart', event); } THREEx.DomEvents.prototype._onTouchEnd = function(event){ return this._onTouchEvent('touchend' , event); } THREEx.DomEvents.prototype._onTouchMove = function(domEvent) { if( domEvent.touches.length != 1 ) return undefined; domEvent.preventDefault(); var mouseX = +(domEvent.touches[ 0 ].pageX / window.innerWidth ) * 2 - 1; var mouseY = -(domEvent.touches[ 0 ].pageY / window.innerHeight) * 2 + 1; this._onMove('mousemove', mouseX, mouseY, domEvent); this._onMove('mouseover', mouseX, mouseY, domEvent); this._onMove('mouseout' , mouseX, mouseY, domEvent); } THREEx.DomEvents.prototype._onTouchEvent = function(eventName, domEvent) { if( domEvent.touches.length != 1 ) return undefined; domEvent.preventDefault(); var mouseX = +(domEvent.touches[ 0 ].pageX / window.innerWidth ) * 2 - 1; var mouseY = -(domEvent.touches[ 0 ].pageY / window.innerHeight) * 2 + 1; this._onEvent(eventName, mouseX, mouseY, domEvent); }
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/afqbrowser/site/client/js/third-party/threex.domevent.js
threex.domevent.js
!function(e,t){"object"==typeof exports&&"object"==typeof module?module.exports=t():"function"==typeof define&&define.amd?define([],t):"object"==typeof exports?exports.dat=t():e.dat=t()}(this,function(){return function(e){function t(o){if(n[o])return n[o].exports;var i=n[o]={exports:{},id:o,loaded:!1};return e[o].call(i.exports,i,i.exports,t),i.loaded=!0,i.exports}var n={};return t.m=e,t.c=n,t.p="",t(0)}([function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}t.__esModule=!0;var i=n(1),r=o(i);t.default=r.default,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}t.__esModule=!0;var i=n(2),r=o(i),a=n(6),l=o(a),s=n(3),u=o(s),d=n(7),c=o(d),f=n(8),_=o(f),p=n(10),h=o(p),m=n(11),b=o(m),g=n(12),v=o(g),y=n(13),w=o(y),x=n(14),E=o(x),C=n(15),A=o(C),S=n(16),k=o(S),O=n(9),T=o(O),R=n(17),L=o(R);t.default={color:{Color:r.default,math:l.default,interpret:u.default},controllers:{Controller:c.default,BooleanController:_.default,OptionController:h.default,StringController:b.default,NumberController:v.default,NumberControllerBox:w.default,NumberControllerSlider:E.default,FunctionController:A.default,ColorController:k.default},dom:{dom:T.default},gui:{GUI:L.default},GUI:L.default},e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function r(e,t,n){Object.defineProperty(e,t,{get:function(){return"RGB"===this.__state.space?this.__state[t]:(h.recalculateRGB(this,t,n),this.__state[t])},set:function(e){"RGB"!==this.__state.space&&(h.recalculateRGB(this,t,n),this.__state.space="RGB"),this.__state[t]=e}})}function a(e,t){Object.defineProperty(e,t,{get:function(){return"HSV"===this.__state.space?this.__state[t]:(h.recalculateHSV(this),this.__state[t])},set:function(e){"HSV"!==this.__state.space&&(h.recalculateHSV(this),this.__state.space="HSV"),this.__state[t]=e}})}t.__esModule=!0;var l=n(3),s=o(l),u=n(6),d=o(u),c=n(4),f=o(c),_=n(5),p=o(_),h=function(){function e(){if(i(this,e),this.__state=s.default.apply(this,arguments),this.__state===!1)throw new Error("Failed to interpret color arguments");this.__state.a=this.__state.a||1}return e.prototype.toString=function(){return(0,f.default)(this)},e.prototype.toHexString=function(){return(0,f.default)(this,!0)},e.prototype.toOriginal=function(){return this.__state.conversion.write(this)},e}();h.recalculateRGB=function(e,t,n){if("HEX"===e.__state.space)e.__state[t]=d.default.component_from_hex(e.__state.hex,n);else{if("HSV"!==e.__state.space)throw new Error("Corrupted color state");p.default.extend(e.__state,d.default.hsv_to_rgb(e.__state.h,e.__state.s,e.__state.v))}},h.recalculateHSV=function(e){var t=d.default.rgb_to_hsv(e.r,e.g,e.b);p.default.extend(e.__state,{s:t.s,v:t.v}),p.default.isNaN(t.h)?p.default.isUndefined(e.__state.h)&&(e.__state.h=0):e.__state.h=t.h},h.COMPONENTS=["r","g","b","h","s","v","hex","a"],r(h.prototype,"r",2),r(h.prototype,"g",1),r(h.prototype,"b",0),a(h.prototype,"h"),a(h.prototype,"s"),a(h.prototype,"v"),Object.defineProperty(h.prototype,"a",{get:function(){return this.__state.a},set:function(e){this.__state.a=e}}),Object.defineProperty(h.prototype,"hex",{get:function(){return"HEX"!==!this.__state.space&&(this.__state.hex=d.default.rgb_to_hex(this.r,this.g,this.b)),this.__state.hex},set:function(e){this.__state.space="HEX",this.__state.hex=e}}),t.default=h,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}t.__esModule=!0;var i=n(4),r=o(i),a=n(5),l=o(a),s=[{litmus:l.default.isString,conversions:{THREE_CHAR_HEX:{read:function(e){var t=e.match(/^#([A-F0-9])([A-F0-9])([A-F0-9])$/i);return null!==t&&{space:"HEX",hex:parseInt("0x"+t[1].toString()+t[1].toString()+t[2].toString()+t[2].toString()+t[3].toString()+t[3].toString(),0)}},write:r.default},SIX_CHAR_HEX:{read:function(e){var t=e.match(/^#([A-F0-9]{6})$/i);return null!==t&&{space:"HEX",hex:parseInt("0x"+t[1].toString(),0)}},write:r.default},CSS_RGB:{read:function(e){var t=e.match(/^rgb\(\s*(.+)\s*,\s*(.+)\s*,\s*(.+)\s*\)/);return null!==t&&{space:"RGB",r:parseFloat(t[1]),g:parseFloat(t[2]),b:parseFloat(t[3])}},write:r.default},CSS_RGBA:{read:function(e){var t=e.match(/^rgba\(\s*(.+)\s*,\s*(.+)\s*,\s*(.+)\s*,\s*(.+)\s*\)/);return null!==t&&{space:"RGB",r:parseFloat(t[1]),g:parseFloat(t[2]),b:parseFloat(t[3]),a:parseFloat(t[4])}},write:r.default}}},{litmus:l.default.isNumber,conversions:{HEX:{read:function(e){return{space:"HEX",hex:e,conversionName:"HEX"}},write:function(e){return e.hex}}}},{litmus:l.default.isArray,conversions:{RGB_ARRAY:{read:function(e){return 3===e.length&&{space:"RGB",r:e[0],g:e[1],b:e[2]}},write:function(e){return[e.r,e.g,e.b]}},RGBA_ARRAY:{read:function(e){return 4===e.length&&{space:"RGB",r:e[0],g:e[1],b:e[2],a:e[3]}},write:function(e){return[e.r,e.g,e.b,e.a]}}}},{litmus:l.default.isObject,conversions:{RGBA_OBJ:{read:function(e){return!!(l.default.isNumber(e.r)&&l.default.isNumber(e.g)&&l.default.isNumber(e.b)&&l.default.isNumber(e.a))&&{space:"RGB",r:e.r,g:e.g,b:e.b,a:e.a}},write:function(e){return{r:e.r,g:e.g,b:e.b,a:e.a}}},RGB_OBJ:{read:function(e){return!!(l.default.isNumber(e.r)&&l.default.isNumber(e.g)&&l.default.isNumber(e.b))&&{space:"RGB",r:e.r,g:e.g,b:e.b}},write:function(e){return{r:e.r,g:e.g,b:e.b}}},HSVA_OBJ:{read:function(e){return!!(l.default.isNumber(e.h)&&l.default.isNumber(e.s)&&l.default.isNumber(e.v)&&l.default.isNumber(e.a))&&{space:"HSV",h:e.h,s:e.s,v:e.v,a:e.a}},write:function(e){return{h:e.h,s:e.s,v:e.v,a:e.a}}},HSV_OBJ:{read:function(e){return!!(l.default.isNumber(e.h)&&l.default.isNumber(e.s)&&l.default.isNumber(e.v))&&{space:"HSV",h:e.h,s:e.s,v:e.v}},write:function(e){return{h:e.h,s:e.s,v:e.v}}}}}],u=void 0,d=void 0,c=function(){d=!1;var e=arguments.length>1?l.default.toArray(arguments):arguments[0];return l.default.each(s,function(t){if(t.litmus(e))return l.default.each(t.conversions,function(t,n){if(u=t.read(e),d===!1&&u!==!1)return d=u,u.conversionName=n,u.conversion=t,l.default.BREAK}),l.default.BREAK}),d};t.default=c,e.exports=t.default},function(e,t){"use strict";t.__esModule=!0,t.default=function(e,t){var n=e.__state.conversionName.toString(),o=Math.round(e.r),i=Math.round(e.g),r=Math.round(e.b),a=e.a,l=Math.round(e.h),s=e.s.toFixed(1),u=e.v.toFixed(1);if(t||"THREE_CHAR_HEX"===n||"SIX_CHAR_HEX"===n){for(var d=e.hex.toString(16);d.length<6;)d="0"+d;return"#"+d}return"CSS_RGB"===n?"rgb("+o+","+i+","+r+")":"CSS_RGBA"===n?"rgba("+o+","+i+","+r+","+a+")":"HEX"===n?"0x"+e.hex.toString(16):"RGB_ARRAY"===n?"["+o+","+i+","+r+"]":"RGBA_ARRAY"===n?"["+o+","+i+","+r+","+a+"]":"RGB_OBJ"===n?"{r:"+o+",g:"+i+",b:"+r+"}":"RGBA_OBJ"===n?"{r:"+o+",g:"+i+",b:"+r+",a:"+a+"}":"HSV_OBJ"===n?"{h:"+l+",s:"+s+",v:"+u+"}":"HSVA_OBJ"===n?"{h:"+l+",s:"+s+",v:"+u+",a:"+a+"}":"unknown format"},e.exports=t.default},function(e,t){"use strict";t.__esModule=!0;var n=Array.prototype.forEach,o=Array.prototype.slice,i={BREAK:{},extend:function(e){return this.each(o.call(arguments,1),function(t){var n=this.isObject(t)?Object.keys(t):[];n.forEach(function(n){this.isUndefined(t[n])||(e[n]=t[n])}.bind(this))},this),e},defaults:function(e){return this.each(o.call(arguments,1),function(t){var n=this.isObject(t)?Object.keys(t):[];n.forEach(function(n){this.isUndefined(e[n])&&(e[n]=t[n])}.bind(this))},this),e},compose:function(){var e=o.call(arguments);return function(){for(var t=o.call(arguments),n=e.length-1;n>=0;n--)t=[e[n].apply(this,t)];return t[0]}},each:function(e,t,o){if(e)if(n&&e.forEach&&e.forEach===n)e.forEach(t,o);else if(e.length===e.length+0){var i=void 0,r=void 0;for(i=0,r=e.length;i<r;i++)if(i in e&&t.call(o,e[i],i)===this.BREAK)return}else for(var a in e)if(t.call(o,e[a],a)===this.BREAK)return},defer:function(e){setTimeout(e,0)},debounce:function(e,t,n){var o=void 0;return function(){function i(){o=null,n||e.apply(r,a)}var r=this,a=arguments,l=n||!o;clearTimeout(o),o=setTimeout(i,t),l&&e.apply(r,a)}},toArray:function(e){return e.toArray?e.toArray():o.call(e)},isUndefined:function(e){return void 0===e},isNull:function(e){return null===e},isNaN:function(e){function t(t){return e.apply(this,arguments)}return t.toString=function(){return e.toString()},t}(function(e){return isNaN(e)}),isArray:Array.isArray||function(e){return e.constructor===Array},isObject:function(e){return e===Object(e)},isNumber:function(e){return e===e+0},isString:function(e){return e===e+""},isBoolean:function(e){return e===!1||e===!0},isFunction:function(e){return"[object Function]"===Object.prototype.toString.call(e)}};t.default=i,e.exports=t.default},function(e,t){"use strict";t.__esModule=!0;var n=void 0,o={hsv_to_rgb:function(e,t,n){var o=Math.floor(e/60)%6,i=e/60-Math.floor(e/60),r=n*(1-t),a=n*(1-i*t),l=n*(1-(1-i)*t),s=[[n,l,r],[a,n,r],[r,n,l],[r,a,n],[l,r,n],[n,r,a]][o];return{r:255*s[0],g:255*s[1],b:255*s[2]}},rgb_to_hsv:function(e,t,n){var o=Math.min(e,t,n),i=Math.max(e,t,n),r=i-o,a=void 0,l=void 0;return 0===i?{h:NaN,s:0,v:0}:(l=r/i,a=e===i?(t-n)/r:t===i?2+(n-e)/r:4+(e-t)/r,a/=6,a<0&&(a+=1),{h:360*a,s:l,v:i/255})},rgb_to_hex:function(e,t,n){var o=this.hex_with_component(0,2,e);return o=this.hex_with_component(o,1,t),o=this.hex_with_component(o,0,n)},component_from_hex:function(e,t){return e>>8*t&255},hex_with_component:function(e,t,o){return o<<(n=8*t)|e&~(255<<n)}};t.default=o,e.exports=t.default},function(e,t){"use strict";function n(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}t.__esModule=!0;var o=function(){function e(t,o){n(this,e),this.initialValue=t[o],this.domElement=document.createElement("div"),this.object=t,this.property=o,this.__onChange=void 0,this.__onFinishChange=void 0}return e.prototype.onChange=function(e){return this.__onChange=e,this},e.prototype.onFinishChange=function(e){return this.__onFinishChange=e,this},e.prototype.setValue=function(e){return this.object[this.property]=e,this.__onChange&&this.__onChange.call(this,e),this.updateDisplay(),this},e.prototype.getValue=function(){return this.object[this.property]},e.prototype.updateDisplay=function(){return this},e.prototype.isModified=function(){return this.initialValue!==this.getValue()},e}();t.default=o,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function r(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}t.__esModule=!0;var l=n(7),s=o(l),u=n(9),d=o(u),c=function(e){function t(n,o){function a(){s.setValue(!s.__prev)}i(this,t);var l=r(this,e.call(this,n,o)),s=l;return l.__prev=l.getValue(),l.__checkbox=document.createElement("input"),l.__checkbox.setAttribute("type","checkbox"),d.default.bind(l.__checkbox,"change",a,!1),l.domElement.appendChild(l.__checkbox),l.updateDisplay(),l}return a(t,e),t.prototype.setValue=function(t){var n=e.prototype.setValue.call(this,t);return this.__onFinishChange&&this.__onFinishChange.call(this,this.getValue()),this.__prev=this.getValue(),n},t.prototype.updateDisplay=function(){return this.getValue()===!0?(this.__checkbox.setAttribute("checked","checked"),this.__checkbox.checked=!0,this.__prev=!0):(this.__checkbox.checked=!1,this.__prev=!1),e.prototype.updateDisplay.call(this)},t}(s.default);t.default=c,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e){if("0"===e||a.default.isUndefined(e))return 0;var t=e.match(u);return a.default.isNull(t)?0:parseFloat(t[1])}t.__esModule=!0;var r=n(5),a=o(r),l={HTMLEvents:["change"],MouseEvents:["click","mousemove","mousedown","mouseup","mouseover"],KeyboardEvents:["keydown"]},s={};a.default.each(l,function(e,t){a.default.each(e,function(e){s[e]=t})});var u=/(\d+(\.\d+)?)px/,d={makeSelectable:function(e,t){void 0!==e&&void 0!==e.style&&(e.onselectstart=t?function(){return!1}:function(){},e.style.MozUserSelect=t?"auto":"none",e.style.KhtmlUserSelect=t?"auto":"none",e.unselectable=t?"on":"off")},makeFullscreen:function(e,t,n){var o=n,i=t;a.default.isUndefined(i)&&(i=!0),a.default.isUndefined(o)&&(o=!0),e.style.position="absolute",i&&(e.style.left=0,e.style.right=0),o&&(e.style.top=0,e.style.bottom=0)},fakeEvent:function(e,t,n,o){var i=n||{},r=s[t];if(!r)throw new Error("Event type "+t+" not supported.");var l=document.createEvent(r);switch(r){case"MouseEvents":var u=i.x||i.clientX||0,d=i.y||i.clientY||0;l.initMouseEvent(t,i.bubbles||!1,i.cancelable||!0,window,i.clickCount||1,0,0,u,d,!1,!1,!1,!1,0,null);break;case"KeyboardEvents":var c=l.initKeyboardEvent||l.initKeyEvent;a.default.defaults(i,{cancelable:!0,ctrlKey:!1,altKey:!1,shiftKey:!1,metaKey:!1,keyCode:void 0,charCode:void 0}),c(t,i.bubbles||!1,i.cancelable,window,i.ctrlKey,i.altKey,i.shiftKey,i.metaKey,i.keyCode,i.charCode);break;default:l.initEvent(t,i.bubbles||!1,i.cancelable||!0)}a.default.defaults(l,o),e.dispatchEvent(l)},bind:function(e,t,n,o){var i=o||!1;return e.addEventListener?e.addEventListener(t,n,i):e.attachEvent&&e.attachEvent("on"+t,n),d},unbind:function(e,t,n,o){var i=o||!1;return e.removeEventListener?e.removeEventListener(t,n,i):e.detachEvent&&e.detachEvent("on"+t,n),d},addClass:function(e,t){if(void 0===e.className)e.className=t;else if(e.className!==t){var n=e.className.split(/ +/);n.indexOf(t)===-1&&(n.push(t),e.className=n.join(" ").replace(/^\s+/,"").replace(/\s+$/,""))}return d},removeClass:function(e,t){if(t)if(e.className===t)e.removeAttribute("class");else{var n=e.className.split(/ +/),o=n.indexOf(t);o!==-1&&(n.splice(o,1),e.className=n.join(" "))}else e.className=void 0;return d},hasClass:function(e,t){return new RegExp("(?:^|\\s+)"+t+"(?:\\s+|$)").test(e.className)||!1},getWidth:function(e){var t=getComputedStyle(e);return i(t["border-left-width"])+i(t["border-right-width"])+i(t["padding-left"])+i(t["padding-right"])+i(t.width)},getHeight:function(e){var t=getComputedStyle(e);return i(t["border-top-width"])+i(t["border-bottom-width"])+i(t["padding-top"])+i(t["padding-bottom"])+i(t.height)},getOffset:function(e){var t=e,n={left:0,top:0};if(t.offsetParent)do n.left+=t.offsetLeft,n.top+=t.offsetTop,t=t.offsetParent;while(t);return n},isActive:function(e){return e===document.activeElement&&(e.type||e.href)}};t.default=d,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function r(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}t.__esModule=!0;var l=n(7),s=o(l),u=n(9),d=o(u),c=n(5),f=o(c),_=function(e){function t(n,o,a){i(this,t);var l=r(this,e.call(this,n,o)),s=a,u=l;if(l.__select=document.createElement("select"),f.default.isArray(s)){var c={};f.default.each(s,function(e){c[e]=e}),s=c}return f.default.each(s,function(e,t){var n=document.createElement("option");n.innerHTML=t,n.setAttribute("value",e),u.__select.appendChild(n)}),l.updateDisplay(),d.default.bind(l.__select,"change",function(){var e=this.options[this.selectedIndex].value;u.setValue(e)}),l.domElement.appendChild(l.__select),l}return a(t,e),t.prototype.setValue=function(t){var n=e.prototype.setValue.call(this,t);return this.__onFinishChange&&this.__onFinishChange.call(this,this.getValue()),n},t.prototype.updateDisplay=function(){return d.default.isActive(this.__select)?this:(this.__select.value=this.getValue(),e.prototype.updateDisplay.call(this))},t}(s.default);t.default=_,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function r(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}t.__esModule=!0;var l=n(7),s=o(l),u=n(9),d=o(u),c=function(e){function t(n,o){function a(){u.setValue(u.__input.value)}function l(){u.__onFinishChange&&u.__onFinishChange.call(u,u.getValue())}i(this,t);var s=r(this,e.call(this,n,o)),u=s;return s.__input=document.createElement("input"),s.__input.setAttribute("type","text"),d.default.bind(s.__input,"keyup",a),d.default.bind(s.__input,"change",a),d.default.bind(s.__input,"blur",l),d.default.bind(s.__input,"keydown",function(e){13===e.keyCode&&this.blur()}),s.updateDisplay(),s.domElement.appendChild(s.__input),s}return a(t,e),t.prototype.updateDisplay=function(){return d.default.isActive(this.__input)||(this.__input.value=this.getValue()),e.prototype.updateDisplay.call(this)},t}(s.default);t.default=c,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function r(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}function l(e){var t=e.toString();return t.indexOf(".")>-1?t.length-t.indexOf(".")-1:0}t.__esModule=!0;var s=n(7),u=o(s),d=n(5),c=o(d),f=function(e){function t(n,o,a){i(this,t);var s=r(this,e.call(this,n,o)),u=a||{};return s.__min=u.min,s.__max=u.max,s.__step=u.step,c.default.isUndefined(s.__step)?0===s.initialValue?s.__impliedStep=1:s.__impliedStep=Math.pow(10,Math.floor(Math.log(Math.abs(s.initialValue))/Math.LN10))/10:s.__impliedStep=s.__step,s.__precision=l(s.__impliedStep),s}return a(t,e),t.prototype.setValue=function(t){var n=t;return void 0!==this.__min&&n<this.__min?n=this.__min:void 0!==this.__max&&n>this.__max&&(n=this.__max),void 0!==this.__step&&n%this.__step!==0&&(n=Math.round(n/this.__step)*this.__step),e.prototype.setValue.call(this,n)},t.prototype.min=function(e){return this.__min=e,this},t.prototype.max=function(e){return this.__max=e,this},t.prototype.step=function(e){return this.__step=e,this.__impliedStep=e,this.__precision=l(e),this},t}(u.default);t.default=f,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function r(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}function l(e,t){var n=Math.pow(10,t);return Math.round(e*n)/n}t.__esModule=!0;var s=n(12),u=o(s),d=n(9),c=o(d),f=n(5),_=o(f),p=function(e){function t(n,o,a){function l(){var e=parseFloat(m.__input.value);_.default.isNaN(e)||m.setValue(e)}function s(){m.__onFinishChange&&m.__onFinishChange.call(m,m.getValue())}function u(){s()}function d(e){var t=b-e.clientY;m.setValue(m.getValue()+t*m.__impliedStep),b=e.clientY}function f(){c.default.unbind(window,"mousemove",d),c.default.unbind(window,"mouseup",f),s()}function p(e){c.default.bind(window,"mousemove",d),c.default.bind(window,"mouseup",f),b=e.clientY}i(this,t);var h=r(this,e.call(this,n,o,a));h.__truncationSuspended=!1;var m=h,b=void 0;return h.__input=document.createElement("input"),h.__input.setAttribute("type","text"),c.default.bind(h.__input,"change",l),c.default.bind(h.__input,"blur",u),c.default.bind(h.__input,"mousedown",p),c.default.bind(h.__input,"keydown",function(e){13===e.keyCode&&(m.__truncationSuspended=!0,this.blur(),m.__truncationSuspended=!1,s())}),h.updateDisplay(),h.domElement.appendChild(h.__input),h}return a(t,e),t.prototype.updateDisplay=function(){return this.__input.value=this.__truncationSuspended?this.getValue():l(this.getValue(),this.__precision),e.prototype.updateDisplay.call(this)},t}(u.default);t.default=p,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function r(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}function l(e,t,n,o,i){return o+(i-o)*((e-t)/(n-t))}t.__esModule=!0;var s=n(12),u=o(s),d=n(9),c=o(d),f=function(e){function t(n,o,a,s,u){function d(e){document.activeElement.blur(),c.default.bind(window,"mousemove",f),c.default.bind(window,"mouseup",_),f(e)}function f(e){e.preventDefault();var t=h.__background.getBoundingClientRect();return h.setValue(l(e.clientX,t.left,t.right,h.__min,h.__max)),!1}function _(){c.default.unbind(window,"mousemove",f),c.default.unbind(window,"mouseup",_),h.__onFinishChange&&h.__onFinishChange.call(h,h.getValue())}i(this,t);var p=r(this,e.call(this,n,o,{min:a,max:s,step:u})),h=p;return p.__background=document.createElement("div"),p.__foreground=document.createElement("div"),c.default.bind(p.__background,"mousedown",d),c.default.addClass(p.__background,"slider"),c.default.addClass(p.__foreground,"slider-fg"),p.updateDisplay(),p.__background.appendChild(p.__foreground),p.domElement.appendChild(p.__background),p}return a(t,e),t.prototype.updateDisplay=function(){var t=(this.getValue()-this.__min)/(this.__max-this.__min);return this.__foreground.style.width=100*t+"%",e.prototype.updateDisplay.call(this)},t}(u.default);t.default=f,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function r(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}t.__esModule=!0;var l=n(7),s=o(l),u=n(9),d=o(u),c=function(e){function t(n,o,a){i(this,t);var l=r(this,e.call(this,n,o)),s=l;return l.__button=document.createElement("div"),l.__button.innerHTML=void 0===a?"Fire":a,d.default.bind(l.__button,"click",function(e){return e.preventDefault(),s.fire(),!1}),d.default.addClass(l.__button,"button"),l.domElement.appendChild(l.__button),l}return a(t,e),t.prototype.fire=function(){this.__onChange&&this.__onChange.call(this),this.getValue().call(this.object),this.__onFinishChange&&this.__onFinishChange.call(this,this.getValue())},t}(s.default);t.default=c,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function r(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}function l(e,t,n,o){e.style.background="",g.default.each(y,function(i){e.style.cssText+="background: "+i+"linear-gradient("+t+", "+n+" 0%, "+o+" 100%); "})}function s(e){e.style.background="",e.style.cssText+="background: -moz-linear-gradient(top, #ff0000 0%, #ff00ff 17%, #0000ff 34%, #00ffff 50%, #00ff00 67%, #ffff00 84%, #ff0000 100%);",e.style.cssText+="background: -webkit-linear-gradient(top, #ff0000 0%,#ff00ff 17%,#0000ff 34%,#00ffff 50%,#00ff00 67%,#ffff00 84%,#ff0000 100%);",e.style.cssText+="background: -o-linear-gradient(top, #ff0000 0%,#ff00ff 17%,#0000ff 34%,#00ffff 50%,#00ff00 67%,#ffff00 84%,#ff0000 100%);",e.style.cssText+="background: -ms-linear-gradient(top, #ff0000 0%,#ff00ff 17%,#0000ff 34%,#00ffff 50%,#00ff00 67%,#ffff00 84%,#ff0000 100%);",e.style.cssText+="background: linear-gradient(top, #ff0000 0%,#ff00ff 17%,#0000ff 34%,#00ffff 50%,#00ff00 67%,#ffff00 84%,#ff0000 100%);"}t.__esModule=!0;var u=n(7),d=o(u),c=n(9),f=o(c),_=n(2),p=o(_),h=n(3),m=o(h),b=n(5),g=o(b),v=function(e){function t(n,o){function a(e){h(e),f.default.bind(window,"mousemove",h),f.default.bind(window,"mouseup",u)}function u(){f.default.unbind(window,"mousemove",h),f.default.unbind(window,"mouseup",u),_()}function d(){var e=(0,m.default)(this.value);e!==!1?(y.__color.__state=e,y.setValue(y.__color.toOriginal())):this.value=y.__color.toString()}function c(){f.default.unbind(window,"mousemove",b),f.default.unbind(window,"mouseup",c),_()}function _(){y.__onFinishChange&&y.__onFinishChange.call(y,y.__color.toOriginal())}function h(e){e.preventDefault();var t=y.__saturation_field.getBoundingClientRect(),n=(e.clientX-t.left)/(t.right-t.left),o=1-(e.clientY-t.top)/(t.bottom-t.top);return o>1?o=1:o<0&&(o=0),n>1?n=1:n<0&&(n=0),y.__color.v=o,y.__color.s=n,y.setValue(y.__color.toOriginal()),!1}function b(e){e.preventDefault();var t=y.__hue_field.getBoundingClientRect(),n=1-(e.clientY-t.top)/(t.bottom-t.top);return n>1?n=1:n<0&&(n=0),y.__color.h=360*n,y.setValue(y.__color.toOriginal()),!1}i(this,t);var v=r(this,e.call(this,n,o));v.__color=new p.default(v.getValue()),v.__temp=new p.default(0);var y=v;v.domElement=document.createElement("div"),f.default.makeSelectable(v.domElement,!1),v.__selector=document.createElement("div"),v.__selector.className="selector",v.__saturation_field=document.createElement("div"),v.__saturation_field.className="saturation-field",v.__field_knob=document.createElement("div"),v.__field_knob.className="field-knob",v.__field_knob_border="2px solid ",v.__hue_knob=document.createElement("div"),v.__hue_knob.className="hue-knob",v.__hue_field=document.createElement("div"),v.__hue_field.className="hue-field",v.__input=document.createElement("input"),v.__input.type="text",v.__input_textShadow="0 1px 1px ",f.default.bind(v.__input,"keydown",function(e){13===e.keyCode&&d.call(this)}),f.default.bind(v.__input,"blur",d),f.default.bind(v.__selector,"mousedown",function(){f.default.addClass(this,"drag").bind(window,"mouseup",function(){f.default.removeClass(y.__selector,"drag")})});var w=document.createElement("div");return g.default.extend(v.__selector.style,{width:"122px",height:"102px",padding:"3px",backgroundColor:"#222",boxShadow:"0px 1px 3px rgba(0,0,0,0.3)"}),g.default.extend(v.__field_knob.style,{position:"absolute",width:"12px",height:"12px",border:v.__field_knob_border+(v.__color.v<.5?"#fff":"#000"),boxShadow:"0px 1px 3px rgba(0,0,0,0.5)",borderRadius:"12px",zIndex:1}),g.default.extend(v.__hue_knob.style,{position:"absolute",width:"15px",height:"2px",borderRight:"4px solid #fff",zIndex:1}),g.default.extend(v.__saturation_field.style,{width:"100px",height:"100px",border:"1px solid #555",marginRight:"3px",display:"inline-block",cursor:"pointer"}),g.default.extend(w.style,{width:"100%",height:"100%",background:"none"}),l(w,"top","rgba(0,0,0,0)","#000"),g.default.extend(v.__hue_field.style,{width:"15px",height:"100px",border:"1px solid #555",cursor:"ns-resize",position:"absolute",top:"3px",right:"3px"}),s(v.__hue_field),g.default.extend(v.__input.style,{outline:"none",textAlign:"center",color:"#fff",border:0,fontWeight:"bold",textShadow:v.__input_textShadow+"rgba(0,0,0,0.7)"}),f.default.bind(v.__saturation_field,"mousedown",a),f.default.bind(v.__field_knob,"mousedown",a),f.default.bind(v.__hue_field,"mousedown",function(e){b(e),f.default.bind(window,"mousemove",b),f.default.bind(window,"mouseup",c)}),v.__saturation_field.appendChild(w),v.__selector.appendChild(v.__field_knob),v.__selector.appendChild(v.__saturation_field),v.__selector.appendChild(v.__hue_field),v.__hue_field.appendChild(v.__hue_knob),v.domElement.appendChild(v.__input),v.domElement.appendChild(v.__selector),v.updateDisplay(),v}return a(t,e),t.prototype.updateDisplay=function(){var e=(0,m.default)(this.getValue());if(e!==!1){var t=!1;g.default.each(p.default.COMPONENTS,function(n){if(!g.default.isUndefined(e[n])&&!g.default.isUndefined(this.__color.__state[n])&&e[n]!==this.__color.__state[n])return t=!0,{}},this),t&&g.default.extend(this.__color.__state,e)}g.default.extend(this.__temp.__state,this.__color.__state),this.__temp.a=1;var n=this.__color.v<.5||this.__color.s>.5?255:0,o=255-n;g.default.extend(this.__field_knob.style,{marginLeft:100*this.__color.s-7+"px",marginTop:100*(1-this.__color.v)-7+"px",backgroundColor:this.__temp.toHexString(),border:this.__field_knob_border+"rgb("+n+","+n+","+n+")"}),this.__hue_knob.style.marginTop=100*(1-this.__color.h/360)+"px",this.__temp.s=1,this.__temp.v=1,l(this.__saturation_field,"left","#fff",this.__temp.toHexString()),this.__input.value=this.__color.toString(),g.default.extend(this.__input.style,{backgroundColor:this.__color.toHexString(),color:"rgb("+n+","+n+","+n+")",textShadow:this.__input_textShadow+"rgba("+o+","+o+","+o+",.7)"})},t}(d.default),y=["-moz-","-o-","-webkit-","-ms-",""];t.default=v,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t,n){var o=document.createElement("li");return t&&o.appendChild(t),n?e.__ul.insertBefore(o,n):e.__ul.appendChild(o),e.onResize(),o}function r(e,t){var n=e.__preset_select[e.__preset_select.selectedIndex];t?n.innerHTML=n.value+"*":n.innerHTML=n.value}function a(e,t,n){if(n.__li=t,n.__gui=e,U.default.extend(n,{options:function(t){if(arguments.length>1){var o=n.__li.nextElementSibling;return n.remove(),s(e,n.object,n.property,{before:o,factoryArgs:[U.default.toArray(arguments)]})}if(U.default.isArray(t)||U.default.isObject(t)){var i=n.__li.nextElementSibling;return n.remove(),s(e,n.object,n.property,{before:i,factoryArgs:[t]})}},name:function(e){return n.__li.firstElementChild.firstElementChild.innerHTML=e,n},listen:function(){return n.__gui.listen(n),n},remove:function(){return n.__gui.remove(n),n}}),n instanceof N.default){var o=new B.default(n.object,n.property,{ min:n.__min,max:n.__max,step:n.__step});U.default.each(["updateDisplay","onChange","onFinishChange","step"],function(e){var t=n[e],i=o[e];n[e]=o[e]=function(){var e=Array.prototype.slice.call(arguments);return i.apply(o,e),t.apply(n,e)}}),z.default.addClass(t,"has-slider"),n.domElement.insertBefore(o.domElement,n.domElement.firstElementChild)}else if(n instanceof B.default){var i=function(t){if(U.default.isNumber(n.__min)&&U.default.isNumber(n.__max)){var o=n.__li.firstElementChild.firstElementChild.innerHTML,i=n.__gui.__listening.indexOf(n)>-1;n.remove();var r=s(e,n.object,n.property,{before:n.__li.nextElementSibling,factoryArgs:[n.__min,n.__max,n.__step]});return r.name(o),i&&r.listen(),r}return t};n.min=U.default.compose(i,n.min),n.max=U.default.compose(i,n.max)}else n instanceof O.default?(z.default.bind(t,"click",function(){z.default.fakeEvent(n.__checkbox,"click")}),z.default.bind(n.__checkbox,"click",function(e){e.stopPropagation()})):n instanceof R.default?(z.default.bind(t,"click",function(){z.default.fakeEvent(n.__button,"click")}),z.default.bind(t,"mouseover",function(){z.default.addClass(n.__button,"hover")}),z.default.bind(t,"mouseout",function(){z.default.removeClass(n.__button,"hover")})):n instanceof j.default&&(z.default.addClass(t,"color"),n.updateDisplay=U.default.compose(function(e){return t.style.borderLeftColor=n.__color.toString(),e},n.updateDisplay),n.updateDisplay());n.setValue=U.default.compose(function(t){return e.getRoot().__preset_select&&n.isModified()&&r(e.getRoot(),!0),t},n.setValue)}function l(e,t){var n=e.getRoot(),o=n.__rememberedObjects.indexOf(t.object);if(o!==-1){var i=n.__rememberedObjectIndecesToControllers[o];if(void 0===i&&(i={},n.__rememberedObjectIndecesToControllers[o]=i),i[t.property]=t,n.load&&n.load.remembered){var r=n.load.remembered,a=void 0;if(r[e.preset])a=r[e.preset];else{if(!r[Q])return;a=r[Q]}if(a[o]&&void 0!==a[o][t.property]){var l=a[o][t.property];t.initialValue=l,t.setValue(l)}}}}function s(e,t,n,o){if(void 0===t[n])throw new Error('Object "'+t+'" has no property "'+n+'"');var r=void 0;if(o.color)r=new j.default(t,n);else{var s=[t,n].concat(o.factoryArgs);r=C.default.apply(e,s)}o.before instanceof S.default&&(o.before=o.before.__li),l(e,r),z.default.addClass(r.domElement,"c");var u=document.createElement("span");z.default.addClass(u,"property-name"),u.innerHTML=r.property;var d=document.createElement("div");d.appendChild(u),d.appendChild(r.domElement);var c=i(e,d,o.before);return z.default.addClass(c,oe.CLASS_CONTROLLER_ROW),r instanceof j.default?z.default.addClass(c,"color"):z.default.addClass(c,g(r.getValue())),a(e,c,r),e.__controllers.push(r),r}function u(e,t){return document.location.href+"."+t}function d(e,t,n){var o=document.createElement("option");o.innerHTML=t,o.value=t,e.__preset_select.appendChild(o),n&&(e.__preset_select.selectedIndex=e.__preset_select.length-1)}function c(e,t){t.style.display=e.useLocalStorage?"block":"none"}function f(e){var t=e.__save_row=document.createElement("li");z.default.addClass(e.domElement,"has-save"),e.__ul.insertBefore(t,e.__ul.firstChild),z.default.addClass(t,"save-row");var n=document.createElement("span");n.innerHTML="&nbsp;",z.default.addClass(n,"button gears");var o=document.createElement("span");o.innerHTML="Save",z.default.addClass(o,"button"),z.default.addClass(o,"save");var i=document.createElement("span");i.innerHTML="New",z.default.addClass(i,"button"),z.default.addClass(i,"save-as");var r=document.createElement("span");r.innerHTML="Revert",z.default.addClass(r,"button"),z.default.addClass(r,"revert");var a=e.__preset_select=document.createElement("select");if(e.load&&e.load.remembered?U.default.each(e.load.remembered,function(t,n){d(e,n,n===e.preset)}):d(e,Q,!1),z.default.bind(a,"change",function(){for(var t=0;t<e.__preset_select.length;t++)e.__preset_select[t].innerHTML=e.__preset_select[t].value;e.preset=this.value}),t.appendChild(a),t.appendChild(n),t.appendChild(o),t.appendChild(i),t.appendChild(r),q){var l=document.getElementById("dg-local-explain"),s=document.getElementById("dg-local-storage"),f=document.getElementById("dg-save-locally");f.style.display="block","true"===localStorage.getItem(u(e,"isLocal"))&&s.setAttribute("checked","checked"),c(e,l),z.default.bind(s,"change",function(){e.useLocalStorage=!e.useLocalStorage,c(e,l)})}var _=document.getElementById("dg-new-constructor");z.default.bind(_,"keydown",function(e){!e.metaKey||67!==e.which&&67!==e.keyCode||Z.hide()}),z.default.bind(n,"click",function(){_.innerHTML=JSON.stringify(e.getSaveObject(),void 0,2),Z.show(),_.focus(),_.select()}),z.default.bind(o,"click",function(){e.save()}),z.default.bind(i,"click",function(){var t=prompt("Enter a new preset name.");t&&e.saveAs(t)}),z.default.bind(r,"click",function(){e.revert()})}function _(e){function t(t){return t.preventDefault(),e.width+=i-t.clientX,e.onResize(),i=t.clientX,!1}function n(){z.default.removeClass(e.__closeButton,oe.CLASS_DRAG),z.default.unbind(window,"mousemove",t),z.default.unbind(window,"mouseup",n)}function o(o){return o.preventDefault(),i=o.clientX,z.default.addClass(e.__closeButton,oe.CLASS_DRAG),z.default.bind(window,"mousemove",t),z.default.bind(window,"mouseup",n),!1}var i=void 0;e.__resize_handle=document.createElement("div"),U.default.extend(e.__resize_handle.style,{width:"6px",marginLeft:"-3px",height:"200px",cursor:"ew-resize",position:"absolute"}),z.default.bind(e.__resize_handle,"mousedown",o),z.default.bind(e.__closeButton,"mousedown",o),e.domElement.insertBefore(e.__resize_handle,e.domElement.firstElementChild)}function p(e,t){e.domElement.style.width=t+"px",e.__save_row&&e.autoPlace&&(e.__save_row.style.width=t+"px"),e.__closeButton&&(e.__closeButton.style.width=t+"px")}function h(e,t){var n={};return U.default.each(e.__rememberedObjects,function(o,i){var r={},a=e.__rememberedObjectIndecesToControllers[i];U.default.each(a,function(e,n){r[n]=t?e.initialValue:e.getValue()}),n[i]=r}),n}function m(e){for(var t=0;t<e.__preset_select.length;t++)e.__preset_select[t].value===e.preset&&(e.__preset_select.selectedIndex=t)}function b(e){0!==e.length&&D.default.call(window,function(){b(e)}),U.default.each(e,function(e){e.updateDisplay()})}t.__esModule=!0;var g="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},v=n(18),y=o(v),w=n(19),x=o(w),E=n(20),C=o(E),A=n(7),S=o(A),k=n(8),O=o(k),T=n(15),R=o(T),L=n(13),B=o(L),M=n(14),N=o(M),H=n(16),j=o(H),P=n(21),D=o(P),V=n(22),F=o(V),I=n(9),z=o(I),G=n(5),U=o(G),X=n(23),K=o(X);y.default.inject(K.default);var Y="dg",J=72,W=20,Q="Default",q=function(){try{return"localStorage"in window&&null!==window.localStorage}catch(e){return!1}}(),Z=void 0,$=!0,ee=void 0,te=!1,ne=[],oe=function e(t){function n(){var e=o.getRoot();e.width+=1,U.default.defer(function(){e.width-=1})}var o=this,r=t||{};this.domElement=document.createElement("div"),this.__ul=document.createElement("ul"),this.domElement.appendChild(this.__ul),z.default.addClass(this.domElement,Y),this.__folders={},this.__controllers=[],this.__rememberedObjects=[],this.__rememberedObjectIndecesToControllers=[],this.__listening=[],r=U.default.defaults(r,{closeOnTop:!1,autoPlace:!0,width:e.DEFAULT_WIDTH}),r=U.default.defaults(r,{resizable:r.autoPlace,hideable:r.autoPlace}),U.default.isUndefined(r.load)?r.load={preset:Q}:r.preset&&(r.load.preset=r.preset),U.default.isUndefined(r.parent)&&r.hideable&&ne.push(this),r.resizable=U.default.isUndefined(r.parent)&&r.resizable,r.autoPlace&&U.default.isUndefined(r.scrollable)&&(r.scrollable=!0);var a=q&&"true"===localStorage.getItem(u(this,"isLocal")),l=void 0;if(Object.defineProperties(this,{parent:{get:function(){return r.parent}},scrollable:{get:function(){return r.scrollable}},autoPlace:{get:function(){return r.autoPlace}},closeOnTop:{get:function(){return r.closeOnTop}},preset:{get:function(){return o.parent?o.getRoot().preset:r.load.preset},set:function(e){o.parent?o.getRoot().preset=e:r.load.preset=e,m(this),o.revert()}},width:{get:function(){return r.width},set:function(e){r.width=e,p(o,e)}},name:{get:function(){return r.name},set:function(e){r.name=e,titleRowName&&(titleRowName.innerHTML=r.name)}},closed:{get:function(){return r.closed},set:function(t){r.closed=t,r.closed?z.default.addClass(o.__ul,e.CLASS_CLOSED):z.default.removeClass(o.__ul,e.CLASS_CLOSED),this.onResize(),o.__closeButton&&(o.__closeButton.innerHTML=t?e.TEXT_OPEN:e.TEXT_CLOSED)}},load:{get:function(){return r.load}},useLocalStorage:{get:function(){return a},set:function(e){q&&(a=e,e?z.default.bind(window,"unload",l):z.default.unbind(window,"unload",l),localStorage.setItem(u(o,"isLocal"),e))}}}),U.default.isUndefined(r.parent)){if(r.closed=!1,z.default.addClass(this.domElement,e.CLASS_MAIN),z.default.makeSelectable(this.domElement,!1),q&&a){o.useLocalStorage=!0;var s=localStorage.getItem(u(this,"gui"));s&&(r.load=JSON.parse(s))}this.__closeButton=document.createElement("div"),this.__closeButton.innerHTML=e.TEXT_CLOSED,z.default.addClass(this.__closeButton,e.CLASS_CLOSE_BUTTON),r.closeOnTop?(z.default.addClass(this.__closeButton,e.CLASS_CLOSE_TOP),this.domElement.insertBefore(this.__closeButton,this.domElement.childNodes[0])):(z.default.addClass(this.__closeButton,e.CLASS_CLOSE_BOTTOM),this.domElement.appendChild(this.__closeButton)),z.default.bind(this.__closeButton,"click",function(){o.closed=!o.closed})}else{void 0===r.closed&&(r.closed=!0);var d=document.createTextNode(r.name);z.default.addClass(d,"controller-name");var c=i(o,d),f=function(e){return e.preventDefault(),o.closed=!o.closed,!1};z.default.addClass(this.__ul,e.CLASS_CLOSED),z.default.addClass(c,"title"),z.default.bind(c,"click",f),r.closed||(this.closed=!1)}r.autoPlace&&(U.default.isUndefined(r.parent)&&($&&(ee=document.createElement("div"),z.default.addClass(ee,Y),z.default.addClass(ee,e.CLASS_AUTO_PLACE_CONTAINER),document.body.appendChild(ee),$=!1),ee.appendChild(this.domElement),z.default.addClass(this.domElement,e.CLASS_AUTO_PLACE)),this.parent||p(o,r.width)),this.__resizeHandler=function(){o.onResizeDebounced()},z.default.bind(window,"resize",this.__resizeHandler),z.default.bind(this.__ul,"webkitTransitionEnd",this.__resizeHandler),z.default.bind(this.__ul,"transitionend",this.__resizeHandler),z.default.bind(this.__ul,"oTransitionEnd",this.__resizeHandler),this.onResize(),r.resizable&&_(this),l=function(){q&&"true"===localStorage.getItem(u(o,"isLocal"))&&localStorage.setItem(u(o,"gui"),JSON.stringify(o.getSaveObject()))},this.saveToLocalStorageIfPossible=l,r.parent||n()};oe.toggleHide=function(){te=!te,U.default.each(ne,function(e){e.domElement.style.display=te?"none":""})},oe.CLASS_AUTO_PLACE="a",oe.CLASS_AUTO_PLACE_CONTAINER="ac",oe.CLASS_MAIN="main",oe.CLASS_CONTROLLER_ROW="cr",oe.CLASS_TOO_TALL="taller-than-window",oe.CLASS_CLOSED="closed",oe.CLASS_CLOSE_BUTTON="close-button",oe.CLASS_CLOSE_TOP="close-top",oe.CLASS_CLOSE_BOTTOM="close-bottom",oe.CLASS_DRAG="drag",oe.DEFAULT_WIDTH=245,oe.TEXT_CLOSED="Close Controls",oe.TEXT_OPEN="Open Controls",oe._keydownHandler=function(e){"text"===document.activeElement.type||e.which!==J&&e.keyCode!==J||oe.toggleHide()},z.default.bind(window,"keydown",oe._keydownHandler,!1),U.default.extend(oe.prototype,{add:function(e,t){return s(this,e,t,{factoryArgs:Array.prototype.slice.call(arguments,2)})},addColor:function(e,t){return s(this,e,t,{color:!0})},remove:function(e){this.__ul.removeChild(e.__li),this.__controllers.splice(this.__controllers.indexOf(e),1);var t=this;U.default.defer(function(){t.onResize()})},destroy:function(){this.autoPlace&&ee.removeChild(this.domElement),z.default.unbind(window,"keydown",oe._keydownHandler,!1),z.default.unbind(window,"resize",this.__resizeHandler),this.saveToLocalStorageIfPossible&&z.default.unbind(window,"unload",this.saveToLocalStorageIfPossible)},addFolder:function(e){if(void 0!==this.__folders[e])throw new Error('You already have a folder in this GUI by the name "'+e+'"');var t={name:e,parent:this};t.autoPlace=this.autoPlace,this.load&&this.load.folders&&this.load.folders[e]&&(t.closed=this.load.folders[e].closed,t.load=this.load.folders[e]);var n=new oe(t);this.__folders[e]=n;var o=i(this,n.domElement);return z.default.addClass(o,"folder"),n},open:function(){this.closed=!1},close:function(){this.closed=!0},onResize:function(){var e=this.getRoot();if(e.scrollable){var t=z.default.getOffset(e.__ul).top,n=0;U.default.each(e.__ul.childNodes,function(t){e.autoPlace&&t===e.__save_row||(n+=z.default.getHeight(t))}),window.innerHeight-t-W<n?(z.default.addClass(e.domElement,oe.CLASS_TOO_TALL),e.__ul.style.height=window.innerHeight-t-W+"px"):(z.default.removeClass(e.domElement,oe.CLASS_TOO_TALL),e.__ul.style.height="auto")}e.__resize_handle&&U.default.defer(function(){e.__resize_handle.style.height=e.__ul.offsetHeight+"px"}),e.__closeButton&&(e.__closeButton.style.width=e.width+"px")},onResizeDebounced:U.default.debounce(function(){this.onResize()},50),remember:function(){if(U.default.isUndefined(Z)&&(Z=new F.default,Z.domElement.innerHTML=x.default),this.parent)throw new Error("You can only call remember on a top level GUI.");var e=this;U.default.each(Array.prototype.slice.call(arguments),function(t){0===e.__rememberedObjects.length&&f(e),e.__rememberedObjects.indexOf(t)===-1&&e.__rememberedObjects.push(t)}),this.autoPlace&&p(this,this.width)},getRoot:function(){for(var e=this;e.parent;)e=e.parent;return e},getSaveObject:function(){var e=this.load;return e.closed=this.closed,this.__rememberedObjects.length>0&&(e.preset=this.preset,e.remembered||(e.remembered={}),e.remembered[this.preset]=h(this)),e.folders={},U.default.each(this.__folders,function(t,n){e.folders[n]=t.getSaveObject()}),e},save:function(){this.load.remembered||(this.load.remembered={}),this.load.remembered[this.preset]=h(this),r(this,!1),this.saveToLocalStorageIfPossible()},saveAs:function(e){this.load.remembered||(this.load.remembered={},this.load.remembered[Q]=h(this,!0)),this.load.remembered[e]=h(this),this.preset=e,d(this,e,!0),this.saveToLocalStorageIfPossible()},revert:function(e){U.default.each(this.__controllers,function(t){this.getRoot().load.remembered?l(e||this.getRoot(),t):t.setValue(t.initialValue),t.__onFinishChange&&t.__onFinishChange.call(t,t.getValue())},this),U.default.each(this.__folders,function(e){e.revert(e)}),e||r(this.getRoot(),!1)},listen:function(e){var t=0===this.__listening.length;this.__listening.push(e),t&&b(this.__listening)},updateDisplay:function(){U.default.each(this.__controllers,function(e){e.updateDisplay()}),U.default.each(this.__folders,function(e){e.updateDisplay()})}}),t.default=oe,e.exports=t.default},function(e,t){"use strict";e.exports={load:function(e,t){var n=t||document,o=n.createElement("link");o.type="text/css",o.rel="stylesheet",o.href=e,n.getElementsByTagName("head")[0].appendChild(o)},inject:function(e,t){var n=t||document,o=document.createElement("style");o.type="text/css",o.innerHTML=e;var i=n.getElementsByTagName("head")[0];try{i.appendChild(o)}catch(e){}}}},function(e,t){e.exports="<div id=dg-save class=\"dg dialogue\"> Here's the new load parameter for your <code>GUI</code>'s constructor: <textarea id=dg-new-constructor></textarea> <div id=dg-save-locally> <input id=dg-local-storage type=checkbox /> Automatically save values to <code>localStorage</code> on exit. <div id=dg-local-explain>The values saved to <code>localStorage</code> will override those passed to <code>dat.GUI</code>'s constructor. This makes it easier to work incrementally, but <code>localStorage</code> is fragile, and your friends may not see the same values you do. </div> </div> </div>"},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}t.__esModule=!0;var i=n(10),r=o(i),a=n(13),l=o(a),s=n(14),u=o(s),d=n(11),c=o(d),f=n(15),_=o(f),p=n(8),h=o(p),m=n(5),b=o(m),g=function(e,t){var n=e[t];return b.default.isArray(arguments[2])||b.default.isObject(arguments[2])?new r.default(e,t,arguments[2]):b.default.isNumber(n)?b.default.isNumber(arguments[2])&&b.default.isNumber(arguments[3])?b.default.isNumber(arguments[4])?new u.default(e,t,arguments[2],arguments[3],arguments[4]):new u.default(e,t,arguments[2],arguments[3]):b.default.isNumber(arguments[4])?new l.default(e,t,{min:arguments[2],max:arguments[3],step:arguments[4]}):new l.default(e,t,{min:arguments[2],max:arguments[3]}):b.default.isString(n)?new c.default(e,t):b.default.isFunction(n)?new _.default(e,t,""):b.default.isBoolean(n)?new h.default(e,t):null};t.default=g,e.exports=t.default},function(e,t){"use strict";function n(e){setTimeout(e,1e3/60)}t.__esModule=!0,t.default=window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||window.oRequestAnimationFrame||window.msRequestAnimationFrame||n,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}t.__esModule=!0;var r=n(9),a=o(r),l=n(5),s=o(l),u=function(){function e(){i(this,e),this.backgroundElement=document.createElement("div"),s.default.extend(this.backgroundElement.style,{backgroundColor:"rgba(0,0,0,0.8)",top:0,left:0,display:"none",zIndex:"1000",opacity:0,WebkitTransition:"opacity 0.2s linear",transition:"opacity 0.2s linear"}),a.default.makeFullscreen(this.backgroundElement),this.backgroundElement.style.position="fixed",this.domElement=document.createElement("div"),s.default.extend(this.domElement.style,{position:"fixed",display:"none",zIndex:"1001",opacity:0,WebkitTransition:"-webkit-transform 0.2s ease-out, opacity 0.2s linear",transition:"transform 0.2s ease-out, opacity 0.2s linear"}),document.body.appendChild(this.backgroundElement),document.body.appendChild(this.domElement);var t=this;a.default.bind(this.backgroundElement,"click",function(){t.hide()})}return e.prototype.show=function(){var e=this;this.backgroundElement.style.display="block",this.domElement.style.display="block",this.domElement.style.opacity=0,this.domElement.style.webkitTransform="scale(1.1)",this.layout(),s.default.defer(function(){e.backgroundElement.style.opacity=1,e.domElement.style.opacity=1,e.domElement.style.webkitTransform="scale(1)"})},e.prototype.hide=function e(){var t=this,e=function e(){t.domElement.style.display="none",t.backgroundElement.style.display="none",a.default.unbind(t.domElement,"webkitTransitionEnd",e),a.default.unbind(t.domElement,"transitionend",e),a.default.unbind(t.domElement,"oTransitionEnd",e)};a.default.bind(this.domElement,"webkitTransitionEnd",e),a.default.bind(this.domElement,"transitionend",e),a.default.bind(this.domElement,"oTransitionEnd",e),this.backgroundElement.style.opacity=0,this.domElement.style.opacity=0,this.domElement.style.webkitTransform="scale(1.1)"},e.prototype.layout=function(){this.domElement.style.left=window.innerWidth/2-a.default.getWidth(this.domElement)/2+"px",this.domElement.style.top=window.innerHeight/2-a.default.getHeight(this.domElement)/2+"px"},e}();t.default=u,e.exports=t.default},function(e,t,n){t=e.exports=n(24)(),t.push([e.id,".dg ul{list-style:none;margin:0;padding:0;width:100%;clear:both}.dg.ac{position:fixed;top:0;left:0;right:0;height:0;z-index:0}.dg:not(.ac) .main{overflow:hidden}.dg.main{transition:opacity .1s linear}.dg.main.taller-than-window{overflow-y:auto}.dg.main.taller-than-window .close-button{opacity:1;margin-top:-1px;border-top:1px solid #2c2c2c}.dg.main ul.closed .close-button{opacity:1!important}.dg.main .close-button.drag,.dg.main:hover .close-button{opacity:1}.dg.main .close-button{transition:opacity .1s linear;border:0;line-height:19px;height:20px;cursor:pointer;text-align:center;background-color:#000}.dg.main .close-button.close-top{position:relative}.dg.main .close-button.close-bottom{position:absolute}.dg.main .close-button:hover{background-color:#111}.dg.a{float:right;margin-right:15px;overflow-y:visible}.dg.a.has-save>ul.close-top{margin-top:0}.dg.a.has-save>ul.close-bottom{margin-top:27px}.dg.a.has-save>ul.closed{margin-top:0}.dg.a .save-row{top:0;z-index:1002}.dg.a .save-row.close-top{position:relative}.dg.a .save-row.close-bottom{position:fixed}.dg li{transition:height .1s ease-out;transition:overflow .1s linear}.dg li:not(.folder){cursor:auto;height:27px;line-height:27px;padding:0 4px 0 5px}.dg li.folder{padding:0;border-left:4px solid transparent}.dg li.title{margin-left:-4px}.dg .closed li:not(.title),.dg .closed ul li,.dg .closed ul li>*{height:0;overflow:hidden;border:0}.dg .cr{clear:both;padding-left:3px;height:27px;overflow:hidden}.dg .property-name{cursor:default;float:left;clear:left;width:40%;overflow:hidden;text-overflow:ellipsis}.dg .c{float:left;width:60%;position:relative}.dg .c input[type=text]{border:0;margin-top:4px;padding:3px;width:100%;float:right}.dg .has-slider input[type=text]{width:30%;margin-left:0}.dg .slider{float:left;width:66%;margin-left:-5px;margin-right:0;height:19px;margin-top:4px}.dg .slider-fg{height:100%}.dg .c input[type=checkbox]{margin-top:7px}.dg .c select{margin-top:5px}.dg .cr.boolean,.dg .cr.boolean *,.dg .cr.function,.dg .cr.function *,.dg .cr.function .property-name{cursor:pointer}.dg .cr.color{overflow:visible}.dg .selector{display:none;position:absolute;margin-left:-9px;margin-top:23px;z-index:10}.dg .c:hover .selector,.dg .selector.drag{display:block}.dg li.save-row{padding:0}.dg li.save-row .button{display:inline-block;padding:0 6px}.dg.dialogue{background-color:#222;width:460px;padding:15px;font-size:13px;line-height:15px}#dg-new-constructor{padding:10px;color:#222;font-family:Monaco,monospace;font-size:10px;border:0;resize:none;box-shadow:inset 1px 1px 1px #888;word-wrap:break-word;margin:12px 0;display:block;width:440px;overflow-y:scroll;height:100px;position:relative}#dg-local-explain{display:none;font-size:11px;line-height:17px;border-radius:3px;background-color:#333;padding:8px;margin-top:10px}#dg-local-explain code{font-size:10px}#dat-gui-save-locally{display:none}.dg{color:#eee;font:11px Lucida Grande,sans-serif;text-shadow:0 -1px 0 #111}.dg.main::-webkit-scrollbar{width:5px;background:#1a1a1a}.dg.main::-webkit-scrollbar-corner{height:0;display:none}.dg.main::-webkit-scrollbar-thumb{border-radius:5px;background:#676767}.dg li:not(.folder){background:#1a1a1a;border-bottom:1px solid #2c2c2c}.dg li.save-row{line-height:25px;background:#dad5cb;border:0}.dg li.save-row select{margin-left:5px;width:108px}.dg li.save-row .button{margin-left:5px;margin-top:1px;border-radius:2px;font-size:9px;line-height:7px;padding:4px 4px 5px;background:#c5bdad;color:#fff;text-shadow:0 1px 0 #b0a58f;box-shadow:0 -1px 0 #b0a58f;cursor:pointer}.dg li.save-row .button.gears{background:#c5bdad url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAsAAAANCAYAAAB/9ZQ7AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAQJJREFUeNpiYKAU/P//PwGIC/ApCABiBSAW+I8AClAcgKxQ4T9hoMAEUrxx2QSGN6+egDX+/vWT4e7N82AMYoPAx/evwWoYoSYbACX2s7KxCxzcsezDh3evFoDEBYTEEqycggWAzA9AuUSQQgeYPa9fPv6/YWm/Acx5IPb7ty/fw+QZblw67vDs8R0YHyQhgObx+yAJkBqmG5dPPDh1aPOGR/eugW0G4vlIoTIfyFcA+QekhhHJhPdQxbiAIguMBTQZrPD7108M6roWYDFQiIAAv6Aow/1bFwXgis+f2LUAynwoIaNcz8XNx3Dl7MEJUDGQpx9gtQ8YCueB+D26OECAAQDadt7e46D42QAAAABJRU5ErkJggg==) 2px 1px no-repeat;height:7px;width:8px}.dg li.save-row .button:hover{background-color:#bab19e;box-shadow:0 -1px 0 #b0a58f}.dg li.folder{border-bottom:0}.dg li.title{padding-left:16px;background:#000 url(data:image/gif;base64,R0lGODlhBQAFAJEAAP////Pz8////////yH5BAEAAAIALAAAAAAFAAUAAAIIlI+hKgFxoCgAOw==) 6px 10px no-repeat;cursor:pointer;border-bottom:1px solid hsla(0,0%,100%,.2)}.dg .closed li.title{background-image:url(data:image/gif;base64,R0lGODlhBQAFAJEAAP////Pz8////////yH5BAEAAAIALAAAAAAFAAUAAAIIlGIWqMCbWAEAOw==)}.dg .cr.boolean{border-left:3px solid #806787}.dg .cr.color{border-left:3px solid}.dg .cr.function{border-left:3px solid #e61d5f}.dg .cr.number{border-left:3px solid #2fa1d6}.dg .cr.number input[type=text]{color:#2fa1d6}.dg .cr.string{border-left:3px solid #1ed36f}.dg .cr.string input[type=text]{color:#1ed36f}.dg .cr.boolean:hover,.dg .cr.function:hover{background:#111}.dg .c input[type=text]{background:#303030;outline:none}.dg .c input[type=text]:hover{background:#3c3c3c}.dg .c input[type=text]:focus{background:#494949;color:#fff}.dg .c .slider{background:#303030;cursor:ew-resize}.dg .c .slider-fg{background:#2fa1d6;max-width:100%}.dg .c .slider:hover{background:#3c3c3c}.dg .c .slider:hover .slider-fg{background:#44abda}",""])},function(e,t){e.exports=function(){var e=[];return e.toString=function(){for(var e=[],t=0;t<this.length;t++){var n=this[t];n[2]?e.push("@media "+n[2]+"{"+n[1]+"}"):e.push(n[1])}return e.join("")},e.i=function(t,n){"string"==typeof t&&(t=[[null,t,""]]);for(var o={},i=0;i<this.length;i++){var r=this[i][0];"number"==typeof r&&(o[r]=!0)}for(i=0;i<t.length;i++){var a=t[i];"number"==typeof a[0]&&o[a[0]]||(n&&!a[2]?a[2]=n:n&&(a[2]="("+a[2]+") and ("+n+")"),e.push(a))}},e}}])});
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/afqbrowser/site/client/js/third-party/dat.gui.min.js
dat.gui.min.js
THREE.RenderableObject = function () { this.id = 0; this.object = null; this.z = 0; this.renderOrder = 0; }; // THREE.RenderableFace = function () { this.id = 0; this.v1 = new THREE.RenderableVertex(); this.v2 = new THREE.RenderableVertex(); this.v3 = new THREE.RenderableVertex(); this.normalModel = new THREE.Vector3(); this.vertexNormalsModel = [ new THREE.Vector3(), new THREE.Vector3(), new THREE.Vector3() ]; this.vertexNormalsLength = 0; this.color = new THREE.Color(); this.material = null; this.uvs = [ new THREE.Vector2(), new THREE.Vector2(), new THREE.Vector2() ]; this.z = 0; this.renderOrder = 0; }; // THREE.RenderableVertex = function () { this.position = new THREE.Vector3(); this.positionWorld = new THREE.Vector3(); this.positionScreen = new THREE.Vector4(); this.visible = true; }; THREE.RenderableVertex.prototype.copy = function ( vertex ) { this.positionWorld.copy( vertex.positionWorld ); this.positionScreen.copy( vertex.positionScreen ); }; // THREE.RenderableLine = function () { this.id = 0; this.v1 = new THREE.RenderableVertex(); this.v2 = new THREE.RenderableVertex(); this.vertexColors = [ new THREE.Color(), new THREE.Color() ]; this.material = null; this.z = 0; this.renderOrder = 0; }; // THREE.RenderableSprite = function () { this.id = 0; this.object = null; this.x = 0; this.y = 0; this.z = 0; this.rotation = 0; this.scale = new THREE.Vector2(); this.material = null; this.renderOrder = 0; }; // THREE.Projector = function () { var _object, _objectCount, _objectPool = [], _objectPoolLength = 0, _vertex, _vertexCount, _vertexPool = [], _vertexPoolLength = 0, _face, _faceCount, _facePool = [], _facePoolLength = 0, _line, _lineCount, _linePool = [], _linePoolLength = 0, _sprite, _spriteCount, _spritePool = [], _spritePoolLength = 0, _renderData = { objects: [], lights: [], elements: [] }, _vector3 = new THREE.Vector3(), _vector4 = new THREE.Vector4(), _clipBox = new THREE.Box3( new THREE.Vector3( - 1, - 1, - 1 ), new THREE.Vector3( 1, 1, 1 ) ), _boundingBox = new THREE.Box3(), _points3 = new Array( 3 ), _points4 = new Array( 4 ), _viewMatrix = new THREE.Matrix4(), _viewProjectionMatrix = new THREE.Matrix4(), _modelMatrix, _modelViewProjectionMatrix = new THREE.Matrix4(), _normalMatrix = new THREE.Matrix3(), _frustum = new THREE.Frustum(), _clippedVertex1PositionScreen = new THREE.Vector4(), _clippedVertex2PositionScreen = new THREE.Vector4(); // this.projectVector = function ( vector, camera ) { console.warn( 'THREE.Projector: .projectVector() is now vector.project().' ); vector.project( camera ); }; this.unprojectVector = function ( vector, camera ) { console.warn( 'THREE.Projector: .unprojectVector() is now vector.unproject().' ); vector.unproject( camera ); }; this.pickingRay = function ( vector, camera ) { console.error( 'THREE.Projector: .pickingRay() is now raycaster.setFromCamera().' ); }; // var RenderList = function () { var normals = []; var uvs = []; var object = null; var material = null; var normalMatrix = new THREE.Matrix3(); function setObject( value ) { object = value; material = object.material; normalMatrix.getNormalMatrix( object.matrixWorld ); normals.length = 0; uvs.length = 0; } function projectVertex( vertex ) { var position = vertex.position; var positionWorld = vertex.positionWorld; var positionScreen = vertex.positionScreen; positionWorld.copy( position ).applyMatrix4( _modelMatrix ); positionScreen.copy( positionWorld ).applyMatrix4( _viewProjectionMatrix ); var invW = 1 / positionScreen.w; positionScreen.x *= invW; positionScreen.y *= invW; positionScreen.z *= invW; vertex.visible = positionScreen.x >= - 1 && positionScreen.x <= 1 && positionScreen.y >= - 1 && positionScreen.y <= 1 && positionScreen.z >= - 1 && positionScreen.z <= 1; } function pushVertex( x, y, z ) { _vertex = getNextVertexInPool(); _vertex.position.set( x, y, z ); projectVertex( _vertex ); } function pushNormal( x, y, z ) { normals.push( x, y, z ); } function pushUv( x, y ) { uvs.push( x, y ); } function checkTriangleVisibility( v1, v2, v3 ) { if ( v1.visible === true || v2.visible === true || v3.visible === true ) return true; _points3[ 0 ] = v1.positionScreen; _points3[ 1 ] = v2.positionScreen; _points3[ 2 ] = v3.positionScreen; return _clipBox.intersectsBox( _boundingBox.setFromPoints( _points3 ) ); } function checkBackfaceCulling( v1, v2, v3 ) { return ( ( v3.positionScreen.x - v1.positionScreen.x ) * ( v2.positionScreen.y - v1.positionScreen.y ) - ( v3.positionScreen.y - v1.positionScreen.y ) * ( v2.positionScreen.x - v1.positionScreen.x ) ) < 0; } function pushLine( a, b ) { var v1 = _vertexPool[ a ]; var v2 = _vertexPool[ b ]; _line = getNextLineInPool(); _line.id = object.id; _line.v1.copy( v1 ); _line.v2.copy( v2 ); _line.z = ( v1.positionScreen.z + v2.positionScreen.z ) / 2; _line.renderOrder = object.renderOrder; _line.material = object.material; _renderData.elements.push( _line ); } function pushTriangle( a, b, c ) { var v1 = _vertexPool[ a ]; var v2 = _vertexPool[ b ]; var v3 = _vertexPool[ c ]; if ( checkTriangleVisibility( v1, v2, v3 ) === false ) return; if ( material.side === THREE.DoubleSide || checkBackfaceCulling( v1, v2, v3 ) === true ) { _face = getNextFaceInPool(); _face.id = object.id; _face.v1.copy( v1 ); _face.v2.copy( v2 ); _face.v3.copy( v3 ); _face.z = ( v1.positionScreen.z + v2.positionScreen.z + v3.positionScreen.z ) / 3; _face.renderOrder = object.renderOrder; // use first vertex normal as face normal _face.normalModel.fromArray( normals, a * 3 ); _face.normalModel.applyMatrix3( normalMatrix ).normalize(); for ( var i = 0; i < 3; i ++ ) { var normal = _face.vertexNormalsModel[ i ]; normal.fromArray( normals, arguments[ i ] * 3 ); normal.applyMatrix3( normalMatrix ).normalize(); var uv = _face.uvs[ i ]; uv.fromArray( uvs, arguments[ i ] * 2 ); } _face.vertexNormalsLength = 3; _face.material = object.material; _renderData.elements.push( _face ); } } return { setObject: setObject, projectVertex: projectVertex, checkTriangleVisibility: checkTriangleVisibility, checkBackfaceCulling: checkBackfaceCulling, pushVertex: pushVertex, pushNormal: pushNormal, pushUv: pushUv, pushLine: pushLine, pushTriangle: pushTriangle } }; var renderList = new RenderList(); this.projectScene = function ( scene, camera, sortObjects, sortElements ) { _faceCount = 0; _lineCount = 0; _spriteCount = 0; _renderData.elements.length = 0; if ( scene.autoUpdate === true ) scene.updateMatrixWorld(); if ( camera.parent === null ) camera.updateMatrixWorld(); _viewMatrix.copy( camera.matrixWorldInverse.getInverse( camera.matrixWorld ) ); _viewProjectionMatrix.multiplyMatrices( camera.projectionMatrix, _viewMatrix ); _frustum.setFromMatrix( _viewProjectionMatrix ); // _objectCount = 0; _renderData.objects.length = 0; _renderData.lights.length = 0; scene.traverseVisible( function ( object ) { if ( object instanceof THREE.Light ) { _renderData.lights.push( object ); } else if ( object instanceof THREE.Mesh || object instanceof THREE.Line || object instanceof THREE.Sprite ) { var material = object.material; if ( material.visible === false ) return; if ( object.frustumCulled === false || _frustum.intersectsObject( object ) === true ) { _object = getNextObjectInPool(); _object.id = object.id; _object.object = object; _vector3.setFromMatrixPosition( object.matrixWorld ); _vector3.applyProjection( _viewProjectionMatrix ); _object.z = _vector3.z; _object.renderOrder = object.renderOrder; _renderData.objects.push( _object ); } } } ); if ( sortObjects === true ) { _renderData.objects.sort( painterSort ); } // for ( var o = 0, ol = _renderData.objects.length; o < ol; o ++ ) { var object = _renderData.objects[ o ].object; var geometry = object.geometry; renderList.setObject( object ); _modelMatrix = object.matrixWorld; _vertexCount = 0; if ( object instanceof THREE.Mesh ) { if ( geometry instanceof THREE.BufferGeometry ) { var attributes = geometry.attributes; var groups = geometry.groups; if ( attributes.position === undefined ) continue; var positions = attributes.position.array; for ( var i = 0, l = positions.length; i < l; i += 3 ) { renderList.pushVertex( positions[ i ], positions[ i + 1 ], positions[ i + 2 ] ); } if ( attributes.normal !== undefined ) { var normals = attributes.normal.array; for ( var i = 0, l = normals.length; i < l; i += 3 ) { renderList.pushNormal( normals[ i ], normals[ i + 1 ], normals[ i + 2 ] ); } } if ( attributes.uv !== undefined ) { var uvs = attributes.uv.array; for ( var i = 0, l = uvs.length; i < l; i += 2 ) { renderList.pushUv( uvs[ i ], uvs[ i + 1 ] ); } } if ( geometry.index !== null ) { var indices = geometry.index.array; if ( groups.length > 0 ) { for ( var o = 0; o < groups.length; o ++ ) { var group = groups[ o ]; for ( var i = group.start, l = group.start + group.count; i < l; i += 3 ) { renderList.pushTriangle( indices[ i ], indices[ i + 1 ], indices[ i + 2 ] ); } } } else { for ( var i = 0, l = indices.length; i < l; i += 3 ) { renderList.pushTriangle( indices[ i ], indices[ i + 1 ], indices[ i + 2 ] ); } } } else { for ( var i = 0, l = positions.length / 3; i < l; i += 3 ) { renderList.pushTriangle( i, i + 1, i + 2 ); } } } else if ( geometry instanceof THREE.Geometry ) { var vertices = geometry.vertices; var faces = geometry.faces; var faceVertexUvs = geometry.faceVertexUvs[ 0 ]; _normalMatrix.getNormalMatrix( _modelMatrix ); var material = object.material; var isFaceMaterial = material instanceof THREE.MultiMaterial; var objectMaterials = isFaceMaterial === true ? object.material : null; for ( var v = 0, vl = vertices.length; v < vl; v ++ ) { var vertex = vertices[ v ]; _vector3.copy( vertex ); if ( material.morphTargets === true ) { var morphTargets = geometry.morphTargets; var morphInfluences = object.morphTargetInfluences; for ( var t = 0, tl = morphTargets.length; t < tl; t ++ ) { var influence = morphInfluences[ t ]; if ( influence === 0 ) continue; var target = morphTargets[ t ]; var targetVertex = target.vertices[ v ]; _vector3.x += ( targetVertex.x - vertex.x ) * influence; _vector3.y += ( targetVertex.y - vertex.y ) * influence; _vector3.z += ( targetVertex.z - vertex.z ) * influence; } } renderList.pushVertex( _vector3.x, _vector3.y, _vector3.z ); } for ( var f = 0, fl = faces.length; f < fl; f ++ ) { var face = faces[ f ]; material = isFaceMaterial === true ? objectMaterials.materials[ face.materialIndex ] : object.material; if ( material === undefined ) continue; var side = material.side; var v1 = _vertexPool[ face.a ]; var v2 = _vertexPool[ face.b ]; var v3 = _vertexPool[ face.c ]; if ( renderList.checkTriangleVisibility( v1, v2, v3 ) === false ) continue; var visible = renderList.checkBackfaceCulling( v1, v2, v3 ); if ( side !== THREE.DoubleSide ) { if ( side === THREE.FrontSide && visible === false ) continue; if ( side === THREE.BackSide && visible === true ) continue; } _face = getNextFaceInPool(); _face.id = object.id; _face.v1.copy( v1 ); _face.v2.copy( v2 ); _face.v3.copy( v3 ); _face.normalModel.copy( face.normal ); if ( visible === false && ( side === THREE.BackSide || side === THREE.DoubleSide ) ) { _face.normalModel.negate(); } _face.normalModel.applyMatrix3( _normalMatrix ).normalize(); var faceVertexNormals = face.vertexNormals; for ( var n = 0, nl = Math.min( faceVertexNormals.length, 3 ); n < nl; n ++ ) { var normalModel = _face.vertexNormalsModel[ n ]; normalModel.copy( faceVertexNormals[ n ] ); if ( visible === false && ( side === THREE.BackSide || side === THREE.DoubleSide ) ) { normalModel.negate(); } normalModel.applyMatrix3( _normalMatrix ).normalize(); } _face.vertexNormalsLength = faceVertexNormals.length; var vertexUvs = faceVertexUvs[ f ]; if ( vertexUvs !== undefined ) { for ( var u = 0; u < 3; u ++ ) { _face.uvs[ u ].copy( vertexUvs[ u ] ); } } _face.color = face.color; _face.material = material; _face.z = ( v1.positionScreen.z + v2.positionScreen.z + v3.positionScreen.z ) / 3; _face.renderOrder = object.renderOrder; _renderData.elements.push( _face ); } } } else if ( object instanceof THREE.Line ) { if ( geometry instanceof THREE.BufferGeometry ) { var attributes = geometry.attributes; if ( attributes.position !== undefined ) { var positions = attributes.position.array; for ( var i = 0, l = positions.length; i < l; i += 3 ) { renderList.pushVertex( positions[ i ], positions[ i + 1 ], positions[ i + 2 ] ); } if ( geometry.index !== null ) { var indices = geometry.index.array; for ( var i = 0, l = indices.length; i < l; i += 2 ) { renderList.pushLine( indices[ i ], indices[ i + 1 ] ); } } else { var step = object instanceof THREE.LineSegments ? 2 : 1; for ( var i = 0, l = ( positions.length / 3 ) - 1; i < l; i += step ) { renderList.pushLine( i, i + 1 ); } } } } else if ( geometry instanceof THREE.Geometry ) { _modelViewProjectionMatrix.multiplyMatrices( _viewProjectionMatrix, _modelMatrix ); var vertices = object.geometry.vertices; if ( vertices.length === 0 ) continue; v1 = getNextVertexInPool(); v1.positionScreen.copy( vertices[ 0 ] ).applyMatrix4( _modelViewProjectionMatrix ); var step = object instanceof THREE.LineSegments ? 2 : 1; for ( var v = 1, vl = vertices.length; v < vl; v ++ ) { v1 = getNextVertexInPool(); v1.positionScreen.copy( vertices[ v ] ).applyMatrix4( _modelViewProjectionMatrix ); if ( ( v + 1 ) % step > 0 ) continue; v2 = _vertexPool[ _vertexCount - 2 ]; _clippedVertex1PositionScreen.copy( v1.positionScreen ); _clippedVertex2PositionScreen.copy( v2.positionScreen ); if ( clipLine( _clippedVertex1PositionScreen, _clippedVertex2PositionScreen ) === true ) { // Perform the perspective divide _clippedVertex1PositionScreen.multiplyScalar( 1 / _clippedVertex1PositionScreen.w ); _clippedVertex2PositionScreen.multiplyScalar( 1 / _clippedVertex2PositionScreen.w ); _line = getNextLineInPool(); _line.id = object.id; _line.v1.positionScreen.copy( _clippedVertex1PositionScreen ); _line.v2.positionScreen.copy( _clippedVertex2PositionScreen ); _line.z = Math.max( _clippedVertex1PositionScreen.z, _clippedVertex2PositionScreen.z ); _line.renderOrder = object.renderOrder; _line.material = object.material; if ( object.material.vertexColors === THREE.VertexColors ) { _line.vertexColors[ 0 ].copy( object.geometry.colors[ v ] ); _line.vertexColors[ 1 ].copy( object.geometry.colors[ v - 1 ] ); } _renderData.elements.push( _line ); } } } } else if ( object instanceof THREE.Sprite ) { _vector4.set( _modelMatrix.elements[ 12 ], _modelMatrix.elements[ 13 ], _modelMatrix.elements[ 14 ], 1 ); _vector4.applyMatrix4( _viewProjectionMatrix ); var invW = 1 / _vector4.w; _vector4.z *= invW; if ( _vector4.z >= - 1 && _vector4.z <= 1 ) { _sprite = getNextSpriteInPool(); _sprite.id = object.id; _sprite.x = _vector4.x * invW; _sprite.y = _vector4.y * invW; _sprite.z = _vector4.z; _sprite.renderOrder = object.renderOrder; _sprite.object = object; _sprite.rotation = object.rotation; _sprite.scale.x = object.scale.x * Math.abs( _sprite.x - ( _vector4.x + camera.projectionMatrix.elements[ 0 ] ) / ( _vector4.w + camera.projectionMatrix.elements[ 12 ] ) ); _sprite.scale.y = object.scale.y * Math.abs( _sprite.y - ( _vector4.y + camera.projectionMatrix.elements[ 5 ] ) / ( _vector4.w + camera.projectionMatrix.elements[ 13 ] ) ); _sprite.material = object.material; _renderData.elements.push( _sprite ); } } } if ( sortElements === true ) { _renderData.elements.sort( painterSort ); } return _renderData; }; // Pools function getNextObjectInPool() { if ( _objectCount === _objectPoolLength ) { var object = new THREE.RenderableObject(); _objectPool.push( object ); _objectPoolLength ++; _objectCount ++; return object; } return _objectPool[ _objectCount ++ ]; } function getNextVertexInPool() { if ( _vertexCount === _vertexPoolLength ) { var vertex = new THREE.RenderableVertex(); _vertexPool.push( vertex ); _vertexPoolLength ++; _vertexCount ++; return vertex; } return _vertexPool[ _vertexCount ++ ]; } function getNextFaceInPool() { if ( _faceCount === _facePoolLength ) { var face = new THREE.RenderableFace(); _facePool.push( face ); _facePoolLength ++; _faceCount ++; return face; } return _facePool[ _faceCount ++ ]; } function getNextLineInPool() { if ( _lineCount === _linePoolLength ) { var line = new THREE.RenderableLine(); _linePool.push( line ); _linePoolLength ++; _lineCount ++; return line; } return _linePool[ _lineCount ++ ]; } function getNextSpriteInPool() { if ( _spriteCount === _spritePoolLength ) { var sprite = new THREE.RenderableSprite(); _spritePool.push( sprite ); _spritePoolLength ++; _spriteCount ++; return sprite; } return _spritePool[ _spriteCount ++ ]; } // function painterSort( a, b ) { if ( a.renderOrder !== b.renderOrder ) { return a.renderOrder - b.renderOrder; } else if ( a.z !== b.z ) { return b.z - a.z; } else if ( a.id !== b.id ) { return a.id - b.id; } else { return 0; } } function clipLine( s1, s2 ) { var alpha1 = 0, alpha2 = 1, // Calculate the boundary coordinate of each vertex for the near and far clip planes, // Z = -1 and Z = +1, respectively. bc1near = s1.z + s1.w, bc2near = s2.z + s2.w, bc1far = - s1.z + s1.w, bc2far = - s2.z + s2.w; if ( bc1near >= 0 && bc2near >= 0 && bc1far >= 0 && bc2far >= 0 ) { // Both vertices lie entirely within all clip planes. return true; } else if ( ( bc1near < 0 && bc2near < 0 ) || ( bc1far < 0 && bc2far < 0 ) ) { // Both vertices lie entirely outside one of the clip planes. return false; } else { // The line segment spans at least one clip plane. if ( bc1near < 0 ) { // v1 lies outside the near plane, v2 inside alpha1 = Math.max( alpha1, bc1near / ( bc1near - bc2near ) ); } else if ( bc2near < 0 ) { // v2 lies outside the near plane, v1 inside alpha2 = Math.min( alpha2, bc1near / ( bc1near - bc2near ) ); } if ( bc1far < 0 ) { // v1 lies outside the far plane, v2 inside alpha1 = Math.max( alpha1, bc1far / ( bc1far - bc2far ) ); } else if ( bc2far < 0 ) { // v2 lies outside the far plane, v2 inside alpha2 = Math.min( alpha2, bc1far / ( bc1far - bc2far ) ); } if ( alpha2 < alpha1 ) { // The line segment spans two boundaries, but is outside both of them. // (This can't happen when we're only clipping against just near/far but good // to leave the check here for future usage if other clip planes are added.) return false; } else { // Update the s1 and s2 vertices to match the clipped line segment. s1.lerp( s2, alpha1 ); s2.lerp( s1, 1 - alpha2 ); return true; } } } };
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/afqbrowser/site/client/js/third-party/Projector.js
Projector.js
(function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.Qs = f()}})(function(){var define,module,exports;return (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(require,module,exports){ 'use strict'; var replace = String.prototype.replace; var percentTwenties = /%20/g; module.exports = { 'default': 'RFC3986', formatters: { RFC1738: function (value) { return replace.call(value, percentTwenties, '+'); }, RFC3986: function (value) { return value; } }, RFC1738: 'RFC1738', RFC3986: 'RFC3986' }; },{}],2:[function(require,module,exports){ 'use strict'; var stringify = require('./stringify'); var parse = require('./parse'); var formats = require('./formats'); module.exports = { formats: formats, parse: parse, stringify: stringify }; },{"./formats":1,"./parse":3,"./stringify":4}],3:[function(require,module,exports){ 'use strict'; var utils = require('./utils'); var has = Object.prototype.hasOwnProperty; var defaults = { allowDots: false, allowPrototypes: false, arrayLimit: 20, decoder: utils.decode, delimiter: '&', depth: 5, parameterLimit: 1000, plainObjects: false, strictNullHandling: false }; var parseValues = function parseQueryStringValues(str, options) { var obj = {}; var cleanStr = options.ignoreQueryPrefix ? str.replace(/^\?/, '') : str; var limit = options.parameterLimit === Infinity ? undefined : options.parameterLimit; var parts = cleanStr.split(options.delimiter, limit); for (var i = 0; i < parts.length; ++i) { var part = parts[i]; var bracketEqualsPos = part.indexOf(']='); var pos = bracketEqualsPos === -1 ? part.indexOf('=') : bracketEqualsPos + 1; var key, val; if (pos === -1) { key = options.decoder(part, defaults.decoder); val = options.strictNullHandling ? null : ''; } else { key = options.decoder(part.slice(0, pos), defaults.decoder); val = options.decoder(part.slice(pos + 1), defaults.decoder); } if (has.call(obj, key)) { obj[key] = [].concat(obj[key]).concat(val); } else { obj[key] = val; } } return obj; }; var parseObject = function parseObjectRecursive(chain, val, options) { if (!chain.length) { return val; } var root = chain.shift(); var obj; if (root === '[]') { obj = []; obj = obj.concat(parseObject(chain, val, options)); } else { obj = options.plainObjects ? Object.create(null) : {}; var cleanRoot = root.charAt(0) === '[' && root.charAt(root.length - 1) === ']' ? root.slice(1, -1) : root; var index = parseInt(cleanRoot, 10); if ( !isNaN(index) && root !== cleanRoot && String(index) === cleanRoot && index >= 0 && (options.parseArrays && index <= options.arrayLimit) ) { obj = []; obj[index] = parseObject(chain, val, options); } else { obj[cleanRoot] = parseObject(chain, val, options); } } return obj; }; var parseKeys = function parseQueryStringKeys(givenKey, val, options) { if (!givenKey) { return; } // Transform dot notation to bracket notation var key = options.allowDots ? givenKey.replace(/\.([^.[]+)/g, '[$1]') : givenKey; // The regex chunks var brackets = /(\[[^[\]]*])/; var child = /(\[[^[\]]*])/g; // Get the parent var segment = brackets.exec(key); var parent = segment ? key.slice(0, segment.index) : key; // Stash the parent if it exists var keys = []; if (parent) { // If we aren't using plain objects, optionally prefix keys // that would overwrite object prototype properties if (!options.plainObjects && has.call(Object.prototype, parent)) { if (!options.allowPrototypes) { return; } } keys.push(parent); } // Loop through children appending to the array until we hit depth var i = 0; while ((segment = child.exec(key)) !== null && i < options.depth) { i += 1; if (!options.plainObjects && has.call(Object.prototype, segment[1].slice(1, -1))) { if (!options.allowPrototypes) { return; } } keys.push(segment[1]); } // If there's a remainder, just add whatever is left if (segment) { keys.push('[' + key.slice(segment.index) + ']'); } return parseObject(keys, val, options); }; module.exports = function (str, opts) { var options = opts ? utils.assign({}, opts) : {}; if (options.decoder !== null && options.decoder !== undefined && typeof options.decoder !== 'function') { throw new TypeError('Decoder has to be a function.'); } options.ignoreQueryPrefix = options.ignoreQueryPrefix === true; options.delimiter = typeof options.delimiter === 'string' || utils.isRegExp(options.delimiter) ? options.delimiter : defaults.delimiter; options.depth = typeof options.depth === 'number' ? options.depth : defaults.depth; options.arrayLimit = typeof options.arrayLimit === 'number' ? options.arrayLimit : defaults.arrayLimit; options.parseArrays = options.parseArrays !== false; options.decoder = typeof options.decoder === 'function' ? options.decoder : defaults.decoder; options.allowDots = typeof options.allowDots === 'boolean' ? options.allowDots : defaults.allowDots; options.plainObjects = typeof options.plainObjects === 'boolean' ? options.plainObjects : defaults.plainObjects; options.allowPrototypes = typeof options.allowPrototypes === 'boolean' ? options.allowPrototypes : defaults.allowPrototypes; options.parameterLimit = typeof options.parameterLimit === 'number' ? options.parameterLimit : defaults.parameterLimit; options.strictNullHandling = typeof options.strictNullHandling === 'boolean' ? options.strictNullHandling : defaults.strictNullHandling; if (str === '' || str === null || typeof str === 'undefined') { return options.plainObjects ? Object.create(null) : {}; } var tempObj = typeof str === 'string' ? parseValues(str, options) : str; var obj = options.plainObjects ? Object.create(null) : {}; // Iterate over the keys and setup the new object var keys = Object.keys(tempObj); for (var i = 0; i < keys.length; ++i) { var key = keys[i]; var newObj = parseKeys(key, tempObj[key], options); obj = utils.merge(obj, newObj, options); } return utils.compact(obj); }; },{"./utils":5}],4:[function(require,module,exports){ 'use strict'; var utils = require('./utils'); var formats = require('./formats'); var arrayPrefixGenerators = { brackets: function brackets(prefix) { // eslint-disable-line func-name-matching return prefix + '[]'; }, indices: function indices(prefix, key) { // eslint-disable-line func-name-matching return prefix + '[' + key + ']'; }, repeat: function repeat(prefix) { // eslint-disable-line func-name-matching return prefix; } }; var toISO = Date.prototype.toISOString; var defaults = { delimiter: '&', encode: true, encoder: utils.encode, encodeValuesOnly: false, serializeDate: function serializeDate(date) { // eslint-disable-line func-name-matching return toISO.call(date); }, skipNulls: false, strictNullHandling: false }; var stringify = function stringify( // eslint-disable-line func-name-matching object, prefix, generateArrayPrefix, strictNullHandling, skipNulls, encoder, filter, sort, allowDots, serializeDate, formatter, encodeValuesOnly ) { var obj = object; if (typeof filter === 'function') { obj = filter(prefix, obj); } else if (obj instanceof Date) { obj = serializeDate(obj); } else if (obj === null) { if (strictNullHandling) { return encoder && !encodeValuesOnly ? encoder(prefix, defaults.encoder) : prefix; } obj = ''; } if (typeof obj === 'string' || typeof obj === 'number' || typeof obj === 'boolean' || utils.isBuffer(obj)) { if (encoder) { var keyValue = encodeValuesOnly ? prefix : encoder(prefix, defaults.encoder); return [formatter(keyValue) + '=' + formatter(encoder(obj, defaults.encoder))]; } return [formatter(prefix) + '=' + formatter(String(obj))]; } var values = []; if (typeof obj === 'undefined') { return values; } var objKeys; if (Array.isArray(filter)) { objKeys = filter; } else { var keys = Object.keys(obj); objKeys = sort ? keys.sort(sort) : keys; } for (var i = 0; i < objKeys.length; ++i) { var key = objKeys[i]; if (skipNulls && obj[key] === null) { continue; } if (Array.isArray(obj)) { values = values.concat(stringify( obj[key], generateArrayPrefix(prefix, key), generateArrayPrefix, strictNullHandling, skipNulls, encoder, filter, sort, allowDots, serializeDate, formatter, encodeValuesOnly )); } else { values = values.concat(stringify( obj[key], prefix + (allowDots ? '.' + key : '[' + key + ']'), generateArrayPrefix, strictNullHandling, skipNulls, encoder, filter, sort, allowDots, serializeDate, formatter, encodeValuesOnly )); } } return values; }; module.exports = function (object, opts) { var obj = object; var options = opts ? utils.assign({}, opts) : {}; if (options.encoder !== null && options.encoder !== undefined && typeof options.encoder !== 'function') { throw new TypeError('Encoder has to be a function.'); } var delimiter = typeof options.delimiter === 'undefined' ? defaults.delimiter : options.delimiter; var strictNullHandling = typeof options.strictNullHandling === 'boolean' ? options.strictNullHandling : defaults.strictNullHandling; var skipNulls = typeof options.skipNulls === 'boolean' ? options.skipNulls : defaults.skipNulls; var encode = typeof options.encode === 'boolean' ? options.encode : defaults.encode; var encoder = typeof options.encoder === 'function' ? options.encoder : defaults.encoder; var sort = typeof options.sort === 'function' ? options.sort : null; var allowDots = typeof options.allowDots === 'undefined' ? false : options.allowDots; var serializeDate = typeof options.serializeDate === 'function' ? options.serializeDate : defaults.serializeDate; var encodeValuesOnly = typeof options.encodeValuesOnly === 'boolean' ? options.encodeValuesOnly : defaults.encodeValuesOnly; if (typeof options.format === 'undefined') { options.format = formats.default; } else if (!Object.prototype.hasOwnProperty.call(formats.formatters, options.format)) { throw new TypeError('Unknown format option provided.'); } var formatter = formats.formatters[options.format]; var objKeys; var filter; if (typeof options.filter === 'function') { filter = options.filter; obj = filter('', obj); } else if (Array.isArray(options.filter)) { filter = options.filter; objKeys = filter; } var keys = []; if (typeof obj !== 'object' || obj === null) { return ''; } var arrayFormat; if (options.arrayFormat in arrayPrefixGenerators) { arrayFormat = options.arrayFormat; } else if ('indices' in options) { arrayFormat = options.indices ? 'indices' : 'repeat'; } else { arrayFormat = 'indices'; } var generateArrayPrefix = arrayPrefixGenerators[arrayFormat]; if (!objKeys) { objKeys = Object.keys(obj); } if (sort) { objKeys.sort(sort); } for (var i = 0; i < objKeys.length; ++i) { var key = objKeys[i]; if (skipNulls && obj[key] === null) { continue; } keys = keys.concat(stringify( obj[key], key, generateArrayPrefix, strictNullHandling, skipNulls, encode ? encoder : null, filter, sort, allowDots, serializeDate, formatter, encodeValuesOnly )); } var joined = keys.join(delimiter); var prefix = options.addQueryPrefix === true ? '?' : ''; return joined.length > 0 ? prefix + joined : ''; }; },{"./formats":1,"./utils":5}],5:[function(require,module,exports){ 'use strict'; var has = Object.prototype.hasOwnProperty; var hexTable = (function () { var array = []; for (var i = 0; i < 256; ++i) { array.push('%' + ((i < 16 ? '0' : '') + i.toString(16)).toUpperCase()); } return array; }()); exports.arrayToObject = function (source, options) { var obj = options && options.plainObjects ? Object.create(null) : {}; for (var i = 0; i < source.length; ++i) { if (typeof source[i] !== 'undefined') { obj[i] = source[i]; } } return obj; }; exports.merge = function (target, source, options) { if (!source) { return target; } if (typeof source !== 'object') { if (Array.isArray(target)) { target.push(source); } else if (typeof target === 'object') { if (options.plainObjects || options.allowPrototypes || !has.call(Object.prototype, source)) { target[source] = true; } } else { return [target, source]; } return target; } if (typeof target !== 'object') { return [target].concat(source); } var mergeTarget = target; if (Array.isArray(target) && !Array.isArray(source)) { mergeTarget = exports.arrayToObject(target, options); } if (Array.isArray(target) && Array.isArray(source)) { source.forEach(function (item, i) { if (has.call(target, i)) { if (target[i] && typeof target[i] === 'object') { target[i] = exports.merge(target[i], item, options); } else { target.push(item); } } else { target[i] = item; } }); return target; } return Object.keys(source).reduce(function (acc, key) { var value = source[key]; if (has.call(acc, key)) { acc[key] = exports.merge(acc[key], value, options); } else { acc[key] = value; } return acc; }, mergeTarget); }; exports.assign = function assignSingleSource(target, source) { return Object.keys(source).reduce(function (acc, key) { acc[key] = source[key]; return acc; }, target); }; exports.decode = function (str) { try { return decodeURIComponent(str.replace(/\+/g, ' ')); } catch (e) { return str; } }; exports.encode = function (str) { // This code was originally written by Brian White (mscdex) for the io.js core querystring library. // It has been adapted here for stricter adherence to RFC 3986 if (str.length === 0) { return str; } var string = typeof str === 'string' ? str : String(str); var out = ''; for (var i = 0; i < string.length; ++i) { var c = string.charCodeAt(i); if ( c === 0x2D // - || c === 0x2E // . || c === 0x5F // _ || c === 0x7E // ~ || (c >= 0x30 && c <= 0x39) // 0-9 || (c >= 0x41 && c <= 0x5A) // a-z || (c >= 0x61 && c <= 0x7A) // A-Z ) { out += string.charAt(i); continue; } if (c < 0x80) { out = out + hexTable[c]; continue; } if (c < 0x800) { out = out + (hexTable[0xC0 | (c >> 6)] + hexTable[0x80 | (c & 0x3F)]); continue; } if (c < 0xD800 || c >= 0xE000) { out = out + (hexTable[0xE0 | (c >> 12)] + hexTable[0x80 | ((c >> 6) & 0x3F)] + hexTable[0x80 | (c & 0x3F)]); continue; } i += 1; c = 0x10000 + (((c & 0x3FF) << 10) | (string.charCodeAt(i) & 0x3FF)); out += hexTable[0xF0 | (c >> 18)] + hexTable[0x80 | ((c >> 12) & 0x3F)] + hexTable[0x80 | ((c >> 6) & 0x3F)] + hexTable[0x80 | (c & 0x3F)]; } return out; }; exports.compact = function (obj, references) { if (typeof obj !== 'object' || obj === null) { return obj; } var refs = references || []; var lookup = refs.indexOf(obj); if (lookup !== -1) { return refs[lookup]; } refs.push(obj); if (Array.isArray(obj)) { var compacted = []; for (var i = 0; i < obj.length; ++i) { if (obj[i] && typeof obj[i] === 'object') { compacted.push(exports.compact(obj[i], refs)); } else if (typeof obj[i] !== 'undefined') { compacted.push(obj[i]); } } return compacted; } var keys = Object.keys(obj); keys.forEach(function (key) { obj[key] = exports.compact(obj[key], refs); }); return obj; }; exports.isRegExp = function (obj) { return Object.prototype.toString.call(obj) === '[object RegExp]'; }; exports.isBuffer = function (obj) { if (obj === null || typeof obj === 'undefined') { return false; } return !!(obj.constructor && obj.constructor.isBuffer && obj.constructor.isBuffer(obj)); }; },{}]},{},[2])(2) });
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/afqbrowser/site/client/js/third-party/qs.js
qs.js
URI.js - Mutating URLs IPv6 Support Version: 1.19.0 Author: Rodney Rehm Web: http://medialize.github.io/URI.js/ Licensed under MIT License http://www.opensource.org/licenses/mit-license https://mths.be/punycode v1.4.0 by @mathias URI.js - Mutating URLs Second Level Domain (SLD) Support Version: 1.19.0 Author: Rodney Rehm Web: http://medialize.github.io/URI.js/ Licensed under MIT License http://www.opensource.org/licenses/mit-license URI.js - Mutating URLs Version: 1.19.0 Author: Rodney Rehm Web: http://medialize.github.io/URI.js/ Licensed under MIT License http://www.opensource.org/licenses/mit-license URI.js - Mutating URLs URI Template Support - http://tools.ietf.org/html/rfc6570 Version: 1.19.0 Author: Rodney Rehm Web: http://medialize.github.io/URI.js/ Licensed under MIT License http://www.opensource.org/licenses/mit-license URI.js - Mutating URLs jQuery Plugin Version: 1.19.0 Author: Rodney Rehm Web: http://medialize.github.io/URI.js/jquery-uri-plugin.html Licensed under MIT License http://www.opensource.org/licenses/mit-license */ (function(d,k){"object"===typeof module&&module.exports?module.exports=k():"function"===typeof define&&define.amd?define(k):d.IPv6=k(d)})(this,function(d){var k=d&&d.IPv6;return{best:function(g){g=g.toLowerCase().split(":");var d=g.length,b=8;""===g[0]&&""===g[1]&&""===g[2]?(g.shift(),g.shift()):""===g[0]&&""===g[1]?g.shift():""===g[d-1]&&""===g[d-2]&&g.pop();d=g.length;-1!==g[d-1].indexOf(".")&&(b=7);var p;for(p=0;p<d&&""!==g[p];p++);if(p<b)for(g.splice(p,1,"0000");g.length<b;)g.splice(p,0,"0000"); for(p=0;p<b;p++){d=g[p].split("");for(var k=0;3>k;k++)if("0"===d[0]&&1<d.length)d.splice(0,1);else break;g[p]=d.join("")}d=-1;var t=k=0,n=-1,q=!1;for(p=0;p<b;p++)q?"0"===g[p]?t+=1:(q=!1,t>k&&(d=n,k=t)):"0"===g[p]&&(q=!0,n=p,t=1);t>k&&(d=n,k=t);1<k&&g.splice(d,k,"");d=g.length;b="";""===g[0]&&(b=":");for(p=0;p<d;p++){b+=g[p];if(p===d-1)break;b+=":"}""===g[d-1]&&(b+=":");return b},noConflict:function(){d.IPv6===this&&(d.IPv6=k);return this}}}); (function(d){function k(b){throw new RangeError(A[b]);}function g(b,f){for(var h=b.length,d=[];h--;)d[h]=f(b[h]);return d}function u(b,f){var h=b.split("@"),d="";1<h.length&&(d=h[0]+"@",b=h[1]);b=b.replace(E,".");h=b.split(".");h=g(h,f).join(".");return d+h}function b(b){for(var f=[],h=0,d=b.length,g,a;h<d;)g=b.charCodeAt(h++),55296<=g&&56319>=g&&h<d?(a=b.charCodeAt(h++),56320==(a&64512)?f.push(((g&1023)<<10)+(a&1023)+65536):(f.push(g),h--)):f.push(g);return f}function p(b){return g(b,function(b){var f= "";65535<b&&(b-=65536,f+=y(b>>>10&1023|55296),b=56320|b&1023);return f+=y(b)}).join("")}function B(b,f){return b+22+75*(26>b)-((0!=f)<<5)}function t(b,h,d){var g=0;b=d?f(b/700):b>>1;for(b+=f(b/h);455<b;g+=36)b=f(b/35);return f(g+36*b/(b+38))}function n(b){var h=[],d=b.length,g=0,n=128,a=72,c,e;var m=b.lastIndexOf("-");0>m&&(m=0);for(c=0;c<m;++c)128<=b.charCodeAt(c)&&k("not-basic"),h.push(b.charCodeAt(c));for(m=0<m?m+1:0;m<d;){c=g;var l=1;for(e=36;;e+=36){m>=d&&k("invalid-input");var x=b.charCodeAt(m++); x=10>x-48?x-22:26>x-65?x-65:26>x-97?x-97:36;(36<=x||x>f((2147483647-g)/l))&&k("overflow");g+=x*l;var r=e<=a?1:e>=a+26?26:e-a;if(x<r)break;x=36-r;l>f(2147483647/x)&&k("overflow");l*=x}l=h.length+1;a=t(g-c,l,0==c);f(g/l)>2147483647-n&&k("overflow");n+=f(g/l);g%=l;h.splice(g++,0,n)}return p(h)}function q(h){var d,g,n,r=[];h=b(h);var a=h.length;var c=128;var e=0;var m=72;for(n=0;n<a;++n){var l=h[n];128>l&&r.push(y(l))}for((d=g=r.length)&&r.push("-");d<a;){var x=2147483647;for(n=0;n<a;++n)l=h[n],l>=c&& l<x&&(x=l);var q=d+1;x-c>f((2147483647-e)/q)&&k("overflow");e+=(x-c)*q;c=x;for(n=0;n<a;++n)if(l=h[n],l<c&&2147483647<++e&&k("overflow"),l==c){var v=e;for(x=36;;x+=36){l=x<=m?1:x>=m+26?26:x-m;if(v<l)break;var p=v-l;v=36-l;r.push(y(B(l+p%v,0)));v=f(p/v)}r.push(y(B(v,0)));m=t(e,q,d==g);e=0;++d}++e;++c}return r.join("")}var w="object"==typeof exports&&exports&&!exports.nodeType&&exports,h="object"==typeof module&&module&&!module.nodeType&&module,r="object"==typeof global&&global;if(r.global===r||r.window=== r||r.self===r)d=r;var v=/^xn--/,D=/[^\x20-\x7E]/,E=/[\x2E\u3002\uFF0E\uFF61]/g,A={overflow:"Overflow: input needs wider integers to process","not-basic":"Illegal input >= 0x80 (not a basic code point)","invalid-input":"Invalid input"},f=Math.floor,y=String.fromCharCode,C;var z={version:"1.3.2",ucs2:{decode:b,encode:p},decode:n,encode:q,toASCII:function(b){return u(b,function(b){return D.test(b)?"xn--"+q(b):b})},toUnicode:function(b){return u(b,function(b){return v.test(b)?n(b.slice(4).toLowerCase()): b})}};if("function"==typeof define&&"object"==typeof define.amd&&define.amd)define("punycode",function(){return z});else if(w&&h)if(module.exports==w)h.exports=z;else for(C in z)z.hasOwnProperty(C)&&(w[C]=z[C]);else d.punycode=z})(this); (function(d,k){"object"===typeof module&&module.exports?module.exports=k():"function"===typeof define&&define.amd?define(k):d.SecondLevelDomains=k(d)})(this,function(d){var k=d&&d.SecondLevelDomains,g={list:{ac:" com gov mil net org ",ae:" ac co gov mil name net org pro sch ",af:" com edu gov net org ",al:" com edu gov mil net org ",ao:" co ed gv it og pb ",ar:" com edu gob gov int mil net org tur ",at:" ac co gv or ",au:" asn com csiro edu gov id net org ",ba:" co com edu gov mil net org rs unbi unmo unsa untz unze ", bb:" biz co com edu gov info net org store tv ",bh:" biz cc com edu gov info net org ",bn:" com edu gov net org ",bo:" com edu gob gov int mil net org tv ",br:" adm adv agr am arq art ato b bio blog bmd cim cng cnt com coop ecn edu eng esp etc eti far flog fm fnd fot fst g12 ggf gov imb ind inf jor jus lel mat med mil mus net nom not ntr odo org ppg pro psc psi qsl rec slg srv tmp trd tur tv vet vlog wiki zlg ",bs:" com edu gov net org ",bz:" du et om ov rg ",ca:" ab bc mb nb nf nl ns nt nu on pe qc sk yk ", ck:" biz co edu gen gov info net org ",cn:" ac ah bj com cq edu fj gd gov gs gx gz ha hb he hi hl hn jl js jx ln mil net nm nx org qh sc sd sh sn sx tj tw xj xz yn zj ",co:" com edu gov mil net nom org ",cr:" ac c co ed fi go or sa ",cy:" ac biz com ekloges gov ltd name net org parliament press pro tm ","do":" art com edu gob gov mil net org sld web ",dz:" art asso com edu gov net org pol ",ec:" com edu fin gov info med mil net org pro ",eg:" com edu eun gov mil name net org sci ",er:" com edu gov ind mil net org rochest w ", es:" com edu gob nom org ",et:" biz com edu gov info name net org ",fj:" ac biz com info mil name net org pro ",fk:" ac co gov net nom org ",fr:" asso com f gouv nom prd presse tm ",gg:" co net org ",gh:" com edu gov mil org ",gn:" ac com gov net org ",gr:" com edu gov mil net org ",gt:" com edu gob ind mil net org ",gu:" com edu gov net org ",hk:" com edu gov idv net org ",hu:" 2000 agrar bolt casino city co erotica erotika film forum games hotel info ingatlan jogasz konyvelo lakas media news org priv reklam sex shop sport suli szex tm tozsde utazas video ", id:" ac co go mil net or sch web ",il:" ac co gov idf k12 muni net org ","in":" ac co edu ernet firm gen gov i ind mil net nic org res ",iq:" com edu gov i mil net org ",ir:" ac co dnssec gov i id net org sch ",it:" edu gov ",je:" co net org ",jo:" com edu gov mil name net org sch ",jp:" ac ad co ed go gr lg ne or ",ke:" ac co go info me mobi ne or sc ",kh:" com edu gov mil net org per ",ki:" biz com de edu gov info mob net org tel ",km:" asso com coop edu gouv k medecin mil nom notaires pharmaciens presse tm veterinaire ", kn:" edu gov net org ",kr:" ac busan chungbuk chungnam co daegu daejeon es gangwon go gwangju gyeongbuk gyeonggi gyeongnam hs incheon jeju jeonbuk jeonnam k kg mil ms ne or pe re sc seoul ulsan ",kw:" com edu gov net org ",ky:" com edu gov net org ",kz:" com edu gov mil net org ",lb:" com edu gov net org ",lk:" assn com edu gov grp hotel int ltd net ngo org sch soc web ",lr:" com edu gov net org ",lv:" asn com conf edu gov id mil net org ",ly:" com edu gov id med net org plc sch ",ma:" ac co gov m net org press ", mc:" asso tm ",me:" ac co edu gov its net org priv ",mg:" com edu gov mil nom org prd tm ",mk:" com edu gov inf name net org pro ",ml:" com edu gov net org presse ",mn:" edu gov org ",mo:" com edu gov net org ",mt:" com edu gov net org ",mv:" aero biz com coop edu gov info int mil museum name net org pro ",mw:" ac co com coop edu gov int museum net org ",mx:" com edu gob net org ",my:" com edu gov mil name net org sch ",nf:" arts com firm info net other per rec store web ",ng:" biz com edu gov mil mobi name net org sch ", ni:" ac co com edu gob mil net nom org ",np:" com edu gov mil net org ",nr:" biz com edu gov info net org ",om:" ac biz co com edu gov med mil museum net org pro sch ",pe:" com edu gob mil net nom org sld ",ph:" com edu gov i mil net ngo org ",pk:" biz com edu fam gob gok gon gop gos gov net org web ",pl:" art bialystok biz com edu gda gdansk gorzow gov info katowice krakow lodz lublin mil net ngo olsztyn org poznan pwr radom slupsk szczecin torun warszawa waw wroc wroclaw zgora ",pr:" ac biz com edu est gov info isla name net org pro prof ", ps:" com edu gov net org plo sec ",pw:" belau co ed go ne or ",ro:" arts com firm info nom nt org rec store tm www ",rs:" ac co edu gov in org ",sb:" com edu gov net org ",sc:" com edu gov net org ",sh:" co com edu gov net nom org ",sl:" com edu gov net org ",st:" co com consulado edu embaixada gov mil net org principe saotome store ",sv:" com edu gob org red ",sz:" ac co org ",tr:" av bbs bel biz com dr edu gen gov info k12 name net org pol tel tsk tv web ",tt:" aero biz cat co com coop edu gov info int jobs mil mobi museum name net org pro tel travel ", tw:" club com ebiz edu game gov idv mil net org ",mu:" ac co com gov net or org ",mz:" ac co edu gov org ",na:" co com ",nz:" ac co cri geek gen govt health iwi maori mil net org parliament school ",pa:" abo ac com edu gob ing med net nom org sld ",pt:" com edu gov int net nome org publ ",py:" com edu gov mil net org ",qa:" com edu gov mil net org ",re:" asso com nom ",ru:" ac adygeya altai amur arkhangelsk astrakhan bashkiria belgorod bir bryansk buryatia cbg chel chelyabinsk chita chukotka chuvashia com dagestan e-burg edu gov grozny int irkutsk ivanovo izhevsk jar joshkar-ola kalmykia kaluga kamchatka karelia kazan kchr kemerovo khabarovsk khakassia khv kirov koenig komi kostroma kranoyarsk kuban kurgan kursk lipetsk magadan mari mari-el marine mil mordovia mosreg msk murmansk nalchik net nnov nov novosibirsk nsk omsk orenburg org oryol penza perm pp pskov ptz rnd ryazan sakhalin samara saratov simbirsk smolensk spb stavropol stv surgut tambov tatarstan tom tomsk tsaritsyn tsk tula tuva tver tyumen udm udmurtia ulan-ude vladikavkaz vladimir vladivostok volgograd vologda voronezh vrn vyatka yakutia yamal yekaterinburg yuzhno-sakhalinsk ", rw:" ac co com edu gouv gov int mil net ",sa:" com edu gov med net org pub sch ",sd:" com edu gov info med net org tv ",se:" a ac b bd c d e f g h i k l m n o org p parti pp press r s t tm u w x y z ",sg:" com edu gov idn net org per ",sn:" art com edu gouv org perso univ ",sy:" com edu gov mil net news org ",th:" ac co go in mi net or ",tj:" ac biz co com edu go gov info int mil name net nic org test web ",tn:" agrinet com defense edunet ens fin gov ind info intl mincom nat net org perso rnrt rns rnu tourism ", tz:" ac co go ne or ",ua:" biz cherkassy chernigov chernovtsy ck cn co com crimea cv dn dnepropetrovsk donetsk dp edu gov if in ivano-frankivsk kh kharkov kherson khmelnitskiy kiev kirovograd km kr ks kv lg lugansk lutsk lviv me mk net nikolaev od odessa org pl poltava pp rovno rv sebastopol sumy te ternopil uzhgorod vinnica vn zaporizhzhe zhitomir zp zt ",ug:" ac co go ne or org sc ",uk:" ac bl british-library co cym gov govt icnet jet lea ltd me mil mod national-library-scotland nel net nhs nic nls org orgn parliament plc police sch scot soc ", us:" dni fed isa kids nsn ",uy:" com edu gub mil net org ",ve:" co com edu gob info mil net org web ",vi:" co com k12 net org ",vn:" ac biz com edu gov health info int name net org pro ",ye:" co com gov ltd me net org plc ",yu:" ac co edu gov org ",za:" ac agric alt bourse city co cybernet db edu gov grondar iaccess imt inca landesign law mil net ngo nis nom olivetti org pix school tm web ",zm:" ac co com edu gov net org sch ",com:"ar br cn de eu gb gr hu jpn kr no qc ru sa se uk us uy za ",net:"gb jp se uk ", org:"ae",de:"com "},has:function(d){var b=d.lastIndexOf(".");if(0>=b||b>=d.length-1)return!1;var k=d.lastIndexOf(".",b-1);if(0>=k||k>=b-1)return!1;var u=g.list[d.slice(b+1)];return u?0<=u.indexOf(" "+d.slice(k+1,b)+" "):!1},is:function(d){var b=d.lastIndexOf(".");if(0>=b||b>=d.length-1||0<=d.lastIndexOf(".",b-1))return!1;var k=g.list[d.slice(b+1)];return k?0<=k.indexOf(" "+d.slice(0,b)+" "):!1},get:function(d){var b=d.lastIndexOf(".");if(0>=b||b>=d.length-1)return null;var k=d.lastIndexOf(".",b-1); if(0>=k||k>=b-1)return null;var u=g.list[d.slice(b+1)];return!u||0>u.indexOf(" "+d.slice(k+1,b)+" ")?null:d.slice(k+1)},noConflict:function(){d.SecondLevelDomains===this&&(d.SecondLevelDomains=k);return this}};return g}); (function(d,k){"object"===typeof module&&module.exports?module.exports=k(require("./punycode"),require("./IPv6"),require("./SecondLevelDomains")):"function"===typeof define&&define.amd?define(["./punycode","./IPv6","./SecondLevelDomains"],k):d.URI=k(d.punycode,d.IPv6,d.SecondLevelDomains,d)})(this,function(d,k,g,u){function b(a,c){var e=1<=arguments.length,m=2<=arguments.length;if(!(this instanceof b))return e?m?new b(a,c):new b(a):new b;if(void 0===a){if(e)throw new TypeError("undefined is not a valid argument for URI"); a="undefined"!==typeof location?location.href+"":""}if(null===a&&e)throw new TypeError("null is not a valid argument for URI");this.href(a);return void 0!==c?this.absoluteTo(c):this}function p(a){return a.replace(/([.*+?^=!:${}()|[\]\/\\])/g,"\\$1")}function B(a){return void 0===a?"Undefined":String(Object.prototype.toString.call(a)).slice(8,-1)}function t(a){return"Array"===B(a)}function n(a,c){var e={},b;if("RegExp"===B(c))e=null;else if(t(c)){var l=0;for(b=c.length;l<b;l++)e[c[l]]=!0}else e[c]= !0;l=0;for(b=a.length;l<b;l++)if(e&&void 0!==e[a[l]]||!e&&c.test(a[l]))a.splice(l,1),b--,l--;return a}function q(a,c){var e;if(t(c)){var b=0;for(e=c.length;b<e;b++)if(!q(a,c[b]))return!1;return!0}var l=B(c);b=0;for(e=a.length;b<e;b++)if("RegExp"===l){if("string"===typeof a[b]&&a[b].match(c))return!0}else if(a[b]===c)return!0;return!1}function w(a,c){if(!t(a)||!t(c)||a.length!==c.length)return!1;a.sort();c.sort();for(var e=0,b=a.length;e<b;e++)if(a[e]!==c[e])return!1;return!0}function h(a){return a.replace(/^\/+|\/+$/g, "")}function r(a){return escape(a)}function v(a){return encodeURIComponent(a).replace(/[!'()*]/g,r).replace(/\*/g,"%2A")}function D(a){return function(c,e){if(void 0===c)return this._parts[a]||"";this._parts[a]=c||null;this.build(!e);return this}}function E(a,c){return function(e,b){if(void 0===e)return this._parts[a]||"";null!==e&&(e+="",e.charAt(0)===c&&(e=e.substring(1)));this._parts[a]=e;this.build(!b);return this}}var A=u&&u.URI;b.version="1.19.0";var f=b.prototype,y=Object.prototype.hasOwnProperty; b._parts=function(){return{protocol:null,username:null,password:null,hostname:null,urn:null,port:null,path:null,query:null,fragment:null,preventInvalidHostname:b.preventInvalidHostname,duplicateQueryParameters:b.duplicateQueryParameters,escapeQuerySpace:b.escapeQuerySpace}};b.preventInvalidHostname=!1;b.duplicateQueryParameters=!1;b.escapeQuerySpace=!0;b.protocol_expression=/^[a-z][a-z0-9.+-]*$/i;b.idn_expression=/[^a-z0-9\._-]/i;b.punycode_expression=/(xn--)/i;b.ip4_expression=/^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$/; b.ip6_expression=/^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$/; b.find_uri_expression=/\b((?:[a-z][\w-]+:(?:\/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}\/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?\u00ab\u00bb\u201c\u201d\u2018\u2019]))/ig;b.findUri={start:/\b(?:([a-z][a-z0-9.+-]*:\/\/)|www\.)/gi,end:/[\s\r\n]|$/,trim:/[`!()\[\]{};:'".,<>?\u00ab\u00bb\u201c\u201d\u201e\u2018\u2019]+$/,parens:/(\([^\)]*\)|\[[^\]]*\]|\{[^}]*\}|<[^>]*>)/g};b.defaultPorts={http:"80",https:"443",ftp:"21", gopher:"70",ws:"80",wss:"443"};b.hostProtocols=["http","https"];b.invalid_hostname_characters=/[^a-zA-Z0-9\.\-:_]/;b.domAttributes={a:"href",blockquote:"cite",link:"href",base:"href",script:"src",form:"action",img:"src",area:"href",iframe:"src",embed:"src",source:"src",track:"src",input:"src",audio:"src",video:"src"};b.getDomAttribute=function(a){if(a&&a.nodeName){var c=a.nodeName.toLowerCase();if("input"!==c||"image"===a.type)return b.domAttributes[c]}};b.encode=v;b.decode=decodeURIComponent;b.iso8859= function(){b.encode=escape;b.decode=unescape};b.unicode=function(){b.encode=v;b.decode=decodeURIComponent};b.characters={pathname:{encode:{expression:/%(24|26|2B|2C|3B|3D|3A|40)/ig,map:{"%24":"$","%26":"&","%2B":"+","%2C":",","%3B":";","%3D":"=","%3A":":","%40":"@"}},decode:{expression:/[\/\?#]/g,map:{"/":"%2F","?":"%3F","#":"%23"}}},reserved:{encode:{expression:/%(21|23|24|26|27|28|29|2A|2B|2C|2F|3A|3B|3D|3F|40|5B|5D)/ig,map:{"%3A":":","%2F":"/","%3F":"?","%23":"#","%5B":"[","%5D":"]","%40":"@", "%21":"!","%24":"$","%26":"&","%27":"'","%28":"(","%29":")","%2A":"*","%2B":"+","%2C":",","%3B":";","%3D":"="}}},urnpath:{encode:{expression:/%(21|24|27|28|29|2A|2B|2C|3B|3D|40)/ig,map:{"%21":"!","%24":"$","%27":"'","%28":"(","%29":")","%2A":"*","%2B":"+","%2C":",","%3B":";","%3D":"=","%40":"@"}},decode:{expression:/[\/\?#:]/g,map:{"/":"%2F","?":"%3F","#":"%23",":":"%3A"}}}};b.encodeQuery=function(a,c){var e=b.encode(a+"");void 0===c&&(c=b.escapeQuerySpace);return c?e.replace(/%20/g,"+"):e};b.decodeQuery= function(a,c){a+="";void 0===c&&(c=b.escapeQuerySpace);try{return b.decode(c?a.replace(/\+/g,"%20"):a)}catch(e){return a}};var C={encode:"encode",decode:"decode"},z,F=function(a,c){return function(e){try{return b[c](e+"").replace(b.characters[a][c].expression,function(e){return b.characters[a][c].map[e]})}catch(m){return e}}};for(z in C)b[z+"PathSegment"]=F("pathname",C[z]),b[z+"UrnPathSegment"]=F("urnpath",C[z]);C=function(a,c,e){return function(m){var l=e?function(a){return b[c](b[e](a))}:b[c]; m=(m+"").split(a);for(var d=0,f=m.length;d<f;d++)m[d]=l(m[d]);return m.join(a)}};b.decodePath=C("/","decodePathSegment");b.decodeUrnPath=C(":","decodeUrnPathSegment");b.recodePath=C("/","encodePathSegment","decode");b.recodeUrnPath=C(":","encodeUrnPathSegment","decode");b.encodeReserved=F("reserved","encode");b.parse=function(a,c){c||(c={preventInvalidHostname:b.preventInvalidHostname});var e=a.indexOf("#");-1<e&&(c.fragment=a.substring(e+1)||null,a=a.substring(0,e));e=a.indexOf("?");-1<e&&(c.query= a.substring(e+1)||null,a=a.substring(0,e));"//"===a.substring(0,2)?(c.protocol=null,a=a.substring(2),a=b.parseAuthority(a,c)):(e=a.indexOf(":"),-1<e&&(c.protocol=a.substring(0,e)||null,c.protocol&&!c.protocol.match(b.protocol_expression)?c.protocol=void 0:"//"===a.substring(e+1,e+3)?(a=a.substring(e+3),a=b.parseAuthority(a,c)):(a=a.substring(e+1),c.urn=!0)));c.path=a;return c};b.parseHost=function(a,c){a||(a="");a=a.replace(/\\/g,"/");var e=a.indexOf("/");-1===e&&(e=a.length);if("["===a.charAt(0)){var m= a.indexOf("]");c.hostname=a.substring(1,m)||null;c.port=a.substring(m+2,e)||null;"/"===c.port&&(c.port=null)}else{var l=a.indexOf(":");m=a.indexOf("/");l=a.indexOf(":",l+1);-1!==l&&(-1===m||l<m)?(c.hostname=a.substring(0,e)||null,c.port=null):(m=a.substring(0,e).split(":"),c.hostname=m[0]||null,c.port=m[1]||null)}c.hostname&&"/"!==a.substring(e).charAt(0)&&(e++,a="/"+a);c.preventInvalidHostname&&b.ensureValidHostname(c.hostname,c.protocol);c.port&&b.ensureValidPort(c.port);return a.substring(e)|| "/"};b.parseAuthority=function(a,c){a=b.parseUserinfo(a,c);return b.parseHost(a,c)};b.parseUserinfo=function(a,c){var e=a.indexOf("/"),m=a.lastIndexOf("@",-1<e?e:a.length-1);-1<m&&(-1===e||m<e)?(e=a.substring(0,m).split(":"),c.username=e[0]?b.decode(e[0]):null,e.shift(),c.password=e[0]?b.decode(e.join(":")):null,a=a.substring(m+1)):(c.username=null,c.password=null);return a};b.parseQuery=function(a,c){if(!a)return{};a=a.replace(/&+/g,"&").replace(/^\?*&*|&+$/g,"");if(!a)return{};for(var e={},m=a.split("&"), l=m.length,d,f,h=0;h<l;h++)if(d=m[h].split("="),f=b.decodeQuery(d.shift(),c),d=d.length?b.decodeQuery(d.join("="),c):null,y.call(e,f)){if("string"===typeof e[f]||null===e[f])e[f]=[e[f]];e[f].push(d)}else e[f]=d;return e};b.build=function(a){var c="";a.protocol&&(c+=a.protocol+":");a.urn||!c&&!a.hostname||(c+="//");c+=b.buildAuthority(a)||"";"string"===typeof a.path&&("/"!==a.path.charAt(0)&&"string"===typeof a.hostname&&(c+="/"),c+=a.path);"string"===typeof a.query&&a.query&&(c+="?"+a.query);"string"=== typeof a.fragment&&a.fragment&&(c+="#"+a.fragment);return c};b.buildHost=function(a){var c="";if(a.hostname)c=b.ip6_expression.test(a.hostname)?c+("["+a.hostname+"]"):c+a.hostname;else return"";a.port&&(c+=":"+a.port);return c};b.buildAuthority=function(a){return b.buildUserinfo(a)+b.buildHost(a)};b.buildUserinfo=function(a){var c="";a.username&&(c+=b.encode(a.username));a.password&&(c+=":"+b.encode(a.password));c&&(c+="@");return c};b.buildQuery=function(a,c,e){var m="",l,d;for(l in a)if(y.call(a, l)&&l)if(t(a[l])){var f={};var h=0;for(d=a[l].length;h<d;h++)void 0!==a[l][h]&&void 0===f[a[l][h]+""]&&(m+="&"+b.buildQueryParameter(l,a[l][h],e),!0!==c&&(f[a[l][h]+""]=!0))}else void 0!==a[l]&&(m+="&"+b.buildQueryParameter(l,a[l],e));return m.substring(1)};b.buildQueryParameter=function(a,c,e){return b.encodeQuery(a,e)+(null!==c?"="+b.encodeQuery(c,e):"")};b.addQuery=function(a,c,e){if("object"===typeof c)for(var m in c)y.call(c,m)&&b.addQuery(a,m,c[m]);else if("string"===typeof c)void 0===a[c]? a[c]=e:("string"===typeof a[c]&&(a[c]=[a[c]]),t(e)||(e=[e]),a[c]=(a[c]||[]).concat(e));else throw new TypeError("URI.addQuery() accepts an object, string as the name parameter");};b.setQuery=function(a,c,e){if("object"===typeof c)for(var m in c)y.call(c,m)&&b.setQuery(a,m,c[m]);else if("string"===typeof c)a[c]=void 0===e?null:e;else throw new TypeError("URI.setQuery() accepts an object, string as the name parameter");};b.removeQuery=function(a,c,e){var m;if(t(c))for(e=0,m=c.length;e<m;e++)a[c[e]]= void 0;else if("RegExp"===B(c))for(m in a)c.test(m)&&(a[m]=void 0);else if("object"===typeof c)for(m in c)y.call(c,m)&&b.removeQuery(a,m,c[m]);else if("string"===typeof c)void 0!==e?"RegExp"===B(e)?!t(a[c])&&e.test(a[c])?a[c]=void 0:a[c]=n(a[c],e):a[c]!==String(e)||t(e)&&1!==e.length?t(a[c])&&(a[c]=n(a[c],e)):a[c]=void 0:a[c]=void 0;else throw new TypeError("URI.removeQuery() accepts an object, string, RegExp as the first parameter");};b.hasQuery=function(a,c,e,m){switch(B(c)){case "String":break; case "RegExp":for(var l in a)if(y.call(a,l)&&c.test(l)&&(void 0===e||b.hasQuery(a,l,e)))return!0;return!1;case "Object":for(var d in c)if(y.call(c,d)&&!b.hasQuery(a,d,c[d]))return!1;return!0;default:throw new TypeError("URI.hasQuery() accepts a string, regular expression or object as the name parameter");}switch(B(e)){case "Undefined":return c in a;case "Boolean":return a=!(t(a[c])?!a[c].length:!a[c]),e===a;case "Function":return!!e(a[c],c,a);case "Array":return t(a[c])?(m?q:w)(a[c],e):!1;case "RegExp":return t(a[c])? m?q(a[c],e):!1:!(!a[c]||!a[c].match(e));case "Number":e=String(e);case "String":return t(a[c])?m?q(a[c],e):!1:a[c]===e;default:throw new TypeError("URI.hasQuery() accepts undefined, boolean, string, number, RegExp, Function as the value parameter");}};b.joinPaths=function(){for(var a=[],c=[],e=0,m=0;m<arguments.length;m++){var l=new b(arguments[m]);a.push(l);l=l.segment();for(var d=0;d<l.length;d++)"string"===typeof l[d]&&c.push(l[d]),l[d]&&e++}if(!c.length||!e)return new b("");c=(new b("")).segment(c); ""!==a[0].path()&&"/"!==a[0].path().slice(0,1)||c.path("/"+c.path());return c.normalize()};b.commonPath=function(a,c){var e=Math.min(a.length,c.length),b;for(b=0;b<e;b++)if(a.charAt(b)!==c.charAt(b)){b--;break}if(1>b)return a.charAt(0)===c.charAt(0)&&"/"===a.charAt(0)?"/":"";if("/"!==a.charAt(b)||"/"!==c.charAt(b))b=a.substring(0,b).lastIndexOf("/");return a.substring(0,b+1)};b.withinString=function(a,c,e){e||(e={});var m=e.start||b.findUri.start,d=e.end||b.findUri.end,f=e.trim||b.findUri.trim,h= e.parens||b.findUri.parens,g=/[a-z0-9-]=["']?$/i;for(m.lastIndex=0;;){var n=m.exec(a);if(!n)break;var r=n.index;if(e.ignoreHtml){var k=a.slice(Math.max(r-3,0),r);if(k&&g.test(k))continue}var v=r+a.slice(r).search(d);k=a.slice(r,v);for(v=-1;;){var q=h.exec(k);if(!q)break;v=Math.max(v,q.index+q[0].length)}k=-1<v?k.slice(0,v)+k.slice(v).replace(f,""):k.replace(f,"");k.length<=n[0].length||e.ignore&&e.ignore.test(k)||(v=r+k.length,n=c(k,r,v,a),void 0===n?m.lastIndex=v:(n=String(n),a=a.slice(0,r)+n+a.slice(v), m.lastIndex=r+n.length))}m.lastIndex=0;return a};b.ensureValidHostname=function(a,c){var e=!!a,m=!1;c&&(m=q(b.hostProtocols,c));if(m&&!e)throw new TypeError("Hostname cannot be empty, if protocol is "+c);if(a&&a.match(b.invalid_hostname_characters)){if(!d)throw new TypeError('Hostname "'+a+'" contains characters other than [A-Z0-9.-:_] and Punycode.js is not available');if(d.toASCII(a).match(b.invalid_hostname_characters))throw new TypeError('Hostname "'+a+'" contains characters other than [A-Z0-9.-:_]'); }};b.ensureValidPort=function(a){if(a){var c=Number(a);if(!(/^[0-9]+$/.test(c)&&0<c&&65536>c))throw new TypeError('Port "'+a+'" is not a valid port');}};b.noConflict=function(a){if(a)return a={URI:this.noConflict()},u.URITemplate&&"function"===typeof u.URITemplate.noConflict&&(a.URITemplate=u.URITemplate.noConflict()),u.IPv6&&"function"===typeof u.IPv6.noConflict&&(a.IPv6=u.IPv6.noConflict()),u.SecondLevelDomains&&"function"===typeof u.SecondLevelDomains.noConflict&&(a.SecondLevelDomains=u.SecondLevelDomains.noConflict()), a;u.URI===this&&(u.URI=A);return this};f.build=function(a){if(!0===a)this._deferred_build=!0;else if(void 0===a||this._deferred_build)this._string=b.build(this._parts),this._deferred_build=!1;return this};f.clone=function(){return new b(this)};f.valueOf=f.toString=function(){return this.build(!1)._string};f.protocol=D("protocol");f.username=D("username");f.password=D("password");f.hostname=D("hostname");f.port=D("port");f.query=E("query","?");f.fragment=E("fragment","#");f.search=function(a,c){var b= this.query(a,c);return"string"===typeof b&&b.length?"?"+b:b};f.hash=function(a,c){var b=this.fragment(a,c);return"string"===typeof b&&b.length?"#"+b:b};f.pathname=function(a,c){if(void 0===a||!0===a){var e=this._parts.path||(this._parts.hostname?"/":"");return a?(this._parts.urn?b.decodeUrnPath:b.decodePath)(e):e}this._parts.path=this._parts.urn?a?b.recodeUrnPath(a):"":a?b.recodePath(a):"/";this.build(!c);return this};f.path=f.pathname;f.href=function(a,c){var e;if(void 0===a)return this.toString(); this._string="";this._parts=b._parts();var d=a instanceof b,l="object"===typeof a&&(a.hostname||a.path||a.pathname);a.nodeName&&(l=b.getDomAttribute(a),a=a[l]||"",l=!1);!d&&l&&void 0!==a.pathname&&(a=a.toString());if("string"===typeof a||a instanceof String)this._parts=b.parse(String(a),this._parts);else if(d||l)for(e in d=d?a._parts:a,d)y.call(this._parts,e)&&(this._parts[e]=d[e]);else throw new TypeError("invalid input");this.build(!c);return this};f.is=function(a){var c=!1,e=!1,d=!1,l=!1,f=!1, h=!1,n=!1,r=!this._parts.urn;this._parts.hostname&&(r=!1,e=b.ip4_expression.test(this._parts.hostname),d=b.ip6_expression.test(this._parts.hostname),c=e||d,f=(l=!c)&&g&&g.has(this._parts.hostname),h=l&&b.idn_expression.test(this._parts.hostname),n=l&&b.punycode_expression.test(this._parts.hostname));switch(a.toLowerCase()){case "relative":return r;case "absolute":return!r;case "domain":case "name":return l;case "sld":return f;case "ip":return c;case "ip4":case "ipv4":case "inet4":return e;case "ip6":case "ipv6":case "inet6":return d; case "idn":return h;case "url":return!this._parts.urn;case "urn":return!!this._parts.urn;case "punycode":return n}return null};var G=f.protocol,H=f.port,I=f.hostname;f.protocol=function(a,c){if(a&&(a=a.replace(/:(\/\/)?$/,""),!a.match(b.protocol_expression)))throw new TypeError('Protocol "'+a+"\" contains characters other than [A-Z0-9.+-] or doesn't start with [A-Z]");return G.call(this,a,c)};f.scheme=f.protocol;f.port=function(a,c){if(this._parts.urn)return void 0===a?"":this;void 0!==a&&(0===a&& (a=null),a&&(a+="",":"===a.charAt(0)&&(a=a.substring(1)),b.ensureValidPort(a)));return H.call(this,a,c)};f.hostname=function(a,c){if(this._parts.urn)return void 0===a?"":this;if(void 0!==a){var e={preventInvalidHostname:this._parts.preventInvalidHostname};if("/"!==b.parseHost(a,e))throw new TypeError('Hostname "'+a+'" contains characters other than [A-Z0-9.-]');a=e.hostname;this._parts.preventInvalidHostname&&b.ensureValidHostname(a,this._parts.protocol)}return I.call(this,a,c)};f.origin=function(a, c){if(this._parts.urn)return void 0===a?"":this;if(void 0===a){var e=this.protocol();return this.authority()?(e?e+"://":"")+this.authority():""}e=b(a);this.protocol(e.protocol()).authority(e.authority()).build(!c);return this};f.host=function(a,c){if(this._parts.urn)return void 0===a?"":this;if(void 0===a)return this._parts.hostname?b.buildHost(this._parts):"";if("/"!==b.parseHost(a,this._parts))throw new TypeError('Hostname "'+a+'" contains characters other than [A-Z0-9.-]');this.build(!c);return this}; f.authority=function(a,c){if(this._parts.urn)return void 0===a?"":this;if(void 0===a)return this._parts.hostname?b.buildAuthority(this._parts):"";if("/"!==b.parseAuthority(a,this._parts))throw new TypeError('Hostname "'+a+'" contains characters other than [A-Z0-9.-]');this.build(!c);return this};f.userinfo=function(a,c){if(this._parts.urn)return void 0===a?"":this;if(void 0===a){var e=b.buildUserinfo(this._parts);return e?e.substring(0,e.length-1):e}"@"!==a[a.length-1]&&(a+="@");b.parseUserinfo(a, this._parts);this.build(!c);return this};f.resource=function(a,c){if(void 0===a)return this.path()+this.search()+this.hash();var e=b.parse(a);this._parts.path=e.path;this._parts.query=e.query;this._parts.fragment=e.fragment;this.build(!c);return this};f.subdomain=function(a,c){if(this._parts.urn)return void 0===a?"":this;if(void 0===a){if(!this._parts.hostname||this.is("IP"))return"";var e=this._parts.hostname.length-this.domain().length-1;return this._parts.hostname.substring(0,e)||""}e=this._parts.hostname.length- this.domain().length;e=this._parts.hostname.substring(0,e);e=new RegExp("^"+p(e));a&&"."!==a.charAt(a.length-1)&&(a+=".");if(-1!==a.indexOf(":"))throw new TypeError("Domains cannot contain colons");a&&b.ensureValidHostname(a,this._parts.protocol);this._parts.hostname=this._parts.hostname.replace(e,a);this.build(!c);return this};f.domain=function(a,c){if(this._parts.urn)return void 0===a?"":this;"boolean"===typeof a&&(c=a,a=void 0);if(void 0===a){if(!this._parts.hostname||this.is("IP"))return"";var e= this._parts.hostname.match(/\./g);if(e&&2>e.length)return this._parts.hostname;e=this._parts.hostname.length-this.tld(c).length-1;e=this._parts.hostname.lastIndexOf(".",e-1)+1;return this._parts.hostname.substring(e)||""}if(!a)throw new TypeError("cannot set domain empty");if(-1!==a.indexOf(":"))throw new TypeError("Domains cannot contain colons");b.ensureValidHostname(a,this._parts.protocol);!this._parts.hostname||this.is("IP")?this._parts.hostname=a:(e=new RegExp(p(this.domain())+"$"),this._parts.hostname= this._parts.hostname.replace(e,a));this.build(!c);return this};f.tld=function(a,c){if(this._parts.urn)return void 0===a?"":this;"boolean"===typeof a&&(c=a,a=void 0);if(void 0===a){if(!this._parts.hostname||this.is("IP"))return"";var b=this._parts.hostname.lastIndexOf(".");b=this._parts.hostname.substring(b+1);return!0!==c&&g&&g.list[b.toLowerCase()]?g.get(this._parts.hostname)||b:b}if(a)if(a.match(/[^a-zA-Z0-9-]/))if(g&&g.is(a))b=new RegExp(p(this.tld())+"$"),this._parts.hostname=this._parts.hostname.replace(b, a);else throw new TypeError('TLD "'+a+'" contains characters other than [A-Z0-9]');else{if(!this._parts.hostname||this.is("IP"))throw new ReferenceError("cannot set TLD on non-domain host");b=new RegExp(p(this.tld())+"$");this._parts.hostname=this._parts.hostname.replace(b,a)}else throw new TypeError("cannot set TLD empty");this.build(!c);return this};f.directory=function(a,c){if(this._parts.urn)return void 0===a?"":this;if(void 0===a||!0===a){if(!this._parts.path&&!this._parts.hostname)return""; if("/"===this._parts.path)return"/";var e=this._parts.path.length-this.filename().length-1;e=this._parts.path.substring(0,e)||(this._parts.hostname?"/":"");return a?b.decodePath(e):e}e=this._parts.path.length-this.filename().length;e=this._parts.path.substring(0,e);e=new RegExp("^"+p(e));this.is("relative")||(a||(a="/"),"/"!==a.charAt(0)&&(a="/"+a));a&&"/"!==a.charAt(a.length-1)&&(a+="/");a=b.recodePath(a);this._parts.path=this._parts.path.replace(e,a);this.build(!c);return this};f.filename=function(a, c){if(this._parts.urn)return void 0===a?"":this;if("string"!==typeof a){if(!this._parts.path||"/"===this._parts.path)return"";var e=this._parts.path.lastIndexOf("/");e=this._parts.path.substring(e+1);return a?b.decodePathSegment(e):e}e=!1;"/"===a.charAt(0)&&(a=a.substring(1));a.match(/\.?\//)&&(e=!0);var d=new RegExp(p(this.filename())+"$");a=b.recodePath(a);this._parts.path=this._parts.path.replace(d,a);e?this.normalizePath(c):this.build(!c);return this};f.suffix=function(a,c){if(this._parts.urn)return void 0=== a?"":this;if(void 0===a||!0===a){if(!this._parts.path||"/"===this._parts.path)return"";var e=this.filename(),d=e.lastIndexOf(".");if(-1===d)return"";e=e.substring(d+1);e=/^[a-z0-9%]+$/i.test(e)?e:"";return a?b.decodePathSegment(e):e}"."===a.charAt(0)&&(a=a.substring(1));if(e=this.suffix())d=a?new RegExp(p(e)+"$"):new RegExp(p("."+e)+"$");else{if(!a)return this;this._parts.path+="."+b.recodePath(a)}d&&(a=b.recodePath(a),this._parts.path=this._parts.path.replace(d,a));this.build(!c);return this};f.segment= function(a,c,b){var e=this._parts.urn?":":"/",d=this.path(),f="/"===d.substring(0,1);d=d.split(e);void 0!==a&&"number"!==typeof a&&(b=c,c=a,a=void 0);if(void 0!==a&&"number"!==typeof a)throw Error('Bad segment "'+a+'", must be 0-based integer');f&&d.shift();0>a&&(a=Math.max(d.length+a,0));if(void 0===c)return void 0===a?d:d[a];if(null===a||void 0===d[a])if(t(c)){d=[];a=0;for(var n=c.length;a<n;a++)if(c[a].length||d.length&&d[d.length-1].length)d.length&&!d[d.length-1].length&&d.pop(),d.push(h(c[a]))}else{if(c|| "string"===typeof c)c=h(c),""===d[d.length-1]?d[d.length-1]=c:d.push(c)}else c?d[a]=h(c):d.splice(a,1);f&&d.unshift("");return this.path(d.join(e),b)};f.segmentCoded=function(a,c,e){var d;"number"!==typeof a&&(e=c,c=a,a=void 0);if(void 0===c){a=this.segment(a,c,e);if(t(a)){var f=0;for(d=a.length;f<d;f++)a[f]=b.decode(a[f])}else a=void 0!==a?b.decode(a):void 0;return a}if(t(c))for(f=0,d=c.length;f<d;f++)c[f]=b.encode(c[f]);else c="string"===typeof c||c instanceof String?b.encode(c):c;return this.segment(a, c,e)};var J=f.query;f.query=function(a,c){if(!0===a)return b.parseQuery(this._parts.query,this._parts.escapeQuerySpace);if("function"===typeof a){var e=b.parseQuery(this._parts.query,this._parts.escapeQuerySpace),d=a.call(this,e);this._parts.query=b.buildQuery(d||e,this._parts.duplicateQueryParameters,this._parts.escapeQuerySpace);this.build(!c);return this}return void 0!==a&&"string"!==typeof a?(this._parts.query=b.buildQuery(a,this._parts.duplicateQueryParameters,this._parts.escapeQuerySpace),this.build(!c), this):J.call(this,a,c)};f.setQuery=function(a,c,e){var d=b.parseQuery(this._parts.query,this._parts.escapeQuerySpace);if("string"===typeof a||a instanceof String)d[a]=void 0!==c?c:null;else if("object"===typeof a)for(var f in a)y.call(a,f)&&(d[f]=a[f]);else throw new TypeError("URI.addQuery() accepts an object, string as the name parameter");this._parts.query=b.buildQuery(d,this._parts.duplicateQueryParameters,this._parts.escapeQuerySpace);"string"!==typeof a&&(e=c);this.build(!e);return this};f.addQuery= function(a,c,e){var d=b.parseQuery(this._parts.query,this._parts.escapeQuerySpace);b.addQuery(d,a,void 0===c?null:c);this._parts.query=b.buildQuery(d,this._parts.duplicateQueryParameters,this._parts.escapeQuerySpace);"string"!==typeof a&&(e=c);this.build(!e);return this};f.removeQuery=function(a,c,e){var d=b.parseQuery(this._parts.query,this._parts.escapeQuerySpace);b.removeQuery(d,a,c);this._parts.query=b.buildQuery(d,this._parts.duplicateQueryParameters,this._parts.escapeQuerySpace);"string"!== typeof a&&(e=c);this.build(!e);return this};f.hasQuery=function(a,c,e){var d=b.parseQuery(this._parts.query,this._parts.escapeQuerySpace);return b.hasQuery(d,a,c,e)};f.setSearch=f.setQuery;f.addSearch=f.addQuery;f.removeSearch=f.removeQuery;f.hasSearch=f.hasQuery;f.normalize=function(){return this._parts.urn?this.normalizeProtocol(!1).normalizePath(!1).normalizeQuery(!1).normalizeFragment(!1).build():this.normalizeProtocol(!1).normalizeHostname(!1).normalizePort(!1).normalizePath(!1).normalizeQuery(!1).normalizeFragment(!1).build()}; f.normalizeProtocol=function(a){"string"===typeof this._parts.protocol&&(this._parts.protocol=this._parts.protocol.toLowerCase(),this.build(!a));return this};f.normalizeHostname=function(a){this._parts.hostname&&(this.is("IDN")&&d?this._parts.hostname=d.toASCII(this._parts.hostname):this.is("IPv6")&&k&&(this._parts.hostname=k.best(this._parts.hostname)),this._parts.hostname=this._parts.hostname.toLowerCase(),this.build(!a));return this};f.normalizePort=function(a){"string"===typeof this._parts.protocol&& this._parts.port===b.defaultPorts[this._parts.protocol]&&(this._parts.port=null,this.build(!a));return this};f.normalizePath=function(a){var c=this._parts.path;if(!c)return this;if(this._parts.urn)return this._parts.path=b.recodeUrnPath(this._parts.path),this.build(!a),this;if("/"===this._parts.path)return this;c=b.recodePath(c);var e="";if("/"!==c.charAt(0)){var d=!0;c="/"+c}if("/.."===c.slice(-3)||"/."===c.slice(-2))c+="/";c=c.replace(/(\/(\.\/)+)|(\/\.$)/g,"/").replace(/\/{2,}/g,"/");d&&(e=c.substring(1).match(/^(\.\.\/)+/)|| "")&&(e=e[0]);for(;;){var f=c.search(/\/\.\.(\/|$)/);if(-1===f)break;else if(0===f){c=c.substring(3);continue}var h=c.substring(0,f).lastIndexOf("/");-1===h&&(h=f);c=c.substring(0,h)+c.substring(f+3)}d&&this.is("relative")&&(c=e+c.substring(1));this._parts.path=c;this.build(!a);return this};f.normalizePathname=f.normalizePath;f.normalizeQuery=function(a){"string"===typeof this._parts.query&&(this._parts.query.length?this.query(b.parseQuery(this._parts.query,this._parts.escapeQuerySpace)):this._parts.query= null,this.build(!a));return this};f.normalizeFragment=function(a){this._parts.fragment||(this._parts.fragment=null,this.build(!a));return this};f.normalizeSearch=f.normalizeQuery;f.normalizeHash=f.normalizeFragment;f.iso8859=function(){var a=b.encode,c=b.decode;b.encode=escape;b.decode=decodeURIComponent;try{this.normalize()}finally{b.encode=a,b.decode=c}return this};f.unicode=function(){var a=b.encode,c=b.decode;b.encode=v;b.decode=unescape;try{this.normalize()}finally{b.encode=a,b.decode=c}return this}; f.readable=function(){var a=this.clone();a.username("").password("").normalize();var c="";a._parts.protocol&&(c+=a._parts.protocol+"://");a._parts.hostname&&(a.is("punycode")&&d?(c+=d.toUnicode(a._parts.hostname),a._parts.port&&(c+=":"+a._parts.port)):c+=a.host());a._parts.hostname&&a._parts.path&&"/"!==a._parts.path.charAt(0)&&(c+="/");c+=a.path(!0);if(a._parts.query){for(var e="",f=0,h=a._parts.query.split("&"),n=h.length;f<n;f++){var g=(h[f]||"").split("=");e+="&"+b.decodeQuery(g[0],this._parts.escapeQuerySpace).replace(/&/g, "%26");void 0!==g[1]&&(e+="="+b.decodeQuery(g[1],this._parts.escapeQuerySpace).replace(/&/g,"%26"))}c+="?"+e.substring(1)}return c+=b.decodeQuery(a.hash(),!0)};f.absoluteTo=function(a){var c=this.clone(),e=["protocol","username","password","hostname","port"],d,f;if(this._parts.urn)throw Error("URNs do not have any generally defined hierarchical components");a instanceof b||(a=new b(a));if(c._parts.protocol)return c;c._parts.protocol=a._parts.protocol;if(this._parts.hostname)return c;for(d=0;f=e[d];d++)c._parts[f]= a._parts[f];c._parts.path?(".."===c._parts.path.substring(-2)&&(c._parts.path+="/"),"/"!==c.path().charAt(0)&&(e=(e=a.directory())?e:0===a.path().indexOf("/")?"/":"",c._parts.path=(e?e+"/":"")+c._parts.path,c.normalizePath())):(c._parts.path=a._parts.path,c._parts.query||(c._parts.query=a._parts.query));c.build();return c};f.relativeTo=function(a){var c=this.clone().normalize();if(c._parts.urn)throw Error("URNs do not have any generally defined hierarchical components");a=(new b(a)).normalize();var e= c._parts;var d=a._parts;var f=c.path();a=a.path();if("/"!==f.charAt(0))throw Error("URI is already relative");if("/"!==a.charAt(0))throw Error("Cannot calculate a URI relative to another relative URI");e.protocol===d.protocol&&(e.protocol=null);if(e.username===d.username&&e.password===d.password&&null===e.protocol&&null===e.username&&null===e.password&&e.hostname===d.hostname&&e.port===d.port)e.hostname=null,e.port=null;else return c.build();if(f===a)return e.path="",c.build();f=b.commonPath(f,a); if(!f)return c.build();d=d.path.substring(f.length).replace(/[^\/]*$/,"").replace(/.*?\//g,"../");e.path=d+e.path.substring(f.length)||"./";return c.build()};f.equals=function(a){var c=this.clone(),e=new b(a);a={};var d;c.normalize();e.normalize();if(c.toString()===e.toString())return!0;var f=c.query();var h=e.query();c.query("");e.query("");if(c.toString()!==e.toString()||f.length!==h.length)return!1;c=b.parseQuery(f,this._parts.escapeQuerySpace);h=b.parseQuery(h,this._parts.escapeQuerySpace);for(d in c)if(y.call(c, d)){if(!t(c[d])){if(c[d]!==h[d])return!1}else if(!w(c[d],h[d]))return!1;a[d]=!0}for(d in h)if(y.call(h,d)&&!a[d])return!1;return!0};f.preventInvalidHostname=function(a){this._parts.preventInvalidHostname=!!a;return this};f.duplicateQueryParameters=function(a){this._parts.duplicateQueryParameters=!!a;return this};f.escapeQuerySpace=function(a){this._parts.escapeQuerySpace=!!a;return this};return b}); (function(d,k){"object"===typeof module&&module.exports?module.exports=k(require("./URI")):"function"===typeof define&&define.amd?define(["./URI"],k):d.URITemplate=k(d.URI,d)})(this,function(d,k){function g(b){if(g._cache[b])return g._cache[b];if(!(this instanceof g))return new g(b);this.expression=b;g._cache[b]=this;return this}function u(b){this.data=b;this.cache={}}var b=k&&k.URITemplate,p=Object.prototype.hasOwnProperty,B=g.prototype,t={"":{prefix:"",separator:",",named:!1,empty_name_separator:!1, encode:"encode"},"+":{prefix:"",separator:",",named:!1,empty_name_separator:!1,encode:"encodeReserved"},"#":{prefix:"#",separator:",",named:!1,empty_name_separator:!1,encode:"encodeReserved"},".":{prefix:".",separator:".",named:!1,empty_name_separator:!1,encode:"encode"},"/":{prefix:"/",separator:"/",named:!1,empty_name_separator:!1,encode:"encode"},";":{prefix:";",separator:";",named:!0,empty_name_separator:!1,encode:"encode"},"?":{prefix:"?",separator:"&",named:!0,empty_name_separator:!0,encode:"encode"}, "&":{prefix:"&",separator:"&",named:!0,empty_name_separator:!0,encode:"encode"}};g._cache={};g.EXPRESSION_PATTERN=/\{([^a-zA-Z0-9%_]?)([^\}]+)(\}|$)/g;g.VARIABLE_PATTERN=/^([^*:.](?:\.?[^*:.])*)((\*)|:(\d+))?$/;g.VARIABLE_NAME_PATTERN=/[^a-zA-Z0-9%_.]/;g.LITERAL_PATTERN=/[<>{}"`^| \\]/;g.expand=function(b,d,k){var h=t[b.operator],n=h.named?"Named":"Unnamed";b=b.variables;var v=[],q,p;for(p=0;q=b[p];p++){var w=d.get(q.name);if(0===w.type&&k&&k.strict)throw Error('Missing expansion value for variable "'+ q.name+'"');if(w.val.length){if(1<w.type&&q.maxlength)throw Error('Invalid expression: Prefix modifier not applicable to variable "'+q.name+'"');v.push(g["expand"+n](w,h,q.explode,q.explode&&h.separator||",",q.maxlength,q.name))}else w.type&&v.push("")}return v.length?h.prefix+v.join(h.separator):""};g.expandNamed=function(b,g,k,h,r,v){var n="",q=g.encode;g=g.empty_name_separator;var p=!b[q].length,f=2===b.type?"":d[q](v),t;var w=0;for(t=b.val.length;w<t;w++){if(r){var u=d[q](b.val[w][1].substring(0, r));2===b.type&&(f=d[q](b.val[w][0].substring(0,r)))}else p?(u=d[q](b.val[w][1]),2===b.type?(f=d[q](b.val[w][0]),b[q].push([f,u])):b[q].push([void 0,u])):(u=b[q][w][1],2===b.type&&(f=b[q][w][0]));n&&(n+=h);k?n+=f+(g||u?"=":"")+u:(w||(n+=d[q](v)+(g||u?"=":"")),2===b.type&&(n+=f+","),n+=u)}return n};g.expandUnnamed=function(b,g,k,h,r){var n="",q=g.encode;g=g.empty_name_separator;var p=!b[q].length,w;var f=0;for(w=b.val.length;f<w;f++){if(r)var t=d[q](b.val[f][1].substring(0,r));else p?(t=d[q](b.val[f][1]), b[q].push([2===b.type?d[q](b.val[f][0]):void 0,t])):t=b[q][f][1];n&&(n+=h);if(2===b.type){var u=r?d[q](b.val[f][0].substring(0,r)):b[q][f][0];n+=u;n=k?n+(g||t?"=":""):n+","}n+=t}return n};g.noConflict=function(){k.URITemplate===g&&(k.URITemplate=b);return g};B.expand=function(b,d){var k="";this.parts&&this.parts.length||this.parse();b instanceof u||(b=new u(b));for(var h=0,r=this.parts.length;h<r;h++)k+="string"===typeof this.parts[h]?this.parts[h]:g.expand(this.parts[h],b,d);return k};B.parse=function(){var b= this.expression,d=g.EXPRESSION_PATTERN,k=g.VARIABLE_PATTERN,h=g.VARIABLE_NAME_PATTERN,r=g.LITERAL_PATTERN,v=[],p=0,u=function(b){if(b.match(r))throw Error('Invalid Literal "'+b+'"');return b};for(d.lastIndex=0;;){var A=d.exec(b);if(null===A){v.push(u(b.substring(p)));break}else v.push(u(b.substring(p,A.index))),p=A.index+A[0].length;if(!t[A[1]])throw Error('Unknown Operator "'+A[1]+'" in "'+A[0]+'"');if(!A[3])throw Error('Unclosed Expression "'+A[0]+'"');var f=A[2].split(",");for(var y=0,B=f.length;y< B;y++){var z=f[y].match(k);if(null===z)throw Error('Invalid Variable "'+f[y]+'" in "'+A[0]+'"');if(z[1].match(h))throw Error('Invalid Variable Name "'+z[1]+'" in "'+A[0]+'"');f[y]={name:z[1],explode:!!z[3],maxlength:z[4]&&parseInt(z[4],10)}}if(!f.length)throw Error('Expression Missing Variable(s) "'+A[0]+'"');v.push({expression:A[0],operator:A[1],variables:f})}v.length||v.push(u(b));this.parts=v;return this};u.prototype.get=function(b){var d=this.data,g={type:0,val:[],encode:[],encodeReserved:[]}; if(void 0!==this.cache[b])return this.cache[b];this.cache[b]=g;d="[object Function]"===String(Object.prototype.toString.call(d))?d(b):"[object Function]"===String(Object.prototype.toString.call(d[b]))?d[b](b):d[b];if(void 0!==d&&null!==d)if("[object Array]"===String(Object.prototype.toString.call(d))){var h=0;for(b=d.length;h<b;h++)void 0!==d[h]&&null!==d[h]&&g.val.push([void 0,String(d[h])]);g.val.length&&(g.type=3)}else if("[object Object]"===String(Object.prototype.toString.call(d))){for(h in d)p.call(d, h)&&void 0!==d[h]&&null!==d[h]&&g.val.push([h,String(d[h])]);g.val.length&&(g.type=2)}else g.type=1,g.val.push([void 0,String(d)]);return g};d.expand=function(b,k){var n=(new g(b)).expand(k);return new d(n)};return g}); (function(d,k){"object"===typeof module&&module.exports?module.exports=k(require("jquery"),require("./URI")):"function"===typeof define&&define.amd?define(["jquery","./URI"],k):k(d.jQuery,d.URI)})(this,function(d,k){function g(b){return b.replace(/([.*+?^=!:${}()|[\]\/\\])/g,"\\$1")}function u(b){var d=b.nodeName.toLowerCase();if("input"!==d||"image"===b.type)return k.domAttributes[d]}function b(b){return{get:function(h){return d(h).uri()[b]()},set:function(h,g){d(h).uri()[b](g);return g}}}function p(b, g){if(!u(b)||!g)return!1;var h=g.match(q);if(!h||!h[5]&&":"!==h[2]&&!t[h[2]])return!1;var k=d(b).uri();if(h[5])return k.is(h[5]);if(":"===h[2]){var r=h[1].toLowerCase()+":";return t[r]?t[r](k,h[4]):!1}r=h[1].toLowerCase();return B[r]?t[h[2]](k[r](),h[4],r):!1}var B={},t={"=":function(b,d){return b===d},"^=":function(b,d){return!!(b+"").match(new RegExp("^"+g(d),"i"))},"$=":function(b,d){return!!(b+"").match(new RegExp(g(d)+"$","i"))},"*=":function(b,d,k){"directory"===k&&(b+="/");return!!(b+"").match(new RegExp(g(d), "i"))},"equals:":function(b,d){return b.equals(d)},"is:":function(b,d){return b.is(d)}};d.each("origin authority directory domain filename fragment hash host hostname href password path pathname port protocol query resource scheme search subdomain suffix tld username".split(" "),function(h,g){B[g]=!0;d.attrHooks["uri:"+g]=b(g)});var n=function(b,g){return d(b).uri().href(g).toString()};d.each(["src","href","action","uri","cite"],function(b,g){d.attrHooks[g]={set:n}});d.attrHooks.uri.get=function(b){return d(b).uri()}; d.fn.uri=function(b){var d=this.first(),g=d.get(0),h=u(g);if(!h)throw Error('Element "'+g.nodeName+'" does not have either property: href, src, action, cite');if(void 0!==b){var n=d.data("uri");if(n)return n.href(b);b instanceof k||(b=k(b||""))}else{if(b=d.data("uri"))return b;b=k(d.attr(h)||"")}b._dom_element=g;b._dom_attribute=h;b.normalize();d.data("uri",b);return b};k.prototype.build=function(b){if(this._dom_element)this._string=k.build(this._parts),this._deferred_build=!1,this._dom_element.setAttribute(this._dom_attribute, this._string),this._dom_element[this._dom_attribute]=this._string;else if(!0===b)this._deferred_build=!0;else if(void 0===b||this._deferred_build)this._string=k.build(this._parts),this._deferred_build=!1;return this};var q=/^([a-zA-Z]+)\s*([\^\$*]?=|:)\s*(['"]?)(.+)\3|^\s*([a-zA-Z0-9]+)\s*$/;var w=d.expr.createPseudo?d.expr.createPseudo(function(b){return function(d){return p(d,b)}}):function(b,d,g){return p(b,g[3])};d.expr[":"].uri=w;return d});
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/afqbrowser/site/client/js/third-party/URI.js
URI.js
THREE.OBJLoader = ( function () { // o object_name | g group_name var object_pattern = /^[og]\s*(.+)?/; // mtllib file_reference var material_library_pattern = /^mtllib /; // usemtl material_name var material_use_pattern = /^usemtl /; function ParserState() { var state = { objects: [], object: {}, vertices: [], normals: [], colors: [], uvs: [], materialLibraries: [], startObject: function ( name, fromDeclaration ) { // If the current object (initial from reset) is not from a g/o declaration in the parsed // file. We need to use it for the first parsed g/o to keep things in sync. if ( this.object && this.object.fromDeclaration === false ) { this.object.name = name; this.object.fromDeclaration = ( fromDeclaration !== false ); return; } var previousMaterial = ( this.object && typeof this.object.currentMaterial === 'function' ? this.object.currentMaterial() : undefined ); if ( this.object && typeof this.object._finalize === 'function' ) { this.object._finalize( true ); } this.object = { name: name || '', fromDeclaration: ( fromDeclaration !== false ), geometry: { vertices: [], normals: [], colors: [], uvs: [] }, materials: [], smooth: true, startMaterial: function ( name, libraries ) { var previous = this._finalize( false ); // New usemtl declaration overwrites an inherited material, except if faces were declared // after the material, then it must be preserved for proper MultiMaterial continuation. if ( previous && ( previous.inherited || previous.groupCount <= 0 ) ) { this.materials.splice( previous.index, 1 ); } var material = { index: this.materials.length, name: name || '', mtllib: ( Array.isArray( libraries ) && libraries.length > 0 ? libraries[ libraries.length - 1 ] : '' ), smooth: ( previous !== undefined ? previous.smooth : this.smooth ), groupStart: ( previous !== undefined ? previous.groupEnd : 0 ), groupEnd: - 1, groupCount: - 1, inherited: false, clone: function ( index ) { var cloned = { index: ( typeof index === 'number' ? index : this.index ), name: this.name, mtllib: this.mtllib, smooth: this.smooth, groupStart: 0, groupEnd: - 1, groupCount: - 1, inherited: false }; cloned.clone = this.clone.bind( cloned ); return cloned; } }; this.materials.push( material ); return material; }, currentMaterial: function () { if ( this.materials.length > 0 ) { return this.materials[ this.materials.length - 1 ]; } return undefined; }, _finalize: function ( end ) { var lastMultiMaterial = this.currentMaterial(); if ( lastMultiMaterial && lastMultiMaterial.groupEnd === - 1 ) { lastMultiMaterial.groupEnd = this.geometry.vertices.length / 3; lastMultiMaterial.groupCount = lastMultiMaterial.groupEnd - lastMultiMaterial.groupStart; lastMultiMaterial.inherited = false; } // Ignore objects tail materials if no face declarations followed them before a new o/g started. if ( end && this.materials.length > 1 ) { for ( var mi = this.materials.length - 1; mi >= 0; mi -- ) { if ( this.materials[ mi ].groupCount <= 0 ) { this.materials.splice( mi, 1 ); } } } // Guarantee at least one empty material, this makes the creation later more straight forward. if ( end && this.materials.length === 0 ) { this.materials.push( { name: '', smooth: this.smooth } ); } return lastMultiMaterial; } }; // Inherit previous objects material. // Spec tells us that a declared material must be set to all objects until a new material is declared. // If a usemtl declaration is encountered while this new object is being parsed, it will // overwrite the inherited material. Exception being that there was already face declarations // to the inherited material, then it will be preserved for proper MultiMaterial continuation. if ( previousMaterial && previousMaterial.name && typeof previousMaterial.clone === 'function' ) { var declared = previousMaterial.clone( 0 ); declared.inherited = true; this.object.materials.push( declared ); } this.objects.push( this.object ); }, finalize: function () { if ( this.object && typeof this.object._finalize === 'function' ) { this.object._finalize( true ); } }, parseVertexIndex: function ( value, len ) { var index = parseInt( value, 10 ); return ( index >= 0 ? index - 1 : index + len / 3 ) * 3; }, parseNormalIndex: function ( value, len ) { var index = parseInt( value, 10 ); return ( index >= 0 ? index - 1 : index + len / 3 ) * 3; }, parseUVIndex: function ( value, len ) { var index = parseInt( value, 10 ); return ( index >= 0 ? index - 1 : index + len / 2 ) * 2; }, addVertex: function ( a, b, c ) { var src = this.vertices; var dst = this.object.geometry.vertices; dst.push( src[ a + 0 ], src[ a + 1 ], src[ a + 2 ] ); dst.push( src[ b + 0 ], src[ b + 1 ], src[ b + 2 ] ); dst.push( src[ c + 0 ], src[ c + 1 ], src[ c + 2 ] ); }, addVertexLine: function ( a ) { var src = this.vertices; var dst = this.object.geometry.vertices; dst.push( src[ a + 0 ], src[ a + 1 ], src[ a + 2 ] ); }, addNormal: function ( a, b, c ) { var src = this.normals; var dst = this.object.geometry.normals; dst.push( src[ a + 0 ], src[ a + 1 ], src[ a + 2 ] ); dst.push( src[ b + 0 ], src[ b + 1 ], src[ b + 2 ] ); dst.push( src[ c + 0 ], src[ c + 1 ], src[ c + 2 ] ); }, addColor: function ( a, b, c ) { var src = this.colors; var dst = this.object.geometry.colors; dst.push( src[ a + 0 ], src[ a + 1 ], src[ a + 2 ] ); dst.push( src[ b + 0 ], src[ b + 1 ], src[ b + 2 ] ); dst.push( src[ c + 0 ], src[ c + 1 ], src[ c + 2 ] ); }, addUV: function ( a, b, c ) { var src = this.uvs; var dst = this.object.geometry.uvs; dst.push( src[ a + 0 ], src[ a + 1 ] ); dst.push( src[ b + 0 ], src[ b + 1 ] ); dst.push( src[ c + 0 ], src[ c + 1 ] ); }, addUVLine: function ( a ) { var src = this.uvs; var dst = this.object.geometry.uvs; dst.push( src[ a + 0 ], src[ a + 1 ] ); }, addFace: function ( a, b, c, ua, ub, uc, na, nb, nc ) { var vLen = this.vertices.length; var ia = this.parseVertexIndex( a, vLen ); var ib = this.parseVertexIndex( b, vLen ); var ic = this.parseVertexIndex( c, vLen ); this.addVertex( ia, ib, ic ); if ( ua !== undefined ) { var uvLen = this.uvs.length; ia = this.parseUVIndex( ua, uvLen ); ib = this.parseUVIndex( ub, uvLen ); ic = this.parseUVIndex( uc, uvLen ); this.addUV( ia, ib, ic ); } if ( na !== undefined ) { // Normals are many times the same. If so, skip function call and parseInt. var nLen = this.normals.length; ia = this.parseNormalIndex( na, nLen ); ib = na === nb ? ia : this.parseNormalIndex( nb, nLen ); ic = na === nc ? ia : this.parseNormalIndex( nc, nLen ); this.addNormal( ia, ib, ic ); } if ( this.colors.length > 0 ) { this.addColor( ia, ib, ic ); } }, addLineGeometry: function ( vertices, uvs ) { this.object.geometry.type = 'Line'; var vLen = this.vertices.length; var uvLen = this.uvs.length; for ( var vi = 0, l = vertices.length; vi < l; vi ++ ) { this.addVertexLine( this.parseVertexIndex( vertices[ vi ], vLen ) ); } for ( var uvi = 0, l = uvs.length; uvi < l; uvi ++ ) { this.addUVLine( this.parseUVIndex( uvs[ uvi ], uvLen ) ); } } }; state.startObject( '', false ); return state; } // function OBJLoader( manager ) { this.manager = ( manager !== undefined ) ? manager : THREE.DefaultLoadingManager; this.materials = null; } OBJLoader.prototype = { constructor: OBJLoader, load: function ( url, onLoad, onProgress, onError ) { var scope = this; var loader = new THREE.FileLoader( scope.manager ); loader.setPath( this.path ); loader.load( url, function ( text ) { onLoad( scope.parse( text ) ); }, onProgress, onError ); }, setPath: function ( value ) { this.path = value; }, setMaterials: function ( materials ) { this.materials = materials; return this; }, parse: function ( text ) { console.time( 'OBJLoader' ); var state = new ParserState(); if ( text.indexOf( '\r\n' ) !== - 1 ) { // This is faster than String.split with regex that splits on both text = text.replace( /\r\n/g, '\n' ); } if ( text.indexOf( '\\\n' ) !== - 1 ) { // join lines separated by a line continuation character (\) text = text.replace( /\\\n/g, '' ); } var lines = text.split( '\n' ); var line = '', lineFirstChar = ''; var lineLength = 0; var result = []; // Faster to just trim left side of the line. Use if available. var trimLeft = ( typeof ''.trimLeft === 'function' ); for ( var i = 0, l = lines.length; i < l; i ++ ) { line = lines[ i ]; line = trimLeft ? line.trimLeft() : line.trim(); lineLength = line.length; if ( lineLength === 0 ) continue; lineFirstChar = line.charAt( 0 ); // @todo invoke passed in handler if any if ( lineFirstChar === '#' ) continue; if ( lineFirstChar === 'v' ) { var data = line.split( /\s+/ ); switch ( data[ 0 ] ) { case 'v': state.vertices.push( parseFloat( data[ 1 ] ), parseFloat( data[ 2 ] ), parseFloat( data[ 3 ] ) ); if ( data.length === 8 ) { state.colors.push( parseFloat( data[ 4 ] ), parseFloat( data[ 5 ] ), parseFloat( data[ 6 ] ) ); } break; case 'vn': state.normals.push( parseFloat( data[ 1 ] ), parseFloat( data[ 2 ] ), parseFloat( data[ 3 ] ) ); break; case 'vt': state.uvs.push( parseFloat( data[ 1 ] ), parseFloat( data[ 2 ] ) ); break; } } else if ( lineFirstChar === 'f' ) { var lineData = line.substr( 1 ).trim(); var vertexData = lineData.split( /\s+/ ); var faceVertices = []; // Parse the face vertex data into an easy to work with format for ( var j = 0, jl = vertexData.length; j < jl; j ++ ) { var vertex = vertexData[ j ]; if ( vertex.length > 0 ) { var vertexParts = vertex.split( '/' ); faceVertices.push( vertexParts ); } } // Draw an edge between the first vertex and all subsequent vertices to form an n-gon var v1 = faceVertices[ 0 ]; for ( var j = 1, jl = faceVertices.length - 1; j < jl; j ++ ) { var v2 = faceVertices[ j ]; var v3 = faceVertices[ j + 1 ]; state.addFace( v1[ 0 ], v2[ 0 ], v3[ 0 ], v1[ 1 ], v2[ 1 ], v3[ 1 ], v1[ 2 ], v2[ 2 ], v3[ 2 ] ); } } else if ( lineFirstChar === 'l' ) { var lineParts = line.substring( 1 ).trim().split( " " ); var lineVertices = [], lineUVs = []; if ( line.indexOf( "/" ) === - 1 ) { lineVertices = lineParts; } else { for ( var li = 0, llen = lineParts.length; li < llen; li ++ ) { var parts = lineParts[ li ].split( "/" ); if ( parts[ 0 ] !== "" ) lineVertices.push( parts[ 0 ] ); if ( parts[ 1 ] !== "" ) lineUVs.push( parts[ 1 ] ); } } state.addLineGeometry( lineVertices, lineUVs ); } else if ( ( result = object_pattern.exec( line ) ) !== null ) { // o object_name // or // g group_name // WORKAROUND: https://bugs.chromium.org/p/v8/issues/detail?id=2869 // var name = result[ 0 ].substr( 1 ).trim(); var name = ( " " + result[ 0 ].substr( 1 ).trim() ).substr( 1 ); state.startObject( name ); } else if ( material_use_pattern.test( line ) ) { // material state.object.startMaterial( line.substring( 7 ).trim(), state.materialLibraries ); } else if ( material_library_pattern.test( line ) ) { // mtl file state.materialLibraries.push( line.substring( 7 ).trim() ); } else if ( lineFirstChar === 's' ) { result = line.split( ' ' ); // smooth shading // @todo Handle files that have varying smooth values for a set of faces inside one geometry, // but does not define a usemtl for each face set. // This should be detected and a dummy material created (later MultiMaterial and geometry groups). // This requires some care to not create extra material on each smooth value for "normal" obj files. // where explicit usemtl defines geometry groups. // Example asset: examples/models/obj/cerberus/Cerberus.obj /* * http://paulbourke.net/dataformats/obj/ * or * http://www.cs.utah.edu/~boulos/cs3505/obj_spec.pdf * * From chapter "Grouping" Syntax explanation "s group_number": * "group_number is the smoothing group number. To turn off smoothing groups, use a value of 0 or off. * Polygonal elements use group numbers to put elements in different smoothing groups. For free-form * surfaces, smoothing groups are either turned on or off; there is no difference between values greater * than 0." */ if ( result.length > 1 ) { var value = result[ 1 ].trim().toLowerCase(); state.object.smooth = ( value !== '0' && value !== 'off' ); } else { // ZBrush can produce "s" lines #11707 state.object.smooth = true; } var material = state.object.currentMaterial(); if ( material ) material.smooth = state.object.smooth; } else { // Handle null terminated files without exception if ( line === '\0' ) continue; throw new Error( 'THREE.OBJLoader: Unexpected line: "' + line + '"' ); } } state.finalize(); var container = new THREE.Group(); container.materialLibraries = [].concat( state.materialLibraries ); for ( var i = 0, l = state.objects.length; i < l; i ++ ) { var object = state.objects[ i ]; var geometry = object.geometry; var materials = object.materials; var isLine = ( geometry.type === 'Line' ); // Skip o/g line declarations that did not follow with any faces if ( geometry.vertices.length === 0 ) continue; var buffergeometry = new THREE.BufferGeometry(); buffergeometry.addAttribute( 'position', new THREE.Float32BufferAttribute( geometry.vertices, 3 ) ); if ( geometry.normals.length > 0 ) { buffergeometry.addAttribute( 'normal', new THREE.Float32BufferAttribute( geometry.normals, 3 ) ); } else { buffergeometry.computeVertexNormals(); } if ( geometry.colors.length > 0 ) { buffergeometry.addAttribute( 'color', new THREE.Float32BufferAttribute( geometry.colors, 3 ) ); } if ( geometry.uvs.length > 0 ) { buffergeometry.addAttribute( 'uv', new THREE.Float32BufferAttribute( geometry.uvs, 2 ) ); } // Create materials var createdMaterials = []; for ( var mi = 0, miLen = materials.length; mi < miLen; mi ++ ) { var sourceMaterial = materials[ mi ]; var material = undefined; if ( this.materials !== null ) { material = this.materials.create( sourceMaterial.name ); // mtl etc. loaders probably can't create line materials correctly, copy properties to a line material. if ( isLine && material && ! ( material instanceof THREE.LineBasicMaterial ) ) { var materialLine = new THREE.LineBasicMaterial(); materialLine.copy( material ); material = materialLine; } } if ( ! material ) { material = ( ! isLine ? new THREE.MeshPhongMaterial() : new THREE.LineBasicMaterial() ); material.name = sourceMaterial.name; } material.flatShading = sourceMaterial.smooth ? false : true; createdMaterials.push( material ); } // Create mesh var mesh; if ( createdMaterials.length > 1 ) { for ( var mi = 0, miLen = materials.length; mi < miLen; mi ++ ) { var sourceMaterial = materials[ mi ]; buffergeometry.addGroup( sourceMaterial.groupStart, sourceMaterial.groupCount, mi ); } mesh = ( ! isLine ? new THREE.Mesh( buffergeometry, createdMaterials ) : new THREE.LineSegments( buffergeometry, createdMaterials ) ); } else { mesh = ( ! isLine ? new THREE.Mesh( buffergeometry, createdMaterials[ 0 ] ) : new THREE.LineSegments( buffergeometry, createdMaterials[ 0 ] ) ); } mesh.name = object.name; container.add( mesh ); } console.timeEnd( 'OBJLoader' ); return container; } }; return OBJLoader; } )();
AFQ-Browser
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/afqbrowser/site/client/js/third-party/OBJLoader.js
OBJLoader.js
``` #example process how to use AFR functions #import necessary nodules and functions import pandas as pd import numpy as np from utils.checkdata import checkdata from utils.corsel import corsel from utils.opt_size import opt_size from utils.reg_test import reg_test from utils.dec_plot import dec_plot from utils.vif_reg import vif_reg from utils.pt_multi import pt_multi from utils.pt_one import pt_one from utils.regsel_f import regsel_f from utils.check_betas import check_betas from utils.aic_score import aic_score from utils.bic_score import bic_score from utils.sbic_score import sbic_score from utils.adjr2_score import adjr2_score #get information about the function by help() help(checkdata) #upload dataset(-s) finrat = pd.read_csv('./load/finratKZ.csv') macro = pd.read_csv('./load/macroKZ.csv') macro.head() #preliminary check data for missing values, numeric format and outliers for further work checkdata(macro) #choose potential regressors to check them for multicollinearity data = macro[['poil', 'cpi', 'usdkzt', 'GDP_DEF', 'exp', 'GDD_Agr_R', 'rurkzt', 'tonia_rate', 'cred_portfolio', 'fed_fund_rate']] #check for multicollinearity by setting threshold and numeric/boolean format corsel(data) #import module to build OLS regression from statsmodels.formula.api import ols import statsmodels.api as sm #build OLS regression model = ols('real_gdp ~ poil + cpi + usdkzt + imp', data = macro).fit() #plot the decomposition of the regressors to the regression dec_plot(model, macro) #check for multicollinearity by VIF coefficient vif_reg(model) #check whether there are enough observations for your regression opt_size(model) #check model for violation/compliance with Gauss-Markov assumptions reg_test(model) #AIC metrics for the model print(aic_score(model)) #BIC metrics for the model print(bic_score(model)) #SBIC metrics for the model print(sbic_score(model)) #Adj R2 metrics for the model print(adjr2_score(model)) #select best model based on stepwise forward regression from your chosen regression X = macro[['poil', 'cpi', 'usdkzt', 'GDP_DEF', 'exp', 'GDD_Agr_R', 'rurkzt', 'tonia_rate', 'cred_portfolio', 'fed_fund_rate']] y = macro['real_gdp'] regsel_f(X, y, macro, scoring = 'aic') #all possible regression subsets #customize the width of the columns pd.set_option('display.max_columns', None) pd.set_option('max_colwidth', None) check_betas(X, y, criterion = 'bic', intercept = False) #to check default probability of low default credit portfolio for 1 year portfolio_distribution = np.array([10,20,30,40,10,20]) num_defaults = np.array([1, 2, 1, 0, 3, 2]) conf_level = 0.99 num_years = 3 pt_one(portfolio_distribution, num_defaults, conf_level) #to check default probability of low default credit portfolio for n years pt_multi(portfolio_distribution, num_defaults, conf_level, num_years) ```
AFR
/AFR-0.2.3.tar.gz/AFR-0.2.3/example_process.ipynb
example_process.ipynb
# Package ‘AFR’ Statistical toolkit aimed to help statisticians, data analysts, data scientists, bankers and other professionals to analyze financial data. It was designed by the team of the Agency of the Republic of Kazakhstan for Regulation and Development of Financial Market (ARDFM). AFR toolkit offers functions to upload, preliminary check, analyze data and regressions and interpret results. ##### Authors: Timur Abilkassymov, the Advisor to the Chairperson of the ARDFM. Alua Makhmetova, chief specialist of the Department of Banking Analytics and Stress Testing of the ARDFM. ##### Contact: Alua Makhmetova [email protected] [email protected] ##### Copyright: The Agency of the Republic of Kazakhstan for Regulation and Development of Financial Market. The [link](https://www.gov.kz/memleket/entities/ardfm?lang=en) ## Datasets AFR has built-in datasets named _macroKZ_ and _finratKZ_ that were gathered by the ARDFM team. More details below. ## finratKZ dataset Dataset *finratKZ* was gathered during the supervisory procedure of the banking sector of Kazakhstan. ARDFM team analyzed financial statements of the corporate borrowers and calculated 29 financial ratios. The data was collected during regular supervisory asset quality review(AQR) procedure. During the AQR corporate borrowers were classified as default and standard (IFRS stage 1). **Dataset contains following data** : - **Default** - Dummy variable where 0 - standard(IFRS stage 1) borrower, 1 - default borrower - **Rev_gr** - Revenue growth rate - **EBITDA_gr** - EBITDA growth rate - **Cap_gr** - Capital growth rate - **CR** - Current ratio - **QR** - Quick ratio - **Cash_ratio** - Cash ratio - **WC_cycle** - Working capital cycle - **DTA** - Debt-to-assets - **DTE** -Debt-to-equity - **LR** - Leverage ratio (Total assets/Total equity) - **EBITDA_debt** - EBITDA-to-debt - **IC** - Interest coverage (Income statement) - **CTI** - Cash-to-income - **IC_CF** - Interest coverage (Cash flow statement) - **DCR** - Debt coverage ratio (Cash flow from operations/Total debt) - **CFR** - Cash flow to revenue - **CRA** - Cash return on assets (Cash flow from operations/Total assets) - **CRE** - Cash return on equity (Cash flow from operations/Total equity) - **ROA** - Return on assets - **ROE** - Return on equity - **NPM** - Net profit margin - **GPM** - Gross profit margin - **OPM** - Operating profit margin - **RecT** - Receivables turnover - **InvT** - Inventory turnover - **PayT** - Payables turnover - **TA** - Total assets turnover - **FA** - Fixed assets turnover - **WC** - Working capital turnover **Example** : .. code-block:: python import AFR finrat = load_finratKZ() **Reference** : The Agency of the Republic of Kazakhstan for Regulation and Development of Financial Market. ## macroKZ dataset The dataset was gathered by the ARDFM based on Kazakhstan' official and public data from the [Bureau of National Statistics](https://stat.gov.kz/). The dataset contains 50 historic macroeconomic and 10 hypothetical financial data over 52 quarters of 2010-2022 period. The *macroKZ* dataset will be updated periodically as the official statistical information is released. **Dataset contains following data** : - **real_gdp** Real GDP, in 2005 base-year prices (bln KZT) - **GDD_Agr_R** Real gross value added Agriculture, in 2005 base-year prices (bln KZT) - **GDD_Min_R** Real gross value added Mining, in 2005 base-year prices (bln KZT) - **GDD_Man_R** Real gross value added Manufacture, in 2005 base-year prices (bln KZT) - **GDD_Elc_R** Real gross value added Electricity, in 2005 base-year prices (bln KZT) - **GDD_Con_R** Real gross value added Construction, in 2005 base-year prices (bln KZT) - **GDD_Trd_R** Real gross value added Trade, in 2005 base-year prices (bln KZT) - **GDD_Trn_R** Real gross value added Transportation, in 2005 base-year prices (bln KZT) - **GDD_Inf_R** Real gross value added Information, in 2005 base-year prices (bln KZT) - **GDD_Est_R** Real gross value added for Real estate, in 2005 base-year prices (bln KZT) - **GDD_R** Real gross value added, in 2005 base-year prices (bln KZT) - **GDP_DEF** GDP deflator, relative to 2005 base-year prices (bln KZT) - **Rincpop_q** Real population average monthly income, in 2005 base-year prices (ths KZT) - **Rexppop_q** Real population average monthly expenses, in 2005 base-year prices (ths KZT) - **Rwage_q** Real population average monthly wage, in 2005 base-year prices (ths KZT) - **imp** Import (mln KZT) - **exp** Export (mln kZT) - **cpi** Inflation, relative to 2005 base-year prices - **realest_resed_prim** Real price for estate in primary market (Q\Q) - **realest_resed_sec** Real price for estate in secondary market (Q\Q) - **realest_comm** Real price for commercial estate (Q\Q) - **index_stock_weighted** Change in stock value for traded companies (Q\Q) - **ntrade_Agr** Change in stock value for non-traded companies Agriculture (Q\Q) - **ntrade_Min** Change in stock value for non-traded companies Mining (Q\Q) - **ntrade_Man** Change in stock value for non-traded companies Manufacture (Q\Q) - **ntrade_Elc** Change in stock value for non-traded companies Electricity (Q\Q) - **ntrade_Con** Change in stock value for non-traded companies Construction (Q\Q) - **ntrade_Trd** Change in stock value for non-traded companies Trade (Q\Q) - **ntrade_Trn** Change in stock value for non-traded companies Transportation (Q\Q) - **ntrade_Inf** Change in stock value for non-traded companies Information (Q\Q) - **fed_fund_rate** Federal Funds Rate (%) - **govsec_rate_kzt_3m** Return on government securities in KZT, 3 m (%) - **govsec_rate_kzt_1y** Return on government securities in KZT, 1 year (%) - **govsec_rate_kzt_7y** Return on government securities in KZT, 7 years (%) - **govsec_rate_kzt_10y** Return on government securities in KZT, 10 years (%) - **tonia_rate** TONIA (%) - **rate_kzt_mort_0y_1y** Weighted average mortgage lending rate for new loans, less than a year (%) - **rate_kzt_mort_1y_iy** Weighted average mortgage lending rate for new loans, more than a year (%) - **rate_kzt_corp_0y_1y** Weighted average mortgage lending rate for new loans to non-financial organizations in KZT, less than a year (%) - **rate_usd_corp_0y_1y** Weighted average mortgage lending rate for new loans to non-financial organizations in CKB, less than a year (%) - **rate_kzt_corp_1y_iy** Weighted average mortgage lending rate for new loans to non-financial organizations in KZT, more than a year (%) - **rate_usd_corp_1y_iy** Weighted average mortgage lending rate for new loans to non-financial organizations in CKB, more than a year (%) - **rate_kzt_indv_0y_1y** Weighted average mortgage lending rate for consumer loans in KZT, less than a year (%) - **rate_kzt_indv_1y_iy** Weighted average mortgage lending rate for consumer loans in KZT, less than a year (%) - **usdkzt** USD KZT exchange rate - **eurkzt** EUR KZT exchange rate - **rurkzt** RUB KZT exchange rate - **poil** Price for Brent ($/barrel) - **realest_resed_prim_rus** Real price for estate in primary market in Russia (Q\Q) - **realest_resed_sec_rus** Real price for estate in secondary market in Russia (Q\Q) - **cred_portfolio** credit portfolio (mln KZT) - **coef_k1** k1 prudential coefficient - **coef_k3** k3 prudential coefficient - **provisions** provisions - **percent_margin** percent margin - **com_inc** commissionary income (ths KZT) - **com_exp** commissionary expenses (ths KZT) - **oper_inc** operational income (ths KZT) - **oth_inc** other income (ths KZT) - **DR** default rate **Example** : .. code-block:: python import AFR macroKZ = load_macroKZ() **Reference** : The Agency of the Republic of Kazakhstan for Regulation and Development of Financial Market. ## Functions * **adjr2_score** Performs calculation of Adjusted R squared (Adj R2) for the given model. _Arguments_: model: OLS model _Result_: result: Adj R2 metrics. _Example_: print(adjr2_score(model)) * **aic_score** Performs calculation of Akaike Information criteria(AIC) for the given model. _Arguments_: model: OLS model _Result_: result: AIC metrics. _Example_: print(aic_score(model)) * **bic_score** Performs calculation of Akaike Information criteria(AIC) for the given model. _Arguments_: model: OLS model _Result_: result: AIC metrics. _Example_: print(bic_score(model)) * **check_betas** Performs all possible subsets regression analysis for the given X and y with with an option to select criterion. Possible criteria are r2, AIC, BIC. _Arguments_: X: The predictor variables. y: The response variable criteria (str): The information criteria based on which intercept (bool): Logical; whether to include intercept term in the models. _Result_: pandas DataFrame: A table of subsets number, predictor variables and beta coefficients for each subset model. _Example_: X = macro[['poil', 'cpi', 'usdkzt', 'GDP_DEF', 'exp', 'tonia_rate']] y = macro['real_gdp'] check_betas(X, y, criterion = 'bic', intercept = False) * **checkdata** Preliminary check of dataset for missing values, numeric format, outliers. _Arguments_: dataset: name of the CSV file with a dataset for analysis for preliminary check _Result_: str: A conclusion of the preliminary analysis. _Example_: import pandas as pd macro = pd.read_csv('./load/macroKZ.csv') checkdata(macro) * **corsel** Correlation matrix for a dataset with an option to set a correlation threshold and option to correlation value as a number or boolean True/False. _Arguments_: data: pandas DataFrame or path to CSV file with a dataset for analysis for preliminary check thrs (float): correlation threshold numeric value to use for filtering. Default is 0.65 value_type (str): type of correlation value as a "numeric" or "boolean" value. Default representation is numeric. _Result_: pd.DataFrame or boolean : A pair of value name and correlation of the correlation matrix based on the threshold. Type of data varies in accordance with a chosen value_type parameter. _Example_: data = macro[['poil', 'cpi', 'usdkzt', 'GDP_DEF', 'exp', 'GDD_Agr_R', 'rurkzt', 'tonia_rate', 'cred_portfolio', 'fed_fund_rate']] corsel(data, thrs = 0.65, value_type = "boolean") * **dec_plot** The function depicts decomposition of regressors as a stacked barplot. _Arguments_: model: OLS linear regression model. data(pandas.DataFrame): A dataset based on which the model was built. _Result_: plot : matplotlib figure. _Example_: model = ols('real_gdp ~ poil + cpi + usdkzt + imp', data = macro).fit() dec_plot(model, macro) * **load_finratKZ** Loads finratKZ dataset. More details in the description of the dataset. _Result_: dataset : finratKZ dataset. _Example_: df = load_finratKZ() * **load_macroKZ** Loads macroKZ dataset. More details in the description of the dataset. _Result_: dataset : macroKZ dataset. _Example_: df = load_macroKZ() * **opt_size** Calculation of the number of observations necessary to generate the regression for a given number of regressors. _Arguments_: model: OLS linear regression model. _Result_: size (int) : Number of observations necessary to generate the model. _Example_: model = ols('real_gdp ~ poil + cpi + usdkzt + imp', data = macro).fit() opt_size(model) * **pt_multi** Estimates the multi-year probability of default using the Pluto and Tasche model. _Arguments_: portfolio_distribution (numpy array): The distribution of the portfolio loss rate. num_defaults (numpy array): The number of defaults observed in each portfolio over the num_years. conf_level (float): The confidence level for the estimate. num_years (int): Number of years of observations. _Result_: The estimated multi-year probability of default for each portfolio. _Example_: portfolio_distribution = np.array([10,20,30,40,10,20]) num_defaults = np.array([1, 2, 1, 0, 3, 2]) conf_level = 0.99 num_years = 3 pt_multi(portfolio_distribution, num_defaults, conf_level, num_years) * **pt_one** Estimates the one-year probability of default using the Pluto and Tasche model. _Arguments_: portfolio_dist (numpy array): The distribution of the portfolio loss rate. num_defaults (numpy array): The number of defaults observed in each portfolio. conf_level (float): The confidence level for the estimate. _Result_: The estimated one-year probability of default for each portfolio. _Example_: portfolio_distribution = np.array([10,20,30,40,10,20]) num_defaults = np.array([1, 2, 1, 0, 3, 2]) conf_level = 0.99 pt_one(portfolio_distribution, num_defaults, conf_level) * **reg_test** Tests for detecting violation of Gauss-Markov assumptions. _Arguments_: model : OLS linear regression model. _Result_: results (dict) : A dictionary containing the results of the four tests. _Example_: model = ols('real_gdp ~ poil + cpi + usdkzt + imp', data = macro).fit() reg_test(model) * **regsel_f** Allows to select the best model based on stepwise forward regression analysis with all possible models. The best model is chosen according to the specified scoring. _Arguments_: X: independent/predictor variable(-s) y: dependent/response variable data (pandas.DataFrame): dataset p_value (float): Variables with p-value less than {p_value} will enter into the model. scoring (str): Statistical metrics used to estimate the best model. The default value is R squared, r2. Other possible options for {scoring} are: 'aic' , 'bic', 'sbic', 'accuracy', 'r2', 'adjr2', 'explained_variance' and other. _Result_: results: The best model and the plot of the coefficients. _Example_: X = macro[['poil', 'cpi', 'usdkzt', 'GDP_DEF', 'exp', 'tonia_rate']] y = macro['real_gdp'] regsel_f(X, y, macro, scoring = 'aic') * **sbic_score** Performs calculation of Schwarz Bayesian Information criteria(SBIC) for the given model. _Arguments_: model: OLS model _Result_: result: SBIC metrics. _Example_: model = ols('real_gdp ~ poil + cpi + usdkzt + imp', data = macro).fit() print(sbic_score(model)) * **vif_reg** Calculates the variation inflation factors of all predictors in regression models. _Arguments_: model: OLS linear regression model. _Result_: vif (pandas DataFrame): A Dataframe containing the vIF score for each independent factor. _Example_: model = ols('real_gdp ~ poil + cpi + usdkzt + imp', data = macro).fit() vif_reg(model)
AFR
/AFR-0.2.3.tar.gz/AFR-0.2.3/AFR manual.md
AFR manual.md
StatsD was popularized by Etsy, and we refer to their implementation as "Etsy-standard" (https://github.com/etsy/statsd/). It's a light-weight method of gathering statistics from your applications. As an application developer, all you need to do is include a small library, and sprinkle one-liners like this throughout your code: Statsd.increment("my.important.event") Statsd.gauge("my.important.value", important_value) Statsd.timing("my.important.process", important_process_time) In the Etsy version, this will cause a UDP packet to be sent to a designated server that is running their collection and visualization packages. The AppFirst client API looks the same to the application developer, but sends data via POSIX message queue or Windows Mailslot to the collector and takes advantage of AppFirst collection and visualization technologies. If you are already running an AppFirst collector on your server, then all you need to do is use an AppFirst StatsD library instead of an Etsy-only library. This library will aggregate your metrics, and then use a message queue to pass them to the AppFirst collector, which will pass them up to our Big Data store, where they will be visible on your AppFirst dashboards and Correlate charts. This is more efficient than the UDP method and you don't need to set up the Etsy collection and visualization environment. If you are already using Etsy StatsD, you can make a gradual transition. Our libraries can be used in Etsy mode, so you can configure them to send UDP to your existing Etsy monitoring apparatus. Our collector also accepts StatsD UDP messages, so you can just point your existing Etsy-only StatsD library to localhost:8125, until you are ready to transition to an AppFirst StatsD library. For more information on enabling UDP StatsD on the collector click here: http://support.appfirst.com/appfirst-statsd-beta/#other_clients
AFStatsd
/AFStatsd-1.1.0.tar.gz/AFStatsd-1.1.0/README.txt
README.txt
from __future__ import absolute_import, print_function """ The AppFirst Statsd Transport """ __all__=['AFTransport', 'Statsd', 'UDPTransport'] import sys import os import errno try: import ctypes except ImportError: ctypes = None try: import win32file import win32con except ImportError: win32file = None win32con = None from .client import UDPTransport, Statsd PYTHON3 = sys.version_info[0] == 3 WINDOWS = sys.platform.lower().startswith("win") STATSD_SEVERITY = 3 LOGGER = None def set_logger(logger): global LOGGER LOGGER = logger class AFTransport(UDPTransport): def __init__(self, use_udp=True, verbosity=False, logger=None): set_logger(logger) self.mqueue_name = "/afcollectorapi" if PYTHON3: # Convert from Python 3's default unicode self.mqueue_name = self.mqueue_name.encode('ascii') self.flags = 0o4001 self.msgLen = 2048 self.mqueue = None self.verbosity = verbosity self.shlib = self._loadlib() self.use_udp = use_udp def _loadlib(self): if ctypes: try: ctypes.cdll.LoadLibrary("librt.so.1") return ctypes.CDLL("librt.so.1", use_errno=True) except: return None def _handleError(self, data, emsg=""): if LOGGER: LOGGER.error("Statsd Error: {0} when sending {1}".format(emsg, data)) if self.mqueue: self.close() self.mqueue = None def _createQueue(self): try: if WINDOWS and win32file is not None: self.mqueue = win32file.CreateFile(r'\\.\mailslot\{0}'.format(self.mqueue_name), win32file.GENERIC_WRITE, win32file.FILE_SHARE_READ, None, win32con.OPEN_EXISTING, 0, None) elif WINDOWS: raise MQError("Statsd Error: required Python win32 extension is not available") elif not self.shlib: raise MQError("Statsd Error: native support for AFTransport is not available") else: self.mqueue = self.shlib.mq_open(self.mqueue_name, self.flags) except MQError: raise except Exception as e: raise MQError("Statsd Error: unknown error occur when open mqueue " "({0.__class__.__name__}: {0})".format(e)) else: if LOGGER: LOGGER.info("Statsd mqueue {0} opened successfully".format(self.mqueue)) if (self.mqueue < 0): raise MQError("Statsd Error: Failed to open queue") def emit(self, data): if self.verbosity and LOGGER: LOGGER.info("Sending {0}".format(data)) try: if not self.mqueue: self._createQueue() if self.mqueue: self._emit(data) except MQSendError as e: if LOGGER: LOGGER.error("{0.__class__.__name__}: {0}".format(e)) except MQError as e: if LOGGER: LOGGER.error("{0.__class__.__name__}: {0}".format(e)) if self.use_udp: if self.verbosity and LOGGER: LOGGER.info("Trying to use UDP Transport.") UDPTransport().emit(data) except Exception as e: self._handleError(data, str(e)) def _emit(self, data): """ Actually send the data to the collector via the POSIX/Mailslot mq. Try bundling multiple messages into one if they won't exceed the max size """ to_post_list = [] for name, value in data.items(): send_data = "{0}:{1}".format(name, value.format_string(udp=False)) if PYTHON3: # Unicode not currently supported send_data = send_data.encode('ascii') mlen = min(len(send_data), self.msgLen) post = send_data[:mlen] if self.verbosity and LOGGER: LOGGER.info("Sending data: {0}".format(repr(post))) if len(to_post_list) == 0: to_post_list.append(post) else: previous = to_post_list[-1] if PYTHON3: # More unicode fun combined = "{0}::{1}".format(previous.decode('ascii'), post.decode('ascii')).encode('ascii') else: combined = "{0}::{1}".format(previous, post) if len(combined) > self.msgLen: # Combined message would be too long to_post_list.append(post) else: # Combine messages to use less space in POSIX mq to_post_list[-1] = combined for post in to_post_list: if WINDOWS and PYTHON3: # Create bytearray with pid & send to mailslot data_string = "{0}:{1}:{2}".format(os.getpid(), 3, post.decode('ascii')) data_bytes = bytearray(data_string.encode('utf-16')) rc, _ = win32file.WriteFile(self.mqueue, data_bytes, None) if rc < 0 and LOGGER: LOGGER.error("Statsd Error: failed to write to Mailslot") elif WINDOWS: # Create bytearray from unicode with pid & send to mailslot data_string = unicode("{0}:{1}:{2}").format(os.getpid(), 3, unicode(post)) data_bytes = bytearray(data_string.encode('utf-16')) rc, _ = win32file.WriteFile(self.mqueue, data_bytes, None) if rc < 0 and LOGGER: LOGGER.error("Statsd Error: failed to write to Mailslot") else: # Send data to POSIX mq rc = self.shlib.mq_send(self.mqueue, post, len(post), STATSD_SEVERITY) if rc < 0: errornumber = ctypes.get_errno() if errno.errorcode[errornumber] != "EAGAIN": errmsg = os.strerror(errornumber) if LOGGER: LOGGER.error("Statsd Error: failed to mq_send: {0}".format(errmsg)) elif LOGGER: LOGGER.error("StatsD queue full; Failed to send message: {0}".format(post)) def close(self): if self.mqueue: if LOGGER: LOGGER.warning("MQ {0} is being closed".format(self.mqueue_name)) try: if WINDOWS: self.mqueue.Close() else: _ = self.shlib.mq_close(self.mqueue) except: pass self.mqueue = None def __del__(self): self.close() class MQError(Exception): def __init__(self, msg=None): self.msg = msg or "Statsd Error" def __str__(self): return str(self.msg) class MQSendError(Exception): def __init__(self, rc, msg=None): self.rc = rc self.msg = msg if msg is not None else 'Statsd Error' def __str__(self): return "{0}; return errcode: {1}".format(self.msg, errno.errorcode(self.rc)) Statsd.set_transport(AFTransport()) Statsd.set_aggregation(True) if __name__ == '__main__': # Test code Statsd.set_transport(AFTransport(verbosity=True)) count = 1 @Statsd.count('python.test.count') @Statsd.time('python.test.time') def test_stats(): Statsd.timing('python.test.timer', 500) Statsd.gauge('python.test.gauge', 500) Statsd.increment('python.test.counter') Statsd.decrement('python.test.counter') Statsd.update_stats('python.test.counter', 5, sample_rate=1) Statsd.update_stats('python.test.counter', -5, sample_rate=0) while count < 100000: test_stats() count += 1
AFStatsd
/AFStatsd-1.1.0.tar.gz/AFStatsd-1.1.0/afstatsd/afclient.py
afclient.py
import sys import time import random import atexit import threading from socket import socket, AF_INET, SOCK_DGRAM #--------------------------------------------------------------------------- # Default UDP Transport #--------------------------------------------------------------------------- class UDPTransport(object): def __init__(self, host='localhost', port=8125): self.host = host self.port = port def emit(self, data): """ Send the metrics over UDP """ addr=(self.host, self.port) udp_sock = socket(AF_INET, SOCK_DGRAM) try: for name, value in data.items(): send_data = "{0}:{1}".format(name, value.format_string(udp=True)) udp_sock.sendto(send_data, addr) except Exception as e: sys.stderr.write("Error emitting stats over UDP: {0.__class__.__name__}: {0}\n".format(e)) def close(self): pass #--------------------------------------------------------------------------- # Statsd Aggregator to buffer stats of the same bucket and dump them together #--------------------------------------------------------------------------- class StatsdAggregator(object): def __init__(self, interval, transport): self.running = False self.interval = interval self.transport = transport self.buf = {} self.lock = threading.Lock() self._service_thread = None self.left_buffers = {} # 2 buffer groups, each stored in a dict self.right_buffers = {} # one of each for each thread self.rbufs = self.left_buffers # buffer group currently being read from self.wbufs = self.right_buffers # buffer group currently being written to def service_loop(self): while self.running: time.sleep(self.interval/2.0) self.swap_buffers() time.sleep(self.interval/2.0) self.dump() def start(self): """ Start aggregation """ if self.running: return else: self.running = True if self._service_thread == None: self._service_thread = threading.Thread(target=self.service_loop) self._service_thread.daemon = True self._service_thread.start() def stop(self): """ Stop aggregation """ if self.running: self.running = False self.dump() self.swap_buffers() self.dump() def is_empty(self): """ Check if data in self.buf """ if self.buf: return False else: return True def add(self, bucket): # is setdefault atomic (thread safe)? It's faster! write_buffer = self.wbufs.setdefault(threading.currentThread(), {}) """ if threading.currentThread() in self.wbufs: write_buffer = self.wbufs[threading.currentThread()] else: #print "creating new write buffer for new thread" write_buffer = {} self.lock.acquire() self.wbufs[threading.currentThread()] = write_buffer self.lock.release() """ if bucket.name in write_buffer: # aggregate if bucket is already in bucket write_buffer[bucket.name].aggregate(bucket.stat) else: # otherwise add write_buffer[bucket.name] = bucket return def dump(self): """ aggregate data across all read buffers and then emit """ send_buffer = {} for th in self.rbufs: read_buffer = self.rbufs[th] for name, bucket in read_buffer.items(): if name in send_buffer: send_buffer[name].aggregate(bucket.stat) else: send_buffer[name]=bucket read_buffer.clear() self.transport.emit(send_buffer) def swap_buffers(self): if self.rbufs == self.left_buffers: self.rbufs = self.right_buffers self.wbufs = self.left_buffers else: self.rbufs = self.left_buffers self.wbufs = self.right_buffers class Bucket(object): def format_string(self, udp=False): if udp: return self._to_udp_string() else: return self._to_af_string() def _to_udp_string(self): raise NotImplementedError def _to_af_string(self): return self._to_udp_string() class CounterBucket(Bucket): def __init__(self, name, stat, rate=1): self.name = name self.stat = stat self.rate = rate def _to_udp_string(self): return "{0}|c".format(self.stat) def aggregate(self, stat): """ CounterBuckets are aggregated by adding new values to the current value. """ # Note: This is non-standard. We should not divide this out, # but instead send the semple rate upstream (with @rate) self.stat += int(stat/self.rate) class TimerBucket(Bucket): def __init__(self, name, stat): self.name = name self.stat = [stat] self.count = 1 def _to_af_string(self): """ Sending up the full list of values by default so AppFirst can calculate the max/min during the interval as well. """ return "{0}|ms".format(','.join([str(n) for n in self.stat])) def _to_udp_str(self): """ Only send up the average if emitting over UDP so we don't break existing StatsD implementations. """ avg = sum(self.stat) / self.count return "{0}|ms".format(avg) def aggregate(self, stat): """ TimerBuckets are aggregated by adding new time values to the existing time values and incrementing a counter used to get an average time. """ self.stat.extend(stat) self.count += 1 class GaugeBucket(Bucket): def __init__(self, name, stat): self.name = name self.stat = stat self.timestamp=int(time.time()) def _to_udp_string(self): return "{0}|g|{1}".format(self.stat, self.timestamp) def aggregate(self, stat): """ GuageBuckets are updated by setting the current gauge value to the new value. No actual aggregation is done. """ self.stat = stat self.timestamp = int(time.time()) #--------------------------------------------------------------------------- # Statsd Client #--------------------------------------------------------------------------- class Statsd(object): _transport = UDPTransport() _aggregator = StatsdAggregator(20, _transport) @staticmethod def set_transport(transport): Statsd._transport.close() Statsd._transport = transport Statsd._aggregator.transport = transport @staticmethod def set_aggregation(should_aggregate): if should_aggregate and not Statsd._aggregator.running: Statsd._aggregator.start() if not should_aggregate and Statsd._aggregator.running: Statsd._aggregator.stop() @staticmethod def gauge(name, reading): """ Log gauge information >>> from client import Statsd >>> Statsd.gauge('some.gauge', 500) """ GaugeBucket(name, reading) Statsd.send(GaugeBucket(name, reading)) @staticmethod def timing(name, elapse): """ Log timing information >>> from client import Statsd >>> Statsd.timing('some.time', 500) """ Statsd.send(TimerBucket(name, int(round(elapse)))) @staticmethod def increment(names, sample_rate=1): """ Increments one or more stats counters >>> Statsd.increment('some.int') >>> Statsd.increment('some.int', 0.5) """ Statsd.update_stats(names, 1, sample_rate) @staticmethod def decrement(names, sample_rate=1): """ Decrements one or more stats counters >>> Statsd.decrement('some.int') """ Statsd.update_stats(names, -1, sample_rate) @staticmethod def update_stats(names, delta=1, sample_rate=1): """ Updates one or more stats counters by arbitrary amounts >>> Statsd.update_stats('some.int', 10) Sample rate is a decimal value representing the proportion of stats to keep. For example, if sample_rate is 0.5, then 50% of stats will be discarded. Default value is 1 and does not discard any stats. """ if sample_rate < 1 and random.random() > sample_rate: return if not isinstance(names, list): names = [names] for name in names: Statsd.send(CounterBucket(name, int(round(delta)), sample_rate)) @staticmethod def send(bucket): if Statsd._aggregator.running: Statsd._aggregator.add(bucket) else: bucket = {bucket.name: bucket} Statsd._transport.emit(bucket) @staticmethod def flush(buf): Statsd._transport.emit(buf.dump()) @staticmethod def shutdown(): Statsd._aggregator.stop() Statsd._transport.close() @staticmethod def time(name, enabled=True): """ Function Decorator to report function execution time. >>> @Statsd.time("some.timer.bucket") >>> def some_func(): >>> pass # do something """ def wrap_timer(method): if not enabled: return method def send_statsd(*args, **kwargs): start = time.time() result = method(*args, **kwargs) duration = (time.time() - start) * 1000 Statsd.timing(name, duration) return result return send_statsd return wrap_timer @staticmethod def count(name, sample_rate=1, enabled=True): """ Function Decorator to count how many times a function is invoked. >>> @Statsd.count("some.counter.bucket") >>> def some_func(): >>> pass #do something """ def wrap_counter(method): if not enabled: return method def send_statsd(*args, **kwargs): result = method(*args, **kwargs) Statsd.increment(name, sample_rate) return result return send_statsd return wrap_counter # shutdown automatically on application exit... atexit.register(Statsd.shutdown)
AFStatsd
/AFStatsd-1.1.0.tar.gz/AFStatsd-1.1.0/afstatsd/client.py
client.py
import os import glob import numpy as np import imageio from tqdm import tqdm import time from osgeo import gdal from osgeo import osr from osgeo import ogr from osgeo.gdalconst import GA_Update class GeoTiff: @staticmethod def to_img(img_format, gtif_path, save_path): gtifs = [] if os.path.isdir(gtif_path): gtifs = glob.glob(gtif_path + r'\*.tif') elif os.path.isfile(gtif_path): if '.tif' in gtif_path or '.TIF' in gtif_path: gtifs.append(gtif_path) else: print('input path error.') exit(0) print('Geotiff convert to {0}:'.format(str.upper(img_format))) pbar = tqdm(total=len(gtifs)) for gtif in gtifs: gtif_name = os.path.split(gtif)[-1] file_name = os.path.splitext(gtif_name)[0] src_ds = gdal.Open(gtif) if src_ds is None: # print('unable to open raster file:', gtif_name, '.') continue ncol = src_ds.RasterXSize nrow = src_ds.RasterYSize nband = src_ds.RasterCount data_uint8 = np.zeros((nrow, ncol, nband), dtype=np.uint8) for i in range(nband): srcband = src_ds.GetRasterBand(i + 1) data_origin = srcband.ReadAsArray(0, 0, ncol, nrow) if data_origin.dtype == np.uint8: data_uint8[:, :, i] = data_origin else: data_float = data_origin.astype(np.float) data_unique = np.unique(data_float) data_unique = data_unique.argsort() # 取第二的最大最小值,防止黑边白边干扰 data_max = data_unique[-2] data_min = data_unique[1] data_float = (data_float - data_min) / (data_max - data_min) * 255 data_uint8 = data_float.astype(np.uint8) imageio.imwrite(os.path.join(save_path, file_name + '.' + img_format), data_uint8) time.sleep(0.05) pbar.update(1) pbar.close() @staticmethod def from_img_wf(epsg_code, img_format, wf_format, img_bands, img_path, wf_path, save_path): if not img_bands: img_bands = [0, 1, 2] imgs = [] wf_dir = '' if os.path.isdir(img_path) and os.path.isdir(wf_path): imgs = glob.glob(img_path + r'\*.' + img_format) wf_dir = wf_path if os.path.isfile(img_path) and os.path.isdir(wf_path): if '.' + img_format in img_path and '.' + wf_format in wf_path: imgs.append(img_path) wf_dir = os.path.split(wf_path)[0] if not imgs: print('input path error.') exit(0) print('Creat GeoTiff with {0} and {1}:'.format(str.upper(img_format), str.upper(wf_format))) pbar = tqdm(total=len(imgs)) for img in imgs: img_name = os.path.split(img)[-1] file_name = os.path.splitext(img_name)[0] wf_name = file_name + '.' + wf_format wf = os.path.join(wf_dir, wf_name) if not os.path.exists(wf): # print(wf_name, 'is not found.') continue print(img_name) img_data = imageio.imread(img) wf_paras = [] with open(wf, 'r') as f: wf_para = f.readline() while wf_para: wf_paras.append(float(wf_para)) wf_para = f.readline() geotransform = (wf_paras[4], wf_paras[0], wf_paras[1], wf_paras[5], wf_paras[2], wf_paras[3]) driver = gdal.GetDriverByName('GTiff') new_raster = driver.Create(os.path.join(save_path, file_name + '.tif'), img_data.shape[1], img_data.shape[0], len(img_bands), gdal.GDT_Byte) new_raster.SetGeoTransform(geotransform) new_band = None for i in range(len(img_bands)): new_band = new_raster.GetRasterBand(i + 1) new_band.WriteArray(img_data[:, :, img_bands[i]]) new_raster_srs = osr.SpatialReference() new_raster_srs.ImportFromEPSG(epsg_code) new_raster.SetProjection(new_raster_srs.ExportToWkt()) new_band.FlushCache() time.sleep(0.05) pbar.update(1) pbar.close() @staticmethod def from_img_gtif(epsg_code, img_format, img_bands, img_path, gtif_path, save_path): if not img_bands: img_bands = [0, 1, 2] imgs = [] gtif_dir = '' if os.path.isdir(img_path) and os.path.isdir(gtif_path): imgs = glob.glob(img_path + r'\*.' + img_format) gtif_dir = gtif_path if os.path.isfile(img_path) and '.' + img_format in img_path: if '.' + img_format in img_path and '.tif' in gtif_path: imgs.append(img_path) gtif_dir = os.path.split(gtif_path)[0] if not imgs: print('input path error.') exit(0) print('Creat GeoTiff with {0} and GeoTiff:'.format(str.upper(img_format))) pbar = tqdm(total=len(imgs)) for img in imgs: img_name = os.path.split(img)[-1] file_name = os.path.splitext(img_name)[0] gtif_name = file_name + '.tif' gtif = os.path.join(gtif_dir, gtif_name) if not os.path.exists(gtif): # print(gtif_name, 'is not found.') continue src_ds = gdal.Open(os.path.join(gtif_dir, gtif_name)) if src_ds is None: # print('unable to open raster file:', gtif_name, '.') continue img_data = imageio.imread(img) geotransform = src_ds.GetGeoTransform() driver = gdal.GetDriverByName('GTiff') new_raster = driver.Create(os.path.join(save_path, file_name + '.tif'), img_data.shape[1], img_data.shape[0], len(img_bands), gdal.GDT_Byte) new_raster.SetGeoTransform(geotransform) new_band = None for i in range(len(img_bands)): new_band = new_raster.GetRasterBand(i + 1) new_band.WriteArray(img_data[:, :, img_bands[i]]) new_raster_srs = osr.SpatialReference() new_raster_srs.ImportFromEPSG(epsg_code) new_raster.SetProjection(new_raster_srs.ExportToWkt()) new_band.FlushCache() time.sleep(0.05) pbar.update(1) pbar.close() @staticmethod def set_nodata(nodata_val, gtif_path): gtifs = [] if os.path.isdir(gtif_path): gtifs = glob.glob(gtif_path + r'\*.tif') elif os.path.isfile(gtif_path): if '.tif' in gtif_path or '.TIF' in gtif_path: gtifs.append(gtif_path) else: print('input path error.') exit(0) print('Set nodata value of Geotiff:') pbar = tqdm(total=len(gtifs)) for gtif in gtifs: src_ds = gdal.Open(gtif, GA_Update) if src_ds is None: # print('unable to open raster file:', gtif_name) continue nband = src_ds.RasterCount for i in range(nband): src_band = src_ds.GetRasterBand(i + 1) # 注意黑边的数值,也可能是白边 src_band.SetNoDataValue(nodata_val) time.sleep(0.05) pbar.update(1) pbar.close() @staticmethod def gtif_to_shp(epsg_code, gtif_path, save_path): gtifs = [] if os.path.isdir(gtif_path): gtifs = glob.glob(gtif_path + r'\*.tif') elif os.path.isfile(gtif_path): if '.tif' in gtif_path or '.TIF' in gtif_path: gtifs.append(gtif_path) else: print('input path error.') exit(0) print('Convert Geotiff to ShapeFile:') gdal.UseExceptions() srs = osr.SpatialReference() srs.ImportFromEPSG(epsg_code) pbar = tqdm(total=len(gtifs)) for gtif in gtifs: gtif_name = os.path.split(gtif)[-1] file_name = os.path.splitext(gtif_name)[0] src_ds = gdal.Open(gtif) if src_ds is None: # print('unable to open gtif file:', gtif_name) continue src_band = src_ds.GetRasterBand(1) dst_layer_name = "POLYGONIZED_STUFF" driver = ogr.GetDriverByName("ESRI Shapefile") dst_ds = driver.CreateDataSource(os.path.join(save_path, file_name + '.shp')) dst_layer = dst_ds.CreateLayer(dst_layer_name, srs=srs) gdal.Polygonize(src_band, src_band, dst_layer, -1, [], callback=None) time.sleep(0.05) pbar.update(1) pbar.close() if __name__ == '__main__': # GeoTiff.to_img('png', r'G:\GIS\Test\LC81320402017350LGN00', r'G:\GIS\Test') # GeoTiff.from_img_wf(3857, 'png', 'pgw', [], r'G:\GIS\Test', r'G:\GIS\Test', r'G:\GIS\Test') # GeoTiff.from_img_gtif(3857, 'png', [0], r'G:\GIS\Test', r'G:\GIS\Test\TIF', r'G:\GIS\Test') # GeoTiff.set_nodata(0, r'G:\GIS\Test') # GeoTiff.gtif_to_shp(3857, r'G:\GIS\Test', r'G:\GIS\Test\SHP') exit(0)
AGDPK
/AGDPK-1.1.4.tar.gz/AGDPK-1.1.4/raster_pro.py
raster_pro.py
import os import glob from tqdm import tqdm import time from osgeo import osr from osgeo import ogr class ShpPolygon: @staticmethod def to_gtif(): exit(0) @staticmethod def merge_shp(epsg_code, shp_path, save_path): if not os.path.isdir(shp_path): print('input path error.') exit(0) shps = glob.glob(shp_path + r'\*.shp') print('Merge ShapeFiles:') driver = ogr.GetDriverByName("ESRI Shapefile") out_ds = driver.CreateDataSource(save_path) srs = osr.SpatialReference() srs.ImportFromEPSG(epsg_code) dst_layername = "MERGED_STUFF" out_layer = out_ds.CreateLayer(dst_layername, srs=srs) new_field = ogr.FieldDefn("Name", ogr.OFTString) out_layer.CreateField(new_field) pbar = tqdm(total=len(shps)) for shp in shps: file_name = os.path.splitext(os.path.split(shp)[-1])[0] ds = ogr.Open(shp) lyr = ds.GetLayer() for feat in lyr: out_feat = ogr.Feature(out_layer.GetLayerDefn()) out_feat.SetField("Name", file_name) out_feat.SetGeometry(feat.GetGeometryRef().Clone()) out_layer.CreateFeature(out_feat) out_layer.SyncToDisk() time.sleep(0.05) pbar.update(1) pbar.close() @staticmethod def calc_area(shp_path): shps = [] if os.path.isdir(shp_path): shps = glob.glob(shp_path + r'\*.shp') elif os.path.isfile(shp_path): if '.shp' in shp_path: shps.append(shp_path) else: print('input path error.') exit(0) print('Calculate the area of features:') pbar = tqdm(total=len(shps)) for shp in shps: driver = ogr.GetDriverByName("ESRI Shapefile") src_ds = driver.Open(shp, 1) src_layer = src_ds.GetLayer() new_field = ogr.FieldDefn("Area", ogr.OFTReal) new_field.SetPrecision(2) src_layer.CreateField(new_field) for src_feat in src_layer: src_geom = src_feat.GetGeometryRef() src_feat.SetField("Area", src_geom.GetArea()) src_layer.SetFeature(src_feat) time.sleep(0.05) pbar.update(1) pbar.close() @staticmethod def sum_numattr(filed_name, shp_path): if not os.path.isfile(shp_path): print('input path error.') exit(0) print('Calculate sum of all features value of a number attribute:') print('......') driver = ogr.GetDriverByName("ESRI Shapefile") src_ds = driver.Open(shp_path, 0) src_layer = src_ds.GetLayer() sum_val = 0 for src_feat in src_layer: sum_val += src_feat.GetField(filed_name) return sum_val @staticmethod def mean_numattr(filed_name, shp_path): if not os.path.isfile(shp_path): print('input path error.') exit(0) print('Calculate mean of all features value of a number attribute:') print('......') driver = ogr.GetDriverByName("ESRI Shapefile") src_ds = driver.Open(shp_path, 0) src_layer = src_ds.GetLayer() sum_val = 0 for src_feat in src_layer: sum_val += src_feat.GetField(filed_name) mean_val = sum_val / len(src_layer) return mean_val @staticmethod def max_numattr(filed_name, shp_path): if not os.path.isfile(shp_path): print('input path error.') exit(0) print('Calculate max of all features value of a number attribute:') print('......') driver = ogr.GetDriverByName("ESRI Shapefile") src_ds = driver.Open(shp_path, 0) src_layer = src_ds.GetLayer() max_val = None for src_feat in src_layer: val = src_feat.GetField(filed_name) if max_val is None or val > max_val: max_val = val return max_val @staticmethod def min_numattr(filed_name, shp_path): if not os.path.isfile(shp_path): print('input path error.') exit(0) print('Calculate min of all features value of a number attribute:') print('......') driver = ogr.GetDriverByName("ESRI Shapefile") src_ds = driver.Open(shp_path, 0) src_layer = src_ds.GetLayer() min_val = None for src_feat in src_layer: val = src_feat.GetField(filed_name) if min_val is None or val < min_val: min_val = val return min_val @staticmethod def remove_by_numattr(fl_cond, shp_path): shps = [] if os.path.isdir(shp_path): shps = glob.glob(shp_path + r'\*.shp') elif os.path.isfile(shp_path): if '.shp' in shp_path: shps.append(shp_path) else: print('input path error.') exit(0) print('Remove features by number attribute:') pbar = tqdm(total=len(shps)) for shp in shps: driver = ogr.GetDriverByName("ESRI Shapefile") src_ds = driver.Open(shp, 1) src_layer = src_ds.GetLayer() fl_cond_str = '' if_first = True for sel_field, conds in fl_cond.items(): if not if_first: fl_cond_str += ' and ' else: if_first = False for cond in conds: fl_cond_str += sel_field + ' ' + cond if cond != conds[-1]: fl_cond_str += ' and ' src_layer.SetAttributeFilter(fl_cond_str) for src_feat in src_layer: src_layer.DeleteFeature(src_feat.GetFID()) src_ds.ExecuteSQL('REPACK ' + src_layer.GetName()) time.sleep(0.05) pbar.update(1) pbar.close() @staticmethod def remove_by_strattr(fl_cond, shp_path): shps = [] if os.path.isdir(shp_path): shps = glob.glob(shp_path + r'\*.shp') elif os.path.isfile(shp_path): if '.shp' in shp_path: shps.append(shp_path) else: print('input path error.') exit(0) print('Remove features by number attribute') pbar = tqdm(total=len(shps)) for shp in shps: driver = ogr.GetDriverByName("ESRI Shapefile") src_ds = driver.Open(shp, 1) src_layer = src_ds.GetLayer() fl_cond_str = '' if_first = True for sel_field, val_fixed in fl_cond.items(): if not if_first: fl_cond_str += ' and ' else: if_first = False for i in range(len(val_fixed)): if i != 0: fl_cond_str += ' and ' fl_cond_str += sel_field + ' = ' + "'" + str(val_fixed[0]) + "'" src_layer.SetAttributeFilter(fl_cond_str) for src_feat in src_layer: src_layer.DeleteFeature(src_feat.GetFID()) src_ds.ExecuteSQL('REPACK ' + src_layer.GetName()) time.sleep(0.05) pbar.update(1) pbar.close() @staticmethod def intersection(epsg_code, shp1_path, shp2_path, save_path): if shp1_path.rsplit('.', 1)[-1] != 'shp' or shp2_path.rsplit('.', 1)[-1] != 'shp': print('input path error.') exit(0) print('Intersect 2 Polygon ShapeFile:') print('......') driver = ogr.GetDriverByName("ESRI Shapefile") src1_ds = driver.Open(shp1_path, 0) src2_ds = driver.Open(shp2_path, 0) src1_layer = src1_ds.GetLayer() src2_layer = src2_ds.GetLayer() src1_union = ogr.Geometry(ogr.wkbMultiPolygon) for src1_feat in src1_layer: geom = src1_feat.GetGeometryRef() src1_union = src1_union.Union(geom) src2_union = ogr.Geometry(ogr.wkbMultiPolygon) for src2_feat in src2_layer: geom = src2_feat.GetGeometryRef() src2_union = src2_union.Union(geom) intersection = src1_union.Intersection(src2_union) out_ds = driver.CreateDataSource(save_path) srs = osr.SpatialReference() srs.ImportFromEPSG(epsg_code) dst_layername = "INTERSECTION_STUFF" out_layer = out_ds.CreateLayer(dst_layername, srs=srs) out_feat = ogr.Feature(out_layer.GetLayerDefn()) for geom in intersection: if geom.GetGeometryName() != 'POLYGON': continue out_feat.SetGeometry(geom) out_layer.CreateFeature(out_feat) out_layer.SyncToDisk() @staticmethod def union(epsg_code, shp1_path, shp2_path, save_path): if shp1_path.rsplit('.', 1)[-1] != 'shp' or shp2_path.rsplit('.', 1)[-1] != 'shp': print('input path error.') exit(0) print('Union 2 Polygon ShapeFile:') print('......') driver = ogr.GetDriverByName("ESRI Shapefile") src1_ds = driver.Open(shp1_path, 0) src2_ds = driver.Open(shp2_path, 0) src1_layer = src1_ds.GetLayer() src2_layer = src2_ds.GetLayer() src1_union = ogr.Geometry(3) for src1_feat in src1_layer: geom = src1_feat.GetGeometryRef() src1_union = src1_union.Union(geom) src2_union = ogr.Geometry(3) for src2_feat in src2_layer: geom = src2_feat.GetGeometryRef() src2_union = src2_union.Union(geom) union = src1_union.Union(src2_union) out_ds = driver.CreateDataSource(save_path) srs = osr.SpatialReference() srs.ImportFromEPSG(epsg_code) dst_layername = "UNION_STUFF" out_layer = out_ds.CreateLayer(dst_layername, srs=srs) out_feat = ogr.Feature(out_layer.GetLayerDefn()) for geom in union: if geom.GetGeometryName() != 'POLYGON': continue out_feat.SetGeometry(geom) out_layer.CreateFeature(out_feat) out_layer.SyncToDisk() if __name__ == '__main__': # ShpPolygon.merge_shp(3857, r'G:\GIS\Test\SHP', r'G:\GIS\Test\SHP\merge.shp') ShpPolygon.calc_area(r'G:\GIS\Nepal\GoogleBing\OLD\Segmentation2\SHP\val') # ShpPolygon.remove_by_numattr({'Area': ['< 100']}, r'G:\GIS\Nepal\GoogleBing\Segmentation\val\SHP\union.shp') # ShpPolygon.remove_by_strattr({'Name': ['1', '3']}, r'G:\GIS\Test\SHP') # ShpPolygon.intersection(3857, r'G:\GIS\Test\SHP\1.shp', r'G:\GIS\Test\SHP\mask.shp', r'G:\GIS\Test\SHP\GeoProcessing\intersection.shp') # ShpPolygon.union(3857, r'G:\GIS\Test\SHP\1.shp', r'G:\GIS\Test\SHP\mask.shp', r'G:\GIS\Test\SHP\GeoProcessing\union.shp') # print(ShpPolygon.sum_numattr('Area', r'G:\GIS\Nepal\GoogleBing\Segmentation\val\SHP\intersection.shp')) # print(ShpPolygon.sum_numattr('Area', r'G:\GIS\Nepal\GoogleBing\Segmentation\val\SHP\union.shp')) # print(ShpPolygon.mean_numattr('Area', r'G:\GIS\Nepal\GoogleBing\Truth\val\SHP\val.shp')) # print(ShpPolygon.max_numattr('Area', r'G:\GIS\Nepal\GoogleBing\Truth\val\SHP\val.shp')) # print(ShpPolygon.min_numattr('Area', r'G:\GIS\Nepal\GoogleBing\Truth\val\SHP\val.shp')) exit(0)
AGDPK
/AGDPK-1.1.4.tar.gz/AGDPK-1.1.4/vector_pro.py
vector_pro.py
本框架是一个service启动框架,适合一些分模块的定时任务 安装说明 fdfs-client-py pip安装是2.X版本 无法正常在3.X运行 所以安装前需要先手工安装https://github.com/jefforeilly/fdfs_client-py.git 数据库默认支持PostgresSQL 使用说明 启动方法 import SDLCSService SDLCSService.ServiceManager.Main() 各模块需要放在应用目录的ServiceMoudels目录 各模块单独文件夹以模块名命名 各模块的入口也是同样文件名.py 入口文件里面的入口类也是同样的名字并继承于SDLCSService.Core.ServiceBase 配置文件为应用目录Config.json 内容示例 { "ServiceModules":{ "SDLCSUnit":{ "loopTime":"5" } }, "InterfaceModules": { "SDLCTester":{ } }, "options": { "fileSize": "2048", "filePath": "/data/", "port": "80", "debug":"True", "host":"0.0.0.0", "threadCount":"2", "IsPrintError":"True", "UnLimitedIP":["*"], "AsyncTheadCount":"4", "AsyncTimeOut":"20", "ExitPassword":"123456", "Database":{ "serverip":"10.86.87.79", "port":"5432", "dbname":"postgres", "username":"postgres", "password":"postgres", "poolcount":"100", "isecho":"False" }, "Logger":{ "Level":"0", "IsPrint":"True", "SaveDay":"30" } } }
AGFramework
/AGFramework-1.0.1.68.tar.gz/AGFramework-1.0.1.68/README.txt
README.txt
import pdfplumber import json import os import re def get_file_path(file_name): current_directory = os.getcwd() # Concatenate the file name or relative path file_path = os.path.join(current_directory, file_name) return file_path def text_extractor(pdf_file): with pdfplumber.open(pdf_file) as pdf: text = '' for page in pdf.pages: text = text + page.extract_text(encoding='utf-8') #storing all the pdf texts in a single string #text = text.replace('\n','') pdf.close() return text class PDFparser: def __init__(self) -> None: pass def pdftotext(self,pdf_file,output_text_file): text = text_extractor(pdf_file) with open(output_text_file, "w", encoding="utf-8") as file: file.write(text) file.close() file_path = get_file_path(output_text_file) print('Text file saved at location '+ file_path) def simplePdftoJson(self,pdf_file,output_json_file): text = text_extractor(pdf_file) temp_dict = {'text':text} with open(output_json_file,'w', encoding="utf-8")as file: json.dump(temp_dict,file,indent=4) file_path = get_file_path(output_json_file) print('JSON file saved at location '+ file_path) def pdftojsonl(self,pdf_file,output_jsonl_file): #This function converts pdf file containing question answer pairs into jsonl document '''To process documents in an optimal way, the PDF file containing Question-Answer pairs should be in the format: Question: some_random_question Answer: Answer to the question''' text = text_extractor(pdf_file) updated_text = text.replace('\n',' ') # Define the pattern for question-answer pairs using regular expressions pattern = r"Question:\s*(.*?)\s*Answer:\s*(.*?)(?=\s*Question:|$)" # Find all matches of question-answer pairs in the input string matches = re.findall(pattern, updated_text, re.DOTALL) qa_pairs = [{'prompt': match[0].strip(), 'completion': match[1].strip()} for match in matches] with open(output_jsonl_file,'w', encoding="utf-8")as file: json.dump(qa_pairs,file) file_path = get_file_path(output_jsonl_file) print('JSONL file saved at location '+ file_path)
AGIpdf2json
/AGIpdf2json-1.0.1-py3-none-any.whl/AGIPDF2JSON/PDFParserClass.py
PDFParserClass.py
# AGLOW [![Documentation Status](https://readthedocs.org/projects/aglow/badge/?version=latest)](http://aglow.readthedocs.io/en/latest/?badge=latest) [![Build Status](https://travis-ci.org/apmechev/AGLOW.svg?branch=master)](https://travis-ci.org/apmechev/AGLOW) Automated Grid-Enabled LOFAR Workflows ==== AGLOW is a combination of the GRID LOFAR Reduction Tools and Apache Airflow. In order to efficiently use the Computational Resources provided to us at SURFsara, the AGLOW package includes custom Airflow Operators. These operators can be combined to build LOFAR workflows, from a single NDPPP run to full Direction Dependent Imaging. Setup ===== The AGLOW package is best set up in a conda environment. The included environment.yml package will create a conda env named 'AGLOW' and set up all the prerequisites to run an AGLOW server. The usave is as such: ```bash conda env create -f environment.yml ``` ```bash mkdir ~/AGLOW_tutorial{,/dags} export AIRFLOW_HOME=~/AGLOW_tutorial conda create -n AGLOW_tutorial python=3.6 source activate AGLOW_tutorial export SLUGIFY_USES_TEXT_UNIDECODE=yes pip install aglow #To install postgress in Userspace: ./AGLOW/scripts/setup_postgres.sh ## If launching fails, check the log, you might need to change the configurations at ## ${AIRFLOW_HOME}/postgres/database/postgresql.conf #Run each of these command inside a screen: airflow scheduler airflow webserver ```
AGLOW
/AGLOW-0.1.1.tar.gz/AGLOW-0.1.1/README.md
README.md
"""Class for AGONS""" #Author: Christopher W. Smith #Date: 01/17/2023 #Data processing import numpy as np import pandas as pd from IPython.display import display #Data plotting from mpl_toolkits.mplot3d import Axes3D from matplotlib.ticker import NullFormatter import matplotlib.pyplot as plt import matplotlib.ticker as ticker import ipympl import seaborn as sns sns.set_style('ticks') #Modeling and Scoring from sklearn.pipeline import Pipeline from sklearn.preprocessing import MinMaxScaler, Normalizer, StandardScaler from AGONS_nano.Custom_Transformers import RowStandardScaler, RowMinMaxScaler from sklearn.svm import SVC from sklearn.metrics import accuracy_score from sklearn.model_selection import StratifiedKFold, LeaveOneOut, RepeatedStratifiedKFold from sklearn.feature_selection import SelectKBest, f_classif from sklearn.decomposition import PCA from sklearn.model_selection import RandomizedSearchCV as rscv import time from sklearn.model_selection import cross_val_score class AGONS: """Class for AGONS modeling nanosensor array data""" def __init__(self, k_max = 10, cv_method = 'Stratified K Fold' , cv_fold = 5, random_state = None, rep_fold = 5): """Set initial parameters to run AGONS. Parameters ---------- k_max: int or (pandas DataFrame.shape[1] + 1). Defines the maximum k value for feature selection to run. For sensor design it is reccommended to use the total number of sensors to trial all ranked important sensors. cv_method = 'Stratified K Fold' (default),'Repeated Stratified K Fold', 'Leave One Out' or 'Custom Value'. Choice between different methods of cross-validation see https://scikit-learn.org/stable/modules/cross_validation.html for further details. 'Custom Value' does not use a specific method to cross-validate and instead only cross-validates based on the RandomizedSearchCV algorithm. Note, Stratified K Fold is fastest. cv_fold = 5 (default), int. The total number of folds performed in cross-validation. random_state = None or int. Sets a reproducible state for data. Recommended to obtain reproducible results. rep_fold = 5 (default), int. Used on for repeating stratified K Fold, where each cross-validation fold is repeated n number of times """ self.k_max = k_max self.cv_method = cv_method self.cv_fold = cv_fold self.random_state = random_state self.rep_fold = rep_fold def activate(self, xtrain, ytrain, xval, yval): """Fits AGONS modeling on training then will predict on validation. Parameters ---------- xtrain : {array-like, sparse matrix} of shape (n_samples, n_features) or \ (n_samples, n_samples) if metric='precomputed' training data. ytrain : {array-like, sparse matrix} of shape (n_samples,) or \ (n_samples, n_outputs) validation target values. xval : {array-like, sparse matrix} of shape (n_samples, n_features) or \ (n_samples, n_samples) if metric='precomputed' validation data. yval : {array-like, sparse matrix} of shape (n_samples,) or \ (n_samples, n_outputs) validation target values.""" self.xtrain = xtrain self.ytrain = ytrain self.xval = xval self.yval = yval #Pipeline for classifer involves a feature_selection, scaler, PCA and SVM pipe = Pipeline([ ('anova', SelectKBest(f_classif)), ('scaler', MinMaxScaler()), ('pca', PCA()), ('svm', SVC(probability=True)) ]) #Setting Randomized Search parameters for pipe. ran_pam= { 'scaler': [MinMaxScaler(), Normalizer(), StandardScaler(), RowMinMaxScaler(), RowStandardScaler()], 'anova__k': list(np.arange(3, self.k_max)), 'pca__n_components': list(np.arange(2, 10,1)), 'pca__svd_solver': ['full'], 'pca__whiten': [True, False], 'svm__C': np.arange(0.01, 2, 0.01), 'svm__gamma': np.arange(0, 1, 0.01), "svm__kernel": ["rbf","linear","poly","sigmoid"], "svm__random_state": [self.random_state]} #Cross-validator selector cv_dict = {'Stratified K Fold': StratifiedKFold(n_splits=self.cv_fold), 'Repeated Stratified K Fold': RepeatedStratifiedKFold(n_splits=self.cv_fold, n_repeats = self.rep_fold, random_state= self.random_state), 'Leave One Out':LeaveOneOut(), 'Custom Value': self.cv_fold} cv_inner = cv_dict[self.cv_method] #Setting Randomized search. search = rscv( estimator=pipe, param_distributions=ran_pam, n_iter=1000, n_jobs=-1, cv=cv_inner, verbose=2, random_state=self.random_state, return_train_score=True, error_score=0) #Pre-fit confirmation and CV-Check print('Modeling Initiated') #Fitting data print('Fitting Data') search.fit(self.xtrain, self.ytrain) #Puts code to sleep for 5 seconds to delay print time.sleep(5) #Results print('*****'*10) print('The best model score is {}'.format(search.best_score_*100)) #Generation of selected parameter table dfparameters = pd.DataFrame(search.cv_results_['params']) dfTr_meanscores = pd.DataFrame(search.cv_results_['mean_train_score'], columns=['Train Accuracy']) dfTr_stdscores = pd.DataFrame(search.cv_results_['std_train_score'], columns=['Train Standard Dev']) dfTe_meanscores = pd.DataFrame(search.cv_results_['mean_test_score'], columns=['Cross-Validation Accuracy']) dfTe_stdscores = pd.DataFrame(search.cv_results_['std_test_score'], columns=['Cross-Validation Standard Dev']) modelscores = pd.concat([dfparameters, dfTr_meanscores, dfTr_stdscores, dfTe_meanscores, dfTe_stdscores], axis=1) modelscores = modelscores[modelscores['svm__gamma']>0.0]#Filters out gamma<=0.0 modelscores = modelscores[modelscores['Train Accuracy']>0.8]#Filters out poor trainers modelscores = modelscores[modelscores['Cross-Validation Accuracy']>0.90]#Filters out poor trainers #Re-index topscores for further use top_score = modelscores.sort_values(by=['Cross-Validation Accuracy', 'anova__k','pca__n_components'], ascending=[False, True, True]) top_score = top_score.reset_index() #Resets index for ease of selection top_score = top_score.drop(['index'], axis=1) print('Validate Top Selected Parameters at minimal 80% Train Accuracy') print('Initiate Validation data step') #Extracting Parameters from top_score name=top_score.columns[0:9] select=[] for i in top_score.columns[0:9]: select.append(top_score[i])#[0:100]) #Setting list for cross-validation score on validation data scores=[] #Outer Cross validation cv_outer = cv_inner #Processing parameters through algorithm for best validation score. for i in range(0,(top_score.shape[0]), 1): pipe_assess = Pipeline([ ('anova', SelectKBest(f_classif,k=select[8][i])), ('scaler', select[4][i]), ('pca', PCA(n_components=select[7][i], svd_solver=select[6][i], whiten=select[5][i])), ('svm', SVC(C=select[3][i], gamma=select[2][i], kernel=select[1][i],probability=True, random_state=select[0][i])) ]) modeling=pipe_assess.fit(self.xtrain, self.ytrain) scores.append(modeling.score(self.xval, self.yval)) #Adding validation score to table top_score['Validation Accuracy']=scores #Reorganizing output top_score=top_score.sort_values(by=['Validation Accuracy', 'Train Accuracy', 'anova__k', 'Cross-Validation Accuracy', 'pca__n_components'], ascending = [False, False, True, False, False]) #False top_score = top_score[top_score['Validation Accuracy'] > 0.85] top_score = top_score.reset_index(drop = True) #top_score=top_score.drop('index', axis=1) print('Max Validation Accuracy', top_score['Validation Accuracy'].max()*100) self.top_score = top_score display(self.top_score) #Returns pandas DataFrame of all parameters return self def parameter_table(self,): return self.top_score def featuredisplay(self, toggle_interactive = False): """Displays ranking and scores for all features. Parameters: ----------- toggle_interactive : True (default), boolean. Controls whether or not to activate matplotlib interactive window.""" if toggle_interactive == True: get_ipython().run_line_magic('matplotlib', 'qt') else: get_ipython().run_line_magic('matplotlib', 'inline') self.score_table = self.top_score #self.xtrain = xtrain #self.ytrain = ytrain print('Total Nanosensor Frequency is: \n', self.score_table['anova__k'].value_counts()/self.score_table.shape[0] *100) #Visualizing most important sensors #Feature Selection bestfeatures = SelectKBest(score_func=f_classif, k='all') fit = bestfeatures.fit(self.xtrain, self.ytrain) dfscores = pd.DataFrame(fit.scores_) dfcolumns = pd.DataFrame(self.xtrain.columns) #concat two dataframes for better visualization featureScores = pd.concat([dfcolumns,dfscores],axis=1) featureScores.columns = ['Sensor','Score'] #naming the dataframe columns display(featureScores.sort_values(by='Score', ascending=False)) #Saving Feature Scores self.featureScores = featureScores #Plotting Feature Ranks fig, ax = plt.subplots() #Color palette form color_palette = "Greens_r" #@param ["viridis", "rocket", "mako", "crest", "light:seagreen", "light:seagreen_r", "light:b", "light:b_r", "dark:salmon", "dark:salmon_r", "Blues", "Blues_r", "YlOrBr", "tab10"] {allow-input: true} #plotting feature selection bar=sns.barplot(data=featureScores.sort_values('Score', ascending=False), x='Sensor', y='Score', palette=color_palette, linewidth=1.5,edgecolor=".1", saturation=1) ax.tick_params(direction='out', length=4, width=2, colors='k', grid_color='k', grid_alpha=1,grid_linewidth=2) #Setting Plot Spines Function def spines(top=True, right=True, bottom=True, left=True): """ If a value is True then the plot will have a spine in the respective position as defined by the function""" ax.spines['top'].set_visible(top) ax.spines['top'].set_color('k') ax.spines['top'].set_linewidth(2) ax.spines['right'].set_visible(right) ax.spines['right'].set_color('k') ax.spines['right'].set_linewidth(2) ax.spines['bottom'].set_visible(bottom) ax.spines['bottom'].set_color('k') ax.spines['bottom'].set_linewidth(2) ax.spines['left'].set_visible(left) ax.spines['left'].set_color('k') ax.spines['left'].set_linewidth(2) self.spines = spines(top=False,right=False) self.spines plt.ylim(-1, featureScores['Score'].max() + 10) plt.xlabel('nanosensors', fontsize=12, fontweight=False, color='k') plt.ylabel('importance score', fontsize=12, fontweight=False, color='k') plt.xticks(fontsize=12, fontweight=False, rotation=90) plt.yticks(fontsize=12, fontweight=False) plt.show() return self def featureselect(self, toggle_interactive = False): """Step wise breakdown for the selected features. Parameters: ----------- toggle_interactive : True (default), boolean. Controls whether or not to activate matplotlib interactive window.""" if toggle_interactive == True: get_ipython().run_line_magic('matplotlib', 'qt') else: get_ipython().run_line_magic('matplotlib', 'inline') fet_select=list(self.featureScores.sort_values(by=['Score'], ascending=[False])[0:self.score_table['anova__k'][0]]['Sensor']) print('Selected Nanosensors are:', fet_select) self.fet_select = fet_select #Feature Conversion X_train_fet = self.xtrain[self.fet_select] X_val_fet = self.xval[self.fet_select] self.xtrainfet = X_train_fet self.xvalfet = X_val_fet def heatmap(x, y): df = x df['Label'] = y re = df.groupby('Label')[list(df.columns[0:])].mean() sns.set_style("whitegrid") plt.figure(figsize=(5, 5)) sns.set(font_scale=1.05) color_bar_orientation = "horizontal" #@param ["vertical", "horizontal"] ax=sns.heatmap(re, cmap="Greens", linecolor='black', linewidths=1, cbar_kws={'label':r'$\Delta$F (a.u)', "orientation": color_bar_orientation, "aspect":20, 'pad':0.005, }) ax.xaxis.tick_top() # x axis on top ax.xaxis.set_label_position('top') ax.yaxis.tick_left() ax.yaxis.set_label_position('left') fontsize = 12 #@param {type:"slider", min:2, max:20, step:1} x_rotation = 45#@param {type:"number"} y_rotation = 0#@param {type:"number"} x_label = '' #@param {type:"string"} y_label = '' #@param {type:"string"} plt.xlabel(x_label, fontsize=fontsize,) #color='black',) plt.ylabel(y_label, fontsize=fontsize,)# color='black' ) plt.xticks(fontsize=fontsize, rotation=x_rotation,)# color='black') plt.yticks(fontsize=fontsize, rotation=y_rotation,)# color='black') plt.show() #In order to use heatmap, copies of self must #be made or else the function merges the primary #self instances self.x = self.xtrainfet.copy() self.y = self.ytrain.copy() heatmap(self.x, self.y) return def pca_transform(self): """Function for scaling the data and transforming through PCA determined by AGONS modeling.""" #Setting Transformer for Scaling scaler=self.score_table['scaler'][0] self.scaler = scaler #Scaling data by top parameter xtrains = self.scaler.fit_transform(self.xtrainfet) xvals = self.scaler.transform(self.xvalfet) self.xtrains = xtrains self.xvals = xvals #Setting PCA pca = PCA(n_components=self.score_table['pca__n_components'][0], svd_solver=self.score_table['pca__svd_solver'][0], whiten=self.score_table['pca__whiten'][0]) self.pca = pca #Tranforming by PCA self.xtrainpca = self.pca.fit_transform(self.xtrains) self.xvalpca = self.pca.transform(self.xvals) #Setting up dataframes n = self.score_table['pca__n_components'][0] number_list = list(np.arange(1, n+1, 1)) pca_list = [] for n in number_list: pca_list.append('PCA: {}'.format(n)) self.pcatrain=pd.DataFrame(self.xtrainpca, columns=pca_list).reset_index(drop = True) self.pcatrain['Label']=self.ytrain.reset_index(drop = True) self.pcatrain = self.pcatrain.sort_values(by='Label', ascending=True) self.pcaval=pd.DataFrame(self.xvalpca, columns=pca_list).reset_index(drop = True) self.pcaval['Label']=self.yval.reset_index(drop=True) self.pcaval = self.pcaval.sort_values(by='Label', ascending=True) return self def pca_diagnostic(self, toggle_interactive = False): """ Function for plotting the cumalitive explained variance for each number of PCA components. Parameters ---------- toggle_interactive : False (default), boolean. Controls whether or not to activate matplotlib interactive window.""" if toggle_interactive == True: get_ipython().run_line_magic('matplotlib', 'qt') else: get_ipython().run_line_magic('matplotlib', 'inline') sns.set_style('ticks') self.pca_diag=PCA(whiten=self.score_table['pca__whiten'][0]).fit(self.xtrains) fig, ax = plt.subplots() sns.lineplot(x=np.arange(1, len(np.cumsum(self.pca_diag.explained_variance_ratio_))+1, 1), y=np.cumsum(self.pca_diag.explained_variance_ratio_)*100, color='k', linewidth=2.5) ax.tick_params(direction='out', length=5, width=3, colors='k', grid_color='k', grid_alpha=1,grid_linewidth=2) plt.xticks(fontsize=12, fontweight=None) plt.yticks(fontsize=12, fontweight=None) plt.xlabel('number of components', fontsize=12, fontweight=None, color='k') plt.ylabel('cumulative explained variance (%)', fontsize=12, fontweight=None, color='k') print('PCA cumulative explained variance values', np.cumsum(self.pca_diag.explained_variance_ratio_)) self.spines plt.show() return self def pca2D(self, loadings = False, toggle_interactive = False): """Visualize PCA sepration at 2D for training data. Parameters ---------- loadings : False (default), boolean. Controls whether to show how each feature controls the PCA directionality and correlation. toggle_interactive : False (default), boolean. Controls whether or not to activate matplotlib interactive window. """ self.loadings = loadings self.pca1=self.pca_diag.explained_variance_ratio_[0] self.pca2=self.pca_diag.explained_variance_ratio_[1] if toggle_interactive == True: get_ipython().run_line_magic('matplotlib', 'qt') else: get_ipython().run_line_magic('matplotlib', 'inline') def myplot(score, vector, loadings = False, pca1 = self.pca1, pca2 = self.pca2, fet_col = self.xtrainfet): xvector = self.pca.components_[0] yvector = self.pca.components_[1] xs = score['PCA: 1'] ys = score['PCA: 2'] fig, ax = plt.subplots() sns.scatterplot(x='PCA: 1', y='PCA: 2', hue='Label', palette=sns.color_palette('bright', score['Label'].nunique()), data=score, linewidth=0, s=75, legend='full') if loadings ==True: for i in range(len(xvector)): # arrows project features (ie columns from csv) as vectors onto PC axes plt.arrow(0, 0, xvector[i]*max(xs), yvector[i]*max(ys), color='k', width=0.0005, head_width=0.0025) plt.text(xvector[i]*max(xs)*1.2, yvector[i]*max(ys)*1.2, list(fet_col.columns)[i], color='k') else: pass #Set labels plt.xlim(score['PCA: 1'].min() + (score['PCA: 1'].min()*.25), score['PCA: 1'].max() + (score['PCA: 1'].max()*.25)) plt.ylim(score['PCA: 2'].min() + (score['PCA: 2'].min()*.25), score['PCA: 2'].max() + (score['PCA: 2'].max()*.25)) round_pca1 = 3 #@param {type:"integer"} round_pca2 = 3 #@param {type:"integer"} plt.xlabel("PC1 {}%".format(round(pca1, round_pca1)*100), fontsize=12,color='k', fontweight= False) plt.ylabel("PC2 {}%".format(round(pca2, round_pca2)*100), fontsize=12,color='k', fontweight= False) #Set ticks plt.xticks(fontsize=12,color='k', fontweight=False) plt.yticks(fontsize=12,color='k', fontweight=False) plt.legend(fontsize='medium',bbox_to_anchor=(1.00, 1), loc='upper left') plt.axvline(0, color='k',linestyle='dashed') plt.axhline(0, color='k',linestyle='dashed') #Call the function. Use only the 2 PCs. myplot(self.pcatrain, np.transpose(self.pca.components_[0:2, :]), loadings = self.loadings, pca1 = self.pca1, pca2 = self.pca2, fet_col = self.xtrainfet) plt.show() return self def pca3D(self, toggle_interactive =True): """Function for plotting a 3D PCA plot if PCA components are greater than or equal to three. Parameters ---------- toggle_interactive : True (default), boolean. Controls whether or not to activate matplotlib interactive window.""" if self.pcatrain.shape[1] >= 3: if toggle_interactive == True: get_ipython().run_line_magic('matplotlib', 'qt') else: pass #Variables for plot loop pca_lab = list(self.pcatrain['Label'].unique()) markers = ['o', '^', 'v', 'D', 's', 'X', 'p', '+', 'X', '8', '<', '>'] coloring= ['blue', 'red', 'lime', 'purple', 'yellow', 'hotpink', 'black', 'darkorange', 'cyan'] #Grouping and plotting loop fig=plt.figure(figsize=(7,7)) axes = plt.subplot(111, projection='3d') for i, j, h in zip(pca_lab, markers, coloring): grouper = self.pcatrain.groupby('Label') setter = grouper.get_group(i) x=setter['PCA: 1'] y=setter['PCA: 2'] z=setter['PCA: 3'] axes.scatter(x, y, z, label=i, marker=j, c=h, s=40, edgecolors='black', alpha=1) #Dashed 0 line z axis xz=[0, 0] yz=[0, 0] zz=[self.pcatrain['PCA: 3'].min(), self.pcatrain['PCA: 3'].max()] axes.plot(xz, yz, zz, c='black', linestyle='dashed') #Dashed 0 line x axis xx=[self.pcatrain['PCA: 1'].min(),self.pcatrain['PCA: 1'].max()] yx=[0,0] zx=[0,0] axes.plot(xx, yx, zx, c='black', linestyle='dashed') #Dashed 0 line y axis xy=[0,0] yy=[self.pcatrain['PCA: 2'].min(),self.pcatrain['PCA: 2'].max()] zy=[0,0] axes.plot(xy, yy, zy, c='black', linestyle='dashed') #Setting background panes axes.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) axes.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) axes.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) #setting grid line color #RGB Scale axes.xaxis._axinfo["grid"]['color'] = (1,1,1,1) axes.yaxis._axinfo["grid"]['color'] = (1,1,1,1) axes.zaxis._axinfo["grid"]['color'] = (1,1,1,1) #Setting plot limits axes.set_xlim3d(round(self.pcatrain['PCA: 1'].min(),1), round(self.pcatrain['PCA: 1'].max(),1)) axes.set_ylim3d(round(self.pcatrain['PCA: 2'].min(),1), round(self.pcatrain['PCA: 2'].max(),1)) axes.set_zlim3d(round(self.pcatrain['PCA: 3'].min(),1), round(self.pcatrain['PCA: 3'].max(),1)) #Setting Labels self.pca3=self.pca_diag.explained_variance_ratio_[2] round_pca1 = 3 #@param {type:"integer"} round_pca2 = 3 #@param {type:"integer"} round_pca3 = 3 #@param {type:"integer"} axes.set_xlabel("PC1 {}%".format(round((self.pca1*100), round_pca1)),#pca. explained_variance_ratio_[0],2)*100), fontsize=12,color='k', fontweight= False, labelpad=5) axes.set_ylabel("PC2 {}%".format(round((self.pca2*100), round_pca2)), fontsize=12,color='k', fontweight= False, labelpad=7) axes.set_zlabel("PC3 {}%".format(round((self.pca3*100), round_pca3)), fontsize=12,color='k', fontweight= False, labelpad=7) #Set legend horizontal = 0.5 #@param {type:"number"} vertical = -0.4#@param {type:"number"} plt.legend(loc="best", bbox_to_anchor=(horizontal, vertical,1,1)) plt.show() return def set_final_model(self, model_params = 'best', x_fit = None, y_fit = None): """ Fit the model to be used for unknown prediction. Parameters: ----------- model_params : str or dict, if 'best' uses the top performing model selected by AGONS from parameter_table attribute. Else, insert a dict using parameter_table attribute .iloc[row, 0:9].to_dict() to select a different parameter subset. x_fit : DataFrame or numpy array, used to fit the final decided model.y_fit : DataFrame, series or numpy array, used to fit the final decided model.""" self.model_params = model_params self.x_fit = x_fit self.y_fit = y_fit if self.model_params == 'best': self.model_params = self.top_score.iloc[0, 0:9].to_dict() else: pass pipe = Pipeline([ ('anova', SelectKBest(f_classif)), ('scaler', MinMaxScaler()), ('pca', PCA()), ('svm', SVC(probability=True)) ]) self.final_model = pipe.set_params(**self.model_params) self.final_model = self.final_model.fit(self.x_fit, self.y_fit) def predict(self, xtest, ytest): self.xtest = xtest self.ytest = ytest self.ypred = self.final_model.predict(xtest) return self.ypred def predict_probe(self, xtest, ytest): self.xtest = xtest self.ytest = ytest self.ypred_prob = self.final_model.predict_proba(xtest) return self.ypred_prob # %%
AGONS-nano
/AGONS_nano-1.1.9.6.tar.gz/AGONS_nano-1.1.9.6/AGONS_nano/AGONSModule.py
AGONSModule.py
import pandas as pd from bs4 import BeautifulSoup import requests import warnings from tqdm import tqdm warnings.filterwarnings('ignore') class Parser: default_sites_url = ['https://классификация-туризм.рф/displayAccommodation/index?Accommodation%5BFullName%5D=&Accommodation%5BRegion%5D=%27%20+%20region%20+%20%27&Accommodation%5BKey%5D=&Accommodation%5BOrganizationId%5D=&Accommodation%5BCertificateNumber%5D=&Accommodation%5BInn%5D=&Accommodation%5BOgrn%5D=&Accommodation%5BSolutionNumber%5D=&yt0=Найти&Accommodation%25BFullName%25D=&AccommodationRegion=%25Москва%25&AccommodationKey=&AccommodationOrganizationId=&AccommodationCertificateNumber=&AccommodationBInn=&AccommodationBOgrn=&AccommodationSolutionNumber=&yt0=Найти&Accommodation_page=1'] default_sites = 'классификация-туризм' def __init__(self, site): if site == 'классификация-туризм': self.site = site else: self._site = None def get_sites(self): self.default_sites_url.append(self._site) return(self.default_sites_url) def get_regions(self): turism_url = self.default_sites_url headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'} r = requests.get(turism_url[0], headers = headers) soup = BeautifulSoup(r.text, "html.parser") regions = [] for i in soup.select('option'): if i.text == 'Выберите ': continue elif(i.text == 'Выберите организацию'): break else: regions.append(i.text) return regions def search_string(self,region, url = 'off', status_code = 'off', page = '1',headers = {'User-Agent': \ 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'}): turism_url = 'https://xn----7sba3acabbldhv3chawrl5bzn.xn--p1ai/displayAccommodation/index?Accommodation%5BFullName%5D=&Accommodation%5BRegion%5D=' + region + '&Accommodation%5BKey%5D=&Accommodation%5BOrganizationId%5D=&Accommodation%5BCertificateNumber%5D=&Accommodation%5BInn%5D=&Accommodation%5BOgrn%5D=&Accommodation%5BSolutionNumber%5D=&yt0=%D0%9D%D0%B0%D0%B9%D1%82%D0%B8&Accommodation%25BFullName%25D=&AccommodationRegion=%25%D0%9C%D0%BE%D1%81%D0%BA%D0%B2%D0%B0%25&AccommodationKey=&AccommodationOrganizationId=&AccommodationCertificateNumber=&AccommodationBInn=&AccommodationBOgrn=&AccommodationSolutionNumber=&yt0=%D0%9D%D0%B0%D0%B9%D1%82%D0%B8&Accommodation_page=' + page r = requests.get(turism_url, headers = headers, timeout = None) if status_code == 'on': print(r.status_code) #print(r.url) soup = BeautifulSoup(r.text, "html.parser") #tag_lst = soup.find_all(class_ = "page") #max_page_info = tag_lst[-1].findChild("a")['href'] #max_page = max_page_info.split('javascript:goToPage')[1][1:-1] max_page_info = soup.select("li.last a") #print(max_page_info) turism_url_lst = [] if len(max_page_info) == 0: turism_url_lst.append(turism_url) return turism_url_lst for element in max_page_info: max_page = element.attrs.get('href').split('Accommodation_page=')[1] #print(max_page) if int(max_page) == 1: turism_url_lst.append(turism_url) return turism_url_lst print(turism_url) print(max_page) #if int(element.attrs.get('href').split('Accommodation_page=')[1]) == 1: # return turism_url for page_ in tqdm(range(1, int(max_page) + 1)): turism_url = turism_url.split('Accommodation_page=')[0] + 'Accommodation_page=' + str(page_) if url == 'on': print(turism_url) turism_url_lst.append(turism_url) return turism_url_lst def search_table_url(self, turism_url_lst, url = 'off', status_code = 'off',headers = {'User-Agent': \ 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'}): hotel_url_lst = [] for url in tqdm(turism_url_lst): r = requests.get(url, headers = headers) if status_code == 'on': print(r.status_code) soup = BeautifulSoup(r.text, "html.parser") regnumberlst = [] for link in soup.find_all('a'): #print(link.get('href')) regnumberlst.append(link.get("href")) for link in regnumberlst: if link != None and link.find('displayAccommodation/') != -1: cnt = 0 for i in range(len(link.split('/displayAccommodation/')[1])): if link.split('/displayAccommodation/')[1][i].isdigit(): cnt += 1 if cnt == len(link.split('/displayAccommodation/')[1]): #print(link) hotel_url_lst.append(link) hotel_url_lst_ = [] for element in set(hotel_url_lst): #print(element) hotel_url_lst_.append(element) for element in range(len(hotel_url_lst_)): hotel_url_lst_[element] = 'https://классификация-туризм.рф' + hotel_url_lst_[element] if url == 'on': print(hotel_url_lst_[element]) return hotel_url_lst_ def hotel_table_get_df(self, hotel_url_lst, url = 'off', status_code = 'off'): df = pd.DataFrame(columns = ['Порядковый номер в Федеральном перечне', 'Тип','Название гостиницы', 'Название организации', 'ИНН', 'ОГРЭН', 'Регион','Адрес места нахождения', 'Почтовый индекс', 'Сайт','E-mail', 'Телефон', 'Звездность', 'Дата присвоения звёздности', 'Регистрационный номер', 'Регистрационный номер свидетельства', 'Дата выдачи', 'Срок действия до', 'Статус', 'Количество номеров']) headers = {'User-Agent': \ 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'} for i in tqdm(range(len(hotel_url_lst))): url = hotel_url_lst[i] #print(i) if url == 'on': print(url) r = requests.get(url, headers = headers) if status_code == 'on': print(r.status_code) soup = BeautifulSoup(r.text, "html.parser") if len(soup.select('div.detail-field')[0].find_all('span', class_ = 'detail-value')) != 0: Serial_number_in_the_Federal_List = soup.select('div.detail-field')[0].find_all('span', class_ = 'detail-value')[0].text else: Serial_number_in_the_Federal_List = None if len(soup.select('div.detail-field')[1].find_all('span', class_ = 'detail-value')) != 0: kind = soup.select('div.detail-field')[1].find_all('span', class_ = 'detail-value')[0].text else: kind = None if len(soup.select('div.detail-field')[3].find_all('span', class_ = 'detail-value')) != 0: name = soup.select('div.detail-field')[3].find_all('span', class_ = 'detail-value')[0].text else: name = None if len(soup.select('div.detail-field')[4].find_all('span', class_ = 'detail-value')) != 0: organization = soup.select('div.detail-field')[4].find_all('span', class_ = 'detail-value')[0].text else: organization = None if len(soup.select('div.detail-field')[5].find_all('span', class_ = 'detail-value')) != 0: region = soup.select('div.detail-field')[5].find_all('span', class_ = 'detail-value')[0].text else: region = None if len(soup.select('div.detail-field')[6].find_all('span', class_ = 'detail-value')) != 0: inn = soup.select('div.detail-field')[6].find_all('span', class_ = 'detail-value')[0].text else: inn = None if len(soup.select('div.detail-field')[7].find_all('span', class_ = 'detail-value')) != 0: ogrn = soup.select('div.detail-field')[7].find_all('span', class_ = 'detail-value')[0].text else: ogrn = None if len(soup.select('div.detail-field')[8].find_all('span', class_ = 'detail-value')) != 0: adress = soup.select('div.detail-field')[8].find_all('span', class_ = 'detail-value')[0].text else: adress = None if len(soup.select('div.detail-field')[8].find_all('span', class_ = 'detail-value')) != 0: postcode = adress.replace(' ', '') postcode_ = postcode.split(',') for ind in postcode_: if len(ind) == 6: if ind.isdigit() == True: postcode = ind break else: postcode = None else : postcode = None else: postcode = None if len(soup.select('div.detail-field')[9].find_all('span', class_ = 'detail-value')) != 0: number_ = soup.select('div.detail-field')[9].find_all('span', class_ = 'detail-value')[0].text.replace(' ', '').replace(')', '').replace('(', '').replace('-', '').replace('+7', '') if len(number_) > 2 : if number_[0] == '8': number_ = number_[1:] number = '+7' + '(' + number_[0:3] + ')' + '-' + number_[3:6] + '-' + number_[6:8] + '-' + number_[8:] else: number = None else: number = None if len(soup.select('div.detail-field')[11].find_all('span', class_ = 'detail-value'))!= 0: email = soup.select('div.detail-field')[11].find_all('span', class_ = 'detail-value')[0].text else: email = None if len(soup.select('div.detail-field')[12].find_all('span', class_ = 'detail-value')) != 0: site = soup.select('div.detail-field')[12].find_all('span', class_ = 'detail-value')[0].text else: site = None classification_info = len(soup.select('div.classification-info')) - 1 type_of_linces = len(soup.select('div.classification-info')[classification_info]) if type_of_linces == 21: stars = soup.select('div.classification-info')[classification_info].find_all('span', class_ = 'detail-value')[0].text stars_data = soup.select('div.classification-info')[classification_info].find_all('span', class_ = 'detail-value')[2].text reg_number = soup.select('div.classification-info')[classification_info].find_all('span', class_ = 'detail-value')[1].text reg_number_sertificat = soup.select('div.classification-info')[classification_info].find_all('span', class_ = 'detail-value')[3].text data_start = soup.select('div.classification-info')[classification_info].find_all('span', class_ = 'detail-value')[4].text data_stop = soup.select('div.classification-info')[classification_info].find_all('span', class_ = 'detail-value')[5].text status = None else: stars = soup.select('div.classification-info')[classification_info].find_all('span', class_ = 'detail-value')[0].text stars_data = soup.select('div.classification-info')[classification_info].find_all('span', class_ = 'detail-value')[2].text reg_number = soup.select('div.classification-info')[classification_info].find_all('span', class_ = 'detail-value')[1].text reg_number_sertificat = soup.select('div.classification-info')[classification_info].find_all('span', class_ = 'detail-value')[3].text data_start = soup.select('div.classification-info')[classification_info].find_all('span', class_ = 'detail-value')[4].text data_stop = soup.select('div.classification-info')[classification_info].find_all('span', class_ = 'detail-value')[5].text status = soup.select('div.classification-info')[classification_info].find_all('span', class_ = 'detail-value')[6].text rooms_index = len(soup.select('table.rooms-output')[0].find_all('td')[3:]) + 2 rooms = 0 for element in range(5, rooms_index, 4): #print(element) #print(soup.select('table.rooms-output')[0].find_all('td')[element].text) if len(soup.select('table.rooms-output')[0].find_all('td')[element].text.replace('n', '').replace("r",'').replace("\\",'')) != 0: rooms += int(soup.select('table.rooms-output')[0].find_all('td')[element].text) new_row = {'Порядковый номер в Федеральном перечне': Serial_number_in_the_Federal_List,\ 'Тип': kind, 'Название гостиницы': name, 'Название организации': organization,\ 'ИНН': inn, 'ОГРЭН': ogrn, 'Регион': region, 'Адрес места нахождения':adress,\ 'Почтовый индекс': postcode, 'Сайт':site, 'E-mail':email, 'Телефон': number, 'Звездность':stars,\ 'Дата присвоения звёздности':stars_data, 'Регистрационный номер':reg_number, 'Регистрационный номер свидетельства': reg_number_sertificat,\ 'Дата выдачи': data_start, 'Срок действия до':data_stop, 'Статус':status, 'Количество номеров':rooms} df = df.append(new_row, ignore_index=True) return df
AGParser
/AGParser-0.4.0-py3-none-any.whl/AG_Parser/parserr.py
parserr.py
import asyncio from cprint import * import time,os from argox.scoketscanner.socketscanner import portscanner from argox.pyffuf.pyffuf import FUZZER from argox.spider.spider import WebSpider from argox.reverse.reverse import DnsEnum from argox.cmsDetect.cmsDetect import analyzier class Argo: def __init__(self,url): self.url = url self.now = time.time() def scanport(self, host=None, fullscan=False,debug=False): if host is None: host = self.url pscan = portscanner(host,fullscan,debug) loop = asyncio.get_event_loop() future = asyncio.ensure_future(pscan.main(loop)) loop.run_until_complete(future) cprint.info(str('Total time: {time}'.format(time=time.time() - self.now))) return future.result() def fuzzer(self, wordlist=None): if wordlist is None: list = r"/usr/share/wordlists/dirb/common.txt" if os.path.isfile(list): pass else: return "Please specify wordlist" else: list = wordlist fuzz = FUZZER(self.url, list) if "PUFF" not in self.url: return ["PUFF keyword not found!"] #data = fuzz.main() loop = asyncio.get_event_loop() future = asyncio.ensure_future(fuzz.main()) loop.run_until_complete(future) cprint.info(str('Total time: {time}'.format(time=time.time() - self.now))) return future.result() def webspider(self): spider = WebSpider(self.url) loop = asyncio.get_event_loop() future = asyncio.ensure_future(spider.run()) loop.run_until_complete(future) cprint.info(str('Total time: {time}'.format(time=time.time() - self.now))) return future.result() def DnsEnum(self): denum = DnsEnum(self.url) loop = asyncio.get_event_loop() future = asyncio.ensure_future(denum.run()) loop.run_until_complete(future) cprint.info(str('Total time: {time}'.format(time=time.time() - self.now))) return future.result() def analyze(self): cms = analyzier(self.url) data = cms.run() return data.replace("\t","").split(), data
AGROX
/AGROX-1.9-py3-none-any.whl/argox/argo.py
argo.py
from socket import gethostbyname, gethostbyaddr import asyncio import subprocess import dns.resolver from cprint import * class DnsEnum: def __init__(self, host): self.url = host.replace("http://","").replace("https://","") self.x = [] async def getnamebyip(self): host = "" try: try: new = dns.resolver.query(self.url.replace("http://", "").replace("https://", ""), "A") for A in new: host = A.to_text() except: pass if host: name = gethostbyaddr(host) else: name = gethostbyaddr(self.url) try: domain = subprocess.check_output("nslookup -querytype=PTR " + host, shell=True) name = str(name) + "\n"+str(domain) except Exception as e: cprint.err(e) return name except Exception as e: cprint.err(e) return "1" async def get_records(self): ids = [ "NONE", "A", "NS", "MD", "MF", "CNAME", "SOA", "MB", "MG", "MR", "NULL", "WKS", "PTR", "HINFO", "MINFO", "MX", "TXT", "RP", "AFSDB", "X25", "ISDN", "RT", "NSAP", "NSAP-PTR", "SIG", "KEY", "PX", "GPOS", "AAAA", "LOC", "NXT", "SRV", "NAPTR", "KX", "CERT", "A6", "DNAME", "OPT", "APL", "DS", "SSHFP", "IPSECKEY", "RRSIG", "NSEC", "DNSKEY", "DHCID", "NSEC3", "NSEC3PARAM", "TLSA", "HIP", "CDS", "CDNSKEY", "CSYNC", "SPF", "UNSPEC", "EUI48", "EUI64", "TKEY", "TSIG", "IXFR", "AXFR", "MAILB", "MAILA", "ANY", "URI", "CAA", "TA", "DLV", ] for a in ids: try: answers = dns.resolver.query(self.url, a) for rdata in answers: self.x.append(str(a + ":" + rdata.to_text())) except: pass async def run(self): domain = await self.getnamebyip() txt = await self.get_records() self.x.append(domain) return self.x
AGROX
/AGROX-1.9-py3-none-any.whl/argox/reverse/reverse.py
reverse.py
import argparse import asyncio import sys from datetime import datetime import aiohttp from bs4 import BeautifulSoup as bs from clint.textui import puts, colored, indent from argox.pyffuf.checker import CHECKER class FUZZER: def __init__(self, urlx, wordlist): self.urlx = urlx self.wordlist = wordlist self.totalsend = 1 self.totalsuc = 0 self.total = 0 async def checkpage(self, html): soup = bs(html, 'html.parser') try: chex = str(soup.select_one('title').text).upper() # print(chex) if "NOT FOUND" in chex or "404" in chex: return False else: return True except: pass async def printf(self, cldata, data, status_code="", responsex=""): try: with indent(4, quote='>>>'): if status_code != 404 and status_code != 403: if await self.checkpage(responsex): puts(colored.green(str(cldata)) + colored.green(data) + colored.green(str(status_code))) self.totalsuc = 1 + self.totalsuc return str(data) + " " + str(status_code) else: print('>>> ' + colored.red(str(cldata)) + data, end="\r") else: print('>>> ' + colored.red(str(cldata)) + data, end="\r") except KeyboardInterrupt: pass async def fetch(self, session, url): try: async with session.get(url, ssl=False) as response: txt = await response.text() data = await self.printf(f"[{self.totalsend}/{self.total}]" + " ", url + " > ", status_code=response.status, responsex=txt) self.totalsend = 1 + self.totalsend return data except Exception as e: await self.printf("Error : " + str(e), "") self.totalsend = 1 + self.totalsend async def readlist(self): try: async with aiohttp.ClientSession() as session: with open(self.wordlist, mode='r') as f: tasks = [self.fetch(session, str(self.urlx.replace( "PUFF", line).replace("\n", ""))) for line in f] data = await asyncio.gather(*tasks) return data except KeyboardInterrupt: self.done() return 0 except UnicodeDecodeError: print('>>> ' + colored.red(str("There is a encoding error Please use a diffrent wordlist!")), end="\r") def done(self): with indent(4, quote='>>>'): puts(colored.red(str(">>>>>>>>>>>>>>>>>>>>>>>>>>>> DONE!"))) puts(colored.red(str("Total Dir Found : ")) + str(self.totalsuc)) puts(colored.red(str("End Time : ")) + str(datetime.now().strftime('%H:%M:%S'))) sys.exit() async def fucker(self,to): for i in to: yield i async def main(self): url = [] ch = CHECKER(self.urlx, self.wordlist) self.total = ch.check() #self.fuzz() nicedata = await self.readlist() async for element in self.fucker(nicedata): if element != None: url.append(element) else: pass return url
AGROX
/AGROX-1.9-py3-none-any.whl/argox/pyffuf/pyffuf.py
pyffuf.py
# Abstract games stuff ## Install `pip install AGStuff` ## Requirements - python>=3.6 ## Usage ### Cards ```python >>> from agstuff.cards.core import Card, Cards, Deck >>> >>> card1 = Card('As') >>> card1 A♠ >>> card2 = Card('8d') >>> card2 8♦ >>> card1 > card2 True >>> >>> cards = Cards('As/Ks/Qs') >>> cards [A♠, K♠, Q♠] >>> card1 in cards True >>> >>> cards = Cards(cards=(card2, Card('Tc'), Card('4h'))) >>> cards [8♦, 4♥, T♣] >>> card2 in cards True >>> >>> deck = Deck() >>> cards = Cards() >>> cards.pull(deck, 5) >>> cards [9♥, 2♦, 8♣, 6♠, K♥] ``` ### Dices ```python >>> from agstuff.dices.core import Dice, DiceBox >>> >>> dice1 = Dice(faces_count=6) >>> dice1 1 of [1, 2, 3, 4, 5, 6] >>> dice2 = Dice(6) >>> dice2 6 of [1, 2, 3, 4, 5, 6] >>> dice1.rolling() 5 >>> dice1 5 of [1, 2, 3, 4, 5, 6] >>> dice2.rolling() 1 >>> dice2 1 of [1, 2, 3, 4, 5, 6] >>> dice1 + dice2 6 >>> dice1 > dice2 True >>> >>> dice3 = Dice(faces_items=('Q', 'W', 'E', 'R', 'T', 'Y')) >>> dice3 E of ['Q', 'W', 'E', 'R', 'T', 'Y'] >>> dice3.rolling() 'Y' >>> dice3 Y of ['Q', 'W', 'E', 'R', 'T', 'Y'] >>> >>> dice_box = DiceBox() >>> dice_box.add(dice1) >>> dice_box.add(dice2) >>> dice_box.rolling() 8 >>> dice1 5 of [1, 2, 3, 4, 5, 6] >>> dice2 3 of [1, 2, 3, 4, 5, 6] >>> dice1.value 5 >>> dice2.value 3 ```
AGStuff
/AGStuff-1.1.0.tar.gz/AGStuff-1.1.0/README.md
README.md
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # -*- coding: utf-8 -*- import random from collections.abc import Iterable from agstuff.exceptions.dices.core import ( DiceEmptyInialItemsError, DiceWrongFacesCountTypeError, DiceWrongFacesCountError, DiceWrongFacesItemsTypeError, DiceWrongFacesItemsCountError, DiceBoxWrongItemAdditionError, ) class Dice: """ Customizable dice. Could be created by faces count or faces items. Faces count is an integer greater or equal MIN_FACES_COUNT. If dice is created by faces count its faces will be integers from 1 to faces count number. ```python >>> dice = Dice(faces_count=6) >>> print(dice.items) [1, 2, 3, 4, 5, 6] ``` Faces items is an iterable contains comparable and addible to each other objects. ```python >>> dice = Dice(faces_items='QWERTY') >>> print(dice.items) ['Q', 'W', 'E', 'R', 'T', 'Y'] ``` """ MIN_FACES_COUNT = 2 def __init__(self, faces_count=None, faces_items=None): if faces_count: if not isinstance(faces_count, int): raise DiceWrongFacesCountTypeError() if faces_count < self.MIN_FACES_COUNT: raise DiceWrongFacesCountError(self.MIN_FACES_COUNT) self.items = list(range(1, faces_count + 1)) elif faces_items: if not isinstance(faces_items, Iterable): raise DiceWrongFacesItemsTypeError() faces_items = list(faces_items) if len(faces_items) < self.MIN_FACES_COUNT: raise DiceWrongFacesItemsCountError(self.MIN_FACES_COUNT) self.items = faces_items else: raise DiceEmptyInialItemsError() self._value = None self.rolling() def __str__(self): return f'{self._value} of {self.items}' def __repr__(self): return f'{self._value} of {self.items}' def __lt__(self, other): return self.value < other.value def __gt__(self, other): return self.value > other.value def __eq__(self, other): return self.value == other.value def __ne__(self, other): return self.value != other.value def __add__(self, other): if not other: return self.value elif isinstance(other, Dice): return self.value + other.value return self.value + other @property def value(self): return self._value def rolling(self): self._value = random.choice(self.items) return self._value class DiceBox: """ Multiple dices handler. """ def __init__(self): self.items = [] def add(self, dice): if not isinstance(dice, Dice): raise DiceBoxWrongItemAdditionError() self.items.append(dice) def rolling(self): result = None for dice in self.items: dice.rolling() result = dice + result return result
AGStuff
/AGStuff-1.1.0.tar.gz/AGStuff-1.1.0/agstuff/dices/core.py
core.py
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # -*- coding: utf-8 -*- import random from agstuff.exceptions.cards import ( CardWeightSymbolError, CardSuitSymbolError, DeckCountTypeError, DeckCountNumberError, CardsStringTypeError, CardsCardTypeError ) from agstuff.validators.cards import CardSymbolValidator class Card: """ Some card from standard 52 cards deck. Takes one positional argument consisting of two symbols 1st symbol is card weight one of '1' (Ace), '2' (Two), '3' (Three), '4' (Four), '5' (Five), '6' (Six), '7' (Seven), '8' (Eight), '9' (Nine), 'T' (Ten), 'J' (Jack), 'Q' (Queen), 'K' (King), 'A' (Ace). 2nd symbol is card suit one of 'c' (clubs), 'd' (diamonds), 'h' (hearts), 's' (spades). Five of spades looks like Card('5s'). Also possible create an abstract card (with one symbol). Abstract Five looks like Card('5'). Abstract spades looks like Card('s'). Keyword arguments are ignored. Positional arguments are ignored since 2nd one. Symbols in 1st positional argument are ignored since 3rd one. """ class Weight: """ Card weight. From Two to Ace (Two, Three, Four, Five, Six, Seven, Eight, Nine, Ten, Jack, Queen, King, Ace). Ace actually may worth smaller than Two at some point. Attributes: symbol -- one of '1' (Ace), '2' (Two), '3' (Three), '4' (Four), '5' (Five), '6' (Six), '7' (Seven), '8' (Eight), '9' (Nine), 'T' (Ten), 'J' (Jack), 'Q' (Queen), 'K' (King), 'A' (Ace). Five looks like Weight('5'). """ SYMBOLS = '123456789TJQKA' REAL_SYMBOLS = '23456789TJQKA' NAMES = 'Ace/Two/Three/Four/Five/Six/Seven/Eight/Nine/Ten/Jack/Queen/King/Ace'.split('/') NUMBERS_BY_SYMBOLS = {s: i for i, s in enumerate(SYMBOLS)} NAMES_BY_SYMBOLS = dict(zip(SYMBOLS, NAMES)) @CardSymbolValidator(SYMBOLS, CardWeightSymbolError) def __init__(self, symbol): self.symbol = symbol self.number = self.NUMBERS_BY_SYMBOLS[symbol] self.name = self.NAMES_BY_SYMBOLS[symbol] def __str__(self): return self.symbol def __repr__(self): return self.symbol def __lt__(self, other): return self.number < other.number def __gt__(self, other): return self.number > other.number def __eq__(self, other): return self.number == other.number def __ne__(self, other): return self.number != other.number class Suit: """ Card suit. May be clubs, diamonds, hearts or spades. Attributes: symbol -- one of 'c' (clubs), 'd' (diamonds), 'h' (hearts), 's' (spades). Spades looks like Suit('s'). """ SYMBOLS = 'cdhs' PRETTY_SYMBOLS = { 'c': '\u2663', 'd': '\u2666', 'h': '\u2665', 's': '\u2660' } NAMES = 'clubs/diamonds/hearts/spades'.split('/') NUMBERS_BY_SYMBOLS = {s: i for i, s in enumerate(SYMBOLS)} NAMES_BY_SYMBOLS = dict(zip(SYMBOLS, NAMES)) @CardSymbolValidator(SYMBOLS, CardSuitSymbolError) def __init__(self, symbol): self.symbol = symbol self.number = self.NUMBERS_BY_SYMBOLS[symbol] self.name = self.NAMES_BY_SYMBOLS[symbol] def __str__(self): return self.pretty_symbol def __repr__(self): return self.pretty_symbol def __eq__(self, other): return self.symbol == other.symbol def __ne__(self, other): return self.symbol != other.symbol @property def pretty_symbol(self): return self.PRETTY_SYMBOLS[self.symbol] def __init__(self, sign): # standard card with weight and suit if len(sign[:2]) == 2: self.weight = self.Weight(sign[0]) self.suit = self.Suit(sign[1]) self.name = f"{self.weight.name} of {self.suit.name}" # abstract card else: # with weight only try: self.weight = self.Weight(sign) self.suit = None self.name = self.weight.name # with suit only except CardWeightSymbolError: self.weight = None self.suit = self.Suit(sign) self.name = self.suit.name self.in_hand = False def __str__(self): weight = str(self.weight) if self.weight else 'X' suit = str(self.suit) if self.suit else 'x' return f"{weight}{suit}" def __repr__(self): weight = repr(self.weight) if self.weight else 'X' suit = repr(self.suit) if self.suit else 'x' return f"{weight}{suit}" def __hash__(self): waight_number = (self.weight.number + 1 if self.weight else 0) suit_number = (self.suit.number + 1 if self.suit else 0) return 10 * waight_number + suit_number def __lt__(self, other): return self.weight < other.weight def __gt__(self, other): return self.weight > other.weight def __eq__(self, other): return self.weight == other.weight if self.weight and other.weight else self.suit == other.suit def __ne__(self, other): return self.weight != other.weight if self.weight and other.weight else self.suit != other.suit class Deck: """ Standard 52 cards deck. There are 13 weights (Two, Three, Four, Five, Six, Seven, Eight, Nine, Ten, Jack, Queen, King, Ace) and 4 suits (clubs, diamonds, hearts, spades). """ def __init__(self, card=None): self.cards = [] self.refresh() def __str__(self): return str(self.cards) def __repr__(self): return repr(self.cards) @property def size(self): return len(self.cards) def push_cards(self, count): count_type = type(count) if not count_type is int: raise DeckCountTypeError(count_type) if count < 1 or count > len(self.cards): raise DeckCountNumberError(count) for i in range(count): yield self.cards.pop(random.choice(range(len(self.cards)))) def refresh(self): self.cards = [Card(f'{w}{s}') for w in Card.Weight.REAL_SYMBOLS for s in Card.Suit.SYMBOLS] class Cards: """ Several cards. Cards could be set by cards string or by some iterable of Card instanses Cards set of (Three of diamonds, Ten of clubs and Ace of spades) looks like Cards('3d/Tc/As') or Cards([Card('3d'), Card('Tc'), Card('As')]) Also cards could be set from deck after initialization """ def __init__(self, cards_string=None, cards=None, max_count=52): self.max_count = max_count if cards_string: cards_string_type = type(cards_string) if not cards_string_type is str: raise CardsStringTypeError(cards_string_type) self.items = [Card(sign) for sign in cards_string.split('/')[:max_count]] elif cards: items = list(set(cards))[:max_count] for card in items: card_type = type(card) if not card_type is Card: raise CardsCardTypeError(card_type) self.items = items else: self.items = [] def __str__(self): return str(self.items) def __repr__(self): return repr(self.items) def __contains__(self, item): return item in self.items @property def size(self): return len(self.items) def pull(self, deck, count): max_to_add = self.max_count - self.size count_to_add = max_to_add if count > max_to_add else count if count_to_add > 0: self.items.extend(deck.push_cards(count_to_add)) def clean(self): self.items = []
AGStuff
/AGStuff-1.1.0.tar.gz/AGStuff-1.1.0/agstuff/cards/core.py
core.py
import time from selenium import webdriver from selenium.webdriver.common.by import By from webdriver_manager.chrome import ChromeDriverManager from bs4 import BeautifulSoup import re import csv def get_cars(url, count_cars): driver = webdriver.Chrome(executable_path=ChromeDriverManager().install()) driver.get(url) soup = BeautifulSoup(driver.page_source, 'html.parser') time.sleep(4) accept_cookies = driver.find_element(By.XPATH, '//button[text()="I Accept"]') accept_cookies.click() time.sleep(3) cars = [] counter = 0 electricity_cars_count = 0 while True: soup = BeautifulSoup(driver.page_source, 'html.parser') allcars = soup.find_all('article', class_='list-item') for car in allcars: title = car.find('h2', class_='item-title').text price = re.sub("[^0-9]", "", car.find('div', class_='item-price').text) description = car.find('div', class_='item-description').text if "Electricity" in description: electricity_cars_count += 1 continue description_list = list(filter(None, map(str.strip, ''.join(description.splitlines()).split(',')))) if len(description_list) >= 5: liter = description_list[0].strip()[:3] if len(liter) > 3: liter = "N/A" fuel_type = description_list[1].strip() if len(fuel_type) > 12: fuel_type = "N/A" year = description_list[2].strip()[:4] if len(year) > 7: year = "N/A" transmission = description_list[3].strip()[:10] transmission = re.sub(r'(Automatic.*)', r'Automatic', transmission) if len(transmission) > 10: transmission = "N/A" city = description_list[-1].strip() else: liter = "N/A" fuel_type = "N/A" year = "N/A" transmission = "N/A" city = "N/A" cars.append({'title': title, 'price': price, 'liter': liter, 'fuel_type': fuel_type, 'year': year, 'transmission': transmission, 'city': city}) counter += 1 if counter >= count_cars: break if counter >= count_cars: break next_page_button = driver.find_elements(By.XPATH, ("//div[@class='next-page-inner']")) if not next_page_button: break else: next_page_button[0].click() time.sleep(3) print("Cars scraped: ", len(cars)) print("Skipped electric cars: ", electricity_cars_count) print("Cars list: ", cars) return cars def write_to_csv(cars): with open('cars.csv', mode='w', newline='') as csv_file: fieldnames = ['title', 'price', 'liter', 'fuel_type', 'year', 'transmission', 'city'] wrt = csv.DictWriter(csv_file, fieldnames=fieldnames) wrt.writeheader() for car in cars: car['title'] = car['title'].strip() car['price'] = car['price'].strip() wrt.writerow(car) def main(): url = "https://autogidas.lt/en/skelbimai/automobiliai/?f_1%5B0%5D=BMW&f_model_14%5B0%5D=520&f_215=&f_216=&f_41=&f_42=&f_376=" count_cars = 4 cars = get_cars(url,count_cars) write_to_csv(cars) main()
AGidasScraper
/AGidasScraper-0.5.tar.gz/AGidasScraper-0.5/ascraper/ascrape.py
ascrape.py